applied-ai-018 committed (verified)
Commit f507358 · 1 Parent(s): 9b5270d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/18.input_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/18.input_layernorm.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step120/zero/18.input_layernorm.weight/fp32.pt +3 -0
  4. venv/lib/python3.10/site-packages/torch/autograd/__init__.py +515 -0
  5. venv/lib/python3.10/site-packages/torch/autograd/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/torch/autograd/__pycache__/anomaly_mode.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/autograd/__pycache__/forward_ad.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/autograd/__pycache__/function.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/autograd/__pycache__/functional.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/autograd/__pycache__/grad_mode.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/autograd/__pycache__/gradcheck.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/autograd/__pycache__/graph.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_legacy.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_util.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/autograd/__pycache__/variable.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/autograd/_functions/__init__.py +1 -0
  18. venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/__init__.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/tensor.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/utils.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/autograd/_functions/tensor.py +63 -0
  22. venv/lib/python3.10/site-packages/torch/autograd/_functions/utils.py +62 -0
  23. venv/lib/python3.10/site-packages/torch/autograd/anomaly_mode.py +119 -0
  24. venv/lib/python3.10/site-packages/torch/autograd/forward_ad.py +227 -0
  25. venv/lib/python3.10/site-packages/torch/autograd/function.py +883 -0
  26. venv/lib/python3.10/site-packages/torch/autograd/functional.py +1182 -0
  27. venv/lib/python3.10/site-packages/torch/autograd/grad_mode.py +396 -0
  28. venv/lib/python3.10/site-packages/torch/autograd/gradcheck.py +2266 -0
  29. venv/lib/python3.10/site-packages/torch/autograd/graph.py +749 -0
  30. venv/lib/python3.10/site-packages/torch/autograd/profiler.py +1042 -0
  31. venv/lib/python3.10/site-packages/torch/autograd/profiler_legacy.py +303 -0
  32. venv/lib/python3.10/site-packages/torch/autograd/profiler_util.py +1178 -0
  33. venv/lib/python3.10/site-packages/torch/autograd/variable.py +14 -0
  34. venv/lib/python3.10/site-packages/torch/distributions/__init__.py +171 -0
  35. venv/lib/python3.10/site-packages/torch/distributions/bernoulli.py +130 -0
  36. venv/lib/python3.10/site-packages/torch/distributions/binomial.py +165 -0
  37. venv/lib/python3.10/site-packages/torch/distributions/continuous_bernoulli.py +235 -0
  38. venv/lib/python3.10/site-packages/torch/distributions/dirichlet.py +123 -0
  39. venv/lib/python3.10/site-packages/torch/distributions/exp_family.py +62 -0
  40. venv/lib/python3.10/site-packages/torch/distributions/fishersnedecor.py +98 -0
  41. venv/lib/python3.10/site-packages/torch/distributions/geometric.py +128 -0
  42. venv/lib/python3.10/site-packages/torch/distributions/gumbel.py +81 -0
  43. venv/lib/python3.10/site-packages/torch/distributions/half_cauchy.py +82 -0
  44. venv/lib/python3.10/site-packages/torch/distributions/half_normal.py +74 -0
  45. venv/lib/python3.10/site-packages/torch/distributions/kl.py +971 -0
  46. venv/lib/python3.10/site-packages/torch/distributions/kumaraswamy.py +97 -0
  47. venv/lib/python3.10/site-packages/torch/distributions/logistic_normal.py +54 -0
  48. venv/lib/python3.10/site-packages/torch/distributions/mixture_same_family.py +214 -0
  49. venv/lib/python3.10/site-packages/torch/distributions/multivariate_normal.py +262 -0
  50. venv/lib/python3.10/site-packages/torch/distributions/negative_binomial.py +133 -0
ckpts/universal/global_step120/zero/18.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad01436c399a30e54c45db8c6b8313628737f7fdc6939a76ab61d7c29054deaa
+ size 9372
ckpts/universal/global_step120/zero/18.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80f8df3c11c25505cda7aff772aab3184b918cbe1b84b7146cdc6b37fc1d33a1
+ size 9387
ckpts/universal/global_step120/zero/18.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec9763f703d3503eb2e5a786f5e0bd2379b616665c964c1b4646a759f1406bed
+ size 9293
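The three entries above are Git LFS pointer stubs; the actual tensors live in LFS storage. If the shards are fetched (e.g. with git lfs pull) and are, as the paths suggest, per-parameter optimizer state saved with torch.save — an assumption, not something this diff confirms — they can be inspected with a sketch like:

import torch

# Hypothetical inspection of one LFS-backed shard; assumes each *.pt file is a
# torch-serialized object (likely Adam moments and an fp32 master copy).
base = "ckpts/universal/global_step120/zero/18.input_layernorm.weight"
for name in ("exp_avg.pt", "exp_avg_sq.pt", "fp32.pt"):
    obj = torch.load(f"{base}/{name}", map_location="cpu")
    print(name, type(obj), getattr(obj, "shape", None))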
venv/lib/python3.10/site-packages/torch/autograd/__init__.py ADDED
@@ -0,0 +1,515 @@
1
+ """
2
+ ``torch.autograd`` provides classes and functions implementing automatic
3
+ differentiation of arbitrary scalar valued functions. It requires minimal
4
+ changes to the existing code - you only need to declare :class:`Tensor` s
5
+ for which gradients should be computed with the ``requires_grad=True`` keyword.
6
+ As of now, we only support autograd for floating point :class:`Tensor` types (
7
+ half, float, double and bfloat16) and complex :class:`Tensor` types (cfloat, cdouble).
8
+ """
9
+ import warnings
10
+ from typing import Any, Callable, cast, List, Optional, Sequence, Tuple, Union
11
+
12
+ import torch
13
+
14
+ from torch.types import _size, _TensorOrTensors, _TensorOrTensorsOrGradEdge
15
+ from .. import _vmap_internals
16
+ from ..overrides import handle_torch_function, has_torch_function, is_tensor_like
17
+ from . import forward_ad, functional, graph
18
+ from .anomaly_mode import detect_anomaly, set_detect_anomaly
19
+ from .function import Function, NestedIOFunction
20
+ from .grad_mode import (
21
+ _force_original_view_tracking,
22
+ _unsafe_preserve_version_counter,
23
+ enable_grad,
24
+ inference_mode,
25
+ no_grad,
26
+ set_grad_enabled,
27
+ set_multithreading_enabled,
28
+ )
29
+ from .gradcheck import gradcheck, gradgradcheck
30
+ from .graph import _engine_run_backward
31
+
32
+ from .variable import Variable
33
+
34
+ __all__ = ["Variable", "Function", "backward", "grad_mode"]
35
+
36
+ _OptionalTensor = Optional[torch.Tensor]
37
+ _ShapeorNestedShape = Union[_size, Sequence[_size], torch.Tensor]
38
+
39
+
40
+ def _calculate_shape(
41
+ output: torch.Tensor, grad: torch.Tensor, is_grads_batched: bool
42
+ ) -> Tuple[_ShapeorNestedShape, _ShapeorNestedShape]:
43
+ # is_same_size ensures that both tensors are either nested or non nested
44
+ # circular import
45
+ from torch.nested._internal.nested_tensor import NestedTensor
46
+
47
+ if output.is_nested and not isinstance(output, NestedTensor):
48
+ if is_grads_batched:
49
+ raise RuntimeError("Batched grads are not supported with Nested Tensor.")
50
+ out_shape = output._nested_tensor_size()
51
+ grad_shape = grad._nested_tensor_size()
52
+
53
+ return out_shape, grad_shape
54
+
55
+ reg_out_shape = output.shape
56
+ reg_grad_shape = grad.shape if not is_grads_batched else grad.shape[1:]
57
+ return reg_out_shape, reg_grad_shape
58
+
59
+
60
+ def _make_grads(
61
+ outputs: Sequence[torch.Tensor],
62
+ grads: Sequence[_OptionalTensor],
63
+ is_grads_batched: bool,
64
+ ) -> Tuple[_OptionalTensor, ...]:
65
+ new_grads: List[_OptionalTensor] = []
66
+ for out, grad in zip(outputs, grads):
67
+ if isinstance(grad, torch.Tensor):
68
+ from torch.fx.experimental.symbolic_shapes import expect_true, sym_eq
69
+
70
+ first_grad = grad if not is_grads_batched else grad[0]
71
+ # TODO: We can remove this conditional once we uniformly use
72
+ # singleton int to represent jagged dimension, so that size() call
73
+ # on nested tensor works
74
+ if out.is_nested or first_grad.is_nested:
75
+ shape_matches = torch.is_same_size(out, first_grad)
76
+ else:
77
+ # We need to do a regular size check, without going through
78
+ # the operator, to be able to handle unbacked symints
79
+ # (expect_true ensures we can deal with unbacked)
80
+ shape_matches = expect_true(sym_eq(out.size(), first_grad.size()))
81
+ if not shape_matches:
82
+ out_shape, grad_shape = _calculate_shape(
83
+ out, first_grad, is_grads_batched
84
+ )
85
+ if is_grads_batched:
86
+ raise RuntimeError(
87
+ "If `is_grads_batched=True`, we interpret the first "
88
+ "dimension of each grad_output as the batch dimension. "
89
+ "The sizes of the remaining dimensions are expected to match "
90
+ "the shape of corresponding output, but a mismatch "
91
+ "was detected: grad_output["
92
+ + str(grads.index(grad))
93
+ + "] has a shape of "
94
+ + str(grad_shape)
95
+ + " and output["
96
+ + str(outputs.index(out))
97
+ + "] has a shape of "
98
+ + str(out_shape)
99
+ + ". "
100
+ "If you only want some tensors in `grad_output` to be considered "
101
+ "batched, consider using vmap."
102
+ )
103
+ else:
104
+ raise RuntimeError(
105
+ "Mismatch in shape: grad_output["
106
+ + str(grads.index(grad))
107
+ + "] has a shape of "
108
+ + str(grad_shape)
109
+ + " and output["
110
+ + str(outputs.index(out))
111
+ + "] has a shape of "
112
+ + str(out_shape)
113
+ + "."
114
+ )
115
+ if out.dtype.is_complex != grad.dtype.is_complex:
116
+ raise RuntimeError(
117
+ "For complex Tensors, both grad_output and output"
118
+ " are required to have the same dtype."
119
+ " Mismatch in dtype: grad_output["
120
+ + str(grads.index(grad))
121
+ + "] has a dtype of "
122
+ + str(grad.dtype)
123
+ + " and output["
124
+ + str(outputs.index(out))
125
+ + "] has a dtype of "
126
+ + str(out.dtype)
127
+ + "."
128
+ )
129
+ new_grads.append(grad)
130
+ elif grad is None:
131
+ if out.requires_grad:
132
+ if out.numel() != 1:
133
+ raise RuntimeError(
134
+ "grad can be implicitly created only for scalar outputs"
135
+ )
136
+ if not out.dtype.is_floating_point:
137
+ msg = (
138
+ "grad can be implicitly created only for real scalar outputs"
139
+ f" but got {out.dtype}"
140
+ )
141
+ raise RuntimeError(msg)
142
+ new_grads.append(
143
+ torch.ones_like(out, memory_format=torch.preserve_format)
144
+ )
145
+ else:
146
+ new_grads.append(None)
147
+ else:
148
+ raise TypeError(
149
+ "gradients can be either Tensors or None, but got "
150
+ + type(grad).__name__
151
+ )
152
+ return tuple(new_grads)
153
+
154
+
155
+ def _tensor_or_tensors_to_tuple(
156
+ tensors: Optional[_TensorOrTensors], length: int
157
+ ) -> Tuple[_OptionalTensor, ...]:
158
+ if tensors is None:
159
+ return (None,) * length
160
+ if isinstance(tensors, torch.Tensor):
161
+ return (tensors,)
162
+ return tuple(tensors)
163
+
164
+
165
+ def backward(
166
+ tensors: _TensorOrTensors,
167
+ grad_tensors: Optional[_TensorOrTensors] = None,
168
+ retain_graph: Optional[bool] = None,
169
+ create_graph: bool = False,
170
+ grad_variables: Optional[_TensorOrTensors] = None,
171
+ inputs: Optional[_TensorOrTensorsOrGradEdge] = None,
172
+ ) -> None:
173
+ r"""Computes the sum of gradients of given tensors with respect to graph
174
+ leaves.
175
+
176
+ The graph is differentiated using the chain rule. If any of ``tensors``
177
+ are non-scalar (i.e. their data has more than one element) and require
178
+ gradient, then the Jacobian-vector product would be computed, in this
179
+ case the function additionally requires specifying ``grad_tensors``.
180
+ It should be a sequence of matching length, that contains the "vector"
181
+ in the Jacobian-vector product, usually the gradient of the differentiated
182
+ function w.r.t. corresponding tensors (``None`` is an acceptable value for
183
+ all tensors that don't need gradient tensors).
184
+
185
+ This function accumulates gradients in the leaves - you might need to zero
186
+ ``.grad`` attributes or set them to ``None`` before calling it.
187
+ See :ref:`Default gradient layouts<default-grad-layouts>`
188
+ for details on the memory layout of accumulated gradients.
189
+
190
+ .. note::
191
+ Using this method with ``create_graph=True`` will create a reference cycle
192
+ between the parameter and its gradient which can cause a memory leak.
193
+ We recommend using ``autograd.grad`` when creating the graph to avoid this.
194
+ If you have to use this function, make sure to reset the ``.grad`` fields of your
195
+ parameters to ``None`` after use to break the cycle and avoid the leak.
196
+
197
+ .. note::
198
+
199
+ If you run any forward ops, create ``grad_tensors``, and/or call ``backward``
200
+ in a user-specified CUDA stream context, see
201
+ :ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.
202
+
203
+ .. note::
204
+
205
+ When ``inputs`` are provided and a given input is not a leaf,
206
+ the current implementation will call its grad_fn (even though it is not strictly needed to get these gradients).
207
+ It is an implementation detail on which the user should not rely.
208
+ See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
209
+
210
+ Args:
211
+ tensors (Sequence[Tensor] or Tensor): Tensors of which the derivative will be
212
+ computed.
213
+ grad_tensors (Sequence[Tensor or None] or Tensor, optional): The "vector" in
214
+ the Jacobian-vector product, usually gradients w.r.t. each element of
215
+ corresponding tensors. None values can be specified for scalar Tensors or
216
+ ones that don't require grad. If a None value would be acceptable for all
217
+ grad_tensors, then this argument is optional.
218
+ retain_graph (bool, optional): If ``False``, the graph used to compute the grad
219
+ will be freed. Note that in nearly all cases setting this option to ``True``
220
+ is not needed and often can be worked around in a much more efficient
221
+ way. Defaults to the value of ``create_graph``.
222
+ create_graph (bool, optional): If ``True``, graph of the derivative will
223
+ be constructed, allowing to compute higher order derivative products.
224
+ Defaults to ``False``.
225
+ inputs (Sequence[Tensor] or Tensor or Sequence[GradientEdge], optional): Inputs w.r.t. which the gradient
226
+ will be accumulated into ``.grad``. All other Tensors will be ignored. If
227
+ not provided, the gradient is accumulated into all the leaf Tensors that
228
+ were used to compute the :attr:`tensors`.
229
+ """
230
+ if torch._C._are_functorch_transforms_active():
231
+ raise RuntimeError(
232
+ "backward() called inside a functorch transform. This is not "
233
+ "supported, please use functorch.grad or functorch.vjp instead "
234
+ "or call backward() outside of functorch transforms."
235
+ )
236
+
237
+ if grad_variables is not None:
238
+ warnings.warn("'grad_variables' is deprecated. Use 'grad_tensors' instead.")
239
+ if grad_tensors is None:
240
+ grad_tensors = grad_variables
241
+ else:
242
+ raise RuntimeError(
243
+ "'grad_tensors' and 'grad_variables' (deprecated) "
244
+ "arguments both passed to backward(). Please only "
245
+ "use 'grad_tensors'."
246
+ )
247
+ if inputs is not None and len(inputs) == 0:
248
+ raise RuntimeError("'inputs' argument to backward() cannot be empty.")
249
+
250
+ tensors = (tensors,) if isinstance(tensors, torch.Tensor) else tuple(tensors)
251
+ inputs = (
252
+ (inputs,)
253
+ if isinstance(inputs, (torch.Tensor, graph.GradientEdge))
254
+ else tuple(inputs)
255
+ if inputs is not None
256
+ else tuple()
257
+ )
258
+
259
+ grad_tensors_ = _tensor_or_tensors_to_tuple(grad_tensors, len(tensors))
260
+ grad_tensors_ = _make_grads(tensors, grad_tensors_, is_grads_batched=False)
261
+ if retain_graph is None:
262
+ retain_graph = create_graph
263
+
264
+ # The reason we repeat the same comment below is that
265
+ # some Python versions print out the first line of a multi-line function
266
+ # calls in the traceback and some print out the last line
267
+ _engine_run_backward(
268
+ tensors,
269
+ grad_tensors_,
270
+ retain_graph,
271
+ create_graph,
272
+ inputs,
273
+ allow_unreachable=True,
274
+ accumulate_grad=True,
275
+ )
276
+
277
+
278
+ def grad(
279
+ outputs: _TensorOrTensors,
280
+ inputs: _TensorOrTensorsOrGradEdge,
281
+ grad_outputs: Optional[_TensorOrTensors] = None,
282
+ retain_graph: Optional[bool] = None,
283
+ create_graph: bool = False,
284
+ only_inputs: bool = True,
285
+ allow_unused: Optional[bool] = None,
286
+ is_grads_batched: bool = False,
287
+ materialize_grads: bool = False,
288
+ ) -> Tuple[torch.Tensor, ...]:
289
+ r"""Computes and returns the sum of gradients of outputs with respect to
290
+ the inputs.
291
+
292
+ ``grad_outputs`` should be a sequence of length matching ``output``
293
+ containing the "vector" in vector-Jacobian product, usually the pre-computed
294
+ gradients w.r.t. each of the outputs. If an output doesn't require_grad,
295
+ then the gradient can be ``None``).
296
+
297
+ .. note::
298
+
299
+ If you run any forward ops, create ``grad_outputs``, and/or call ``grad``
300
+ in a user-specified CUDA stream context, see
301
+ :ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.
302
+
303
+ .. note::
304
+
305
+ ``only_inputs`` argument is deprecated and is ignored now (defaults to ``True``).
306
+ To accumulate gradient for other parts of the graph, please use
307
+ ``torch.autograd.backward``.
308
+
309
+ Args:
310
+ outputs (sequence of Tensor): outputs of the differentiated function.
311
+ inputs (sequence of Tensor or GradientEdge): Inputs w.r.t. which the gradient will be
312
+ returned (and not accumulated into ``.grad``).
313
+ grad_outputs (sequence of Tensor): The "vector" in the vector-Jacobian product.
314
+ Usually gradients w.r.t. each output. None values can be specified for scalar
315
+ Tensors or ones that don't require grad. If a None value would be acceptable
316
+ for all grad_tensors, then this argument is optional. Default: None.
317
+ retain_graph (bool, optional): If ``False``, the graph used to compute the grad
318
+ will be freed. Note that in nearly all cases setting this option to ``True``
319
+ is not needed and often can be worked around in a much more efficient
320
+ way. Defaults to the value of ``create_graph``.
321
+ create_graph (bool, optional): If ``True``, graph of the derivative will
322
+ be constructed, allowing to compute higher order derivative products.
323
+ Default: ``False``.
324
+ allow_unused (Optional[bool], optional): If ``False``, specifying inputs
325
+ that were not used when computing outputs (and therefore their grad is
326
+ always zero) is an error. Defaults to the value of ``materialize_grads``.
327
+ is_grads_batched (bool, optional): If ``True``, the first dimension of each
328
+ tensor in ``grad_outputs`` will be interpreted as the batch dimension.
329
+ Instead of computing a single vector-Jacobian product, we compute a
330
+ batch of vector-Jacobian products for each "vector" in the batch.
331
+ We use the vmap prototype feature as the backend to vectorize calls
332
+ to the autograd engine so that this computation can be performed in a
333
+ single call. This should lead to performance improvements when compared
334
+ to manually looping and performing backward multiple times. Note that
335
+ due to this feature being experimental, there may be performance
336
+ cliffs. Please use ``torch._C._debug_only_display_vmap_fallback_warnings(True)``
337
+ to show any performance warnings and file an issue on github if warnings exist
338
+ for your use case. Defaults to ``False``.
339
+ materialize_grads (bool, optional): If ``True``, set the gradient for unused inputs
340
+ to zero instead of None. This is useful when computing higher-order derivatives.
341
+ If ``materialize_grads`` is ``True`` and ``allow_unused`` is ``False``, an error
342
+ will be raised. Defaults to ``False``.
343
+
344
+ """
345
+ if materialize_grads and allow_unused is False:
346
+ raise ValueError(
347
+ "Expected allow_unused to be True or not passed when materialize_grads=True, "
348
+ "but got: allow_unused=False."
349
+ )
350
+ if allow_unused is None:
351
+ allow_unused = materialize_grads
352
+ t_outputs = cast(
353
+ Tuple[torch.Tensor, ...],
354
+ (outputs,) if is_tensor_like(outputs) else tuple(outputs),
355
+ )
356
+ if is_tensor_like(inputs) or isinstance(inputs, graph.GradientEdge):
357
+ inputs = cast(_TensorOrTensorsOrGradEdge, (inputs,))
358
+ else:
359
+ inputs = tuple(inputs)
360
+ t_inputs = tuple(i for i in inputs if is_tensor_like(i))
361
+ overridable_args = t_outputs + t_inputs
362
+ if has_torch_function(overridable_args):
363
+ return handle_torch_function(
364
+ grad,
365
+ overridable_args,
366
+ t_outputs,
367
+ inputs,
368
+ grad_outputs=grad_outputs,
369
+ retain_graph=retain_graph,
370
+ create_graph=create_graph,
371
+ only_inputs=only_inputs,
372
+ allow_unused=allow_unused,
373
+ is_grads_batched=is_grads_batched,
374
+ materialize_grads=materialize_grads,
375
+ )
376
+
377
+ if not only_inputs:
378
+ warnings.warn(
379
+ "only_inputs argument is deprecated and is ignored now "
380
+ "(defaults to True). To accumulate gradient for other "
381
+ "parts of the graph, please use torch.autograd.backward."
382
+ )
383
+
384
+ grad_outputs_ = _tensor_or_tensors_to_tuple(grad_outputs, len(t_outputs))
385
+ grad_outputs_ = _make_grads(
386
+ t_outputs, grad_outputs_, is_grads_batched=is_grads_batched
387
+ )
388
+
389
+ if retain_graph is None:
390
+ retain_graph = create_graph
391
+
392
+ # The reason we repeat the same comment several times below is because
393
+ # some Python versions print out the first line of multi-line function
394
+ # calls in the traceback and some print out the last line
395
+ if is_grads_batched:
396
+
397
+ def vjp(gO):
398
+ return _engine_run_backward(
399
+ t_outputs,
400
+ gO,
401
+ retain_graph,
402
+ create_graph,
403
+ inputs,
404
+ allow_unused,
405
+ accumulate_grad=False,
406
+ )
407
+
408
+ result = _vmap_internals._vmap(vjp, 0, 0, allow_none_pass_through=True)(
409
+ grad_outputs_
410
+ )
411
+ else:
412
+ result = _engine_run_backward(
413
+ t_outputs,
414
+ grad_outputs_,
415
+ retain_graph,
416
+ create_graph,
417
+ inputs,
418
+ allow_unused,
419
+ accumulate_grad=False,
420
+ )
421
+ if materialize_grads:
422
+ if any(
423
+ result[i] is None and not is_tensor_like(inputs[i])
424
+ for i in range(len(inputs))
425
+ ):
426
+ raise RuntimeError(
427
+ "materialize_grads cannot be used when the given input is a GradientEdge"
428
+ )
429
+ result = tuple(
430
+ output
431
+ if output is not None
432
+ else torch.zeros_like(input, requires_grad=True)
433
+ for (output, input) in zip(result, inputs)
434
+ )
435
+ return result
436
+
437
+
438
+ # This function applies in case of gradient checkpointing for memory
439
+ # optimization. Currently, gradient checkpointing is supported only if the
440
+ # execution engine is invoked through torch.autograd.backward() and its
441
+ # inputs argument is not passed. It is not supported for torch.autograd.grad().
442
+ # This is because if inputs are specified, the gradient won't be calculated for
443
+ # anything else e.g. model parameters like weights, bias etc.
444
+ #
445
+ # This function returns whether the checkpointing is valid i.e. torch.autograd.backward
446
+ # or not i.e. torch.autograd.grad. The implementation works by maintaining a thread
447
+ # local variable in torch/csrc/autograd/engine.cpp which looks at the NodeTask
448
+ # in the stack and before a NodeTask is executed in evaluate_function, it
449
+ # checks for whether reentrant backwards is imperative or not.
450
+ # See https://github.com/pytorch/pytorch/pull/4594 for more discussion/context
451
+ def _is_checkpoint_valid():
452
+ return Variable._execution_engine.is_checkpoint_valid()
453
+
454
+
455
+ def variable(*args, **kwargs):
456
+ raise RuntimeError(
457
+ "torch.autograd.variable(...) is deprecated, use torch.tensor(...) instead"
458
+ )
459
+
460
+
461
+ # Monkey patching variable.Variable to fix FX codegen. FX generates a call by roughly doing
462
+ # f"{fn.__module__}.{fn.__name__}(...). This yields torch.autograd.variable.Variable(...) in the
463
+ # output of an FX graph. Unfortunately the module name torch.autograd.variable is shadowed by the
464
+ # deprecated function - variable(...).
465
+ variable.Variable = Variable # type: ignore[attr-defined]
466
+
467
+ if not torch._C._autograd_init():
468
+ raise RuntimeError("autograd initialization failed")
469
+
470
+ # Import all native method/classes
471
+ from torch._C._autograd import (
472
+ _add_metadata_json,
473
+ _disable_profiler,
474
+ _disable_profiler_legacy,
475
+ _enable_profiler,
476
+ _enable_profiler_legacy,
477
+ _enable_record_function,
478
+ _get_sequence_nr,
479
+ _kineto_step,
480
+ _KinetoEvent,
481
+ _pop_saved_tensors_default_hooks,
482
+ _prepare_profiler,
483
+ _profiler_enabled,
484
+ _ProfilerResult,
485
+ _push_saved_tensors_default_hooks,
486
+ _record_function_with_args_enter,
487
+ _record_function_with_args_exit,
488
+ _set_empty_test_observer,
489
+ _supported_activities,
490
+ DeviceType,
491
+ kineto_available,
492
+ ProfilerEvent,
493
+ SavedTensor,
494
+ )
495
+
496
+ from torch._C._profiler import ProfilerActivity, ProfilerConfig, ProfilerState
497
+
498
+ from . import profiler
499
+
500
+
501
+ def _register_py_tensor_class_for_device(device, cls):
502
+ if not isinstance(cls, type):
503
+ raise RuntimeError("cls isn't a typeinfo object")
504
+ torch._C._register_py_class_for_device(device, cls)
505
+
506
+
507
+ is_multithreading_enabled = torch._C._is_multithreading_enabled
508
+ torch._C._add_docstr(
509
+ is_multithreading_enabled, "Returns True if multithreading is currently enabled."
510
+ )
511
+
512
+ is_view_replay_enabled = torch._C._is_view_replay_enabled
513
+ torch._C._add_docstr(
514
+ is_view_replay_enabled, "Returns True if view-replay is currently enabled."
515
+ )
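For orientation, here is a minimal, self-contained usage sketch of the two public entry points documented in the file above (torch.autograd.backward and torch.autograd.grad); this is standard PyTorch usage, not something introduced by this commit:

import torch

x = torch.randn(3, requires_grad=True)

# backward() accumulates gradients into the .grad attribute of graph leaves.
y = (x ** 2).sum()
torch.autograd.backward(y)            # same as y.backward()
print(x.grad)                         # equals 2 * x

# grad() returns gradients instead of accumulating them; for a non-scalar
# output, grad_outputs supplies the "vector" of the vector-Jacobian product.
z = x ** 3
(gz,) = torch.autograd.grad(z, x, grad_outputs=torch.ones_like(z))
print(gz)                             # equals 3 * x ** 2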
venv/lib/python3.10/site-packages/torch/autograd/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (16.5 kB).
 
venv/lib/python3.10/site-packages/torch/autograd/__pycache__/anomaly_mode.cpython-310.pyc ADDED
Binary file (5.53 kB).
 
venv/lib/python3.10/site-packages/torch/autograd/__pycache__/forward_ad.cpython-310.pyc ADDED
Binary file (7.14 kB).
 
venv/lib/python3.10/site-packages/torch/autograd/__pycache__/function.cpython-310.pyc ADDED
Binary file (33.4 kB).
 
venv/lib/python3.10/site-packages/torch/autograd/__pycache__/functional.cpython-310.pyc ADDED
Binary file (36 kB).
 
venv/lib/python3.10/site-packages/torch/autograd/__pycache__/grad_mode.cpython-310.pyc ADDED
Binary file (15.4 kB).
 
venv/lib/python3.10/site-packages/torch/autograd/__pycache__/gradcheck.cpython-310.pyc ADDED
Binary file (61.1 kB).
 
venv/lib/python3.10/site-packages/torch/autograd/__pycache__/graph.cpython-310.pyc ADDED
Binary file (26.4 kB).
 
venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler.cpython-310.pyc ADDED
Binary file (35.1 kB).
 
venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_legacy.cpython-310.pyc ADDED
Binary file (7.52 kB).
 
venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_util.cpython-310.pyc ADDED
Binary file (30.4 kB).
 
venv/lib/python3.10/site-packages/torch/autograd/__pycache__/variable.cpython-310.pyc ADDED
Binary file (845 Bytes).
 
venv/lib/python3.10/site-packages/torch/autograd/_functions/__init__.py ADDED
@@ -0,0 +1 @@
+ from .tensor import * # noqa: F403
venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (216 Bytes).
 
venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/tensor.cpython-310.pyc ADDED
Binary file (2.11 kB).
 
venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.5 kB).
 
venv/lib/python3.10/site-packages/torch/autograd/_functions/tensor.py ADDED
@@ -0,0 +1,63 @@
1
+ import operator
2
+ import warnings
3
+ from functools import reduce
4
+
5
+ import torch
6
+ import torch._utils
7
+ from ..function import Function
8
+
9
+
10
+ class Type(Function):
11
+ @staticmethod
12
+ def forward(ctx, i, dest_type):
13
+ warnings.warn(
14
+ "torch.autograd._functions.Type is deprecated as of PyTorch 2.1, please use "
15
+ "torch.tensor.to(dtype=dtype) instead."
16
+ )
17
+ ctx.input_type = type(i)
18
+ ctx.input_device = -1 if not i.is_cuda else i.get_device()
19
+ return i.type(dest_type)
20
+
21
+ @staticmethod
22
+ def backward(ctx, grad_output):
23
+ if ctx.input_device == -1:
24
+ return grad_output.type(ctx.input_type), None
25
+ else:
26
+ with torch.cuda.device(ctx.input_device):
27
+ return grad_output.type(ctx.input_type), None
28
+
29
+
30
+ # TODO: deprecate this
31
+ class Resize(Function):
32
+ @staticmethod
33
+ def forward(ctx, tensor, sizes):
34
+ ctx.sizes = sizes
35
+ ctx.numel = reduce(operator.mul, sizes, 1)
36
+ if tensor.numel() != ctx.numel:
37
+ raise RuntimeError(
38
+ (
39
+ "requested resize to {} ({} elements in total), "
40
+ "but the given tensor has a size of {} ({} elements). "
41
+ "autograd's resize can only change the shape of a given "
42
+ "tensor, while preserving the number of elements. "
43
+ ).format(
44
+ "x".join(map(str, sizes)),
45
+ ctx.numel,
46
+ "x".join(map(str, tensor.size())),
47
+ tensor.numel(),
48
+ )
49
+ )
50
+ ctx.input_sizes = tensor.size()
51
+ if tensor.is_quantized:
52
+ tensor.copy_(tensor)
53
+ return tensor.contiguous().view(*sizes)
54
+ if tensor.is_contiguous():
55
+ result = tensor.new(tensor).contiguous().view(*sizes)
56
+ return result
57
+ else:
58
+ return tensor.contiguous().view(*sizes)
59
+
60
+ @staticmethod
61
+ def backward(ctx, grad_output):
62
+ assert grad_output.numel() == ctx.numel
63
+ return grad_output.contiguous().view(ctx.input_sizes), None
venv/lib/python3.10/site-packages/torch/autograd/_functions/utils.py ADDED
@@ -0,0 +1,62 @@
1
+ import operator
2
+ from functools import reduce
3
+
4
+
5
+ def maybe_view(tensor, size, check_same_size=True):
6
+ if check_same_size and tensor.size() == size:
7
+ return tensor
8
+ return tensor.contiguous().view(size)
9
+
10
+
11
+ def maybe_unexpand(tensor, old_size, check_same_size=True):
12
+ if check_same_size and tensor.size() == old_size:
13
+ return tensor
14
+ num_unsqueezed = tensor.dim() - len(old_size)
15
+ expanded_dims = [
16
+ dim
17
+ for dim, (expanded, original) in enumerate(
18
+ zip(tensor.size()[num_unsqueezed:], old_size)
19
+ )
20
+ if expanded != original
21
+ ]
22
+
23
+ for _ in range(num_unsqueezed):
24
+ tensor = tensor.sum(0, keepdim=False)
25
+ for dim in expanded_dims:
26
+ tensor = tensor.sum(dim, keepdim=True)
27
+ return tensor
28
+
29
+
30
+ # Check whether the op enable broadcasting, and whether it is supported by ONNX.
31
+ # If dims1 and dims2 are different, then broadcast is True.
32
+ # We always assume the combination of dims1 and dims2 is broadcastable.
33
+ # The following types of broadcasting are supported in ONNX:
34
+ # 1) Only one element in dims2, such as dims2 = [1, 1]
35
+ # 2) dims2 is suffix of dims1, such as dims1 = [2, 3, 4], and dims2 = [3, 4]
36
+ # Details can be found here: https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm
37
+ def check_onnx_broadcast(dims1, dims2):
38
+ broadcast = False
39
+ supported = True
40
+ len1 = len(dims1)
41
+ len2 = len(dims2)
42
+ numel1 = reduce(operator.mul, dims1)
43
+ numel2 = reduce(operator.mul, dims2)
44
+ if len1 < len2:
45
+ broadcast = True
46
+ if numel2 != 1:
47
+ supported = False
48
+ elif len1 > len2:
49
+ broadcast = True
50
+ if numel2 != 1 and dims1[len1 - len2 :] != dims2:
51
+ supported = False
52
+ else:
53
+ if dims1 != dims2:
54
+ broadcast = True
55
+ if numel2 != 1:
56
+ supported = False
57
+
58
+ if not supported:
59
+ raise ValueError(
60
+ f"Numpy style broadcasting is not supported in ONNX. Input dims are: {dims1}, {dims2}"
61
+ )
62
+ return broadcast
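A short illustration of the broadcasting rules the comment block above describes; check_onnx_broadcast is a private helper, and the import path below simply mirrors the file being added:

from torch.autograd._functions.utils import check_onnx_broadcast

print(check_onnx_broadcast([2, 3, 4], [2, 3, 4]))  # False: equal dims, no broadcast
print(check_onnx_broadcast([2, 3, 4], [3, 4]))     # True: dims2 is a suffix of dims1
print(check_onnx_broadcast([2, 3, 4], [1, 1]))     # True: dims2 has a single element
# check_onnx_broadcast([2, 3, 4], [3, 1])          # would raise ValueError (unsupported in ONNX)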
venv/lib/python3.10/site-packages/torch/autograd/anomaly_mode.py ADDED
@@ -0,0 +1,119 @@
1
+ import warnings
2
+
3
+ import torch
4
+
5
+ __all__ = ["detect_anomaly", "set_detect_anomaly"]
6
+
7
+
8
+ class detect_anomaly:
9
+ r"""Context-manager that enable anomaly detection for the autograd engine.
10
+
11
+ This does two things:
12
+
13
+ - Running the forward pass with detection enabled will allow the backward
14
+ pass to print the traceback of the forward operation that created the failing
15
+ backward function.
16
+ - If ``check_nan`` is ``True``, any backward computation that generates a "nan"
17
+ value will raise an error. Default ``True``.
18
+
19
+ .. warning::
20
+ This mode should be enabled only for debugging as the different tests
21
+ will slow down your program execution.
22
+
23
+ Example:
24
+
25
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ANOMALY)
26
+ >>> import torch
27
+ >>> from torch import autograd
28
+ >>> class MyFunc(autograd.Function):
29
+ ... @staticmethod
30
+ ... def forward(ctx, inp):
31
+ ... return inp.clone()
32
+ ... @staticmethod
33
+ ... def backward(ctx, gO):
34
+ ... # Error during the backward pass
35
+ ... raise RuntimeError("Some error in backward")
36
+ ... return gO.clone()
37
+ >>> def run_fn(a):
38
+ ... out = MyFunc.apply(a)
39
+ ... return out.sum()
40
+ >>> inp = torch.rand(10, 10, requires_grad=True)
41
+ >>> out = run_fn(inp)
42
+ >>> out.backward()
43
+ Traceback (most recent call last):
44
+ File "<stdin>", line 1, in <module>
45
+ File "/your/pytorch/install/torch/_tensor.py", line 93, in backward
46
+ torch.autograd.backward(self, gradient, retain_graph, create_graph)
47
+ File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward
48
+ allow_unreachable=True) # allow_unreachable flag
49
+ File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply
50
+ return self._forward_cls.backward(self, *args)
51
+ File "<stdin>", line 8, in backward
52
+ RuntimeError: Some error in backward
53
+ >>> with autograd.detect_anomaly():
54
+ ... inp = torch.rand(10, 10, requires_grad=True)
55
+ ... out = run_fn(inp)
56
+ ... out.backward()
57
+ Traceback of forward call that caused the error:
58
+ File "tmp.py", line 53, in <module>
59
+ out = run_fn(inp)
60
+ File "tmp.py", line 44, in run_fn
61
+ out = MyFunc.apply(a)
62
+ Traceback (most recent call last):
63
+ File "<stdin>", line 4, in <module>
64
+ File "/your/pytorch/install/torch/_tensor.py", line 93, in backward
65
+ torch.autograd.backward(self, gradient, retain_graph, create_graph)
66
+ File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward
67
+ allow_unreachable=True) # allow_unreachable flag
68
+ File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply
69
+ return self._forward_cls.backward(self, *args)
70
+ File "<stdin>", line 8, in backward
71
+ RuntimeError: Some error in backward
72
+
73
+ """
74
+
75
+ def __init__(self, check_nan=True) -> None:
76
+ self.prev = torch.is_anomaly_enabled()
77
+ self.check_nan = check_nan
78
+ self.prev_check_nan = torch.is_anomaly_check_nan_enabled()
79
+ warnings.warn(
80
+ "Anomaly Detection has been enabled. "
81
+ "This mode will increase the runtime "
82
+ "and should only be enabled for debugging.",
83
+ stacklevel=2,
84
+ )
85
+
86
+ def __enter__(self) -> None:
87
+ torch.set_anomaly_enabled(True, self.check_nan)
88
+
89
+ def __exit__(self, *args: object) -> None:
90
+ torch.set_anomaly_enabled(self.prev, self.prev_check_nan)
91
+
92
+
93
+ class set_detect_anomaly:
94
+ r"""Context-manager that sets the anomaly detection for the autograd engine on or off.
95
+
96
+ ``set_detect_anomaly`` will enable or disable the autograd anomaly detection
97
+ based on its argument :attr:`mode`.
98
+ It can be used as a context-manager or as a function.
99
+
100
+ See ``detect_anomaly`` above for details of the anomaly detection behaviour.
101
+
102
+ Args:
103
+ mode (bool): Flag whether to enable anomaly detection (``True``),
104
+ or disable (``False``).
105
+ check_nan (bool): Flag whether to raise an error when the backward
106
+ generate "nan"
107
+
108
+ """
109
+
110
+ def __init__(self, mode: bool, check_nan: bool = True) -> None:
111
+ self.prev = torch.is_anomaly_enabled()
112
+ self.prev_check_nan = torch.is_anomaly_check_nan_enabled()
113
+ torch.set_anomaly_enabled(mode, check_nan)
114
+
115
+ def __enter__(self) -> None:
116
+ pass
117
+
118
+ def __exit__(self, *args: object) -> None:
119
+ torch.set_anomaly_enabled(self.prev, self.prev_check_nan)
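Both entry points above can be exercised with a few lines; as the docstrings warn, anomaly detection is intended for debugging only because it slows execution:

import torch

x = torch.randn(4, requires_grad=True)

# Context-manager form: anomaly detection is active only inside the block.
with torch.autograd.detect_anomaly():
    (x * 2).sum().backward()

# Function form: toggles the global flag until it is changed again.
torch.autograd.set_detect_anomaly(True)
(x * 3).sum().backward()
torch.autograd.set_detect_anomaly(False)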
venv/lib/python3.10/site-packages/torch/autograd/forward_ad.py ADDED
@@ -0,0 +1,227 @@
1
+ import os
2
+ from collections import namedtuple
3
+
4
+ from typing import Any
5
+
6
+ import torch
7
+ from .grad_mode import _DecoratorContextManager
8
+
9
+ __all__ = [
10
+ "UnpackedDualTensor",
11
+ "enter_dual_level",
12
+ "exit_dual_level",
13
+ "make_dual",
14
+ "unpack_dual",
15
+ "dual_level",
16
+ ]
17
+
18
+ # Global variable used to make the python API simpler to use
19
+ _current_level = -1
20
+
21
+
22
+ def enter_dual_level():
23
+ r"""Enter a new forward grad level.
24
+
25
+ This level can be used to make and unpack dual Tensors to compute
26
+ forward gradients.
27
+
28
+ This function also updates the current level that is used by default
29
+ by the other functions in this API.
30
+ """
31
+ global _current_level
32
+ new_level = torch._C._enter_dual_level()
33
+ if new_level != _current_level + 1:
34
+ raise RuntimeError(
35
+ "Entering a new forward AD level but the current level "
36
+ "is not valid. Make sure you did not modified it directly."
37
+ )
38
+ _current_level = new_level
39
+ return new_level
40
+
41
+
42
+ def exit_dual_level(*, level=None):
43
+ r"""Exit a forward grad level.
44
+
45
+ This function deletes all the gradients associated with this
46
+ level. Only deleting the latest entered level is allowed.
47
+
48
+ This function also updates the current level that is used by default
49
+ by the other functions in this API.
50
+ """
51
+ global _current_level
52
+ if level is None:
53
+ level = _current_level
54
+ if level != _current_level:
55
+ raise RuntimeError(
56
+ "Trying to exit a forward AD level that was not the last one "
57
+ "that was created. This is not supported."
58
+ )
59
+ torch._C._exit_dual_level(level=level)
60
+ _current_level = level - 1
61
+
62
+
63
+ def make_dual(tensor, tangent, *, level=None):
64
+ r"""Associate a tensor value with its tangent to create a "dual tensor" for forward AD gradient computation.
65
+
66
+ The result is a new tensor aliased to :attr:`tensor` with :attr:`tangent` embedded
67
+ as an attribute as-is if it has the same storage layout or copied otherwise.
68
+ The tangent attribute can be recovered with :func:`unpack_dual`.
69
+
70
+ This function is backward differentiable.
71
+
72
+ Given a function `f` whose jacobian is `J`, it allows one to compute the Jacobian-vector product (`jvp`)
73
+ between `J` and a given vector `v` as follows.
74
+
75
+ Example::
76
+
77
+ >>> # xdoctest: +SKIP("Undefined variables")
78
+ >>> with dual_level():
79
+ ... inp = make_dual(x, v)
80
+ ... out = f(inp)
81
+ ... y, jvp = unpack_dual(out)
82
+
83
+ Please see the `forward-mode AD tutorial <https://pytorch.org/tutorials/intermediate/forward_ad_usage.html>`__
84
+ for detailed steps on how to use this API.
85
+
86
+ """
87
+ # See NOTE: [forward-mode AD decompositions mechanism]
88
+ #
89
+ # Import from torch._decomp import decompositions_for_jvp to register
90
+ # decompositions for jvp to the jit registry
91
+ #
92
+ # FIXME: We specify that __debug__ must be True because
93
+ # if python is run with -OO or -O flags (i.e., __debug__ is False), we encounter the
94
+ # following error:
95
+ #
96
+ # Return value was annotated as having type Tuple[NoneType, NoneType] but is actually of
97
+ # type Tuple[Tensor, Tensor]:
98
+ # File ".../torch/_decomp/__init__.py", line 1585
99
+ # else:
100
+ # buffer = z
101
+ # return min - torch.log1p(z), buffer
102
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
103
+ if os.environ.get("PYTORCH_JIT", "1") == "1" and __debug__:
104
+ from torch._decomp import decompositions_for_jvp # noqa: F401
105
+
106
+ if level is None:
107
+ level = _current_level
108
+
109
+ if level < 0:
110
+ raise RuntimeError(
111
+ "Trying to create a dual Tensor for forward AD but no level "
112
+ "exists, make sure to enter_dual_level() first."
113
+ )
114
+ if not (tensor.is_floating_point() or tensor.is_complex()):
115
+ raise ValueError(
116
+ f"Expected primal to be floating point or complex, but got: {tensor.dtype}"
117
+ )
118
+ if not (tangent.is_floating_point() or tangent.is_complex()):
119
+ raise ValueError(
120
+ f"Expected tangent to be floating point or complex, but got: {tangent.dtype}"
121
+ )
122
+
123
+ return torch._VF._make_dual(tensor, tangent, level=level)
124
+
125
+
126
+ _UnpackedDualTensor = namedtuple("_UnpackedDualTensor", ["primal", "tangent"])
127
+
128
+
129
+ class UnpackedDualTensor(_UnpackedDualTensor):
130
+ r"""Namedtuple returned by :func:`unpack_dual` containing the primal and tangent components of the dual tensor.
131
+
132
+ See :func:`unpack_dual` for more details.
133
+
134
+ """
135
+
136
+ pass
137
+
138
+
139
+ def unpack_dual(tensor, *, level=None):
140
+ r"""Unpack a "dual tensor" to get both its Tensor value and its forward AD gradient.
141
+
142
+ The result is a namedtuple ``(primal, tangent)`` where ``primal`` is a view of
143
+ :attr:`tensor`'s primal and ``tangent`` is :attr:`tensor`'s tangent as-is.
144
+ Neither of these tensors can be dual tensor of level :attr:`level`.
145
+
146
+ This function is backward differentiable.
147
+
148
+ Example::
149
+
150
+ >>> # xdoctest: +SKIP("Undefined variables")
151
+ >>> with dual_level():
152
+ ... inp = make_dual(x, x_t)
153
+ ... out = f(inp)
154
+ ... y, jvp = unpack_dual(out)
155
+ ... jvp = unpack_dual(out).tangent
156
+
157
+ Please see the `forward-mode AD tutorial <https://pytorch.org/tutorials/intermediate/forward_ad_usage.html>`__
158
+ for detailed steps on how to use this API.
159
+ """
160
+ if level is None:
161
+ level = _current_level
162
+
163
+ if level < 0:
164
+ return UnpackedDualTensor(tensor, None)
165
+
166
+ primal, dual = torch._VF._unpack_dual(tensor, level=level)
167
+
168
+ return UnpackedDualTensor(primal, dual)
169
+
170
+
171
+ class dual_level(_DecoratorContextManager):
172
+ r"""Context-manager for forward AD, where all forward AD computation must occur within the ``dual_level`` context.
173
+
174
+ .. Note::
175
+
176
+ The ``dual_level`` context appropriately enters and exits the dual level to
177
+ control the current forward AD level, which is used by default by the other
178
+ functions in this API.
179
+
180
+ We currently don't plan to support nested ``dual_level`` contexts, however, so
181
+ only a single forward AD level is supported. To compute higher-order
182
+ forward grads, one can use :func:`torch.func.jvp`.
183
+
184
+ Example::
185
+
186
+ >>> # xdoctest: +SKIP("Undefined variables")
187
+ >>> x = torch.tensor([1])
188
+ >>> x_t = torch.tensor([1])
189
+ >>> with dual_level():
190
+ ... inp = make_dual(x, x_t)
191
+ ... # Do computations with inp
192
+ ... out = your_fn(inp)
193
+ ... _, grad = unpack_dual(out)
194
+ >>> grad is None
195
+ False
196
+ >>> # After exiting the level, the grad is deleted
197
+ >>> _, grad_after = unpack_dual(out)
198
+ >>> grad is None
199
+ True
200
+
201
+ Please see the `forward-mode AD tutorial <https://pytorch.org/tutorials/intermediate/forward_ad_usage.html>`__
202
+ for detailed steps on how to use this API.
203
+ """
204
+
205
+ def __enter__(self):
206
+ return enter_dual_level()
207
+
208
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
209
+ exit_dual_level()
210
+
211
+
212
+ # Private helper functions
213
+ _is_fwd_grad_enabled = torch._C._is_fwd_grad_enabled
214
+
215
+
216
+ # Private helper function to enable or disable fwd grad.
217
+ # If you're a user and want to use this, please file an issue to discuss the use case.
218
+ class _set_fwd_grad_enabled(_DecoratorContextManager):
219
+ def __init__(self, mode: bool) -> None:
220
+ self.prev = _is_fwd_grad_enabled()
221
+ torch._C._set_fwd_grad_enabled(mode)
222
+
223
+ def __enter__(self) -> None:
224
+ pass
225
+
226
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
227
+ torch._C._set_fwd_grad_enabled(self.prev)
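A self-contained sketch of the forward-mode AD workflow the docstrings above describe: computing a Jacobian-vector product of sin at x along a tangent v:

import torch
import torch.autograd.forward_ad as fwAD

x = torch.randn(5)
v = torch.randn(5)                     # tangent: the "vector" in the jvp

with fwAD.dual_level():
    x_dual = fwAD.make_dual(x, v)      # dual tensor carrying (primal, tangent)
    out = x_dual.sin()
    primal, tangent = fwAD.unpack_dual(out)
    print(torch.allclose(tangent, x.cos() * v))   # True: the jvp of sin is cos(x) * v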
venv/lib/python3.10/site-packages/torch/autograd/function.py ADDED
@@ -0,0 +1,883 @@
1
+ import functools
2
+ import inspect
3
+ import itertools
4
+ import warnings
5
+ from collections import OrderedDict
6
+ from typing import Any, List, Optional, Tuple
7
+
8
+ import torch
9
+ import torch._C as _C
10
+ import torch._functorch as _functorch
11
+ import torch.utils.hooks as hooks
12
+ from torch._C import _functions
13
+ from torch._functorch.autograd_function import custom_function_call
14
+
15
+ __all__ = [
16
+ "FunctionCtx",
17
+ "BackwardCFunction",
18
+ "FunctionMeta",
19
+ "Function",
20
+ "once_differentiable",
21
+ "traceable",
22
+ "InplaceFunction",
23
+ "NestedIOFunction",
24
+ ]
25
+
26
+ # Unique id provider for each class inheriting from Function
27
+ # This is incremented in FunctionMeta during class definition
28
+ AUTOGRAD_FUNCTION_COUNTER = itertools.count()
29
+
30
+
31
+ # Formerly known as: _ContextMethodMixin
32
+ class FunctionCtx:
33
+ def save_for_backward(self, *tensors: torch.Tensor):
34
+ r"""Save given tensors for a future call to :func:`~Function.backward`.
35
+
36
+ ``save_for_backward`` should be called at most once, only from inside the
37
+ :func:`forward` method, and only with tensors.
38
+
39
+ All tensors intended to be used in the backward pass should be saved
40
+ with ``save_for_backward`` (as opposed to directly on ``ctx``) to prevent
41
+ incorrect gradients and memory leaks, and enable the application of saved
42
+ tensor hooks. See :class:`torch.autograd.graph.saved_tensors_hooks`.
43
+
44
+ Note that if intermediary tensors, tensors that are neither inputs
45
+ nor outputs of :func:`forward`, are saved for backward, your custom Function
46
+ may not support double backward.
47
+ Custom Functions that do not support double backward should decorate their
48
+ :func:`backward` method with ``@once_differentiable`` so that performing
49
+ double backward raises an error. If you'd like to support double backward,
50
+ you can either recompute intermediaries based on the inputs during backward
51
+ or return the intermediaries as the outputs of the custom Function. See the
52
+ `double backward tutorial <https://pytorch.org/tutorials/intermediate/custom_function_double_backward_tutorial.html>`_
53
+ for more details.
54
+
55
+ In :func:`backward`, saved tensors can be accessed through the :attr:`saved_tensors`
56
+ attribute. Before returning them to the user, a check is made to ensure
57
+ they weren't used in any in-place operation that modified their content.
58
+
59
+ Arguments can also be ``None``. This is a no-op.
60
+
61
+ See :ref:`extending-autograd` for more details on how to use this method.
62
+
63
+ Example::
64
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
65
+ >>> class Func(Function):
66
+ >>> @staticmethod
67
+ >>> def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
68
+ >>> w = x * z
69
+ >>> out = x * y + y * z + w * y
70
+ >>> ctx.save_for_backward(x, y, w, out)
71
+ >>> ctx.z = z # z is not a tensor
72
+ >>> return out
73
+ >>>
74
+ >>> @staticmethod
75
+ >>> @once_differentiable
76
+ >>> def backward(ctx, grad_out):
77
+ >>> x, y, w, out = ctx.saved_tensors
78
+ >>> z = ctx.z
79
+ >>> gx = grad_out * (y + y * z)
80
+ >>> gy = grad_out * (x + z + w)
81
+ >>> gz = None
82
+ >>> return gx, gy, gz
83
+ >>>
84
+ >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double)
85
+ >>> b = torch.tensor(2., requires_grad=True, dtype=torch.double)
86
+ >>> c = 4
87
+ >>> d = Func.apply(a, b, c)
88
+
89
+ """
90
+ self.to_save = tensors
91
+
92
+ def save_for_forward(self, *tensors: torch.Tensor):
93
+ r"""Save given tensors for a future call to :func:`~Function.jvp`.
94
+
95
+ ``save_for_forward`` should be only called once, from inside the :func:`forward`
96
+ method, and only be called with tensors.
97
+
98
+ In :func:`jvp`, saved objects can be accessed through the :attr:`saved_tensors`
99
+ attribute.
100
+
101
+ Arguments can also be ``None``. This is a no-op.
102
+
103
+ See :ref:`extending-autograd` for more details on how to use this method.
104
+
105
+ Example::
106
+ >>> # xdoctest: +SKIP
107
+ >>> class Func(torch.autograd.Function):
108
+ >>> @staticmethod
109
+ >>> def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
110
+ >>> ctx.save_for_backward(x, y)
111
+ >>> ctx.save_for_forward(x, y)
112
+ >>> ctx.z = z
113
+ >>> return x * y * z
114
+ >>>
115
+ >>> @staticmethod
116
+ >>> def jvp(ctx, x_t, y_t, _):
117
+ >>> x, y = ctx.saved_tensors
118
+ >>> z = ctx.z
119
+ >>> return z * (y * x_t + x * y_t)
120
+ >>>
121
+ >>> @staticmethod
122
+ >>> def vjp(ctx, grad_out):
123
+ >>> x, y = ctx.saved_tensors
124
+ >>> z = ctx.z
125
+ >>> return z * grad_out * y, z * grad_out * x, None
126
+ >>>
127
+ >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double)
128
+ >>> t = torch.tensor(1., dtype=torch.double)
129
+ >>> b = torch.tensor(2., requires_grad=True, dtype=torch.double)
130
+ >>> c = 4
131
+ >>>
132
+ >>> with fwAD.dual_level():
133
+ >>> a_dual = fwAD.make_dual(a, t)
134
+ >>> d = Func.apply(a_dual, b, c)
135
+
136
+ """
137
+ for tensor in tensors:
138
+ assert isinstance(tensor, torch.Tensor) or tensor is None, (
139
+ "save_for_forward expects all arguments to be tensors; you should "
140
+ "save non-tensors as attributes on ctx."
141
+ )
142
+
143
+ self.saved_for_forward = tensors
144
+
145
+ def mark_dirty(self, *args: torch.Tensor):
146
+ r"""Mark given tensors as modified in an in-place operation.
147
+
148
+ **This should be called at most once, only from inside the**
149
+ :func:`forward` **method, and all arguments should be inputs.**
150
+
151
+ Every tensor that's been modified in-place in a call to :func:`forward`
152
+ should be given to this function, to ensure correctness of our checks.
153
+ It doesn't matter whether the function is called before or after
154
+ modification.
155
+
156
+ Examples::
157
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
158
+ >>> class Inplace(Function):
159
+ >>> @staticmethod
160
+ >>> def forward(ctx, x):
161
+ >>> x_npy = x.numpy() # x_npy shares storage with x
162
+ >>> x_npy += 1
163
+ >>> ctx.mark_dirty(x)
164
+ >>> return x
165
+ >>>
166
+ >>> @staticmethod
167
+ >>> @once_differentiable
168
+ >>> def backward(ctx, grad_output):
169
+ >>> return grad_output
170
+ >>>
171
+ >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double).clone()
172
+ >>> b = a * a
173
+ >>> Inplace.apply(a) # This would lead to wrong gradients!
174
+ >>> # but the engine would not know unless we mark_dirty
175
+ >>> # xdoctest: +SKIP
176
+ >>> b.backward() # RuntimeError: one of the variables needed for gradient
177
+ >>> # computation has been modified by an inplace operation
178
+
179
+ """
180
+ self.dirty_tensors = args
181
+
182
+ def mark_shared_storage(self, *pairs):
183
+ warnings.warn(
184
+ "mark_shared_storage is deprecated. "
185
+ "Tensors with shared storages are automatically tracked. Note "
186
+ "that calls to `set_()` are not tracked"
187
+ )
188
+
189
+ def mark_non_differentiable(self, *args: torch.Tensor):
190
+ r"""Mark outputs as non-differentiable.
191
+
192
+ **This should be called at most once, only from inside the**
193
+ :func:`forward` **method, and all arguments should be tensor outputs.**
194
+
195
+ This will mark outputs as not requiring gradients, increasing the
196
+ efficiency of backward computation. You still need to accept a gradient
197
+ for each output in :meth:`~Function.backward`, but it's always going to
198
+ be a zero tensor with the same shape as the shape of a corresponding
199
+ output.
200
+
201
+ This is used e.g. for indices returned from a sort. See example::
202
+ >>> class Func(Function):
203
+ >>> @staticmethod
204
+ >>> def forward(ctx, x):
205
+ >>> sorted, idx = x.sort()
206
+ >>> ctx.mark_non_differentiable(idx)
207
+ >>> ctx.save_for_backward(x, idx)
208
+ >>> return sorted, idx
209
+ >>>
210
+ >>> @staticmethod
211
+ >>> @once_differentiable
212
+ >>> def backward(ctx, g1, g2): # still need to accept g2
213
+ >>> x, idx = ctx.saved_tensors
214
+ >>> grad_input = torch.zeros_like(x)
215
+ >>> grad_input.index_add_(0, idx, g1)
216
+ >>> return grad_input
217
+
218
+ """
219
+ self.non_differentiable = args
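As a hedged continuation of the sort example above (assuming ``Func`` and ``torch`` are in scope exactly as shown in the docstring), the indices come back with ``requires_grad=False`` while the sorted values still backpropagate:

x = torch.randn(5, dtype=torch.double, requires_grad=True)
sorted_vals, idx = Func.apply(x)
print(sorted_vals.requires_grad, idx.requires_grad)  # True False
sorted_vals.sum().backward()
print(torch.allclose(x.grad, torch.ones_like(x)))    # True: each sorted value has derivative 1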
220
+
221
+ def set_materialize_grads(self, value: bool):
222
+ r"""Set whether to materialize grad tensors. Default is ``True``.
223
+
224
+ **This should be called only from inside the** :func:`forward` **method**
225
+
226
+ If ``True``, undefined grad tensors will be expanded to tensors full of zeros
227
+ prior to calling the :func:`backward` and :func:`jvp` methods.
228
+
229
+ Example::
230
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
231
+ >>> class SimpleFunc(Function):
232
+ >>> @staticmethod
233
+ >>> def forward(ctx, x):
234
+ >>> return x.clone(), x.clone()
235
+ >>>
236
+ >>> @staticmethod
237
+ >>> @once_differentiable
238
+ >>> def backward(ctx, g1, g2):
239
+ >>> return g1 + g2 # No check for None necessary
240
+ >>>
241
+ >>> # We modify SimpleFunc to handle non-materialized grad outputs
242
+ >>> class Func(Function):
243
+ >>> @staticmethod
244
+ >>> def forward(ctx, x):
245
+ >>> ctx.set_materialize_grads(False)
246
+ >>> ctx.save_for_backward(x)
247
+ >>> return x.clone(), x.clone()
248
+ >>>
249
+ >>> @staticmethod
250
+ >>> @once_differentiable
251
+ >>> def backward(ctx, g1, g2):
252
+ >>> x, = ctx.saved_tensors
253
+ >>> grad_input = torch.zeros_like(x)
254
+ >>> if g1 is not None: # We must check for None now
255
+ >>> grad_input += g1
256
+ >>> if g2 is not None:
257
+ >>> grad_input += g2
258
+ >>> return grad_input
259
+ >>>
260
+ >>> a = torch.tensor(1., requires_grad=True)
261
+ >>> b, _ = Func.apply(a) # induces g2 to be undefined
262
+
263
+ """
264
+ self.materialize_grads = value
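A small, hypothetical continuation of the example above: with materialization disabled, the gradient for the unused second output reaches ``backward`` as ``None`` instead of a zero tensor, and the ``None`` checks keep the result correct.

a = torch.tensor(1., requires_grad=True)
b, _ = Func.apply(a)   # the second output is never used downstream
b.backward()           # backward receives g1 = tensor(1.), g2 = None
print(a.grad)          # tensor(1.)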
265
+
266
+
267
+ # DO NOT USE: This is only defined to be able to load old serialized models
268
+ _ContextMethodMixin = FunctionCtx
269
+
270
+
271
+ class _HookMixin:
272
+ @staticmethod
273
+ def _register_hook(backward_hooks, hook):
274
+ if backward_hooks is None:
275
+ backward_hooks = OrderedDict()
276
+ handle = hooks.RemovableHandle(backward_hooks)
277
+ backward_hooks[handle.id] = hook
278
+ return backward_hooks, handle
279
+
280
+
281
+ class BackwardCFunction(_C._FunctionBase, FunctionCtx, _HookMixin):
282
+ r"""
283
+ This class is used for internal autograd work. Do not use.
284
+ """
285
+
286
+ def apply(self, *args):
287
+ r"""
288
+ Apply method used when executing this Node during the backward
289
+ """
290
+ # _forward_cls is defined by derived class
291
+ # The user should define either backward or vjp but never both.
292
+ backward_fn = self._forward_cls.backward # type: ignore[attr-defined]
293
+ vjp_fn = self._forward_cls.vjp # type: ignore[attr-defined]
294
+ if backward_fn is not Function.backward and vjp_fn is not Function.vjp:
295
+ raise RuntimeError(
296
+ "Implementing both 'backward' and 'vjp' for a custom "
297
+ "Function is not allowed. You should only implement one "
298
+ "of them."
299
+ )
300
+ user_fn = vjp_fn if vjp_fn is not Function.vjp else backward_fn
301
+ return user_fn(self, *args)
302
+
303
+ def apply_jvp(self, *args):
304
+ r"""
305
+ Apply method used when executing forward mode AD during the forward
306
+ """
307
+ # _forward_cls is defined by derived class
308
+ return self._forward_cls.jvp(self, *args) # type: ignore[attr-defined]
309
+
310
+ def _compiled_autograd_key(self):
311
+ return self._forward_cls._compiled_autograd_key(self) # type: ignore[attr-defined]
312
+
313
+
314
+ def _warn_traceable_deprecated():
315
+ warnings.warn(
316
+ "The is_traceable field on torch.autograd.Function is deprecated "
317
+ "and will be removed in PyTorch 2.4.",
318
+ stacklevel=3,
319
+ )
320
+
321
+
322
+ class FunctionMeta(type):
323
+ """Function metaclass.
324
+
325
+ This metaclass sets up the following properties:
326
+ _backward_cls: The Function class corresponding to the differentiated
327
+ version of this function (which is generated on the fly by this
328
+ metaclass).
329
+ """
330
+
331
+ def __init__(cls, name, bases, attrs):
332
+ backward_fn = type(
333
+ name + "Backward", (BackwardCFunction,), {"_forward_cls": cls}
334
+ )
335
+ backward_fn._autograd_function_id = next(AUTOGRAD_FUNCTION_COUNTER) # type: ignore[attr-defined]
336
+ backward_fn._compiled_autograd_should_lift = attrs.get( # type: ignore[attr-defined]
337
+ "_compiled_autograd_should_lift", True
338
+ )
339
+ cls._backward_cls = backward_fn
340
+
341
+ if "is_traceable" in attrs and attrs["is_traceable"] is True:
342
+ _warn_traceable_deprecated()
343
+
344
+ super().__init__(name, bases, attrs)
345
+
346
+ def __getattribute__(cls, name):
347
+ if name == "is_traceable":
348
+ _warn_traceable_deprecated()
349
+ return super().__getattribute__(name)
350
+
351
+ def __setattr__(cls, name, value):
352
+ if name == "is_traceable" and value is True:
353
+ warnings.warn(
354
+ "The is_traceable field on torch.autograd.Function is deprecated "
355
+ "and will be removed in PyTorch 2.4.",
356
+ stacklevel=2,
357
+ )
358
+ return super().__setattr__(name, value)
359
+
360
+
361
+ class _SingleLevelFunction(
362
+ _C._FunctionBase, FunctionCtx, _HookMixin, metaclass=FunctionMeta
363
+ ):
364
+ @staticmethod
365
+ def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
366
+ r"""Define the forward of the custom autograd Function.
367
+
368
+ This function is to be overridden by all subclasses.
369
+ There are two ways to define forward:
370
+
371
+ Usage 1 (Combined forward and ctx)::
372
+
373
+ @staticmethod
374
+ def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
375
+ pass
376
+
377
+ - It must accept a context ctx as the first argument, followed by any
378
+ number of arguments (tensors or other types).
379
+ - See :ref:`combining-forward-context` for more details
380
+
381
+ Usage 2 (Separate forward and ctx)::
382
+
383
+ @staticmethod
384
+ def forward(*args: Any, **kwargs: Any) -> Any:
385
+ pass
386
+
387
+ @staticmethod
388
+ def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
389
+ pass
390
+
391
+ - The forward no longer accepts a ctx argument.
392
+ - Instead, you must also override the :meth:`torch.autograd.Function.setup_context`
393
+ staticmethod to handle setting up the ``ctx`` object.
394
+ ``output`` is the output of the forward, ``inputs`` are a Tuple of inputs
395
+ to the forward.
396
+ - See :ref:`extending-autograd` for more details
397
+
398
+         The context can be used to store arbitrary data that can then be
399
+ retrieved during the backward pass. Tensors should not be stored
400
+ directly on `ctx` (though this is not currently enforced for
401
+ backward compatibility). Instead, tensors should be saved either with
402
+ :func:`ctx.save_for_backward` if they are intended to be used in
403
+ ``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward`
404
+         if they are intended to be used in ``jvp``.
405
+ """
406
+ raise NotImplementedError(
407
+ "You must implement the forward function for custom autograd.Function."
408
+ )
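A minimal sketch of "Usage 2" described above, with a separate ``forward`` and ``setup_context``; ``ScaledMul`` is a hypothetical example, not part of this module.

import torch
from torch.autograd import Function

class ScaledMul(Function):
    @staticmethod
    def forward(x, y, scale):          # no ctx argument in this style
        return scale * x * y

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, y, scale = inputs
        ctx.scale = scale              # non-tensor state goes on ctx directly
        ctx.save_for_backward(x, y)    # tensors go through save_for_backward

    @staticmethod
    def backward(ctx, grad_out):
        x, y = ctx.saved_tensors
        return ctx.scale * grad_out * y, ctx.scale * grad_out * x, None

a = torch.tensor(2., requires_grad=True)
b = torch.tensor(3., requires_grad=True)
ScaledMul.apply(a, b, 4.0).backward()
print(a.grad, b.grad)  # tensor(12.) tensor(8.)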
409
+
410
+ @staticmethod
411
+ def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> Any:
412
+ r"""There are two ways to define the forward pass of an autograd.Function.
413
+
414
+ Either:
415
+
416
+ 1. Override forward with the signature ``forward(ctx, *args, **kwargs)``.
417
+ ``setup_context`` is not overridden. Setting up the ctx for backward
418
+ happens inside the ``forward``.
419
+ 2. Override forward with the signature ``forward(*args, **kwargs)`` and
420
+ override ``setup_context``. Setting up the ctx for backward happens
421
+ inside ``setup_context`` (as opposed to inside the ``forward``)
422
+
423
+ See :meth:`torch.autograd.Function.forward` and :ref:`extending-autograd` for more details.
424
+ """
425
+ raise NotImplementedError("setup_context is not implemented.")
426
+
427
+ @staticmethod
428
+ def backward(ctx: Any, *grad_outputs: Any) -> Any:
429
+ r"""Define a formula for differentiating the operation with backward mode automatic differentiation.
430
+
431
+ This function is to be overridden by all subclasses.
432
+ (Defining this function is equivalent to defining the ``vjp`` function.)
433
+
434
+ It must accept a context :attr:`ctx` as the first argument, followed by
435
+ as many outputs as the :func:`forward` returned (None will be passed in
436
+         for non-tensor outputs of the forward function),
437
+         and it should return as many tensors as there were inputs to
438
+ :func:`forward`. Each argument is the gradient w.r.t the given output,
439
+ and each returned value should be the gradient w.r.t. the
440
+ corresponding input. If an input is not a Tensor or is a Tensor not
441
+ requiring grads, you can just pass None as a gradient for that input.
442
+
443
+ The context can be used to retrieve tensors saved during the forward
444
+ pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple
445
+ of booleans representing whether each input needs gradient. E.g.,
446
+ :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the
447
+ first input to :func:`forward` needs gradient computed w.r.t. the
448
+ output.
449
+ """
450
+ raise NotImplementedError(
451
+ "You must implement either the backward or vjp method for "
452
+ "your custom autograd.Function to use it with backward "
453
+ "mode AD."
454
+ )
455
+
456
+ # vjp and backward are alias of each other
457
+ vjp = backward
458
+
459
+ @staticmethod
460
+ def jvp(ctx: Any, *grad_inputs: Any) -> Any:
461
+ r"""Define a formula for differentiating the operation with forward mode automatic differentiation.
462
+
463
+ This function is to be overridden by all subclasses.
464
+ It must accept a context :attr:`ctx` as the first argument, followed by
465
+ as many inputs as the :func:`forward` got (None will be passed in
466
+         for non-tensor inputs of the forward function),
467
+ and it should return as many tensors as there were outputs to
468
+ :func:`forward`. Each argument is the gradient w.r.t the given input,
469
+ and each returned value should be the gradient w.r.t. the
470
+ corresponding output. If an output is not a Tensor or the function is not
471
+ differentiable with respect to that output, you can just pass None as a
472
+         gradient for that output.
473
+
474
+ You can use the :attr:`ctx` object to pass any value from the forward to this
475
+         function.
476
+ """
477
+ raise NotImplementedError(
478
+ "You must implement the jvp function for custom "
479
+ "autograd.Function to use it with forward mode AD."
480
+ )
481
+
482
+
483
+ class Function(_SingleLevelFunction):
484
+ r"""Base class to create custom `autograd.Function`.
485
+
486
+ To create a custom `autograd.Function`, subclass this class and implement
487
+ the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom
488
+ op in the forward pass, call the class method ``apply``. Do not call
489
+ :meth:`forward` directly.
490
+
491
+ To ensure correctness and best performance, make sure you are calling the
492
+ correct methods on ``ctx`` and validating your backward function using
493
+ :func:`torch.autograd.gradcheck`.
494
+
495
+ See :ref:`extending-autograd` for more details on how to use this class.
496
+
497
+ Examples::
498
+
499
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
500
+ >>> class Exp(Function):
501
+ >>> @staticmethod
502
+ >>> def forward(ctx, i):
503
+ >>> result = i.exp()
504
+ >>> ctx.save_for_backward(result)
505
+ >>> return result
506
+ >>>
507
+ >>> @staticmethod
508
+ >>> def backward(ctx, grad_output):
509
+ >>> result, = ctx.saved_tensors
510
+ >>> return grad_output * result
511
+ >>>
512
+ >>> # Use it by calling the apply method:
513
+ >>> # xdoctest: +SKIP
514
+ >>> output = Exp.apply(input)
515
+ """
516
+
517
+ def __init__(self, *args, **kwargs):
518
+ cls = self.__class__
519
+ warnings.warn(
520
+             f"{cls} should not be instantiated. Methods on autograd functions "
521
+ "are all static, so you should invoke them on the class itself. "
522
+ "Instantiating an autograd function will raise an "
523
+ "error in a future version of PyTorch.",
524
+ DeprecationWarning,
525
+ stacklevel=2,
526
+ )
527
+
528
+ def __call__(self, *args, **kwargs):
529
+ raise RuntimeError(
530
+ "Legacy autograd function with non-static forward method is deprecated. "
531
+ "Please use new-style autograd function with static forward method. "
532
+ "(Example: https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function)"
533
+ )
534
+
535
+ # for the tracer
536
+ is_traceable = False
537
+
538
+ """
539
+ Bool that specifies if PyTorch should attempt to autogenerate
540
+ :func:`torch.vmap` support for this autograd.Function. You may set this to
541
+ True only if this autograd.Function's forward, backward, and jvp (if they
542
+ exist) are written using PyTorch operations; otherwise, please override
543
+ :meth:`torch.autograd.Function.vmap` to add support for :func:`torch.vmap`.
544
+
545
+ Please see :ref:`func-autograd-function` for more details.
546
+ """
547
+ generate_vmap_rule = False
548
+
549
+ @staticmethod
550
+ def vmap(info, in_dims, *args):
551
+ r"""Define the behavior for this autograd.Function underneath :func:`torch.vmap`.
552
+
553
+ For a :func:`torch.autograd.Function` to support
554
+ :func:`torch.vmap`, you must either override this static method, or set
555
+ ``generate_vmap_rule`` to ``True`` (you may not do both).
556
+
557
+ If you choose to override this staticmethod: it must accept
558
+
559
+ - an ``info`` object as the first argument. ``info.batch_size``
560
+ specifies the size of the dimension being vmapped over,
561
+ while ``info.randomness`` is the randomness option passed to
562
+ :func:`torch.vmap`.
563
+ - an ``in_dims`` tuple as the second argument.
564
+ For each arg in ``args``, ``in_dims`` has a corresponding
565
+ ``Optional[int]``. It is ``None`` if the arg is not a Tensor or if
566
+ the arg is not being vmapped over, otherwise, it is an integer
567
+ specifying what dimension of the Tensor is being vmapped over.
568
+ - ``*args``, which is the same as the args to :meth:`~Function.forward`.
569
+
570
+ The return of the vmap staticmethod is a tuple of ``(output, out_dims)``.
571
+ Similar to ``in_dims``, ``out_dims`` should be of the same structure as
572
+ ``output`` and contain one ``out_dim`` per output that specifies if the
573
+ output has the vmapped dimension and what index it is in.
574
+
575
+ Please see :ref:`func-autograd-function` for more details.
576
+ """
577
+ raise NotImplementedError(
578
+ "To use autograd.Function with vmap, you must either override the "
579
+ "vmap staticmethod or set generate_vmap_rule=True."
580
+ )
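For Functions whose forward and backward are written entirely with PyTorch ops, the autogenerated rule is usually enough; a hedged sketch (hypothetical class name, not part of this module) is:

import torch
from torch.autograd import Function

class MyExp(Function):
    generate_vmap_rule = True          # let PyTorch derive the vmap rule

    @staticmethod
    def forward(x):
        return x.exp()

    @staticmethod
    def setup_context(ctx, inputs, output):
        ctx.save_for_backward(output)

    @staticmethod
    def backward(ctx, grad_out):
        result, = ctx.saved_tensors
        return grad_out * result

batched = torch.vmap(MyExp.apply)(torch.randn(8, 3))
print(batched.shape)  # torch.Size([8, 3])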
581
+
582
+ @classmethod
583
+ def apply(cls, *args, **kwargs):
584
+ def bind_default_args(func, *args, **kwargs):
585
+ signature = inspect.signature(func)
586
+ bound_args = signature.bind(*args, **kwargs)
587
+ bound_args.apply_defaults()
588
+
589
+ return bound_args.args
590
+
591
+ is_setup_ctx_defined = cls.setup_context != _SingleLevelFunction.setup_context
592
+ if is_setup_ctx_defined:
593
+ args = bind_default_args(cls.forward, *args, **kwargs)
594
+
595
+ if not torch._C._are_functorch_transforms_active():
596
+ # See NOTE: [functorch vjp and autograd interaction]
597
+ args = _functorch.utils.unwrap_dead_wrappers(args)
598
+ return super().apply(*args, **kwargs) # type: ignore[misc]
599
+
600
+ if not is_setup_ctx_defined:
601
+ raise RuntimeError(
602
+ "In order to use an autograd.Function with functorch transforms "
603
+ "(vmap, grad, jvp, jacrev, ...), it must override the setup_context "
604
+ "staticmethod. For more details, please see "
605
+ "https://pytorch.org/docs/master/notes/extending.func.html"
606
+ )
607
+
608
+ return custom_function_call(cls, *args, **kwargs)
609
+
610
+ @staticmethod
611
+ def _compiled_autograd_key(ctx):
612
+ return (ctx._autograd_function_id,)
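The class docstring recommends validating ``backward`` with gradcheck; assuming the ``Exp`` example above is in scope, a minimal check looks like this (double precision is needed for the numerical comparison):

inp = torch.randn(4, dtype=torch.double, requires_grad=True)
print(torch.autograd.gradcheck(Exp.apply, (inp,)))  # True if backward matches the numerical gradients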
613
+
614
+
615
+ def once_differentiable(fn):
616
+ @functools.wraps(fn)
617
+ def wrapper(ctx, *args):
618
+ with torch.no_grad():
619
+ outputs = fn(ctx, *args)
620
+
621
+ if not torch.is_grad_enabled():
622
+ return outputs
623
+
624
+ # If any of the inputs have requires_grad=True, we force the outputs
625
+ # to have requires_grad=True but point to a grad_fn which throws an
626
+ # error message during (double) back-propagation.
627
+ # XXX: this is only an approximation of requires_grad - there's no way
628
+ # to figure out if fn didn't use ctx.saved_tensors and as a result
629
+ # some Tensors might require grad, even if no args do.
630
+ # Unfortunately, this leads to unexpected error messages ("no nodes
631
+ # require computing gradients"), but I don't have a better idea.
632
+ # These functions would raise an error in backward anyway.
633
+ requires_grad = any(
634
+ isinstance(arg, torch.Tensor) and arg.requires_grad for arg in args
635
+ )
636
+ if not requires_grad:
637
+ return outputs
638
+
639
+ if not isinstance(outputs, tuple):
640
+ outputs = (outputs,)
641
+
642
+ err_fn = _functions.DelayedError(
643
+ b"trying to differentiate twice a function that was marked "
644
+ b"with @once_differentiable",
645
+ len(outputs),
646
+ )
647
+
648
+ # Create aliases of each output that has requires_grad=True. We need
649
+ # at least one of the inputs to err_fn to require grad so that the
650
+ # output will have a grad_fn.
651
+ def fake_requires_grad(var):
652
+ if var is not None:
653
+ var = var.detach()
654
+ var.requires_grad = True
655
+ return var
656
+
657
+ return err_fn(*[fake_requires_grad(v) for v in outputs])
658
+
659
+ return wrapper
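A hypothetical illustration of the decorator (not part of this file): the first-order gradient is computed normally, but differentiating through it a second time fails, which is exactly the contract ``once_differentiable`` enforces.

import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable

class Square(Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x * x

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        return 2 * x * grad_output

x = torch.tensor(3., requires_grad=True)
y = Square.apply(x)
g, = torch.autograd.grad(y, x, create_graph=True)  # first-order gradient works
try:
    g.backward()                                    # second differentiation is refused
except RuntimeError as err:
    print("double backward failed as expected:", err)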
660
+
661
+
662
+ def traceable(fn_cls):
663
+ r"""Mark Function as traceable for the JIT.
664
+
665
+ Traceable functions have additional restrictions - they can't pass any
666
+ data-dependent values to backward (e.g. Prod passes the output, which makes
667
+ it non-traceable), and their backward should be implemented entirely in terms
668
+ of operations on autograd Tensors in all cases.
669
+
670
+ DON'T USE THIS DECORATOR. IT IS FOR INTERNAL USE ONLY AND SHOULD BE HANDLED WITH
671
+ CARE (or can give incorrect results otherwise).
672
+ """
673
+ warnings.warn(
674
+ "torch.autograd.function.traceable is deprecated "
675
+ "and will be removed in PyTorch 2.4.",
676
+ stacklevel=2,
677
+ )
678
+ fn_cls.is_traceable = True
679
+ return fn_cls
680
+
681
+
682
+ class InplaceFunction(Function):
683
+ r"""
684
+ This class is here only for backward compatibility reasons.
685
+ Use :class:`Function` instead of this for any new use case.
686
+ """
687
+
688
+ def __init__(self, inplace=False):
689
+ super().__init__()
690
+ self.inplace = inplace
691
+
692
+
693
+ def _nested_map(condition, fn, condition_msg=None):
694
+ def _map(obj):
695
+ if condition(obj):
696
+ return fn(obj)
697
+ elif obj is None:
698
+ return None
699
+ elif isinstance(obj, (list, tuple)):
700
+ mapped = (_map(x) for x in obj)
701
+ if hasattr(obj, "_fields"):
702
+ # obj is namedtuple
703
+ return type(obj)(*mapped)
704
+ return type(obj)(mapped)
705
+ elif isinstance(obj, dict):
706
+ return {x: _map(obj[x]) for x in obj}
707
+ else:
708
+ raise ValueError(
709
+ "Auto nesting doesn't know how to process "
710
+ "an input object of type "
711
+ + torch.typename(obj)
712
+ + (
713
+ ". Accepted types: " + condition_msg + ", or lists/tuples of them"
714
+ if condition_msg
715
+ else ""
716
+ )
717
+ )
718
+
719
+ return _map
720
+
721
+
722
+ def _jit_unwrap_structured(obj):
723
+ if hasattr(obj, "_jit_unwrap"):
724
+ return obj._jit_unwrap()
725
+ return obj
726
+
727
+
728
+ def _iter_filter(condition, allow_unknown=False, condition_msg=None, conversion=None):
729
+ def _iter(obj):
730
+ if conversion is not None:
731
+ obj = conversion(obj)
732
+ if condition(obj):
733
+ yield obj
734
+ elif obj is None:
735
+ return
736
+ elif isinstance(obj, (list, tuple)):
737
+ for o in obj:
738
+ yield from _iter(o)
739
+ elif isinstance(obj, dict):
740
+ # We only accept primitive key types, so we needn't inspect them
741
+ for o in obj.values():
742
+ yield from _iter(o)
743
+ elif allow_unknown:
744
+ yield obj
745
+ else:
746
+ raise ValueError(
747
+ "Auto nesting doesn't know how to process "
748
+ "an input object of type "
749
+ + torch.typename(obj)
750
+ + (
751
+ ". Accepted types: " + condition_msg + ", or lists/tuples of them"
752
+ if condition_msg
753
+ else ""
754
+ )
755
+ )
756
+
757
+ return _iter
758
+
759
+
760
+ def _unflatten(input, proto):
761
+ # unflatten a list or tuple input into a nested list/tuple structure
762
+ # specified by proto
763
+ def unflatten_helper(input, proto):
764
+ res: List[Optional[torch.Tensor]] = []
765
+ if hasattr(proto, "_jit_wrap"):
766
+ return proto._jit_wrap(input)
767
+ if not isinstance(proto, (list, tuple)):
768
+ return input[0], input[1:]
769
+ for e in proto:
770
+ if e is None:
771
+ res.append(e)
772
+ else:
773
+ res_e, input = unflatten_helper(input, e)
774
+ res.append(res_e)
775
+ return type(proto)(res), input
776
+
777
+ return unflatten_helper(input, proto)[0]
778
+
779
+
780
+ _iter_jit_values = _iter_filter(
781
+ lambda o: o is None or isinstance(o, torch._C.Value),
782
+ condition_msg="jit's Values or None",
783
+ )
784
+ _iter_tensors = _iter_filter(
785
+ lambda x: isinstance(x, torch.Tensor),
786
+ condition_msg="Tensors",
787
+ conversion=_jit_unwrap_structured,
788
+ )
789
+ _iter_tensors_permissive = _iter_filter(
790
+ lambda x: isinstance(x, torch.Tensor),
791
+ allow_unknown=True,
792
+ condition_msg="Tensors (permissive)",
793
+ )
794
+ _iter_None_tensors = _iter_filter(
795
+ lambda o: o is None or isinstance(o, torch.Tensor), condition_msg="Tensors or None"
796
+ )
797
+ _map_tensor_data = _nested_map(
798
+ lambda x: isinstance(x, torch.Tensor), lambda o: o.data, condition_msg="Tensors"
799
+ )
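A quick, hypothetical illustration of the round trip these private helpers implement for the legacy ``NestedIOFunction`` below: ``_iter_tensors`` flattens an arbitrary nesting of tensors and ``_unflatten`` restores the original structure from a prototype.

import torch

nested = (torch.ones(1), [torch.zeros(2), torch.full((3,), 2.0)])
flat = tuple(_iter_tensors(nested))   # three flat tensors
restored = _unflatten(flat, nested)   # same nesting as the prototype
print(len(flat), type(restored), type(restored[1]))  # 3 <class 'tuple'> <class 'list'>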
800
+
801
+
802
+ class NestedIOFunction(Function):
803
+ r"""
804
+ This class is here only for backward compatibility reasons.
805
+ Use :class:`Function` instead of this for any new use case.
806
+ """
807
+ # The 'type: ignore' statements are needed here because these functions are declared as '@staticmethod' in the
808
+ # superclass (Function) but are instance methods here, which mypy reports as incompatible.
809
+
810
+ def _do_forward(self, *input):
811
+ self._nested_input = input
812
+ flat_input = tuple(_iter_tensors(input))
813
+ flat_output = super()._do_forward(*flat_input) # type: ignore[misc]
814
+ nested_output = self._nested_output
815
+ nested_tensors = _unflatten(flat_output, self._nested_output)
816
+ return nested_tensors
817
+
818
+ def _do_backward(self, gradients, retain_variables):
819
+ self.retain_variables = retain_variables
820
+ result = super()._do_backward(gradients, retain_variables) # type: ignore[misc]
821
+ if not retain_variables:
822
+ del self._nested_output
823
+ del self._to_save_nested
824
+ return result
825
+
826
+ def backward(self, *gradients: Any) -> Any: # type: ignore[override]
827
+ r"""
828
+ Shared backward utility.
829
+ """
830
+ nested_gradients = _unflatten(gradients, self._nested_output)
831
+ result = self.backward_extended(*nested_gradients) # type: ignore[func-returns-value]
832
+ return tuple(_iter_None_tensors(result))
833
+
834
+ __call__ = _do_forward
835
+
836
+ def forward(self, *args: Any) -> Any: # type: ignore[override]
837
+ r"""
838
+ Shared forward utility.
839
+ """
840
+ nested_tensors = _map_tensor_data(self._nested_input)
841
+ result = self.forward_extended(*nested_tensors) # type: ignore[func-returns-value]
842
+ del self._nested_input
843
+ self._nested_output = result
844
+ return tuple(_iter_tensors(result))
845
+
846
+ def save_for_backward(self, *args: Any) -> None:
847
+ r"""
848
+ See :meth:`Function.save_for_backward`.
849
+ """
850
+ self.to_save = tuple(_iter_tensors(args))
851
+ self._to_save_nested = args
852
+
853
+ @property
854
+ def saved_tensors(self):
855
+ r"""
856
+ See :meth:`Function.saved_tensors`.
857
+ """
858
+ flat_tensors = super().saved_tensors # type: ignore[misc]
859
+ return _unflatten(flat_tensors, self._to_save_nested)
860
+
861
+ def mark_dirty(self, *args: Any, **kwargs: Any) -> None:
862
+ r"""
863
+ See :meth:`Function.mark_dirty`.
864
+ """
865
+ self.dirty_tensors = tuple(_iter_tensors((args, kwargs)))
866
+
867
+ def mark_non_differentiable(self, *args: Any, **kwargs: Any) -> None:
868
+ r"""
869
+ See :meth:`Function.mark_non_differentiable`.
870
+ """
871
+ self.non_differentiable = tuple(_iter_tensors((args, kwargs)))
872
+
873
+ def forward_extended(self, *input: Any) -> None:
874
+ r"""
875
+ User defined forward.
876
+ """
877
+ raise NotImplementedError
878
+
879
+ def backward_extended(self, *grad_output: Any) -> None:
880
+ r"""
881
+ User defined backward.
882
+ """
883
+ raise NotImplementedError
venv/lib/python3.10/site-packages/torch/autograd/functional.py ADDED
@@ -0,0 +1,1182 @@
1
+ from typing import List, Tuple
2
+
3
+ import torch
4
+ from torch._vmap_internals import _vmap
5
+ from . import forward_ad as fwAD
6
+
7
+ __all__ = ["vjp", "jvp", "jacobian", "hessian", "hvp", "vhp"]
8
+
9
+ # Utility functions
10
+
11
+
12
+ def _as_tuple_nocheck(x):
13
+ if isinstance(x, tuple):
14
+ return x
15
+ elif isinstance(x, list):
16
+ return tuple(x)
17
+ else:
18
+ return (x,)
19
+
20
+
21
+ def _as_tuple(inp, arg_name=None, fn_name=None):
22
+ # Ensures that inp is a tuple of Tensors
23
+ # Returns whether or not the original inp was a tuple and the tupled version of the input
24
+ if arg_name is None and fn_name is None:
25
+ return _as_tuple_nocheck(inp)
26
+
27
+ is_inp_tuple = True
28
+ if not isinstance(inp, tuple):
29
+ inp = (inp,)
30
+ is_inp_tuple = False
31
+
32
+ for i, el in enumerate(inp):
33
+ if not isinstance(el, torch.Tensor):
34
+ if is_inp_tuple:
35
+ raise TypeError(
36
+ f"The {arg_name} given to {fn_name} must be either a Tensor or a tuple of Tensors but the"
37
+ f" value at index {i} has type {type(el)}."
38
+ )
39
+ else:
40
+ raise TypeError(
41
+ f"The {arg_name} given to {fn_name} must be either a Tensor or a tuple of Tensors but the"
42
+ f" given {arg_name} has type {type(el)}."
43
+ )
44
+
45
+ return is_inp_tuple, inp
46
+
47
+
48
+ def _tuple_postprocess(res, to_unpack):
49
+ # Unpacks a potentially nested tuple of Tensors
50
+ # to_unpack should be a single boolean or a tuple of two booleans.
51
+ # It is used to:
52
+ # - invert _as_tuple when res should match the inp given to _as_tuple
53
+ # - optionally remove nesting of two tuples created by multiple calls to _as_tuple
54
+ if isinstance(to_unpack, tuple):
55
+ assert len(to_unpack) == 2
56
+ if not to_unpack[1]:
57
+ res = tuple(el[0] for el in res)
58
+ if not to_unpack[0]:
59
+ res = res[0]
60
+ else:
61
+ if not to_unpack:
62
+ res = res[0]
63
+ return res
64
+
65
+
66
+ def _grad_preprocess(inputs, create_graph, need_graph):
67
+ # Preprocess the inputs to make sure they require gradient
68
+ # inputs is a tuple of Tensors to preprocess
69
+ # create_graph specifies if the user wants gradients to flow back to the Tensors in inputs
70
+ # need_graph specifies if we internally want gradients to flow back to the Tensors in res
71
+ # Note that we *always* create a new Tensor object to be able to see the difference between
72
+ # inputs given as arguments and the same Tensors automatically captured by the user function.
73
+ # Check this issue for more details on how that can happen: https://github.com/pytorch/pytorch/issues/32576
74
+ res = []
75
+ for inp in inputs:
76
+ if create_graph and inp.requires_grad:
77
+ # Create at least a new Tensor object in a differentiable way
78
+ if not inp.is_sparse:
79
+ # Use .view_as() to get a shallow copy
80
+ res.append(inp.view_as(inp))
81
+ else:
82
+ # We cannot use view for sparse Tensors so we clone
83
+ res.append(inp.clone())
84
+ else:
85
+ res.append(inp.detach().requires_grad_(need_graph))
86
+ return tuple(res)
87
+
88
+
89
+ def _grad_postprocess(inputs, create_graph):
90
+ # Postprocess the generated Tensors to avoid returning Tensors with history when the user did not
91
+ # request it.
92
+ if isinstance(inputs[0], torch.Tensor):
93
+ if not create_graph:
94
+ return tuple(inp.detach() for inp in inputs)
95
+ else:
96
+ return inputs
97
+ else:
98
+ return tuple(_grad_postprocess(inp, create_graph) for inp in inputs)
99
+
100
+
101
+ def _validate_v(v, other, is_other_tuple):
102
+ # This assumes that other is the correct shape, and v should match
103
+ # Both are assumed to be tuples of Tensors
104
+ if len(other) != len(v):
105
+ if is_other_tuple:
106
+ raise RuntimeError(
107
+ f"v is a tuple of invalid length: should be {len(other)} but got {len(v)}."
108
+ )
109
+ else:
110
+ raise RuntimeError("The given v should contain a single Tensor.")
111
+
112
+ for idx, (el_v, el_other) in enumerate(zip(v, other)):
113
+ if el_v.size() != el_other.size():
114
+ prepend = ""
115
+ if is_other_tuple:
116
+ prepend = f"Entry {idx} in "
117
+ raise RuntimeError(
118
+ f"{prepend}v has invalid size: should be {el_other.size()} but got {el_v.size()}."
119
+ )
120
+
121
+
122
+ def _check_requires_grad(inputs, input_type, strict):
123
+ # Used to make all the necessary checks to raise nice errors in strict mode.
124
+ if not strict:
125
+ return
126
+
127
+ if input_type not in ["outputs", "grad_inputs", "jacobian", "hessian"]:
128
+ raise RuntimeError("Invalid input_type to _check_requires_grad")
129
+ for i, inp in enumerate(inputs):
130
+ if inp is None:
131
+ # This can only be reached for grad_inputs.
132
+ raise RuntimeError(
133
+ f"The output of the user-provided function is independent of input {i}."
134
+ " This is not allowed in strict mode."
135
+ )
136
+ if not inp.requires_grad:
137
+ if input_type == "hessian":
138
+ raise RuntimeError(
139
+ f"The hessian of the user-provided function with respect to input {i}"
140
+ " is independent of the input. This is not allowed in strict mode."
141
+ " You should ensure that your function is thrice differentiable and that"
142
+ " the hessian depends on the inputs."
143
+ )
144
+ elif input_type == "jacobian":
145
+ raise RuntimeError(
146
+ "While computing the hessian, found that the jacobian of the user-provided"
147
+ f" function with respect to input {i} is independent of the input. This is not"
148
+ " allowed in strict mode. You should ensure that your function is twice"
149
+ " differentiable and that the jacobian depends on the inputs (this would be"
150
+ " violated by a linear function for example)."
151
+ )
152
+ elif input_type == "grad_inputs":
153
+ raise RuntimeError(
154
+ f"The gradient with respect to input {i} is independent of the inputs of the"
155
+ " user-provided function. This is not allowed in strict mode."
156
+ )
157
+ else:
158
+ raise RuntimeError(
159
+ f"Output {i} of the user-provided function does not require gradients."
160
+ " The outputs must be computed in a differentiable manner from the input"
161
+ " when running in strict mode."
162
+ )
163
+
164
+
165
+ def _autograd_grad(
166
+ outputs,
167
+ inputs,
168
+ grad_outputs=None,
169
+ create_graph=False,
170
+ retain_graph=None,
171
+ is_grads_batched=False,
172
+ ):
173
+ # Version of autograd.grad that accepts `None` in outputs and do not compute gradients for them.
174
+ # This has the extra constraint that inputs has to be a tuple
175
+ assert isinstance(outputs, tuple)
176
+ if grad_outputs is None:
177
+ grad_outputs = (None,) * len(outputs)
178
+ assert isinstance(grad_outputs, tuple)
179
+ assert len(outputs) == len(grad_outputs)
180
+
181
+ new_outputs: Tuple[torch.Tensor, ...] = tuple()
182
+ new_grad_outputs: Tuple[torch.Tensor, ...] = tuple()
183
+ for out, grad_out in zip(outputs, grad_outputs):
184
+ if out is not None and out.requires_grad:
185
+ new_outputs += (out,)
186
+ new_grad_outputs += (grad_out,)
187
+
188
+ if len(new_outputs) == 0:
189
+ # No differentiable output, we don't need to call the autograd engine
190
+ return (None,) * len(inputs)
191
+ else:
192
+ return torch.autograd.grad(
193
+ new_outputs,
194
+ inputs,
195
+ new_grad_outputs,
196
+ allow_unused=True,
197
+ create_graph=create_graph,
198
+ retain_graph=retain_graph,
199
+ is_grads_batched=is_grads_batched,
200
+ )
201
+
202
+
203
+ def _fill_in_zeros(grads, refs, strict, create_graph, stage):
204
+ # Used to detect None in the grads and depending on the flags, either replace them
205
+ # with Tensors full of 0s of the appropriate size based on the refs or raise an error.
206
+ # strict and create graph allow us to detect when it is appropriate to raise an error
207
+ # stage gives us information of which backward call we consider to give good error message
208
+ if stage not in ["back", "back_trick", "double_back", "double_back_trick"]:
209
+ raise RuntimeError(f"Invalid stage argument '{stage}' to _fill_in_zeros")
210
+
211
+ res: Tuple[torch.Tensor, ...] = tuple()
212
+ for i, grads_i in enumerate(grads):
213
+ if grads_i is None:
214
+ if strict:
215
+ if stage == "back":
216
+ raise RuntimeError(
217
+ "The output of the user-provided function is independent of "
218
+ f"input {i}. This is not allowed in strict mode."
219
+ )
220
+ elif stage == "back_trick":
221
+ raise RuntimeError(
222
+ f"The gradient with respect to the input is independent of entry {i}"
223
+ " in the grad_outputs when using the double backward trick to compute"
224
+ " forward mode gradients. This is not allowed in strict mode."
225
+ )
226
+ elif stage == "double_back":
227
+ raise RuntimeError(
228
+ "The jacobian of the user-provided function is independent of "
229
+ f"input {i}. This is not allowed in strict mode."
230
+ )
231
+ else:
232
+ raise RuntimeError(
233
+ "The hessian of the user-provided function is independent of "
234
+ f"entry {i} in the grad_jacobian. This is not allowed in strict "
235
+ "mode as it prevents from using the double backward trick to "
236
+ "replace forward mode AD."
237
+ )
238
+
239
+ grads_i = torch.zeros_like(refs[i])
240
+ else:
241
+ if strict and create_graph and not grads_i.requires_grad:
242
+ if "double" not in stage:
243
+ raise RuntimeError(
244
+ "The jacobian of the user-provided function is independent of "
245
+ f"input {i}. This is not allowed in strict mode when create_graph=True."
246
+ )
247
+ else:
248
+ raise RuntimeError(
249
+ "The hessian of the user-provided function is independent of "
250
+ f"input {i}. This is not allowed in strict mode when create_graph=True."
251
+ )
252
+
253
+ res += (grads_i,)
254
+
255
+ return res
256
+
257
+
258
+ # Public API
259
+
260
+
261
+ def vjp(func, inputs, v=None, create_graph=False, strict=False):
262
+ r"""Compute the dot product between a vector ``v`` and the Jacobian of the given function at the point given by the inputs.
263
+
264
+ Args:
265
+ func (function): a Python function that takes Tensor inputs and returns
266
+ a tuple of Tensors or a Tensor.
267
+ inputs (tuple of Tensors or Tensor): inputs to the function ``func``.
268
+ v (tuple of Tensors or Tensor): The vector for which the vector
269
+ Jacobian product is computed. Must be the same size as the output
270
+ of ``func``. This argument is optional when the output of ``func``
271
+ contains a single element and (if it is not provided) will be set
272
+ as a Tensor containing a single ``1``.
273
+ create_graph (bool, optional): If ``True``, both the output and result
274
+ will be computed in a differentiable way. Note that when ``strict``
275
+ is ``False``, the result can not require gradients or be
276
+ disconnected from the inputs. Defaults to ``False``.
277
+ strict (bool, optional): If ``True``, an error will be raised when we
278
+ detect that there exists an input such that all the outputs are
279
+ independent of it. If ``False``, we return a Tensor of zeros as the
280
+ vjp for said inputs, which is the expected mathematical value.
281
+ Defaults to ``False``.
282
+
283
+ Returns:
284
+ output (tuple): tuple with:
285
+ func_output (tuple of Tensors or Tensor): output of ``func(inputs)``
286
+
287
+ vjp (tuple of Tensors or Tensor): result of the dot product with
288
+ the same shape as the inputs.
289
+
290
+ Example:
291
+
292
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
293
+ >>> def exp_reducer(x):
294
+ ... return x.exp().sum(dim=1)
295
+ >>> inputs = torch.rand(4, 4)
296
+ >>> v = torch.ones(4)
297
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
298
+ >>> vjp(exp_reducer, inputs, v)
299
+ (tensor([5.7817, 7.2458, 5.7830, 6.7782]),
300
+ tensor([[1.4458, 1.3962, 1.3042, 1.6354],
301
+ [2.1288, 1.0652, 1.5483, 2.5035],
302
+ [2.2046, 1.1292, 1.1432, 1.3059],
303
+ [1.3225, 1.6652, 1.7753, 2.0152]]))
304
+
305
+ >>> vjp(exp_reducer, inputs, v, create_graph=True)
306
+ (tensor([5.7817, 7.2458, 5.7830, 6.7782], grad_fn=<SumBackward1>),
307
+ tensor([[1.4458, 1.3962, 1.3042, 1.6354],
308
+ [2.1288, 1.0652, 1.5483, 2.5035],
309
+ [2.2046, 1.1292, 1.1432, 1.3059],
310
+ [1.3225, 1.6652, 1.7753, 2.0152]], grad_fn=<MulBackward0>))
311
+
312
+ >>> def adder(x, y):
313
+ ... return 2 * x + 3 * y
314
+ >>> inputs = (torch.rand(2), torch.rand(2))
315
+ >>> v = torch.ones(2)
316
+ >>> vjp(adder, inputs, v)
317
+ (tensor([2.4225, 2.3340]),
318
+ (tensor([2., 2.]), tensor([3., 3.])))
319
+ """
320
+ with torch.enable_grad():
321
+ is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "vjp")
322
+ inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True)
323
+
324
+ outputs = func(*inputs)
325
+ is_outputs_tuple, outputs = _as_tuple(
326
+ outputs, "outputs of the user-provided function", "vjp"
327
+ )
328
+ _check_requires_grad(outputs, "outputs", strict=strict)
329
+
330
+ if v is not None:
331
+ _, v = _as_tuple(v, "v", "vjp")
332
+ v = _grad_preprocess(v, create_graph=create_graph, need_graph=False)
333
+ _validate_v(v, outputs, is_outputs_tuple)
334
+ else:
335
+ if len(outputs) != 1 or outputs[0].nelement() != 1:
336
+ raise RuntimeError(
337
+ "The vector v can only be None if the "
338
+ "user-provided function returns "
339
+ "a single Tensor with a single element."
340
+ )
341
+
342
+ enable_grad = True if create_graph else torch.is_grad_enabled()
343
+ with torch.set_grad_enabled(enable_grad):
344
+ grad_res = _autograd_grad(outputs, inputs, v, create_graph=create_graph)
345
+ vjp = _fill_in_zeros(grad_res, inputs, strict, create_graph, "back")
346
+
347
+ # Cleanup objects and return them to the user
348
+ outputs = _grad_postprocess(outputs, create_graph)
349
+ vjp = _grad_postprocess(vjp, create_graph)
350
+
351
+ return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess(
352
+ vjp, is_inputs_tuple
353
+ )
354
+
355
+
356
+ def jvp(func, inputs, v=None, create_graph=False, strict=False):
357
+ r"""Compute the dot product between the Jacobian of the given function at the point given by the inputs and a vector ``v``.
358
+
359
+ Args:
360
+ func (function): a Python function that takes Tensor inputs and returns
361
+ a tuple of Tensors or a Tensor.
362
+ inputs (tuple of Tensors or Tensor): inputs to the function ``func``.
363
+ v (tuple of Tensors or Tensor): The vector for which the Jacobian
364
+ vector product is computed. Must be the same size as the input of
365
+ ``func``. This argument is optional when the input to ``func``
366
+ contains a single element and (if it is not provided) will be set
367
+ as a Tensor containing a single ``1``.
368
+ create_graph (bool, optional): If ``True``, both the output and result
369
+ will be computed in a differentiable way. Note that when ``strict``
370
+ is ``False``, the result can not require gradients or be
371
+ disconnected from the inputs. Defaults to ``False``.
372
+ strict (bool, optional): If ``True``, an error will be raised when we
373
+ detect that there exists an input such that all the outputs are
374
+ independent of it. If ``False``, we return a Tensor of zeros as the
375
+ jvp for said inputs, which is the expected mathematical value.
376
+ Defaults to ``False``.
377
+
378
+ Returns:
379
+ output (tuple): tuple with:
380
+ func_output (tuple of Tensors or Tensor): output of ``func(inputs)``
381
+
382
+ jvp (tuple of Tensors or Tensor): result of the dot product with
383
+ the same shape as the output.
384
+
385
+ Note:
386
+ ``autograd.functional.jvp`` computes the jvp by using the backward of
387
+ the backward (sometimes called the double backwards trick). This is not
388
+ the most performant way of computing the jvp. Please consider using
389
+ :func:`torch.func.jvp` or the
390
+ :ref:`low-level forward-mode AD API <forward-mode-ad>` instead.
391
+
392
+ Example:
393
+
394
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
395
+ >>> def exp_reducer(x):
396
+ ... return x.exp().sum(dim=1)
397
+ >>> inputs = torch.rand(4, 4)
398
+ >>> v = torch.ones(4, 4)
399
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
400
+ >>> jvp(exp_reducer, inputs, v)
401
+ (tensor([6.3090, 4.6742, 7.9114, 8.2106]),
402
+ tensor([6.3090, 4.6742, 7.9114, 8.2106]))
403
+
404
+ >>> jvp(exp_reducer, inputs, v, create_graph=True)
405
+ (tensor([6.3090, 4.6742, 7.9114, 8.2106], grad_fn=<SumBackward1>),
406
+ tensor([6.3090, 4.6742, 7.9114, 8.2106], grad_fn=<SqueezeBackward1>))
407
+
408
+ >>> def adder(x, y):
409
+ ... return 2 * x + 3 * y
410
+ >>> inputs = (torch.rand(2), torch.rand(2))
411
+ >>> v = (torch.ones(2), torch.ones(2))
412
+ >>> jvp(adder, inputs, v)
413
+ (tensor([2.2399, 2.5005]),
414
+ tensor([5., 5.]))
415
+
416
+ """
417
+ with torch.enable_grad():
418
+ is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "jvp")
419
+ inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True)
420
+
421
+ if v is not None:
422
+ _, v = _as_tuple(v, "v", "jvp")
423
+ v = _grad_preprocess(v, create_graph=create_graph, need_graph=False)
424
+ _validate_v(v, inputs, is_inputs_tuple)
425
+ else:
426
+ if len(inputs) != 1 or inputs[0].nelement() != 1:
427
+ raise RuntimeError(
428
+ "The vector v can only be None if the input to "
429
+ "the user-provided function is a single Tensor "
430
+ "with a single element."
431
+ )
432
+
433
+ outputs = func(*inputs)
434
+ is_outputs_tuple, outputs = _as_tuple(
435
+ outputs, "outputs of the user-provided function", "jvp"
436
+ )
437
+ _check_requires_grad(outputs, "outputs", strict=strict)
438
+ # The backward is linear so the value of grad_outputs is not important as
439
+ # it won't appear in the double backward graph. We only need to ensure that
440
+ # it does not contain inf or nan.
441
+ grad_outputs = tuple(
442
+ torch.zeros_like(out, requires_grad=True) for out in outputs
443
+ )
444
+
445
+ grad_inputs = _autograd_grad(outputs, inputs, grad_outputs, create_graph=True)
446
+ _check_requires_grad(grad_inputs, "grad_inputs", strict=strict)
447
+
448
+ if create_graph:
449
+ with torch.enable_grad():
450
+ grad_res = _autograd_grad(
451
+ grad_inputs, grad_outputs, v, create_graph=create_graph
452
+ )
453
+ jvp = _fill_in_zeros(grad_res, outputs, strict, create_graph, "back_trick")
454
+ else:
455
+ grad_res = _autograd_grad(
456
+ grad_inputs, grad_outputs, v, create_graph=create_graph
457
+ )
458
+ jvp = _fill_in_zeros(grad_res, outputs, strict, create_graph, "back_trick")
459
+
460
+ # Cleanup objects and return them to the user
461
+ outputs = _grad_postprocess(outputs, create_graph)
462
+ jvp = _grad_postprocess(jvp, create_graph)
463
+
464
+ return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess(
465
+ jvp, is_outputs_tuple
466
+ )
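As the Note above suggests, forward-mode AD avoids the double-backward trick entirely; a minimal sketch using torch.autograd.forward_ad (the same module imported here as ``fwAD``), with all names below purely illustrative:

import torch
import torch.autograd.forward_ad as fwAD

def exp_reducer(x):
    return x.exp().sum(dim=1)

primal = torch.rand(4, 4)
tangent = torch.ones(4, 4)
with fwAD.dual_level():
    dual = fwAD.make_dual(primal, tangent)           # pack primal and tangent together
    out, jvp_out = fwAD.unpack_dual(exp_reducer(dual))
print(jvp_out)  # matches the second element returned by jvp(exp_reducer, primal, tangent)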
467
+
468
+
469
+ def _construct_standard_basis_for(
470
+ tensors: Tuple[torch.Tensor, ...], tensor_numels: Tuple[int, ...]
471
+ ) -> Tuple[torch.Tensor, ...]:
472
+ # This function:
473
+ # - constructs a N=sum(tensor_numels) standard basis. i.e. an NxN identity matrix.
474
+ # - Splits the identity matrix into chunks with each chunk size determined by `tensor_numels`.
475
+ # - Each chunk corresponds to one tensor. The chunk has the same dtype and
476
+ # device as the tensor
477
+ #
478
+ # For example, with tensor_numels = [1, 2, 1], this function returns:
479
+ # ( tensor([[1], tensor([[0, 0], tensor([[0],
480
+ # [0], [1, 0], [0],
481
+ # [0], [0, 1], [0],
482
+ # [0]]) , [0, 0]]) , [1]]) )
483
+ #
484
+ # Precondition: tensor_numels == tuple(tensor.numel() for tensor in tensors)
485
+ # Precondition: tensors always has at least one element.
486
+ #
487
+ # See NOTE: [Computing jacobian with vmap and grad for multiple tensors]
488
+ # for context behind this function. All the pre-conditions are guarded for
489
+ # in torch.autograd.functional.jacobian.
490
+ assert len(tensors) == len(tensor_numels)
491
+ assert len(tensors) > 0
492
+ total_numel = sum(tensor_numels)
493
+ chunks = tuple(
494
+ tensor.new_zeros(total_numel, tensor_numel)
495
+ for tensor, tensor_numel in zip(tensors, tensor_numels)
496
+ )
497
+ diag_start_idx = 0
498
+ for chunk, numel in zip(chunks, tensor_numels):
499
+ chunk.diagonal(diag_start_idx).fill_(1)
500
+ diag_start_idx -= numel
501
+ return chunks
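A hedged illustration (not part of the module) of what the helper returns: the columns of a sum(tensor_numels) x sum(tensor_numels) identity matrix, split into per-tensor chunks as described in the comment above.

import torch

ts = (torch.zeros(1), torch.zeros(2), torch.zeros(1))
for chunk in _construct_standard_basis_for(ts, (1, 2, 1)):
    print(chunk.shape)
# torch.Size([4, 1]), torch.Size([4, 2]), torch.Size([4, 1]):
# together they form the 4x4 identity, split column-wise as 1 + 2 + 1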
502
+
503
+
504
+ def _jacfwd(func, inputs, strict=False, vectorize=False):
505
+ if strict:
506
+ raise RuntimeError(
507
+ "torch.autograd.functional.jacobian: `strict=True` "
508
+ 'and `strategy="forward-mode"` are not supported together (yet). '
509
+ "Please either set `strict=False` or "
510
+ '`strategy="reverse-mode"`.'
511
+ )
512
+ is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "jacobian")
513
+ output_info = []
514
+
515
+ if vectorize:
516
+ # See NOTE: [Computing jacobian with vmap and grad for multiple outputs]
517
+ input_numels = tuple(input.numel() for input in inputs)
518
+
519
+ # Step 1: Prepare tangents
520
+ tangents = _construct_standard_basis_for(inputs, input_numels)
521
+
522
+ # Step 2: Compute vmap over computation with dual tensors
523
+ def jvp(tangents):
524
+ with fwAD.dual_level():
525
+ dual_inputs = tuple(
526
+ fwAD.make_dual(input, tangent.view_as(input))
527
+ for input, tangent in zip(inputs, tangents)
528
+ )
529
+ _is_outputs_tuple, dual_outputs = _as_tuple(
530
+ func(*dual_inputs), "outputs"
531
+ )
532
+ output_info.append(_is_outputs_tuple)
533
+ jv = []
534
+ primal_outs = []
535
+ for dual_out in dual_outputs:
536
+ primal, tangent = fwAD.unpack_dual(dual_out)
537
+ primal_outs.append(primal)
538
+ if tangent is not None:
539
+ jv.append(tangent)
540
+ else:
541
+ jv.append(torch.zeros_like(primal))
542
+ output_info.append(primal_outs)
543
+ return tuple(jv)
544
+
545
+ outputs_before_split = _vmap(jvp)(tangents)
546
+ is_outputs_tuple, outputs = output_info
547
+ # Step 3: for each of the output tangents, split along dim 0
548
+ jacobian_input_output = []
549
+ for jac_output_i, output_i in zip(outputs_before_split, outputs):
550
+ jacobian_output_i_output = []
551
+ for jac, input_j in zip(jac_output_i.split(input_numels, dim=0), inputs):
552
+ # We need to transpose the Jacobian because in forward AD, the
553
+ # batch dimension represents that of the inputs
554
+ jacobian_input_i_output_j = jac.permute(*range(1, jac.ndim), 0).reshape(
555
+ (*output_i.shape, *input_j.shape)
556
+ ) # noqa: C409
557
+
558
+ jacobian_output_i_output.append(jacobian_input_i_output_j)
559
+ jacobian_input_output.append(jacobian_output_i_output)
560
+
561
+ # Omit [Step 4] because everything is already transposed w/ forward AD
562
+ return _tuple_postprocess(
563
+ jacobian_input_output, (is_outputs_tuple, is_inputs_tuple)
564
+ )
565
+ else:
566
+ raise NotImplementedError(
567
+             "Computing Jacobian using forward-AD or forward-over-reverse Hessian is "
568
+ "only implemented for `vectorize=True`."
569
+ )
570
+
571
+
572
+ def jacobian(
573
+ func,
574
+ inputs,
575
+ create_graph=False,
576
+ strict=False,
577
+ vectorize=False,
578
+ strategy="reverse-mode",
579
+ ):
580
+ r"""Compute the Jacobian of a given function.
581
+
582
+ Args:
583
+ func (function): a Python function that takes Tensor inputs and returns
584
+ a tuple of Tensors or a Tensor.
585
+ inputs (tuple of Tensors or Tensor): inputs to the function ``func``.
586
+ create_graph (bool, optional): If ``True``, the Jacobian will be
587
+ computed in a differentiable manner. Note that when ``strict`` is
588
+ ``False``, the result can not require gradients or be disconnected
589
+ from the inputs. Defaults to ``False``.
590
+ strict (bool, optional): If ``True``, an error will be raised when we
591
+ detect that there exists an input such that all the outputs are
592
+ independent of it. If ``False``, we return a Tensor of zeros as the
593
+ jacobian for said inputs, which is the expected mathematical value.
594
+ Defaults to ``False``.
595
+ vectorize (bool, optional): This feature is experimental.
596
+ Please consider using :func:`torch.func.jacrev` or
597
+ :func:`torch.func.jacfwd` instead if you are looking for something
598
+ less experimental and more performant.
599
+ When computing the jacobian, usually we invoke
600
+ ``autograd.grad`` once per row of the jacobian. If this flag is
601
+ ``True``, we perform only a single ``autograd.grad`` call with
602
+ ``batched_grad=True`` which uses the vmap prototype feature.
603
+ Though this should lead to performance improvements in many cases,
604
+ because this feature is still experimental, there may be performance
605
+ cliffs. See :func:`torch.autograd.grad`'s ``batched_grad`` parameter for
606
+ more information.
607
+ strategy (str, optional): Set to ``"forward-mode"`` or ``"reverse-mode"`` to
608
+ determine whether the Jacobian will be computed with forward or reverse
609
+             mode AD. Currently, ``"forward-mode"`` requires ``vectorize=True``.
610
+ Defaults to ``"reverse-mode"``. If ``func`` has more outputs than
611
+ inputs, ``"forward-mode"`` tends to be more performant. Otherwise,
612
+ prefer to use ``"reverse-mode"``.
613
+
614
+ Returns:
615
+ Jacobian (Tensor or nested tuple of Tensors): if there is a single
616
+ input and output, this will be a single Tensor containing the
617
+ Jacobian for the linearized inputs and output. If one of the two is
618
+ a tuple, then the Jacobian will be a tuple of Tensors. If both of
619
+ them are tuples, then the Jacobian will be a tuple of tuple of
620
+ Tensors where ``Jacobian[i][j]`` will contain the Jacobian of the
621
+ ``i``\th output and ``j``\th input and will have as size the
622
+ concatenation of the sizes of the corresponding output and the
623
+ corresponding input and will have same dtype and device as the
624
+ corresponding input. If strategy is ``forward-mode``, the dtype will be
625
+ that of the output; otherwise, the input.
626
+
627
+ Example:
628
+
629
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
630
+ >>> def exp_reducer(x):
631
+ ... return x.exp().sum(dim=1)
632
+ >>> inputs = torch.rand(2, 2)
633
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
634
+ >>> jacobian(exp_reducer, inputs)
635
+ tensor([[[1.4917, 2.4352],
636
+ [0.0000, 0.0000]],
637
+ [[0.0000, 0.0000],
638
+ [2.4369, 2.3799]]])
639
+
640
+ >>> jacobian(exp_reducer, inputs, create_graph=True)
641
+ tensor([[[1.4917, 2.4352],
642
+ [0.0000, 0.0000]],
643
+ [[0.0000, 0.0000],
644
+ [2.4369, 2.3799]]], grad_fn=<ViewBackward>)
645
+
646
+ >>> def exp_adder(x, y):
647
+ ... return 2 * x.exp() + 3 * y
648
+ >>> inputs = (torch.rand(2), torch.rand(2))
649
+ >>> jacobian(exp_adder, inputs)
650
+ (tensor([[2.8052, 0.0000],
651
+ [0.0000, 3.3963]]),
652
+ tensor([[3., 0.],
653
+ [0., 3.]]))
654
+ """
655
+ assert strategy in ("forward-mode", "reverse-mode"), (
656
+ 'Expected strategy to be either "forward-mode" or "reverse-mode". Hint: If your '
657
+ 'function has more outputs than inputs, "forward-mode" tends to be more performant. '
658
+ 'Otherwise, prefer to use "reverse-mode".'
659
+ )
660
+ if strategy == "forward-mode":
661
+ if create_graph:
662
+ raise NotImplementedError(
663
+ "torch.autograd.functional.jacobian: `create_graph=True` "
664
+ 'and `strategy="forward-mode"` are not supported together (yet). '
665
+ "Please either set `create_graph=False` or "
666
+ '`strategy="reverse-mode"`.'
667
+ )
668
+ return _jacfwd(func, inputs, strict, vectorize)
669
+
670
+ with torch.enable_grad():
671
+ is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "jacobian")
672
+ inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True)
673
+
674
+ outputs = func(*inputs)
675
+ is_outputs_tuple, outputs = _as_tuple(
676
+ outputs, "outputs of the user-provided function", "jacobian"
677
+ )
678
+ _check_requires_grad(outputs, "outputs", strict=strict)
679
+
680
+ if vectorize:
681
+ if strict:
682
+ raise RuntimeError(
683
+ "torch.autograd.functional.jacobian: `strict=True` "
684
+ "and `vectorized=True` are not supported together. "
685
+ "Please either set `strict=False` or "
686
+ "`vectorize=False`."
687
+ )
688
+ # NOTE: [Computing jacobian with vmap and grad for multiple outputs]
689
+ #
690
+ # Let's consider f(x) = (x**2, x.sum()) and let x = torch.randn(3).
691
+ # It turns out we can compute the jacobian of this function with a single
692
+ # call to autograd.grad by using vmap over the correct grad_outputs.
693
+ #
694
+ # Firstly, one way to compute the jacobian is to stack x**2 and x.sum()
695
+ # into a 4D vector. E.g., use g(x) = torch.stack([x**2, x.sum()])
696
+ #
697
+ # To get the first row of the jacobian, we call
698
+ # >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([1, 0, 0, 0]))
699
+ # To get the 2nd row of the jacobian, we call
700
+ # >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([0, 1, 0, 0]))
701
+ # and so on.
702
+ #
703
+ # Using vmap, we can vectorize all 4 of these computations into one by
704
+ # passing the standard basis for R^4 as the grad_output.
705
+ # vmap(partial(autograd.grad, g(x), x))(torch.eye(4)).
706
+ #
707
+ # Now, how do we compute the jacobian *without stacking the output*?
708
+ # We can just split the standard basis across the outputs. So to
709
+ # compute the jacobian of f(x), we'd use
710
+ # >>> autograd.grad(f(x), x, grad_outputs=_construct_standard_basis_for(...))
711
+ # The grad_outputs looks like the following:
712
+ # ( torch.tensor([[1, 0, 0],
713
+ # [0, 1, 0],
714
+ # [0, 0, 1],
715
+ # [0, 0, 0]]),
716
+ # torch.tensor([[0],
717
+ # [0],
718
+ # [0],
719
+ # [1]]) )
720
+ #
721
+ # But we're not done yet!
722
+ # >>> vmap(partial(autograd.grad(f(x), x, grad_outputs=...)))
723
+ # returns a Tensor of shape [4, 3]. We have to remember to split the
724
+ # jacobian of shape [4, 3] into two:
725
+ # - one of shape [3, 3] for the first output
726
+ # - one of shape [ 3] for the second output
727
+
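+            # (Concrete illustration added for clarity; not part of the original note.)
+            # For f(x) = (x**2, x.sum()) with x = torch.randn(3), the output numels are (3, 1),
+            # so the standard basis of R^4 is split into a (4, 3) block for x**2 and a (4, 1)
+            # block for x.sum(). vmapping autograd.grad over those rows gives one (4, 3) tensor
+            # per input, which is then split along dim 0 and reshaped into a (3, 3) jacobian
+            # for the first output and a (3,) jacobian for the second.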
728
+ # Step 1: Construct grad_outputs by splitting the standard basis
729
+ output_numels = tuple(output.numel() for output in outputs)
730
+ grad_outputs = _construct_standard_basis_for(outputs, output_numels)
731
+ flat_outputs = tuple(output.reshape(-1) for output in outputs)
732
+
733
+ # Step 2: Call vmap + autograd.grad
734
+ def vjp(grad_output):
735
+ vj = list(
736
+ _autograd_grad(
737
+ flat_outputs,
738
+ inputs,
739
+ grad_output,
740
+ create_graph=create_graph,
741
+ is_grads_batched=True,
742
+ )
743
+ )
744
+ for el_idx, vj_el in enumerate(vj):
745
+ if vj_el is not None:
746
+ continue
747
+ vj[el_idx] = torch.zeros_like(inputs[el_idx]).expand(
748
+ (sum(output_numels),) + inputs[el_idx].shape
749
+ )
750
+ return tuple(vj)
751
+
752
+ jacobians_of_flat_output = vjp(grad_outputs)
753
+
754
+ # Step 3: The returned jacobian is one big tensor per input. In this step,
755
+ # we split each Tensor by output.
756
+ jacobian_input_output = []
757
+ for jac_input_i, input_i in zip(jacobians_of_flat_output, inputs):
758
+ jacobian_input_i_output = []
759
+ for jac, output_j in zip(
760
+ jac_input_i.split(output_numels, dim=0), outputs
761
+ ):
762
+ jacobian_input_i_output_j = jac.view(output_j.shape + input_i.shape)
763
+ jacobian_input_i_output.append(jacobian_input_i_output_j)
764
+ jacobian_input_output.append(jacobian_input_i_output)
765
+
766
+ # Step 4: Right now, `jacobian` is a List[List[Tensor]].
767
+ # The outer List corresponds to the number of inputs,
768
+ # the inner List corresponds to the number of outputs.
769
+ # We need to exchange the order of these and convert to tuples
770
+ # before returning.
771
+ jacobian_output_input = tuple(zip(*jacobian_input_output))
772
+
773
+ jacobian_output_input = _grad_postprocess(
774
+ jacobian_output_input, create_graph
775
+ )
776
+ return _tuple_postprocess(
777
+ jacobian_output_input, (is_outputs_tuple, is_inputs_tuple)
778
+ )
779
+
780
+ jacobian: Tuple[torch.Tensor, ...] = tuple()
781
+
782
+ for i, out in enumerate(outputs):
783
+ # mypy complains that expression and variable have different types due to the empty list
784
+ jac_i: Tuple[List[torch.Tensor]] = tuple([] for _ in range(len(inputs))) # type: ignore[assignment]
785
+ for j in range(out.nelement()):
786
+ vj = _autograd_grad(
787
+ (out.reshape(-1)[j],),
788
+ inputs,
789
+ retain_graph=True,
790
+ create_graph=create_graph,
791
+ )
792
+
793
+ for el_idx, (jac_i_el, vj_el, inp_el) in enumerate(
794
+ zip(jac_i, vj, inputs)
795
+ ):
796
+ if vj_el is not None:
797
+ if strict and create_graph and not vj_el.requires_grad:
798
+ msg = (
799
+ "The jacobian of the user-provided function is "
800
+ f"independent of input {i}. This is not allowed in "
801
+ "strict mode when create_graph=True."
802
+ )
803
+ raise RuntimeError(msg)
804
+ jac_i_el.append(vj_el)
805
+ else:
806
+ if strict:
807
+ msg = (
808
+ f"Output {i} of the user-provided function is "
809
+ f"independent of input {el_idx}. This is not allowed in "
810
+ "strict mode."
811
+ )
812
+ raise RuntimeError(msg)
813
+ jac_i_el.append(torch.zeros_like(inp_el))
814
+
815
+ jacobian += (
816
+ tuple(
817
+ torch.stack(jac_i_el, dim=0).view(
818
+ out.size() + inputs[el_idx].size() # type: ignore[operator]
819
+ )
820
+ for (el_idx, jac_i_el) in enumerate(jac_i)
821
+ ),
822
+ )
823
+
824
+ jacobian = _grad_postprocess(jacobian, create_graph)
825
+
826
+ return _tuple_postprocess(jacobian, (is_outputs_tuple, is_inputs_tuple))
827
+
828
+
829
+ def hessian(
830
+ func,
831
+ inputs,
832
+ create_graph=False,
833
+ strict=False,
834
+ vectorize=False,
835
+ outer_jacobian_strategy="reverse-mode",
836
+ ):
837
+ r"""Compute the Hessian of a given scalar function.
838
+
839
+ Args:
840
+ func (function): a Python function that takes Tensor inputs and returns
841
+ a Tensor with a single element.
842
+ inputs (tuple of Tensors or Tensor): inputs to the function ``func``.
843
+ create_graph (bool, optional): If ``True``, the Hessian will be computed in
844
+ a differentiable manner. Note that when ``strict`` is ``False``, the result can not
845
+ require gradients or be disconnected from the inputs.
846
+ Defaults to ``False``.
847
+ strict (bool, optional): If ``True``, an error will be raised when we detect that there exists an input
848
+ such that all the outputs are independent of it. If ``False``, we return a Tensor of zeros as the
849
+ hessian for said inputs, which is the expected mathematical value.
850
+ Defaults to ``False``.
851
+ vectorize (bool, optional): This feature is experimental.
852
+ Please consider using :func:`torch.func.hessian`
853
+ instead if you are looking for something less experimental and more performant.
854
+ When computing the hessian, usually we invoke
855
+ ``autograd.grad`` once per row of the hessian. If this flag is
856
+ ``True``, we use the vmap prototype feature as the backend to
857
+ vectorize calls to ``autograd.grad`` so we only invoke it once
858
+ instead of once per row. This should lead to performance
859
+ improvements in many use cases, however, due to this feature
860
+ being incomplete, there may be performance cliffs. Please
861
+ use `torch._C._debug_only_display_vmap_fallback_warnings(True)`
862
+ to show any performance warnings and file us issues if
863
+ warnings exist for your use case. Defaults to ``False``.
864
+ outer_jacobian_strategy (str, optional): The Hessian is computed by
865
+ computing the Jacobian of a Jacobian. The inner Jacobian is always
866
+ computed in reverse-mode AD. Setting strategy to ``"forward-mode"``
867
+ or ``"reverse-mode"`` determines whether the outer Jacobian will be
868
+ computed with forward or reverse mode AD. Currently, computing the outer
869
+            Jacobian in ``"forward-mode"`` requires ``vectorize=True``. Defaults
870
+ to ``"reverse-mode"``.
871
+
872
+ Returns:
873
+ Hessian (Tensor or a tuple of tuple of Tensors): if there is a single input,
874
+ this will be a single Tensor containing the Hessian for the input.
875
+ If it is a tuple, then the Hessian will be a tuple of tuples where
876
+ ``Hessian[i][j]`` will contain the Hessian of the ``i``\th input
877
+ and ``j``\th input with size the sum of the size of the ``i``\th input plus
878
+ the size of the ``j``\th input. ``Hessian[i][j]`` will have the same
879
+ dtype and device as the corresponding ``i``\th input.
880
+
881
+ Example:
882
+
883
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
884
+ >>> def pow_reducer(x):
885
+ ... return x.pow(3).sum()
886
+ >>> inputs = torch.rand(2, 2)
887
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
888
+ >>> hessian(pow_reducer, inputs)
889
+ tensor([[[[5.2265, 0.0000],
890
+ [0.0000, 0.0000]],
891
+ [[0.0000, 4.8221],
892
+ [0.0000, 0.0000]]],
893
+ [[[0.0000, 0.0000],
894
+ [1.9456, 0.0000]],
895
+ [[0.0000, 0.0000],
896
+ [0.0000, 3.2550]]]])
897
+
898
+ >>> hessian(pow_reducer, inputs, create_graph=True)
899
+ tensor([[[[5.2265, 0.0000],
900
+ [0.0000, 0.0000]],
901
+ [[0.0000, 4.8221],
902
+ [0.0000, 0.0000]]],
903
+ [[[0.0000, 0.0000],
904
+ [1.9456, 0.0000]],
905
+ [[0.0000, 0.0000],
906
+ [0.0000, 3.2550]]]], grad_fn=<ViewBackward>)
907
+
908
+
909
+ >>> def pow_adder_reducer(x, y):
910
+ ... return (2 * x.pow(2) + 3 * y.pow(2)).sum()
911
+ >>> inputs = (torch.rand(2), torch.rand(2))
912
+ >>> hessian(pow_adder_reducer, inputs)
913
+ ((tensor([[4., 0.],
914
+ [0., 4.]]),
915
+ tensor([[0., 0.],
916
+ [0., 0.]])),
917
+ (tensor([[0., 0.],
918
+ [0., 0.]]),
919
+ tensor([[6., 0.],
920
+ [0., 6.]])))
921
+ """
922
+ is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "hessian")
923
+ assert outer_jacobian_strategy in (
924
+ "forward-mode",
925
+ "reverse-mode",
926
+ ), 'Expected strategy to be either "forward-mode" or "reverse-mode".'
927
+
928
+ def ensure_single_output_function(*inp):
929
+ out = func(*inp)
930
+ is_out_tuple, t_out = _as_tuple(
931
+ out, "outputs of the user-provided function", "hessian"
932
+ )
933
+ _check_requires_grad(t_out, "outputs", strict=strict)
934
+
935
+ if is_out_tuple or not isinstance(out, torch.Tensor):
936
+ raise RuntimeError(
937
+ "The function given to hessian should return a single Tensor"
938
+ )
939
+
940
+ if out.nelement() != 1:
941
+ raise RuntimeError(
942
+ "The Tensor returned by the function given to hessian should contain a single element"
943
+ )
944
+
945
+ return out.squeeze()
946
+
947
+ def jac_func(*inp):
948
+ if outer_jacobian_strategy == "forward-mode":
949
+ # _grad_preprocess requires create_graph=True and input to require_grad
950
+ # or else the input will be detached
951
+ inp = tuple(t.requires_grad_(True) for t in inp)
952
+ jac = jacobian(ensure_single_output_function, inp, create_graph=True)
953
+ _check_requires_grad(jac, "jacobian", strict=strict)
954
+ return jac
955
+
956
+ res = jacobian(
957
+ jac_func,
958
+ inputs,
959
+ create_graph=create_graph,
960
+ strict=strict,
961
+ vectorize=vectorize,
962
+ strategy=outer_jacobian_strategy,
963
+ )
964
+ return _tuple_postprocess(res, (is_inputs_tuple, is_inputs_tuple))
965
+
966
+
967
+ def vhp(func, inputs, v=None, create_graph=False, strict=False):
968
+ r"""Compute the dot product between vector ``v`` and the Hessian of a given scalar function at a specified point.
969
+
970
+ Args:
971
+ func (function): a Python function that takes Tensor inputs and returns
972
+ a Tensor with a single element.
973
+ inputs (tuple of Tensors or Tensor): inputs to the function ``func``.
974
+ v (tuple of Tensors or Tensor): The vector for which the vector Hessian
975
+ product is computed. Must be the same size as the input of
976
+ ``func``. This argument is optional when ``func``'s input contains
977
+ a single element and (if it is not provided) will be set as a
978
+ Tensor containing a single ``1``.
979
+ create_graph (bool, optional): If ``True``, both the output and result
980
+ will be computed in a differentiable way. Note that when ``strict``
981
+ is ``False``, the result can not require gradients or be
982
+ disconnected from the inputs.
983
+ Defaults to ``False``.
984
+ strict (bool, optional): If ``True``, an error will be raised when we
985
+ detect that there exists an input such that all the outputs are
986
+ independent of it. If ``False``, we return a Tensor of zeros as the
987
+ vhp for said inputs, which is the expected mathematical value.
988
+ Defaults to ``False``.
989
+
990
+ Returns:
991
+ output (tuple): tuple with:
992
+ func_output (tuple of Tensors or Tensor): output of ``func(inputs)``
993
+
994
+ vhp (tuple of Tensors or Tensor): result of the dot product with the
995
+ same shape as the inputs.
996
+
997
+ Example:
998
+
999
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
1000
+ >>> def pow_reducer(x):
1001
+ ... return x.pow(3).sum()
1002
+ >>> inputs = torch.rand(2, 2)
1003
+ >>> v = torch.ones(2, 2)
1004
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
1005
+ >>> vhp(pow_reducer, inputs, v)
1006
+ (tensor(0.5591),
1007
+ tensor([[1.0689, 1.2431],
1008
+ [3.0989, 4.4456]]))
1009
+ >>> vhp(pow_reducer, inputs, v, create_graph=True)
1010
+ (tensor(0.5591, grad_fn=<SumBackward0>),
1011
+ tensor([[1.0689, 1.2431],
1012
+ [3.0989, 4.4456]], grad_fn=<MulBackward0>))
1013
+ >>> def pow_adder_reducer(x, y):
1014
+ ... return (2 * x.pow(2) + 3 * y.pow(2)).sum()
1015
+ >>> inputs = (torch.rand(2), torch.rand(2))
1016
+ >>> v = (torch.zeros(2), torch.ones(2))
1017
+ >>> vhp(pow_adder_reducer, inputs, v)
1018
+ (tensor(4.8053),
1019
+ (tensor([0., 0.]),
1020
+ tensor([6., 6.])))
1021
+ """
1022
+ with torch.enable_grad():
1023
+ is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "vhp")
1024
+ inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True)
1025
+
1026
+ if v is not None:
1027
+ _, v = _as_tuple(v, "v", "vhp")
1028
+ v = _grad_preprocess(v, create_graph=create_graph, need_graph=False)
1029
+ _validate_v(v, inputs, is_inputs_tuple)
1030
+ else:
1031
+ if len(inputs) != 1 or inputs[0].nelement() != 1:
1032
+ raise RuntimeError(
1033
+ "The vector v can only be None if the input to the user-provided function "
1034
+ "is a single Tensor with a single element."
1035
+ )
1036
+ outputs = func(*inputs)
1037
+ is_outputs_tuple, outputs = _as_tuple(
1038
+ outputs, "outputs of the user-provided function", "vhp"
1039
+ )
1040
+ _check_requires_grad(outputs, "outputs", strict=strict)
1041
+
1042
+ if is_outputs_tuple or not isinstance(outputs[0], torch.Tensor):
1043
+ raise RuntimeError(
1044
+ "The function given to vhp should return a single Tensor"
1045
+ )
1046
+
1047
+ if outputs[0].nelement() != 1:
1048
+ raise RuntimeError(
1049
+ "The Tensor returned by the function given to vhp should contain a single element"
1050
+ )
1051
+
1052
+ jac = _autograd_grad(outputs, inputs, create_graph=True)
1053
+ _check_requires_grad(jac, "jacobian", strict=strict)
1054
+
1055
+ enable_grad = True if create_graph else torch.is_grad_enabled()
1056
+ with torch.set_grad_enabled(enable_grad):
1057
+ grad_res = _autograd_grad(jac, inputs, v, create_graph=create_graph)
1058
+ vhp = _fill_in_zeros(grad_res, inputs, strict, create_graph, "double_back")
1059
+
1060
+ outputs = _grad_postprocess(outputs, create_graph)
1061
+ vhp = _grad_postprocess(vhp, create_graph)
1062
+
1063
+ return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess(
1064
+ vhp, is_inputs_tuple
1065
+ )
1066
+
1067
+
1068
+ def hvp(func, inputs, v=None, create_graph=False, strict=False):
1069
+ r"""Compute the dot product between the scalar function's Hessian and a vector ``v`` at a specified point.
1070
+
1071
+ Args:
1072
+ func (function): a Python function that takes Tensor inputs and returns
1073
+ a Tensor with a single element.
1074
+ inputs (tuple of Tensors or Tensor): inputs to the function ``func``.
1075
+ v (tuple of Tensors or Tensor): The vector for which the Hessian vector
1076
+ product is computed. Must be the same size as the input of
1077
+ ``func``. This argument is optional when ``func``'s input contains
1078
+ a single element and (if it is not provided) will be set as a
1079
+ Tensor containing a single ``1``.
1080
+ create_graph (bool, optional): If ``True``, both the output and result will be
1081
+ computed in a differentiable way. Note that when ``strict`` is
1082
+ ``False``, the result can not require gradients or be disconnected
1083
+ from the inputs. Defaults to ``False``.
1084
+ strict (bool, optional): If ``True``, an error will be raised when we
1085
+ detect that there exists an input such that all the outputs are
1086
+ independent of it. If ``False``, we return a Tensor of zeros as the
1087
+ hvp for said inputs, which is the expected mathematical value.
1088
+ Defaults to ``False``.
1089
+ Returns:
1090
+ output (tuple): tuple with:
1091
+ func_output (tuple of Tensors or Tensor): output of ``func(inputs)``
1092
+
1093
+ hvp (tuple of Tensors or Tensor): result of the dot product with
1094
+ the same shape as the inputs.
1095
+
1096
+ Example:
1097
+
1098
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
1099
+ >>> def pow_reducer(x):
1100
+ ... return x.pow(3).sum()
1101
+ >>> inputs = torch.rand(2, 2)
1102
+ >>> v = torch.ones(2, 2)
1103
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
1104
+ >>> hvp(pow_reducer, inputs, v)
1105
+ (tensor(0.1448),
1106
+ tensor([[2.0239, 1.6456],
1107
+ [2.4988, 1.4310]]))
1108
+
1109
+ >>> hvp(pow_reducer, inputs, v, create_graph=True)
1110
+ (tensor(0.1448, grad_fn=<SumBackward0>),
1111
+ tensor([[2.0239, 1.6456],
1112
+ [2.4988, 1.4310]], grad_fn=<MulBackward0>))
1113
+
1114
+
1115
+ >>> def pow_adder_reducer(x, y):
1116
+ ... return (2 * x.pow(2) + 3 * y.pow(2)).sum()
1117
+ >>> inputs = (torch.rand(2), torch.rand(2))
1118
+ >>> v = (torch.zeros(2), torch.ones(2))
1119
+ >>> hvp(pow_adder_reducer, inputs, v)
1120
+ (tensor(2.3030),
1121
+ (tensor([0., 0.]),
1122
+ tensor([6., 6.])))
1123
+
1124
+ Note:
1125
+
1126
+ This function is significantly slower than `vhp` due to backward mode AD constraints.
1127
+        If your function is twice continuously differentiable, then hvp = vhp.t(). So if you
1128
+        know that your function satisfies this condition, you should use vhp instead, which is
1129
+ much faster with the current implementation.
1130
+
1131
+ """
1132
+ with torch.enable_grad():
1133
+ is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "hvp")
1134
+ inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True)
1135
+
1136
+ if v is not None:
1137
+ _, v = _as_tuple(v, "v", "hvp")
1138
+ v = _grad_preprocess(v, create_graph=create_graph, need_graph=False)
1139
+ _validate_v(v, inputs, is_inputs_tuple)
1140
+ else:
1141
+ if len(inputs) != 1 or inputs[0].nelement() != 1:
1142
+ raise RuntimeError(
1143
+ "The vector v can only be None if the input to the user-provided function "
1144
+ "is a single Tensor with a single element."
1145
+ )
1146
+ outputs = func(*inputs)
1147
+ is_outputs_tuple, outputs = _as_tuple(
1148
+ outputs, "outputs of the user-provided function", "hvp"
1149
+ )
1150
+ _check_requires_grad(outputs, "outputs", strict=strict)
1151
+
1152
+ if is_outputs_tuple or not isinstance(outputs[0], torch.Tensor):
1153
+ raise RuntimeError(
1154
+ "The function given to hvp should return a single Tensor"
1155
+ )
1156
+
1157
+ if outputs[0].nelement() != 1:
1158
+ raise RuntimeError(
1159
+ "The Tensor returned by the function given to hvp should contain a single element"
1160
+ )
1161
+
1162
+ jac = _autograd_grad(outputs, inputs, create_graph=True)
1163
+ _check_requires_grad(jac, "jacobian", strict=strict)
1164
+
1165
+ grad_jac = tuple(torch.zeros_like(inp, requires_grad=True) for inp in inputs)
1166
+
1167
+ double_back = _autograd_grad(jac, inputs, grad_jac, create_graph=True)
1168
+ _check_requires_grad(jac, "hessian", strict=strict)
1169
+
1170
+ enable_grad = True if create_graph else torch.is_grad_enabled()
1171
+ with torch.set_grad_enabled(enable_grad):
1172
+ grad_res = _autograd_grad(double_back, grad_jac, v, create_graph=create_graph)
1173
+ hvp = _fill_in_zeros(
1174
+ grad_res, inputs, strict, create_graph, "double_back_trick"
1175
+ )
1176
+
1177
+ outputs = _grad_postprocess(outputs, create_graph)
1178
+ hvp = _grad_postprocess(hvp, create_graph)
1179
+
1180
+ return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess(
1181
+ hvp, is_inputs_tuple
1182
+ )
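For reference, a minimal usage sketch of the functional API defined in this file (an illustrative example added here, not part of the committed file):

import torch
from torch.autograd.functional import jacobian, hessian, vhp, hvp

def f(x):
    # scalar-valued function, so both the Jacobian and the Hessian are well defined
    return (x ** 3).sum()

x = torch.randn(3, dtype=torch.double)
v = torch.ones(3, dtype=torch.double)

J = jacobian(f, x)                  # shape (3,): gradient of the scalar output
H = hessian(f, x, vectorize=True)   # shape (3, 3), rows computed in one vmapped call
_, vhp_val = vhp(f, x, v)           # v^T H, same shape as x
_, hvp_val = hvp(f, x, v)           # H v; equals vhp_val here because H is symmetric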
venv/lib/python3.10/site-packages/torch/autograd/grad_mode.py ADDED
@@ -0,0 +1,396 @@
1
+ from typing import Any
2
+
3
+ import torch
4
+
5
+ from torch.utils._contextlib import (
6
+ _DecoratorContextManager,
7
+ _NoParamDecoratorContextManager,
8
+ F,
9
+ )
10
+
11
+ __all__ = [
12
+ "no_grad",
13
+ "enable_grad",
14
+ "set_grad_enabled",
15
+ "inference_mode",
16
+ "set_multithreading_enabled",
17
+ ]
18
+
19
+
20
+ class no_grad(_NoParamDecoratorContextManager):
21
+ r"""Context-manager that disables gradient calculation.
22
+
23
+ Disabling gradient calculation is useful for inference, when you are sure
24
+ that you will not call :meth:`Tensor.backward()`. It will reduce memory
25
+ consumption for computations that would otherwise have `requires_grad=True`.
26
+
27
+ In this mode, the result of every computation will have
28
+ `requires_grad=False`, even when the inputs have `requires_grad=True`.
29
+ There is an exception! All factory functions, or functions that create
30
+ a new Tensor and take a requires_grad kwarg, will NOT be affected by
31
+ this mode.
32
+
33
+ This context manager is thread local; it will not affect computation
34
+ in other threads.
35
+
36
+ Also functions as a decorator.
37
+
38
+ .. note::
39
+ No-grad is one of several mechanisms that can enable or
40
+ disable gradients locally see :ref:`locally-disable-grad-doc` for
41
+ more information on how they compare.
42
+
43
+ .. note::
44
+ This API does not apply to :ref:`forward-mode AD <forward-mode-ad>`.
45
+ If you want to disable forward AD for a computation, you can unpack
46
+ your dual tensors.
47
+
48
+ Example::
49
+ >>> # xdoctest: +SKIP
50
+ >>> x = torch.tensor([1.], requires_grad=True)
51
+ >>> with torch.no_grad():
52
+ ... y = x * 2
53
+ >>> y.requires_grad
54
+ False
55
+ >>> @torch.no_grad()
56
+ ... def doubler(x):
57
+ ... return x * 2
58
+ >>> z = doubler(x)
59
+ >>> z.requires_grad
60
+ False
61
+ >>> @torch.no_grad
62
+ ... def tripler(x):
63
+ ... return x * 3
64
+ >>> z = tripler(x)
65
+ >>> z.requires_grad
66
+ False
67
+ >>> # factory function exception
68
+ >>> with torch.no_grad():
69
+ ... a = torch.nn.Parameter(torch.rand(10))
70
+ >>> a.requires_grad
71
+ True
72
+ """
73
+
74
+ def __init__(self) -> None:
75
+ if not torch._jit_internal.is_scripting():
76
+ super().__init__()
77
+ self.prev = False
78
+
79
+ def __enter__(self) -> None:
80
+ self.prev = torch.is_grad_enabled()
81
+ torch.set_grad_enabled(False)
82
+
83
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
84
+ torch.set_grad_enabled(self.prev)
85
+
86
+
87
+ class enable_grad(_NoParamDecoratorContextManager):
88
+ r"""Context-manager that enables gradient calculation.
89
+
90
+ Enables gradient calculation, if it has been disabled via :class:`~no_grad`
91
+ or :class:`~set_grad_enabled`.
92
+
93
+ This context manager is thread local; it will not affect computation
94
+ in other threads.
95
+
96
+ Also functions as a decorator.
97
+
98
+ .. note::
99
+ enable_grad is one of several mechanisms that can enable or
100
+ disable gradients locally see :ref:`locally-disable-grad-doc` for
101
+ more information on how they compare.
102
+
103
+ .. note::
104
+ This API does not apply to :ref:`forward-mode AD <forward-mode-ad>`.
105
+
106
+ Example::
107
+ >>> # xdoctest: +SKIP
108
+ >>> x = torch.tensor([1.], requires_grad=True)
109
+ >>> with torch.no_grad():
110
+ ... with torch.enable_grad():
111
+ ... y = x * 2
112
+ >>> y.requires_grad
113
+ True
114
+ >>> y.backward()
115
+ >>> x.grad
116
+ tensor([2.])
117
+ >>> @torch.enable_grad()
118
+ ... def doubler(x):
119
+ ... return x * 2
120
+ >>> with torch.no_grad():
121
+ ... z = doubler(x)
122
+ >>> z.requires_grad
123
+ True
124
+ >>> @torch.enable_grad
125
+ ... def tripler(x):
126
+ ... return x * 3
127
+ >>> with torch.no_grad():
128
+ ... z = tripler(x)
129
+ >>> z.requires_grad
130
+ True
131
+
132
+ """
133
+
134
+ def __enter__(self) -> None:
135
+ self.prev = torch.is_grad_enabled()
136
+ torch._C._set_grad_enabled(True)
137
+
138
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
139
+ torch._C._set_grad_enabled(self.prev)
140
+
141
+
142
+ class set_grad_enabled(_DecoratorContextManager):
143
+ r"""Context-manager that sets gradient calculation on or off.
144
+
145
+ ``set_grad_enabled`` will enable or disable grads based on its argument :attr:`mode`.
146
+ It can be used as a context-manager or as a function.
147
+
148
+ This context manager is thread local; it will not affect computation
149
+ in other threads.
150
+
151
+ Args:
152
+ mode (bool): Flag whether to enable grad (``True``), or disable
153
+ (``False``). This can be used to conditionally enable
154
+ gradients.
155
+
156
+ .. note::
157
+ set_grad_enabled is one of several mechanisms that can enable or
158
+ disable gradients locally see :ref:`locally-disable-grad-doc` for
159
+ more information on how they compare.
160
+
161
+ .. note::
162
+ This API does not apply to :ref:`forward-mode AD <forward-mode-ad>`.
163
+
164
+ Example::
165
+ >>> # xdoctest: +SKIP
166
+ >>> x = torch.tensor([1.], requires_grad=True)
167
+ >>> is_train = False
168
+ >>> with torch.set_grad_enabled(is_train):
169
+ ... y = x * 2
170
+ >>> y.requires_grad
171
+ False
172
+ >>> _ = torch.set_grad_enabled(True)
173
+ >>> y = x * 2
174
+ >>> y.requires_grad
175
+ True
176
+ >>> _ = torch.set_grad_enabled(False)
177
+ >>> y = x * 2
178
+ >>> y.requires_grad
179
+ False
180
+
181
+ """
182
+
183
+ def __init__(self, mode: bool) -> None:
184
+ self.prev = torch.is_grad_enabled()
185
+ self.mode = mode
186
+ torch._C._set_grad_enabled(mode)
187
+
188
+ def __call__(self, orig_func: F) -> F:
189
+ torch._C._set_grad_enabled(self.prev)
190
+ return super().__call__(orig_func)
191
+
192
+ def __enter__(self) -> None:
193
+ torch._C._set_grad_enabled(self.mode)
194
+
195
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
196
+ torch._C._set_grad_enabled(self.prev)
197
+
198
+ def clone(self) -> "set_grad_enabled":
199
+ r"""
200
+ Create a copy of this class
201
+ """
202
+ return self.__class__(self.mode)
203
+
204
+
205
+ class inference_mode(_DecoratorContextManager):
206
+ r"""Context-manager that enables or disables inference mode.
207
+
208
+ InferenceMode is a new context manager analogous to :class:`~no_grad`
209
+ to be used when you are certain your operations will have no interactions
210
+ with autograd (e.g., model training). Code run under this mode gets better
211
+ performance by disabling view tracking and version counter bumps. Note that
212
+ unlike some other mechanisms that locally enable or disable grad,
213
+ entering inference_mode also disables to :ref:`forward-mode AD <forward-mode-ad>`.
214
+
215
+ This context manager is thread local; it will not affect computation
216
+ in other threads.
217
+
218
+ Also functions as a decorator.
219
+
220
+ .. note::
221
+ Inference mode is one of several mechanisms that can enable or
222
+ disable gradients locally see :ref:`locally-disable-grad-doc` for
223
+ more information on how they compare.
224
+
225
+ Args:
226
+ mode (bool or function): Either a boolean flag whether to enable or
227
+ disable inference mode or a Python function to decorate with
228
+ inference mode enabled
229
+
230
+ Example::
231
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
232
+ >>> import torch
233
+ >>> x = torch.ones(1, 2, 3, requires_grad=True)
234
+ >>> with torch.inference_mode():
235
+ ... y = x * x
236
+ >>> y.requires_grad
237
+ False
238
+ >>> # xdoctest: +SKIP("want string isnt quite right")
239
+ >>> y._version
240
+ Traceback (most recent call last):
241
+ File "<stdin>", line 1, in <module>
242
+ RuntimeError: Inference tensors do not track version counter.
243
+ >>> @torch.inference_mode()
244
+ ... def func(x):
245
+ ... return x * x
246
+ >>> out = func(x)
247
+ >>> out.requires_grad
248
+ False
249
+ >>> @torch.inference_mode
250
+ ... def doubler(x):
251
+ ... return x * 2
252
+ >>> out = doubler(x)
253
+ >>> out.requires_grad
254
+ False
255
+
256
+ """
257
+
258
+ def __init__(self, mode: bool = True) -> None:
259
+ if not torch._jit_internal.is_scripting():
260
+ super().__init__()
261
+ self.mode = mode
262
+
263
+ def __new__(cls, mode=True):
264
+ if isinstance(mode, bool):
265
+ return super().__new__(cls)
266
+ return cls()(mode)
267
+
268
+ def __enter__(self) -> None:
269
+ self._inference_mode_context = torch._C._InferenceMode(self.mode)
270
+ self._inference_mode_context.__enter__()
271
+
272
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
273
+ self._inference_mode_context.__exit__(exc_type, exc_value, traceback)
274
+
275
+ def clone(self) -> "inference_mode":
276
+ r"""
277
+ Create a copy of this class
278
+ """
279
+ return self.__class__(self.mode)
280
+
281
+
282
+ def _enter_inference_mode(mode):
283
+ mode_context = torch._C._InferenceMode(mode)
284
+ mode_context.__enter__()
285
+ return mode_context
286
+
287
+
288
+ def _exit_inference_mode(mode):
289
+ mode.__exit__(None, None, None)
290
+
291
+
292
+ class set_multithreading_enabled(_DecoratorContextManager):
293
+ r"""Context-manager that sets multithreaded backwards on or off.
294
+
295
+ ``set_multithreading_enabled`` will enable or disable multithreaded backwards based on its argument :attr:`mode`.
296
+ It can be used as a context-manager or as a function.
297
+
298
+ This context manager is thread local; it will not affect computation
299
+ in other threads.
300
+
301
+ Args:
302
+ mode (bool): Flag whether to enable multithreaded backwards (``True``), or disable
303
+ (``False``).
304
+
305
+ .. note::
306
+ This API does not apply to :ref:`forward-mode AD <forward-mode-ad>`.
307
+
308
+ """
309
+
310
+ def __init__(self, mode: bool) -> None:
311
+ self.prev = torch._C._is_multithreading_enabled()
312
+ torch._C._set_multithreading_enabled(mode)
313
+ self.mode = mode
314
+
315
+ def __enter__(self) -> None:
316
+ pass
317
+
318
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
319
+ torch._C._set_multithreading_enabled(self.prev)
320
+
321
+ def clone(self) -> "set_multithreading_enabled":
322
+ r"""
323
+ Create a copy of this class
324
+ """
325
+ return self.__class__(self.mode)
326
+
327
+
328
+ class _force_original_view_tracking(_DecoratorContextManager):
329
+ r"""Context-manager that sets whether or not to always enable view-replay in autograd.
330
+
331
+ ``set_view_replay_enabled`` will enable or disable view-replay based on its argument :attr:`mode`.
332
+ It can be used as a context-manager or as a function.
333
+
334
+ This context manager is thread local; it will not affect computation
335
+ in other threads.
336
+
337
+ When a tensor view is mutated, the autograd engine needs to decide whether or not
338
+ to regenerate the "updated view" by either replaying the chain of views from the updated base,
339
+ or with a single call to as_strided.
340
+
341
+ If set_view_replay_enabled is set to True, then autograd will always use view replay.
342
+ Otherwise, it will fall back to its existing logic.
343
+
344
+ Args:
345
+ mode (bool): Flag whether to enable view-replay (``True``), or disable
346
+ (``False``).
347
+
348
+ """
349
+
350
+ def __init__(self, mode: bool) -> None:
351
+ self.prev = torch._C._is_view_replay_enabled()
352
+ torch._C._set_view_replay_enabled(mode)
353
+ self.mode = mode
354
+
355
+ def __enter__(self) -> None:
356
+ pass
357
+
358
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
359
+ torch._C._set_view_replay_enabled(self.prev)
360
+
361
+ def clone(self):
362
+ return self.__class__(self.mode)
363
+
364
+
365
+ class _unsafe_preserve_version_counter(_DecoratorContextManager):
366
+ r"""DO NOT USE THIS UNLESS YOU KNOW EXACTLY WHAT YOU'RE DOING.
367
+
368
+ This context manager can lead to arbitrary silent-correctness issues in any other part of your code
369
+ (even the ones not touched directly by the context manager)!
370
+
371
+ Ordinarily, autograd will track mutations to tensors by incrementing it's `._version` attribute.
372
+ This is generally important for correctness, as for example, mutating a tensor that autograd has saved
373
+ for the backwards pass can result in incorrect gradients, and autograd uses the version counter to detect
374
+ and error out in this situation.
375
+
376
+ However, there are rare instances where it might be useful to hide mutations from autograd. For example:
377
+ if a tensor is very large, and you'd like to free its memory by storing it elsewhere, and re-populate
378
+ the tensor right before it is needed by autograd.
379
+
380
+ Args:
381
+ tensor (torch.Tensor): the tensor in question, that you would like to preserve the version counter of.
382
+
383
+ .. note::
384
+ This API does not apply to :ref:`forward-mode AD <forward-mode-ad>`.
385
+
386
+ """
387
+
388
+ def __init__(self, tensor: torch.Tensor) -> None:
389
+ self.tensor = tensor
390
+ self.prev_version = tensor._version
391
+
392
+ def __enter__(self) -> None:
393
+ pass
394
+
395
+ def __exit__(self, *args) -> None:
396
+ torch._C._autograd._unsafe_set_version_counter(self.tensor, self.prev_version)
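For orientation, a minimal sketch of the public context managers defined above (illustrative only, not part of the committed file):

import torch

x = torch.ones(3, requires_grad=True)

with torch.no_grad():                # thread-local: gradient tracking off
    y = x * 2                        # y.requires_grad is False

is_train = False
with torch.set_grad_enabled(is_train):
    z = x + 1                        # z.requires_grad is False

with torch.inference_mode():         # additionally skips view tracking and version bumps
    w = x.sin()                      # w is an inference tensor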
venv/lib/python3.10/site-packages/torch/autograd/gradcheck.py ADDED
@@ -0,0 +1,2266 @@
1
+ import collections
2
+ import functools
3
+ import warnings
4
+ from itertools import product
5
+ from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
6
+
7
+ import torch
8
+ import torch.testing
9
+ from torch._vmap_internals import _vmap, vmap
10
+ from torch.overrides import is_tensor_like
11
+ from torch.types import _TensorOrTensors
12
+
13
+ # Note: `get_*_jacobian` functions are added here even though we didn't intend to make them public
14
+ # since they have been exposed from before we added `__all__` and we already maintain BC for them
15
+ # We should eventually deprecate them and remove them from `__all__`
16
+ __all__ = [
17
+ "gradcheck",
18
+ "gradgradcheck",
19
+ "GradcheckError",
20
+ "get_numerical_jacobian",
21
+ "get_analytical_jacobian",
22
+ "get_numerical_jacobian_wrt_specific_input",
23
+ ]
24
+
25
+
26
+ class GradcheckError(RuntimeError):
27
+ r"""Error raised by :func:`gradcheck` and :func:`gradgradcheck`."""
28
+
29
+ pass
30
+
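+ # Typical use of the public entry points in this module (illustrative sketch, not part
+ # of the original file): gradcheck compares analytical gradients against central finite
+ # differences and raises GradcheckError (or returns False when raise_exception=False)
+ # on mismatch, e.g.:
+ #
+ #   inp = torch.randn(4, dtype=torch.double, requires_grad=True)
+ #   assert torch.autograd.gradcheck(torch.sin, (inp,), eps=1e-6, atol=1e-4)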
31
+
32
+ def _is_sparse_compressed_tensor(obj: torch.Tensor):
33
+ return obj.layout in {
34
+ torch.sparse_csr,
35
+ torch.sparse_csc,
36
+ torch.sparse_bsr,
37
+ torch.sparse_bsc,
38
+ }
39
+
40
+
41
+ def _is_sparse_any_tensor(obj: torch.Tensor):
42
+ return _is_sparse_compressed_tensor(obj) or obj.layout is torch.sparse_coo
43
+
44
+
45
+ def _is_float_or_complex_tensor(obj):
46
+ return is_tensor_like(obj) and (obj.is_floating_point() or obj.is_complex())
47
+
48
+
49
+ def _allocate_jacobians_with_inputs(
50
+ input_tensors: Tuple, numel_output
51
+ ) -> Tuple[torch.Tensor, ...]:
52
+ # Makes zero-filled tensors from inputs. If `numel_output` is not None, for
53
+ # each tensor in `input_tensors`, returns a new zero-filled tensor with height
54
+ # of `t.numel` and width of `numel_output`. Otherwise, for each tensor, returns
55
+ # a 1-d tensor with size `(t.numel,)`. Each new tensor will be strided and have
56
+ # the same dtype and device as those of the corresponding input.
57
+ out: List[torch.Tensor] = []
58
+ for t in input_tensors:
59
+ if _is_float_or_complex_tensor(t) and t.requires_grad:
60
+ out.append(t.new_zeros((t.numel(), numel_output), layout=torch.strided))
61
+ return tuple(out)
62
+
63
+
64
+ def _allocate_jacobians_with_outputs(
65
+ output_tensors: Tuple, numel_input, dtype=None, device=None
66
+ ) -> Tuple[torch.Tensor, ...]:
67
+ # Makes zero-filled tensors from outputs. If `dim` is not None, for each tensor
68
+ # in `output_tensors`, returns a new zero-filled tensor with height of `dim` and
69
+ # width of `t.numel`. Otherwise, for each tensor, returns a 1-d tensor with size
70
+ # (t.numel,).
71
+ out: List[torch.Tensor] = []
72
+ options = {"dtype": dtype, "device": device, "layout": torch.strided}
73
+ for t in output_tensors:
74
+ if _is_float_or_complex_tensor(t):
75
+ out.append(t.new_zeros((numel_input, t.numel()), **options))
76
+ return tuple(out)
77
+
78
+
79
+ def _iter_tensors(
80
+ x: Union[torch.Tensor, Iterable[torch.Tensor]], only_requiring_grad: bool = False
81
+ ) -> Iterable[torch.Tensor]:
82
+ if is_tensor_like(x):
83
+ # mypy doesn't narrow type of `x` to torch.Tensor
84
+ if x.requires_grad or not only_requiring_grad: # type: ignore[union-attr]
85
+ yield x # type: ignore[misc]
86
+ elif isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
87
+ for elem in x:
88
+ yield from _iter_tensors(elem, only_requiring_grad)
89
+
90
+
91
+ def _densify(x):
92
+ # return a copy of sparse x with all unspecified elements
93
+ # "replaced" with zero-valued elements
94
+ if isinstance(x, (list, tuple)):
95
+ return type(x)(map(_densify, x))
96
+ elif not is_tensor_like(x) or x.layout in {torch.strided, torch._mkldnn}: # type: ignore[attr-defined] # no attr _mkldnn
97
+ return x
98
+ elif x.layout is torch.sparse_coo:
99
+ device = x.device
100
+ indices_dtype = x._indices().dtype
101
+ tmp = torch.ones(x.shape[: x.sparse_dim()], dtype=torch.int8, device=device)
102
+ indices = tmp.nonzero().t().to(dtype=indices_dtype)
103
+ values = torch.zeros(
104
+ (tmp.numel(), *x.shape[x.sparse_dim() :]), dtype=x.dtype, device=device
105
+ )
106
+ x_coalesced = x.detach().coalesce()
107
+ if x_coalesced.numel() > 0:
108
+ stride = tmp.stride()
109
+ flat_indices = (
110
+ x_coalesced.indices()
111
+ .mul(
112
+ torch.tensor(stride, dtype=indices_dtype, device=device).unsqueeze(
113
+ 1
114
+ )
115
+ )
116
+ .sum(0)
117
+ )
118
+ values[flat_indices] = x_coalesced.values()
119
+ return (
120
+ torch.sparse_coo_tensor(indices, values, x.shape)
121
+ ._coalesced_(True)
122
+ .requires_grad_(x.requires_grad)
123
+ )
124
+ elif _is_sparse_compressed_tensor(x):
125
+ blocksize = (
126
+ x.values().shape[1:3]
127
+ if x.layout in {torch.sparse_bsr, torch.sparse_bsc}
128
+ else None
129
+ )
130
+ compressed_indices = (
131
+ x.crow_indices()
132
+ if x.layout in {torch.sparse_csr, torch.sparse_bsr}
133
+ else x.ccol_indices()
134
+ )
135
+ # We'll use intermediate sparse COO for simplicity
136
+ r = _densify(x.detach().to_sparse(layout=torch.sparse_coo)).to_sparse(
137
+ layout=x.layout, blocksize=blocksize
138
+ )
139
+ # Check that all elements are specified also after `to_sparse` op:
140
+ dense_numel = r.values().numel() // max(1, r.values().shape[0])
141
+ batch_numel = compressed_indices.numel() // compressed_indices.shape[-1]
142
+ sparse_numel = r.numel() // max(1, dense_numel * batch_numel)
143
+ if sparse_numel != r._nnz():
144
+ raise AssertionError(
145
+ f"{x.layout} densify failed: expected nnz={sparse_numel} but got {r._nnz()}"
146
+ )
147
+ return r.requires_grad_(x.requires_grad)
148
+ elif _is_sparse_any_tensor(x):
149
+ raise NotImplementedError(x.layout)
150
+ return x
151
+
152
+
153
+ def _iter_tensor(x_tensor):
154
+ # (Only used for slow gradcheck) Returns a generator that yields the following
155
+ # elements at each iteration:
156
+ # 1) a tensor: the same tensor is returned across all iterations. The tensor
157
+ # is not the same as the original x_tensor as given as input - it is
158
+ # prepared so that it can be modified in-place. Depending on whether the
159
+ # input tensor is strided, sparse, or dense, the returned tensor may or may
160
+ # not share storage with x_tensor.
161
+ # 2) a tuple of indices that can be used with advanced indexing (yielded in
162
+ # dictionary order)
163
+ # 3) flattened index that will be used to index into the Jacobian tensor
164
+ #
165
+ # For a tensor t with size (2, 2), _iter_tensor yields:
166
+ # `x, (0, 0), 0`, `x, (0, 1), 1`, `x, (1, 0), 2`, `x, (1, 1), 3`
167
+ #
168
+ # where x is the t.data of the original tensor. Perturbing the entry of x
169
+ # at index (1, 1) yields the 3rd column of the overall Jacobian matrix.
170
+ if _is_sparse_any_tensor(x_tensor):
171
+
172
+ def get_stride(size):
173
+ dim = len(size)
174
+ tmp = 1
175
+ stride = [0] * dim
176
+ for i in reversed(range(dim)):
177
+ stride[i] = tmp
178
+ tmp *= size[i]
179
+ return stride
180
+
181
+ x_nnz = x_tensor._nnz()
182
+ x_size = list(x_tensor.size())
183
+ if x_tensor.layout is torch.sparse_coo:
184
+ x_indices = x_tensor._indices().t()
185
+ x_values = x_tensor._values()
186
+ elif x_tensor.layout is torch.sparse_csr:
187
+ x_indices = torch._convert_indices_from_csr_to_coo(
188
+ x_tensor.crow_indices(), x_tensor.col_indices()
189
+ ).t()
190
+ x_values = x_tensor.values()
191
+ elif x_tensor.layout is torch.sparse_csc:
192
+ x_indices = torch._convert_indices_from_csr_to_coo(
193
+ x_tensor.ccol_indices(), x_tensor.row_indices(), transpose=True
194
+ ).t()
195
+ x_values = x_tensor.values()
196
+ elif x_tensor.layout is torch.sparse_bsr:
197
+ x_block_values = x_tensor.values()
198
+ x_blocksize = x_block_values.size()[1:3]
199
+ x_indices = (
200
+ torch._convert_indices_from_csr_to_coo(
201
+ x_tensor.crow_indices(), x_tensor.col_indices()
202
+ )
203
+ .repeat_interleave(x_blocksize[0] * x_blocksize[1], 1)
204
+ .mul_(torch.tensor(x_blocksize, device=x_tensor.device).reshape(2, 1))
205
+ .add_(
206
+ torch.stack(
207
+ torch.where(torch.ones(x_blocksize, device=x_tensor.device))
208
+ ).repeat(1, x_nnz)
209
+ )
210
+ .t()
211
+ )
212
+ x_values = x_block_values.flatten(0, 2)
213
+ x_nnz = x_values.size(0)
214
+ elif x_tensor.layout is torch.sparse_bsc:
215
+ x_block_values = x_tensor.values()
216
+ x_blocksize = x_block_values.size()[1:3]
217
+ x_indices = (
218
+ torch._convert_indices_from_csr_to_coo(
219
+ x_tensor.ccol_indices(), x_tensor.row_indices(), transpose=True
220
+ )
221
+ .repeat_interleave(x_blocksize[0] * x_blocksize[1], 1)
222
+ .mul_(torch.tensor(x_blocksize, device=x_tensor.device).reshape(2, 1))
223
+ .add_(
224
+ torch.stack(
225
+ torch.where(torch.ones(x_blocksize, device=x_tensor.device))
226
+ ).repeat(1, x_nnz)
227
+ )
228
+ .t()
229
+ )
230
+ x_values = x_block_values.flatten(0, 2)
231
+ x_nnz = x_values.size(0)
232
+ else:
233
+ raise NotImplementedError(f"_iter_tensor for {x_tensor.layout} input")
234
+ x_stride = get_stride(x_size)
235
+ # Use .data here to get around the version check
236
+ x_values = x_values.data
237
+ for i in range(x_nnz):
238
+ x_value = x_values[i]
239
+ for x_idx in product(*[range(m) for m in x_values.size()[1:]]):
240
+ indices = x_indices[i].tolist() + list(x_idx)
241
+ d_idx = sum(indices[k] * x_stride[k] for k in range(len(x_size)))
242
+ yield x_value, x_idx, d_idx
243
+ elif x_tensor.layout == torch._mkldnn: # type: ignore[attr-defined]
244
+ for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])):
245
+ # this is really inefficient, but without indexing implemented, there's
246
+ # not really a better way than converting back and forth
247
+ x_tensor_dense = x_tensor.to_dense()
248
+ yield x_tensor_dense, x_idx, d_idx
249
+ else:
250
+ # Use .data here to get around the version check
251
+ x_tensor = x_tensor.data
252
+ for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])):
253
+ yield x_tensor, x_idx, d_idx
254
+
255
+
256
+ def _get_numerical_jacobian(
257
+ fn, inputs, outputs=None, target=None, eps=1e-3, is_forward_ad=False
258
+ ) -> List[Tuple[torch.Tensor, ...]]:
259
+ """Compute the numerical Jacobian of `fn(inputs)` with respect to `target`.
260
+
261
+ If not specified, targets are the input. Returns M * N Jacobians where N is the
262
+ number of tensors in target that require grad and M is the number of non-integral
263
+ outputs.
264
+
265
+ Args:
266
+ fn: the function to compute the jacobian for
267
+ inputs: inputs to `fn`
268
+ outputs: provide precomputed outputs to avoid one extra invocation of fn
269
+ target: the Tensors wrt whom Jacobians are calculated (default=`inputs`)
270
+ eps: the magnitude of the perturbation during finite differencing
271
+ (default=`1e-3`)
272
+ is_forward_ad: if this numerical jacobian is computed to be checked wrt
273
+ forward AD gradients (this is used for error checking only)
274
+
275
+ Returns:
276
+ A list of M N-tuples of tensors
277
+
278
+ Note that `target` may not even be part of `input` to `fn`, so please be
279
+    **very careful** not to clone `target`.
280
+ """
281
+ jacobians: List[Tuple[torch.Tensor, ...]] = []
282
+ if outputs is None:
283
+ outputs = _as_tuple(fn(*_as_tuple(inputs)))
284
+ if not is_forward_ad and any(o.is_complex() for o in outputs):
285
+ raise ValueError(
286
+ "Expected output to be non-complex. get_numerical_jacobian no "
287
+ "longer supports functions that return complex outputs."
288
+ )
289
+ if target is None:
290
+ target = inputs
291
+ inp_indices = [
292
+ i for i, a in enumerate(target) if is_tensor_like(a) and a.requires_grad
293
+ ]
294
+ for i, (inp, inp_idx) in enumerate(zip(_iter_tensors(target, True), inp_indices)):
295
+ jacobians += [
296
+ get_numerical_jacobian_wrt_specific_input(
297
+ fn,
298
+ inp_idx,
299
+ inputs,
300
+ outputs,
301
+ eps,
302
+ input=inp,
303
+ is_forward_ad=is_forward_ad,
304
+ )
305
+ ]
306
+ return jacobians
307
+
308
+
309
+ def get_numerical_jacobian(fn, inputs, target=None, eps=1e-3, grad_out=1.0):
310
+ """Compute the numerical Jacobian for a given fn and its inputs.
311
+
312
+ This is a Deprecated API.
313
+
314
+ Args:
315
+ fn: the function to compute the Jacobian for (must take inputs as a tuple)
316
+        inputs: inputs to `fn`
317
+ target: the Tensors wrt whom Jacobians are calculated (default=`input`)
318
+ eps: the magnitude of the perturbation during finite differencing
319
+ (default=`1e-3`)
320
+
321
+ Returns:
322
+ A list of Jacobians of `fn` (restricted to its first output) with respect to
323
+ each input or target, if provided.
324
+
325
+ Note that `target` may not even be part of `input` to `fn`, so please be
326
+    **very careful** not to clone `target`.
327
+ """
328
+ warnings.warn(
329
+ "get_numerical_jacobian was part of PyTorch's private API and not "
330
+ "meant to be exposed. We are deprecating it and it will be removed "
331
+ "in a future version of PyTorch. If you have a specific use for "
332
+ "this or feature request for this to be a stable API, please file "
333
+ "us an issue at https://github.com/pytorch/pytorch/issues/new"
334
+ )
335
+ if (
336
+ grad_out != 1.0
337
+ ): # grad_out param is only kept for backward compatibility reasons
338
+ raise ValueError(
339
+ "Expected grad_out to be 1.0. get_numerical_jacobian no longer "
340
+ "supports values of grad_out != 1.0."
341
+ )
342
+
343
+ def fn_pack_inps(*inps):
344
+ return fn(inps)
345
+
346
+ jacobians = _get_numerical_jacobian(fn_pack_inps, inputs, None, target, eps)
347
+
348
+ return tuple(jacobian_for_each_output[0] for jacobian_for_each_output in jacobians)
349
+
350
+
351
+ def _compute_numerical_gradient(fn, entry, v, norm_v, nbhd_checks_fn):
352
+ # Computes numerical directional derivative as finite difference
353
+ # of function `fn` at input `entry`, perturbed by vector `v`.
354
+ if _is_sparse_compressed_tensor(entry):
355
+ # sparse compressed tensors don't implement sub/add/copy_
356
+ # yet. However, in non-masked semantics context entry and v
357
+ # have the same sparse indices ...
358
+ assert entry.layout == v.layout, (entry.layout, v.layout)
359
+ assert entry._nnz() == v._nnz(), (entry._nnz(), v._nnz(), entry.shape)
360
+ # ... the finite differencing can be performed on values only:
361
+ entry = entry.values()
362
+ v = v.values()
363
+ # we'll detach to avoid backward computations that sparse
364
+ # tensors have limited support for.
365
+ entry = entry.detach()
366
+
367
+ orig = entry.clone()
368
+ entry.copy_(orig - v)
369
+ outa = fn()
370
+ entry.copy_(orig + v)
371
+ outb = fn()
372
+ entry.copy_(orig)
373
+
374
+ def compute(a, b):
375
+ nbhd_checks_fn(a, b)
376
+ ret = (b - a) / (2 * norm_v) # use central difference approx
377
+ return ret.detach().reshape(-1)
378
+
379
+ return tuple(compute(a, b) for (a, b) in zip(outa, outb))
380
+
381
+
382
+ def _compute_numerical_jvps_wrt_specific_input(
383
+ jvp_fn, delta, input_is_complex, is_forward_ad=False
384
+ ) -> List[torch.Tensor]:
385
+ # Computing the jacobian only works for real delta
386
+ # For details on the algorithm used here, refer:
387
+ # Section 3.5.3 https://arxiv.org/pdf/1701.00392.pdf
388
+ # s = fn(z) where z = x for real valued input
389
+ # and z = x + yj for complex valued input
390
+ jvps: List[torch.Tensor] = []
391
+ ds_dx_tup = jvp_fn(delta[0] if isinstance(delta, tuple) else delta)
392
+
393
+ if input_is_complex: # C -> R
394
+ ds_dy_tup = (
395
+ jvp_fn(delta[1] * 1j) if isinstance(delta, tuple) else jvp_fn(delta * 1j)
396
+ )
397
+ for ds_dx, ds_dy in zip(ds_dx_tup, ds_dy_tup):
398
+ assert not ds_dx.is_complex()
399
+ # conjugate wirtinger derivative
400
+ conj_w_d = ds_dx + ds_dy * 1j
401
+ jvps.append(conj_w_d)
402
+ else:
403
+ for ds_dx in ds_dx_tup: # R -> R or (R -> C for the forward AD case)
404
+ assert is_forward_ad or not ds_dx.is_complex()
405
+ jvps.append(ds_dx)
406
+ return jvps
407
+
408
+
409
+ def _combine_jacobian_cols(
410
+ jacobians_cols: Dict[int, List[torch.Tensor]], outputs, input, numel
411
+ ) -> Tuple[torch.Tensor, ...]:
412
+ # jacobian_cols maps column_idx -> output_idx -> single column of jacobian Tensor
413
+ # we return a list that maps output_idx -> full jacobian Tensor
414
+ jacobians = _allocate_jacobians_with_outputs(
415
+ outputs, numel, dtype=input.dtype if input.dtype.is_complex else None
416
+ )
417
+ for i, jacobian in enumerate(jacobians):
418
+ for k, v in jacobians_cols.items():
419
+ jacobian[k] = v[i]
420
+ return jacobians
421
+
422
+
423
+ def _prepare_input(
424
+ input: torch.Tensor, maybe_perturbed_input: Optional[torch.Tensor], fast_mode=False
425
+ ) -> torch.Tensor:
426
+ # Prepares the inputs to be passed into the function while including the new
427
+ # modified input.
428
+ if input.layout == torch._mkldnn: # type: ignore[attr-defined] # no attr _mkldnn
429
+ # Convert back to mkldnn
430
+ if maybe_perturbed_input is not None:
431
+ return maybe_perturbed_input.to_mkldnn()
432
+ else:
433
+ return input
434
+ elif _is_sparse_any_tensor(input):
435
+ if fast_mode and maybe_perturbed_input is not None:
436
+ # entry is already a "cloned" version of the original tensor
437
+ # thus changes to entry are not reflected in the input
438
+ return maybe_perturbed_input
439
+ else:
440
+ return input
441
+ else:
442
+ # We cannot use entry (input.data) if we want gradgrad to work because
443
+ # fn (in the gradgrad case) needs to compute grad wrt input
444
+ return input
445
+
446
+
447
+ def _check_outputs_same_dtype_and_shape(output1, output2, eps, idx=None) -> None:
448
+ # Check that the returned outputs don't have different dtype or shape when you
449
+ # perturb the input
450
+    on_index = f"on index {idx} " if idx is not None else ""
451
+ assert output1.shape == output2.shape, (
452
+ f"Expected `func` to return outputs with the same shape"
453
+ f" when inputs are perturbed {on_index}by {eps}, but got:"
454
+ f" shapes {output1.shape} and {output2.shape}."
455
+ )
456
+ assert output1.dtype == output2.dtype, (
457
+ f"Expected `func` to return outputs with the same dtype"
458
+ f" when inputs are perturbed {on_index}by {eps}, but got:"
459
+ f" dtypes {output1.dtype} and {output2.dtype}."
460
+ )
461
+
462
+
463
+ def get_numerical_jacobian_wrt_specific_input(
464
+ fn, input_idx, inputs, outputs, eps, input=None, is_forward_ad=False
465
+ ) -> Tuple[torch.Tensor, ...]:
466
+ # Computes the numerical jacobians wrt to a single input. Returns N jacobian
467
+ # tensors, where N is the number of outputs. We use a dictionary for
468
+ # jacobian_cols because indices aren't necessarily consecutive for sparse inputs
469
+ # When we perturb only a single element of the input tensor at a time, the jvp
470
+ # is equivalent to a single col of the Jacobian matrix of fn.
471
+ jacobian_cols: Dict[int, List[torch.Tensor]] = {}
472
+ input = inputs[input_idx] if input is None else input
473
+ assert input.requires_grad
474
+ for x, idx, d_idx in _iter_tensor(input):
475
+ wrapped_fn = _with_prepare_inputs(fn, inputs, input_idx, x)
476
+ input_to_perturb = x[idx]
477
+ nbhd_checks_fn = functools.partial(
478
+ _check_outputs_same_dtype_and_shape, idx=idx, eps=eps
479
+ )
480
+ jvp_fn = _get_numerical_jvp_fn(
481
+ wrapped_fn, input_to_perturb, eps, nbhd_checks_fn
482
+ )
483
+ jacobian_cols[d_idx] = _compute_numerical_jvps_wrt_specific_input(
484
+ jvp_fn, eps, x.is_complex(), is_forward_ad
485
+ )
486
+ return _combine_jacobian_cols(jacobian_cols, outputs, input, input.numel())
487
+
488
+
489
+ def _get_analytical_jacobian_forward_ad(
490
+ fn, inputs, outputs, *, check_grad_dtypes=False, all_u=None
491
+ ) -> Tuple[Tuple[torch.Tensor, ...], ...]:
492
+    """Compute the analytical Jacobian of `fn(inputs)` using forward mode AD with respect to `target`.
493
+
494
+ Return N * M Jacobians where N is the number of tensors in target that require grad and
495
+ M is the number of non-integral outputs.
496
+ Contrary to other functions here, this function requires "inputs" to actually be used by the function.
497
+ The computed value is expected to be wrong if the function captures the inputs by side effect instead of
498
+ using the passed ones (many torch.nn tests do this).
499
+
500
+ Args:
501
+ fn: the function to compute the jacobian for
502
+ inputs: inputs to `fn`
503
+ outputs: provide precomputed outputs to avoid one extra invocation of fn
504
+ check_grad_dtypes: if True, will check that the gradient dtypes are valid
505
+ all_u (optional): if provided, the Jacobian will be right multiplied with this vector
506
+
507
+ Returns:
508
+ A tuple of M N-tuples of tensors
509
+ """
510
+ # To avoid early import issues
511
+ fwAD = torch.autograd.forward_ad
512
+
513
+ tensor_inputs = tuple(i for i in inputs if is_tensor_like(i) and i.requires_grad)
514
+
515
+ if any(i.is_complex() for i in tensor_inputs):
516
+ raise ValueError(
517
+ "Expected inputs to be non-complex for _get_analytical_jacobian_forward_ad."
518
+ )
519
+
520
+ if all_u:
521
+ jacobians = tuple(
522
+ _allocate_jacobians_with_outputs(outputs, 1) for i in tensor_inputs
523
+ )
524
+ else:
525
+ jacobians = tuple(
526
+ _allocate_jacobians_with_outputs(outputs, i.numel()) for i in tensor_inputs
527
+ )
528
+
529
+ with fwAD.dual_level():
530
+ fw_grads = []
531
+ dual_inputs = []
532
+ for i, inp in enumerate(inputs):
533
+ if is_tensor_like(inp) and inp.requires_grad:
534
+ if inp.layout == torch._mkldnn: # type: ignore[attr-defined]
535
+ raise ValueError(
536
+ "MKLDNN inputs are not support for forward AD gradcheck."
537
+ )
538
+
539
+ inp = fwAD.make_dual(inp.detach(), torch.zeros_like(inp))
540
+ # If inp is a differentiable view, the dual might not be the tangent given to
541
+ # make_dual, so read it explicitly from the dual tensor
542
+ fw_grads.append(fwAD.unpack_dual(inp)[1])
543
+ dual_inputs.append(inp)
544
+
545
+ if all_u:
546
+ # Do the full reduction in one pass
547
+ # To be consistent with numerical evaluation, we actually compute one reduction per input
548
+ for i, (fw_grad, u) in enumerate(zip(fw_grads, all_u)):
549
+ fw_grad.copy_(u.view_as(fw_grad))
550
+ raw_outputs = _as_tuple(fn(*dual_inputs))
551
+ dual_outputs = filter(_is_float_or_complex_tensor, raw_outputs)
552
+ for index_o, d_o in enumerate(dual_outputs):
553
+ val, res = fwAD.unpack_dual(d_o)
554
+ if (
555
+ check_grad_dtypes
556
+ and res is not None
557
+ and val.is_complex() != res.is_complex()
558
+ ):
559
+ raise GradcheckError("Forward AD gradient has dtype mismatch.")
560
+
561
+ # Remove extra dimension of size 1 corresponding to the reduced input
562
+ jacobians[i][index_o].squeeze_(0)
563
+ if res is None:
564
+ jacobians[i][index_o].zero_()
565
+ else:
566
+ jacobians[i][index_o].copy_(res.reshape(-1))
567
+ fw_grad.zero_()
568
+ else:
569
+ # Reconstruct the full Jacobian column by column
570
+ for i, fw_grad in enumerate(fw_grads):
571
+ for lin_idx, grad_idx in enumerate(
572
+ product(*[range(m) for m in fw_grad.size()])
573
+ ):
574
+ fw_grad[grad_idx] = 1.0
575
+ raw_outputs = _as_tuple(fn(*dual_inputs))
576
+ dual_outputs = filter(_is_float_or_complex_tensor, raw_outputs)
577
+ for index_o, d_o in enumerate(dual_outputs):
578
+ val, res = fwAD.unpack_dual(d_o)
579
+ if (
580
+ check_grad_dtypes
581
+ and res is not None
582
+ and val.is_complex() != res.is_complex()
583
+ ):
584
+ raise GradcheckError(
585
+ "Forward AD gradient has dtype mismatch."
586
+ )
587
+
588
+ if res is None:
589
+ jacobians[i][index_o][lin_idx].zero_()
590
+ else:
591
+ jacobians[i][index_o][lin_idx].copy_(res.reshape(-1))
592
+ fw_grad[grad_idx] = 0.0
593
+
594
+ return jacobians
595
+
596
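+ # The function above fills one entry of each dual input's tangent with 1.0 per
+ # iteration, so the unpacked JVP is exactly one Jacobian column. The sketch below is
+ # an added illustration (not part of the original file) for a single-input function;
+ # the hypothetical helper `_example_forward_ad_column` is never called by gradcheck.
+ def _example_forward_ad_column(fn, x, idx):
+     fwAD = torch.autograd.forward_ad
+     flat_tangent = torch.zeros(x.numel(), dtype=x.dtype, device=x.device)
+     flat_tangent[idx] = 1.0
+     with fwAD.dual_level():
+         dual_x = fwAD.make_dual(x.detach(), flat_tangent.reshape(x.shape))
+         _, jvp = fwAD.unpack_dual(fn(dual_x))
+         # With a one-hot tangent, the JVP is column `idx` of the Jacobian of fn at x.
+         return None if jvp is None else jvp.detach().reshape(-1).clone()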
+
597
+ def _get_input_to_perturb(input):
598
+ # Prepare the input so that it can be modified in-place and do certain
599
+ # operations that require the tensor to have strides. If fast_mode=False,
600
+ # _iter_tensor would handle the below cases:
601
+ if input.layout == torch._mkldnn: # type: ignore[attr-defined] # no attr _mkldnn
602
+ # Convert to dense so we can perform operations that require strided tensors
603
+ input_to_perturb = input.to_dense()
604
+ elif _is_sparse_any_tensor(input):
605
+ # Clone because input may require grad, and copy_ calls resize_,
606
+ # which is not allowed for .data
607
+ input_to_perturb = input.clone()
608
+ else:
609
+ input_to_perturb = input.data
610
+ return input_to_perturb
611
+
612
+
613
+ def _with_prepare_inputs(fn, inputs, input_idx, input_to_perturb, fast_mode=False):
614
+ # Wraps `fn` so that its inputs are already supplied
615
+ def wrapped_fn():
616
+ inp = tuple(
617
+ _prepare_input(a, input_to_perturb if i == input_idx else None, fast_mode)
618
+ if is_tensor_like(a)
619
+ else a
620
+ for i, a in enumerate(_as_tuple(inputs))
621
+ )
622
+ return tuple(a.clone() for a in _as_tuple(fn(*inp)))
623
+
624
+ return wrapped_fn
625
+
626
+
627
+ def _get_numerical_jvp_fn(wrapped_fn, input_to_perturb, eps, nbhd_checks_fn):
628
+ # Wraps jvp_fn so that certain arguments are already supplied
629
+ def jvp_fn(delta):
630
+ return _compute_numerical_gradient(
631
+ wrapped_fn, input_to_perturb, delta, eps, nbhd_checks_fn
632
+ )
633
+
634
+ return jvp_fn
635
+
636
+
637
+ def _reshape_tensor_or_tuple(u, shape):
638
+ # We don't need to reshape when input corresponding to u is sparse
639
+ if isinstance(u, tuple):
640
+ if not _is_sparse_any_tensor(u[0]):
641
+ return (u[0].reshape(shape), u[1].reshape(shape))
642
+ else:
643
+ if not _is_sparse_any_tensor(u):
644
+ return u.reshape(shape)
645
+ return u
646
+
647
+
648
+ def _mul_tensor_or_tuple(u, k):
649
+ if isinstance(u, tuple):
650
+ return (k * u[0], k * u[1])
651
+ else:
652
+ return k * u
653
+
654
+
655
+ def _get_numerical_jvp_wrt_specific_input(
656
+ fn, input_idx, inputs, u, eps, is_forward_ad=False
657
+ ) -> List[torch.Tensor]:
658
+ input = inputs[input_idx]
659
+ input_to_perturb = _get_input_to_perturb(input)
660
+ wrapped_fn = _with_prepare_inputs(fn, inputs, input_idx, input_to_perturb, True)
661
+ nbhd_checks_fn = functools.partial(_check_outputs_same_dtype_and_shape, eps=eps)
662
+ jvp_fn = _get_numerical_jvp_fn(wrapped_fn, input_to_perturb, eps, nbhd_checks_fn)
663
+ u = _reshape_tensor_or_tuple(u, input_to_perturb.shape)
664
+ u = _mul_tensor_or_tuple(u, eps)
665
+ return _compute_numerical_jvps_wrt_specific_input(
666
+ jvp_fn, u, input.is_complex(), is_forward_ad
667
+ )
668
+
669
+
670
+ def _get_numerical_vJu(
671
+ fn, inputs, inp_indices, func_out, all_u, all_v, eps, is_forward_ad
672
+ ):
673
+ # Note that all_v can also be None; in that case, this function only computes Ju.
674
+ reduced_jacobians: List[List[torch.Tensor]] = []
675
+ for i, (inp_idx, u) in enumerate(zip(inp_indices, all_u)):
676
+ all_Ju = _get_numerical_jvp_wrt_specific_input(
677
+ fn, inp_idx, inputs, u, eps, is_forward_ad
678
+ )
679
+ # Filter out the Ju for non floating point outputs
680
+ filtered_Ju = []
681
+ func_out = _as_tuple(func_out)
682
+ assert len(all_Ju) == len(func_out)
683
+ for Ju, output in zip(all_Ju, func_out):
684
+ if _is_float_or_complex_tensor(output):
685
+ filtered_Ju.append(Ju)
686
+ else:
687
+ # TODO: handle the other Ju
688
+ pass
689
+ if all_v is not None:
690
+ jacobian_scalars: List[torch.Tensor] = []
691
+ for v, Ju in zip(all_v, filtered_Ju):
692
+ jacobian_scalars.append(_dot_with_type_promotion(v, Ju))
693
+ reduced_jacobians.append(jacobian_scalars)
694
+ else:
695
+ reduced_jacobians.append(filtered_Ju)
696
+ return reduced_jacobians
697
+
698
+
699
+ def _check_jacobians_equal(j1, j2, atol):
700
+ # Check whether the max difference between two Jacobian tensors is within some
701
+ # tolerance `atol`.
702
+ for j1_x, j2_x in zip(j1, j2):
703
+ if j1_x.numel() != 0 and (j1_x - j2_x).abs().max() > atol:
704
+ return False
705
+ return True
706
+
707
+
708
+ def _stack_and_check_tensors(
709
+ list_of_list_of_tensors, inputs, numel_outputs
710
+ ) -> Tuple[Tuple[torch.Tensor, ...], bool, bool]:
711
+ # For the ith tensor in the inner list checks whether it has the same size and
712
+ # dtype as the ith differentiable input.
713
+ out_jacobians = _allocate_jacobians_with_inputs(inputs, numel_outputs)
714
+ diff_input_list = list(_iter_tensors(inputs, True))
715
+ correct_grad_sizes = True
716
+ correct_grad_types = True
717
+ for i, tensor_list in enumerate(list_of_list_of_tensors):
718
+ inp = diff_input_list[i]
719
+ out_jacobian = out_jacobians[i]
720
+ for j, tensor in enumerate(tensor_list):
721
+ if tensor is not None and tensor.size() != inp.size():
722
+ correct_grad_sizes = False
723
+ elif tensor is not None and tensor.dtype != inp.dtype:
724
+ correct_grad_types = False
725
+ if tensor is None:
726
+ out_jacobian[:, j].zero_()
727
+ else:
728
+ dense = (
729
+ tensor.to_dense() if not tensor.layout == torch.strided else tensor
730
+ )
731
+ assert out_jacobian[:, j].numel() == dense.numel()
732
+ out_jacobian[:, j] = dense.reshape(-1)
733
+ return out_jacobians, correct_grad_sizes, correct_grad_types
734
+
735
+
736
+ FAILED_NONDET_MSG = """\n
737
+ NOTE: If your op relies on non-deterministic operations, i.e., it is listed here:
738
+ https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
739
+ this failure might be expected.
740
+
741
+ If you are adding a new operator, please file an issue and then use one of the
742
+ workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck.
743
+ If the test
744
+ - manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
745
+ with `nondet_tol=<tol>` as a keyword argument.
746
+ - is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
747
+ to have `gradcheck_nondet_tol=<tol>`.
748
+ - is a Module test (e.g., in common_nn.py), then modify the corresponding
749
+ module_test entry to have `gradcheck_nondet_tol=<tol>`
750
+ """
751
+
752
+
753
+ def _check_analytical_jacobian_attributes(
754
+ inputs, output, nondet_tol, check_grad_dtypes, fast_mode=False, v=None
755
+ ) -> Tuple[torch.Tensor, ...]:
756
+ # This is used by both fast and slow mode:
757
+ # - For slow mode, vjps[i][j] is the jth row of the Jacobian wrt the ith
758
+ # input.
759
+ # - For fast mode, vjps[i][0] is a linear combination of the rows
760
+ # of the Jacobian wrt the ith input
761
+ diff_input_list = list(_iter_tensors(inputs, True))
762
+
763
+ def vjp_fn(grad_output):
764
+ return torch.autograd.grad(
765
+ output, diff_input_list, grad_output, retain_graph=True, allow_unused=True
766
+ )
767
+
768
+ # Compute everything twice to check for nondeterminism (which we call reentrancy)
769
+ if fast_mode:
770
+ vjps1 = _get_analytical_vjps_wrt_specific_output(vjp_fn, output.clone(), v)
771
+ vjps2 = _get_analytical_vjps_wrt_specific_output(vjp_fn, output.clone(), v)
772
+ else:
773
+ vjps1 = _compute_analytical_jacobian_rows(vjp_fn, output.clone())
774
+ vjps2 = _compute_analytical_jacobian_rows(vjp_fn, output.clone())
775
+
776
+ output_numel = output.numel() if not fast_mode else 1
777
+ jacobians1, types_ok, sizes_ok = _stack_and_check_tensors(
778
+ vjps1, inputs, output_numel
779
+ )
780
+ jacobians2, _, _ = _stack_and_check_tensors(vjps2, inputs, output_numel)
781
+ reentrant = _check_jacobians_equal(jacobians1, jacobians2, nondet_tol)
782
+
783
+ if not types_ok and check_grad_dtypes:
784
+ raise GradcheckError("Gradient has dtype mismatch")
785
+ if not sizes_ok:
786
+ raise GradcheckError("Analytical gradient has incorrect size")
787
+ if not reentrant:
788
+ raise GradcheckError(
789
+ "Backward is not reentrant, i.e., running backward with "
790
+ "same input and grad_output multiple times gives different values, "
791
+ "although analytical gradient matches numerical gradient."
792
+ f"The tolerance for nondeterminism was {nondet_tol}." + FAILED_NONDET_MSG
793
+ )
794
+ return jacobians1
795
+
796
+
797
+ def _get_analytical_vJu_backward_mode(
798
+ inputs, outputs, nondet_tol, check_grad_dtypes, all_v, all_u
799
+ ):
800
+ reduced_jacobians: List[List[torch.Tensor]] = []
801
+ for output, v in zip(outputs, all_v):
802
+ all_vJ = _check_analytical_jacobian_attributes(
803
+ inputs, output, nondet_tol, check_grad_dtypes, fast_mode=True, v=v
804
+ )
805
+ jacobian_scalars: List[torch.Tensor] = []
806
+ for vJ, u in zip(all_vJ, all_u):
807
+ # Why do we need squeeze here? vJ is a 2-d tensor so that we can reuse
808
+ # the error checking logic from slow mode
809
+ vJ = vJ.T.squeeze(0)
810
+ if vJ.is_complex(): # C -> R
811
+ tv = torch.view_as_real(vJ.resolve_conj())
812
+ tr = tv.select(-1, 0)
813
+ ti = tv.select(-1, 1)
814
+ jacobian_scalars.append(tr.dot(u[0]) + 1j * ti.dot(u[1]))
815
+ else: # R -> R
816
+ jacobian_scalars.append(vJ.dot(u))
817
+ reduced_jacobians.append(jacobian_scalars)
818
+ return reduced_jacobians
819
+
820
+
821
+ def get_analytical_jacobian(inputs, output, nondet_tol=0.0, grad_out=1.0):
822
+ # Replicates the behavior of the old get_analytical_jacobian before the refactor
823
+ # This shares much of its code with _check_analytical_jacobian_attributes
824
+ warnings.warn(
825
+ "get_analytical_jacobian was part of PyTorch's private API and not "
826
+ "meant to be exposed. We are deprecating it and it will be removed "
827
+ "in a future version of PyTorch. If you have a specific use for "
828
+ "this or feature request for this to be a stable API, please file "
829
+ "us an issue at https://github.com/pytorch/pytorch/issues/new"
830
+ )
831
+ if (
832
+ grad_out != 1.0
833
+ ): # grad_out param is only kept for backward compatibility reasons
834
+ raise ValueError(
835
+ "Expected grad_out to be 1.0. get_analytical_jacobian no longer "
836
+ "supports values of grad_out != 1.0."
837
+ )
838
+ if output.is_complex():
839
+ raise ValueError(
840
+ "Expected output to be non-complex. get_analytical_jacobian no "
841
+ "longer supports functions that return complex outputs."
842
+ )
843
+ diff_input_list = list(_iter_tensors(inputs, True))
844
+
845
+ def vjp_fn(grad_output):
846
+ return torch.autograd.grad(
847
+ output, diff_input_list, grad_output, retain_graph=True, allow_unused=True
848
+ )
849
+
850
+ # Compute everything twice to check for nondeterminism (which we call reentrancy)
851
+ vjps1 = _compute_analytical_jacobian_rows(vjp_fn, output.clone())
852
+ vjps2 = _compute_analytical_jacobian_rows(vjp_fn, output.clone())
853
+
854
+ output_numel = output.numel()
855
+ jacobians1, types_ok, sizes_ok = _stack_and_check_tensors(
856
+ vjps1, inputs, output_numel
857
+ )
858
+ jacobians2, _, _ = _stack_and_check_tensors(vjps2, inputs, output_numel)
859
+ reentrant = _check_jacobians_equal(jacobians1, jacobians2, nondet_tol)
860
+
861
+ return jacobians1, reentrant, sizes_ok, types_ok
862
+
863
+
864
+ def _get_analytical_jacobian(inputs, outputs, input_idx, output_idx):
865
+ # Computes the analytical Jacobian in slow mode for a single input-output pair.
866
+ # Forgoes performing checks on dtype, shape, and reentrancy.
867
+ jacobians = _check_analytical_jacobian_attributes(
868
+ inputs, outputs[output_idx], nondet_tol=float("inf"), check_grad_dtypes=False
869
+ )
870
+ return jacobians[input_idx]
871
+
872
+
873
+ def _compute_analytical_jacobian_rows(
874
+ vjp_fn, sample_output
875
+ ) -> List[List[Optional[torch.Tensor]]]:
876
+ # Computes Jacobian row-by-row by projecting `vjp_fn` = v^T J on standard basis
877
+ # vectors: vjp_fn(e) = e^T J is a corresponding row of the Jacobian.
878
+ # NB: this function does not assume vjp_fn(v) to return tensors with the same
879
+ # number of elements for different v. This is checked when we later combine the
880
+ # rows into a single tensor.
881
+ grad_out_base = torch.zeros_like(
882
+ sample_output, memory_format=torch.legacy_contiguous_format
883
+ )
884
+ flat_grad_out = grad_out_base.view(-1)
885
+ # jacobians_rows[i][j] is the Jacobian jth row for the ith input
886
+ jacobians_rows: List[List[Optional[torch.Tensor]]] = []
887
+ for j in range(flat_grad_out.numel()):
888
+ flat_grad_out.zero_()
889
+ flat_grad_out[j] = 1.0 # projection for jth row of Jacobian
890
+ grad_inputs = vjp_fn(grad_out_base)
891
+ for i, d_x in enumerate(grad_inputs):
892
+ if j == 0:
893
+ jacobians_rows.append([])
894
+ jacobians_rows[i] += [
895
+ d_x.clone() if isinstance(d_x, torch.Tensor) else None
896
+ ]
897
+ return jacobians_rows
898
+
899
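+ # The function above recovers each Jacobian row by calling the VJP with a one-hot
+ # grad_output. The sketch below is an added illustration (not part of the original
+ # file) of the same projection for a single input; the hypothetical helper
+ # `_example_analytical_jacobian_row` is never called by gradcheck.
+ def _example_analytical_jacobian_row(fn, x, row):
+     x = x.detach().clone().requires_grad_(True)
+     out = fn(x)
+     grad_out = torch.zeros_like(out, memory_format=torch.legacy_contiguous_format)
+     grad_out.view(-1)[row] = 1.0
+     # The VJP with e_row computes e_row^T J, i.e. row `row` of the Jacobian of fn at x.
+     (jac_row,) = torch.autograd.grad(out, (x,), grad_out)
+     return jac_row.reshape(-1)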
+
900
+ def _get_analytical_vjps_wrt_specific_output(
901
+ vjp_fn, sample_output, v
902
+ ) -> List[List[Optional[torch.Tensor]]]:
903
+ vjps: List[List[Optional[torch.Tensor]]] = []
904
+ grad_inputs = vjp_fn(v.reshape(sample_output.shape))
905
+ for vjp in grad_inputs:
906
+ vjps.append([vjp.clone() if isinstance(vjp, torch.Tensor) else None])
907
+ return vjps
908
+
909
+
910
+ def _check_inputs(tupled_inputs) -> bool:
911
+ # Make sure that gradients are saved for at least one input
912
+ any_input_requiring_grad = False
913
+ for idx, inp in enumerate(tupled_inputs):
914
+ if is_tensor_like(inp) and inp.requires_grad:
915
+ if not (inp.dtype == torch.float64 or inp.dtype == torch.complex128):
916
+ warnings.warn(
917
+ f"Input #{idx} requires gradient and "
918
+ "is not a double precision floating point or complex. "
919
+ "This check will likely fail if all the inputs are "
920
+ "not of double precision floating point or complex. "
921
+ )
922
+ if inp.is_sparse:
923
+ content = inp._values()
924
+ elif _is_sparse_compressed_tensor(inp):
925
+ content = inp.values()
926
+ else:
927
+ content = inp
928
+ # TODO: To cover more problematic cases, replace stride = 0 check with
929
+ # "any overlap in memory" once we have a proper function to check it.
930
+ if content.layout is not torch._mkldnn: # type: ignore[attr-defined]
931
+ if not all(
932
+ st > 0 or sz <= 1
933
+ for st, sz in zip(content.stride(), content.size())
934
+ ):
935
+ raise RuntimeError(
936
+ f"The {idx}th input has a dimension with stride 0. gradcheck only "
937
+ "supports inputs that are non-overlapping to be able to "
938
+ "compute the numerical gradients correctly. You should call "
939
+ ".contiguous on the input before passing it to gradcheck."
940
+ )
941
+ any_input_requiring_grad = True
942
+
943
+ if not any_input_requiring_grad:
944
+ raise ValueError(
945
+ "gradcheck expects at least one input tensor to require gradient, "
946
+ "but none of the them have requires_grad=True."
947
+ )
948
+ return True
949
+
950
+
951
+ def _check_outputs(outputs) -> None:
952
+ if any(_is_sparse_any_tensor(t) for t in outputs if isinstance(t, torch.Tensor)):
953
+ # it is easier to call to_dense() on the sparse output than
954
+ # to modify analytical jacobian
955
+ raise ValueError(
956
+ "Sparse output is not supported at gradcheck yet. "
957
+ "Please call to_dense(masked_grad=...) on the output of fn for gradcheck."
958
+ )
959
+ if any(t.layout == torch._mkldnn for t in outputs if isinstance(t, torch.Tensor)): # type: ignore[attr-defined]
960
+ raise ValueError(
961
+ "MKLDNN output is not supported at gradcheck yet. "
962
+ "Please call to_dense(masked_grad=...) on the output of fn for gradcheck."
963
+ )
964
+
965
+
966
+ def _check_no_differentiable_outputs(
967
+ func, inputs, func_out, eps, *, is_forward_ad
968
+ ) -> bool:
969
+ # When there are no differentiable outputs, numerical gradient for a function is
970
+ # expected to be zero.
971
+ jacobians_all_inputs_outputs = _get_numerical_jacobian(
972
+ func, inputs, func_out, eps=eps, is_forward_ad=is_forward_ad
973
+ )
974
+ for jacobians_all_outputs_and_fixed_input in jacobians_all_inputs_outputs:
975
+ for jacobian in jacobians_all_outputs_and_fixed_input:
976
+ if torch.ne(jacobian, 0).sum() > 0:
977
+ raise GradcheckError(
978
+ "Numerical gradient for function expected to be zero"
979
+ )
980
+ return True
981
+
982
+
983
+ def _check_no_differentiable_outputs_fast(
984
+ func, func_out, all_inputs, inputs_indices, all_u, eps, nondet_tol
985
+ ):
986
+ for inp_idx, u in zip(inputs_indices, all_u):
987
+ jvps = _get_numerical_jvp_wrt_specific_input(func, inp_idx, all_inputs, u, eps)
988
+ for jvp in jvps:
989
+ if jvp.numel() == 0:
990
+ continue
991
+ if (jvp - torch.zeros_like(jvp)).abs().max() > nondet_tol:
992
+ raise GradcheckError(
993
+ "Numerical gradient for function expected to be zero"
994
+ )
995
+ return True
996
+
997
+
998
+ FAILED_BATCHED_GRAD_MSG = """
999
+ gradcheck or gradgradcheck failed while testing batched gradient computation.
1000
+ This could have been invoked in a number of ways (via a test that calls
1001
+ gradcheck/gradgradcheck directly or via an autogenerated test).
1002
+
1003
+ If you are adding a new operator, please file an issue and then use one of the
1004
+ workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck.
1005
+ If the test
1006
+ - manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
1007
+ with `check_batched_grad=False` as a keyword argument.
1008
+ - is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
1009
+ to have `check_batched_grad=False` and/or `check_batched_gradgrad=False`.
1010
+
1011
+ If you're modifying an existing operator that supports batched grad computation,
1012
+ or wish to make a new operator work with batched grad computation, please read
1013
+ the following.
1014
+
1015
+ To compute batched grads (e.g., jacobians, hessians), we vmap over the backward
1016
+ computation. The most common failure case is if there is a 'vmap-incompatible
1017
+ operation' in the backward pass. Please see
1018
+ NOTE: [How to write vmap-compatible backward formulas]
1019
+ in the codebase for an explanation of how to fix this.
1020
+ """.strip()
1021
+
1022
+ FAILED_BATCHED_GRAD_MSG_FWD_AD = """
1023
+ gradcheck failed while testing batched gradient computation with forward-mode AD.
1024
+ This test is enabled automatically when both `check_batched_grad=True`
1025
+ and `check_forward_ad=True`, but can be disabled in the following ways
1026
+ depending on how the test was invoked (via a test that calls gradcheck
1027
+ directly or via an autogenerated test).
1028
+
1029
+ If you are adding a new operator, please file an issue and then use one of the
1030
+ workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck.
1031
+ If the test
1032
+ - manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
1033
+ with `check_batched_forward_grad=False` as a keyword argument.
1034
+ - is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
1035
+ to have `check_batched_forward_grad=False`
1036
+ """
1037
+
1038
+
1039
+ def _get_failed_batched_grad_test_msg(
1040
+ output_idx, input_idx, res, exp, is_forward_ad=False
1041
+ ):
1042
+ return f"""
1043
+ For output {output_idx} and input {input_idx}:
1044
+
1045
+ {FAILED_BATCHED_GRAD_MSG_FWD_AD if is_forward_ad else FAILED_BATCHED_GRAD_MSG}
1046
+
1047
+ Got:
1048
+ {res}
1049
+
1050
+ Expected:
1051
+ {exp}
1052
+ """.strip()
1053
+
1054
+
1055
+ def _test_batched_grad_forward_ad(func, inputs) -> bool:
1056
+ fwAD = torch.autograd.forward_ad # To avoid early import issues (do we need this?)
1057
+ assert isinstance(inputs, tuple)
1058
+
1059
+ for input_idx, current_input in enumerate(inputs):
1060
+ if not (is_tensor_like(current_input) and current_input.requires_grad):
1061
+ continue
1062
+
1063
+ def jvp(tangent: torch.Tensor):
1064
+ with fwAD.dual_level():
1065
+ dual = fwAD.make_dual(current_input.detach(), tangent)
1066
+ inputs_with_dual = tuple(
1067
+ dual
1068
+ if idx == input_idx
1069
+ else (inp.detach() if is_tensor_like(inp) else inp)
1070
+ for idx, inp in enumerate(inputs)
1071
+ )
1072
+ dual_outputs = _as_tuple(func(*inputs_with_dual))
1073
+ ret = []
1074
+ for dual_output in dual_outputs:
1075
+ if dual_output is None:
1076
+ continue
1077
+ primal_out, tangent_out = fwAD.unpack_dual(dual_output)
1078
+ if tangent_out is not None:
1079
+ ret.append(tangent_out)
1080
+ else:
1081
+ ret.append(
1082
+ torch.zeros(
1083
+ [], dtype=primal_out.dtype, device=primal_out.device
1084
+ ).expand(primal_out.shape)
1085
+ )
1086
+ return tuple(ret)
1087
+
1088
+ if not _is_float_or_complex_tensor(current_input):
1089
+ continue
1090
+
1091
+ tangents = [torch.randn_like(current_input) for _ in range(2)]
1092
+ expected = [jvp(t) for t in tangents]
1093
+ expected = [torch.stack(shards) for shards in zip(*expected)]
1094
+
1095
+ try:
1096
+ result = _vmap(jvp)(torch.stack(tangents))
1097
+ except RuntimeError as ex:
1098
+ # Rethrow to provide a better error message
1099
+ raise GradcheckError(
1100
+ f"While computing batched gradients, got: {ex}\n\n{FAILED_BATCHED_GRAD_MSG_FWD_AD}"
1101
+ ) from ex
1102
+
1103
+ for input_idx, (res, exp) in enumerate(zip(result, expected)):
1104
+ if torch.allclose(res, exp):
1105
+ continue
1106
+ raise GradcheckError(
1107
+ _get_failed_batched_grad_test_msg(
1108
+ input_idx, input_idx, res, exp, is_forward_ad=True
1109
+ )
1110
+ )
1111
+ return True
1112
+
1113
+
1114
+ def _test_batched_grad(input, output, output_idx) -> bool:
1115
+ # NB: _test_batched_grad compares two autograd.grad invocations with a single
1116
+ # vmap(autograd.grad) invocation. It's not exactly a "gradcheck" in the
1117
+ # sense that we're not comparing an analytical jacobian with a numeric one,
1118
+ # but it is morally similar (we could have computed a full analytic jac
1119
+ # via vmap, but that is potentially slow)
1120
+ diff_input_list = list(_iter_tensors(input, True))
1121
+ grad = functools.partial(
1122
+ torch.autograd.grad,
1123
+ output,
1124
+ diff_input_list,
1125
+ retain_graph=True,
1126
+ allow_unused=True,
1127
+ )
1128
+
1129
+ def vjp(v):
1130
+ results = grad(v)
1131
+ results = tuple(
1132
+ grad
1133
+ if grad is not None
1134
+ else torch.zeros([], dtype=inp.dtype, device=inp.device).expand(inp.shape)
1135
+ for grad, inp in zip(results, diff_input_list)
1136
+ )
1137
+ return results
1138
+
1139
+ grad_outputs = [torch.randn_like(output) for _ in range(2)]
1140
+
1141
+ expected = [vjp(gO) for gO in grad_outputs]
1142
+ expected = [torch.stack(shards) for shards in zip(*expected)]
1143
+
1144
+ # Squash warnings since these are expected to happen in most cases
1145
+ # NB: this doesn't work for CUDA tests: https://github.com/pytorch/pytorch/issues/50209
1146
+ with warnings.catch_warnings():
1147
+ warnings.filterwarnings("ignore", message="There is a performance drop")
1148
+ warnings.filterwarnings("ignore", message="Please use torch.vmap")
1149
+ try:
1150
+ result = vmap(vjp)(torch.stack(grad_outputs))
1151
+ except RuntimeError as ex:
1152
+ # It's OK that we're not raising the error at the correct callsite.
1153
+ # That's because the callsite is always going to inside the Python
1154
+ # autograd.grad instead of the C++ traceback of what line in the
1155
+ # backward formula
1156
+ raise GradcheckError(
1157
+ f"While computing batched gradients, got: {ex}\n\n{FAILED_BATCHED_GRAD_MSG}"
1158
+ ) from ex
1159
+
1160
+ for input_idx, (res, exp) in enumerate(zip(result, expected)):
1161
+ if torch.allclose(res, exp):
1162
+ continue
1163
+ raise GradcheckError(
1164
+ _get_failed_batched_grad_test_msg(output_idx, input_idx, res, exp)
1165
+ )
1166
+ return True
1167
+
1168
+
1169
+ def _test_backward_mul_by_grad_output(outputs, inputs, masked) -> bool:
1170
+ # Tests that backward is multiplied by grad_output
1171
+ diff_input_list: List[torch.Tensor] = list(_iter_tensors(inputs, True))
1172
+ if not diff_input_list:
1173
+ raise GradcheckError("no Tensors requiring grad found in input")
1174
+ grads_input = torch.autograd.grad(
1175
+ outputs,
1176
+ diff_input_list,
1177
+ [
1178
+ torch.zeros_like(o, memory_format=torch.legacy_contiguous_format)
1179
+ for o in outputs
1180
+ ],
1181
+ allow_unused=True,
1182
+ )
1183
+ for gi, di in zip(grads_input, diff_input_list):
1184
+ if gi is None:
1185
+ continue
1186
+ if isinstance(gi, torch.Tensor) and gi.layout != torch.strided:
1187
+ if gi.layout != di.layout:
1188
+ raise GradcheckError(
1189
+ "grad is incorrect layout ("
1190
+ + str(gi.layout)
1191
+ + " is not "
1192
+ + str(di.layout)
1193
+ + ")"
1194
+ )
1195
+ if _is_sparse_any_tensor(gi):
1196
+ sparse_kind = str(gi.layout).replace("torch.", "").replace("_coo", "")
1197
+ if gi.sparse_dim() != di.sparse_dim():
1198
+ raise GradcheckError(
1199
+ f"grad is {sparse_kind} tensor, but has incorrect sparse_dim"
1200
+ f" {gi.sparse_dim()}, expected {di.sparse_dim()}"
1201
+ )
1202
+ if gi.dense_dim() != di.dense_dim():
1203
+ raise GradcheckError(
1204
+ f"grad is {sparse_kind} tensor, but has incorrect dense_dim"
1205
+ f" {gi.dense_dim()}, expected {di.dense_dim()}"
1206
+ )
1207
+ gi = gi.to_dense()
1208
+ di = di.to_dense()
1209
+ if masked:
1210
+ if not torch.allclose(gi, torch.zeros_like(gi)):
1211
+ raise GradcheckError("backward not multiplied by grad_output")
1212
+ elif not gi.eq(0).all():
1213
+ raise GradcheckError("backward not multiplied by grad_output")
1214
+ if gi.dtype != di.dtype:
1215
+ raise GradcheckError("grad is incorrect type")
1216
+ if gi.device != di.device:
1217
+ raise GradcheckError("grad is incorrect device")
1218
+ if gi.size() != di.size():
1219
+ raise GradcheckError("grad is incorrect size")
1220
+ return True
1221
+
1222
+
1223
+ def _test_undefined_forward_mode(func, outputs, inputs):
1224
+ fwAD = torch.autograd.forward_ad
1225
+
1226
+ inp_tensors_idx, inp_tensors = _get_inp_tensors(inputs)
1227
+ all_v, all_u, all_u_dense = _make_vectors(inp_tensors, outputs, use_forward_ad=True)
1228
+
1229
+ tensor_inputs = tuple(i for i in inputs if is_tensor_like(i) and i.requires_grad)
1230
+
1231
+ with fwAD.dual_level():
1232
+ fw_grads = []
1233
+ dual_inputs = []
1234
+ tensor_indices = set()
1235
+ for i, inp in enumerate(inputs):
1236
+ if is_tensor_like(inp) and inp.requires_grad:
1237
+ if inp.layout == torch._mkldnn: # type: ignore[attr-defined]
1238
+ raise ValueError(
1239
+ "MKLDNN inputs are not support for forward AD gradcheck."
1240
+ )
1241
+
1242
+ inp = fwAD.make_dual(inp.detach(), torch.zeros_like(inp))
1243
+ # If inp is a differentiable view, the dual might not be the tangent given to
1244
+ # make_dual, so read it explicitly from the dual tensor
1245
+ fw_grads.append(fwAD.unpack_dual(inp)[1])
1246
+ tensor_indices.add(i)
1247
+ dual_inputs.append(inp)
1248
+
1249
+ for i, (fw_grad, u) in enumerate(zip(fw_grads, all_u)):
1250
+ fw_grad.copy_(u.view_as(fw_grad))
1251
+
1252
+ for idx, inp in enumerate(inputs):
1253
+ if idx not in tensor_indices:
1254
+ continue
1255
+ dual_inp_obj = dual_inputs[idx]
1256
+
1257
+ # case 1 (Materialized Zero Tensor Tangent)
1258
+ dual_inputs[idx] = fwAD.make_dual(inp.detach(), torch.zeros_like(inp))
1259
+ raw_outputs = _as_tuple(func(*dual_inputs))
1260
+ dual_outputs1 = filter(_is_float_or_complex_tensor, raw_outputs)
1261
+
1262
+ # case 2 (Efficient Zero Tensor Tangent since we don't make a dual object and pass a regular tensor)
1263
+ dual_inputs[idx] = inp.detach()
1264
+ raw_outputs = _as_tuple(func(*dual_inputs))
1265
+ dual_outputs2 = filter(_is_float_or_complex_tensor, raw_outputs)
1266
+
1267
+ # reset
1268
+ dual_inputs[idx] = dual_inp_obj
1269
+
1270
+ for index_o, (d_o1, d_o2) in enumerate(zip(dual_outputs1, dual_outputs2)):
1271
+ val1, res1 = fwAD.unpack_dual(d_o1)
1272
+ val2, res2 = fwAD.unpack_dual(d_o2)
1273
+
1274
+ if not (res1 is None or res2 is None):
1275
+ if not torch.allclose(res1, res2):
1276
+ raise GradcheckError(
1277
+ "Mismatch in tangent values for output with index: ",
1278
+ index_o,
1279
+ " when input: ",
1280
+ inp,
1281
+ " has an undefined tangent value. ",
1282
+ " Got: ",
1283
+ res1,
1284
+ " but expected: ",
1285
+ res2,
1286
+ )
1287
+ return True
1288
+
1289
+
1290
+ def _test_undefined_backward_mode(func, outputs, inputs) -> bool:
1291
+ diff_input_list: List[torch.Tensor] = list(_iter_tensors(inputs, True))
1292
+ if not diff_input_list:
1293
+ raise GradcheckError("no Tensors requiring grad found in input")
1294
+
1295
+ def warn_bc_breaking():
1296
+ warnings.warn(
1297
+ "Backwards compatibility: New undefined gradient support checking "
1298
+ "feature is enabled by default, but it may break existing callers "
1299
+ "of this function. If this is true for you, you can call this "
1300
+ 'function with "check_undefined_grad=False" to disable the feature'
1301
+ )
1302
+
1303
+ def check_undefined_grad_support(output_to_check):
1304
+ grads_output = [
1305
+ torch.zeros_like(o, memory_format=torch.legacy_contiguous_format)
1306
+ for o in output_to_check
1307
+ ]
1308
+ try:
1309
+ grads_input = torch.autograd.grad(
1310
+ output_to_check, diff_input_list, grads_output, allow_unused=True
1311
+ )
1312
+ except RuntimeError as e:
1313
+ warn_bc_breaking()
1314
+ raise GradcheckError(
1315
+ "Expected backward function to handle undefined output grads. "
1316
+ 'Please look at "Notes about undefined output gradients" in '
1317
+ '"tools/autograd/derivatives.yaml"'
1318
+ ) from e
1319
+
1320
+ for gi, i in zip(grads_input, diff_input_list):
1321
+ if (gi is not None) and (not gi.eq(0).all()):
1322
+ warn_bc_breaking()
1323
+ raise GradcheckError(
1324
+ "Expected all input grads to be undefined or zero when all output grads are undefined "
1325
+ 'or zero. Please look at "Notes about undefined output gradients" in '
1326
+ '"tools/autograd/derivatives.yaml"'
1327
+ )
1328
+ return True
1329
+
1330
+ # All backward functions must work properly if all output grads are undefined
1331
+ outputs_to_check = [
1332
+ [
1333
+ torch._C._functions.UndefinedGrad()(o)
1334
+ for o in _differentiable_outputs(func(*inputs))
1335
+ # This check filters out Tensor-likes that aren't instances of Tensor.
1336
+ if isinstance(o, torch.Tensor)
1337
+ ]
1338
+ ]
1339
+
1340
+ # If there are multiple output grads, we should be able to undef one at a time without error
1341
+ if len(outputs_to_check[0]) > 1:
1342
+ for undef_grad_idx in range(len(outputs)):
1343
+ output_to_check = _differentiable_outputs(func(*inputs))
1344
+ outputs_to_check.append(
1345
+ [
1346
+ torch._C._functions.UndefinedGrad()(o)
1347
+ if idx == undef_grad_idx
1348
+ else o
1349
+ for idx, o in enumerate(output_to_check)
1350
+ ]
1351
+ )
1352
+
1353
+ return all(check_undefined_grad_support(output) for output in outputs_to_check)
1354
+
1355
+
1356
+ def _as_tuple(x):
1357
+ if isinstance(x, tuple):
1358
+ return x
1359
+ elif isinstance(x, list):
1360
+ return tuple(x)
1361
+ else:
1362
+ return (x,)
1363
+
1364
+
1365
+ def _differentiable_outputs(x):
1366
+ return tuple(o for o in _as_tuple(x) if o.requires_grad)
1367
+
1368
+
1369
+ def _get_notallclose_msg(
1370
+ analytical,
1371
+ numerical,
1372
+ output_idx,
1373
+ input_idx,
1374
+ complex_indices,
1375
+ test_imag=False,
1376
+ is_forward_ad=False,
1377
+ ) -> str:
1378
+ out_is_complex = (
1379
+ (not is_forward_ad) and complex_indices and output_idx in complex_indices
1380
+ )
1381
+ inp_is_complex = is_forward_ad and complex_indices and input_idx in complex_indices
1382
+ part = "imaginary" if test_imag else "real"
1383
+ element = "inputs" if is_forward_ad else "outputs"
1384
+ prefix = (
1385
+ ""
1386
+ if not (out_is_complex or inp_is_complex)
1387
+ else f"While considering the {part} part of complex {element} only, "
1388
+ )
1389
+ mode = "computed with forward mode " if is_forward_ad else ""
1390
+ return (
1391
+ prefix + "Jacobian %smismatch for output %d with respect to input %d,\n"
1392
+ "numerical:%s\nanalytical:%s\n"
1393
+ % (mode, output_idx, input_idx, numerical, analytical)
1394
+ )
1395
+
1396
+
1397
+ def _transpose(matrix_of_tensors):
1398
+ # returns list of tuples
1399
+ return list(zip(*matrix_of_tensors))
1400
+
1401
+
1402
+ def _real_and_imag_output(fn):
1403
+ # returns new functions real(fn), and imag(fn) where real(fn) and imag(fn) behave the same as
1404
+ # the original fn, except torch.real or torch.imag are applied to the complex outputs
1405
+ def apply_to_c_outs(fn, fn_to_apply):
1406
+ def wrapped_fn(*inputs):
1407
+ outs = _as_tuple(fn(*inputs))
1408
+ return tuple(fn_to_apply(o) if o.is_complex() else o for o in outs)
1409
+
1410
+ return wrapped_fn
1411
+
1412
+ return apply_to_c_outs(fn, torch.real), apply_to_c_outs(fn, torch.imag)
1413
+
1414
+
1415
+ def _real_and_imag_input(fn, complex_inp_indices, tupled_inputs):
1416
+ # returns new functions that take real inputs instead of complex inputs as
1417
+ # (x, y) -> fn(x + y * 1j). And it computes: inp -> fn(inp + y * 1j) and inp -> fn(x + inp * 1j).
1418
+ # In each case, the other part is considered constant.
1419
+ # We do not use 0 for the constant here to make sure we always call the user function with a valid input.
1420
+ def apply_to_c_inps(fn, fn_to_apply):
1421
+ def wrapped_fn(*inputs):
1422
+ new_inputs = list(inputs)
1423
+ for should_be_complex in complex_inp_indices:
1424
+ new_inputs[should_be_complex] = fn_to_apply(
1425
+ new_inputs[should_be_complex], tupled_inputs[should_be_complex]
1426
+ )
1427
+ return _as_tuple(fn(*new_inputs))
1428
+
1429
+ return wrapped_fn
1430
+
1431
+ real_fn = apply_to_c_inps(fn, lambda inp, orig: inp + orig.imag * 1j)
1432
+ imag_fn = apply_to_c_inps(fn, lambda inp, orig: orig.real + inp * 1j)
1433
+ return real_fn, imag_fn
1434
+
1435
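+ # The helpers above reduce a check on complex inputs to two checks on real inputs:
+ # one varies the real part while the original imaginary part is held constant, and
+ # the other does the reverse. The sketch below is an added illustration (not part of
+ # the original file) of that substitution for one complex input; the hypothetical
+ # helper `_example_split_complex_input` is never called by gradcheck.
+ def _example_split_complex_input(fn, z):
+     def real_fn(x):
+         # Vary the real part; keep the original imaginary part of z constant.
+         return fn(x + z.imag * 1j)
+
+     def imag_fn(y):
+         # Vary the imaginary part; keep the original real part of z constant.
+         return fn(z.real + y * 1j)
+
+     return real_fn, imag_fn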
+
1436
+ def _gradcheck_real_imag(
1437
+ gradcheck_fn,
1438
+ func,
1439
+ func_out,
1440
+ tupled_inputs,
1441
+ outputs,
1442
+ eps,
1443
+ rtol,
1444
+ atol,
1445
+ check_grad_dtypes,
1446
+ check_forward_ad,
1447
+ check_backward_ad,
1448
+ nondet_tol,
1449
+ check_undefined_grad,
1450
+ ):
1451
+ complex_out_indices = [i for i, o in enumerate(outputs) if o.is_complex()]
1452
+ has_any_complex_output = any(o.is_complex() for o in _as_tuple(func_out))
1453
+ if check_backward_ad:
1454
+ if has_any_complex_output:
1455
+ real_fn, imag_fn = _real_and_imag_output(func)
1456
+
1457
+ imag_func_out = imag_fn(*tupled_inputs)
1458
+ imag_outputs = _differentiable_outputs(imag_func_out)
1459
+ gradcheck_fn(
1460
+ imag_fn,
1461
+ imag_func_out,
1462
+ tupled_inputs,
1463
+ imag_outputs,
1464
+ eps,
1465
+ rtol,
1466
+ atol,
1467
+ check_grad_dtypes,
1468
+ nondet_tol,
1469
+ complex_indices=complex_out_indices,
1470
+ test_imag=True,
1471
+ )
1472
+
1473
+ real_func_out = real_fn(*tupled_inputs)
1474
+ real_outputs = _differentiable_outputs(real_func_out)
1475
+ gradcheck_fn(
1476
+ real_fn,
1477
+ real_func_out,
1478
+ tupled_inputs,
1479
+ real_outputs,
1480
+ eps,
1481
+ rtol,
1482
+ atol,
1483
+ check_grad_dtypes,
1484
+ nondet_tol,
1485
+ complex_indices=complex_out_indices,
1486
+ )
1487
+ else:
1488
+ gradcheck_fn(
1489
+ func,
1490
+ func_out,
1491
+ tupled_inputs,
1492
+ outputs,
1493
+ eps,
1494
+ rtol,
1495
+ atol,
1496
+ check_grad_dtypes,
1497
+ nondet_tol,
1498
+ )
1499
+
1500
+ if check_forward_ad:
1501
+ complex_inp_indices = [
1502
+ i
1503
+ for i, inp in enumerate(tupled_inputs)
1504
+ if is_tensor_like(inp) and inp.is_complex()
1505
+ ]
1506
+ if complex_inp_indices:
1507
+ real_fn, imag_fn = _real_and_imag_input(
1508
+ func, complex_inp_indices, tupled_inputs
1509
+ )
1510
+
1511
+ imag_inputs = [
1512
+ inp.imag if is_tensor_like(inp) and inp.is_complex() else inp
1513
+ for inp in tupled_inputs
1514
+ ]
1515
+ imag_func_out = imag_fn(*imag_inputs)
1516
+ diff_imag_func_out = _differentiable_outputs(imag_func_out)
1517
+ gradcheck_fn(
1518
+ imag_fn,
1519
+ imag_func_out,
1520
+ imag_inputs,
1521
+ diff_imag_func_out,
1522
+ eps,
1523
+ rtol,
1524
+ atol,
1525
+ check_grad_dtypes,
1526
+ nondet_tol,
1527
+ complex_indices=complex_inp_indices,
1528
+ test_imag=True,
1529
+ use_forward_ad=True,
1530
+ )
1531
+
1532
+ real_inputs = [
1533
+ inp.real if is_tensor_like(inp) and inp.is_complex() else inp
1534
+ for inp in tupled_inputs
1535
+ ]
1536
+ real_func_out = real_fn(*real_inputs)
1537
+ diff_real_func_out = _differentiable_outputs(real_func_out)
1538
+ gradcheck_fn(
1539
+ real_fn,
1540
+ real_func_out,
1541
+ real_inputs,
1542
+ diff_real_func_out,
1543
+ eps,
1544
+ rtol,
1545
+ atol,
1546
+ check_grad_dtypes,
1547
+ nondet_tol,
1548
+ complex_indices=complex_inp_indices,
1549
+ use_forward_ad=True,
1550
+ )
1551
+ if check_undefined_grad:
1552
+ _test_undefined_forward_mode(imag_fn, imag_func_out, imag_inputs)
1553
+ _test_undefined_forward_mode(real_fn, real_func_out, real_inputs)
1554
+ else:
1555
+ gradcheck_fn(
1556
+ func,
1557
+ func_out,
1558
+ tupled_inputs,
1559
+ outputs,
1560
+ eps,
1561
+ rtol,
1562
+ atol,
1563
+ check_grad_dtypes,
1564
+ nondet_tol,
1565
+ use_forward_ad=True,
1566
+ )
1567
+ if check_undefined_grad:
1568
+ _test_undefined_forward_mode(func, outputs, tupled_inputs)
1569
+
1570
+
1571
+ def _slow_gradcheck(
1572
+ func,
1573
+ func_out,
1574
+ tupled_inputs,
1575
+ outputs,
1576
+ eps,
1577
+ rtol,
1578
+ atol,
1579
+ check_grad_dtypes,
1580
+ nondet_tol,
1581
+ *,
1582
+ use_forward_ad=False,
1583
+ complex_indices=None,
1584
+ test_imag=False,
1585
+ masked=False,
1586
+ ):
1587
+ func_out = _as_tuple(func_out)
1588
+ if not outputs:
1589
+ return _check_no_differentiable_outputs(
1590
+ func, tupled_inputs, func_out, eps=eps, is_forward_ad=use_forward_ad
1591
+ )
1592
+ tupled_inputs_numerical = tupled_inputs if masked else _densify(tupled_inputs)
1593
+
1594
+ numerical = _transpose(
1595
+ _get_numerical_jacobian(
1596
+ func,
1597
+ tupled_inputs_numerical,
1598
+ func_out,
1599
+ eps=eps,
1600
+ is_forward_ad=use_forward_ad,
1601
+ )
1602
+ )
1603
+ # Note: [numerical vs analytical output length]
1604
+ # The numerical path returns jacobian quantity for all outputs, even if requires_grad of that
1605
+ # output is False. This behavior is necessary for _check_no_differentiable_outputs to work.
1606
+ numerical = [nj for o, nj in zip(func_out, numerical) if o.requires_grad]
1607
+ if use_forward_ad:
1608
+ analytical_forward = _get_analytical_jacobian_forward_ad(
1609
+ func, tupled_inputs, func_out, check_grad_dtypes=check_grad_dtypes
1610
+ )
1611
+
1612
+ for i, n_per_out in enumerate(numerical):
1613
+ for j, n in enumerate(n_per_out):
1614
+ a = analytical_forward[j][i]
1615
+ if not _allclose_with_type_promotion(a, n.to(a.device), rtol, atol):
1616
+ raise GradcheckError(
1617
+ _get_notallclose_msg(
1618
+ a, n, i, j, complex_indices, test_imag, is_forward_ad=True
1619
+ )
1620
+ )
1621
+ else:
1622
+ for i, o in enumerate(outputs):
1623
+ analytical = _check_analytical_jacobian_attributes(
1624
+ tupled_inputs, o, nondet_tol, check_grad_dtypes
1625
+ )
1626
+
1627
+ for j, (a, n) in enumerate(zip(analytical, numerical[i])):
1628
+ if not _allclose_with_type_promotion(a, n.to(a.device), rtol, atol):
1629
+ raise GradcheckError(
1630
+ _get_notallclose_msg(a, n, i, j, complex_indices, test_imag)
1631
+ )
1632
+
1633
+ return True
1634
+
1635
+
1636
+ def _dot_with_type_promotion(u, v):
1637
+ assert u.dim() == 1 and v.dim() == 1
1638
+ return (u * v).sum()
1639
+
1640
+
1641
+ def _allclose_with_type_promotion(a, b, rtol, atol):
1642
+ promoted_type = torch.promote_types(a.dtype, b.dtype)
1643
+ a = a.to(dtype=promoted_type)
1644
+ b = b.to(dtype=promoted_type)
1645
+ return torch.allclose(a, b, rtol, atol)
1646
+
1647
+
1648
+ def _to_real_dtype(dtype):
1649
+ if dtype == torch.complex128:
1650
+ return torch.float64
1651
+ elif dtype == torch.complex64:
1652
+ return torch.float32
1653
+ else:
1654
+ return dtype
1655
+
1656
+
1657
+ def _vec_from_tensor(x, generator, downcast_complex=False):
1658
+ # Create a random vector with the same number of elements as x and the same
1659
+ # dtype/device. If x is complex and downcast_complex is False, we create a
1660
+ # complex tensor with only real component.
1661
+ if x.layout == torch.sparse_coo:
1662
+ # For sparse, create a random sparse vec with random values in the same
1663
+ # indices. Make sure size is set so that it isn't inferred to be smaller.
1664
+ x_values = x._values()
1665
+ dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype
1666
+ values = (
1667
+ torch.rand(x_values.numel(), generator=generator)
1668
+ .to(dtype=dtype, device=x.device)
1669
+ .view(x_values.shape)
1670
+ )
1671
+ values /= values.norm()
1672
+ vec = torch.sparse_coo_tensor(x._indices(), values, x.size(), device=x.device)
1673
+ elif _is_sparse_compressed_tensor(x):
1674
+ if x.layout in {torch.sparse_csr, torch.sparse_bsr}:
1675
+ compressed_indices, plain_indices = x.crow_indices(), x.col_indices()
1676
+ else:
1677
+ compressed_indices, plain_indices = x.ccol_indices(), x.row_indices()
1678
+ x_values = x.values()
1679
+ dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype
1680
+ values = (
1681
+ torch.rand(x_values.numel(), generator=generator)
1682
+ .to(dtype=dtype, device=x.device)
1683
+ .view(x_values.shape)
1684
+ )
1685
+ values /= values.norm()
1686
+ vec = torch.sparse_compressed_tensor(
1687
+ compressed_indices,
1688
+ plain_indices,
1689
+ values,
1690
+ x.size(),
1691
+ layout=x.layout,
1692
+ device=x.device,
1693
+ )
1694
+ else:
1695
+ dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype
1696
+ vec = torch.rand(x.numel(), generator=generator).to(
1697
+ dtype=dtype, device=x.device
1698
+ )
1699
+ vec /= vec.norm()
1700
+ return vec
1701
+
1702
+
1703
+ def _get_inp_tensors(tupled_inputs):
1704
+ inp_idx_tup = [
1705
+ (i, t)
1706
+ for i, t in enumerate(tupled_inputs)
1707
+ if is_tensor_like(t) and t.requires_grad
1708
+ ]
1709
+ return [tup[0] for tup in inp_idx_tup], [tup[1] for tup in inp_idx_tup]
1710
+
1711
+
1712
+ def _adjusted_atol(atol, u, v):
1713
+ # In slow gradcheck, we compare A and B element-wise, i.e., for some a, b we
1714
+ # allow: |a - b| < atol + rtol * b. But since we now compare q1 = v^T A u and
1715
+ # q2 = v^T B u, we must allow |q1 - q2| < v^T E u + rtol * v^T B u, where E is
1716
+ # the correctly sized matrix in which each entry is atol.
1717
+ #
1718
+ # We see that atol needs to be scaled by v^T M u (where M is an all-ones M x N
1719
+ # matrix): v^T M u = \sum_{i} \sum_{j} u_i * v_j = (\sum_{i} u_i)(\sum_{i} v_i)
1720
+ # TODO: properly handle case when u is tuple instead of only taking first element
1721
+ u = u[0] if isinstance(u, tuple) else u
1722
+ sum_u = u.sum()
1723
+ sum_v = 1.0 if v is None else v.sum()
1724
+ return atol * float(sum_u) * float(sum_v)
1725
+
1726
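+ # A quick numerical confirmation of the identity used above, added as an illustration
+ # (not part of the original file): for the all-`atol` matrix E, v^T E u equals
+ # atol * sum(v) * sum(u). The hypothetical helper `_example_adjusted_atol_identity`
+ # is never called by gradcheck.
+ def _example_adjusted_atol_identity(atol, u, v):
+     E = atol * torch.ones(v.numel(), u.numel(), dtype=u.dtype, device=u.device)
+     lhs = v.reshape(-1) @ E @ u.reshape(-1)
+     rhs = atol * u.sum() * v.sum()
+     return torch.allclose(lhs, rhs)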
+
1727
+ FAST_FAIL_SLOW_OK_MSG = """
1728
+ Fast gradcheck failed but element-wise differences are small. This means that the
1729
+ test might've passed in slow_mode!
1730
+
1731
+ If you are adding a new operator, please file an issue and then use one of the
1732
+ workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck:
1733
+
1734
+ If the test
1735
+ - manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
1736
+ with `fast_mode=False` as a keyword argument.
1737
+ - is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
1738
+ to have `gradcheck_fast_mode=False`
1739
+ - is a Module test (e.g., in common_nn.py), then modify the corresponding
1740
+ module_test entry to have `gradcheck_fast_mode=False`
1741
+ """.strip()
1742
+
1743
+
1744
+ def _run_slow_mode_and_get_error(
1745
+ func, tupled_inputs, outputs, input_idx, output_idx, rtol, atol, eps, is_forward_ad
1746
+ ):
1747
+ # Compute jacobians in slow mode for better error message
1748
+ slow_numerical = _get_numerical_jacobian(
1749
+ func, tupled_inputs, outputs, eps=eps, is_forward_ad=is_forward_ad
1750
+ )[input_idx][output_idx]
1751
+ if is_forward_ad:
1752
+
1753
+ def new_fn(inp):
1754
+ new_inputs = list(tupled_inputs)
1755
+ new_inputs[input_idx] = inp
1756
+ return _as_tuple(func(*new_inputs))[output_idx]
1757
+
1758
+ slow_analytical = _get_analytical_jacobian_forward_ad(
1759
+ new_fn, (tupled_inputs[input_idx],), (outputs[output_idx],)
1760
+ )[0][0]
1761
+ else:
1762
+ slow_analytical = _get_analytical_jacobian(
1763
+ tupled_inputs, outputs, input_idx, output_idx
1764
+ )
1765
+
1766
+ # Assume jacobians are non-empty and have the same shape
1767
+ slow_max_diff = (slow_numerical - slow_analytical).abs().max()
1768
+
1769
+ slow_allclose = torch.allclose(slow_analytical, slow_numerical, rtol, atol)
1770
+ msg = (
1771
+ "\nThe above quantities relating the numerical and analytical jacobians are computed \n"
1772
+ "in fast mode. See: https://github.com/pytorch/pytorch/issues/53876 for more background \n"
1773
+ "about fast mode. Below, we recompute numerical and analytical jacobians in slow mode:\n\n"
1774
+ f"Numerical:\n {slow_numerical}\n"
1775
+ f"Analytical:\n{slow_analytical}\n\n"
1776
+ f"The max per-element difference (slow mode) is: {slow_max_diff}.\n"
1777
+ )
1778
+ if slow_allclose:
1779
+ # Slow gradcheck would've passed!
1780
+ msg += FAST_FAIL_SLOW_OK_MSG
1781
+ return msg
1782
+
1783
+
1784
+ def _to_flat_dense_if_sparse(tensor):
1785
+ if _is_sparse_any_tensor(tensor):
1786
+ return tensor.to_dense().reshape(-1)
1787
+ else:
1788
+ return tensor
1789
+
1790
+
1791
+ def _make_vectors(inp_tensors, outputs, *, use_forward_ad):
1792
+ # Use our own generator to avoid messing with the user's RNG state
1793
+ g_cpu = torch.Generator()
1794
+
1795
+ def _vec_from_tensor_cpu(*args):
1796
+ # Default allocate all tensors on CPU, so they are on the same device as the generator
1797
+ # even if the user specified a default device
1798
+ with torch.device("cpu"):
1799
+ return _vec_from_tensor(*args)
1800
+
1801
+ all_u = []
1802
+ all_u_dense = []
1803
+ for inp in inp_tensors:
1804
+ ur = _vec_from_tensor_cpu(inp, g_cpu, True)
1805
+ ur_dense = _to_flat_dense_if_sparse(ur)
1806
+ if inp.is_complex():
1807
+ ui = _vec_from_tensor_cpu(inp, g_cpu, True)
1808
+ all_u.append((ur, ui))
1809
+ ui_dense = _to_flat_dense_if_sparse(ui)
1810
+ all_u_dense.append((ur_dense, ui_dense))
1811
+ else:
1812
+ all_u.append(ur)
1813
+ all_u_dense.append(ur_dense)
1814
+ all_v = (
1815
+ None
1816
+ if use_forward_ad
1817
+ else [_vec_from_tensor_cpu(out, g_cpu) for out in outputs]
1818
+ )
1819
+ return all_v, all_u, all_u_dense
1820
+
1821
+
1822
+ def _check_analytical_numerical_equal(
1823
+ all_analytical,
1824
+ all_numerical,
1825
+ complex_indices,
1826
+ tupled_inputs,
1827
+ outputs,
1828
+ func,
1829
+ all_v,
1830
+ all_u,
1831
+ rtol,
1832
+ atol,
1833
+ eps,
1834
+ test_imag,
1835
+ *,
1836
+ is_forward_ad=False,
1837
+ ):
1838
+ for i, all_numerical_for_input_i in enumerate(all_numerical):
1839
+ for j, n in enumerate(all_numerical_for_input_i):
1840
+ # Forward AD generates the transpose of what this function expects
1841
+ if is_forward_ad:
1842
+ a = all_analytical[i][j]
1843
+ else:
1844
+ a = all_analytical[j][i]
1845
+ n = n.to(device=a.device)
1846
+ updated_atol = _adjusted_atol(atol, all_u[i], all_v[j] if all_v else None)
1847
+ if not _allclose_with_type_promotion(a, n.to(a.device), rtol, updated_atol):
1848
+ jacobians_str = _run_slow_mode_and_get_error(
1849
+ func, tupled_inputs, outputs, i, j, rtol, atol, eps, is_forward_ad
1850
+ )
1851
+ raise GradcheckError(
1852
+ _get_notallclose_msg(
1853
+ a, n, j, i, complex_indices, test_imag, is_forward_ad
1854
+ )
1855
+ + jacobians_str
1856
+ )
1857
+
1858
+
1859
+ def _fast_gradcheck(
1860
+ func,
1861
+ func_out,
1862
+ inputs,
1863
+ outputs,
1864
+ eps,
1865
+ rtol,
1866
+ atol,
1867
+ check_grad_dtypes,
1868
+ nondet_tol,
1869
+ *,
1870
+ use_forward_ad=False,
1871
+ complex_indices=None,
1872
+ test_imag=False,
1873
+ masked=False,
1874
+ ):
1875
+ # See https://github.com/pytorch/pytorch/issues/53876 for details
1876
+ inp_tensors_idx, inp_tensors = _get_inp_tensors(inputs)
1877
+ # Backward mode computes v^T * J (VJP)
1878
+ # Since we computed J * u (JVP) through finite difference method, we perform an equality check
1879
+ # between VJP * u, v * JVP
1880
+ # ----
1881
+ # Forward mode computes J * u (JVP)
1882
+ # Since we already compute JVP through finite difference method,
1883
+ # we don't need v for correctness check here as asserted below
1884
+ all_v, all_u, all_u_dense = _make_vectors(
1885
+ inp_tensors, outputs, use_forward_ad=use_forward_ad
1886
+ )
1887
+
1888
+ inputs_numerical, all_u_numerical, all_v_numerical = (
1889
+ (inputs, all_u, all_v) if masked else _densify((inputs, all_u, all_v))
1890
+ )
1891
+
1892
+ numerical_vJu = _get_numerical_vJu(
1893
+ func,
1894
+ inputs_numerical,
1895
+ inp_tensors_idx,
1896
+ func_out,
1897
+ all_u_numerical,
1898
+ all_v_numerical,
1899
+ eps,
1900
+ is_forward_ad=use_forward_ad,
1901
+ )
1902
+ # TODO: replicate https://github.com/pytorch/pytorch/pull/77743 for fast gradcheck as well
1903
+ if use_forward_ad:
1904
+ assert all_v is None
1905
+ analytical_vJu = _get_analytical_jacobian_forward_ad(
1906
+ func,
1907
+ inputs,
1908
+ _as_tuple(func_out),
1909
+ all_u=all_u,
1910
+ check_grad_dtypes=check_grad_dtypes,
1911
+ )
1912
+ else:
1913
+ if not outputs:
1914
+ _check_no_differentiable_outputs_fast(
1915
+ func, func_out, inputs, inp_tensors_idx, all_u, eps, nondet_tol
1916
+ )
1917
+
1918
+ analytical_vJu = _get_analytical_vJu_backward_mode(
1919
+ inputs, outputs, nondet_tol, check_grad_dtypes, all_v, all_u_dense
1920
+ )
1921
+
1922
+ _check_analytical_numerical_equal(
1923
+ analytical_vJu,
1924
+ numerical_vJu,
1925
+ complex_indices,
1926
+ inputs,
1927
+ outputs,
1928
+ func,
1929
+ all_v,
1930
+ all_u,
1931
+ rtol,
1932
+ atol,
1933
+ eps,
1934
+ test_imag,
1935
+ is_forward_ad=use_forward_ad,
1936
+ )
1937
+
1938
+ return True
1939
+
1940
+
1941
+ # Note [VarArg of Tensors]
1942
+ # ~~~~~~~~~~~~~~~~~~~~~~~~
1943
+ # 'func' accepts a vararg of tensors, which isn't expressible in the type system at the moment.
1944
+ # If https://mypy.readthedocs.io/en/latest/additional_features.html?highlight=callable#extended-callable-types is accepted,
1945
+ # the '...' first argument of Callable can be replaced with VarArg(Tensor).
1946
+ # For now, we permit any input.
1947
+ def gradcheck(
1948
+ func: Callable[..., Union[_TensorOrTensors]], # See Note [VarArg of Tensors]
1949
+ inputs: _TensorOrTensors,
1950
+ *,
1951
+ eps: float = 1e-6,
1952
+ atol: float = 1e-5,
1953
+ rtol: float = 1e-3,
1954
+ raise_exception: bool = True,
1955
+ nondet_tol: float = 0.0,
1956
+ check_undefined_grad: bool = True,
1957
+ check_grad_dtypes: bool = False,
1958
+ check_batched_grad: bool = False,
1959
+ check_batched_forward_grad: bool = False,
1960
+ check_forward_ad: bool = False,
1961
+ check_backward_ad: bool = True,
1962
+ fast_mode: bool = False,
1963
+ masked: Optional[bool] = None,
1964
+ ) -> bool: # noqa: D400,D205
1965
+ r"""Check gradients computed via small finite differences against analytical
1966
+ gradients wrt tensors in :attr:`inputs` that are of floating point or complex type
1967
+ and with ``requires_grad=True``.
1968
+
1969
+ The check between numerical and analytical gradients uses :func:`~torch.allclose`.
1970
+
1971
+ For most of the complex functions we consider for optimization purposes, no notion of
1972
+ Jacobian exists. Instead, gradcheck verifies if the numerical and analytical values of
1973
+ the Wirtinger and Conjugate Wirtinger derivatives are consistent. Because the gradient
1974
+ computation is done under the assumption that the overall function has a real-valued
1975
+ output, we treat functions with complex output in a special way. For these functions,
1976
+ gradcheck is applied to two real-valued functions corresponding to taking the real
1977
+ components of the complex outputs for the first, and taking the imaginary components
1978
+ of the complex outputs for the second. For more details, check out
1979
+ :ref:`complex_autograd-doc`.
1980
+
1981
+ .. note::
1982
+ The default values are designed for :attr:`input` of double precision.
1983
+ This check will likely fail if :attr:`input` is of less precision, e.g.,
1984
+ ``FloatTensor``.
1985
+
1986
+ .. note::
1987
+ Gradcheck may fail when evaluated on non-differentiable points
1988
+ because the numerically computed gradients via finite differencing may differ from
1989
+ those computed analytically (not necessarily because either is incorrect).
1990
+ For more context, see :ref:`non-differentiable-func-grad`.
1991
+
1992
+ .. warning::
1993
+ If any checked tensor in :attr:`input` has overlapping memory, i.e.,
1994
+ different indices pointing to the same memory address (e.g., from
1995
+ :func:`torch.expand`), this check will likely fail because the numerical
1996
+ gradients computed by point perturbation at such indices will change
1997
+ values at all other indices that share the same memory address.
1998
+
1999
+ Args:
2000
+ func (function): a Python function that takes Tensor inputs and returns
2001
+ a Tensor or a tuple of Tensors
2002
+ inputs (tuple of Tensor or Tensor): inputs to the function
2003
+ eps (float, optional): perturbation for finite differences
2004
+ atol (float, optional): absolute tolerance
2005
+ rtol (float, optional): relative tolerance
2006
+ raise_exception (bool, optional): indicating whether to raise an exception if
2007
+ the check fails. The exception gives more information about the
2008
+ exact nature of the failure. This is helpful when debugging gradchecks.
2009
+ nondet_tol (float, optional): tolerance for non-determinism. When running
2010
+ identical inputs through the differentiation, the results must either match
2011
+ exactly (default, 0.0) or be within this tolerance.
2012
+ check_undefined_grad (bool, optional): if ``True``, check if undefined output grads
2013
+ are supported and treated as zeros, for ``Tensor`` outputs.
2014
+ check_batched_grad (bool, optional): if ``True``, check if we can compute
2015
+ batched gradients using prototype vmap support. Defaults to False.
2016
+ check_batched_forward_grad (bool, optional): if ``True``, checks if we can compute
2017
+ batched forward gradients using forward ad and prototype vmap support. Defaults to ``False``.
2018
+ check_forward_ad (bool, optional): if ``True``, check that the gradients computed with forward
2019
+ mode AD match the numerical ones. Defaults to ``False``.
2020
+ check_backward_ad (bool, optional): if ``False``, do not perform any checks that rely on
2021
+ backward mode AD to be implemented. Defaults to ``True``.
2022
+ fast_mode (bool, optional): Fast mode for gradcheck and gradgradcheck is currently only
2023
+ implemented for R to R functions. If none of the inputs and outputs are complex
2024
+ a faster implementation of gradcheck that no longer computes the entire jacobian
2025
+ is run; otherwise, we fall back to the slow implementation.
2026
+ masked (bool, optional): if ``True``, the gradients of unspecified elements of
2027
+ sparse tensors are ignored. Defaults to ``False``.
2028
+ Returns:
2029
+ ``True`` if all differences satisfy allclose condition
2030
+
2031
+ """
2032
+ assert (
2033
+ check_forward_ad or check_backward_ad
2034
+ ), "Expected at least one of check_forward_ad or check_backward_ad to be True"
2035
+ assert not (
2036
+ check_batched_grad and not check_backward_ad
2037
+ ), "Setting check_batched_grad=True requires check_backward_ad to be True"
2038
+ assert not (
2039
+ check_batched_forward_grad and not check_forward_ad
2040
+ ), "Setting check_batched_forward_grad=True requires check_forward_ad to be True"
2041
+ args = locals().copy()
2042
+ args.pop("raise_exception")
2043
+ if not raise_exception:
2044
+ try:
2045
+ return _gradcheck_helper(**args)
2046
+ except GradcheckError as e:
2047
+ return False
2048
+ else:
2049
+ return _gradcheck_helper(**args)
2050
+
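A minimal usage sketch for the ``gradcheck`` entry point defined above (editorial illustration, not part of the committed file); it assumes double-precision inputs with ``requires_grad=True``, as the docstring recommends:

    import torch
    from torch.autograd import gradcheck

    x = torch.randn(4, dtype=torch.double, requires_grad=True)
    # Passes when the analytical gradient of torch.sin agrees with the
    # finite-difference estimate within atol/rtol.
    assert gradcheck(torch.sin, (x,), eps=1e-6, atol=1e-4)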
2051
+
2052
+ def _gradcheck_helper(
2053
+ func,
2054
+ inputs,
2055
+ eps,
2056
+ atol,
2057
+ rtol,
2058
+ nondet_tol,
2059
+ check_undefined_grad,
2060
+ check_grad_dtypes,
2061
+ check_batched_grad,
2062
+ check_batched_forward_grad,
2063
+ check_forward_ad,
2064
+ check_backward_ad,
2065
+ fast_mode,
2066
+ masked,
2067
+ ):
2068
+ tupled_inputs = _as_tuple(inputs)
2069
+ _check_inputs(tupled_inputs)
2070
+
2071
+ func_out = func(*tupled_inputs)
2072
+ outputs = _differentiable_outputs(func_out)
2073
+ _check_outputs(outputs)
2074
+
2075
+ gradcheck_fn = functools.partial(
2076
+ _fast_gradcheck if fast_mode else _slow_gradcheck, masked=masked
2077
+ )
2078
+ _gradcheck_real_imag(
2079
+ gradcheck_fn,
2080
+ func,
2081
+ func_out,
2082
+ tupled_inputs,
2083
+ outputs,
2084
+ eps,
2085
+ rtol,
2086
+ atol,
2087
+ check_grad_dtypes,
2088
+ check_forward_ad=check_forward_ad,
2089
+ check_backward_ad=check_backward_ad,
2090
+ nondet_tol=nondet_tol,
2091
+ check_undefined_grad=check_undefined_grad,
2092
+ )
2093
+
2094
+ if check_batched_forward_grad:
2095
+ _test_batched_grad_forward_ad(func, tupled_inputs)
2096
+
2097
+ # Short circuit because remaining tests rely on backward AD to be implemented
2098
+ if not check_backward_ad:
2099
+ return True
2100
+
2101
+ for i, o in enumerate(outputs):
2102
+ if check_batched_grad:
2103
+ _test_batched_grad(tupled_inputs, o, i)
2104
+
2105
+ _test_backward_mul_by_grad_output(outputs, tupled_inputs, masked)
2106
+
2107
+ if check_undefined_grad and check_backward_ad:
2108
+ _test_undefined_backward_mode(func, outputs, tupled_inputs)
2109
+ return True
2110
+
2111
+
2112
+ def gradgradcheck(
2113
+ func: Callable[..., _TensorOrTensors], # See Note [VarArg of Tensors]
2114
+ inputs: _TensorOrTensors,
2115
+ grad_outputs: Optional[_TensorOrTensors] = None,
2116
+ *,
2117
+ eps: float = 1e-6,
2118
+ atol: float = 1e-5,
2119
+ rtol: float = 1e-3,
2120
+ gen_non_contig_grad_outputs: bool = False,
2121
+ raise_exception: bool = True,
2122
+ nondet_tol: float = 0.0,
2123
+ check_undefined_grad: bool = True,
2124
+ check_grad_dtypes: bool = False,
2125
+ check_batched_grad: bool = False,
2126
+ check_fwd_over_rev: bool = False,
2127
+ check_rev_over_rev: bool = True,
2128
+ fast_mode: bool = False,
2129
+ masked: bool = False,
2130
+ ) -> bool: # noqa: D400,D205
2131
+ r"""Check gradients of gradients computed via small finite differences
2132
+ against analytical gradients wrt tensors in :attr:`inputs` and
2133
+ :attr:`grad_outputs` that are of floating point or complex type and with
2134
+ ``requires_grad=True``.
2135
+
2136
+ This function checks that backpropagating through the gradients computed
2137
+ to the given :attr:`grad_outputs` is correct.
2138
+
2139
+ The check between numerical and analytical gradients uses :func:`~torch.allclose`.
2140
+
2141
+ .. note::
2142
+ The default values are designed for :attr:`input` and
2143
+ :attr:`grad_outputs` of double precision. This check will likely fail if
2144
+ they are of less precision, e.g., ``FloatTensor``.
2145
+
2146
+ .. warning::
2147
+ If any checked tensor in :attr:`input` and :attr:`grad_outputs` has
2148
+ overlapping memory, i.e., different indices pointing to the same memory
2149
+ address (e.g., from :func:`torch.expand`), this check will likely fail
2150
+ because the numerical gradients computed by point perturbation at such
2151
+ indices will change values at all other indices that share the same
2152
+ memory address.
2153
+
2154
+ Args:
2155
+ func (function): a Python function that takes Tensor inputs and returns
2156
+ a Tensor or a tuple of Tensors
2157
+ inputs (tuple of Tensor or Tensor): inputs to the function
2158
+ grad_outputs (tuple of Tensor or Tensor, optional): The gradients with
2159
+ respect to the function's outputs.
2160
+ eps (float, optional): perturbation for finite differences
2161
+ atol (float, optional): absolute tolerance
2162
+ rtol (float, optional): relative tolerance
2163
+ gen_non_contig_grad_outputs (bool, optional): if :attr:`grad_outputs` is
2164
+ ``None`` and :attr:`gen_non_contig_grad_outputs` is ``True``, the
2165
+ randomly generated gradient outputs are made to be noncontiguous
2166
+ raise_exception (bool, optional): indicating whether to raise an exception if
2167
+ the check fails. The exception gives more information about the
2168
+ exact nature of the failure. This is helpful when debugging gradchecks.
2169
+ nondet_tol (float, optional): tolerance for non-determinism. When running
2170
+ identical inputs through the differentiation, the results must either match
2171
+ exactly (default, 0.0) or be within this tolerance. Note that a small amount
2172
+ of nondeterminism in the gradient will lead to larger inaccuracies in
2173
+ the second derivative.
2174
+ check_undefined_grad (bool, optional): if True, check if undefined output grads
2175
+ are supported and treated as zeros
2176
+ check_batched_grad (bool, optional): if True, check if we can compute
2177
+ batched gradients using prototype vmap support. Defaults to False.
2178
+ fast_mode (bool, optional): if True, run a faster implementation of gradgradcheck that
2179
+ no longer computes the entire jacobian.
2180
+ masked (bool, optional): if True, the gradients of unspecified elements of
2181
+ sparse tensors are ignored (default, False).
2182
+ Returns:
2183
+ True if all differences satisfy allclose condition
2184
+ """
2185
+ assert (
2186
+ check_fwd_over_rev or check_rev_over_rev
2187
+ ), "Expected at least one of check_fwd_over_rev or check_rev_over_rev to be True"
2188
+ assert not (
2189
+ check_undefined_grad and not check_rev_over_rev
2190
+ ), "Setting check_undefined_grad=True requires check_rev_over_rev to be True"
2191
+ assert not (
2192
+ check_batched_grad and not check_rev_over_rev
2193
+ ), "Setting check_batched_grad=True requires check_rev_over_rev to be True"
2194
+ # TODO: do we want to test this too?
2195
+ # assert not (check_batched_forward_grad and not check_fwd_over_rev), (
2196
+ # "Setting check_batched_forward_grad=True requires check_fwd_over_rev to be True")
2197
+ tupled_inputs = _as_tuple(inputs)
2198
+
2199
+ if grad_outputs is None:
2200
+ # If grad_outputs is not specified, create random Tensors of the same shape, type, and device as the outputs
2201
+
2202
+ outputs = _differentiable_outputs(func(*tupled_inputs))
2203
+ tupled_grad_outputs = tuple(
2204
+ torch.testing.make_tensor(
2205
+ x.shape,
2206
+ dtype=x.dtype
2207
+ if x.is_floating_point() or x.is_complex()
2208
+ else torch.double,
2209
+ device=x.device,
2210
+ low=-1,
2211
+ high=1,
2212
+ requires_grad=True,
2213
+ noncontiguous=gen_non_contig_grad_outputs,
2214
+ )
2215
+ for x in outputs
2216
+ )
2217
+ else:
2218
+ tupled_grad_outputs = _as_tuple(grad_outputs)
2219
+
2220
+ num_outputs = len(tupled_grad_outputs)
2221
+
2222
+ # NB: We need to save the requires_grad information about the inputs here because gradcheck detaches inputs
2223
+ # before running forward mode AD
2224
+ diff_input_args_indices = {
2225
+ i for i, x in enumerate(tupled_inputs) if is_tensor_like(x) and x.requires_grad
2226
+ }
2227
+ diff_grad_output_indices = {
2228
+ i for i, x in enumerate(tupled_grad_outputs) if x.requires_grad
2229
+ }
2230
+
2231
+ def new_func(*args):
2232
+ # Restore the requires_grad information
2233
+ input_args = tuple(
2234
+ x.requires_grad_() if i in diff_input_args_indices else x
2235
+ for i, x in enumerate(args[:-num_outputs])
2236
+ )
2237
+ outputs = _differentiable_outputs(func(*input_args))
2238
+ grad_outputs = tuple(
2239
+ x.requires_grad_() if i in diff_grad_output_indices else x
2240
+ for i, x in enumerate(args[-num_outputs:])
2241
+ )
2242
+ diff_input_args = tuple(
2243
+ x for i, x in enumerate(input_args) if i in diff_input_args_indices
2244
+ )
2245
+ grad_inputs = torch.autograd.grad(
2246
+ outputs, diff_input_args, grad_outputs, create_graph=True, allow_unused=True
2247
+ )
2248
+ grad_inputs = tuple(g for g in grad_inputs if g is not None)
2249
+ return grad_inputs
2250
+
2251
+ return gradcheck(
2252
+ new_func,
2253
+ tupled_inputs + tupled_grad_outputs,
2254
+ eps=eps,
2255
+ atol=atol,
2256
+ rtol=rtol,
2257
+ raise_exception=raise_exception,
2258
+ nondet_tol=nondet_tol,
2259
+ check_undefined_grad=check_undefined_grad,
2260
+ check_grad_dtypes=check_grad_dtypes,
2261
+ check_batched_grad=check_batched_grad,
2262
+ fast_mode=fast_mode,
2263
+ check_forward_ad=check_fwd_over_rev,
2264
+ check_backward_ad=check_rev_over_rev,
2265
+ masked=masked,
2266
+ )
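For completeness, a hedged usage sketch of ``gradgradcheck`` (illustrative only, not part of the committed file), exercising second-order gradients of a simple cubic:

    import torch
    from torch.autograd import gradgradcheck

    x = torch.randn(3, dtype=torch.double, requires_grad=True)
    # Double backward of (x ** 3).sum() is compared against finite differences
    # of the first-order gradient.
    assert gradgradcheck(lambda t: (t ** 3).sum(), (x,))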
venv/lib/python3.10/site-packages/torch/autograd/graph.py ADDED
@@ -0,0 +1,749 @@
1
+ import abc
2
+ import collections
3
+ import contextlib
4
+ import functools
5
+ import logging
6
+ import threading
7
+ import weakref
8
+ from collections import defaultdict, namedtuple
9
+ from typing import (
10
+ Any,
11
+ Callable,
12
+ cast,
13
+ Deque,
14
+ Dict,
15
+ List,
16
+ Optional,
17
+ Sequence,
18
+ Set,
19
+ Tuple,
20
+ Union,
21
+ )
22
+
23
+ import torch
24
+ from torch.autograd.variable import Variable
25
+ from torch.utils._python_dispatch import TorchDispatchMode
26
+ from torch.utils.hooks import RemovableHandle
27
+
28
+ log = logging.getLogger(__name__)
29
+
30
+
31
+ __all__ = [
32
+ "saved_tensors_hooks",
33
+ "save_on_cpu",
34
+ "disable_saved_tensors_hooks",
35
+ "register_multi_grad_hook",
36
+ "allow_mutation_on_saved_tensors",
37
+ "Node",
38
+ "GradientEdge",
39
+ "get_gradient_edge",
40
+ "increment_version",
41
+ ]
42
+
43
+
44
+ class Node(abc.ABC):
45
+ @abc.abstractmethod
46
+ def name(self) -> str:
47
+ r"""Return the name.
48
+
49
+ Example::
50
+
51
+ >>> import torch
52
+ >>> a = torch.tensor([0., 0., 0.], requires_grad=True)
53
+ >>> b = a.clone()
54
+ >>> assert isinstance(b.grad_fn, torch.autograd.graph.Node)
55
+ >>> print(b.grad_fn.name())
56
+ CloneBackward0
57
+ """
58
+ ...
59
+
60
+ @property
61
+ @abc.abstractmethod
62
+ def next_functions(self) -> Tuple[Tuple[Optional["Node"], int], ...]:
63
+ ...
64
+
65
+ @abc.abstractmethod
66
+ def metadata(self) -> dict:
67
+ r"""Return the metadata."""
68
+ ...
69
+
70
+ @abc.abstractmethod
71
+ def _register_hook_dict(self, tensor: torch.Tensor) -> None:
72
+ ...
73
+
74
+ @abc.abstractmethod
75
+ def register_hook(self, fn: Callable[..., Any]) -> RemovableHandle:
76
+ r"""Register a backward hook.
77
+
78
+ The hook will be called every time a gradient with respect to the
79
+ Node is computed. The hook should have the following signature::
80
+
81
+ hook(grad_inputs: Tuple[Tensor], grad_outputs: Tuple[Tensor]) -> Tuple[Tensor] or None
82
+
83
+
84
+ The hook should not modify its argument, but it can optionally return
85
+ a new gradient which will be used in place of :attr:`grad_inputs`.
86
+
87
+ This function returns a handle with a method ``handle.remove()``
88
+ that removes the hook from the Node.
89
+
90
+ .. note::
91
+ See :ref:`backward-hooks-execution` for more information on when this hook
92
+ is executed, and how its execution is ordered relative to other hooks.
93
+
94
+ Example::
95
+
96
+ >>> import torch
97
+ >>> a = torch.tensor([0., 0., 0.], requires_grad=True)
98
+ >>> b = a.clone()
99
+ >>> assert isinstance(b.grad_fn, torch.autograd.graph.Node)
100
+ >>> handle = b.grad_fn.register_hook(lambda gI, gO: (gO[0] * 2,))
101
+ >>> b.sum().backward(retain_graph=True)
102
+ >>> print(a.grad)
103
+ tensor([2., 2., 2.])
104
+ >>> handle.remove() # Removes the hook
105
+ >>> a.grad = None
106
+ >>> b.sum().backward(retain_graph=True)
107
+ >>> print(a.grad)
108
+ tensor([1., 1., 1.])
109
+ """
110
+ ...
111
+
112
+ @abc.abstractmethod
113
+ def register_prehook(self, fn: Callable[..., Any]) -> RemovableHandle:
114
+ r"""Register a backward pre-hook.
115
+
116
+ The hook will be called every time a gradient with respect to the
117
+ Node is computed. The hook should have the following signature::
118
+
119
+ hook(grad_outputs: Tuple[Tensor]) -> Tuple[Tensor] or None
120
+
121
+ The hook should not modify its argument, but it can optionally return
122
+ a new gradient which will be used in place of :attr:`grad_outputs`.
123
+
124
+ This function returns a handle with a method ``handle.remove()``
125
+ that removes the hook from the Node.
126
+
127
+ .. note::
128
+ See :ref:`backward-hooks-execution` for more information on when this hook
129
+ is executed, and how its execution is ordered relative to other hooks.
130
+
131
+ Example::
132
+
133
+ >>> a = torch.tensor([0., 0., 0.], requires_grad=True)
134
+ >>> b = a.clone()
135
+ >>> assert isinstance(b.grad_fn, torch.autograd.graph.Node)
136
+ >>> handle = b.grad_fn.register_prehook(lambda gI: (gI[0] * 2,))
137
+ >>> b.sum().backward(retain_graph=True)
138
+ >>> print(a.grad)
139
+ tensor([2., 2., 2.])
140
+ >>> handle.remove()
141
+ >>> a.grad = None
142
+ >>> b.sum().backward(retain_graph=True)
143
+ >>> print(a.grad)
144
+ tensor([1., 1., 1.])
145
+ """
146
+ ...
147
+
148
+ @classmethod
149
+ def __subclasshook__(cls, C):
150
+ if cls is Node:
151
+ if (
152
+ C is not None and C is getattr(torch._C._functions, C.__name__, None)
153
+ ) or issubclass(C, torch.autograd.function.BackwardCFunction):
154
+ return True
155
+ return NotImplemented
156
+
157
+
158
+ def _get_grad_fn_or_grad_acc(t):
159
+ if t.requires_grad and t.grad_fn is None:
160
+ return t.view_as(t).grad_fn.next_functions[0][0]
161
+ else:
162
+ return t.grad_fn
163
+
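Editorial aside (not part of the upstream file): for a leaf tensor, ``t.grad_fn`` is ``None``, so the helper above takes a no-op view to reach the ``AccumulateGrad`` node, e.g.:

    import torch

    t = torch.ones(2, requires_grad=True)              # leaf tensor: t.grad_fn is None
    acc = t.view_as(t).grad_fn.next_functions[0][0]    # the AccumulateGrad node for t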
164
+
165
+ GradientEdge = namedtuple("GradientEdge", ("node output_nr"))
166
+ GradientEdge.__doc__ = """\
167
+ Object representing a given gradient edge within the autograd graph.
168
+ To get the gradient edge where a given Tensor gradient will be computed,
169
+ you can do ``edge = autograd.graph.get_gradient_edge(tensor)``.
170
+ """
171
+
172
+
173
+ def get_gradient_edge(tensor):
174
+ """Get the gradient edge for computing the gradient of the given Tensor.
175
+
176
+ In particular, it is equivalent to call
177
+ ``g = autograd.grad(loss, input)`` and ``g = autograd.grad(loss, get_gradient_edge(input))``.
178
+ """
179
+ if not tensor.requires_grad:
180
+ raise RuntimeError(
181
+ "It is not possible to get the gradient edge for a Tensor that does not require gradients"
182
+ )
183
+ grad_fn = _get_grad_fn_or_grad_acc(tensor)
184
+
185
+ # Note that output_nr default to 0 which is the right value
186
+ # for the AccumulateGrad node.
187
+ return GradientEdge(grad_fn, tensor.output_nr)
188
+
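A short sketch of ``get_gradient_edge`` in use (illustrative only; it assumes a PyTorch build in which ``autograd.grad`` accepts a ``GradientEdge``, as the docstring above describes):

    import torch

    x = torch.randn(3, requires_grad=True)
    loss = (x * 2).sum()
    edge = torch.autograd.graph.get_gradient_edge(x)
    (gx,) = torch.autograd.grad(loss, edge)   # same gradient as torch.autograd.grad(loss, x)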
189
+
190
+ def increment_version(tensor):
191
+ """Update autograd metadata tracking whether the given Tensor was modified in place.
192
+
193
+ This is to enable more accurate error checking within the autograd engine.
194
+ It is already done automatically by PyTorch functions and within custom Function
195
+ when mark_dirty() is called appropriately, so you only need to call this explicitly
196
+ if you are doing an in-place operation on the Tensor data in a way that PyTorch doesn't
197
+ know about. For example, a custom kernel that reads the Tensor data_ptr and modifies
198
+ the memory inplace based on this pointer.
199
+
200
+ Note that incrementing the version counter multiple times for a single inplace operation
201
+ is not problematic.
202
+ """
203
+ torch._C._increment_version(tensor)
204
+
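A hedged example of when ``increment_version`` would be called (illustrative only; the external kernel is hypothetical):

    import torch

    t = torch.ones(4)
    # ... an out-of-band kernel writes into t.data_ptr() without autograd noticing ...
    torch.autograd.graph.increment_version(t)   # record the in-place modification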
205
+
206
+ class saved_tensors_hooks:
207
+ """Context-manager that sets a pair of pack / unpack hooks for saved tensors.
208
+
209
+ Use this context-manager to define how intermediary results of an operation
210
+ should be packed before saving, and unpacked on retrieval.
211
+
212
+ In that context, the ``pack_hook`` function will be called every time an
213
+ operation saves a tensor for backward (this includes intermediary results
214
+ saved using
215
+ :func:`~torch.autograd.function._ContextMethodMixin.save_for_backward` but
216
+ also those recorded by a PyTorch-defined operation). The output of
217
+ ``pack_hook`` is then stored in the computation graph instead of the
218
+ original tensor.
219
+
220
+ The ``unpack_hook`` is called when the saved tensor needs to be accessed,
221
+ namely when executing :func:`torch.Tensor.backward()` or
222
+ :func:`torch.autograd.grad()`. It takes as argument the *packed* object
223
+ returned by ``pack_hook`` and should return a tensor which has the same
224
+ content as the original tensor (passed as input to the corresponding
225
+ ``pack_hook``).
226
+
227
+ The hooks should have the following signatures:
228
+
229
+ pack_hook(tensor: Tensor) -> Any
230
+
231
+ unpack_hook(Any) -> Tensor
232
+
233
+ where the return value of ``pack_hook`` is a valid input to ``unpack_hook``.
234
+
235
+ In general, you want ``unpack_hook(pack_hook(t))`` to be equal to ``t`` in terms
236
+ of value, size, dtype and device.
237
+
238
+ Example::
239
+
240
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
241
+ >>> def pack_hook(x):
242
+ ... print("Packing", x)
243
+ ... return x
244
+ >>>
245
+ >>> def unpack_hook(x):
246
+ ... print("Unpacking", x)
247
+ ... return x
248
+ >>>
249
+ >>> a = torch.ones(5, requires_grad=True)
250
+ >>> b = torch.ones(5, requires_grad=True) * 2
251
+ >>> with torch.autograd.graph.saved_tensors_hooks(pack_hook, unpack_hook):
252
+ ... y = a * b
253
+ Packing tensor([1., 1., 1., 1., 1.], requires_grad=True)
254
+ Packing tensor([2., 2., 2., 2., 2.], grad_fn=<MulBackward0>)
255
+ >>> y.sum().backward()
256
+ Unpacking tensor([1., 1., 1., 1., 1.], requires_grad=True)
257
+ Unpacking tensor([2., 2., 2., 2., 2.], grad_fn=<MulBackward0>)
258
+
259
+ .. warning ::
260
+ Performing an inplace operation on the input to either hooks may lead
261
+ to undefined behavior.
262
+
263
+ .. warning ::
264
+ Only one pair of hooks is allowed at a time. When recursively nesting this
265
+ context-manager, only the inner-most pair of hooks will be applied.
266
+ """
267
+
268
+ def __init__(
269
+ self,
270
+ pack_hook: Callable[[torch.Tensor], Any],
271
+ unpack_hook: Callable[[Any], torch.Tensor],
272
+ ):
273
+ self.pack_hook = pack_hook
274
+ self.unpack_hook = unpack_hook
275
+
276
+ def __enter__(self):
277
+ torch._C._autograd._push_saved_tensors_default_hooks(
278
+ self.pack_hook, self.unpack_hook
279
+ )
280
+
281
+ def __exit__(self, *args: object):
282
+ torch._C._autograd._pop_saved_tensors_default_hooks()
283
+
284
+
285
+ class save_on_cpu(saved_tensors_hooks):
286
+ """Context manager under which tensors saved by the forward pass will be stored on cpu, then retrieved for backward.
287
+
288
+ When performing operations within this context manager, intermediary
289
+ results saved in the graph during the forward pass will be moved to CPU,
290
+ then copied back to the original device when needed for the backward pass.
291
+ If the graph was already on CPU, no tensor copy is performed.
292
+
293
+ Use this context-manager to trade compute for GPU memory usage (e.g.
294
+ when your model doesn't fit in GPU memory during training).
295
+
296
+ Args:
297
+ pin_memory (bool): If ``True`` tensors will be saved to CPU pinned memory
298
+ during packing and copied to GPU asynchronously during unpacking.
299
+ Defaults to ``False``.
300
+ Also see :ref:`cuda-memory-pinning`.
301
+
302
+
303
+ Example::
304
+
305
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
306
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
307
+ >>> a = torch.randn(5, requires_grad=True, device="cuda")
308
+ >>> b = torch.randn(5, requires_grad=True, device="cuda")
309
+ >>> c = torch.randn(5, requires_grad=True, device="cuda")
310
+ >>>
311
+ >>> def f(a, b, c):
312
+ ... prod_1 = a * b # a and b are saved on GPU
313
+ ... with torch.autograd.graph.save_on_cpu():
314
+ ... prod_2 = prod_1 * c # prod_1 and c are saved on CPU
315
+ ... y = prod_2 * a # prod_2 and a are saved on GPU
316
+ ... return y
317
+ >>>
318
+ >>> y = f(a, b, c)
319
+ >>> del a, b, c # for illustration only
320
+ >>> # the content of a, b, and prod_2 are still alive on GPU
321
+ >>> # the content of prod_1 and c only live on CPU
322
+ >>> y.sum().backward() # all CPU tensors are moved back to GPU, for backward
323
+ >>> # all intermediary tensors are released (deleted) after the call to backward
324
+
325
+ """
326
+
327
+ def __init__(self, pin_memory=False, device_type="cuda"):
328
+ device_module = getattr(torch, device_type, torch.cuda)
329
+
330
+ def pack_to_cpu(tensor):
331
+ if not pin_memory:
332
+ return (tensor.device, tensor.cpu())
333
+ packed = torch.empty(
334
+ tensor.size(),
335
+ dtype=tensor.dtype,
336
+ layout=tensor.layout,
337
+ pin_memory=(device_module.is_available() and not tensor.is_sparse),
338
+ )
339
+ packed.copy_(tensor)
340
+ return (tensor.device, packed)
341
+
342
+ def unpack_from_cpu(packed):
343
+ device, tensor = packed
344
+ return tensor.to(device, non_blocking=pin_memory)
345
+
346
+ super().__init__(pack_to_cpu, unpack_from_cpu)
347
+
348
+
349
+ @contextlib.contextmanager
350
+ def disable_saved_tensors_hooks(error_message):
351
+ """Context-manager that disables the saved tensors default hooks feature.
352
+
353
+ Useful if you are creating a feature that does not work with saved
354
+ tensors default hooks.
355
+
356
+ Args:
357
+ error_message (str): When saved tensors default hooks are used when they
358
+ have been disabled, a RuntimeError with this
359
+ error message gets raised.
360
+
361
+ Example::
362
+
363
+ >>> # xdoctest: +SKIP(failing)
364
+ >>> message = "saved tensors default hooks are disabled"
365
+ >>> with torch.autograd.graph.disable_saved_tensors_hooks(message):
366
+ ... # Raises RuntimeError: saved tensors default hooks are disabled
367
+ ... with torch.autograd.graph.save_on_cpu():
368
+ ... pass
369
+
370
+ """
371
+ try:
372
+ maybe_prev_message = (
373
+ torch._C._autograd._saved_tensors_hooks_get_disabled_error_message()
374
+ )
375
+ torch._C._autograd._saved_tensors_hooks_disable(error_message)
376
+ yield
377
+ finally:
378
+ # See NOTE: [disabled_error_message invariant]
379
+ if maybe_prev_message is None:
380
+ torch._C._autograd._saved_tensors_hooks_enable()
381
+ else:
382
+ torch._C._autograd._saved_tensors_hooks_disable(maybe_prev_message)
383
+
384
+
385
+ def register_multi_grad_hook(
386
+ tensors: Sequence[torch.Tensor],
387
+ fn: Union[
388
+ Callable[[Sequence[Optional[torch.Tensor]]], None],
389
+ Callable[[torch.Tensor], None],
390
+ ],
391
+ *,
392
+ mode: str = "all",
393
+ ):
394
+ r"""Register a multi-grad backward hook.
395
+
396
+ There are two supported modes: ``"all"`` and ``"any"``.
397
+
398
+ Under the ``"all"`` mode, the hook will be called after gradients with respect to every tensor in
399
+ :attr:`tensors` have been computed. If a tensor is in :attr:`tensors` but
400
+ is not part of the graph, or if a tensor is not needed to compute the gradients
401
+ for any ``inputs`` specified for the current ``.backward()`` or ``.grad()`` call,
402
+ this tensor will be ignored and the hook will not wait for its gradient to be
403
+ computed.
404
+
405
+ After every non-ignored tensor's gradient has been computed, :attr:`fn` will be
406
+ called with those gradients. ``None`` will be passed for tensors that did not
407
+ have their gradients computed.
408
+
409
+ Under the ``"any"`` mode, the hook will be called after the first gradient
410
+ with respect to a tensor in :attr:`tensors` has been computed. The hook
411
+ will be called with that gradient as its argument.
412
+
413
+ The hook should not modify its arguments.
414
+
415
+ This function returns a handle with a method ``handle.remove()`` that removes the hook.
416
+
417
+ .. note::
418
+ See :ref:`backward-hooks-execution` for more information on when this hook
419
+ is executed, and how its execution is ordered relative to other hooks.
420
+
421
+ Example::
422
+
423
+ >>> import torch
424
+ >>>
425
+ >>> a = torch.rand(2, 3, requires_grad=True)
426
+ >>> b = torch.rand(2, 3, requires_grad=True)
427
+ >>> c = a * b
428
+ >>> d = a * b
429
+ >>>
430
+ >>> def fn(grads):
431
+ ... print([g is not None for g in grads])
432
+ ...
433
+ >>> torch.autograd.graph.register_multi_grad_hook((a, b, c, d), fn)
434
+ >>>
435
+ >>> c.sum().backward(retain_graph=True)
436
+ [True, True, True, False]
437
+ >>> c.sum().backward(inputs=(a,), retain_graph=True)
438
+ [True, False, True, False]
439
+ >>>
440
+ """
441
+ supported_modes = ("all", "any")
442
+ if mode not in supported_modes:
443
+ raise ValueError(f"Expects mode to be one of {supported_modes} but got {mode}")
444
+
445
+ class Handle(RemovableHandle):
446
+ handles: Tuple[RemovableHandle, ...]
447
+
448
+ def __init__(self, handles: Tuple[RemovableHandle, ...]):
449
+ self.handles = handles
450
+
451
+ def remove(self):
452
+ for handle in self.handles:
453
+ handle.remove()
454
+
455
+ def __getstate__(self):
456
+ return self.handles
457
+
458
+ def __setstate__(self, state):
459
+ self.handles = state
460
+
461
+ if mode == "all":
462
+ count: Dict[int, int] = dict()
463
+ nb_calls = None
464
+ buffer: Dict[int, List[Optional[torch.Tensor]]] = dict()
465
+
466
+ grad_fns = list(map(_get_grad_fn_or_grad_acc, tensors))
467
+ len_tensors = len(tensors)
468
+
469
+ def get_inner_hook(idx):
470
+ def inner_hook(grad: torch.Tensor):
471
+ nonlocal count, nb_calls, buffer, fn
472
+ id = torch._C._current_graph_task_id()
473
+ assert (
474
+ id != -1
475
+ ), "expected this hook to be called inside a backward call"
476
+ count[id] = count.get(id, 0)
477
+ buffer[id] = buffer.get(id, [None] * len_tensors)
478
+
479
+ if count[id] == 0:
480
+ # On the first call, compute the actual nb_calls and buffer
481
+ nb_calls = sum(torch._C._will_engine_execute_node(g) for g in grad_fns) # type: ignore[attr-defined]
482
+
483
+ buffer[id][idx] = grad
484
+ count[id] += 1
485
+
486
+ if count[id] == nb_calls:
487
+ fn = cast(Callable[[Sequence[Optional[torch.Tensor]]], None], fn)
488
+ fn(buffer[id])
489
+ del count[id]
490
+ del buffer[id]
491
+
492
+ return inner_hook
493
+
494
+ handles: Tuple[RemovableHandle] = tuple(
495
+ t.register_hook(get_inner_hook(i)) for i, t in enumerate(tensors)
496
+ )
497
+ elif mode == "any":
498
+ fn = cast(Callable[[torch.Tensor], None], fn)
499
+ lock = threading.Lock()
500
+ ran_hook: Dict[int, bool] = defaultdict(bool)
501
+
502
+ @functools.wraps(fn)
503
+ def wrapped_fn(grad: torch.Tensor):
504
+ nonlocal ran_hook
505
+ id = torch._C._current_graph_task_id()
506
+ assert id != -1, "expected this hook to be called inside a backward call"
507
+ with lock:
508
+ prev, ran_hook[id] = ran_hook[id], True
509
+ if prev:
510
+ return
511
+ fn(grad)
512
+
513
+ handles = tuple(
514
+ tensor.register_hook(wrapped_fn)
515
+ for tensor in tensors
516
+ if tensor.requires_grad
517
+ )
518
+
519
+ return Handle(handles) # type: ignore[possibly-undefined]
520
+
521
+
522
+ # NOTE [Allow mutation on tensors saved for backward]
523
+ #
524
+ # 1. Tensor gets saved for backward
525
+ # - remember the python object id and the version of the tensor
526
+ # - remember aliasing information (data_ptr of base + version)
527
+ # - save the original so we control its lifetime
528
+ # 2. Any time a tensor gets in-placed
529
+ # - for each tensor aliased to it:
530
+ # - check using its object id and version to see if it has been saved
531
+ # - if it has been saved, clone it
532
+ # - delete the reference to the original
533
+ # 3. during backward
534
+ # - if the clone exists, the tensor must've been modified in-place
535
+ _allow_mutation_on_saved_tensors_enabled = False
536
+
537
+
538
+ def _get_tid(t) -> Tuple[int, int, int]:
539
+ return (id(t), t.data_ptr(), t._version)
540
+
541
+
542
+ def _get_sid(t) -> Tuple[int, int]:
543
+ return (t.data_ptr(), t._version)
544
+
545
+
546
+ class _Handle:
547
+ pass
548
+
549
+
550
+ class _swap_with_cloned(saved_tensors_hooks):
551
+ def __init__(self, ctx):
552
+ def pack_hook(t):
553
+ tid = _get_tid(t)
554
+ sid = _get_sid(t)
555
+ # Tensors saved for backward have an entry in _tid_to_weakhandle
556
+ handle: Optional[_Handle] = None
557
+
558
+ # Save aliasing information
559
+ ctx.sid_to_tid[sid].add(tid)
560
+
561
+ # NB: The same tensor (of the same version) can be saved multiple times
562
+ if tid not in ctx.tid_to_weakhandle:
563
+ handle = _Handle()
564
+ ctx.tid_to_weakhandle[tid] = handle
565
+ ctx.original[handle] = t
566
+ else:
567
+ # Store an additional strong reference to the handle
568
+ handle = ctx.tid_to_weakhandle[tid]
569
+ return handle
570
+
571
+ def unpack_hook(tup):
572
+ handle = tup
573
+ error_msg = (
574
+ "Trying to backward outside of the 'allow_mutation_on_saved_tensors' context "
575
+ "in which the graph was originally recorded."
576
+ )
577
+ assert _allow_mutation_on_saved_tensors_enabled, error_msg
578
+ if handle in ctx.cloned:
579
+ res = ctx.cloned[handle]
580
+ else:
581
+ assert handle in ctx.original, error_msg
582
+ res = ctx.original[handle]
583
+ return res
584
+
585
+ super().__init__(pack_hook, unpack_hook)
586
+
587
+
588
+ class _CloneArgBeforeMutateMode(TorchDispatchMode):
589
+ def __init__(self, ctx):
590
+ self.ctx = ctx
591
+
592
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
593
+ kwargs = kwargs or {}
594
+
595
+ for idx, arg in enumerate(func._schema.arguments):
596
+ if arg.alias_info is not None and arg.alias_info.is_write:
597
+ t = kwargs["out"] if arg.is_out else args[idx]
598
+ tid = _get_tid(t)
599
+ sid = _get_sid(t)
600
+ ctx = self.ctx
601
+ if sid in ctx.sid_to_tid:
602
+ for tid in ctx.sid_to_tid[sid]:
603
+ if tid not in ctx.tid_to_weakhandle:
604
+ # We know that if tid is in sid_to_tid, then it must also be in
605
+ # tid_to_weakhandle. However, it is possible for the tensor to be
606
+ # saved at one point, but cleared by backward before it is modified
607
+ # in-place. Consider the following example:
608
+ #
609
+ # >>> a = torch.randn(2, 3, requires_grad=True).clone()
610
+ # >>> out = (a**2).sum()
611
+ # >>> out.backward()
612
+ # >>> a.sin_()
613
+ continue
614
+ handle = ctx.tid_to_weakhandle[tid]
615
+ if handle in ctx.cloned:
616
+ # The same exact tensor has been cloned already
617
+ continue
618
+ ctx.cloned[handle] = ctx.original[handle].clone()
619
+ del ctx.original[handle]
620
+
621
+ rs = func(*args, **kwargs)
622
+ return rs
623
+
624
+
625
+ class _AllowMutationOnSavedContext:
626
+ def __init__(self):
627
+ self.cloned: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
628
+ self.original: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
629
+ self.tid_to_weakhandle: weakref.WeakValueDictionary = (
630
+ weakref.WeakValueDictionary()
631
+ )
632
+ self.sid_to_tid: Dict[Tuple[int, int], Set[Tuple[int, int, int]]] = defaultdict(
633
+ set
634
+ )
635
+
636
+ def clear(self):
637
+ self.cloned.clear()
638
+ self.original.clear()
639
+ self.tid_to_weakhandle.clear()
640
+ self.sid_to_tid.clear()
641
+
642
+
643
+ @contextlib.contextmanager
644
+ def allow_mutation_on_saved_tensors():
645
+ """Context manager under which mutating tensors saved for backward is allowed.
646
+
647
+ Under this context manager, tensors saved for backward are cloned on mutation,
648
+ so the original version can still be used during backward. Normally, mutating a tensor
649
+ saved for backward will result in an error raised when it's used during backward.
650
+
651
+ To ensure the correct behavior, both the forward and backward should be run under
652
+ the same context manager.
653
+
654
+ returns:
655
+ An _AllowMutationOnSavedContext object storing the state managed by this
656
+ context manager. This object can be useful for debugging purposes. The state
657
+ managed by the context manager is automatically cleared upon exiting.
658
+
659
+ Example::
660
+
661
+ >>> import torch
662
+ >>> with torch.autograd.graph.allow_mutation_on_saved_tensors():
663
+ ... # forward
664
+ ... a = torch.ones(2, 3, requires_grad=True)
665
+ ... b = a.clone()
666
+ ... out = (b**2).sum()
667
+ ... b.sin_()
668
+ ... # backward
669
+ ... out.sum().backward()
670
+ ...
671
+ tensor([[0.8415, 0.8415, 0.8415],
672
+ [0.8415, 0.8415, 0.8415]], grad_fn=<SinBackward0>)
673
+ """
674
+ global _allow_mutation_on_saved_tensors_enabled
675
+
676
+ ctx = _AllowMutationOnSavedContext()
677
+
678
+ with _swap_with_cloned(ctx), _CloneArgBeforeMutateMode(ctx):
679
+ try:
680
+ if _allow_mutation_on_saved_tensors_enabled:
681
+ raise RuntimeError(
682
+ "allow_mutation_on_saved_tensors contexts cannot be nested"
683
+ )
684
+ _allow_mutation_on_saved_tensors_enabled = True
685
+ yield ctx
686
+ finally:
687
+ ctx.clear()
688
+ _allow_mutation_on_saved_tensors_enabled = False
689
+
690
+
691
+ def _register_logging_hooks_on_whole_graph(t_outputs: List[torch.Tensor]):
692
+ grad_fns = list(map(_get_grad_fn_or_grad_acc, t_outputs))
693
+
694
+ def iter_graph(roots):
695
+ if not roots:
696
+ return
697
+ seen = set()
698
+ q: Deque = collections.deque()
699
+ for node in roots:
700
+ if node is not None:
701
+ seen.add(node)
702
+ q.append(node)
703
+
704
+ while q:
705
+ node = q.popleft()
706
+ for fn, _idx in node.next_functions:
707
+ if fn in seen or fn is None:
708
+ continue
709
+ seen.add(fn)
710
+ q.append(fn)
711
+
712
+ yield node
713
+
714
+ def fmt(t):
715
+ # Avoid circular import
716
+ from torch.testing._internal.common_utils import dtype_abbrs
717
+
718
+ if t is None:
719
+ return "None"
720
+ return f"{dtype_abbrs[t.dtype]}[{', '.join(map(str, t.shape))}]"
721
+
722
+ def prehook(grad_outputs):
723
+ node = torch._C._current_autograd_node()
724
+ grad_outputs_str = f"[{','.join(fmt(t) for t in grad_outputs)}]"
725
+ log_str = f"Executing: {node} with grad_outputs: {grad_outputs_str}"
726
+ log.debug(log_str)
727
+
728
+ handles = []
729
+ for node in iter_graph(grad_fns):
730
+ handles.append(node.register_prehook(prehook))
731
+
732
+ def unregister_hooks():
733
+ for handle in handles:
734
+ handle.remove()
735
+
736
+ return unregister_hooks
737
+
738
+
739
+ def _engine_run_backward(t_outputs, *args, **kwargs):
740
+ attach_logging_hooks = log.getEffectiveLevel() <= logging.DEBUG
741
+ if attach_logging_hooks:
742
+ unregister_hooks = _register_logging_hooks_on_whole_graph(t_outputs)
743
+ try:
744
+ return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
745
+ t_outputs, *args, **kwargs
746
+ ) # Calls into the C++ engine to run the backward pass
747
+ finally:
748
+ if attach_logging_hooks:
749
+ unregister_hooks() # type: ignore[possibly-undefined]
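Illustrative sketch (not part of the upstream file): because ``_engine_run_backward`` only attaches the logging prehooks when this module's logger is at DEBUG level, per-node execution logs can be enabled roughly like so:

    import logging
    import torch

    logging.basicConfig()
    logging.getLogger("torch.autograd.graph").setLevel(logging.DEBUG)

    x = torch.randn(3, requires_grad=True)
    (x * 2).sum().backward()   # each executed Node and its grad_output shapes are logged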
venv/lib/python3.10/site-packages/torch/autograd/profiler.py ADDED
@@ -0,0 +1,1042 @@
1
+ from collections import defaultdict
2
+ from typing import Any, Dict, List, Optional
3
+ from warnings import warn
4
+
5
+ import torch
6
+
7
+ import torch.cuda
8
+ from torch._C import _get_privateuse1_backend_name
9
+ from torch._C._profiler import _ExperimentalConfig
10
+
11
+ from torch.autograd import (
12
+ _disable_profiler,
13
+ _enable_profiler,
14
+ _kineto_step,
15
+ _prepare_profiler,
16
+ _ProfilerResult,
17
+ _supported_activities,
18
+ DeviceType,
19
+ kineto_available,
20
+ ProfilerActivity,
21
+ ProfilerConfig,
22
+ ProfilerState,
23
+ )
24
+ from torch.autograd.profiler_util import (
25
+ _filter_name,
26
+ _filter_stack_entry,
27
+ _rewrite_name,
28
+ EventList,
29
+ FunctionEvent,
30
+ MEMORY_EVENT_NAME,
31
+ MemRecordsAcc,
32
+ OUT_OF_MEMORY_EVENT_NAME,
33
+ )
34
+ from torch.futures import Future
35
+
36
+ __all__ = [
37
+ "profile",
38
+ "record_function",
39
+ "emit_itt",
40
+ "emit_nvtx",
41
+ "load_nvprof",
42
+ "EnforceUnique",
43
+ "parse_nvprof_trace",
44
+ "KinetoStepTracker",
45
+ "EventList",
46
+ "FunctionEvent",
47
+ "MemRecordsAcc",
48
+ ]
49
+
50
+ try:
51
+ # Available in Python >= 3.2
52
+ from contextlib import ContextDecorator as _ContextDecorator
53
+ except ImportError:
54
+ import functools
55
+
56
+ class _ContextDecorator: # type: ignore[no-redef]
57
+ def __enter__(self):
58
+ raise NotImplementedError
59
+
60
+ def __exit__(self, exc_type, exc_val, exc_tb):
61
+ raise NotImplementedError
62
+
63
+ def __call__(self, func):
64
+ @functools.wraps(func)
65
+ def wrapped(*args, **kwargs):
66
+ with self:
67
+ return func(*args, **kwargs)
68
+
69
+ return wrapped
70
+
71
+
72
+ # global python state - whether profiler is currently enabled
73
+ # useful for fast python checks to reduce latency
74
+ _is_profiler_enabled: bool = False
75
+
76
+
77
+ def _set_is_profiler_enabled(enable: bool):
78
+ global _is_profiler_enabled
79
+ _is_profiler_enabled = enable
80
+
81
+
82
+ def _run_on_profiler_start():
83
+ _set_is_profiler_enabled(True)
84
+
85
+
86
+ def _run_on_profiler_stop():
87
+ _set_is_profiler_enabled(False)
88
+
89
+
90
+ class profile:
91
+ """Context manager that manages autograd profiler state and holds a summary of results.
92
+
93
+ Under the hood it just records events of functions being executed in C++ and
94
+ exposes those events to Python. You can wrap any code into it and it will
95
+ only report runtime of PyTorch functions.
96
+ Note: profiler is thread local and is automatically propagated into the async tasks
97
+
98
+ Args:
99
+ enabled (bool, optional): Setting this to False makes this context manager a no-op.
100
+
101
+ use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API.
102
+ Adds approximately 4us of overhead to each tensor operation.
103
+
104
+ record_shapes (bool, optional): If shapes recording is set, information
105
+ about input dimensions will be collected. This allows one to see which
106
+ dimensions have been used under the hood and further group by them
107
+ using prof.key_averages(group_by_input_shape=True). Please note that
108
+ shape recording might skew your profiling data. It is recommended to
109
+ use separate runs with and without shape recording to validate the timing.
110
+ Most likely the skew will be negligible for bottom most events (in a case
111
+ of nested function calls). But for higher level functions the total
112
+ self cpu time might be artificially increased because of the shape
113
+ collection.
114
+
115
+ with_flops (bool, optional): If with_flops is set, the profiler will estimate
116
+ the FLOPs (floating point operations) value using the operator's input shape.
117
+ This allows one to estimate the hardware performance. Currently,
118
+ this option only works for the matrix multiplication and 2D convolution operators.
119
+
120
+ profile_memory (bool, optional): track tensor memory allocation/deallocation.
121
+
122
+ with_stack (bool, optional): record source information (file and line number) for the ops.
123
+
124
+ with_modules (bool): record module hierarchy (including function names)
125
+ corresponding to the callstack of the op. E.g., if module A's forward calls
126
+ module B's forward which contains an aten::add op,
127
+ then aten::add's module hierarchy is A.B
128
+ Note that this support exists, at the moment, only for TorchScript models
129
+ and not eager mode models.
130
+
131
+ use_kineto (bool, optional): experimental, enable profiling with Kineto profiler.
132
+
133
+ use_cpu (bool, optional): profile CPU events; setting to ``False`` requires
134
+ ``use_kineto=True`` and can be used to lower the overhead for GPU-only profiling.
135
+
136
+ experimental_config (_ExperimentalConfig) : A set of experimental options
137
+ used by profiler libraries like Kineto. Note, backward compatibility is not guaranteed.
138
+
139
+
140
+ .. warning::
141
+ Enabling memory profiling or source attribution incurs additional profiler
142
+ overhead
143
+
144
+ .. warning::
145
+ This context manager should not be called recursively, i.e., no nested
146
+ instances are allowed
147
+
148
+ .. warning::
149
+ Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_),
150
+ one cannot use the profiler with ``use_cuda = True`` to benchmark
151
+ DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading,
152
+ please use ``use_cuda = False`` or ``num_workers = 0``.
153
+
154
+ Example:
155
+ >>> # xdoctest: +SKIP
156
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER)
157
+ >>> x = torch.randn((1, 1), requires_grad=True)
158
+ >>> with torch.autograd.profiler.profile() as prof:
159
+ >>> for _ in range(100): # any normal python code, really!
160
+ >>> y = x ** 2
161
+ >>> y.backward()
162
+ >>> # NOTE: some columns were removed for brevity
163
+ >>> print(prof.key_averages().table(sort_by="self_cpu_time_total"))
164
+ ----------------------------------- --------------- --------------- ---------------
165
+ Name Self CPU total CPU time avg Number of Calls
166
+ ----------------------------------- --------------- --------------- ---------------
167
+ mul 32.048ms 32.048ms 200
168
+ pow 27.041ms 27.041ms 200
169
+ PowBackward0 9.727ms 55.483ms 100
170
+ torch::autograd::AccumulateGrad 9.148ms 9.148ms 100
171
+ torch::autograd::GraphRoot 691.816us 691.816us 100
172
+ ----------------------------------- --------------- --------------- ---------------
173
+
174
+ """
175
+
176
+ def __init__(
177
+ self,
178
+ enabled=True,
179
+ *,
180
+ use_cuda=False,
181
+ use_device=None,
182
+ record_shapes=False,
183
+ with_flops=False,
184
+ profile_memory=False,
185
+ with_stack=False,
186
+ with_modules=False,
187
+ use_kineto=False,
188
+ use_cpu=True,
189
+ use_mtia=False,
190
+ experimental_config=None,
191
+ ):
192
+ self.enabled: bool = enabled
193
+ if not self.enabled:
194
+ return
195
+ self.use_cuda = use_cuda
196
+ self.use_device: Optional[str] = (
197
+ use_device if use_device != "privateuseone" else None
198
+ )
199
+ self.function_events: Optional[EventList] = None
200
+ self.entered = False
201
+ self.record_shapes = record_shapes
202
+ self.with_flops = with_flops
203
+ self.record_shapes |= self.with_flops
204
+ self.profile_memory = profile_memory
205
+ self.with_stack = with_stack
206
+ self.with_modules = with_modules
207
+ self.use_cpu = use_cpu
208
+ self.use_mtia = use_mtia
209
+ if experimental_config is None:
210
+ experimental_config = _ExperimentalConfig()
211
+ self.experimental_config = experimental_config
212
+ self.kineto_results: Optional[_ProfilerResult] = None
213
+
214
+ if not self.use_cpu:
215
+ assert (
216
+ use_kineto
217
+ ), "Device-only events supported only with Kineto (use_kineto=True)"
218
+
219
+ if self.use_device == "cuda":
220
+ self.use_device = None
221
+ self.use_cuda = True
222
+
223
+ if self.use_device and self.use_device != _get_privateuse1_backend_name():
224
+ warn(f"{self.use_device} doesn't support profile.")
225
+ self.use_device = None
226
+
227
+ if self.use_cuda and not torch.cuda.is_available():
228
+ warn("CUDA is not available, disabling CUDA profiling")
229
+ self.use_cuda = False
230
+
231
+ self.kineto_activities = set()
232
+ if self.use_cpu:
233
+ self.kineto_activities.add(ProfilerActivity.CPU)
234
+ if self.use_mtia:
235
+ self.kineto_activities.add(ProfilerActivity.MTIA)
236
+
237
+ self.profiler_kind = ProfilerState.KINETO
238
+ if self.use_cuda:
239
+ if not use_kineto or ProfilerActivity.CUDA not in _supported_activities():
240
+ assert self.use_cpu, "Legacy CUDA profiling requires use_cpu=True"
241
+ self.profiler_kind = ProfilerState.KINETO_GPU_FALLBACK
242
+ else:
243
+ self.kineto_activities.add(ProfilerActivity.CUDA)
244
+
245
+ if self.use_device:
246
+ if (
247
+ not use_kineto
248
+ or ProfilerActivity.PrivateUse1 not in _supported_activities()
249
+ ):
250
+ assert (
251
+ self.use_cpu
252
+ ), "Legacy custombackend profiling requires use_cpu=True"
253
+ self.profiler_kind = ProfilerState.KINETO_PRIVATEUSE1_FALLBACK
254
+ else:
255
+ self.kineto_activities.add(ProfilerActivity.PrivateUse1)
256
+ self.profiler_kind = ProfilerState.KINETO_PRIVATEUSE1
257
+
258
+ assert (
259
+ len(self.kineto_activities) > 0
260
+ ), "No activities specified for the profiler"
261
+
262
+ def config(self):
263
+ return ProfilerConfig(
264
+ self.profiler_kind,
265
+ self.record_shapes,
266
+ self.profile_memory,
267
+ self.with_stack,
268
+ self.with_flops,
269
+ self.with_modules,
270
+ self.experimental_config,
271
+ )
272
+
273
+ def __enter__(self):
274
+ if not self.enabled:
275
+ return
276
+ if self.entered:
277
+ raise RuntimeError("Profiler context manager is not reentrant")
278
+ self._prepare_trace()
279
+ self._start_trace()
280
+ return self
281
+
282
+ def _prepare_trace(self):
283
+ self.entered = True
284
+ _prepare_profiler(self.config(), self.kineto_activities)
285
+
286
+ def _start_trace(self):
287
+ self.entered = True
288
+ _run_on_profiler_start()
289
+ _enable_profiler(self.config(), self.kineto_activities)
290
+
291
+ def __exit__(self, exc_type, exc_val, exc_tb):
292
+ if not self.enabled:
293
+ return
294
+ if self.use_cuda:
295
+ torch.cuda.synchronize()
296
+ self.kineto_results = _disable_profiler()
297
+ _run_on_profiler_stop()
298
+ parsed_results = self._parse_kineto_results(self.kineto_results)
299
+ self.function_events = EventList(
300
+ parsed_results,
301
+ use_cuda=self.use_cuda,
302
+ use_device=self.use_device,
303
+ profile_memory=self.profile_memory,
304
+ with_flops=self.with_flops,
305
+ )
306
+ self.function_events._build_tree()
307
+ return False
308
+
309
+ def __repr__(self):
310
+ if self.function_events is None:
311
+ return "<unfinished torch.autograd.profile>"
312
+ return repr(self.function_events)
313
+
314
+ def __str__(self):
315
+ if self.function_events is None:
316
+ return "<unfinished torch.autograd.profile>"
317
+ return str(self.function_events)
318
+
319
+ def _check_finish(self):
320
+ if self.function_events is None:
321
+ raise RuntimeError("Profiler didn't finish running")
322
+
323
+ def table(
324
+ self,
325
+ sort_by=None,
326
+ row_limit=100,
327
+ max_src_column_width=75,
328
+ max_name_column_width=55,
329
+ max_shapes_column_width=80,
330
+ header=None,
331
+ top_level_events_only=False,
332
+ ):
333
+ self._check_finish()
334
+ assert self.function_events is not None
335
+ return self.function_events.table(
336
+ sort_by=sort_by,
337
+ row_limit=row_limit,
338
+ max_src_column_width=max_src_column_width,
339
+ max_name_column_width=max_name_column_width,
340
+ max_shapes_column_width=max_shapes_column_width,
341
+ header=header,
342
+ top_level_events_only=top_level_events_only,
343
+ )
344
+
345
+ table.__doc__ = EventList.table.__doc__
346
+
347
+ def export_chrome_trace(self, path):
348
+ self._check_finish()
349
+ if kineto_available():
350
+ self.kineto_results.save(path) # type: ignore[union-attr]
351
+ else:
352
+ return self.function_events.export_chrome_trace(path) # type: ignore[union-attr]
353
+
354
+ export_chrome_trace.__doc__ = EventList.export_chrome_trace.__doc__
355
+
356
+ def export_stacks(self, path: str, metric: str = "self_cpu_time_total"):
357
+ self._check_finish()
358
+ assert self.function_events is not None, "Expected profiling results"
359
+ assert self.with_stack, "export_stacks() requires with_stack=True"
360
+ return self.function_events.export_stacks(path, metric)
361
+
362
+ def key_averages(self, group_by_input_shape=False, group_by_stack_n=0):
363
+ self._check_finish()
364
+ assert self.function_events is not None, "Expected profiling results"
365
+ return self.function_events.key_averages(group_by_input_shape, group_by_stack_n)
366
+
367
+ key_averages.__doc__ = EventList.key_averages.__doc__
368
+
369
+ def total_average(self):
370
+ self._check_finish()
371
+ assert self.function_events is not None, "Expected profiling results"
372
+ return self.function_events.total_average()
373
+
374
+ total_average.__doc__ = EventList.total_average.__doc__
375
+
376
+ @property
377
+ def self_cpu_time_total(self):
378
+ """Returns total time spent on CPU.
379
+
380
+ The total time is a sum of all self times across all the events.
381
+ """
382
+ self._check_finish()
383
+ assert self.function_events is not None
384
+ return self.function_events.self_cpu_time_total
385
+
386
+ def _parse_kineto_results(self, result: _ProfilerResult):
387
+ # result.events() has most of the events - PyTorch op-level and device-level events
388
+
389
+ trace_start_us = result.trace_start_us()
390
+ mem_records = [
391
+ [evt, False] for evt in result.events() if evt.name() == MEMORY_EVENT_NAME
392
+ ]
393
+ oom_records = [
394
+ evt for evt in result.events() if evt.name() == OUT_OF_MEMORY_EVENT_NAME
395
+ ]
396
+ mem_records_acc = MemRecordsAcc(mem_records)
397
+
398
+ def _cpu_memory_usage(mem_record):
399
+ return (
400
+ mem_record.nbytes()
401
+ if mem_record.device_type()
402
+ in [DeviceType.CPU, DeviceType.MKLDNN, DeviceType.IDEEP]
403
+ else 0
404
+ )
405
+
406
+ def _cuda_memory_usage(mem_record):
407
+ return (
408
+ mem_record.nbytes()
409
+ if mem_record.device_type() in [DeviceType.CUDA, DeviceType.HIP]
410
+ else 0
411
+ )
412
+
413
+ def _privateuse1_memory_usage(mem_record):
414
+ return (
415
+ mem_record.nbytes()
416
+ if mem_record.device_type() in [DeviceType.PrivateUse1]
417
+ else 0
418
+ )
419
+
420
+ # Create and return FunctionEvent list
421
+ function_events = []
422
+ device_corr_map: Dict[int, List[FunctionEvent]] = {}
423
+ max_evt_id = 0
424
+ for kineto_event in result.events():
425
+ if _filter_name(kineto_event.name()):
426
+ continue
427
+ rel_start_us = kineto_event.start_us() - trace_start_us
428
+ rel_end_us = rel_start_us + kineto_event.duration_us()
429
+ abs_end_us = kineto_event.start_us() + kineto_event.duration_us()
430
+
431
+ cpu_memory_usage = 0
432
+ cuda_memory_usage = 0
433
+ privateuse1_memory_usage = 0
434
+ if kineto_event.device_type() == DeviceType.CPU:
435
+ # find the corresponding memory allocation events
436
+ for mem_record in mem_records_acc.in_interval(
437
+ kineto_event.start_us(), abs_end_us
438
+ ):
439
+ cpu_memory_usage += _cpu_memory_usage(mem_record[0])
440
+ cuda_memory_usage += _cuda_memory_usage(mem_record[0])
441
+ privateuse1_memory_usage += _privateuse1_memory_usage(mem_record[0])
442
+ mem_record[1] = True
443
+
444
+ is_async = kineto_event.is_async() or (
445
+ kineto_event.start_thread_id() != kineto_event.end_thread_id()
446
+ )
447
+
448
+ fe = FunctionEvent(
449
+ id=kineto_event.correlation_id(),
450
+ name=_rewrite_name(name=kineto_event.name(), with_wildcard=True),
451
+ trace_name=_rewrite_name(name=kineto_event.name(), with_wildcard=False),
452
+ thread=kineto_event.start_thread_id(),
453
+ start_us=rel_start_us,
454
+ end_us=rel_end_us,
455
+ fwd_thread=kineto_event.fwd_thread_id(),
456
+ input_shapes=kineto_event.shapes(),
457
+ concrete_inputs=kineto_event.concrete_inputs(),
458
+ stack=[
459
+ entry
460
+ for entry in kineto_event.stack()
461
+ if _filter_stack_entry(entry)
462
+ ],
463
+ scope=kineto_event.scope(),
464
+ use_device=self.use_device,
465
+ cpu_memory_usage=cpu_memory_usage,
466
+ cuda_memory_usage=cuda_memory_usage,
467
+ privateuse1_memory_usage=privateuse1_memory_usage,
468
+ is_async=is_async,
469
+ sequence_nr=kineto_event.sequence_nr(),
470
+ device_type=kineto_event.device_type(),
471
+ device_index=kineto_event.device_index(),
472
+ flops=kineto_event.flops(),
473
+ )
474
+ max_evt_id = max(max_evt_id, fe.id)
475
+ if fe.device_type == DeviceType.CPU and not fe.is_async:
476
+ if self.use_device:
477
+ privateuse1_time = kineto_event.privateuse1_elapsed_us()
478
+ if privateuse1_time > 0:
479
+ fe.append_kernel(fe.name, fe.device_index, privateuse1_time)
480
+ fe.is_legacy = True
481
+ else:
482
+ # Check if we have CUDA time as a fallback
483
+ cuda_time = kineto_event.cuda_elapsed_us()
484
+ if cuda_time > 0:
485
+ fe.append_kernel(fe.name, fe.device_index, cuda_time)
486
+ fe.is_legacy = True
487
+ function_events.append(fe)
488
+ corr_id = kineto_event.linked_correlation_id()
489
+ if corr_id > 0:
490
+ if corr_id not in device_corr_map:
491
+ device_corr_map[corr_id] = []
492
+ device_corr_map[corr_id].append(fe)
493
+
494
+ # associate CUDA kernels and CUDA runtime (CPU) with CPU events
495
+ for fe in function_events:
496
+ if (
497
+ fe.device_type == DeviceType.CPU
498
+ and not fe.is_async
499
+ and fe.id in device_corr_map
500
+ ):
501
+ for f_evt in device_corr_map[fe.id]:
502
+ if f_evt.device_type == DeviceType.CUDA:
503
+ fe.append_kernel(
504
+ f_evt.name,
505
+ f_evt.device_index,
506
+ f_evt.time_range.end - f_evt.time_range.start,
507
+ )
508
+ elif f_evt.device_type == DeviceType.CPU:
509
+ # make sure that 'thread' of a CPU Kineto (e.g. CUDA Runtime) event is associated
510
+ # with the 'thread' of the corresponding linked PyTorch event to properly track
511
+ # parents and children
512
+ f_evt.thread = fe.thread
513
+
514
+ def createFunctionEventForMemoryEvents(evt):
515
+ rel_start_us = evt.start_us() - trace_start_us
516
+ fe = FunctionEvent(
517
+ id=max_evt_id,
518
+ name=evt.name(),
519
+ trace_name=None, # not outputting in the trace
520
+ thread=evt.start_thread_id(),
521
+ start_us=rel_start_us,
522
+ end_us=rel_start_us, # no duration
523
+ fwd_thread=evt.start_thread_id(),
524
+ input_shapes=[],
525
+ stack=[],
526
+ scope=0, # RecordScope::FUNCTION
527
+ use_device=self.use_device,
528
+ cpu_memory_usage=_cpu_memory_usage(evt),
529
+ cuda_memory_usage=_cuda_memory_usage(evt),
530
+ privateuse1_memory_usage=_privateuse1_memory_usage(evt),
531
+ is_async=False,
532
+ sequence_nr=-1,
533
+ device_type=DeviceType.CPU,
534
+ device_index=0,
535
+ )
536
+ return fe
537
+
538
+ # output top-level memory events
539
+ for mem_record in mem_records:
540
+ if not mem_record[1]:
541
+ max_evt_id += 1
542
+ fe = createFunctionEventForMemoryEvents(mem_record[0])
543
+ function_events.append(fe)
544
+
545
+ for oom_record in oom_records:
546
+ max_evt_id += 1
547
+ fe = createFunctionEventForMemoryEvents(oom_record)
548
+ function_events.append(fe)
549
+
550
+ function_events.sort(
551
+ key=lambda evt: [evt.time_range.start, -evt.time_range.end]
552
+ )
553
+ return function_events
554
+
555
+
556
+ class record_function(_ContextDecorator):
557
+ """Context manager/function decorator that adds a label to a code block/function when running autograd profiler.
558
+
559
+ It is useful for labeling a specific region of code in the profiler trace.
560
+
561
+ Args:
562
+ name (str): Label assigned to the block of code.
563
+ node_id (int): ID of node, for distributed profiling. Unset in
564
+ non-distributed cases.
565
+
566
+ Example:
567
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER)
568
+ >>> x = torch.randn((1, 1), requires_grad=True)
569
+ >>> with torch.autograd.profiler.profile() as prof:
570
+ ... y = x ** 2
571
+ ... with torch.autograd.profiler.record_function("label-z"): # label the block
572
+ ... z = y ** 3
573
+ ... y.backward()
574
+ ...
575
+ >>> # xdoctest: +IGNORE_WANT
576
+ >>> # NOTE: some columns were removed for brevity
577
+ >>> print(prof.key_averages().table(sort_by="self_cpu_time_total"))
578
+ ----------------------------------- --------------- --------------- ---------------
579
+ Name Self CPU total % CPU time avg Number of Calls
580
+ ----------------------------------- --------------- --------------- ---------------
581
+ pow 60.77% 47.470us 3
582
+ mul 21.73% 25.465us 2
583
+ PowBackward0 12.03% 121.891us 1
584
+ torch::autograd::AccumulateGrad 2.70% 6.324us 1
585
+ label-z 2.13% 12.421us 1
586
+ torch::autograd::GraphRoot 0.64% 1.503us 1
587
+ ----------------------------------- --------------- --------------- ---------------
588
+ Self CPU time total: 234.344us
589
+ CUDA time total: 0.000us
590
+
591
+ """
592
+
593
+ def __init__(self, name: str, args: Optional[str] = None):
594
+ self.name: str = name
595
+ self.args: Optional[str] = args
596
+ # Whether or not we should run record function's end callbacks when exiting.
597
+ self.run_callbacks_on_exit: bool = True
598
+ # TODO: TorchScript ignores standard type annotation here
599
+ # self.record: Optional["torch.classes.profiler._RecordFunction"] = None
600
+ self.record = torch.jit.annotate(
601
+ Optional["torch.classes.profiler._RecordFunction"], None
602
+ )
603
+
604
+ def __enter__(self):
605
+ self.record = torch.ops.profiler._record_function_enter_new(
606
+ self.name, self.args
607
+ )
608
+ return self
609
+
610
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any):
611
+ if not self.run_callbacks_on_exit:
612
+ return
613
+
614
+ # Local variable is needed by TorchScript to refine Optional[T] to T
615
+ record = self.record
616
+ assert record is not None
617
+
618
+ # TODO: Too slow with __torch_function__ handling enabled
619
+ # See https://github.com/pytorch/pytorch/issues/76410
620
+ if not torch.jit.is_scripting():
621
+ with torch._C.DisableTorchFunctionSubclass():
622
+ torch.ops.profiler._record_function_exit._RecordFunction(record)
623
+ else:
624
+ torch.ops.profiler._record_function_exit(record)
625
+
626
+ def _call_end_callbacks_on_future(self, fut: Future[Any]) -> Future[Any]:
627
+ """Use for profiling async calls that return a future.
628
+
629
+ Calling this function will extend recording beyond this scope, until the future is
630
+ satisfied. It is useful for profiling the end to end time of asynchronous calls.
631
+ This function should only be called once to attach the callback onto the future, and
632
+ will throw if called multiple times.
633
+
634
+ Args:
635
+ fut (torch._C.Future): future for which to schedule the
636
+ profiling callback.
637
+
638
+ Returns:
639
+ A future that completes with the value of the passed in future when
640
+ the profiling callbacks have run.
641
+
642
+ """
643
+ # Throw if we have already attached a callback onto the future.
644
+ if not self.run_callbacks_on_exit:
645
+ raise RuntimeError("_call_end_callbacks_on_future can only be called once.")
646
+
647
+ # We are scheduling to run this RecordFunction's end callbacks when the
648
+ # passed in future completes, so don't run end callbacks on exit.
649
+ self.run_callbacks_on_exit = False
650
+
651
+ # Local variable is needed by TorchScript to refine Optional[T] to T
652
+ record = self.record
653
+ assert record is not None
654
+
655
+ # TODO: Too slow with __torch_function__ handling enabled
656
+ # See https://github.com/pytorch/pytorch/issues/76410
657
+ if not torch.jit.is_scripting():
658
+ with torch._C.DisableTorchFunctionSubclass():
659
+ profiled_future = (
660
+ torch.ops.profiler._call_end_callbacks_on_jit_fut._RecordFunction(
661
+ record, fut
662
+ )
663
+ )
664
+ else:
665
+ profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut(
666
+ record, fut
667
+ )
668
+ return profiled_future
669
+
670
+
671
+ class emit_itt:
672
+ """Context manager that makes every autograd operation emit an ITT range.
673
+
674
+ It is useful when running the program under Intel(R) VTune Profiler::
675
+
676
+ vtune <--vtune-flags> <regular command here>
677
+
678
+ The Instrumentation and Tracing Technology (ITT) API enables your application to generate and
679
+ control the collection of trace data during its execution across different Intel tools.
680
+ This context manager is used to annotate the Intel(R) VTune Profiling trace. With the help of this context manager,
681
+ you will be able to see labeled ranges in the Intel(R) VTune Profiler GUI.
682
+
683
+ .. warning:
684
+ This context manager should not be called recursively, i.e. at most one
685
+ instance should be enabled at any given time.
686
+
687
+ Args:
688
+ enabled (bool, optional): Setting ``enabled=False`` makes this context manager a no-op.
689
+ Default: ``True``.
690
+ record_shapes (bool, optional): If ``record_shapes=True``, the itt range wrapping
691
+ each autograd op will append information about the sizes of Tensor arguments received
692
+ by that op, in the following format:
693
+ ``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]``
694
+ Non-tensor arguments will be represented by ``[]``.
695
+ Arguments will be listed in the order they are received by the backend op.
696
+ Please note that this order may not match the order in which those arguments were passed
697
+ on the Python side. Also note that shape recording may increase the overhead of itt range creation.
698
+ Default: ``False``
699
+
700
+ Example:
701
+ >>> # xdoctest: +SKIP("Undefined variables")
702
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER)
703
+ >>> with torch.autograd.profiler.emit_itt():
704
+ ... model(x)
705
+
706
+ """
707
+
708
+ def __init__(self, enabled=True, record_shapes=False):
709
+ self.enabled = enabled
710
+ self.entered = False
711
+ self.record_shapes = record_shapes
712
+
713
+ def __enter__(self):
714
+ if not self.enabled:
715
+ return
716
+ if self.entered:
717
+ raise RuntimeError("ITT annotation context manager is not reentrant")
718
+ self.entered = True
719
+ _run_on_profiler_start()
720
+ _enable_profiler(
721
+ ProfilerConfig(
722
+ ProfilerState.ITT,
723
+ self.record_shapes,
724
+ False,
725
+ False,
726
+ False,
727
+ False,
728
+ _ExperimentalConfig(),
729
+ ),
730
+ set(),
731
+ )
732
+ return self
733
+
734
+ def __exit__(self, exc_type, exc_val, exc_tb):
735
+ if not self.enabled:
736
+ return
737
+ _disable_profiler()
738
+ _run_on_profiler_stop()
739
+ return False
740
+
741
+
742
+ class emit_nvtx:
743
+ """Context manager that makes every autograd operation emit an NVTX range.
744
+
745
+ It is useful when running the program under nvprof::
746
+
747
+ nvprof --profile-from-start off -o trace_name.prof -- <regular command here>
748
+
749
+ Unfortunately, there's no way to force nvprof to flush the data it collected
750
+ to disk, so for CUDA profiling one has to use this context manager to annotate
751
+ nvprof traces and wait for the process to exit before inspecting them.
752
+ Then, either NVIDIA Visual Profiler (nvvp) can be used to visualize the timeline, or
753
+ :func:`torch.autograd.profiler.load_nvprof` can load the results for inspection
754
+ e.g. in Python REPL.
755
+
756
+ .. warning:
757
+ This context manager should not be called recursively, i.e. at most one
758
+ instance should be enabled at any given time.
759
+
760
+ Args:
761
+ enabled (bool, optional): Setting ``enabled=False`` makes this context manager a no-op.
762
+ Default: ``True``.
763
+ record_shapes (bool, optional): If ``record_shapes=True``, the nvtx range wrapping
764
+ each autograd op will append information about the sizes of Tensor arguments received
765
+ by that op, in the following format:
766
+ ``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]``
767
+ Non-tensor arguments will be represented by ``[]``.
768
+ Arguments will be listed in the order they are received by the backend op.
769
+ Please note that this order may not match the order in which those arguments were passed
770
+ on the Python side. Also note that shape recording may increase the overhead of nvtx range creation.
771
+ Default: ``False``
772
+
773
+ Example:
774
+ >>> # xdoctest: +SKIP("undefined variables")
775
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER)
776
+ >>> with torch.cuda.profiler.profile():
777
+ ... model(x) # Warmup CUDA memory allocator and profiler
778
+ ... with torch.autograd.profiler.emit_nvtx():
779
+ ... model(x)
780
+
781
+ **Forward-backward correlation**
782
+
783
+ When viewing a profile created using :class:`emit_nvtx` in the Nvidia Visual Profiler,
784
+ correlating each backward-pass op with the corresponding forward-pass op can be difficult.
785
+ To ease this task, :class:`emit_nvtx` appends sequence number information to the ranges it
786
+ generates.
787
+
788
+ During the forward pass, each function range is decorated with ``seq=<N>``. ``seq`` is a running
789
+ counter, incremented each time a new backward Function object is created and stashed for backward.
790
+ Thus, the ``seq=<N>`` annotation associated with each forward function range tells you that
791
+ if a backward Function object is created by this forward function,
792
+ the backward object will receive sequence number N.
793
+ During the backward pass, the top-level range wrapping each C++ backward Function's
794
+ ``apply()`` call is decorated with ``stashed seq=<M>``. ``M`` is the sequence number that
795
+ the backward object was created with. By comparing ``stashed seq`` numbers in backward with ``seq``
796
+ numbers in forward, you can track down which forward op created each backward Function.
797
+
798
+ Any functions executed during the backward pass are also decorated with ``seq=<N>``. During
799
+ default backward (with ``create_graph=False``) this information is irrelevant, and in fact,
800
+ ``N`` may simply be 0 for all such functions. Only the top-level ranges associated with
801
+ backward Function objects' ``apply()`` methods are useful, as a way to correlate these Function
802
+ objects with the earlier forward pass.
803
+
804
+ **Double-backward**
805
+
806
+ If, on the other hand, a backward pass with ``create_graph=True`` is underway (in other words,
807
+ if you are setting up for a double-backward), each function's execution during backward
808
+ is given a nonzero, useful ``seq=<N>``. Those functions may themselves create Function objects
809
+ to be executed later during double-backward, just as the original functions in the forward pass did.
810
+ The relationship between backward and double-backward is conceptually the same as the relationship
811
+ between forward and backward: The functions still emit current-sequence-number-tagged ranges,
812
+ the Function objects they create still stash those sequence numbers, and during the eventual
813
+ double-backward, the Function objects' ``apply()`` ranges are still tagged with ``stashed seq``
814
+ numbers, which can be compared to `seq` numbers from the backward pass.
815
+
816
+ .. warning:
817
+ The sequence number is thread-local, and some forward functions don't create an associated
818
+ backward Function object (instead delegating that to sub-functions further down the call chain).
819
+ For these reasons, the correspondence of stashed sequence numbers in
820
+ backward Function ``apply()`` ranges with `seq` numbers in forward-pass ranges is
821
+ not guaranteed to be 1 to 1. The sequence numbers alone may not be enough to fully
822
+ disambiguate which forward function created which
823
+ backward Function object. You may need to make a judgment based on analytic knowledge of what
824
+ the expected correspondence should be.
825
+ """
826
+
827
+ def __init__(self, enabled=True, record_shapes=False):
828
+ self.enabled = enabled
829
+ self.entered = False
830
+ self.record_shapes = record_shapes
831
+
832
+ def __enter__(self):
833
+ if not self.enabled:
834
+ return
835
+ if self.entered:
836
+ raise RuntimeError("NVTX annotation context manager is not reentrant")
837
+ self.entered = True
838
+ torch.cuda.synchronize()
839
+ _run_on_profiler_start()
840
+ _enable_profiler(
841
+ ProfilerConfig(
842
+ ProfilerState.NVTX,
843
+ self.record_shapes,
844
+ False,
845
+ False,
846
+ False,
847
+ False,
848
+ _ExperimentalConfig(),
849
+ ),
850
+ set(),
851
+ )
852
+ return self
853
+
854
+ def __exit__(self, exc_type, exc_val, exc_tb):
855
+ if not self.enabled:
856
+ return
857
+ torch.cuda.synchronize()
858
+ _disable_profiler()
859
+ _run_on_profiler_stop()
860
+ return False
861
+
862
+
863
+ def load_nvprof(path):
864
+ """Open an nvprof trace file and parses autograd annotations.
865
+
866
+ Args:
867
+ path (str): path to nvprof trace
868
+ """
869
+ return EventList(parse_nvprof_trace(path))
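+ # Illustrative sketch, not part of the upstream module: inspecting a trace that
+ # nvprof wrote to "trace_name.prof" (a placeholder path) after running under
+ # emit_nvtx, e.g. in a Python REPL:
+ #
+ #     events = torch.autograd.profiler.load_nvprof("trace_name.prof")
+ #     print(events.table(sort_by="cpu_time_total"))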
870
+
871
+
872
+ class EnforceUnique:
873
+ """Raises an error if a key is seen more than once."""
874
+
875
+ def __init__(self):
876
+ self.seen = set()
877
+
878
+ def see(self, *key):
879
+ r"""
880
+ Observe a key and raise an error if it is seen multiple times.
881
+ """
882
+ if key in self.seen:
883
+ raise RuntimeError("duplicate key: " + str(key))
884
+ self.seen.add(key)
885
+
886
+
887
+ def parse_nvprof_trace(path):
888
+ import sqlite3
889
+
890
+ conn = sqlite3.connect(path)
891
+ conn.row_factory = sqlite3.Row
892
+
893
+ # Parse strings table
894
+ strings = {}
895
+ for r in conn.execute("SELECT _id_ as id, value FROM StringTable"):
896
+ strings[r["id"]] = torch._C._demangle(r["value"])
897
+
898
+ # First, find all functions and create FunctionEvents for them
899
+ marker_query = """
900
+ SELECT
901
+ start.id AS marker_id, start.name, start.timestamp AS start_time, end.timestamp AS end_time
902
+ FROM
903
+ CUPTI_ACTIVITY_KIND_MARKER AS start INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
904
+ ON start.id = end.id
905
+ WHERE
906
+ start.name != 0 AND end.name = 0
907
+ """
908
+ functions = []
909
+ functions_map = {}
910
+ unique = EnforceUnique()
911
+ for row in conn.execute(marker_query):
912
+ unique.see(row["marker_id"])
913
+ evt = FunctionEvent(
914
+ id=row["marker_id"],
915
+ node_id=0, # missing a node_id when calling FunctionEvent. This is just to ensure
916
+ # that pytorch doesn't crash when creating a FunctionEvent() object
917
+ name=strings[row["name"]],
918
+ start_us=row["start_time"],
919
+ end_us=row["end_time"],
920
+ thread=0,
921
+ ) # TODO: find in sqlite database
922
+ functions.append(evt)
923
+ functions_map[evt.id] = evt
924
+
925
+ # Now, correlate all kernels with FunctionEvents
926
+ kernel_query = """
927
+ SELECT
928
+ start.id AS marker_id, start.name, start.timestamp, end.timestamp,
929
+ runtime._id_ AS runtime_id, runtime.cbid, runtime.start AS runtime_start, runtime.end AS runtime_end,
930
+ kernel.start AS kernel_start, kernel.end AS kernel_end, kernel.name AS kernel_name
931
+ FROM
932
+ CUPTI_ACTIVITY_KIND_MARKER AS start
933
+ INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
934
+ ON start.id = end.id
935
+ INNER JOIN CUPTI_ACTIVITY_KIND_RUNTIME as runtime
936
+ ON (start.timestamp < runtime.start AND runtime.end < end.timestamp)
937
+ INNER JOIN CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL AS kernel
938
+ ON kernel.correlationId = runtime.correlationId
939
+ """
940
+ unique = EnforceUnique()
941
+ for row in conn.execute(kernel_query):
942
+ unique.see(row["marker_id"], row["runtime_id"])
943
+ # 211 is cudaKernelLaunch for cuda >= 9.2
944
+ assert row["cbid"] == 211
945
+ evt = functions_map[row["marker_id"]]
946
+ evt.append_kernel(
947
+ row["kernel_name"], 0, row["kernel_end"] - row["kernel_start"]
948
+ )
949
+
950
+ functions.sort(key=lambda evt: evt.time_range.start)
951
+ return functions
952
+
953
+
954
+ class KinetoStepTracker:
955
+ """Provides an abstraction for incrementing the step count globally.
956
+
957
+ Previously, we only had one place to mark that a step() has occurred
958
+ in the program via pytorch profiler step(). We will now add step hooks
959
+ in the Optimizer class https://github.com/pytorch/pytorch/issues/88446
960
+
961
+ - This could mean programs that already call profiler.step() every
962
+ iteration can end up double-incrementing the step count.
963
+ - If a model uses multiple optimizers, we can also have double or more
964
+ counting of the step.
965
+
966
+ We fix this by adding a layer of abstraction before calling step()
967
+ to the kineto library. The idea is to maintain steps per requester in a dict:
968
+
969
+ .. code-block::
970
+
971
+ {
972
+ "ProfilerStep": 100, # triggered by profiler step() call
973
+ "Optimizer1Step": 100, # Optimizer 1 or 2 are just examples, could be SGD, Adam etc
974
+ "Optimizer2Step": 100,
975
+ }
976
+
977
+ To figure out the global step count just take the max of dict values (100).
978
+
979
+ If one of the counts increments, the max will go up.
980
+
981
+ .. code-block::
982
+
983
+ {
984
+ "ProfilerStep": 100,
985
+ "Optimizer1Step": 101, # Optimizer1 got incremented first say
986
+ "Optimizer2Step": 100,
987
+ }
988
+
989
+ Then the global step count is 101.
990
+ We only call the kineto step() function when the global count increments.
991
+
992
+ NOTE: Please do not use the KinetoStepTracker in modules besides the Optimizer
993
+ for now. The result could be incorrect increments of the step count.
994
+ """
995
+
996
+ _current_step = 0
997
+ _step_dict: Dict[str, int] = defaultdict(int)
998
+
999
+ @classmethod
1000
+ def init_step_count(cls, requester: str):
1001
+ r"""
1002
+ Initialize for a given requester.
1003
+ """
1004
+ cls._step_dict[requester] = cls._current_step
1005
+
1006
+ @classmethod
1007
+ def erase_step_count(cls, requester: str) -> bool:
1008
+ r"""
1009
+ Remove a given requester.
1010
+ """
1011
+ return cls._step_dict.pop(requester, None) is not None
1012
+
1013
+ @classmethod
1014
+ def increment_step(cls, requester: str) -> int:
1015
+ """Increments the step count for the requester.
1016
+
1017
+ Additionally, if the max over all step counts has incremented, then
1018
+ trigger _kineto_step(). Returns the global step count.
1019
+ """
1020
+ if requester not in cls._step_dict:
1021
+ cls.init_step_count(requester)
1022
+ cls._step_dict[requester] += 1
1023
+
1024
+ new_step = max(cls._step_dict.values())
1025
+ if new_step > cls._current_step:
1026
+ delta = new_step - cls._current_step
1027
+ if delta > 1:
1028
+ warn(
1029
+ "Profiler step count has increased more than 1 - "
1030
+ f"current_step = {cls._current_step} step dict = {cls._step_dict}"
1031
+ )
1032
+ for _ in range(0, delta):
1033
+ _kineto_step()
1034
+ cls._current_step = new_step
1035
+ return cls._current_step
1036
+
1037
+ @classmethod
1038
+ def current_step(cls) -> int:
1039
+ r"""
1040
+ Get the latest step for any requester
1041
+ """
1042
+ return cls._current_step
venv/lib/python3.10/site-packages/torch/autograd/profiler_legacy.py ADDED
@@ -0,0 +1,303 @@
1
+ import itertools
2
+ from warnings import warn
3
+
4
+ import torch
5
+ import torch.cuda
6
+
7
+ from torch.autograd import (
8
+ _disable_profiler_legacy,
9
+ _enable_profiler_legacy,
10
+ DeviceType,
11
+ ProfilerConfig,
12
+ ProfilerState,
13
+ )
14
+ from torch.autograd.profiler_util import (
15
+ _filter_name,
16
+ _filter_stack_entry,
17
+ _rewrite_name,
18
+ EventList,
19
+ FunctionEvent,
20
+ MEMORY_EVENT_NAME,
21
+ )
22
+
23
+ __all__ = ["profile"]
24
+
25
+
26
+ class profile:
27
+ """DEPRECATED: use torch.profiler instead."""
28
+
29
+ def __init__(
30
+ self,
31
+ enabled=True,
32
+ *,
33
+ use_cuda=False,
34
+ record_shapes=False,
35
+ with_flops=False,
36
+ profile_memory=False,
37
+ with_stack=False,
38
+ with_modules=False,
39
+ ):
40
+ self.enabled: bool = enabled
41
+ if not self.enabled:
42
+ return
43
+ self.use_cuda = use_cuda
44
+ self.function_events = None
45
+ self.entered = False
46
+ self.record_shapes = record_shapes
47
+ self.with_flops = with_flops
48
+ self.record_shapes |= self.with_flops
49
+ self.profile_memory = profile_memory
50
+ self.with_stack = with_stack
51
+ self.with_modules = with_modules
52
+
53
+ if self.use_cuda and not torch.cuda.is_available():
54
+ warn("CUDA is not available, disabling CUDA profiling")
55
+ self.use_cuda = False
56
+
57
+ if self.use_cuda:
58
+ self.profiler_kind = ProfilerState.CUDA
59
+ else:
60
+ self.profiler_kind = ProfilerState.CPU
61
+
62
+ def config(self):
63
+ return ProfilerConfig(
64
+ self.profiler_kind,
65
+ self.record_shapes,
66
+ self.profile_memory,
67
+ self.with_stack,
68
+ self.with_flops,
69
+ self.with_modules,
70
+ # avoid exposing _ExperimentalConfig in the legacy public API
71
+ torch._C._profiler._ExperimentalConfig(),
72
+ )
73
+
74
+ def __enter__(self):
75
+ if not self.enabled:
76
+ return
77
+ if self.entered:
78
+ raise RuntimeError("Profiler context manager is not reentrant")
79
+ self.entered = True
80
+ self._start_trace()
81
+ return self
82
+
83
+ def _start_trace(self):
84
+ _enable_profiler_legacy(self.config())
85
+
86
+ def __exit__(self, exc_type, exc_val, exc_tb):
87
+ if not self.enabled:
88
+ return
89
+ if self.use_cuda:
90
+ torch.cuda.synchronize()
91
+
92
+ records = _disable_profiler_legacy()
93
+ parsed_results = _parse_legacy_records(records)
94
+ self.function_events = EventList(
95
+ parsed_results,
96
+ use_cuda=self.use_cuda,
97
+ profile_memory=self.profile_memory,
98
+ with_flops=self.with_flops,
99
+ )
100
+ self.function_events._build_tree()
101
+ return False
102
+
103
+ def __repr__(self):
104
+ if self.function_events is None:
105
+ return "<unfinished profiler_legacy.profile>"
106
+ return repr(self.function_events)
107
+
108
+ def __str__(self):
109
+ if self.function_events is None:
110
+ return "<unfinished profile.profiler_legacy.profile>"
111
+ return str(self.function_events)
112
+
113
+ def _check_finish(self):
114
+ if self.function_events is None:
115
+ raise RuntimeError("Profiler didn't finish running")
116
+
117
+ def table(
118
+ self,
119
+ sort_by=None,
120
+ row_limit=100,
121
+ max_src_column_width=75,
122
+ max_name_column_width=55,
123
+ max_shapes_column_width=80,
124
+ header=None,
125
+ top_level_events_only=False,
126
+ ):
127
+ self._check_finish()
128
+ assert self.function_events is not None
129
+ return self.function_events.table(
130
+ sort_by=sort_by,
131
+ row_limit=row_limit,
132
+ max_src_column_width=max_src_column_width,
133
+ max_name_column_width=max_name_column_width,
134
+ max_shapes_column_width=max_shapes_column_width,
135
+ header=header,
136
+ top_level_events_only=top_level_events_only,
137
+ )
138
+
139
+ table.__doc__ = EventList.table.__doc__
140
+
141
+ def export_chrome_trace(self, path):
142
+ self._check_finish()
143
+ assert self.function_events is not None
144
+ return self.function_events.export_chrome_trace(path)
145
+
146
+ export_chrome_trace.__doc__ = EventList.export_chrome_trace.__doc__
147
+
148
+ def export_stacks(self, path: str, metric: str = "self_cpu_time_total"):
149
+ self._check_finish()
150
+ assert self.function_events is not None, "Expected profiling results"
151
+ assert self.with_stack, "export_stacks() requires with_stack=True"
152
+ return self.function_events.export_stacks(path, metric)
153
+
154
+ def key_averages(self, group_by_input_shape=False, group_by_stack_n=0):
155
+ self._check_finish()
156
+ assert self.function_events is not None, "Expected profiling results"
157
+ return self.function_events.key_averages(group_by_input_shape, group_by_stack_n)
158
+
159
+ key_averages.__doc__ = EventList.key_averages.__doc__
160
+
161
+ def total_average(self):
162
+ self._check_finish()
163
+ assert self.function_events is not None, "Expected profiling results"
164
+ return self.function_events.total_average()
165
+
166
+ total_average.__doc__ = EventList.total_average.__doc__
167
+
168
+ @property
169
+ def self_cpu_time_total(self):
170
+ """Return CPU time as the sum of self times across all events."""
171
+ self._check_finish()
172
+ assert self.function_events is not None
173
+ return self.function_events.self_cpu_time_total
174
+
175
+
176
+ def _parse_legacy_records(thread_records):
177
+ def _get_record_key(record):
178
+ """Return a tuple for correlating start and end records in `_parse_legacy_records`."""
179
+ return (record.handle(), record.node_id())
180
+
181
+ next_id = 0
182
+ start_record = None
183
+ functions = []
184
+ record_stack = []
185
+
186
+ # '__start_profile' is not guaranteed to be first, so we must find it here
187
+ for record in itertools.chain.from_iterable(thread_records):
188
+ name = record.name()
189
+ if start_record is None and name == "__start_profile":
190
+ start_record = record
191
+
192
+ assert start_record is not None and not start_record.is_remote()
193
+
194
+ for thread_record_list in thread_records:
195
+ # accumulated memory allocations per handle
196
+ cpu_memory_allocs = {}
197
+ cuda_memory_allocs = {}
198
+ # ranges per handle
199
+ range_starts = {}
200
+
201
+ filtered_handles = set()
202
+ prev_record = None
203
+ for record in thread_record_list:
204
+ record_key = _get_record_key(record)
205
+ if _filter_name(record.name()) or record_key in filtered_handles:
206
+ filtered_handles.add(record_key)
207
+ continue
208
+
209
+ if record.kind() == "push":
210
+ # workaround to reduce double logging from operator
211
+ # wrappers and redispatch
212
+ if prev_record is not None:
213
+ duplicate = (
214
+ prev_record.name() == record.name()
215
+ and prev_record.kind() == record.kind()
216
+ and prev_record.node_id() == record.node_id()
217
+ )
218
+ if duplicate:
219
+ filtered_handles.add(record_key)
220
+ continue
221
+
222
+ range_starts[record_key] = record
223
+ cpu_memory_allocs[record_key] = 0
224
+ cuda_memory_allocs[record_key] = 0
225
+ elif record.kind() == "pop":
226
+ assert (
227
+ record_key in range_starts
228
+ ), f"""Expected record with key {record_key} to exist in range_starts.
229
+ This means that the pop event did not have a corresponding push."""
230
+
231
+ start = range_starts[record_key]
232
+
233
+ cpu_memory_usage = cpu_memory_allocs[record_key]
234
+ cuda_memory_usage = cuda_memory_allocs[record_key]
235
+ is_async = start.is_async() or (start.thread_id() != record.thread_id())
236
+ is_remote_event = record.is_remote()
237
+ start_flops = start.flops()
238
+
239
+ fe = FunctionEvent(
240
+ id=record.handle(),
241
+ node_id=record.node_id(),
242
+ name=_rewrite_name(name=start.name(), with_wildcard=True),
243
+ trace_name=_rewrite_name(name=start.name(), with_wildcard=False),
244
+ thread=start.thread_id(),
245
+ start_us=start_record.cpu_elapsed_us(start),
246
+ end_us=start_record.cpu_elapsed_us(record),
247
+ fwd_thread=start.fwd_thread_id(),
248
+ input_shapes=start.shapes(),
249
+ stack=[
250
+ entry for entry in start.stack() if _filter_stack_entry(entry)
251
+ ],
252
+ scope=start.scope(),
253
+ cpu_memory_usage=cpu_memory_usage,
254
+ cuda_memory_usage=cuda_memory_usage,
255
+ is_async=is_async,
256
+ is_remote=is_remote_event,
257
+ sequence_nr=start.sequence_nr(),
258
+ device_type=DeviceType.CPU,
259
+ is_legacy=True,
260
+ flops=start_flops,
261
+ )
262
+ # note: async events have only cpu total time
263
+ if not is_async and start.has_cuda():
264
+ duration = start.cuda_elapsed_us(record)
265
+ if duration > 0:
266
+ fe.append_kernel(start.name(), start.device(), duration)
267
+ functions.append(fe)
268
+ del range_starts[record_key]
269
+ del cpu_memory_allocs[record_key]
270
+ del cuda_memory_allocs[record_key]
271
+ elif record.kind() == "memory_alloc":
272
+ num_open_handles_cpu = len(cpu_memory_allocs)
273
+ num_open_handles_cuda = len(cuda_memory_allocs)
274
+ assert num_open_handles_cpu == num_open_handles_cuda
275
+ for handle in cpu_memory_allocs.keys():
276
+ cpu_memory_allocs[handle] += record.cpu_memory_usage()
277
+ for handle in cuda_memory_allocs.keys():
278
+ cuda_memory_allocs[handle] += record.cuda_memory_usage()
279
+ if num_open_handles_cpu == 0:
280
+ # output event as a top-level memory event
281
+ fe = FunctionEvent(
282
+ id=0,
283
+ name=MEMORY_EVENT_NAME,
284
+ trace_name=None,
285
+ thread=0,
286
+ start_us=0,
287
+ end_us=0,
288
+ stack=[],
289
+ cpu_memory_usage=record.cpu_memory_usage(),
290
+ cuda_memory_usage=record.cuda_memory_usage(),
291
+ is_legacy=True,
292
+ )
293
+ functions.append(fe)
294
+ prev_record = record
295
+
296
+ # Sort functions by start time ascending, then by end time descending.
297
+ # This ensures that--in the case of nested events which
298
+ # have the same start time (which may happen due to the
299
+ # granularity of the given clock tick)--we always show
300
+ # the outermost nested call first. This adds stability
301
+ # in how FunctionEvents appear.
302
+ functions.sort(key=lambda evt: [evt.time_range.start, -evt.time_range.end])
303
+ return functions
venv/lib/python3.10/site-packages/torch/autograd/profiler_util.py ADDED
@@ -0,0 +1,1178 @@
1
+ import bisect
2
+ import itertools
3
+ import math
4
+
5
+ from collections import defaultdict, namedtuple
6
+ from operator import attrgetter
7
+
8
+ from typing import Any, Dict, List, Optional, Tuple
9
+
10
+ import torch
11
+ from torch.autograd import DeviceType
12
+
13
+ __all__ = [
14
+ "EventList",
15
+ "FormattedTimesMixin",
16
+ "Interval",
17
+ "Kernel",
18
+ "FunctionEvent",
19
+ "FunctionEventAvg",
20
+ "StringTable",
21
+ "MemRecordsAcc",
22
+ ]
23
+
24
+
25
+ class EventList(list):
26
+ """A list of Events (for pretty printing)."""
27
+
28
+ def __init__(self, *args, **kwargs):
29
+ use_cuda = kwargs.pop("use_cuda", True)
30
+ use_device = kwargs.pop("use_device", None)
31
+ profile_memory = kwargs.pop("profile_memory", False)
32
+ with_flops = kwargs.pop("with_flops", False)
33
+ super().__init__(*args, **kwargs)
34
+ self._use_cuda = use_cuda
35
+ self._use_device = use_device
36
+ self._profile_memory = profile_memory
37
+ self._tree_built = False
38
+ self._with_flops = with_flops
39
+
40
+ def _build_tree(self):
41
+ self._populate_cpu_children()
42
+ self._remove_dup_nodes()
43
+ self._set_backward_stacktraces()
44
+ self._tree_built = True
45
+
46
+ def __str__(self):
47
+ return self.table()
48
+
49
+ def _remove_dup_nodes(self):
50
+ while True:
51
+ to_delete = set()
52
+ for idx in range(len(self)):
53
+ if (
54
+ self[idx].cpu_parent is not None
55
+ and self[idx].cpu_parent.name == self[idx].name
56
+ and len(self[idx].cpu_parent.cpu_children) == 1
57
+ ):
58
+ self[idx].cpu_parent.cpu_children = self[idx].cpu_children
59
+ self[idx].cpu_parent.kernels = self[idx].kernels # lift kernels up
60
+ for ch in self[idx].cpu_children:
61
+ ch.cpu_parent = self[idx].cpu_parent
62
+ to_delete.add(idx)
63
+ if len(to_delete) == 0:
64
+ break
65
+ new_evts = [ev for ind, ev in enumerate(self) if ind not in to_delete]
66
+ self.clear()
67
+ self.extend(new_evts)
68
+
69
+ def _populate_cpu_children(self):
70
+ """Populate child events into each underlying FunctionEvent object.
71
+
72
+ One event is a child of another if [s1, e1) is inside [s2, e2), where
73
+ s1 and e1 are the start and end of the child event's interval, and
74
+ s2 and e2 are the start and end of the parent event's interval.
75
+
76
+ Example: In the event list [[0, 10], [1, 3], [3, 4]], [0, 10] would
77
+ be the parent of the two other intervals.
78
+
79
+ If for any reason two intervals intersect only partially, this function
80
+ will not record a parent-child relationship between them.
81
+ """
82
+ # Some events can be async (i.e. start and end on different threads),
83
+ # since it's generally undefined how to attribute children ranges to
84
+ # async ranges, we do not use them when calculating nested ranges and stats
85
+ sync_events = [
86
+ evt
87
+ for evt in self
88
+ if not evt.is_async and evt.device_type == DeviceType.CPU
89
+ ]
90
+ events = sorted(
91
+ sync_events,
92
+ key=attrgetter("thread"),
93
+ )
94
+ # Group by both thread and node_id, so that events that happen to have
95
+ # the same thread_id but are from different nodes aren't incorrectly
96
+ # grouped together.
97
+ threads = itertools.groupby(
98
+ events, key=lambda event: (event.thread, event.node_id)
99
+ )
100
+
101
+ # For each thread we keep a stack of current nested parents.
102
+ # We maintain the invariant that each interval is a subset of all other
103
+ # intervals lower in the stack.
104
+ #
105
+ # First we sort the intervals by their start time. Then we iterate over them.
106
+ # Every time we see a new interval we remove several parents from
107
+ # the top until we restore the invariant. Then a parent-child relationship
108
+ # is recorded if the stack is not empty.
109
+ # Finally we add the new interval to the list.
110
+ #
111
+ # Algorithm has O(N * log(N)) complexity where N is number of
112
+ # intervals
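+ # Worked example (illustrative, not part of the upstream module) for the
+ # intervals [0, 10], [1, 3], [3, 4] from the docstring, all on one thread:
+ #   see [0, 10]: stack empty, nothing to pop             -> stack = [[0, 10]]
+ #   see [1, 3]:  fits inside [0, 10], recorded as child  -> stack = [[0, 10], [1, 3]]
+ #   see [3, 4]:  starts at [1, 3]'s end, so [1, 3] is popped;
+ #                fits inside [0, 10], recorded as child  -> stack = [[0, 10], [3, 4]]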
113
+ for thread_id, thread_events in threads:
114
+ thread_events_ = sorted(
115
+ thread_events,
116
+ key=lambda event: [event.time_range.start, -event.time_range.end],
117
+ )
118
+ current_events: List[FunctionEvent] = []
119
+ cur_end = 0
120
+ for event in thread_events_:
121
+ while len(current_events) > 0:
122
+ parent = current_events[-1]
123
+ if (
124
+ event.time_range.start >= parent.time_range.end
125
+ or event.time_range.end > parent.time_range.end
126
+ ):
127
+ # this can't be a parent
128
+ current_events.pop()
129
+ else:
130
+ parent.append_cpu_child(event)
131
+ assert (
132
+ event.cpu_parent is None
133
+ ), f"There is already a CPU parent event for {event.key}"
134
+ event.set_cpu_parent(parent)
135
+ break
136
+
137
+ current_events.append(event)
138
+
139
+ def _set_backward_stacktraces(self):
140
+ def bw_parent(evt):
141
+ if evt is None:
142
+ return None
143
+ elif evt.scope == 1: # BACKWARD_FUNCTION
144
+ return evt
145
+ else:
146
+ return bw_parent(evt.cpu_parent)
147
+
148
+ fwd_stacks = {}
149
+ for evt in self:
150
+ if bw_parent(evt) is None and evt.stack is not None:
151
+ t = (evt.sequence_nr, evt.thread)
152
+ if t not in fwd_stacks:
153
+ fwd_stacks[t] = evt.stack
154
+
155
+ for evt in self:
156
+ p = bw_parent(evt)
157
+ if p is not None:
158
+ assert p.fwd_thread is not None
159
+ t = (p.sequence_nr, p.fwd_thread)
160
+ if t in fwd_stacks:
161
+ evt.stack = fwd_stacks[t]
162
+ else:
163
+ evt.stack = []
164
+
165
+ @property
166
+ def self_cpu_time_total(self):
167
+ return sum([event.self_cpu_time_total for event in self])
168
+
169
+ def table(
170
+ self,
171
+ sort_by=None,
172
+ row_limit=100,
173
+ max_src_column_width=75,
174
+ max_name_column_width=55,
175
+ max_shapes_column_width=80,
176
+ header=None,
177
+ top_level_events_only=False,
178
+ ):
179
+ """Print an EventList as a nicely formatted table.
180
+
181
+ Args:
182
+ sort_by (str, optional): Attribute used to sort entries. By default
183
+ they are printed in the same order as they were registered.
184
+ Valid keys include: ``cpu_time``, ``cuda_time``, ``cpu_time_total``,
185
+ ``cuda_time_total``, ``cpu_memory_usage``, ``cuda_memory_usage``,
186
+ ``self_cpu_memory_usage``, ``self_cuda_memory_usage``, ``count``.
187
+ top_level_events_only(bool, optional): Boolean flag to determine the
188
+ selection of events to display. If True, the profiler will only
189
+ display events at top level like top-level invocation of python
190
+ `lstm`, python `add` or other functions; nested events like low-level
191
+ cpu/cuda op events are omitted for profiler result readability.
192
+
193
+ Returns:
194
+ A string containing the table.
195
+ """
196
+ return _build_table(
197
+ self,
198
+ sort_by=sort_by,
199
+ row_limit=row_limit,
200
+ max_src_column_width=max_src_column_width,
201
+ max_name_column_width=max_name_column_width,
202
+ max_shapes_column_width=max_shapes_column_width,
203
+ header=header,
204
+ profile_memory=self._profile_memory,
205
+ with_flops=self._with_flops,
206
+ top_level_events_only=top_level_events_only,
207
+ )
208
+
209
+ def export_chrome_trace(self, path):
210
+ """Export an EventList as a Chrome tracing tools file.
211
+
212
+ The checkpoint can be later loaded and inspected under ``chrome://tracing`` URL.
213
+
214
+ Args:
215
+ path (str): Path where the trace will be written.
216
+ """
217
+ import os
218
+
219
+ device_name = "cuda" if not self._use_device else self._use_device
220
+ with open(path, "w") as f:
221
+ chrome_events = []
222
+ next_id = 0
223
+ # Use file IO over using json.dump since JSON dumping is very slow and
224
+ # this technique is proven to give a 4x speedup.
225
+ f.write("[")
226
+ for evt in self:
227
+ if evt.trace_name is None:
228
+ continue
229
+ f.write(
230
+ '{{"name": "{}", '
231
+ '"ph": "X", '
232
+ '"ts": {}, '
233
+ '"dur": {}, '
234
+ '"tid": {}, '
235
+ '"pid": "CPU functions", '
236
+ '"args": {{}}}}, '.format(
237
+ evt.trace_name,
238
+ evt.time_range.start,
239
+ evt.time_range.elapsed_us(),
240
+ evt.thread
241
+ if not evt.is_remote
242
+ else f'" node_id:{evt.node_id}, thread_id:{evt.thread} "',
243
+ )
244
+ )
245
+ for k in evt.kernels:
246
+ # 's' and 'f' draw Flow arrows from
247
+ # the CPU launch to the GPU kernel
248
+ f.write(
249
+ f'{{"name": "{evt.trace_name}", '
250
+ '"ph": "s", '
251
+ f'"ts": {evt.time_range.start}, '
252
+ f'"tid": {evt.thread}, '
253
+ '"pid": "CPU functions", '
254
+ f'"id": {next_id}, '
255
+ f'"cat": "cpu_to_{device_name}", '
256
+ '"args": {}}, '
257
+ )
258
+ # Note: use torch.profiler to get device kernel trace
259
+ next_id += 1
260
+ if len(self) > 0:
261
+ # remove trailing whitespace and comma
262
+ f.seek(f.tell() - 2, os.SEEK_SET)
263
+ f.truncate()
264
+ f.write("]")
265
+
266
+ def supported_export_stacks_metrics(self):
267
+ return [
268
+ "self_cpu_time_total",
269
+ "self_cuda_time_total",
270
+ "self_privateuse1_time_total",
271
+ ]
272
+
273
+ def export_stacks(self, path: str, metric: str):
274
+ if metric not in self.supported_export_stacks_metrics():
275
+ raise ValueError(
276
+ "metric should be one of: "
277
+ + str(self.supported_export_stacks_metrics())
278
+ )
279
+ translate_table = str.maketrans(" ;\t\n", "____")
280
+ with open(path, "w") as f:
281
+ for evt in self:
282
+ if evt.stack and len(evt.stack) > 0:
283
+ metric_value = getattr(evt, metric)
284
+ if int(metric_value) > 0:
285
+ stack_str = ""
286
+ for entry in reversed(evt.stack):
287
+ stack_str += entry.translate(translate_table)
288
+ stack_str += ";"
289
+ stack_str = stack_str[:-1] + " " + str(int(metric_value))
290
+ f.write(stack_str + "\n")
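+     # Illustrative sketch, not part of the upstream module: each line written above
+     # is a ';'-joined stack followed by a space and the integer metric value,
+     # i.e. the "folded stacks" format consumed by flame graph tools. A line might
+     # look like (frame names here are made up):
+     #
+     #     train.py(90):main;model.py(42):forward;aten::addmm 2300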
291
+
292
+ def key_averages(self, group_by_input_shapes=False, group_by_stack_n=0):
293
+ """Averages all function events over their keys.
294
+
295
+ Args:
296
+ group_by_input_shapes: group entries by
297
+ (event name, input shapes) rather than just event name.
298
+ This is useful to see which input shapes contribute to the runtime
299
+ the most and may help with size-specific optimizations or
300
+ choosing the best candidates for quantization (aka fitting a roof line)
301
+
302
+ group_by_stack_n: group by top n stack trace entries
303
+
304
+ Returns:
305
+ An EventList containing FunctionEventAvg objects.
306
+ """
307
+ assert self._tree_built
308
+ stats: Dict[Tuple[str, ...], FunctionEventAvg] = defaultdict(FunctionEventAvg)
309
+
310
+ def get_key(event, group_by_input_shapes, group_by_stack_n) -> Tuple[str, ...]:
311
+ key = [
312
+ str(event.key),
313
+ str(event.node_id),
314
+ str(event.device_type),
315
+ str(event.is_legacy),
316
+ ]
317
+ if group_by_input_shapes:
318
+ key.append(str(event.input_shapes))
319
+ if group_by_stack_n > 0:
320
+ key += event.stack[:group_by_stack_n]
321
+ return tuple(key)
322
+
323
+ for evt in self:
324
+ stats[get_key(evt, group_by_input_shapes, group_by_stack_n)].add(evt)
325
+
326
+ avg_list = EventList(
327
+ stats.values(),
328
+ use_cuda=self._use_cuda,
329
+ use_device=self._use_device,
330
+ profile_memory=self._profile_memory,
331
+ with_flops=self._with_flops,
332
+ )
333
+ for evt in avg_list:
334
+ evt.stack = evt.stack[:group_by_stack_n]
335
+ if not group_by_input_shapes:
336
+ evt.input_shapes = ""
337
+ return avg_list
338
+
339
+ def total_average(self):
340
+ """Averages all events.
341
+
342
+ Returns:
343
+ A FunctionEventAvg object.
344
+ """
345
+ total_stat = FunctionEventAvg()
346
+ for evt in self:
347
+ total_stat += evt
348
+ total_stat.key = None
349
+ total_stat.key = "Total"
350
+ return total_stat
351
+
352
+
353
+ def _format_time(time_us):
354
+ """Define how to format time in FunctionEvent."""
355
+ US_IN_SECOND = 1000.0 * 1000.0
356
+ US_IN_MS = 1000.0
357
+ if time_us >= US_IN_SECOND:
358
+ return f"{time_us / US_IN_SECOND:.3f}s"
359
+ if time_us >= US_IN_MS:
360
+ return f"{time_us / US_IN_MS:.3f}ms"
361
+ return f"{time_us:.3f}us"
362
+
363
+
364
+ def _format_time_share(time_us, total_time_us):
365
+ """Define how to format time in FunctionEvent."""
366
+ if total_time_us == 0:
367
+ assert time_us == 0, f"Expected time_us == 0 but got {time_us}"
368
+ return "NaN"
369
+ return f"{time_us * 100.0 / total_time_us:.2f}%"
370
+
371
+
372
+ def _format_memory(nbytes):
373
+ """Return a formatted memory size string."""
374
+ KB = 1024
375
+ MB = 1024 * KB
376
+ GB = 1024 * MB
377
+ if abs(nbytes) >= GB:
378
+ return f"{nbytes * 1.0 / GB:.2f} Gb"
379
+ elif abs(nbytes) >= MB:
380
+ return f"{nbytes * 1.0 / MB:.2f} Mb"
381
+ elif abs(nbytes) >= KB:
382
+ return f"{nbytes * 1.0 / KB:.2f} Kb"
383
+ else:
384
+ return str(nbytes) + " b"
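+ # Illustrative values, not part of the upstream module, computed from the helpers above:
+ #     _format_time(1_500_000)       -> "1.500s"
+ #     _format_time(2_500)           -> "2.500ms"
+ #     _format_memory(3 * 1024 ** 2) -> "3.00 Mb"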
385
+
386
+
387
+ def _attr_formatter(name):
388
+ return property(lambda self: _format_time(getattr(self, name)))
389
+
390
+
391
+ class FormattedTimesMixin:
392
+ """Helpers for FunctionEvent and FunctionEventAvg.
393
+
394
+ The subclass should define `*_time_total` and `count` attributes.
395
+ """
396
+
397
+ cpu_time_str = _attr_formatter("cpu_time")
398
+ cuda_time_str = _attr_formatter("cuda_time")
399
+ privateuse1_time_str = _attr_formatter("privateuse1_time")
400
+ cpu_time_total_str = _attr_formatter("cpu_time_total")
401
+ cuda_time_total_str = _attr_formatter("cuda_time_total")
402
+ privateuse1_time_total_str = _attr_formatter("privateuse1_time_total")
403
+ self_cpu_time_total_str = _attr_formatter("self_cpu_time_total")
404
+ self_cuda_time_total_str = _attr_formatter("self_cuda_time_total")
405
+ self_privateuse1_time_total_str = _attr_formatter("self_privateuse1_time_total")
406
+
407
+ @property
408
+ def cpu_time(self):
409
+ return 0.0 if self.count == 0 else 1.0 * self.cpu_time_total / self.count # type: ignore[attr-defined]
410
+
411
+ @property
412
+ def cuda_time(self):
413
+ return 0.0 if self.count == 0 else 1.0 * self.cuda_time_total / self.count # type: ignore[attr-defined]
414
+
415
+ @property
416
+ def privateuse1_time(self):
417
+ return 0.0 if self.count == 0 else 1.0 * self.privateuse1_time_total / self.count # type: ignore[attr-defined]
418
+
419
+
420
+ class Interval:
421
+ def __init__(self, start, end):
422
+ self.start = start
423
+ self.end = end
424
+
425
+ def elapsed_us(self):
426
+ r"""
427
+ Returns the length of the interval
428
+ """
429
+ return self.end - self.start
430
+
431
+
432
+ Kernel = namedtuple("Kernel", ["name", "device", "duration"])
433
+
434
+
435
+ class FunctionEvent(FormattedTimesMixin):
436
+ """Profiling information about a single function."""
437
+
438
+ def __init__(
439
+ self,
440
+ id,
441
+ name,
442
+ thread,
443
+ start_us,
444
+ end_us,
445
+ fwd_thread=None,
446
+ input_shapes=None,
447
+ stack=None,
448
+ scope=0,
449
+ use_device=None,
450
+ cpu_memory_usage=0,
451
+ cuda_memory_usage=0,
452
+ privateuse1_memory_usage=0,
453
+ is_async=False,
454
+ is_remote=False,
455
+ sequence_nr=-1,
456
+ node_id=-1,
457
+ device_type=DeviceType.CPU,
458
+ device_index=0,
459
+ is_legacy=False,
460
+ flops=None,
461
+ trace_name=None,
462
+ concrete_inputs=None,
463
+ ):
464
+ self.id: int = id
465
+ self.node_id: int = node_id
466
+ self.name: str = name
467
+ self.trace_name: str = trace_name
468
+ self.time_range: Interval = Interval(start_us, end_us)
469
+ self.thread: int = thread
470
+ self.fwd_thread: Optional[int] = fwd_thread
471
+ self.kernels: List[Kernel] = []
472
+ self.count: int = 1
473
+ self.cpu_children: List[FunctionEvent] = []
474
+ self.cpu_parent: Optional[FunctionEvent] = None
475
+ self.input_shapes: Tuple[int, ...] = input_shapes
476
+ self.concrete_inputs: List[Any] = concrete_inputs
477
+ self.stack: List = stack
478
+ self.scope: int = scope
479
+ self.use_device: Optional[str] = use_device
480
+ self.cpu_memory_usage: int = cpu_memory_usage
481
+ self.cuda_memory_usage: int = cuda_memory_usage
482
+ self.privateuse1_memory_usage: int = privateuse1_memory_usage
483
+ self.is_async: bool = is_async
484
+ self.is_remote: bool = is_remote
485
+ self.sequence_nr: int = sequence_nr
486
+ self.device_type: DeviceType = device_type
487
+ self.device_index: int = device_index
488
+ self.is_legacy: bool = is_legacy
489
+ self.flops: Optional[int] = flops
490
+
491
+ def append_kernel(self, name, device, duration):
492
+ assert self.device_type == DeviceType.CPU
493
+ self.kernels.append(Kernel(name, device, duration))
494
+
495
+ def append_cpu_child(self, child):
496
+ """Append a CPU child of type FunctionEvent.
497
+
498
+ One is supposed to append only direct children to the event to have
499
+ the self CPU time reported correctly.
500
+ """
501
+ assert self.device_type == DeviceType.CPU
502
+ assert isinstance(child, FunctionEvent)
503
+ assert child.device_type == DeviceType.CPU
504
+ self.cpu_children.append(child)
505
+
506
+ def set_cpu_parent(self, parent):
507
+ """Set the immediate CPU parent of type FunctionEvent.
508
+
509
+ One profiling FunctionEvent should have only one CPU parent such that
510
+ the child's range interval is completely inside the parent's. We use
511
+ this connection to determine whether the event is a top-level op or not.
512
+ """
513
+ assert self.device_type == DeviceType.CPU
514
+ assert isinstance(parent, FunctionEvent)
515
+ assert parent.device_type == DeviceType.CPU
516
+ self.cpu_parent = parent
517
+
518
+ # Note: async events don't have children, are not used when computing 'self'
519
+ # metrics of other events, have only total cpu time
520
+ @property
521
+ def self_cpu_memory_usage(self):
522
+ if self.is_async or self.device_type != DeviceType.CPU:
523
+ return 0
524
+ return self.cpu_memory_usage - sum(
525
+ [child.cpu_memory_usage for child in self.cpu_children]
526
+ )
527
+
528
+ @property
529
+ def self_cuda_memory_usage(self):
530
+ if self.is_async or self.device_type != DeviceType.CPU:
531
+ return 0
532
+ return self.cuda_memory_usage - sum(
533
+ [child.cuda_memory_usage for child in self.cpu_children]
534
+ )
535
+
536
+ @property
537
+ def self_privateuse1_memory_usage(self):
538
+ if self.is_async or self.device_type != DeviceType.CPU:
539
+ return 0
540
+ return self.privateuse1_memory_usage - sum(
541
+ [child.privateuse1_memory_usage for child in self.cpu_children]
542
+ )
543
+
544
+ @property
545
+ def self_cpu_time_total(self):
546
+ if self.is_async or self.device_type != DeviceType.CPU:
547
+ return 0
548
+ return self.cpu_time_total - sum(
549
+ [child.cpu_time_total for child in self.cpu_children]
550
+ )
551
+
552
+ @property
553
+ def cuda_time_total(self):
554
+ if self.is_async or self.use_device:
555
+ return 0
556
+ if self.device_type == DeviceType.CPU:
557
+ if not self.is_legacy:
558
+ # account for the kernels in the children ops
559
+ return sum(kinfo.duration for kinfo in self.kernels) + sum(
560
+ ch.cuda_time_total for ch in self.cpu_children
561
+ )
562
+ else:
563
+ # each legacy cpu event has a single (fake) kernel
564
+ return sum(kinfo.duration for kinfo in self.kernels)
565
+ else:
566
+ assert self.device_type == DeviceType.CUDA
567
+ return self.time_range.elapsed_us()
568
+
569
+ @property
570
+ def self_cuda_time_total(self):
571
+ if self.is_async or self.use_device:
572
+ return 0
573
+ if self.device_type == DeviceType.CPU:
574
+ return self.cuda_time_total - sum(
575
+ [child.cuda_time_total for child in self.cpu_children]
576
+ )
577
+ else:
578
+ assert self.device_type == DeviceType.CUDA
579
+ return self.cuda_time_total
580
+
581
+ @property
582
+ def cpu_time_total(self):
583
+ if self.device_type == DeviceType.CPU:
584
+ return self.time_range.elapsed_us()
585
+ else:
586
+ return 0
587
+
588
+ @property
589
+ def self_privateuse1_time_total(self):
590
+ if self.is_async or not self.use_device:
591
+ return 0
592
+ if self.device_type == DeviceType.CPU:
593
+ return self.privateuse1_time_total - sum(
594
+ [child.privateuse1_time_total for child in self.cpu_children]
595
+ )
596
+ else:
597
+ assert self.device_type == DeviceType.PrivateUse1
598
+ return self.privateuse1_time_total
599
+
600
+ @property
601
+ def privateuse1_time_total(self):
602
+ if self.is_async or not self.use_device:
603
+ return 0
604
+ if self.device_type == DeviceType.CPU:
605
+ if not self.is_legacy:
606
+ # account for the kernels in the children ops
607
+ return sum(kinfo.duration for kinfo in self.kernels) + sum(
608
+ ch.privateuse1_time_total for ch in self.cpu_children
609
+ )
610
+ else:
611
+ # each legacy cpu event has a single (fake) kernel
612
+ return sum(kinfo.duration for kinfo in self.kernels)
613
+ else:
614
+ assert self.device_type == DeviceType.PrivateUse1
615
+ return self.time_range.elapsed_us()
616
+
617
+ @property
618
+ def key(self):
619
+ return self.name
620
+
621
+ def __repr__(self):
622
+ device_name = "cuda" if not self.use_device else self.use_device
623
+ device_time = (
624
+ self.cuda_time_str if not self.use_device else self.privateuse1_time_str
625
+ )
626
+ device_memory_usage = (
627
+ self.cuda_memory_usage
628
+ if not self.use_device
629
+ else self.privateuse1_memory_usage
630
+ )
631
+ return (
632
+ "<FunctionEvent id={} name={} device_type={} node_id={} cpu_time={} start_us={} end_us={} "
633
+ "cpu_children={} {}_time={} name={} thread={} input_shapes={} "
634
+ "cpu_memory_usage={} {}_memory_usage={} is_async={} is_remote={} seq_nr={} is_legacy={}>".format(
635
+ self.id,
636
+ self.name,
637
+ self.device_type,
638
+ self.node_id,
639
+ self.cpu_time_str,
640
+ self.time_range.start,
641
+ self.time_range.end,
642
+ str([child.id for child in self.cpu_children]),
643
+ device_name,
644
+ device_time,
645
+ self.name,
646
+ self.thread,
647
+ str(self.input_shapes),
648
+ self.cpu_memory_usage,
649
+ device_name,
650
+ device_memory_usage,
651
+ self.is_async,
652
+ self.is_remote,
653
+ self.sequence_nr,
654
+ self.is_legacy,
655
+ )
656
+ )
657
+
658
+
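The properties above derive an op's "self" time and memory by subtracting whatever is already attributed to its cpu_children. A minimal sketch of how these fields are typically read, assuming a standard torch install with the autograd profiler available (the tensor sizes are arbitrary):

    import torch
    from torch.autograd import profiler

    x = torch.randn(128, 128)
    with profiler.profile(profile_memory=True) as prof:
        y = x.mm(x)

    for evt in prof.function_events:
        # self time/memory = this op's totals minus what its cpu_children account for
        print(evt.key, evt.cpu_time_total, evt.self_cpu_time_total, evt.self_cpu_memory_usage)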
659
+ class FunctionEventAvg(FormattedTimesMixin):
660
+ """Used to average stats over multiple FunctionEvent objects."""
661
+
662
+ def __init__(self):
663
+ self.key: Optional[str] = None
664
+ self.count: int = 0
665
+ self.node_id: int = 0
666
+ self.is_async: bool = False
667
+ self.is_remote: bool = False
668
+ self.use_device: Optional[str] = None
669
+ self.cpu_time_total: int = 0
670
+ self.cuda_time_total: int = 0
671
+ self.privateuse1_time_total: int = 0
672
+ self.self_cpu_time_total: int = 0
673
+ self.self_cuda_time_total: int = 0
674
+ self.self_privateuse1_time_total: int = 0
675
+ self.input_shapes: Optional[List[List[int]]] = None
676
+ self.stack: Optional[List] = None
677
+ self.scope: Optional[int] = None
678
+ self.cpu_memory_usage: int = 0
679
+ self.cuda_memory_usage: int = 0
680
+ self.privateuse1_memory_usage: int = 0
681
+ self.self_cpu_memory_usage: int = 0
682
+ self.self_cuda_memory_usage: int = 0
683
+ self.self_privateuse1_memory_usage: int = 0
684
+ self.cpu_children: Optional[List[FunctionEvent]] = None
685
+ self.cpu_parent: Optional[FunctionEvent] = None
686
+ self.device_type: DeviceType = DeviceType.CPU
687
+ self.is_legacy: bool = False
688
+ self.flops: int = 0
689
+
690
+ def add(self, other):
691
+ if self.key is None:
692
+ # First function being recorded as part of FunctionEventAvg, propagate
693
+ # fields.
694
+ self.key = other.key
695
+ self.node_id = other.node_id
696
+ self.is_async = other.is_async
697
+ self.is_remote = other.is_remote
698
+ self.cpu_parent = other.cpu_parent
699
+ self.cpu_children = other.cpu_children
700
+
701
+ self.input_shapes = other.input_shapes
702
+ self.stack = other.stack
703
+ self.scope = other.scope
704
+ self.device_type = other.device_type
705
+ self.is_legacy = other.is_legacy
706
+ self.use_device = other.use_device
707
+
708
+ assert isinstance(other, (FunctionEvent, FunctionEventAvg))
709
+ assert other.key == self.key
710
+ self.cpu_time_total += other.cpu_time_total
711
+ self.cuda_time_total += other.cuda_time_total
712
+ self.privateuse1_time_total += other.privateuse1_time_total
713
+ self.self_cpu_time_total += other.self_cpu_time_total
714
+ self.self_cuda_time_total += other.self_cuda_time_total
715
+ self.self_privateuse1_time_total += other.self_privateuse1_time_total
716
+ self.cpu_memory_usage += other.cpu_memory_usage
717
+ self.cuda_memory_usage += other.cuda_memory_usage
718
+ self.privateuse1_memory_usage += other.privateuse1_memory_usage
719
+ self.self_cpu_memory_usage += other.self_cpu_memory_usage
720
+ self.self_cuda_memory_usage += other.self_cuda_memory_usage
721
+ self.self_privateuse1_memory_usage += other.self_privateuse1_memory_usage
722
+ self.count += other.count
723
+ if self.flops is None:
724
+ self.flops = other.flops
725
+ elif other.flops is not None:
726
+ self.flops += other.flops
727
+ return self
728
+
729
+ def __iadd__(self, other):
730
+ return self.add(other)
731
+
732
+ def __repr__(self):
733
+ device_name = "cuda" if not self.use_device else self.use_device
734
+ self_device_time = (
735
+ self.self_cuda_time_total_str
736
+ if not self.use_device
737
+ else self.self_privateuse1_time_total_str
738
+ )
739
+ device_time = (
740
+ self.cuda_time_str if not self.use_device else self.privateuse1_time_str
741
+ )
742
+ device_memory = (
743
+ self.cuda_memory_usage
744
+ if not self.use_device
745
+ else self.privateuse1_memory_usage
746
+ )
747
+ return (
748
+ "<FunctionEventAvg key={} self_cpu_time={} cpu_time={} "
749
+ " self_{}_time={} {}_time={} input_shapes={} "
750
+ "cpu_memory_usage={} {}_memory_usage={}>".format(
751
+ self.key,
752
+ self.self_cpu_time_total_str,
753
+ self.cpu_time_str,
754
+ device_name,
755
+ self_device_time,
756
+ device_name,
757
+ device_time,
758
+ str(self.input_shapes),
759
+ self.cpu_memory_usage,
760
+ device_name,
761
+ device_memory,
762
+ )
763
+ )
764
+
765
+
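FunctionEventAvg is what aggregation helpers such as key_averages() yield; each entry accumulates totals via add()/__iadd__ and tracks the call count. A hedged usage sketch, reusing the `prof` object from the previous sketch:

    averages = prof.key_averages()           # EventList of FunctionEventAvg entries
    for avg in sorted(averages, key=lambda a: a.self_cpu_time_total, reverse=True)[:5]:
        print(avg.key, avg.count, avg.cpu_time_str, avg.self_cpu_time_total_str)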
766
+ class StringTable(defaultdict):
767
+ def __missing__(self, key):
768
+ # manage cases like 't' (demangled to 'unsigned short') separately,
769
+ # for now simply check the length to avoid unexpected results for
770
+ # the short sequences
771
+ self[key] = torch._C._demangle(key) if len(key) > 1 else key
772
+ return self[key]
773
+
774
+
775
+ class MemRecordsAcc:
776
+ """Acceleration structure for accessing mem_records in interval."""
777
+
778
+ def __init__(self, mem_records):
779
+ self._mem_records = mem_records
780
+ self._start_uses: List[int] = []
781
+ self._indices: List[int] = []
782
+ if len(mem_records) > 0:
783
+ tmp = sorted([(r[0].start_us(), i) for i, r in enumerate(mem_records)])
784
+ self._start_uses, self._indices = zip(*tmp) # type: ignore[assignment]
785
+
786
+ def in_interval(self, start_us, end_us):
787
+ r"""
788
+ Return all records in the given interval
789
+ """
790
+ start_idx = bisect.bisect_left(self._start_uses, start_us)
791
+ end_idx = bisect.bisect_right(self._start_uses, end_us)
792
+ for i in range(start_idx, end_idx):
793
+ yield self._mem_records[self._indices[i]]
794
+
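The class above simply keeps the records' start timestamps sorted so that an interval query reduces to two bisections. A toy illustration of the same idea with made-up timestamps:

    import bisect

    start_uses = [10, 25, 40, 55, 70]          # sorted start times in microseconds (made up)
    lo = bisect.bisect_left(start_uses, 20)    # first record starting at or after 20
    hi = bisect.bisect_right(start_uses, 60)   # one past the last record starting at or before 60
    print(start_uses[lo:hi])                   # -> [25, 40, 55]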
795
+
796
+ def _filter_stack_entry(entry):
797
+ filtered_entries = [
798
+ ("autograd/__init__", "_make_grads"),
799
+ ("autograd/__init__", "backward"),
800
+ ("torch/tensor", "backward"),
801
+ ("_internal/common_utils", "prof_callable"),
802
+ ("_internal/common_utils", "prof_func_call"),
803
+ ("_internal/common_utils", "prof_meth_call"),
804
+ ]
805
+ return all(not (f[0] in entry and f[1] in entry) for f in filtered_entries)
806
+
807
+
808
+ MEMORY_EVENT_NAME = "[memory]"
809
+ OUT_OF_MEMORY_EVENT_NAME = "[OutOfMemory]"
810
+
811
+
812
+ def _filter_name(name):
813
+ # ignoring the following utility ops
814
+ filtered_out_names = [
815
+ MEMORY_EVENT_NAME, # used only for the top-level memory events
816
+ OUT_OF_MEMORY_EVENT_NAME,
817
+ "profiler::_record_function_enter",
818
+ "profiler::_record_function_enter_new",
819
+ "profiler::_record_function_exit",
820
+ "aten::is_leaf",
821
+ "aten::output_nr",
822
+ "aten::_version",
823
+ ]
824
+ return name in filtered_out_names
825
+
826
+
827
+ # Demangles and optionally rewrites the provided event name,
828
+ # with_wildcard - whether to replace certain numbered event names
829
+ # with a wildcard name to aggregate them together in the profiler table
830
+ # output
831
+ def _rewrite_name(name, with_wildcard=False):
832
+ string_table = StringTable()
833
+ name = string_table[name]
834
+ if with_wildcard:
835
+ if name.startswith("ProfilerStep#"):
836
+ name = "ProfilerStep*"
837
+ return name
838
+
839
+
840
+ def _build_table(
841
+ events,
842
+ sort_by=None,
843
+ header=None,
844
+ row_limit=100,
845
+ max_src_column_width=75,
846
+ max_name_column_width=55,
847
+ max_shapes_column_width=80,
848
+ with_flops=False,
849
+ profile_memory=False,
850
+ top_level_events_only=False,
851
+ ):
852
+ """Print a summary of events (which can be a list of FunctionEvent or FunctionEventAvg)."""
853
+ if len(events) == 0:
854
+ return ""
855
+
856
+ has_cuda_time = any(event.self_cuda_time_total > 0 for event in events)
857
+ has_cuda_mem = any(event.self_cuda_memory_usage > 0 for event in events)
858
+ has_privateuse1_time = any(
859
+ event.self_privateuse1_time_total > 0 for event in events
860
+ )
861
+ has_privateuse1_mem = any(
862
+ event.self_privateuse1_memory_usage > 0 for event in events
863
+ )
864
+ use_device = events[0].use_device
865
+ if not use_device and (has_privateuse1_mem or has_privateuse1_time):
866
+ raise RuntimeError(
867
+ "use_device is None, but there is private device performance data."
868
+ )
869
+
870
+ has_input_shapes = any(
871
+ (event.input_shapes is not None and len(event.input_shapes) > 0)
872
+ for event in events
873
+ )
874
+
875
+ if sort_by is not None:
876
+ events = EventList(
877
+ sorted(events, key=lambda evt: getattr(evt, sort_by), reverse=True),
878
+ use_cuda=has_cuda_time,
879
+ use_device=use_device,
880
+ profile_memory=profile_memory,
881
+ with_flops=with_flops,
882
+ )
883
+
884
+ name_column_width = max([len(evt.key) for evt in events]) + 4
885
+ if max_name_column_width is not None:
886
+ name_column_width = min(name_column_width, max_name_column_width)
887
+
888
+ shapes_column_width = max([len(str(evt.input_shapes)) for evt in events]) + 4
889
+ if max_shapes_column_width is not None:
890
+ shapes_column_width = min(shapes_column_width, max_shapes_column_width)
891
+
892
+ DEFAULT_COLUMN_WIDTH = 12
893
+ flops_column_width = DEFAULT_COLUMN_WIDTH
894
+
895
+ src_column_width = None
896
+ stacks = []
897
+ for evt in events:
898
+ if evt.stack is not None and len(evt.stack) > 0:
899
+ stacks.append(evt.stack)
900
+ has_stack = len(stacks) > 0
901
+ if has_stack:
902
+ src_column_width = (
903
+ max([max([len(entry) for entry in stack]) for stack in stacks]) + 4
904
+ )
905
+ if max_src_column_width is not None:
906
+ src_column_width = min(src_column_width, max_src_column_width)
907
+
908
+ headers = [
909
+ "Name",
910
+ "Self CPU %",
911
+ "Self CPU",
912
+ "CPU total %",
913
+ "CPU total",
914
+ "CPU time avg",
915
+ ]
916
+ if has_cuda_time:
917
+ headers.extend(
918
+ [
919
+ "Self CUDA",
920
+ "Self CUDA %",
921
+ "CUDA total",
922
+ "CUDA time avg",
923
+ ]
924
+ )
925
+ if has_privateuse1_time:
926
+ privateuse1 = use_device.upper()
927
+ headers.extend(
928
+ [
929
+ f"Self {privateuse1}",
930
+ f"Self {privateuse1} %",
931
+ f"{privateuse1} total",
932
+ f"{privateuse1} time avg",
933
+ ]
934
+ )
935
+ if profile_memory:
936
+ headers.extend(
937
+ [
938
+ "CPU Mem",
939
+ "Self CPU Mem",
940
+ ]
941
+ )
942
+ if has_cuda_mem:
943
+ headers.extend(
944
+ [
945
+ "CUDA Mem",
946
+ "Self CUDA Mem",
947
+ ]
948
+ )
949
+ if has_privateuse1_mem:
950
+ privateuse1 = use_device.upper()
951
+ headers.extend(
952
+ [
953
+ f"{privateuse1} Mem",
954
+ f"Self {privateuse1} Mem",
955
+ ]
956
+ )
957
+ headers.append("# of Calls")
958
+ # Only append Node ID if any event has a valid (>= 0) Node ID
959
+ append_node_id = any(evt.node_id != -1 for evt in events)
960
+ if append_node_id:
961
+ headers.append("Node ID")
962
+
963
+ # Have to use a list because nonlocal is Py3 only...
964
+ SPACING_SIZE = 2
965
+ row_format_lst = [""]
966
+ header_sep_lst = [""]
967
+ line_length_lst = [-SPACING_SIZE]
968
+ MAX_STACK_ENTRY = 5
969
+
970
+ def add_column(padding, text_dir=">"):
971
+ row_format_lst[0] += (
972
+ "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE)
973
+ )
974
+ header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE)
975
+ line_length_lst[0] += padding + SPACING_SIZE
976
+
977
+ def auto_scale_flops(flops):
978
+ flop_headers = [
979
+ "FLOPs",
980
+ "KFLOPs",
981
+ "MFLOPs",
982
+ "GFLOPs",
983
+ "TFLOPs",
984
+ "PFLOPs",
985
+ ]
986
+ assert flops > 0
987
+ log_flops = max(0, min(math.log10(flops) / 3, float(len(flop_headers) - 1)))
988
+ assert log_flops >= 0 and log_flops < len(flop_headers)
989
+ return (pow(10, (math.floor(log_flops) * -3.0)), flop_headers[int(log_flops)])
990
+
991
+ add_column(name_column_width)
992
+ for _ in headers[1:]:
993
+ add_column(DEFAULT_COLUMN_WIDTH)
994
+
995
+ if has_input_shapes:
996
+ headers.append("Input Shapes")
997
+ add_column(shapes_column_width)
998
+
999
+ if has_stack:
1000
+ headers.append("Source Location")
1001
+ add_column(src_column_width, text_dir="<")
1002
+
1003
+ if with_flops:
1004
+ # Auto-scaling of flops header
1005
+ raw_flops = []
1006
+ for evt in events:
1007
+ if evt.flops > 0:
1008
+ raw_flops.append(evt.flops)
1009
+ if len(raw_flops) != 0:
1010
+ (flops_scale, flops_header) = auto_scale_flops(min(raw_flops))
1011
+ headers.append(f"Total {flops_header}")
1012
+ add_column(flops_column_width)
1013
+ else:
1014
+ with_flops = False # can't find any valid flops
1015
+
1016
+ row_format = row_format_lst[0]
1017
+ header_sep = header_sep_lst[0]
1018
+ line_length = line_length_lst[0]
1019
+ add_column = None # type: ignore[assignment]
1020
+
1021
+ # Have to use a list because nonlocal is Py3 only...
1022
+ result = []
1023
+
1024
+ def append(s):
1025
+ result.append(s)
1026
+ result.append("\n") # Yes, newline after the end as well
1027
+
1028
+ sum_self_cpu_time_total = sum([event.self_cpu_time_total for event in events])
1029
+ sum_self_cuda_time_total = 0
1030
+ sum_self_privateuse1_time_total = 0
1031
+ for evt in events:
1032
+ if evt.device_type == DeviceType.CPU:
1033
+ # in legacy profiler, kernel info is stored in cpu events
1034
+ if evt.is_legacy:
1035
+ if not use_device:
1036
+ sum_self_cuda_time_total += evt.self_cuda_time_total
1037
+ else:
1038
+ sum_self_privateuse1_time_total += evt.self_privateuse1_time_total
1039
+ elif evt.device_type == DeviceType.CUDA:
1040
+ # in the kineto profiler, there are events with the correct device type (e.g. CUDA)
1041
+ sum_self_cuda_time_total += evt.self_cuda_time_total
1042
+ elif evt.device_type == DeviceType.PrivateUse1:
1043
+ sum_self_privateuse1_time_total += evt.self_privateuse1_time_total
1044
+
1045
+ # Actual printing
1046
+ if header is not None:
1047
+ append("=" * line_length)
1048
+ append(header)
1049
+ if top_level_events_only:
1050
+ append("=" * line_length)
1051
+ append("This report only displays top-level ops statistics")
1052
+ append(header_sep)
1053
+ append(row_format.format(*headers))
1054
+
1055
+ append(header_sep)
1056
+
1057
+ def trim_path(path, src_column_width):
1058
+ if len(path) > src_column_width:
1059
+ offset = len(path) - src_column_width
1060
+ path = path[offset:]
1061
+ if len(path) > 3:
1062
+ path = "..." + path[3:]
1063
+ return path
1064
+
1065
+ event_limit = 0
1066
+ for evt in events:
1067
+ if event_limit == row_limit:
1068
+ break
1069
+ if top_level_events_only and evt.cpu_parent is not None:
1070
+ continue
1071
+ else:
1072
+ event_limit += 1
1073
+ name = evt.key
1074
+ if max_name_column_width is not None and len(name) >= max_name_column_width - 3:
1075
+ name = name[: (max_name_column_width - 3)] + "..."
1076
+ row_values = [
1077
+ name,
1078
+ # Self CPU total %, 0 for async events.
1079
+ _format_time_share(evt.self_cpu_time_total, sum_self_cpu_time_total),
1080
+ evt.self_cpu_time_total_str, # Self CPU total
1081
+ # CPU total %, 0 for async events.
1082
+ _format_time_share(evt.cpu_time_total, sum_self_cpu_time_total)
1083
+ if not evt.is_async
1084
+ else 0,
1085
+ evt.cpu_time_total_str, # CPU total
1086
+ evt.cpu_time_str, # CPU time avg
1087
+ ]
1088
+ if has_cuda_time:
1089
+ row_values.extend(
1090
+ [
1091
+ evt.self_cuda_time_total_str,
1092
+ # CUDA time total %
1093
+ _format_time_share(
1094
+ evt.self_cuda_time_total, sum_self_cuda_time_total
1095
+ ),
1096
+ evt.cuda_time_total_str,
1097
+ evt.cuda_time_str, # Cuda time avg
1098
+ ]
1099
+ )
1100
+ if has_privateuse1_time:
1101
+ row_values.extend(
1102
+ [
1103
+ evt.self_privateuse1_time_total_str,
1104
+ # PrivateUse1 time total %
1105
+ _format_time_share(
1106
+ evt.self_privateuse1_time_total, sum_self_privateuse1_time_total
1107
+ ),
1108
+ evt.privateuse1_time_total_str,
1109
+ evt.privateuse1_time_str, # PrivateUse1 time avg
1110
+ ]
1111
+ )
1112
+ if profile_memory:
1113
+ row_values.extend(
1114
+ [
1115
+ # CPU Mem Total
1116
+ _format_memory(evt.cpu_memory_usage),
1117
+ # Self CPU Mem Total
1118
+ _format_memory(evt.self_cpu_memory_usage),
1119
+ ]
1120
+ )
1121
+ if has_cuda_mem:
1122
+ row_values.extend(
1123
+ [
1124
+ # CUDA Mem Total
1125
+ _format_memory(evt.cuda_memory_usage),
1126
+ # Self CUDA Mem Total
1127
+ _format_memory(evt.self_cuda_memory_usage),
1128
+ ]
1129
+ )
1130
+ if has_privateuse1_mem:
1131
+ row_values.extend(
1132
+ [
1133
+ # PrivateUse1 Mem Total
1134
+ _format_memory(evt.privateuse1_memory_usage),
1135
+ # Self PrivateUse1 Mem Total
1136
+ _format_memory(evt.self_privateuse1_memory_usage),
1137
+ ]
1138
+ )
1139
+ row_values.append(
1140
+ evt.count, # Number of calls
1141
+ )
1142
+
1143
+ if append_node_id:
1144
+ row_values.append(evt.node_id)
1145
+ if has_input_shapes:
1146
+ row_values.append(str(evt.input_shapes)[:shapes_column_width])
1147
+ if with_flops:
1148
+ if evt.flops <= 0:
1149
+ row_values.append("--")
1150
+ else:
1151
+ row_values.append(f"{evt.flops * flops_scale:8.3f}") # type: ignore[possibly-undefined]
1152
+ if has_stack:
1153
+ src_field = ""
1154
+ if len(evt.stack) > 0:
1155
+ src_field = trim_path(evt.stack[0], src_column_width)
1156
+ row_values.append(src_field)
1157
+ append(row_format.format(*row_values))
1158
+
1159
+ if has_stack:
1160
+ empty_headers = [""] * (len(headers) - 1)
1161
+ for entry in evt.stack[1:MAX_STACK_ENTRY]:
1162
+ append(
1163
+ row_format.format(
1164
+ *(empty_headers + [trim_path(entry, src_column_width)])
1165
+ )
1166
+ )
1167
+ empty_headers.append("")
1168
+ append(row_format.format(*empty_headers))
1169
+
1170
+ append(header_sep)
1171
+ append(f"Self CPU time total: {_format_time(sum_self_cpu_time_total)}")
1172
+ if has_cuda_time:
1173
+ append(f"Self CUDA time total: {_format_time(sum_self_cuda_time_total)}")
1174
+ if has_privateuse1_time:
1175
+ append(
1176
+ f"Self {use_device.upper()} time total: {_format_time(sum_self_privateuse1_time_total)}"
1177
+ )
1178
+ return "".join(result)
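_build_table is what ultimately renders the profiler summary; in user code it is normally reached through EventList.table(). A sketch, again reusing the `prof` object from the earlier sketches:

    print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10))
    # with profile_memory=True, the CPU Mem / Self CPU Mem columns appear as well
    print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10))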
venv/lib/python3.10/site-packages/torch/autograd/variable.py ADDED
@@ -0,0 +1,14 @@
1
+ import torch
2
+ from torch._C import _ImperativeEngine as ImperativeEngine
3
+
4
+
5
+ __all__ = ["VariableMeta", "Variable"]
6
+
7
+
8
+ class VariableMeta(type):
9
+ def __instancecheck__(cls, other):
10
+ return isinstance(other, torch.Tensor)
11
+
12
+
13
+ class Variable(torch._C._LegacyVariableBase, metaclass=VariableMeta): # type: ignore[misc]
14
+ _execution_engine = ImperativeEngine()
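Variable survives only for backward compatibility; because of VariableMeta.__instancecheck__, any Tensor passes an isinstance check against it. A quick sketch:

    import torch
    from torch.autograd import Variable

    t = torch.zeros(3)
    print(isinstance(t, Variable))   # True: Variable and Tensor have long been merged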
venv/lib/python3.10/site-packages/torch/distributions/__init__.py ADDED
@@ -0,0 +1,171 @@
1
+ r"""
2
+ The ``distributions`` package contains parameterizable probability distributions
3
+ and sampling functions. This allows the construction of stochastic computation
4
+ graphs and stochastic gradient estimators for optimization. This package
5
+ generally follows the design of the `TensorFlow Distributions`_ package.
6
+
7
+ .. _`TensorFlow Distributions`:
8
+ https://arxiv.org/abs/1711.10604
9
+
10
+ It is not possible to directly backpropagate through random samples. However,
11
+ there are two main methods for creating surrogate functions that can be
12
+ backpropagated through. These are the score function estimator/likelihood ratio
13
+ estimator/REINFORCE and the pathwise derivative estimator. REINFORCE is commonly
14
+ seen as the basis for policy gradient methods in reinforcement learning, and the
15
+ pathwise derivative estimator is commonly seen in the reparameterization trick
16
+ in variational autoencoders. Whilst the score function only requires the value
17
+ of samples :math:`f(x)`, the pathwise derivative requires the derivative
18
+ :math:`f'(x)`. The next sections discuss these two in a reinforcement learning
19
+ example. For more details see
20
+ `Gradient Estimation Using Stochastic Computation Graphs`_ .
21
+
22
+ .. _`Gradient Estimation Using Stochastic Computation Graphs`:
23
+ https://arxiv.org/abs/1506.05254
24
+
25
+ Score function
26
+ ^^^^^^^^^^^^^^
27
+
28
+ When the probability density function is differentiable with respect to its
29
+ parameters, we only need :meth:`~torch.distributions.Distribution.sample` and
30
+ :meth:`~torch.distributions.Distribution.log_prob` to implement REINFORCE:
31
+
32
+ .. math::
33
+
34
+ \Delta\theta = \alpha r \frac{\partial\log p(a|\pi^\theta(s))}{\partial\theta}
35
+
36
+ where :math:`\theta` are the parameters, :math:`\alpha` is the learning rate,
37
+ :math:`r` is the reward and :math:`p(a|\pi^\theta(s))` is the probability of
38
+ taking action :math:`a` in state :math:`s` given policy :math:`\pi^\theta`.
39
+
40
+ In practice we would sample an action from the output of a network, apply this
41
+ action in an environment, and then use ``log_prob`` to construct an equivalent
42
+ loss function. Note that we use a negative because optimizers use gradient
43
+ descent, whilst the rule above assumes gradient ascent. With a categorical
44
+ policy, the code for implementing REINFORCE would be as follows::
45
+
46
+ probs = policy_network(state)
47
+ # Note that this is equivalent to what used to be called multinomial
48
+ m = Categorical(probs)
49
+ action = m.sample()
50
+ next_state, reward = env.step(action)
51
+ loss = -m.log_prob(action) * reward
52
+ loss.backward()
53
+
54
+ Pathwise derivative
55
+ ^^^^^^^^^^^^^^^^^^^
56
+
57
+ The other way to implement these stochastic/policy gradients would be to use the
58
+ reparameterization trick from the
59
+ :meth:`~torch.distributions.Distribution.rsample` method, where the
60
+ parameterized random variable can be constructed via a parameterized
61
+ deterministic function of a parameter-free random variable. The reparameterized
62
+ sample therefore becomes differentiable. The code for implementing the pathwise
63
+ derivative would be as follows::
64
+
65
+ params = policy_network(state)
66
+ m = Normal(*params)
67
+ # Any distribution with .has_rsample == True could work based on the application
68
+ action = m.rsample()
69
+ next_state, reward = env.step(action) # Assuming that reward is differentiable
70
+ loss = -reward
71
+ loss.backward()
72
+ """
73
+
74
+ from .bernoulli import Bernoulli
75
+ from .beta import Beta
76
+ from .binomial import Binomial
77
+ from .categorical import Categorical
78
+ from .cauchy import Cauchy
79
+ from .chi2 import Chi2
80
+ from .constraint_registry import biject_to, transform_to
81
+ from .continuous_bernoulli import ContinuousBernoulli
82
+ from .dirichlet import Dirichlet
83
+ from .distribution import Distribution
84
+ from .exp_family import ExponentialFamily
85
+ from .exponential import Exponential
86
+ from .fishersnedecor import FisherSnedecor
87
+ from .gamma import Gamma
88
+ from .geometric import Geometric
89
+ from .gumbel import Gumbel
90
+ from .half_cauchy import HalfCauchy
91
+ from .half_normal import HalfNormal
92
+ from .independent import Independent
93
+ from .inverse_gamma import InverseGamma
94
+ from .kl import _add_kl_info, kl_divergence, register_kl
95
+ from .kumaraswamy import Kumaraswamy
96
+ from .laplace import Laplace
97
+ from .lkj_cholesky import LKJCholesky
98
+ from .log_normal import LogNormal
99
+ from .logistic_normal import LogisticNormal
100
+ from .lowrank_multivariate_normal import LowRankMultivariateNormal
101
+ from .mixture_same_family import MixtureSameFamily
102
+ from .multinomial import Multinomial
103
+ from .multivariate_normal import MultivariateNormal
104
+ from .negative_binomial import NegativeBinomial
105
+ from .normal import Normal
106
+ from .one_hot_categorical import OneHotCategorical, OneHotCategoricalStraightThrough
107
+ from .pareto import Pareto
108
+ from .poisson import Poisson
109
+ from .relaxed_bernoulli import RelaxedBernoulli
110
+ from .relaxed_categorical import RelaxedOneHotCategorical
111
+ from .studentT import StudentT
112
+ from .transformed_distribution import TransformedDistribution
113
+ from .transforms import * # noqa: F403
114
+ from . import transforms
115
+ from .uniform import Uniform
116
+ from .von_mises import VonMises
117
+ from .weibull import Weibull
118
+ from .wishart import Wishart
119
+
120
+ _add_kl_info()
121
+ del _add_kl_info
122
+
123
+ __all__ = [
124
+ "Bernoulli",
125
+ "Beta",
126
+ "Binomial",
127
+ "Categorical",
128
+ "Cauchy",
129
+ "Chi2",
130
+ "ContinuousBernoulli",
131
+ "Dirichlet",
132
+ "Distribution",
133
+ "Exponential",
134
+ "ExponentialFamily",
135
+ "FisherSnedecor",
136
+ "Gamma",
137
+ "Geometric",
138
+ "Gumbel",
139
+ "HalfCauchy",
140
+ "HalfNormal",
141
+ "Independent",
142
+ "InverseGamma",
143
+ "Kumaraswamy",
144
+ "LKJCholesky",
145
+ "Laplace",
146
+ "LogNormal",
147
+ "LogisticNormal",
148
+ "LowRankMultivariateNormal",
149
+ "MixtureSameFamily",
150
+ "Multinomial",
151
+ "MultivariateNormal",
152
+ "NegativeBinomial",
153
+ "Normal",
154
+ "OneHotCategorical",
155
+ "OneHotCategoricalStraightThrough",
156
+ "Pareto",
157
+ "RelaxedBernoulli",
158
+ "RelaxedOneHotCategorical",
159
+ "StudentT",
160
+ "Poisson",
161
+ "Uniform",
162
+ "VonMises",
163
+ "Weibull",
164
+ "Wishart",
165
+ "TransformedDistribution",
166
+ "biject_to",
167
+ "kl_divergence",
168
+ "register_kl",
169
+ "transform_to",
170
+ ]
171
+ __all__.extend(transforms.__all__)
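Beyond the distribution classes, the package re-exports helpers such as kl_divergence and register_kl. A small sketch using the analytic KL registrations imported above (parameter values are arbitrary):

    import torch
    from torch.distributions import Normal, kl_divergence

    p = Normal(torch.zeros(1), torch.ones(1))
    q = Normal(torch.ones(1), torch.full((1,), 2.0))
    print(kl_divergence(p, q))   # closed-form KL looked up via the register_kl registry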
venv/lib/python3.10/site-packages/torch/distributions/bernoulli.py ADDED
@@ -0,0 +1,130 @@
1
+ from numbers import Number
2
+
3
+ import torch
4
+ from torch import nan
5
+ from torch.distributions import constraints
6
+ from torch.distributions.exp_family import ExponentialFamily
7
+ from torch.distributions.utils import (
8
+ broadcast_all,
9
+ lazy_property,
10
+ logits_to_probs,
11
+ probs_to_logits,
12
+ )
13
+ from torch.nn.functional import binary_cross_entropy_with_logits
14
+
15
+ __all__ = ["Bernoulli"]
16
+
17
+
18
+ class Bernoulli(ExponentialFamily):
19
+ r"""
20
+ Creates a Bernoulli distribution parameterized by :attr:`probs`
21
+ or :attr:`logits` (but not both).
22
+
23
+ Samples are binary (0 or 1). They take the value `1` with probability `p`
24
+ and `0` with probability `1 - p`.
25
+
26
+ Example::
27
+
28
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
29
+ >>> m = Bernoulli(torch.tensor([0.3]))
30
+ >>> m.sample() # 30% chance 1; 70% chance 0
31
+ tensor([ 0.])
32
+
33
+ Args:
34
+ probs (Number, Tensor): the probability of sampling `1`
35
+ logits (Number, Tensor): the log-odds of sampling `1`
36
+ """
37
+ arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real}
38
+ support = constraints.boolean
39
+ has_enumerate_support = True
40
+ _mean_carrier_measure = 0
41
+
42
+ def __init__(self, probs=None, logits=None, validate_args=None):
43
+ if (probs is None) == (logits is None):
44
+ raise ValueError(
45
+ "Either `probs` or `logits` must be specified, but not both."
46
+ )
47
+ if probs is not None:
48
+ is_scalar = isinstance(probs, Number)
49
+ (self.probs,) = broadcast_all(probs)
50
+ else:
51
+ is_scalar = isinstance(logits, Number)
52
+ (self.logits,) = broadcast_all(logits)
53
+ self._param = self.probs if probs is not None else self.logits
54
+ if is_scalar:
55
+ batch_shape = torch.Size()
56
+ else:
57
+ batch_shape = self._param.size()
58
+ super().__init__(batch_shape, validate_args=validate_args)
59
+
60
+ def expand(self, batch_shape, _instance=None):
61
+ new = self._get_checked_instance(Bernoulli, _instance)
62
+ batch_shape = torch.Size(batch_shape)
63
+ if "probs" in self.__dict__:
64
+ new.probs = self.probs.expand(batch_shape)
65
+ new._param = new.probs
66
+ if "logits" in self.__dict__:
67
+ new.logits = self.logits.expand(batch_shape)
68
+ new._param = new.logits
69
+ super(Bernoulli, new).__init__(batch_shape, validate_args=False)
70
+ new._validate_args = self._validate_args
71
+ return new
72
+
73
+ def _new(self, *args, **kwargs):
74
+ return self._param.new(*args, **kwargs)
75
+
76
+ @property
77
+ def mean(self):
78
+ return self.probs
79
+
80
+ @property
81
+ def mode(self):
82
+ mode = (self.probs >= 0.5).to(self.probs)
83
+ mode[self.probs == 0.5] = nan
84
+ return mode
85
+
86
+ @property
87
+ def variance(self):
88
+ return self.probs * (1 - self.probs)
89
+
90
+ @lazy_property
91
+ def logits(self):
92
+ return probs_to_logits(self.probs, is_binary=True)
93
+
94
+ @lazy_property
95
+ def probs(self):
96
+ return logits_to_probs(self.logits, is_binary=True)
97
+
98
+ @property
99
+ def param_shape(self):
100
+ return self._param.size()
101
+
102
+ def sample(self, sample_shape=torch.Size()):
103
+ shape = self._extended_shape(sample_shape)
104
+ with torch.no_grad():
105
+ return torch.bernoulli(self.probs.expand(shape))
106
+
107
+ def log_prob(self, value):
108
+ if self._validate_args:
109
+ self._validate_sample(value)
110
+ logits, value = broadcast_all(self.logits, value)
111
+ return -binary_cross_entropy_with_logits(logits, value, reduction="none")
112
+
113
+ def entropy(self):
114
+ return binary_cross_entropy_with_logits(
115
+ self.logits, self.probs, reduction="none"
116
+ )
117
+
118
+ def enumerate_support(self, expand=True):
119
+ values = torch.arange(2, dtype=self._param.dtype, device=self._param.device)
120
+ values = values.view((-1,) + (1,) * len(self._batch_shape))
121
+ if expand:
122
+ values = values.expand((-1,) + self._batch_shape)
123
+ return values
124
+
125
+ @property
126
+ def _natural_params(self):
127
+ return (torch.logit(self.probs),)
128
+
129
+ def _log_normalizer(self, x):
130
+ return torch.log1p(torch.exp(x))
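A brief usage sketch for the class above (either probs or logits may be given, never both; the values here are arbitrary):

    import torch
    from torch.distributions import Bernoulli

    m = Bernoulli(logits=torch.tensor([0.0, 2.0]))   # p = sigmoid(logits) ~= [0.5, 0.88]
    x = m.sample()
    print(x, m.log_prob(x), m.entropy())
    print(m.enumerate_support())                     # tensor([[0., 0.], [1., 1.]])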
venv/lib/python3.10/site-packages/torch/distributions/binomial.py ADDED
@@ -0,0 +1,165 @@
1
+ import torch
2
+ from torch.distributions import constraints
3
+ from torch.distributions.distribution import Distribution
4
+ from torch.distributions.utils import (
5
+ broadcast_all,
6
+ lazy_property,
7
+ logits_to_probs,
8
+ probs_to_logits,
9
+ )
10
+
11
+ __all__ = ["Binomial"]
12
+
13
+
14
+ def _clamp_by_zero(x):
15
+ # works like clamp(x, min=0) but the gradient at 0 is 0.5 instead of 0
16
+ return (x.clamp(min=0) + x - x.clamp(max=0)) / 2
17
+
18
+
19
+ class Binomial(Distribution):
20
+ r"""
21
+ Creates a Binomial distribution parameterized by :attr:`total_count` and
22
+ either :attr:`probs` or :attr:`logits` (but not both). :attr:`total_count` must be
23
+ broadcastable with :attr:`probs`/:attr:`logits`.
24
+
25
+ Example::
26
+
27
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
28
+ >>> m = Binomial(100, torch.tensor([0 , .2, .8, 1]))
29
+ >>> x = m.sample()
30
+ tensor([ 0., 22., 71., 100.])
31
+
32
+ >>> m = Binomial(torch.tensor([[5.], [10.]]), torch.tensor([0.5, 0.8]))
33
+ >>> x = m.sample()
34
+ tensor([[ 4., 5.],
35
+ [ 7., 6.]])
36
+
37
+ Args:
38
+ total_count (int or Tensor): number of Bernoulli trials
39
+ probs (Tensor): Event probabilities
40
+ logits (Tensor): Event log-odds
41
+ """
42
+ arg_constraints = {
43
+ "total_count": constraints.nonnegative_integer,
44
+ "probs": constraints.unit_interval,
45
+ "logits": constraints.real,
46
+ }
47
+ has_enumerate_support = True
48
+
49
+ def __init__(self, total_count=1, probs=None, logits=None, validate_args=None):
50
+ if (probs is None) == (logits is None):
51
+ raise ValueError(
52
+ "Either `probs` or `logits` must be specified, but not both."
53
+ )
54
+ if probs is not None:
55
+ (
56
+ self.total_count,
57
+ self.probs,
58
+ ) = broadcast_all(total_count, probs)
59
+ self.total_count = self.total_count.type_as(self.probs)
60
+ else:
61
+ (
62
+ self.total_count,
63
+ self.logits,
64
+ ) = broadcast_all(total_count, logits)
65
+ self.total_count = self.total_count.type_as(self.logits)
66
+
67
+ self._param = self.probs if probs is not None else self.logits
68
+ batch_shape = self._param.size()
69
+ super().__init__(batch_shape, validate_args=validate_args)
70
+
71
+ def expand(self, batch_shape, _instance=None):
72
+ new = self._get_checked_instance(Binomial, _instance)
73
+ batch_shape = torch.Size(batch_shape)
74
+ new.total_count = self.total_count.expand(batch_shape)
75
+ if "probs" in self.__dict__:
76
+ new.probs = self.probs.expand(batch_shape)
77
+ new._param = new.probs
78
+ if "logits" in self.__dict__:
79
+ new.logits = self.logits.expand(batch_shape)
80
+ new._param = new.logits
81
+ super(Binomial, new).__init__(batch_shape, validate_args=False)
82
+ new._validate_args = self._validate_args
83
+ return new
84
+
85
+ def _new(self, *args, **kwargs):
86
+ return self._param.new(*args, **kwargs)
87
+
88
+ @constraints.dependent_property(is_discrete=True, event_dim=0)
89
+ def support(self):
90
+ return constraints.integer_interval(0, self.total_count)
91
+
92
+ @property
93
+ def mean(self):
94
+ return self.total_count * self.probs
95
+
96
+ @property
97
+ def mode(self):
98
+ return ((self.total_count + 1) * self.probs).floor().clamp(max=self.total_count)
99
+
100
+ @property
101
+ def variance(self):
102
+ return self.total_count * self.probs * (1 - self.probs)
103
+
104
+ @lazy_property
105
+ def logits(self):
106
+ return probs_to_logits(self.probs, is_binary=True)
107
+
108
+ @lazy_property
109
+ def probs(self):
110
+ return logits_to_probs(self.logits, is_binary=True)
111
+
112
+ @property
113
+ def param_shape(self):
114
+ return self._param.size()
115
+
116
+ def sample(self, sample_shape=torch.Size()):
117
+ shape = self._extended_shape(sample_shape)
118
+ with torch.no_grad():
119
+ return torch.binomial(
120
+ self.total_count.expand(shape), self.probs.expand(shape)
121
+ )
122
+
123
+ def log_prob(self, value):
124
+ if self._validate_args:
125
+ self._validate_sample(value)
126
+ log_factorial_n = torch.lgamma(self.total_count + 1)
127
+ log_factorial_k = torch.lgamma(value + 1)
128
+ log_factorial_nmk = torch.lgamma(self.total_count - value + 1)
129
+ # k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p)
130
+ # (case logit < 0) = k * logit - n * log1p(e^logit)
131
+ # (case logit > 0) = k * logit - n * (log(p) - log(1 - p)) + n * log(p)
132
+ # = k * logit - n * logit - n * log1p(e^-logit)
133
+ # (merge two cases) = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|)
134
+ normalize_term = (
135
+ self.total_count * _clamp_by_zero(self.logits)
136
+ + self.total_count * torch.log1p(torch.exp(-torch.abs(self.logits)))
137
+ - log_factorial_n
138
+ )
139
+ return (
140
+ value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term
141
+ )
142
+
143
+ def entropy(self):
144
+ total_count = int(self.total_count.max())
145
+ if not self.total_count.min() == total_count:
146
+ raise NotImplementedError(
147
+ "Inhomogeneous total count not supported by `entropy`."
148
+ )
149
+
150
+ log_prob = self.log_prob(self.enumerate_support(False))
151
+ return -(torch.exp(log_prob) * log_prob).sum(0)
152
+
153
+ def enumerate_support(self, expand=True):
154
+ total_count = int(self.total_count.max())
155
+ if not self.total_count.min() == total_count:
156
+ raise NotImplementedError(
157
+ "Inhomogeneous total count not supported by `enumerate_support`."
158
+ )
159
+ values = torch.arange(
160
+ 1 + total_count, dtype=self._param.dtype, device=self._param.device
161
+ )
162
+ values = values.view((-1,) + (1,) * len(self._batch_shape))
163
+ if expand:
164
+ values = values.expand((-1,) + self._batch_shape)
165
+ return values
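A small sanity sketch for the log_prob above: for a homogeneous count it should agree with the textbook binomial pmf written out with lgamma (the count and probability below are arbitrary):

    import torch
    from torch.distributions import Binomial

    m = Binomial(total_count=10, probs=torch.tensor(0.3))
    k = torch.tensor(4.0)
    # log C(10, 4) + 4*log(0.3) + 6*log(0.7)
    direct = (
        torch.lgamma(torch.tensor(11.0)) - torch.lgamma(k + 1) - torch.lgamma(11.0 - k)
        + k * torch.log(torch.tensor(0.3)) + (10 - k) * torch.log(torch.tensor(0.7))
    )
    print(m.log_prob(k), direct)   # should match up to floating-point error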
venv/lib/python3.10/site-packages/torch/distributions/continuous_bernoulli.py ADDED
@@ -0,0 +1,235 @@
1
+ import math
2
+ from numbers import Number
3
+
4
+ import torch
5
+ from torch.distributions import constraints
6
+ from torch.distributions.exp_family import ExponentialFamily
7
+ from torch.distributions.utils import (
8
+ broadcast_all,
9
+ clamp_probs,
10
+ lazy_property,
11
+ logits_to_probs,
12
+ probs_to_logits,
13
+ )
14
+ from torch.nn.functional import binary_cross_entropy_with_logits
15
+
16
+ __all__ = ["ContinuousBernoulli"]
17
+
18
+
19
+ class ContinuousBernoulli(ExponentialFamily):
20
+ r"""
21
+ Creates a continuous Bernoulli distribution parameterized by :attr:`probs`
22
+ or :attr:`logits` (but not both).
23
+
24
+ The distribution is supported in [0, 1] and parameterized by 'probs' (in
25
+ (0,1)) or 'logits' (real-valued). Note that, unlike the Bernoulli, 'probs'
26
+ does not correspond to a probability and 'logits' does not correspond to
27
+ log-odds, but the same names are used due to the similarity with the
28
+ Bernoulli. See [1] for more details.
29
+
30
+ Example::
31
+
32
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
33
+ >>> m = ContinuousBernoulli(torch.tensor([0.3]))
34
+ >>> m.sample()
35
+ tensor([ 0.2538])
36
+
37
+ Args:
38
+ probs (Number, Tensor): (0,1) valued parameters
39
+ logits (Number, Tensor): real valued parameters whose sigmoid matches 'probs'
40
+
41
+ [1] The continuous Bernoulli: fixing a pervasive error in variational
42
+ autoencoders, Loaiza-Ganem G and Cunningham JP, NeurIPS 2019.
43
+ https://arxiv.org/abs/1907.06845
44
+ """
45
+ arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real}
46
+ support = constraints.unit_interval
47
+ _mean_carrier_measure = 0
48
+ has_rsample = True
49
+
50
+ def __init__(
51
+ self, probs=None, logits=None, lims=(0.499, 0.501), validate_args=None
52
+ ):
53
+ if (probs is None) == (logits is None):
54
+ raise ValueError(
55
+ "Either `probs` or `logits` must be specified, but not both."
56
+ )
57
+ if probs is not None:
58
+ is_scalar = isinstance(probs, Number)
59
+ (self.probs,) = broadcast_all(probs)
60
+ # validate 'probs' here if necessary as it is later clamped for numerical stability
61
+ # close to 0 and 1 later on; otherwise the clamped 'probs' would always pass
62
+ if validate_args is not None:
63
+ if not self.arg_constraints["probs"].check(self.probs).all():
64
+ raise ValueError("The parameter probs has invalid values")
65
+ self.probs = clamp_probs(self.probs)
66
+ else:
67
+ is_scalar = isinstance(logits, Number)
68
+ (self.logits,) = broadcast_all(logits)
69
+ self._param = self.probs if probs is not None else self.logits
70
+ if is_scalar:
71
+ batch_shape = torch.Size()
72
+ else:
73
+ batch_shape = self._param.size()
74
+ self._lims = lims
75
+ super().__init__(batch_shape, validate_args=validate_args)
76
+
77
+ def expand(self, batch_shape, _instance=None):
78
+ new = self._get_checked_instance(ContinuousBernoulli, _instance)
79
+ new._lims = self._lims
80
+ batch_shape = torch.Size(batch_shape)
81
+ if "probs" in self.__dict__:
82
+ new.probs = self.probs.expand(batch_shape)
83
+ new._param = new.probs
84
+ if "logits" in self.__dict__:
85
+ new.logits = self.logits.expand(batch_shape)
86
+ new._param = new.logits
87
+ super(ContinuousBernoulli, new).__init__(batch_shape, validate_args=False)
88
+ new._validate_args = self._validate_args
89
+ return new
90
+
91
+ def _new(self, *args, **kwargs):
92
+ return self._param.new(*args, **kwargs)
93
+
94
+ def _outside_unstable_region(self):
95
+ return torch.max(
96
+ torch.le(self.probs, self._lims[0]), torch.gt(self.probs, self._lims[1])
97
+ )
98
+
99
+ def _cut_probs(self):
100
+ return torch.where(
101
+ self._outside_unstable_region(),
102
+ self.probs,
103
+ self._lims[0] * torch.ones_like(self.probs),
104
+ )
105
+
106
+ def _cont_bern_log_norm(self):
107
+ """computes the log normalizing constant as a function of the 'probs' parameter"""
108
+ cut_probs = self._cut_probs()
109
+ cut_probs_below_half = torch.where(
110
+ torch.le(cut_probs, 0.5), cut_probs, torch.zeros_like(cut_probs)
111
+ )
112
+ cut_probs_above_half = torch.where(
113
+ torch.ge(cut_probs, 0.5), cut_probs, torch.ones_like(cut_probs)
114
+ )
115
+ log_norm = torch.log(
116
+ torch.abs(torch.log1p(-cut_probs) - torch.log(cut_probs))
117
+ ) - torch.where(
118
+ torch.le(cut_probs, 0.5),
119
+ torch.log1p(-2.0 * cut_probs_below_half),
120
+ torch.log(2.0 * cut_probs_above_half - 1.0),
121
+ )
122
+ x = torch.pow(self.probs - 0.5, 2)
123
+ taylor = math.log(2.0) + (4.0 / 3.0 + 104.0 / 45.0 * x) * x
124
+ return torch.where(self._outside_unstable_region(), log_norm, taylor)
125
+
126
+ @property
127
+ def mean(self):
128
+ cut_probs = self._cut_probs()
129
+ mus = cut_probs / (2.0 * cut_probs - 1.0) + 1.0 / (
130
+ torch.log1p(-cut_probs) - torch.log(cut_probs)
131
+ )
132
+ x = self.probs - 0.5
133
+ taylor = 0.5 + (1.0 / 3.0 + 16.0 / 45.0 * torch.pow(x, 2)) * x
134
+ return torch.where(self._outside_unstable_region(), mus, taylor)
135
+
136
+ @property
137
+ def stddev(self):
138
+ return torch.sqrt(self.variance)
139
+
140
+ @property
141
+ def variance(self):
142
+ cut_probs = self._cut_probs()
143
+ vars = cut_probs * (cut_probs - 1.0) / torch.pow(
144
+ 1.0 - 2.0 * cut_probs, 2
145
+ ) + 1.0 / torch.pow(torch.log1p(-cut_probs) - torch.log(cut_probs), 2)
146
+ x = torch.pow(self.probs - 0.5, 2)
147
+ taylor = 1.0 / 12.0 - (1.0 / 15.0 - 128.0 / 945.0 * x) * x
148
+ return torch.where(self._outside_unstable_region(), vars, taylor)
149
+
150
+ @lazy_property
151
+ def logits(self):
152
+ return probs_to_logits(self.probs, is_binary=True)
153
+
154
+ @lazy_property
155
+ def probs(self):
156
+ return clamp_probs(logits_to_probs(self.logits, is_binary=True))
157
+
158
+ @property
159
+ def param_shape(self):
160
+ return self._param.size()
161
+
162
+ def sample(self, sample_shape=torch.Size()):
163
+ shape = self._extended_shape(sample_shape)
164
+ u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)
165
+ with torch.no_grad():
166
+ return self.icdf(u)
167
+
168
+ def rsample(self, sample_shape=torch.Size()):
169
+ shape = self._extended_shape(sample_shape)
170
+ u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)
171
+ return self.icdf(u)
172
+
173
+ def log_prob(self, value):
174
+ if self._validate_args:
175
+ self._validate_sample(value)
176
+ logits, value = broadcast_all(self.logits, value)
177
+ return (
178
+ -binary_cross_entropy_with_logits(logits, value, reduction="none")
179
+ + self._cont_bern_log_norm()
180
+ )
181
+
182
+ def cdf(self, value):
183
+ if self._validate_args:
184
+ self._validate_sample(value)
185
+ cut_probs = self._cut_probs()
186
+ cdfs = (
187
+ torch.pow(cut_probs, value) * torch.pow(1.0 - cut_probs, 1.0 - value)
188
+ + cut_probs
189
+ - 1.0
190
+ ) / (2.0 * cut_probs - 1.0)
191
+ unbounded_cdfs = torch.where(self._outside_unstable_region(), cdfs, value)
192
+ return torch.where(
193
+ torch.le(value, 0.0),
194
+ torch.zeros_like(value),
195
+ torch.where(torch.ge(value, 1.0), torch.ones_like(value), unbounded_cdfs),
196
+ )
197
+
198
+ def icdf(self, value):
199
+ cut_probs = self._cut_probs()
200
+ return torch.where(
201
+ self._outside_unstable_region(),
202
+ (
203
+ torch.log1p(-cut_probs + value * (2.0 * cut_probs - 1.0))
204
+ - torch.log1p(-cut_probs)
205
+ )
206
+ / (torch.log(cut_probs) - torch.log1p(-cut_probs)),
207
+ value,
208
+ )
209
+
210
+ def entropy(self):
211
+ log_probs0 = torch.log1p(-self.probs)
212
+ log_probs1 = torch.log(self.probs)
213
+ return (
214
+ self.mean * (log_probs0 - log_probs1)
215
+ - self._cont_bern_log_norm()
216
+ - log_probs0
217
+ )
218
+
219
+ @property
220
+ def _natural_params(self):
221
+ return (self.logits,)
222
+
223
+ def _log_normalizer(self, x):
224
+ """computes the log normalizing constant as a function of the natural parameter"""
225
+ out_unst_reg = torch.max(
226
+ torch.le(x, self._lims[0] - 0.5), torch.gt(x, self._lims[1] - 0.5)
227
+ )
228
+ cut_nat_params = torch.where(
229
+ out_unst_reg, x, (self._lims[0] - 0.5) * torch.ones_like(x)
230
+ )
231
+ log_norm = torch.log(torch.abs(torch.exp(cut_nat_params) - 1.0)) - torch.log(
232
+ torch.abs(cut_nat_params)
233
+ )
234
+ taylor = 0.5 * x + torch.pow(x, 2) / 24.0 - torch.pow(x, 4) / 2880.0
235
+ return torch.where(out_unst_reg, log_norm, taylor)
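A short sketch for the class above; unlike Bernoulli it is continuous on [0, 1] and supports reparameterized sampling, so gradients flow back to the parameter (the value of `lam` below is arbitrary):

    import torch
    from torch.distributions import ContinuousBernoulli

    lam = torch.tensor([0.3], requires_grad=True)
    m = ContinuousBernoulli(probs=lam)
    x = m.rsample()                      # pathwise sample via the inverse CDF above
    m.log_prob(x).sum().backward()       # gradients reach lam
    print(x, lam.grad)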
venv/lib/python3.10/site-packages/torch/distributions/dirichlet.py ADDED
@@ -0,0 +1,123 @@
1
+ import torch
2
+ from torch.autograd import Function
3
+ from torch.autograd.function import once_differentiable
4
+ from torch.distributions import constraints
5
+ from torch.distributions.exp_family import ExponentialFamily
6
+
7
+ __all__ = ["Dirichlet"]
8
+
9
+
10
+ # This helper is exposed for testing.
11
+ def _Dirichlet_backward(x, concentration, grad_output):
12
+ total = concentration.sum(-1, True).expand_as(concentration)
13
+ grad = torch._dirichlet_grad(x, concentration, total)
14
+ return grad * (grad_output - (x * grad_output).sum(-1, True))
15
+
16
+
17
+ class _Dirichlet(Function):
18
+ @staticmethod
19
+ def forward(ctx, concentration):
20
+ x = torch._sample_dirichlet(concentration)
21
+ ctx.save_for_backward(x, concentration)
22
+ return x
23
+
24
+ @staticmethod
25
+ @once_differentiable
26
+ def backward(ctx, grad_output):
27
+ x, concentration = ctx.saved_tensors
28
+ return _Dirichlet_backward(x, concentration, grad_output)
29
+
30
+
31
+ class Dirichlet(ExponentialFamily):
32
+ r"""
33
+ Creates a Dirichlet distribution parameterized by concentration :attr:`concentration`.
34
+
35
+ Example::
36
+
37
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
38
+ >>> m = Dirichlet(torch.tensor([0.5, 0.5]))
39
+ >>> m.sample() # Dirichlet distributed with concentration [0.5, 0.5]
40
+ tensor([ 0.1046, 0.8954])
41
+
42
+ Args:
43
+ concentration (Tensor): concentration parameter of the distribution
44
+ (often referred to as alpha)
45
+ """
46
+ arg_constraints = {
47
+ "concentration": constraints.independent(constraints.positive, 1)
48
+ }
49
+ support = constraints.simplex
50
+ has_rsample = True
51
+
52
+ def __init__(self, concentration, validate_args=None):
53
+ if concentration.dim() < 1:
54
+ raise ValueError(
55
+ "`concentration` parameter must be at least one-dimensional."
56
+ )
57
+ self.concentration = concentration
58
+ batch_shape, event_shape = concentration.shape[:-1], concentration.shape[-1:]
59
+ super().__init__(batch_shape, event_shape, validate_args=validate_args)
60
+
61
+ def expand(self, batch_shape, _instance=None):
62
+ new = self._get_checked_instance(Dirichlet, _instance)
63
+ batch_shape = torch.Size(batch_shape)
64
+ new.concentration = self.concentration.expand(batch_shape + self.event_shape)
65
+ super(Dirichlet, new).__init__(
66
+ batch_shape, self.event_shape, validate_args=False
67
+ )
68
+ new._validate_args = self._validate_args
69
+ return new
70
+
71
+ def rsample(self, sample_shape=()):
72
+ shape = self._extended_shape(sample_shape)
73
+ concentration = self.concentration.expand(shape)
74
+ return _Dirichlet.apply(concentration)
75
+
76
+ def log_prob(self, value):
77
+ if self._validate_args:
78
+ self._validate_sample(value)
79
+ return (
80
+ torch.xlogy(self.concentration - 1.0, value).sum(-1)
81
+ + torch.lgamma(self.concentration.sum(-1))
82
+ - torch.lgamma(self.concentration).sum(-1)
83
+ )
84
+
85
+ @property
86
+ def mean(self):
87
+ return self.concentration / self.concentration.sum(-1, True)
88
+
89
+ @property
90
+ def mode(self):
91
+ concentrationm1 = (self.concentration - 1).clamp(min=0.0)
92
+ mode = concentrationm1 / concentrationm1.sum(-1, True)
93
+ mask = (self.concentration < 1).all(axis=-1)
94
+ mode[mask] = torch.nn.functional.one_hot(
95
+ mode[mask].argmax(axis=-1), concentrationm1.shape[-1]
96
+ ).to(mode)
97
+ return mode
98
+
99
+ @property
100
+ def variance(self):
101
+ con0 = self.concentration.sum(-1, True)
102
+ return (
103
+ self.concentration
104
+ * (con0 - self.concentration)
105
+ / (con0.pow(2) * (con0 + 1))
106
+ )
107
+
108
+ def entropy(self):
109
+ k = self.concentration.size(-1)
110
+ a0 = self.concentration.sum(-1)
111
+ return (
112
+ torch.lgamma(self.concentration).sum(-1)
113
+ - torch.lgamma(a0)
114
+ - (k - a0) * torch.digamma(a0)
115
+ - ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1)
116
+ )
117
+
118
+ @property
119
+ def _natural_params(self):
120
+ return (self.concentration,)
121
+
122
+ def _log_normalizer(self, x):
123
+ return x.lgamma().sum(-1) - torch.lgamma(x.sum(-1))
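A quick sketch: samples lie on the simplex, and rsample is differentiable thanks to the custom _Dirichlet backward above (the concentration values are arbitrary):

    import torch
    from torch.distributions import Dirichlet

    alpha = torch.tensor([0.5, 1.0, 2.0], requires_grad=True)
    d = Dirichlet(alpha)
    s = d.rsample()
    print(s, s.sum())                # components are positive and sum to 1
    d.log_prob(s).backward()         # gradient reaches alpha through both rsample and log_prob
    print(alpha.grad)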
venv/lib/python3.10/site-packages/torch/distributions/exp_family.py ADDED
@@ -0,0 +1,62 @@
1
+ import torch
2
+ from torch.distributions.distribution import Distribution
3
+
4
+ __all__ = ["ExponentialFamily"]
5
+
6
+
7
+ class ExponentialFamily(Distribution):
8
+ r"""
9
+ ExponentialFamily is the abstract base class for probability distributions belonging to an
10
+ exponential family, whose probability mass/density function has the form defined below
11
+
12
+ .. math::
13
+
14
+ p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x))
15
+
16
+ where :math:`\theta` denotes the natural parameters, :math:`t(x)` denotes the sufficient statistic,
17
+ :math:`F(\theta)` is the log normalizer function for a given family and :math:`k(x)` is the carrier
18
+ measure.
19
+
20
+ Note:
21
+ This class is an intermediary between the `Distribution` class and distributions which belong
22
+ to an exponential family mainly to check the correctness of the `.entropy()` and analytic KL
23
+ divergence methods. We use this class to compute the entropy and KL divergence using the AD
24
+ framework and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and
25
+ Cross-entropies of Exponential Families).
26
+ """
27
+
28
+ @property
29
+ def _natural_params(self):
30
+ """
31
+ Abstract method for natural parameters. Returns a tuple of Tensors based
32
+ on the distribution
33
+ """
34
+ raise NotImplementedError
35
+
36
+ def _log_normalizer(self, *natural_params):
37
+ """
38
+ Abstract method for log normalizer function. Returns a log normalizer based on
39
+ the distribution and input
40
+ """
41
+ raise NotImplementedError
42
+
43
+ @property
44
+ def _mean_carrier_measure(self):
45
+ """
46
+ Abstract method for expected carrier measure, which is required for computing
47
+ entropy.
48
+ """
49
+ raise NotImplementedError
50
+
51
+ def entropy(self):
52
+ """
53
+ Method to compute the entropy using Bregman divergence of the log normalizer.
54
+ """
55
+ result = -self._mean_carrier_measure
56
+ nparams = [p.detach().requires_grad_() for p in self._natural_params]
57
+ lg_normal = self._log_normalizer(*nparams)
58
+ gradients = torch.autograd.grad(lg_normal.sum(), nparams, create_graph=True)
59
+ result += lg_normal
60
+ for np, g in zip(nparams, gradients):
61
+ result -= (np * g).reshape(self._batch_shape + (-1,)).sum(-1)
62
+ return result
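For reference, entropy() above is the standard exponential-family identity (a sketch of the derivation, in the notation of the docstring): since $\mathbb{E}[t(x)] = \nabla F(\theta)$,

$$ H(p_\theta) = -\mathbb{E}[\log p_\theta(x)] = F(\theta) - \langle \theta, \nabla F(\theta)\rangle - \mathbb{E}[k(x)], $$

which is exactly what the code computes: $F(\theta)$ from _log_normalizer, the inner product from the gradients returned by torch.autograd.grad, and $\mathbb{E}[k(x)]$ from _mean_carrier_measure.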
venv/lib/python3.10/site-packages/torch/distributions/fishersnedecor.py ADDED
@@ -0,0 +1,98 @@
1
+ from numbers import Number
2
+
3
+ import torch
4
+ from torch import nan
5
+ from torch.distributions import constraints
6
+ from torch.distributions.distribution import Distribution
7
+ from torch.distributions.gamma import Gamma
8
+ from torch.distributions.utils import broadcast_all
9
+
10
+ __all__ = ["FisherSnedecor"]
11
+
12
+
13
+ class FisherSnedecor(Distribution):
14
+ r"""
15
+ Creates a Fisher-Snedecor distribution parameterized by :attr:`df1` and :attr:`df2`.
16
+
17
+ Example::
18
+
19
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
20
+ >>> m = FisherSnedecor(torch.tensor([1.0]), torch.tensor([2.0]))
21
+ >>> m.sample() # Fisher-Snedecor-distributed with df1=1 and df2=2
22
+ tensor([ 0.2453])
23
+
24
+ Args:
25
+ df1 (float or Tensor): degrees of freedom parameter 1
26
+ df2 (float or Tensor): degrees of freedom parameter 2
27
+ """
28
+ arg_constraints = {"df1": constraints.positive, "df2": constraints.positive}
29
+ support = constraints.positive
30
+ has_rsample = True
31
+
32
+ def __init__(self, df1, df2, validate_args=None):
33
+ self.df1, self.df2 = broadcast_all(df1, df2)
34
+ self._gamma1 = Gamma(self.df1 * 0.5, self.df1)
35
+ self._gamma2 = Gamma(self.df2 * 0.5, self.df2)
36
+
37
+ if isinstance(df1, Number) and isinstance(df2, Number):
38
+ batch_shape = torch.Size()
39
+ else:
40
+ batch_shape = self.df1.size()
41
+ super().__init__(batch_shape, validate_args=validate_args)
42
+
43
+ def expand(self, batch_shape, _instance=None):
44
+ new = self._get_checked_instance(FisherSnedecor, _instance)
45
+ batch_shape = torch.Size(batch_shape)
46
+ new.df1 = self.df1.expand(batch_shape)
47
+ new.df2 = self.df2.expand(batch_shape)
48
+ new._gamma1 = self._gamma1.expand(batch_shape)
49
+ new._gamma2 = self._gamma2.expand(batch_shape)
50
+ super(FisherSnedecor, new).__init__(batch_shape, validate_args=False)
51
+ new._validate_args = self._validate_args
52
+ return new
53
+
54
+ @property
55
+ def mean(self):
56
+ df2 = self.df2.clone(memory_format=torch.contiguous_format)
57
+ df2[df2 <= 2] = nan
58
+ return df2 / (df2 - 2)
59
+
60
+ @property
61
+ def mode(self):
62
+ mode = (self.df1 - 2) / self.df1 * self.df2 / (self.df2 + 2)
63
+ mode[self.df1 <= 2] = nan
64
+ return mode
65
+
66
+ @property
67
+ def variance(self):
68
+ df2 = self.df2.clone(memory_format=torch.contiguous_format)
69
+ df2[df2 <= 4] = nan
70
+ return (
71
+ 2
72
+ * df2.pow(2)
73
+ * (self.df1 + df2 - 2)
74
+ / (self.df1 * (df2 - 2).pow(2) * (df2 - 4))
75
+ )
76
+
77
+ def rsample(self, sample_shape=torch.Size(())):
78
+ shape = self._extended_shape(sample_shape)
79
+ # X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2)
80
+ # Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
81
+ X1 = self._gamma1.rsample(sample_shape).view(shape)
82
+ X2 = self._gamma2.rsample(sample_shape).view(shape)
83
+ tiny = torch.finfo(X2.dtype).tiny
84
+ X2.clamp_(min=tiny)
85
+ Y = X1 / X2
86
+ Y.clamp_(min=tiny)
87
+ return Y
88
+
89
+ def log_prob(self, value):
90
+ if self._validate_args:
91
+ self._validate_sample(value)
92
+ ct1 = self.df1 * 0.5
93
+ ct2 = self.df2 * 0.5
94
+ ct3 = self.df1 / self.df2
95
+ t1 = (ct1 + ct2).lgamma() - ct1.lgamma() - ct2.lgamma()
96
+ t2 = ct1 * ct3.log() + (ct1 - 1) * torch.log(value)
97
+ t3 = (ct1 + ct2) * torch.log1p(ct3 * value)
98
+ return t1 + t2 - t3
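A small sketch checking the ratio-of-Gammas construction above against the known mean df2 / (df2 - 2) (degrees of freedom chosen arbitrarily):

    import torch
    from torch.distributions import FisherSnedecor

    m = FisherSnedecor(torch.tensor([5.0]), torch.tensor([10.0]))
    samples = m.rsample((20000,))
    print(samples.mean(), m.mean)    # empirical mean should hover near 10 / 8 = 1.25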
venv/lib/python3.10/site-packages/torch/distributions/geometric.py ADDED
@@ -0,0 +1,128 @@
1
+ from numbers import Number
2
+
3
+ import torch
4
+ from torch.distributions import constraints
5
+ from torch.distributions.distribution import Distribution
6
+ from torch.distributions.utils import (
7
+ broadcast_all,
8
+ lazy_property,
9
+ logits_to_probs,
10
+ probs_to_logits,
11
+ )
12
+ from torch.nn.functional import binary_cross_entropy_with_logits
13
+
14
+ __all__ = ["Geometric"]
15
+
16
+
17
+ class Geometric(Distribution):
18
+ r"""
19
+ Creates a Geometric distribution parameterized by :attr:`probs`,
20
+ where :attr:`probs` is the probability of success of Bernoulli trials.
21
+
22
+ .. math::
23
+
24
+ P(X=k) = (1-p)^{k} p, k = 0, 1, ...
25
+
26
+ .. note::
27
+ :func:`torch.distributions.geometric.Geometric` :math:`(k+1)`-th trial is the first success
28
+ hence draws samples in :math:`\{0, 1, \ldots\}`, whereas
29
+ :func:`torch.Tensor.geometric_` `k`-th trial is the first success hence draws samples in :math:`\{1, 2, \ldots\}`.
30
+
31
+ Example::
32
+
33
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
34
+ >>> m = Geometric(torch.tensor([0.3]))
35
+ >>> m.sample() # underlying Bernoulli has 30% chance 1; 70% chance 0
36
+ tensor([ 2.])
37
+
38
+ Args:
39
+ probs (Number, Tensor): the probability of sampling `1`. Must be in range (0, 1]
40
+ logits (Number, Tensor): the log-odds of sampling `1`.
41
+ """
42
+ arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real}
43
+ support = constraints.nonnegative_integer
44
+
45
+ def __init__(self, probs=None, logits=None, validate_args=None):
46
+ if (probs is None) == (logits is None):
47
+ raise ValueError(
48
+ "Either `probs` or `logits` must be specified, but not both."
49
+ )
50
+ if probs is not None:
51
+ (self.probs,) = broadcast_all(probs)
52
+ else:
53
+ (self.logits,) = broadcast_all(logits)
54
+ probs_or_logits = probs if probs is not None else logits
55
+ if isinstance(probs_or_logits, Number):
56
+ batch_shape = torch.Size()
57
+ else:
58
+ batch_shape = probs_or_logits.size()
59
+ super().__init__(batch_shape, validate_args=validate_args)
60
+ if self._validate_args and probs is not None:
61
+ # Add an extra check beyond unit_interval
62
+ value = self.probs
63
+ valid = value > 0
64
+ if not valid.all():
65
+ invalid_value = value.data[~valid]
66
+ raise ValueError(
67
+ "Expected parameter probs "
68
+ f"({type(value).__name__} of shape {tuple(value.shape)}) "
69
+ f"of distribution {repr(self)} "
70
+ f"to be positive but found invalid values:\n{invalid_value}"
71
+ )
72
+
73
+ def expand(self, batch_shape, _instance=None):
74
+ new = self._get_checked_instance(Geometric, _instance)
75
+ batch_shape = torch.Size(batch_shape)
76
+ if "probs" in self.__dict__:
77
+ new.probs = self.probs.expand(batch_shape)
78
+ if "logits" in self.__dict__:
79
+ new.logits = self.logits.expand(batch_shape)
80
+ super(Geometric, new).__init__(batch_shape, validate_args=False)
81
+ new._validate_args = self._validate_args
82
+ return new
83
+
84
+ @property
85
+ def mean(self):
86
+ return 1.0 / self.probs - 1.0
87
+
88
+ @property
89
+ def mode(self):
90
+ return torch.zeros_like(self.probs)
91
+
92
+ @property
93
+ def variance(self):
94
+ return (1.0 / self.probs - 1.0) / self.probs
95
+
96
+ @lazy_property
97
+ def logits(self):
98
+ return probs_to_logits(self.probs, is_binary=True)
99
+
100
+ @lazy_property
101
+ def probs(self):
102
+ return logits_to_probs(self.logits, is_binary=True)
103
+
104
+ def sample(self, sample_shape=torch.Size()):
105
+ shape = self._extended_shape(sample_shape)
106
+ tiny = torch.finfo(self.probs.dtype).tiny
107
+ with torch.no_grad():
108
+ if torch._C._get_tracing_state():
109
+ # [JIT WORKAROUND] lack of support for .uniform_()
110
+ u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)
111
+ u = u.clamp(min=tiny)
112
+ else:
113
+ u = self.probs.new(shape).uniform_(tiny, 1)
114
+ return (u.log() / (-self.probs).log1p()).floor()
115
+
116
+ def log_prob(self, value):
117
+ if self._validate_args:
118
+ self._validate_sample(value)
119
+ value, probs = broadcast_all(value, self.probs)
120
+ probs = probs.clone(memory_format=torch.contiguous_format)
121
+ probs[(probs == 1) & (value == 0)] = 0
122
+ return value * (-probs).log1p() + self.probs.log()
123
+
124
+ def entropy(self):
125
+ return (
126
+ binary_cross_entropy_with_logits(self.logits, self.probs, reduction="none")
127
+ / self.probs
128
+ )
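A short usage sketch of the Geometric class defined above (illustrative values, mirroring the docstring example)::

    import torch
    from torch.distributions import Geometric

    g = Geometric(probs=torch.tensor([0.25]))
    k = g.sample(torch.Size([3]))   # number of failures before the first success
    print(g.log_prob(k))            # log P(X = k) = k * log(1 - p) + log(p)
    print(g.mean)                   # (1 - p) / p = 3.0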
venv/lib/python3.10/site-packages/torch/distributions/gumbel.py ADDED
@@ -0,0 +1,81 @@
1
+ import math
2
+ from numbers import Number
3
+
4
+ import torch
5
+ from torch.distributions import constraints
6
+ from torch.distributions.transformed_distribution import TransformedDistribution
7
+ from torch.distributions.transforms import AffineTransform, ExpTransform
8
+ from torch.distributions.uniform import Uniform
9
+ from torch.distributions.utils import broadcast_all, euler_constant
10
+
11
+ __all__ = ["Gumbel"]
12
+
13
+
14
+ class Gumbel(TransformedDistribution):
15
+ r"""
16
+ Samples from a Gumbel Distribution.
17
+
18
+ Examples::
19
+
20
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
21
+ >>> m = Gumbel(torch.tensor([1.0]), torch.tensor([2.0]))
22
+ >>> m.sample() # sample from Gumbel distribution with loc=1, scale=2
23
+ tensor([ 1.0124])
24
+
25
+ Args:
26
+ loc (float or Tensor): Location parameter of the distribution
27
+ scale (float or Tensor): Scale parameter of the distribution
28
+ """
29
+ arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
30
+ support = constraints.real
31
+
32
+ def __init__(self, loc, scale, validate_args=None):
33
+ self.loc, self.scale = broadcast_all(loc, scale)
34
+ finfo = torch.finfo(self.loc.dtype)
35
+ if isinstance(loc, Number) and isinstance(scale, Number):
36
+ base_dist = Uniform(finfo.tiny, 1 - finfo.eps, validate_args=validate_args)
37
+ else:
38
+ base_dist = Uniform(
39
+ torch.full_like(self.loc, finfo.tiny),
40
+ torch.full_like(self.loc, 1 - finfo.eps),
41
+ validate_args=validate_args,
42
+ )
43
+ transforms = [
44
+ ExpTransform().inv,
45
+ AffineTransform(loc=0, scale=-torch.ones_like(self.scale)),
46
+ ExpTransform().inv,
47
+ AffineTransform(loc=loc, scale=-self.scale),
48
+ ]
49
+ super().__init__(base_dist, transforms, validate_args=validate_args)
50
+
51
+ def expand(self, batch_shape, _instance=None):
52
+ new = self._get_checked_instance(Gumbel, _instance)
53
+ new.loc = self.loc.expand(batch_shape)
54
+ new.scale = self.scale.expand(batch_shape)
55
+ return super().expand(batch_shape, _instance=new)
56
+
57
+ # Explicitly defining the log probability function for Gumbel due to precision issues
58
+ def log_prob(self, value):
59
+ if self._validate_args:
60
+ self._validate_sample(value)
61
+ y = (self.loc - value) / self.scale
62
+ return (y - y.exp()) - self.scale.log()
63
+
64
+ @property
65
+ def mean(self):
66
+ return self.loc + self.scale * euler_constant
67
+
68
+ @property
69
+ def mode(self):
70
+ return self.loc
71
+
72
+ @property
73
+ def stddev(self):
74
+ return (math.pi / math.sqrt(6)) * self.scale
75
+
76
+ @property
77
+ def variance(self):
78
+ return self.stddev.pow(2)
79
+
80
+ def entropy(self):
81
+ return self.scale.log() + (1 + euler_constant)
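A minimal sketch of how the Gumbel class above is used; sampling goes through the transformed Uniform base, while log_prob uses the explicit override added for precision::

    import torch
    from torch.distributions import Gumbel

    m = Gumbel(loc=torch.tensor([1.0]), scale=torch.tensor([2.0]))
    x = m.rsample(torch.Size([4]))   # reparameterized draws
    print(m.log_prob(x))
    print(m.mean, m.stddev)          # loc + scale * euler_constant, pi / sqrt(6) * scale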
venv/lib/python3.10/site-packages/torch/distributions/half_cauchy.py ADDED
@@ -0,0 +1,82 @@
1
+ import math
2
+
3
+ import torch
4
+ from torch import inf
5
+ from torch.distributions import constraints
6
+ from torch.distributions.cauchy import Cauchy
7
+ from torch.distributions.transformed_distribution import TransformedDistribution
8
+ from torch.distributions.transforms import AbsTransform
9
+
10
+ __all__ = ["HalfCauchy"]
11
+
12
+
13
+ class HalfCauchy(TransformedDistribution):
14
+ r"""
15
+ Creates a half-Cauchy distribution parameterized by `scale` where::
16
+
17
+ X ~ Cauchy(0, scale)
18
+ Y = |X| ~ HalfCauchy(scale)
19
+
20
+ Example::
21
+
22
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
23
+ >>> m = HalfCauchy(torch.tensor([1.0]))
24
+ >>> m.sample() # half-cauchy distributed with scale=1
25
+ tensor([ 2.3214])
26
+
27
+ Args:
28
+ scale (float or Tensor): scale of the full Cauchy distribution
29
+ """
30
+ arg_constraints = {"scale": constraints.positive}
31
+ support = constraints.nonnegative
32
+ has_rsample = True
33
+
34
+ def __init__(self, scale, validate_args=None):
35
+ base_dist = Cauchy(0, scale, validate_args=False)
36
+ super().__init__(base_dist, AbsTransform(), validate_args=validate_args)
37
+
38
+ def expand(self, batch_shape, _instance=None):
39
+ new = self._get_checked_instance(HalfCauchy, _instance)
40
+ return super().expand(batch_shape, _instance=new)
41
+
42
+ @property
43
+ def scale(self):
44
+ return self.base_dist.scale
45
+
46
+ @property
47
+ def mean(self):
48
+ return torch.full(
49
+ self._extended_shape(),
50
+ math.inf,
51
+ dtype=self.scale.dtype,
52
+ device=self.scale.device,
53
+ )
54
+
55
+ @property
56
+ def mode(self):
57
+ return torch.zeros_like(self.scale)
58
+
59
+ @property
60
+ def variance(self):
61
+ return self.base_dist.variance
62
+
63
+ def log_prob(self, value):
64
+ if self._validate_args:
65
+ self._validate_sample(value)
66
+ value = torch.as_tensor(
67
+ value, dtype=self.base_dist.scale.dtype, device=self.base_dist.scale.device
68
+ )
69
+ log_prob = self.base_dist.log_prob(value) + math.log(2)
70
+ log_prob = torch.where(value >= 0, log_prob, -inf)
71
+ return log_prob
72
+
73
+ def cdf(self, value):
74
+ if self._validate_args:
75
+ self._validate_sample(value)
76
+ return 2 * self.base_dist.cdf(value) - 1
77
+
78
+ def icdf(self, prob):
79
+ return self.base_dist.icdf((prob + 1) / 2)
80
+
81
+ def entropy(self):
82
+ return self.base_dist.entropy() - math.log(2)
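A brief, illustrative sketch for the HalfCauchy wrapper above; note that its mean is infinite by construction::

    import torch
    from torch.distributions import HalfCauchy

    m = HalfCauchy(scale=torch.tensor([1.0]))
    y = m.sample(torch.Size([2]))
    print(m.log_prob(y))                  # base Cauchy log_prob + log(2); -inf for y < 0
    print(m.cdf(y), m.icdf(m.cdf(y)))     # cdf/icdf round-trip through the base Cauchy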
venv/lib/python3.10/site-packages/torch/distributions/half_normal.py ADDED
@@ -0,0 +1,74 @@
1
+ import math
2
+
3
+ import torch
4
+ from torch import inf
5
+ from torch.distributions import constraints
6
+ from torch.distributions.normal import Normal
7
+ from torch.distributions.transformed_distribution import TransformedDistribution
8
+ from torch.distributions.transforms import AbsTransform
9
+
10
+ __all__ = ["HalfNormal"]
11
+
12
+
13
+ class HalfNormal(TransformedDistribution):
14
+ r"""
15
+ Creates a half-normal distribution parameterized by `scale` where::
16
+
17
+ X ~ Normal(0, scale)
18
+ Y = |X| ~ HalfNormal(scale)
19
+
20
+ Example::
21
+
22
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
23
+ >>> m = HalfNormal(torch.tensor([1.0]))
24
+ >>> m.sample() # half-normal distributed with scale=1
25
+ tensor([ 0.1046])
26
+
27
+ Args:
28
+ scale (float or Tensor): scale of the full Normal distribution
29
+ """
30
+ arg_constraints = {"scale": constraints.positive}
31
+ support = constraints.nonnegative
32
+ has_rsample = True
33
+
34
+ def __init__(self, scale, validate_args=None):
35
+ base_dist = Normal(0, scale, validate_args=False)
36
+ super().__init__(base_dist, AbsTransform(), validate_args=validate_args)
37
+
38
+ def expand(self, batch_shape, _instance=None):
39
+ new = self._get_checked_instance(HalfNormal, _instance)
40
+ return super().expand(batch_shape, _instance=new)
41
+
42
+ @property
43
+ def scale(self):
44
+ return self.base_dist.scale
45
+
46
+ @property
47
+ def mean(self):
48
+ return self.scale * math.sqrt(2 / math.pi)
49
+
50
+ @property
51
+ def mode(self):
52
+ return torch.zeros_like(self.scale)
53
+
54
+ @property
55
+ def variance(self):
56
+ return self.scale.pow(2) * (1 - 2 / math.pi)
57
+
58
+ def log_prob(self, value):
59
+ if self._validate_args:
60
+ self._validate_sample(value)
61
+ log_prob = self.base_dist.log_prob(value) + math.log(2)
62
+ log_prob = torch.where(value >= 0, log_prob, -inf)
63
+ return log_prob
64
+
65
+ def cdf(self, value):
66
+ if self._validate_args:
67
+ self._validate_sample(value)
68
+ return 2 * self.base_dist.cdf(value) - 1
69
+
70
+ def icdf(self, prob):
71
+ return self.base_dist.icdf((prob + 1) / 2)
72
+
73
+ def entropy(self):
74
+ return self.base_dist.entropy() - math.log(2)
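An analogous illustrative sketch for the HalfNormal class above::

    import torch
    from torch.distributions import HalfNormal

    m = HalfNormal(scale=torch.tensor([2.0]))
    y = m.rsample(torch.Size([3]))   # |X| with X ~ Normal(0, 2)
    print(m.mean)                    # scale * sqrt(2 / pi)
    print(m.variance)                # scale**2 * (1 - 2 / pi)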
venv/lib/python3.10/site-packages/torch/distributions/kl.py ADDED
@@ -0,0 +1,971 @@
1
+ import math
2
+ import warnings
3
+ from functools import total_ordering
4
+ from typing import Callable, Dict, Tuple, Type
5
+
6
+ import torch
7
+ from torch import inf
8
+
9
+ from .bernoulli import Bernoulli
10
+ from .beta import Beta
11
+ from .binomial import Binomial
12
+ from .categorical import Categorical
13
+ from .cauchy import Cauchy
14
+ from .continuous_bernoulli import ContinuousBernoulli
15
+ from .dirichlet import Dirichlet
16
+ from .distribution import Distribution
17
+ from .exp_family import ExponentialFamily
18
+ from .exponential import Exponential
19
+ from .gamma import Gamma
20
+ from .geometric import Geometric
21
+ from .gumbel import Gumbel
22
+ from .half_normal import HalfNormal
23
+ from .independent import Independent
24
+ from .laplace import Laplace
25
+ from .lowrank_multivariate_normal import (
26
+ _batch_lowrank_logdet,
27
+ _batch_lowrank_mahalanobis,
28
+ LowRankMultivariateNormal,
29
+ )
30
+ from .multivariate_normal import _batch_mahalanobis, MultivariateNormal
31
+ from .normal import Normal
32
+ from .one_hot_categorical import OneHotCategorical
33
+ from .pareto import Pareto
34
+ from .poisson import Poisson
35
+ from .transformed_distribution import TransformedDistribution
36
+ from .uniform import Uniform
37
+ from .utils import _sum_rightmost, euler_constant as _euler_gamma
38
+
39
+ _KL_REGISTRY: Dict[
40
+ Tuple[Type, Type], Callable
41
+ ] = {} # Source of truth mapping a few general (type, type) pairs to functions.
42
+ _KL_MEMOIZE: Dict[
43
+ Tuple[Type, Type], Callable
44
+ ] = {} # Memoized version mapping many specific (type, type) pairs to functions.
45
+
46
+ __all__ = ["register_kl", "kl_divergence"]
47
+
48
+
49
+ def register_kl(type_p, type_q):
50
+ """
51
+ Decorator to register a pairwise function with :meth:`kl_divergence`.
52
+ Usage::
53
+
54
+ @register_kl(Normal, Normal)
55
+ def kl_normal_normal(p, q):
56
+ # insert implementation here
57
+
58
+ Lookup returns the most specific (type,type) match ordered by subclass. If
59
+ the match is ambiguous, a `RuntimeWarning` is raised. For example to
60
+ resolve the ambiguous situation::
61
+
62
+ @register_kl(BaseP, DerivedQ)
63
+ def kl_version1(p, q): ...
64
+ @register_kl(DerivedP, BaseQ)
65
+ def kl_version2(p, q): ...
66
+
67
+ you should register a third most-specific implementation, e.g.::
68
+
69
+ register_kl(DerivedP, DerivedQ)(kl_version1) # Break the tie.
70
+
71
+ Args:
72
+ type_p (type): A subclass of :class:`~torch.distributions.Distribution`.
73
+ type_q (type): A subclass of :class:`~torch.distributions.Distribution`.
74
+ """
75
+ if not isinstance(type_p, type) and issubclass(type_p, Distribution):
76
+ raise TypeError(
77
+ f"Expected type_p to be a Distribution subclass but got {type_p}"
78
+ )
79
+ if not isinstance(type_q, type) and issubclass(type_q, Distribution):
80
+ raise TypeError(
81
+ f"Expected type_q to be a Distribution subclass but got {type_q}"
82
+ )
83
+
84
+ def decorator(fun):
85
+ _KL_REGISTRY[type_p, type_q] = fun
86
+ _KL_MEMOIZE.clear() # reset since lookup order may have changed
87
+ return fun
88
+
89
+ return decorator
90
+
91
+
92
+ @total_ordering
93
+ class _Match:
94
+ __slots__ = ["types"]
95
+
96
+ def __init__(self, *types):
97
+ self.types = types
98
+
99
+ def __eq__(self, other):
100
+ return self.types == other.types
101
+
102
+ def __le__(self, other):
103
+ for x, y in zip(self.types, other.types):
104
+ if not issubclass(x, y):
105
+ return False
106
+ if x is not y:
107
+ break
108
+ return True
109
+
110
+
111
+ def _dispatch_kl(type_p, type_q):
112
+ """
113
+ Find the most specific approximate match, assuming single inheritance.
114
+ """
115
+ matches = [
116
+ (super_p, super_q)
117
+ for super_p, super_q in _KL_REGISTRY
118
+ if issubclass(type_p, super_p) and issubclass(type_q, super_q)
119
+ ]
120
+ if not matches:
121
+ return NotImplemented
122
+ # Check that the left- and right- lexicographic orders agree.
123
+ # mypy isn't smart enough to know that _Match implements __lt__
124
+ # see: https://github.com/python/typing/issues/760#issuecomment-710670503
125
+ left_p, left_q = min(_Match(*m) for m in matches).types # type: ignore[type-var]
126
+ right_q, right_p = min(_Match(*reversed(m)) for m in matches).types # type: ignore[type-var]
127
+ left_fun = _KL_REGISTRY[left_p, left_q]
128
+ right_fun = _KL_REGISTRY[right_p, right_q]
129
+ if left_fun is not right_fun:
130
+ warnings.warn(
131
+ "Ambiguous kl_divergence({}, {}). Please register_kl({}, {})".format(
132
+ type_p.__name__, type_q.__name__, left_p.__name__, right_q.__name__
133
+ ),
134
+ RuntimeWarning,
135
+ )
136
+ return left_fun
137
+
138
+
139
+ def _infinite_like(tensor):
140
+ """
141
+ Helper function for obtaining infinite KL Divergence throughout
142
+ """
143
+ return torch.full_like(tensor, inf)
144
+
145
+
146
+ def _x_log_x(tensor):
147
+ """
148
+ Utility function for calculating x log x
149
+ """
150
+ return tensor * tensor.log()
151
+
152
+
153
+ def _batch_trace_XXT(bmat):
154
+ """
155
+ Utility function for calculating the trace of XX^{T} with X having arbitrary trailing batch dimensions
156
+ """
157
+ n = bmat.size(-1)
158
+ m = bmat.size(-2)
159
+ flat_trace = bmat.reshape(-1, m * n).pow(2).sum(-1)
160
+ return flat_trace.reshape(bmat.shape[:-2])
161
+
162
+
163
+ def kl_divergence(p: Distribution, q: Distribution) -> torch.Tensor:
164
+ r"""
165
+ Compute Kullback-Leibler divergence :math:`KL(p \| q)` between two distributions.
166
+
167
+ .. math::
168
+
169
+ KL(p \| q) = \int p(x) \log\frac {p(x)} {q(x)} \,dx
170
+
171
+ Args:
172
+ p (Distribution): A :class:`~torch.distributions.Distribution` object.
173
+ q (Distribution): A :class:`~torch.distributions.Distribution` object.
174
+
175
+ Returns:
176
+ Tensor: A batch of KL divergences of shape `batch_shape`.
177
+
178
+ Raises:
179
+ NotImplementedError: If the distribution types have not been registered via
180
+ :meth:`register_kl`.
181
+ """
182
+ try:
183
+ fun = _KL_MEMOIZE[type(p), type(q)]
184
+ except KeyError:
185
+ fun = _dispatch_kl(type(p), type(q))
186
+ _KL_MEMOIZE[type(p), type(q)] = fun
187
+ if fun is NotImplemented:
188
+ raise NotImplementedError(
189
+ f"No KL(p || q) is implemented for p type {p.__class__.__name__} and q type {q.__class__.__name__}"
190
+ )
191
+ return fun(p, q)
192
+
193
+
194
+ ################################################################################
195
+ # KL Divergence Implementations
196
+ ################################################################################
197
+
198
+ # Same distributions
199
+
200
+
201
+ @register_kl(Bernoulli, Bernoulli)
202
+ def _kl_bernoulli_bernoulli(p, q):
203
+ t1 = p.probs * (
204
+ torch.nn.functional.softplus(-q.logits)
205
+ - torch.nn.functional.softplus(-p.logits)
206
+ )
207
+ t1[q.probs == 0] = inf
208
+ t1[p.probs == 0] = 0
209
+ t2 = (1 - p.probs) * (
210
+ torch.nn.functional.softplus(q.logits) - torch.nn.functional.softplus(p.logits)
211
+ )
212
+ t2[q.probs == 1] = inf
213
+ t2[p.probs == 1] = 0
214
+ return t1 + t2
215
+
216
+
217
+ @register_kl(Beta, Beta)
218
+ def _kl_beta_beta(p, q):
219
+ sum_params_p = p.concentration1 + p.concentration0
220
+ sum_params_q = q.concentration1 + q.concentration0
221
+ t1 = q.concentration1.lgamma() + q.concentration0.lgamma() + (sum_params_p).lgamma()
222
+ t2 = p.concentration1.lgamma() + p.concentration0.lgamma() + (sum_params_q).lgamma()
223
+ t3 = (p.concentration1 - q.concentration1) * torch.digamma(p.concentration1)
224
+ t4 = (p.concentration0 - q.concentration0) * torch.digamma(p.concentration0)
225
+ t5 = (sum_params_q - sum_params_p) * torch.digamma(sum_params_p)
226
+ return t1 - t2 + t3 + t4 + t5
227
+
228
+
229
+ @register_kl(Binomial, Binomial)
230
+ def _kl_binomial_binomial(p, q):
231
+ # from https://math.stackexchange.com/questions/2214993/
232
+ # kullback-leibler-divergence-for-binomial-distributions-p-and-q
233
+ if (p.total_count < q.total_count).any():
234
+ raise NotImplementedError(
235
+ "KL between Binomials where q.total_count > p.total_count is not implemented"
236
+ )
237
+ kl = p.total_count * (
238
+ p.probs * (p.logits - q.logits) + (-p.probs).log1p() - (-q.probs).log1p()
239
+ )
240
+ inf_idxs = p.total_count > q.total_count
241
+ kl[inf_idxs] = _infinite_like(kl[inf_idxs])
242
+ return kl
243
+
244
+
245
+ @register_kl(Categorical, Categorical)
246
+ def _kl_categorical_categorical(p, q):
247
+ t = p.probs * (p.logits - q.logits)
248
+ t[(q.probs == 0).expand_as(t)] = inf
249
+ t[(p.probs == 0).expand_as(t)] = 0
250
+ return t.sum(-1)
251
+
252
+
253
+ @register_kl(ContinuousBernoulli, ContinuousBernoulli)
254
+ def _kl_continuous_bernoulli_continuous_bernoulli(p, q):
255
+ t1 = p.mean * (p.logits - q.logits)
256
+ t2 = p._cont_bern_log_norm() + torch.log1p(-p.probs)
257
+ t3 = -q._cont_bern_log_norm() - torch.log1p(-q.probs)
258
+ return t1 + t2 + t3
259
+
260
+
261
+ @register_kl(Dirichlet, Dirichlet)
262
+ def _kl_dirichlet_dirichlet(p, q):
263
+ # From http://bariskurt.com/kullback-leibler-divergence-between-two-dirichlet-and-beta-distributions/
264
+ sum_p_concentration = p.concentration.sum(-1)
265
+ sum_q_concentration = q.concentration.sum(-1)
266
+ t1 = sum_p_concentration.lgamma() - sum_q_concentration.lgamma()
267
+ t2 = (p.concentration.lgamma() - q.concentration.lgamma()).sum(-1)
268
+ t3 = p.concentration - q.concentration
269
+ t4 = p.concentration.digamma() - sum_p_concentration.digamma().unsqueeze(-1)
270
+ return t1 - t2 + (t3 * t4).sum(-1)
271
+
272
+
273
+ @register_kl(Exponential, Exponential)
274
+ def _kl_exponential_exponential(p, q):
275
+ rate_ratio = q.rate / p.rate
276
+ t1 = -rate_ratio.log()
277
+ return t1 + rate_ratio - 1
278
+
279
+
280
+ @register_kl(ExponentialFamily, ExponentialFamily)
281
+ def _kl_expfamily_expfamily(p, q):
282
+ if not type(p) == type(q):
283
+ raise NotImplementedError(
284
+ "The cross KL-divergence between different exponential families cannot \
285
+ be computed using Bregman divergences"
286
+ )
287
+ p_nparams = [np.detach().requires_grad_() for np in p._natural_params]
288
+ q_nparams = q._natural_params
289
+ lg_normal = p._log_normalizer(*p_nparams)
290
+ gradients = torch.autograd.grad(lg_normal.sum(), p_nparams, create_graph=True)
291
+ result = q._log_normalizer(*q_nparams) - lg_normal
292
+ for pnp, qnp, g in zip(p_nparams, q_nparams, gradients):
293
+ term = (qnp - pnp) * g
294
+ result -= _sum_rightmost(term, len(q.event_shape))
295
+ return result
296
+
297
+
298
+ @register_kl(Gamma, Gamma)
299
+ def _kl_gamma_gamma(p, q):
300
+ t1 = q.concentration * (p.rate / q.rate).log()
301
+ t2 = torch.lgamma(q.concentration) - torch.lgamma(p.concentration)
302
+ t3 = (p.concentration - q.concentration) * torch.digamma(p.concentration)
303
+ t4 = (q.rate - p.rate) * (p.concentration / p.rate)
304
+ return t1 + t2 + t3 + t4
305
+
306
+
307
+ @register_kl(Gumbel, Gumbel)
308
+ def _kl_gumbel_gumbel(p, q):
309
+ ct1 = p.scale / q.scale
310
+ ct2 = q.loc / q.scale
311
+ ct3 = p.loc / q.scale
312
+ t1 = -ct1.log() - ct2 + ct3
313
+ t2 = ct1 * _euler_gamma
314
+ t3 = torch.exp(ct2 + (1 + ct1).lgamma() - ct3)
315
+ return t1 + t2 + t3 - (1 + _euler_gamma)
316
+
317
+
318
+ @register_kl(Geometric, Geometric)
319
+ def _kl_geometric_geometric(p, q):
320
+ return -p.entropy() - torch.log1p(-q.probs) / p.probs - q.logits
321
+
322
+
323
+ @register_kl(HalfNormal, HalfNormal)
324
+ def _kl_halfnormal_halfnormal(p, q):
325
+ return _kl_normal_normal(p.base_dist, q.base_dist)
326
+
327
+
328
+ @register_kl(Laplace, Laplace)
329
+ def _kl_laplace_laplace(p, q):
330
+ # From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf
331
+ scale_ratio = p.scale / q.scale
332
+ loc_abs_diff = (p.loc - q.loc).abs()
333
+ t1 = -scale_ratio.log()
334
+ t2 = loc_abs_diff / q.scale
335
+ t3 = scale_ratio * torch.exp(-loc_abs_diff / p.scale)
336
+ return t1 + t2 + t3 - 1
337
+
338
+
339
+ @register_kl(LowRankMultivariateNormal, LowRankMultivariateNormal)
340
+ def _kl_lowrankmultivariatenormal_lowrankmultivariatenormal(p, q):
341
+ if p.event_shape != q.event_shape:
342
+ raise ValueError(
343
+ "KL-divergence between two Low Rank Multivariate Normals with\
344
+ different event shapes cannot be computed"
345
+ )
346
+
347
+ term1 = _batch_lowrank_logdet(
348
+ q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag, q._capacitance_tril
349
+ ) - _batch_lowrank_logdet(
350
+ p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag, p._capacitance_tril
351
+ )
352
+ term3 = _batch_lowrank_mahalanobis(
353
+ q._unbroadcasted_cov_factor,
354
+ q._unbroadcasted_cov_diag,
355
+ q.loc - p.loc,
356
+ q._capacitance_tril,
357
+ )
358
+ # Expands term2 according to
359
+ # inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ (pW @ pW.T + pD)
360
+ # = [inv(qD) - A.T @ A] @ (pD + pW @ pW.T)
361
+ qWt_qDinv = q._unbroadcasted_cov_factor.mT / q._unbroadcasted_cov_diag.unsqueeze(-2)
362
+ A = torch.linalg.solve_triangular(q._capacitance_tril, qWt_qDinv, upper=False)
363
+ term21 = (p._unbroadcasted_cov_diag / q._unbroadcasted_cov_diag).sum(-1)
364
+ term22 = _batch_trace_XXT(
365
+ p._unbroadcasted_cov_factor * q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1)
366
+ )
367
+ term23 = _batch_trace_XXT(A * p._unbroadcasted_cov_diag.sqrt().unsqueeze(-2))
368
+ term24 = _batch_trace_XXT(A.matmul(p._unbroadcasted_cov_factor))
369
+ term2 = term21 + term22 - term23 - term24
370
+ return 0.5 * (term1 + term2 + term3 - p.event_shape[0])
371
+
372
+
373
+ @register_kl(MultivariateNormal, LowRankMultivariateNormal)
374
+ def _kl_multivariatenormal_lowrankmultivariatenormal(p, q):
375
+ if p.event_shape != q.event_shape:
376
+ raise ValueError(
377
+ "KL-divergence between two (Low Rank) Multivariate Normals with\
378
+ different event shapes cannot be computed"
379
+ )
380
+
381
+ term1 = _batch_lowrank_logdet(
382
+ q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag, q._capacitance_tril
383
+ ) - 2 * p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
384
+ term3 = _batch_lowrank_mahalanobis(
385
+ q._unbroadcasted_cov_factor,
386
+ q._unbroadcasted_cov_diag,
387
+ q.loc - p.loc,
388
+ q._capacitance_tril,
389
+ )
390
+ # Expands term2 according to
391
+ # inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ p_tril @ p_tril.T
392
+ # = [inv(qD) - A.T @ A] @ p_tril @ p_tril.T
393
+ qWt_qDinv = q._unbroadcasted_cov_factor.mT / q._unbroadcasted_cov_diag.unsqueeze(-2)
394
+ A = torch.linalg.solve_triangular(q._capacitance_tril, qWt_qDinv, upper=False)
395
+ term21 = _batch_trace_XXT(
396
+ p._unbroadcasted_scale_tril * q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1)
397
+ )
398
+ term22 = _batch_trace_XXT(A.matmul(p._unbroadcasted_scale_tril))
399
+ term2 = term21 - term22
400
+ return 0.5 * (term1 + term2 + term3 - p.event_shape[0])
401
+
402
+
403
+ @register_kl(LowRankMultivariateNormal, MultivariateNormal)
404
+ def _kl_lowrankmultivariatenormal_multivariatenormal(p, q):
405
+ if p.event_shape != q.event_shape:
406
+ raise ValueError(
407
+ "KL-divergence between two (Low Rank) Multivariate Normals with\
408
+ different event shapes cannot be computed"
409
+ )
410
+
411
+ term1 = 2 * q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(
412
+ -1
413
+ ) - _batch_lowrank_logdet(
414
+ p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag, p._capacitance_tril
415
+ )
416
+ term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc))
417
+ # Expands term2 according to
418
+ # inv(qcov) @ pcov = inv(q_tril @ q_tril.T) @ (pW @ pW.T + pD)
419
+ combined_batch_shape = torch._C._infer_size(
420
+ q._unbroadcasted_scale_tril.shape[:-2], p._unbroadcasted_cov_factor.shape[:-2]
421
+ )
422
+ n = p.event_shape[0]
423
+ q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
424
+ p_cov_factor = p._unbroadcasted_cov_factor.expand(
425
+ combined_batch_shape + (n, p.cov_factor.size(-1))
426
+ )
427
+ p_cov_diag = torch.diag_embed(p._unbroadcasted_cov_diag.sqrt()).expand(
428
+ combined_batch_shape + (n, n)
429
+ )
430
+ term21 = _batch_trace_XXT(
431
+ torch.linalg.solve_triangular(q_scale_tril, p_cov_factor, upper=False)
432
+ )
433
+ term22 = _batch_trace_XXT(
434
+ torch.linalg.solve_triangular(q_scale_tril, p_cov_diag, upper=False)
435
+ )
436
+ term2 = term21 + term22
437
+ return 0.5 * (term1 + term2 + term3 - p.event_shape[0])
438
+
439
+
440
+ @register_kl(MultivariateNormal, MultivariateNormal)
441
+ def _kl_multivariatenormal_multivariatenormal(p, q):
442
+ # From https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Kullback%E2%80%93Leibler_divergence
443
+ if p.event_shape != q.event_shape:
444
+ raise ValueError(
445
+ "KL-divergence between two Multivariate Normals with\
446
+ different event shapes cannot be computed"
447
+ )
448
+
449
+ half_term1 = q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(
450
+ -1
451
+ ) - p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
452
+ combined_batch_shape = torch._C._infer_size(
453
+ q._unbroadcasted_scale_tril.shape[:-2], p._unbroadcasted_scale_tril.shape[:-2]
454
+ )
455
+ n = p.event_shape[0]
456
+ q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
457
+ p_scale_tril = p._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
458
+ term2 = _batch_trace_XXT(
459
+ torch.linalg.solve_triangular(q_scale_tril, p_scale_tril, upper=False)
460
+ )
461
+ term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc))
462
+ return half_term1 + 0.5 * (term2 + term3 - n)
463
+
464
+
465
+ @register_kl(Normal, Normal)
466
+ def _kl_normal_normal(p, q):
467
+ var_ratio = (p.scale / q.scale).pow(2)
468
+ t1 = ((p.loc - q.loc) / q.scale).pow(2)
469
+ return 0.5 * (var_ratio + t1 - 1 - var_ratio.log())
470
+
471
+
472
+ @register_kl(OneHotCategorical, OneHotCategorical)
473
+ def _kl_onehotcategorical_onehotcategorical(p, q):
474
+ return _kl_categorical_categorical(p._categorical, q._categorical)
475
+
476
+
477
+ @register_kl(Pareto, Pareto)
478
+ def _kl_pareto_pareto(p, q):
479
+ # From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf
480
+ scale_ratio = p.scale / q.scale
481
+ alpha_ratio = q.alpha / p.alpha
482
+ t1 = q.alpha * scale_ratio.log()
483
+ t2 = -alpha_ratio.log()
484
+ result = t1 + t2 + alpha_ratio - 1
485
+ result[p.support.lower_bound < q.support.lower_bound] = inf
486
+ return result
487
+
488
+
489
+ @register_kl(Poisson, Poisson)
490
+ def _kl_poisson_poisson(p, q):
491
+ return p.rate * (p.rate.log() - q.rate.log()) - (p.rate - q.rate)
492
+
493
+
494
+ @register_kl(TransformedDistribution, TransformedDistribution)
495
+ def _kl_transformed_transformed(p, q):
496
+ if p.transforms != q.transforms:
497
+ raise NotImplementedError
498
+ if p.event_shape != q.event_shape:
499
+ raise NotImplementedError
500
+ return kl_divergence(p.base_dist, q.base_dist)
501
+
502
+
503
+ @register_kl(Uniform, Uniform)
504
+ def _kl_uniform_uniform(p, q):
505
+ result = ((q.high - q.low) / (p.high - p.low)).log()
506
+ result[(q.low > p.low) | (q.high < p.high)] = inf
507
+ return result
508
+
509
+
510
+ # Different distributions
511
+ @register_kl(Bernoulli, Poisson)
512
+ def _kl_bernoulli_poisson(p, q):
513
+ return -p.entropy() - (p.probs * q.rate.log() - q.rate)
514
+
515
+
516
+ @register_kl(Beta, ContinuousBernoulli)
517
+ def _kl_beta_continuous_bernoulli(p, q):
518
+ return (
519
+ -p.entropy()
520
+ - p.mean * q.logits
521
+ - torch.log1p(-q.probs)
522
+ - q._cont_bern_log_norm()
523
+ )
524
+
525
+
526
+ @register_kl(Beta, Pareto)
527
+ def _kl_beta_infinity(p, q):
528
+ return _infinite_like(p.concentration1)
529
+
530
+
531
+ @register_kl(Beta, Exponential)
532
+ def _kl_beta_exponential(p, q):
533
+ return (
534
+ -p.entropy()
535
+ - q.rate.log()
536
+ + q.rate * (p.concentration1 / (p.concentration1 + p.concentration0))
537
+ )
538
+
539
+
540
+ @register_kl(Beta, Gamma)
541
+ def _kl_beta_gamma(p, q):
542
+ t1 = -p.entropy()
543
+ t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
544
+ t3 = (q.concentration - 1) * (
545
+ p.concentration1.digamma() - (p.concentration1 + p.concentration0).digamma()
546
+ )
547
+ t4 = q.rate * p.concentration1 / (p.concentration1 + p.concentration0)
548
+ return t1 + t2 - t3 + t4
549
+
550
+
551
+ # TODO: Add Beta-Laplace KL Divergence
552
+
553
+
554
+ @register_kl(Beta, Normal)
555
+ def _kl_beta_normal(p, q):
556
+ E_beta = p.concentration1 / (p.concentration1 + p.concentration0)
557
+ var_normal = q.scale.pow(2)
558
+ t1 = -p.entropy()
559
+ t2 = 0.5 * (var_normal * 2 * math.pi).log()
560
+ t3 = (
561
+ E_beta * (1 - E_beta) / (p.concentration1 + p.concentration0 + 1)
562
+ + E_beta.pow(2)
563
+ ) * 0.5
564
+ t4 = q.loc * E_beta
565
+ t5 = q.loc.pow(2) * 0.5
566
+ return t1 + t2 + (t3 - t4 + t5) / var_normal
567
+
568
+
569
+ @register_kl(Beta, Uniform)
570
+ def _kl_beta_uniform(p, q):
571
+ result = -p.entropy() + (q.high - q.low).log()
572
+ result[(q.low > p.support.lower_bound) | (q.high < p.support.upper_bound)] = inf
573
+ return result
574
+
575
+
576
+ # Note that the KL between a ContinuousBernoulli and Beta has no closed form
577
+
578
+
579
+ @register_kl(ContinuousBernoulli, Pareto)
580
+ def _kl_continuous_bernoulli_infinity(p, q):
581
+ return _infinite_like(p.probs)
582
+
583
+
584
+ @register_kl(ContinuousBernoulli, Exponential)
585
+ def _kl_continuous_bernoulli_exponential(p, q):
586
+ return -p.entropy() - torch.log(q.rate) + q.rate * p.mean
587
+
588
+
589
+ # Note that the KL between a ContinuousBernoulli and Gamma has no closed form
590
+ # TODO: Add ContinuousBernoulli-Laplace KL Divergence
591
+
592
+
593
+ @register_kl(ContinuousBernoulli, Normal)
594
+ def _kl_continuous_bernoulli_normal(p, q):
595
+ t1 = -p.entropy()
596
+ t2 = 0.5 * (math.log(2.0 * math.pi) + torch.square(q.loc / q.scale)) + torch.log(
597
+ q.scale
598
+ )
599
+ t3 = (p.variance + torch.square(p.mean) - 2.0 * q.loc * p.mean) / (
600
+ 2.0 * torch.square(q.scale)
601
+ )
602
+ return t1 + t2 + t3
603
+
604
+
605
+ @register_kl(ContinuousBernoulli, Uniform)
606
+ def _kl_continuous_bernoulli_uniform(p, q):
607
+ result = -p.entropy() + (q.high - q.low).log()
608
+ return torch.where(
609
+ torch.max(
610
+ torch.ge(q.low, p.support.lower_bound),
611
+ torch.le(q.high, p.support.upper_bound),
612
+ ),
613
+ torch.ones_like(result) * inf,
614
+ result,
615
+ )
616
+
617
+
618
+ @register_kl(Exponential, Beta)
619
+ @register_kl(Exponential, ContinuousBernoulli)
620
+ @register_kl(Exponential, Pareto)
621
+ @register_kl(Exponential, Uniform)
622
+ def _kl_exponential_infinity(p, q):
623
+ return _infinite_like(p.rate)
624
+
625
+
626
+ @register_kl(Exponential, Gamma)
627
+ def _kl_exponential_gamma(p, q):
628
+ ratio = q.rate / p.rate
629
+ t1 = -q.concentration * torch.log(ratio)
630
+ return (
631
+ t1
632
+ + ratio
633
+ + q.concentration.lgamma()
634
+ + q.concentration * _euler_gamma
635
+ - (1 + _euler_gamma)
636
+ )
637
+
638
+
639
+ @register_kl(Exponential, Gumbel)
640
+ def _kl_exponential_gumbel(p, q):
641
+ scale_rate_prod = p.rate * q.scale
642
+ loc_scale_ratio = q.loc / q.scale
643
+ t1 = scale_rate_prod.log() - 1
644
+ t2 = torch.exp(loc_scale_ratio) * scale_rate_prod / (scale_rate_prod + 1)
645
+ t3 = scale_rate_prod.reciprocal()
646
+ return t1 - loc_scale_ratio + t2 + t3
647
+
648
+
649
+ # TODO: Add Exponential-Laplace KL Divergence
650
+
651
+
652
+ @register_kl(Exponential, Normal)
653
+ def _kl_exponential_normal(p, q):
654
+ var_normal = q.scale.pow(2)
655
+ rate_sqr = p.rate.pow(2)
656
+ t1 = 0.5 * torch.log(rate_sqr * var_normal * 2 * math.pi)
657
+ t2 = rate_sqr.reciprocal()
658
+ t3 = q.loc / p.rate
659
+ t4 = q.loc.pow(2) * 0.5
660
+ return t1 - 1 + (t2 - t3 + t4) / var_normal
661
+
662
+
663
+ @register_kl(Gamma, Beta)
664
+ @register_kl(Gamma, ContinuousBernoulli)
665
+ @register_kl(Gamma, Pareto)
666
+ @register_kl(Gamma, Uniform)
667
+ def _kl_gamma_infinity(p, q):
668
+ return _infinite_like(p.concentration)
669
+
670
+
671
+ @register_kl(Gamma, Exponential)
672
+ def _kl_gamma_exponential(p, q):
673
+ return -p.entropy() - q.rate.log() + q.rate * p.concentration / p.rate
674
+
675
+
676
+ @register_kl(Gamma, Gumbel)
677
+ def _kl_gamma_gumbel(p, q):
678
+ beta_scale_prod = p.rate * q.scale
679
+ loc_scale_ratio = q.loc / q.scale
680
+ t1 = (
681
+ (p.concentration - 1) * p.concentration.digamma()
682
+ - p.concentration.lgamma()
683
+ - p.concentration
684
+ )
685
+ t2 = beta_scale_prod.log() + p.concentration / beta_scale_prod
686
+ t3 = (
687
+ torch.exp(loc_scale_ratio)
688
+ * (1 + beta_scale_prod.reciprocal()).pow(-p.concentration)
689
+ - loc_scale_ratio
690
+ )
691
+ return t1 + t2 + t3
692
+
693
+
694
+ # TODO: Add Gamma-Laplace KL Divergence
695
+
696
+
697
+ @register_kl(Gamma, Normal)
698
+ def _kl_gamma_normal(p, q):
699
+ var_normal = q.scale.pow(2)
700
+ beta_sqr = p.rate.pow(2)
701
+ t1 = (
702
+ 0.5 * torch.log(beta_sqr * var_normal * 2 * math.pi)
703
+ - p.concentration
704
+ - p.concentration.lgamma()
705
+ )
706
+ t2 = 0.5 * (p.concentration.pow(2) + p.concentration) / beta_sqr
707
+ t3 = q.loc * p.concentration / p.rate
708
+ t4 = 0.5 * q.loc.pow(2)
709
+ return (
710
+ t1
711
+ + (p.concentration - 1) * p.concentration.digamma()
712
+ + (t2 - t3 + t4) / var_normal
713
+ )
714
+
715
+
716
+ @register_kl(Gumbel, Beta)
717
+ @register_kl(Gumbel, ContinuousBernoulli)
718
+ @register_kl(Gumbel, Exponential)
719
+ @register_kl(Gumbel, Gamma)
720
+ @register_kl(Gumbel, Pareto)
721
+ @register_kl(Gumbel, Uniform)
722
+ def _kl_gumbel_infinity(p, q):
723
+ return _infinite_like(p.loc)
724
+
725
+
726
+ # TODO: Add Gumbel-Laplace KL Divergence
727
+
728
+
729
+ @register_kl(Gumbel, Normal)
730
+ def _kl_gumbel_normal(p, q):
731
+ param_ratio = p.scale / q.scale
732
+ t1 = (param_ratio / math.sqrt(2 * math.pi)).log()
733
+ t2 = (math.pi * param_ratio * 0.5).pow(2) / 3
734
+ t3 = ((p.loc + p.scale * _euler_gamma - q.loc) / q.scale).pow(2) * 0.5
735
+ return -t1 + t2 + t3 - (_euler_gamma + 1)
736
+
737
+
738
+ @register_kl(Laplace, Beta)
739
+ @register_kl(Laplace, ContinuousBernoulli)
740
+ @register_kl(Laplace, Exponential)
741
+ @register_kl(Laplace, Gamma)
742
+ @register_kl(Laplace, Pareto)
743
+ @register_kl(Laplace, Uniform)
744
+ def _kl_laplace_infinity(p, q):
745
+ return _infinite_like(p.loc)
746
+
747
+
748
+ @register_kl(Laplace, Normal)
749
+ def _kl_laplace_normal(p, q):
750
+ var_normal = q.scale.pow(2)
751
+ scale_sqr_var_ratio = p.scale.pow(2) / var_normal
752
+ t1 = 0.5 * torch.log(2 * scale_sqr_var_ratio / math.pi)
753
+ t2 = 0.5 * p.loc.pow(2)
754
+ t3 = p.loc * q.loc
755
+ t4 = 0.5 * q.loc.pow(2)
756
+ return -t1 + scale_sqr_var_ratio + (t2 - t3 + t4) / var_normal - 1
757
+
758
+
759
+ @register_kl(Normal, Beta)
760
+ @register_kl(Normal, ContinuousBernoulli)
761
+ @register_kl(Normal, Exponential)
762
+ @register_kl(Normal, Gamma)
763
+ @register_kl(Normal, Pareto)
764
+ @register_kl(Normal, Uniform)
765
+ def _kl_normal_infinity(p, q):
766
+ return _infinite_like(p.loc)
767
+
768
+
769
+ @register_kl(Normal, Gumbel)
770
+ def _kl_normal_gumbel(p, q):
771
+ mean_scale_ratio = p.loc / q.scale
772
+ var_scale_sqr_ratio = (p.scale / q.scale).pow(2)
773
+ loc_scale_ratio = q.loc / q.scale
774
+ t1 = var_scale_sqr_ratio.log() * 0.5
775
+ t2 = mean_scale_ratio - loc_scale_ratio
776
+ t3 = torch.exp(-mean_scale_ratio + 0.5 * var_scale_sqr_ratio + loc_scale_ratio)
777
+ return -t1 + t2 + t3 - (0.5 * (1 + math.log(2 * math.pi)))
778
+
779
+
780
+ @register_kl(Normal, Laplace)
781
+ def _kl_normal_laplace(p, q):
782
+ loc_diff = p.loc - q.loc
783
+ scale_ratio = p.scale / q.scale
784
+ loc_diff_scale_ratio = loc_diff / p.scale
785
+ t1 = torch.log(scale_ratio)
786
+ t2 = (
787
+ math.sqrt(2 / math.pi) * p.scale * torch.exp(-0.5 * loc_diff_scale_ratio.pow(2))
788
+ )
789
+ t3 = loc_diff * torch.erf(math.sqrt(0.5) * loc_diff_scale_ratio)
790
+ return -t1 + (t2 + t3) / q.scale - (0.5 * (1 + math.log(0.5 * math.pi)))
791
+
792
+
793
+ @register_kl(Pareto, Beta)
794
+ @register_kl(Pareto, ContinuousBernoulli)
795
+ @register_kl(Pareto, Uniform)
796
+ def _kl_pareto_infinity(p, q):
797
+ return _infinite_like(p.scale)
798
+
799
+
800
+ @register_kl(Pareto, Exponential)
801
+ def _kl_pareto_exponential(p, q):
802
+ scale_rate_prod = p.scale * q.rate
803
+ t1 = (p.alpha / scale_rate_prod).log()
804
+ t2 = p.alpha.reciprocal()
805
+ t3 = p.alpha * scale_rate_prod / (p.alpha - 1)
806
+ result = t1 - t2 + t3 - 1
807
+ result[p.alpha <= 1] = inf
808
+ return result
809
+
810
+
811
+ @register_kl(Pareto, Gamma)
812
+ def _kl_pareto_gamma(p, q):
813
+ common_term = p.scale.log() + p.alpha.reciprocal()
814
+ t1 = p.alpha.log() - common_term
815
+ t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
816
+ t3 = (1 - q.concentration) * common_term
817
+ t4 = q.rate * p.alpha * p.scale / (p.alpha - 1)
818
+ result = t1 + t2 + t3 + t4 - 1
819
+ result[p.alpha <= 1] = inf
820
+ return result
821
+
822
+
823
+ # TODO: Add Pareto-Laplace KL Divergence
824
+
825
+
826
+ @register_kl(Pareto, Normal)
827
+ def _kl_pareto_normal(p, q):
828
+ var_normal = 2 * q.scale.pow(2)
829
+ common_term = p.scale / (p.alpha - 1)
830
+ t1 = (math.sqrt(2 * math.pi) * q.scale * p.alpha / p.scale).log()
831
+ t2 = p.alpha.reciprocal()
832
+ t3 = p.alpha * common_term.pow(2) / (p.alpha - 2)
833
+ t4 = (p.alpha * common_term - q.loc).pow(2)
834
+ result = t1 - t2 + (t3 + t4) / var_normal - 1
835
+ result[p.alpha <= 2] = inf
836
+ return result
837
+
838
+
839
+ @register_kl(Poisson, Bernoulli)
840
+ @register_kl(Poisson, Binomial)
841
+ def _kl_poisson_infinity(p, q):
842
+ return _infinite_like(p.rate)
843
+
844
+
845
+ @register_kl(Uniform, Beta)
846
+ def _kl_uniform_beta(p, q):
847
+ common_term = p.high - p.low
848
+ t1 = torch.log(common_term)
849
+ t2 = (
850
+ (q.concentration1 - 1)
851
+ * (_x_log_x(p.high) - _x_log_x(p.low) - common_term)
852
+ / common_term
853
+ )
854
+ t3 = (
855
+ (q.concentration0 - 1)
856
+ * (_x_log_x(1 - p.high) - _x_log_x(1 - p.low) + common_term)
857
+ / common_term
858
+ )
859
+ t4 = (
860
+ q.concentration1.lgamma()
861
+ + q.concentration0.lgamma()
862
+ - (q.concentration1 + q.concentration0).lgamma()
863
+ )
864
+ result = t3 + t4 - t1 - t2
865
+ result[(p.high > q.support.upper_bound) | (p.low < q.support.lower_bound)] = inf
866
+ return result
867
+
868
+
869
+ @register_kl(Uniform, ContinuousBernoulli)
870
+ def _kl_uniform_continuous_bernoulli(p, q):
871
+ result = (
872
+ -p.entropy()
873
+ - p.mean * q.logits
874
+ - torch.log1p(-q.probs)
875
+ - q._cont_bern_log_norm()
876
+ )
877
+ return torch.where(
878
+ torch.max(
879
+ torch.ge(p.high, q.support.upper_bound),
880
+ torch.le(p.low, q.support.lower_bound),
881
+ ),
882
+ torch.ones_like(result) * inf,
883
+ result,
884
+ )
885
+
886
+
887
+ @register_kl(Uniform, Exponential)
888
+ def _kl_uniform_exponetial(p, q):
889
+ result = q.rate * (p.high + p.low) / 2 - ((p.high - p.low) * q.rate).log()
890
+ result[p.low < q.support.lower_bound] = inf
891
+ return result
892
+
893
+
894
+ @register_kl(Uniform, Gamma)
895
+ def _kl_uniform_gamma(p, q):
896
+ common_term = p.high - p.low
897
+ t1 = common_term.log()
898
+ t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
899
+ t3 = (
900
+ (1 - q.concentration)
901
+ * (_x_log_x(p.high) - _x_log_x(p.low) - common_term)
902
+ / common_term
903
+ )
904
+ t4 = q.rate * (p.high + p.low) / 2
905
+ result = -t1 + t2 + t3 + t4
906
+ result[p.low < q.support.lower_bound] = inf
907
+ return result
908
+
909
+
910
+ @register_kl(Uniform, Gumbel)
911
+ def _kl_uniform_gumbel(p, q):
912
+ common_term = q.scale / (p.high - p.low)
913
+ high_loc_diff = (p.high - q.loc) / q.scale
914
+ low_loc_diff = (p.low - q.loc) / q.scale
915
+ t1 = common_term.log() + 0.5 * (high_loc_diff + low_loc_diff)
916
+ t2 = common_term * (torch.exp(-high_loc_diff) - torch.exp(-low_loc_diff))
917
+ return t1 - t2
918
+
919
+
920
+ # TODO: Uniform-Laplace KL Divergence
921
+
922
+
923
+ @register_kl(Uniform, Normal)
924
+ def _kl_uniform_normal(p, q):
925
+ common_term = p.high - p.low
926
+ t1 = (math.sqrt(math.pi * 2) * q.scale / common_term).log()
927
+ t2 = (common_term).pow(2) / 12
928
+ t3 = ((p.high + p.low - 2 * q.loc) / 2).pow(2)
929
+ return t1 + 0.5 * (t2 + t3) / q.scale.pow(2)
930
+
931
+
932
+ @register_kl(Uniform, Pareto)
933
+ def _kl_uniform_pareto(p, q):
934
+ support_uniform = p.high - p.low
935
+ t1 = (q.alpha * q.scale.pow(q.alpha) * (support_uniform)).log()
936
+ t2 = (_x_log_x(p.high) - _x_log_x(p.low) - support_uniform) / support_uniform
937
+ result = t2 * (q.alpha + 1) - t1
938
+ result[p.low < q.support.lower_bound] = inf
939
+ return result
940
+
941
+
942
+ @register_kl(Independent, Independent)
943
+ def _kl_independent_independent(p, q):
944
+ if p.reinterpreted_batch_ndims != q.reinterpreted_batch_ndims:
945
+ raise NotImplementedError
946
+ result = kl_divergence(p.base_dist, q.base_dist)
947
+ return _sum_rightmost(result, p.reinterpreted_batch_ndims)
948
+
949
+
950
+ @register_kl(Cauchy, Cauchy)
951
+ def _kl_cauchy_cauchy(p, q):
952
+ # From https://arxiv.org/abs/1905.10965
953
+ t1 = ((p.scale + q.scale).pow(2) + (p.loc - q.loc).pow(2)).log()
954
+ t2 = (4 * p.scale * q.scale).log()
955
+ return t1 - t2
956
+
957
+
958
+ def _add_kl_info():
959
+ """Appends a list of implemented KL functions to the doc for kl_divergence."""
960
+ rows = [
961
+ "KL divergence is currently implemented for the following distribution pairs:"
962
+ ]
963
+ for p, q in sorted(
964
+ _KL_REGISTRY, key=lambda p_q: (p_q[0].__name__, p_q[1].__name__)
965
+ ):
966
+ rows.append(
967
+ f"* :class:`~torch.distributions.{p.__name__}` and :class:`~torch.distributions.{q.__name__}`"
968
+ )
969
+ kl_info = "\n\t".join(rows)
970
+ if kl_divergence.__doc__:
971
+ kl_divergence.__doc__ += kl_info # type: ignore[operator]
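A minimal sketch of the registry machinery above: kl_divergence looks up the (type(p), type(q)) pair, memoizes the most specific registered match, and raises NotImplementedError for unregistered pairs; register_kl is the decorator used throughout this file to populate the registry::

    import torch
    from torch.distributions import Normal, kl_divergence

    p = Normal(torch.tensor([0.0]), torch.tensor([1.0]))
    q = Normal(torch.tensor([1.0]), torch.tensor([2.0]))
    print(kl_divergence(p, q))   # dispatches to _kl_normal_normal via the registry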
venv/lib/python3.10/site-packages/torch/distributions/kumaraswamy.py ADDED
@@ -0,0 +1,97 @@
1
+ import torch
2
+ from torch import nan
3
+ from torch.distributions import constraints
4
+ from torch.distributions.transformed_distribution import TransformedDistribution
5
+ from torch.distributions.transforms import AffineTransform, PowerTransform
6
+ from torch.distributions.uniform import Uniform
7
+ from torch.distributions.utils import broadcast_all, euler_constant
8
+
9
+ __all__ = ["Kumaraswamy"]
10
+
11
+
12
+ def _moments(a, b, n):
13
+ """
14
+ Computes the n-th moment of the Kumaraswamy distribution using torch.lgamma
15
+ """
16
+ arg1 = 1 + n / a
17
+ log_value = torch.lgamma(arg1) + torch.lgamma(b) - torch.lgamma(arg1 + b)
18
+ return b * torch.exp(log_value)
19
+
20
+
21
+ class Kumaraswamy(TransformedDistribution):
22
+ r"""
23
+ Samples from a Kumaraswamy distribution.
24
+
25
+ Example::
26
+
27
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
28
+ >>> m = Kumaraswamy(torch.tensor([1.0]), torch.tensor([1.0]))
29
+ >>> m.sample() # sample from a Kumaraswamy distribution with concentration alpha=1 and beta=1
30
+ tensor([ 0.1729])
31
+
32
+ Args:
33
+ concentration1 (float or Tensor): 1st concentration parameter of the distribution
34
+ (often referred to as alpha)
35
+ concentration0 (float or Tensor): 2nd concentration parameter of the distribution
36
+ (often referred to as beta)
37
+ """
38
+ arg_constraints = {
39
+ "concentration1": constraints.positive,
40
+ "concentration0": constraints.positive,
41
+ }
42
+ support = constraints.unit_interval
43
+ has_rsample = True
44
+
45
+ def __init__(self, concentration1, concentration0, validate_args=None):
46
+ self.concentration1, self.concentration0 = broadcast_all(
47
+ concentration1, concentration0
48
+ )
49
+ finfo = torch.finfo(self.concentration0.dtype)
50
+ base_dist = Uniform(
51
+ torch.full_like(self.concentration0, 0),
52
+ torch.full_like(self.concentration0, 1),
53
+ validate_args=validate_args,
54
+ )
55
+ transforms = [
56
+ PowerTransform(exponent=self.concentration0.reciprocal()),
57
+ AffineTransform(loc=1.0, scale=-1.0),
58
+ PowerTransform(exponent=self.concentration1.reciprocal()),
59
+ ]
60
+ super().__init__(base_dist, transforms, validate_args=validate_args)
61
+
62
+ def expand(self, batch_shape, _instance=None):
63
+ new = self._get_checked_instance(Kumaraswamy, _instance)
64
+ new.concentration1 = self.concentration1.expand(batch_shape)
65
+ new.concentration0 = self.concentration0.expand(batch_shape)
66
+ return super().expand(batch_shape, _instance=new)
67
+
68
+ @property
69
+ def mean(self):
70
+ return _moments(self.concentration1, self.concentration0, 1)
71
+
72
+ @property
73
+ def mode(self):
74
+ # Evaluate in log-space for numerical stability.
75
+ log_mode = (
76
+ self.concentration0.reciprocal() * (-self.concentration0).log1p()
77
+ - (-self.concentration0 * self.concentration1).log1p()
78
+ )
79
+ log_mode[(self.concentration0 < 1) | (self.concentration1 < 1)] = nan
80
+ return log_mode.exp()
81
+
82
+ @property
83
+ def variance(self):
84
+ return _moments(self.concentration1, self.concentration0, 2) - torch.pow(
85
+ self.mean, 2
86
+ )
87
+
88
+ def entropy(self):
89
+ t1 = 1 - self.concentration1.reciprocal()
90
+ t0 = 1 - self.concentration0.reciprocal()
91
+ H0 = torch.digamma(self.concentration0 + 1) + euler_constant
92
+ return (
93
+ t0
94
+ + t1 * H0
95
+ - torch.log(self.concentration1)
96
+ - torch.log(self.concentration0)
97
+ )
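A hedged usage sketch for the Kumaraswamy class above (illustrative concentration values)::

    import torch
    from torch.distributions import Kumaraswamy

    m = Kumaraswamy(torch.tensor([2.0]), torch.tensor([5.0]))
    x = m.rsample(torch.Size([4]))   # draws in (0, 1) via the Uniform/PowerTransform chain
    print(m.mean, m.variance)        # computed from _moments via torch.lgamma
    print(m.entropy())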
venv/lib/python3.10/site-packages/torch/distributions/logistic_normal.py ADDED
@@ -0,0 +1,54 @@
1
+ from torch.distributions import constraints
2
+ from torch.distributions.normal import Normal
3
+ from torch.distributions.transformed_distribution import TransformedDistribution
4
+ from torch.distributions.transforms import StickBreakingTransform
5
+
6
+ __all__ = ["LogisticNormal"]
7
+
8
+
9
+ class LogisticNormal(TransformedDistribution):
10
+ r"""
11
+ Creates a logistic-normal distribution parameterized by :attr:`loc` and :attr:`scale`
12
+ that define the base `Normal` distribution transformed with the
13
+ `StickBreakingTransform` such that::
14
+
15
+ X ~ LogisticNormal(loc, scale)
16
+ Y = log(X / (1 - X.cumsum(-1)))[..., :-1] ~ Normal(loc, scale)
17
+
18
+ Args:
19
+ loc (float or Tensor): mean of the base distribution
20
+ scale (float or Tensor): standard deviation of the base distribution
21
+
22
+ Example::
23
+
24
+ >>> # logistic-normal distributed with mean=(0, 0, 0) and stddev=(1, 1, 1)
25
+ >>> # of the base Normal distribution
26
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
27
+ >>> m = LogisticNormal(torch.tensor([0.0] * 3), torch.tensor([1.0] * 3))
28
+ >>> m.sample()
29
+ tensor([ 0.7653, 0.0341, 0.0579, 0.1427])
30
+
31
+ """
32
+ arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
33
+ support = constraints.simplex
34
+ has_rsample = True
35
+
36
+ def __init__(self, loc, scale, validate_args=None):
37
+ base_dist = Normal(loc, scale, validate_args=validate_args)
38
+ if not base_dist.batch_shape:
39
+ base_dist = base_dist.expand([1])
40
+ super().__init__(
41
+ base_dist, StickBreakingTransform(), validate_args=validate_args
42
+ )
43
+
44
+ def expand(self, batch_shape, _instance=None):
45
+ new = self._get_checked_instance(LogisticNormal, _instance)
46
+ return super().expand(batch_shape, _instance=new)
47
+
48
+ @property
49
+ def loc(self):
50
+ return self.base_dist.base_dist.loc
51
+
52
+ @property
53
+ def scale(self):
54
+ return self.base_dist.base_dist.scale
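A small illustrative sketch for LogisticNormal; with a 3-dimensional base Normal the samples live on the 4-dimensional probability simplex::

    import torch
    from torch.distributions import LogisticNormal

    m = LogisticNormal(torch.zeros(3), torch.ones(3))
    x = m.rsample()          # shape (4,), produced by the StickBreakingTransform
    print(x, x.sum(-1))      # components sum to 1 up to floating point error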
venv/lib/python3.10/site-packages/torch/distributions/mixture_same_family.py ADDED
@@ -0,0 +1,214 @@
1
+ from typing import Dict
2
+
3
+ import torch
4
+ from torch.distributions import Categorical, constraints
5
+ from torch.distributions.distribution import Distribution
6
+
7
+ __all__ = ["MixtureSameFamily"]
8
+
9
+
10
+ class MixtureSameFamily(Distribution):
11
+ r"""
12
+ The `MixtureSameFamily` distribution implements a (batch of) mixture
13
+ distribution where all components are from different parameterizations of
14
+ the same distribution type. It is parameterized by a `Categorical`
15
+ "selecting distribution" (over `k` component) and a component
16
+ distribution, i.e., a `Distribution` with a rightmost batch shape
17
+ (equal to `[k]`) which indexes each (batch of) component.
18
+
19
+ Examples::
20
+
21
+ >>> # xdoctest: +SKIP("undefined vars")
22
+ >>> # Construct Gaussian Mixture Model in 1D consisting of 5 equally
23
+ >>> # weighted normal distributions
24
+ >>> mix = D.Categorical(torch.ones(5,))
25
+ >>> comp = D.Normal(torch.randn(5,), torch.rand(5,))
26
+ >>> gmm = MixtureSameFamily(mix, comp)
27
+
28
+ >>> # Construct Gaussian Mixture Model in 2D consisting of 5 equally
29
+ >>> # weighted bivariate normal distributions
30
+ >>> mix = D.Categorical(torch.ones(5,))
31
+ >>> comp = D.Independent(D.Normal(
32
+ ... torch.randn(5,2), torch.rand(5,2)), 1)
33
+ >>> gmm = MixtureSameFamily(mix, comp)
34
+
35
+ >>> # Construct a batch of 3 Gaussian Mixture Models in 2D each
36
+ >>> # consisting of 5 random weighted bivariate normal distributions
37
+ >>> mix = D.Categorical(torch.rand(3,5))
38
+ >>> comp = D.Independent(D.Normal(
39
+ ... torch.randn(3,5,2), torch.rand(3,5,2)), 1)
40
+ >>> gmm = MixtureSameFamily(mix, comp)
41
+
42
+ Args:
43
+ mixture_distribution: `torch.distributions.Categorical`-like
44
+ instance. Manages the probability of selecting components.
45
+ The number of categories must match the rightmost batch
46
+ dimension of the `component_distribution`. Must have either
47
+ scalar `batch_shape` or `batch_shape` matching
48
+ `component_distribution.batch_shape[:-1]`
49
+ component_distribution: `torch.distributions.Distribution`-like
50
+ instance. Right-most batch dimension indexes component.
51
+ """
52
+ arg_constraints: Dict[str, constraints.Constraint] = {}
53
+ has_rsample = False
54
+
55
+ def __init__(
56
+ self, mixture_distribution, component_distribution, validate_args=None
57
+ ):
58
+ self._mixture_distribution = mixture_distribution
59
+ self._component_distribution = component_distribution
60
+
61
+ if not isinstance(self._mixture_distribution, Categorical):
62
+ raise ValueError(
63
+ " The Mixture distribution needs to be an "
64
+ " instance of torch.distributions.Categorical"
65
+ )
66
+
67
+ if not isinstance(self._component_distribution, Distribution):
68
+ raise ValueError(
69
+ "The Component distribution need to be an "
70
+ "instance of torch.distributions.Distribution"
71
+ )
72
+
73
+ # Check that batch size matches
74
+ mdbs = self._mixture_distribution.batch_shape
75
+ cdbs = self._component_distribution.batch_shape[:-1]
76
+ for size1, size2 in zip(reversed(mdbs), reversed(cdbs)):
77
+ if size1 != 1 and size2 != 1 and size1 != size2:
78
+ raise ValueError(
79
+ f"`mixture_distribution.batch_shape` ({mdbs}) is not "
80
+ "compatible with `component_distribution."
81
+ f"batch_shape`({cdbs})"
82
+ )
83
+
84
+ # Check that the number of mixture component matches
85
+ km = self._mixture_distribution.logits.shape[-1]
86
+ kc = self._component_distribution.batch_shape[-1]
87
+ if km is not None and kc is not None and km != kc:
88
+ raise ValueError(
89
+ f"`mixture_distribution component` ({km}) does not"
90
+ " equal `component_distribution.batch_shape[-1]`"
91
+ f" ({kc})"
92
+ )
93
+ self._num_component = km
94
+
95
+ event_shape = self._component_distribution.event_shape
96
+ self._event_ndims = len(event_shape)
97
+ super().__init__(
98
+ batch_shape=cdbs, event_shape=event_shape, validate_args=validate_args
99
+ )
100
+
101
+ def expand(self, batch_shape, _instance=None):
102
+ batch_shape = torch.Size(batch_shape)
103
+ batch_shape_comp = batch_shape + (self._num_component,)
104
+ new = self._get_checked_instance(MixtureSameFamily, _instance)
105
+ new._component_distribution = self._component_distribution.expand(
106
+ batch_shape_comp
107
+ )
108
+ new._mixture_distribution = self._mixture_distribution.expand(batch_shape)
109
+ new._num_component = self._num_component
110
+ new._event_ndims = self._event_ndims
111
+ event_shape = new._component_distribution.event_shape
112
+ super(MixtureSameFamily, new).__init__(
113
+ batch_shape=batch_shape, event_shape=event_shape, validate_args=False
114
+ )
115
+ new._validate_args = self._validate_args
116
+ return new
117
+
118
+ @constraints.dependent_property
119
+ def support(self):
120
+ # FIXME this may have the wrong shape when support contains batched
121
+ # parameters
122
+ return self._component_distribution.support
123
+
124
+ @property
125
+ def mixture_distribution(self):
126
+ return self._mixture_distribution
127
+
128
+ @property
129
+ def component_distribution(self):
130
+ return self._component_distribution
131
+
132
+ @property
133
+ def mean(self):
134
+ probs = self._pad_mixture_dimensions(self.mixture_distribution.probs)
135
+ return torch.sum(
136
+ probs * self.component_distribution.mean, dim=-1 - self._event_ndims
137
+ ) # [B, E]
138
+
139
+ @property
140
+ def variance(self):
141
+ # Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
142
+ probs = self._pad_mixture_dimensions(self.mixture_distribution.probs)
143
+ mean_cond_var = torch.sum(
144
+ probs * self.component_distribution.variance, dim=-1 - self._event_ndims
145
+ )
146
+ var_cond_mean = torch.sum(
147
+ probs * (self.component_distribution.mean - self._pad(self.mean)).pow(2.0),
148
+ dim=-1 - self._event_ndims,
149
+ )
150
+ return mean_cond_var + var_cond_mean
151
+
152
+ def cdf(self, x):
153
+ x = self._pad(x)
154
+ cdf_x = self.component_distribution.cdf(x)
155
+ mix_prob = self.mixture_distribution.probs
156
+
157
+ return torch.sum(cdf_x * mix_prob, dim=-1)
158
+
159
+ def log_prob(self, x):
160
+ if self._validate_args:
161
+ self._validate_sample(x)
162
+ x = self._pad(x)
163
+ log_prob_x = self.component_distribution.log_prob(x) # [S, B, k]
164
+ log_mix_prob = torch.log_softmax(
165
+ self.mixture_distribution.logits, dim=-1
166
+ ) # [B, k]
167
+ return torch.logsumexp(log_prob_x + log_mix_prob, dim=-1) # [S, B]
168
+
169
+ def sample(self, sample_shape=torch.Size()):
170
+ with torch.no_grad():
171
+ sample_len = len(sample_shape)
172
+ batch_len = len(self.batch_shape)
173
+ gather_dim = sample_len + batch_len
174
+ es = self.event_shape
175
+
176
+ # mixture samples [n, B]
177
+ mix_sample = self.mixture_distribution.sample(sample_shape)
178
+ mix_shape = mix_sample.shape
179
+
180
+ # component samples [n, B, k, E]
181
+ comp_samples = self.component_distribution.sample(sample_shape)
182
+
183
+ # Gather along the k dimension
184
+ mix_sample_r = mix_sample.reshape(
185
+ mix_shape + torch.Size([1] * (len(es) + 1))
186
+ )
187
+ mix_sample_r = mix_sample_r.repeat(
188
+ torch.Size([1] * len(mix_shape)) + torch.Size([1]) + es
189
+ )
190
+
191
+ samples = torch.gather(comp_samples, gather_dim, mix_sample_r)
192
+ return samples.squeeze(gather_dim)
193
+
194
+ def _pad(self, x):
195
+ return x.unsqueeze(-1 - self._event_ndims)
196
+
197
+ def _pad_mixture_dimensions(self, x):
198
+ dist_batch_ndims = len(self.batch_shape)
199
+ cat_batch_ndims = len(self.mixture_distribution.batch_shape)
200
+ pad_ndims = 0 if cat_batch_ndims == 1 else dist_batch_ndims - cat_batch_ndims
201
+ xs = x.shape
202
+ x = x.reshape(
203
+ xs[:-1]
204
+ + torch.Size(pad_ndims * [1])
205
+ + xs[-1:]
206
+ + torch.Size(self._event_ndims * [1])
207
+ )
208
+ return x
209
+
210
+ def __repr__(self):
211
+ args_string = (
212
+ f"\n {self.mixture_distribution},\n {self.component_distribution}"
213
+ )
214
+ return "MixtureSameFamily" + "(" + args_string + ")"
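A minimal usage sketch of the class above (illustrative values, not part of the upstream file): `sample` draws a component index from the `Categorical` and gathers the matching component sample, while `log_prob` is a log-sum-exp of component log-densities plus log mixture weights.

import torch
from torch import distributions as D

# 5-component Gaussian mixture in 2D; the rightmost batch dim (5) indexes components.
mix = D.Categorical(torch.ones(5))
comp = D.Independent(D.Normal(torch.randn(5, 2), torch.rand(5, 2) + 0.1), 1)
gmm = D.MixtureSameFamily(mix, comp)

x = gmm.sample((4,))   # shape [4, 2]: one component index drawn per sample
lp = gmm.log_prob(x)   # shape [4]: logsumexp over the 5 components
print(x.shape, lp.shape, gmm.mean.shape, gmm.variance.shape)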
venv/lib/python3.10/site-packages/torch/distributions/multivariate_normal.py ADDED
@@ -0,0 +1,262 @@
+import math
+
+import torch
+from torch.distributions import constraints
+from torch.distributions.distribution import Distribution
+from torch.distributions.utils import _standard_normal, lazy_property
+
+__all__ = ["MultivariateNormal"]
+
+
+def _batch_mv(bmat, bvec):
+    r"""
+    Performs a batched matrix-vector product, with compatible but different batch shapes.
+
+    This function takes as input `bmat`, containing :math:`n \times n` matrices, and
+    `bvec`, containing length :math:`n` vectors.
+
+    Both `bmat` and `bvec` may have any number of leading dimensions, which correspond
+    to a batch shape. They are not necessarily assumed to have the same batch shape,
+    just ones which can be broadcasted.
+    """
+    return torch.matmul(bmat, bvec.unsqueeze(-1)).squeeze(-1)
+
+
+def _batch_mahalanobis(bL, bx):
+    r"""
+    Computes the squared Mahalanobis distance :math:`\mathbf{x}^\top\mathbf{M}^{-1}\mathbf{x}`
+    for a factored :math:`\mathbf{M} = \mathbf{L}\mathbf{L}^\top`.
+
+    Accepts batches for both bL and bx. They are not necessarily assumed to have the same batch
+    shape, but the batch shape of `bL` should be broadcastable to that of `bx`.
+    """
+    n = bx.size(-1)
+    bx_batch_shape = bx.shape[:-1]
+
+    # Assume that bL.shape = (i, 1, n, n), bx.shape = (..., i, j, n),
+    # we are going to make bx have shape (..., 1, j, i, 1, n) to apply batched tri.solve
+    bx_batch_dims = len(bx_batch_shape)
+    bL_batch_dims = bL.dim() - 2
+    outer_batch_dims = bx_batch_dims - bL_batch_dims
+    old_batch_dims = outer_batch_dims + bL_batch_dims
+    new_batch_dims = outer_batch_dims + 2 * bL_batch_dims
+    # Reshape bx with the shape (..., 1, i, j, 1, n)
+    bx_new_shape = bx.shape[:outer_batch_dims]
+    for sL, sx in zip(bL.shape[:-2], bx.shape[outer_batch_dims:-1]):
+        bx_new_shape += (sx // sL, sL)
+    bx_new_shape += (n,)
+    bx = bx.reshape(bx_new_shape)
+    # Permute bx to make it have shape (..., 1, j, i, 1, n)
+    permute_dims = (
+        list(range(outer_batch_dims))
+        + list(range(outer_batch_dims, new_batch_dims, 2))
+        + list(range(outer_batch_dims + 1, new_batch_dims, 2))
+        + [new_batch_dims]
+    )
+    bx = bx.permute(permute_dims)
+
+    flat_L = bL.reshape(-1, n, n)  # shape = b x n x n
+    flat_x = bx.reshape(-1, flat_L.size(0), n)  # shape = c x b x n
+    flat_x_swap = flat_x.permute(1, 2, 0)  # shape = b x n x c
+    M_swap = (
+        torch.linalg.solve_triangular(flat_L, flat_x_swap, upper=False).pow(2).sum(-2)
+    )  # shape = b x c
+    M = M_swap.t()  # shape = c x b
+
+    # Now we revert the above reshape and permute operators.
+    permuted_M = M.reshape(bx.shape[:-1])  # shape = (..., 1, j, i, 1)
+    permute_inv_dims = list(range(outer_batch_dims))
+    for i in range(bL_batch_dims):
+        permute_inv_dims += [outer_batch_dims + i, old_batch_dims + i]
+    reshaped_M = permuted_M.permute(permute_inv_dims)  # shape = (..., 1, i, j, 1)
+    return reshaped_M.reshape(bx_batch_shape)
+
+
+def _precision_to_scale_tril(P):
+    # Ref: https://nbviewer.jupyter.org/gist/fehiepsi/5ef8e09e61604f10607380467eb82006#Precision-to-scale_tril
+    Lf = torch.linalg.cholesky(torch.flip(P, (-2, -1)))
+    L_inv = torch.transpose(torch.flip(Lf, (-2, -1)), -2, -1)
+    Id = torch.eye(P.shape[-1], dtype=P.dtype, device=P.device)
+    L = torch.linalg.solve_triangular(L_inv, Id, upper=False)
+    return L
+
+
+class MultivariateNormal(Distribution):
+    r"""
+    Creates a multivariate normal (also called Gaussian) distribution
+    parameterized by a mean vector and a covariance matrix.
+
+    The multivariate normal distribution can be parameterized either
+    in terms of a positive definite covariance matrix :math:`\mathbf{\Sigma}`
+    or a positive definite precision matrix :math:`\mathbf{\Sigma}^{-1}`
+    or a lower-triangular matrix :math:`\mathbf{L}` with positive-valued
+    diagonal entries, such that
+    :math:`\mathbf{\Sigma} = \mathbf{L}\mathbf{L}^\top`. This triangular matrix
+    can be obtained via e.g. Cholesky decomposition of the covariance.
+
+    Example:
+
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
+        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
+        >>> m = MultivariateNormal(torch.zeros(2), torch.eye(2))
+        >>> m.sample()  # normally distributed with mean=`[0,0]` and covariance_matrix=`I`
+        tensor([-0.2102, -0.5429])
+
+    Args:
+        loc (Tensor): mean of the distribution
+        covariance_matrix (Tensor): positive-definite covariance matrix
+        precision_matrix (Tensor): positive-definite precision matrix
+        scale_tril (Tensor): lower-triangular factor of covariance, with positive-valued diagonal
+
+    Note:
+        Only one of :attr:`covariance_matrix` or :attr:`precision_matrix` or
+        :attr:`scale_tril` can be specified.
+
+        Using :attr:`scale_tril` will be more efficient: all computations internally
+        are based on :attr:`scale_tril`. If :attr:`covariance_matrix` or
+        :attr:`precision_matrix` is passed instead, it is only used to compute
+        the corresponding lower triangular matrices using a Cholesky decomposition.
+    """
+    arg_constraints = {
+        "loc": constraints.real_vector,
+        "covariance_matrix": constraints.positive_definite,
+        "precision_matrix": constraints.positive_definite,
+        "scale_tril": constraints.lower_cholesky,
+    }
+    support = constraints.real_vector
+    has_rsample = True
+
+    def __init__(
+        self,
+        loc,
+        covariance_matrix=None,
+        precision_matrix=None,
+        scale_tril=None,
+        validate_args=None,
+    ):
+        if loc.dim() < 1:
+            raise ValueError("loc must be at least one-dimensional.")
+        if (covariance_matrix is not None) + (scale_tril is not None) + (
+            precision_matrix is not None
+        ) != 1:
+            raise ValueError(
+                "Exactly one of covariance_matrix or precision_matrix or scale_tril may be specified."
+            )
+
+        if scale_tril is not None:
+            if scale_tril.dim() < 2:
+                raise ValueError(
+                    "scale_tril matrix must be at least two-dimensional, "
+                    "with optional leading batch dimensions"
+                )
+            batch_shape = torch.broadcast_shapes(scale_tril.shape[:-2], loc.shape[:-1])
+            self.scale_tril = scale_tril.expand(batch_shape + (-1, -1))
+        elif covariance_matrix is not None:
+            if covariance_matrix.dim() < 2:
+                raise ValueError(
+                    "covariance_matrix must be at least two-dimensional, "
+                    "with optional leading batch dimensions"
+                )
+            batch_shape = torch.broadcast_shapes(
+                covariance_matrix.shape[:-2], loc.shape[:-1]
+            )
+            self.covariance_matrix = covariance_matrix.expand(batch_shape + (-1, -1))
+        else:
+            if precision_matrix.dim() < 2:
+                raise ValueError(
+                    "precision_matrix must be at least two-dimensional, "
+                    "with optional leading batch dimensions"
+                )
+            batch_shape = torch.broadcast_shapes(
+                precision_matrix.shape[:-2], loc.shape[:-1]
+            )
+            self.precision_matrix = precision_matrix.expand(batch_shape + (-1, -1))
+        self.loc = loc.expand(batch_shape + (-1,))
+
+        event_shape = self.loc.shape[-1:]
+        super().__init__(batch_shape, event_shape, validate_args=validate_args)
+
+        if scale_tril is not None:
+            self._unbroadcasted_scale_tril = scale_tril
+        elif covariance_matrix is not None:
+            self._unbroadcasted_scale_tril = torch.linalg.cholesky(covariance_matrix)
+        else:  # precision_matrix is not None
+            self._unbroadcasted_scale_tril = _precision_to_scale_tril(precision_matrix)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(MultivariateNormal, _instance)
+        batch_shape = torch.Size(batch_shape)
+        loc_shape = batch_shape + self.event_shape
+        cov_shape = batch_shape + self.event_shape + self.event_shape
+        new.loc = self.loc.expand(loc_shape)
+        new._unbroadcasted_scale_tril = self._unbroadcasted_scale_tril
+        if "covariance_matrix" in self.__dict__:
+            new.covariance_matrix = self.covariance_matrix.expand(cov_shape)
+        if "scale_tril" in self.__dict__:
+            new.scale_tril = self.scale_tril.expand(cov_shape)
+        if "precision_matrix" in self.__dict__:
+            new.precision_matrix = self.precision_matrix.expand(cov_shape)
+        super(MultivariateNormal, new).__init__(
+            batch_shape, self.event_shape, validate_args=False
+        )
+        new._validate_args = self._validate_args
+        return new
+
+    @lazy_property
+    def scale_tril(self):
+        return self._unbroadcasted_scale_tril.expand(
+            self._batch_shape + self._event_shape + self._event_shape
+        )
+
+    @lazy_property
+    def covariance_matrix(self):
+        return torch.matmul(
+            self._unbroadcasted_scale_tril, self._unbroadcasted_scale_tril.mT
+        ).expand(self._batch_shape + self._event_shape + self._event_shape)
+
+    @lazy_property
+    def precision_matrix(self):
+        return torch.cholesky_inverse(self._unbroadcasted_scale_tril).expand(
+            self._batch_shape + self._event_shape + self._event_shape
+        )
+
+    @property
+    def mean(self):
+        return self.loc
+
+    @property
+    def mode(self):
+        return self.loc
+
+    @property
+    def variance(self):
+        return (
+            self._unbroadcasted_scale_tril.pow(2)
+            .sum(-1)
+            .expand(self._batch_shape + self._event_shape)
+        )
+
+    def rsample(self, sample_shape=torch.Size()):
+        shape = self._extended_shape(sample_shape)
+        eps = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
+        return self.loc + _batch_mv(self._unbroadcasted_scale_tril, eps)
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        diff = value - self.loc
+        M = _batch_mahalanobis(self._unbroadcasted_scale_tril, diff)
+        half_log_det = (
+            self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
+        )
+        return -0.5 * (self._event_shape[0] * math.log(2 * math.pi) + M) - half_log_det
+
+    def entropy(self):
+        half_log_det = (
+            self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
+        )
+        H = 0.5 * self._event_shape[0] * (1.0 + math.log(2 * math.pi)) + half_log_det
+        if len(self._batch_shape) == 0:
+            return H
+        else:
+            return H.expand(self._batch_shape)
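To make the note on the three parameterizations concrete, a small sketch (illustrative SPD matrix built from a random factor, not part of the upstream file) showing that `covariance_matrix`, `precision_matrix`, and `scale_tril` describe the same distribution:

import torch
from torch.distributions import MultivariateNormal

torch.manual_seed(0)
A = torch.randn(3, 3)
cov = A @ A.mT + 0.1 * torch.eye(3)   # symmetric positive-definite covariance
loc = torch.zeros(3)

m_cov = MultivariateNormal(loc, covariance_matrix=cov)
m_prec = MultivariateNormal(loc, precision_matrix=torch.inverse(cov))
m_tril = MultivariateNormal(loc, scale_tril=torch.linalg.cholesky(cov))

x = m_cov.rsample()
# All three agree up to numerical error, since each encodes the same Sigma internally
# through the lower-triangular factor used by log_prob and rsample.
print(m_cov.log_prob(x), m_prec.log_prob(x), m_tril.log_prob(x))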
venv/lib/python3.10/site-packages/torch/distributions/negative_binomial.py ADDED
@@ -0,0 +1,133 @@
+import torch
+import torch.nn.functional as F
+from torch.distributions import constraints
+from torch.distributions.distribution import Distribution
+from torch.distributions.utils import (
+    broadcast_all,
+    lazy_property,
+    logits_to_probs,
+    probs_to_logits,
+)
+
+__all__ = ["NegativeBinomial"]
+
+
+class NegativeBinomial(Distribution):
+    r"""
+    Creates a Negative Binomial distribution, i.e. distribution
+    of the number of successful independent and identical Bernoulli trials
+    before :attr:`total_count` failures are achieved. The probability
+    of success of each Bernoulli trial is :attr:`probs`.
+
+    Args:
+        total_count (float or Tensor): non-negative number of negative Bernoulli
+            trials to stop, although the distribution is still valid for real
+            valued count
+        probs (Tensor): Event probabilities of success in the half open interval [0, 1)
+        logits (Tensor): Event log-odds for probabilities of success
+    """
+    arg_constraints = {
+        "total_count": constraints.greater_than_eq(0),
+        "probs": constraints.half_open_interval(0.0, 1.0),
+        "logits": constraints.real,
+    }
+    support = constraints.nonnegative_integer
+
+    def __init__(self, total_count, probs=None, logits=None, validate_args=None):
+        if (probs is None) == (logits is None):
+            raise ValueError(
+                "Either `probs` or `logits` must be specified, but not both."
+            )
+        if probs is not None:
+            (
+                self.total_count,
+                self.probs,
+            ) = broadcast_all(total_count, probs)
+            self.total_count = self.total_count.type_as(self.probs)
+        else:
+            (
+                self.total_count,
+                self.logits,
+            ) = broadcast_all(total_count, logits)
+            self.total_count = self.total_count.type_as(self.logits)
+
+        self._param = self.probs if probs is not None else self.logits
+        batch_shape = self._param.size()
+        super().__init__(batch_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(NegativeBinomial, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.total_count = self.total_count.expand(batch_shape)
+        if "probs" in self.__dict__:
+            new.probs = self.probs.expand(batch_shape)
+            new._param = new.probs
+        if "logits" in self.__dict__:
+            new.logits = self.logits.expand(batch_shape)
+            new._param = new.logits
+        super(NegativeBinomial, new).__init__(batch_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    def _new(self, *args, **kwargs):
+        return self._param.new(*args, **kwargs)
+
+    @property
+    def mean(self):
+        return self.total_count * torch.exp(self.logits)
+
+    @property
+    def mode(self):
+        return ((self.total_count - 1) * self.logits.exp()).floor().clamp(min=0.0)
+
+    @property
+    def variance(self):
+        return self.mean / torch.sigmoid(-self.logits)
+
+    @lazy_property
+    def logits(self):
+        return probs_to_logits(self.probs, is_binary=True)
+
+    @lazy_property
+    def probs(self):
+        return logits_to_probs(self.logits, is_binary=True)
+
+    @property
+    def param_shape(self):
+        return self._param.size()
+
+    @lazy_property
+    def _gamma(self):
+        # Note we avoid validating because self.total_count can be zero.
+        return torch.distributions.Gamma(
+            concentration=self.total_count,
+            rate=torch.exp(-self.logits),
+            validate_args=False,
+        )
+
+    def sample(self, sample_shape=torch.Size()):
+        with torch.no_grad():
+            rate = self._gamma.sample(sample_shape=sample_shape)
+            return torch.poisson(rate)
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+
+        log_unnormalized_prob = self.total_count * F.logsigmoid(
+            -self.logits
+        ) + value * F.logsigmoid(self.logits)
+
+        log_normalization = (
+            -torch.lgamma(self.total_count + value)
+            + torch.lgamma(1.0 + value)
+            + torch.lgamma(self.total_count)
+        )
+        # The case self.total_count == 0 and value == 0 has probability 1 but
+        # lgamma(0) is infinite. Handle this case separately using a function
+        # that does not modify tensors in place to allow Jit compilation.
+        log_normalization = log_normalization.masked_fill(
+            self.total_count + value == 0.0, 0.0
+        )
+
+        return log_unnormalized_prob - log_normalization
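As a rough check on the `mean` and `variance` properties above (with r = total_count and success probability p, mean = r * p / (1 - p) and variance = mean / (1 - p)), a minimal sampling sketch with arbitrary parameters, not part of the upstream file:

import torch
from torch.distributions import NegativeBinomial

torch.manual_seed(0)
nb = NegativeBinomial(total_count=10.0, probs=torch.tensor(0.3))

# Samples are drawn via the gamma-Poisson mixture defined in `sample` above.
samples = nb.sample((100000,))
print(samples.mean(), nb.mean)       # both close to 10 * 0.3 / 0.7 ~= 4.29
print(samples.var(), nb.variance)    # both close to 4.29 / 0.7 ~= 6.12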