applied-ai-018 commited on
Commit
2154cf2
·
verified ·
1 Parent(s): bd5e920

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/fp32.pt +3 -0
  3. venv/lib/python3.10/site-packages/torch/_functorch/__init__.py +5 -0
  4. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/__init__.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/aot_autograd.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/autograd_function.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/batch_norm_replacement.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/benchmark_utils.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/compile_utils.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/compilers.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/eager_transforms.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/functional_call.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/fx_minifier.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/make_functional.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/partitioners.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/pyfunctorch.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/python_key.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/pytree_hacks.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/top_operators_github_usage.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/utils.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/vmap.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__init__.py +5 -0
  23. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/collect_metadata_analysis.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/dispatch_and_compile_graph.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/functional_utils.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/input_output_analysis.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/jit_compile_runtime_wrappers.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/logging_utils.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/runtime_wrappers.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/schemas.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/subclass_utils.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/traced_function_transforms.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/utils.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/collect_metadata_analysis.py +626 -0
  36. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py +192 -0
  37. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/functional_utils.py +370 -0
  38. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/input_output_analysis.py +432 -0
  39. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py +936 -0
  40. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/logging_utils.py +135 -0
  41. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/runtime_wrappers.py +1021 -0
  42. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/schemas.py +696 -0
  43. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/subclass_utils.py +295 -0
  44. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/traced_function_transforms.py +698 -0
  45. venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/utils.py +226 -0
  46. venv/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py +1246 -0
  47. venv/lib/python3.10/site-packages/torch/_functorch/apis.py +401 -0
  48. venv/lib/python3.10/site-packages/torch/_functorch/autograd_function.py +659 -0
  49. venv/lib/python3.10/site-packages/torch/_functorch/batch_norm_replacement.py +24 -0
  50. venv/lib/python3.10/site-packages/torch/_functorch/benchmark_utils.py +195 -0
ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d22d5573f9a1c363406a5caaba27fa962267b596020bb774e5e0a941bd35bdf9
3
+ size 50332828
ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd6970ddc989fe7653460eb102c1cb92dec1227367988731e8b574254a582c36
3
+ size 50332749
venv/lib/python3.10/site-packages/torch/_functorch/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/aot_autograd.cpython-310.pyc ADDED
Binary file (27.8 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/autograd_function.cpython-310.pyc ADDED
Binary file (15.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/batch_norm_replacement.cpython-310.pyc ADDED
Binary file (1.04 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/benchmark_utils.cpython-310.pyc ADDED
Binary file (5.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/compile_utils.cpython-310.pyc ADDED
Binary file (3.13 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/compilers.cpython-310.pyc ADDED
Binary file (13.6 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/eager_transforms.cpython-310.pyc ADDED
Binary file (54.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/functional_call.cpython-310.pyc ADDED
Binary file (11.6 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/fx_minifier.cpython-310.pyc ADDED
Binary file (13.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/make_functional.cpython-310.pyc ADDED
Binary file (21.3 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/partitioners.cpython-310.pyc ADDED
Binary file (30.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/pyfunctorch.cpython-310.pyc ADDED
Binary file (8.07 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/python_key.cpython-310.pyc ADDED
Binary file (391 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/pytree_hacks.cpython-310.pyc ADDED
Binary file (584 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/top_operators_github_usage.cpython-310.pyc ADDED
Binary file (14.6 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.32 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/vmap.cpython-310.pyc ADDED
Binary file (12.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (198 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/collect_metadata_analysis.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/dispatch_and_compile_graph.cpython-310.pyc ADDED
Binary file (4.12 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/functional_utils.cpython-310.pyc ADDED
Binary file (8.65 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/input_output_analysis.cpython-310.pyc ADDED
Binary file (9.7 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/jit_compile_runtime_wrappers.cpython-310.pyc ADDED
Binary file (19.1 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/logging_utils.cpython-310.pyc ADDED
Binary file (4.32 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/runtime_wrappers.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/schemas.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/subclass_utils.cpython-310.pyc ADDED
Binary file (6.29 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/traced_function_transforms.cpython-310.pyc ADDED
Binary file (15.8 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/utils.cpython-310.pyc ADDED
Binary file (6.76 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/collect_metadata_analysis.py ADDED
@@ -0,0 +1,626 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module is one of the analysis modules - it takes as input a function or graph
3
+ and some preexisting properties, and returns some data that is useful for deciding
4
+ how to further proceed with compilation or construct runtime wrappers.
5
+
6
+ In particular, the analysis here constructs view and mutation metadata from running
7
+ a functionalized version of the graph under compilation.
8
+ """
9
+
10
+ import collections
11
+ import logging
12
+ from functools import wraps
13
+ from typing import Callable, DefaultDict, Dict, List
14
+
15
+ import torch
16
+ import torch.utils._pytree as pytree
17
+ from torch import Tensor
18
+ from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode
19
+ from torch._subclasses.meta_utils import safe_is_leaf
20
+ from torch.fx.experimental.symbolic_shapes import is_concrete_int
21
+ from torch.multiprocessing.reductions import StorageWeakRef
22
+ from torch.utils._python_dispatch import (
23
+ is_traceable_wrapper_subclass,
24
+ transform_subclass,
25
+ )
26
+ from .functional_utils import (
27
+ are_all_mutations_hidden_from_autograd,
28
+ are_all_mutations_under_no_grad_or_inference_mode,
29
+ from_fun,
30
+ has_data_mutation,
31
+ has_metadata_mutation,
32
+ has_same_metadata,
33
+ to_fun,
34
+ )
35
+ from .schemas import (
36
+ InputAliasInfo,
37
+ MutationType,
38
+ OutputAliasInfo,
39
+ OutputType,
40
+ ViewAndMutationMeta,
41
+ )
42
+ from .subclass_utils import create_subclass_meta
43
+
44
+ from .utils import _get_autocast_states, KNOWN_TYPES, strict_zip
45
+
46
+ zip = strict_zip
47
+
48
+ log = logging.getLogger(__name__)
49
+
50
+
51
+ # This is a version of functionalization that is specifically designed
52
+ # for the AOTAutograd use case.
53
+ #
54
+ # Unlike functorch's variant, this doesn't use the functorch level system,
55
+ # instead it directly uses PyTorch's conventional dispatcher to hit the
56
+ # functionalization key. In particular, this means that FunctionalTensorWrapper
57
+ # can have autograd data stored directly on it.
58
+ #
59
+ # In typical AOTAutograd usage, the dispatch key order will look like:
60
+ #
61
+ # Autograd - Functionalization ~~~~> Proxy Mode - Fake Tensor
62
+ # outer tensor inner tensor
63
+ #
64
+ # Returns:
65
+ # - ViewAndMutationMeta, telling us metadata about the inputs and outputs, and
66
+ # The list of outputs from the forward, but **only** the outputs that we need
67
+ # to pass in as tangents into the backward.
68
+ # Specifically, aliased outputs from the forward get regenerated, and don't participate
69
+ # in the compiled backward function.
70
+ def run_functionalized_fw_and_collect_metadata(
71
+ f,
72
+ *,
73
+ keep_input_mutations: bool,
74
+ # TODO: refactor to kill this flag
75
+ is_train: bool = False,
76
+ pre_dispatch: bool = False,
77
+ ) -> Callable[..., ViewAndMutationMeta]:
78
+ memo: Dict[Tensor, Tensor] = {}
79
+
80
+ def _to_fun(t):
81
+ if isinstance(t, Tensor):
82
+ if t in memo:
83
+ return memo[t]
84
+ r = to_fun(t)
85
+ memo[t] = r
86
+ return r
87
+ else:
88
+ return t
89
+
90
+ @wraps(f)
91
+ def inner(*flat_args):
92
+ # This function is meant to be run with the forward, which expects a flat list of tensor/symint/other args.
93
+ assert all(isinstance(a, tuple(KNOWN_TYPES)) for a in flat_args)
94
+
95
+ input_info: List[InputAliasInfo] = []
96
+ output_info: List[OutputAliasInfo] = []
97
+
98
+ prior_grad_enabled = torch.is_grad_enabled()
99
+ prior_autocast_states = _get_autocast_states()
100
+
101
+ # See Note [Disabling Functionalize TLS Above Python Functionalization]
102
+ disable_above = torch._C._ExcludeDispatchKeyGuard(
103
+ torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
104
+ )
105
+
106
+ # It doesn't matter if we run this under predispatch or not because it is
107
+ # only for figuring out metadata
108
+ mode = FunctionalTensorMode(_allow_token_discovery=True)
109
+ with disable_above, mode:
110
+ # precondition: The passed in function already handles unflattening inputs + flattening outputs
111
+ flat_f_args = pytree.tree_map(_to_fun, flat_args)
112
+ flat_f_outs = f(*flat_f_args)
113
+
114
+ if prior_autocast_states != _get_autocast_states():
115
+ raise RuntimeError(
116
+ "AOTAutograd does not support tracing graphs that mutate the autocast state. "
117
+ "Dynamo will only insert autocast context managers (e.g. with torch.autocast(..)) into the graph, "
118
+ "which will unwind all of their mutations to autocast state before the graph exits. "
119
+ "If you encounter this error while using torch.compile, please file a bug."
120
+ )
121
+
122
+ # Inspect the state of the input tensor functional wrapper to detect input mutation info
123
+ # If inp[i] has a metadata-only mutation, then maybe_inputs_with_mutated_metadata[i] contains the updated version
124
+ for i, (arg, f_arg) in enumerate(zip(flat_args, flat_f_args)):
125
+ # NB: Mutation of non-contiguous tensor subclass input can result in a mismatch in
126
+ # strides between the functionalized arg inner tensors and non-functionalized arg inner
127
+ # tensors. This is a problem as the inner tensor stride change may not be reflected
128
+ # correctly in the outer tensor, so disallow this for now.
129
+ mutates_data = has_data_mutation(f_arg)
130
+ if (
131
+ mutates_data
132
+ and not arg.is_contiguous()
133
+ and is_traceable_wrapper_subclass(arg)
134
+ ):
135
+ raise RuntimeError(
136
+ "Mutations on non-contiguous inputs are currently not allowed on "
137
+ "tensor subclasses"
138
+ )
139
+
140
+ if not isinstance(arg, Tensor):
141
+ new_arg = arg
142
+ else:
143
+ new_arg = from_fun(f_arg)
144
+ mutates_metadata = has_metadata_mutation(
145
+ f_arg, arg, check_only_storage_mutation=False
146
+ )
147
+ if mutates_metadata and is_traceable_wrapper_subclass(arg):
148
+ raise RuntimeError(
149
+ "Metadata mutations are currently not allowed on tensor subclasses"
150
+ )
151
+ mutates_storage_metadata = has_metadata_mutation(
152
+ f_arg, arg, check_only_storage_mutation=True
153
+ )
154
+ mutations_hidden_from_autograd = are_all_mutations_hidden_from_autograd(
155
+ f_arg
156
+ )
157
+ mutations_under_no_grad_or_inference_mode = (
158
+ mutates_data
159
+ and are_all_mutations_under_no_grad_or_inference_mode(f_arg)
160
+ )
161
+
162
+ # Here, we're saying that if an input experienced a set call, inp.set_(other),
163
+ # then we can effectively not have to worry about whether its data was mutated.
164
+ # There are 3 cases:
165
+ # (1) We mutate inp *after* the set_() call. other is a graph intermediate.
166
+ # In this case, we're not really mutating the input storage of "inp";
167
+ # we're mutating the storage of an intermdiate value (other),
168
+ # and slamming that storage into the input tensor. So no data mutation is necessary.
169
+ # (2) We mutate inp *after* the set_() call. other is a graph *input*.
170
+ # In this case, the data mutation will be properly handled in the runtime
171
+ # epilogue during the processing of "other"
172
+ # (3) We mutate inp *before* the set_() call.
173
+ # This case is *not* currently handled.
174
+ # TODO: discuss this in the PR. Both supporting this, and detecting + erroring out,
175
+ # seem painful to get working.
176
+ if mutates_storage_metadata:
177
+ mutates_data = False
178
+
179
+ requires_grad = isinstance(f_arg, torch.Tensor) and f_arg.requires_grad
180
+
181
+ input_info.append(
182
+ InputAliasInfo(
183
+ is_leaf=isinstance(arg, Tensor) and safe_is_leaf(arg),
184
+ mutates_data=mutates_data,
185
+ mutates_metadata=mutates_metadata,
186
+ mutations_hidden_from_autograd=mutations_hidden_from_autograd,
187
+ mutates_storage_metadata=mutates_storage_metadata,
188
+ mutations_under_no_grad_or_inference_mode=mutations_under_no_grad_or_inference_mode,
189
+ requires_grad=requires_grad,
190
+ keep_input_mutations=keep_input_mutations,
191
+ )
192
+ )
193
+
194
+ # If a function involves creating a tensor, and returning a view of it, such that its _base is the intermediate,
195
+ # We need to make sure our graph returns the _base as a graph output, and we manually recreate the view
196
+ # to return to the user. Why? The backend compiler is free to (incorrectly) not set requires_grad
197
+ # on the base tensor, but we are obligated to properly set requires-gradness on the real output.
198
+
199
+ inp_storage_refs = {
200
+ StorageWeakRef(inpt.untyped_storage()): idx
201
+ for idx, inpt in enumerate(flat_f_args)
202
+ if isinstance(inpt, Tensor)
203
+ }
204
+
205
+ # We need inp tensor id's to be able to tell if an outputs **are** inputs.
206
+ inp_tensor_ids = {id(inpt) for inpt in flat_f_args if isinstance(inpt, Tensor)}
207
+ # We need output tensor id's to tell if any output._base` attributes **are** other outputs.
208
+ # (This is also a dict because we need to know that output's index, so we can regenerate
209
+ # the alias from it).
210
+ out_tensor_ids = {id(o): i for i, o in enumerate(flat_f_outs)}
211
+
212
+ # Keep track of which outputs alias other outputs
213
+ out_tensor_alias_counts: DefaultDict = collections.defaultdict(int)
214
+ # This tells us, for a given group of outputs that alias each other,
215
+ # whether they e.g. all came from an unbind call
216
+ num_aliased_tensors_that_are_multi_output_views: DefaultDict = (
217
+ collections.defaultdict(int)
218
+ )
219
+ out_storage_to_tensors: DefaultDict = collections.defaultdict(set)
220
+ curr_storage = None
221
+ for o in flat_f_outs:
222
+ if isinstance(o, torch.Tensor):
223
+ curr_storage = StorageWeakRef(o.untyped_storage())
224
+ out_tensor_alias_counts[curr_storage] += 1
225
+ # Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
226
+ # This is an optimization on top of the "alias of intermediates" logic,
227
+ # which you can read more about under Note [AOT Autograd: outputs aliasing inputs or intermediates!]
228
+ #
229
+ # Before describing the optimization: this is important for AOTAutograd to have good
230
+ # perf around, multi-output views. HOWEVER:
231
+ # - There is a more generic change to AOTAutograd that we'd like to make, that subsumes this case,
232
+ # around using pre-dispatch tracing to partition out a graph so we can faithfully replay all
233
+ # views without having to regenerate them at runtime.
234
+ # - It's loosely described in this doc (more details will be added soon):
235
+ # https://docs.google.com/document/d/1DlfFq8TKbuAn2zyJxLfoW-X1qkkm5PLdHFtySo03QAk/edit
236
+ # - Once that change lands, we should just rip out this "optimization", since:
237
+ # (1) It will be fully unnecessary
238
+ # (2) Although it is only a few lines of code, it is a bit difficult to reason about
239
+ # its correctness with the autograd engine in all cases.
240
+ #
241
+ #
242
+ # What is this optimization? Consider the below case:
243
+ # def f(x):
244
+ # intermediate = x.mul(2)
245
+ # # x and intermediate here require grad
246
+ # o1, o2, ... o10 = intermediate.unbind(-1)
247
+ # return intermediate, o1, o2, ... o10
248
+ # Now, the "intermediate base" handling in AOTAutograd implies that we must do the following:
249
+ # (1) return "intermediate as an extra output of the compiled graph
250
+ # (2) regenerate each aliased output off of "intermediate", **outside** of the autograd.Function.
251
+ # The reason AOTAutograd ordinarily does this is for safety: the autograd engine needs to know
252
+ # that o1 through o10 are all aliased, and if we blindly return o1 through o10 from the autograd.Function,
253
+ # this information will be hidden.
254
+ # In particular, mutating one alias might require autograd to update autograd metadata on the other aliases
255
+ # (like their grad_fn, for example, when the autograd engine needs to do view-replay).
256
+ #
257
+ # However, intermediate_base logic can be bad for backward performance (we sometimes generate
258
+ # as_strided calls during the intermediate base logic, which can have a slow backward formula).
259
+ # Is it possible to find a set of conditions where it is **safe** to hide the output aliasing from autograd?
260
+ #
261
+ # For a set of outputs of the graph that alias each other, o_1...o_k, consider:
262
+ # (1) They came from the same multi-output view op, e.g. o_1, ..., o_k = intermediate.unbind(0)
263
+ # (2) If there are any other aliases of o_1 through o_k (in the example above, intermediate),
264
+ # **at most** 1 can escape from the graph (e.g. there is not some other graph input/output
265
+ # o_other, that aliases these outputs)
266
+ # (3) o_1...o_k all require_grad, they all share the same ._base, and their ._base requires grad.
267
+ # This condition is important because it's what causes slowness in the intermediate_base
268
+ # codepath of aot_autograd. Ordinarily, o_1...o_k would all get a grad_fn, and
269
+ # aot_autograd's view-replay might give each output an AsStridedBackward as its grad_fn.
270
+ # "K" AsStridedBackward calls will be *much* slower than a single UnbindBackward.
271
+ # In this setup, is it possible to mutate one of the outputs o_i in a way that would affect the autograd meta
272
+ # of the other aliases?
273
+ #
274
+ # Claim: No! Consider a few example (which I'm pretty sure cover all cases of mutation w.r.t. autograd):
275
+ # (a) What happens if we mutate any of o_1 through o_k directly?
276
+ # Autograd raises an error:
277
+ # "RuntimeError: Output 0 of UnbindBackward0 is a view and is being modified inplace. This view is
278
+ # the output of a function that returns multiple views. Such functions do not allow the output
279
+ # views to be modified inplace. You should replace the inplace operation by an out-of-place one."
280
+ # (b) What if we take a view of o_k and mutate it, o_k.view(o_k.shape).mul_(2)?
281
+ # Autograd raises the same error- the "multi-output-view"ness of an alias propagates to future views.
282
+ # (c) What if we mutate o_k under no_grad?
283
+ # Autograd raises the same error
284
+ # (d) What if we detach and mutate, e.g. o_k.detach().mul_(2)?
285
+ # Autograd allows this, *but* autograd updates all alias's grad_fn's to be error functions when accessed.
286
+ # Autograd raises the same error
287
+ # (e) What if we try to mutate another alias of o_1...o_k, that was **not** created from a multi-output view?
288
+ # We promised that there is at most **one** such alias, e.g. intermediate in the example above.
289
+ # You can mutate intermediate, but in eager mode this will change the grad_fn of o_1...o_k
290
+ # to be error fn's.
291
+ # Since intermediate was the *only* non-multi-output-alias, there are no other aliases
292
+ # of `intermediate` around that were produced by the compiled fn and have a valid grad_fn.
293
+ #
294
+ # Coming back to this optimization:
295
+ # Given that it is not possible for mutating one of these aliases to affect the autograd metadata of another alias
296
+ # without causing an error in eager mode, we will simple hide the aliasing from autograd during torch.compile
297
+ # if all of the above conditions are met.
298
+ # This has the slight downside that it's possible to write some "bad" code that autograd will raise an error on
299
+ # in eager but fail to during torch.compile, but it has the benefit that this code has much better performance.
300
+ # NOTE: if and when we eventually update AOTAutograd to do the "view graph slicing" defined here:
301
+ # https://docs.google.com/document/d/1DlfFq8TKbuAn2zyJxLfoW-X1qkkm5PLdHFtySo03QAk/edit,
302
+ # then this optimization will probably matter less and might be ok to remove.
303
+ is_cur_tensor_multi_out_view = isinstance(
304
+ o, FunctionalTensor
305
+ ) and torch._functionalize_is_multi_output_view( # type: ignore[attr-defined]
306
+ o.elem
307
+ )
308
+ if is_cur_tensor_multi_out_view:
309
+ num_aliased_tensors_that_are_multi_output_views[curr_storage] += 1
310
+ out_storage_to_tensors[curr_storage].add(o)
311
+
312
+ # maps the id of an intermediate base to its index in the output of the compiled forward
313
+ intermediate_base_tensor_id_to_output_idx: Dict[int, int] = {}
314
+ intermediate_bases: List[torch.Tensor] = []
315
+ # Why Do We Care If Storage Changed?
316
+ # It's important to understand the implications of storage changes in complex scenarios. Take this example:
317
+ #
318
+ # def f(x):
319
+ # x_storage = x.untyped_storage()
320
+ # non_leaf_tensor = torch.ones(4, requires_grad=True).clone()
321
+ #
322
+ # # Using no_grad() and _unsafe_preserve_version_counter to simulate the .data = operation
323
+ # with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter(x):
324
+ # x.set_(non_leaf_tensor.untyped_storage())
325
+ #
326
+ # out = x.view(-1)
327
+ #
328
+ # # Restoring x to its original storage, again simulating .data = operation
329
+ # with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter(x):
330
+ # x.set_(x_storage)
331
+ #
332
+ # return out
333
+ #
334
+ # In this scenario, 'x' and 'out' have different shapes and are stored at different memory addresses, aka no aliasing.
335
+ # However, due to how set_() and more specificlaly, set is functionalized, is defined to preserve eager semantics,
336
+ # the autograd engine mistakenly assumes that 'x' and 'out' are aliased, treating 'x' as 'out._base'.
337
+ # This misinterpretation leads to an 'alias_of_input' flag, causing an unnecessary as_strided() call to be generated,
338
+ # which could lead to issues later in the code.
339
+ for o in flat_f_outs:
340
+ functional_tensor_storage_changed = isinstance(
341
+ o, FunctionalTensor
342
+ ) and torch._functionalize_was_storage_changed( # type: ignore[attr-defined]
343
+ o.elem
344
+ )
345
+ curr_storage = (
346
+ None
347
+ if not isinstance(o, torch.Tensor)
348
+ else StorageWeakRef(o.untyped_storage())
349
+ )
350
+ outs_with_identical_metadata_that_require_grad = (
351
+ []
352
+ if not isinstance(o, Tensor)
353
+ else [
354
+ curr
355
+ for curr in out_storage_to_tensors[curr_storage]
356
+ if has_same_metadata(o, curr)
357
+ and curr.requires_grad
358
+ and o is not curr
359
+ ]
360
+ )
361
+
362
+ # See Note [Accessing .grad_fn on FunctionalTensor]
363
+ # In-place operations on views will trigger a lazy rebase of the autograd graph;
364
+ # this runs during access to the .grad_fn. The rebase logic will invoke view ops
365
+ # on FunctionalTensors, so we must enable a FunctionalTensorMode here to ensure
366
+ # these op calls succeed.
367
+ grad_fn = None
368
+ if isinstance(o, Tensor):
369
+ with FunctionalTensorMode():
370
+ grad_fn = o.grad_fn
371
+
372
+ is_result_of_custom_autograd_fn = False
373
+ # Need to check for both custom cpp (CppFunction) and python (BackwardCFunction)
374
+ # autograd fns
375
+ if type(grad_fn).__name__ == "CppFunction":
376
+ is_result_of_custom_autograd_fn = True
377
+ if isinstance(grad_fn, torch.autograd.function.BackwardCFunction):
378
+ is_result_of_custom_autograd_fn = True
379
+
380
+ if not isinstance(o, Tensor):
381
+ output_type = OutputType.non_alias
382
+ base_idx = None
383
+ elif (
384
+ curr_storage in inp_storage_refs
385
+ and grad_fn is not None
386
+ and is_result_of_custom_autograd_fn
387
+ ):
388
+ output_type = OutputType.custom_function_view
389
+ base_idx = None
390
+ elif (
391
+ curr_storage in inp_storage_refs
392
+ and not functional_tensor_storage_changed
393
+ ):
394
+ base_idx = inp_storage_refs[curr_storage]
395
+ is_input_tensor = id(o) in inp_tensor_ids
396
+ num_aliased_outs = out_tensor_alias_counts[curr_storage]
397
+ num_multi_output_view_outs = (
398
+ num_aliased_tensors_that_are_multi_output_views[curr_storage]
399
+ )
400
+ num_aliased_outs_that_are_not_multi_output_views = (
401
+ num_aliased_outs - num_multi_output_view_outs
402
+ )
403
+ if (
404
+ grad_fn is not None
405
+ and num_aliased_outs_that_are_not_multi_output_views == 0
406
+ ):
407
+ # See Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
408
+ # In particular, given:
409
+ # def f(x):
410
+ # return list(x.unbind(0))
411
+ # The main reason we ordinarily try to regenerate these output aliases outside of the
412
+ # compiled autograd.Function is because if any of the outputs are later mutated,
413
+ # autograd needs to perform view-replay to regenerate them.
414
+ # However, autograd does not allow users to mutate multi-output views
415
+ # in any way that can change the autograd metadata of other aliases.
416
+ # So we hide this aliasing from autograd here.
417
+ log.debug(
418
+ "Encountered AOTAutograd case: differentiable outputs that \
419
+ alias each other from a multi-output view call"
420
+ )
421
+ output_type = OutputType.non_alias
422
+ elif is_input_tensor:
423
+ output_type = OutputType.is_input
424
+ else:
425
+ output_type = OutputType.alias_of_input
426
+
427
+ # We only need to handle the intermediate base case when both
428
+ # the intermediate base and the output require gradients.
429
+ # See Note [AOT Autograd: outputs aliasing inputs or intermediates!]
430
+ elif o._base is not None and o.requires_grad and o._base.requires_grad:
431
+ num_aliased_outs = out_tensor_alias_counts[curr_storage]
432
+ num_multi_output_view_outs = (
433
+ num_aliased_tensors_that_are_multi_output_views[curr_storage]
434
+ )
435
+ num_aliased_outs_that_are_not_multi_output_views = (
436
+ num_aliased_outs - num_multi_output_view_outs
437
+ )
438
+ # Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
439
+ if (
440
+ out_tensor_alias_counts[curr_storage] == 1
441
+ or num_aliased_outs_that_are_not_multi_output_views <= 1
442
+ ):
443
+ # Note [Intermediate Bases Optimization]
444
+ # Normally if we have an output that aliases an intermediate,
445
+ # we need to add the extra "intermediate base" logic further down
446
+ # to prevent autograd from yelling at us if the user later tries to
447
+ # mutate that output.
448
+ # However, the common case here is if we have an output that aliases an intermediate,
449
+ # but doesn't alias any other outputs.
450
+ # In that case, autograd shouldn't have to worry about the aliasing at all
451
+ # (if that output is mutated, there are no other live aliases for autograd to worry about).
452
+ # The "intermediate bases" can hurt inductor perf by forcing more variables to become outputs.
453
+ # So as an optimization, we won't do intermediate base handling in this case.
454
+ # Instead, we'll hide the aliasing from autograd using aten._unsafe_view().
455
+ if (
456
+ out_tensor_alias_counts[curr_storage] != 1
457
+ and num_aliased_outs_that_are_not_multi_output_views <= 1
458
+ ):
459
+ log.debug(
460
+ "Encountered AOTAutograd case: differentiable outputs that alias each other \
461
+ from a multi-output view call"
462
+ )
463
+ output_type = OutputType.unsafe_view_alias
464
+ base_idx = None
465
+ else:
466
+ # First, check if o's ._base is an existing output
467
+ maybe_existing_out_idx = out_tensor_ids.get(id(o._base), None)
468
+ if maybe_existing_out_idx is not None:
469
+ # Special case where the output is an alias of a graph intermediate, but that intermediate
470
+ # is itself also a user output.
471
+ output_type = (
472
+ OutputType.alias_of_intermediate_base_is_user_output
473
+ )
474
+ base_idx = maybe_existing_out_idx
475
+ else:
476
+ # Next, check if o's ._base is an intermediate base that we already returned
477
+ maybe_existing_base_output_idx = (
478
+ intermediate_base_tensor_id_to_output_idx.get(
479
+ id(o._base), None
480
+ )
481
+ )
482
+ if maybe_existing_base_output_idx is not None:
483
+ output_type = OutputType.alias_of_intermediate
484
+ base_idx = maybe_existing_base_output_idx
485
+ else:
486
+ # Otherwise, take o._base and explicitly return it as an output in the compiled graph
487
+ new_out_idx = len(intermediate_bases)
488
+ base_idx = new_out_idx
489
+ # Indicate to the logic later on (when we trace the joint)
490
+ # that this particular output should get it's ._base appended to the forward graph outputs
491
+ output_type = (
492
+ OutputType.alias_of_intermediate_save_as_output
493
+ )
494
+ intermediate_base_tensor_id_to_output_idx[
495
+ id(o._base)
496
+ ] = new_out_idx
497
+ intermediate_bases.append(o._base)
498
+ elif (
499
+ # See https://github.com/pytorch/pytorch/issues/100348 for this case.
500
+ # This protects against the specific case where a user fn returns (output, output.detach())
501
+ out_tensor_alias_counts[curr_storage] > 1
502
+ and len(outs_with_identical_metadata_that_require_grad) > 0
503
+ and not o.requires_grad
504
+ ):
505
+ assert len(outs_with_identical_metadata_that_require_grad) > 0
506
+ # In theory we could use any of these tensors to regenerate the aliased outputs from,
507
+ # since they all alias each other and have identical metatadata
508
+ out_alias = outs_with_identical_metadata_that_require_grad[0]
509
+ existing_out_idx = out_tensor_ids[id(out_alias)]
510
+ output_type = OutputType.alias_of_intermediate_base_is_user_output
511
+ base_idx = existing_out_idx
512
+ else:
513
+ output_type = OutputType.non_alias
514
+ base_idx = None
515
+
516
+ if isinstance(o, torch.Tensor):
517
+ dynamic_dims = {
518
+ i for i, s in enumerate(o.shape) if not is_concrete_int(s)
519
+ }
520
+ else:
521
+ dynamic_dims = None
522
+ out_info = OutputAliasInfo(
523
+ output_type=output_type,
524
+ raw_type=type(o),
525
+ base_idx=base_idx,
526
+ dynamic_dims=dynamic_dims,
527
+ requires_grad=isinstance(o, torch.Tensor) and o.requires_grad,
528
+ )
529
+ output_info.append(out_info)
530
+
531
+ # See Note [AOT Autograd: Views to avoid tangents aliasing inputs]
532
+ def view_avoid_dupes_with_primals(t):
533
+ if isinstance(t, Tensor) and is_traceable_wrapper_subclass(t):
534
+ return transform_subclass(
535
+ t, lambda _, inner_t: view_avoid_dupes_with_primals(inner_t)
536
+ )
537
+ if isinstance(t, Tensor):
538
+ return t.view(t.shape)
539
+ return t
540
+
541
+ # This analysis function returns *only* the outputs that are meant to be tangents to the backwards.
542
+ # Anything that aliases (inputs returned in the fw due to metadata mutations, or outputs that alias inputs/intermediates)
543
+ # are *regenerated* later, and not used directly in the autograd graph
544
+ f_input_tangents = [
545
+ inp
546
+ for inp, info in zip(flat_f_args, input_info)
547
+ if info.mutation_type == MutationType.MUTATED_OUT_GRAPH
548
+ and info.mutates_data
549
+ and info.requires_grad
550
+ ]
551
+ f_output_tangents = [
552
+ o
553
+ for o, info in zip(flat_f_outs, output_info)
554
+ if info.output_type
555
+ in [
556
+ OutputType.non_alias,
557
+ OutputType.unsafe_view_alias,
558
+ OutputType.custom_function_view,
559
+ ]
560
+ and issubclass(info.raw_type, torch.Tensor)
561
+ and info.requires_grad
562
+ ]
563
+ # intermediate bases are also included in the backward graph
564
+ f_tangents = f_input_tangents + f_output_tangents + intermediate_bases
565
+ traced_tangents = pytree.tree_map(from_fun, f_tangents)
566
+ traced_tangents = pytree.tree_map(
567
+ view_avoid_dupes_with_primals, traced_tangents
568
+ )
569
+ user_outs = pytree.tree_map(from_fun, f_output_tangents)
570
+
571
+ f_mutated_inputs = [
572
+ inp
573
+ for inp, info in zip(flat_f_args, input_info)
574
+ if info.mutation_type == MutationType.MUTATED_OUT_GRAPH
575
+ ]
576
+ f_metadata_mutated_inputs = [
577
+ inp for inp, info in zip(flat_f_args, input_info) if info.mutates_metadata
578
+ ]
579
+ # This logic (annoyingly) re-figures out exactly what the outputs to the compiled fw graph will be.
580
+ # When handling subclasses, we need info about **all** outputs of compiled forward graph,
581
+ # so we know precisely which graph outputs to wrap back into tensor subclasses
582
+ # Ideally we would refactor this so not have an is_train flag, and have the separate
583
+ # inference and training paths decide which inputs/output to ask for subclass info on.
584
+ # However, we currently stash indexing information on each SubclassMeta about its order
585
+ # in the graph outputs list.
586
+ f_fw_graph_outs = list(flat_f_outs)
587
+ if is_train or not keep_input_mutations:
588
+ f_fw_graph_outs = f_mutated_inputs + f_fw_graph_outs
589
+ else:
590
+ # even when "keep_input_mutations" is True,
591
+ # we never keep metadata-only mutations in the fw graph
592
+ f_fw_graph_outs = f_metadata_mutated_inputs + f_fw_graph_outs
593
+ if is_train:
594
+ f_fw_graph_outs = f_fw_graph_outs + intermediate_bases
595
+ fw_graph_outs = pytree.tree_map(from_fun, f_fw_graph_outs)
596
+
597
+ grad_enabled_mutation = None
598
+ if torch.is_grad_enabled() != prior_grad_enabled:
599
+ grad_enabled_mutation = torch.is_grad_enabled()
600
+ torch.set_grad_enabled(
601
+ prior_grad_enabled
602
+ ) # Restore the prior state after tracing it
603
+ log.debug(
604
+ (
605
+ "grad_mode mutation encountered in graph. "
606
+ "Will emit mutation epilogue, to set grad_mode=%s"
607
+ ),
608
+ grad_enabled_mutation,
609
+ )
610
+
611
+ metadata = ViewAndMutationMeta(
612
+ input_info=input_info,
613
+ output_info=output_info,
614
+ num_intermediate_bases=len(intermediate_bases),
615
+ keep_input_mutations=keep_input_mutations,
616
+ traced_tangents=traced_tangents,
617
+ subclass_inp_meta=create_subclass_meta(flat_args),
618
+ subclass_fw_graph_out_meta=create_subclass_meta(fw_graph_outs),
619
+ subclass_tangent_meta=create_subclass_meta(traced_tangents),
620
+ is_train=is_train,
621
+ grad_enabled_mutation=grad_enabled_mutation,
622
+ tokens=mode._tokens,
623
+ )
624
+ return metadata
625
+
626
+ return inner
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module dispatches the graphs to either the forward-only or joint compilation
3
+ pathways, taking into account the AOTConfig and the collected ViewAndMutationMetadata.
4
+ """
5
+
6
+ from typing import Any, Callable, List, Optional, Tuple, Union
7
+
8
+ import torch
9
+ import torch.utils._pytree as pytree
10
+ import torch.utils.dlpack
11
+ from torch import Tensor
12
+ from torch._dispatch.python import enable_python_dispatcher
13
+ from torch._dynamo.utils import lazy_format_graph_code
14
+ from torch._logging import getArtifactLogger, trace_structured
15
+ from torch._subclasses.functional_tensor import FunctionalTensorMode
16
+ from torch.fx.experimental.proxy_tensor import make_fx
17
+
18
+ from .functional_utils import (
19
+ assert_functional_graph,
20
+ propagate_input_mutation_stacktraces,
21
+ )
22
+ from .schemas import AOTConfig, SubclassMeta, ViewAndMutationMeta
23
+ from .traced_function_transforms import (
24
+ aot_dispatch_subclass,
25
+ create_functionalized_fn,
26
+ create_joint,
27
+ fn_input_mutations_to_outputs,
28
+ fn_prepped_for_autograd,
29
+ )
30
+
31
+ aot_graphs_log = getArtifactLogger(__name__, "aot_graphs")
32
+
33
+
34
+ def _create_graph(f, args, *, aot_config: AOTConfig) -> torch.fx.GraphModule:
35
+ # FunctionalTensorMode must be enabled here.
36
+ # See Note [Accessing .grad_fn on FunctionalTensor]
37
+ with enable_python_dispatcher(), FunctionalTensorMode(
38
+ pre_dispatch=aot_config.pre_dispatch, export=aot_config.is_export
39
+ ):
40
+ fx_g = make_fx(
41
+ f,
42
+ decomposition_table=aot_config.decompositions,
43
+ record_module_stack=True,
44
+ pre_dispatch=aot_config.pre_dispatch,
45
+ )(*args)
46
+
47
+ return fx_g
48
+
49
+
50
+ def aot_dispatch_base_graph(
51
+ flat_fn,
52
+ flat_args: List[Tensor],
53
+ aot_config: AOTConfig,
54
+ *,
55
+ fw_metadata: ViewAndMutationMeta,
56
+ ) -> Union[Callable, Tuple[Callable, List[Any], Optional[SubclassMeta]]]:
57
+ # aot_dispatch_base requires functionalization, but doesn't need to handle as many cases as the autograd case.
58
+ # The cases that aot_dispatch_base doesn't need to handle include:
59
+ # - outputs that are aliases of graph intermediates
60
+ # - outputs that are aliases of graph inputs
61
+ # While cases that it does need to handle include:
62
+ # - input mutations (including when inputs are aliases of each other)
63
+ # - input metadata mutations
64
+ fn_to_trace = fn_input_mutations_to_outputs(
65
+ flat_fn,
66
+ fw_metadata,
67
+ keep_data_input_mutations=aot_config.keep_inference_input_mutations,
68
+ )
69
+
70
+ fn_to_trace, updated_flat_args = create_functionalized_fn(
71
+ fn_to_trace,
72
+ flat_args,
73
+ meta=fw_metadata,
74
+ aot_config=aot_config,
75
+ trace_joint=False,
76
+ )
77
+
78
+ (
79
+ fn_to_trace,
80
+ updated_flat_args_subclasses_desugared,
81
+ maybe_subclass_meta,
82
+ ) = aot_dispatch_subclass(
83
+ fn_to_trace,
84
+ updated_flat_args,
85
+ is_joint_structure=False,
86
+ meta=fw_metadata,
87
+ fw_only=flat_fn,
88
+ )
89
+
90
+ fw_module = _create_graph(
91
+ fn_to_trace,
92
+ updated_flat_args_subclasses_desugared,
93
+ aot_config=aot_config,
94
+ )
95
+
96
+ # As long as we opted to remove input mutations, then
97
+ # there should be *NO* mutating ops in the graph at this point.
98
+ copy_count = assert_functional_graph(fw_module.graph)
99
+
100
+ fw_module.graph.eliminate_dead_code()
101
+ fw_module.recompile()
102
+
103
+ copy_count2 = assert_functional_graph(fw_module.graph)
104
+ propagate_input_mutation_stacktraces(fw_module.graph)
105
+
106
+ assert copy_count == copy_count2
107
+
108
+ if aot_config.enable_log:
109
+ aot_graphs_log.info(
110
+ "%s", lazy_format_graph_code("Forward graph", fw_module, aot_config.aot_id)
111
+ )
112
+ trace_structured(
113
+ "aot_forward_graph",
114
+ payload_fn=lambda: fw_module.print_readable(print_output=False),
115
+ )
116
+
117
+ # TODO: should factor this into a separate function for export that always only returns just the graph.
118
+ if aot_config.is_export:
119
+ assert (
120
+ maybe_subclass_meta is None
121
+ ), "aot_export_module does not support tensor subclass inputs for now."
122
+ return fw_module
123
+ return fw_module, list(updated_flat_args_subclasses_desugared), maybe_subclass_meta
124
+
125
+
126
+ # Has the precondition that there
127
+ # are no duplicate arguments in flat_args (e.g., the same Tensor
128
+ # object never shows up twice. However, two tensor inputs MAY alias
129
+ # the same storage, so long as they have separate TensorImpls.)
130
+ def aot_dispatch_autograd_graph(
131
+ flat_fn,
132
+ flat_args: List[Any],
133
+ aot_config: AOTConfig,
134
+ *,
135
+ fw_metadata: ViewAndMutationMeta,
136
+ ) -> Union[Callable, Tuple[Callable, List[Any], Optional[SubclassMeta]]]:
137
+ # traced_tangents corresponds to the set of outputs in the traced forward that should get grad_outputs in the traced backward.
138
+ # It includes outputs of the original forward, *and* any updated inputs due to input mutations.
139
+ # However, it does *not* include any outputs that are aliases of inputs or intermediates, or any metadata-only input mutations.
140
+ traced_tangents = pytree.tree_map(
141
+ lambda x: x.detach().contiguous() if isinstance(x, Tensor) else x,
142
+ fw_metadata.traced_tangents,
143
+ )
144
+
145
+ joint_inputs = (flat_args, traced_tangents)
146
+
147
+ fn_prepared_for_autograd = fn_prepped_for_autograd(
148
+ flat_fn,
149
+ fw_metadata,
150
+ )
151
+ joint_fn_to_trace = create_joint(fn_prepared_for_autograd, aot_config=aot_config)
152
+
153
+ joint_fn_to_trace, updated_joint_inputs = create_functionalized_fn(
154
+ joint_fn_to_trace,
155
+ joint_inputs,
156
+ meta=fw_metadata,
157
+ aot_config=aot_config,
158
+ trace_joint=True,
159
+ )
160
+
161
+ subclass_tracing_info = aot_dispatch_subclass(
162
+ joint_fn_to_trace,
163
+ updated_joint_inputs,
164
+ is_joint_structure=True,
165
+ meta=fw_metadata,
166
+ fw_only=flat_fn,
167
+ )
168
+
169
+ joint_fn_to_trace = subclass_tracing_info.plain_tensor_trace_fn
170
+ updated_joint_inputs = subclass_tracing_info.plain_tensor_args
171
+ maybe_subclass_meta = subclass_tracing_info.maybe_subclass_meta
172
+
173
+ fx_g = _create_graph(joint_fn_to_trace, updated_joint_inputs, aot_config=aot_config)
174
+
175
+ # There should be *NO* mutating ops in the graph at this point.
176
+ assert_functional_graph(fx_g.graph)
177
+
178
+ # Redundant with the check above, but worth having in case tracing introduced
179
+ # a fake tensor. Unlikely.
180
+ # See Note: [Fake Modules and AOTAutograd]
181
+ torch._dynamo.utils.assert_no_fake_params_or_buffers(fx_g)
182
+ fx_g.graph.eliminate_dead_code()
183
+ fx_g.recompile()
184
+ # TODO: in AOTAutograd, we create metadata like _indices_of_inps_to_detach to detect
185
+ # when we need to manually detach() some inputs in the forward.
186
+ # Higher order ops might eventually need to do the same.
187
+ if aot_config.is_export:
188
+ assert (
189
+ maybe_subclass_meta is None
190
+ ), "aot_export_module does not support tensor subclass inputs for now."
191
+ return fx_g
192
+ return fx_g, updated_joint_inputs, maybe_subclass_meta
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/functional_utils.py ADDED
@@ -0,0 +1,370 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file contains utilities related to functionalization in AOTAutograd:
3
+ 1. converting to/from functional tensors
4
+ 2. detecting Tensor mutations - both metadata and Tensor value
5
+ 3. regenerating/replaying views from their base
6
+ 4. checking if a graph is functional i.e. whether it contains any mutation ops
7
+ """
8
+
9
+ import torch
10
+ from torch import Tensor
11
+ from torch._subclasses.fake_tensor import FakeTensor
12
+ from torch._subclasses.functional_tensor import FunctionalTensor
13
+ from torch.fx.experimental.symbolic_shapes import definitely_true, sym_eq
14
+ from torch.multiprocessing.reductions import StorageWeakRef
15
+ from torch.utils._python_dispatch import (
16
+ is_traceable_wrapper_subclass,
17
+ transform_subclass,
18
+ )
19
+
20
+
21
+ def to_fun(t):
22
+ if isinstance(t, Tensor):
23
+ if is_traceable_wrapper_subclass(t):
24
+ # See Note [Functionalization always runs last]
25
+ # This means that if we want to "functionalize" a subclass, we need to ensure that the functional wrapper
26
+ # goes at the bottom.
27
+ # recurse here, so we can support nested wrapper subclasses
28
+ out = transform_subclass(t, lambda _, inner_t: to_fun(inner_t))
29
+ torch._mirror_autograd_meta_to(t, out) # type: ignore[attr-defined]
30
+ return out
31
+ else:
32
+ return FunctionalTensor.to_functional(t)
33
+ else:
34
+ return t
35
+
36
+
37
+ def sync_functional_tensor(t):
38
+ if is_traceable_wrapper_subclass(t):
39
+ attrs, ctx = t.__tensor_flatten__() # type: ignore[attr-defined]
40
+ for attr in attrs:
41
+ sync_functional_tensor(getattr(t, attr))
42
+ else:
43
+ torch._sync(t)
44
+
45
+
46
+ # When subclasses are involved, t here will usually look something like:
47
+ # SubclassA(SubclassB(FunctionalTensor(_to_fun_tensor(FakeTensor))))
48
+ def from_fun(t):
49
+ if isinstance(t, Tensor) and is_traceable_wrapper_subclass(t):
50
+ # See Note [Functionalization always runs last]
51
+ # This means that if we want to "functionalize" a subclass, we need to ensure that the functional wrapper
52
+ # goes at the bottom.
53
+ # recurse here, so we can support nested wrapper subclasses
54
+ out = transform_subclass(t, lambda _, inner_t: from_fun(inner_t))
55
+ torch._mirror_autograd_meta_to(t, out) # type: ignore[attr-defined]
56
+ return out
57
+
58
+ if not isinstance(t, FunctionalTensor):
59
+ # quick sanity assert
60
+ if isinstance(t, torch.Tensor):
61
+ assert not torch._is_functional_tensor(t) # type: ignore[attr-defined]
62
+ return t
63
+ sync_functional_tensor(t)
64
+ return torch._from_functional_tensor(t.elem)
65
+
66
+
67
+ def is_fun(t):
68
+ if isinstance(t, Tensor) and is_traceable_wrapper_subclass(t):
69
+ # See Note [Functionalization always runs last]
70
+ # This means that if we want to "functionalize" a subclass, we need to ensure that the functional wrapper
71
+ # goes at the bottom.
72
+ # recurse here, so we can support nested wrapper subclasses
73
+ t_attrs, _ = t.__tensor_flatten__() # type: ignore[attr-defined]
74
+ t_inners = [getattr(t, attr) for attr in t_attrs]
75
+ any_fun = any(is_fun(x) for x in t_inners)
76
+ all_fun = all(is_fun(x) for x in t_inners)
77
+ assert any_fun == all_fun
78
+ return any_fun
79
+
80
+ return isinstance(t, FunctionalTensor)
81
+
82
+
83
+ # t here is either
84
+ # (1) A FunctionalTensor(_to_functional_tensor(FakeTensor))
85
+ # (2) A traceable tensor subclass that holds a FunctionalTensor
86
+ # (3) Not a tensor
87
+ def has_data_mutation(t):
88
+ if is_traceable_wrapper_subclass(t):
89
+ attrs, _ = t.__tensor_flatten__()
90
+ # A tensor subclass was updated if any of its inner elements were updated
91
+ return any(has_data_mutation(getattr(t, attr)) for attr in attrs)
92
+ else:
93
+ if isinstance(t, torch.Tensor):
94
+ assert isinstance(t, FunctionalTensor)
95
+ return torch._functionalize_has_data_mutation(t.elem) # type: ignore[attr-defined]
96
+ return False
97
+
98
+
99
+ def are_all_mutations_hidden_from_autograd(t):
100
+ if is_traceable_wrapper_subclass(t):
101
+ attrs, _ = t.__tensor_flatten__()
102
+ # If all inner elements are mutations hidden from autograd, then it is a mutation hidden from autograd.
103
+ return all(
104
+ are_all_mutations_hidden_from_autograd(getattr(t, attr)) for attr in attrs
105
+ )
106
+ elif isinstance(t, torch.Tensor):
107
+ assert isinstance(t, FunctionalTensor)
108
+ return torch._functionalize_are_all_mutations_hidden_from_autograd(t.elem)
109
+ else:
110
+ return False
111
+
112
+
113
+ def are_all_mutations_under_no_grad_or_inference_mode(t):
114
+ if is_traceable_wrapper_subclass(t):
115
+ attrs, _ = t.__tensor_flatten__()
116
+ return all(
117
+ are_all_mutations_under_no_grad_or_inference_mode(getattr(t, attr))
118
+ for attr in attrs
119
+ )
120
+ else:
121
+ assert isinstance(t, FunctionalTensor)
122
+ return torch._functionalize_are_all_mutations_under_no_grad_or_inference_mode(
123
+ t.elem
124
+ )
125
+
126
+
127
+ # f_arg here is either
128
+ # (1) A FunctionalTensor(_to_functional_tensor(FakeTensor))
129
+ # (2) A traceable tensor subclass that holds a FunctionalTensor
130
+ # (3) Not a tensor
131
+ # Assumption: arg promises to be the "original" tensor wrapped by f_arg
132
+ # Note: "storage mutations" coming from set_() are a type of metadata mutation. So:
133
+ # - check_only_storage_mutation=True: only return true if there was a storage mutation
134
+ # - check_only_storage_mutation=Flse: return true if there was any metadata mutation (including a storage mutation)
135
+ def has_metadata_mutation(f_arg, arg, *, check_only_storage_mutation: bool):
136
+ if is_traceable_wrapper_subclass(f_arg):
137
+ attrs, _ = f_arg.__tensor_flatten__()
138
+ # A tensor subclass was updated if any of its inner elements were updated
139
+ f_inner_ts = [getattr(f_arg, attr) for attr in attrs]
140
+ inner_ts = [getattr(arg, attr) for attr in attrs]
141
+ return any(
142
+ has_metadata_mutation(
143
+ f_inner_t,
144
+ inner_t,
145
+ check_only_storage_mutation=check_only_storage_mutation,
146
+ )
147
+ for f_inner_t, inner_t in zip(f_inner_ts, inner_ts)
148
+ )
149
+ else:
150
+ if not isinstance(f_arg, torch.Tensor):
151
+ assert not isinstance(arg, torch.Tensor)
152
+ return False
153
+ assert isinstance(f_arg, FunctionalTensor)
154
+ assert isinstance(arg, FakeTensor)
155
+
156
+ arg_after = torch._from_functional_tensor(f_arg.elem)
157
+ # This is true if the current tensor experienced at least one set_() call
158
+ maybe_storage_changed = torch._functionalize_was_storage_changed(f_arg.elem) # type: ignore[attr-defined]
159
+ # However, multiple set_() calls can cancel out. So we also check whether the
160
+ # storage of the tensor has changed.
161
+ # Note: if an input experienced two set_() calls that cancel out, **and**
162
+ # it experiences a data mutation, we pessimistically think that the set_()
163
+ # call is necessary here. We could in theory fix this, but this will
164
+ # hopefully never happen in user code, and is not needed for fsdp.
165
+ same_storages = StorageWeakRef(arg.untyped_storage()) == StorageWeakRef(
166
+ arg_after.untyped_storage()
167
+ )
168
+ has_storage_metadata_mutation = maybe_storage_changed and not same_storages
169
+ if check_only_storage_mutation:
170
+ return has_storage_metadata_mutation
171
+
172
+ # storage metadata mutation is a type of metadata mutation, so return true if we saw one
173
+ if has_storage_metadata_mutation:
174
+ return True
175
+
176
+ maybe_metadata_mutated = torch._functionalize_has_metadata_mutation(f_arg.elem) # type: ignore[attr-defined]
177
+ # This is true if the current tensor experienced at least one metadata mutation.
178
+ # So if false, we know there was no metadata mutation
179
+ if not maybe_metadata_mutated:
180
+ return False
181
+
182
+ # However, multiple metadata mutations can cancel out.
183
+ # So we also check if the concrete sizes/strides on the tensor have changed.
184
+ same_sizes = arg.shape == arg_after.shape
185
+ same_strides = arg.stride() == arg_after.stride()
186
+ same_offsets = arg.storage_offset() == arg_after.storage_offset()
187
+ has_metadata_mutation_ = maybe_metadata_mutated and not (
188
+ same_sizes and same_strides and same_offsets
189
+ )
190
+ # At this point we know the storage was not (ultimately) swapped via set_(), so the tensor counts as metadata-mutated only if its sizes/strides/offset actually changed.
191
+ return has_metadata_mutation_
192
+
193
+
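+ # Editor-added sketch (not part of the upstream file): in plain eager terms,
+ # a "metadata mutation" changes sizes/strides/offset without touching the
+ # storage, while a set_() call swaps the storage itself. has_metadata_mutation()
+ # above detects the functionalized analogues of both.
+ def _example_metadata_vs_storage_mutation():
+     x = torch.zeros(2, 3)
+     ptr_before = x.untyped_storage().data_ptr()
+     x.transpose_(0, 1)  # metadata mutation: shape/strides change, storage does not
+     assert x.untyped_storage().data_ptr() == ptr_before
+     x.set_(torch.zeros(5))  # storage mutation via set_(): the storage is replaced
+     assert x.untyped_storage().data_ptr() != ptr_before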
194
+ def gen_alias_from_base(aliased_base_tensor, target_meta_tensor, target_requires_grad):
195
+ # Try to do view-replay if possible.
196
+ # fall back to .as_strided() if we can't.
197
+ if target_meta_tensor._base is not None:
198
+ # The base that we want to replay our view off of might have a different shape than the view's original base.
199
+ b = target_meta_tensor._base
200
+ abt = aliased_base_tensor
201
+ # Don't unnecessarily call as_strided if nothing changed; as_strided's
202
+ # backward is poorly implemented and slow
203
+ if abt is not b and (
204
+ abt.size() != b.size()
205
+ or abt.stride() != b.stride()
206
+ or abt.storage_offset() != b.storage_offset()
207
+ ):
208
+ reshaped_base_tensor = aliased_base_tensor.as_strided(
209
+ b.size(), b.stride(), b.storage_offset()
210
+ )
211
+ else:
212
+ reshaped_base_tensor = aliased_base_tensor
213
+ out = target_meta_tensor._view_func(reshaped_base_tensor)
214
+ # This shape mismatch can happen due to a bug in inplace/view handling in autograd.
215
+ # Try putting a breakpoint here and running
216
+ # `test/functorch/test_aotdispatch TestAOTAutograd.test_output_all_alias_types`
217
+ # Also, https://github.com/pytorch/pytorch/issues/49825
218
+ #
219
+ # As a stopgap, we'll fall back to as_strided.
220
+ if out is not None and out.shape == target_meta_tensor.shape:
221
+ if aliased_base_tensor.requires_grad and not target_requires_grad:
222
+ out = out.detach()
223
+ elif not aliased_base_tensor.requires_grad and target_requires_grad:
224
+ out.requires_grad_(True)
225
+ return out
226
+ size = target_meta_tensor.size()
227
+ stride = target_meta_tensor.stride()
228
+ storage_offset = target_meta_tensor.storage_offset()
229
+ if aliased_base_tensor.is_complex() and not target_meta_tensor.is_complex():
230
+ aliased_out = torch.view_as_real(aliased_base_tensor).as_strided(
231
+ size, stride, storage_offset
232
+ )
233
+ elif not aliased_base_tensor.is_complex() and target_meta_tensor.is_complex():
234
+ aliased_out = torch.view_as_complex(aliased_base_tensor).as_strided(
235
+ size, stride, storage_offset
236
+ )
237
+ else:
238
+ aliased_out = aliased_base_tensor.as_strided(size, stride, storage_offset)
239
+ # For outputs aliasing inputs, we need to check if the requires-gradness has changed.
240
+ if aliased_base_tensor.requires_grad and not target_requires_grad:
241
+ aliased_out = aliased_out.detach()
242
+ elif not aliased_base_tensor.requires_grad and target_requires_grad:
243
+ aliased_out.requires_grad_(True)
244
+ # For outputs aliasing inputs, we need to check if the dtype has changed.
245
+ # as_strided() is the "most generic" view, but it does not cover cross-dtype views
246
+ if aliased_out.dtype != target_meta_tensor.dtype:
247
+ aliased_out = aliased_out.view(target_meta_tensor.dtype)
248
+ return aliased_out
249
+
250
+
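+ # Editor-added sketch (not part of the upstream file): regenerating a plain
+ # eager view from its base. gen_alias_from_base either replays the view via
+ # _view_func or falls back to as_strided; either way the regenerated alias
+ # shares sizes/strides/offset (and storage) with the target view.
+ def _example_gen_alias_from_base():
+     base = torch.randn(4, 4)
+     view = base[1:, :2]
+     regenerated = gen_alias_from_base(base, view, target_requires_grad=False)
+     assert regenerated.shape == view.shape
+     assert regenerated.storage_offset() == view.storage_offset()
+     assert regenerated.untyped_storage().data_ptr() == base.untyped_storage().data_ptr()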
251
+ def has_same_metadata(t1, t2):
252
+ return (
253
+ definitely_true(sym_eq(t1.size(), t2.size()))
254
+ and definitely_true(sym_eq(t1.stride(), t2.stride()))
255
+ and definitely_true(t1.storage_offset() == t2.storage_offset())
256
+ and t1.is_conj() == t2.is_conj()
257
+ and t1.is_neg() == t2.is_neg()
258
+ )
259
+
260
+
261
+ # new_arg and arg here are either:
262
+ # (1) both a FakeTensor
263
+ # (2) both a traceable tensor subclass that holds a FakeTensor
264
+ # Pre-condition: the two args are the "old" and "new" inputs from running functionalization.
265
+ # When we run functionalization and wrap our inputs into FunctionalTensors,
266
+ # we can detect whether or not an input was mutated by checking to see if the inner tensor has changed
267
+ #
268
+ # Normally it would be enough just to check whether arg is new_arg, which suffices for functionalization
269
+ # to confirm that inputs were not mutated when running the user's model with functionalization on.
270
+ # But when we have subclass inputs, we can't rely on that:
271
+ # `from_fun(to_fun(x)) is x` will return False, because the call to `from_fun` constructs
272
+ # a brand new subclass instance: we are calling __tensor_unflatten__, and going
273
+ # from Subclass(FakeTensor) to Subclass(FunctionalTensor(FakeTensor))
274
+ def was_tensor_updated(arg, new_arg):
275
+ if is_traceable_wrapper_subclass(arg):
276
+ assert is_traceable_wrapper_subclass(new_arg)
277
+ attrs, _ = arg.__tensor_flatten__()
278
+ new_attrs, _ = new_arg.__tensor_flatten__()
279
+ assert attrs == new_attrs
280
+ # A tensor subclass was updated if any of its inner elements were updated
281
+ return any(
282
+ was_tensor_updated(getattr(arg, attr), getattr(new_arg, attr))
283
+ for attr in attrs
284
+ )
285
+ else:
286
+ return arg is not new_arg
287
+
288
+
289
+ # new_arg and arg here are either:
290
+ # (1) both a FakeTensor
291
+ # (2) both a traceable tensor subclass that holds a FakeTensor
292
+ # Pre-condition: the two args are the "old" and "new" inputs from running functionalization.
293
+ # When we run functionalization and wrap our inputs into FunctionalTensors,
294
+ # we can detect whether or not an input was mutated by checking to see if the inner tensor has changed,
295
+ # but shares storage with the old input
296
+ def was_tensor_metadata_updated(arg, new_arg):
297
+ if is_traceable_wrapper_subclass(arg):
298
+ assert is_traceable_wrapper_subclass(new_arg)
299
+ attrs, _ = arg.__tensor_flatten__()
300
+ new_attrs, _ = new_arg.__tensor_flatten__()
301
+ assert attrs == new_attrs
302
+ # A tensor subclass was updated if any of its inner elements were updated
303
+ return any(
304
+ was_tensor_metadata_updated(getattr(arg, attr), getattr(new_arg, attr))
305
+ for attr in attrs
306
+ )
307
+ else:
308
+ return arg is not new_arg and StorageWeakRef(
309
+ arg.untyped_storage()
310
+ ) == StorageWeakRef(new_arg.untyped_storage())
311
+
312
+
313
+ # Returns the number of detected copy_
314
+ def assert_functional_graph(fx_g: torch.fx.Graph) -> int:
315
+ placeholders = set()
316
+ copy_count = 0
317
+ # NB: It would also be nice to verify that the mutations all happen at the
318
+ # end, but we also do some administrative views after mutations so this
319
+ # isn't actually true. (TODO: Could this cause problems for Inductor?)
320
+ for n in fx_g.nodes:
321
+ if n.op == "placeholder":
322
+ placeholders.add(n)
323
+ if isinstance(n.target, torch._ops.OpOverload):
324
+ if n.target is torch.ops.aten.copy_.default:
325
326
+ # Can only copy_ into an input, and can only do so once
327
+ assert n.args[0] in placeholders
328
+ placeholders.remove(n.args[0])
329
+ copy_count += 1
330
+ else:
331
+ assert (
332
+ not n.target._schema.is_mutable
333
+ ), f"aot_autograd expected to have an entirely functional graph, but found {n.format_node()}"
334
+ return copy_count
335
+
336
+
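+ # Editor-added sketch (not part of the upstream file): a purely functional
+ # graph produced by make_fx has no copy_() writes back into its inputs, so
+ # assert_functional_graph reports zero copies.
+ def _example_assert_functional_graph():
+     from torch.fx.experimental.proxy_tensor import make_fx
+
+     def f(x):
+         return x.add(1).mul(2)
+
+     gm = make_fx(f)(torch.randn(3))
+     assert assert_functional_graph(gm.graph) == 0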
337
+ def propagate_input_mutation_stacktraces(fx_g: torch.fx.Graph) -> None:
338
+ placeholders = set()
339
+ for n in fx_g.nodes:
340
+ if n.op == "placeholder":
341
+ placeholders.add(n)
342
+ if isinstance(n.target, torch._ops.OpOverload):
343
+ if n.target is torch.ops.aten.copy_.default:
344
+ # Can only copy_ into an input, and can only do so once
345
+ assert n.args[0] in placeholders
346
+ placeholders.remove(n.args[0])
347
+ copy_from_node = n.args[1]
348
+ # Pre-condition: every node has a "stack_trace" field in its meta,
349
+ # but copy_() nodes do not (since we manually added them during functionalization).
350
+ # Instead, we manually propagate here.
351
+ if "stack_trace" in copy_from_node.meta:
352
+ assert "stack_trace" not in n.meta, str(n)
353
+ n.meta["stack_trace"] = copy_from_node.meta["stack_trace"]
354
+
355
+
356
+ def _check_if_mutation_can_be_in_graph(
357
+ keep_input_mutations: bool,
358
+ mutates_data,
359
+ mutates_metadata,
360
+ mutations_hidden_from_autograd,
361
+ mutations_under_no_grad_or_inference_mode,
362
+ requires_grad,
363
+ ):
364
+ if keep_input_mutations:
365
+ return mutates_data and (
366
+ (not mutates_metadata and not requires_grad)
367
+ or mutations_hidden_from_autograd
368
+ or mutations_under_no_grad_or_inference_mode
369
+ )
370
+ return False
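+
+
+ # Editor-added sketch (not part of the upstream file): with keep_input_mutations
+ # enabled, a data mutation on an input that does not require grad can stay in
+ # the graph, while a data + metadata mutation on a grad-requiring input cannot
+ # (unless it is hidden from autograd or done under no_grad/inference_mode).
+ def _example_mutation_can_be_in_graph():
+     assert _check_if_mutation_can_be_in_graph(
+         keep_input_mutations=True, mutates_data=True, mutates_metadata=False,
+         mutations_hidden_from_autograd=False,
+         mutations_under_no_grad_or_inference_mode=False, requires_grad=False,
+     )
+     assert not _check_if_mutation_can_be_in_graph(
+         keep_input_mutations=True, mutates_data=True, mutates_metadata=True,
+         mutations_hidden_from_autograd=False,
+         mutations_under_no_grad_or_inference_mode=False, requires_grad=True,
+     )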
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/input_output_analysis.py ADDED
@@ -0,0 +1,432 @@
1
+ """
2
+ This module is one of the analysis modules - it takes as input a function or graph
3
+ and some preexisting properties, and returns some data that is useful for deciding
4
+ how to further proceed with compilation or construct runtime wrappers.
5
+
6
+ In particular, the following analyses are provided:
7
+ 1. Refine the view and mutation metadata collected previously - removing duplicate
8
+ inputs or mapping views to their bases.
9
+ 2. We also analyze the function signature for export graphs.
10
+ """
11
+
12
+ import itertools
13
+ from typing import Any, Dict, List, Optional, Tuple, Union
14
+
15
+ import torch
16
+ import torch.utils._pytree as pytree
17
+ from torch import Tensor
18
+ from torch._subclasses.functional_tensor import FunctionalTensor
19
+ from torch.fx.experimental.symbolic_shapes import is_concrete_int
20
+ from .schemas import (
21
+ BackwardSignature,
22
+ GraphSignature,
23
+ InputAliasInfo,
24
+ OutputAliasInfo,
25
+ OutputType,
26
+ ViewAndMutationMeta,
27
+ )
28
+ from .utils import strict_zip
29
+
30
+ zip = strict_zip
31
+
32
+
33
+ def remove_dupe_metadata(
34
+ m: ViewAndMutationMeta,
35
+ keep_arg_mask: List[bool],
36
+ add_dupe_map: List[int],
37
+ ) -> ViewAndMutationMeta:
38
+ assert len(m.input_info) == len(keep_arg_mask)
39
+ # Easy invariant: the first argument should never be a dupe (it will be kept)
40
+ assert len(keep_arg_mask) > 0 and keep_arg_mask[0]
41
+
42
+ # Filter dupe'd mutated inputs out of traced_tangents
43
+ num_data_mutations = len([x for x in m.input_info if x.mutates_data])
44
+ other_traced_tangents = m.traced_tangents[num_data_mutations:]
45
+ inp_traced_tangents = m.traced_tangents[:num_data_mutations]
46
+ filtered_inp_traced_tangents = [
47
+ x
48
+ for i, x in enumerate(inp_traced_tangents)
49
+ if keep_arg_mask[m.mutated_inp_runtime_indices[i]]
50
+ ]
51
+ traced_tangents = filtered_inp_traced_tangents + other_traced_tangents
52
+
53
+ return ViewAndMutationMeta(
54
+ input_info=[x for i, x in enumerate(m.input_info) if keep_arg_mask[i]],
55
+ # For outputs that are views of inputs, we store the index of the input that the output
56
+ # was generated from. Need to update that index to account for removed dupes.
57
+ output_info=[
58
+ OutputAliasInfo(
59
+ output_type=o.output_type,
60
+ raw_type=o.raw_type,
61
+ dynamic_dims=o.dynamic_dims,
62
+ base_idx=None if o.base_idx is None else add_dupe_map[o.base_idx],
63
+ requires_grad=o.requires_grad,
64
+ )
65
+ for o in m.output_info
66
+ ],
67
+ num_intermediate_bases=m.num_intermediate_bases,
68
+ keep_input_mutations=m.keep_input_mutations,
69
+ traced_tangents=traced_tangents,
70
+ # We are guaranteed not to get here, since dupes are not supported today with subclass inputs.
71
+ subclass_inp_meta=[],
72
+ subclass_fw_graph_out_meta=[],
73
+ subclass_tangent_meta=[],
74
+ is_train=m.is_train,
75
+ )
76
+
77
+
78
+ # Given our ViewAndMutation metadata, this fn constructs a new set of metadata,
79
+ # after adding synthetic base arguments to the function.
80
+ # Most of the work in this fn is slogging through all of the metadata corresponding to inputs,
81
+ # and updating it with our synthetic base calling convention.
82
+ #
83
+ # When config.debug_assert is set, we automatically regenerate the metadata
84
+ # and compare it to this output for sanity.
85
+ #
86
+ # In addition to the updated metadata, also return the list of input indices
87
+ # that will need to be updated in the synthetic base epilogue
88
+
89
+
90
+ # Given our ViewAndMutation metadata, this fn constructs a new set of metadata,
91
+ # after adding synthetic base arguments to the function.
92
+ # Most of the work in this fn is slogging through all of the metadata corresponding to inputs,
93
+ # and updating it with our synthetic base calling convention.
94
+ #
95
+ # When config.debug_assert is set, we automatically regenerate the metadata
96
+ # and compare it to this output for sanity.
97
+ #
98
+ # In addition to the updated metadata, also return the list of input indices
99
+ # that will need to be updated in the synthetic base epilogue
100
+ def create_synthetic_base_metadata(
101
+ m: ViewAndMutationMeta,
102
+ # Maps each outer argument idx to its inner idx (or, if this outer arg is generated from a
103
+ # synthetic base, you get a tuple of (i, TensorMeta), telling you the base tensor idx, and view metadata)
104
+ synthetic_base_info: List[Union[int, Tuple[int, torch.Tensor]]],
105
+ outer_args: List[Any],
106
+ inner_args: List[Any],
107
+ ) -> Tuple[ViewAndMutationMeta, List[int]]:
108
+ # maps inner arg indices to outer arg indices
109
+ synthetic_base_to_indices: Dict[int, List[int]] = {}
110
+ for inner_idx in range(len(inner_args)):
111
+ outer_aliased_indices_of_current_base_arg = [
112
+ outer_idx
113
+ for outer_idx, inner_idx_or_tuple in enumerate(synthetic_base_info)
114
+ if (isinstance(inner_idx_or_tuple, int) and inner_idx_or_tuple == inner_idx)
115
+ or (
116
+ isinstance(inner_idx_or_tuple, tuple)
117
+ and inner_idx_or_tuple[0] == inner_idx
118
+ )
119
+ ]
120
+ synthetic_base_to_indices[inner_idx] = outer_aliased_indices_of_current_base_arg
121
+
122
+ # given the requires_grad info on mutated inputs,
123
+ # generate the requires_grad info on those same mutated inputs, but after constructing synthetic bases.
124
+ input_infos = []
125
+ for outer_indices in synthetic_base_to_indices.values():
126
+ # leaf-ness should be all-or-nothing for aliased tensor.
127
+ # (aka if "a" and "b" are views, then a.is_leaf == b.is_leaf)
128
+ any_leaf = any(m.input_info[x].is_leaf for x in outer_indices)
129
+ all_leaf = all(m.input_info[x].is_leaf for x in outer_indices)
130
+ assert any_leaf == all_leaf
131
+
132
+ mutates_data = (
133
+ True
134
+ if len(outer_indices) > 1
135
+ else m.input_info[outer_indices[0]].mutates_data
136
+ )
137
+ mutates_metadata = (
138
+ False
139
+ if len(outer_indices) > 1
140
+ else m.input_info[outer_indices[0]].mutates_metadata
141
+ )
142
+ requires_grad = any(m.input_info[x].requires_grad for x in outer_indices)
143
+ mutations_hidden_from_autograd = all(
144
+ m.input_info[x].mutations_hidden_from_autograd for x in outer_indices
145
+ )
146
+ mutations_under_no_grad_or_inference_mode = all(
147
+ m.input_info[x].mutations_under_no_grad_or_inference_mode
148
+ for x in outer_indices
149
+ )
150
+
151
+ inpt_info = InputAliasInfo(
152
+ # If len(outer_indices) > 1, then this input is a synthetic base.
153
+ # The invariant is that to the rest of aot autograd, synthetic bases only show up if
154
+ # one of their aliases gets a data mutation. And if any of their aliases get metadata
155
+ # mutations, they will be hidden from the rest of aot autograd.
156
+ mutates_data=mutates_data,
157
+ mutates_metadata=mutates_metadata,
158
+ mutations_hidden_from_autograd=all(
159
+ m.input_info[x].mutations_hidden_from_autograd for x in outer_indices
160
+ ),
161
+ mutates_storage_metadata=False
162
+ if len(outer_indices) > 1
163
+ else m.input_info[outer_indices[0]].mutates_storage_metadata,
164
+ mutations_under_no_grad_or_inference_mode=mutations_under_no_grad_or_inference_mode,
165
+ is_leaf=any_leaf,
166
+ requires_grad=requires_grad,
167
+ keep_input_mutations=m.keep_input_mutations,
168
+ )
169
+ input_infos.append(inpt_info)
170
+
171
+ # Find any inputs that fulfill the following criteria:
172
+ # (1) They are part of a synthetic base (because they alias another input,
173
+ # and at least one input experiences a data mutation)
174
+ # (2) They experience a metadata mutation
175
+ outer_aliased_arg_idx_with_metadata_mutations = [
176
+ outer_idx
177
+ for outer_idx, inpt_info in enumerate(m.input_info)
178
+ if inpt_info.mutates_metadata
179
+ and not isinstance(synthetic_base_info[outer_idx], int)
180
+ ]
181
+
182
+ # grab the original requires grad info on the outputs, except the ones from the mutated inputs
183
+ input_metadata_output_info = [
184
+ OutputAliasInfo(
185
+ output_type=OutputType.alias_of_input,
186
+ raw_type=FunctionalTensor,
187
+ dynamic_dims={
188
+ i
189
+ for i, s in enumerate(outer_args[outer_idx].shape)
190
+ if not is_concrete_int(s)
191
+ },
192
+ base_idx=synthetic_base_info[outer_idx][0], # type: ignore[index]
193
+ requires_grad=outer_args[outer_idx].requires_grad,
194
+ )
195
+ for outer_idx in outer_aliased_arg_idx_with_metadata_mutations
196
+ ]
197
+ existing_output_infos = []
198
+ for o in m.output_info:
199
+ new_base_idx = (
200
+ None
201
+ if o.base_idx is None
202
+ else (
203
+ synthetic_base_info[o.base_idx]
204
+ if isinstance(synthetic_base_info[o.base_idx], int)
205
+ else synthetic_base_info[o.base_idx][0] # type: ignore[index]
206
+ )
207
+ )
208
+ # If base_idx is changed for OutputType.is_input, we need to update the output type to reflect the change
209
+ new_output_type = (
210
+ OutputType.alias_of_input
211
+ if o.output_type == OutputType.is_input and o.base_idx != new_base_idx
212
+ else o.output_type
213
+ )
214
+ existing_output_infos.append(
215
+ OutputAliasInfo(
216
+ output_type=new_output_type,
217
+ raw_type=o.raw_type,
218
+ dynamic_dims=o.dynamic_dims,
219
+ # Map the input idx pre-synthetic-bases to the new idx post-synthetic-bases
220
+ base_idx=new_base_idx, # type: ignore[arg-type]
221
+ requires_grad=o.requires_grad,
222
+ )
223
+ )
224
+
225
+ inner_mutated_tangents = [
226
+ x
227
+ for inner_idx, x in enumerate(inner_args)
228
+ if input_infos[inner_idx].mutates_data and input_infos[inner_idx].requires_grad
229
+ ]
230
+
231
+ output_info = existing_output_infos + input_metadata_output_info
232
+ # Regenerate traced tangents to include mutated inputs including synthetic bases
233
+ traced_tangents = (
234
+ inner_mutated_tangents + m.traced_tangents[len(inner_mutated_tangents) :]
235
+ )
236
+
237
+ return (
238
+ ViewAndMutationMeta(
239
+ input_info=input_infos,
240
+ output_info=output_info,
241
+ num_intermediate_bases=m.num_intermediate_bases,
242
+ keep_input_mutations=m.keep_input_mutations,
243
+ traced_tangents=traced_tangents,
244
+ # We are guaranteed not to get here, since synthetic_base codepaths are not supported today with subclass inputs.
245
+ subclass_inp_meta=[],
246
+ subclass_fw_graph_out_meta=[],
247
+ subclass_tangent_meta=[],
248
+ is_train=m.is_train,
249
+ ),
250
+ outer_aliased_arg_idx_with_metadata_mutations,
251
+ )
252
+
253
+
254
+ def _get_last_mem_address(x):
255
+ out = x.storage_offset()
256
+ for size, stride in zip(x.size(), x.stride()):
257
+ out += (size - 1) * stride
258
+ return out
259
+
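+ # Editor-added sketch (not part of the upstream file): for the 4x4 view with
+ # stride (8, 1) and storage offset 0 used in the comments below, the last
+ # occupied element sits at storage index 0 + 3*8 + 3*1 == 27 ("last byte is 27").
+ def _example_last_mem_address():
+     base = torch.arange(32).reshape(4, 8)
+     x = base.narrow(1, 0, 4)  # size=(4, 4), stride=(8, 1), offset=0
+     assert _get_last_mem_address(x) == 27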
260
+
261
+ # Assumption: x and y are known to share a storage, and we are trying to determine
262
+ # if their memory is actually completely disjoint, based on sizes/strides/storage_offset
263
+ def _tensors_definitely_do_not_overlap(x, y):
264
+ if x is y:
265
+ return False
266
+ if x.numel() == 0 or y.numel() == 0:
267
+ return True
268
+
269
+ # Make x always on the left
270
+ if x.storage_offset() > y.storage_offset():
271
+ x, y = y, x
272
+ # Short-circuit in the "obvious" overlapping case: both tensors are contiguous
273
+ if x.is_contiguous() and y.is_contiguous():
274
+ if x.storage_offset() + x.numel() > y.storage_offset():
275
+ # definitely overlap
276
+ return False
277
+ else:
278
+ # definitely no overlap
279
+ return True
280
+
281
+ # Short-circuit: if last memory address of x is < start of y, then not overlapping.
282
+ x_last = _get_last_mem_address(x)
283
+ if x_last < y.storage_offset():
284
+ return True
285
+
286
+ if x.dim() == 2 and y.dim() == 2 and x.stride(1) == 1 and y.stride(1) == 1:
287
+ # This cases is needed for the shampoo optimizer.
288
+ # All tensors are 2d (non-contiguous), have the same outer stride, and have an inner stride of 1
289
+ # (so rows are contiguous)
290
+ if x.stride(0) == y.stride(0):
291
+ offset_delta = y.storage_offset() - x.storage_offset()
292
+ if offset_delta < x.size(1):
293
+ # definitely overlaps (row 0 of y overlaps with row 0 of x)
294
+ # Example:
295
+ # base = torch.arange(32).reshape(4, 8)
296
+ # x = base.narrow(1, 0, 4)
297
+ # x: size=(4, 4), stride=(8, 1), offset=0
298
+ # y = base.narrow(1, 3, 4)
299
+ # y: size=(4, 4), stride=(8, 1), offset=3
300
+ return False
301
+ x_total_elems_covered = x.stride(0) * (x.size(0) - 1) + x.size(1)
302
+ if x_total_elems_covered <= offset_delta:
303
+ # definitely does not overlap (last byte of x is before start of y)
304
+ # Example:
305
+ # x: size=(4, 4), stride=(8, 1), offset=0 (last byte is 27)
306
+ # y: size=(4, 4), stride=(8, 1), offset=28 (start byte is 28)
307
+ return True
308
+ # At this point, we want to check if the 0th row of y
309
+ # overlaps with **some** row of x.
310
+ # We can check this by shifting y backward by the shared stride, repeatedly,
311
+ # until the first row of y is before the first row of x.
312
+ # Then we can check if these rows overlap.
313
+ # We can accomplish this by modding our offset by the stride.
314
+ offset_delta_mod = offset_delta % x.stride(0)
315
+ # Example:
316
+ # 0 1 2 3
317
+ # 9 10 11 12
318
+ # 18 19 20 21
319
+ # 27 28 29 30
320
+ # x: size=(4, 4), stride=(9, 1), offset=0
321
+ # y: size=(4, 4), stride=(9, 1), offset=22 (this would not overlap)
322
+ # y: size=(4, 4), stride=(9, 1), offset=23 (this would not overlap)
323
+ # y: size=(4, 4), stride=(9, 1), offset=24 (this would overlap)
324
+ # y: size=(4, 4), stride=(9, 1), offset=25 (this would overlap)
325
+ # If the interval [modded_offset, modded_offset + x_size] falls entirely
326
+ # without
327
+ if offset_delta_mod + y.size(1) <= x.stride(0):
328
+ return True
329
+ else:
330
+ return False
331
+ return False
332
+
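+ # Editor-added sketch (not part of the upstream file): the narrow() examples
+ # from the comments above. x and y share columns of the same storage, so
+ # disjointness cannot be proven; x and z cover disjoint column ranges.
+ def _example_definitely_do_not_overlap():
+     base = torch.arange(32).reshape(4, 8)
+     x = base.narrow(1, 0, 4)  # size=(4, 4), stride=(8, 1), offset=0
+     y = base.narrow(1, 3, 4)  # size=(4, 4), stride=(8, 1), offset=3
+     z = base.narrow(1, 4, 4)  # size=(4, 4), stride=(8, 1), offset=4
+     assert not _tensors_definitely_do_not_overlap(x, y)
+     assert _tensors_definitely_do_not_overlap(x, z)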
333
+
334
+ def compute_overlapping_inputs(fwd_inputs, aliased_input_indices):
335
+ actual_aliased_indices = set()
336
+ for j in range(len(aliased_input_indices)):
337
+ for i in range(j):
338
+ i_ = aliased_input_indices[i]
339
+ j_ = aliased_input_indices[j]
340
+ if not _tensors_definitely_do_not_overlap(fwd_inputs[i_], fwd_inputs[j_]):
341
+ actual_aliased_indices.add(i_)
342
+ actual_aliased_indices.add(j_)
343
+ return actual_aliased_indices
344
+
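+ # Editor-added sketch (not part of the upstream file): only pairs of aliased
+ # inputs whose memory may actually overlap are reported.
+ def _example_compute_overlapping_inputs():
+     base = torch.arange(32).reshape(4, 8)
+     x, y, z = base.narrow(1, 0, 4), base.narrow(1, 3, 4), base.narrow(1, 4, 4)
+     assert compute_overlapping_inputs([x, z], [0, 1]) == set()
+     assert compute_overlapping_inputs([x, y], [0, 1]) == {0, 1}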
345
+
346
+ def _graph_input_names(gm):
347
+ return [node.name for node in gm.graph.nodes if node.op == "placeholder"]
348
+
349
+
350
+ def _graph_output_names(gm):
351
+ output_node = next(iter(reversed(gm.graph.nodes)))
352
+ assert output_node.op == "output" and len(output_node.args) == 1
353
+ return_args = output_node.args[0]
354
+ return [getattr(return_arg, "name", None) for return_arg in return_args]
355
+
356
+
357
+ def create_graph_signature(
358
+ fx_g: torch.fx.GraphModule,
359
+ fw_metadata: ViewAndMutationMeta,
360
+ in_spec: pytree.TreeSpec,
361
+ out_spec: pytree.TreeSpec,
362
+ *,
363
+ user_args_flat: List[Tensor],
364
+ params_and_buffers_flat: List[Tensor],
365
+ param_names: List[str],
366
+ buffer_names: List[str],
367
+ trace_joint: bool,
368
+ num_user_fw_outs: Optional[int],
369
+ loss_index: Optional[int],
370
+ ) -> GraphSignature:
371
+ # Retrieve graph input names
372
+ graph_input_names = _graph_input_names(fx_g)
373
+ # Retrieve graph output names
374
+ graph_output_names = _graph_output_names(fx_g)
375
+
376
+ num_params_buffers = len(param_names) + len(buffer_names)
377
+ num_tokens = len(fw_metadata.tokens)
378
+ # We have enough restrictions on the graph (no de-duping, synthetic bases, etc),
379
+ # Such that # graph inps = # user inps + # params + # buffers
380
+ num_user_args = len(graph_input_names) - num_params_buffers - num_tokens
381
+
382
+ if trace_joint:
383
+ assert num_user_fw_outs is not None
384
+ num_fw_outs = num_user_fw_outs + fw_metadata.num_mutated_inp_runtime_indices
385
+ backward_output_names = graph_output_names[num_fw_outs:]
386
+
387
+ grad_index = itertools.count(0)
388
+ gradients_to_parameters = {
389
+ backward_output_names[next(grad_index)]: param_names[i]
390
+ for i, param in enumerate(params_and_buffers_flat)
391
+ if param.requires_grad
392
+ }
393
+
394
+ gradients_to_user_inputs = {
395
+ backward_output_names[next(grad_index)]: graph_input_names[
396
+ i + len(params_and_buffers_flat)
397
+ ]
398
+ for i, user_input in enumerate(user_args_flat)
399
+ if user_input.requires_grad
400
+ }
401
+
402
+ assert len(gradients_to_parameters) + len(gradients_to_user_inputs) == len(
403
+ backward_output_names
404
+ )
405
+
406
+ # Check that we have fully accounted for all graph outputs
407
+ backward_signature = BackwardSignature(
408
+ gradients_to_parameters,
409
+ gradients_to_user_inputs,
410
+ graph_output_names[loss_index],
411
+ )
412
+ else:
413
+ backward_signature = None
414
+ num_user_fw_outs = (
415
+ len(graph_output_names)
416
+ - fw_metadata.num_mutated_inp_runtime_indices
417
+ - num_tokens
418
+ )
419
+
420
+ return GraphSignature.from_tracing_metadata(
421
+ in_spec=in_spec,
422
+ out_spec=out_spec,
423
+ graph_input_names=graph_input_names,
424
+ graph_output_names=graph_output_names,
425
+ view_mutation_metadata=fw_metadata,
426
+ named_parameters=param_names,
427
+ named_buffers=buffer_names,
428
+ num_user_inputs=num_user_args,
429
+ num_user_outputs=num_user_fw_outs,
430
+ loss_index=loss_index,
431
+ backward_signature=backward_signature,
432
+ )
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py ADDED
@@ -0,0 +1,936 @@
1
+ """
2
+ These are the runtime wrappers that are associated with JIT-compiling.
3
+
4
+ This includes the forward-only and joint JIT runtime wrappers.
5
+
6
+ This module depends heavily on the runtime wrapper building blocks defined
7
+ in `runtime_wrappers`.
8
+ """
9
+
10
+ import logging
11
+ from contextlib import nullcontext
12
+ from functools import wraps
13
+ from typing import Any, List, Optional
14
+
15
+ import torch
16
+ import torch.utils.dlpack
17
+ from torch import Tensor
18
+ from torch._dynamo.utils import lazy_format_graph_code
19
+ from torch._guards import detect_fake_mode, tracing, TracingContext
20
+ from torch._logging import getArtifactLogger, trace_structured
21
+ from torch._prims_common import CUDARngStateHelper
22
+ from torch._subclasses import FakeTensor
23
+ from torch.fx.experimental._backward_state import BackwardState
24
+ from torch.fx.experimental.proxy_tensor import is_sym_node
25
+ from torch.fx.experimental.symbolic_shapes import fx_placeholder_vals
26
+ from .. import config
27
+ from .dispatch_and_compile_graph import (
28
+ aot_dispatch_autograd_graph,
29
+ aot_dispatch_base_graph,
30
+ )
31
+ from .logging_utils import describe_input, format_guard_bug_msg, track_graph_compiling
32
+
33
+ from .runtime_wrappers import (
34
+ aot_dispatch_subclass_wrapper,
35
+ create_runtime_wrapper,
36
+ functionalized_rng_runtime_epilogue,
37
+ )
38
+ from .schemas import (
39
+ AOTConfig,
40
+ MutationType,
41
+ OutputType,
42
+ SubclassMeta,
43
+ TensorAlias,
44
+ ViewAndMutationMeta,
45
+ )
46
+ from .subclass_utils import (
47
+ compute_inner_mutated_inp_indices_from_subclass_meta,
48
+ unwrap_tensor_subclasses,
49
+ wrap_tensor_subclasses,
50
+ )
51
+
52
+ from .utils import (
53
+ _get_symint_hints,
54
+ call_func_at_runtime_with_args,
55
+ make_boxed_func,
56
+ normalize_as_list,
57
+ strict_zip,
58
+ )
59
+
60
+ zip = strict_zip
61
+
62
+ log = logging.getLogger(__name__)
63
+ aot_joint_log = getArtifactLogger(__name__, "aot_joint_graph")
64
+ aot_graphs_log = getArtifactLogger(__name__, "aot_graphs")
65
+
66
+ aten = torch.ops.aten
67
+
68
+
69
+ def _compute_output_meta_with_inductor_strides(fw_module, fwd_output_strides):
70
+ out = [n.meta["val"] for n in (list(fw_module.graph.nodes)[-1].args[0])]
71
+ # will only be set for inductor
72
+ if not fwd_output_strides:
73
+ return out
74
+ with TracingContext.get().fake_mode.shape_env.suppress_guards():
75
+ for i in range(len(out)):
76
+ if not isinstance(out[i], Tensor):
77
+ continue
78
+ if all(s1 == s2 for s1, s2 in zip(out[i].stride(), fwd_output_strides[i])):
79
+ continue
80
+ out[i] = out[i].as_strided(out[i].shape, fwd_output_strides[i])
81
+ return out
82
+
83
+
84
+ def aot_dispatch_base(
85
+ flat_fn,
86
+ flat_args: List[Tensor],
87
+ aot_config: AOTConfig,
88
+ *,
89
+ fw_metadata: ViewAndMutationMeta,
90
+ ):
91
+ fw_module, updated_flat_args, maybe_subclass_meta = aot_dispatch_base_graph( # type: ignore[misc]
92
+ flat_fn, flat_args, aot_config, fw_metadata=fw_metadata
93
+ )
94
+
95
+ disable_amp = torch._C._is_any_autocast_enabled()
96
+ context = torch._C._DisableAutocast if disable_amp else nullcontext
97
+ fakified_out = None
98
+
99
+ with context(), track_graph_compiling(aot_config, "inference"):
100
+ compiler = (
101
+ aot_config.inference_compiler
102
+ if aot_config.inference_compiler is not None
103
+ else aot_config.fw_compiler
104
+ )
105
+ if config.functionalize_rng_ops:
106
+ # Add the seed and offset as example inputs to pass to the compiler
107
+ fake_mode = detect_fake_mode()
108
+ seed, offset = CUDARngStateHelper.get_torch_state_as_tuple(fake_mode)
109
+ updated_flat_args.extend([seed, offset])
110
+
111
+ if tracing_context := torch._guards.TracingContext.try_get():
112
+ tracing_context.fw_metadata = (
113
+ fw_metadata
114
+ if maybe_subclass_meta is None
115
+ else maybe_subclass_meta.fw_metadata
116
+ )
117
+
118
+ with TracingContext.report_output_strides() as fwd_output_strides:
119
+ compiled_fw = compiler(fw_module, updated_flat_args)
120
+
121
+ # see note: [Returning Fake Tensors on First AOT Autograd Call]
122
+ if tracing_context and tracing_context.fakify_first_call:
123
+ fakified_out = _compute_output_meta_with_inductor_strides(
124
+ fw_module, fwd_output_strides
125
+ )
126
+
127
+ # However, create_runtime_wrapper does not expect the rng offsets in the
128
+ # output. So, we have to create another wrapper and take out the offset. As
129
+ # a result, we also have to account for compilers that are not boxed_call.
130
+ if not hasattr(compiled_fw, "_boxed_call"):
131
+ compiled_fw = make_boxed_func(compiled_fw)
132
+
133
+ # Create a wrapper to set up the rng functionalize bits
134
+ @wraps(compiled_fw)
135
+ def rng_functionalization_wrapper(args):
136
+ # see note: [Returning Fake Tensors on First AOT Autograd Call]
137
+ nonlocal fakified_out
138
+ if fakified_out is not None:
139
+ out = fakified_out
140
+ fakified_out = None
141
+ return out
142
+
143
+ # args is a list because compiled_fw is boxed_call
144
+ if fw_metadata.is_rng_op_functionalized:
145
+ # Add the seed and offset to args
146
+ seed, offset = CUDARngStateHelper.get_torch_state_as_tuple()
147
+ args.extend([seed, offset])
148
+ out = compiled_fw(args)
149
+ out = functionalized_rng_runtime_epilogue(fw_metadata, out)
150
+ return out
151
+ else:
152
+ return compiled_fw(args)
153
+
154
+ if maybe_subclass_meta is not None:
155
+ compiled_fw_func = aot_dispatch_subclass_wrapper(
156
+ rng_functionalization_wrapper,
157
+ subclass_metas=fw_metadata.subclass_fw_graph_out_meta,
158
+ num_fw_outs_saved_for_bw=None,
159
+ )
160
+ else:
161
+ compiled_fw_func = rng_functionalization_wrapper
162
+
163
+ if not hasattr(compiled_fw_func, "_boxed_call"):
164
+ compiled_fw_func = make_boxed_func(compiled_fw_func)
165
+
166
+ compiled_fn = create_runtime_wrapper(
167
+ compiled_fw_func,
168
+ runtime_metadata=fw_metadata,
169
+ indices_of_inps_to_detach=[],
170
+ trace_joint=False,
171
+ keep_input_mutations=aot_config.keep_inference_input_mutations,
172
+ disable_amp=disable_amp,
173
+ )
174
+
175
+ return compiled_fn
176
+
177
+
178
+ def aot_dispatch_autograd(
179
+ flat_fn,
180
+ flat_args: List[Any],
181
+ aot_config: AOTConfig,
182
+ *,
183
+ fw_metadata: ViewAndMutationMeta,
184
+ ):
185
+ fw_metadata.deterministic = torch.are_deterministic_algorithms_enabled()
186
+ fx_g, joint_inputs, maybe_subclass_meta = aot_dispatch_autograd_graph( # type: ignore[misc]
187
+ flat_fn, flat_args, aot_config, fw_metadata=fw_metadata
188
+ )
189
+
190
+ # Copied from aot_dispatch_autograd_graph.
191
+ disable_amp = torch._C._is_any_autocast_enabled()
192
+
193
+ if aot_config.enable_log:
194
+ aot_joint_log.info(
195
+ "%s", lazy_format_graph_code("Joint graph", fx_g, aot_config.aot_id)
196
+ )
197
+ trace_structured(
198
+ "aot_joint_graph",
199
+ payload_fn=lambda: fx_g.print_readable(print_output=False), # type: ignore[union-attr]
200
+ )
201
+
202
+ fakify_first_call = False
203
+ fakified_out = None
204
+
205
+ with torch.no_grad():
206
+ inner_meta = (
207
+ fw_metadata
208
+ if maybe_subclass_meta is None
209
+ else maybe_subclass_meta.fw_metadata
210
+ )
211
+ with track_graph_compiling(aot_config, "joint"):
212
+ # See Note: [Partitioner handling for Subclasses, Part 1]
213
+ # See Note: [Recomputing subclass mutation handling]
214
+ mutated_inp_runtime_indices = (
215
+ compute_inner_mutated_inp_indices_from_subclass_meta(
216
+ fw_metadata, inner_meta
217
+ )
218
+ )
219
+ num_mutated_inp_runtime_indices = len(mutated_inp_runtime_indices)
220
+ num_inner_fwd_outputs = (
221
+ num_mutated_inp_runtime_indices
222
+ + inner_meta.num_outputs
223
+ + inner_meta.num_intermediate_bases
224
+ + inner_meta.num_outputs_rng_offset
225
+ + len(
226
+ fw_metadata.tokens
227
+ ) # See Note [Side-Effectful Tokens in AOTAutograd]
228
+ )
229
+ fw_module, bw_module = aot_config.partition_fn(
230
+ fx_g, joint_inputs, num_fwd_outputs=num_inner_fwd_outputs
231
+ )
232
+
233
+ fw_outs = next(n for n in fw_module.graph.nodes if n.op == "output").args[0]
234
+ # we only need to bookkeep the symints that are saved for bw, not any symints
235
+ # the user forward might have returned in its own output
236
+ fw_outs_saved_for_bw = fw_outs[num_inner_fwd_outputs:]
237
+ num_fw_outs_saved_for_bw = len(fw_outs_saved_for_bw)
238
+ symint_outs_saved_for_bw = [
239
+ n for n in fw_outs_saved_for_bw if is_sym_node(n)
240
+ ]
241
+ fw_metadata.num_symints_saved_for_bw = len(symint_outs_saved_for_bw)
242
+ inner_meta.num_symints_saved_for_bw = len(symint_outs_saved_for_bw)
243
+ _num_symints_saved_for_bw = len(symint_outs_saved_for_bw)
244
+
245
+ # Note [Detaching inputs that never need gradients]
246
+ # See https://github.com/pytorch/pytorch/issues/97745
247
+ # Suppose we have a function like this that we want to compile:
248
+ #
249
+ # def f(x, y):
250
+ # return torch.mul(x, y.detach())
251
+ #
252
+ # What gradients should we compute for x and y?
253
+ # By default, AOTAutograd will compute a gradient for **every** input that requires gradients,
254
+ # and so we'll compute:
255
+ # x_grad_input = y
256
+ # y_grad_input = None
257
+ # Does this preserve the semantics of eager mode?
258
+ # Unfortunately, no.
259
+ # Doing the above will cause autograd to **continue** to backprop the autograd tape
260
+ # that was generated from constructing y.
261
+ #
262
+ # This is **different** from what would have happened in eager mode.
263
+ # In eager mode, if we backprop through the output of this function, autograd will only traverse
264
+ # the bit of the autograd tape corresponding to "x".
265
+ # In particular, if a user had previously backpropped through y's autograd tape,
266
+ # And then they try to backprop through the output of the above function,
267
+ # then we'll hit the dreaded "Trying to backward through the graph a second time" error.
268
+ #
269
+ # You might think: If autograd sees that a gradient is None, shouldn't it stop early,
270
+ # instead of continuing the backprop through the ancestors of that node in the graph?
271
+ #
272
+ # Autograd has two passes:
273
+ # (1) a first pass that traverses the autograd graph and figures out which nodes need to be executed
274
+ # (2) a second pass that actually goes ahead and executes each node when it becomes ready,
275
+ # propagating gradients
276
+ # By the time we're executing a node and we see that it produces a None, the set of nodes to execute
277
+ # is already locked-in.
278
+ #
279
+ # The fix: instead, we can recognize statically that the graph we're compiling will never contribute
280
+ # gradients to y, and prevent autograd from trying to traverse y's autograd tape at all.
281
+ # We can do this by manually detach'ing y before sending it through the `CompiledFunction`.
282
+ #
283
+ # Note that this solution is not bulletproof.
284
+ # It's possible to construct a case where eager may or may not have tried to autograd through y,
285
+ # depending on the actual grad_outputs that were passed in during the backward.
286
+ # There is no easy fix for this: the simplest fix would be to run with `retain_graph=True`,
287
+ # allowing autograd to re-use the graph.
288
+ #
289
+ # An example of this case is:
290
+ # def f(x):
291
+ # return x.detach() * 2, x * 3
292
+ # If we backprop only through outs[0] in eager, autograd stops at the detach,
293
+ # so no grad should be sent through x.
294
+ # But the custom autograd function doesn't know that: it will materialize zero grads for x * 3
295
+ # and we will end up with a zero grad at x.
296
+ # If we later backprop through the second output, this will also require backprop'ing through x.
297
+ # Meaning we'll need to use `retain_graph=True` to be able to backprop through x the second time.
298
+ _indices_of_inps_to_detach = []
299
+ bw_outs = next(n for n in bw_module.graph.nodes if n.op == "output").args[0]
300
+
301
+ # TODO: we should apply the below "detach inputs if their gradients are statically known to be None"
302
+ # optimization even if we have subclass inputs/outputs (we do not handle this today).
303
+ # Computing which of our inputs get None gradients is a bit more complicated,
304
+ # if any of our inputs are subclasses. Why?
305
+ # (a) we need to make sure that we call .detach() on the input subclasses, since autograd sees subclasses.
306
+ # (b) The grad_outputs that we AOT computed in our backward graph are the desugared plain tensors,
307
+ # so we need to figure out which subclass fw inputs they map to.
308
+ if maybe_subclass_meta is None:
309
+ assert (
310
+ len(bw_outs)
311
+ == len(fw_metadata.input_info) + inner_meta.num_outputs_rng_offset
312
+ )
313
+ for i, (bw_out) in enumerate(bw_outs):
314
+ if bw_out is None:
315
+ _indices_of_inps_to_detach.append(i)
316
+
317
+ if aot_config.enable_log:
318
+ aot_graphs_log.info(
319
+ "%s",
320
+ lazy_format_graph_code("Forward graph", fw_module, aot_config.aot_id),
321
+ )
322
+ aot_graphs_log.info(
323
+ "%s",
324
+ lazy_format_graph_code("Backward graph", bw_module, aot_config.aot_id),
325
+ )
326
+ trace_structured(
327
+ "aot_forward_graph",
328
+ payload_fn=lambda: fw_module.print_readable(print_output=False),
329
+ )
330
+ trace_structured(
331
+ "aot_backward_graph",
332
+ payload_fn=lambda: bw_module.print_readable(print_output=False),
333
+ )
334
+
335
+ with track_graph_compiling(aot_config, "forward"):
336
+ # flat_args at this point might still be subclasses-
337
+ # make sure to pass the unwrapped fake tensors into the compiler!
338
+ adjusted_flat_args = joint_inputs[0]
339
+ if config.functionalize_rng_ops:
340
+ # Update example inputs for the fw_compiler
341
+ fake_mode = detect_fake_mode()
342
+ seed, offset = CUDARngStateHelper.get_torch_state_as_tuple(fake_mode)
343
+ adjusted_flat_args.extend([seed, offset])
344
+ # We are not clearing flat_args here because
345
+ # 1) There is a check in the debug compiler at the end
346
+ # 2) It does not matter as these are fake tensors
347
+
348
+ if tracing_context := torch._guards.TracingContext.try_get():
349
+ tracing_context.fw_metadata = inner_meta
350
+
351
+ with TracingContext.report_output_strides() as fwd_output_strides:
352
+ compiled_fw_func = aot_config.fw_compiler(fw_module, adjusted_flat_args)
353
+ if not hasattr(compiled_fw_func, "_boxed_call"):
354
+ compiled_fw_func = make_boxed_func(compiled_fw_func)
355
+
356
+ # see note: [Returning Fake Tensors on First AOT Autograd Call]
357
+ if tracing_context and tracing_context.fakify_first_call:
358
+ fakified_out = _compute_output_meta_with_inductor_strides(
359
+ fw_module, fwd_output_strides
360
+ )
361
+ fakify_first_call = True
362
+
363
+ if maybe_subclass_meta is not None:
364
+ # Why do we need to pass in num_fw_outs_saved_for_bw?
365
+ # See Note: [Partitioner handling for Subclasses, Part 2]
366
+ compiled_fw_func = aot_dispatch_subclass_wrapper(
367
+ compiled_fw_func,
368
+ subclass_metas=fw_metadata.subclass_fw_graph_out_meta,
369
+ num_fw_outs_saved_for_bw=num_fw_outs_saved_for_bw,
370
+ )
371
+ if not hasattr(compiled_fw_func, "_boxed_call"):
372
+ compiled_fw_func = make_boxed_func(compiled_fw_func)
373
+
374
+ # NB: It's important to compile backwards ahead of time, as this may
375
+ # add extra guards which we need to apply to the Dynamo cache at
376
+ # forwards
377
+ with track_graph_compiling(aot_config, "backward"):
378
+ placeholder_list = fx_placeholder_vals(bw_module)
379
+
380
+ forward_saved_for_backwards_strides = None
381
+ if fwd_output_strides is not None:
382
+ forward_saved_for_backwards_strides = fwd_output_strides[
383
+ inner_meta.tensors_saved_for_backwards_slice
384
+ ]
385
+
386
+ # saved activations can have different strides than in eager if
387
+ # the compiler does layout optimization. We should restride the
388
+ # tensor passed in for compiling the backward graph using the
389
+ # saved tensor's stride.
390
+ for i in range(len(placeholder_list)):
391
+ ph_arg = placeholder_list[i]
392
+ if not isinstance(ph_arg, torch.Tensor):
393
+ continue
394
+
395
+ if forward_saved_for_backwards_strides is None:
396
+ continue
397
+
398
+ real_stride = None
399
+ # Per all_args calling convention
400
+ j = i - len(symint_outs_saved_for_bw)
401
+ if 0 <= j < len(forward_saved_for_backwards_strides):
402
+ real_stride = forward_saved_for_backwards_strides[j]
403
+ if real_stride is None:
404
+ continue
405
+
406
+ # Comparing ph_arg.stride() with real_stride directly may
407
+ # cause dynamic dimensions in ph_arg being specialized to static
408
+ # value. Using the hints to avoid that.
409
+ if _get_symint_hints(ph_arg.stride()) != real_stride:
410
+ # Note that here we use the stride of the real tensor to
411
+ # restride a FakeTensor. This does not cause trouble
412
+ # for dynamic shape since this code path only gets
413
+ # executed if layout optimization is enabled. And we
414
+ # disable layout optimization for dynamic shape right
415
+ # now.
416
+ #
417
+ # A solution that decides the stride order based on the real
418
+ # tensor's stride and then applies that stride order to
419
+ # the FakeTensor does not work smoothly since some
420
+ # tensor's layout is not 'dense'. E.g. mixnet_l has a
421
+ # tensor with size [8, 64, 112, 112] and strides
422
+ # (2408448, 1, 21504, 192). The solution mentioned will
423
+ # decide a stride of (802816, 1, 7168, 64) for this
424
+ # tensor which is wrong.
425
+ placeholder_list[i] = ph_arg.as_strided(ph_arg.size(), real_stride)
426
+
427
+ compiled_bw_func = None
428
+ if len(symint_outs_saved_for_bw):
429
+ context = torch._C._DisableAutocast if disable_amp else nullcontext
430
+ with context():
431
+ try:
432
+ compiled_bw_func = aot_config.bw_compiler(
433
+ bw_module, placeholder_list
434
+ )
435
+ except Exception:
436
+ log.warning(
437
+ "failed to eagerly compile backwards for dynamic, suppressing in case backwards not needed",
438
+ exc_info=True,
439
+ )
440
+ # Compiled autograd will run the bw_module in the backward pass,
441
+ # so recompilation needs to happen anyway if the backward pass is ever
442
+ # called.
443
+ #
444
+ # The reason we do the GraphModule recompilation here is because
445
+ # the lazy recompilation will cause issue in the backward pass
446
+ # with compiled autograd.
447
+ #
448
+ # Do the _LazyGraphModule.force_recompile here rather than when
449
+ # bw_module is first generated by the partitioner because the bw_module.recompile
450
+ # may be called in some code path later and cause _LazyGraphModule.forward to
451
+ # become the lazy version again. One example is when dynamic shape is enabled
452
+ # upfront, the bw_compiler will be called above which can cause extra
453
+ # graph module recompilation on bw_module.
454
+ if torch._dynamo.compiled_autograd.compiled_autograd_enabled_count:
455
+ from torch.fx._lazy_graph_module import _LazyGraphModule
456
+
457
+ _LazyGraphModule.force_recompile(bw_module)
458
+
459
+ saved_context = TracingContext.try_get()
460
+
461
+ backward_state_indices = [
462
+ idx for idx, x in enumerate(flat_args) if isinstance(x, BackwardState)
463
+ ]
464
+ assert len(backward_state_indices) <= 1
465
+
466
+ class CompiledFunction(torch.autograd.Function):
467
+ compiled_fw = compiled_fw_func
468
+ compiled_bw = compiled_bw_func
469
+ metadata: ViewAndMutationMeta = fw_metadata # type: ignore[assignment]
470
+ maybe_subclass_metadata: Optional[SubclassMeta] = maybe_subclass_meta
471
+ num_symints_saved_for_bw = _num_symints_saved_for_bw
472
+ _compiled_autograd_should_lift = False
473
+ _fakify_first_call = fakify_first_call
474
+
475
+ @staticmethod
476
+ def _compiled_autograd_key(ctx):
477
+ return (ctx._autograd_function_id, *ctx.symints)
478
+
479
+ @staticmethod
480
+ def forward(ctx, *deduped_flat_tensor_args):
481
+ args = deduped_flat_tensor_args
482
+ if backward_state_indices:
483
+ bw_state = args[backward_state_indices[0]]
484
+ assert isinstance(bw_state, BackwardState)
485
+ ctx._compiled_autograd_backward_state = bw_state
486
+
487
+ marked_dirty_inps = []
488
+ for i in fw_metadata.mutated_graph_handled_indices_seen_by_autograd:
489
+ arg = deduped_flat_tensor_args[i]
490
+ if not (arg.requires_grad and arg.is_leaf): # would error
491
+ ctx.mark_dirty(arg)
492
+ marked_dirty_inps.append(arg)
493
+
494
+ if not CompiledFunction._fakify_first_call:
495
+ if CompiledFunction.metadata.is_rng_op_functionalized:
496
+ # Add the seed and offset to args
497
+ seed, offset = CUDARngStateHelper.get_torch_state_as_tuple()
498
+ args = (*args, seed, offset)
499
+ # There is a pretty complicated calling convention around what the compiled fw returns.
500
+ # The full list of outputs and their relative order is:
501
+ # (*tokens, *mutated_inputs, *fw_outs, *fw_intermediate_bases, *saved_tensors, *saved_symints)
502
+ # - Note that in the synthetic bases case, mutated_inputs will correspond to an updated version
503
+ # of the original view, and not the synthetic base
504
+
505
+ fw_outs = call_func_at_runtime_with_args(
506
+ CompiledFunction.compiled_fw,
507
+ args,
508
+ disable_amp=disable_amp,
509
+ )
510
+ else:
511
+ nonlocal fakified_out
512
+ assert fakified_out is not None
513
+ CompiledFunction._fakify_first_call = False
514
+ fw_outs = fakified_out
515
+ fakified_out = None
516
+
517
+ num_outputs = CompiledFunction.metadata.num_outputs
518
+ num_outputs_aliased = CompiledFunction.metadata.num_outputs_aliased
519
+ num_mutated_runtime_inps = (
520
+ CompiledFunction.metadata.num_mutated_inp_runtime_indices
521
+ )
522
+ num_tokens = len(CompiledFunction.metadata.tokens)
523
+ num_forward_returns = CompiledFunction.metadata.num_forward_returns
524
+ num_forward = CompiledFunction.metadata.num_forward
525
+
526
+ # Partitioners must put symint arguments at the end separate from tensor arguments
527
+ tensors_saved_for_backwards = fw_outs[
528
+ CompiledFunction.metadata.tensors_saved_for_backwards_slice
529
+ ]
530
+ assert all(isinstance(x, torch.Tensor) for x in tensors_saved_for_backwards)
531
+ # See Note [Detaching saved tensors in AOTAutograd]
532
+ ctx.save_for_backward(
533
+ *(
534
+ x.detach() if x._is_view() else x
535
+ for x in tensors_saved_for_backwards
536
+ )
537
+ )
538
+ symint_outs = fw_outs[
539
+ CompiledFunction.metadata.symints_saved_for_backwards_slice
540
+ ]
541
+ assert all(
542
+ isinstance(x, (int, float, torch.SymInt, torch.SymFloat))
543
+ for x in symint_outs
544
+ ), str([type(x) for x in symint_outs])
545
+ ctx.symints = symint_outs
546
+
547
+ raw_returns = fw_outs[0 : num_forward_returns + num_tokens]
548
+
549
+ # Wrap all autograd.Function.forward() outputs that are aliases
550
+ # so that autograd.Function doesn't treat them as tensors
551
+ if num_mutated_runtime_inps > 0:
552
+ for i, idx in enumerate(
553
+ CompiledFunction.metadata.mutated_inp_runtime_indices
554
+ ):
555
+ # We could make this faster by only looping over inputs with metadata-only mutations
556
+ # (instead of looping over inputs with either data or metadata mutations), but there shouldn't be many.
557
+ info = CompiledFunction.metadata.input_info[idx]
558
+ if info.mutates_metadata and not info.mutates_data:
559
+ raw_returns[i] = TensorAlias(raw_returns[i])
560
+
561
+ if config.debug_assert:
562
+ user_mutated_inputs_raw = raw_returns[0:num_mutated_runtime_inps]
563
+ mut_inp_infos = [
564
+ x
565
+ for x in CompiledFunction.metadata.input_info
566
+ if x.mutates_data or x.mutates_metadata
567
+ ]
568
+ assert len(user_mutated_inputs_raw) == len(mut_inp_infos)
569
+
570
+ if CompiledFunction.metadata.num_unsafe_view_outputs > 0:
571
+ for idx in CompiledFunction.metadata.unsafe_view_out_indices:
572
+ raw_return_idx = num_mutated_runtime_inps + idx
573
+ o = raw_returns[raw_return_idx]
574
+ raw_returns[raw_return_idx] = torch.ops.aten._unsafe_view(
575
+ o, o.shape
576
+ )
577
+
578
+ if num_outputs_aliased > 0:
579
+ for idx in CompiledFunction.metadata.aliased_out_indices:
580
+ raw_return_idx = num_mutated_runtime_inps + idx
581
+ raw_returns[raw_return_idx] = TensorAlias(
582
+ raw_returns[raw_return_idx]
583
+ )
584
+
585
+ if config.debug_assert:
586
+ intermediates_raw = raw_returns[
587
+ num_mutated_runtime_inps + num_outputs :
588
+ ]
589
+ assert not any(
590
+ isinstance(x, TensorAlias) for x in intermediates_raw
591
+ )
592
+
593
+ # invariant: intermediate bases always require gradients, so we don't have to
594
+ # consider marking them as non-differentiable.
595
+ raw_returns_not_including_intermediate_bases = raw_returns[
596
+ : num_mutated_runtime_inps + num_outputs
597
+ ]
598
+ raw_returns_meta = [
599
+ x
600
+ for x in CompiledFunction.metadata.input_info
601
+ if x.mutation_type == MutationType.MUTATED_OUT_GRAPH
602
+ ] + CompiledFunction.metadata.output_info
603
+
604
+ fw_outs_not_requiring_grad = [
605
+ x
606
+ for (i, x) in enumerate(raw_returns_not_including_intermediate_bases)
607
+ if isinstance(x, torch.Tensor) and not raw_returns_meta[i].requires_grad
608
+ ]
609
+ ctx.mark_non_differentiable(*fw_outs_not_requiring_grad)
610
+ ctx._materialize_non_diff_grads = False
611
+
612
+ functionalized_rng_runtime_epilogue(
613
+ CompiledFunction.metadata,
614
+ fw_outs[num_forward_returns:num_forward],
615
+ return_new_outs=False,
616
+ )
617
+ return tuple(raw_returns) + tuple(marked_dirty_inps)
618
+
619
+ @staticmethod
620
+ def backward(ctx, *flat_args):
621
+ # Calling convention: we expect a grad_out passed to the backward:
622
+ # - for every output of the fw that does *not* alias an input or graph intermediate
623
+ # - for every updated_input generated by the fw that does *not* alias an input (aka only data-mutations)
624
+ # - for every graph intermediate that we need to use to generate an output later.
625
+ # The other outputs in the autograd.Function.forward that do *not* show up in the backward include:
626
+ # - outputs that alias inputs or graph intermediates
627
+ # - updated inputs due to metadata-only mutations.
628
+ # We need to return them in the forward, but ensure that they all do not get gradients in the backward,
629
+ # and we filter them out here before passing the remaining grad_outputs into the compiled backward.
630
+ num_intermediate_bases = CompiledFunction.metadata.num_intermediate_bases
631
+ num_graph_handled_inputs = (
632
+ CompiledFunction.metadata.num_mutated_graph_handled_indices_seen_by_autograd
633
+ )
634
+ num_mutated_runtime_inps = (
635
+ CompiledFunction.metadata.num_mutated_inp_runtime_indices
636
+ )
637
+ expected_grad_outs = (
638
+ CompiledFunction.metadata.num_outputs
639
+ + num_mutated_runtime_inps
640
+ + num_intermediate_bases
641
+ )
642
+ deterministic = CompiledFunction.metadata.deterministic
643
+ global_deterministic = torch.are_deterministic_algorithms_enabled()
644
+ if deterministic is not None:
645
+ torch._check(
646
+ not (not deterministic and global_deterministic),
647
+ lambda: (
648
+ "This compiled backward function is being run with "
649
+ "torch.use_deterministic_algorithms(True), "
650
+ "but it was previously generated during the forward function while "
651
+ "torch.use_deterministic_algorithms(False) was set."
652
+ ),
653
+ )
654
+
655
+ if num_graph_handled_inputs > 0:
656
+ flat_args = flat_args[:-num_graph_handled_inputs]
657
+ assert len(flat_args) == expected_grad_outs
658
+ out_info = CompiledFunction.metadata.output_info
659
+
660
+ inp_tangents, out_tangents, intermediate_base_tangents = (
661
+ flat_args[0:num_mutated_runtime_inps],
662
+ flat_args[
663
+ num_mutated_runtime_inps : num_mutated_runtime_inps
664
+ + CompiledFunction.metadata.num_outputs
665
+ ],
666
+ flat_args[
667
+ num_mutated_runtime_inps + CompiledFunction.metadata.num_outputs :
668
+ ],
669
+ )
670
+ # input_info contains info on *every* input,
671
+ # but in the backward(), we are only given grad outputs for every mutated input
672
+ # We then need to filter out the grad outputs that correspond to metadata-only mutations or don't require grad
673
+ input_info = CompiledFunction.metadata.input_info
674
+ inp_tangents_filtered = [
675
+ x
676
+ for x, info_idx in zip(
677
+ inp_tangents, CompiledFunction.metadata.mutated_inp_runtime_indices
678
+ )
679
+ if input_info[info_idx].mutates_data
680
+ and input_info[info_idx].requires_grad
681
+ ]
682
+ # We also need to filter out grad outputs that correspond to outputs aliasing inputs/intermediates
683
+ out_tangents_filtered = [
684
+ x
685
+ for x, info in zip(out_tangents, out_info)
686
+ if info.output_type
687
+ in [
688
+ OutputType.non_alias,
689
+ OutputType.unsafe_view_alias,
690
+ OutputType.custom_function_view,
691
+ ]
692
+ and issubclass(info.raw_type, torch.Tensor)
693
+ and info.requires_grad
694
+ ]
695
+ # intermediate bases always require gradients, and always participate in the backward graph.
696
+ flat_bw_args_with_grads = [
697
+ *inp_tangents_filtered,
698
+ *out_tangents_filtered,
699
+ *intermediate_base_tangents,
700
+ ]
701
+ num_flat_bw_args_with_grads = len(flat_bw_args_with_grads)
702
+
703
+ # sanity asserts
704
+ # metadata_only_inps = [
705
+ # x for x, info_idx in zip(inp_tangents, mutated_inp_indices)
706
+ # if not input_info[info_idx].mutates_data
707
+ # ]
708
+ # aliased_outputs = [
709
+ # x for x, info in zip(out_tangents, out_info) if info.output_type != OutputType.non_alias]
710
+ # assert all(x is None for x in metadata_only_inps)
711
+ # assert all(x is None for x in aliased_outputs)
712
+
713
+ rng_args = []
714
+ if CompiledFunction.metadata.is_rng_op_functionalized:
715
+ # Add the seed and offset to args
716
+ rng_args = CUDARngStateHelper.get_torch_state_as_tuple()
717
+
718
+ all_args = [
719
+ *ctx.symints,
720
+ *ctx.saved_tensors,
721
+ *flat_bw_args_with_grads,
722
+ *rng_args,
723
+ ]
724
+ del flat_bw_args_with_grads
725
+
726
+ tangents_start_idx = (
727
+ len(all_args) - num_flat_bw_args_with_grads - len(rng_args)
728
+ )
729
+ tangents_end_idx = len(all_args) - len(rng_args)
730
+
731
+ # Note: [AOTAutograd Backward Guards]
732
+ # During AOTDispatch, we eagerly create and trace out a joint fw-bw graph.
733
+ # Doing so requires us to "guess" about some of the metadata of our grad_outputs.
734
+ #
735
+ # In particular: if an output to the forward is a plain tensor or a subclass,
736
+ # its corresponding grad_output in the backward **may or may not** be
737
+ # a plain tensor or a subclass. The main cases are:
738
+ # (1) If an output is a plain tensor, its grad_out will also be a plain tensor,
739
+ # *unless* the output is used in some subclass compute later in the forward graph,
740
+ # which will cause its grad_output to become a subclass
741
+ # (2) If an output is a subclass, its grad_out will also be a subclass,
742
+ # *unless* the output of the forward did not actually participate in the gradient computation,
743
+ # in which case autograd will insert a plain tensor of zeros for the grad_output.
744
+ # We could avoid this case with `torch.autograd.Function.set_materialize_grads`,
745
+ # although this is not turned on today in AOTAutograd and would require more work.
746
+ #
747
+ # Today, we make a guess on subclass-ness based on the above examples,
748
+ # and hard-error in the backward if we guessed wrong.
749
+ #
750
+ # In the future, we should add backward guards that would allow us to
751
+ # properly handle this case instead of erroring: we would need to retrace the backward graph,
752
+ # since we might produce an entirely different trace if our grad_outputs are subclass or not.
753
+ assert (
754
+ len(CompiledFunction.metadata.output_types)
755
+ == num_flat_bw_args_with_grads
756
+ )
757
+ grad_output_types = [
758
+ type(x) for x in all_args[-num_flat_bw_args_with_grads:]
759
+ ]
760
+ # In general, we can add more asserts/guards here for when we partitioned
761
+ # with incorrect assumptions about the grad_outputs.
762
+ # Normalize FakeTensor -> torch.Tensor
763
+ # - during tracing our types are FakeTensor
764
+ # - at runtime in the backward our types are torch.Tensor...
765
+ # - unless we're running compiled backward, in which case they are also FakeTensor
766
+ grad_output_types_ = [
767
+ torch.Tensor if x is FakeTensor else x for x in grad_output_types
768
+ ]
769
+ assert (
770
+ grad_output_types_ == CompiledFunction.metadata.output_types
771
+ ), f"""\
772
+ We incorrectly attempted to compile the backward with incorrect subclass metadata.
773
+ If you run into this error, please file an issue.
774
+ Expected grad_output types: {str(CompiledFunction.metadata.output_types)}
775
+ Got grad_output types: {str(grad_output_types)}"""
776
+
777
+ # TODO: figure out how to refactor the backward properly so I can use aot_dispatch_subclass_wrapper() here.
778
+ if CompiledFunction.maybe_subclass_metadata is not None:
779
+ # Get the number of tangents after unwrapping
780
+ len_tangents = len(
781
+ unwrap_tensor_subclasses(
782
+ all_args[tangents_start_idx:tangents_end_idx],
783
+ is_joint_structure=False,
784
+ )
785
+ )
786
+ all_args = unwrap_tensor_subclasses(all_args, is_joint_structure=False)
787
+ tangents_start_idx = len(all_args) - len_tangents - len(rng_args)
788
+ tangents_end_idx = tangents_start_idx + len_tangents
789
+
790
+ # Make the tangents contiguous. Note that we must do this after subclass desugaring
791
+ # because inputs to inductor have to be contiguous
792
+ all_args = [
793
+ t.contiguous()
794
+ if (
795
+ (tangents_start_idx <= i < tangents_end_idx)
796
+ and (not t.is_contiguous())
797
+ )
798
+ else t
799
+ for i, t in enumerate(all_args)
800
+ ]
801
+
802
+ def call_compiled_backward():
803
+ if ctx._is_compiled_autograd_tracing():
804
+ # For compiled autograd, run raw FX graph so that it can be inlined into the larger graph
805
+ symints = ctx._get_compiled_autograd_symints()
806
+ assert len(symints) == len(ctx.symints)
807
+ all_args[: len(symints)] = symints
808
+ if backward_state_indices:
809
+ assert ctx._compiled_autograd_backward_state.proxy is not None
810
+ all_args.append(ctx._compiled_autograd_backward_state)
811
+ context = torch._C._DisableAutocast if disable_amp else nullcontext
812
+ with context():
813
+ out = normalize_as_list(bw_module(*all_args))
814
+ out = functionalized_rng_runtime_epilogue(
815
+ CompiledFunction.metadata, out
816
+ )
817
+ return tuple(out)
818
+ assert (
819
+ not backward_state_indices
820
+ ), "BackwardState requires CompiledAutograd"
821
+ ctx.maybe_clear_saved_tensors()
822
+ if CompiledFunction.compiled_bw is None:
823
+ context = torch._C._DisableAutocast if disable_amp else nullcontext
824
+ with tracing(saved_context), context(), track_graph_compiling(
825
+ aot_config, "backward"
826
+ ):
827
+ CompiledFunction.compiled_bw = aot_config.bw_compiler(
828
+ bw_module, placeholder_list
829
+ )
830
+
831
+ out = call_func_at_runtime_with_args(
832
+ CompiledFunction.compiled_bw,
833
+ all_args,
834
+ steal_args=True,
835
+ disable_amp=disable_amp,
836
+ )
837
+
838
+ out = functionalized_rng_runtime_epilogue(
839
+ CompiledFunction.metadata, out
840
+ )
841
+ return tuple(out)
842
+
843
+ if torch.is_grad_enabled() and any(
844
+ t.requires_grad for t in all_args if isinstance(t, torch.Tensor)
845
+ ):
846
+ # Ensure that the graph is connected, and error if double backward is performed.
847
+ # See comment for why once_differentiable is not sufficient:
848
+ # https://github.com/pytorch/pytorch/pull/92348/files#r1072962107
849
+ class CompiledFunctionBackward(torch.autograd.Function):
850
+ # CompiledFunctionBackward is not yet supported in dynamo skipfiles
851
+ _compiled_autograd_should_lift = False
852
+
853
+ @staticmethod
854
+ def forward(ctx, *unused_args):
855
+ outs = call_compiled_backward()
856
+ # TODO: figure out how to refactor the backward properly so I can use aot_dispatch_subclass_wrapper() here.
857
+ if CompiledFunction.maybe_subclass_metadata is not None:
858
+ assert (
859
+ CompiledFunction.maybe_subclass_metadata.grad_input_metas
860
+ is not None
861
+ )
862
+ outs_wrapped = wrap_tensor_subclasses(
863
+ outs,
864
+ subclass_metas=CompiledFunction.maybe_subclass_metadata.grad_input_metas,
865
+ )
866
+ return outs_wrapped
867
+ return outs
868
+
869
+ @staticmethod
870
+ def backward(ctx, *args):
871
+ raise RuntimeError(
872
+ "torch.compile with aot_autograd does not currently support double backward"
873
+ )
874
+
875
+ CompiledFunctionBackward._compiled_autograd_key = ( # type: ignore[method-assign]
876
+ CompiledFunction._compiled_autograd_key
877
+ )
878
+
879
+ # Pass args even though they're unused, so that the graph is built
880
+ out = CompiledFunctionBackward.apply(*all_args)
881
+ else:
882
+ out = call_compiled_backward()
883
+
884
+ # TODO: figure out how to refactor the backward properly so I can use aot_dispatch_subclass_wrapper() here.
885
+ if CompiledFunction.maybe_subclass_metadata is not None:
886
+ assert (
887
+ CompiledFunction.maybe_subclass_metadata.grad_input_metas
888
+ is not None
889
+ )
890
+ outs_wrapped = wrap_tensor_subclasses(
891
+ out,
892
+ subclass_metas=CompiledFunction.maybe_subclass_metadata.grad_input_metas,
893
+ )
894
+ return outs_wrapped
895
+ return out
896
+
897
+ compiled_function = create_runtime_wrapper(
898
+ CompiledFunction.apply,
899
+ runtime_metadata=fw_metadata,
900
+ indices_of_inps_to_detach=_indices_of_inps_to_detach,
901
+ trace_joint=True,
902
+ keep_input_mutations=aot_config.keep_inference_input_mutations,
903
+ disable_amp=disable_amp,
904
+ )
905
+
906
+ if not config.debug_assert:
907
+ return compiled_function
908
+
909
+ flat_requires_grad = [
910
+ a.requires_grad if isinstance(a, Tensor) else None for a in flat_args
911
+ ]
912
+
913
+ @wraps(compiled_function)
914
+ def debug_compiled_function(*args):
915
+ # TODO: Check aliasing relationships
916
+ # TODO: Check strides for metadata mutation
917
+ # (NB: ideally, this logic is factored out of this function and
918
+ # you move these debug checks there)
919
+
920
+ # Check requires grad. Bad case is when we compiled with
921
+ # requires_grad = False, but input requires_grad = True
922
+ # (vice versa is OK; we compute a gradient and then throw
923
+ # it away when it hits the input.)
924
+ for i, a in enumerate(args):
925
+ can_require_grad = flat_requires_grad[i]
926
+ if can_require_grad is None:
927
+ assert not isinstance(a, Tensor)
928
+ elif not can_require_grad:
929
+ assert not a.requires_grad, format_guard_bug_msg(
930
+ aot_config,
931
+ f"{describe_input(i, aot_config)} would not require grad",
932
+ )
933
+
934
+ return compiled_function(*args)
935
+
936
+ return debug_compiled_function
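
For reference, here is a minimal standalone sketch of the requires_grad guard that debug_compiled_function checks above. The helper names are illustrative, not the AOT internals: the point is only that a function compiled under the assumption that an input does not require grad must not later be fed one that does.

    import torch

    def compile_under_assumption(fn, example_input):
        # Record the requires_grad assumption baked in at "compile" time.
        assumed_requires_grad = example_input.requires_grad

        def compiled(x):
            # The bad case mirrored from the debug check above:
            # compiled with requires_grad=False, called with requires_grad=True.
            assert not (x.requires_grad and not assumed_requires_grad), (
                "input was assumed not to require grad at compile time, but does at runtime"
            )
            return fn(x)

        return compiled

    f = compile_under_assumption(lambda x: x * 2, torch.randn(3))
    f(torch.randn(3))                        # fine
    # f(torch.randn(3, requires_grad=True))  # would trip the guard
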
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/logging_utils.py ADDED
@@ -0,0 +1,135 @@
1
+ """
2
+ Contains utils for logging in AOTAutograd, including managing the names of the graphs under
3
+ compilation, capturing user-friendly tracebacks, and debug messages.
4
+ """
5
+
6
+ import collections
7
+ from contextlib import contextmanager
8
+ from typing import List, Tuple
9
+
10
+ import torch
11
+ import torch.fx.traceback as fx_traceback
12
+
13
+ # This is a list since looking forward, we can have this arbitrarily nested.
14
+ graph_being_compiled: List[str] = []
15
+ # TODO: It would be nice to reset the numbering every time aot_id goes
16
+ # up, but this is annoying to do right now (because we don't know if
17
+ # an aot_id will come back from the dead), so right now this also happens
18
+ # to be a globally unique number too (at the cost of wobbling if you change
19
+ # how the graphs compile)
20
+ nth_graph: int = 0
21
+ model_name: str = "model"
22
+
23
+
24
+ def set_model_name(name):
25
+ global model_name
26
+ model_name = name
27
+
28
+
29
+ def get_aot_compilation_context() -> Tuple[List[str], str, int]:
30
+ return list(graph_being_compiled), model_name, nth_graph
31
+
32
+
33
+ def get_aot_graph_name() -> str:
34
+ """
35
+ Returns the name of the graph being compiled.
36
+ """
37
+ global model_name, graph_being_compiled, nth_graph
38
+ return f"{model_name}__{'_'.join(graph_being_compiled)}_{nth_graph}"
39
+
40
+
41
+ get_graph_being_compiled = get_aot_graph_name
42
+
43
+
44
+ @contextmanager
45
+ def track_graph_compiling(aot_config, graph_name):
46
+ global graph_being_compiled
47
+ # TODO: Don't shove the aot_id in here; set it in the context
48
+ graph_being_compiled = [f"{aot_config.aot_id}_{graph_name}"]
49
+ try:
50
+ yield
51
+ finally:
52
+ global nth_graph
53
+ nth_graph += 1
54
+ graph_being_compiled = []
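
A hedged usage sketch of the naming helpers above. The SimpleNamespace below is a stand-in that only carries the aot_id field these helpers read; the real AOTConfig has many more fields, and the printed values depend on how many graphs have been compiled so far.

    from types import SimpleNamespace
    from torch._functorch._aot_autograd.logging_utils import (
        get_aot_compilation_context,
        get_aot_graph_name,
        set_model_name,
        track_graph_compiling,
    )

    set_model_name("resnet_demo")
    fake_config = SimpleNamespace(aot_id=0)  # stand-in for AOTConfig
    with track_graph_compiling(fake_config, "forward"):
        print(get_aot_graph_name())          # e.g. "resnet_demo__0_forward_0"
    # After the context exits, nth_graph was bumped and the stack cleared.
    print(get_aot_compilation_context())     # e.g. ([], "resnet_demo", 1)
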
55
+
56
+
57
+ # Set up hooks so that during backward the fx's stack_trace is properly set
58
+ callback_set = False
59
+
60
+
61
+ def setup_stacktrace_preservation_hooks(roots: List):
62
+ def iter_graph(roots):
63
+ if not roots:
64
+ return
65
+ seen = set()
66
+ q = collections.deque() # type: ignore[var-annotated]
67
+ for node in roots:
68
+ if node is not None and node not in seen:
69
+ seen.add(node)
70
+ q.append(node)
71
+
72
+ while q:
73
+ node = q.popleft()
74
+ for fn, _idx in node.next_functions:
75
+ if fn in seen or fn is None:
76
+ continue
77
+ seen.add(fn)
78
+ q.append(fn)
79
+
80
+ yield node
81
+
82
+ def get_callback(saved_stack_):
83
+ def callback():
84
+ global callback_set
85
+ fx_traceback.set_stack_trace(saved_stack_)
86
+ callback_set = False
87
+
88
+ return callback
89
+
90
+ def get_prehook(stack_, seq_nr):
91
+ def prehook(grad_output):
92
+ global callback_set
93
+
94
+ if not callback_set:
95
+ torch.autograd.variable.Variable._execution_engine.queue_callback( # type: ignore[attr-defined]
96
+ get_callback(fx_traceback.format_stack())
97
+ )
98
+ callback_set = True
99
+
100
+ fx_traceback.set_stack_trace(stack_)
101
+ fx_traceback.set_grad_fn_seq_nr(seq_nr)
102
+
103
+ return prehook
104
+
105
+ def get_posthook(special_stack_, seq_nr):
106
+ def posthook(grad_input, grad_output):
107
+ fx_traceback.set_stack_trace(special_stack_)
108
+ fx_traceback.reset_grad_fn_seq_nr()
109
+
110
+ return posthook
111
+
112
+ for node in iter_graph(roots):
113
+ forward_node_stack = node.metadata.get("traceback_", [])
114
+ node.register_prehook(get_prehook(forward_node_stack, node._sequence_nr()))
115
+
116
+ special_stack = forward_node_stack.copy()
117
+ special_stack.append(
118
+ "Gradient addition node due to multiple use of tensor around:"
119
+ )
120
+ node.register_hook(get_posthook(special_stack, node._sequence_nr()))
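
A small usage sketch of the hook installation above, assuming this module import path: the grad_fn nodes of the forward outputs act as the roots of the backward graph, and the registered pre/post hooks fire while backward runs.

    import torch
    from torch._functorch._aot_autograd.logging_utils import (
        setup_stacktrace_preservation_hooks,
    )

    x = torch.randn(3, requires_grad=True)
    loss = (x * 2).sum()
    # grad_fn nodes of the outputs are the roots of the backward graph.
    setup_stacktrace_preservation_hooks([loss.grad_fn])
    loss.backward()  # pre/post hooks fire during this backward
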
121
+
122
+
123
+ def describe_input(i, aot_config):
124
+ if i < aot_config.num_params_buffers:
125
+ return f"parameter/buffer {i}"
126
+ else:
127
+ return f"input {i - aot_config.num_params_buffers}"
128
+
129
+
130
+ def format_guard_bug_msg(aot_config, expected):
131
+ return (
132
+ f"At compilation time, graph {aot_config.aot_id} was compiled under the "
133
+ f"assumption that {expected}, but at runtime this was not the case. "
134
+ "This indicates a guard bug in AOTAutograd or Dynamo, please file a bug to PyTorch."
135
+ )
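
And a tiny illustration of the two debug-message helpers; the SimpleNamespace below is a stand-in exposing only the fields they read (aot_id, num_params_buffers).

    from types import SimpleNamespace
    from torch._functorch._aot_autograd.logging_utils import (
        describe_input,
        format_guard_bug_msg,
    )

    cfg = SimpleNamespace(aot_id=3, num_params_buffers=2)
    print(describe_input(1, cfg))   # "parameter/buffer 1"
    print(describe_input(4, cfg))   # "input 2"
    print(format_guard_bug_msg(cfg, "input 2 would not require grad"))
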
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/runtime_wrappers.py ADDED
@@ -0,0 +1,1021 @@
1
+ """
2
+ This module defines runtime wrappers, which, based on previous analysis, attempt to:
3
+ 1. process the inputs and outputs
4
+ 2. apply mutations
5
+ 3. handle functionalized randomness
6
+ 4. deduplicate inputs and consolidate views into their bases (see input_output_analysis)
7
+ """
8
+
9
+ import collections
10
+ import pprint
11
+ from functools import wraps
12
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
13
+
14
+ import torch
15
+ import torch.utils.dlpack
16
+ from torch import Tensor
17
+ from torch._guards import DuplicateInputs, TracingContext
18
+ from torch._prims_common import CUDARngStateHelper
19
+ from torch.multiprocessing.reductions import StorageWeakRef
20
+ from .. import config
21
+ from .collect_metadata_analysis import run_functionalized_fw_and_collect_metadata
22
+
23
+ from .functional_utils import gen_alias_from_base
24
+ from .input_output_analysis import (
25
+ compute_overlapping_inputs,
26
+ create_synthetic_base_metadata,
27
+ remove_dupe_metadata,
28
+ )
29
+ from .logging_utils import describe_input, format_guard_bug_msg
30
+ from .schemas import (
31
+ AOTConfig,
32
+ InputAliasInfo,
33
+ OutputType,
34
+ SubclassCreationMeta,
35
+ TensorAlias,
36
+ ViewAndMutationMeta,
37
+ )
38
+ from .subclass_utils import (
39
+ requires_subclass_dispatch,
40
+ unwrap_tensor_subclasses,
41
+ wrap_tensor_subclasses,
42
+ )
43
+
44
+ from .utils import (
45
+ call_func_at_runtime_with_args,
46
+ make_boxed_func,
47
+ partial_flatten_asdict,
48
+ strict_zip,
49
+ )
50
+
51
+
52
+ zip = strict_zip
53
+
54
+
55
+ # The wrapper created by this function handles all of the runtime aliasing and mutation "epilogue" logic
56
+ # that needs to run after the compiled function.
57
+ #
58
+ # This function accepts a trace_joint flag, indicating whether or not we're generating the runtime
59
+ # epilogue for a forward-only inference graph, or for an autograd.Function.apply function.
60
+ # This is because there are some minor differences in how we treat these cases at runtime:
61
+ # - resize_() is currently handled in the inference case, but not fully handled in the autograd case.
62
+ # - the autograd case inserts TensorAlias wrapper objects for outputs that alias inputs
63
+ def create_runtime_wrapper(
64
+ compiled_fn,
65
+ *,
66
+ runtime_metadata: ViewAndMutationMeta,
67
+ indices_of_inps_to_detach: List[int],
68
+ trace_joint: bool,
69
+ keep_input_mutations: bool,
70
+ disable_amp: bool,
71
+ ):
72
+ num_tokens = len(runtime_metadata.tokens)
73
+
74
+ if not hasattr(compiled_fn, "_boxed_call"):
75
+ compiled_fn = make_boxed_func(compiled_fn)
76
+
77
+ def runtime_wrapper(*args):
78
+ # Pass in effect tokens (See Note [Side-Effectful Tokens in AOTAutograd])
79
+ args = (*[torch.tensor([])] * num_tokens, *args)
80
+
81
+ if trace_joint:
82
+ args_ = list(args)
83
+ # See Note [Detaching inputs that never need gradients]
84
+ for idx in indices_of_inps_to_detach:
85
+ if isinstance(args_[idx], torch.Tensor):
86
+ args_[idx] = args_[idx].detach()
87
+ with torch.autograd._force_original_view_tracking(True):
88
+ all_outs = call_func_at_runtime_with_args(
89
+ compiled_fn,
90
+ args_,
91
+ disable_amp=disable_amp,
92
+ )
93
+ else:
94
+ # When we have an inference graph, we run with torch.no_grad.
95
+ # It's possible to get an inference graph with inputs that require grad,
96
+ # in which case we want to make sure autograd is disabled
97
+ # (since e.g., inductor will generate aten.addmm.out calls which autograd will complain on)
98
+ if torch.is_grad_enabled():
99
+ with torch.no_grad():
100
+ all_outs = call_func_at_runtime_with_args(
101
+ compiled_fn,
102
+ args,
103
+ disable_amp=disable_amp,
104
+ )
105
+ else:
106
+ all_outs = call_func_at_runtime_with_args(
107
+ compiled_fn,
108
+ args,
109
+ disable_amp=disable_amp,
110
+ )
111
+
112
+ num_mutated_runtime_inps = runtime_metadata.num_mutated_inp_runtime_indices
113
+ num_intermediate_bases = runtime_metadata.num_intermediate_bases
114
+
115
+ if keep_input_mutations and trace_joint:
116
+ num_input_mutations_handled_by_autograd = (
117
+ runtime_metadata.num_mutated_graph_handled_indices_seen_by_autograd
118
+ )
119
+ # autograd.Function requires us to return the mutated inputs as extra outputs to the autograd.Function.forward
120
+ if num_input_mutations_handled_by_autograd > 0:
121
+ all_outs = all_outs[:-num_input_mutations_handled_by_autograd]
122
+
123
+ assert (
124
+ len(all_outs)
125
+ == num_mutated_runtime_inps
126
+ + runtime_metadata.num_outputs
127
+ + num_intermediate_bases
128
+ + num_tokens
129
+ )
130
+
131
+ # Toss out the effect tokens (See Note [Side-Effectful Tokens in AOTAutograd])
132
+ all_outs = all_outs[num_tokens:]
133
+
134
+ # Step 3: After running the compiled fw, apply updates to mutated inputs
135
+ num_mutations_to_apply = runtime_metadata.num_mutated_inp_runtime_indices
136
+ if num_mutations_to_apply > 0:
137
+ updated_inputs = all_outs[:num_mutations_to_apply]
138
+ fw_outs = all_outs[num_mutations_to_apply:]
139
+
140
+ for i, inpt_idx in enumerate(runtime_metadata.mutated_inp_runtime_indices):
141
+ meta = runtime_metadata.input_info[inpt_idx]
142
+ if not meta.mutates_data and not meta.mutates_metadata:
143
+ continue
144
+ original_inpt = args[inpt_idx]
145
+ updated_inpt = updated_inputs[i]
146
+ if meta.mutates_storage_metadata:
147
+ # mutates_storage_metadata means our input saw a x.set_(y) call.
148
+ # What if x **also** saw a data and/or a metadata mutation?
149
+ # (1) If the [meta]data mutation occurred after the set_(),
150
+ # then there is no need to copy_() the data.
151
+ # When we perform x.set_(x_updated), we are guaranteed that
152
+ # x_updated already has the final version of the data/metadata
153
+ # (2) If a data mutation occurred before the set_().
154
+ # This case seems very difficult to support.
155
+ # TODO: discuss on the PR and decide if we want to try to
156
+ # either support it, or detect and ban it.
157
+ if trace_joint:
158
+ assert isinstance(updated_inpt, TensorAlias)
159
+ updated_inpt = updated_inpt.alias
160
+ with torch.no_grad():
161
+ original_inpt.set_(updated_inpt)
162
+ continue
163
+ if meta.mutates_metadata and not meta.mutates_data:
164
+ if trace_joint:
165
+ assert isinstance(updated_inpt, TensorAlias)
166
+ updated_inpt = updated_inpt.alias
167
+ # We need to grab the size/stride/storage_offset from the compiled forward,
168
+ # and use that to mutate the metadata of the input
169
+ original_inpt.as_strided_(
170
+ updated_inpt.size(),
171
+ updated_inpt.stride(),
172
+ updated_inpt.storage_offset(),
173
+ )
174
+ else:
175
+ if meta.mutates_data and meta.mutates_metadata:
176
+ original_inpt.as_strided_(
177
+ updated_inpt.size(),
178
+ updated_inpt.stride(),
179
+ updated_inpt.storage_offset(),
180
+ )
181
+ else:
182
+ assert meta.mutates_data
183
+ if meta.is_leaf and original_inpt.requires_grad:
184
+ # We can hit this situation in this case:
185
+ # def f(x):
186
+ # x.detach().mul_(2)
187
+ # return x + 1
188
+ # AOTAutograd will see a mutation in the above case, and try to
189
+ # apply a copy_() here, in the epilogue.
190
+ # But if x required gradients, and is a leaf, then autograd
191
+ # will yell at us for trying to mutate it.
192
+ # However, it's only possible to end up in this scenario (like the above)
193
+ # if all of the mutations to the leaf input were non-autograd-tracking mutations
194
+ # (aka mutations under no_grad(), or on detached views).
195
+ # In that case, we fully want to hide the mutation from autograd, so detaching is ok.
196
+ original_inpt.detach().copy_(updated_inpt)
197
+ else:
198
+ original_inpt.copy_(updated_inpt)
199
+ else:
200
+ fw_outs = all_outs
201
+
202
+ # Step 4: Manually regenerate any outputs that are aliased to inputs, instead of
203
+ # compiling them.
204
+ if runtime_metadata.num_outputs_aliased > 0:
205
+ # The compiled forward also returned intermediate bases. We don't want to return them to the user.
206
+ if runtime_metadata.num_intermediate_bases > 0:
207
+ fw_outs_no_intermediate_bases = fw_outs[
208
+ : -runtime_metadata.num_intermediate_bases
209
+ ]
210
+ intermediate_bases = fw_outs[-runtime_metadata.num_intermediate_bases :]
211
+ else:
212
+ fw_outs_no_intermediate_bases = fw_outs
213
+ intermediate_bases = []
214
+
215
+ assert len(fw_outs_no_intermediate_bases) == len(
216
+ runtime_metadata.output_info
217
+ )
218
+ fw_outs_including_aliases = []
219
+ for i, (o, info) in enumerate(
220
+ zip(fw_outs_no_intermediate_bases, runtime_metadata.output_info)
221
+ ):
222
+ if info.output_type in [
223
+ OutputType.non_alias,
224
+ OutputType.unsafe_view_alias,
225
+ OutputType.custom_function_view,
226
+ ]:
227
+ fw_outs_including_aliases.append(o)
228
+ continue
229
+ if trace_joint:
230
+ assert isinstance(o, TensorAlias)
231
+ o_ = o.alias
232
+ else:
233
+ o_ = o
234
+
235
+ o_grad = runtime_metadata.output_info[i].requires_grad
236
+ if info.output_type == OutputType.alias_of_input:
237
+ aliased_base_tensor = args[info.base_idx] # type: ignore[index]
238
+ regenerated_out = gen_alias_from_base(
239
+ aliased_base_tensor, o_, o_grad
240
+ )
241
+ fw_outs_including_aliases.append(regenerated_out)
242
+ continue
243
+ elif info.output_type == OutputType.is_input:
244
+ aliased_base_tensor = args[info.base_idx] # type: ignore[index]
245
+ regenerated_out = aliased_base_tensor
246
+ fw_outs_including_aliases.append(regenerated_out)
247
+ continue
248
+ elif info.output_type == OutputType.alias_of_intermediate:
249
+ base_tensor_list = intermediate_bases
250
+ elif (
251
+ info.output_type == OutputType.alias_of_intermediate_save_as_output
252
+ ):
253
+ base_tensor_list = intermediate_bases
254
+ else:
255
+ assert (
256
+ info.output_type
257
+ == OutputType.alias_of_intermediate_base_is_user_output
258
+ )
259
+ base_tensor_list = fw_outs_no_intermediate_bases
260
+ aliased_base_tensor = base_tensor_list[info.base_idx]
261
+ # TODO: handle the custom autograd function case here.
262
+ # We need a way to check whether a tensor came from a custom autograd fn from python,
263
+ # AND a way to replay that custom view fn.
264
+ regenerated_out = gen_alias_from_base(aliased_base_tensor, o_, o_grad)
265
+ fw_outs_including_aliases.append(regenerated_out)
266
+ ret_outs = fw_outs_including_aliases
267
+ else:
268
+ ret_outs = fw_outs
269
+
270
+ if runtime_metadata.dynamic_outputs:
271
+ for t, o in zip(ret_outs, runtime_metadata.output_info):
272
+ if o.dynamic_dims is None:
273
+ continue
274
+ if hasattr(t, "_dynamo_weak_dynamic_indices"):
275
+ t._dynamo_weak_dynamic_indices |= o.dynamic_dims
276
+ else:
277
+ t._dynamo_weak_dynamic_indices = o.dynamic_dims.copy()
278
+ if runtime_metadata.grad_enabled_mutation is not None:
279
+ torch.set_grad_enabled(runtime_metadata.grad_enabled_mutation)
280
+ return ret_outs
281
+
282
+ return runtime_wrapper
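
A minimal, self-contained sketch (not the AOT implementation) of the epilogue idea in runtime_wrapper above: the compiled graph is functional and returns updated inputs as extra outputs, and the wrapper copies them back into the caller's real inputs.

    import torch

    def toy_compiled_fn(args):
        x, y = args
        x_updated = x * 2          # functionalized form of x.mul_(2)
        out = x_updated + y
        return [x_updated, out]    # [updated inputs..., user outputs...]

    def toy_runtime_wrapper(x, y):
        x_updated, out = toy_compiled_fn([x, y])
        x.copy_(x_updated)         # epilogue: apply the data mutation to the real input
        return out

    a, b = torch.ones(3), torch.ones(3)
    out = toy_runtime_wrapper(a, b)
    assert torch.equal(a, torch.full((3,), 2.0))   # the caller sees the mutation
    assert torch.equal(out, torch.full((3,), 3.0))
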
283
+
284
+
285
+ # Calling convention: If we are running functionalized RNG, then outs consists
286
+ # of (user_outs, rng_offset)
287
+ def functionalized_rng_runtime_epilogue(
288
+ metadata: ViewAndMutationMeta, outs, return_new_outs=True
289
+ ):
290
+ if metadata.is_rng_op_functionalized:
291
+ assert metadata.num_outputs_rng_offset == 1
292
+ new_rng_offset = outs[-1]
293
+ CUDARngStateHelper.set_new_offset(new_rng_offset)
294
+ if return_new_outs:
295
+ user_outs = outs[:-1]
296
+ return user_outs
297
+ else:
298
+ return None
299
+ return outs
300
+
301
+
302
+ # This wrapper handles the AOTDispatch runtime logic for tensor subclasses.
303
+ # At runtime, we have a compiled function that knows how to operate on the domain of DenseTensor -> DenseTensor,
304
+ # But the user might have passed us some tensor subclass inputs (or expect some subclass tensor outputs).
305
+ # This function handles the wrapping and unwrapping of tensor subclasses at runtime.
306
+ def aot_dispatch_subclass_wrapper(
307
+ runtime_fn: Callable,
308
+ *,
309
+ subclass_metas: List[Union[int, SubclassCreationMeta]],
310
+ num_fw_outs_saved_for_bw: Optional[int],
311
+ ) -> Callable:
312
+ def inner_fn(args):
313
+ unwrapped_args = unwrap_tensor_subclasses(args, is_joint_structure=False)
314
+ # expectation: runtime_fn is a boxed fn
315
+ unwrapped_outs = runtime_fn(unwrapped_args)
316
+ wrapped_outs = wrap_tensor_subclasses(
317
+ unwrapped_outs,
318
+ subclass_metas=subclass_metas,
319
+ num_fw_outs_saved_for_bw=num_fw_outs_saved_for_bw,
320
+ is_runtime=True,
321
+ )
322
+ return wrapped_outs
323
+
324
+ # box it
325
+ inner_fn._boxed_call = True # type: ignore[attr-defined]
326
+ return inner_fn
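
A hedged sketch of the "boxed" calling convention used above (and by make_boxed_func): a boxed function receives a single mutable list of args, so callers can hand over ownership and callees can clear the list to free inputs early.

    import torch

    def boxed_add(args):             # boxed: a single mutable list of args
        a, b = args
        args.clear()                 # callees may clear/steal args to free memory early
        return [a + b]

    boxed_add._boxed_call = True     # marker checked via hasattr(fn, "_boxed_call")

    inputs = [torch.ones(2), torch.ones(2)]
    (out,) = boxed_add(inputs)
    assert not inputs                # the list was emptied by the callee
    assert torch.equal(out, torch.full((2,), 2.0))
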
327
+
328
+
329
+ # MOTIVATION:
330
+ #
331
+ # When tracing functions for future execution, one must be careful not to pass
332
+ # in the same input tensor multiple times (e.g., f(x, x), as this can result
333
+ # in graphs that are ONLY valid if you later pass a new tensor in exactly the
334
+ # same way (e.g., f(y, y)). (NB: we really mean duplicate; two distinct
335
+ # tensors that alias each other is a different situation that is covered by
336
+ # aot_dispatch_deduplicated_autograd). Here are two examples:
337
+ #
338
+ # (1) Suppose you have a function:
339
+ #
340
+ # def f(x, y):
341
+ # return x + y
342
+ #
343
+ # If you make_fx(f)(x, x), you will trace out:
344
+ #
345
+ # def f(x, y):
346
+ # return y + y
347
+ #
348
+ # Oops!
349
+ #
350
+ # (2) For most tensors x and y, you can compute f's gradient with respect to
351
+ # these two inputs by saying torch.autograd.grad(f(x, y), (x, y)). However,
352
+ # if x is y, you will trace out a program that gets incorrect gradients:
353
+ #
354
+ # >>> x = torch.randn(1, requires_grad=True)
355
+ # >>> torch.autograd.grad(x + x, (x, x))
356
+ # (tensor([2.]), tensor([2.]))
357
+ #
358
+ # In other words, the gradient is double-counted. Deduplicating the arguments
359
+ # gives you an appropriate gradient:
360
+ #
361
+ # >>> y = torch.randn(1, requires_grad=True)
362
+ # >>> torch.autograd.grad(x + y, (x, y))
363
+ # (tensor([1.]), tensor([1.]))
364
+ #
365
+ # HOW TO DEDUPLICATE:
366
+ #
367
+ # There are a few strategies, in order of preference:
368
+ #
369
+ # 1. For every duplicate argument to the function, detach it into
370
+ # a separate leaf tensor, so that it is no longer duplicated.
371
+ #
372
+ # PRO: The resulting compiled graph works for any configuration
373
+ # of duplicated arguments.
374
+ #
375
+ # CON: It does not (naively) work if you mutate the metadata of inputs:
376
+ #
377
+ # def f(x, y):
378
+ # x.transpose_(0, 1)
379
+ # y.transpose_(0, 2)
380
+ #
381
+ # x = torch.randn(2, 3, 4)
382
+ # f(x, x)
383
+ #
384
+ # The ordering of the transposes inside f dictates whether or not
385
+ # you get [4, 2, 3] or [3, 4, 2]. This means that you cannot precompute
386
+ # what metadata mutations should get applied to each input; you need to
387
+ # assume they aren't duplicates (what we do today) or preserve
388
+ # the original metadata mutations exactly in order, so that they work
389
+ # for any duplicate configuration.
390
+ #
391
+ # CON: It does not (naively) work if you mutate the data of inputs.
392
+ # In particular, leaf tensors that require grad cannot be mutated,
393
+ # this makes it impossible to differentiate with respect to the original
394
+ # base.
395
+ #
396
+ # 2. For every duplicate argument to the function, remove it, so it is
397
+ # no longer part of the "true" signature:
398
+ #
399
+ # PRO: Implemented naively, it still works for metadata/data mutation.
400
+ #
401
+ # CON: The resulting compiled graph is duplicate-specialized: it only
402
+ # works if future calls duplicate arguments in exactly the same way.
403
+ # Horribly, Dynamo doesn't guard on this at the moment. But even if
404
+ # it did, you could still end up recompiling a bunch of each duplicate.
405
+ #
406
+ # Our strategy is to do (1) if we can, and do (2) otherwise, erroring if
407
+ # Dynamo's guards are not enough. In practice, this seems to cover
408
+ # everything.
409
+ #
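
The double-counting in example (2) is easy to reproduce directly; the numbers follow from d(x + x)/dx = 2 versus d(x + y)/dx = 1.

    import torch

    x = torch.randn(1, requires_grad=True)
    # Duplicated input: each returned gradient is the full d(x + x)/dx = 2.
    print(torch.autograd.grad(x + x, (x, x)))   # (tensor([2.]), tensor([2.]))

    y = torch.randn(1, requires_grad=True)
    # Deduplicated inputs: each gradient is 1, as expected.
    print(torch.autograd.grad(x + y, (x, y)))   # (tensor([1.]), tensor([1.]))
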
410
+ def aot_wrapper_dedupe(
411
+ flat_fn,
412
+ flat_args: List[Tensor],
413
+ aot_config: AOTConfig,
414
+ *,
415
+ compiler_fn,
416
+ fw_metadata,
417
+ ):
418
+ # Use information about whether or not flat_fn mutates its arguments
419
+ # to handle dupe args
420
+
421
+ # Strategy 1: For any input that is not mutated, we can leafify it if we
422
+ # need to remove a duplicate.
423
+ leaf_flat_args = []
424
+ args_set = set()
425
+ ok = True
426
+
427
+ for i, a in enumerate(flat_args):
428
+ if not isinstance(a, torch.Tensor):
429
+ leaf_flat_args.append(a)
430
+ elif a not in args_set:
431
+ args_set.add(a)
432
+ leaf_flat_args.append(a)
433
+ elif (
434
+ not fw_metadata.input_info[i].mutates_data
435
+ and not fw_metadata.input_info[i].mutates_metadata
436
+ ):
437
+ leaf_flat_args.append(a.detach().requires_grad_(a.requires_grad))
438
+ else:
439
+ ok = False
440
+ break
441
+
442
+ if ok:
443
+ return compiler_fn(flat_fn, leaf_flat_args, aot_config, fw_metadata=fw_metadata)
444
+
445
+ if requires_subclass_dispatch(leaf_flat_args, fw_metadata):
446
+ raise RuntimeError(
447
+ """\
448
+ Encountered duplicate inputs that are mutated in the graph, but at least one input/output
449
+ to the graph is a tensor subclass. This is not supported today. You can try to
450
+ remove the aliasing yourself as a workaround, or otherwise file an issue on github."""
451
+ )
452
+
453
+ # export path: ban duplicate inputs for now, add later if requested.
454
+ if aot_config.is_export:
455
+ raise RuntimeError(
456
+ f"""\
457
+ Encountered duplicated inputs that are mutated in the graph you are trying to export.
458
+ This functionality is currently not supported. If needed, please file a github issue.
459
+
460
+ fw_metadata={str(fw_metadata)}
461
+ """
462
+ )
463
+
464
+ # Strategy 2: Duplicate specialize.
465
+ #
466
+ # In Haskell types, suppose you have:
467
+ #
468
+ # add_dupe_args :: DedupedArgs -> Args
469
+ # remove_dupe_args :: Args -> DedupedArgs
470
+ #
471
+ # compiler_fn
472
+ # :: (DedupedArgs -> R) -> DedupedArgs -> AOTConfig -> (DedupedArgs -> R)
473
+ # deduped_compiler_fn
474
+ # :: (Args -> R) -> Args -> AOTConfig -> (Args -> R)
475
+ #
476
+ # Then the code below can be written in point-free style as:
477
+ #
478
+ # deduped_compiler_fn f a c =
479
+ # compiler_fn (f . add_dupe_args) (remove_dupe_args a) c . remove_dupe_args
480
+ #
481
+ # Suppose you have:
482
+ #
483
+ # [a, b, a, c]
484
+ #
485
+ # We want:
486
+ #
487
+ # remove_dupe_args([a, b, a, c]) == [a, b, c]
488
+ # add_dupe_args([a, b, c]) == [a, b, a, c]
489
+ #
490
+ # This is done via (respectively):
491
+ #
492
+ # seen_args = {a: 0, b: 1, c: 2}
493
+ # enumerate(add_dupe_map) = [ # how to get args from the deduped list
494
+ # (0, 0),
495
+ # (1, 1),
496
+ # (2, 0),
497
+ # (3, 2),
498
+ # ]
499
+ # keep_arg_mask = [True, True, False, True]
500
+
501
+ seen_args: Dict[Tensor, int] = {}
502
+ keep_arg_mask = []
503
+ # Implicitly map duped arg position (list index) to de-duped arg position
504
+ add_dupe_map: List[int] = []
505
+ duped_arg_len = len(flat_args)
506
+
507
+ j = 0 # index into deduped_flat_args
508
+ for t in flat_args:
509
+ if isinstance(t, torch.Tensor):
510
+ if t in seen_args:
511
+ keep_arg_mask.append(False)
512
+ add_dupe_map.append(seen_args[t])
513
+ continue
514
+ seen_args[t] = j
515
+
516
+ keep_arg_mask.append(True)
517
+ add_dupe_map.append(j)
518
+ j += 1
519
+ assert (
520
+ len(add_dupe_map) == duped_arg_len
521
+ ), f"Expects add_dupe_map to have length {duped_arg_len} but got {len(add_dupe_map)}"
522
+
523
+ # NB: Hot path, avoid set lookups here
524
+ # TODO: Can avoid the zip here too, probably
525
+ def remove_dupe_args(args):
526
+ return [t for t, keep in zip(args, keep_arg_mask) if keep]
527
+
528
+ def add_dupe_args(args):
529
+ return [args[add_dupe_map[i]] for i in range(duped_arg_len)]
530
+
531
+ deduped_flat_args = remove_dupe_args(flat_args)
532
+
533
+ # Update our input metadata to remove duped input metadata.
534
+ updated_fw_metadata = remove_dupe_metadata(fw_metadata, keep_arg_mask, add_dupe_map)
535
+
536
+ if (
537
+ tracing_context := TracingContext.try_get()
538
+ and aot_config.aot_autograd_arg_pos_to_source
539
+ ):
540
+ # TODO(voz): This structure is 1:1, we could consider an alternate structure like
541
+ # kept_pos:[dupe_arg_pos], however, add_dupe_map is 1:1 so we would need a new structure there,
542
+ # which feels like needless complexity for a tiny bit of efficiency at this point.
543
+ for dupe_arg_pos, (kept_pos, keep_arg) in enumerate(
544
+ zip(add_dupe_map, keep_arg_mask)
545
+ ):
546
+ if not keep_arg:
547
+ dupe_arg_source = aot_config.aot_autograd_arg_pos_to_source[
548
+ dupe_arg_pos
549
+ ]
550
+ kept_arg_source = aot_config.aot_autograd_arg_pos_to_source[kept_pos]
551
+ tracing_context.guards_context.aotautograd_guards.append( # type: ignore[attr-defined]
552
+ DuplicateInputs(kept_arg_source, dupe_arg_source)
553
+ )
554
+
555
+ @wraps(flat_fn)
556
+ def wrapped_flat_fn(*args):
557
+ return flat_fn(*add_dupe_args(args))
558
+
559
+ if config.debug_assert:
560
+ ref_fw_metadata = run_functionalized_fw_and_collect_metadata(
561
+ wrapped_flat_fn,
562
+ keep_input_mutations=fw_metadata.keep_input_mutations,
563
+ is_train=fw_metadata.is_train,
564
+ )(*deduped_flat_args)
565
+ assert (
566
+ ref_fw_metadata == updated_fw_metadata
567
+ ), f"ref_metadata={str(ref_fw_metadata)}, actual_metadata={str(updated_fw_metadata)}"
568
+
569
+ compiled_fn = compiler_fn(
570
+ wrapped_flat_fn, deduped_flat_args, aot_config, fw_metadata=updated_fw_metadata
571
+ )
572
+
573
+ if not hasattr(compiled_fn, "_boxed_call"):
574
+ compiled_fn = make_boxed_func(compiled_fn)
575
+
576
+ @wraps(compiled_fn)
577
+ def wrapped_compiled_fn(args):
578
+ deduped_args = remove_dupe_args(args)
579
+ args.clear()
580
+ return compiled_fn(deduped_args)
581
+
582
+ wrapped_compiled_fn._boxed_call = True # type: ignore[attr-defined]
583
+
584
+ # This can be uncommented when we properly guard for duplicates,
585
+ # but right now we must not do it.
586
+ # if not config.debug_assert:
587
+ # return wrapped_compiled_fn
588
+
589
+ @wraps(wrapped_compiled_fn)
590
+ def debugged_compiled_fn(args):
591
+ # Test that the computed remove/add arg functions are an inverse
592
+ new_args = add_dupe_args(remove_dupe_args(args))
593
+ seen: Dict[Any, None] = {}
594
+ for i, (x, y) in enumerate(zip(new_args, args)):
595
+ seen[y] = None
596
+ assert x is y, format_guard_bug_msg(
597
+ aot_config,
598
+ f"{describe_input(i, aot_config)} would be a duplicate of "
599
+ f"{describe_input(add_dupe_map[i], aot_config)}",
600
+ )
601
+ # This is only an error if there is metadata mutation on both of
602
+ # the duped arguments; in this case, we need to know what order
603
+ # the metadata mutation applies in. You'll get the correct result
604
+ # otherwise, because a graph that assumes distinct inputs works if
605
+ # you dupe the inputs (the gradient contributions from each input
606
+ # will get summed up appropriately).
607
+ #
608
+ # TODO: work out how to setup this assert correctly
609
+ """
610
+ assert len(seen) == unique_args, format_guard_bug_msg(aot_config,
611
+ f"there would be {unique_args} distinct arguments"
612
+ )
613
+ """
614
+ return wrapped_compiled_fn(args)
615
+
616
+ debugged_compiled_fn._boxed_call = True # type: ignore[attr-defined]
617
+
618
+ return debugged_compiled_fn
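
A standalone sketch of the dedup bookkeeping built inside aot_wrapper_dedupe above, for args [a, b, a, c]: keep_arg_mask marks the first occurrence of each tensor, and add_dupe_map rebuilds the original (duplicated) arg list from the deduped one.

    import torch

    a, b, c = torch.zeros(1), torch.ones(1), torch.full((1,), 2.0)
    flat_args = [a, b, a, c]

    seen, keep_arg_mask, add_dupe_map, j = {}, [], [], 0
    for t in flat_args:
        if t in seen:                       # duplicate: drop it, remember its source index
            keep_arg_mask.append(False)
            add_dupe_map.append(seen[t])
        else:
            seen[t] = j
            keep_arg_mask.append(True)
            add_dupe_map.append(j)
            j += 1

    deduped = [t for t, keep in zip(flat_args, keep_arg_mask) if keep]  # [a, b, c]
    rebuilt = [deduped[i] for i in add_dupe_map]                        # [a, b, a, c]
    assert all(x is y for x, y in zip(rebuilt, flat_args))
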
619
+
620
+
621
+ # This layer handles the situation where you have two inputs that alias each other,
622
+ # and one of the inputs is mutated.
623
+ # We need to take special care to ensure that the mutation is applied to the other aliases in the graph.
624
+ #
625
+ # pre-condition: aot_wrapper_dedupe has already run.
626
+ # (This function will in theory work if there are duplicate args.
627
+ # However, the synthetic base code path is a bit sub-optimal, and running with dupe'd inputs
628
+ # would cause us to hit that path more frequently).
629
+ def aot_wrapper_synthetic_base(
630
+ flat_fn,
631
+ flat_args: List[Tensor],
632
+ aot_config: AOTConfig,
633
+ *,
634
+ fw_metadata: ViewAndMutationMeta,
635
+ # Currently, the only reason we need to plumb this bool is because
636
+ # the synthetic base code prohibits more cases in the autograd case than the inference case.
637
+ needs_autograd: bool,
638
+ compiler_fn,
639
+ ):
640
+ is_inference = not needs_autograd
641
+ flat_args_with_synthetic_bases, synthetic_base_info = merge_view_inputs(
642
+ flat_args,
643
+ fw_metadata.input_info,
644
+ is_inference=is_inference,
645
+ )
646
+ # Happy path: we don't need synthetic bases
647
+ if synthetic_base_info is None:
648
+ return compiler_fn(flat_fn, flat_args, aot_config, fw_metadata=fw_metadata)
649
+
650
+ # export path: ban synthetic bases for now, add later if requested.
651
+ if requires_subclass_dispatch(flat_args, fw_metadata):
652
+ raise RuntimeError(
653
+ """\
654
+ Encountered aliased inputs that are mutated in the graph, but at least one input/output
655
+ to the graph is a tensor subclass. This is not supported today. You can try to
656
+ remove the aliasing yourself as a workaround, or otherwise file an issue on github."""
657
+ )
658
+
659
+ if aot_config.is_export:
660
+ raise RuntimeError(
661
+ f"""\
662
+ Encountered aliased inputs that are mutated in the graph you are trying to export.
663
+ This functionality is currently not supported. If needed, please file a github issue.
664
+
665
+ synthetic_base_info={str(synthetic_base_info)}
666
+
667
+ fw_metadata={str(fw_metadata)}
668
+ """
669
+ )
670
+
671
+ assert len(fw_metadata.input_info) == len(synthetic_base_info)
672
+
673
+ # Update our forward metadata to take synthetic bases into account
674
+ (
675
+ fw_metadata_updated,
676
+ aliased_arg_idx_with_metadata_mutations,
677
+ ) = create_synthetic_base_metadata(
678
+ fw_metadata, synthetic_base_info, flat_args, flat_args_with_synthetic_bases
679
+ )
680
+
681
+ num_aliased_args_with_metadata_mutations = len(
682
+ aliased_arg_idx_with_metadata_mutations
683
+ )
684
+
685
+ def _unpack_synthetic_bases(primals: Tuple[Any, ...]) -> List[Any]:
686
+ f_args_inner = []
687
+ for inner_idx_or_tuple in synthetic_base_info:
688
+ if isinstance(inner_idx_or_tuple, int):
689
+ f_args_inner.append(primals[inner_idx_or_tuple])
690
+ else:
691
+ inner_base_idx, view_tensor = inner_idx_or_tuple
692
+ base = primals[inner_base_idx]
693
+ view_arg = gen_alias_from_base(
694
+ base, view_tensor, view_tensor.requires_grad
695
+ )
696
+ f_args_inner.append(view_arg)
697
+ return f_args_inner
698
+
699
+ @wraps(flat_fn)
700
+ def wrapped_flat_fn(*args):
701
+ unpacked_args = _unpack_synthetic_bases(args)
702
+ # This is a bit subtle. The goal of this entire function (aot_dispatch_synthetic_bases)
703
+ # is to relieve the downstream logic from having to reason about mutations on inputs that alias
704
+ # each other, by replacing aliased inputs with a synthetic base.
705
+ # One area where this breaks down a bit however is if one of those aliased inputs
706
+ # experienced a metadata mutation.
707
+ # We are now obligated to reapply the metadata mutation directly to the user's input;
708
+ # it isn't enough to apply mutations back to the synthetic base in the downstream logic.
709
+ #
710
+ # The way we handle this is by pretending that those aliased inputs that experience metadata mutations
711
+ # are additional outputs in the user's forward function.
712
+ # The downstream logic will just treat these as "user outputs that alias inputs".
713
+ # However, we will manually grab them at runtime here, use them to reapply the metadata mutation
714
+ # to the user inputs, and not return them to the user.
715
+ aliased_args_with_metadata_mutations = [
716
+ x
717
+ for i, x in enumerate(unpacked_args)
718
+ if i in aliased_arg_idx_with_metadata_mutations
719
+ ]
720
+ if len(aliased_args_with_metadata_mutations) > 0:
721
+ return *(flat_fn(*unpacked_args)), *aliased_args_with_metadata_mutations
722
+ else:
723
+ return flat_fn(*unpacked_args)
724
+
725
+ if config.debug_assert:
726
+ ref_fw_metadata = run_functionalized_fw_and_collect_metadata(
727
+ wrapped_flat_fn,
728
+ keep_input_mutations=fw_metadata.keep_input_mutations,
729
+ is_train=fw_metadata.is_train,
730
+ )(*flat_args_with_synthetic_bases)
731
+ assert ref_fw_metadata == fw_metadata_updated, (
732
+ f"ref_metadata={pprint.pformat(partial_flatten_asdict(ref_fw_metadata))}, "
733
+ f"\nactual_metadata={pprint.pformat(partial_flatten_asdict(fw_metadata_updated))}"
734
+ )
735
+
736
+ compiled_fn = compiler_fn(
737
+ wrapped_flat_fn,
738
+ flat_args_with_synthetic_bases,
739
+ aot_config,
740
+ fw_metadata=fw_metadata_updated,
741
+ )
742
+
743
+ if not hasattr(compiled_fn, "_boxed_call"):
744
+ compiled_fn = make_boxed_func(compiled_fn)
745
+
746
+ @wraps(compiled_fn)
747
+ def wrapped_compiled_fn(args):
748
+ args_with_synthetic_bases, synthetic_base_info = merge_view_inputs(
749
+ args, fw_metadata.input_info, is_inference=is_inference
750
+ )
751
+ assert synthetic_base_info is not None
752
+ aliased_args_w_metadata_mutations = [
753
+ args[i] for i in aliased_arg_idx_with_metadata_mutations
754
+ ]
755
+ args.clear()
756
+ outs = compiled_fn(args_with_synthetic_bases)
757
+ if num_aliased_args_with_metadata_mutations > 0:
758
+ # This code does not handle **all** input metadata mutations.
759
+ # Instead, it only handles metadata mutations on inputs that were converted into synthetic bases
760
+ # (which only happens if at least one aliased input experienced a data mutation).
761
+ # e.g:
762
+ # def f(a, b):
763
+ # a.mul_(2)
764
+ # b.transpose_(1, 0)
765
+ # f(x.view(2, 2), x.view(2, 2))
766
+ mutated_metadata_inps = outs[-num_aliased_args_with_metadata_mutations:]
767
+ user_outs = outs[:-num_aliased_args_with_metadata_mutations]
768
+ for inp, mutated_inp in zip(
769
+ aliased_args_w_metadata_mutations, mutated_metadata_inps
770
+ ):
771
+ inp.as_strided_(
772
+ mutated_inp.size(),
773
+ mutated_inp.stride(),
774
+ mutated_inp.storage_offset(),
775
+ )
776
+ return user_outs
777
+ return outs
778
+
779
+ return wrapped_compiled_fn
780
+
781
+
782
+ # Note [Handling mutations on an input that aliases other inputs]
783
+ # The easiest example to show-case this edge case is here:
784
+ #
785
+ # def f(a, b):
786
+ # a.mul_(2)
787
+ # out = a + b
788
+ # return out
789
+ # b = torch.ones(...)
790
+ # a = b.view(-1)
791
+ # f(a, b)
792
+ #
793
+ # In this situation, if a and b happened to be aliased, we need to trace something different!
794
+ # Suppose we had a = b.view(-1)
795
+ # (In this case, that means that `a._base is b`)
796
+ #
797
+ # We need to ensure that the aliasing relationship between a and b is preserved.
798
+ # We do that by detecting the specific situation above (mutating an input that aliases another input),
799
+ # and when we do that, we create a synthetic base argument. Then inside of the traced forward,
800
+ # we regenerate a and b off of that base.
801
+ # The complete example of the transformed function looks like this:
802
+ #
803
+ # // The traced forward takes in a synthetic base, and regenerates the aliased inputs as views
804
+ # // We could consider getting view-replay support here to minimize as_strided_scatter ops in the graph
805
+ # def traced_forward(base):
806
+ # a = base.as_strided(...)
807
+ # b = base.as_strided(...)
808
+ # a_updated = a.mul(2)
809
+ # base_updated = torch.as_strided_scatter(base, a_updated, ...)
810
+ # b_updated = base_updated.as_strided(...)
811
+ # out = a_updated + b_updated
812
+ # return a_updated, out
813
+ #
814
+ # def compiled_fn(a, b):
815
+ # // we detect that a is the "differentiable base" here
816
+ # base = a
817
+ # // In other situations, we might do either:
818
+ # // (1) a and b are both views off of some larger differentiable base
819
+ # // assert a._base is b._base and a._base is not None
820
+ # // base = a._base
821
+ # // (2) a and b both don't require gradients. Create a base from the storage
822
+ # // assert a._base is None and b._base is None
823
+ # // base = torch.Tensor(a.storage())
824
+ # a_updated, out = traced_forward(base)
825
+ # a.copy_(a_updated)
826
+ # return out
827
+ #
828
+ # This function:
829
+ # (1) Merges input views into a synthetic base argument, when any of those input views are mutated
830
+ # (2) Returns metadata telling the autograd.Function how to modify their arguments properly,
831
+ # to respect the new calling convention.
832
+ #
833
+ # The calling convention is as follows.
834
+ # Any inputs that were originally views of one another get yanked, and replaced with a synthetic base.
835
+ # The argument list ordering goes [base1, ..., baseN], [arg1, ..., argN],
836
+ # Where the ordering of the bases is determined from the ordering of the original view args.
837
+ # baseA will come before baseB if the earliest original argument coming from baseA
838
+ # showed up earlier in the argument list than the earliest original argument coming from baseB.
839
+ #
840
+ # Example, given some tensors a, b, c, d
841
+ # call site:
842
+ # f(a, c.view(-1), b.view(-1), b, c, d)
843
+ # Modified argument list:
844
+ # c_base comes first because the first c view came earlier in arg list than the first b view
845
+ # a and d still show up in the modified arg list, but b and c don't- they're regenerated from their bases
846
+ # b_base = torch.Tensor(b.storage())
847
+ # c_base = torch.Tensor(c.storage())
848
+ # f(c_base, b_base, a, d)
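
A small runnable motivation for the synthetic-base handling described in the note above: when two inputs alias the same storage and one is mutated, the other input observes the mutation, so the traced graph must model them through a shared base.

    import torch

    base = torch.ones(4)
    a = base.view(-1)          # a and b alias the same storage
    b = base.view(2, 2)

    def f(a, b):
        a.mul_(2)
        return a + b.view(-1)

    out = f(a, b)
    assert torch.equal(b, torch.full((2, 2), 2.0))   # b observed a's mutation
    assert torch.equal(out, torch.full((4,), 4.0))
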
849
+ def merge_view_inputs(
850
+ fwd_inputs: List[Any],
851
+ mutated_input_info: List[InputAliasInfo],
852
+ *,
853
+ # The autograd case currently has more restrictions than the inference case.
854
+ is_inference: bool,
855
+ ) -> Tuple[List[Any], Optional[List[Union[int, Tuple[int, torch.Tensor]]]]]:
856
+ def _are_differentiable_views(view1, view2):
857
+ if view1 is view2:
858
+ return True
859
+ if view1._base is None and view2._base is None:
860
+ return False
861
+ if view1._base is view2._base or view1._base is view2 or view1 is view2._base:
862
+ return True
863
+ return False
864
+
865
+ def _same_dtype_views(view1, view2):
866
+ if view1.dtype != view2.dtype:
867
+ return False
868
+ if view1._base is not None and view1.dtype != view1._base.dtype:
869
+ return False
870
+ if view2._base is not None and view2.dtype != view2._base.dtype:
871
+ return False
872
+ return True
873
+
874
+ assert len(fwd_inputs) == len(mutated_input_info)
875
+ storage_ref_to_idx: Dict[StorageWeakRef, List[int]] = collections.defaultdict(list)
876
+ base_args = []
877
+ other_args = []
878
+ for i, inpt in enumerate(fwd_inputs):
879
+ if isinstance(inpt, Tensor):
880
+ storage_ref = StorageWeakRef(inpt.untyped_storage())
881
+ storage_ref_to_idx[storage_ref].append(i)
882
+ else:
883
+ other_args.append(inpt)
884
+ # Note [Synthetic Base Info Metadata]
885
+ # This list contains metadata that tells you what the i'th argument in the inner calling convention should be.
886
+ # It's either:
887
+ # - another int (corresponding to the index in the argument list of the element from the outer calling convention)
888
+ # - idx, view_tensor, where we can generate the new output with view_tensor._view_func(old_args[idx])
889
+ # idx corresponds to which synthetic base from the outer calling context to view
890
+ inner_calling_convention_meta: Dict[int, Union[int, Tuple[int, torch.Tensor]]] = {}
891
+ for aliased_input_indices in storage_ref_to_idx.values():
892
+ if len(aliased_input_indices) <= 1 or not any(
893
+ # We only care about mutations that affect all aliases,
894
+ # so metadata mutations on an input don't require us to do synthetic base handling.
895
+ mutated_input_info[inpt_idx].mutates_data
896
+ for inpt_idx in aliased_input_indices
897
+ ):
898
+ for curr_idx in aliased_input_indices:
899
+ other_args.append(fwd_inputs[curr_idx])
900
+ continue
901
+
902
+ # Here, we attempt to do a more complicated check to detect false aliasing
903
+ # (e.g. if all the tensors have the same storage, but don't actually overlap)
904
+ # In theory, we could have a large group of tensors that all share storages, where only *some* of them
905
+ # have overlapping memory.
906
+ # I don't bother with that case for now: here, we only bail out earlier if we detect that **every** pair
907
+ # of tensors in the current group that shares a storage is non-overlapping.
908
+ aliased_input_indices_no_false_sharing = compute_overlapping_inputs(
909
+ fwd_inputs, aliased_input_indices
910
+ )
911
+ if len(aliased_input_indices_no_false_sharing) <= 1:
912
+ for curr_idx in aliased_input_indices:
913
+ other_args.append(fwd_inputs[curr_idx])
914
+ continue
915
+
916
+ # We detected an input that was mutated, AND aliases with another input.
917
+ # We need to replace this set of aliased inputs with a single synthetic base.
918
+ # For now, I'm banning a bunch of cases. We expect dynamo to properly detect these cases
919
+ # and error out. We can fix them later.
920
+ # These checks are transitive, so we don't need to check every pair.
921
+ for idx1, idx2 in zip(
922
+ aliased_input_indices, aliased_input_indices[1:], strict=False
923
+ ):
924
+ view1 = fwd_inputs[idx1]
925
+ view2 = fwd_inputs[idx2]
926
+ # The "inputs that are aliased but have different differentiable bases" case
927
+ # is more complicated and hopefully pretty rare. Not currently handled.
928
+ if not is_inference:
929
+ assert _are_differentiable_views(
930
+ view1, view2
931
+ ), "aot_autograd() does not yet handle non-differentiable view input mutations."
932
+ # Regenerating views when reinterpreting complex / real tensors seems non-trivial,
933
+ # not handling for now
934
+ assert _same_dtype_views(
935
+ view1, view2
936
+ ), "aot_autograd() does not yet handle input mutations on views with different dtypes."
937
+ non_none_bases = [
938
+ fwd_inputs[i]._base
939
+ for i in aliased_input_indices
940
+ if fwd_inputs[i]._base is not None
941
+ ]
942
+ aliases_with_none_bases = [
943
+ fwd_inputs[i] for i in aliased_input_indices if fwd_inputs[i]._base is None
944
+ ]
945
+ if len(non_none_bases) == 0:
946
+ # Case where none of the aliases have a ._base
947
+ # we generate a synthetic base without gradients, and generate views off of it
948
+ # We hit this case when we have input tensors to the graph that share a storage,
949
+ # but do not have a ._base field.
950
+ # Wondering when we hit this case?
951
+ # The _base field simply says that autograd knows about the aliasing relationship,
952
+ # but sometimes we create tensors which are aliased out of the same storage but guaranteed
953
+ # to be disjoint. In these cases, we will skip setting up the _base relationship
954
+ # for performance reasons (because the fact that the tensors share the same storage
955
+ # is unobservable unless you (1) do naughty things with resize_/as_strided
956
+ # or (2) look at the storage--as we are doing here.)
957
+ # One particular example of this is optimizer steps on the LSTM module:
958
+ # LSTM parameters are packed into a contiguous storage for efficiency reasons when
959
+ # calling cuDNN kernels, so when these parameters get passed to the optimizer we will
960
+ # find they share the same storage, but do not have _base set since they are all disjoint.
961
+ #
962
+ # NOTE: There is one case where this is unsafe:
963
+ # torch.Tensor(storage) will ALWAYS create a 1D tensor, which is not necessarily
964
+ # the same shape as the "actual" base that the tensor came from.
965
+ # For the most part this is fine, because we always use as_strided()
966
+ # to generate the original aliased inputs again.
967
+ # If we were to use view-replay though, this could cause the aliased views
968
+ # to have incorrect sizes.
969
+ example_idx = aliased_input_indices[0]
970
+ example_alias = fwd_inputs[example_idx]
971
+ # Note that this function is re-used at both trace time and runtime.
972
+ # At trace time, we're under a FakeMode so synthetic_base becomes a FakeTensor.
973
+ synthetic_base = torch.empty(
974
+ (0,), dtype=example_alias.dtype, device=example_alias.device
975
+ )
976
+ # We don't actually have a convenient way of going from storage -> tensor,
977
+ # so we use set_() here (we suffer some minor overhead, but this case is rare).
978
+ synthetic_base.set_(example_alias.untyped_storage())
979
+ else:
980
+ # Case where all of the aliases require gradients, and have the same _base.
981
+ synthetic_base = non_none_bases[0]
982
+ for other_base in non_none_bases[1:]:
983
+ assert (
984
+ other_base is synthetic_base
985
+ ), "aot_autograd() does not yet handle non-differentiable view input mutations."
986
+ for alias in aliases_with_none_bases:
987
+ assert (
988
+ alias is synthetic_base
989
+ ), "aot_autograd() does not yet handle non-differentiable view input mutations."
990
+ base_args.append(synthetic_base)
991
+ for curr_view_idx in aliased_input_indices:
992
+ curr_view = fwd_inputs[curr_view_idx]
993
+ base_idx = len(base_args) - 1
994
+ # We store just enough info here so that we can regenerate the view later.
995
+ # Regeneration: curr_view._view_func(args[base_idx])
996
+ inner_calling_convention_meta[curr_view_idx] = (base_idx, curr_view)
997
+ if len(base_args) == 0:
998
+ assert len(other_args) == len(fwd_inputs)
999
+ # If no synthetic bases are necessary, just return the original inputs.
1000
+ return fwd_inputs, None
1001
+ else:
1002
+ # Otherwise, return:
1003
+ # (1) The new args according to the updated calling convention: (synthetic_bases, other_args)
1004
+ # (2) Metadata telling functionalization how to generate the inner argument list given the outer calling convention.
1005
+ # We post-process it into a list, where meta[i] tells you info about the i'th argument in the inner calling convention.
1006
+ args_to_functionalization = base_args + other_args
1007
+ arg_to_old_idx_map = {arg: i for (i, arg) in enumerate(fwd_inputs)}
1008
+ for i, other_arg in enumerate(other_args):
1009
+ new_idx = len(base_args) + i
1010
+ old_idx = arg_to_old_idx_map[other_arg]
1011
+ inner_calling_convention_meta[old_idx] = new_idx
1012
+ # post process into a list
1013
+ post_processed_calling_convention_meta: List[
1014
+ Union[int, Tuple[int, torch.Tensor]]
1015
+ ] = [-1 for _ in range(len(inner_calling_convention_meta))]
1016
+ for k, v in inner_calling_convention_meta.items():
1017
+ post_processed_calling_convention_meta[k] = v
1018
+ # Quick assert: every argument in the inner calling convention should be accounted for.
1019
+ for x in post_processed_calling_convention_meta:
1020
+ assert x != -1
1021
+ return args_to_functionalization, post_processed_calling_convention_meta
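
To make the contract of the returned pair concrete, here is a minimal illustrative sketch (not the code AOTAutograd itself uses) of how a consumer could rebuild the inner argument list from (args_to_functionalization, post_processed_calling_convention_meta), following Note [Synthetic Base Info Metadata]; rebuild_inner_args and its argument names are hypothetical.

from typing import List, Tuple, Union

import torch


def rebuild_inner_args(
    new_args: List[torch.Tensor],
    meta: List[Union[int, Tuple[int, torch.Tensor]]],
) -> List[torch.Tensor]:
    # meta[i] says how to produce the i'th argument of the inner calling
    # convention from the (synthetic_bases, other_args) argument list.
    rebuilt: List[torch.Tensor] = []
    for entry in meta:
        if isinstance(entry, int):
            # Plain passthrough: this argument was not folded into a synthetic base.
            rebuilt.append(new_args[entry])
        else:
            base_idx, view_tensor = entry
            # Replay the original view off of the synthetic base,
            # as described above: curr_view._view_func(args[base_idx]).
            rebuilt.append(view_tensor._view_func(new_args[base_idx]))
    return rebuilt
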
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/schemas.py ADDED
@@ -0,0 +1,696 @@
1
+ """
2
+ The various dataclasses, Enums, namedtuples etc used in AOTAutograd. This includes
3
+ input/output types, metadata, config, function signatures etc.
4
+ """
5
+
6
+ import collections
7
+ import functools
8
+ from dataclasses import dataclass, field
9
+ from enum import Enum
10
+ from typing import Any, Callable, Dict, List, NewType, Optional, Set, Tuple, Union
11
+
12
+ import torch
13
+ import torch.utils._pytree as pytree
14
+ from torch._guards import Source
15
+ from torch._subclasses import FakeTensor
16
+ from torch._subclasses.fake_tensor import is_fake
17
+
18
+ from .. import config
19
+
20
+ from .functional_utils import _check_if_mutation_can_be_in_graph
21
+ from .utils import strict_zip
22
+
23
+ zip = strict_zip
24
+
25
+ OutputType = Enum(
26
+ "OutputType",
27
+ (
28
+ # output is not an alias
29
+ "non_alias",
30
+ # output aliases an input
31
+ "alias_of_input",
32
+ # output **is** an input tensor
33
+ "is_input",
34
+ # output has a ._base tensor, which is a graph intermediate.
35
+ # We need to return its ._base as a graph output,
36
+ # so its requires_grad info is populated correctly.
37
+ # Instructs the runtime code to regenerate the current output
38
+ # from a base tensor, graph_intermediates[base_idx]
39
+ "alias_of_intermediate_save_as_output",
40
+ # Same as above; but we don't need to explicitly add its ._base
41
+ # as a graph output, because it already **is** a graph output.
42
+ "alias_of_intermediate",
43
+ # Same as above; but the output's ._base is **already** a user output.
44
+ # Instructs the runtime code to regenerate the current output from
45
+ # a base tensor, user_outputs[base_idx]
46
+ "alias_of_intermediate_base_is_user_output",
47
+ # See Note [Intermediate Bases Optimization]
48
+ "unsafe_view_alias",
49
+ # output is an alias, but has a custom autograd.Function backward.
50
+ # In this case, we don't want to do view-replay, since we won't be able to replay the custom function.
51
+ # Instead, we'll treat this output "normally", and trace its backward into the graph.
52
+ "custom_function_view",
53
+ ),
54
+ )
55
+
56
+
57
+ # This class stores info about every user output.
58
+ @dataclass(frozen=True)
59
+ class OutputAliasInfo:
60
+ # Tells us if this output is:
61
+ # (1) a regular (non-aliased) output
62
+ # (2) an alias of a forward input
63
+ # (3) **is** a forward input (special case of "alias_of_input")
64
+ # (4) an alias of an intermediate (aka an alias of an output of the inner traced forward)
65
+ # (5) an alias of an intermediate, that explicitly requires returning the intermediate
66
+ # as a graph output
67
+ # (6) an alias of an intermediate, where that intermediate is also a user output
68
+ output_type: OutputType
69
+ # The raw type of the output (torch.Tensor, SymInt, etc)
70
+ raw_type: type
71
+ # If (1) above, then
72
+ # - base_idx is None
73
+ # If (2) or (3) above, then
74
+ # - Tells us that the base of this alias is user_fwd_input[base_idx]
75
+ # (This is an index into the inputs *before* we make synthetic bases)
76
+ # If (4) or (5) above, then
77
+ # - Tells us that the base of this alias is output_graph_intermediates[base_idx]
78
+ # here, this refers to the index of the *direct* traced
79
+ # If (6) above, then:
80
+ # - Tells us that the base of this alias is output_user_fwds[base_idx]
81
+ # here, this refers to the index of the *direct* traced
82
+ base_idx: Optional[int]
83
+ # If it is a Tensor, what the dynamic dims are (otherwise is None)
84
+ dynamic_dims: Optional[Set[int]]
85
+ # requires_grad
86
+ requires_grad: bool
87
+
88
+
89
+ class MutationType(Enum):
90
+ NOT_MUTATED = 1
91
+ MUTATED_IN_GRAPH = 2
92
+ MUTATED_OUT_GRAPH = 3
93
+
94
+
95
+ # This class tells us info about user inputs.
96
+ @dataclass(frozen=True)
97
+ class InputAliasInfo:
98
+ is_leaf: bool
99
+ mutates_data: bool
100
+ mutates_metadata: bool
101
+ mutations_hidden_from_autograd: bool
102
+ mutations_under_no_grad_or_inference_mode: bool
103
+ mutates_storage_metadata: bool
104
+ requires_grad: bool
105
+ keep_input_mutations: bool
106
+
107
+ def __post_init__(self):
108
+ if self.mutates_storage_metadata:
109
+ # For convenience, we guarantee that this is always true.
110
+ # In practice, if we call .set_(), then at runtime there is no need
111
+ # to additionally fix up the tensor metadata, since our runtime
112
+ # call to inp.set_(updated_inp) will already have the right metadata
113
+ assert self.mutates_metadata
114
+
115
+ @functools.cached_property
116
+ def mutation_type(self) -> MutationType:
117
+ if (not self.mutates_data) and (not self.mutates_metadata):
118
+ return MutationType.NOT_MUTATED
119
+
120
+ if _check_if_mutation_can_be_in_graph(
121
+ self.keep_input_mutations,
122
+ self.mutates_data,
123
+ self.mutates_metadata,
124
+ self.mutations_hidden_from_autograd,
125
+ self.mutations_under_no_grad_or_inference_mode,
126
+ self.requires_grad,
127
+ ):
128
+ return MutationType.MUTATED_IN_GRAPH
129
+
130
+ return MutationType.MUTATED_OUT_GRAPH
131
+
132
+
133
+ @dataclass
134
+ class SubclassCreationMeta:
135
+ """
136
+ Used for AOTDispatch.
137
+ This dataclass gives us the information we need to reconstruct a tensor subclass
138
+ from our flat inputs.
139
+ Why is this important? The graph that we'd like to trace out contains flat tensor inputs,
140
+ But the user's original model may have subclass inputs and outputs.
141
+ So we need to wrap/unwrap subclasses as necessary to translate between the user's
142
+ view (subclass inps/outs), and the backend compiler's view (graph with no subclass args).
143
+
144
+ Complications arise mostly from the fact that a subclass can hold more than one inner tensor;
145
+ So for a given subclass input/output, we need to carefully track which indices map
146
+ to the subclass tensor in the corresponding "dense-tensor-only" graph.
147
+ """
148
+
149
+ # In the inner graph that only takes in dense tensor inputs,
150
+ # this maps to the first index of "tensors that should go in this subclass wrapper"
151
+ flat_tensor_start_idx: int
152
+ # The number of tensors that live in this subclass wrapper
153
+ arg_count: int
154
+ # Stores the original subclass itself.
155
+ # This is needed because we need the autograd metadata on the original subclass
156
+ # (this is guaranteed to be a wrapper subclass that holds a fake tensor,
157
+ # so holding onto this at runtime shouldn't leak memory)
158
+ original_subclass: torch.Tensor
159
+ # meta and inner_keys are produced by the subclass's __tensor_flatten__.
160
+ # We need to keep them around along with outer_size / outer_stride to plumb them
161
+ # into __tensor_unflatten__.
162
+ meta: Any
163
+ inner_keys: List[Any]
164
+ outer_size: Tuple[int, ...]
165
+ outer_stride: Tuple[int, ...]
166
+
167
+ def creation_fn(self, all_args, *, is_runtime: bool):
168
+ curr_args = all_args[
169
+ self.flat_tensor_start_idx : self.flat_tensor_start_idx + self.arg_count
170
+ ]
171
+ assert len(curr_args) == len(
172
+ self.inner_keys
173
+ ), f"inner_keys: {str(self.inner_keys)}. len(curr_args): {len(curr_args)}"
174
+ # NB: Sometimes we have real inner tensors and symbolic metadata.
175
+ # TODO: Resolve this so we always have matching real / symbolic tensors / metadata.
176
+ out = type(self.original_subclass).__tensor_unflatten__( # type: ignore[attr-defined]
177
+ dict(zip(self.inner_keys, curr_args)),
178
+ self.meta,
179
+ self.outer_size,
180
+ self.outer_stride,
181
+ )
182
+ if not is_runtime:
183
+ # After wrapping up the inner dense tensors into a subclass, we need to make sure that our new wrapper
184
+ # has correct autograd metadata, since we'll be tracing through the autograd engine with the subclass.
185
+ # We don't trace through the autograd engine at runtime though, so no need
186
+ # to compute this extra metadata then!
187
+ torch._mirror_autograd_meta_to(self.original_subclass, out) # type: ignore[attr-defined]
188
+
189
+ return out
190
+
191
+ def __post_init__(self):
192
+ # sanity assert to make sure we don't leak memory
193
+ assert is_fake(self.original_subclass)
194
+
195
+
196
+ # This class encapsulates all aliasing + mutation info we need about the forward graph
197
+ # See a more detailed overview of the edge case handling at
198
+ # https://docs.google.com/document/d/19UoIh_SVrMy_b2Sx5ZaeOJttm6P0Qmyss2rdBuyfoic/edit
199
+ @dataclass(eq=False)
200
+ class ViewAndMutationMeta:
201
+ # length = # user inputs
202
+ # This gives us info about every input, and what sort of mutation happened to it (if any)
203
+ input_info: List[InputAliasInfo]
204
+
205
+ # length = # user outputs
206
+ # This gives us info about every output (mostly around whether it aliases other tensors)
207
+ output_info: List[OutputAliasInfo]
208
+
209
+ # length = the number of intermediate bases appended as outputs to the end of the forward graph.
210
+ # Note: this is not necessarily the same thing as:
211
+ # len([x for x in output_info if x.output_type == OutputType.alias_of_intermediate])
212
+ # Because outputs might share a ._base, or an output's ._base might itself be
213
+ # another user output (in both cases, we won't redundantly append bases to the end of the graph)
214
+ num_intermediate_bases: int
215
+
216
+ # For inference only: instructs us to keep data-only input mutations directly in the graph
217
+ keep_input_mutations: bool
218
+
219
+ # length = (# inputs w data mutations) + (# user outputs that are non_aliasing tensors)
220
+ # + (# intermediate bases)
221
+ # These are the FakeTensor (or potential SymInt) outputs that we traced from our
222
+ # metadata pass of the user's forward function.
223
+ # Their only use today is to pass them as a best-guess for tangents when tracing the joint.
224
+ # Stashing them as part of our "metadata" makes it simpler if we want to run our analysis
225
+ # pass once, and re-use the output throughout AOTAutograd
226
+ traced_tangents: List[Any]
227
+
228
+ # Each of these is a list telling us about subclasses for the inputs/outputs/grad_outs
229
+ # They are used throughout AOTDispatch to tell us how to generate a list of subclass tensors,
230
+ # Given a (potentially larger) list of plain torch tensors.
231
+
232
+ # Taking subclass_inp_meta as an example:
233
+ # subclass_inp_meta[i] = j (an int) tells us:
234
+ # "The i'th user input is not a subclass, and corresponds to inputs[j] of the plain-tensor graph."
235
+ # subclass_inp_meta[i] = SubclassCreationMeta(flat_tensor_start_idx=3, arg_count=2)
236
+ # "The i'th user input is subclass holding two inner tensors, which are
237
+ # inputs[3] and inputs[4] of the plain-tensor graph".
238
+
239
+ # length = # user inputs
240
+ subclass_inp_meta: List[Union[int, SubclassCreationMeta]]
241
+ # So, the full set of outputs to the forward graph looks something like:
242
+ # (*mutated_inps, *user_outs, *intermediate_bases, *saved_for_bw_tensors)
243
+ # where the first 3 of those 4 can be subclasses
244
+ # (but not saved_for_bw tensors, since these are internal to the compiler
245
+ # and not user visible, so there's no point in wrapping/unwrapping them at runtime).
246
+ # This list contains subclass information on all of the fw graph outputs
247
+ # except for saved_for_bw_tensors.
248
+ subclass_fw_graph_out_meta: List[Union[int, SubclassCreationMeta]]
249
+ # length = # backward graph inputs
250
+ subclass_tangent_meta: List[Union[int, SubclassCreationMeta]]
251
+ # TODO: we should kill this
252
+ # (need to default it to not break internal)
253
+ is_train: bool = False
254
+
255
+ num_symints_saved_for_bw: Optional[int] = None
256
+
257
+ # The grad_enabled mutation that will be emitted in the runtime_wrapper epilogue
258
+ # NOTE: AOTAutograd will assume that the ambient `is_grad_enabled` is the grad mode
259
+ # that is intended to be in effect prior to running the graph, in keeping with
260
+ # equivalence to eager mode. It is the responsibility of upstream graph acquisition
261
+ # to reset the grad mode to its pre-graph value prior to calling aot_autograd.
262
+ grad_enabled_mutation: Optional[bool] = None
263
+
264
+ # Keeps track of whether `torch.use_deterministic_algorithms` was turned on
265
+ # when the forward was run. If deterministic mode was turned off during the
266
+ # forward, but is turned on during the backward call, then an error is
267
+ # raised
268
+ deterministic: Optional[bool] = None
269
+
270
+ # Map of effect type (ex. _EffectType.ORDERED) to token. If there are
271
+ # side-effectful operators, FunctionalTensorMode will populate this
272
+ # dictionary telling us how many tokens we will need during tracing.
273
+ tokens: Dict[Any, torch.Tensor] = field(default_factory=dict)
274
+
275
+ def __post_init__(self):
276
+ # pre-compute the indices of the inputs that are mutated.
277
+ # When keep_input_mutations is set, we don't need to worry about our epilogue
278
+ # handling data-only mutations, because we keep them directly in the graph.
279
+
280
+ mutated_inp_runtime_indices = [
281
+ i
282
+ for i, m in enumerate(self.input_info)
283
+ if (m.mutation_type == MutationType.MUTATED_OUT_GRAPH)
284
+ ]
285
+
286
+ mutated_graph_handled_indices = [
287
+ i
288
+ for i, m in enumerate(self.input_info)
289
+ if m.mutation_type == MutationType.MUTATED_IN_GRAPH
290
+ ]
291
+ self.mutated_graph_handled_indices = mutated_graph_handled_indices
292
+ self.num_mutated_graph_handled_indices = len(self.mutated_graph_handled_indices)
293
+
294
+ mutated_graph_handled_indices_seen_by_autograd = [
295
+ i
296
+ for i in mutated_graph_handled_indices
297
+ if not self.input_info[i].mutations_hidden_from_autograd
298
+ ]
299
+
300
+ self.mutated_graph_handled_indices_seen_by_autograd = (
301
+ mutated_graph_handled_indices_seen_by_autograd
302
+ )
303
+ self.num_mutated_graph_handled_indices_seen_by_autograd = len(
304
+ self.mutated_graph_handled_indices_seen_by_autograd
305
+ )
306
+
307
+ aliased_out_indices = [
308
+ i
309
+ for i, m in enumerate(self.output_info)
310
+ if m.output_type
311
+ not in [
312
+ OutputType.non_alias,
313
+ OutputType.unsafe_view_alias,
314
+ OutputType.custom_function_view,
315
+ ]
316
+ ]
317
+ unsafe_view_out_indices = [
318
+ i
319
+ for i, m in enumerate(self.output_info)
320
+ if m.output_type is OutputType.unsafe_view_alias
321
+ ]
322
+
323
+ # This is pre-computed in post_init for perf.
324
+ # It contains the index of every element
325
+ # of input_info that corresponds to a mutation (data or metadata or both)
326
+ self.mutated_inp_runtime_indices = mutated_inp_runtime_indices
327
+ self.num_mutated_inp_runtime_indices = len(self.mutated_inp_runtime_indices)
328
+
329
+ # This is pre-computed for perf.
330
+ # It contains the index of every element
331
+ # of output_info that corresponds to an alias (either of an input or intermediate)
332
+ self.aliased_out_indices = aliased_out_indices
333
+ self.unsafe_view_out_indices = unsafe_view_out_indices
334
+ self.num_outputs = len(self.output_info)
335
+ self.num_outputs_non_aliased = len(
336
+ [
337
+ x
338
+ for x in self.output_info
339
+ if x.output_type
340
+ in [
341
+ OutputType.non_alias,
342
+ OutputType.unsafe_view_alias,
343
+ OutputType.custom_function_view,
344
+ ]
345
+ ]
346
+ )
347
+ self.num_outputs_aliased_to_inputs = len(
348
+ [
349
+ x
350
+ for x in self.output_info
351
+ if x.output_type
352
+ in [
353
+ OutputType.alias_of_input,
354
+ OutputType.is_input,
355
+ ]
356
+ ]
357
+ )
358
+ self.num_unsafe_view_outputs = len(self.unsafe_view_out_indices)
359
+ self.num_outputs_aliased_to_intermediates = len(
360
+ [
361
+ x
362
+ for x in self.output_info
363
+ if x.output_type
364
+ in [
365
+ OutputType.alias_of_intermediate,
366
+ OutputType.alias_of_intermediate_save_as_output,
367
+ OutputType.alias_of_intermediate_base_is_user_output,
368
+ ]
369
+ ]
370
+ )
371
+ self.num_outputs_aliased = (
372
+ self.num_outputs_aliased_to_inputs
373
+ + self.num_outputs_aliased_to_intermediates
374
+ )
375
+
376
+ self.dynamic_outputs = any(o.dynamic_dims for o in self.output_info)
377
+ # See Note: [AOTAutograd Backward Guards]
378
+ # This is pre-computed for fast asserts on the types of our grad_outputs in the backward.
379
+ # Eventually, we should kill this and replace with real backward guards.
380
+ # (we want to precompute the "runtime" types, so replace FakeTensor with torch.Tensor)
381
+ self.output_types = [
382
+ torch.Tensor if isinstance(x, FakeTensor) else type(x)
383
+ for x in self.traced_tangents
384
+ ]
385
+
386
+ self.is_rng_op_functionalized = config.functionalize_rng_ops
387
+ # All of the above metadata is collected by tracing the fw function.
388
+ # However, extra outputs for rng offsets behave differently. Both fwd
389
+ # and bwd graphs have their own outputs for the total consumed offsets.
390
+ # Unlike mutated inputs, we don't have to worry about sending the right
391
+ # set of tensors between fwd and bwd. Fwd and bwd offsets are
392
+ # independent and simpler to handle. Therefore, we track them
393
+ # separately.
394
+ self.num_outputs_rng_offset = 1 if self.is_rng_op_functionalized else 0
395
+
396
+ # Our forward() returns (mutated_inputs, outputs, output_intermediate_bases, saved_tensors, saved_symints)
397
+ self.num_forward_returns = (
398
+ self.num_mutated_inp_runtime_indices
399
+ + self.num_outputs
400
+ + self.num_intermediate_bases
401
+ )
402
+ # In case of functionalization of rng ops, the fw_module returns one
403
+ # additional output for rng offset. This rng offset is used right
404
+ # away to advance the rng state, and is not passed on to the raw
405
+ # outputs. However, we need to know the exact boundary to identify
406
+ # which tensors to be saved for the bwd graph. num_forward captures
407
+ # this information.
408
+ self.num_forward = self.num_forward_returns + self.num_outputs_rng_offset
409
+
410
+ @property
411
+ def tensors_saved_for_backwards_slice(self):
412
+ assert self.num_symints_saved_for_bw is not None
413
+ if self.num_symints_saved_for_bw > 0:
414
+ return slice(self.num_forward, -self.num_symints_saved_for_bw)
415
+ else:
416
+ return slice(self.num_forward, None)
417
+
418
+ @property
419
+ def symints_saved_for_backwards_slice(self):
420
+ assert self.num_symints_saved_for_bw is not None
421
+ if self.num_symints_saved_for_bw > 0:
422
+ return slice(-self.num_symints_saved_for_bw, None)
423
+ else:
424
+ return slice(0, 0) # empty slice
425
+
426
+ def __eq__(self, other):
427
+ if not isinstance(other, ViewAndMutationMeta):
428
+ return NotImplemented
429
+ return (
430
+ self.input_info == other.input_info
431
+ and self.output_info == other.output_info
432
+ and self.num_intermediate_bases == other.num_intermediate_bases
433
+ and self.keep_input_mutations == other.keep_input_mutations
434
+ and self.is_rng_op_functionalized == other.is_rng_op_functionalized
435
+ and self.num_outputs_rng_offset == other.num_outputs_rng_offset
436
+ and len(self.traced_tangents) == len(other.traced_tangents)
437
+ and all(
438
+ x.shape == y.shape and x.dtype == y.dtype
439
+ for x, y, in zip(self.traced_tangents, other.traced_tangents)
440
+ )
441
+ )
442
+
443
+
444
+ @dataclass(eq=False)
445
+ class SubclassMeta:
446
+ # A copy of all forward metadata, but computed on the *dense* tensor forward (after desugaring subclasses)
447
+ # So for example, if the user had a model containing two `TwoTensor` inputs,
448
+ # Then `SubclassMeta.fw_metadata.input_info` would have length 4 here.
449
+ fw_metadata: ViewAndMutationMeta
450
+
451
+ # Note: [Computing Subclass Metadata about grad_inputs]
452
+ # Given a list of flattened, plain tensor grad_inputs, this tells us how to reconstruct the grad_input subclasses
453
+ #
454
+ # You might think: why not just assume that all grad_inputs will have the same subclass-ness as the original inputs?
455
+ # (AOTAutograd generally assumes other properties, e.g. that grad_outputs are contiguous)
456
+ #
457
+ # This doesn't really work though. take this example:
458
+ #
459
+ # def f(DoubleTensor, DenseTensor):
460
+ # return DoubleTensor * DenseTensor
461
+ #
462
+ # In the above example, the .grad field of *both* DoubleTensor and DenseTensor will be a DoubleTensor.
463
+ # When we trace out a joint fw-bw graph, we'll end up returning two subclasses for the two grad_inputs.
464
+ # This means that our backward graph will return 4 outputs (two dense tensors for each DoubleTensor grad_input)
465
+ # and we need to properly store the metadata that tells us how to turn these 4 outputs back into DoubleTensors.
466
+ #
467
+ # Note that this info **cannot** easily be figured out from ViewAndMutationMeta.
468
+ # We can only compute this info by tracing the entire joint and examining the grad_inputs that we computed.
469
+ #
470
+ # See Note: [AOTAutograd Backward Guards]
471
+ # This will also eventually require us to install backward guards,
472
+ # in case we made incorrect assumptions about the subclass-ness of our grad_outputs
473
+ #
474
+ # Optional field because we don't compute for inference graphs
475
+ grad_input_metas: Optional[List[Union[int, SubclassCreationMeta]]]
476
+
477
+ def __init__(self):
478
+ # The fields in this class get set after its construction.
479
+ pass
480
+
481
+
482
+ # This class exists because:
483
+ # - the autograd.Function.forward() in aot autograd returns outputs that might alias inputs
484
+ # - we only care about the metadata on those aliases, so we can regenerate them.
485
+ # We do not want them to participate in the autograd.Function.
486
+ # We do that by wrapping them in an opaque class, so the autograd.Function
487
+ # does not know to treat them as tensors.
488
+ @dataclass(frozen=True)
489
+ class TensorAlias:
490
+ alias: torch.Tensor
491
+
492
+
493
+ @dataclass
494
+ class BackwardSignature:
495
+ """
496
+ Provides information about the backward section of an exported
497
+ joint forward-backward graph.
498
+ For a particular fx GraphModule, this class contains information on:
499
+ (1) A mapping from each gradient (backwards output) to the parameter
500
+ it corresponds to (forward input)
501
+ (2) A mapping from each gradient (backwards output) to the user input
502
+ it corresponds to (forward input)
503
+ (3) Which of the forward outputs corresponds to the loss, that we backprop on.
504
+
505
+ Each string name is the `node.name` of the corresponding node in the fx graph.
506
+ """
507
+
508
+ gradients_to_parameters: Dict[str, str]
509
+ gradients_to_user_inputs: Dict[str, str]
510
+ loss_output: str
511
+
512
+
513
+ GraphOutputName = NewType("GraphOutputName", str)
514
+ GraphInputName = NewType("GraphInputName", str)
515
+ FQN = NewType("FQN", str)
516
+
517
+
518
+ @dataclass
519
+ class GraphSignature:
520
+ """
521
+ Provides information about an exported module.
522
+ For a particular fx GraphModule, this class contains information on:
523
+ (1) Which graph inputs are parameters, buffers, or user inputs
524
+ (2) (for params/buffers) a mapping from the name of each graph argument
525
+ to its parameter/buffer FQN in the original nn.Module.
526
+ (3) If there are input mutations, these are represented as extra outputs
527
+ in the fx GraphModule. We provide a mapping from these
528
+ extra output names to the names of the actual inputs.
529
+ (4) The pytree metadata on how to flatten/unflatten inputs and outputs.
530
+ The corresponding FX GraphModule only accepts and returns
531
+ pytree-flattened inputs/outputs.
532
+ (5) (Optionally) if the FX is a joint forward-backward graph, we provide
533
+ a signature on the backward section of the joint graph.
534
+ """
535
+
536
+ parameters: List[FQN]
537
+ buffers: List[FQN]
538
+
539
+ user_inputs: List[GraphInputName]
540
+ user_outputs: List[GraphOutputName]
541
+ inputs_to_parameters: Dict[GraphInputName, FQN]
542
+ inputs_to_buffers: Dict[GraphInputName, FQN]
543
+
544
+ # If the user's module mutates a buffer,
545
+ # it's represented in the graph as an extra graph output.
546
+ # This dict is a mapping from
547
+ # "graph outputs that correspond to updated buffers"
548
+ # to the FQN names of those mutated buffers.
549
+ buffers_to_mutate: Dict[GraphOutputName, FQN]
550
+ user_inputs_to_mutate: Dict[GraphOutputName, GraphInputName]
551
+
552
+ in_spec: pytree.TreeSpec
553
+ out_spec: pytree.TreeSpec
554
+
555
+ backward_signature: Optional[BackwardSignature]
556
+
557
+ input_tokens: List[GraphInputName]
558
+ output_tokens: List[GraphOutputName]
559
+
560
+ @classmethod
561
+ def from_tracing_metadata(
562
+ cls,
563
+ *,
564
+ in_spec: pytree.TreeSpec,
565
+ out_spec: pytree.TreeSpec,
566
+ graph_input_names: List[str],
567
+ graph_output_names: List[str],
568
+ view_mutation_metadata: ViewAndMutationMeta,
569
+ named_parameters: List[str],
570
+ named_buffers: List[str],
571
+ num_user_inputs: int,
572
+ num_user_outputs: int,
573
+ loss_index: Optional[int],
574
+ backward_signature: Optional[BackwardSignature],
575
+ ) -> "GraphSignature":
576
+ graph_inputs = graph_input_names
577
+ graph_outputs = graph_output_names
578
+ parameters = list(named_parameters)
579
+ buffers = list(named_buffers)
580
+ num_tokens = len(view_mutation_metadata.tokens)
581
+
582
+ # Calling convention assumptions:
583
+ # (1) graph inputs = (input_tokens, params, buffers, user_inputs)
584
+ # (2) graph outputs = (output_tokens, mutated_inputs, user_outs, param_gradients)
585
+ # (If we are capturing an inference graph, this convention is identical
586
+ # except that param_gradients is empty)
587
+ # See Note [Side-Effectful Tokens in AOTAutograd] for information on tokens
588
+
589
+ # Address input calling conventions:
590
+ start, stop = 0, num_tokens
591
+ input_tokens = graph_inputs[start:stop]
592
+
593
+ start, stop = stop, stop + len(parameters)
594
+ inputs_to_parameters = dict(zip(graph_inputs[start:stop], parameters))
595
+
596
+ start, stop = stop, stop + len(buffers)
597
+ inputs_to_buffers = dict(
598
+ zip(
599
+ graph_inputs[start:stop],
600
+ buffers,
601
+ )
602
+ )
603
+
604
+ start, stop = stop, stop + num_user_inputs
605
+ user_inputs = graph_inputs[start:stop]
606
+
607
+ # We should've gone through all the inputs now
608
+ assert len(graph_inputs) - stop == 0
609
+
610
+ # Address output calling conventions:
611
+ start, stop = 0, num_tokens
612
+ output_tokens = graph_outputs[start:stop]
613
+
614
+ names = [*input_tokens, *parameters, *buffers, *user_inputs]
615
+ mutations = []
616
+ for idx, input_info in enumerate(view_mutation_metadata.input_info):
617
+ if input_info.mutates_data:
618
+ # Only buffers can be mutated, not parameters
619
+ assert idx >= len(parameters)
620
+ mutations.append(names[idx + num_tokens])
621
+
622
+ assert len(mutations) == view_mutation_metadata.num_mutated_inp_runtime_indices
623
+
624
+ start, stop = (
625
+ stop,
626
+ stop + view_mutation_metadata.num_mutated_inp_runtime_indices,
627
+ )
628
+ outputs_to_mutations = dict(zip(graph_outputs[start:stop], mutations))
629
+
630
+ user_inputs_to_mutate = {}
631
+ buffers_to_mutate = {}
632
+ for output_name, mutation_name in outputs_to_mutations.items():
633
+ if mutation_name in user_inputs:
634
+ user_inputs_to_mutate[output_name] = mutation_name
635
+ else:
636
+ assert mutation_name in buffers
637
+ buffers_to_mutate[output_name] = mutation_name
638
+
639
+ start, stop = stop, stop + num_user_outputs
640
+ user_outputs = graph_outputs[start:stop]
641
+
642
+ unused_outputs = len(graph_outputs) - stop
643
+ if backward_signature is not None:
644
+ unused_outputs -= len(backward_signature.gradients_to_parameters) + len(
645
+ backward_signature.gradients_to_user_inputs
646
+ )
647
+ assert unused_outputs == 0
648
+
649
+ return GraphSignature(
650
+ parameters=parameters, # type: ignore[arg-type]
651
+ buffers=buffers, # type: ignore[arg-type]
652
+ user_inputs=user_inputs, # type: ignore[arg-type]
653
+ user_outputs=user_outputs, # type: ignore[arg-type]
654
+ inputs_to_buffers=inputs_to_buffers, # type: ignore[arg-type]
655
+ inputs_to_parameters=inputs_to_parameters, # type: ignore[arg-type]
656
+ user_inputs_to_mutate=user_inputs_to_mutate,
657
+ buffers_to_mutate=buffers_to_mutate, # type: ignore[arg-type]
658
+ in_spec=in_spec,
659
+ out_spec=out_spec,
660
+ backward_signature=backward_signature,
661
+ input_tokens=input_tokens, # type: ignore[arg-type]
662
+ output_tokens=output_tokens, # type: ignore[arg-type]
663
+ )
664
+
665
+
666
+ @dataclass
667
+ class AOTConfig:
668
+ """
669
+ Configuration for AOTDispatcher
670
+ """
671
+
672
+ fw_compiler: Callable
673
+ bw_compiler: Callable
674
+ partition_fn: Callable
675
+ decompositions: Dict[Callable, Callable]
676
+ num_params_buffers: int
677
+ aot_id: int
678
+ keep_inference_input_mutations: bool
679
+ is_export: bool = False
680
+ no_tangents: bool = False
681
+ dynamic_shapes: bool = False
682
+ aot_autograd_arg_pos_to_source: Optional[List[Source]] = None
683
+ inference_compiler: Optional[Callable] = None
684
+ enable_log: bool = True
685
+ # this is always false outside of export.
686
+ pre_dispatch: bool = False
687
+
688
+ def __post_init__(self):
689
+ if self.pre_dispatch:
690
+ assert self.is_export, "Can only have pre_dispatch IR for export."
691
+
692
+
693
+ SubclassTracingInfo = collections.namedtuple(
694
+ "SubclassTracingInfo",
695
+ ["plain_tensor_trace_fn", "plain_tensor_args", "maybe_subclass_meta"],
696
+ )
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/subclass_utils.py ADDED
@@ -0,0 +1,295 @@
1
+ """
2
+ This file contains utilities for tracing through __torch_dispatch__ based tensor subclasses and modes.
3
+ AOTAutograd's responsibility is to trace through all pytorch capabilities that live in the pytorch dispatcher,
4
+ and this includes tensor subclasses that implement __torch_dispatch__.
5
+ """
6
+
7
+ from typing import Any, List, Optional, Tuple, Union
8
+
9
+ import torch.utils._pytree as pytree
10
+
11
+ from torch import Tensor
12
+ from torch.utils._python_dispatch import is_traceable_wrapper_subclass
13
+
14
+ from .schemas import MutationType, SubclassCreationMeta, ViewAndMutationMeta
15
+ from .utils import strict_zip
16
+
17
+ zip = strict_zip
18
+
19
+
20
+ def requires_subclass_dispatch(args, fw_metadata: ViewAndMutationMeta) -> bool:
21
+ args_flattened = pytree.arg_tree_leaves(*args)
22
+ any_subclass_args = any(
23
+ is_traceable_wrapper_subclass(x)
24
+ for x in args_flattened
25
+ if isinstance(x, Tensor)
26
+ )
27
+ from torch._functorch._aot_autograd.schemas import SubclassCreationMeta
28
+
29
+ any_subclass_outputs = any(
30
+ type(x) is SubclassCreationMeta for x in fw_metadata.subclass_fw_graph_out_meta
31
+ )
32
+ # This tells us whether or not we need to perform any unwrapping/wrapping of tensor subclasses at runtime.
33
+ return any_subclass_args or any_subclass_outputs
34
+
35
+
36
+ # Given a flat list of arguments, some of which may be tensor subclasses,
37
+ # computes metadata about "how to reconstruct the current list of subclasses,
38
+ # if we were given their flattened dense tensors instead"
39
+ def create_subclass_meta(
40
+ curr_args: Union[List[Any], Tuple[Any, ...]],
41
+ ) -> List[Union[int, SubclassCreationMeta]]:
42
+ idx = 0
43
+ infos: List[Union[int, SubclassCreationMeta]] = []
44
+ for a in curr_args:
45
+ if isinstance(a, Tensor) and is_traceable_wrapper_subclass(a):
46
+ attrs, meta = a.__tensor_flatten__() # type: ignore[attr-defined]
47
+ start_idx = idx
48
+ cnt = len(attrs)
49
+ curr_cnt = cnt
50
+ infos.append(
51
+ SubclassCreationMeta(
52
+ flat_tensor_start_idx=start_idx,
53
+ arg_count=curr_cnt,
54
+ original_subclass=a,
55
+ meta=meta,
56
+ inner_keys=attrs,
57
+ outer_size=a.shape,
58
+ outer_stride=a.stride(),
59
+ )
60
+ )
61
+ else:
62
+ infos.append(idx)
63
+ cnt = 1
64
+ idx += cnt
65
+ return infos
66
+
67
+
68
+ # Output structure:
69
+ # - List[Tensor] if tracing an inference graph
70
+ # - Tuple[List[Tensor], List[Tensor]] if tracing a joint graph.
71
+ # This function effectively concats each inner list of subclass tensors
72
+ # into a (potentially longer) list of inner tensors.
73
+ #
74
+ # This function takes in a pytree of arguments and unwraps any tensor subclasses.
75
+ # Annoyingly, we can't use pytrees to perform the unwrapping, because unwrapping returns
76
+ # a list of tensors that we would then need to concat together.
77
+ # Instead, we specialize the logic for the inference vs. joint graph case.
78
+ # NOTE: this function is hot, since we unwrap tensor subclass inputs at runtime
79
+ def unwrap_tensor_subclasses(wrapped_args, *, is_joint_structure: bool):
80
+ def concat_inner_tensors_from_subclasses(xs):
81
+ xs_inner = []
82
+ for x in xs:
83
+ if isinstance(x, Tensor) and is_traceable_wrapper_subclass(x):
84
+ attrs, _ = x.__tensor_flatten__() # type: ignore[attr-defined]
85
+ xs_inner += [getattr(x, attr) for attr in attrs]
86
+ else:
87
+ xs_inner += [x]
88
+ return xs_inner
89
+
90
+ if is_joint_structure:
91
+ assert isinstance(wrapped_args, tuple) and len(wrapped_args) == 2
92
+ assert isinstance(wrapped_args[0], (tuple, list)) and isinstance(
93
+ wrapped_args[1], (tuple, list)
94
+ )
95
+ unwrapped_args_fw = concat_inner_tensors_from_subclasses(wrapped_args[0])
96
+ unwrapped_args_tangents = concat_inner_tensors_from_subclasses(wrapped_args[1])
97
+ unwrapped_args = (unwrapped_args_fw, unwrapped_args_tangents)
98
+ else:
99
+ assert isinstance(wrapped_args, (list, tuple))
100
+ unwrapped_args_fw = concat_inner_tensors_from_subclasses(wrapped_args)
101
+ unwrapped_args = unwrapped_args_fw
102
+ return unwrapped_args
103
+
104
+
105
+ # Turns a flattened list of tensor arguments into (maybe) subclass tensors.
106
+ # This function is used both at trace time and runtime, so we have an is_runtime flag telling us which context we're in.
107
+ def wrap_tensor_subclasses(
108
+ unwrapped_args: Union[Tuple[Any, ...], List[Any]],
109
+ *,
110
+ subclass_metas: List[Union[int, SubclassCreationMeta]],
111
+ num_fw_outs_saved_for_bw: Optional[int] = None,
112
+ is_runtime: bool = False,
113
+ ) -> Tuple[Any, ...]:
114
+ wrapped_args = []
115
+ num_args_tallied = 0
116
+ for subclass_meta in subclass_metas:
117
+ if isinstance(subclass_meta, int):
118
+ wrapped_args.append(unwrapped_args[subclass_meta])
119
+ num_args_tallied += 1
120
+ else:
121
+ assert isinstance(subclass_meta, SubclassCreationMeta)
122
+ wrapped_args.append(
123
+ subclass_meta.creation_fn(unwrapped_args, is_runtime=is_runtime)
124
+ )
125
+ num_args_tallied += subclass_meta.arg_count
126
+
127
+ # Note: [Partitioner handling for Subclasses, Part 2]
128
+ # At the beginning of AOTAutograd, we collect metadata on the inputs and outputs of the user fw,
129
+ # to figure out which inputs/outputs are subclasses, and how to reconstruct the subclasses after flattening them.
130
+ #
131
+ # When this function is called at runtime in the forward,
132
+ # we have been passed a list of (flattened) dense-tensor fw-outs, and need to reconstruct any subclass fw outs.
133
+ #
134
+ # One reasonable question that you should ask: when should the dense_tensor -> subclass_tensor wrapping happen?
135
+ # Answer: we do it **inside of our compiled autograd.Function**.
136
+ # This seems like morally the right place: autograd happens above subclass desugaring,
137
+ # so autograd should see actual tensor subclasses at runtime, and not flattened dense tensors.
138
+ #
139
+ # This causes a tricky interaction though: when we run the min-cut partitioner to divvy up the joint graph
140
+ # into a forward and backward graph, we end up with some activations that show up as extra outputs
141
+ # in the compiled forward graph, that are **not** user outputs.
142
+ # These activations are not visible to the user, and so there's no need for us to wrap them back into subclasses.
143
+ #
144
+ # On top of that, when we first computed subclass metadata (in `run_functionalized_fw_and_collect_metadata`),
145
+ # we computed subclass metadata on every forward output, but this did **not** include activations
146
+ # created by the partitioner.
147
+ # as a result, `unwrapped_args` here will correspond to (*unwrapped_user_fw_outs, *activations),
148
+ # but `subclass_metas` will only correspond to subclass metadata on `user_fw_outs`.
149
+ # We then need to make sure that we return (*wrapped_user_fw_outs, *activations).
150
+ if num_fw_outs_saved_for_bw is not None:
151
+ assert len(unwrapped_args) == num_args_tallied + num_fw_outs_saved_for_bw, (
152
+ f"Expected the number actual unwrapped-subclass outputs {len(unwrapped_args)} to equal "
153
+ f"the number of args calculated from subclasses ({num_args_tallied}) plus the number of "
154
+ f"additional activations saved for the backward pass ({num_fw_outs_saved_for_bw})"
155
+ )
156
+ activations = unwrapped_args[num_args_tallied:]
157
+ if isinstance(wrapped_args, tuple) and isinstance(activations, tuple):
158
+ return wrapped_args + activations
159
+ return tuple(list(wrapped_args) + list(activations))
160
+ else:
161
+ assert len(unwrapped_args) == num_args_tallied
162
+ return tuple(wrapped_args)
163
+
164
+
165
+ # Given a bunch of "dense" tensor arguments, this function (potentially) wraps them into tensor subclasses.
166
+ # This function carefully handles the inference vs. joint cases:
167
+ # - when is_joint_structure is True, args is (primals, tangents)
168
+ # - when is_joint_structure is False, args is [*primals]
169
+ def wrap_tensor_subclasses_maybe_joint(
170
+ unwrapped_args, *, is_joint_structure: bool, meta: ViewAndMutationMeta
171
+ ) -> Union[Tuple[Any, ...], List[Any]]:
172
+ # Since this function is re-used for both inference and joint graphs,
173
+ if is_joint_structure:
174
+ assert isinstance(unwrapped_args, tuple) and len(unwrapped_args) == 2
175
+ assert isinstance(unwrapped_args[0], (tuple, list)) and isinstance(
176
+ unwrapped_args[1], (tuple, list)
177
+ )
178
+ primals, tangents = unwrapped_args[0], unwrapped_args[1]
179
+ wrapped_primals = wrap_tensor_subclasses(
180
+ primals, subclass_metas=meta.subclass_inp_meta
181
+ )
182
+ wrapped_tangents = wrap_tensor_subclasses(
183
+ tangents, subclass_metas=meta.subclass_tangent_meta
184
+ )
185
+ return (wrapped_primals, wrapped_tangents)
186
+ else:
187
+ wrapped_args = wrap_tensor_subclasses(
188
+ unwrapped_args, subclass_metas=meta.subclass_inp_meta
189
+ )
190
+ return wrapped_args
191
+
192
+
193
+ # TODO: UNUSED. delete?
194
+ def create_metadata_for_subclass(meta: ViewAndMutationMeta) -> ViewAndMutationMeta:
195
+ # input infos
196
+ input_info = []
197
+ for inp, subclass_meta in zip(meta.input_info, meta.subclass_inp_meta):
198
+ num_inps = 1 if isinstance(subclass_meta, int) else subclass_meta.arg_count
199
+ for _ in range(num_inps):
200
+ input_info.append(inp)
201
+
202
+ # output infos
203
+ output_info = []
204
+ subclass_out_meta_user_outs_only = meta.subclass_fw_graph_out_meta[
205
+ meta.num_mutated_inp_runtime_indices :
206
+ ]
207
+ if meta.num_intermediate_bases > 0:
208
+ subclass_out_meta_user_outs_only = subclass_out_meta_user_outs_only[
209
+ : -meta.num_intermediate_bases
210
+ ]
211
+ # sanity assert
212
+ assert len(meta.output_info) == len(subclass_out_meta_user_outs_only)
213
+ # Assume that the information on the output is shared by all of its inner tensors.
214
+ for out, subclass_meta in zip(meta.output_info, subclass_out_meta_user_outs_only):
215
+ num_outs = 1 if isinstance(subclass_meta, int) else subclass_meta.arg_count
216
+ for _ in range(num_outs):
217
+ output_info.append(out)
218
+
219
+ # A bit hacky, but we don't actually care about all of the metadata here.
220
+ # This metadata is used **underneath** both autograd and subclass de-sugaring,
221
+ # So all we really care about is stuff like:
222
+ # - num inputs/outputs (needed by the partitioner)
223
+ # - input mutations (**not** used today, since we don't handle input mutations inside the subclass,
224
+ # although we should handle this eventually)
225
+ # TODO: add a test case to assert we error when this happens, instead of getting silent correctness
226
+ num_intermediate_bases = None
227
+ keep_input_mutations = meta.keep_input_mutations
228
+ traced_tangents = None
229
+ subclass_inp_meta = None
230
+ subclass_fw_graph_out_meta = None
231
+ subclass_tangent_meta = None
232
+
233
+ metadata = ViewAndMutationMeta(
234
+ input_info=input_info, # type: ignore[arg-type]
235
+ output_info=output_info, # type: ignore[arg-type]
236
+ num_intermediate_bases=num_intermediate_bases, # type: ignore[arg-type]
237
+ keep_input_mutations=keep_input_mutations, # type: ignore[arg-type]
238
+ traced_tangents=traced_tangents, # type: ignore[arg-type]
239
+ subclass_inp_meta=subclass_inp_meta, # type: ignore[arg-type]
240
+ subclass_fw_graph_out_meta=subclass_fw_graph_out_meta, # type: ignore[arg-type]
241
+ subclass_tangent_meta=subclass_tangent_meta, # type: ignore[arg-type]
242
+ )
243
+ return metadata
244
+
245
+
246
+ def compute_inner_mutated_inp_indices_from_subclass_meta(
247
+ fw_metadata: ViewAndMutationMeta,
248
+ inner_metadata: ViewAndMutationMeta,
249
+ ) -> List[int]:
250
+ # Note: [Recomputing subclass mutation handling]
251
+ #
252
+ # Generally, if a subclass requires grad, its components will not require grad.
253
+ # But for the purposes of tracking returned tensors, we should treat those component
254
+ # tensors as if they require grad.
255
+ #
256
+ # For example, if the subclass tensor requires grad and will be mutated in a way that
257
+ # requires us to handle the mutation outside of the graph, we need to return it
258
+ # from the forward graph. The inner_meta data won't consider the component tensors
259
+ # as if they need to be returned, because they don't require grad; but really, we
260
+ # should handle those tensors the same way we handle the subclass tensor itself; i.e.
261
+ # if we'd include the subclass tensor as part of the outputs, then we should also
262
+ # include the component tensors.
263
+ #
264
+ # To do this, we patch num_mutated_inp_runtime_indices below by expanding the inputs
265
+ # from the outer subclass tensors and propagating
266
+
267
+ updated_input_info = []
268
+ inner_idx = 0
269
+ if not fw_metadata.subclass_inp_meta:
270
+ # Sometimes we don't have subclass info, e.g. synthetic_base codepaths
271
+ return inner_metadata.mutated_inp_runtime_indices
272
+ assert len(fw_metadata.subclass_inp_meta) == len(fw_metadata.input_info)
273
+ for outer_idx, inp_meta in enumerate(fw_metadata.subclass_inp_meta):
274
+ if isinstance(inp_meta, int):
275
+ assert outer_idx < len(fw_metadata.input_info)
276
+ if inner_metadata is not None:
277
+ assert inner_idx < len(inner_metadata.input_info)
278
+ assert (
279
+ inner_metadata.input_info[inner_idx]
280
+ == fw_metadata.input_info[outer_idx]
281
+ )
282
+ updated_input_info.append(fw_metadata.input_info[outer_idx])
283
+ inner_idx += 1
284
+ else:
285
+ for _ in range(inp_meta.arg_count):
286
+ updated_input_info.append(fw_metadata.input_info[outer_idx])
287
+ inner_idx += 1
288
+ if inner_metadata is not None:
289
+ assert len(inner_metadata.input_info) == len(updated_input_info)
290
+
291
+ return [
292
+ i
293
+ for i, inp in enumerate(updated_input_info)
294
+ if inp.mutation_type == MutationType.MUTATED_OUT_GRAPH
295
+ ]
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/traced_function_transforms.py ADDED
@@ -0,0 +1,698 @@
1
+ """
2
+ This module is responsible for transforming functions to be traced into a form
3
+ that is easier for the downstream infra (e.g. Autograd, FX, AOTAutograd analysis)
4
+ to handle.
5
+
6
+ It does so by:
7
+ 1. functionalization (including RNG functionalization)
8
+ 2. creating a joint graph when required
9
+ 3. transforming mutations into extra outputs
10
+ 4. dispatching subclasses
11
+ """
12
+
13
+ import warnings
14
+ from contextlib import nullcontext
15
+ from functools import wraps
16
+ from typing import Any, Callable, List, Tuple, Union
17
+ from unittest.mock import patch
18
+
19
+ import torch
20
+ import torch.fx.traceback as fx_traceback
21
+ import torch.utils._pytree as pytree
22
+ from torch import Tensor
23
+ from torch._decomp.decompositions_for_rng import PhiloxStateTracker
24
+ from torch._guards import detect_fake_mode
25
+ from torch._prims_common import CUDARngStateHelper
26
+ from torch.fx.experimental.symbolic_shapes import definitely_false, sym_eq
27
+ from torch.nn.utils import stateless
28
+
29
+ from .. import config
30
+ from .collect_metadata_analysis import run_functionalized_fw_and_collect_metadata
31
+ from .functional_utils import (
32
+ from_fun,
33
+ has_data_mutation,
34
+ has_metadata_mutation,
35
+ is_fun,
36
+ sync_functional_tensor,
37
+ to_fun,
38
+ )
39
+ from .logging_utils import setup_stacktrace_preservation_hooks
40
+ from .schemas import (
41
+ AOTConfig,
42
+ MutationType,
43
+ OutputType,
44
+ SubclassMeta,
45
+ SubclassTracingInfo,
46
+ ViewAndMutationMeta,
47
+ )
48
+ from .subclass_utils import (
49
+ create_subclass_meta,
50
+ requires_subclass_dispatch,
51
+ unwrap_tensor_subclasses,
52
+ wrap_tensor_subclasses_maybe_joint,
53
+ )
54
+ from .utils import maybe_to_fresh_input
55
+
56
+
57
+ # This function returns a new function that returns mutated inputs as outputs.
58
+ # if keep_data_input_mutations is set, then we assume that data-only mutations
59
+ # will be left in the graph, and we only return metadata-mutated inputs as outputs.
60
+ def fn_input_mutations_to_outputs(
61
+ fn: Callable,
62
+ meta: ViewAndMutationMeta,
63
+ keep_data_input_mutations: bool,
64
+ ) -> Any:
65
+ @wraps(fn)
66
+ def inner_fn(*args):
67
+ outs = fn(*args)
68
+ assert len(meta.output_info) == len(outs)
69
+ # The compiled fw will return mutated input tensors, *including* metadata-only mutation.
70
+ # However, if keep_data_input_mutations is set, the compiled fw only needs to return metadata-mutated inputs.
71
+ # (because data-only input mutations are handled directly in the compiled graph)
72
+ mutated_inputs_to_return = [
73
+ x for (i, x) in enumerate(args) if i in meta.mutated_inp_runtime_indices
74
+ ]
75
+ return *mutated_inputs_to_return, *outs
76
+
77
+ return inner_fn
78
+
79
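
Before the autograd-specific handling below, the core "input mutations become extra outputs" idea can be illustrated with a self-contained toy sketch (no ViewAndMutationMeta; the mutated input and the epilogue copy are hard-coded for this example):

import torch


def user_fn(x, y):
    # The user's function mutates x in-place and also reads y.
    x.mul_(2)
    return x + y


def functionalized_fn(x, y):
    # Functionalized flavor of user_fn: no in-place op escapes the graph;
    # the updated value of x is returned as an extra output instead.
    x_updated = x * 2
    out = x_updated + y
    return x_updated, out


x, y = torch.ones(3), torch.ones(3)
x_updated, out = functionalized_fn(x, y)
# A runtime epilogue would then apply the mutation back onto the real input:
x.copy_(x_updated)
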
+
80
+ # This function takes in a fn with external aliasing and mutation,
81
+ # and returns a new fn with no external aliasing and mutation,
82
+ # as needed for autograd.
83
+ # The main transformations are:
84
+ # - Return mutated inputs as extra outputs
85
+ # - Clone mutated inputs that require gradients,
86
+ # because autograd will require us to pass the pre-mutated inputs into autograd.grad
87
+ # - Return intermediate bases of outputs as additional outputs,
88
+ # needed to appease autograd.Function
89
+ # The new function returns:
90
+ # (1) The updated outputs
91
+ # (2) A boolean mask of len(new_fn_outputs),
92
+ # that can be used to tell autograd.grad which outputs should get tangents
93
+ # if we trace the backward.
94
+ def fn_prepped_for_autograd(
95
+ fn: Callable,
96
+ meta: ViewAndMutationMeta,
97
+ ) -> Any:
98
+ @wraps(fn)
99
+ def inner_fn(*args):
100
+ args_maybe_cloned = [
101
+ maybe_to_fresh_input(i, t, meta) for i, t in enumerate(args)
102
+ ]
103
+
104
+ outs = fn(*args_maybe_cloned)
105
+ assert isinstance(outs, (tuple, list))
106
+ outs = list(outs)
107
+ assert len(meta.output_info) == len(outs)
108
+
109
+ mutated_inputs_to_return = [
110
+ x
111
+ for (i, x) in enumerate(args_maybe_cloned)
112
+ if i in meta.mutated_inp_runtime_indices
113
+ ]
114
+
115
+ intermediate_bases = []
116
+ for i, (o, info) in enumerate(zip(outs, meta.output_info)):
117
+ if info.output_type == OutputType.alias_of_intermediate_save_as_output:
118
+ intermediate_bases.append(o._base)
119
+
120
+ assert meta.num_intermediate_bases == len(intermediate_bases)
121
+
122
+ # the compiled forward should return (mutated_inputs, user_outs, intermediate_bases)
123
+ fw_outs_to_return = *mutated_inputs_to_return, *outs, *intermediate_bases
124
+
125
+ # Also return a boolean mask specifying which outputs of this function will be used as tangents
126
+ mutated_inputs_grad_mask = [
127
+ meta.input_info[meta.mutated_inp_runtime_indices[i]].mutates_data
128
+ and meta.input_info[meta.mutated_inp_runtime_indices[i]].requires_grad
129
+ for (i, x) in enumerate(mutated_inputs_to_return)
130
+ ]
131
+
132
+ # Pass any (non-aliased) outputs in as tangents, since they'll be returned as outputs in the fw
133
+ # For outputs that are aliases of intermediates, we will have returned the output's _base as an output in the graph instead,
134
+ # which we *should* send to grad()
135
+ output_grad_mask = [
136
+ meta.output_info[i].output_type
137
+ in [
138
+ OutputType.non_alias,
139
+ OutputType.unsafe_view_alias,
140
+ OutputType.custom_function_view,
141
+ ]
142
+ # Also, only tensor outputs should participate in the backward
143
+ # (in particular, Symint outputs in the forward graph shouldn't get tangents)
144
+ and issubclass(meta.output_info[i].raw_type, Tensor)
145
+ and meta.output_info[i].requires_grad
146
+ for (i, x) in enumerate(outs)
147
+ ]
148
+
149
+ intermediate_base_grad_mask = [True for _ in range(len(intermediate_bases))]
150
+
151
+ out_grad_mask = (
152
+ mutated_inputs_grad_mask + output_grad_mask + intermediate_base_grad_mask
153
+ )
154
+ assert len(out_grad_mask) == len(fw_outs_to_return)
155
+
156
+ # Take care to grab and sync the updated inputs from primals_after_cloning (the inputs we actually mutate!)
157
+ # and not primals (the preserved inputs, pre-mutation, that we pass to grad())
158
+ # This is annoying: our joint function needs to be aware of functionalization
159
+ # (syncing mutated inputs before calling autograd.grad())
160
+ # In theory, we could make the autograd engine do this automatically, although that probably isn't any cleaner.
161
+ for arg in args_maybe_cloned:
162
+ if not isinstance(arg, Tensor):
163
+ continue
164
+ sync_functional_tensor(arg)
165
+
166
+ return fw_outs_to_return, out_grad_mask
167
+
168
+ return inner_fn
169
+
170
+
171
+ # Given a fn, computes the joint.
172
+ # NOTE: fn is expected to have the following behavior:
173
+ # (1) fn() needs to return a tuple of (outs, mask),
174
+ # where `mask` tells us which outputs are meant to have tangents.
175
+ # we don't know this info automatically, because we don't actually want to blindly
176
+ # compute tangents for every output that requires grad.
177
+ # Specifically, outputs that alias inputs won't participate in the backward, and so don't get tangents.
178
+ # (2) fn() cannot mutate any inputs that require gradient.
179
+ # otherwise, when we compute autograd.grad(), we will not take those input mutations into account
180
+ # (the way this is handled is that we ensure any inputs that normally get mutated are cloned first)
181
+ def create_joint(fn: Callable, *, aot_config: AOTConfig) -> Any:
182
+ def inner_fn(primals: List[Any], tangents: List[Any]):
183
+ outs, tangent_mask = fn(*primals)
184
+ assert len(tangent_mask) == len(outs)
185
+ outs_to_grad = [
186
+ o for needs_tangent, o in zip(tangent_mask, outs) if needs_tangent
187
+ ]
188
+ assert len(outs_to_grad) == len(tangents)
189
+
190
+ # Get the inputs that need gradients
191
+ grad_primals = []
192
+ inputs_needs_grads = []
193
+ # Note that we're not using primals here,
194
+ # being careful not to pass any mutated inputs into autograd.grad()
195
+ for p in primals:
196
+ is_grad_tensor = isinstance(p, Tensor) and p.requires_grad
197
+ inputs_needs_grads.append(is_grad_tensor)
198
+ if is_grad_tensor:
199
+ grad_primals.append(p)
200
+
201
+ # Get the outputs that need gradients
202
+ needed_outs = []
203
+ needed_tangents = []
204
+ for out, tangent in zip(outs_to_grad, tangents):
205
+ if isinstance(out, Tensor) and out.requires_grad:
206
+ # A bit sketchy, but fixes e.g. test_aot_autograd_exhaustive_matmul_cpu_float32
207
+ # The issue is that we are sensitive to decomps that don't accurately maintain
208
+ # their output's _base.shape compared to eager mode, and this helps mitigate a bit.
209
+ # The not definitely_false is also sketchy; if unbacked
210
+ # symints are involved, we're just going to assume that the
211
+ # decomps setup the base shape correctly
212
+ needed_outs.append(
213
+ out
214
+ if not definitely_false(sym_eq(out.shape, tangent.shape))
215
+ else out.view(tangent.shape)
216
+ )
217
+ needed_tangents.append(tangent)
218
+
219
+ setup_stacktrace_preservation_hooks([out.grad_fn for out in needed_outs])
220
+
221
+ if config.functionalize_rng_ops:
222
+ PhiloxStateTracker.mark_beginning_of_backward()
223
+ backward_out: Tuple[Tensor, ...] = tuple()
224
+ # Call the backwards pass
225
+ if grad_primals:
226
+ with fx_traceback.preserve_node_meta():
227
+ # for full graph export, we always export a joint graph where we assume no tangents are needed.
228
+ if aot_config.no_tangents:
229
+ assert len(needed_tangents) == 1 and needed_tangents[0].numel() == 1
230
+ backward_out = torch.autograd.grad(
231
+ needed_outs,
232
+ grad_primals,
233
+ allow_unused=True,
234
+ )
235
+ else:
236
+ backward_out = torch.autograd.grad(
237
+ needed_outs,
238
+ grad_primals,
239
+ grad_outputs=needed_tangents,
240
+ allow_unused=True,
241
+ )
242
+ backward_out_iter = iter(backward_out)
243
+ return outs, [
244
+ next(backward_out_iter) if i else None for i in inputs_needs_grads
245
+ ]
246
+
247
+ def inner_fn_with_anomaly(*args):
248
+ with fx_traceback.preserve_node_meta(), warnings.catch_warnings():
249
+ warnings.filterwarnings("ignore", "Anomaly Detection has been enabled.")
250
+ with torch.autograd.detect_anomaly(check_nan=False):
251
+ return inner_fn(*args)
252
+
253
+ return inner_fn_with_anomaly
254
+
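+ # For illustration only (a hedged sketch; `prepped_fn`, `x` and `tangent` are hypothetical,
+ # and `aot_config` stands for any AOTConfig): the fn passed to create_joint returns
+ # (outs, mask), and the joint returned by create_joint maps (primals, tangents) to
+ # (outs, grad_inputs):
+ #
+ #   def prepped_fn(x):
+ #       out = x.mul(2)
+ #       return (out,), [True]   # one output, and it should receive a tangent
+ #
+ #   joint = create_joint(prepped_fn, aot_config=aot_config)
+ #   outs, grad_inputs = joint([x], [tangent])
+ #   # grad_inputs[i] is None for any primal that does not require grad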
255
+
256
+ def create_functionalized_rng_ops_wrapper(func, args, trace_joint=True) -> Any:
257
+ # Functionalization of rng ops changes the calling convention of the joint graph.
258
+ # It goes from (primals, tangents) to (seed, offset, primals, tangents)
259
+ # At runtime, we pass on the current seed and offset. This is hidden from
260
+ # the user.
261
+ fake_mode = detect_fake_mode()
262
+ if fake_mode is None:
263
+ fake_mode = nullcontext()
264
+
265
+ def override_get_rng_state(device: Union[int, str, torch.device] = "cuda"):
266
+ out = PhiloxStateTracker.get_state_as_tensor()
267
+ return out
268
+
269
+ def override_set_rng_state(x, device: Union[int, str, torch.device] = "cuda"):
270
+ PhiloxStateTracker.set_state_from_tensor(x)
271
+
272
+ def append_rng_offsets(args):
273
+ if trace_joint:
274
+ # args signature before: Tuple(fwd_outputs), Tuple(bwd_outputs)
275
+ # args signature after: Tuple(fwd_outputs, new_fwd_rng_offset), Tuple(bwd_offset, new_bwd_rng_offset)
276
+ return (
277
+ (*args[0], PhiloxStateTracker.get_updated_fwd_offset()),
278
+ (*args[1], PhiloxStateTracker.get_updated_bwd_offset()),
279
+ )
280
+ else:
281
+ # args signature before: Tuple(fwd_outputs)
282
+ # args signature after: Tuple(fwd_outputs, new_fwd_rng_offset)
283
+ return (*args, PhiloxStateTracker.get_updated_fwd_offset())
284
+
285
+ def traced_joint(
286
+ primals, tangents, fwd_seed, fwd_base_offset, bwd_seed, bwd_base_offset
287
+ ):
288
+ with patch("torch.cuda.get_rng_state", override_get_rng_state), patch(
289
+ "torch.cuda.set_rng_state", override_set_rng_state
290
+ ):
291
+ return append_rng_offsets(func(primals, tangents))
292
+
293
+ def traced_forward(*primals_fwd_seed_fwd_base_offset):
294
+ # The signature is (*primals, seed, offset)
295
+ with patch("torch.cuda.get_rng_state", override_get_rng_state), patch(
296
+ "torch.cuda.set_rng_state", override_set_rng_state
297
+ ):
298
+ return append_rng_offsets(func(*primals_fwd_seed_fwd_base_offset[:-2]))
299
+
300
+ if trace_joint:
301
+ # Get the current seed and offset to setup tracing.
302
+ fwd_seed, fwd_base_offset = CUDARngStateHelper.get_torch_state_as_tuple(
303
+ fake_mode
304
+ )
305
+ bwd_seed, bwd_base_offset = CUDARngStateHelper.get_torch_state_as_tuple(
306
+ fake_mode
307
+ )
308
+ PhiloxStateTracker.record_state(fwd_seed, fwd_base_offset, "forward")
309
+ PhiloxStateTracker.record_state(bwd_seed, bwd_base_offset, "backward")
310
+ return traced_joint, (
311
+ *args,
312
+ fwd_seed,
313
+ fwd_base_offset,
314
+ bwd_seed,
315
+ bwd_base_offset,
316
+ )
317
+ else:
318
+ # Get the current seed and offset to setup tracing.
319
+ fwd_seed, fwd_base_offset = CUDARngStateHelper.get_torch_state_as_tuple(
320
+ fake_mode
321
+ )
322
+ PhiloxStateTracker.record_state(fwd_seed, fwd_base_offset, "forward")
323
+ return traced_forward, (*args, fwd_seed, fwd_base_offset)
324
+
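+ # For illustration only (a hedged signature sketch grounded in the code above): with rng
+ # functionalization enabled, a joint traced as joint(primals, tangents) is instead traced as
+ #
+ #   traced_joint(primals, tangents, fwd_seed, fwd_base_offset, bwd_seed, bwd_base_offset)
+ #
+ # and a forward-only graph traced as fw(*primals) is instead traced as
+ #
+ #   traced_forward(*primals, fwd_seed, fwd_base_offset)
+ #
+ # with the updated philox offsets appended to the outputs by append_rng_offsets().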
325
+
326
+ # This creates the final function that we want to trace using make_fx(),
327
+ # in both aot_dispatch_autograd and aot_dispatch_base.
328
+ # Preconditions:
329
+ # - fn corresponds to the user's fw function
330
+ # - fn arguments have been flattened, duplicate arguments have been handled
331
+ # - In the returned function, the "primals" arguments *includes* synthetic bases.
332
+ # This function does the work of functionalizing the input function,
333
+ # and performing copy_() calls at the end of the function if `keep_input_mutations` is set.
334
+ # The function returned has signature that is either:
335
+ # (1) "traced_fn(primals: List[Any])" if trace_joint is False
336
+ # (2) "traced_fn(primals: List[Any], tangents: List[Any])" if trace_joint is True
337
+ # Returns a new (functionalized) function, and updated arguments to call it with.
338
+ def create_functionalized_fn(
339
+ fn,
340
+ args,
341
+ *,
342
+ meta: ViewAndMutationMeta,
343
+ aot_config: AOTConfig,
344
+ trace_joint: bool,
345
+ ) -> Any:
346
+ @wraps(fn)
347
+ def _functionalized_f_helper(*args):
348
+ # See Note [Disabling Functionalize TLS Above Python Functionalization]
349
+ disable_above = torch._C._ExcludeDispatchKeyGuard(
350
+ torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
351
+ )
352
+
353
+ # See Note [Side-Effectful Tokens in AOTAutograd]
354
+ if trace_joint:
355
+ assert (
356
+ isinstance(args, tuple)
357
+ and len(args) == 2
358
+ and isinstance(args[0], (list, tuple))
359
+ )
360
+ tokens = args[0][: len(meta.tokens)]
361
+ actual_args = args[0][len(meta.tokens) :]
362
+ args = (actual_args, args[1])
363
+ else:
364
+ tokens = args[: len(meta.tokens)]
365
+ args = args[len(meta.tokens) :]
366
+ assert all(token.numel() == 0 for token in tokens)
367
+
368
+ with disable_above:
369
+ # Wrap inputs into functional wrappers
370
+ f_args = pytree.tree_map(to_fun, args)
371
+ f_tokens = pytree.tree_map(to_fun, tokens)
372
+
373
+ # Populate the current FunctionalTensorMode with the tokens per
374
+ # operator. See Note [FunctionalTensorMode is Stateful]
375
+ functional_tensor_mode = (
376
+ torch.utils._python_dispatch._detect_functional_mode()
377
+ )
378
+ assert functional_tensor_mode is not None
379
+ for i, k in enumerate(meta.tokens.keys()):
380
+ functional_tensor_mode._tokens[k] = f_tokens[i]
381
+
382
+ # Run the joint
383
+ f_outs = fn(*f_args)
384
+
385
+ # Return both the tokens and the outputs
386
+ # See Note [Side-Effectful Tokens in AOTAutograd]
387
+ f_outs = (*functional_tensor_mode._tokens.values(), *f_outs)
388
+
389
+ if trace_joint:
390
+ # We support a limited amount of mutation of graph inputs during the backward pass.
391
+ # (This is used e.g. by Float8, which needs to update buffers during the backward pass)
392
+ # Here, we perform extra checks for primals that were mutated in the **backward**
393
+ # We're doing the checks here instead of doing them with the rest of the input mutation handling because:
394
+ # - We need to detect inputs that were mutated in the backward **separately** from mutations that happened
395
+ # during the forward, because the handling is different: some input mutations from the forward
396
+ # can be only handled in a fw-only runtime epilogue, and in theory if we wanted to handle those same
397
+ # types of mutations in the backward we would need a bw-only runtime epilogue.
398
+ # - We could in theory have our analysis pass differentiate mutations in the fw from mutations in
399
+ # the bw by running our analysis first on the fw-only graph, and then on the joint graph. This would
400
+ # require an extra round of tracing though, so it's more efficient to do in-line here.
401
+ assert (
402
+ isinstance(args, tuple)
403
+ and len(args) == 2
404
+ and isinstance(args[0], (list, tuple))
405
+ )
406
+ # Only look at mutations that happened to forward inputs (e.g. fw buffers that were saved for bw)
407
+ primals_before = args[0]
408
+ primals_after = pytree.tree_map(from_fun, f_args[0])
409
+ for f_inpt, before, after, inpt_info in zip(
410
+ f_args[0], primals_before, primals_after, meta.input_info
411
+ ):
412
+ # Ban metadata mutations on fw inputs during the bw
413
+ if not inpt_info.mutates_metadata:
414
+ assert not has_metadata_mutation(
415
+ f_inpt, before, check_only_storage_mutation=False
416
+ ), "Found a graph input that had its metadata mutated in the backward. This is not supported"
417
+ # Allow data mutations on fw inputs during the bw, but only if they do not require grad
418
+ # So we can guarantee that we can keep the mutations in the graph
419
+ if has_data_mutation(f_inpt) and not inpt_info.mutates_data:
420
+ assert (
421
+ not inpt_info.requires_grad
422
+ ), "Found a graph input that requires_grad and was mutated in the backward. This is not supported"
423
+ # Otherwise, put the mutation in the graph
424
+ before.copy_(after)
425
+ # Now that we covered mutations to *forward* inputs during the backward,
426
+ # we also need to cover mutations to *backward-only* inputs during the backward (e.g. mutation to a grad_out).
427
+ # Today, we will just error in all cases of this happening unless someone needs us to support it.
428
+ tangents_before = args[1]
429
+ tangents_after = pytree.tree_map(from_fun, f_args[1])
430
+ for f_inpt, before, after in zip(
431
+ f_args[1], tangents_before, tangents_after
432
+ ):
433
+ assert not has_metadata_mutation(
434
+ f_inpt, before, check_only_storage_mutation=False
435
+ ) and not has_data_mutation(
436
+ f_inpt
437
+ ), "Found an input to the backward that was mutated during the backward pass. This is not supported"
438
+
439
+ if aot_config.keep_inference_input_mutations:
440
+ # Note: This is a bit annoying. There's a layering issue here, where:
441
+ # (1) functionalization needs to operate on **synthetic base** inputs, before unpacking them into the "real" inputs.
442
+ # (2) For keep_input_mutations, we support tracing a call to copy_() directly on mutated inputs.
443
+ # However, we **only** want to support this for inputs that have data-only (and no metadata) mutations,
444
+ # because inductor (and backends in general) would prefer not to see these (e.g. as_strided_(), resize_()).
445
+ # This makes it pretty difficult for this logic to operate on synthetic bases.
446
+ # (3) In addition, there are cases where it's significantly cheaper to perform the copy on the individual
447
+ # (unpacked) input aliases, instead of the synthetic base.
448
+ # Example case where (3) could be important:
449
+ #
450
+ # def f(x, y):
451
+ # x.mul_(2)
452
+ # y.mul_(3)
453
+ # return x, y
454
+ # a = torch.ones(1_000_000)
455
+ # x, y = f(a[0:9], a[1:10])
456
+ #
457
+ # It would be much better to add copy_() calls into the graph for the two tiny slices, instead of materializing
458
+ # a giant "updated synthetic base" and copying into a's entire storage.
459
+ #
460
+ # For now, we are pessimistically not performing the optimization from (3);
461
+ # we will materialize an "updated" synthetic base, and copy it back to the synthetic input base.
462
+ # This allows us to factor aot autograd much more nicely, since only one area of the code needs to worry
463
+ # about synthetic bases.
464
+ for i, (inpt_old, inpt_f) in enumerate(
465
+ zip(args, f_args) if not trace_joint else zip(args[0], f_args[0])
466
+ ):
467
+ if not isinstance(inpt_f, torch.Tensor):
468
+ continue
469
+ assert is_fun(inpt_f)
470
+ inpt_new = from_fun(inpt_f)
471
+ if meta.input_info[i].mutation_type == MutationType.MUTATED_IN_GRAPH:
472
+ # We found an input that had a (data-only) mutation.
473
+ # Since keep_input_mutations is set, we need to faithfully apply a copy_()
474
+ # so the compiler will see the input mutation in the graph.
475
+ if meta.input_info[i].mutations_hidden_from_autograd:
476
+ # Hidden from autograd = run under no_grad, **and** don't bump VC
477
+ with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter(
478
+ inpt_old
479
+ ):
480
+ inpt_old.copy_(inpt_new)
481
+ elif meta.input_info[i].mutations_under_no_grad_or_inference_mode:
482
+ # Under no_grad = run under no_grad (we still bump the VC though)
483
+ # (inference_mode will also bump the VC, as long as the tensor in question
484
+ # was created outside of inference_mode)
485
+ with torch.no_grad():
486
+ inpt_old.copy_(inpt_new)
487
+ else:
488
+ inpt_old.copy_(inpt_new)
489
+
490
+ # When an output tensor is a functionalized mutated input, and we
491
+ # were able to move the mutation into the graph then we can return
492
+ # the mutated input directly. This prevents duplicating the
493
+ # tensor's contents.
494
+ flat_outs, outs_spec = pytree.tree_flatten(f_outs)
495
+ flat_outs = [from_fun(o) for o in flat_outs]
496
+ num_outs = len(meta.output_info)
497
+
498
+ for i, outp in enumerate(flat_outs[:num_outs]):
499
+ info = meta.output_info[i]
500
+ if info.output_type != OutputType.is_input:
501
+ continue
502
+
503
+ assert info.base_idx is not None
504
+ if (
505
+ meta.input_info[info.base_idx].mutation_type
506
+ == MutationType.MUTATED_IN_GRAPH
507
+ ):
508
+ flat_outs[i] = args[info.base_idx]
509
+ return pytree.tree_unflatten(flat_outs, outs_spec)
510
+
511
+ return pytree.tree_map(from_fun, f_outs)
512
+
513
+ # Kinda annoying, but needed to make sure that the fx graph we trace out has "primals"
514
+ # and "tangents" as its input names (which are special-cased by the partitioner)
515
+ # TODO (tmanlaibaatar) revisit this if we ever need to turn on non-strict joint graph export
516
+ def joint_helper(primals, tangents):
517
+ return _functionalized_f_helper(primals, tangents)
518
+
519
+ helper = joint_helper if trace_joint else _functionalized_f_helper
520
+ if config.functionalize_rng_ops:
521
+ # Setup the wrapper for functionalization of rng ops
522
+ helper, args = create_functionalized_rng_ops_wrapper(helper, args, trace_joint)
523
+
524
+ # Additionally pass in tokens as inputs
525
+ # See Note [Side-Effectful Tokens in AOTAutograd]
526
+ additional_token_inputs = [torch.tensor([])] * len(meta.tokens)
527
+ if trace_joint:
528
+ args = ([*additional_token_inputs, *args[0]], *args[1:])
529
+ else:
530
+ args = [*additional_token_inputs, *args]
531
+
532
+ return helper, args
533
+
534
+
535
+ # Given a function operating on Subclass -> Subclass, returns a function that operates on Tensor -> Tensor
536
+ # Also returns:
537
+ # - the new set of arguments to pass into this function (now that tensor subclasses have been eliminated)
538
+ # - the updated ViewAndMutationMeta for this dense -> dense function.
539
+ # The other important arguments are:
540
+ # - flat_fn_maybe_joint: when is_joint_structure=True, this is the joint fw-bw function.
541
+ # when is_joint_structure=False, this is just the forward function.
542
+ # - fw_only: this is *always* the forward-only function.
543
+ # Why do we need this? We need to collect updated ViewAndMutationMeta on our new dense -> dense functions.
544
+ # In particular, we need this to tell the partitioner how many dense forward outputs there are.
545
+ def aot_dispatch_subclass(
546
+ flat_fn_maybe_joint,
547
+ args: List[Any],
548
+ *,
549
+ is_joint_structure: bool,
550
+ meta: ViewAndMutationMeta,
551
+ fw_only: Callable,
552
+ ) -> SubclassTracingInfo:
553
+ # Skip logic if we don't need to trace through any subclasses
554
+ req_subclass_dispatch = requires_subclass_dispatch(args, meta)
555
+ if not req_subclass_dispatch:
556
+ return SubclassTracingInfo(
557
+ plain_tensor_trace_fn=flat_fn_maybe_joint,
558
+ plain_tensor_args=args,
559
+ maybe_subclass_meta=None,
560
+ )
561
+
562
+ # TODO: add subclass guards (later PR).
563
+
564
+ # What's going on here? We need to compute subclass metadata about the outputs of the joint (grad_inputs).
565
+ # Annoying: we don't know the grad input metas until we're in the middle of tracing the joint,
566
+ # so we set it later, while we're tracing the joint (see inner_fn() below).
567
+ # Another option would be to run our run_functionalized_fw_and_collect_metadata() function
568
+ # directly on the joint, but this would hurt compile time (adding yet another pass through the joint).
569
+ subclass_meta = SubclassMeta()
570
+
571
+ def inner_fn(fn, args, *, use_trace_joint: bool):
572
+ # Step 1: wrap tensor inputs into subclasses if necessary
573
+ all_args = wrap_tensor_subclasses_maybe_joint(
574
+ args, is_joint_structure=use_trace_joint, meta=meta
575
+ )
576
+
577
+ # Step 2: call the inner function, with our (maybe subclass) inputs
578
+ wrapped_outs = fn(*all_args)
579
+
580
+ if use_trace_joint:
581
+ # See Note: [Computing Subclass Metadata about grad_inputs]
582
+ # We also stash subclass info on our grad_inputs, if we're tracing the joint.
583
+ nonlocal subclass_meta
584
+ assert isinstance(wrapped_outs, tuple) and len(wrapped_outs) == 2
585
+ # Don't need fw outs since we already have subclass metadata on them
586
+ grad_inputs = wrapped_outs[1]
587
+ subclass_meta.grad_input_metas = create_subclass_meta(grad_inputs)
588
+
589
+ # Step 3: Unwrap any subclass outputs back into dense tensors
590
+ unwrapped_outs = unwrap_tensor_subclasses(
591
+ wrapped_outs, is_joint_structure=use_trace_joint
592
+ )
593
+ return unwrapped_outs
594
+
595
+ def joint_fn(primals, tangents):
596
+ return inner_fn(flat_fn_maybe_joint, (primals, tangents), use_trace_joint=True)
597
+
598
+ def fw_fn(*primals):
599
+ return inner_fn(flat_fn_maybe_joint, primals, use_trace_joint=False)
600
+
601
+ def metadata_fn(*primals):
602
+ return inner_fn(fw_only, primals, use_trace_joint=False)
603
+
604
+ args_unwrapped = unwrap_tensor_subclasses(
605
+ args, is_joint_structure=is_joint_structure
606
+ )
607
+
608
+ if is_joint_structure:
609
+ primals_unwrapped = args_unwrapped[0]
610
+ fn_to_trace = joint_fn
611
+ else:
612
+ primals_unwrapped = args_unwrapped
613
+ fn_to_trace = fw_fn
614
+
615
+ # Note: [Partitioner handling for Subclasses, Part 1]
616
+ # The way the partitioner works is that:
617
+ # (1) we pass in a single graph containing the joint fw/bw,
618
+ # where the # of graph outputs corresponds to # fw_outputs + # grad_inputs
619
+ # (2) The partitioner accepts an argument, num_fwd_outputs,
620
+ # and assumes that the first "num_fwd_outputs" graph outputs correspond
621
+ # to outputs of the forward graph.
622
+ # How do tensor subclasses enter the picture?
623
+ # the num_fwd_outputs in the final graph is actually non-trivial to compute,
624
+ # because it can be influenced by input mutations and intermediate bases.
625
+ # So we compute it by inspecting the current ViewAndMutationMeta object.
626
+ # However, the original ViewAndMutationMeta that we computed was created
627
+ # on the subclass -> subclass graph,
628
+ # which can have a different number of outputs than the dense -> dense graph.
629
+ # That's why we create a fresh metadata object on the dense -> dense function here,
630
+ # and plumb it back up to the partitioner.
631
+ # See Note: [Partitioner handling for Subclasses, Part 2] for more info.
632
+ meta_updated = run_functionalized_fw_and_collect_metadata(
633
+ metadata_fn,
634
+ keep_input_mutations=meta.keep_input_mutations,
635
+ is_train=meta.is_train,
636
+ )(*primals_unwrapped)
637
+
638
+ subclass_meta.fw_metadata = meta_updated
639
+
640
+ return SubclassTracingInfo(
641
+ plain_tensor_trace_fn=fn_to_trace,
642
+ plain_tensor_args=args_unwrapped,
643
+ maybe_subclass_meta=subclass_meta,
644
+ )
645
+
646
+
647
+ class PropagateUnbackedSymInts(torch.fx.Interpreter):
648
+ def run_node(self, n: torch.fx.Node):
649
+ import sympy
650
+
651
+ result = super().run_node(n)
652
+ # TODO: handle Tensor returns
653
+ if "example_value" in n.meta:
654
+ if isinstance(result, torch.SymInt) and isinstance(
655
+ result.node.expr, sympy.Symbol
656
+ ):
657
+ torch._check(result == n.meta["example_value"])
658
+
659
+ return result
660
+
661
+
662
+ def create_functional_call(mod, params_spec, params_len, store_orig_mod=False):
663
+ # Redundant with dynamo, but worth having in case this gets invoked elsewhere.
664
+ # https://github.com/pytorch/pytorch/issues/103569
665
+
666
+ def functional_call(*args, **kwargs):
667
+ with stateless._reparametrize_module(
668
+ mod, pytree.tree_unflatten(args[:params_len], params_spec)
669
+ ):
670
+ if isinstance(mod, torch.fx.GraphModule):
671
+ with fx_traceback.preserve_node_meta(), warnings.catch_warnings():
672
+ warnings.filterwarnings(
673
+ "ignore", "Anomaly Detection has been enabled."
674
+ )
675
+ with torch.autograd.detect_anomaly(check_nan=False):
676
+ out = PropagateUnbackedSymInts(mod).run(
677
+ *args[params_len:], **kwargs
678
+ )
679
+ else:
680
+ out = mod(*args[params_len:], **kwargs)
681
+
682
+ if not isinstance(out, (tuple, list)):
683
+ raise RuntimeError(
684
+ "Graph output must be a tuple(). This is so that we can avoid "
685
+ "pytree processing of the outputs. Please change the module to "
686
+ "have tuple outputs or use aot_module instead."
687
+ )
688
+ return out
689
+
690
+ # Note [Preserving the nn module stack metadata during export non-strict mode]
691
+ # This path is currently only used by the non-strict export flow,
692
+ # where we cannot rely on dynamo to preserve nn stack metadata in our captured graph.
693
+ # Instead, we stash the original user nn module here, and rely on `make_fx` to grab
694
+ # this stashed module and use it to track nn module stack metadata
695
+ if store_orig_mod and not hasattr(functional_call, "_orig_mod"):
696
+ functional_call._orig_mod = mod # type: ignore[attr-defined]
697
+
698
+ return functional_call
venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/utils.py ADDED
@@ -0,0 +1,226 @@
1
+ """
2
+ Contains various utils for AOTAutograd, including those for handling collections.
3
+ """
4
+
5
+ import dataclasses
6
+ import warnings
7
+ from contextlib import nullcontext
8
+ from functools import wraps
9
+ from typing import Any, Callable, List, Optional, Tuple
10
+
11
+ import torch
12
+ import torch.utils._pytree as pytree
13
+ from torch.fx.experimental._backward_state import BackwardState
14
+ from torch.fx.experimental.proxy_tensor import py_sym_types
15
+
16
+ KNOWN_TYPES = [
17
+ torch.Tensor,
18
+ BackwardState,
19
+ int,
20
+ str,
21
+ float,
22
+ bool,
23
+ type(None),
24
+ *py_sym_types,
25
+ ]
26
+
27
+ original_zip = zip
28
+
29
+
30
+ def strict_zip(*iterables, strict=True, **kwargs):
31
+ if not strict:
32
+ return original_zip(*iterables, **kwargs)
33
+
34
+ shortest_length = min(len(it) for it in iterables)
35
+ for iterable in iterables:
36
+ if len(iterable) != shortest_length:
37
+ raise ValueError(
38
+ "The iterables have different lengths and strict mode is enabled."
39
+ )
40
+
41
+ return original_zip(*iterables, **kwargs)
42
+
43
+
44
+ def _get_symint_hints(exprs):
45
+ """
46
+ Get the hints of a list/tuple of int/SymInt.
47
+ """
48
+ if isinstance(exprs, (list, tuple)):
49
+ return type(exprs)(_get_symint_hints(e) for e in exprs)
50
+ elif isinstance(exprs, torch.SymInt):
51
+ return exprs.node.shape_env.size_hint(exprs.node.expr)
52
+ else:
53
+ return exprs
54
+
55
+
56
+ def partial_flatten_asdict(obj: Any) -> Any:
57
+ if dataclasses.is_dataclass(obj):
58
+ return {
59
+ field.name: getattr(obj, field.name) for field in dataclasses.fields(obj)
60
+ }
61
+ elif isinstance(obj, (list, tuple)):
62
+ return obj.__class__([partial_flatten_asdict(item) for item in obj])
63
+ elif isinstance(obj, dict):
64
+ return {k: partial_flatten_asdict(v) for k, v in obj.items()}
65
+ else:
66
+ return obj
67
+
68
+
69
+ def normalize_as_list(x):
70
+ if isinstance(x, tuple):
71
+ return list(x)
72
+ elif isinstance(x, list):
73
+ return x
74
+ return [x]
75
+
76
+
77
+ def _get_autocast_states():
78
+ return [
79
+ torch.is_autocast_enabled(),
80
+ torch.is_autocast_cpu_enabled(),
81
+ torch.get_autocast_gpu_dtype(),
82
+ torch.get_autocast_cpu_dtype(),
83
+ torch.is_autocast_cache_enabled(),
84
+ ]
85
+
86
+
87
+ def make_boxed_func(f):
88
+ def g(args):
89
+ return f(*args)
90
+
91
+ g._boxed_call = True # type: ignore[attr-defined]
92
+ return g
93
+
94
+
95
+ def make_boxed_compiler(compiler):
96
+ @wraps(compiler)
97
+ def f(fx_g, inps):
98
+ out_f = compiler(fx_g, inps)
99
+ fx_g = make_boxed_func(out_f)
100
+ return fx_g
101
+
102
+ return f
103
+
104
+
105
+ def call_func_at_runtime_with_args(f, args, steal_args=False, disable_amp=False):
106
+ if not steal_args:
107
+ args = list(args)
108
+ assert isinstance(args, list)
109
+
110
+ context = torch._C._DisableAutocast if disable_amp else nullcontext
111
+ with context():
112
+ if hasattr(f, "_boxed_call"):
113
+ out = normalize_as_list(f(args))
114
+ else:
115
+ # TODO: Please remove soon
116
+ # https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670
117
+ warnings.warn(
118
+ "Your compiler for AOTAutograd is returning a function that doesn't take boxed arguments. "
119
+ "Please wrap it with functorch.compile.make_boxed_func or handle the boxed arguments yourself. "
120
+ "See https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670 for rationale."
121
+ )
122
+ out = normalize_as_list(f(*args))
123
+ return out
124
+
125
+
126
+ # Inspired by autodidax (thanks!)
127
+ class PytreeThunk:
128
+ spec: Optional[pytree.TreeSpec] = None
129
+ # These are some kinda dumb microoptimizations that save about 3-4 us of overhead.
130
+ is_simple: Optional[
131
+ bool
132
+ ] = None # if the output spec is a tuple/list, we won't bother unflattening it.
133
+ is_really_simple: Optional[bool] = None # if the output spec is a LeafSpec
134
+
135
+ def set(self, spec: pytree.TreeSpec) -> None:
136
+ assert self.spec is None or self.spec == spec
137
+ assert spec is not None
138
+ self.spec: pytree.TreeSpec = spec
139
+ if self.spec.type in {tuple, list} and all(
140
+ child.is_leaf() for child in spec.children_specs
141
+ ):
142
+ self.is_simple = True
143
+ if self.spec.is_leaf():
144
+ self.is_really_simple = True
145
+
146
+ def unflatten(self, x: List[Any]) -> Any:
147
+ if self.is_really_simple:
148
+ return x[0]
149
+ if self.is_simple:
150
+ return x
151
+ assert self.spec is not None
152
+ return pytree.tree_unflatten(x, self.spec)
153
+
154
+
155
+ # Creates a function that returns flattened inputs and outputs
156
+ # Also returns the output tree spec, which is needed to recover the "unflattened"
157
+ # output tree structure later.
158
+ def create_tree_flattened_fn(fn, args, kwargs=None) -> Tuple[Callable, PytreeThunk]:
159
+ if kwargs is None:
160
+ kwargs = {}
161
+ # Save the args_spec for flat_tensor_args to unflatten while tracing
162
+ _, tensor_args_spec = pytree.tree_flatten((args, kwargs))
163
+ out_spec = PytreeThunk()
164
+
165
+ def flat_fn(*flat_args):
166
+ # The input are flattened tensor args. Prepare the args in the
167
+ # order that original function expects. Add static args as well.
168
+ # They will appear as tensor constants in the traced graph.
169
+ nonlocal out_spec
170
+ args, kwargs = pytree.tree_unflatten(flat_args, tensor_args_spec)
171
+ tree_out = fn(*args, **kwargs)
172
+ flat_out, spec = pytree.tree_flatten(tree_out)
173
+ for i in flat_out:
174
+ is_known_type = False
175
+ for j in KNOWN_TYPES:
176
+ if isinstance(i, j):
177
+ is_known_type = True
178
+ break
179
+ if not is_known_type:
180
+ raise RuntimeError(
181
+ f"Found {type(i)} in output, which is not a known type. "
182
+ "If this type holds tensors, you need to register a pytree for it. "
183
+ "See https://github.com/pytorch/functorch/issues/475 for a brief "
184
+ "explanation why. If you don't need to register a pytree, please "
185
+ "leave a comment explaining your use case and we'll make this more "
186
+ "ergonomic to deal with"
187
+ )
188
+ out_spec.set(spec)
189
+ return flat_out
190
+
191
+ # Can't use functools.wraps here because the wrapper has different
192
+ # calling convention
193
+ if hasattr(fn, "_orig_mod"):
194
+ flat_fn._orig_mod = fn._orig_mod # type: ignore[attr-defined]
195
+
196
+ return flat_fn, out_spec
197
+
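+ # For illustration only (a hedged sketch; `g` is hypothetical): create_tree_flattened_fn
+ # turns a pytree-returning function into one that takes and returns flat lists of leaves:
+ #
+ #   def g(d):
+ #       return {"out": d["x"] * 2}
+ #
+ #   args, kwargs = ({"x": torch.ones(2)},), {}
+ #   flat_fn, out_spec = create_tree_flattened_fn(g, args, kwargs)
+ #   flat_args, _ = pytree.tree_flatten((args, kwargs))
+ #   flat_out = flat_fn(*flat_args)       # [tensor([2., 2.])]
+ #   out_spec.unflatten(flat_out)         # {"out": tensor([2., 2.])}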
198
+
199
+ # This function takes in a tensor t, and returns one of t, t.view(), or t.clone().
200
+ # When tracing the joint forward + backward, for any inputs in the graph that are mutated,
201
+ # we need to clone them first (and similarly for metadata-only mutations, we need to view them first).
202
+ # The idea is that when we trace the backward, we need to pass in the *original* primals
203
+ # to autograd.grad(), before they were mutated.
204
+ # Note: when we have synthetic base inputs, we need to clone them *before* creating views off of them.
205
+ # This means that "idx" here represents the index of the (potentially) synthetic base.
206
+ # What we need to do is:
207
+ # (1) map the current (post-synthetic-base calling convention) input argument index
208
+ # to int index pre-synthetic-base-calling-convention.
209
+ # (2) There could be multiple, if this index corresponds to a synthetic base
210
+ # that has multiple input aliases.
211
+ # (3) If any of those corresponding inputs get metadata mutations, then we clone the base.
212
+ def maybe_to_fresh_input(idx, t, meta):
213
+ if not isinstance(t, torch.Tensor):
214
+ return t
215
+ if idx in meta.mutated_inp_runtime_indices:
216
+ # We only need to bother cloning mutated inputs that participate in autograd.
217
+ mutated_inp_idx = meta.mutated_inp_runtime_indices.index(idx)
218
+ if meta.input_info[idx].requires_grad and meta.input_info[idx].mutates_data:
219
+ # Make sure the primal we pass to autograd.grad()
220
+ # sees the tensor before the mutation
221
+ return t.clone()
222
+ if meta.input_info[idx] and meta.input_info[idx].mutates_metadata:
223
+ # Make sure the primal we pass to autograd.grad()
224
+ # sees the tensor before the metadata mutation
225
+ return t.view(t.shape)
226
+ return t
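+
+ # For illustration only (a hedged sketch; the indices are hypothetical): if input 0 is
+ # data-mutated and requires grad, input 1 only has a metadata mutation, and input 2 is not
+ # mutated at all (with 0 and 1 in meta.mutated_inp_runtime_indices), then:
+ #
+ #   maybe_to_fresh_input(0, x, meta)   # -> x.clone(): keep the pre-mutation values around
+ #   maybe_to_fresh_input(1, y, meta)   # -> y.view(y.shape): a fresh view whose metadata
+ #                                      #    can be mutated without touching the original
+ #   maybe_to_fresh_input(2, z, meta)   # -> z: passed through unchanged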
venv/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py ADDED
@@ -0,0 +1,1246 @@
1
+ # mypy: ignore-errors
2
+
3
+ import itertools
4
+ from contextlib import nullcontext
5
+ from functools import partial, wraps
6
+ from typing import Any, Callable, Dict, List, Optional, Tuple
7
+ from unittest.mock import patch
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.utils._pytree as pytree
12
+ import torch.utils.dlpack
13
+ from torch import Tensor
14
+ from torch._dispatch.python import enable_python_dispatcher
15
+ from torch._dynamo import compiled_autograd
16
+ from torch._dynamo.utils import dynamo_timed, preserve_rng_state
17
+ from torch._guards import detect_fake_mode
18
+ from torch._subclasses import FakeTensor, FakeTensorMode
19
+ from torch.fx.experimental.proxy_tensor import make_fx
20
+ from torch.fx.experimental.symbolic_shapes import (
21
+ ShapeEnv
22
+ )
23
+ from torch.utils._python_dispatch import is_traceable_wrapper_subclass
24
+ from torch._decomp.decompositions_for_rng import PhiloxStateTracker, rng_decompositions
25
+ from . import config
26
+ from .partitioners import default_partition
27
+
28
+ from ._aot_autograd.utils import ( # noqa: F401
29
+ strict_zip,
30
+ _get_symint_hints,
31
+ KNOWN_TYPES,
32
+ partial_flatten_asdict,
33
+ normalize_as_list,
34
+ _get_autocast_states,
35
+ make_boxed_func,
36
+ make_boxed_compiler,
37
+ call_func_at_runtime_with_args,
38
+ create_tree_flattened_fn,
39
+ maybe_to_fresh_input,
40
+ )
41
+ from ._aot_autograd.logging_utils import ( # noqa: F401
42
+ graph_being_compiled,
43
+ nth_graph,
44
+ model_name,
45
+ set_model_name,
46
+ get_aot_compilation_context,
47
+ get_aot_graph_name,
48
+ get_graph_being_compiled,
49
+ track_graph_compiling,
50
+ callback_set,
51
+ setup_stacktrace_preservation_hooks,
52
+ describe_input,
53
+ format_guard_bug_msg,
54
+ )
55
+ from ._aot_autograd.functional_utils import ( # noqa: F401
56
+ is_fun,
57
+ to_fun,
58
+ from_fun,
59
+ sync_functional_tensor,
60
+ has_metadata_mutation,
61
+ has_data_mutation,
62
+ are_all_mutations_hidden_from_autograd,
63
+ are_all_mutations_under_no_grad_or_inference_mode,
64
+ gen_alias_from_base,
65
+ assert_functional_graph,
66
+ _check_if_mutation_can_be_in_graph,
67
+ )
68
+ from ._aot_autograd.schemas import ( # noqa: F401
69
+ OutputType,
70
+ OutputAliasInfo,
71
+ MutationType,
72
+ InputAliasInfo,
73
+ SubclassCreationMeta,
74
+ ViewAndMutationMeta,
75
+ SubclassMeta,
76
+ TensorAlias,
77
+ BackwardSignature,
78
+ GraphOutputName,
79
+ GraphInputName,
80
+ FQN,
81
+ GraphSignature,
82
+ AOTConfig,
83
+ )
84
+ from ._aot_autograd.subclass_utils import ( # noqa: F401
85
+ requires_subclass_dispatch,
86
+ unwrap_tensor_subclasses,
87
+ wrap_tensor_subclasses,
88
+ wrap_tensor_subclasses_maybe_joint,
89
+ create_metadata_for_subclass,
90
+ )
91
+ from ._aot_autograd.collect_metadata_analysis import ( # noqa: F401
92
+ run_functionalized_fw_and_collect_metadata,
93
+ )
94
+ from ._aot_autograd.input_output_analysis import ( # noqa: F401
95
+ remove_dupe_metadata,
96
+ create_synthetic_base_metadata,
97
+ _tensors_definitely_do_not_overlap,
98
+ compute_overlapping_inputs,
99
+ create_graph_signature,
100
+ )
101
+ from ._aot_autograd.traced_function_transforms import ( # noqa: F401
102
+ fn_input_mutations_to_outputs,
103
+ fn_prepped_for_autograd,
104
+ create_functionalized_fn,
105
+ create_functionalized_rng_ops_wrapper,
106
+ aot_dispatch_subclass,
107
+ create_functional_call,
108
+ create_joint,
109
+ )
110
+ from ._aot_autograd.runtime_wrappers import ( # noqa: F401
111
+ create_runtime_wrapper,
112
+ functionalized_rng_runtime_epilogue,
113
+ aot_dispatch_subclass_wrapper,
114
+ aot_wrapper_dedupe,
115
+ aot_wrapper_synthetic_base,
116
+ merge_view_inputs,
117
+ )
118
+ from ._aot_autograd.dispatch_and_compile_graph import ( # noqa: F401
119
+ aot_dispatch_base_graph,
120
+ aot_dispatch_autograd_graph,
121
+ )
122
+ from ._aot_autograd.jit_compile_runtime_wrappers import ( # noqa: F401
123
+ aot_dispatch_base,
124
+ aot_dispatch_autograd,
125
+ )
126
+
127
+ zip = strict_zip
128
+
129
+ # This global counter increments every time we compile a graph with
130
+ # AOTAutograd. You can use this to correlate runtime error messages
131
+ # with compile time (e.g., if you get an error at runtime saying
132
+ # compiled graph 3 failed, you can set a breakpoint at compile time
133
+ # for this graph number to investigate further at compile time.)
134
+ #
135
+ # NB: this is different from get_aot_compilation_context, which tracks
136
+ # each underlying graph that is compiled. In contrast, AOT_COUNTER
137
+ # corresponds to top-level invocations of aot_module/aot_function;
138
+ # one counter is allocated per entire compiled block (but this block
139
+ # may involve compiling multiple subgraphs; e.g., for forwards/backwards)
140
+ AOT_COUNTER = itertools.count()
141
+
142
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
143
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
144
+ #
145
+ # AOT Autograd contains a pretty non-trivial amount of logic to handle edge cases around aliasing and mutation
146
+ # that are external to the graph (they show up as side effects in some way when you run the graph).
147
+ #
148
+ # Take a look at `test_aotdispatch.py TestAOTAutograd.test_input_mutation*` tests for some example functions
149
+ # and what their compiled graphs look like.
150
+ # Below is a very long comment detailing several edge cases, and showing how AOT Autograd handles them.
151
+ #
152
+ # Note [AOT Autograd: input data mutations]
153
+ #
154
+ # If we compile a function that mutates inputs, then those input mutations are real side effects
155
+ # that a user expects to see after running the compiled graph.
156
+ # However, the graph that we want to send to a backend needs to be *entirely* functional.
157
+ # The way we reconcile this difference is that we remove the mutations completely from the graph that we compile
158
+ # but we update the graph to return (updated_inputs, user_outputs).
159
+ # In the epilogue that runs after the compiled graph is executed, we copy the updated inputs back to the originals.
160
+ #
161
+ # Example: original user code:
162
+ # def f(x):
163
+ # x.mul_(2)
164
+ # out = x.mul(3)
165
+ # return out
166
+ #
167
+ # After AOT Autograd compiles, we end up with a:
168
+ # (a) compiled graph
169
+ # (b) autograd.Function.forward() method, that executes the compiled graph
170
+ # (c) wrapper function, that calls the autograd.Function.forward() and performs the epilogue
171
+ #
172
+ # The output of (a, b, c) are all written below.
173
+ #
174
+ # def compiled_forward_graph(x):
175
+ # x_updated = x.mul(2)
176
+ # out = x_updated.mul(3)
177
+ # return x_updated, out
178
+ #
179
+ # # x_updated gets a gradient in the compiled backward
180
+ # def compiled_backward_graph(grad_x_updated, grad_out):
181
+ # grad_x = ...
182
+ # return grad_x
183
+ #
184
+ # def autograd.Function.forward(x):
185
+ # x_updated, out = compiled_forward_graph(x)
186
+ # return x_updated, out
187
+ #
188
+ # def compiled_wrapper(x):
189
+ # x_updated, out = autograd.Function.apply(x)
190
+ # x.copy_(x_updated)
191
+ # return out
192
+ #
193
+ # Another important thing to note is that updated inputs (due to data mutations) *do* participate
194
+ # in the compiled backward graph! Since the compiled forward graph gets N extra outputs
195
+ # (due to updated inputs showing up as graph outputs),
196
+ # The compiled backward gets an additional N inputs.
197
+ # That way, during the x.copy_(x_updated) bit in the epilogue, gradients will flow from the updated input
198
+ # back to the original input.
199
+
200
+
201
+ # Note [AOT Autograd: input metadata mutations]
202
+ #
203
+ # For the same reason as input mutations, we also don't put input metadata mutations in the graph.
204
+ # Instead, we return the updated version of the input (a view), and mutate the input's metadata outside of the graph
205
+ #
206
+ # Example: original user code:
207
+ # def f(x):
208
+ # x.t_()
209
+ # out = x.mul(3)
210
+ # return out
211
+ #
212
+ # AOT Autograd output (compiled graph, autograd.Function.forward(), wrapper function):
213
+ # def compiled_forward_graph(x):
214
+ # x_updated = x.t()
215
+ # out = x_updated.mul(3)
216
+ # return x_updated, out
217
+ #
218
+ # # x_updated does *not* get a gradient in the compiled backward
219
+ # def compiled_backward_graph(grad_out):
220
+ # grad_x = ...
221
+ # return grad_x
222
+ #
223
+ # def autograd.Function.forward(x):
224
+ # x_updated, out = compiled_forward_graph(x)
225
+ # return x_updated, out
226
+ #
227
+ # def compiled_wrapper(x):
228
+ # x_updated, out = autograd.Function.apply(x)
229
+ # x.as_strided_(x_updated)
230
+ # return out
231
+
232
+
233
+ # Note [AOT Autograd: outputs aliasing inputs or intermediates!]
234
+ #
235
+ # AOT Autograd needs special handling for outputs that alias graph inputs or intermediates!
236
+ # Why?
237
+ # (1) autograd.Function.forward() has a limitation, where views that are returned in the forward cannot later be mutated.
238
+ # (2) views don't need to be compiled in the graph anyway - it's cheap to generate them outside of the compiled graph,
239
+ # in an epilogue.
240
+ # For outputs that alias inputs, we do the following:
241
+ # (a) *still* return the aliased output as a graph output
242
+ # (b) In the AOT Autograd wrapper/epilogue, we don't return that aliased output. Instead, we use it to regenerate the output.
243
+ #
244
+ # For outputs that alias *intermediates*, we do the following:
245
+ # (a) Return the output in the compiled forward, **and** return its ._base (a graph intermediate) as an output in the forward
246
+ # (b) Use (output, graph_intermediate) to regenerate the alias, and return that to the user (instead of the compiled fw output).
247
+ # You might wonder why we return the aliased output directly in the graph (and making the graph compute it),
248
+ # only to not return it and instead generate a fresh alias off of the intermediate,
249
+ # instead of (say) just storing metadata about the size/stride of the output somewhere to generate the alias. There are two reasons:
250
+ # (1) Getting the actual alias tensor allows us to use view-replay to generate the alias, instead of an as_strided() call
251
+ # (2) Inductor (and other backends) are free to change the memory format of graph outputs, if it results in better performance.
252
+ # This can result in problems if a user later tries to .view() that output expecting it to have one set of strides,
253
+ # when it has a different set of strides.
254
+ # By including the view op directly in the graph, inductor takes that into account when deciding what memory format
255
+ # the graph intermediate should be.
256
+ #
257
+ # Another important thing to note is how our traced backward() graph handles aliases.
258
+ # (this applies to outputs aliasing inputs, outputs aliasing intermediates,
259
+ # *and* updated inputs returned in the compiled forward due to metadata-only mutations).
260
+ # Any outputs that alias (either inputs or intermediates) do NOT participate in the compiled backward graph
261
+ # It would be wasteful to include them in the compiled backward(), because we regenerate them eagerly
262
+ # at the end of the forward.
263
+ #
264
+ # Example: original user code:
265
+ # def f(x):
266
+ # out1 = x.t()
267
+ # intermediate = x.mul(2)
268
+ # out2 = intermediate.view(-1)
269
+ # return out1, out2
270
+ #
271
+ # AOT Autograd output (compiled graph, autograd.Function.forward(), wrapper function):
272
+ # def compiled_forward_graph(x):
273
+ # out1 = x.t()
274
+ # intermediate = x.mul(2)
275
+ # out2 = intermediate.view(-1)
276
+ # # the compiled graph also returns the intermediate
277
+ # return out1, out2, intermediate
278
+ #
279
+ # # intermediate gets a gradient in the compiled backward.
280
+ # # both output aliases (out1 and out2) do not.
281
+ # def compiled_backward_graph(grad_intermediate):
282
+ # grad_x = ...
283
+ # return grad_x
284
+ #
285
+ # def autograd.Function.forward(x):
286
+ # out1, out2, intermediate = compiled_forward_graph(x)
287
+ # return out1, out2, intermediate
288
+ #
289
+ # def compiled_wrapper(x):
290
+ # out1, out2, intermediate = autograd.Function.apply(x)
291
+ # # regenerate out1 from the input
292
+ # out1_regenerated = out1._view_func(x)
293
+ # # regenerate out2 from the intermediate
294
+ # out2_regenerated = out2._view_func(intermediate)
295
+ # return out1_regenerated, out2_regenerated
296
+
297
+
298
+ # Note [AOT Autograd: mutations to inputs that alias other inputs]
299
+ #
300
+ # Another edge case that is (only partially) handled today is when an input is mutated, but itself aliases another input.
301
+ # AOT Autograd needs to **ensure** that functionalization knows that the two inputs are aliased to each other.
302
+ # That way, when the aliased input is accessed later in the graph, functionalization knows to "update" the alias
303
+ # given the mutation that occurred.
304
+ #
305
+ # This is handled by updating the calling convention: we create a "synthetic base" that becomes a new input
306
+ # in the compiled function, and we regenerate the original (aliased) inputs directly off of the base
307
+ # inside of the compiled function.
308
+ #
309
+ # This logic is fully encapsulated in aot_wrapper_synthetic_base()
310
+ #
311
+ # Example: original user code:
312
+ # def f(x, x_view):
313
+ # x.mul_(2)
314
+ # out = x * x_view
315
+ # return out
316
+ # f(x, x.view(-1))
317
+ #
318
+ # AOT Autograd output (compiled graph, autograd.Function.forward(), wrapper function):
319
+ # def compiled_forward_graph(base):
320
+ # x = generate_x(base)
321
+ # x_view = generate_x_view(base)
322
+ # x_updated = x.mul(2)
323
+ # x_view_updated = x_updated.view(-1)
324
+ # out = x_updated * x_view_updated
325
+ # return x_updated, out
326
+ #
327
+ # # The calling convention change from (aliases) -> (base) happens
328
+ # # *outside* of the autograd.Function.forward().
329
+ # # That means the forward() only has 1 input (base),
330
+ # # and the backward() only has 1 output (grad_base)
331
+ # def compiled_backward_graph(grad_out):
332
+ # grad_base = ...
333
+ # return grad_base
334
+ #
335
+ # def autograd.Function.forward(base):
336
+ # x_updated, out = compiled_forward_graph(base)
337
+ # return x_updated, out
338
+ #
339
+ # # The compiled wrapper is where we create synthetic bases.
340
+ # # The info on which inputs are mutated is also tracked *before* synthetic base creation.
341
+ # def compiled_wrapper(x, x_view):
342
+ # base = merge_view_inputs(x, x_view)
343
+ # x_updated, out = autograd.Function.apply(base)
344
+ # # x and x_view are aliased in eager mode, so this mutation to x will automatically affect x_view.
345
+ # x.copy_(x_updated)
346
+ # return out
347
+
348
+
349
+ # Note [AOT Autograd: Views to avoid tangents aliasing inputs]
350
+ #
351
+ # We view every forward output when creating our tangent tensors to handle the problematic
352
+ # case in which a subclass does extra aliasing between graph outputs/inputs in a way that
353
+ # is not visible above the subclass.
354
+ #
355
+ # Ordinarily, when constructing the joint function that we want to trace in AOTAutograd,
356
+ # we're guaranteed that the tangent tensors that we pass
357
+ # into the joint are distinct tensors from the primals. This is because when we
358
+ # decide which forward outputs to create tangents for, we only create tangents
359
+ # for forward outputs that are not aliases of inputs (See Note
360
+ # [AOT Autograd: outputs aliasing inputs or intermediates!]).
361
+ #
362
+ # However, when wrapper tensor subclasses enter the picture, it is possible
363
+ # to have an output of the forward that is a subclass that is not an
364
+ # input / alias of an input, but one of its inner tensors is an alias!
365
+ # NestedTensor is an example: Performing an out-of-place pointwise op on a
366
+ # NestedTensor constructs a fresh NestedTensor that holds onto the input's
367
+ # offsets tensor directly.
368
+ #
369
+ # Having tangent tensors that are the same as the (primal) forward inputs,
370
+ # can cause problems during tracing as make_fx() will specialize on our
371
+ # duplicate inputs: If we passed in the same tensor for primals_1 and
372
+ # tangents_1 during tracing, make_fx() will happily sub out all usages of
373
+ # tangents_1 with primals_1 in the graph, which is not what we want.
374
+ #
375
+ # To work around this, we view every forward output when creating our tangent
376
+ # tensors so that tangents can never be the same as forward inputs even if
377
+ # forward inputs alias forward outputs.
378
+
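+ # For illustration only (a conceptual, hedged sketch rather than the exact implementation):
+ # constructing the tangent as a fresh view, e.g.
+ #
+ #   tangent = fw_out.view_as(fw_out)
+ #
+ # produces a tensor that shares storage with fw_out but is a distinct tensor object, so
+ # make_fx() no longer sees a duplicate input and will not substitute primals_1 for
+ # tangents_1 while tracing.
+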
379
+ # Note [Side-Effectful Tokens in AOTAutograd]
380
+ #
381
+ # We allow some side-effectful operators in
382
+ # the post-AOTAutograd (functional) graph, such as prints and torchbind operations.
383
+ # To ensure that these side-effects are compatible with future graph passes that
384
+ # assume that the graph is functional, we will thread "effect tokens" to show
385
+ # data dependence between these side-effectful operators. Practically speaking,
386
+ # effect tokens are just dummy values (torch.tensor([])). The graph would look
387
+ # like the following:
388
+ #
389
+ # def gm(self, token0, reader):
390
+ # token1, frame = with_token(ordered_effect_op, (reader,), token0)
391
+ # frame = frame * 2
392
+ # token2, frame2 = with_token(ordered_effect_op, (reader,), token1)
393
+ # frame2 = frame2 * 2
394
+ # return token2, frame, frame2
395
+ #
396
+ # We will pass the token as an input to the graph, thread it through
397
+ # side-effectful operators using the `with_effects` high order operator, and then
398
+ # return the updated token as an output.
399
+ # So the signature of the graph input would look something like
400
+ # (*tokens, *params_buffers, *user_inputs), and the signature of the graph
401
+ # output would look something like (*tokens, *outputs).
402
+
403
+ #
404
+ #
405
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
406
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
407
+
408
+
409
+ aot_autograd_decompositions = {}
410
+
411
+ @dynamo_timed
412
+ def create_aot_dispatcher_function(
413
+ flat_fn, flat_args: List[Any], aot_config: AOTConfig
414
+ ):
415
+ """
416
+ Traces the forward and backward graphs of the attr:`flat_fn` to generate a
417
+ joint graph. The joint graph is an Fx graph with Aten ops. Please refer to
418
+ the tracing mechanism to understand the graph capturing details.
419
+
420
+ The joint graph is then passed through attr:`partition_fn` to isolate the
421
+ forward and backward portions, which are then respectively compiled via the
422
+ provided attr:`fw_compiler` and attr:`bw_compiler`.
423
+
424
+ The resulting compiled forward and backward graphs are then wrapped up in a
425
+ ``torch.autograd.Function`` object.
426
+
427
+ The calling convention here is that the first aot_config.num_params_buffers
428
+ inputs in flat_args are parameters and buffers, and the rest are inputs.
429
+
430
+ We use this to assume that parameter and buffer shapes don't change.
431
+
432
+ Note: this function is used both by aot_function and aot_export (controlled by aot_config.is_export)
433
+ When aot_config.is_export is True, we return an FX graph + metadata
434
+ When aot_config.is_export is False, we return an ordinary runtime function
435
+ """
436
+
437
+ # This is the main entry point.
438
+ # TODO: Chillee argues that dynamo itself should pass in fake tensors to
439
+ # the list of arguments when compiling; at the moment we do not do this
440
+
441
+ if aot_config.decompositions is None:
442
+ aot_config.decompositions = {}
443
+
444
+
445
+ aot_config.decompositions = {
446
+ **aot_autograd_decompositions,
447
+ **aot_config.decompositions,
448
+ }
449
+
450
+ if config.functionalize_rng_ops:
451
+ # Update the decompositions with functionalized random decompositions
452
+ aot_config.decompositions = {
453
+ **rng_decompositions,
454
+ **aot_config.decompositions,
455
+ }
456
+
457
+ # Check flat_args to see if they're already fake. If so, use that fake
458
+ # mode instead.
459
+
460
+ fake_mode = detect_fake_mode(flat_args)
461
+ if fake_mode is None:
462
+ shape_env = ShapeEnv() if aot_config.dynamic_shapes else None
463
+ fake_mode = FakeTensorMode(shape_env=shape_env)
464
+ else:
465
+ shape_env = fake_mode.shape_env
466
+
467
+ python_dispatcher_mode = (
468
+ enable_python_dispatcher() if shape_env is not None else nullcontext()
469
+ )
470
+
471
+ with torch.autograd.set_multithreading_enabled(
472
+ False
473
+ ), preserve_rng_state(), fake_mode, python_dispatcher_mode, PhiloxStateTracker():
474
+
475
+ def process_inputs(flat_args):
476
+ def convert(idx, x):
477
+ if shape_env is not None:
478
+ from torch._dynamo.source import ConstantSource
479
+ if isinstance(x, int):
480
+ # We always specialize on scalar values in export.
481
+ if aot_config.is_export:
482
+ return x
483
+ source = ConstantSource(f"sym_{idx}")
484
+ return shape_env.create_symintnode(
485
+ shape_env.create_symbol(x, source),
486
+ hint=x,
487
+ source=source
488
+ )
489
+ if not isinstance(x, torch.Tensor):
490
+ return x
491
+ if isinstance(x, FakeTensor):
492
+ assert x.fake_mode is fake_mode
493
+ return x
494
+ if is_traceable_wrapper_subclass(x):
495
+ attrs, _ = x.__tensor_flatten__()
496
+ if all(isinstance(getattr(x, attr), FakeTensor) for attr in attrs):
497
+ assert all(getattr(x, attr).fake_mode is fake_mode for attr in attrs)
498
+ return x
499
+
500
+
501
+ # see note [Tensor Fakification and Symbol Caching]
502
+ symbolic_context = None
503
+ source = None
504
+ if tracing_context := torch._guards.TracingContext.try_get():
505
+ if x in tracing_context.tensor_to_context:
506
+ symbolic_context = tracing_context.tensor_to_context[x]
507
+ source = symbolic_context.tensor_source
508
+ if (
509
+ idx < aot_config.num_params_buffers
510
+ and config.static_weight_shapes
511
+ and not symbolic_context
512
+ ):
513
+ # TODO: Ensure that this codepath is never exercised from
514
+ # Dynamo
515
+ return fake_mode.from_tensor(x, static_shapes=True)
516
+
517
+ return fake_mode.from_tensor(
518
+ x, static_shapes=False, symbolic_context=symbolic_context, source=source
519
+ )
520
+ return [convert(idx, x) for idx, x in enumerate(flat_args)]
521
+
522
+ fake_flat_args = process_inputs(flat_args)
523
+
524
+ needs_autograd = (
525
+ any(x.requires_grad for x in fake_flat_args if isinstance(x, Tensor))
526
+ and torch.is_grad_enabled()
527
+ )
528
+
529
+ with enable_python_dispatcher():
530
+ # Patch set_rng_state as set_rng_state with fake tensors is
531
+ # nonsensical. This does not affect the collection of metadata.
532
+ with patch("torch.cuda.set_rng_state", lambda *args: None):
533
+ fw_metadata = run_functionalized_fw_and_collect_metadata(
534
+ flat_fn,
535
+ keep_input_mutations=aot_config.keep_inference_input_mutations,
536
+ is_train=needs_autograd,
537
+ pre_dispatch=aot_config.pre_dispatch,
538
+ )(*fake_flat_args)
539
+
540
+ req_subclass_dispatch = requires_subclass_dispatch(fake_flat_args, fw_metadata)
541
+
542
+ if needs_autograd and not any(x.requires_grad for x in fw_metadata.output_info):
543
+ # We realized that none of the outputs require grad,
544
+ # so we actually have an inference graph.
545
+ needs_autograd = False
546
+ # A bit silly: right now in the subclass codepath, our ViewAndMutationMeta
547
+ # changes depending on whether we pass in is_train / keep_input_mutations,
548
+ # so we're forced to recompute the metadata.
549
+ # TODO: refactor the subclass path of run_functionalized_fw_and_collect_metadata
550
+ # so that this is unnecessary.
551
+ if req_subclass_dispatch:
552
+ fw_metadata = run_functionalized_fw_and_collect_metadata(
553
+ flat_fn,
554
+ keep_input_mutations=aot_config.keep_inference_input_mutations and not needs_autograd,
555
+ is_train=needs_autograd,
556
+ pre_dispatch=aot_config.pre_dispatch,
557
+ )(*fake_flat_args)
558
+ else:
559
+ fw_metadata = ViewAndMutationMeta(
560
+ input_info=fw_metadata.input_info,
561
+ output_info=fw_metadata.output_info,
562
+ num_intermediate_bases=fw_metadata.num_intermediate_bases,
563
+ keep_input_mutations=aot_config.keep_inference_input_mutations and not needs_autograd,
564
+ traced_tangents=fw_metadata.traced_tangents,
565
+ subclass_inp_meta=fw_metadata.subclass_inp_meta,
566
+ subclass_fw_graph_out_meta=fw_metadata.subclass_fw_graph_out_meta,
567
+ subclass_tangent_meta=fw_metadata.subclass_tangent_meta,
568
+ is_train=needs_autograd,
569
+ )
570
+
571
+
572
+ if fw_metadata.num_intermediate_bases > 0:
573
+ assert not req_subclass_dispatch, f"""\
574
+ torch.compile is currently being used with tensor subclass inputs:
575
+ {','.join([str(type(x)) for x in fake_flat_args])}. We are attempting to compile a graph with two graph outputs
576
+ that alias one another, which is currently unsupported in the subclass use case. If you run into this,
577
+ please file a github issue"""
578
+
579
+ if aot_config.is_export:
580
+ # aot_export: ban input metadata mutations for now to keep shared code paths simpler.
581
+ # Keeping .resize_() in the graph will require some work
582
+ # Allowing it but keeping the graph functional will require some calling convention changes.
583
+ if len([x for x in fw_metadata.input_info if x.mutates_metadata]) != 0:
584
+ raise RuntimeError(f"""\
585
+ Found an input that received a metadata mutation, through e.g. a call to `.resize_()` or `.transpose_()`.
586
+ This is currently banned in the aot_export workflow. If you need this functionality, please file a github issue.
587
+
588
+ fw_metadata={str(fw_metadata)}""")
589
+ # In export, banning data mutations on inputs that require grad for now.
590
+ # This should be rare, and is tricky to get right. When we trace the backward,
591
+ # we currently trace with autograd.grad instead of .backward(), which makes it difficult
592
+ # to ensure that we run autograd all the way through the input **before** it saw the mutation.
593
+ if len([x for x in fw_metadata.input_info if x.requires_grad and x.mutates_data]) != 0:
594
+ raise RuntimeError(f"""\
595
+ Found a graph input that requires gradients, and received a mutation.
596
+ This is currently banned in the aot_export workflow. If you need this functionality, please file a github issue.
597
+
598
+ fw_metadata={str(fw_metadata)}""")
599
+ if req_subclass_dispatch:
600
+ raise RuntimeError("""\
601
+ aot_export is not currently supported with traceable tensor subclass.
602
+ If you need this feature, please comment on <CREATE_ISSUE_LINK>""")
603
+
604
+ # Need to decide on a strategy for functionalized RNG: toggling via global config seems bad,
605
+ # and turning it on will require a non-trivial calling convention change for any export runtime.
606
+ if config.functionalize_rng_ops:
607
+ raise RuntimeError("""\
608
+ Functionalized RNG is not currently supported in the aot_export workflow. Please file a github issue,
609
+ or otherwise set torch._functorch.config.functionalize_rng_ops = False.""")
610
+
611
+ # crappy version of dispatcher
612
+ # TODO: Do this properly
613
+ if needs_autograd:
614
+ # For now, aot_dispatch_autograd knows to explicitly return a graph
615
+ # when run with export, and an opaque callable otherwise.
616
+ # In theory we could factor these out, but I wanted to let the dust
617
+ # settle on how functionalized rng fits into export first.
618
+ compiler_fn = aot_dispatch_autograd_graph if aot_config.is_export else aot_dispatch_autograd
619
+ else:
620
+ # aot_dispatch_base_graph contains only the "graph bits", while aot_dispatch_base
621
+ # includes some extra work around handling a runtime epilogue.
622
+ compiler_fn = aot_dispatch_base_graph if aot_config.is_export else aot_dispatch_base
623
+
624
+ compiler_fn = partial(aot_wrapper_synthetic_base, compiler_fn=compiler_fn, needs_autograd=needs_autograd)
625
+ compiler_fn = partial(aot_wrapper_dedupe, compiler_fn=compiler_fn)
626
+ # You can put more passes here
627
+
628
+ compiled_fn = compiler_fn(flat_fn, fake_flat_args, aot_config, fw_metadata=fw_metadata)
629
+ if aot_config.is_export:
630
+ # During export, we don't get back a callable - we get back the raw fx graph
631
+ # (either a joint or an inference-only graph)
632
+ assert isinstance(compiled_fn, torch.fx.GraphModule)
633
+ return compiled_fn, fw_metadata
634
+
635
+ if not hasattr(compiled_fn, "_boxed_call"):
636
+ compiled_fn = make_boxed_func(compiled_fn)
637
+
638
+ return compiled_fn
639
+
640
+
641
+ def aot_function(
642
+ fn: Callable,
643
+ fw_compiler: Callable,
644
+ bw_compiler: Optional[Callable] = None,
645
+ partition_fn: Callable = default_partition,
646
+ decompositions: Optional[Dict] = None,
647
+ num_params_buffers: int = 0,
648
+ keep_inference_input_mutations: bool = False,
649
+ inference_compiler: Optional[Callable] = None,
650
+ *,
651
+ # Whether or not to trace with dynamic shapes
652
+ dynamic=False,
653
+ enable_log=True,
654
+ ) -> Callable:
655
+ """
656
+ Traces the forward and backward graph of :attr:`fn` using torch dispatch
657
+ mechanism, and then compiles the generated forward and backward graphs
658
+ through :attr:`fw_compiler` and :attr:`bw_compiler`.
659
+
660
+ :func:`aot_function` traces the forward and backward graph ahead of time,
661
+ and generates a joint forward and backward graph. :attr:`partition_fn` is
662
+ then used to separate out forward and backward graphs. The partitioner
663
+ function can be used to perform optimizations such as recomputation. One can
664
+ set `decompositions` dictionary to decompose the operators into a sequence
665
+ of core or simpler operators supported by the backend compilers.
666
+
667
+ .. warning::
668
+ This API is experimental and likely to change.
669
+
670
+ Args:
671
+ fn (Callable): A Python function that takes one or more arguments. Must
672
+ return one or more Tensors.
673
+ fw_compiler (Callable): A Python function that accepts an Fx graph with
674
+ Aten ops and input args, and returns a Callable that semantically is
675
+ equivalent to the input Fx graph.
676
+ bw_compiler (Optional[Callable]): A Python function that accepts an
677
+ Fx graph with Aten ops and input args, and returns a Callable that
678
+ semantically is equivalent to the input Fx graph. Default: None
679
+ (when None, it defaults to the :attr:`fw_compiler`)
680
+ partition_fn (Callable): A Python function that takes a joint forward
681
+ and backward graph, and partitions it into separate forward and
682
+ backward graphs.
683
+ decompositions (Dict): A dictionary to define the decomposition of
684
+ larger Aten ops into simpler or core Aten ops.
685
+ inference_compiler (Optional[Callable]): A Python function that accepts an
686
+ Fx graph with Aten ops and input args, and returns a Callable that
687
+ semantically is equivalent to the input Fx graph. inference_compiler is invoked
688
+ if no autograd is needed. Default: None
689
+ (when None, it defaults to the :attr:`fw_compiler`)
690
+ Returns:
691
+ Returns a ``Callable`` that retains the eager behavior of the original
692
+ :attr:`fn`, but with forward and backward graph compiled via
693
+ :attr:`fw_compiler` and :attr:`bw_compiler`.
694
+
695
+ A simple example usage of :func:`aot_function` is as follows. This example
696
+ will print the forward and backward graphs of the function ``fn``
697
+
698
+ >>> fn = lambda x : x.sin().cos()
699
+ >>> def print_compile_fn(fx_module, args):
700
+ >>> print(fx_module)
701
+ >>> return fx_module
702
+ >>> aot_fn = aot_function(fn, print_compile_fn)
703
+ >>> x = torch.randn(4, 5, requires_grad=True)
704
+ >>> aot_fn(x)
705
+ """
706
+
707
+ if bw_compiler is None:
708
+ bw_compiler = fw_compiler
709
+ if inference_compiler is None:
710
+ inference_compiler = fw_compiler
711
+ aot_config = AOTConfig(
712
+ fw_compiler=fw_compiler,
713
+ bw_compiler=bw_compiler,
714
+ inference_compiler=inference_compiler,
715
+ partition_fn=partition_fn,
716
+ decompositions=decompositions,
717
+ num_params_buffers=num_params_buffers,
718
+ aot_id=next(AOT_COUNTER),
719
+ keep_inference_input_mutations=keep_inference_input_mutations,
720
+ dynamic_shapes=dynamic,
721
+ aot_autograd_arg_pos_to_source=None,
722
+ is_export=False,
723
+ no_tangents=False,
724
+ enable_log=enable_log,
725
+ )
726
+ cached_res = None
727
+
728
+ @wraps(fn)
729
+ def returned_function(*args, **kwargs):
730
+ nonlocal cached_res
731
+ # Now flatten the tensor args
732
+ flat_args = pytree.arg_tree_leaves(*args, **kwargs)
733
+
734
+ # Compile the function and save it in the cache
735
+ if cached_res is None:
736
+ flat_fn, out_spec = create_tree_flattened_fn(fn, args, kwargs)
737
+
738
+ compiled_fn = create_aot_dispatcher_function(
739
+ flat_fn,
740
+ flat_args,
741
+ aot_config,
742
+ )
743
+ cached_res = (compiled_fn, out_spec)
744
+
745
+ cached_fn, out_spec = cached_res
746
+ out = cached_fn(flat_args)
747
+ return out_spec.unflatten(out)
748
+
749
+ return returned_function
750
+
751
+
752
+ def aot_module(mod: nn.Module, *args, **kwargs) -> nn.Module:
753
+ """
754
+ Traces the forward and backward graph of :attr:`mod` using torch dispatch
755
+ tracing mechanism. It is a wrapper function that, under the hood, uses
756
+ :func:`aot_function` to perform tracing and compilation.
757
+
758
+ :func:`aot_module` lifts the parameters and buffers of ``nn.Module`` as inputs
759
+ to a new callable which is then compiled through :func:`aot_function`.
760
+
761
+ .. warning::
762
+ This API is experimental and likely to change.
763
+
764
+ Args:
765
+ mod (Callable): A ``nn.Module`` module.
766
+ args : args to be passed to :func:`aot_function`
767
+ kwargs : kwargs to be passed to :func:`aot_function`
768
+
769
+ Returns:
770
+ Returns a ``nn.Module`` that retains the eager behavior of the original
771
+ :attr:`mod`, but with forward and backward graph compiled.
772
+
773
+ """
774
+ # See Note: [Fake Modules and AOTAutograd]
775
+ torch._dynamo.utils.assert_no_fake_params_or_buffers(mod)
776
+
777
+ def functional_call(named_params, named_buffers, *args, **kwargs):
778
+ params_and_buffers = {**named_params, **named_buffers}
779
+ return torch.func.functional_call(mod, params_and_buffers, args, kwargs)
780
+
781
+ named_params = dict(mod.named_parameters(remove_duplicate=False))
782
+ named_buffers = dict(mod.named_buffers(remove_duplicate=False))
783
+ num_params_buffers = len(named_params) + len(named_buffers)
784
+ compiled_f = aot_function(
785
+ functional_call, *args, num_params_buffers=num_params_buffers, **kwargs
786
+ )
787
+
788
+ class AOTModule(nn.Module):
789
+ def __init__(self):
790
+ super().__init__()
791
+ self.orig_module = mod
792
+
793
+ def forward(self, *args, **kwargs):
794
+ return compiled_f(
795
+ named_params,
796
+ named_buffers,
797
+ *args,
798
+ **kwargs,
799
+ )
800
+
801
+ return AOTModule()
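A usage sketch of aot_module with a pass-through compiler that simply returns the captured FX graph; the import path mirrors this file's location and may differ in other releases:

import torch
import torch.nn as nn
from torch._functorch.aot_autograd import aot_module

def nop_compiler(fx_module, example_inputs):
    # Inspect and return the captured graph unchanged.
    print(fx_module.code)
    return fx_module

compiled = aot_module(nn.Linear(4, 2), fw_compiler=nop_compiler)
out = compiled(torch.randn(3, 4))
out.sum().backward()   # also exercises the compiled backward graph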
802
+
803
+
804
+ def aot_module_simplified(
805
+ mod: nn.Module,
806
+ args,
807
+ fw_compiler: Callable,
808
+ bw_compiler: Optional[Callable] = None,
809
+ partition_fn: Callable = default_partition,
810
+ decompositions: Optional[Dict] = None,
811
+ keep_inference_input_mutations=False,
812
+ inference_compiler: Optional[Callable] = None,
813
+ ) -> nn.Module:
814
+ """
815
+ This is the simplified or low overhead version of aot_module. For frontends
816
+ like TorchDynamo, the input functions/modules to AOT are static and have
817
+ unpacked inputs/outputs. This gives us an opportunity to remove the
818
+ (1) pytree overhead to parse inputs/outputs,
819
+ (2) AOT Autograd cache,
820
+ (3) Reading of params/buffers in every forward call
821
+
822
+ :func:`aot_module_simplified` removes these overheads.
823
+ """
824
+ params = {
825
+ **dict(mod.named_parameters(remove_duplicate=False)),
826
+ **dict(mod.named_buffers(remove_duplicate=False)),
827
+ }
828
+ params_flat, params_spec = pytree.tree_flatten(params)
829
+ params_flat = list(params_flat)
830
+ params_len = len(params_flat)
831
+
832
+ functional_call = create_functional_call(mod, params_spec, params_len)
833
+
834
+ if bw_compiler is None:
835
+ bw_compiler = fw_compiler
836
+ if inference_compiler is None:
837
+ inference_compiler = fw_compiler
838
+
839
+ seen_sources = set()
840
+
841
+ full_args = []
842
+ # First, the params
843
+ full_args.extend(params_flat)
844
+
845
+ if tracing_context := torch._guards.TracingContext.try_get():
846
+ tracing_context.params_flat = params_flat
847
+
848
+ aot_autograd_arg_pos_to_source = None
849
+ # Then, the params 1:1 mapped sources, if relevant.
850
+ if hasattr(mod, "_param_name_to_source"):
851
+ aot_autograd_arg_pos_to_source = []
852
+ # We now know this came from dynamo, and (1) we care about guards,
853
+ # so setting up aot_autograd_arg_pos_to_source for downstream dedup guards
854
+ # can now be done safely. (2) Dynamo logic protects the 1:1 sizing below.
855
+ for name in params.keys():
856
+ assert name in mod._param_name_to_source, f"{name} not found."
857
+ source = mod._param_name_to_source[name]
858
+ assert source not in seen_sources, source
859
+ seen_sources.add(source)
860
+ aot_autograd_arg_pos_to_source.append(source)
861
+
862
+ # Next, the input args
863
+ full_args.extend(args)
864
+
865
+ if hasattr(mod, "graph"):
866
+ # Non dynamo entrypoints can get to here...
867
+ for i, node in enumerate(mod.graph.nodes):
868
+ if node.op == "placeholder":
869
+ if hasattr(node, "_dynamo_source"):
870
+ # ... but not here!
871
+ if aot_autograd_arg_pos_to_source is None:
872
+ aot_autograd_arg_pos_to_source = []
873
+ source = node._dynamo_source
874
+ assert source not in seen_sources, source
875
+ seen_sources.add(source)
876
+ aot_autograd_arg_pos_to_source.append(source)
877
+
878
+ if aot_autograd_arg_pos_to_source is not None:
879
+ assert len(full_args) == len(aot_autograd_arg_pos_to_source)
880
+
881
+ dynamic_shapes = False
882
+ for x in full_args:
883
+ if isinstance(x, FakeTensor):
884
+ dynamic_shapes = x.fake_mode.shape_env is not None
885
+ break
886
+
887
+ aot_config = AOTConfig(
888
+ fw_compiler=fw_compiler,
889
+ bw_compiler=bw_compiler,
890
+ inference_compiler=inference_compiler,
891
+ partition_fn=partition_fn,
892
+ decompositions=decompositions,
893
+ num_params_buffers=params_len,
894
+ aot_id=next(AOT_COUNTER),
895
+ keep_inference_input_mutations=keep_inference_input_mutations,
896
+ dynamic_shapes=dynamic_shapes,
897
+ aot_autograd_arg_pos_to_source=aot_autograd_arg_pos_to_source,
898
+ is_export=False,
899
+ no_tangents=False,
900
+ )
901
+
902
+ with compiled_autograd.disable():
903
+ compiled_fn = create_aot_dispatcher_function(
904
+ functional_call,
905
+ full_args,
906
+ aot_config,
907
+ )
908
+
909
+ # TODO: There is something deeply wrong here; compiled_fn running with
910
+ # the boxed calling convention, but aot_module_simplified somehow
911
+ # historically returned a function that was not the boxed calling
912
+ # convention. This should get fixed...
913
+ def forward(*runtime_args):
914
+ full_args = []
915
+ full_args.extend(params_flat)
916
+ full_args.extend(runtime_args)
917
+ return compiled_fn(full_args)
918
+
919
+ # Just for convenience
920
+ forward.zero_grad = mod.zero_grad
921
+ forward.named_parameters = mod.named_parameters
922
+ forward.named_buffers = mod.named_buffers
923
+
924
+ return forward
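A sketch of calling this entry point directly, the way a Dynamo backend would, again with a trivial compiler (import path assumed from this file's location):

import torch
import torch.nn as nn
from torch._functorch.aot_autograd import aot_module_simplified

def nop(gm, example_inputs):
    return gm   # hand the traced graph back unchanged

mod = nn.Linear(4, 2)
example_inputs = [torch.randn(3, 4)]
compiled_forward = aot_module_simplified(mod, example_inputs, fw_compiler=nop)
out = compiled_forward(*example_inputs)   # captured params/buffers are prepended internally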
925
+
926
+
927
+ def aot_export_module(
928
+ mod: nn.Module,
929
+ args,
930
+ *,
931
+ decompositions: Optional[Dict] = None,
932
+ # If true, we'll return a joint forward-backward graph,
933
+ # As well as metadata on the loss + gradients in the backward.
934
+ trace_joint: bool,
935
+ # If trace_joint is True, we expect your module to return a scalar loss.
936
+ # Your module can return multiple outputs, so you must specify which output the loss is.
937
+ output_loss_index: Optional[int] = None,
938
+ pre_dispatch: bool = False,
939
+ kwargs=None,
940
+ ) -> Tuple[torch.fx.GraphModule, GraphSignature]:
941
+ """
942
+ This function takes in a module, and returns:
943
+ (1) an FX graph that can be exported
944
+ (2) some metadata about the graph
945
+
946
+ If `trace_joint=True` we will return a joint graph of the forward + backward.
947
+
948
+ The traced FX graph will have the following properties compared to the original module:
949
+ (1) Inputs and outputs to the module will be pytree-flattened
950
+ (2) Parameters and buffers on the module will be lifted into graph inputs,
951
+ graph_inputs = (*parameters, *buffers, *user_inputs)
952
+ (3) The graph will be fully functionalized
953
+ (4) Any input mutations will be converted into additional outputs in the graph,
954
+ meaning whoever calls this graph is responsible for applying the mutations
955
+ back to the original inputs.
956
+ (5) If trace_joint is True, the graph will return parameter gradients in addition to user outputs.
957
+ The graph output will look like:
958
+ graph_outputs = (*updated_inputs, *user_outputs, *param_gradients)
959
+
960
+ There are also several restrictions on what modules can use this API. In particular:
961
+ (1) If trace_joint is specified, we expect the loss function to be **fused**
962
+ into the module forward. One of the outputs to the forward must be a scalar loss,
963
+ which is specified with `output_loss_index`.
964
+ All other outputs to the forward are presumed to not require gradients.
965
+ (2) This API cannot capture optimizers (although in theory we could build an API for this).
966
+ (3) Metadata mutations on params/buffers/inputs are banned.
967
+ (4) Data mutations on anything that requires gradients are banned (parameters)
968
+ (5) If an input is mutated, it is not allowed to alias any other inputs.
969
+ (6) Parameters must not be duplicated.
970
+ """
971
+ if pre_dispatch and trace_joint:
972
+ raise RuntimeError("pre_dispatch is not supported when trace_joint is True.")
973
+ named_parameters = dict(mod.named_parameters(remove_duplicate=False))
974
+ named_buffers = dict(mod.named_buffers(remove_duplicate=False))
975
+
976
+ params_and_buffers = {
977
+ **dict(named_parameters),
978
+ **dict(named_buffers),
979
+ }
980
+ params_and_buffers_flat, params_spec = pytree.tree_flatten(params_and_buffers)
981
+ params_and_buffers_flat = tuple(params_and_buffers_flat)
982
+ params_len = len(params_and_buffers_flat)
983
+
984
+ kwargs = kwargs or {}
985
+
986
+ functional_call = create_functional_call(mod, params_spec, params_len, store_orig_mod=True)
987
+
988
+ num_fw_outs = None
989
+
990
+ if trace_joint:
991
+ # This helper effectively just adds some extra asserts about what the backward will look like:
992
+ # Outputs must include a scalar loss, that we compute gradients w.r.t.
993
+ # We don't compute gradients w.r.t. anything else, so just in case we detach()
994
+ # all other output tensors.
995
+ def fn_to_trace(*args):
996
+ nonlocal num_fw_outs
997
+ out = functional_call(*args)
998
+ if output_loss_index is None:
999
+ raise RuntimeError("""\
1000
+ If trace_joint=True, it is required that one of your forward outputs is a scalar loss.
1001
+ You must specify which output is the loss (by index) with output_loss_index.
1002
+ if isinstance(out, (torch.Tensor)):
1003
+ out = (out,)
1004
+ if not isinstance(out, (tuple, list)):
1005
+ raise RuntimeError(f"Expected forward output to be either a tensor or a list/tuple of tensors. found {type(out)}")
1006
+
1007
+ for i, o in enumerate(out):
1008
+ # We only want to create a backward graph w.r.t. the loss that the user passed in.
1009
+ # This implies that every other output should not require gradients.
1010
+ # Instead of making this an error (and forcing the user to detach all other outputs
1011
+ # of their forward),
1012
+ # we'll automatically detach them here.
1013
+ if o.requires_grad and i != output_loss_index:
1014
+ raise RuntimeError(f"""\
1015
+ Found an output of the forward that requires gradients, that was not the scalar loss.
1016
+ We require all outputs to the forward that are not the scalar loss to not require gradient,
1017
+ because we will only compute a backward graph against the scalar loss.
1018
+ You can fix this by calling .detach() on each of your forward outputs that is not the loss.
1019
+ You specified that output index {output_loss_index} is the loss, but we found that
1020
+ the output at index {i} requires gradients.""")
1021
+ out_loss = out[output_loss_index]
1022
+ num_fw_outs = len(out)
1023
+ if not out_loss.requires_grad:
1024
+ raise RuntimeError(f"""\
1025
+ The output at index {output_loss_index} was marked as the loss, but it does not require gradients""")
1026
+ if out_loss.numel() != 1:
1027
+ raise RuntimeError(f"""\
1028
+ We require the output marked as the loss (at index {output_loss_index}) to be a scalar, but it has shape {out_loss.shape}""")
1029
+ return out
1030
+ ctx = nullcontext
1031
+ else:
1032
+ # Run under no_grad, so our tracing machinery only traces an inference graph.
1033
+ ctx = torch.no_grad
1034
+ fn_to_trace = functional_call
1035
+
1036
+ full_args = []
1037
+ # First, the params
1038
+ # NB: It is REQUIRED that parameters come first, Inductor infers "fixed"
1039
+ # parameters by looking at the difference in parameter count outside
1040
+ # and inside AOTAutograd, and assumes the prefix of arguments are fixed
1041
+ # arguments
1042
+ full_args.extend(params_and_buffers_flat)
1043
+ # Next, the input args
1044
+ full_args.extend(args)
1045
+
1046
+ with ctx():
1047
+ fx_g, metadata, in_spec, out_spec = _aot_export_function(
1048
+ fn_to_trace,
1049
+ full_args,
1050
+ decompositions=decompositions,
1051
+ num_params_buffers=params_len,
1052
+ no_tangents=True,
1053
+ pre_dispatch=pre_dispatch,
1054
+ kwargs=kwargs,
1055
+ )
1056
+ if trace_joint:
1057
+ def flattened_joint(*args):
1058
+ # The idea here is that the joint graph that AOTAutograd creates has some strict properties:
1059
+ # (1) It accepts two arguments (primals, tangents), and pytree_flattens them
1060
+ # (2) It returns a tuple of (fw_outs, gradients)
1061
+ # This is a very useful convention for anyone who wants to partition the joint graph
1062
+ # into a separate forward and backward graph.
1063
+ # However,
1064
+ # (1) for people exporting a single joint graph, it would be preferable not to have
1065
+ # any pytrees in the graph.
1066
+ # (2) We are guaranteed in the aot_export_module case that the forward outputs a loss,
1067
+ # and there are therefore no tangents that are needed to run the joint graph.
1068
+ # (3) AOTAutograd creates a grad_input for every input in the forward,
1069
+ # including None's for inputs that are not grad-requiring tensors.
1070
+ # we don't want these in our export graph.
1071
1072
+ # This function "fixes" both of the above by removing any tangent inputs,
1073
+ # and removing pytrees from the original FX graph.
1074
+ fake_tangents = [None for _ in range(metadata.num_outputs + metadata.num_mutated_inp_runtime_indices)]
1075
+ fw_outs, gradients = fx_g(args, fake_tangents)
1076
+ assert len(gradients) == len(args)
1077
+ output_gradients = []
1078
+ for i, (a, grad) in enumerate(zip(args, gradients)):
1079
+ if isinstance(a, torch.Tensor) and a.requires_grad:
1080
+ assert grad is not None, """\
1081
+ Found a parameter that did not receive a gradient.
1082
+ "This is most likely a bug, but if this needs to be supported please comment on this Github issue:
1083
+ https://github.com/pytorch/pytorch/issues/101192
1084
+ """
1085
+ output_gradients.append(grad)
1086
+ else:
1087
+ assert grad is None
1088
+ return *fw_outs, *output_gradients
1089
+ fx_g = make_fx(flattened_joint)(*full_args)
1090
+
1091
+ user_args_flat = pytree.arg_tree_leaves(*args, **kwargs)
1092
+ return fx_g, create_graph_signature(
1093
+ fx_g,
1094
+ metadata,
1095
+ in_spec,
1096
+ out_spec,
1097
+ user_args_flat=user_args_flat,
1098
+ params_and_buffers_flat=params_and_buffers_flat,
1099
+ param_names=list(named_parameters.keys()),
1100
+ buffer_names=list(named_buffers.keys()),
1101
+ trace_joint=trace_joint,
1102
+ num_user_fw_outs=num_fw_outs,
1103
+ loss_index=output_loss_index,
1104
+ )
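A sketch of exporting an inference graph with this API (trace_joint=False); the import path is taken from this file's location and may vary between releases:

import torch
import torch.nn as nn
from torch._functorch.aot_autograd import aot_export_module

mod = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
gm, signature = aot_export_module(mod, (torch.randn(2, 4),), trace_joint=False)
# Per the docstring above, gm's inputs are (*parameters, *buffers, *user_inputs).
print(gm.graph)
print(signature)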
1105
+
1106
+ def aot_export_joint_simple(
1107
+ func: Callable,
1108
+ args,
1109
+ *,
1110
+ trace_joint: bool,
1111
+ # It looks like the main consequence of this API is that for dynamic shapes,
1112
+ # it will assume that params/buffers are static.
1113
+ # With the new inferred dynamic shapes API, maybe this doesn't matter?
1114
+ num_params_buffers: int = 0,
1115
+ decompositions: Optional[Dict] = None,
1116
+ ) -> torch.fx.GraphModule:
1117
+ """
1118
+ A simplified version of export. Used by higher order operators.
1119
+
1120
+ This function makes a high-level "no calling convention changes" guarantee:
1121
+ - If no inputs require grad (so we export an inference graph),
1122
+ there are *no* calling convention changes between the exported graph and "func".
1123
+ - If at least one input requires grad (so we trace out and export a joint fw-bw graph),
1124
+ Then if you were to partition the graph into a separate forward and backward graph,
1125
+ The forward graph will have no calling convention changes compared to "func".
1126
+
1127
+ The above also relies on some strong restrictions around which functions this API accepts:
1128
+ (1) `args` cannot contain any pytrees (they must have been pytree_flattened already)
1129
+ (2) `func` cannot mutate any inputs
1130
+ (3) The outputs of `func` cannot alias any inputs.
1131
+
1132
+ Note: this function is only lightly tested today. It will probably be tested more heavily by higher order ops.
1133
+ """
1134
+ if trace_joint:
1135
+ ctx = nullcontext
1136
+ else:
1137
+ # Run under no_grad, so our tracing machinery only traces an inference graph.
1138
+ ctx = torch.no_grad
1139
+
1140
+ with ctx():
1141
+ fx_g, metadata, in_spec, out_spec = _aot_export_function(
1142
+ func,
1143
+ args,
1144
+ decompositions=decompositions,
1145
+ )
1146
+ in_spec, _kw_in_spec = in_spec.children_specs
1147
+ # At this point, we can just directly return the (joint or inference graph) that we traced.
1148
+ # First though: a bunch of assertions to make sure that our graph doesn't require
1149
+ # any calling convention changes compared to the original function.
1150
+ # These restrictions are *in addition to* the general restrictions on export.
1151
+
1152
+ # No input mutations
1153
+ if len([x for x in metadata.input_info if x.mutates_data or x.mutates_metadata]) != 0:
1154
+ raise RuntimeError(f"aot_export_joint_simple does not support input mutations. {str(metadata)}")
1155
+ # No output aliasing
1156
+ if len([x for x in metadata.output_info if x.output_type != OutputType.non_alias]) != 0:
1157
+ raise RuntimeError(f"aot_export_joint_simple does not support outputs that alias inputs. {str(metadata)}")
1158
+ # No pytrees
1159
+ if in_spec.is_leaf():
1160
+ raise RuntimeError(f"aot_export_joint_simple requires inputs to be a single list/tuple. in_spec={str(in_spec)}")
1161
+ if not all(child.is_leaf() for child in in_spec.children_specs):
1162
+ raise RuntimeError(f"aot_export_joint_simple requires individual inputs not to be pytrees. in_spec={str(in_spec)}")
1163
+ if out_spec.is_leaf():
1164
+ raise RuntimeError(f"aot_export_joint_simple requires outputs to be a single list/tuple. out_spec={str(out_spec)}")
1165
+ if not all(child.is_leaf() for child in out_spec.children_specs):
1166
+ raise RuntimeError(f"aot_export_joint_simple requires individual outputs not to be pytrees. out_spec={str(out_spec)}")
1167
+ # TODO: we might have to temporarily patch config.functionalize_rng
1168
+ # so that it doesn't run when we're exporting a higher order op.
1169
+
1170
+ if config.debug_assert:
1171
+ # Smoke test that after partitioning, we can run the forward without any calling convention changes.
1172
+ fw_module, bw_module = aot_config.default_partition( # noqa: F821
1173
+ fx_g, args, num_fwd_outputs=len(fw_metadata.output_infos) # noqa: F821
1174
+ )
1175
+ # Attempt to run the fw_module with the original user inputs
1176
+ fake_mode = detect_fake_mode(args)
1177
+ if fake_mode is None:
1178
+ fake_mode = FakeTensorMode()
1179
+ with fake_mode:
1180
+ fw_module(*args)
1181
+ return fx_g
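A sketch for the inference case, where no input requires grad and the exported graph keeps the original calling convention (import path assumed; the output structure may still be a flattened tuple/list):

import torch
from torch._functorch.aot_autograd import aot_export_joint_simple

def f(x, y):
    # Returns a tuple, since this API rejects bare (non-list/tuple) outputs.
    return (x + y.cos(),)

args = (torch.randn(3), torch.randn(3))
fx_g = aot_export_joint_simple(f, args, trace_joint=False)
print(fx_g(*args))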
1182
+
1183
+ # Private for now because we aren't providing a contract on what to return
1184
+ # for joint graphs (we could when there's a clearer use case)
1185
+ # In the future, we may need to add more export API's that provide their own strong guarantees.
1186
+ # This is meant as a general helper function for handling various export-y use cases.
1187
+ def _aot_export_function(
1188
+ func: Callable,
1189
+ args,
1190
+ *,
1191
+ num_params_buffers: int = 0,
1192
+ decompositions: Optional[Dict] = None,
1193
+ # If we're exporting a joint graph and we don't want any tangent inputs in the graph
1194
+ # (because we are backpropping through a scalar 1 loss),
1195
+ # we need to explicitly specify not to include tangents in the graph.
1196
+ # It's not enough just to check that our tangent is a scalar, since we also
1197
+ # need to know if it is a 1 (no need to make it a graph input), or something else
1198
+ # (requiring it to be a graph input).
1199
+ # We don't know this info at trace time though, so we need to make it an explicit config.
1200
+ no_tangents: bool = False,
1201
+ pre_dispatch: bool = False,
1202
+ kwargs=None,
1203
+ ) -> Tuple[torch.fx.GraphModule, ViewAndMutationMeta, pytree.TreeSpec, pytree.TreeSpec]:
1204
+ kwargs = kwargs or {}
1205
+
1206
+ flat_fn, out_spec = create_tree_flattened_fn(func, args, kwargs)
1207
+ flat_args, in_spec = pytree.tree_flatten((args, kwargs))
1208
+
1209
+ dynamic_shapes = False
1210
+ for x in flat_args:
1211
+ if isinstance(x, FakeTensor):
1212
+ dynamic_shapes = x.fake_mode.shape_env is not None
1213
+ break
1214
+
1215
+ # The export use case doesn't care about several bits of AOTConfig
1216
+ # (1) compilers (we just export the graph)
1217
+ # (2) partitioners (export is only full graph, user can partition themselves)
1218
+ aot_config = AOTConfig(
1219
+ fw_compiler=None,
1220
+ bw_compiler=None,
1221
+ inference_compiler=None,
1222
+ partition_fn=None,
1223
+ decompositions=decompositions,
1224
+ num_params_buffers=num_params_buffers,
1225
+ aot_id=next(AOT_COUNTER),
1226
+ # For now there's no use case involving keeping input mutations in the graph
1227
+ # (which we can only do in the inference case anyway).
1228
+ # We can add this later if we need to.
1229
+ keep_inference_input_mutations=False,
1230
+ dynamic_shapes=dynamic_shapes,
1231
+ aot_autograd_arg_pos_to_source=None,
1232
+ is_export=True,
1233
+ no_tangents=no_tangents,
1234
+ pre_dispatch=pre_dispatch,
1235
+ )
1236
+
1237
+ fx_g, meta = create_aot_dispatcher_function(
1238
+ flat_fn,
1239
+ flat_args,
1240
+ aot_config,
1241
+ )
1242
+ return fx_g, meta, in_spec, out_spec.spec
1243
+
1244
+
1245
+ compiled_function = aot_function
1246
+ compiled_module = aot_module
venv/lib/python3.10/site-packages/torch/_functorch/apis.py ADDED
@@ -0,0 +1,401 @@
1
+ # NOTE: We allow Dynamo to see this file (via torch/_dynamo/trace_rules.py) so that it can
2
+ # trace through functorch transforms.
3
+ # Currently, we can't allow Dynamo to see `eager_transforms.py`/`vmap.py` as that breaks a lot of things
4
+ # and there isn't a mechanism to selectively expose only some functions (eg. grad) from a file
5
+ # to Dynamo.
6
+ from torch._functorch.vmap import (vmap_impl, _check_randomness_arg,
7
+ Callable, in_dims_t, out_dims_t, _check_out_dims_is_int_or_int_pytree,
8
+ _process_batched_inputs, _chunked_vmap)
9
+ from torch._functorch.utils import exposed_in, argnums_t
10
+ import functools
11
+
12
+ # vmap(func)(inputs) wraps all Tensor inputs to be batched in BatchedTensors,
13
+ # sends those into func, and then unwraps the output BatchedTensors. Operations
14
+ # on BatchedTensors perform the batched operations that the user is asking for.
15
+ #
16
+ # vmap's randomness behavior differs from JAX's, which would require a PRNG key
17
+ # to be passed everywhere.
18
+
19
+
20
+ @exposed_in('torch.func')
21
+ def vmap(
22
+ func: Callable,
23
+ in_dims: in_dims_t = 0,
24
+ out_dims: out_dims_t = 0,
25
+ randomness: str = 'error',
26
+ *,
27
+ chunk_size=None) -> Callable:
28
+ """
29
+ vmap is the vectorizing map; ``vmap(func)`` returns a new function that
30
+ maps ``func`` over some dimension of the inputs. Semantically, vmap
31
+ pushes the map into PyTorch operations called by ``func``, effectively
32
+ vectorizing those operations.
33
+
34
+ vmap is useful for handling batch dimensions: one can write a function
35
+ ``func`` that runs on examples and then lift it to a function that can
36
+ take batches of examples with ``vmap(func)``. vmap can also be used to
37
+ compute batched gradients when composed with autograd.
38
+
39
+ .. note::
40
+ :func:`torch.vmap` is aliased to :func:`torch.func.vmap` for
41
+ convenience. Use whichever one you'd like.
42
+
43
+ Args:
44
+ func (function): A Python function that takes one or more arguments.
45
+ Must return one or more Tensors.
46
+ in_dims (int or nested structure): Specifies which dimension of the
47
+ inputs should be mapped over. ``in_dims`` should have a
48
+ structure like the inputs. If the ``in_dim`` for a particular
49
+ input is None, then that indicates there is no map dimension.
50
+ Default: 0.
51
+ out_dims (int or Tuple[int]): Specifies where the mapped dimension
52
+ should appear in the outputs. If ``out_dims`` is a Tuple, then
53
+ it should have one element per output. Default: 0.
54
+ randomness (str): Specifies whether the randomness in this
55
+ vmap should be the same or different across batches. If 'different',
56
+ the randomness for each batch will be different. If 'same', the
57
+ randomness will be the same across batches. If 'error', any calls to
58
+ random functions will error. Default: 'error'. WARNING: this flag
59
+ only applies to random PyTorch operations and does not apply to
60
+ Python's random module or numpy randomness.
61
+ chunk_size (None or int): If None (default), apply a single vmap over inputs.
62
+ If not None, then compute the vmap :attr:`chunk_size` samples at a time.
63
+ Note that :attr:`chunk_size=1` is equivalent to computing the vmap with a for-loop.
64
+ If you run into memory issues computing the vmap, please try a non-None chunk_size.
65
+
66
+ Returns:
67
+ Returns a new "batched" function. It takes the same inputs as
68
+ ``func``, except each input has an extra dimension at the index
69
+ specified by ``in_dims``. It returns the same outputs as
70
+ ``func``, except each output has an extra dimension at the index
71
+ specified by ``out_dims``.
72
+
73
+ .. warning:
74
+ :func:`vmap` works best with functional-style code. Please do not
75
+ perform any side-effects in ``func``, with the exception of
76
+ in-place PyTorch operations. Examples of side-effects include mutating
77
+ Python data structures and assigning values to variables not captured
78
+ in ``func``.
79
+
80
+ One example of using :func:`vmap` is to compute batched dot products. PyTorch
81
+ doesn't provide a batched ``torch.dot`` API; instead of unsuccessfully
82
+ rummaging through docs, use :func:`vmap` to construct a new function.
83
+
84
+ >>> torch.dot # [D], [D] -> []
85
+ >>> batched_dot = torch.func.vmap(torch.dot) # [N, D], [N, D] -> [N]
86
+ >>> x, y = torch.randn(2, 5), torch.randn(2, 5)
87
+ >>> batched_dot(x, y)
88
+
89
+ :func:`vmap` can be helpful in hiding batch dimensions, leading to a simpler
90
+ model authoring experience.
91
+
92
+ >>> batch_size, feature_size = 3, 5
93
+ >>> weights = torch.randn(feature_size, requires_grad=True)
94
+ >>>
95
+ >>> def model(feature_vec):
96
+ >>> # Very simple linear model with activation
97
+ >>> return feature_vec.dot(weights).relu()
98
+ >>>
99
+ >>> examples = torch.randn(batch_size, feature_size)
100
+ >>> result = torch.vmap(model)(examples)
101
+
102
+ :func:`vmap` can also help vectorize computations that were previously difficult
103
+ or impossible to batch. One example is higher-order gradient computation.
104
+ The PyTorch autograd engine computes vjps (vector-Jacobian products).
105
+ Computing a full Jacobian matrix for some function f: R^N -> R^N usually
106
+ requires N calls to ``autograd.grad``, one per Jacobian row. Using :func:`vmap`,
107
+ we can vectorize the whole computation, computing the Jacobian in a single
108
+ call to ``autograd.grad``.
109
+
110
+ >>> # Setup
111
+ >>> N = 5
112
+ >>> f = lambda x: x ** 2
113
+ >>> x = torch.randn(N, requires_grad=True)
114
+ >>> y = f(x)
115
+ >>> I_N = torch.eye(N)
116
+ >>>
117
+ >>> # Sequential approach
118
+ >>> jacobian_rows = [torch.autograd.grad(y, x, v, retain_graph=True)[0]
119
+ >>> for v in I_N.unbind()]
120
+ >>> jacobian = torch.stack(jacobian_rows)
121
+ >>>
122
+ >>> # vectorized gradient computation
123
+ >>> def get_vjp(v):
124
+ >>> return torch.autograd.grad(y, x, v)
125
+ >>> jacobian = torch.vmap(get_vjp)(I_N)
126
+
127
+ :func:`vmap` can also be nested, producing an output with multiple batched dimensions
128
+
129
+ >>> torch.dot # [D], [D] -> []
130
+ >>> batched_dot = torch.vmap(torch.vmap(torch.dot)) # [N1, N0, D], [N1, N0, D] -> [N1, N0]
131
+ >>> x, y = torch.randn(2, 3, 5), torch.randn(2, 3, 5)
132
+ >>> batched_dot(x, y) # tensor of size [2, 3]
133
+
134
+ If the inputs are not batched along the first dimension, ``in_dims`` specifies
135
+ the dimension that each input is batched along, as
136
+
137
+ >>> torch.dot # [N], [N] -> []
138
+ >>> batched_dot = torch.vmap(torch.dot, in_dims=1) # [N, D], [N, D] -> [D]
139
+ >>> x, y = torch.randn(2, 5), torch.randn(2, 5)
140
+ >>> batched_dot(x, y) # output is [5] instead of [2] if batched along the 0th dimension
141
+
142
+ If there are multiple inputs each of which is batched along different dimensions,
143
+ ``in_dims`` must be a tuple with the batch dimension for each input as
144
+
145
+ >>> torch.dot # [D], [D] -> []
146
+ >>> batched_dot = torch.vmap(torch.dot, in_dims=(0, None)) # [N, D], [D] -> [N]
147
+ >>> x, y = torch.randn(2, 5), torch.randn(5)
148
+ >>> batched_dot(x, y) # second arg doesn't have a batch dim because in_dim[1] was None
149
+
150
+ If the input is a Python struct, ``in_dims`` must be a tuple containing a struct
151
+ matching the shape of the input:
152
+
153
+ >>> f = lambda dict: torch.dot(dict['x'], dict['y'])
154
+ >>> x, y = torch.randn(2, 5), torch.randn(5)
155
+ >>> input = {'x': x, 'y': y}
156
+ >>> batched_dot = torch.vmap(f, in_dims=({'x': 0, 'y': None},))
157
+ >>> batched_dot(input)
158
+
159
+ By default, the output is batched along the first dimension. However, it can be batched
160
+ along any dimension by using ``out_dims``
161
+
162
+ >>> f = lambda x: x ** 2
163
+ >>> x = torch.randn(2, 5)
164
+ >>> batched_pow = torch.vmap(f, out_dims=1)
165
+ >>> batched_pow(x) # [5, 2]
166
+
167
+ For any function that uses kwargs, the returned function will not batch the kwargs but will
168
+ accept kwargs
169
+
170
+ >>> x = torch.randn([2, 5])
171
+ >>> def fn(x, scale=4.):
172
+ >>> return x * scale
173
+ >>>
174
+ >>> batched_pow = torch.vmap(fn)
175
+ >>> assert torch.allclose(batched_pow(x), x * 4)
176
+ >>> batched_pow(x, scale=x) # scale is not batched, output has shape [2, 2, 5]
177
+
178
+ .. note::
179
+ vmap does not provide general autobatching or handle variable-length
180
+ sequences out of the box.
181
+ """
182
+ _check_randomness_arg(randomness)
183
+ if not (chunk_size is None or chunk_size > 0):
184
+ raise ValueError(f"vmap: chunk_size should be None or greater than 0. (got {chunk_size})")
185
+
186
+ # @functools.wraps(func)
187
+ def wrapped(*args, **kwargs):
188
+ return vmap_impl(func, in_dims, out_dims, randomness, chunk_size, *args, **kwargs)
189
+
190
+ return wrapped
191
+
192
+
193
+ def chunk_vmap(
194
+ func: Callable,
195
+ in_dims: in_dims_t = 0,
196
+ out_dims: out_dims_t = 0,
197
+ randomness: str = 'error',
198
+ chunks=2) -> Callable:
199
+ """
200
+ chunk_vmap is the vectorizing map (vmap) using chunks of input data. It is a mix of vmap (which vectorizes
201
+ everything) and map (which executes things sequentially). ``chunk_vmap`` vectorizes the input with number of
202
+ chunks at a time. For more details about vectorizing map, see :func:`vmap`.
203
+
204
+ .. note::
205
+ Please use :func:`vmap` with ``chunk_size`` argument instead of this API.
206
+
207
+ Args:
208
+ func (function): A Python function that takes one or more arguments.
209
+ Must return one or more Tensors.
210
+ in_dims (int or nested structure): Specifies which dimension of the
211
+ inputs should be mapped over. ``in_dims`` should have a
212
+ structure like the inputs. If the ``in_dim`` for a particular
213
+ input is None, then that indicates there is no map dimension.
214
+ Default: 0.
215
+ out_dims (int or Tuple[int]): Specifies where the mapped dimension
216
+ should appear in the outputs. If ``out_dims`` is a Tuple, then
217
+ it should have one element per output. Default: 0.
218
+ randomness (str): Specifies whether the randomness in this
219
+ vmap should be the same or different across batches. If 'different',
220
+ the randomness for each batch will be different. If 'same', the
221
+ randomness will be the same across batches. If 'error', any calls to
222
+ random functions will error. Default: 'error'. WARNING: this flag
223
+ only applies to random PyTorch operations and does not apply to
224
+ Python's random module or numpy randomness.
225
+ chunks (int): Number of chunks to use to split the input data. Default is 2.
226
+ If equals to 1 then :func:`vmap` is called.
227
+
228
+ Returns:
229
+ Returns a new "batched" function. It takes the same inputs as
230
+ ``func``, except each input has an extra dimension at the index
231
+ specified by ``in_dims``. It returns the same outputs as
232
+ ``func``, except each output has an extra dimension at the index
233
+ specified by ``out_dims``.
234
+ """
235
+ _check_randomness_arg(randomness)
236
+
237
+ if chunks == 1:
238
+ return vmap(func, in_dims=in_dims, out_dims=out_dims, randomness=randomness)
239
+
240
+ def _get_chunk_flat_args(flat_args_, flat_in_dims_, chunks_):
241
+ flat_args_chunks = tuple(
242
+ t.chunk(chunks_, dim=in_dim) if in_dim is not None else [t, ] * chunks_
243
+ for t, in_dim in zip(flat_args_, flat_in_dims_)
244
+ )
245
+ # transpose chunk dim and flatten structure
246
+ # chunks_flat_args is a list of flatten args
247
+ chunks_flat_args = zip(*flat_args_chunks)
248
+ return chunks_flat_args
249
+
250
+ @functools.wraps(func)
251
+ def wrapped_with_chunks(*args, **kwargs):
252
+ _check_out_dims_is_int_or_int_pytree(out_dims, func)
253
+ _, flat_in_dims, flat_args, args_spec = _process_batched_inputs(in_dims, args, func)
254
+ # Chunk flat arguments
255
+ chunks_flat_args = _get_chunk_flat_args(flat_args, flat_in_dims, chunks)
256
+
257
+ # Apply vmap on chunks
258
+ return _chunked_vmap(func, flat_in_dims, chunks_flat_args, args_spec, out_dims, randomness, **kwargs)
259
+
260
+ return wrapped_with_chunks
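A usage sketch of chunk_vmap (the note above recommends vmap(..., chunk_size=...) instead); the import path is taken from this file:

import torch
from torch._functorch.apis import chunk_vmap, vmap

x, y = torch.randn(8, 5), torch.randn(8, 5)
batched_dot = chunk_vmap(torch.dot, chunks=2)   # vmap over 2 chunks of 4 rows each
assert torch.allclose(batched_dot(x, y), vmap(torch.dot)(x, y))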
261
+
262
+
263
+ @exposed_in("torch.func")
264
+ def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Callable:
265
+ """``grad`` operator helps computing gradients of ``func`` with respect to the
266
+ input(s) specified by ``argnums``. This operator can be nested to
267
+ compute higher-order gradients.
268
+
269
+ Args:
270
+ func (Callable): A Python function that takes one or more arguments.
271
+ Must return a single-element Tensor. If specified ``has_aux`` equals ``True``,
272
+ function can return a tuple of single-element Tensor and other auxiliary objects:
273
+ ``(output, aux)``.
274
+ argnums (int or Tuple[int]): Specifies arguments to compute gradients with respect to.
275
+ ``argnums`` can be single integer or tuple of integers. Default: 0.
276
+ has_aux (bool): Flag indicating that ``func`` returns a tensor and other
277
+ auxiliary objects: ``(output, aux)``. Default: False.
278
+
279
+ Returns:
280
+ Function to compute gradients with respect to its inputs. By default, the output of
281
+ the function is the gradient tensor(s) with respect to the first argument.
282
+ If specified ``has_aux`` equals ``True``, tuple of gradients and output auxiliary objects
283
+ is returned. If ``argnums`` is a tuple of integers, a tuple of output gradients with
284
+ respect to each ``argnums`` value is returned.
285
+
286
+ Example of using ``grad``:
287
+
288
+ >>> # xdoctest: +SKIP
289
+ >>> from torch.func import grad
290
+ >>> x = torch.randn([])
291
+ >>> cos_x = grad(lambda x: torch.sin(x))(x)
292
+ >>> assert torch.allclose(cos_x, x.cos())
293
+ >>>
294
+ >>> # Second-order gradients
295
+ >>> neg_sin_x = grad(grad(lambda x: torch.sin(x)))(x)
296
+ >>> assert torch.allclose(neg_sin_x, -x.sin())
297
+
298
+ When composed with ``vmap``, ``grad`` can be used to compute per-sample-gradients:
299
+
300
+ >>> # xdoctest: +SKIP
301
+ >>> from torch.func import grad, vmap
302
+ >>> batch_size, feature_size = 3, 5
303
+ >>>
304
+ >>> def model(weights, feature_vec):
305
+ >>> # Very simple linear model with activation
306
+ >>> assert feature_vec.dim() == 1
307
+ >>> return feature_vec.dot(weights).relu()
308
+ >>>
309
+ >>> def compute_loss(weights, example, target):
310
+ >>> y = model(weights, example)
311
+ >>> return ((y - target) ** 2).mean() # MSELoss
312
+ >>>
313
+ >>> weights = torch.randn(feature_size, requires_grad=True)
314
+ >>> examples = torch.randn(batch_size, feature_size)
315
+ >>> targets = torch.randn(batch_size)
316
+ >>> inputs = (weights, examples, targets)
317
+ >>> grad_weight_per_example = vmap(grad(compute_loss), in_dims=(None, 0, 0))(*inputs)
318
+
319
+ Example of using ``grad`` with ``has_aux`` and ``argnums``:
320
+
321
+ >>> # xdoctest: +SKIP
322
+ >>> from torch.func import grad
323
+ >>> def my_loss_func(y, y_pred):
324
+ >>> loss_per_sample = (0.5 * y_pred - y) ** 2
325
+ >>> loss = loss_per_sample.mean()
326
+ >>> return loss, (y_pred, loss_per_sample)
327
+ >>>
328
+ >>> fn = grad(my_loss_func, argnums=(0, 1), has_aux=True)
329
+ >>> y_true = torch.rand(4)
330
+ >>> y_preds = torch.rand(4, requires_grad=True)
331
+ >>> out = fn(y_true, y_preds)
332
+ >>> # > output is ((grads w.r.t y_true, grads w.r.t y_preds), (y_pred, loss_per_sample))
333
+
334
+ .. note::
335
+ Using PyTorch ``torch.no_grad`` together with ``grad``.
336
+
337
+ Case 1: Using ``torch.no_grad`` inside a function:
338
+
339
+ >>> # xdoctest: +SKIP
340
+ >>> def f(x):
341
+ >>> with torch.no_grad():
342
+ >>> c = x ** 2
343
+ >>> return x - c
344
+
345
+ In this case, ``grad(f)(x)`` will respect the inner ``torch.no_grad``.
346
+
347
+ Case 2: Using ``grad`` inside ``torch.no_grad`` context manager:
348
+
349
+ >>> # xdoctest: +SKIP
350
+ >>> with torch.no_grad():
351
+ >>> grad(f)(x)
352
+
353
+ In this case, ``grad`` will respect the inner ``torch.no_grad``, but not the
354
+ outer one. This is because ``grad`` is a "function transform": its result
355
+ should not depend on the result of a context manager outside of ``f``.
356
+
357
+ """
358
+ # To avoid cyclical dependency.
359
+ import torch._functorch.eager_transforms as eager_transforms
360
+
361
+ @functools.wraps(func)
362
+ def wrapper(*args, **kwargs):
363
+ return eager_transforms.grad_impl(func, argnums, has_aux, args, kwargs)
364
+ return wrapper
365
+
366
+
367
+ @exposed_in("torch.func")
368
+ def grad_and_value(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Callable:
369
+ """
370
+ Returns a function to compute a tuple of the gradient and primal, or
371
+ forward, computation.
372
+
373
+ Args:
374
+ func (Callable): A Python function that takes one or more arguments.
375
+ Must return a single-element Tensor. If specified ``has_aux``
376
+ equals ``True``, function can return a tuple of single-element
377
+ Tensor and other auxiliary objects: ``(output, aux)``.
378
+ argnums (int or Tuple[int]): Specifies arguments to compute gradients
379
+ with respect to. ``argnums`` can be single integer or tuple of
380
+ integers. Default: 0.
381
+ has_aux (bool): Flag indicating that ``func`` returns a tensor and
382
+ other auxiliary objects: ``(output, aux)``. Default: False.
383
+
384
+ Returns:
385
+ Function to compute a tuple of gradients with respect to its inputs
386
+ and the forward computation. By default, the output of the function is
387
+ a tuple of the gradient tensor(s) with respect to the first argument
388
+ and the primal computation. If specified ``has_aux`` equals
389
+ ``True``, tuple of gradients and tuple of the forward computation with
390
+ output auxiliary objects is returned. If ``argnums`` is a tuple of
391
+ integers, a tuple of a tuple of the output gradients with respect to
392
+ each ``argnums`` value and the forward computation is returned.
393
+
394
+ See :func:`grad` for examples
395
+ """
396
+ from torch._functorch import eager_transforms
397
+
398
+ @functools.wraps(func)
399
+ def wrapper(*args, **kwargs):
400
+ return eager_transforms.grad_and_value_impl(func, argnums, has_aux, args, kwargs)
401
+ return wrapper
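A small example of grad_and_value, showing that a single call returns both the gradient and the primal (loss) value:

import torch
from torch.func import grad_and_value

def loss_fn(w):
    return (w ** 2).sum()

w = torch.randn(3)
g, loss = grad_and_value(loss_fn)(w)
assert torch.allclose(g, 2 * w)
assert torch.allclose(loss, loss_fn(w))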
venv/lib/python3.10/site-packages/torch/_functorch/autograd_function.py ADDED
@@ -0,0 +1,659 @@
1
+ import torch
2
+ from torch._ops import HigherOrderOperator
3
+ from torch._C._functorch import TransformType
4
+ from torch._functorch.utils import enable_single_level_autograd_function
5
+ import torch.utils._pytree as pytree
6
+ from torch._C._functorch import (
7
+ _wrap_for_grad,
8
+ _unwrap_for_grad,
9
+ current_level,
10
+ )
11
+ from torch._functorch.vmap import (
12
+ wrap_batched,
13
+ unwrap_batched,
14
+ restore_vmap,
15
+ _add_batch_dim,
16
+ )
17
+ from torch._functorch.apis import vmap
18
+ from torch._functorch.vmap import _broadcast_to_and_flatten
19
+ from torch.autograd.forward_ad import _set_fwd_grad_enabled
20
+ from typing import Any, NamedTuple, Tuple
21
+
22
+ # autograd.Function technically runs before the regular PyTorch dispatcher.
23
+ # This is how features like autocast and torch_dispatch (e.g. PythonTLSSnapshot)
24
+ # work with it. One day we might decide to change this, but until then,
25
+ # we need to give the illusion that autograd.Function runs before those things.
26
+ #
27
+ # We do this by creating a custom HigherOrderOperator that only functorch
28
+ # dispatches specially.
29
+ class CustomFunctionHigherOrderOperator(HigherOrderOperator):
30
+ def __init__(self):
31
+ super().__init__('custom_function_call')
32
+
33
+ def __call__(self, autograd_function, *args, **kwargs):
34
+ # When custom_function_call is done dispatching through functorch,
35
+ # it should just invoke the autograd.Function. This is consistent
36
+ # with the autograd.Function behavior of being invoked before the
37
+ # PyTorch dispatcher.
38
+ #
39
+ # This will lead us into trouble later down the line, but this is
40
+ # pre-existing. There is an invariant that a function traced by
41
+ # make_fx should have the same behavior when provided the same
42
+ # Tensor. However, make_fx sees autograd.Function as a composite
43
+ # (because autograd.Function happens before the Python dispatch key)
44
+ # and only traces the forward pass.
45
+ if torch._C._are_functorch_transforms_active():
46
+ return super().__call__(autograd_function, *args, **kwargs)
47
+ return autograd_function.apply(*args, **kwargs)
48
+
49
+
50
+ # "custom_function_call"
51
+ # This is the mechanism for an autograd.Function that works with functorch transforms.
52
+ # It wraps an autograd.Function; interactions with functorch transforms are defined
53
+ # via PyDispatcher and HigherOrderOperator rather than through the traditional PyTorch
54
+ # dispatcher.
55
+ custom_function_call = CustomFunctionHigherOrderOperator()
56
+
57
+
58
+ # The grad rule for custom_function_call is to construct a new _SingleLevelFunction
59
+ # (autograd.Function that only works with a single layer (level) of functorch) that:
60
+ # - unwraps the inputs
61
+ # - redispatches to custom_function_call
62
+ # - wraps the outputs
63
+ # and whose backward pass calls the original autograd.Function's backward.
64
+ #
65
+ # Why do we need to redispatch to custom_function_call?
66
+ # -----------------------------------------------------
67
+ # This is consistent with how ATen operators work with functorch's grad transform:
68
+ # they always redispatch to the original operator.
69
+ # Consider torch.sin, and let's say we do grad0(grad1(torch.sin))(x)
70
+ #
71
+ # grad1 will:
72
+ # - set up the autograd graph
73
+ # - unwrap the inputs
74
+ # - redispatch to at::sin (*)
75
+ # - rewrap the outputs on the return
76
+ #
77
+ # On the redispatch in (*), grad0 will:
78
+ # - set up the autograd graph
79
+ # - unwrap the inputs
80
+ # - redispatch to at::sin
81
+ # - rewrap the outputs on the return
82
+ #
83
+ # To "set up the autograd graph", we generate a _SingleLevelFunction
84
+ # and apply it.
85
+ @custom_function_call.py_impl(TransformType.Grad)
86
+ @custom_function_call.py_impl(TransformType.Jvp)
87
+ def custom_function_call_grad(interpreter, autograd_function, *operands):
88
+ Generated = generate_single_level_function(interpreter, autograd_function)
89
+ with enable_single_level_autograd_function():
90
+ flat_out = Generated.apply(*operands)
91
+ return flat_out
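+
+ # A rough sketch of how the pieces above fit together. MySquare below is a
+ # hypothetical autograd.Function, not something defined in this file:
+ #
+ #   class MySquare(torch.autograd.Function):
+ #       @staticmethod
+ #       def forward(x):
+ #           return x * x
+ #
+ #       @staticmethod
+ #       def setup_context(ctx, inputs, output):
+ #           ctx.save_for_backward(inputs[0])
+ #
+ #       @staticmethod
+ #       def backward(ctx, gy):
+ #           x, = ctx.saved_tensors
+ #           return 2 * x * gy
+ #
+ #   torch.func.grad(MySquare.apply)(torch.randn([]))
+ #
+ # The grad transform routes MySquare through custom_function_call_grad above,
+ # which builds MySquareGenerated (a _SingleLevelFunction), applies it, and
+ # unwraps/rewraps tensors around the redispatch to custom_function_call.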
+
+
+ def generate_single_level_function(interpreter, autograd_function):
+     level = interpreter.level()
+
+     def forward(*operands):
+         unwrapped_operands = pytree.tree_map_only(
+             torch.Tensor,
+             lambda x: _unwrap_for_grad(x, level),
+             operands)
+         # Both enable_grad() and _set_fwd_grad_enabled() are necessary no matter
+         # the transform. _SingleLevelFunction will turn off both fwd and bwd
+         # gradient computation and we need to turn it back on here.
+         with torch.enable_grad(), _set_fwd_grad_enabled(True), interpreter.lower():
+             unwrapped_output = custom_function_call(autograd_function, *unwrapped_operands)
+
+         # See NOTE [mark_dirty object identity check]
+         def wrap_fn(output):
+             return _wrap_for_grad(output, level)
+
+         return wrap_outputs_maintaining_identity(
+             unwrapped_output,
+             unwrapped_operands,
+             operands,
+             wrap_fn)
+
+     def setup_context(ctx, inputs, output):
+         return autograd_function.setup_context(ctx, inputs, output)
+
+     # backward is only used if the transform is TransformType.Grad
+     def backward(ctx, *grads):
+         result = autograd_function.backward(ctx, *grads)
+         return result
+
+     # jvp is only used if the transform is TransformType.Jvp
+     def jvp(ctx, *tangents):
+         result = autograd_function.jvp(ctx, *tangents)
+         return result
+
+     # This is the sequence of magic words to dynamically generate a Subclass with
+     # a given name. A Tensor's .grad_fn field has a class name that is the original
+     # autograd.Function's name + Backward, so we do this to generate some
+     # meaningful name.
+     name = f'{autograd_function.__name__}Generated'
+     Generated = type(
+         name,
+         (torch.autograd.function._SingleLevelFunction,),
+         {
+             'forward': staticmethod(forward),
+             'backward': staticmethod(backward),
+             'jvp': staticmethod(jvp),
+             'setup_context': staticmethod(setup_context),
+         },
+     )
+     return Generated
+
+ # wrap_outputs_maintaining_identity handles outputs from the vmap,
+ # backward (vjp), and jvp staticmethod. The way it distinguishes
+ # between the vmap case and the {backward, jvp} case is if the out_dims
+ # are specified or not.
+ #
+ # NB: we cannot use out_dims=None as the deciding factor. This is because
+ # out_dims=None can still happen in the vmap staticmethod! What the
+ # user is saying in that case is that their output does not have a
+ # dimension that is being vmapped over, which is valid.
+ NO_OUT_DIMS = "not specified"
+
+ # NOTE [mark_dirty object identity check]
+ # autograd.Function's ctx.mark_dirty expects a returned input
+ # to have the same object identity as the input.
+ # Mode-only functorch will greatly simplify this logic.
+ def wrap_outputs_maintaining_identity(
+         outputs, unwrapped_inputs, orig_inputs, wrap_fn, out_dims=NO_OUT_DIMS):
+     flat_unwrapped_inputs = pytree.arg_tree_leaves(*unwrapped_inputs)
+     flat_orig_inputs = pytree.arg_tree_leaves(*orig_inputs)
+
+     unwrapped_input_to_orig_input = {
+         id(unwrapped): orig
+         for unwrapped, orig in zip(flat_unwrapped_inputs, flat_orig_inputs)
+     }
+
+     flat_outputs, spec = pytree.tree_flatten(outputs)
+     result = []
+
+     out_dims_specified = out_dims != NO_OUT_DIMS
+
+     if out_dims_specified:
+         flat_out_dims = _broadcast_to_and_flatten(out_dims, spec)
+         # _broadcast_to_and_flatten returns None if it is unable to broadcast.
+         # TODO: update following link from master to stable once that's out
+         if flat_out_dims is None:
+             raise RuntimeError(
+                 f"The autograd.Function's vmap staticmethod returned an "
+                 f"incompatible (output, out_dims) tuple. "
+                 f"Expected out_dims={out_dims} "
+                 f"to be compatible with the structure of `output`. "
+                 f"out_dims has structure {pytree.tree_flatten(out_dims)[1]} "
+                 f"but output has structure {spec}. "
+                 f"For more details, please see "
+                 f"https://pytorch.org/docs/master/notes/extending.func.html"
+             )
+
+     for i, output in enumerate(flat_outputs):
+         if not isinstance(output, torch.Tensor):
+             result.append(output)
+             continue
+         if id(output) in unwrapped_input_to_orig_input:
+             result.append(unwrapped_input_to_orig_input[id(output)])
+             continue
+         if out_dims_specified:
+             result.append(wrap_fn(output, flat_out_dims[i]))  # type: ignore[possibly-undefined, index]
+         else:
+             result.append(wrap_fn(output))
+
+     return pytree.tree_unflatten(result, spec)
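+
+ # Illustration of the identity check above with a hypothetical vmap
+ # staticmethod that returns one of its (unwrapped) inputs unchanged:
+ #
+ #   def vmap(info, in_dims, x):
+ #       x.mul_(2)
+ #       return x, in_dims[0]
+ #
+ # The id() lookup maps that returned tensor back to the original wrapped
+ # input, so ctx.mark_dirty's object-identity requirement stays satisfied.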
+
+
+ # NOTE: [functorch vjp and autograd interaction]
+ # There's an edge case with the functorch vjp and autograd interaction
+ # that will eventually be fixed by mode-only functorch.
+ # The TL;DR is that there's no way to unwrap a dead GradTensorWrapper,
+ # so we (the framework) need to do it manually. Regular PyTorch operators
+ # automatically do this, so handling it manually here keeps things consistent.
+ #
+ # class MyExp(torch.autograd.Function):
+ #     @staticmethod
+ #     def forward(x):
+ #         return x.exp()
+ #
+ #     @staticmethod
+ #     def setup_context(ctx, inputs, output):
+ #         y = output
+ #         ctx.save_for_backward(y)
+ #
+ #     @staticmethod
+ #     def backward(ctx, gy):
+ #         y, = ctx.saved_tensors
+ #         return MyMul.apply(gy, y)
+ #
+ # x = torch.randn([], requires_grad=True)
+ # gy = torch.randn([], requires_grad=True)
+ # _, vjp_fn = vjp(MyExp.apply, x)
+ # result = vjp_fn(gy)
+ #
+ # MyMul is an autograd.Function that is not shown here.
+ # It saves a `y` for backward (since gy requires grad).
+ #
+ # in vjp_fn(gy), we get:
+ # > MyMul.apply(gy, GradTensorWrapper(y, level=dead))
+ # Because the y that is saved for backward by MyExp is a GradTensorWrapper
+ # but is now dead since we are outside the vjp context.
+ #
+ # PyTorch dispatcher operations, upon seeing a dead GradTensorWrapper,
+ # will automatically unwrap the GradTensorWrapper when applied.
+ # But since autograd.Function technically sits above the regular PyTorch
+ # dispatcher, it doesn't get this treatment. So we manually do
+ # the unwrapping to be consistent with regular PyTorch dispatcher operations.
+
+
+ class VmapInfo(NamedTuple):
+     batch_size: int
+     randomness: str
+
+
+ def has_overriden_vmap_rule(autograd_function):
+     return autograd_function.vmap is not torch.autograd.Function.vmap
+
+
+ def validate_vmap_returns_tuple_of_two_elements(result):
+     base_error_msg = (
+         "Expected the vmap staticmethod to have two returns, an output "
+         "and out_dims with pytree structure compatible with the output. "
+     )
+     if not isinstance(result, tuple):
+         raise RuntimeError(base_error_msg + f"Got a {type(result)} instead")
+     if len(result) != 2:
+         raise RuntimeError(base_error_msg + f"Got {len(result)} returns instead")
+
+ @custom_function_call.py_impl(TransformType.Vmap)
+ def custom_function_call_vmap(interpreter, autograd_function, *operands):
+     if autograd_function.generate_vmap_rule:
+         if has_overriden_vmap_rule(autograd_function):
+             # TODO: Update link to stable once that's out
+             # https://github.com/pytorch/pytorch/issues/92029
+             raise RuntimeError(
+                 f"You tried to vmap over {autograd_function.__name__}, but "
+                 f"it has both generate_vmap_rule=True and an overridden vmap "
+                 f"staticmethod. Please set generate_vmap_rule=False or delete "
+                 f"the overridden vmap staticmethod to avoid ambiguity. "
+                 f"For more details, please see "
+                 f"https://pytorch.org/docs/master/notes/extending.func.html")
+         return custom_function_call_vmap_generate_rule(interpreter, autograd_function, *operands)
+
+     if not has_overriden_vmap_rule(autograd_function):
+         # TODO: Update link to stable once that's out
+         # https://github.com/pytorch/pytorch/issues/92029
+         raise RuntimeError(
+             f"You tried to vmap over {autograd_function.__name__}, but "
+             f"it does not have vmap support. Please override and implement the "
+             f"vmap staticmethod or set generate_vmap_rule=True. "
+             f"For more details, please see "
+             f"https://pytorch.org/docs/master/notes/extending.func.html")
+
+     current_level = interpreter.level()
+     info = VmapInfo(
+         batch_size=interpreter.batch_size(),
+         randomness=interpreter.randomness(),
+     )
+     unwrapped_operands, in_dims = unwrap_batched(operands, current_level)
+
+     # If none of the tensors are batched at the current level, then we skip the
+     # current level. This saves the user from needing to handle this case in
+     # their vmap staticmethod (and is consistent with our C++ batching rule API)
+     if pytree.tree_all(lambda dim: dim is None, in_dims):
+         with interpreter.lower():
+             return custom_function_call(autograd_function, *operands)
+
+     with interpreter.lower():
+         result = autograd_function.vmap(info, in_dims, *unwrapped_operands)
+     validate_vmap_returns_tuple_of_two_elements(result)
+     unwrapped_output, out_dims = result
+
+     # See NOTE [mark_dirty object identity check]
+     def wrap_fn(output, out_dim):
+         return output if out_dim is None else _add_batch_dim(output, out_dim, current_level)
+
+     return wrap_outputs_maintaining_identity(
+         unwrapped_output,
+         unwrapped_operands,
+         operands,
+         wrap_fn,
+         out_dims=out_dims)
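+
+ # A minimal sketch of the vmap-staticmethod contract checked above. The
+ # Function below is hypothetical; see the extending.func notes linked in the
+ # error messages for the authoritative description:
+ #
+ #   class MyExpWithVmap(torch.autograd.Function):
+ #       generate_vmap_rule = False
+ #
+ #       @staticmethod
+ #       def forward(x):
+ #           return x.exp()
+ #
+ #       @staticmethod
+ #       def setup_context(ctx, inputs, output):
+ #           ctx.save_for_backward(output)
+ #
+ #       @staticmethod
+ #       def backward(ctx, gy):
+ #           y, = ctx.saved_tensors
+ #           return gy * y
+ #
+ #       @staticmethod
+ #       def vmap(info, in_dims, x):
+ #           # x arrives unwrapped; in_dims[0] is its batch dim (or None).
+ #           return x.exp(), in_dims[0]
+ #
+ #   torch.vmap(MyExpWithVmap.apply)(torch.randn(3))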
+
+
+ def custom_function_call_vmap_generate_rule(interpreter, autograd_function, *operands):
+     unwrapped_operands, in_dims = unwrap_batched(operands, interpreter.level())
+     vmapped_function, get_out_dims = vmapify_autograd_function(
+         autograd_function, in_dims, interpreter.batch_size(), interpreter.randomness())
+
+     with interpreter.lower():
+         output = custom_function_call(vmapped_function, *unwrapped_operands)
+
+     out_dims = get_out_dims()
+     return wrap_batched(output, out_dims, interpreter.level())
+
+
+ @custom_function_call.py_impl(TransformType.Functionalize)
+ def custom_function_call_functionalize(interpreter, autograd_function, generate_vmap_rule, *operands):
+     raise RuntimeError("NYI: Functionalize rule for custom_function_call")
+
+
+ def vmapify_autograd_function(autograd_function, in_dims, batch_size, randomness):
+     # The following values are saved from the forward() and setup_context()
+     # and used in backward().
+     # Why do we save the values out here instead of on the ctx object?
+     # - out_dims: There's no way to retrieve this from forward()
+     # - input_shapes, saved_tensors_bdims: I'm a bit scared of nesting
+     #   vmap(vmap(...)) but not completely sure if it is a problem. If we
+     #   assigned those fields to the ctx object, the worry is that they
+     #   get overwritten.
+     init_val = "not populated"
+     out_dims = init_val
+     input_shapes: Any = init_val
+     saved_tensors_bdims: Any = init_val
+
+     def forward(*operands):
+         nonlocal out_dims
+         outputs, out_dims = restore_vmap(
+             autograd_function.forward, in_dims, batch_size, randomness)(*operands)
+         return outputs
+
+     def setup_context(ctx, inputs, outputs):
+         input_shapes_ = None
+         saved_tensors_bdims_ = None
+
+         def inner(inputs, outputs):
+             # wrapped_ctx.save_for_backward will:
+             # - unwrap batchedtensors into (tensor, bdim)
+             # - save_for_backward(*unwrapped_tensors)
+             # - assign the bdims to wrapped_ctx._pt_saved_tensors_bdims
+             wrapped_ctx = CtxCustomSave(ctx, current_level())
+             autograd_function.setup_context(wrapped_ctx, inputs, outputs)
+
+             # input_shapes are used for reductify later to reduce expanded gradients
+             # to the correct shape.
+             # See NOTE: [Why can't we rely on autograd to reduce expanded gradients?]
+             # for more details
+             nonlocal input_shapes_
+             input_shapes_ = tuple(inp.shape if isinstance(inp, torch.Tensor) else None
+                                   for inp in inputs)
+             nonlocal saved_tensors_bdims_
+             saved_tensors_bdims_ = wrapped_ctx._pt_saved_tensors_bdims
+
+         # See NOTE: [Why do we need to run setup_context under a vmap?]
+         restore_vmap(
+             inner,
+             (in_dims, out_dims),
+             batch_size,
+             randomness,
+         )(inputs, outputs)
+
+         nonlocal input_shapes
+         input_shapes = input_shapes_
+         nonlocal saved_tensors_bdims
+         saved_tensors_bdims = saved_tensors_bdims_
+
+     def jvp(ctx, *tangents):
+         assert out_dims != init_val
+         assert saved_tensors_bdims != init_val
+
+         def jvp_no_context(saved_tensors, tangents):
+             wrapped_ctx = CtxWithSavedTensors(ctx, saved_tensors)
+             return autograd_function.jvp(wrapped_ctx, *tangents)
+
+         tangent_in_dims = get_tangents_in_dims(in_dims, tangents)
+         out_tangents, out_tangents_dims = restore_vmap(
+             jvp_no_context, (saved_tensors_bdims, tangent_in_dims), batch_size, randomness)(
+                 ctx.saved_tensors, tangents)
+
+         result = reductify(out_tangents, out_tangents_dims, out_dims, batch_size)
+         return result
+
+     def backward(ctx, *grad_outputs):
+         assert out_dims != init_val
+         assert input_shapes != init_val
+         assert saved_tensors_bdims != init_val
+
+         def backward_no_context(inputs):
+             saved_tensors, grad_outputs = inputs
+             wrapped_ctx = CtxWithSavedTensors(ctx, saved_tensors)
+             return autograd_function.backward(wrapped_ctx, *grad_outputs)
+
+         grad_ins, grad_ins_dims = restore_vmap(
+             backward_no_context, ((saved_tensors_bdims, out_dims),), batch_size, randomness)(
+                 (ctx.saved_tensors, grad_outputs))
+         result = reductify(grad_ins, grad_ins_dims, in_dims, batch_size, input_shapes)
+         return result
+
+     name = f'Vmapped{autograd_function.__name__}'
+     Generated = type(
+         name,
+         (torch.autograd.Function,),
+         {
+             'forward': staticmethod(forward),
+             'backward': staticmethod(backward),
+             'jvp': staticmethod(jvp),
+             'setup_context': staticmethod(setup_context),
+             'generate_vmap_rule': True
+         }
+     )
+
+     def get_out_dims():
+         assert out_dims != init_val
+         return out_dims
+
+     return Generated, get_out_dims
+
+
+ # tangents might be None, so we need to replace
+ # the corresponding in_dims with None.
+ def get_tangents_in_dims(input_dims, tangents):
+     flat_in_dims, spec = pytree.tree_flatten(input_dims)
+     flat_tangents = pytree.arg_tree_leaves(*tangents)
+     result = [None if tangent is None else in_dim
+               for in_dim, tangent in zip(flat_in_dims, flat_tangents)]
+     return pytree.tree_unflatten(result, spec)
+
+
+ # NOTE: [Why do we need to run setup_context under a vmap?]
+ # Consider the following autograd.Function
+ #
+ # class Sum(torch.autograd.Function):
+ #     @staticmethod
+ #     def forward(x):
+ #         return x.sum()
+ #     @staticmethod
+ #     def setup_context(ctx, inputs, outputs):
+ #         ctx.x_shape = inputs[0].shape
+ #     @staticmethod
+ #     def backward(ctx, gy):
+ #         return gy.expand(ctx.x_shape)
+ #
+ # x = torch.randn(B, 4)
+ # in_dims = 0
+ # vmap(Sum.apply, in_dims)(x)
+ #
+ # Let’s assume for a moment that we didn’t vmap setup_context in VmappedSum:
+ #
+ # class VmappedSum(torch.autograd.Function):
+ #     @staticmethod
+ #     def forward(x):
+ #         return vmap(Sum.forward, in_dims)(x)
+ #
+ #     @staticmethod
+ #     def setup_context(ctx, inputs, outputs):
+ #         Sum.setup_context(ctx, inputs, outputs)
+ #
+ #     @staticmethod
+ #     def backward(ctx, gy):
+ #         def backward_no_context(gy):
+ #             return gy.expand(ctx.x_shape)
+ #
+ #         dims = (0,)
+ #         gx = vmap(backward_no_context, dims)(gy)
+ #         return gx
+ #
+ # We end up saving [B, 4] as x_shape. In the backward, gy has shape [B],
+ # and we’re doing:
+ #
+ # def backward_no_context(gy):
+ #     return gy.expand([B, 4])
+ #
+ # gx = vmap(backward_no_context, dims)(gy: "Tensor[B]")
+ #
+ # This gives us the wrong result (gx has shape [B, B, 4], but it should
+ # have shape [4]). Performing vmap over setup_context means the shape
+ # saved has shape [4] and leads to a correct result shape for gx.
+
+ # Wraps a ctx object. Forwards all attr accesses to the underlying object
+ # except for the attrs in _pt_reserved_attrs
+ class WrappedCtx:
+     _pt_reserved_attrs: Tuple[str, ...] = ('_pt_reserved_attrs', '_pt_inner_ctx')
+
+     def __init__(self, ctx):
+         if not isinstance(ctx, WrappedCtx):
+             reserved_attrs = type(self)._pt_reserved_attrs
+             for name in reserved_attrs:
+                 if not hasattr(ctx, name):
+                     continue
+                 raise RuntimeError(
+                     f'PyTorch reserves the {reserved_attrs} field on ctx. '
+                     'Please name your fields on ctx something else to avoid name '
+                     'collision.')
+         self._pt_inner_ctx = ctx
+
+     def __getattr__(self, name):
+         return getattr(self._pt_inner_ctx, name)
+
+     def __setattr__(self, name, value):
+         if name in type(self)._pt_reserved_attrs:
+             self.__dict__[name] = value
+             return
+         return setattr(self._pt_inner_ctx, name, value)
+
+ # Wraps ctx to create a new ctx object that overrides saved_tensors.
+ class CtxWithSavedTensors(WrappedCtx):
+     _pt_reserved_attrs = ('_pt_new_saved_tensors', *WrappedCtx._pt_reserved_attrs)
+
+     def __init__(self, ctx, new_saved_tensors):
+         super().__init__(ctx)
+         self._pt_new_saved_tensors = new_saved_tensors
+
+     @property
+     def saved_tensors(self):
+         return self._pt_new_saved_tensors
+
+ class CtxCustomSave(WrappedCtx):
+     _pt_reserved_attrs = ('_pt_saved_tensors_bdims', '_pt_current_level',
+                           *WrappedCtx._pt_reserved_attrs)
+
+     def __init__(self, ctx, current_level):
+         super().__init__(ctx)
+         self._pt_saved_tensors_bdims = ()
+         self._pt_current_level = current_level
+
+     def save_for_backward(self, *tensors):
+         unwrapped_tensors, bdims = unwrap_batched(tensors, self._pt_current_level)
+         self._pt_inner_ctx.save_for_backward(*unwrapped_tensors)
+         self._pt_saved_tensors_bdims = bdims
+
+     def save_for_forward(self, *tensors):
+         unwrapped_tensors, bdims = unwrap_batched(tensors, self._pt_current_level)
+         self._pt_inner_ctx.save_for_forward(*unwrapped_tensors)
+         self._pt_saved_tensors_bdims = bdims
+
+
+ def reductify(grad_input, grad_input_bdim, input_bdim, batch_size,
+               target_shape_without_bdim_to_reduce_to=None):
+     if not isinstance(grad_input, tuple):
+         grad_input = (grad_input,)
+     if not isinstance(grad_input_bdim, tuple):
+         grad_input_bdim = (grad_input_bdim,)
+     if not isinstance(input_bdim, tuple):
+         input_bdim = (input_bdim,)
+
+     if target_shape_without_bdim_to_reduce_to is None:
+         target_shape_without_bdim_to_reduce_to = len(grad_input) * (None,)
+     result = tuple(
+         reductify_leaf(gi, gi_bdim, i_bdim, batch_size, maybe_ishape)
+         for gi, gi_bdim, i_bdim, maybe_ishape in
+         zip(grad_input, grad_input_bdim, input_bdim, target_shape_without_bdim_to_reduce_to)
+     )
+     return result
+
+
+ def reductify_leaf(grad_input, grad_input_bdim, input_bdim, batch_size,
+                    target_shape_without_bdim_to_reduce_to=None):
+     if grad_input is None:
+         return None
+
+     if grad_input_bdim is None and input_bdim is None:
+         return grad_input
+
+     if grad_input_bdim is not None and input_bdim is None:
+         return grad_input.sum(grad_input_bdim)
+
+     # NOTE: [Why can't we rely on autograd to reduce expanded gradients?]
+     # For reverse-mode AD,
+     # given a grad_input and input, it is valid for the user to return a
+     # grad_input that has a broadcasted shape when compared to the input.
+     # In this situation, autograd automatically reduces the grad_input to
+     # the shape of the input.
+     #
+     # However, when input_bdim is not None, we have problems.
+     #
+     # [example 1]
+     # grad_input: Tensor[3, 4], input: Tensor[B, 4]
+     # We can expand grad_input to Tensor[B, 3, 4], but that isn't broadcastable
+     # from [B, 4].
+     #
+     # [example 2]
+     # grad_input: Tensor[3, B, 4], input: Tensor[B, 4]
+     # We can swizzle grad_input to Tensor[B, 3, 4], but that isn't broadcastable
+     # from [B, 4].
+     #
+     # This means that we need to also reduce the grad_input to the shape of the
+     # input. This behavior is controlled by the `target_shape_without_bdim_to_reduce_to` flag;
+     # if it is not None, we do the reduction manually; otherwise, we do not reduce.
+     assert input_bdim is not None
+
+     if grad_input_bdim is None:
+         grad_input = grad_input.unsqueeze(input_bdim)
+         new_shape = list(grad_input.shape)
+         new_shape[input_bdim] = batch_size
+         grad_input = grad_input.expand(new_shape)
+         grad_input_bdim = input_bdim
+
+     if target_shape_without_bdim_to_reduce_to is not None:
+         return vmap(torch.Tensor.sum_to_size, in_dims=(grad_input_bdim, None), out_dims=input_bdim)(
+             grad_input, target_shape_without_bdim_to_reduce_to)
+
+     if input_bdim != grad_input_bdim:
+         grad_input = grad_input.movedim(grad_input_bdim, input_bdim)
+     return grad_input
+
+
+ class AutogradFunctionApply(HigherOrderOperator):
+     def __init__(self):
+         super().__init__("autograd_function_apply")
+
+     def __call__(self, fwd, bwd, *fwd_args):
+         saved_values = None
+
+         class ApplyTemplate(torch.autograd.Function):
+             @staticmethod
+             def forward(ctx, *args):
+                 nonlocal saved_values
+                 output, saved_values = fwd(None, *args)
+                 return output
+
+             @staticmethod
+             def backward(ctx, *grad):
+                 return bwd(None, *grad, *saved_values)
+
+         return ApplyTemplate.apply(*fwd_args)
+
+
+ autograd_function_apply = AutogradFunctionApply()
venv/lib/python3.10/site-packages/torch/_functorch/batch_norm_replacement.py ADDED
@@ -0,0 +1,24 @@
+ import torch.nn as nn
+ from torch._functorch.utils import exposed_in
+
+
+ def batch_norm_without_running_stats(module: nn.Module):
+     if isinstance(module, nn.modules.batchnorm._BatchNorm) and module.track_running_stats:
+         module.running_mean = None
+         module.running_var = None
+         module.num_batches_tracked = None
+         module.track_running_stats = False
+
+
+ @exposed_in("torch.func")
+ def replace_all_batch_norm_modules_(root: nn.Module) -> nn.Module:
+     """
+     Updates :attr:`root` in place by setting ``running_mean`` and ``running_var`` to None and
+     setting ``track_running_stats`` to False for every ``nn.BatchNorm`` module in :attr:`root`.
+     """
+     # base case
+     batch_norm_without_running_stats(root)
+
+     for obj in root.modules():
+         batch_norm_without_running_stats(obj)
+     return root
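+
+ # Minimal usage sketch; ``net`` stands in for any module that contains
+ # BatchNorm layers:
+ #
+ #   from torch.func import replace_all_batch_norm_modules_
+ #   net = replace_all_batch_norm_modules_(net)  # mutates ``net`` in place
+ #
+ # Afterwards the BatchNorm layers stop updating running statistics, which
+ # avoids in-place buffer mutation under transforms such as vmap.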
venv/lib/python3.10/site-packages/torch/_functorch/benchmark_utils.py ADDED
@@ -0,0 +1,195 @@
+ # mypy: ignore-errors
+
+ import contextlib
+ import time
+ import os
+ import json
+
+ import torch
+ from torch.profiler import profile, ProfilerActivity
+
+
+ def synchronize():
+     pass
+
+
+ def dump_chrome_trace(f, input, trace_filename, optimize_ctx, activities, num_runs=1,
+                       devices=None, kwargs_for_f=None, kwargs_for_profiler=None):
+     """
+     Output the chrome trace of running f(input, **kwargs_for_f) under [optimize_ctx]
+     [num_runs] times to [trace_filename].
+
+     [activities] are the activities that the profiler will record, e.g. ProfilerActivity.CUDA.
+
+     Returns the total runtime measured without the profiler; the trace itself is
+     written to [trace_filename].
+     """
+
+     if devices is None:
+         devices = ["cuda"]
+
+     global synchronize
+     if devices != ["cpu"] and torch.cuda.is_available():
+         synchronize = torch.cuda.synchronize
+
+     if kwargs_for_f is None:
+         kwargs_for_f = {}
+     if kwargs_for_profiler is None:
+         kwargs_for_profiler = {}
+
+     with optimize_ctx:
+         torch.manual_seed(1337)
+         for _ in range(5):  # warmup runs
+             f(input, **kwargs_for_f)
+         synchronize()
+         torch.manual_seed(1337)
+         t0 = time.perf_counter()
+         for _ in range(num_runs):
+             f(input, **kwargs_for_f)
+         synchronize()
+         t1 = time.perf_counter()
+         timing = t1 - t0
+
+     with profile(activities=activities, **kwargs_for_profiler) as prof:
+         with optimize_ctx:
+             synchronize()
+             torch.manual_seed(1337)
+             for _ in range(num_runs):
+                 f(input, **kwargs_for_f)
+             synchronize()
+     prof.export_chrome_trace(trace_filename)
+
+     return timing
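+
+ # Usage sketch (assumes a CUDA device; the workload below is made up):
+ #
+ #   t = dump_chrome_trace(lambda x: (x @ x).sum(), torch.randn(1024, 1024, device="cuda"),
+ #                         "trace.json", contextlib.nullcontext(), [ProfilerActivity.CUDA])
+ #
+ # ``t`` is the un-profiled wall-clock time; the trace is written to trace.json.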
+
+
+ def get_chrome_trace_events(filename):
+     with open(filename) as f:
+         data = json.load(f)
+     events = data["traceEvents"]
+     return events
+
+
+ def is_gpu_compute_event(event):
+     global gpu_pids
+     return "pid" in event and event["pid"] in gpu_pids and "ph" in event and event["ph"] == "X"
+
+
+ def get_sorted_gpu_events(events):
+     sorted_gpu_events = []
+     for event in events:
+         if not is_gpu_compute_event(event):
+             continue
+         sorted_gpu_events.append(event)
+     return sorted(sorted_gpu_events, key=lambda x: x["ts"])
+
+
+ def get_duration(sorted_gpu_events):
+     if len(sorted_gpu_events) == 0:
+         return 0
+     event = sorted_gpu_events[0]
+     current_end_time = event["ts"] + event["dur"]
+     total_duration = event["dur"]
+     for event in sorted_gpu_events[1:]:
+         start_time = max(event["ts"], current_end_time)
+         end_time = event["ts"] + event["dur"]
+         total_duration = total_duration + max(end_time - start_time, 0)
+         current_end_time = max(current_end_time, end_time)
+     return total_duration
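+
+ # Worked example of the overlap handling above (made-up numbers): two sorted
+ # events at ts=0, dur=10 and ts=5, dur=10 overlap for 5us, so the merged busy
+ # time is 15us rather than 20us; the loop accumulates exactly that union.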
+
+
+ def get_sorted_gpu_mm_conv_events(events):
+     def is_mm_conv_event(event):
+         return "name" in event and ("gemm" in event["name"] or "conv" in event["name"]
+                                     or "cutlass" in event["name"] or "wgrad" in event["name"])
+     gpu_events = get_sorted_gpu_events(events)
+     sorted_events = []
+     for event in gpu_events:
+         if not is_mm_conv_event(event):
+             continue
+         sorted_events.append(event)
+     return sorted_events
+
+
+ gpu_pids = []
+
+
+ def compute_utilization(filename: str, total_length: float):
+     """
+     Process the chrome trace output by the PyTorch profiler to compute GPU utilization
+     and the percentage of time spent on matmul and convolution
+
+     Args:
+         filename(str): Name of the chrome trace file produced by the PyTorch profiler
+
+         total_length(float): total length of the process without the profiler, in seconds
+
+     Returns:
+         tuple: (GPU utilization, percent of time spent on matmul and convolution)
+     """
+     events = get_chrome_trace_events(filename)
+
+     # get pids of GPU events
+     global gpu_pids
+     gpu_pids = []
+     for event in events:
+         if "name" not in event:
+             continue
+         if event["name"] == 'process_labels' and "GPU" in event["args"]["labels"]:
+             gpu_pids.append(event["pid"])
+
+     total_length = total_length * 1e6
+     sorted_gpu_events = get_sorted_gpu_events(events)
+     utilization = get_duration(sorted_gpu_events) / total_length
+
+     sorted_gpu_mm_conv_events = get_sorted_gpu_mm_conv_events(events)
+     mm_conv_utilization = get_duration(sorted_gpu_mm_conv_events) / total_length
+
+     return utilization, mm_conv_utilization
+
+
+ def benchmark_utilization(f, input, trace_folder, optimize_ctx=None, trace_file_name="tmp_chrome_trace", num_runs=1):
+     """
+     Benchmark the GPU utilization and the percentage of time spent on matmul and convolution
+     operations when running f(input) under [optimize_ctx] [num_runs] times.
+     It produces a chrome trace file at trace_folder/trace_file_name.json
+
+     Example:
+
+     ```
+     def f(a):
+         return a.sum()
+     a = torch.rand(2**20, device="cuda")
+     utilization, mm_conv_utilization = benchmark_utilization(f, a, "tmp", trace_file_name="tmp_chrome_trace")
+     ```
+
+     Args:
+         f: function to benchmark
+
+         input: input to :attr:`f`
+
+         trace_folder: name of the folder to store the chrome trace
+
+         optimize_ctx: the context in which f will run
+
+         trace_file_name: name of the dumped chrome trace file, defaults to "tmp_chrome_trace"
+
+         num_runs: number of times to run f, excluding the warm-up runs, defaults to 1.
+
+     Returns:
+         tuple: (GPU utilization, percent of time spent on matmul and convolution)
+
+     """
+     if not os.path.exists(trace_folder):
+         os.makedirs(trace_folder)
+         print("create folder " + trace_folder)
+
+     if optimize_ctx is None:
+         optimize_ctx = contextlib.nullcontext()
+
+     chrome_trace_file_name = os.path.join(trace_folder, trace_file_name + ".json")
+     total_length = dump_chrome_trace(f, input, chrome_trace_file_name, optimize_ctx,
+                                      [ProfilerActivity.CUDA], num_runs=num_runs, devices=["cuda"])
+     utilization, mm_conv_utilization = compute_utilization(chrome_trace_file_name, total_length)
+
+     return utilization, mm_conv_utilization