applied-ai-018 committed on
Commit a937d03 · verified · 1 Parent(s): 1fad656

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/aot_autograd.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/apis.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/autograd_function.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/batch_norm_replacement.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/compile_utils.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/compilers.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/config.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/deprecated.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/functional_call.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/fx_minifier.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/make_functional.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/partitioners.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/pyfunctorch.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/python_key.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/pytree_hacks.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/top_operators_github_usage.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/utils.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__init__.py +5 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/__init__.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/collect_metadata_analysis.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/dispatch_and_compile_graph.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/functional_utils.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/input_output_analysis.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/jit_compile_runtime_wrappers.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/logging_utils.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/runtime_wrappers.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/schemas.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/subclass_utils.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/traced_function_transforms.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/utils.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/collect_metadata_analysis.py +626 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py +192 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/functional_utils.py +370 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py +936 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/runtime_wrappers.py +1021 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/schemas.py +696 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/subclass_utils.py +295 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/traced_function_transforms.py +698 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/utils.py +226 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/_vendor/__init__.py +0 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/_vendor/__pycache__/__init__.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__init__.py +15 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/__init__.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/_structures.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/version.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/_structures.py +61 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/version.py +563 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__init__.py +78 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/__init__.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/aot_autograd.cpython-310.pyc ADDED
Binary file (27.8 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/apis.cpython-310.pyc ADDED
Binary file (18.1 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/autograd_function.cpython-310.pyc ADDED
Binary file (15.4 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/batch_norm_replacement.cpython-310.pyc ADDED
Binary file (1.04 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/compile_utils.cpython-310.pyc ADDED
Binary file (3.14 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/compilers.cpython-310.pyc ADDED
Binary file (13.6 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/config.cpython-310.pyc ADDED
Binary file (744 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/deprecated.cpython-310.pyc ADDED
Binary file (4.47 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/functional_call.cpython-310.pyc ADDED
Binary file (11.6 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/fx_minifier.cpython-310.pyc ADDED
Binary file (13.4 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/make_functional.cpython-310.pyc ADDED
Binary file (21.3 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/partitioners.cpython-310.pyc ADDED
Binary file (30.4 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/pyfunctorch.cpython-310.pyc ADDED
Binary file (8.08 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/python_key.cpython-310.pyc ADDED
Binary file (396 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/pytree_hacks.cpython-310.pyc ADDED
Binary file (589 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/top_operators_github_usage.cpython-310.pyc ADDED
Binary file (14.6 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.33 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ # All rights reserved.
+ #
+ # This source code is licensed under the BSD-style license found in the
+ # LICENSE file in the root directory of this source tree.
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (203 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/collect_metadata_analysis.cpython-310.pyc ADDED
Binary file (10.1 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/dispatch_and_compile_graph.cpython-310.pyc ADDED
Binary file (4.12 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/functional_utils.cpython-310.pyc ADDED
Binary file (8.66 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/input_output_analysis.cpython-310.pyc ADDED
Binary file (9.7 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/jit_compile_runtime_wrappers.cpython-310.pyc ADDED
Binary file (19.1 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/logging_utils.cpython-310.pyc ADDED
Binary file (4.33 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/runtime_wrappers.cpython-310.pyc ADDED
Binary file (15.9 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/schemas.cpython-310.pyc ADDED
Binary file (14.5 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/subclass_utils.cpython-310.pyc ADDED
Binary file (6.29 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/traced_function_transforms.cpython-310.pyc ADDED
Binary file (15.8 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/utils.cpython-310.pyc ADDED
Binary file (6.77 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/collect_metadata_analysis.py ADDED
@@ -0,0 +1,626 @@
1
+ """
2
+ This module is one of the analysis modules - it takes as input a function or graph
3
+ and some preexisting properties, and returns some data that is useful for deciding
4
+ how to further proceed with compilation or construct runtime wrappers.
5
+
6
+ In particular, the analysis here constructs view and mutation metadata from running
7
+ a functionalized version of the graph under compilation.
8
+ """
9
+
10
+ import collections
11
+ import logging
12
+ from functools import wraps
13
+ from typing import Callable, DefaultDict, Dict, List
14
+
15
+ import torch
16
+ import torch.utils._pytree as pytree
17
+ from torch import Tensor
18
+ from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode
19
+ from torch._subclasses.meta_utils import safe_is_leaf
20
+ from torch.fx.experimental.symbolic_shapes import is_concrete_int
21
+ from torch.multiprocessing.reductions import StorageWeakRef
22
+ from torch.utils._python_dispatch import (
23
+ is_traceable_wrapper_subclass,
24
+ transform_subclass,
25
+ )
26
+ from .functional_utils import (
27
+ are_all_mutations_hidden_from_autograd,
28
+ are_all_mutations_under_no_grad_or_inference_mode,
29
+ from_fun,
30
+ has_data_mutation,
31
+ has_metadata_mutation,
32
+ has_same_metadata,
33
+ to_fun,
34
+ )
35
+ from .schemas import (
36
+ InputAliasInfo,
37
+ MutationType,
38
+ OutputAliasInfo,
39
+ OutputType,
40
+ ViewAndMutationMeta,
41
+ )
42
+ from .subclass_utils import create_subclass_meta
43
+
44
+ from .utils import _get_autocast_states, KNOWN_TYPES, strict_zip
45
+
46
+ zip = strict_zip
47
+
48
+ log = logging.getLogger(__name__)
49
+
50
+
51
+ # This is a version of functionalization that is specifically designed
52
+ # for the AOTAutograd use case.
53
+ #
54
+ # Unlike functorch's variant, this doesn't use the functorch level system,
55
+ # instead it directly uses PyTorch's conventional dispatcher to hit the
56
+ # functionalization key. In particular, this means that FunctionalTensorWrapper
57
+ # can have autograd data stored directly on it.
58
+ #
59
+ # In typical AOTAutograd usage, the dispatch key order will look like:
60
+ #
61
+ # Autograd - Functionalization ~~~~> Proxy Mode - Fake Tensor
62
+ # outer tensor inner tensor
63
+ #
64
+ # Returns:
65
+ # - ViewAndMutationMeta, telling us metadata about the inputs and outputs, and
66
+ # The list of outputs from the forward, but **only** the outputs that we need
67
+ # to pass in as tangents into the backward.
68
+ # Specifically, aliased outputs from the forward get regenerated, and don't participate
69
+ # in the compiled backward function.
70
+ def run_functionalized_fw_and_collect_metadata(
71
+ f,
72
+ *,
73
+ keep_input_mutations: bool,
74
+ # TODO: refactor to kill this flag
75
+ is_train: bool = False,
76
+ pre_dispatch: bool = False,
77
+ ) -> Callable[..., ViewAndMutationMeta]:
78
+ memo: Dict[Tensor, Tensor] = {}
79
+
80
+ def _to_fun(t):
81
+ if isinstance(t, Tensor):
82
+ if t in memo:
83
+ return memo[t]
84
+ r = to_fun(t)
85
+ memo[t] = r
86
+ return r
87
+ else:
88
+ return t
89
+
90
+ @wraps(f)
91
+ def inner(*flat_args):
92
+ # This function is meant to be run with the forward, which expects a flat list of tensor/symint/other args.
93
+ assert all(isinstance(a, tuple(KNOWN_TYPES)) for a in flat_args)
94
+
95
+ input_info: List[InputAliasInfo] = []
96
+ output_info: List[OutputAliasInfo] = []
97
+
98
+ prior_grad_enabled = torch.is_grad_enabled()
99
+ prior_autocast_states = _get_autocast_states()
100
+
101
+ # See Note [Disabling Functionalize TLS Above Python Functionalization]
102
+ disable_above = torch._C._ExcludeDispatchKeyGuard(
103
+ torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
104
+ )
105
+
106
+ # It doesn't matter if we run this under predispatch or not because it is
107
+ # only for figuring out metadata
108
+ mode = FunctionalTensorMode(_allow_token_discovery=True)
109
+ with disable_above, mode:
110
+ # precondition: The passed in function already handles unflattening inputs + flattening outputs
111
+ flat_f_args = pytree.tree_map(_to_fun, flat_args)
112
+ flat_f_outs = f(*flat_f_args)
113
+
114
+ if prior_autocast_states != _get_autocast_states():
115
+ raise RuntimeError(
116
+ "AOTAutograd does not support tracing graphs that mutate the autocast state. "
117
+ "Dynamo will only insert autocast context managers (e.g. with torch.autocast(..)) into the graph, "
118
+ "which will unwind all of their mutations to autocast state before the graph exits. "
119
+ "If you encounter this error while using torch.compile, please file a bug."
120
+ )
121
+
122
+ # Inspect the state of the input tensor functional wrapper to detect input mutation info
123
+ # If inp[i] has a metadata-only mutation, then maybe_inputs_with_mutated_metadata[i] contains the updated version
124
+ for i, (arg, f_arg) in enumerate(zip(flat_args, flat_f_args)):
125
+ # NB: Mutation of non-contiguous tensor subclass input can result in a mismatch in
126
+ # strides between the functionalized arg inner tensors and non-functionalized arg inner
127
+ # tensors. This is a problem as the inner tensor stride change may not be reflected
128
+ # correctly in the outer tensor, so disallow this for now.
129
+ mutates_data = has_data_mutation(f_arg)
130
+ if (
131
+ mutates_data
132
+ and not arg.is_contiguous()
133
+ and is_traceable_wrapper_subclass(arg)
134
+ ):
135
+ raise RuntimeError(
136
+ "Mutations on non-contiguous inputs are currently not allowed on "
137
+ "tensor subclasses"
138
+ )
139
+
140
+ if not isinstance(arg, Tensor):
141
+ new_arg = arg
142
+ else:
143
+ new_arg = from_fun(f_arg)
144
+ mutates_metadata = has_metadata_mutation(
145
+ f_arg, arg, check_only_storage_mutation=False
146
+ )
147
+ if mutates_metadata and is_traceable_wrapper_subclass(arg):
148
+ raise RuntimeError(
149
+ "Metadata mutations are currently not allowed on tensor subclasses"
150
+ )
151
+ mutates_storage_metadata = has_metadata_mutation(
152
+ f_arg, arg, check_only_storage_mutation=True
153
+ )
154
+ mutations_hidden_from_autograd = are_all_mutations_hidden_from_autograd(
155
+ f_arg
156
+ )
157
+ mutations_under_no_grad_or_inference_mode = (
158
+ mutates_data
159
+ and are_all_mutations_under_no_grad_or_inference_mode(f_arg)
160
+ )
161
+
162
+ # Here, we're saying that if an input experienced a set call, inp.set_(other),
163
+ # then we can effectively not have to worry about whether its data was mutated.
164
+ # There are 3 cases:
165
+ # (1) We mutate inp *after* the set_() call. other is a graph intermediate.
166
+ # In this case, we're not really mutating the input storage of "inp";
167
+ # we're mutating the storage of an intermediate value (other),
168
+ # and slamming that storage into the input tensor. So no data mutation is necessary.
169
+ # (2) We mutate inp *after* the set_() call. other is a graph *input*.
170
+ # In this case, the data mutation will be properly handled in the runtime
171
+ # epilogue during the processing of "other"
172
+ # (3) We mutate inp *before* the set_() call.
173
+ # This case is *not* currently handled.
174
+ # TODO: discuss this in the PR. Both supporting this, and detecting + erroring out,
175
+ # seem painful to get working.
176
+ if mutates_storage_metadata:
177
+ mutates_data = False
178
+
179
+ requires_grad = isinstance(f_arg, torch.Tensor) and f_arg.requires_grad
180
+
181
+ input_info.append(
182
+ InputAliasInfo(
183
+ is_leaf=isinstance(arg, Tensor) and safe_is_leaf(arg),
184
+ mutates_data=mutates_data,
185
+ mutates_metadata=mutates_metadata,
186
+ mutations_hidden_from_autograd=mutations_hidden_from_autograd,
187
+ mutates_storage_metadata=mutates_storage_metadata,
188
+ mutations_under_no_grad_or_inference_mode=mutations_under_no_grad_or_inference_mode,
189
+ requires_grad=requires_grad,
190
+ keep_input_mutations=keep_input_mutations,
191
+ )
192
+ )
193
+
194
+ # If a function involves creating a tensor, and returning a view of it, such that its _base is the intermediate,
195
+ # We need to make sure our graph returns the _base as a graph output, and we manually recreate the view
196
+ # to return to the user. Why? The backend compiler is free to (incorrectly) not set requires_grad
197
+ # on the base tensor, but we are obligated to properly set requires-gradness on the real output.
198
+
199
+ inp_storage_refs = {
200
+ StorageWeakRef(inpt.untyped_storage()): idx
201
+ for idx, inpt in enumerate(flat_f_args)
202
+ if isinstance(inpt, Tensor)
203
+ }
204
+
205
+ # We need inp tensor ids to be able to tell if any outputs **are** inputs.
206
+ inp_tensor_ids = {id(inpt) for inpt in flat_f_args if isinstance(inpt, Tensor)}
207
+ # We need output tensor ids to tell if any output._base attributes **are** other outputs.
208
+ # (This is also a dict because we need to know that output's index, so we can regenerate
209
+ # the alias from it).
210
+ out_tensor_ids = {id(o): i for i, o in enumerate(flat_f_outs)}
211
+
212
+ # Keep track of which outputs alias other outputs
213
+ out_tensor_alias_counts: DefaultDict = collections.defaultdict(int)
214
+ # This tells us, for a given group of outputs that alias each other,
215
+ # whether they e.g. all came from an unbind call
216
+ num_aliased_tensors_that_are_multi_output_views: DefaultDict = (
217
+ collections.defaultdict(int)
218
+ )
219
+ out_storage_to_tensors: DefaultDict = collections.defaultdict(set)
220
+ curr_storage = None
221
+ for o in flat_f_outs:
222
+ if isinstance(o, torch.Tensor):
223
+ curr_storage = StorageWeakRef(o.untyped_storage())
224
+ out_tensor_alias_counts[curr_storage] += 1
225
+ # Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
226
+ # This is an optimization on top of the "alias of intermediates" logic,
227
+ # which you can read more about under Note [AOT Autograd: outputs aliasing inputs or intermediates!]
228
+ #
229
+ # Before describing the optimization: this is important for AOTAutograd to have good
230
+ # perf around multi-output views. HOWEVER:
231
+ # - There is a more generic change to AOTAutograd that we'd like to make, that subsumes this case,
232
+ # around using pre-dispatch tracing to partition out a graph so we can faithfully replay all
233
+ # views without having to regenerate them at runtime.
234
+ # - It's loosely described in this doc (more details will be added soon):
235
+ # https://docs.google.com/document/d/1DlfFq8TKbuAn2zyJxLfoW-X1qkkm5PLdHFtySo03QAk/edit
236
+ # - Once that change lands, we should just rip out this "optimization", since:
237
+ # (1) It will be fully unnecessary
238
+ # (2) Although it is only a few lines of code, it is a bit difficult to reason about
239
+ # its correctness with the autograd engine in all cases.
240
+ #
241
+ #
242
+ # What is this optimization? Consider the below case:
243
+ # def f(x):
244
+ # intermediate = x.mul(2)
245
+ # # x and intermediate here require grad
246
+ # o1, o2, ... o10 = intermediate.unbind(-1)
247
+ # return intermediate, o1, o2, ... o10
248
+ # Now, the "intermediate base" handling in AOTAutograd implies that we must do the following:
249
+ # (1) return "intermediate" as an extra output of the compiled graph
250
+ # (2) regenerate each aliased output off of "intermediate", **outside** of the autograd.Function.
251
+ # The reason AOTAutograd ordinarily does this is for safety: the autograd engine needs to know
252
+ # that o1 through o10 are all aliased, and if we blindly return o1 through o10 from the autograd.Function,
253
+ # this information will be hidden.
254
+ # In particular, mutating one alias might require autograd to update autograd metadata on the other aliases
255
+ # (like their grad_fn, for example, when the autograd engine needs to do view-replay).
256
+ #
257
+ # However, intermediate_base logic can be bad for backward performance (we sometimes generate
258
+ # as_strided calls during the intermediate base logic, which can have a slow backward formula).
259
+ # Is it possible to find a set of conditions where it is **safe** to hide the output aliasing from autograd?
260
+ #
261
+ # For a set of outputs of the graph that alias each other, o_1...o_k, consider:
262
+ # (1) They came from the same multi-output view op, e.g. o_1, ..., o_k = intermediate.unbind(0)
263
+ # (2) If there are any other aliases of o_1 through o_k (in the example above, intermediate),
264
+ # **at most** 1 can escape from the graph (e.g. there is not some other graph input/output
265
+ # o_other, that aliases these outputs)
266
+ # (3) o_1...o_k all require_grad, they all share the same ._base, and their ._base requires grad.
267
+ # This condition is important because it's what causes slowness in the intermediate_base
268
+ # codepath of aot_autograd. Ordinarily, o_1...o_k would all get a grad_fn, and
269
+ # aot_autograd's view-replay might give each output an AsStridedBackward as its grad_fn.
270
+ # "K" AsStridedBackward calls will be *much* slower than a single UnbindBackward.
271
+ # In this setup, is it possible to mutate one of the outputs o_i in a way that would affect the autograd meta
272
+ # of the other aliases?
273
+ #
274
+ # Claim: No! Consider a few examples (which I'm pretty sure cover all cases of mutation w.r.t. autograd):
275
+ # (a) What happens if we mutate any of o_1 through o_k directly?
276
+ # Autograd raises an error:
277
+ # "RuntimeError: Output 0 of UnbindBackward0 is a view and is being modified inplace. This view is
278
+ # the output of a function that returns multiple views. Such functions do not allow the output
279
+ # views to be modified inplace. You should replace the inplace operation by an out-of-place one."
280
+ # (b) What if we take a view of o_k and mutate it, o_k.view(o_k.shape).mul_(2)?
281
+ # Autograd raises the same error- the "multi-output-view"ness of an alias propagates to future views.
282
+ # (c) What if we mutate o_k under no_grad?
283
+ # Autograd raises the same error
284
+ # (d) What if we detach and mutate, e.g. o_k.detach().mul_(2)?
285
+ # Autograd allows this, *but* autograd updates all alias's grad_fn's to be error functions when accessed.
286
+ # Autograd raises the same error
287
+ # (e) What if we try to mutate another alias of o_1...o_k, that was **not** created from a multi-output view?
288
+ # We promised that there is at most **one** such alias, e.g. intermediate in the example above.
289
+ # You can mutate intermediate, but in eager mode this will change the grad_fn of o_1...o_k
290
+ # to be error fn's.
291
+ # Since intermediate was the *only* non-multi-output-alias, there are no other aliases
292
+ # of `intermediate` around that were produced by the compiled fn and have a valid grad_fn.
293
+ #
294
+ # Coming back to this optimization:
295
+ # Given that it is not possible for mutating one of these aliases to affect the autograd metadata of another alias
296
+ # without causing an error in eager mode, we will simply hide the aliasing from autograd during torch.compile
297
+ # if all of the above conditions are met.
298
+ # This has the slight downside that it's possible to write some "bad" code that autograd will raise an error on
299
+ # in eager mode but not under torch.compile, but it has the benefit that this code has much better performance.
300
+ # NOTE: if and when we eventually update AOTAutograd to do the "view graph slicing" defined here:
301
+ # https://docs.google.com/document/d/1DlfFq8TKbuAn2zyJxLfoW-X1qkkm5PLdHFtySo03QAk/edit,
302
+ # then this optimization will probably matter less and might be ok to remove.
303
+ is_cur_tensor_multi_out_view = isinstance(
304
+ o, FunctionalTensor
305
+ ) and torch._functionalize_is_multi_output_view( # type: ignore[attr-defined]
306
+ o.elem
307
+ )
308
+ if is_cur_tensor_multi_out_view:
309
+ num_aliased_tensors_that_are_multi_output_views[curr_storage] += 1
310
+ out_storage_to_tensors[curr_storage].add(o)
311
+
312
+ # maps the id of an intermediate base to its index in the output of the compiled forward
313
+ intermediate_base_tensor_id_to_output_idx: Dict[int, int] = {}
314
+ intermediate_bases: List[torch.Tensor] = []
315
+ # Why Do We Care If Storage Changed?
316
+ # It's important to understand the implications of storage changes in complex scenarios. Take this example:
317
+ #
318
+ # def f(x):
319
+ # x_storage = x.untyped_storage()
320
+ # non_leaf_tensor = torch.ones(4, requires_grad=True).clone()
321
+ #
322
+ # # Using no_grad() and _unsafe_preserve_version_counter to simulate the .data = operation
323
+ # with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter(x):
324
+ # x.set_(non_leaf_tensor.untyped_storage())
325
+ #
326
+ # out = x.view(-1)
327
+ #
328
+ # # Restoring x to its original storage, again simulating .data = operation
329
+ # with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter(x):
330
+ # x.set_(x_storage)
331
+ #
332
+ # return out
333
+ #
334
+ # In this scenario, 'x' and 'out' have different shapes and are stored at different memory addresses, aka no aliasing.
335
+ # However, due to how set_() (and more specifically, how set_() is functionalized) is defined to preserve eager semantics,
336
+ # the autograd engine mistakenly assumes that 'x' and 'out' are aliased, treating 'x' as 'out._base'.
337
+ # This misinterpretation leads to an 'alias_of_input' flag, causing an unnecessary as_strided() call to be generated,
338
+ # which could lead to issues later in the code.
339
+ for o in flat_f_outs:
340
+ functional_tensor_storage_changed = isinstance(
341
+ o, FunctionalTensor
342
+ ) and torch._functionalize_was_storage_changed( # type: ignore[attr-defined]
343
+ o.elem
344
+ )
345
+ curr_storage = (
346
+ None
347
+ if not isinstance(o, torch.Tensor)
348
+ else StorageWeakRef(o.untyped_storage())
349
+ )
350
+ outs_with_identical_metadata_that_require_grad = (
351
+ []
352
+ if not isinstance(o, Tensor)
353
+ else [
354
+ curr
355
+ for curr in out_storage_to_tensors[curr_storage]
356
+ if has_same_metadata(o, curr)
357
+ and curr.requires_grad
358
+ and o is not curr
359
+ ]
360
+ )
361
+
362
+ # See Note [Accessing .grad_fn on FunctionalTensor]
363
+ # In-place operations on views will trigger a lazy rebase of the autograd graph;
364
+ # this runs during access to the .grad_fn. The rebase logic will invoke view ops
365
+ # on FunctionalTensors, so we must enable a FunctionalTensorMode here to ensure
366
+ # these op calls succeed.
367
+ grad_fn = None
368
+ if isinstance(o, Tensor):
369
+ with FunctionalTensorMode():
370
+ grad_fn = o.grad_fn
371
+
372
+ is_result_of_custom_autograd_fn = False
373
+ # Need to check for both custom cpp (CppFunction) and python (BackwardCFunction)
374
+ # autograd fns
375
+ if type(grad_fn).__name__ == "CppFunction":
376
+ is_result_of_custom_autograd_fn = True
377
+ if isinstance(grad_fn, torch.autograd.function.BackwardCFunction):
378
+ is_result_of_custom_autograd_fn = True
379
+
380
+ if not isinstance(o, Tensor):
381
+ output_type = OutputType.non_alias
382
+ base_idx = None
383
+ elif (
384
+ curr_storage in inp_storage_refs
385
+ and grad_fn is not None
386
+ and is_result_of_custom_autograd_fn
387
+ ):
388
+ output_type = OutputType.custom_function_view
389
+ base_idx = None
390
+ elif (
391
+ curr_storage in inp_storage_refs
392
+ and not functional_tensor_storage_changed
393
+ ):
394
+ base_idx = inp_storage_refs[curr_storage]
395
+ is_input_tensor = id(o) in inp_tensor_ids
396
+ num_aliased_outs = out_tensor_alias_counts[curr_storage]
397
+ num_multi_output_view_outs = (
398
+ num_aliased_tensors_that_are_multi_output_views[curr_storage]
399
+ )
400
+ num_aliased_outs_that_are_not_multi_output_views = (
401
+ num_aliased_outs - num_multi_output_view_outs
402
+ )
403
+ if (
404
+ grad_fn is not None
405
+ and num_aliased_outs_that_are_not_multi_output_views == 0
406
+ ):
407
+ # See Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
408
+ # In particular, given:
409
+ # def f(x):
410
+ # return list(x.unbind(0))
411
+ # The main reason we ordinarily try to regenerate these output aliases outside of the
412
+ # compiled autograd.Function is because if any of the outputs are later mutated,
413
+ # autograd needs to perform view-replay to regenerate them.
414
+ # However, autograd does not allow users to mutate multi-output views
415
+ # in any way that can change the autograd metadata of other aliases.
416
+ # So we hide this aliasing from autograd here.
417
+ log.debug(
418
+ "Encountered AOTAutograd case: differentiable outputs that \
419
+ alias each other from a multi-output view call"
420
+ )
421
+ output_type = OutputType.non_alias
422
+ elif is_input_tensor:
423
+ output_type = OutputType.is_input
424
+ else:
425
+ output_type = OutputType.alias_of_input
426
+
427
+ # We only need to handle the intermediate base case when both
428
+ # the intermediate base and the output require gradients.
429
+ # See Note [AOT Autograd: outputs aliasing inputs or intermediates!]
430
+ elif o._base is not None and o.requires_grad and o._base.requires_grad:
431
+ num_aliased_outs = out_tensor_alias_counts[curr_storage]
432
+ num_multi_output_view_outs = (
433
+ num_aliased_tensors_that_are_multi_output_views[curr_storage]
434
+ )
435
+ num_aliased_outs_that_are_not_multi_output_views = (
436
+ num_aliased_outs - num_multi_output_view_outs
437
+ )
438
+ # Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
439
+ if (
440
+ out_tensor_alias_counts[curr_storage] == 1
441
+ or num_aliased_outs_that_are_not_multi_output_views <= 1
442
+ ):
443
+ # Note [Intermediate Bases Optimization]
444
+ # Normally if we have an output that aliases an intermediate,
445
+ # we need to add the extra "intermediate base" logic further down
446
+ # to prevent autograd from yelling at us if the user later tries to
447
+ # mutate that output.
448
+ # However, the common case here is if we have an output that aliases an intermediate,
449
+ # but doesn't alias any other outputs.
450
+ # In that case, autograd shouldn't have to worry about the aliasing at all
451
+ # (if that output is mutated, there are no other live aliases for autograd to worry about).
452
+ # The "intermediate bases" can hurt inductor perf by forcing more variables to become outputs.
453
+ # So as an optimization, we won't do intermediate base handling in this case.
454
+ # Instead, we'll hide the aliasing from autograd using aten._unsafe_view().
455
+ if (
456
+ out_tensor_alias_counts[curr_storage] != 1
457
+ and num_aliased_outs_that_are_not_multi_output_views <= 1
458
+ ):
459
+ log.debug(
460
+ "Encountered AOTAutograd case: differentiable outputs that alias each other \
461
+ from a multi-output view call"
462
+ )
463
+ output_type = OutputType.unsafe_view_alias
464
+ base_idx = None
465
+ else:
466
+ # First, check if o's ._base is an existing output
467
+ maybe_existing_out_idx = out_tensor_ids.get(id(o._base), None)
468
+ if maybe_existing_out_idx is not None:
469
+ # Special case where the output is an alias of a graph intermediate, but that intermediate
470
+ # is itself also a user output.
471
+ output_type = (
472
+ OutputType.alias_of_intermediate_base_is_user_output
473
+ )
474
+ base_idx = maybe_existing_out_idx
475
+ else:
476
+ # Next, check if o's ._base is an intermediate base that we already returned
477
+ maybe_existing_base_output_idx = (
478
+ intermediate_base_tensor_id_to_output_idx.get(
479
+ id(o._base), None
480
+ )
481
+ )
482
+ if maybe_existing_base_output_idx is not None:
483
+ output_type = OutputType.alias_of_intermediate
484
+ base_idx = maybe_existing_base_output_idx
485
+ else:
486
+ # Otherwise, take o._base and explicitly return it as an output in the compiled graph
487
+ new_out_idx = len(intermediate_bases)
488
+ base_idx = new_out_idx
489
+ # Indicate to the logic later on (when we trace the joint)
490
+ # that this particular output should get its ._base appended to the forward graph outputs
491
+ output_type = (
492
+ OutputType.alias_of_intermediate_save_as_output
493
+ )
494
+ intermediate_base_tensor_id_to_output_idx[
495
+ id(o._base)
496
+ ] = new_out_idx
497
+ intermediate_bases.append(o._base)
498
+ elif (
499
+ # See https://github.com/pytorch/pytorch/issues/100348 for this case.
500
+ # This protects against the specific case where a user fn returns (output, output.detach())
501
+ out_tensor_alias_counts[curr_storage] > 1
502
+ and len(outs_with_identical_metadata_that_require_grad) > 0
503
+ and not o.requires_grad
504
+ ):
505
+ assert len(outs_with_identical_metadata_that_require_grad) > 0
506
+ # In theory we could use any of these tensors to regenerate the aliased outputs from,
507
+ # since they all alias each other and have identical metadata
508
+ out_alias = outs_with_identical_metadata_that_require_grad[0]
509
+ existing_out_idx = out_tensor_ids[id(out_alias)]
510
+ output_type = OutputType.alias_of_intermediate_base_is_user_output
511
+ base_idx = existing_out_idx
512
+ else:
513
+ output_type = OutputType.non_alias
514
+ base_idx = None
515
+
516
+ if isinstance(o, torch.Tensor):
517
+ dynamic_dims = {
518
+ i for i, s in enumerate(o.shape) if not is_concrete_int(s)
519
+ }
520
+ else:
521
+ dynamic_dims = None
522
+ out_info = OutputAliasInfo(
523
+ output_type=output_type,
524
+ raw_type=type(o),
525
+ base_idx=base_idx,
526
+ dynamic_dims=dynamic_dims,
527
+ requires_grad=isinstance(o, torch.Tensor) and o.requires_grad,
528
+ )
529
+ output_info.append(out_info)
530
+
531
+ # See Note [AOT Autograd: Views to avoid tangents aliasing inputs]
532
+ def view_avoid_dupes_with_primals(t):
533
+ if isinstance(t, Tensor) and is_traceable_wrapper_subclass(t):
534
+ return transform_subclass(
535
+ t, lambda _, inner_t: view_avoid_dupes_with_primals(inner_t)
536
+ )
537
+ if isinstance(t, Tensor):
538
+ return t.view(t.shape)
539
+ return t
540
+
541
+ # This analysis function returns *only* the outputs that are meant to be tangents to the backwards.
542
+ # Anything that aliases (inputs returned in the fw due to metadata mutations, or outputs that alias inputs/intermediates)
543
+ # are *regenerated* later, and not used directly in the autograd graph
544
+ f_input_tangents = [
545
+ inp
546
+ for inp, info in zip(flat_f_args, input_info)
547
+ if info.mutation_type == MutationType.MUTATED_OUT_GRAPH
548
+ and info.mutates_data
549
+ and info.requires_grad
550
+ ]
551
+ f_output_tangents = [
552
+ o
553
+ for o, info in zip(flat_f_outs, output_info)
554
+ if info.output_type
555
+ in [
556
+ OutputType.non_alias,
557
+ OutputType.unsafe_view_alias,
558
+ OutputType.custom_function_view,
559
+ ]
560
+ and issubclass(info.raw_type, torch.Tensor)
561
+ and info.requires_grad
562
+ ]
563
+ # intermediate bases are also included in the backward graph
564
+ f_tangents = f_input_tangents + f_output_tangents + intermediate_bases
565
+ traced_tangents = pytree.tree_map(from_fun, f_tangents)
566
+ traced_tangents = pytree.tree_map(
567
+ view_avoid_dupes_with_primals, traced_tangents
568
+ )
569
+ user_outs = pytree.tree_map(from_fun, f_output_tangents)
570
+
571
+ f_mutated_inputs = [
572
+ inp
573
+ for inp, info in zip(flat_f_args, input_info)
574
+ if info.mutation_type == MutationType.MUTATED_OUT_GRAPH
575
+ ]
576
+ f_metadata_mutated_inputs = [
577
+ inp for inp, info in zip(flat_f_args, input_info) if info.mutates_metadata
578
+ ]
579
+ # This logic (annoyingly) re-figures out exactly what the outputs to the compiled fw graph will be.
580
+ # When handling subclasses, we need info about **all** outputs of compiled forward graph,
581
+ # so we know precisely which graph outputs to wrap back into tensor subclasses
582
+ # Ideally we would refactor this to not have an is_train flag, and have the separate
583
+ # inference and training paths decide which inputs/output to ask for subclass info on.
584
+ # However, we currently stash indexing information on each SubclassMeta about its order
585
+ # in the graph outputs list.
586
+ f_fw_graph_outs = list(flat_f_outs)
587
+ if is_train or not keep_input_mutations:
588
+ f_fw_graph_outs = f_mutated_inputs + f_fw_graph_outs
589
+ else:
590
+ # even when "keep_input_mutations" is True,
591
+ # we never keep metadata-only mutations in the fw graph
592
+ f_fw_graph_outs = f_metadata_mutated_inputs + f_fw_graph_outs
593
+ if is_train:
594
+ f_fw_graph_outs = f_fw_graph_outs + intermediate_bases
595
+ fw_graph_outs = pytree.tree_map(from_fun, f_fw_graph_outs)
596
+
597
+ grad_enabled_mutation = None
598
+ if torch.is_grad_enabled() != prior_grad_enabled:
599
+ grad_enabled_mutation = torch.is_grad_enabled()
600
+ torch.set_grad_enabled(
601
+ prior_grad_enabled
602
+ ) # Restore the prior state after tracing it
603
+ log.debug(
604
+ (
605
+ "grad_mode mutation encountered in graph. "
606
+ "Will emit mutation epilogue, to set grad_mode=%s"
607
+ ),
608
+ grad_enabled_mutation,
609
+ )
610
+
611
+ metadata = ViewAndMutationMeta(
612
+ input_info=input_info,
613
+ output_info=output_info,
614
+ num_intermediate_bases=len(intermediate_bases),
615
+ keep_input_mutations=keep_input_mutations,
616
+ traced_tangents=traced_tangents,
617
+ subclass_inp_meta=create_subclass_meta(flat_args),
618
+ subclass_fw_graph_out_meta=create_subclass_meta(fw_graph_outs),
619
+ subclass_tangent_meta=create_subclass_meta(traced_tangents),
620
+ is_train=is_train,
621
+ grad_enabled_mutation=grad_enabled_mutation,
622
+ tokens=mode._tokens,
623
+ )
624
+ return metadata
625
+
626
+ return inner
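
For orientation, below is a minimal sketch of how the entry point added above might be exercised directly. The private import path and the use of plain CPU tensors are assumptions (inside AOTAutograd the inputs are fake, flattened tensors and the flags are driven by AOTConfig), so treat this as an illustrative sketch rather than the supported API.

import torch
from torch._functorch._aot_autograd.collect_metadata_analysis import (
    run_functionalized_fw_and_collect_metadata,
)

def f(x):
    x.mul_(2)          # data mutation on an input
    return x.view(-1)  # output that aliases an input

# Hypothetical direct call; behavior may differ across PyTorch versions.
meta = run_functionalized_fw_and_collect_metadata(
    f,
    keep_input_mutations=False,
)(torch.ones(3, 3))

print(meta.input_info[0].mutates_data)    # expected: True
print(meta.output_info[0].output_type)    # expected: an alias-of-input OutputType
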
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py ADDED
@@ -0,0 +1,192 @@
1
+ """
2
+ This module dispatches the graphs to either the forward-only or joint compilation
3
+ pathways, taking into account the AOTConfig and the collected ViewAndMutationMetadata.
4
+ """
5
+
6
+ from typing import Any, Callable, List, Optional, Tuple, Union
7
+
8
+ import torch
9
+ import torch.utils._pytree as pytree
10
+ import torch.utils.dlpack
11
+ from torch import Tensor
12
+ from torch._dispatch.python import enable_python_dispatcher
13
+ from torch._dynamo.utils import lazy_format_graph_code
14
+ from torch._logging import getArtifactLogger, trace_structured
15
+ from torch._subclasses.functional_tensor import FunctionalTensorMode
16
+ from torch.fx.experimental.proxy_tensor import make_fx
17
+
18
+ from .functional_utils import (
19
+ assert_functional_graph,
20
+ propagate_input_mutation_stacktraces,
21
+ )
22
+ from .schemas import AOTConfig, SubclassMeta, ViewAndMutationMeta
23
+ from .traced_function_transforms import (
24
+ aot_dispatch_subclass,
25
+ create_functionalized_fn,
26
+ create_joint,
27
+ fn_input_mutations_to_outputs,
28
+ fn_prepped_for_autograd,
29
+ )
30
+
31
+ aot_graphs_log = getArtifactLogger(__name__, "aot_graphs")
32
+
33
+
34
+ def _create_graph(f, args, *, aot_config: AOTConfig) -> torch.fx.GraphModule:
35
+ # FunctionalTensorMode must be enabled here.
36
+ # See Note [Accessing .grad_fn on FunctionalTensor]
37
+ with enable_python_dispatcher(), FunctionalTensorMode(
38
+ pre_dispatch=aot_config.pre_dispatch, export=aot_config.is_export
39
+ ):
40
+ fx_g = make_fx(
41
+ f,
42
+ decomposition_table=aot_config.decompositions,
43
+ record_module_stack=True,
44
+ pre_dispatch=aot_config.pre_dispatch,
45
+ )(*args)
46
+
47
+ return fx_g
48
+
49
+
50
+ def aot_dispatch_base_graph(
51
+ flat_fn,
52
+ flat_args: List[Tensor],
53
+ aot_config: AOTConfig,
54
+ *,
55
+ fw_metadata: ViewAndMutationMeta,
56
+ ) -> Union[Callable, Tuple[Callable, List[Any], Optional[SubclassMeta]]]:
57
+ # aot_dispatch_base requires functionalization, but doesn't need to handle as many cases as the autograd case.
58
+ # The cases that aot_dispatch_base doesn't need to handle include:
59
+ # - outputs that are aliases of graph intermediates
60
+ # - outputs that are aliases of graph inputs
61
+ # While cases that it does need to handle include:
62
+ # - input mutations (including when inputs are aliases of each other)
63
+ # - input metadata mutations
64
+ fn_to_trace = fn_input_mutations_to_outputs(
65
+ flat_fn,
66
+ fw_metadata,
67
+ keep_data_input_mutations=aot_config.keep_inference_input_mutations,
68
+ )
69
+
70
+ fn_to_trace, updated_flat_args = create_functionalized_fn(
71
+ fn_to_trace,
72
+ flat_args,
73
+ meta=fw_metadata,
74
+ aot_config=aot_config,
75
+ trace_joint=False,
76
+ )
77
+
78
+ (
79
+ fn_to_trace,
80
+ updated_flat_args_subclasses_desugared,
81
+ maybe_subclass_meta,
82
+ ) = aot_dispatch_subclass(
83
+ fn_to_trace,
84
+ updated_flat_args,
85
+ is_joint_structure=False,
86
+ meta=fw_metadata,
87
+ fw_only=flat_fn,
88
+ )
89
+
90
+ fw_module = _create_graph(
91
+ fn_to_trace,
92
+ updated_flat_args_subclasses_desugared,
93
+ aot_config=aot_config,
94
+ )
95
+
96
+ # As long as we opted to remove input mutations, then
97
+ # there should be *NO* mutating ops in the graph at this point.
98
+ copy_count = assert_functional_graph(fw_module.graph)
99
+
100
+ fw_module.graph.eliminate_dead_code()
101
+ fw_module.recompile()
102
+
103
+ copy_count2 = assert_functional_graph(fw_module.graph)
104
+ propagate_input_mutation_stacktraces(fw_module.graph)
105
+
106
+ assert copy_count == copy_count2
107
+
108
+ if aot_config.enable_log:
109
+ aot_graphs_log.info(
110
+ "%s", lazy_format_graph_code("Forward graph", fw_module, aot_config.aot_id)
111
+ )
112
+ trace_structured(
113
+ "aot_forward_graph",
114
+ payload_fn=lambda: fw_module.print_readable(print_output=False),
115
+ )
116
+
117
+ # TODO: should factor this into a separate function for export that always only returns just the graph.
118
+ if aot_config.is_export:
119
+ assert (
120
+ maybe_subclass_meta is None
121
+ ), "aot_export_module does not support tensor subclass inputs for now."
122
+ return fw_module
123
+ return fw_module, list(updated_flat_args_subclasses_desugared), maybe_subclass_meta
124
+
125
+
126
+ # Has the precondition that there
127
+ # are no duplicate arguments in flat_args (e.g., the same Tensor
128
+ # object never shows up twice. However, two tensor inputs MAY alias
129
+ # the same storage, so long as they have separate TensorImpls.)
130
+ def aot_dispatch_autograd_graph(
131
+ flat_fn,
132
+ flat_args: List[Any],
133
+ aot_config: AOTConfig,
134
+ *,
135
+ fw_metadata: ViewAndMutationMeta,
136
+ ) -> Union[Callable, Tuple[Callable, List[Any], Optional[SubclassMeta]]]:
137
+ # traced_tangents corresponds to the set of outputs in the traced forward that should get grad_outputs in the traced backward.
138
+ # It includes outputs of the original forward, *and* any updated inputs due to input mutations.
139
+ # However, it does *not* include any outputs that are aliases of inputs or intermediates, or any metadata-only input mutations.
140
+ traced_tangents = pytree.tree_map(
141
+ lambda x: x.detach().contiguous() if isinstance(x, Tensor) else x,
142
+ fw_metadata.traced_tangents,
143
+ )
144
+
145
+ joint_inputs = (flat_args, traced_tangents)
146
+
147
+ fn_prepared_for_autograd = fn_prepped_for_autograd(
148
+ flat_fn,
149
+ fw_metadata,
150
+ )
151
+ joint_fn_to_trace = create_joint(fn_prepared_for_autograd, aot_config=aot_config)
152
+
153
+ joint_fn_to_trace, updated_joint_inputs = create_functionalized_fn(
154
+ joint_fn_to_trace,
155
+ joint_inputs,
156
+ meta=fw_metadata,
157
+ aot_config=aot_config,
158
+ trace_joint=True,
159
+ )
160
+
161
+ subclass_tracing_info = aot_dispatch_subclass(
162
+ joint_fn_to_trace,
163
+ updated_joint_inputs,
164
+ is_joint_structure=True,
165
+ meta=fw_metadata,
166
+ fw_only=flat_fn,
167
+ )
168
+
169
+ joint_fn_to_trace = subclass_tracing_info.plain_tensor_trace_fn
170
+ updated_joint_inputs = subclass_tracing_info.plain_tensor_args
171
+ maybe_subclass_meta = subclass_tracing_info.maybe_subclass_meta
172
+
173
+ fx_g = _create_graph(joint_fn_to_trace, updated_joint_inputs, aot_config=aot_config)
174
+
175
+ # There should be *NO* mutating ops in the graph at this point.
176
+ assert_functional_graph(fx_g.graph)
177
+
178
+ # Redundant with the check above, but worth having in case tracing introduced
179
+ # a fake tensor. Unlikely.
180
+ # See Note: [Fake Modules and AOTAutograd]
181
+ torch._dynamo.utils.assert_no_fake_params_or_buffers(fx_g)
182
+ fx_g.graph.eliminate_dead_code()
183
+ fx_g.recompile()
184
+ # TODO: in AOTAutograd, we create metadata like _indices_of_inps_to_detach to detect
185
+ # when we need to manually detach() some inputs in the forward.
186
+ # Higher order ops might eventually need to do the same.
187
+ if aot_config.is_export:
188
+ assert (
189
+ maybe_subclass_meta is None
190
+ ), "aot_export_module does not support tensor subclass inputs for now."
191
+ return fx_g
192
+ return fx_g, updated_joint_inputs, maybe_subclass_meta
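
For orientation, the `_create_graph` helper above is essentially a thin wrapper around `make_fx`. A simplified, stand-alone sketch of that underlying capture step (omitting the FunctionalTensorMode and AOTConfig plumbing used in this file, so this is an approximation rather than the file's exact behavior) is:

import torch
from torch.fx.experimental.proxy_tensor import make_fx

def f(x):
    return (x * 2).sum()

# Trace f into an FX GraphModule, then run the same cleanup passes this module
# applies to its captured graphs.
gm = make_fx(f)(torch.randn(3))
gm.graph.eliminate_dead_code()
gm.recompile()
print(gm.print_readable(print_output=False))
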
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/functional_utils.py ADDED
@@ -0,0 +1,370 @@
1
+ """
2
+ This file contains utilities related to functionalization in AOTAutograd:
3
+ 1. converting to/from functional tensors
4
+ 2. detecting Tensor mutations - both metadata and Tensor value
5
+ 3. regenerating/replaying views from their base
6
+ 4. checking if a graph is functional i.e. whether it contains any mutation ops
7
+ """
8
+
9
+ import torch
10
+ from torch import Tensor
11
+ from torch._subclasses.fake_tensor import FakeTensor
12
+ from torch._subclasses.functional_tensor import FunctionalTensor
13
+ from torch.fx.experimental.symbolic_shapes import definitely_true, sym_eq
14
+ from torch.multiprocessing.reductions import StorageWeakRef
15
+ from torch.utils._python_dispatch import (
16
+ is_traceable_wrapper_subclass,
17
+ transform_subclass,
18
+ )
19
+
20
+
21
+ def to_fun(t):
22
+ if isinstance(t, Tensor):
23
+ if is_traceable_wrapper_subclass(t):
24
+ # See Note [Functionalization always runs last]
25
+ # This means that if we want to "functionalize" a subclass, we need to ensure that the functional wrapper
26
+ # goes at the bottom.
27
+ # recurse here, so we can support nested wrapper subclasses
28
+ out = transform_subclass(t, lambda _, inner_t: to_fun(inner_t))
29
+ torch._mirror_autograd_meta_to(t, out) # type: ignore[attr-defined]
30
+ return out
31
+ else:
32
+ return FunctionalTensor.to_functional(t)
33
+ else:
34
+ return t
35
+
36
+
37
+ def sync_functional_tensor(t):
38
+ if is_traceable_wrapper_subclass(t):
39
+ attrs, ctx = t.__tensor_flatten__() # type: ignore[attr-defined]
40
+ for attr in attrs:
41
+ sync_functional_tensor(getattr(t, attr))
42
+ else:
43
+ torch._sync(t)
44
+
45
+
46
+ # When subclasses are involved, t here will usually look something like:
47
+ # SubclassA(SubclassB(FunctionalTensor(_to_fun_tensor(FakeTensor))))
48
+ def from_fun(t):
49
+ if isinstance(t, Tensor) and is_traceable_wrapper_subclass(t):
50
+ # See Note [Functionalization always runs last]
51
+ # This means that if we want to "functionalize" a subclass, we need to ensure that the functional wrapper
52
+ # goes at the bottom.
53
+ # recurse here, so we can support nested wrapper subclasses
54
+ out = transform_subclass(t, lambda _, inner_t: from_fun(inner_t))
55
+ torch._mirror_autograd_meta_to(t, out) # type: ignore[attr-defined]
56
+ return out
57
+
58
+ if not isinstance(t, FunctionalTensor):
59
+ # quick sanity assert
60
+ if isinstance(t, torch.Tensor):
61
+ assert not torch._is_functional_tensor(t) # type: ignore[attr-defined]
62
+ return t
63
+ sync_functional_tensor(t)
64
+ return torch._from_functional_tensor(t.elem)
65
+
66
+
67
+ def is_fun(t):
68
+ if isinstance(t, Tensor) and is_traceable_wrapper_subclass(t):
69
+ # See Note [Functionalization always runs last]
70
+ # This means that if we want to "functionalize" a subclass, we need to ensure that the functional wrapper
71
+ # goes at the bottom.
72
+ # recurse here, so we can support nested wrapper subclasses
73
+ t_attrs, _ = t.__tensor_flatten__() # type: ignore[attr-defined]
74
+ t_inners = [getattr(t, attr) for attr in t_attrs]
75
+ any_fun = any(is_fun(x) for x in t_inners)
76
+ all_fun = all(is_fun(x) for x in t_inners)
77
+ assert any_fun == all_fun
78
+ return any_fun
79
+
80
+ return isinstance(t, FunctionalTensor)
81
+
82
+
83
+ # t here is either
84
+ # (1) A FunctionalTensor(_to_functional_tensor(FakeTensor))
85
+ # (2) A traceable tensor subclass that holds a FunctionalTensor
86
+ # (3) Not a tensor
87
+ def has_data_mutation(t):
88
+ if is_traceable_wrapper_subclass(t):
89
+ attrs, _ = t.__tensor_flatten__()
90
+ # A tensor subclass was updated if any of its inner elements were updated
91
+ return any(has_data_mutation(getattr(t, attr)) for attr in attrs)
92
+ else:
93
+ if isinstance(t, torch.Tensor):
94
+ assert isinstance(t, FunctionalTensor)
95
+ return torch._functionalize_has_data_mutation(t.elem) # type: ignore[attr-defined]
96
+ return False
97
+
98
+
99
+ def are_all_mutations_hidden_from_autograd(t):
100
+ if is_traceable_wrapper_subclass(t):
101
+ attrs, _ = t.__tensor_flatten__()
102
+ # If all inner elements are mutations hidden from autograd, then it is a mutation hidden from autograd.
103
+ return all(
104
+ are_all_mutations_hidden_from_autograd(getattr(t, attr)) for attr in attrs
105
+ )
106
+ elif isinstance(t, torch.Tensor):
107
+ assert isinstance(t, FunctionalTensor)
108
+ return torch._functionalize_are_all_mutations_hidden_from_autograd(t.elem)
109
+ else:
110
+ return False
111
+
112
+
113
+ def are_all_mutations_under_no_grad_or_inference_mode(t):
114
+ if is_traceable_wrapper_subclass(t):
115
+ attrs, _ = t.__tensor_flatten__()
116
+ return all(
117
+ are_all_mutations_under_no_grad_or_inference_mode(getattr(t, attr))
118
+ for attr in attrs
119
+ )
120
+ else:
121
+ assert isinstance(t, FunctionalTensor)
122
+ return torch._functionalize_are_all_mutations_under_no_grad_or_inference_mode(
123
+ t.elem
124
+ )
125
+
126
+
127
+ # f_arg here is either
128
+ # (1) A FunctionalTensor(_to_functional_tensor(FakeTensor))
129
+ # (2) A traceable tensor subclass that holds a FunctionalTensor
130
+ # (3) Not a tensor
131
+ # Assumption: arg promises to be the "original" tensor wrapped by f_arg
132
+ # Note: "storage mutations" coming from set_() are a type of metadata mutation. So:
133
+ # - check_only_storage_mutation=True: only return true if there was a storage mutation
134
+ # - check_only_storage_mutation=False: return true if there was any metadata mutation (including a storage mutation)
135
+ def has_metadata_mutation(f_arg, arg, *, check_only_storage_mutation: bool):
136
+ if is_traceable_wrapper_subclass(f_arg):
137
+ attrs, _ = f_arg.__tensor_flatten__()
138
+ # A tensor subclass was updated if any of its inner elements were updated
139
+ f_inner_ts = [getattr(f_arg, attr) for attr in attrs]
140
+ inner_ts = [getattr(arg, attr) for attr in attrs]
141
+ return any(
142
+ has_metadata_mutation(
143
+ f_inner_t,
144
+ inner_t,
145
+ check_only_storage_mutation=check_only_storage_mutation,
146
+ )
147
+ for f_inner_t, inner_t in zip(f_inner_ts, inner_ts)
148
+ )
149
+ else:
150
+ if not isinstance(f_arg, torch.Tensor):
151
+ assert not isinstance(arg, torch.Tensor)
152
+ return False
153
+ assert isinstance(f_arg, FunctionalTensor)
154
+ assert isinstance(arg, FakeTensor)
155
+
156
+ arg_after = torch._from_functional_tensor(f_arg.elem)
157
+ # This is true if the current tensor experienced at least one set_() call
158
+ maybe_storage_changed = torch._functionalize_was_storage_changed(f_arg.elem) # type: ignore[attr-defined]
159
+ # However, multiple set_() calls can cancel out. So we also check whether the
160
+ # storage of the tensor has changed.
161
+ # Note: if an input experienced two set_() calls that cancel out, **and**
162
+ # it experiences a data mutation, we pessimistically think that the set_()
163
+ # call is necessary here. We could in theory fix this, but this will
164
+ # hopefully never happen in user code, and is not needed for fsdp.
165
+ same_storages = StorageWeakRef(arg.untyped_storage()) == StorageWeakRef(
166
+ arg_after.untyped_storage()
167
+ )
168
+ has_storage_metadata_mutation = maybe_storage_changed and not same_storages
169
+ if check_only_storage_mutation:
170
+ return has_storage_metadata_mutation
171
+
172
+ # storage metadata mutation is a type of metadata mutation, so return true if we saw one
173
+ if has_storage_metadata_mutation:
174
+ return True
175
+
176
+ maybe_metadata_mutated = torch._functionalize_has_metadata_mutation(f_arg.elem) # type: ignore[attr-defined]
177
+ # This is true if the current tensor experienced at least one metadata mutation.
178
+ # So if false, we know there was no metadata mutation
179
+ if not maybe_metadata_mutated:
180
+ return False
181
+
182
+ # However, multi metadata mutations can cancel out.
183
+ # So we also check if the concrete sizes/strides on the tensor have changed.
184
+ same_sizes = arg.shape == arg_after.shape
185
+ same_strides = arg.stride() == arg_after.stride()
186
+ same_offsets = arg.storage_offset() == arg_after.storage_offset()
187
+ has_metadata_mutation_ = maybe_metadata_mutated and not (
188
+ same_sizes and same_strides and same_offsets
189
+ )
190
+ # At this point any storage change from set_() has already been handled above, so we only report
+ # a metadata mutation if the tensor's sizes/strides/storage offset actually changed.
191
+ return has_metadata_mutation_
192
+
193
+
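# Illustrative sketch of the storage comparison used in has_metadata_mutation above:
# StorageWeakRef instances compare equal exactly when they reference the same untyped
# storage, which is how a surviving set_() is distinguished from set_() calls that cancel out.
import torch
from torch.multiprocessing.reductions import StorageWeakRef

x, y = torch.randn(4), torch.randn(4)
ref_before = StorageWeakRef(x.untyped_storage())
x.set_(y)  # x now aliases y's storage
assert StorageWeakRef(x.untyped_storage()) != ref_before
assert StorageWeakRef(x.untyped_storage()) == StorageWeakRef(y.untyped_storage())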
194
+ def gen_alias_from_base(aliased_base_tensor, target_meta_tensor, target_requires_grad):
195
+ # Try to do view-replay if possible.
196
+ # Fall back to .as_strided() if we can't.
197
+ if target_meta_tensor._base is not None:
198
+ # The base that we want to replay our view off of might have a different shape than the view's original base.
199
+ b = target_meta_tensor._base
200
+ abt = aliased_base_tensor
201
+ # Don't unnecessarily call as_strided if nothing changed; as_strided's
202
+ # backward is poorly implemented and slow
203
+ if abt is not b and (
204
+ abt.size() != b.size()
205
+ or abt.stride() != b.stride()
206
+ or abt.storage_offset() != b.storage_offset()
207
+ ):
208
+ reshaped_base_tensor = aliased_base_tensor.as_strided(
209
+ b.size(), b.stride(), b.storage_offset()
210
+ )
211
+ else:
212
+ reshaped_base_tensor = aliased_base_tensor
213
+ out = target_meta_tensor._view_func(reshaped_base_tensor)
214
+ # This shape mismatch can happen due to a bug in inplace/view handling in autograd.
215
+ # Try putting a breakpoint here and running
216
+ # `test/functorch/test_aotdispatch TestAOTAutograd.test_output_all_alias_types`
217
+ # Also, https://github.com/pytorch/pytorch/issues/49825
218
+ #
219
+ # As a stopgap, we'll fall back to as_strided.
220
+ if out is not None and out.shape == target_meta_tensor.shape:
221
+ if aliased_base_tensor.requires_grad and not target_requires_grad:
222
+ out = out.detach()
223
+ elif not aliased_base_tensor.requires_grad and target_requires_grad:
224
+ out.requires_grad_(True)
225
+ return out
226
+ size = target_meta_tensor.size()
227
+ stride = target_meta_tensor.stride()
228
+ storage_offset = target_meta_tensor.storage_offset()
229
+ if aliased_base_tensor.is_complex() and not target_meta_tensor.is_complex():
230
+ aliased_out = torch.view_as_real(aliased_base_tensor).as_strided(
231
+ size, stride, storage_offset
232
+ )
233
+ elif not aliased_base_tensor.is_complex() and target_meta_tensor.is_complex():
234
+ aliased_out = torch.view_as_complex(aliased_base_tensor).as_strided(
235
+ size, stride, storage_offset
236
+ )
237
+ else:
238
+ aliased_out = aliased_base_tensor.as_strided(size, stride, storage_offset)
239
+ # For outputs aliasing inputs, we need to check if the requires-gradness has changed.
240
+ if aliased_base_tensor.requires_grad and not target_requires_grad:
241
+ aliased_out = aliased_out.detach()
242
+ elif not aliased_base_tensor.requires_grad and target_requires_grad:
243
+ aliased_out.requires_grad_(True)
244
+ # For outputs aliasing inputs, we need to check if the dtype has changed.
245
+ # as_strided() is the "most generic" view, but it does not cover cross-dtype views
246
+ if aliased_out.dtype != target_meta_tensor.dtype:
247
+ aliased_out = aliased_out.view(target_meta_tensor.dtype)
248
+ return aliased_out
249
+
250
+
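# A standalone sketch of the as_strided fallback in gen_alias_from_base above: the aliased
# output is regenerated directly off the base tensor's storage, and its requires-gradness is
# then reconciled with the target (via detach() or requires_grad_()).
import torch

base = torch.randn(4, 4, requires_grad=True)
target = base.detach()[1:, :2]  # stands in for the metadata of the output we want to rebuild
alias = base.as_strided(target.size(), target.stride(), target.storage_offset())
assert alias.data_ptr() == target.data_ptr()  # same storage, same offset
assert alias.requires_grad  # inherited from the base; the helper detaches if the target doesn't need grad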
251
+ def has_same_metadata(t1, t2):
252
+ return (
253
+ definitely_true(sym_eq(t1.size(), t2.size()))
254
+ and definitely_true(sym_eq(t1.stride(), t2.stride()))
255
+ and definitely_true(t1.storage_offset() == t2.storage_offset())
256
+ and t1.is_conj() == t2.is_conj()
257
+ and t1.is_neg() == t2.is_neg()
258
+ )
259
+
260
+
261
+ # new_arg and arg here are either:
262
+ # (1) both a FakeTensor
263
+ # (2) both a traceable tensor subclass that holds a FakeTensor
264
+ # Pre-condition: the two args are the "old" and "new" inputs from running functionalization.
265
+ # When we run functionalization and wrap our inputs into FunctionalTensors,
266
+ # we can detect whether or not an input was mutated by checking to see if the inner tensor has changed
267
+ #
268
+ # Normally it would be enough just to check if arg is new_arg, which suffices for functionalization
269
+ # to confirm that inputs were not mutated when running the user's model with functionalization on.
270
+ # But when we have subclass inputs, we can't rely on that:
271
+ # `from_fun(to_fun(x)) is x` will return False, because the call to `from_fun` constructs
272
+ # a brand new subclass instance: we are calling __tensor_unflatten__, and going
273
+ # from Subclass(FakeTensor) to Subclass(FunctionalTensor(FakeTensor))
274
+ def was_tensor_updated(arg, new_arg):
275
+ if is_traceable_wrapper_subclass(arg):
276
+ assert is_traceable_wrapper_subclass(new_arg)
277
+ attrs, _ = arg.__tensor_flatten__()
278
+ new_attrs, _ = new_arg.__tensor_flatten__()
279
+ assert attrs == new_attrs
280
+ # A tensor subclass was updated if any of its inner elements were updated
281
+ return any(
282
+ was_tensor_updated(getattr(arg, attr), getattr(new_arg, attr))
283
+ for attr in attrs
284
+ )
285
+ else:
286
+ return arg is not new_arg
287
+
288
+
289
+ # new_arg and arg here are either:
290
+ # (1) both a FakeTensor
291
+ # (2) both a traceable tensor subclass that holds a FakeTensor
292
+ # Pre-condition: the two args are the "old" and "new" inputs from running functionalization.
293
+ # When we run functionalization and wrap our inputs into FunctionalTensors,
294
+ # we can detect whether or not an input was mutated by checking to see if the inner tensor has changed,
295
+ # but shares storage with the old input
296
+ def was_tensor_metadata_updated(arg, new_arg):
297
+ if is_traceable_wrapper_subclass(arg):
298
+ assert is_traceable_wrapper_subclass(new_arg)
299
+ attrs, _ = arg.__tensor_flatten__()
300
+ new_attrs, _ = new_arg.__tensor_flatten__()
301
+ assert attrs == new_attrs
302
+ # A tensor subclass was updated if any of its inner elements were updated
303
+ return any(
304
+ was_tensor_metadata_updated(getattr(arg, attr), getattr(new_arg, attr))
305
+ for attr in attrs
306
+ )
307
+ else:
308
+ return arg is not new_arg and StorageWeakRef(
309
+ arg.untyped_storage()
310
+ ) == StorageWeakRef(new_arg.untyped_storage())
311
+
312
+
313
+ # Returns the number of detected copy_
314
+ def assert_functional_graph(fx_g: torch.fx.Graph) -> int:
315
+ placeholders = set()
316
+ copy_count = 0
317
+ # NB: It would also be nice to verify that the mutations all happen at the
318
+ # end, but we also do some administrative views after mutations so this
319
+ # isn't actually true. (TODO: Could this cause problems for Inductor?)
320
+ for n in fx_g.nodes:
321
+ if n.op == "placeholder":
322
+ placeholders.add(n)
323
+ if isinstance(n.target, torch._ops.OpOverload):
324
+ if n.target is torch.ops.aten.copy_.default:
325
+ suffix = True
326
+ # Can only copy_ into an input, and can only do so once
327
+ assert n.args[0] in placeholders
328
+ placeholders.remove(n.args[0])
329
+ copy_count += 1
330
+ else:
331
+ assert (
332
+ not n.target._schema.is_mutable
333
+ ), f"aot_autograd expected to have an entirely functional graph, but found {n.format_node()}"
334
+ return copy_count
335
+
336
+
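# Hedged sketch of what assert_functional_graph above checks: an aten-level graph traced from
# side-effect-free code (here via make_fx) contains no mutable OpOverloads, and an input
# mutation would only be allowed to appear as a copy_ into a placeholder.
import torch
from torch.fx.experimental.proxy_tensor import make_fx

def f(x):
    return x * 2 + 1

gm = make_fx(f)(torch.randn(3))
mutable = [
    n
    for n in gm.graph.nodes
    if isinstance(n.target, torch._ops.OpOverload) and n.target._schema.is_mutable
]
assert not mutable  # entirely functional: no in-place/mutable ops in the traced graph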
337
+ def propagate_input_mutation_stacktraces(fx_g: torch.fx.Graph) -> None:
338
+ placeholders = set()
339
+ for n in fx_g.nodes:
340
+ if n.op == "placeholder":
341
+ placeholders.add(n)
342
+ if isinstance(n.target, torch._ops.OpOverload):
343
+ if n.target is torch.ops.aten.copy_.default:
344
+ # Can only copy_ into an input, and can only do so once
345
+ assert n.args[0] in placeholders
346
+ placeholders.remove(n.args[0])
347
+ copy_from_node = n.args[1]
348
+ # Pre-condition: every node has a "stack_trace" field in its meta,
349
+ # but copy_() nodes do not (since we manually added them during functionalization).
350
+ # Instead, we manually propagate here.
351
+ if "stack_trace" in copy_from_node.meta:
352
+ assert "stack_trace" not in n.meta, str(n)
353
+ n.meta["stack_trace"] = copy_from_node.meta["stack_trace"]
354
+
355
+
356
+ def _check_if_mutation_can_be_in_graph(
357
+ keep_input_mutations: bool,
358
+ mutates_data,
359
+ mutates_metadata,
360
+ mutations_hidden_from_autograd,
361
+ mutations_under_no_grad_or_inference_mode,
362
+ requires_grad,
363
+ ):
364
+ if keep_input_mutations:
365
+ return mutates_data and (
366
+ (not mutates_metadata and not requires_grad)
367
+ or mutations_hidden_from_autograd
368
+ or mutations_under_no_grad_or_inference_mode
369
+ )
370
+ return False
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py ADDED
@@ -0,0 +1,936 @@
1
+ """
2
+ These are the runtime wrappers that are associated with JIT-compiling.
3
+
4
+ This includes the forward-only and joint JIT runtime wrappers.
5
+
6
+ This module depends heavily on the runtime wrapper building blocks defined
7
+ in `runtime_wrappers`.
8
+ """
9
+
10
+ import logging
11
+ from contextlib import nullcontext
12
+ from functools import wraps
13
+ from typing import Any, List, Optional
14
+
15
+ import torch
16
+ import torch.utils.dlpack
17
+ from torch import Tensor
18
+ from torch._dynamo.utils import lazy_format_graph_code
19
+ from torch._guards import detect_fake_mode, tracing, TracingContext
20
+ from torch._logging import getArtifactLogger, trace_structured
21
+ from torch._prims_common import CUDARngStateHelper
22
+ from torch._subclasses import FakeTensor
23
+ from torch.fx.experimental._backward_state import BackwardState
24
+ from torch.fx.experimental.proxy_tensor import is_sym_node
25
+ from torch.fx.experimental.symbolic_shapes import fx_placeholder_vals
26
+ from .. import config
27
+ from .dispatch_and_compile_graph import (
28
+ aot_dispatch_autograd_graph,
29
+ aot_dispatch_base_graph,
30
+ )
31
+ from .logging_utils import describe_input, format_guard_bug_msg, track_graph_compiling
32
+
33
+ from .runtime_wrappers import (
34
+ aot_dispatch_subclass_wrapper,
35
+ create_runtime_wrapper,
36
+ functionalized_rng_runtime_epilogue,
37
+ )
38
+ from .schemas import (
39
+ AOTConfig,
40
+ MutationType,
41
+ OutputType,
42
+ SubclassMeta,
43
+ TensorAlias,
44
+ ViewAndMutationMeta,
45
+ )
46
+ from .subclass_utils import (
47
+ compute_inner_mutated_inp_indices_from_subclass_meta,
48
+ unwrap_tensor_subclasses,
49
+ wrap_tensor_subclasses,
50
+ )
51
+
52
+ from .utils import (
53
+ _get_symint_hints,
54
+ call_func_at_runtime_with_args,
55
+ make_boxed_func,
56
+ normalize_as_list,
57
+ strict_zip,
58
+ )
59
+
60
+ zip = strict_zip
61
+
62
+ log = logging.getLogger(__name__)
63
+ aot_joint_log = getArtifactLogger(__name__, "aot_joint_graph")
64
+ aot_graphs_log = getArtifactLogger(__name__, "aot_graphs")
65
+
66
+ aten = torch.ops.aten
67
+
68
+
69
+ def _compute_output_meta_with_inductor_strides(fw_module, fwd_output_strides):
70
+ out = [n.meta["val"] for n in (list(fw_module.graph.nodes)[-1].args[0])]
71
+ # will only be set for inductor
72
+ if not fwd_output_strides:
73
+ return out
74
+ with TracingContext.get().fake_mode.shape_env.suppress_guards():
75
+ for i in range(len(out)):
76
+ if not isinstance(out[i], Tensor):
77
+ continue
78
+ if all(s1 == s2 for s1, s2 in zip(out[i].stride(), fwd_output_strides[i])):
79
+ continue
80
+ out[i] = out[i].as_strided(out[i].shape, fwd_output_strides[i])
81
+ return out
82
+
83
+
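# Minimal sketch of the restride step above: as_strided reinterprets the same storage with
# the strides the compiler reported, without copying data (shown here on a real tensor; the
# function above applies the same idea to fake output metadata).
import torch

t = torch.empty(2, 3)                      # default contiguous strides (3, 1)
restrided = t.as_strided(t.shape, (1, 2))  # e.g. a column-major layout over the same storage
assert restrided.stride() == (1, 2) and restrided.data_ptr() == t.data_ptr()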
84
+ def aot_dispatch_base(
85
+ flat_fn,
86
+ flat_args: List[Tensor],
87
+ aot_config: AOTConfig,
88
+ *,
89
+ fw_metadata: ViewAndMutationMeta,
90
+ ):
91
+ fw_module, updated_flat_args, maybe_subclass_meta = aot_dispatch_base_graph( # type: ignore[misc]
92
+ flat_fn, flat_args, aot_config, fw_metadata=fw_metadata
93
+ )
94
+
95
+ disable_amp = torch._C._is_any_autocast_enabled()
96
+ context = torch._C._DisableAutocast if disable_amp else nullcontext
97
+ fakified_out = None
98
+
99
+ with context(), track_graph_compiling(aot_config, "inference"):
100
+ compiler = (
101
+ aot_config.inference_compiler
102
+ if aot_config.inference_compiler is not None
103
+ else aot_config.fw_compiler
104
+ )
105
+ if config.functionalize_rng_ops:
106
+ # Add the seed and offset as example inputs to pass to the compiler
107
+ fake_mode = detect_fake_mode()
108
+ seed, offset = CUDARngStateHelper.get_torch_state_as_tuple(fake_mode)
109
+ updated_flat_args.extend([seed, offset])
110
+
111
+ if tracing_context := torch._guards.TracingContext.try_get():
112
+ tracing_context.fw_metadata = (
113
+ fw_metadata
114
+ if maybe_subclass_meta is None
115
+ else maybe_subclass_meta.fw_metadata
116
+ )
117
+
118
+ with TracingContext.report_output_strides() as fwd_output_strides:
119
+ compiled_fw = compiler(fw_module, updated_flat_args)
120
+
121
+ # see note: [Returning Fake Tensors on First AOT Autograd Call]
122
+ if tracing_context and tracing_context.fakify_first_call:
123
+ fakified_out = _compute_output_meta_with_inductor_strides(
124
+ fw_module, fwd_output_strides
125
+ )
126
+
127
+ # However, create_runtime_wrapper does not expect the rng offsets in the
128
+ # output. So, we have to create another wrapper and take out the offset. As
129
+ # a result, we have to account for not boxed_call compilers as well.
130
+ if not hasattr(compiled_fw, "_boxed_call"):
131
+ compiled_fw = make_boxed_func(compiled_fw)
132
+
133
+ # Create a wrapper to set up the rng functionalize bits
134
+ @wraps(compiled_fw)
135
+ def rng_functionalization_wrapper(args):
136
+ # see note: [Returning Fake Tensors on First AOT Autograd Call]
137
+ nonlocal fakified_out
138
+ if fakified_out is not None:
139
+ out = fakified_out
140
+ fakified_out = None
141
+ return out
142
+
143
+ # args is a list because compiled_fw is boxed_call
144
+ if fw_metadata.is_rng_op_functionalized:
145
+ # Add the seed and offset to args
146
+ seed, offset = CUDARngStateHelper.get_torch_state_as_tuple()
147
+ args.extend([seed, offset])
148
+ out = compiled_fw(args)
149
+ out = functionalized_rng_runtime_epilogue(fw_metadata, out)
150
+ return out
151
+ else:
152
+ return compiled_fw(args)
153
+
154
+ if maybe_subclass_meta is not None:
155
+ compiled_fw_func = aot_dispatch_subclass_wrapper(
156
+ rng_functionalization_wrapper,
157
+ subclass_metas=fw_metadata.subclass_fw_graph_out_meta,
158
+ num_fw_outs_saved_for_bw=None,
159
+ )
160
+ else:
161
+ compiled_fw_func = rng_functionalization_wrapper
162
+
163
+ if not hasattr(compiled_fw_func, "_boxed_call"):
164
+ compiled_fw_func = make_boxed_func(compiled_fw_func)
165
+
166
+ compiled_fn = create_runtime_wrapper(
167
+ compiled_fw_func,
168
+ runtime_metadata=fw_metadata,
169
+ indices_of_inps_to_detach=[],
170
+ trace_joint=False,
171
+ keep_input_mutations=aot_config.keep_inference_input_mutations,
172
+ disable_amp=disable_amp,
173
+ )
174
+
175
+ return compiled_fn
176
+
177
+
178
+ def aot_dispatch_autograd(
179
+ flat_fn,
180
+ flat_args: List[Any],
181
+ aot_config: AOTConfig,
182
+ *,
183
+ fw_metadata: ViewAndMutationMeta,
184
+ ):
185
+ fw_metadata.deterministic = torch.are_deterministic_algorithms_enabled()
186
+ fx_g, joint_inputs, maybe_subclass_meta = aot_dispatch_autograd_graph( # type: ignore[misc]
187
+ flat_fn, flat_args, aot_config, fw_metadata=fw_metadata
188
+ )
189
+
190
+ # Copied from aot_dispatch_autograd_graph.
191
+ disable_amp = torch._C._is_any_autocast_enabled()
192
+
193
+ if aot_config.enable_log:
194
+ aot_joint_log.info(
195
+ "%s", lazy_format_graph_code("Joint graph", fx_g, aot_config.aot_id)
196
+ )
197
+ trace_structured(
198
+ "aot_joint_graph",
199
+ payload_fn=lambda: fx_g.print_readable(print_output=False), # type: ignore[union-attr]
200
+ )
201
+
202
+ fakify_first_call = False
203
+ fakified_out = None
204
+
205
+ with torch.no_grad():
206
+ inner_meta = (
207
+ fw_metadata
208
+ if maybe_subclass_meta is None
209
+ else maybe_subclass_meta.fw_metadata
210
+ )
211
+ with track_graph_compiling(aot_config, "joint"):
212
+ # See Note: [Partitioner handling for Subclasses, Part 1]
213
+ # See Note: [Recomputing subclass mutation handling]
214
+ mutated_inp_runtime_indices = (
215
+ compute_inner_mutated_inp_indices_from_subclass_meta(
216
+ fw_metadata, inner_meta
217
+ )
218
+ )
219
+ num_mutated_inp_runtime_indices = len(mutated_inp_runtime_indices)
220
+ num_inner_fwd_outputs = (
221
+ num_mutated_inp_runtime_indices
222
+ + inner_meta.num_outputs
223
+ + inner_meta.num_intermediate_bases
224
+ + inner_meta.num_outputs_rng_offset
225
+ + len(
226
+ fw_metadata.tokens
227
+ ) # See Note [Side-Effectful Tokens in AOTAutograd]
228
+ )
229
+ fw_module, bw_module = aot_config.partition_fn(
230
+ fx_g, joint_inputs, num_fwd_outputs=num_inner_fwd_outputs
231
+ )
232
+
233
+ fw_outs = next(n for n in fw_module.graph.nodes if n.op == "output").args[0]
234
+ # we only need to bookkeep the symints that are saved for bw, not any symints
235
+ # the user forward might have returned in its own output
236
+ fw_outs_saved_for_bw = fw_outs[num_inner_fwd_outputs:]
237
+ num_fw_outs_saved_for_bw = len(fw_outs_saved_for_bw)
238
+ symint_outs_saved_for_bw = [
239
+ n for n in fw_outs_saved_for_bw if is_sym_node(n)
240
+ ]
241
+ fw_metadata.num_symints_saved_for_bw = len(symint_outs_saved_for_bw)
242
+ inner_meta.num_symints_saved_for_bw = len(symint_outs_saved_for_bw)
243
+ _num_symints_saved_for_bw = len(symint_outs_saved_for_bw)
244
+
245
+ # Note [Detaching inputs that never need gradients]
246
+ # See https://github.com/pytorch/pytorch/issues/97745
247
+ # Suppose we have a function like this that we want to compile:
248
+ #
249
+ # def f(x, y):
250
+ # return torch.mul(x, y.detach())
251
+ #
252
+ # What gradients should we compute for x and y?
253
+ # By default, AOTAutograd will compute a gradient for **every** input that requires gradients,
254
+ # and so we'll compute:
255
+ # x_grad_input = y
256
+ # y_grad_input = None
257
+ # Does this preserve the semantics of eager mode?
258
+ # Unfortunately, no.
259
+ # Doing the above will cause autograd to **continue** to backprop the autograd tape
260
+ # that was generated from constructing y.
261
+ #
262
+ # This is **different** from what would have happened in eager mode.
263
+ # In eager mode, if we backprop through the output of this function, autograd will only traverse
264
+ # the bit of the autograd tape corresponding to "x".
265
+ # In particular, if a user had previously backpropped through y's autograd tape,
266
+ # And then they try to backprop through the output of the above function,
267
+ # then we'll hit the dreaded "Trying to backward through the graph a second time" error.
268
+ #
269
+ # You might think: If autograd sees that a gradient is None, shouldn't it stop early,
270
+ # instead of continuing the backprop through the ancestors of that node in the graph?
271
+ #
272
+ # Autograd has two passes:
273
+ # (1) a first pass that traverses the autograd graph and figures out which nodes need to be executed
274
+ # (2) a second pass that actually goes ahead and executes each node when it becomes ready,
275
+ # propagating gradients
276
+ # By the time we're executing a node and we see that it produces a None, the set of nodes to execute
277
+ # is already locked-in.
278
+ #
279
+ # The fix: instead, we can recognize statically that the graph we're compiling will never contribute
280
+ # gradients to y, and prevent autograd from trying to traverse y's autograd tape at all.
281
+ # We can do this by manually detach'ing y before sending it through the `CompiledFunction`.
282
+ #
283
+ # Note that this solution is not bulletproof.
284
+ # It's possible to construct a case where eager may or may not have have tried to autograd through y,
285
+ # depending on the actual grad_outputs that were passed in during the backward.
286
+ # There is no easy fix for this: the simplest fix would be to run with `retain_graph=True`,
287
+ # allowing autograd to re-use the graph.
288
+ #
289
+ # An example of this case is:
290
+ # def f(x):
291
+ # return x.detach() * 2, x * 3
292
+ # If we were to only backprop through outs[0], then in eager mode autograd would stop early:
293
+ # backpropping only through the first output shouldn't send a grad through x.
294
+ # But the custom autograd function doesn't know that: it will materialize zero grads for x * 3
295
+ # and we will end up with a zero grad at x.
296
+ # If we later backprop through the second output, this will also require backprop'ing through x.
297
+ # Meaning we'll need to use `retain_graph=True` to be able to backprop through x the second time.
298
+ _indices_of_inps_to_detach = []
299
+ bw_outs = next(n for n in bw_module.graph.nodes if n.op == "output").args[0]
300
+
301
+ # TODO: we should apply the below "detach inputs if their gradients are statically known to be None"
302
+ # optimization even if we have subclass inputs/outputs (we do not handle this today).
303
+ # Computing which our our inputs get None gradients is a bit more complicated,
304
+ # if any of our inputs are subclasses. Why?
305
+ # (a) we need to make sure that we call .detach() on the input subclasses, since autograd sees subclasses.
306
+ # (b) The grad_outputs that we AOT computed in our backward graph are the desugared tensor tensors,
307
+ # so we need to figure out which subclass fw inputs they map to.
308
+ if maybe_subclass_meta is None:
309
+ assert (
310
+ len(bw_outs)
311
+ == len(fw_metadata.input_info) + inner_meta.num_outputs_rng_offset
312
+ )
313
+ for i, (bw_out) in enumerate(bw_outs):
314
+ if bw_out is None:
315
+ _indices_of_inps_to_detach.append(i)
316
+
317
+ if aot_config.enable_log:
318
+ aot_graphs_log.info(
319
+ "%s",
320
+ lazy_format_graph_code("Forward graph", fw_module, aot_config.aot_id),
321
+ )
322
+ aot_graphs_log.info(
323
+ "%s",
324
+ lazy_format_graph_code("Backward graph", bw_module, aot_config.aot_id),
325
+ )
326
+ trace_structured(
327
+ "aot_forward_graph",
328
+ payload_fn=lambda: fw_module.print_readable(print_output=False),
329
+ )
330
+ trace_structured(
331
+ "aot_backward_graph",
332
+ payload_fn=lambda: bw_module.print_readable(print_output=False),
333
+ )
334
+
335
+ with track_graph_compiling(aot_config, "forward"):
336
+ # flat_args at this point might still be subclasses-
337
+ # make sure to pass the unwrapped fake tensors into the compiler!
338
+ adjusted_flat_args = joint_inputs[0]
339
+ if config.functionalize_rng_ops:
340
+ # Update example inputs for the fw_compiler
341
+ fake_mode = detect_fake_mode()
342
+ seed, offset = CUDARngStateHelper.get_torch_state_as_tuple(fake_mode)
343
+ adjusted_flat_args.extend([seed, offset])
344
+ # We are not clearing flat_args here because
345
+ # 1) There is a check in the debug compiler at the end
346
+ # 2) It does not matter as these are fake tensors
347
+
348
+ if tracing_context := torch._guards.TracingContext.try_get():
349
+ tracing_context.fw_metadata = inner_meta
350
+
351
+ with TracingContext.report_output_strides() as fwd_output_strides:
352
+ compiled_fw_func = aot_config.fw_compiler(fw_module, adjusted_flat_args)
353
+ if not hasattr(compiled_fw_func, "_boxed_call"):
354
+ compiled_fw_func = make_boxed_func(compiled_fw_func)
355
+
356
+ # see note: [Returning Fake Tensors on First AOT Autograd Call]
357
+ if tracing_context and tracing_context.fakify_first_call:
358
+ fakified_out = _compute_output_meta_with_inductor_strides(
359
+ fw_module, fwd_output_strides
360
+ )
361
+ fakify_first_call = True
362
+
363
+ if maybe_subclass_meta is not None:
364
+ # Why do we need to pass in num_fw_outs_saved_for_bw?
365
+ # See Note: [Partitioner handling for Subclasses, Part 2]
366
+ compiled_fw_func = aot_dispatch_subclass_wrapper(
367
+ compiled_fw_func,
368
+ subclass_metas=fw_metadata.subclass_fw_graph_out_meta,
369
+ num_fw_outs_saved_for_bw=num_fw_outs_saved_for_bw,
370
+ )
371
+ if not hasattr(compiled_fw_func, "_boxed_call"):
372
+ compiled_fw_func = make_boxed_func(compiled_fw_func)
373
+
374
+ # NB: It's important to compile backwards ahead of time, as this may
375
+ # add extra guards which we need to apply to the Dynamo cache at
376
+ # forwards
377
+ with track_graph_compiling(aot_config, "backward"):
378
+ placeholder_list = fx_placeholder_vals(bw_module)
379
+
380
+ forward_saved_for_backwards_strides = None
381
+ if fwd_output_strides is not None:
382
+ forward_saved_for_backwards_strides = fwd_output_strides[
383
+ inner_meta.tensors_saved_for_backwards_slice
384
+ ]
385
+
386
+ # saved activations can have different strides than in eager if
387
+ # the compiler does layout optimization. We should restride the
388
+ # tensor passed in for compiling the backward graph using the
389
+ # saved tensor's stride.
390
+ for i in range(len(placeholder_list)):
391
+ ph_arg = placeholder_list[i]
392
+ if not isinstance(ph_arg, torch.Tensor):
393
+ continue
394
+
395
+ if forward_saved_for_backwards_strides is None:
396
+ continue
397
+
398
+ real_stride = None
399
+ # Per all_args calling convention
400
+ j = i - len(symint_outs_saved_for_bw)
401
+ if 0 <= j < len(forward_saved_for_backwards_strides):
402
+ real_stride = forward_saved_for_backwards_strides[j]
403
+ if real_stride is None:
404
+ continue
405
+
406
+ # Comparing ph_arg.stride() with real_stride directly may
407
+ # cause dynamic dimensions in ph_arg being specialized to static
408
+ # value. Using the hints to avoid that.
409
+ if _get_symint_hints(ph_arg.stride()) != real_stride:
410
+ # Note that here we use the stride of the real tensor to
411
+ # restride a FakeTensor. This does not cause trouble
412
+ # for dynamic shape since this code path only gets
413
+ # executed if layout optimization is enabled. And we
414
+ # disable layout optimization for dynamic shape right
415
+ # now.
416
+ #
417
+ # A solution that decides stride order based on the real
418
+ # tensor's stride and then applies that stride order to
419
+ # the FakeTensor does not work smoothly since some
420
+ # tensor's layout is not 'dense'. E.g. mixnet_l has a
421
+ # tensor with size [8, 64, 112, 112] and strides
422
+ # (2408448, 1, 21504, 192). The solution mentioned will
423
+ # decide a stride of (802816, 1, 7168, 64) for this
424
+ # tensor which is wrong.
425
+ placeholder_list[i] = ph_arg.as_strided(ph_arg.size(), real_stride)
426
+
427
+ compiled_bw_func = None
428
+ if len(symint_outs_saved_for_bw):
429
+ context = torch._C._DisableAutocast if disable_amp else nullcontext
430
+ with context():
431
+ try:
432
+ compiled_bw_func = aot_config.bw_compiler(
433
+ bw_module, placeholder_list
434
+ )
435
+ except Exception:
436
+ log.warning(
437
+ "failed to eagerly compile backwards for dynamic, suppressing in case backwards not needed",
438
+ exc_info=True,
439
+ )
440
+ # Compiled autograd will run the bw_module in the backward pass,
441
+ # so recompilation needs to happen anyway if the backward pass is ever
442
+ # called.
443
+ #
444
+ # The reason we do the GraphModule recompilation here is because
445
+ # the lazy recompilation will cause issue in the backward pass
446
+ # with compiled autograd.
447
+ #
448
+ # Do the _LazyGraphModule.force_recompile here rather than when
449
+ # bw_module is first generated by the partitioner because the bw_module.recompile
450
+ # may be called in some code path later and cause the _LazyGraphModule.forward
451
+ # becomes the lazy version again. One example is when dynamic shape is enabled
452
+ # upfront, the bw_compiler will be called above which can cause extra
453
+ # graph module recompilation on bw_module.
454
+ if torch._dynamo.compiled_autograd.compiled_autograd_enabled_count:
455
+ from torch.fx._lazy_graph_module import _LazyGraphModule
456
+
457
+ _LazyGraphModule.force_recompile(bw_module)
458
+
459
+ saved_context = TracingContext.try_get()
460
+
461
+ backward_state_indices = [
462
+ idx for idx, x in enumerate(flat_args) if isinstance(x, BackwardState)
463
+ ]
464
+ assert len(backward_state_indices) <= 1
465
+
466
+ class CompiledFunction(torch.autograd.Function):
467
+ compiled_fw = compiled_fw_func
468
+ compiled_bw = compiled_bw_func
469
+ metadata: ViewAndMutationMeta = fw_metadata # type: ignore[assignment]
470
+ maybe_subclass_metadata: Optional[SubclassMeta] = maybe_subclass_meta
471
+ num_symints_saved_for_bw = _num_symints_saved_for_bw
472
+ _compiled_autograd_should_lift = False
473
+ _fakify_first_call = fakify_first_call
474
+
475
+ @staticmethod
476
+ def _compiled_autograd_key(ctx):
477
+ return (ctx._autograd_function_id, *ctx.symints)
478
+
479
+ @staticmethod
480
+ def forward(ctx, *deduped_flat_tensor_args):
481
+ args = deduped_flat_tensor_args
482
+ if backward_state_indices:
483
+ bw_state = args[backward_state_indices[0]]
484
+ assert isinstance(bw_state, BackwardState)
485
+ ctx._compiled_autograd_backward_state = bw_state
486
+
487
+ marked_dirty_inps = []
488
+ for i in fw_metadata.mutated_graph_handled_indices_seen_by_autograd:
489
+ arg = deduped_flat_tensor_args[i]
490
+ if not (arg.requires_grad and arg.is_leaf): # would error
491
+ ctx.mark_dirty(arg)
492
+ marked_dirty_inps.append(arg)
493
+
494
+ if not CompiledFunction._fakify_first_call:
495
+ if CompiledFunction.metadata.is_rng_op_functionalized:
496
+ # Add the seed and offset to args
497
+ seed, offset = CUDARngStateHelper.get_torch_state_as_tuple()
498
+ args = (*args, seed, offset)
499
+ # There is a pretty complicated calling convention around what the compiled fw returns.
500
+ # The full list of outputs and their relative order is:
501
+ # (*tokens, *mutated_inputs, *fw_outs, *fw_intermediate_bases, *saved_tensors, *saved_symints)
502
+ # - Note that in the synthetic bases case, mutated_inputs will correspond to an updated version
503
+ # of the original view, and not the synthetic base
504
+
505
+ fw_outs = call_func_at_runtime_with_args(
506
+ CompiledFunction.compiled_fw,
507
+ args,
508
+ disable_amp=disable_amp,
509
+ )
510
+ else:
511
+ nonlocal fakified_out
512
+ assert fakified_out is not None
513
+ CompiledFunction._fakify_first_call = False
514
+ fw_outs = fakified_out
515
+ fakified_out = None
516
+
517
+ num_outputs = CompiledFunction.metadata.num_outputs
518
+ num_outputs_aliased = CompiledFunction.metadata.num_outputs_aliased
519
+ num_mutated_runtime_inps = (
520
+ CompiledFunction.metadata.num_mutated_inp_runtime_indices
521
+ )
522
+ num_tokens = len(CompiledFunction.metadata.tokens)
523
+ num_forward_returns = CompiledFunction.metadata.num_forward_returns
524
+ num_forward = CompiledFunction.metadata.num_forward
525
+
526
+ # Partitioners must put symint arguments at the end separate from tensor arguments
527
+ tensors_saved_for_backwards = fw_outs[
528
+ CompiledFunction.metadata.tensors_saved_for_backwards_slice
529
+ ]
530
+ assert all(isinstance(x, torch.Tensor) for x in tensors_saved_for_backwards)
531
+ # See Note [Detaching saved tensors in AOTAutograd]
532
+ ctx.save_for_backward(
533
+ *(
534
+ x.detach() if x._is_view() else x
535
+ for x in tensors_saved_for_backwards
536
+ )
537
+ )
538
+ symint_outs = fw_outs[
539
+ CompiledFunction.metadata.symints_saved_for_backwards_slice
540
+ ]
541
+ assert all(
542
+ isinstance(x, (int, float, torch.SymInt, torch.SymFloat))
543
+ for x in symint_outs
544
+ ), str([type(x) for x in symint_outs])
545
+ ctx.symints = symint_outs
546
+
547
+ raw_returns = fw_outs[0 : num_forward_returns + num_tokens]
548
+
549
+ # Wrap all autograd.Function.forward() outputs that are aliases
550
+ # so that autograd.Function doesn't treat them as tensors
551
+ if num_mutated_runtime_inps > 0:
552
+ for i, idx in enumerate(
553
+ CompiledFunction.metadata.mutated_inp_runtime_indices
554
+ ):
555
+ # We could make this faster by only looping over inputs with metadata-only mutations
556
+ # (instead of looping over inputs with either data or metadata mutations), but there shouldn't be many.
557
+ info = CompiledFunction.metadata.input_info[idx]
558
+ if info.mutates_metadata and not info.mutates_data:
559
+ raw_returns[i] = TensorAlias(raw_returns[i])
560
+
561
+ if config.debug_assert:
562
+ user_mutated_inputs_raw = raw_returns[0:num_mutated_runtime_inps]
563
+ mut_inp_infos = [
564
+ x
565
+ for x in CompiledFunction.metadata.input_info
566
+ if x.mutates_data or x.mutates_metadata
567
+ ]
568
+ assert len(user_mutated_inputs_raw) == len(mut_inp_infos)
569
+
570
+ if CompiledFunction.metadata.num_unsafe_view_outputs > 0:
571
+ for idx in CompiledFunction.metadata.unsafe_view_out_indices:
572
+ raw_return_idx = num_mutated_runtime_inps + idx
573
+ o = raw_returns[raw_return_idx]
574
+ raw_returns[raw_return_idx] = torch.ops.aten._unsafe_view(
575
+ o, o.shape
576
+ )
577
+
578
+ if num_outputs_aliased > 0:
579
+ for idx in CompiledFunction.metadata.aliased_out_indices:
580
+ raw_return_idx = num_mutated_runtime_inps + idx
581
+ raw_returns[raw_return_idx] = TensorAlias(
582
+ raw_returns[raw_return_idx]
583
+ )
584
+
585
+ if config.debug_assert:
586
+ intermediates_raw = raw_returns[
587
+ num_mutated_runtime_inps + num_outputs :
588
+ ]
589
+ assert not any(
590
+ isinstance(x, TensorAlias) for x in intermediates_raw
591
+ )
592
+
593
+ # invariant: intermediate bases always require gradients, so we don't have to
594
+ # consider marking them as non-differentiable.
595
+ raw_returns_not_including_intermediate_bases = raw_returns[
596
+ : num_mutated_runtime_inps + num_outputs
597
+ ]
598
+ raw_returns_meta = [
599
+ x
600
+ for x in CompiledFunction.metadata.input_info
601
+ if x.mutation_type == MutationType.MUTATED_OUT_GRAPH
602
+ ] + CompiledFunction.metadata.output_info
603
+
604
+ fw_outs_not_requiring_grad = [
605
+ x
606
+ for (i, x) in enumerate(raw_returns_not_including_intermediate_bases)
607
+ if isinstance(x, torch.Tensor) and not raw_returns_meta[i].requires_grad
608
+ ]
609
+ ctx.mark_non_differentiable(*fw_outs_not_requiring_grad)
610
+ ctx._materialize_non_diff_grads = False
611
+
612
+ functionalized_rng_runtime_epilogue(
613
+ CompiledFunction.metadata,
614
+ fw_outs[num_forward_returns:num_forward],
615
+ return_new_outs=False,
616
+ )
617
+ return tuple(raw_returns) + tuple(marked_dirty_inps)
618
+
619
+ @staticmethod
620
+ def backward(ctx, *flat_args):
621
+ # Calling convention: we expect a grad_out passed to the backward:
622
+ # - for every output of the fw that does *not* alias an input or graph intermediate
623
+ # - for every updated_input generated by the fw that does *not* alias an input (aka only data-mutations)
624
+ # - for every graph intermediate that we need to use to generate an output later.
625
+ # The other outputs in the autograd.Function.forward that do *not* show up in the backward include:
626
+ # - outputs that alias inputs or graph intermediates
627
+ # - updated inputs due to metadata-only mutations.
628
+ # We need to return them in the forward, but ensure that they all do not get gradients in the backward,
629
+ # and we filter them out here before passing the remaining grad_outputs into the compiled backward.
630
+ num_intermediate_bases = CompiledFunction.metadata.num_intermediate_bases
631
+ num_graph_handled_inputs = (
632
+ CompiledFunction.metadata.num_mutated_graph_handled_indices_seen_by_autograd
633
+ )
634
+ num_mutated_runtime_inps = (
635
+ CompiledFunction.metadata.num_mutated_inp_runtime_indices
636
+ )
637
+ expected_grad_outs = (
638
+ CompiledFunction.metadata.num_outputs
639
+ + num_mutated_runtime_inps
640
+ + num_intermediate_bases
641
+ )
642
+ deterministic = CompiledFunction.metadata.deterministic
643
+ global_deterministic = torch.are_deterministic_algorithms_enabled()
644
+ if deterministic is not None:
645
+ torch._check(
646
+ not (not deterministic and global_deterministic),
647
+ lambda: (
648
+ "This compiled backward function is being run with "
649
+ "torch.use_deterministic_algorithms(True), "
650
+ "but it was previously generated during the forward function while "
651
+ "torch.use_deterministic_algorithms(False) was set."
652
+ ),
653
+ )
654
+
655
+ if num_graph_handled_inputs > 0:
656
+ flat_args = flat_args[:-num_graph_handled_inputs]
657
+ assert len(flat_args) == expected_grad_outs
658
+ out_info = CompiledFunction.metadata.output_info
659
+
660
+ inp_tangents, out_tangents, intermediate_base_tangents = (
661
+ flat_args[0:num_mutated_runtime_inps],
662
+ flat_args[
663
+ num_mutated_runtime_inps : num_mutated_runtime_inps
664
+ + CompiledFunction.metadata.num_outputs
665
+ ],
666
+ flat_args[
667
+ num_mutated_runtime_inps + CompiledFunction.metadata.num_outputs :
668
+ ],
669
+ )
670
+ # input_info contains info on *every* input,
671
+ # But in the backward(), we are only given grad outputs for every mutated input
672
+ # We then need to filter out the grad outputs that correspond to metadata-only mutations or don't require grad
673
+ input_info = CompiledFunction.metadata.input_info
674
+ inp_tangents_filtered = [
675
+ x
676
+ for x, info_idx in zip(
677
+ inp_tangents, CompiledFunction.metadata.mutated_inp_runtime_indices
678
+ )
679
+ if input_info[info_idx].mutates_data
680
+ and input_info[info_idx].requires_grad
681
+ ]
682
+ # We also need to filter out grad outputs that correspond to outputs aliasing inputs/intermediates
683
+ out_tangents_filtered = [
684
+ x
685
+ for x, info in zip(out_tangents, out_info)
686
+ if info.output_type
687
+ in [
688
+ OutputType.non_alias,
689
+ OutputType.unsafe_view_alias,
690
+ OutputType.custom_function_view,
691
+ ]
692
+ and issubclass(info.raw_type, torch.Tensor)
693
+ and info.requires_grad
694
+ ]
695
+ # intermediate bases always require gradients, and always participate in the backward graph.
696
+ flat_bw_args_with_grads = [
697
+ *inp_tangents_filtered,
698
+ *out_tangents_filtered,
699
+ *intermediate_base_tangents,
700
+ ]
701
+ num_flat_bw_args_with_grads = len(flat_bw_args_with_grads)
702
+
703
+ # sanity asserts
704
+ # metadata_only_inps = [
705
+ # x for x, info_idx in zip(inp_tangents, mutated_inp_indices)
706
+ # if not input_info[info_idx].mutates_data
707
+ # ]
708
+ # aliased_outputs = [
709
+ # x for x, info in zip(out_tangents, out_info) if info.output_type != OutputType.non_alias]
710
+ # assert all(x is None for x in metadata_only_inps)
711
+ # assert all(x is None for x in aliased_outputs)
712
+
713
+ rng_args = []
714
+ if CompiledFunction.metadata.is_rng_op_functionalized:
715
+ # Add the seed and offset to args
716
+ rng_args = CUDARngStateHelper.get_torch_state_as_tuple()
717
+
718
+ all_args = [
719
+ *ctx.symints,
720
+ *ctx.saved_tensors,
721
+ *flat_bw_args_with_grads,
722
+ *rng_args,
723
+ ]
724
+ del flat_bw_args_with_grads
725
+
726
+ tangents_start_idx = (
727
+ len(all_args) - num_flat_bw_args_with_grads - len(rng_args)
728
+ )
729
+ tangents_end_idx = len(all_args) - len(rng_args)
730
+
731
+ # Note: [AOTAutograd Backward Guards]
732
+ # During AOTDispatch, we eagerly create and trace out a joint fw-bw graph.
733
+ # Doing so requires us to "guess" about some of the metadata of our grad_outputs.
734
+ #
735
+ # In particular: if an output to the forward is a plain tensor or a subclass,
736
+ # its corresponding grad_output in the backward **may or may not** be
737
+ # a plain tensor or a subclass. The main cases are:
738
+ # (1) If an output is a plain tensor, its grad_out will also be a plain tensor,
739
+ # *unless* the output is used in some subclass compute later in the forward graph,
740
+ # which will cause its grad_output to become a subclass
741
+ # (2) If an output is a subclass, its grad_out will also be a subclass,
742
+ # *unless* the output of the forward did not actually participate in the gradient computation,
743
+ # in which case autograd will insert a plain tensor of zeros for the grad_output.
744
+ # We could avoid this case with `torch.autograd.Function.set_materialize_grads`,
745
+ # although this is not turned on today in AOTAutgrad and would require more work.
746
+ #
747
+ # Today, we make a guess on subclass-ness based on the above examples,
748
+ # and hard-error in the backward if we guessed wrong.
749
+ #
750
+ # In the future, we should add backward guards that would allow us to
751
+ # properly handle this case instead of erroring: we would need to retrace the backward graph,
752
+ # since we might produce an entirely different trace if our grad_outputs are subclass or not.
753
+ assert (
754
+ len(CompiledFunction.metadata.output_types)
755
+ == num_flat_bw_args_with_grads
756
+ )
757
+ grad_output_types = [
758
+ type(x) for x in all_args[-num_flat_bw_args_with_grads:]
759
+ ]
760
+ # In general, we can add more asserts/guards here for when we partitioned
761
+ # with incorrect assumptions about the grad_outputs.
762
+ # Normalize FakeTensor -> torch.Tensor
763
+ # - during tracing our types are FakeTensor
764
+ # - at runtime in the backward our types are torch.Tensor...
765
+ # - unless we're running compiled backward, in which case they are also FakeTensor
766
+ grad_output_types_ = [
767
+ torch.Tensor if x is FakeTensor else x for x in grad_output_types
768
+ ]
769
+ assert (
770
+ grad_output_types_ == CompiledFunction.metadata.output_types
771
+ ), f"""\
772
+ We incorrectly attempted to compile the backward with incorrect subclass metadata.
773
+ If you run into this error, please file an issue.
774
+ Expected grad_output types: {str(CompiledFunction.metadata.output_types)}
775
+ Got grad_output types: {str(grad_output_types)}"""
776
+
777
+ # TODO: figure out how to refactor the backward properly so I can use aot_dispatch_subclass_wrapper() here.
778
+ if CompiledFunction.maybe_subclass_metadata is not None:
779
+ # Get the number of tangents after unwrapping
780
+ len_tangents = len(
781
+ unwrap_tensor_subclasses(
782
+ all_args[tangents_start_idx:tangents_end_idx],
783
+ is_joint_structure=False,
784
+ )
785
+ )
786
+ all_args = unwrap_tensor_subclasses(all_args, is_joint_structure=False)
787
+ tangents_start_idx = len(all_args) - len_tangents - len(rng_args)
788
+ tangents_end_idx = tangents_start_idx + len_tangents
789
+
790
+ # Make the tangents contiguous. Note that we must do this after subclass desugaring
791
+ # because inputs to inductor have to be contiguous
792
+ all_args = [
793
+ t.contiguous()
794
+ if (
795
+ (tangents_start_idx <= i < tangents_end_idx)
796
+ and (not t.is_contiguous())
797
+ )
798
+ else t
799
+ for i, t in enumerate(all_args)
800
+ ]
801
+
802
+ def call_compiled_backward():
803
+ if ctx._is_compiled_autograd_tracing():
804
+ # For compiled autograd, run raw FX graph so that it can be inlined into the larger graph
805
+ symints = ctx._get_compiled_autograd_symints()
806
+ assert len(symints) == len(ctx.symints)
807
+ all_args[: len(symints)] = symints
808
+ if backward_state_indices:
809
+ assert ctx._compiled_autograd_backward_state.proxy is not None
810
+ all_args.append(ctx._compiled_autograd_backward_state)
811
+ context = torch._C._DisableAutocast if disable_amp else nullcontext
812
+ with context():
813
+ out = normalize_as_list(bw_module(*all_args))
814
+ out = functionalized_rng_runtime_epilogue(
815
+ CompiledFunction.metadata, out
816
+ )
817
+ return tuple(out)
818
+ assert (
819
+ not backward_state_indices
820
+ ), "BackwardState requires CompiledAutograd"
821
+ ctx.maybe_clear_saved_tensors()
822
+ if CompiledFunction.compiled_bw is None:
823
+ context = torch._C._DisableAutocast if disable_amp else nullcontext
824
+ with tracing(saved_context), context(), track_graph_compiling(
825
+ aot_config, "backward"
826
+ ):
827
+ CompiledFunction.compiled_bw = aot_config.bw_compiler(
828
+ bw_module, placeholder_list
829
+ )
830
+
831
+ out = call_func_at_runtime_with_args(
832
+ CompiledFunction.compiled_bw,
833
+ all_args,
834
+ steal_args=True,
835
+ disable_amp=disable_amp,
836
+ )
837
+
838
+ out = functionalized_rng_runtime_epilogue(
839
+ CompiledFunction.metadata, out
840
+ )
841
+ return tuple(out)
842
+
843
+ if torch.is_grad_enabled() and any(
844
+ t.requires_grad for t in all_args if isinstance(t, torch.Tensor)
845
+ ):
846
+ # Ensure that the graph is connected, and error if double backward is performed.
847
+ # See comment for why once_differentiable is not sufficient:
848
+ # https://github.com/pytorch/pytorch/pull/92348/files#r1072962107
849
+ class CompiledFunctionBackward(torch.autograd.Function):
850
+ # CompiledFunctionBackward is not yet supported in dynamo skipfiles
851
+ _compiled_autograd_should_lift = False
852
+
853
+ @staticmethod
854
+ def forward(ctx, *unused_args):
855
+ outs = call_compiled_backward()
856
+ # TODO: figure out how to refactor the backward properly so I can use aot_dispatch_subclass_wrapper() here.
857
+ if CompiledFunction.maybe_subclass_metadata is not None:
858
+ assert (
859
+ CompiledFunction.maybe_subclass_metadata.grad_input_metas
860
+ is not None
861
+ )
862
+ outs_wrapped = wrap_tensor_subclasses(
863
+ outs,
864
+ subclass_metas=CompiledFunction.maybe_subclass_metadata.grad_input_metas,
865
+ )
866
+ return outs_wrapped
867
+ return outs
868
+
869
+ @staticmethod
870
+ def backward(ctx, *args):
871
+ raise RuntimeError(
872
+ "torch.compile with aot_autograd does not currently support double backward"
873
+ )
874
+
875
+ CompiledFunctionBackward._compiled_autograd_key = ( # type: ignore[method-assign]
876
+ CompiledFunction._compiled_autograd_key
877
+ )
878
+
879
+ # Pass args even though they're unused, so that the graph is built
880
+ out = CompiledFunctionBackward.apply(*all_args)
881
+ else:
882
+ out = call_compiled_backward()
883
+
884
+ # TODO: figure out how to refactor the backward properly so I can use aot_dispatch_subclass_wrapper() here.
885
+ if CompiledFunction.maybe_subclass_metadata is not None:
886
+ assert (
887
+ CompiledFunction.maybe_subclass_metadata.grad_input_metas
888
+ is not None
889
+ )
890
+ outs_wrapped = wrap_tensor_subclasses(
891
+ out,
892
+ subclass_metas=CompiledFunction.maybe_subclass_metadata.grad_input_metas,
893
+ )
894
+ return outs_wrapped
895
+ return out
896
+
897
+ compiled_function = create_runtime_wrapper(
898
+ CompiledFunction.apply,
899
+ runtime_metadata=fw_metadata,
900
+ indices_of_inps_to_detach=_indices_of_inps_to_detach,
901
+ trace_joint=True,
902
+ keep_input_mutations=aot_config.keep_inference_input_mutations,
903
+ disable_amp=disable_amp,
904
+ )
905
+
906
+ if not config.debug_assert:
907
+ return compiled_function
908
+
909
+ flat_requires_grad = [
910
+ a.requires_grad if isinstance(a, Tensor) else None for a in flat_args
911
+ ]
912
+
913
+ @wraps(compiled_function)
914
+ def debug_compiled_function(*args):
915
+ # TODO: Check aliasing relationships
916
+ # TODO: Check strides for metadata mutation
917
+ # (NB: ideally, this logic is factored out of this function and
918
+ # you move these debug checks there)
919
+
920
+ # Check requires grad. Bad case is when we compiled with
921
+ # requires_grad = False, but input requires_grad = True
922
+ # (vice versa is OK; we compute a gradient and then throw
923
+ # it away when it hits the input.)
924
+ for i, a in enumerate(args):
925
+ can_require_grad = flat_requires_grad[i]
926
+ if can_require_grad is None:
927
+ assert not isinstance(a, Tensor)
928
+ elif not can_require_grad:
929
+ assert not a.requires_grad, format_guard_bug_msg(
930
+ aot_config,
931
+ f"{describe_input(i, aot_config)} would not require grad",
932
+ )
933
+
934
+ return compiled_function(*args)
935
+
936
+ return debug_compiled_function
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/runtime_wrappers.py ADDED
@@ -0,0 +1,1021 @@
1
+ """
2
+ This module defines runtime wrappers, which, based on previous analysis, attempt to:
3
+ 1. process the inputs and outputs
4
+ 2. apply mutations
5
+ 3. handle functionalized randomness
6
+ 4. deduplicate inputs and consolidate views into their bases (see input_output_analysis)
7
+ """
8
+
9
+ import collections
10
+ import pprint
11
+ from functools import wraps
12
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
13
+
14
+ import torch
15
+ import torch.utils.dlpack
16
+ from torch import Tensor
17
+ from torch._guards import DuplicateInputs, TracingContext
18
+ from torch._prims_common import CUDARngStateHelper
19
+ from torch.multiprocessing.reductions import StorageWeakRef
20
+ from .. import config
21
+ from .collect_metadata_analysis import run_functionalized_fw_and_collect_metadata
22
+
23
+ from .functional_utils import gen_alias_from_base
24
+ from .input_output_analysis import (
25
+ compute_overlapping_inputs,
26
+ create_synthetic_base_metadata,
27
+ remove_dupe_metadata,
28
+ )
29
+ from .logging_utils import describe_input, format_guard_bug_msg
30
+ from .schemas import (
31
+ AOTConfig,
32
+ InputAliasInfo,
33
+ OutputType,
34
+ SubclassCreationMeta,
35
+ TensorAlias,
36
+ ViewAndMutationMeta,
37
+ )
38
+ from .subclass_utils import (
39
+ requires_subclass_dispatch,
40
+ unwrap_tensor_subclasses,
41
+ wrap_tensor_subclasses,
42
+ )
43
+
44
+ from .utils import (
45
+ call_func_at_runtime_with_args,
46
+ make_boxed_func,
47
+ partial_flatten_asdict,
48
+ strict_zip,
49
+ )
50
+
51
+
52
+ zip = strict_zip
53
+
54
+
55
+ # The wrapper created by this function handles all of the runtime aliasing and mutation "epilogue" logic
56
+ # that needs to run after the compiled function.
57
+ #
58
+ # This function accepts a trace_joint flag, indicating whether or not we're generating the runtime
59
+ # epilogue for a forward-only inference graph, or for an autograd.Function.apply function.
60
+ # This is because there are some minor differences in how we treat these cases at runtime:
61
+ # - resize_() is currently handled in the inference case, but not fully handled in the autograd case.
62
+ # - the autograd case inserts TensorAlias wrapper objects for outputs that alias inputs
63
+ def create_runtime_wrapper(
64
+ compiled_fn,
65
+ *,
66
+ runtime_metadata: ViewAndMutationMeta,
67
+ indices_of_inps_to_detach: List[int],
68
+ trace_joint: bool,
69
+ keep_input_mutations: bool,
70
+ disable_amp: bool,
71
+ ):
72
+ num_tokens = len(runtime_metadata.tokens)
73
+
74
+ if not hasattr(compiled_fn, "_boxed_call"):
75
+ compiled_fn = make_boxed_func(compiled_fn)
76
+
77
+ def runtime_wrapper(*args):
78
+ # Pass in effect tokens (See Note [Side-Effectful Tokens in AOTAutograd])
79
+ args = (*[torch.tensor([])] * num_tokens, *args)
80
+
81
+ if trace_joint:
82
+ args_ = list(args)
83
+ # See Note [Detaching inputs that never need gradients]
84
+ for idx in indices_of_inps_to_detach:
85
+ if isinstance(args_[idx], torch.Tensor):
86
+ args_[idx] = args_[idx].detach()
87
+ with torch.autograd._force_original_view_tracking(True):
88
+ all_outs = call_func_at_runtime_with_args(
89
+ compiled_fn,
90
+ args_,
91
+ disable_amp=disable_amp,
92
+ )
93
+ else:
94
+ # When we have an inference graph, we run with torch.no_grad.
95
+ # It's possible to get an inference graph with inputs that require grad,
96
+ # in which case we want to make sure autograd is disabled
97
+ # (since e.g., inductor will generate aten.addmm.out calls which autograd will complain on)
98
+ if torch.is_grad_enabled():
99
+ with torch.no_grad():
100
+ all_outs = call_func_at_runtime_with_args(
101
+ compiled_fn,
102
+ args,
103
+ disable_amp=disable_amp,
104
+ )
105
+ else:
106
+ all_outs = call_func_at_runtime_with_args(
107
+ compiled_fn,
108
+ args,
109
+ disable_amp=disable_amp,
110
+ )
111
+
112
+ num_mutated_runtime_inps = runtime_metadata.num_mutated_inp_runtime_indices
113
+ num_intermediate_bases = runtime_metadata.num_intermediate_bases
114
+
115
+ if keep_input_mutations and trace_joint:
116
+ num_input_mutations_handled_by_autograd = (
117
+ runtime_metadata.num_mutated_graph_handled_indices_seen_by_autograd
118
+ )
119
+ # autograd.Function requires us to return the mutated inputs as extra outputs to the autograd.Function.forward
120
+ if num_input_mutations_handled_by_autograd > 0:
121
+ all_outs = all_outs[:-num_input_mutations_handled_by_autograd]
122
+
123
+ assert (
124
+ len(all_outs)
125
+ == num_mutated_runtime_inps
126
+ + runtime_metadata.num_outputs
127
+ + num_intermediate_bases
128
+ + num_tokens
129
+ )
130
+
131
+ # Toss out the effect tokens (See Note [Side-Effectful Tokens in AOTAutograd])
132
+ all_outs = all_outs[num_tokens:]
133
+
134
+ # Step 3: After running the compiled fw, apply updates to mutated inputs
135
+ num_mutations_to_apply = runtime_metadata.num_mutated_inp_runtime_indices
136
+ if num_mutations_to_apply > 0:
137
+ updated_inputs = all_outs[:num_mutations_to_apply]
138
+ fw_outs = all_outs[num_mutations_to_apply:]
139
+
140
+ for i, inpt_idx in enumerate(runtime_metadata.mutated_inp_runtime_indices):
141
+ meta = runtime_metadata.input_info[inpt_idx]
142
+ if not meta.mutates_data and not meta.mutates_metadata:
143
+ continue
144
+ original_inpt = args[inpt_idx]
145
+ updated_inpt = updated_inputs[i]
146
+ if meta.mutates_storage_metadata:
147
+ # mutates_storage_metadata means our input saw a x.set_(y) call.
148
+ # What if x **also** saw a data and/or a metadata mutation?
149
+ # (1) If the [meta]data mutation occurred after the set_(),
150
+ # then there is no need to copy_() the data.
151
+ # When we perform x.set_(x_updated), we are guaranteed that
152
+ # x_updated already has the final version of the data/metadata
153
+ # (2) If a data mutation occurred before the set_().
154
+ # This case seems very difficult to support.
155
+ # TODO: discuss on the PR and decide if we want to try to
156
+ # either support it, or detect and ban it.
157
+ if trace_joint:
158
+ assert isinstance(updated_inpt, TensorAlias)
159
+ updated_inpt = updated_inpt.alias
160
+ with torch.no_grad():
161
+ original_inpt.set_(updated_inpt)
162
+ continue
163
+ if meta.mutates_metadata and not meta.mutates_data:
164
+ if trace_joint:
165
+ assert isinstance(updated_inpt, TensorAlias)
166
+ updated_inpt = updated_inpt.alias
167
+ # We need to grab the size/stride/storage_offset from the compiled forward,
168
+ # and use that to mutate the metadata of the input
169
+ original_inpt.as_strided_(
170
+ updated_inpt.size(),
171
+ updated_inpt.stride(),
172
+ updated_inpt.storage_offset(),
173
+ )
174
+ else:
175
+ if meta.mutates_data and meta.mutates_metadata:
176
+ original_inpt.as_strided_(
177
+ updated_inpt.size(),
178
+ updated_inpt.stride(),
179
+ updated_inpt.storage_offset(),
180
+ )
181
+ else:
182
+ assert meta.mutates_data
183
+ if meta.is_leaf and original_inpt.requires_grad:
184
+ # We can hit this situation in this case:
185
+ # def f(x):
186
+ # x.detach().mul_(2)
187
+ # return x + 1
188
+ # AOTAutograd will see a mutation in the above case, and try to
189
+ # apply a copy_() here, in the epilogue.
190
+ # But if x required gradients, and is a leaf, then autograd
191
+ # will yell at us for trying to mutate it.
192
+ # However, it's only possible to end up in this scenario (like the above)
193
+ # if all of the mutations to the leaf input were non-autograd-tracking mutations
194
+ # (aka mutations under no_grad(), or on detached views).
195
+ # In that case, we fully want to hide the mutation from autograd, so detaching is ok.
196
+ original_inpt.detach().copy_(updated_inpt)
197
+ else:
198
+ original_inpt.copy_(updated_inpt)
199
+ else:
200
+ fw_outs = all_outs
201
+
202
+ # Step 4: Manually regenerate any outputs that are aliased to inputs, instead of
203
+ # compiling them.
204
+ if runtime_metadata.num_outputs_aliased > 0:
205
+ # The compiled forward also returned intermediate bases. We don't want to return them to the user.
206
+ if runtime_metadata.num_intermediate_bases > 0:
207
+ fw_outs_no_intermediate_bases = fw_outs[
208
+ : -runtime_metadata.num_intermediate_bases
209
+ ]
210
+ intermediate_bases = fw_outs[-runtime_metadata.num_intermediate_bases :]
211
+ else:
212
+ fw_outs_no_intermediate_bases = fw_outs
213
+ intermediate_bases = []
214
+
215
+ assert len(fw_outs_no_intermediate_bases) == len(
216
+ runtime_metadata.output_info
217
+ )
218
+ fw_outs_including_aliases = []
219
+ for i, (o, info) in enumerate(
220
+ zip(fw_outs_no_intermediate_bases, runtime_metadata.output_info)
221
+ ):
222
+ if info.output_type in [
223
+ OutputType.non_alias,
224
+ OutputType.unsafe_view_alias,
225
+ OutputType.custom_function_view,
226
+ ]:
227
+ fw_outs_including_aliases.append(o)
228
+ continue
229
+ if trace_joint:
230
+ assert isinstance(o, TensorAlias)
231
+ o_ = o.alias
232
+ else:
233
+ o_ = o
234
+
235
+ o_grad = runtime_metadata.output_info[i].requires_grad
236
+ if info.output_type == OutputType.alias_of_input:
237
+ aliased_base_tensor = args[info.base_idx] # type: ignore[index]
238
+ regenerated_out = gen_alias_from_base(
239
+ aliased_base_tensor, o_, o_grad
240
+ )
241
+ fw_outs_including_aliases.append(regenerated_out)
242
+ continue
243
+ elif info.output_type == OutputType.is_input:
244
+ aliased_base_tensor = args[info.base_idx] # type: ignore[index]
245
+ regenerated_out = aliased_base_tensor
246
+ fw_outs_including_aliases.append(regenerated_out)
247
+ continue
248
+ elif info.output_type == OutputType.alias_of_intermediate:
249
+ base_tensor_list = intermediate_bases
250
+ elif (
251
+ info.output_type == OutputType.alias_of_intermediate_save_as_output
252
+ ):
253
+ base_tensor_list = intermediate_bases
254
+ else:
255
+ assert (
256
+ info.output_type
257
+ == OutputType.alias_of_intermediate_base_is_user_output
258
+ )
259
+ base_tensor_list = fw_outs_no_intermediate_bases
260
+ aliased_base_tensor = base_tensor_list[info.base_idx]
261
+ # TODO: handle the custom autograd function case here.
262
+ # We need a way to check whether a tensor came from a custom autograd fn from python,
263
+ # AND a way to replay that custom view fn.
264
+ regenerated_out = gen_alias_from_base(aliased_base_tensor, o_, o_grad)
265
+ fw_outs_including_aliases.append(regenerated_out)
266
+ ret_outs = fw_outs_including_aliases
267
+ else:
268
+ ret_outs = fw_outs
269
+
270
+ if runtime_metadata.dynamic_outputs:
271
+ for t, o in zip(ret_outs, runtime_metadata.output_info):
272
+ if o.dynamic_dims is None:
273
+ continue
274
+ if hasattr(t, "_dynamo_weak_dynamic_indices"):
275
+ t._dynamo_weak_dynamic_indices |= o.dynamic_dims
276
+ else:
277
+ t._dynamo_weak_dynamic_indices = o.dynamic_dims.copy()
278
+ if runtime_metadata.grad_enabled_mutation is not None:
279
+ torch.set_grad_enabled(runtime_metadata.grad_enabled_mutation)
280
+ return ret_outs
281
+
282
+ return runtime_wrapper
283
+
284
+
285
+ # Calling convention: If we are running functionalized RNG, then outs consists
286
+ # of (user_outs, rng_offset)
287
+ def functionalized_rng_runtime_epilogue(
288
+ metadata: ViewAndMutationMeta, outs, return_new_outs=True
289
+ ):
290
+ if metadata.is_rng_op_functionalized:
291
+ assert metadata.num_outputs_rng_offset == 1
292
+ new_rng_offset = outs[-1]
293
+ CUDARngStateHelper.set_new_offset(new_rng_offset)
294
+ if return_new_outs:
295
+ user_outs = outs[:-1]
296
+ return user_outs
297
+ else:
298
+ return None
299
+ return outs
300
+
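+ # Illustrative sketch (assumed names/shapes, not taken from this file): when
+ # metadata.is_rng_op_functionalized is True, the graph's outputs are laid out as
+ # (*user_outs, new_rng_offset), so conceptually:
+ #
+ # >>> outs = (out0, out1, new_rng_offset)
+ # >>> functionalized_rng_runtime_epilogue(metadata, outs)  # advances the CUDA RNG offset
+ # (out0, out1)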
301
+
302
+ # This wrapper handles the AOTDispatch runtime logic for tensor subclasses.
303
+ # At runtime, we have a compiled function that knows how to operate on the domain of DenseTensor -> DenseTensor,
304
+ # But the user might have passed us some tensor subclass inputs (or expect some subclass tensor outputs).
305
+ # This function handles the wrapping and unwrapping of tensor subclasses at runtime.
306
+ def aot_dispatch_subclass_wrapper(
307
+ runtime_fn: Callable,
308
+ *,
309
+ subclass_metas: List[Union[int, SubclassCreationMeta]],
310
+ num_fw_outs_saved_for_bw: Optional[int],
311
+ ) -> Callable:
312
+ def inner_fn(args):
313
+ unwrapped_args = unwrap_tensor_subclasses(args, is_joint_structure=False)
314
+ # expectation: runtime_fn is a boxed fn
315
+ unwrapped_outs = runtime_fn(unwrapped_args)
316
+ wrapped_outs = wrap_tensor_subclasses(
317
+ unwrapped_outs,
318
+ subclass_metas=subclass_metas,
319
+ num_fw_outs_saved_for_bw=num_fw_outs_saved_for_bw,
320
+ is_runtime=True,
321
+ )
322
+ return wrapped_outs
323
+
324
+ # box it
325
+ inner_fn._boxed_call = True # type: ignore[attr-defined]
326
+ return inner_fn
327
+
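+ # Illustrative sketch ("TwoTensor" here is a hypothetical wrapper subclass holding two
+ # inner dense tensors; it is not defined in this file): for an input TwoTensor(a, b),
+ # the wrapper unwraps it into the dense args [a, b] before calling the compiled fn and
+ # re-wraps the dense outputs using subclass_metas, roughly:
+ #
+ # >>> wrapped = aot_dispatch_subclass_wrapper(
+ # ...     compiled_fn,
+ # ...     subclass_metas=fw_meta.subclass_fw_graph_out_meta,
+ # ...     num_fw_outs_saved_for_bw=None,
+ # ... )
+ # >>> outs = wrapped([TwoTensor(a, b)])  # boxed convention: a single list of args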
328
+
329
+ # MOTIVATION:
330
+ #
331
+ # When tracing functions for future execution, one must be careful not to pass
332
+ # in the same input tensor multiple times (e.g., f(x, x)), as this can result
333
+ # in graphs that are ONLY valid if you later pass a new tensor in exactly the
334
+ # same way (e.g., f(y, y)). (NB: we really mean duplicate; two distinct
335
+ # tensors that alias each other is a different situation that is covered by
336
+ # aot_dispatch_deduplicated_autograd). Here are two examples:
337
+ #
338
+ # (1) Suppose you have a function:
339
+ #
340
+ # def f(x, y):
341
+ # return x + y
342
+ #
343
+ # If you make_fx(f)(x, x), you will trace out:
344
+ #
345
+ # def f(x, y):
346
+ # return y + y
347
+ #
348
+ # Oops!
349
+ #
350
+ # (2) For most tensors x and y, you can compute f's gradient with respect to
351
+ # these two inputs by saying torch.autograd.grad(f(x, y), (x, y)). However,
352
+ # if x is y, you will trace out a program that gets incorrect gradients:
353
+ #
354
+ # >>> x = torch.randn(1, requires_grad=True)
355
+ # >>> torch.autograd.grad(x + x, (x, x))
356
+ # (tensor([2.]), tensor([2.]))
357
+ #
358
+ # In other words, the gradient is double-counted. Deduplicating the arguments
359
+ # gives you an appropriate gradient:
360
+ #
361
+ # >>> y = torch.randn(1, requires_grad=True)
362
+ # >>> torch.autograd.grad(x + y, (x, y))
363
+ # (tensor([1.]), tensor([1.]))
364
+ #
365
+ # HOW TO DEDUPLICATE:
366
+ #
367
+ # There are a few strategies, in order of preference:
368
+ #
369
+ # 1. For every duplicate argument to the function, detach it into
370
+ # a separate leaf tensor, so that it is no longer duplicated.
371
+ #
372
+ # PRO: The resulting compiled graph works for any configuration
373
+ # of duplicated arguments.
374
+ #
375
+ # CON: It does not (naively) work if you mutate the metadata of inputs:
376
+ #
377
+ # def f(x, y):
378
+ # x.transpose_(0, 1)
379
+ # y.transpose_(0, 2)
380
+ #
381
+ # x = torch.randn(2, 3, 4)
382
+ # f(x, x)
383
+ #
384
+ # The ordering of the transposes inside f dictates whether
385
+ # you get [4, 2, 3] or [3, 4, 2]. This means that you cannot precompute
386
+ # what metadata mutations should get applied to each input; you need to
387
+ # assume they aren't duplicates (what we do today) or preserve
388
+ # the original metadata mutations exactly in order, so that they work
389
+ # for any duplicate configuration.
390
+ #
391
+ # CON: It does not (naively) work if you mutate the data of inputs.
392
+ # In particular, leaf tensors that require grad cannot be mutated,
393
+ # this makes it impossible to differentiate with respect to the original
394
+ # base.
395
+ #
396
+ # 2. For every duplicate argument to the function, remove it, so it is
397
+ # no longer part of the "true" signature:
398
+ #
399
+ # PRO: Implemented naively, it still works for metadata/data mutation.
400
+ #
401
+ # CON: The resulting compiled graph is duplicate-specialized: it only
402
+ # works if future calls duplicate arguments in exactly the same way.
403
+ # Horribly, Dynamo doesn't guard on this at the moment. But even if
404
+ # it did, you could still end up recompiling a bunch: once for each distinct duplication pattern.
405
+ #
406
+ # Our strategy is to do (1) if we can, and do (2) otherwise, erroring if
407
+ # Dynamo's guards are not enough. In practice, this seems to cover
408
+ # everything.
409
+ #
410
+ def aot_wrapper_dedupe(
411
+ flat_fn,
412
+ flat_args: List[Tensor],
413
+ aot_config: AOTConfig,
414
+ *,
415
+ compiler_fn,
416
+ fw_metadata,
417
+ ):
418
+ # Use information about whether or not flat_fn mutates its arguments
419
+ # or not to handle dupe args
420
+
421
+ # Strategy 1: For any input that is not mutated, we can leafify it if we
422
+ # need to remove a duplicate.
423
+ leaf_flat_args = []
424
+ args_set = set()
425
+ ok = True
426
+
427
+ for i, a in enumerate(flat_args):
428
+ if not isinstance(a, torch.Tensor):
429
+ leaf_flat_args.append(a)
430
+ elif a not in args_set:
431
+ args_set.add(a)
432
+ leaf_flat_args.append(a)
433
+ elif (
434
+ not fw_metadata.input_info[i].mutates_data
435
+ and not fw_metadata.input_info[i].mutates_metadata
436
+ ):
437
+ leaf_flat_args.append(a.detach().requires_grad_(a.requires_grad))
438
+ else:
439
+ ok = False
440
+ break
441
+
442
+ if ok:
443
+ return compiler_fn(flat_fn, leaf_flat_args, aot_config, fw_metadata=fw_metadata)
444
+
445
+ if requires_subclass_dispatch(leaf_flat_args, fw_metadata):
446
+ raise RuntimeError(
447
+ """\
448
+ Encountered duplicate inputs that are mutated in the graph, but at least one input/output
449
+ to the graph is a tensor subclass. This is not supported today. You can try to
450
+ remove the aliasing yourself as a workaround, or otherwise file an issue on github."""
451
+ )
452
+
453
+ # export path: ban duplicate inputs for now, add later if requested.
454
+ if aot_config.is_export:
455
+ raise RuntimeError(
456
+ f"""\
457
+ Encountered duplicated inputs that are mutated in the graph you are trying to export.
458
+ This functionality is currently not supported. If needed, please file a github issue.
459
+
460
+ fw_metadata={str(fw_metadata)}
461
+ """
462
+ )
463
+
464
+ # Strategy 2: Duplicate specialize.
465
+ #
466
+ # In Haskell types, suppose you have:
467
+ #
468
+ # add_dupe_args :: DedupedArgs -> Args
469
+ # remove_dupe_args :: Args -> DedupedArgs
470
+ #
471
+ # compiler_fn
472
+ # :: (DedupedArgs -> R) -> DedupedArgs -> AOTConfig -> (DedupedArgs -> R)
473
+ # deduped_compiler_fn
474
+ # :: (Args -> R) -> Args -> AOTConfig -> (Args -> R)
475
+ #
476
+ # Then the code below can be written in point-free style as:
477
+ #
478
+ # deduped_compiler_fn f a c =
479
+ # compiler_fn (f . add_dupe_args) (remove_dupe_args a) c . remove_dupe_args
480
+ #
481
+ # Suppose you have:
482
+ #
483
+ # [a, b, a, c]
484
+ #
485
+ # We want:
486
+ #
487
+ # remove_dupe_args([a, b, a, c]) == [a, b, c]
488
+ # add_dupe_args([a, b, c]) == [a, b, a, c]
489
+ #
490
+ # This is done via (respectively):
491
+ #
492
+ # seen_args = {a: 0, b: 1, c: 2}
493
+ # enumerate(add_dupe_map) = [ # how to get args from the deduped list
494
+ # (0, 0),
495
+ # (1, 1),
496
+ # (2, 0),
497
+ # (3, 2),
498
+ # ]
499
+ # keep_arg_mask = [True, True, False, True]
500
+
501
+ seen_args: Dict[Tensor, int] = {}
502
+ keep_arg_mask = []
503
+ # Implicitly map duped arg position (list index) to de-duped arg position
504
+ add_dupe_map: List[int] = []
505
+ duped_arg_len = len(flat_args)
506
+
507
+ j = 0 # index into deduped_flat_args
508
+ for t in flat_args:
509
+ if isinstance(t, torch.Tensor):
510
+ if t in seen_args:
511
+ keep_arg_mask.append(False)
512
+ add_dupe_map.append(seen_args[t])
513
+ continue
514
+ seen_args[t] = j
515
+
516
+ keep_arg_mask.append(True)
517
+ add_dupe_map.append(j)
518
+ j += 1
519
+ assert (
520
+ len(add_dupe_map) == duped_arg_len
521
+ ), f"Expects add_dupe_map to have length {duped_arg_len} but got {len(add_dupe_map)}"
522
+
523
+ # NB: Hot path, avoid set lookups here
524
+ # TODO: Can avoid the zip here too, probably
525
+ def remove_dupe_args(args):
526
+ return [t for t, keep in zip(args, keep_arg_mask) if keep]
527
+
528
+ def add_dupe_args(args):
529
+ return [args[add_dupe_map[i]] for i in range(duped_arg_len)]
530
+
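+ # Illustrative sketch (reusing the [a, b, a, c] example above): here
+ # keep_arg_mask == [True, True, False, True] and add_dupe_map == [0, 1, 0, 2], so
+ # >>> remove_dupe_args([a, b, a, c])  # -> [a, b, c]
+ # >>> add_dupe_args([a, b, c])        # -> [a, b, a, c]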
531
+ deduped_flat_args = remove_dupe_args(flat_args)
532
+
533
+ # Update our input metadata to remove duped input metadata.
534
+ updated_fw_metadata = remove_dupe_metadata(fw_metadata, keep_arg_mask, add_dupe_map)
535
+
536
+ if (
+ tracing_context := TracingContext.try_get()
+ ) and aot_config.aot_autograd_arg_pos_to_source:
540
+ # TODO(voz): This structure is 1:1, we could consider an alternate structure like
541
+ # kept_pos:[dupe_arg_pos], however, add_dupe_map is 1:1 so we would need a new structure there,
542
+ # which feels like needless complexity for a tiny bit of efficiency at this point.
543
+ for dupe_arg_pos, (kept_pos, keep_arg) in enumerate(
544
+ zip(add_dupe_map, keep_arg_mask)
545
+ ):
546
+ if not keep_arg:
547
+ dupe_arg_source = aot_config.aot_autograd_arg_pos_to_source[
548
+ dupe_arg_pos
549
+ ]
550
+ kept_arg_source = aot_config.aot_autograd_arg_pos_to_source[kept_pos]
551
+ tracing_context.guards_context.aotautograd_guards.append( # type: ignore[attr-defined]
552
+ DuplicateInputs(kept_arg_source, dupe_arg_source)
553
+ )
554
+
555
+ @wraps(flat_fn)
556
+ def wrapped_flat_fn(*args):
557
+ return flat_fn(*add_dupe_args(args))
558
+
559
+ if config.debug_assert:
560
+ ref_fw_metadata = run_functionalized_fw_and_collect_metadata(
561
+ wrapped_flat_fn,
562
+ keep_input_mutations=fw_metadata.keep_input_mutations,
563
+ is_train=fw_metadata.is_train,
564
+ )(*deduped_flat_args)
565
+ assert (
566
+ ref_fw_metadata == updated_fw_metadata
567
+ ), f"ref_metadata={str(ref_fw_metadata)}, actual_metadata={str(updated_fw_metadata)}"
568
+
569
+ compiled_fn = compiler_fn(
570
+ wrapped_flat_fn, deduped_flat_args, aot_config, fw_metadata=updated_fw_metadata
571
+ )
572
+
573
+ if not hasattr(compiled_fn, "_boxed_call"):
574
+ compiled_fn = make_boxed_func(compiled_fn)
575
+
576
+ @wraps(compiled_fn)
577
+ def wrapped_compiled_fn(args):
578
+ deduped_args = remove_dupe_args(args)
579
+ args.clear()
580
+ return compiled_fn(deduped_args)
581
+
582
+ wrapped_compiled_fn._boxed_call = True # type: ignore[attr-defined]
583
+
584
+ # This can be uncommented when we properly guard for duplicates,
585
+ # but right now we must not do it.
586
+ # if not config.debug_assert:
587
+ # return wrapped_compiled_fn
588
+
589
+ @wraps(wrapped_compiled_fn)
590
+ def debugged_compiled_fn(args):
591
+ # Test that the computed remove/add arg functions are an inverse
592
+ new_args = add_dupe_args(remove_dupe_args(args))
593
+ seen: Dict[Any, None] = {}
594
+ for i, (x, y) in enumerate(zip(new_args, args)):
595
+ seen[y] = None
596
+ assert x is y, format_guard_bug_msg(
597
+ aot_config,
598
+ f"{describe_input(i, aot_config)} would be a duplicate of "
599
+ f"{describe_input(add_dupe_map[i], aot_config)}",
600
+ )
601
+ # This is only an error if there is metadata mutation on both of
602
+ # the duped arguments; in this case, we need to know what order
603
+ # the metadata mutation applies in. You'll get the correct result
604
+ # otherwise, because a graph that assumes distinct inputs works if
605
+ # you dupe the inputs (the gradient contributions from each input
606
+ # will get summed up appropriately.)
607
+ #
608
+ # TODO: work out how to setup this assert correctly
609
+ """
610
+ assert len(seen) == unique_args, format_guard_bug_msg(aot_config,
611
+ f"there would be {unique_args} distinct arguments"
612
+ )
613
+ """
614
+ return wrapped_compiled_fn(args)
615
+
616
+ debugged_compiled_fn._boxed_call = True # type: ignore[attr-defined]
617
+
618
+ return debugged_compiled_fn
619
+
620
+
621
+ # This layer handles the situation where you have two inputs that alias each other,
622
+ # and one of the inputs is mutated.
623
+ # We need to take special care to ensure that the mutation is applied to the other aliases in the graph.
624
+ #
625
+ # pre-condition: aot_wrapper_dedupe has already run.
626
+ # (This function will in theory work if there are duplicate args.
627
+ # However, the synthetic base code path is a bit sub-optimal, and running with dupe'd inputs
628
+ # would cause us to hit that path more frequently).
629
+ def aot_wrapper_synthetic_base(
630
+ flat_fn,
631
+ flat_args: List[Tensor],
632
+ aot_config: AOTConfig,
633
+ *,
634
+ fw_metadata: ViewAndMutationMeta,
635
+ # Currently, the only reason we need to plumb this bool is because
636
+ # the synthetic base code prohibits more cases in the autograd case than the inference case.
637
+ needs_autograd: bool,
638
+ compiler_fn,
639
+ ):
640
+ is_inference = not needs_autograd
641
+ flat_args_with_synthetic_bases, synthetic_base_info = merge_view_inputs(
642
+ flat_args,
643
+ fw_metadata.input_info,
644
+ is_inference=is_inference,
645
+ )
646
+ # Happy path: we don't need synthetic bases
647
+ if synthetic_base_info is None:
648
+ return compiler_fn(flat_fn, flat_args, aot_config, fw_metadata=fw_metadata)
649
+
650
+ # export path: ban synthetic bases for now, add later if requested.
651
+ if requires_subclass_dispatch(flat_args, fw_metadata):
652
+ raise RuntimeError(
653
+ """\
654
+ Encountered aliased inputs that are mutated in the graph, but at least one input/output
655
+ to the graph is a tensor subclass. This is not supported today. You can try to
656
+ remove the aliasing yourself as a workaround, or otherwise file an issue on github."""
657
+ )
658
+
659
+ if aot_config.is_export:
660
+ raise RuntimeError(
661
+ f"""\
662
+ Encountered aliased inputs that are mutated in the graph you are trying to export.
663
+ This functionality is currently not supported. If needed, please file a github issue.
664
+
665
+ synthetic_base_info={str(synthetic_base_info)}
666
+
667
+ fw_metadata={str(fw_metadata)}
668
+ """
669
+ )
670
+
671
+ assert len(fw_metadata.input_info) == len(synthetic_base_info)
672
+
673
+ # Update our forward metadata to take synthetic bases into account
674
+ (
675
+ fw_metadata_updated,
676
+ aliased_arg_idx_with_metadata_mutations,
677
+ ) = create_synthetic_base_metadata(
678
+ fw_metadata, synthetic_base_info, flat_args, flat_args_with_synthetic_bases
679
+ )
680
+
681
+ num_aliased_args_with_metadata_mutations = len(
682
+ aliased_arg_idx_with_metadata_mutations
683
+ )
684
+
685
+ def _unpack_synthetic_bases(primals: Tuple[Any, ...]) -> List[Any]:
686
+ f_args_inner = []
687
+ for inner_idx_or_tuple in synthetic_base_info:
688
+ if isinstance(inner_idx_or_tuple, int):
689
+ f_args_inner.append(primals[inner_idx_or_tuple])
690
+ else:
691
+ inner_base_idx, view_tensor = inner_idx_or_tuple
692
+ base = primals[inner_base_idx]
693
+ view_arg = gen_alias_from_base(
694
+ base, view_tensor, view_tensor.requires_grad
695
+ )
696
+ f_args_inner.append(view_arg)
697
+ return f_args_inner
698
+
699
+ @wraps(flat_fn)
700
+ def wrapped_flat_fn(*args):
701
+ unpacked_args = _unpack_synthetic_bases(args)
702
+ # This is a bit subtle. The goal of this entire function (aot_wrapper_synthetic_base)
703
+ # is to relieve the downstream logic from having to reason about mutations on inputs that alias
704
+ # each other, by replacing aliased inputs with a synthetic base.
705
+ # One area where this breaks down a bit however is if one of those aliased inputs
706
+ # experienced a metadata mutation.
707
+ # We are now obligated to reapply the metadata mutation directly to the user's input;
708
+ # it isn't enough to apply mutations back to the synthetic base in the downstream logic.
709
+ #
710
+ # The way we handle this is by pretending that those aliased inputs that experience metadata mutations
711
+ # are additional outputs in the user's forward function.
712
+ # The downstream logic will just treat these as "user outputs that alias inputs".
713
+ # However, we will manually grab them at runtime here, use them to reapply the metadata mutation
714
+ # to the user inputs, and not return them to the user.
715
+ aliased_args_with_metadata_mutations = [
716
+ x
717
+ for i, x in enumerate(unpacked_args)
718
+ if i in aliased_arg_idx_with_metadata_mutations
719
+ ]
720
+ if len(aliased_args_with_metadata_mutations) > 0:
721
+ return *(flat_fn(*unpacked_args)), *aliased_args_with_metadata_mutations
722
+ else:
723
+ return flat_fn(*unpacked_args)
724
+
725
+ if config.debug_assert:
726
+ ref_fw_metadata = run_functionalized_fw_and_collect_metadata(
727
+ wrapped_flat_fn,
728
+ keep_input_mutations=fw_metadata.keep_input_mutations,
729
+ is_train=fw_metadata.is_train,
730
+ )(*flat_args_with_synthetic_bases)
731
+ assert ref_fw_metadata == fw_metadata_updated, (
732
+ f"ref_metadata={pprint.pformat(partial_flatten_asdict(ref_fw_metadata))}, "
733
+ f"\nactual_metadata={pprint.pformat(partial_flatten_asdict(fw_metadata_updated))}"
734
+ )
735
+
736
+ compiled_fn = compiler_fn(
737
+ wrapped_flat_fn,
738
+ flat_args_with_synthetic_bases,
739
+ aot_config,
740
+ fw_metadata=fw_metadata_updated,
741
+ )
742
+
743
+ if not hasattr(compiled_fn, "_boxed_call"):
744
+ compiled_fn = make_boxed_func(compiled_fn)
745
+
746
+ @wraps(compiled_fn)
747
+ def wrapped_compiled_fn(args):
748
+ args_with_synthetic_bases, synthetic_base_info = merge_view_inputs(
749
+ args, fw_metadata.input_info, is_inference=is_inference
750
+ )
751
+ assert synthetic_base_info is not None
752
+ aliased_args_w_metadata_mutations = [
753
+ args[i] for i in aliased_arg_idx_with_metadata_mutations
754
+ ]
755
+ args.clear()
756
+ outs = compiled_fn(args_with_synthetic_bases)
757
+ if num_aliased_args_with_metadata_mutations > 0:
758
+ # This code does not handle **all** input metadata mutations.
759
+ # Instead, it only handles metadata mutations on inputs that were converted into synthetic bases
760
+ # (which only happens if at least one aliased input experienced a data mutation).
761
+ # e.g:
762
+ # def f(a, b):
763
+ # a.mul_(2)
764
+ # b.t_(1, 0)
765
+ # f(x.view(2, 2), x.view(2, 2))
766
+ mutated_metadata_inps = outs[-num_aliased_args_with_metadata_mutations:]
767
+ user_outs = outs[:-num_aliased_args_with_metadata_mutations]
768
+ for inp, mutated_inp in zip(
769
+ aliased_args_w_metadata_mutations, mutated_metadata_inps
770
+ ):
771
+ inp.as_strided_(
772
+ mutated_inp.size(),
773
+ mutated_inp.stride(),
774
+ mutated_inp.storage_offset(),
775
+ )
776
+ return user_outs
777
+ return outs
778
+
779
+ return wrapped_compiled_fn
780
+
781
+
782
+ # Note [Handling mutations on an input that aliases other inputs]
783
+ # The easiest example to show-case this edge case is here:
784
+ #
785
+ # def f(a, b):
786
+ # a.mul_(2)
787
+ # out = a + b
788
+ # return out
789
+ # b = torch.ones(...)
790
+ # a = b.view(-1)
791
+ # f(a, b)
792
+ #
793
+ # In this situation, if a and b happened to be aliased, we need to trace something different!
794
+ # Suppose we had b = a.view(-1)
795
+ # (In this case, that means that `a._base is b`)
796
+ #
797
+ # We need to ensure that the aliasing relationship between a and b is preserved.
798
+ # We do that by detecting the specific situation above (mutating an input that aliases another input),
799
+ # and when we do that, we create a synthetic base argument. Then inside of the traced forward,
800
+ # we regenerate a and b off of that base.
801
+ # The complete example of the transformed function looks like this:
802
+ #
803
+ # // The traced forward takes in a synthetic base, and regenerates the aliased inputs as views
804
+ # // We could consider getting view-replay support here to minimize as_strided_scatter ops in the graph
805
+ # def traced_forward(base):
806
+ # a = base.as_strided(...)
807
+ # b = base.as_strided(...)
808
+ # a_updated = a.mul(2)
809
+ # base_updated = torch.as_strided_scatter(base, a_updated, ...)
810
+ # b_updated = base_updated.as_strided(...)
811
+ # out = a_updated + b_updated
812
+ # return a_updated, out
813
+ #
814
+ # def compiled_fn(a, b):
815
+ # // we detect that a is the "differentiable base" here
816
+ # base = a
817
+ # // In other situations, we might do either:
818
+ # // (1) a and b are both views off of some larger differentiable base
819
+ # // assert a._base is b._base and a._base is not None
820
+ # // base = a._base
821
+ # // (2) a and b both don't require gradients. Create a base from the storage
822
+ # // assert a._base is None and b._base is None
823
+ # // base = torch.Tensor(a.storage())
824
+ # a_updated, out = traced_forward(base)
825
+ # a.copy_(a_updated)
826
+ # return out
827
+ #
828
+ # This function:
829
+ # (1) Merges input views into a synthetic base argument, when any of those input views are mutated
830
+ # (2) Returns metadata telling the autograd.Function how to modify their arguments properly,
831
+ # to respect the new calling convention.
832
+ #
833
+ # The calling convention is as follows.
834
+ # Any inputs that were originally views of one another get yanked, and replaced with a synthetic base.
835
+ # The argument list ordering goes [base1, ..., baseN], [arg1, ..., argN],
836
+ # Where the ordering of the bases is determined from the ordering of the original view args.
837
+ # baseA will come before baseB if the earliest original argument coming from baseA
838
+ # showed up earlier in the argument list than the earliest original argument coming from baseB.
839
+ #
840
+ # Example, given some tensors a, b, c, d
841
+ # call site:
842
+ # f(a, c.view(-1), b.view(-1), b, c, d)
843
+ # Modified argument list:
844
+ # c_base comes first because the first c view came earlier in arg list than the first b view
845
+ # a and d still show up in the modified arg list, but b and c don't; they're regenerated from their bases
846
+ # b_base = torch.Tensor(b.storage())
847
+ # c_base = torch.Tensor(c.storage())
848
+ # f(c_base, b_base, a, d)
849
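+ # Illustrative sketch (an assumption for exposition, not taken from this file): only
+ # *overlapping* aliases where at least one of them sees a data mutation are merged into
+ # a synthetic base:
+ #
+ # >>> base = torch.ones(4)
+ # >>> a, b = base[:2], base[2:]      # share a storage but are disjoint -> left alone
+ # >>> c, d = base.view(4), base[1:]  # overlapping views; if either sees a data mutation,
+ # ...                                # both are replaced by a single synthetic base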
+ def merge_view_inputs(
850
+ fwd_inputs: List[Any],
851
+ mutated_input_info: List[InputAliasInfo],
852
+ *,
853
+ # The autograd case currently has more restrictions than the inference case.
854
+ is_inference: bool,
855
+ ) -> Tuple[List[Any], Optional[List[Union[int, Tuple[int, torch.Tensor]]]]]:
856
+ def _are_differentiable_views(view1, view2):
857
+ if view1 is view2:
858
+ return True
859
+ if view1._base is None and view2._base is None:
860
+ return False
861
+ if view1._base is view2._base or view1._base is view2 or view1 is view2._base:
862
+ return True
863
+ return False
864
+
865
+ def _same_dtype_views(view1, view2):
866
+ if view1.dtype != view2.dtype:
867
+ return False
868
+ if view1._base is not None and view1.dtype != view1._base.dtype:
869
+ return False
870
+ if view2._base is not None and view2.dtype != view2._base.dtype:
871
+ return False
872
+ return True
873
+
874
+ assert len(fwd_inputs) == len(mutated_input_info)
875
+ storage_ref_to_idx: Dict[StorageWeakRef, List[int]] = collections.defaultdict(list)
876
+ base_args = []
877
+ other_args = []
878
+ for i, inpt in enumerate(fwd_inputs):
879
+ if isinstance(inpt, Tensor):
880
+ storage_ref = StorageWeakRef(inpt.untyped_storage())
881
+ storage_ref_to_idx[storage_ref].append(i)
882
+ else:
883
+ other_args.append(inpt)
884
+ # Note [Synthetic Base Info Metadata]
885
+ # This list contains metadata that tells you what the i'th argument in the inner calling convention should be.
886
+ # It's either:
887
+ # - another int (corresponding to the index in the argument list of the element from the outer calling convention)
888
+ # - idx, view_tensor, where we can generate the new output with view_tensor._view_func(old_args[idx])
889
+ # idx corresponds to which synthetic base from the outer calling context to view
890
+ inner_calling_convention_meta: Dict[int, Union[int, Tuple[int, torch.Tensor]]] = {}
891
+ for aliased_input_indices in storage_ref_to_idx.values():
892
+ if len(aliased_input_indices) <= 1 or not any(
893
+ # We only care about mutations that affect all aliases,
894
+ # so metadata mutations on an input don't require us to do synthetic base handling.
895
+ mutated_input_info[inpt_idx].mutates_data
896
+ for inpt_idx in aliased_input_indices
897
+ ):
898
+ for curr_idx in aliased_input_indices:
899
+ other_args.append(fwd_inputs[curr_idx])
900
+ continue
901
+
902
+ # Here, we attempt to do a more complicated check to detect false aliasing
903
+ # (e.g. if all the tensors have the same storage, but don't actually overlap)
904
+ # In theory, we could have a large group of tensors that all share storages, where only *some* of them
905
+ # have overlapping memory.
906
+ # I don't bother with that case for now: here, we only bail out earlier if we detect that **every** pair
907
+ # of tensors in the current group that shares a storage is non-overlapping.
908
+ aliased_input_indices_no_false_sharing = compute_overlapping_inputs(
909
+ fwd_inputs, aliased_input_indices
910
+ )
911
+ if len(aliased_input_indices_no_false_sharing) <= 1:
912
+ for curr_idx in aliased_input_indices:
913
+ other_args.append(fwd_inputs[curr_idx])
914
+ continue
915
+
916
+ # We detected an input that was mutated, AND aliases with another input.
917
+ # we need to replace this set of aliased inputs with a single synthetic base.
918
+ # For now, I'm banning a bunch of cases. We expect dynamo to properly detect these cases
919
+ # and error out. We can fix them later.
920
+ # These checks are transitive, so we don't need to check every pair.
921
+ for idx1, idx2 in zip(
922
+ aliased_input_indices, aliased_input_indices[1:], strict=False
923
+ ):
924
+ view1 = fwd_inputs[idx1]
925
+ view2 = fwd_inputs[idx2]
926
+ # The "inputs that are aliased but have different differentiable bases" case
927
+ # is more complicated and hopefully pretty rare. Not currently handled.
928
+ if not is_inference:
929
+ assert _are_differentiable_views(
930
+ view1, view2
931
+ ), "aot_autograd() does not yet handle non-differentiable view input mutations."
932
+ # Regenerating views when reinterpreting complex / real tensors seems non-trivial,
933
+ # not handling for now
934
+ assert _same_dtype_views(
935
+ view1, view2
936
+ ), "aot_autograd() does not yet handle input mutations on views with different dtypes."
937
+ non_none_bases = [
938
+ fwd_inputs[i]._base
939
+ for i in aliased_input_indices
940
+ if fwd_inputs[i]._base is not None
941
+ ]
942
+ aliases_with_none_bases = [
943
+ fwd_inputs[i] for i in aliased_input_indices if fwd_inputs[i]._base is None
944
+ ]
945
+ if len(non_none_bases) == 0:
946
+ # Case where none of the aliases have a ._base
947
+ # we generate a synthetic base without gradients, and generate views off of it
948
+ # We hit this case when we have input tensors to the graph that share a storage,
949
+ # but do not have a ._base field.
950
+ # Wondering when we hit this case?
951
+ # The _base field simply says that autograd knows about the aliasing relationship,
952
+ # but sometimes we create tensors which are aliased out of the same storage but guaranteed
953
+ # to be disjoint. In these cases, we will skip setting up the _base relationship
954
+ # for performance reasons (because the fact that the tensors share the same storage
955
+ # is unobservable unless you (1) do naughty things with resize_/as_strided
956
+ # or (2) look at the storage--as we are doing here.)
957
+ # One particular example of this is optimizer steps on the LSTM module:
958
+ # LSTM parameters are packed into a contiguous storage for efficiency reasons when
959
+ # calling cuDNN kernels, so when these parameters get passed to the optimizer we will
960
+ # find they share the same storage, but do not have _base set since they are all disjoint.
961
+ #
962
+ # NOTE: There is one case where this is unsafe:
963
+ # torch.Tensor(storage) will ALWAYS create a 1D tensor, which is not necessarily
964
+ # the same shape as the "actual" base that the tensor came from.
965
+ # For the most part this is fine, because we always use as_strided()
966
+ # to generate the original aliased inputs again.
967
+ # If we were to use view-replay though, this could cause the aliased views
968
+ # to have incorrect sizes.
969
+ example_idx = aliased_input_indices[0]
970
+ example_alias = fwd_inputs[example_idx]
971
+ # Note that this function is re-used at both trace time and runtime.
972
+ # At trace time, we're under a FakeMode so synthetic_base becomes a FakeTensor.
973
+ synthetic_base = torch.empty(
974
+ (0,), dtype=example_alias.dtype, device=example_alias.device
975
+ )
976
+ # We don't actually have a convenient way of going from storage -> tensor,
977
+ # So using set_() here (we suffer some minor overhead, but this case is rare).
978
+ synthetic_base.set_(example_alias.untyped_storage())
979
+ else:
980
+ # Case where all of the aliases require gradients, and have the same _base.
981
+ synthetic_base = non_none_bases[0]
982
+ for other_base in non_none_bases[1:]:
983
+ assert (
984
+ other_base is synthetic_base
985
+ ), "aot_autograd() does not yet handle non-differentiable view input mutations."
986
+ for alias in aliases_with_none_bases:
987
+ assert (
988
+ alias is synthetic_base
989
+ ), "aot_autograd() does not yet handle non-differentiable view input mutations."
990
+ base_args.append(synthetic_base)
991
+ for curr_view_idx in aliased_input_indices:
992
+ curr_view = fwd_inputs[curr_view_idx]
993
+ base_idx = len(base_args) - 1
994
+ # We store just enough info here so that we can regenerate the view later.
995
+ # Regeneration: curr_view._view_func(args[base_idx])
996
+ inner_calling_convention_meta[curr_view_idx] = (base_idx, curr_view)
997
+ if len(base_args) == 0:
998
+ assert len(other_args) == len(fwd_inputs)
999
+ # If no synthetic bases are necessary, just return the original inputs.
1000
+ return fwd_inputs, None
1001
+ else:
1002
+ # Otherwise, return:
1003
+ # (1) The new args according to the updated calling convention: (synthetic_bases, other_args)
1004
+ # (2) Metadata telling functionalization how to generate the inner argument list given the outer calling convention.
1005
+ # We post-process it into a list, where meta[i] tells you info about the i'th argument in the inner calling convention.
1006
+ args_to_functionalization = base_args + other_args
1007
+ arg_to_old_idx_map = {arg: i for (i, arg) in enumerate(fwd_inputs)}
1008
+ for i, other_arg in enumerate(other_args):
1009
+ new_idx = len(base_args) + i
1010
+ old_idx = arg_to_old_idx_map[other_arg]
1011
+ inner_calling_convention_meta[old_idx] = new_idx
1012
+ # post process into a list
1013
+ post_processed_calling_convention_meta: List[
1014
+ Union[int, Tuple[int, torch.Tensor]]
1015
+ ] = [-1 for _ in range(len(inner_calling_convention_meta))]
1016
+ for k, v in inner_calling_convention_meta.items():
1017
+ post_processed_calling_convention_meta[k] = v
1018
+ # Quick assert: every argument in the inner calling convention should be accounted for.
1019
+ for x in post_processed_calling_convention_meta:
1020
+ assert x != -1
1021
+ return args_to_functionalization, post_processed_calling_convention_meta
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/schemas.py ADDED
@@ -0,0 +1,696 @@
1
+ """
2
+ The various dataclasses, Enums, namedtuples, etc. used in AOTAutograd. This includes
+ input/output types, metadata, config, function signatures, etc.
4
+ """
5
+
6
+ import collections
7
+ import functools
8
+ from dataclasses import dataclass, field
9
+ from enum import Enum
10
+ from typing import Any, Callable, Dict, List, NewType, Optional, Set, Tuple, Union
11
+
12
+ import torch
13
+ import torch.utils._pytree as pytree
14
+ from torch._guards import Source
15
+ from torch._subclasses import FakeTensor
16
+ from torch._subclasses.fake_tensor import is_fake
17
+
18
+ from .. import config
19
+
20
+ from .functional_utils import _check_if_mutation_can_be_in_graph
21
+ from .utils import strict_zip
22
+
23
+ zip = strict_zip
24
+
25
+ OutputType = Enum(
26
+ "OutputType",
27
+ (
28
+ # output is not an alias
29
+ "non_alias",
30
+ # output aliases an input
31
+ "alias_of_input",
32
+ # output **is** an input tensor
33
+ "is_input",
34
+ # output has a ._base tensor, which is a graph intermediate.
35
+ # We need to return its ._base as a graph output,
36
+ # so its requires_grad info is populated correctly.
37
+ # Instructs the runtime code to regenerate the current output
38
+ # from a base tensor, graph_intermediates[base_idx]
39
+ "alias_of_intermediate_save_as_output",
40
+ # Same as above; but we don't need to explicitly add its ._base
41
+ # as a graph output, because it already **is** a graph output.
42
+ "alias_of_intermediate",
43
+ # Same as above; but the output's ._base is **already** a user output.
44
+ # Instructs the runtime code to regenerate the current output from
45
+ # a base tensor, user_outputs[base_idx]
46
+ "alias_of_intermediate_base_is_user_output",
47
+ # See Note [Intermediate Bases Optimization]
48
+ "unsafe_view_alias",
49
+ # output is an alias, but has a custom autograd.Function backward.
50
+ # In this case, we don't want to do view-replay, since we won't be able to replay the custom function.
51
+ # Instead, we'll treat this output "normally", and trace its backward into the graph.
52
+ "custom_function_view",
53
+ ),
54
+ )
55
+
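+ # Illustrative sketch (an assumption for exposition, not taken from this file): roughly
+ # how the simplest categories map onto user outputs:
+ #
+ # >>> def f(x):
+ # ...     y = x * 2
+ # ...     return x, x.view(-1), y
+ # >>> # x          -> OutputType.is_input
+ # >>> # x.view(-1) -> OutputType.alias_of_input
+ # >>> # y          -> OutputType.non_alias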
56
+
57
+ # This class stores info about every user output.
58
+ @dataclass(frozen=True)
59
+ class OutputAliasInfo:
60
+ # Tells us if this output is:
61
+ # (1) a regular (non-aliased) output
62
+ # (2) an alias of a forward input
63
+ # (3) **is** a forward input (special case of "alias_of_input")
64
+ # (4) an alias of an intermediate (aka an alias of an output of the inner traced forward)
65
+ # (5) an alias of an intermediate, that explicitly requires returning the intermediate
66
+ # as a graph output
67
+ # (6) an alias of an intermediate, where that intermediate is also a user output
68
+ output_type: OutputType
69
+ # The raw type of the output (torch.Tensor, SymInt, etc)
70
+ raw_type: type
71
+ # If (1) above, then
72
+ # - base_idx is None
73
+ # If (2) or (3) above, then
74
+ # - Tells us that the base of this alias is user_fwd_input[base_idx]
75
+ # (This is an index into the inputs *before* we make synthetic bases)
76
+ # If (4) or (5) above, then
77
+ # - Tells us that the base of this alias is output_graph_intermediates[base_idx]
78
+ # here, this refers to the index of the *direct* traced
79
+ # If (6) above, then:
80
+ # - Tells us that the base of this alias is output_user_fwds[base_idx]
81
+ # here, this refers to the index of the *direct* traced
82
+ base_idx: Optional[int]
83
+ # If it is a Tensor, what the dynamic dims are (otherwise is None)
84
+ dynamic_dims: Optional[Set[int]]
85
+ # requires_grad
86
+ requires_grad: bool
87
+
88
+
89
+ class MutationType(Enum):
90
+ NOT_MUTATED = 1
91
+ MUTATED_IN_GRAPH = 2
92
+ MUTATED_OUT_GRAPH = 3
93
+
94
+
95
+ # This class tells us info about user inputs.
96
+ @dataclass(frozen=True)
97
+ class InputAliasInfo:
98
+ is_leaf: bool
99
+ mutates_data: bool
100
+ mutates_metadata: bool
101
+ mutations_hidden_from_autograd: bool
102
+ mutations_under_no_grad_or_inference_mode: bool
103
+ mutates_storage_metadata: bool
104
+ requires_grad: bool
105
+ keep_input_mutations: bool
106
+
107
+ def __post_init__(self):
108
+ if self.mutates_storage_metadata:
109
+ # For convenience, we guarantee that this is always true.
110
+ # In practice, if we call .set_(), then at runtime there is no need
111
+ # to additionally fix up the tensor metadata, since our runtime
112
+ # call to inp.set_(updated_inp) will already have the right metadata
113
+ assert self.mutates_metadata
114
+
115
+ @functools.cached_property
116
+ def mutation_type(self) -> MutationType:
117
+ if (not self.mutates_data) and (not self.mutates_metadata):
118
+ return MutationType.NOT_MUTATED
119
+
120
+ if _check_if_mutation_can_be_in_graph(
121
+ self.keep_input_mutations,
122
+ self.mutates_data,
123
+ self.mutates_metadata,
124
+ self.mutations_hidden_from_autograd,
125
+ self.mutations_under_no_grad_or_inference_mode,
126
+ self.requires_grad,
127
+ ):
128
+ return MutationType.MUTATED_IN_GRAPH
129
+
130
+ return MutationType.MUTATED_OUT_GRAPH
131
+
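+ # Illustrative sketch (an assumption for exposition, not taken from this file): a data-only
+ # mutation with keep_input_mutations=True can typically stay in the graph
+ # (MUTATED_IN_GRAPH), while e.g. a metadata mutation such as x.transpose_(0, 1) has to be
+ # replayed by the runtime epilogue instead (MUTATED_OUT_GRAPH);
+ # _check_if_mutation_can_be_in_graph holds the exact conditions.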
132
+
133
+ @dataclass
134
+ class SubclassCreationMeta:
135
+ """
136
+ Used for AOTDispatch.
137
+ This dataclass gives us the information we need to reconstruct a tensor subclass
138
+ from our flat inputs.
139
+ Why is this important? The graph that we'd like to trace out contains flat tensor inputs,
140
+ But the user's original model may have subclass inputs and outputs.
141
+ So we need to wrap/unwrap subclasses as necessary to translate between the user's
142
+ view (subclass inps/outs), and the backend compiler's view (graph with no subclass args).
143
+
144
+ Complications arise mostly from the fact that a subclass can hold more than one inner tensor;
145
+ So for a given subclass input/output, we need to carefully track which indices map
146
+ to the subclass tensor in the corresponding "dense-tensor-only" graph.
147
+ """
148
+
149
+ # In the inner graph that only takes in dense tensor inputs,
150
+ # this maps to the first index of "tensors that should go in this subclass wrapper"
151
+ flat_tensor_start_idx: int
152
+ # The number of tensors that live in this subclass wrapper
153
+ arg_count: int
154
+ # Stores the original subclass itself.
155
+ # This is needed because we need the autograd metadata on the original subclass
156
+ # (this is guaranteed to be a wrapper subclass that holds a fake tensor,
157
+ # so holding onto this at runtime shouldn't leak memory)
158
+ original_subclass: torch.Tensor
159
+ # meta and inner_keys are produced by the subclass's __tensor_flatten__.
160
+ # We need to keep them around along with outer_size / outer_stride to plumb them
161
+ # into __tensor_unflatten__.
162
+ meta: Any
163
+ inner_keys: List[Any]
164
+ outer_size: Tuple[int, ...]
165
+ outer_stride: Tuple[int, ...]
166
+
167
+ def creation_fn(self, all_args, *, is_runtime: bool):
168
+ curr_args = all_args[
169
+ self.flat_tensor_start_idx : self.flat_tensor_start_idx + self.arg_count
170
+ ]
171
+ assert len(curr_args) == len(
172
+ self.inner_keys
173
+ ), f"inner_keys: {str(self.inner_keys)}. len(curr_args): {len(curr_args)}"
174
+ # NB: Sometimes we have real inner tensors and symbolic metadata.
175
+ # TODO: Resolve this so we always have matching real / symbolic tensors / metadata.
176
+ out = type(self.original_subclass).__tensor_unflatten__( # type: ignore[attr-defined]
177
+ dict(zip(self.inner_keys, curr_args)),
178
+ self.meta,
179
+ self.outer_size,
180
+ self.outer_stride,
181
+ )
182
+ if not is_runtime:
183
+ # After wrapping up the inner dense tensors into a subclass, we need to make sure that our new wrapper
184
+ # has correct autograd metadata, since we'll be tracing through the autograd engine with the subclass.
185
+ # We don't trace through the autograd engine at runtime though, so no need
186
+ # to compute this extra metadata then!
187
+ torch._mirror_autograd_meta_to(self.original_subclass, out) # type: ignore[attr-defined]
188
+
189
+ return out
190
+
191
+ def __post_init__(self):
192
+ # sanity assert to make sure we don't leak memory
193
+ assert is_fake(self.original_subclass)
194
+
195
+
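+ # Illustrative sketch ("TwoTensor" is a hypothetical wrapper subclass holding two inner
+ # tensors; it is not defined in this file): for user inputs (TwoTensor(a, b), c) the dense
+ # graph sees [a, b, c], so subclass_inp_meta would look like
+ # [SubclassCreationMeta(flat_tensor_start_idx=0, arg_count=2, ...), 2], and
+ # creation_fn(all_args, is_runtime=...) rebuilds the TwoTensor from all_args[0:2].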
196
+ # This class encapsulates all aliasing + mutation info we need about the forward graph
197
+ # See a more detailed overview of the edge case handling at
198
+ # https://docs.google.com/document/d/19UoIh_SVrMy_b2Sx5ZaeOJttm6P0Qmyss2rdBuyfoic/edit
199
+ @dataclass(eq=False)
200
+ class ViewAndMutationMeta:
201
+ # length = # user inputs
202
+ # This gives us info about every input, and what sort of mutation happened to it (if any)
203
+ input_info: List[InputAliasInfo]
204
+
205
+ # length = # user outputs
206
+ # This gives us info about every output (mostly around whether it aliases other tensors)
207
+ output_info: List[OutputAliasInfo]
208
+
209
+ # length = the number of intermediate bases appended as outputs to the end of the forward graph.
210
+ # Note: this is not necessarily the same thing as:
211
+ # len([x for x in output_info if x.output_type == OutputType.alias_of_intermediate])
212
+ # Because outputs might share a ._base, or an output's ._base might itself be
213
+ # another user output (in both cases, we won't redundantly append bases to the end of the graph)
214
+ num_intermediate_bases: int
215
+
216
+ # For inference only: instructs us to keep data-only input mutations directly in the graph
217
+ keep_input_mutations: bool
218
+
219
+ # length = (# inputs w data mutations) + (# user outputs that are non_aliasing tensors)
220
+ # + (# intermediate bases)
221
+ # These are the FakeTensor (or potential SymInt) outputs that we traced from our
222
+ # metadata pass of the user's forward function.
223
+ # Their only use today is to pass them as a best-guess for tangents when tracing the joint.
224
+ # Stashing them as part of our "metadata" makes it simpler if we want to run our analysis
225
+ # pass once, and re-use the output throughout AOTAutograd
226
+ traced_tangents: List[Any]
227
+
228
+ # Each of these is a list telling us about subclasses for the inputs/outputs/grad_outs
229
+ # They are used throughout AOTDispatch to tell us how to generate a list of subclass tensors,
230
+ # Given a (potentially larger) list of plain torch tensors.
231
+
232
+ # Taking subclass_inp_meta as an example:
233
+ # subclass_inp_meta[i] = j (an int) tells us:
234
+ # "The i'th user input is not a subclass, and corresponds to inputs[j] of the plain-tensor graph."
235
+ # subclass_inp_meta[i] = SubclassCreationMeta(flat_tensor_start_idx=3, arg_count=2)
236
+ # "The i'th user input is subclass holding two inner tensors, which are
237
+ # inputs[3] and inputs[4] of the plain-tensor graph".
238
+
239
+ # length = # user inputs
240
+ subclass_inp_meta: List[Union[int, SubclassCreationMeta]]
241
+ # So, the full set of outputs to the forward graph looks something like:
242
+ # (*mutated_inps, *user_outs, *intermediate_bases, *saved_for_bw_tensors)
243
+ # where the first 3 of those 4 can be subclasses
244
+ # (but not saved_for_bw tensors, since these are internal to the compiler
245
+ # and not user visible, so there's no point in wrapping/unwrapping them at runtime).
246
+ # This list contains subclass information on all of the fw graph outputs
247
+ # except for saved_for_bw_tensors.
248
+ subclass_fw_graph_out_meta: List[Union[int, SubclassCreationMeta]]
249
+ # length = # backward graph inputs
250
+ subclass_tangent_meta: List[Union[int, SubclassCreationMeta]]
251
+ # TODO: we should kill this
252
+ # (need to default it to not break internal)
253
+ is_train: bool = False
254
+
255
+ num_symints_saved_for_bw: Optional[int] = None
256
+
257
+ # The grad_enabled mutation that will be emitted in the runtime_wrapper epilogue
258
+ # NOTE: AOTAutograd will assume that the ambient `is_grad_enabled` is the grad mode
259
+ # that is intended to be in effect prior to running the graph, in keeping with
260
+ # equivalence to eager mode. It is the responsibility of upstream graph acquisition
261
+ # to reset the grad mode to its pre-graph value prior to calling aot_autograd.
262
+ grad_enabled_mutation: Optional[bool] = None
263
+
264
+ # Keeps track of whether `torch.use_deterministic_algorithms` was turned on
265
+ # when the forward was run. If deterministic mode was turned off during the
266
+ # forward, but is turned on during the backward call, then an error is
267
+ # raised
268
+ deterministic: Optional[bool] = None
269
+
270
+ # Map of effect type (ex. _EffectType.ORDERED) to token. If there are
271
+ # side-effectful operators, FunctionalTensorMode will populate this
272
+ # dictionary telling us how many tokens we will need during tracing.
273
+ tokens: Dict[Any, torch.Tensor] = field(default_factory=dict)
274
+
275
+ def __post_init__(self):
276
+ # pre-compute the indices of the inputs that are mutated.
277
+ # When keep_input_mutations is set, we don't need to worry about our epilogue
278
+ # handling data-only mutations, because we keep them directly in the graph.
279
+
280
+ mutated_inp_runtime_indices = [
281
+ i
282
+ for i, m in enumerate(self.input_info)
283
+ if (m.mutation_type == MutationType.MUTATED_OUT_GRAPH)
284
+ ]
285
+
286
+ mutated_graph_handled_indices = [
287
+ i
288
+ for i, m in enumerate(self.input_info)
289
+ if m.mutation_type == MutationType.MUTATED_IN_GRAPH
290
+ ]
291
+ self.mutated_graph_handled_indices = mutated_graph_handled_indices
292
+ self.num_mutated_graph_handled_indices = len(self.mutated_graph_handled_indices)
293
+
294
+ mutated_graph_handled_indices_seen_by_autograd = [
295
+ i
296
+ for i in mutated_graph_handled_indices
297
+ if not self.input_info[i].mutations_hidden_from_autograd
298
+ ]
299
+
300
+ self.mutated_graph_handled_indices_seen_by_autograd = (
301
+ mutated_graph_handled_indices_seen_by_autograd
302
+ )
303
+ self.num_mutated_graph_handled_indices_seen_by_autograd = len(
304
+ self.mutated_graph_handled_indices_seen_by_autograd
305
+ )
306
+
307
+ aliased_out_indices = [
308
+ i
309
+ for i, m in enumerate(self.output_info)
310
+ if m.output_type
311
+ not in [
312
+ OutputType.non_alias,
313
+ OutputType.unsafe_view_alias,
314
+ OutputType.custom_function_view,
315
+ ]
316
+ ]
317
+ unsafe_view_out_indices = [
318
+ i
319
+ for i, m in enumerate(self.output_info)
320
+ if m.output_type is OutputType.unsafe_view_alias
321
+ ]
322
+
323
+ # This is pre-computed in post_init for perf.
324
+ # It contains the index of every element
325
+ # of input_info that corresponds to a mutation (data or metadata or both)
326
+ self.mutated_inp_runtime_indices = mutated_inp_runtime_indices
327
+ self.num_mutated_inp_runtime_indices = len(self.mutated_inp_runtime_indices)
328
+
329
+ # This is pre-computed for perf.
330
+ # It contains the index of every element
331
+ # of output_info that corresponds to an alias (either of an input or intermediate)
332
+ self.aliased_out_indices = aliased_out_indices
333
+ self.unsafe_view_out_indices = unsafe_view_out_indices
334
+ self.num_outputs = len(self.output_info)
335
+ self.num_outputs_non_aliased = len(
336
+ [
337
+ x
338
+ for x in self.output_info
339
+ if x.output_type
340
+ in [
341
+ OutputType.non_alias,
342
+ OutputType.unsafe_view_alias,
343
+ OutputType.custom_function_view,
344
+ ]
345
+ ]
346
+ )
347
+ self.num_outputs_aliased_to_inputs = len(
348
+ [
349
+ x
350
+ for x in self.output_info
351
+ if x.output_type
352
+ in [
353
+ OutputType.alias_of_input,
354
+ OutputType.is_input,
355
+ ]
356
+ ]
357
+ )
358
+ self.num_unsafe_view_outputs = len(self.unsafe_view_out_indices)
359
+ self.num_outputs_aliased_to_intermediates = len(
360
+ [
361
+ x
362
+ for x in self.output_info
363
+ if x.output_type
364
+ in [
365
+ OutputType.alias_of_intermediate,
366
+ OutputType.alias_of_intermediate_save_as_output,
367
+ OutputType.alias_of_intermediate_base_is_user_output,
368
+ ]
369
+ ]
370
+ )
371
+ self.num_outputs_aliased = (
372
+ self.num_outputs_aliased_to_inputs
373
+ + self.num_outputs_aliased_to_intermediates
374
+ )
375
+
376
+ self.dynamic_outputs = any(o.dynamic_dims for o in self.output_info)
377
+ # See Note: [AOTAutograd Backward Guards]
378
+ # This is pre-computed for fast asserts on the types of our grad_outputs in the backward.
379
+ # Eventually, we should kill this and replace with real backward guards.
380
+ # (we want to precompute the "runtime" types, so replace FakeTensor with torch.Tensor)
381
+ self.output_types = [
382
+ torch.Tensor if isinstance(x, FakeTensor) else type(x)
383
+ for x in self.traced_tangents
384
+ ]
385
+
386
+ self.is_rng_op_functionalized = config.functionalize_rng_ops
387
+ # All of the above metadata is collected by tracing the fw function.
388
+ # However, extra outputs for rng offsets behave differently. Both fwd
389
+ # and bwd graphs have their own outputs for the total consumed offsets.
390
+ # Unlike mutated inputs, we don't have to worry about sending the right
391
+ # set of tensors between fwd and bwd. Fwd and bwd offsets are
392
+ # independent and simpler to handle. Therefore, we track them
393
+ # separately.
394
+ self.num_outputs_rng_offset = 1 if self.is_rng_op_functionalized else 0
395
+
396
+ # Our forward() returns both (mutated_inputs, outputs, output_intermediate_bases, saved_tensors, saved_symints)
397
+ self.num_forward_returns = (
398
+ self.num_mutated_inp_runtime_indices
399
+ + self.num_outputs
400
+ + self.num_intermediate_bases
401
+ )
402
+ # In case of functionalization of rng ops, the fw_module returns one
403
+ # additional output for rng offset. This rng offset is used right
404
+ # away to advance the rng state, and is not passed on to the raw
405
+ # outputs. However, we need to know the exact boundary to identify
406
+ # which tensors are to be saved for the bwd graph. num_forward captures
407
+ # this information.
408
+ self.num_forward = self.num_forward_returns + self.num_outputs_rng_offset
409
+
410
+ @property
411
+ def tensors_saved_for_backwards_slice(self):
412
+ assert self.num_symints_saved_for_bw is not None
413
+ if self.num_symints_saved_for_bw > 0:
414
+ return slice(self.num_forward, -self.num_symints_saved_for_bw)
415
+ else:
416
+ return slice(self.num_forward, None)
417
+
418
+ @property
419
+ def symints_saved_for_backwards_slice(self):
420
+ assert self.num_symints_saved_for_bw is not None
421
+ if self.num_symints_saved_for_bw > 0:
422
+ return slice(-self.num_symints_saved_for_bw, None)
423
+ else:
424
+ return slice(0, 0) # empty slice
425
+
426
+ def __eq__(self, other):
427
+ if not isinstance(other, ViewAndMutationMeta):
428
+ return NotImplemented
429
+ return (
430
+ self.input_info == other.input_info
431
+ and self.output_info == other.output_info
432
+ and self.num_intermediate_bases == other.num_intermediate_bases
433
+ and self.keep_input_mutations == other.keep_input_mutations
434
+ and self.is_rng_op_functionalized == other.is_rng_op_functionalized
435
+ and self.num_outputs_rng_offset == other.num_outputs_rng_offset
436
+ and len(self.traced_tangents) == len(other.traced_tangents)
437
+ and all(
438
+ x.shape == y.shape and x.dtype == y.dtype
439
+ for x, y in zip(self.traced_tangents, other.traced_tangents)
440
+ )
441
+ )
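As a quick illustration of how the two `*_saved_for_backwards_slice` properties above partition the compiled forward's flat output list, here is a minimal, self-contained sketch; the counts and names are invented for the example and are not part of this file.

    num_forward = 3               # hypothetical: mutated inputs + user outputs + bases
    num_symints_saved_for_bw = 2  # hypothetical count of saved symints

    flat_fw_outs = ["out0", "out1", "out2", "saved_t0", "saved_t1", "s0", "s1"]

    tensors_slice = (
        slice(num_forward, -num_symints_saved_for_bw)
        if num_symints_saved_for_bw > 0
        else slice(num_forward, None)
    )
    symints_slice = (
        slice(-num_symints_saved_for_bw, None)
        if num_symints_saved_for_bw > 0
        else slice(0, 0)
    )

    assert flat_fw_outs[tensors_slice] == ["saved_t0", "saved_t1"]
    assert flat_fw_outs[symints_slice] == ["s0", "s1"]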
442
+
443
+
444
+ @dataclass(eq=False)
445
+ class SubclassMeta:
446
+ # A copy of all forward metadata, but computed on the *dense* tensor forward (after desugaring subclasses)
447
+ # So for example, if the user had a model containing two `TwoTensor` inputs,
448
+ # Then `SubclassMeta.fw_metadata.input_infos` would have length 4 here.
449
+ fw_metadata: ViewAndMutationMeta
450
+
451
+ # Note: [Computing Subclass Metadata about grad_inputs]
452
+ # Given a list of flattened, plain tensor grad_inputs, this tells us how to reconstruct the grad_input subclasses
453
+ #
454
+ # You might think: why not just assume that all grad_inputs will have the same subclass-ness as the original inputs?
455
+ # (AOTAutograd generally assumes other properties, e.g. that grad_outputs are contiguous)
456
+ #
457
+ # This doesn't really work though. Take this example:
458
+ #
459
+ # def f(DoubleTensor, DenseTensor):
460
+ # return DoubleTensor * DenseTensor
461
+ #
462
+ # In the above example, the .grad field of *both* DoubleTensor and DenseTensor will be a DoubleTensor.
463
+ # When we trace out a joint fw-bw graph, we'll end up returning two subclasses for the two grad_inputs.
464
+ # This means that our backward graph will return 4 outputs (two dense tensors for each DoubleTensor grad_input)
465
+ # and we need to properly store the metadata that tells us how to turn these 4 outputs back into DoubleTensors.
466
+ #
467
+ # Note that this info **cannot** easily be figured out from ViewAndMutationMeta.
468
+ # We can only compute this info by tracing the entire joint and examining the grad_inputs that we computed.
469
+ #
470
+ # See Note: [AOTAutograd Backward Guards]
471
+ # This will also eventually require us to install backward guards,
472
+ # in case we made incorrect assumptions about the subclass-ness of our grad_outputs
473
+ #
474
+ # Optional field because we don't compute for inference graphs
475
+ grad_input_metas: Optional[List[Union[int, SubclassCreationMeta]]]
476
+
477
+ def __init__(self):
478
+ # The fields in this class get set after its construction.
479
+ pass
480
+
481
+
482
+ # This class exists because:
483
+ # - the autograd.Function.forward() in aot autograd returns outputs that might alias inputs
484
+ # - we only care about the metadata on those aliases, so we can regenerate them.
485
+ # We do not want them to participate in the autograd.Function.
486
+ # We do that by wrapping them in an opaque class, so the autograd.Function
487
+ # does not know to treat them as tensors.
488
+ @dataclass(frozen=True)
489
+ class TensorAlias:
490
+ alias: torch.Tensor
491
+
492
+
493
+ @dataclass
494
+ class BackwardSignature:
495
+ """
496
+ Provides information about the backward section of an exported
497
+ joint forward-backward graph.
498
+ For a particular fx GraphModule, this class contains information on:
499
+ (1) A mapping from each gradient (backwards output) to the parameter
500
+ it corresponds to (forward input)
501
+ (2) A mapping from each gradient (backwards output) to the user input
502
+ it corresponds to (forward input)
503
+ (3) Which of the forward outputs corresponds to the loss, that we backprop on.
504
+
505
+ Each string name is the `node.name` of the corresponding node in the fx graph.
506
+ """
507
+
508
+ gradients_to_parameters: Dict[str, str]
509
+ gradients_to_user_inputs: Dict[str, str]
510
+ loss_output: str
511
+
512
+
513
+ GraphOutputName = NewType("GraphOutputName", str)
514
+ GraphInputName = NewType("GraphInputName", str)
515
+ FQN = NewType("FQN", str)
516
+
517
+
518
+ @dataclass
519
+ class GraphSignature:
520
+ """
521
+ Provides information about an exported module.
522
+ For a particular fx GraphModule, this class contains information on:
523
+ (1) Which graph inputs are parameters, buffers, or user inputs
524
+ (2) (for params/buffers) a mapping from the name of each graph argument
525
+ to its parameter/buffer FQN in the original nn.Module.
526
+ (3) If there are input mutations, these are represented as extra outputs
527
+ in the fx GraphModule. We provide a mapping from these
528
+ extra output names to the names of the actual inputs.
529
+ (4) The pytree metadata on how to flatten/unflatten inputs and outputs.
530
+ The corresponding FX GraphModule only accepts and returns
531
+ pytree-flattened inputs/outputs.
532
+ (5) (Optionally) if the FX is a joint forward-backward graph, we provide
533
+ a signature on the backward section of the joint graph.
534
+ """
535
+
536
+ parameters: List[FQN]
537
+ buffers: List[FQN]
538
+
539
+ user_inputs: List[GraphInputName]
540
+ user_outputs: List[GraphOutputName]
541
+ inputs_to_parameters: Dict[GraphInputName, FQN]
542
+ inputs_to_buffers: Dict[GraphInputName, FQN]
543
+
544
+ # If the user's module mutates a buffer,
545
+ # it's represented in the graph as an extra graph output.
546
+ # This dict is a mapping from
547
+ # "graph outputs that correspond to updated buffers"
548
+ # to the FQN names of those mutated buffers.
549
+ buffers_to_mutate: Dict[GraphOutputName, FQN]
550
+ user_inputs_to_mutate: Dict[GraphOutputName, GraphInputName]
551
+
552
+ in_spec: pytree.TreeSpec
553
+ out_spec: pytree.TreeSpec
554
+
555
+ backward_signature: Optional[BackwardSignature]
556
+
557
+ input_tokens: List[GraphInputName]
558
+ output_tokens: List[GraphOutputName]
559
+
560
+ @classmethod
561
+ def from_tracing_metadata(
562
+ cls,
563
+ *,
564
+ in_spec: pytree.TreeSpec,
565
+ out_spec: pytree.TreeSpec,
566
+ graph_input_names: List[str],
567
+ graph_output_names: List[str],
568
+ view_mutation_metadata: ViewAndMutationMeta,
569
+ named_parameters: List[str],
570
+ named_buffers: List[str],
571
+ num_user_inputs: int,
572
+ num_user_outputs: int,
573
+ loss_index: Optional[int],
574
+ backward_signature: Optional[BackwardSignature],
575
+ ) -> "GraphSignature":
576
+ graph_inputs = graph_input_names
577
+ graph_outputs = graph_output_names
578
+ parameters = list(named_parameters)
579
+ buffers = list(named_buffers)
580
+ num_tokens = len(view_mutation_metadata.tokens)
581
+
582
+ # Calling convention assumptions:
583
+ # (1) graph inputs = (input_tokens, params, buffers, user_inputs)
584
+ # (2) graph outputs = (output_tokens, mutated_inputs, user_outs, param_gradients)
585
+ # (If we are capturing an inference graph, this convention is identical
586
+ # except that param_gradients is empty)
587
+ # See Note [Side-Effectful Tokens in AOTAutograd] for information on tokens
588
+
589
+ # Address input calling conventions:
590
+ start, stop = 0, num_tokens
591
+ input_tokens = graph_inputs[start:stop]
592
+
593
+ start, stop = stop, stop + len(parameters)
594
+ inputs_to_parameters = dict(zip(graph_inputs[start:stop], parameters))
595
+
596
+ start, stop = stop, stop + len(buffers)
597
+ inputs_to_buffers = dict(
598
+ zip(
599
+ graph_inputs[start:stop],
600
+ buffers,
601
+ )
602
+ )
603
+
604
+ start, stop = stop, stop + num_user_inputs
605
+ user_inputs = graph_inputs[start:stop]
606
+
607
+ # We should've gone through all the inputs now
608
+ assert len(graph_inputs) - stop == 0
609
+
610
+ # Address output calling conventions:
611
+ start, stop = 0, num_tokens
612
+ output_tokens = graph_outputs[start:stop]
613
+
614
+ names = [*input_tokens, *parameters, *buffers, *user_inputs]
615
+ mutations = []
616
+ for idx, input_info in enumerate(view_mutation_metadata.input_info):
617
+ if input_info.mutates_data:
618
+ # Only buffers can be mutated, not parameters
619
+ assert idx >= len(parameters)
620
+ mutations.append(names[idx + num_tokens])
621
+
622
+ assert len(mutations) == view_mutation_metadata.num_mutated_inp_runtime_indices
623
+
624
+ start, stop = (
625
+ stop,
626
+ stop + view_mutation_metadata.num_mutated_inp_runtime_indices,
627
+ )
628
+ outputs_to_mutations = dict(zip(graph_outputs[start:stop], mutations))
629
+
630
+ user_inputs_to_mutate = {}
631
+ buffers_to_mutate = {}
632
+ for output_name, mutation_name in outputs_to_mutations.items():
633
+ if mutation_name in user_inputs:
634
+ user_inputs_to_mutate[output_name] = mutation_name
635
+ else:
636
+ assert mutation_name in buffers
637
+ buffers_to_mutate[output_name] = mutation_name
638
+
639
+ start, stop = stop, stop + num_user_outputs
640
+ user_outputs = graph_outputs[start:stop]
641
+
642
+ unused_outputs = len(graph_outputs) - stop
643
+ if backward_signature is not None:
644
+ unused_outputs -= len(backward_signature.gradients_to_parameters) + len(
645
+ backward_signature.gradients_to_user_inputs
646
+ )
647
+ assert unused_outputs == 0
648
+
649
+ return GraphSignature(
650
+ parameters=parameters, # type: ignore[arg-type]
651
+ buffers=buffers, # type: ignore[arg-type]
652
+ user_inputs=user_inputs, # type: ignore[arg-type]
653
+ user_outputs=user_outputs, # type: ignore[arg-type]
654
+ inputs_to_buffers=inputs_to_buffers, # type: ignore[arg-type]
655
+ inputs_to_parameters=inputs_to_parameters, # type: ignore[arg-type]
656
+ user_inputs_to_mutate=user_inputs_to_mutate,
657
+ buffers_to_mutate=buffers_to_mutate, # type: ignore[arg-type]
658
+ in_spec=in_spec,
659
+ out_spec=out_spec,
660
+ backward_signature=backward_signature,
661
+ input_tokens=input_tokens, # type: ignore[arg-type]
662
+ output_tokens=output_tokens, # type: ignore[arg-type]
663
+ )
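To make the input calling convention in `from_tracing_metadata` above concrete, here is a small sketch with made-up names (one token, two parameters, one buffer, two user inputs); it only mirrors the slicing logic and is not part of the exported API.

    graph_input_names = ["token0", "p0", "p1", "b0", "x0", "x1"]
    parameters = ["linear.weight", "linear.bias"]   # hypothetical FQNs
    buffers = ["bn.running_mean"]
    num_tokens, num_user_inputs = 1, 2

    start, stop = 0, num_tokens
    input_tokens = graph_input_names[start:stop]
    start, stop = stop, stop + len(parameters)
    inputs_to_parameters = dict(zip(graph_input_names[start:stop], parameters))
    start, stop = stop, stop + len(buffers)
    inputs_to_buffers = dict(zip(graph_input_names[start:stop], buffers))
    start, stop = stop, stop + num_user_inputs
    user_inputs = graph_input_names[start:stop]

    assert input_tokens == ["token0"]
    assert inputs_to_parameters == {"p0": "linear.weight", "p1": "linear.bias"}
    assert inputs_to_buffers == {"b0": "bn.running_mean"}
    assert user_inputs == ["x0", "x1"] and stop == len(graph_input_names)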
664
+
665
+
666
+ @dataclass
667
+ class AOTConfig:
668
+ """
669
+ Configuration for AOTDispatcher
670
+ """
671
+
672
+ fw_compiler: Callable
673
+ bw_compiler: Callable
674
+ partition_fn: Callable
675
+ decompositions: Dict[Callable, Callable]
676
+ num_params_buffers: int
677
+ aot_id: int
678
+ keep_inference_input_mutations: bool
679
+ is_export: bool = False
680
+ no_tangents: bool = False
681
+ dynamic_shapes: bool = False
682
+ aot_autograd_arg_pos_to_source: Optional[List[Source]] = None
683
+ inference_compiler: Optional[Callable] = None
684
+ enable_log: bool = True
685
+ # this is always false outside of export.
686
+ pre_dispatch: bool = False
687
+
688
+ def __post_init__(self):
689
+ if self.pre_dispatch:
690
+ assert self.is_export, "Can only have pre_dispatch IR for export."
691
+
692
+
693
+ SubclassTracingInfo = collections.namedtuple(
694
+ "SubclassTracingInfo",
695
+ ["plain_tensor_trace_fn", "plain_tensor_args", "maybe_subclass_meta"],
696
+ )
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/subclass_utils.py ADDED
@@ -0,0 +1,295 @@
1
+ """
2
+ This file contains utilities for tracing through __torch_dispatch__ based tensor subclasses and modes.
3
+ AOTAutograd's responsibility is to trace through all pytorch capabilities that live in the pytorch dispatcher,
4
+ and this includes tensor subclasses that implement __torch_dispatch__.
5
+ """
6
+
7
+ from typing import Any, List, Optional, Tuple, Union
8
+
9
+ import torch.utils._pytree as pytree
10
+
11
+ from torch import Tensor
12
+ from torch.utils._python_dispatch import is_traceable_wrapper_subclass
13
+
14
+ from .schemas import MutationType, SubclassCreationMeta, ViewAndMutationMeta
15
+ from .utils import strict_zip
16
+
17
+ zip = strict_zip
18
+
19
+
20
+ def requires_subclass_dispatch(args, fw_metadata: ViewAndMutationMeta) -> bool:
21
+ args_flattened = pytree.arg_tree_leaves(*args)
22
+ any_subclass_args = any(
23
+ is_traceable_wrapper_subclass(x)
24
+ for x in args_flattened
25
+ if isinstance(x, Tensor)
26
+ )
27
+ from torch._functorch._aot_autograd.schemas import SubclassCreationMeta
28
+
29
+ any_subclass_outputs = any(
30
+ type(x) is SubclassCreationMeta for x in fw_metadata.subclass_fw_graph_out_meta
31
+ )
32
+ # This tells us whether or not we need to perform any unwrapping/wrapping of tensor subclasses at runtime.
33
+ return any_subclass_args or any_subclass_outputs
34
+
35
+
36
+ # Given a flat list of arguments, some of which may be tensor subclasses,
37
+ # computes metadata about "how to reconstruct the current list of subclasses,
38
+ # if we were given their flattened dense tensors instead"
39
+ def create_subclass_meta(
40
+ curr_args: Union[List[Any], Tuple[Any, ...]],
41
+ ) -> List[Union[int, SubclassCreationMeta]]:
42
+ idx = 0
43
+ infos: List[Union[int, SubclassCreationMeta]] = []
44
+ for a in curr_args:
45
+ if isinstance(a, Tensor) and is_traceable_wrapper_subclass(a):
46
+ attrs, meta = a.__tensor_flatten__() # type: ignore[attr-defined]
47
+ start_idx = idx
48
+ cnt = len(attrs)
49
+ curr_cnt = cnt
50
+ infos.append(
51
+ SubclassCreationMeta(
52
+ flat_tensor_start_idx=start_idx,
53
+ arg_count=curr_cnt,
54
+ original_subclass=a,
55
+ meta=meta,
56
+ inner_keys=attrs,
57
+ outer_size=a.shape,
58
+ outer_stride=a.stride(),
59
+ )
60
+ )
61
+ else:
62
+ infos.append(idx)
63
+ cnt = 1
64
+ idx += cnt
65
+ return infos
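The index bookkeeping in `create_subclass_meta` can be illustrated without real tensor subclasses; the toy below only mirrors the start-index/arg-count arithmetic, using plain tuples in place of `SubclassCreationMeta`.

    # Flattened arg counts per input: a wrapper that flattens into two inner
    # tensors consumes two flat slots, a plain tensor consumes one.
    flattened_arg_counts = [2, 1, 1]
    idx, infos = 0, []
    for cnt in flattened_arg_counts:
        if cnt == 1:
            infos.append(idx)                      # plain tensor: just its flat index
        else:
            infos.append(("subclass", idx, cnt))   # subclass: start index + arg count
        idx += cnt
    assert infos == [("subclass", 0, 2), 2, 3]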
66
+
67
+
68
+ # Output structure:
69
+ # - List[Tensor] if tracing an inference graph
70
+ # - Tuple[List[Tensor], List[Tensor]] if tracing a joint graph.
71
+ # This function effectively concats each inner list of subclass tensors
72
+ # into a (potentially longer) list of inner tensors.
73
+ #
74
+ # This function takes in a pytree of arguments and unwraps any tensor subclasses.
75
+ # Annoyingly, we can't use pytrees to perform the unwrapping, because unwrapping returns
76
+ # a list of tensors that we would then need to concat together.
77
+ # Instead, we specialize the logic for the inference vs. joint graph case.
78
+ # NOTE: this function is hot, since we unwrap tensor subclass inputs at runtime
79
+ def unwrap_tensor_subclasses(wrapped_args, *, is_joint_structure: bool):
80
+ def concat_inner_tensors_from_subclasses(xs):
81
+ xs_inner = []
82
+ for x in xs:
83
+ if isinstance(x, Tensor) and is_traceable_wrapper_subclass(x):
84
+ attrs, _ = x.__tensor_flatten__() # type: ignore[attr-defined]
85
+ xs_inner += [getattr(x, attr) for attr in attrs]
86
+ else:
87
+ xs_inner += [x]
88
+ return xs_inner
89
+
90
+ if is_joint_structure:
91
+ assert isinstance(wrapped_args, tuple) and len(wrapped_args) == 2
92
+ assert isinstance(wrapped_args[0], (tuple, list)) and isinstance(
93
+ wrapped_args[1], (tuple, list)
94
+ )
95
+ unwrapped_args_fw = concat_inner_tensors_from_subclasses(wrapped_args[0])
96
+ unwrapped_args_tangents = concat_inner_tensors_from_subclasses(wrapped_args[1])
97
+ unwrapped_args = (unwrapped_args_fw, unwrapped_args_tangents)
98
+ else:
99
+ assert isinstance(wrapped_args, (list, tuple))
100
+ unwrapped_args_fw = concat_inner_tensors_from_subclasses(wrapped_args)
101
+ unwrapped_args = unwrapped_args_fw
102
+ return unwrapped_args
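Here is a rough sketch of the "concat inner tensors" step above, using a plain placeholder class instead of a real `__torch_dispatch__` wrapper subclass (so `is_traceable_wrapper_subclass` is replaced by a simple `isinstance` check).

    import torch

    class FakeWrapper:
        # Stands in for a wrapper subclass whose __tensor_flatten__ returns ("a", "b").
        def __init__(self, a, b):
            self.a, self.b = a, b

    def flatten_args(xs):
        out = []
        for x in xs:
            if isinstance(x, FakeWrapper):
                out += [x.a, x.b]        # "unwrap" into the dense inner tensors
            else:
                out.append(x)
        return out

    args = [FakeWrapper(torch.ones(2), torch.zeros(2)), torch.arange(3)]
    assert len(flatten_args(args)) == 3  # two inner tensors + one plain tensor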
103
+
104
+
105
+ # Turns a flattened list of tensor arguments into (maybe) subclass tensors.
106
+ # This function is used both at trace time and runtime, so we have an is_runtime flag telling us which context we're in.
107
+ def wrap_tensor_subclasses(
108
+ unwrapped_args: Union[Tuple[Any, ...], List[Any]],
109
+ *,
110
+ subclass_metas: List[Union[int, SubclassCreationMeta]],
111
+ num_fw_outs_saved_for_bw: Optional[int] = None,
112
+ is_runtime: bool = False,
113
+ ) -> Tuple[Any, ...]:
114
+ wrapped_args = []
115
+ num_args_tallied = 0
116
+ for subclass_meta in subclass_metas:
117
+ if isinstance(subclass_meta, int):
118
+ wrapped_args.append(unwrapped_args[subclass_meta])
119
+ num_args_tallied += 1
120
+ else:
121
+ assert isinstance(subclass_meta, SubclassCreationMeta)
122
+ wrapped_args.append(
123
+ subclass_meta.creation_fn(unwrapped_args, is_runtime=is_runtime)
124
+ )
125
+ num_args_tallied += subclass_meta.arg_count
126
+
127
+ # Note: [Partitioner handling for Subclasses, Part 2]
128
+ # At the beginning of AOTAutograd, we collect metadata on the inputs and outputs of the user fw,
129
+ # to figure out which inputs/outputs are subclasses, and how to reconstruct the subclasses after flattening them.
130
+ #
131
+ # When this function is called at runtime in the forward,
132
+ # we have been passed a list of (flattened) dense-tensor fw-outs, and need to reconstruct any subclass fw outs.
133
+ #
134
+ # One reasonable question that you should ask: when should the dense_tensor -> subclass_tensor wrapping happen?
135
+ # Answer: we do it **inside of our compiled autograd.Function**.
136
+ # This seems like morally the right place: autograd happens above subclass desugaring,
137
+ # so autograd should see actual tensor subclasses at runtime, and not flattened dense tensors.
138
+ #
139
+ # This causes a tricky interaction though: when we run the min-cut partitioner to divvy up the joint graph
140
+ # into a forward and backward graph, we end up with some activations that show up as extra outputs
141
+ # in the compiled forward graph, that are **not** user outputs.
142
+ # These activations are not visible to the user, and so there's no need for us to wrap them back into subclasses.
143
+ #
144
+ # On top of that, when we first computed subclass metadata (in `run_functionalized_fw_and_collect_metadata`),
145
+ # we computed subclass metadata on every forward output, but this did **not** include activations
146
+ # created by the partitioner.
147
+ # As a result, `unwrapped_args` here will correspond to (*unwrapped_user_fw_outs, *activations),
148
+ # but `subclass_metas` will only correspond to subclass metadata on `user_fw_outs`.
149
+ # We then need to make sure that we return (*wrapped_user_fw_outs, *activations).
150
+ if num_fw_outs_saved_for_bw is not None:
151
+ assert len(unwrapped_args) == num_args_tallied + num_fw_outs_saved_for_bw, (
152
+ f"Expected the number actual unwrapped-subclass outputs {len(unwrapped_args)} to equal "
153
+ f"the number of args calculated from subclasses ({num_args_tallied}) plus the number of "
154
+ f"additional activations saved for the backward pass ({num_fw_outs_saved_for_bw})"
155
+ )
156
+ activations = unwrapped_args[num_args_tallied:]
157
+ if isinstance(wrapped_args, tuple) and isinstance(activations, tuple):
158
+ return wrapped_args + activations
159
+ return tuple(list(wrapped_args) + list(activations))
160
+ else:
161
+ assert len(unwrapped_args) == num_args_tallied
162
+ return tuple(wrapped_args)
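A small numeric sketch of the activation-handling branch above: when the runtime forward also returns partitioner-created activations, only the leading `num_args_tallied` entries are wrapped back into subclasses, and the trailing activations are passed through untouched. All values below are invented for the example.

    unwrapped = ["inner_a", "inner_b", "plain_out", "act0", "act1"]
    num_args_tallied = 3            # two inner tensors for one subclass out + one plain out
    num_fw_outs_saved_for_bw = 2
    assert len(unwrapped) == num_args_tallied + num_fw_outs_saved_for_bw

    wrapped_user_outs = ["SubclassOut(inner_a, inner_b)", "plain_out"]  # hypothetical wrapping
    result = (*wrapped_user_outs, *unwrapped[num_args_tallied:])
    assert result[-2:] == ("act0", "act1")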
163
+
164
+
165
+ # Given a bunch of "dense" tensor arguments, this function (potentially) wraps them into tensor subclasses.
166
+ # This function carefully handles the inference vs. joint cases:
167
+ # - when is_joint_structure is True, args is (primals, tangents)
168
+ # - when is_joint_structure is False, args is [*primals]
169
+ def wrap_tensor_subclasses_maybe_joint(
170
+ unwrapped_args, *, is_joint_structure: bool, meta: ViewAndMutationMeta
171
+ ) -> Union[Tuple[Any, ...], List[Any]]:
172
+ # Since this function is re-used for both inference and joint graphs,
173
+ if is_joint_structure:
174
+ assert isinstance(unwrapped_args, tuple) and len(unwrapped_args) == 2
175
+ assert isinstance(unwrapped_args[0], (tuple, list)) and isinstance(
176
+ unwrapped_args[1], (tuple, list)
177
+ )
178
+ primals, tangents = unwrapped_args[0], unwrapped_args[1]
179
+ wrapped_primals = wrap_tensor_subclasses(
180
+ primals, subclass_metas=meta.subclass_inp_meta
181
+ )
182
+ wrapped_tangents = wrap_tensor_subclasses(
183
+ tangents, subclass_metas=meta.subclass_tangent_meta
184
+ )
185
+ return (wrapped_primals, wrapped_tangents)
186
+ else:
187
+ wrapped_args = wrap_tensor_subclasses(
188
+ unwrapped_args, subclass_metas=meta.subclass_inp_meta
189
+ )
190
+ return wrapped_args
191
+
192
+
193
+ # TODO: UNUSED. delete?
194
+ def create_metadata_for_subclass(meta: ViewAndMutationMeta) -> ViewAndMutationMeta:
195
+ # input infos
196
+ input_info = []
197
+ for inp, subclass_meta in zip(meta.input_info, meta.subclass_inp_meta):
198
+ num_inps = 1 if isinstance(subclass_meta, int) else subclass_meta.arg_count
199
+ for _ in range(num_inps):
200
+ input_info.append(inp)
201
+
202
+ # output infos
203
+ output_info = []
204
+ subclass_out_meta_user_outs_only = meta.subclass_fw_graph_out_meta[
205
+ meta.num_mutated_inp_runtime_indices :
206
+ ]
207
+ if meta.num_intermediate_bases > 0:
208
+ subclass_out_meta_user_outs_only = subclass_out_meta_user_outs_only[
209
+ : -meta.num_intermediate_bases
210
+ ]
211
+ # sanity assert
212
+ assert len(meta.output_info) == len(subclass_out_meta_user_outs_only)
213
+ # Assume that the information on the output is shared by all of its inner tensors.
214
+ for out, subclass_meta in zip(meta.output_info, subclass_out_meta_user_outs_only):
215
+ num_outs = 1 if isinstance(subclass_meta, int) else subclass_meta.arg_count
216
+ for _ in range(num_outs):
217
+ output_info.append(out)
218
+
219
+ # A bit hacky, but we don't actually care about all of the metadata here.
220
+ # This metadata is used **underneath** both autograd and subclass de-sugaring,
221
+ # So all we really care about is stuff like:
222
+ # - num inputs/outputs (needed by the partitioner)
223
+ # - input mutations (**not** used today, since we don't handle input mutations inside the subclass,
224
+ # although we should handle this eventually)
225
+ # TODO: add a test case to assert we error when this happens, instead of silently getting incorrect results
226
+ num_intermediate_bases = None
227
+ keep_input_mutations = meta.keep_input_mutations
228
+ traced_tangents = None
229
+ subclass_inp_meta = None
230
+ subclass_fw_graph_out_meta = None
231
+ subclass_tangent_meta = None
232
+
233
+ metadata = ViewAndMutationMeta(
234
+ input_info=input_info, # type: ignore[arg-type]
235
+ output_info=output_info, # type: ignore[arg-type]
236
+ num_intermediate_bases=num_intermediate_bases, # type: ignore[arg-type]
237
+ keep_input_mutations=keep_input_mutations, # type: ignore[arg-type]
238
+ traced_tangents=traced_tangents, # type: ignore[arg-type]
239
+ subclass_inp_meta=subclass_inp_meta, # type: ignore[arg-type]
240
+ subclass_fw_graph_out_meta=subclass_fw_graph_out_meta, # type: ignore[arg-type]
241
+ subclass_tangent_meta=subclass_tangent_meta, # type: ignore[arg-type]
242
+ )
243
+ return metadata
244
+
245
+
246
+ def compute_inner_mutated_inp_indices_from_subclass_meta(
247
+ fw_metadata: ViewAndMutationMeta,
248
+ inner_metadata: ViewAndMutationMeta,
249
+ ) -> List[int]:
250
+ # Note: [Recomputing subclass mutation handling]
251
+ #
252
+ # Generally, if a subclass requires grad, its components will not require grad.
253
+ # But for the purposes of tracking returned tensors, we should treat those component
254
+ # tensors as if they require grad.
255
+ #
256
+ # For example, if the subclass tensor requires grad and will be mutated in a way that
257
+ # requires us to handle the mutation outside of the graph, we need to return it
258
+ # from the forward graph. The inner_meta data won't consider the component tensors
259
+ # as if they need to be returned, because they don't require grad; but really, we
260
+ # should handle those tensors the same way we handle the subclass tensor itself; i.e.
261
+ # if we'd include the subclass tensor as part of the outputs, then we should also
262
+ # include the component tensors.
263
+ #
264
+ # To do this, we patch num_mutated_inp_runtime_indices below by expanding the inputs
265
+ # from the outer subclass tensors and propagating
266
+
267
+ updated_input_info = []
268
+ inner_idx = 0
269
+ if not fw_metadata.subclass_inp_meta:
270
+ # Sometimes we don't have subclass info, e.g. synthetic_base codepaths
271
+ return inner_metadata.mutated_inp_runtime_indices
272
+ assert len(fw_metadata.subclass_inp_meta) == len(fw_metadata.input_info)
273
+ for outer_idx, inp_meta in enumerate(fw_metadata.subclass_inp_meta):
274
+ if isinstance(inp_meta, int):
275
+ assert outer_idx < len(fw_metadata.input_info)
276
+ if inner_metadata is not None:
277
+ assert inner_idx < len(inner_metadata.input_info)
278
+ assert (
279
+ inner_metadata.input_info[inner_idx]
280
+ == fw_metadata.input_info[outer_idx]
281
+ )
282
+ updated_input_info.append(fw_metadata.input_info[outer_idx])
283
+ inner_idx += 1
284
+ else:
285
+ for _ in range(inp_meta.arg_count):
286
+ updated_input_info.append(fw_metadata.input_info[outer_idx])
287
+ inner_idx += 1
288
+ if inner_metadata is not None:
289
+ assert len(inner_metadata.input_info) == len(updated_input_info)
290
+
291
+ return [
292
+ i
293
+ for i, inp in enumerate(updated_input_info)
294
+ if inp.mutation_type == MutationType.MUTATED_OUT_GRAPH
295
+ ]
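The "expand outer input info across inner dense tensors" step in the function above boils down to repeating each outer entry `arg_count` times; the toy below shows just that arithmetic with invented data.

    outer_input_info = ["mutated", "clean"]
    subclass_inp_meta = [("subclass", 2), 0]   # first input flattens into two tensors
    updated = []
    for outer_idx, meta in enumerate(subclass_inp_meta):
        n = meta[1] if isinstance(meta, tuple) else 1
        updated += [outer_input_info[outer_idx]] * n
    assert updated == ["mutated", "mutated", "clean"]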
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/traced_function_transforms.py ADDED
@@ -0,0 +1,698 @@
1
+ """
2
+ This module is responsible for transforming functions to be traced into a form
3
+ that is easier for the downstream infra (e.g. Autograd, FX, AOTAutograd analysis)
4
+ to handle.
5
+
6
+ It does so by:
7
+ 1. functionalization (including RNG functionalization)
8
+ 2. creating a joint graph when required
9
+ 3. transforming mutations into extra outputs
10
+ 4. dispatching subclasses
11
+ """
12
+
13
+ import warnings
14
+ from contextlib import nullcontext
15
+ from functools import wraps
16
+ from typing import Any, Callable, List, Tuple, Union
17
+ from unittest.mock import patch
18
+
19
+ import torch
20
+ import torch.fx.traceback as fx_traceback
21
+ import torch.utils._pytree as pytree
22
+ from torch import Tensor
23
+ from torch._decomp.decompositions_for_rng import PhiloxStateTracker
24
+ from torch._guards import detect_fake_mode
25
+ from torch._prims_common import CUDARngStateHelper
26
+ from torch.fx.experimental.symbolic_shapes import definitely_false, sym_eq
27
+ from torch.nn.utils import stateless
28
+
29
+ from .. import config
30
+ from .collect_metadata_analysis import run_functionalized_fw_and_collect_metadata
31
+ from .functional_utils import (
32
+ from_fun,
33
+ has_data_mutation,
34
+ has_metadata_mutation,
35
+ is_fun,
36
+ sync_functional_tensor,
37
+ to_fun,
38
+ )
39
+ from .logging_utils import setup_stacktrace_preservation_hooks
40
+ from .schemas import (
41
+ AOTConfig,
42
+ MutationType,
43
+ OutputType,
44
+ SubclassMeta,
45
+ SubclassTracingInfo,
46
+ ViewAndMutationMeta,
47
+ )
48
+ from .subclass_utils import (
49
+ create_subclass_meta,
50
+ requires_subclass_dispatch,
51
+ unwrap_tensor_subclasses,
52
+ wrap_tensor_subclasses_maybe_joint,
53
+ )
54
+ from .utils import maybe_to_fresh_input
55
+
56
+
57
+ # This function returns a new function that returns mutated inputs as outputs.
58
+ # if keep_data_input_mutations is set, then we assume that data-only mutations
59
+ # will be left in the graph, and we only return metadata-mutated inputs as outputs.
60
+ def fn_input_mutations_to_outputs(
61
+ fn: Callable,
62
+ meta: ViewAndMutationMeta,
63
+ keep_data_input_mutations: bool,
64
+ ) -> Any:
65
+ @wraps(fn)
66
+ def inner_fn(*args):
67
+ outs = fn(*args)
68
+ assert len(meta.output_info) == len(outs)
69
+ # The compiled fw will return mutated input tensors, *including* metadata-only mutation.
70
+ # However, if keep_data_input_mutations is set, the compiled fw only needs to return metadata-mutated inputs.
71
+ # (because data-only input mutations are handled directly in the compiled graph)
72
+ mutated_inputs_to_return = [
73
+ x for (i, x) in enumerate(args) if i in meta.mutated_inp_runtime_indices
74
+ ]
75
+ return *mutated_inputs_to_return, *outs
76
+
77
+ return inner_fn
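A minimal, self-contained sketch of the calling-convention change performed by `fn_input_mutations_to_outputs`: a wrapper that returns the (possibly mutated) inputs at the given indices ahead of the real outputs. The helper name and indices below are made up for the example.

    import torch

    def with_mutated_inputs_as_outputs(fn, mutated_indices):
        def inner(*args):
            outs = fn(*args)
            mutated = [args[i] for i in mutated_indices]
            return (*mutated, *outs)
        return inner

    def f(x, y):
        x.add_(1)                    # data mutation of input 0
        return (x * y,)

    g = with_mutated_inputs_as_outputs(f, mutated_indices=[0])
    res = g(torch.zeros(2), torch.ones(2))
    assert len(res) == 2             # (mutated x, user output)
    assert torch.equal(res[0], torch.ones(2))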
78
+
79
+
80
+ # This function takes in a fn with external aliasing and mutation,
81
+ # and returns a new fn with no external aliasing and mutation,
82
+ # as needed for autograd.
83
+ # The main transformations are:
84
+ # - Return mutated inputs as extra outputs
85
+ # - Clone mutated inputs that require gradients,
86
+ # because autograd will require us to pass the pre-mutated inputs into autograd.grad
87
+ # - Return intermediate bases of outputs as additional outputs,
88
+ # needed to appease autograd.Function
89
+ # The new function returns:
90
+ # (1) The updated outputs
91
+ # (2) A boolean mask of len(new_fn_outputs),
92
+ # that can be used to tell autograd.grad which outputs should get tangents
93
+ # if we trace the backward.
94
+ def fn_prepped_for_autograd(
95
+ fn: Callable,
96
+ meta: ViewAndMutationMeta,
97
+ ) -> Any:
98
+ @wraps(fn)
99
+ def inner_fn(*args):
100
+ args_maybe_cloned = [
101
+ maybe_to_fresh_input(i, t, meta) for i, t in enumerate(args)
102
+ ]
103
+
104
+ outs = fn(*args_maybe_cloned)
105
+ assert isinstance(outs, (tuple, list))
106
+ outs = list(outs)
107
+ assert len(meta.output_info) == len(outs)
108
+
109
+ mutated_inputs_to_return = [
110
+ x
111
+ for (i, x) in enumerate(args_maybe_cloned)
112
+ if i in meta.mutated_inp_runtime_indices
113
+ ]
114
+
115
+ intermediate_bases = []
116
+ for i, (o, info) in enumerate(zip(outs, meta.output_info)):
117
+ if info.output_type == OutputType.alias_of_intermediate_save_as_output:
118
+ intermediate_bases.append(o._base)
119
+
120
+ assert meta.num_intermediate_bases == len(intermediate_bases)
121
+
122
+ # the compiled forward should return (mutated_inputs, user_outs, intermediate_bases)
123
+ fw_outs_to_return = *mutated_inputs_to_return, *outs, *intermediate_bases
124
+
125
+ # Also return a boolean mask specifying which outputs to this function will be used as tangents
126
+ mutated_inputs_grad_mask = [
127
+ meta.input_info[meta.mutated_inp_runtime_indices[i]].mutates_data
128
+ and meta.input_info[meta.mutated_inp_runtime_indices[i]].requires_grad
129
+ for (i, x) in enumerate(mutated_inputs_to_return)
130
+ ]
131
+
132
+ # Pass any (non-aliased) outputs in as tangents, since they'll be returned as outputs in the fw
133
+ # For outputs that are aliases of intermediates, we will have returned the output's _base as an output in the graph instead,
134
+ # which we *should* send to grad()
135
+ output_grad_mask = [
136
+ meta.output_info[i].output_type
137
+ in [
138
+ OutputType.non_alias,
139
+ OutputType.unsafe_view_alias,
140
+ OutputType.custom_function_view,
141
+ ]
142
+ # Also, only tensor outputs should participate in the backward
143
+ # (in particular, Symint outputs in the forward graph shouldn't get tangents)
144
+ and issubclass(meta.output_info[i].raw_type, Tensor)
145
+ and meta.output_info[i].requires_grad
146
+ for (i, x) in enumerate(outs)
147
+ ]
148
+
149
+ intermediate_base_grad_mask = [True for _ in range(len(intermediate_bases))]
150
+
151
+ out_grad_mask = (
152
+ mutated_inputs_grad_mask + output_grad_mask + intermediate_base_grad_mask
153
+ )
154
+ assert len(out_grad_mask) == len(fw_outs_to_return)
155
+
156
+ # Take care to grab and sync the updated inputs from primals_after_cloning (the inputs we actually mutate!)
157
+ # and not primals (the preserved inputs, pre-mutation, that we pass to grad())
158
+ # This is annoying: our joint function needs to be aware of functionalization
159
+ # (syncing mutated inputs before calling autograd.grad())
160
+ # In theory, we could make the autograd engine do this automatically, although that probably isn't any cleaner.
161
+ for arg in args_maybe_cloned:
162
+ if not isinstance(arg, Tensor):
163
+ continue
164
+ sync_functional_tensor(arg)
165
+
166
+ return fw_outs_to_return, out_grad_mask
167
+
168
+ return inner_fn
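The `(outputs, mask)` contract described above can be illustrated with plain Python: only masked-in outputs are later matched up with tangents. Everything below is a toy stand-in, not the real data structures.

    fw_outs = ["mutated_inp", "user_out", "alias_of_input"]
    out_grad_mask = [True, True, False]   # aliases of inputs don't receive tangents
    outs_to_grad = [o for keep, o in zip(out_grad_mask, fw_outs) if keep]
    assert outs_to_grad == ["mutated_inp", "user_out"]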
169
+
170
+
171
+ # Given a fn, computes the joint.
172
+ # NOTE: fn is expected to have the following behavior:
173
+ # (1) fn() needs to return a tuple of (outs, mask),
174
+ # where `mask` tells us which outputs are meant to have tangents.
175
+ # we don't know this info automatically, because we don't actually want to blindly
176
+ # compute tangents for every output that requires grad.
177
+ # Specifically, outputs that alias inputs won't participate in the backward and get tangents.
178
+ # (2) fn() cannot mutate any inputs that require gradient.
179
+ # otherwise, when we compute autograd.grad(), we will not take those input mutations into account
180
+ # (the way this is handled is that we ensure any inputs that normally get mutated are cloned first)
181
+ def create_joint(fn: Callable, *, aot_config: AOTConfig) -> Any:
182
+ def inner_fn(primals: List[Any], tangents: List[Any]):
183
+ outs, tangent_mask = fn(*primals)
184
+ assert len(tangent_mask) == len(outs)
185
+ outs_to_grad = [
186
+ o for needs_tangent, o in zip(tangent_mask, outs) if needs_tangent
187
+ ]
188
+ assert len(outs_to_grad) == len(tangents)
189
+
190
+ # Get the inputs that need gradients
191
+ grad_primals = []
192
+ inputs_needs_grads = []
193
+ # Note that we're not using primals here,
194
+ # being careful not to pass any mutated inputs into autograd.grad()
195
+ for p in primals:
196
+ is_grad_tensor = isinstance(p, Tensor) and p.requires_grad
197
+ inputs_needs_grads.append(is_grad_tensor)
198
+ if is_grad_tensor:
199
+ grad_primals.append(p)
200
+
201
+ # Get the outputs that need gradients
202
+ needed_outs = []
203
+ needed_tangents = []
204
+ for out, tangent in zip(outs_to_grad, tangents):
205
+ if isinstance(out, Tensor) and out.requires_grad:
206
+ # A bit sketchy, but fixes e.g. test_aot_autograd_exhaustive_matmul_cpu_float32
207
+ # The issue is that we are sensitive to decomps that don't accurately maintain
208
+ # their output's _base.shape compared to eager mode, and this helps mitigate a bit.
209
+ # The not definitely_false is also sketchy; if unbacked
210
+ # symints are involved, we're just going to assume that the
211
+ # decomps set up the base shape correctly
212
+ needed_outs.append(
213
+ out
214
+ if not definitely_false(sym_eq(out.shape, tangent.shape))
215
+ else out.view(tangent.shape)
216
+ )
217
+ needed_tangents.append(tangent)
218
+
219
+ setup_stacktrace_preservation_hooks([out.grad_fn for out in needed_outs])
220
+
221
+ if config.functionalize_rng_ops:
222
+ PhiloxStateTracker.mark_beginning_of_backward()
223
+ backward_out: Tuple[Tensor, ...] = tuple()
224
+ # Call the backwards pass
225
+ if grad_primals:
226
+ with fx_traceback.preserve_node_meta():
227
+ # for full graph export, we always export a joint graph where we assume no tangents are needed.
228
+ if aot_config.no_tangents:
229
+ assert len(needed_tangents) == 1 and needed_tangents[0].numel() == 1
230
+ backward_out = torch.autograd.grad(
231
+ needed_outs,
232
+ grad_primals,
233
+ allow_unused=True,
234
+ )
235
+ else:
236
+ backward_out = torch.autograd.grad(
237
+ needed_outs,
238
+ grad_primals,
239
+ grad_outputs=needed_tangents,
240
+ allow_unused=True,
241
+ )
242
+ backward_out_iter = iter(backward_out)
243
+ return outs, [
244
+ next(backward_out_iter) if i else None for i in inputs_needs_grads
245
+ ]
246
+
247
+ def inner_fn_with_anomaly(*args):
248
+ with fx_traceback.preserve_node_meta(), warnings.catch_warnings():
249
+ warnings.filterwarnings("ignore", "Anomaly Detection has been enabled.")
250
+ with torch.autograd.detect_anomaly(check_nan=False):
251
+ return inner_fn(*args)
252
+
253
+ return inner_fn_with_anomaly
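Conceptually, the joint that `create_joint` traces boils down to "run the forward, then drive `torch.autograd.grad` with the provided tangents". The standalone sketch below shows that core idea for a single scalar primal; it omits masks, functionalization, and anomaly handling.

    import torch

    def toy_joint(primals, tangents):
        (x,) = primals
        out = x * x                            # forward
        (grad_x,) = torch.autograd.grad(       # backward, driven by the tangent
            out, (x,), grad_outputs=tangents, allow_unused=True
        )
        return [out], [grad_x]

    x = torch.tensor(3.0, requires_grad=True)
    outs, grads = toy_joint([x], [torch.tensor(1.0)])
    assert grads[0].item() == 6.0              # d(x*x)/dx at x = 3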
254
+
255
+
256
+ def create_functionalized_rng_ops_wrapper(func, args, trace_joint=True) -> Any:
257
+ # Functionalization of rng ops changes the calling convention of the joint graph.
258
+ # It goes from (primals, tangents) to (seed, offset, primals, tangents)
259
+ # At runtime, we pass on the current seed and offset. This is hidden from
260
+ # the user.
261
+ fake_mode = detect_fake_mode()
262
+ if fake_mode is None:
263
+ fake_mode = nullcontext()
264
+
265
+ def override_get_rng_state(device: Union[int, str, torch.device] = "cuda"):
266
+ out = PhiloxStateTracker.get_state_as_tensor()
267
+ return out
268
+
269
+ def override_set_rng_state(x, device: Union[int, str, torch.device] = "cuda"):
270
+ PhiloxStateTracker.set_state_from_tensor(x)
271
+
272
+ def append_rng_offsets(args):
273
+ if trace_joint:
274
+ # args signature before: Tuple(fwd_outputs), Tuple(bwd_outputs)
275
+ # args signature after: Tuple(fwd_outputs, new_fwd_rng_offset), Tuple(bwd_offset, new_bwd_rng_offset)
276
+ return (
277
+ (*args[0], PhiloxStateTracker.get_updated_fwd_offset()),
278
+ (*args[1], PhiloxStateTracker.get_updated_bwd_offset()),
279
+ )
280
+ else:
281
+ # args signature before: Tuple(fwd_outputs)
282
+ # args signature after: Tuple(fwd_outputs, new_fwd_rng_offset)
283
+ return (*args, PhiloxStateTracker.get_updated_fwd_offset())
284
+
285
+ def traced_joint(
286
+ primals, tangents, fwd_seed, fwd_base_offset, bwd_seed, bwd_base_offset
287
+ ):
288
+ with patch("torch.cuda.get_rng_state", override_get_rng_state), patch(
289
+ "torch.cuda.set_rng_state", override_set_rng_state
290
+ ):
291
+ return append_rng_offsets(func(primals, tangents))
292
+
293
+ def traced_forward(*primals_fwd_seed_fwd_base_offset):
294
+ # The signature is (*primals, seed, offset)
295
+ with patch("torch.cuda.get_rng_state", override_get_rng_state), patch(
296
+ "torch.cuda.set_rng_state", override_set_rng_state
297
+ ):
298
+ return append_rng_offsets(func(*primals_fwd_seed_fwd_base_offset[:-2]))
299
+
300
+ if trace_joint:
301
+ # Get the current seed and offset to setup tracing.
302
+ fwd_seed, fwd_base_offset = CUDARngStateHelper.get_torch_state_as_tuple(
303
+ fake_mode
304
+ )
305
+ bwd_seed, bwd_base_offset = CUDARngStateHelper.get_torch_state_as_tuple(
306
+ fake_mode
307
+ )
308
+ PhiloxStateTracker.record_state(fwd_seed, fwd_base_offset, "forward")
309
+ PhiloxStateTracker.record_state(bwd_seed, bwd_base_offset, "backward")
310
+ return traced_joint, (
311
+ *args,
312
+ fwd_seed,
313
+ fwd_base_offset,
314
+ bwd_seed,
315
+ bwd_base_offset,
316
+ )
317
+ else:
318
+ # Get the current seed and offset to setup tracing.
319
+ fwd_seed, fwd_base_offset = CUDARngStateHelper.get_torch_state_as_tuple(
320
+ fake_mode
321
+ )
322
+ PhiloxStateTracker.record_state(fwd_seed, fwd_base_offset, "forward")
323
+ return traced_forward, (*args, fwd_seed, fwd_base_offset)
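To visualize the calling-convention change described above (trailing seed/offset inputs, plus an extra "updated offset" output), here is a toy forward with placeholder arithmetic; the real wrapper delegates this bookkeeping to PhiloxStateTracker.

    def toy_traced_forward(*primals_and_rng):
        *primals, seed, offset = primals_and_rng   # seed is unused in this toy
        outs = tuple(p + 1 for p in primals)       # stand-in for the real forward
        new_offset = offset + 4                    # pretend the graph consumed 4 random values
        return (*outs, new_offset)

    assert toy_traced_forward(10, 20, 0, 100) == (11, 21, 104)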
324
+
325
+
326
+ # This creates the final function that we want to trace using make_fx(),
327
+ # in both aot_dispatch_autograd and aot_dispatch_base.
328
+ # Preconditions:
329
+ # - fn corresponds to the user's fw function
330
+ # - fn arguments have been flattened, duplicate arguments have been handled
331
+ # - In the returned function, the "primals" argument *includes* synthetic bases.
332
+ # This function does the work of functionalizing the input function,
333
+ # and performing copy_() calls at the end of the function if `keep_input_mutations` is set.
334
+ # The function returned has a signature that is either:
335
+ # (1) "traced_fn(primals: List[Any])" if trace_joint is False
336
+ # (2) "traced_fn(primals: List[Any], tangents: List[Any])" if trace_joint is True
337
+ # Returns a new (functionalized) function, and updated arguments to call it with.
338
+ def create_functionalized_fn(
339
+ fn,
340
+ args,
341
+ *,
342
+ meta: ViewAndMutationMeta,
343
+ aot_config: AOTConfig,
344
+ trace_joint: bool,
345
+ ) -> Any:
346
+ @wraps(fn)
347
+ def _functionalized_f_helper(*args):
348
+ # See Note [Disabling Functionalize TLS Above Python Functionalization]
349
+ disable_above = torch._C._ExcludeDispatchKeyGuard(
350
+ torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
351
+ )
352
+
353
+ # See Note [Side-Effectful Tokens in AOTAutograd]
354
+ if trace_joint:
355
+ assert (
356
+ isinstance(args, tuple)
357
+ and len(args) == 2
358
+ and isinstance(args[0], (list, tuple))
359
+ )
360
+ tokens = args[0][: len(meta.tokens)]
361
+ actual_args = args[0][len(meta.tokens) :]
362
+ args = (actual_args, args[1])
363
+ else:
364
+ tokens = args[: len(meta.tokens)]
365
+ args = args[len(meta.tokens) :]
366
+ assert all(token.numel() == 0 for token in tokens)
367
+
368
+ with disable_above:
369
+ # Wrap inputs into functional wrappers
370
+ f_args = pytree.tree_map(to_fun, args)
371
+ f_tokens = pytree.tree_map(to_fun, tokens)
372
+
373
+ # Populate the current FunctionalTensorMode with the tokens per
374
+ # operator. See Note [FunctionalTensorMode is Stateful]
375
+ functional_tensor_mode = (
376
+ torch.utils._python_dispatch._detect_functional_mode()
377
+ )
378
+ assert functional_tensor_mode is not None
379
+ for i, k in enumerate(meta.tokens.keys()):
380
+ functional_tensor_mode._tokens[k] = f_tokens[i]
381
+
382
+ # Run the joint
383
+ f_outs = fn(*f_args)
384
+
385
+ # Return both the tokens and the outputs
386
+ # See Note [Side-Effectful Tokens in AOTAutograd]
387
+ f_outs = (*functional_tensor_mode._tokens.values(), *f_outs)
388
+
389
+ if trace_joint:
390
+ # We support a limited amount of mutation of graph inputs during the backward pass.
391
+ # (This is used e.g. by Float8, which needs to update buffers during the backward pass)
392
+ # Here, we perform extra checks for primals that were mutated in the **backward**
393
+ # We're doing the checks here instead of doing them with the rest of the input mutation handling because:
394
+ # - We need to detect inputs that were mutated in the backward **separately** from mutations that happened
395
+ # during the forward, because the handling is different: some input mutations from the forward
396
+ # can be only handled in a fw-only runtime epilogue, and in theory if we wanted to handle those same
397
+ # types of mutations in the backward we would need a bw-only runtime epilogue.
398
+ # - We could in theory have our analysis pass differentiate mutations in the fw from mutations in
399
+ # the bw by running our analysis first on the fw-only graph, and then on the joint graph. This would
400
+ # require an extra round of tracing though, so it's more efficient to do in-line here.
401
+ assert (
402
+ isinstance(args, tuple)
403
+ and len(args) == 2
404
+ and isinstance(args[0], (list, tuple))
405
+ )
406
+ # Only look at mutations that happened to forward inputs (e.g. fw buffers that were saved for bw)
407
+ primals_before = args[0]
408
+ primals_after = pytree.tree_map(from_fun, f_args[0])
409
+ for f_inpt, before, after, inpt_info in zip(
410
+ f_args[0], primals_before, primals_after, meta.input_info
411
+ ):
412
+ # Ban metadata mutations on fw inputs during the bw
413
+ if not inpt_info.mutates_metadata:
414
+ assert not has_metadata_mutation(
415
+ f_inpt, before, check_only_storage_mutation=False
416
+ ), "Found a graph input that had its metadata mutated in the backward. This is not supported"
417
+ # Allow data mutations on fw inputs during the bw, but only if they do not require grad
418
+ # So we can guarantee that we can keep the mutations in the graph
419
+ if has_data_mutation(f_inpt) and not inpt_info.mutates_data:
420
+ assert (
421
+ not inpt_info.requires_grad
422
+ ), "Found a graph input that requires_grad and was mutated in the backward. This is not supported"
423
+ # Otherwise, put the mutation in the graph
424
+ before.copy_(after)
425
+ # Now that we covered mutations to *forward* inputs during the backward,
426
+ # we also need to cover mutations to *backward-only* inputs during the backward (e.g. mutation to a grad_out).
427
+ # Today, we will just error in all cases of this happening unless someone needs us to support it.
428
+ tangents_before = args[1]
429
+ tangents_after = pytree.tree_map(from_fun, f_args[1])
430
+ for f_inpt, before, after in zip(
431
+ f_args[1], tangents_before, tangents_after
432
+ ):
433
+ assert not has_metadata_mutation(
434
+ f_inpt, before, check_only_storage_mutation=False
435
+ ) and not has_data_mutation(
436
+ f_inpt
437
+ ), "Found an input to the backward that was mutated during the backward pass. This is not supported"
438
+
439
+ if aot_config.keep_inference_input_mutations:
440
+ # Note: This is a bit annoying. There's a layering issue here, where:
441
+ # (1) functionalization needs to operate on **synthetic base** inputs, before unpacking them into the "real" inputs.
442
+ # (2) For keep_input_mutations, we support tracing a call to copy_() directly on mutated inputs.
443
+ # However, we **only** want to support this for inputs that have data-only (and no metadata) mutations,
444
+ # because inductor (and backends in generally) would prefer not to see these (e.g. as_strided_(), resize_()).
445
+ # This makes it pretty difficult for this logic to operate on synthetic bases.
446
+ # (3) In addition, there are cases where it's significantly cheaper to perform the copy on the individual
447
+ # (unpacked) input aliases, instead of the synthetic base.
448
+ # Example case where (3) could be important:
449
+ #
450
+ # def f(x, y):
451
+ # x.mul_(2)
452
+ # y.mul_(3)
453
+ # return x, y
454
+ # a = torch.ones(1_000_000)
455
+ # x, y = f(a[0:9], a[1:10])
456
+ #
457
+ # It would be much better to add copy_() calls into the graph for the two tiny slices, instead of materializing
458
+ # a giant "updated synthetic base" and copying into a's entire storage.
459
+ #
460
+ # For now, we are pessimistically not performing the optimization from (3);
461
+ # we will materialize an "updated" synthetic base, and copy it back to the synthetic input base.
462
+ # This allows us to factor aot autograd much more nicely, since only one area of the code needs to worry
463
+ # about synthetic bases.
464
+ for i, (inpt_old, inpt_f) in enumerate(
465
+ zip(args, f_args) if not trace_joint else zip(args[0], f_args[0])
466
+ ):
467
+ if not isinstance(inpt_f, torch.Tensor):
468
+ continue
469
+ assert is_fun(inpt_f)
470
+ inpt_new = from_fun(inpt_f)
471
+ if meta.input_info[i].mutation_type == MutationType.MUTATED_IN_GRAPH:
472
+ # We found an input that had a (data-only) mutation.
473
+ # Since keep_input_mutations is set, we need to faithfully apply a copy_()
474
+ # so the compiler will see the input mutation in the graph.
475
+ if meta.input_info[i].mutations_hidden_from_autograd:
476
+ # Hidden from autograd = run under no_grad, **and** don't bump VC
477
+ with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter(
478
+ inpt_old
479
+ ):
480
+ inpt_old.copy_(inpt_new)
481
+ elif meta.input_info[i].mutations_under_no_grad_or_inference_mode:
482
+ # Under no_grad = run under no_grad (we still bump the VC though)
483
+ # (inference_mode will also bump the VC, as long as the tensor in question
484
+ # was created outside of inference_mode)
485
+ with torch.no_grad():
486
+ inpt_old.copy_(inpt_new)
487
+ else:
488
+ inpt_old.copy_(inpt_new)
489
+
490
+ # When an output tensor is a functionalized mutated input, and we
491
+ # were able to move the mutation into the graph, then we can return
492
+ # the mutated input directly. This prevents duplicating the
493
+ # tensor's contents.
494
+ flat_outs, outs_spec = pytree.tree_flatten(f_outs)
495
+ flat_outs = [from_fun(o) for o in flat_outs]
496
+ num_outs = len(meta.output_info)
497
+
498
+ for i, outp in enumerate(flat_outs[:num_outs]):
499
+ info = meta.output_info[i]
500
+ if info.output_type != OutputType.is_input:
501
+ continue
502
+
503
+ assert info.base_idx is not None
504
+ if (
505
+ meta.input_info[info.base_idx].mutation_type
506
+ == MutationType.MUTATED_IN_GRAPH
507
+ ):
508
+ flat_outs[i] = args[info.base_idx]
509
+ return pytree.tree_unflatten(flat_outs, outs_spec)
510
+
511
+ return pytree.tree_map(from_fun, f_outs)
512
+
513
+ # Kinda annoying, but needed to make sure that the fx graph we trace out has "primals"
514
+ # and "tangents" as its input names (which are special-cased by the partitioner)
515
+ # TODO (tmanlaibaatar) revisit this if we ever need to turn on non-strict joint graph export
516
+ def joint_helper(primals, tangents):
517
+ return _functionalized_f_helper(primals, tangents)
518
+
519
+ helper = joint_helper if trace_joint else _functionalized_f_helper
520
+ if config.functionalize_rng_ops:
521
+ # Setup the wrapper for functionalization of rng ops
522
+ helper, args = create_functionalized_rng_ops_wrapper(helper, args, trace_joint)
523
+
524
+ # Additionally pass in tokens as inputs
525
+ # See Note [Side-Effectful Tokens in AOTAutograd]
526
+ additional_token_inputs = [torch.tensor([])] * len(meta.tokens)
527
+ if trace_joint:
528
+ args = ([*additional_token_inputs, *args[0]], *args[1:])
529
+ else:
530
+ args = [*additional_token_inputs, *args]
531
+
532
+ return helper, args
533
+
534
+
535
+ # Given a function operating on Subclass -> Subclass, returns a function that operates on Tensor -> Tensor
536
+ # Also returns:
537
+ # - the new set of arguments to pass into this function (now that tensor subclasses have been eliminated)
538
+ # - the updated ViewAndMutationMeta for this dense -> dense function.
539
+ # The other important arguments are:
540
+ # - flat_fn_maybe_joint: when is_joint_structure=True, this is the joint fw-bw function.
541
+ # when is_joint_structure=False, this is just the forward function.
542
+ # - fw_only: this is *always* the forward-only function.
543
+ # Why do we need this? We need to collect updated ViewAndMutationMeta on our new dense -> dense functions.
544
+ # In particular, we need this to tell the partitioner how many dense forward outputs there are.
545
+ def aot_dispatch_subclass(
546
+ flat_fn_maybe_joint,
547
+ args: List[Any],
548
+ *,
549
+ is_joint_structure: bool,
550
+ meta: ViewAndMutationMeta,
551
+ fw_only: Callable,
552
+ ) -> SubclassTracingInfo:
553
+ # Skip logic if we don't need to trace through any subclasses
554
+ req_subclass_dispatch = requires_subclass_dispatch(args, meta)
555
+ if not req_subclass_dispatch:
556
+ return SubclassTracingInfo(
557
+ plain_tensor_trace_fn=flat_fn_maybe_joint,
558
+ plain_tensor_args=args,
559
+ maybe_subclass_meta=None,
560
+ )
561
+
562
+ # TODO: add subclass guards (later PR).
563
+
564
+ # What's going on here? We need to compute subclass metadata about the outputs of the joint (grad_inputs).
565
+ # Annoying: we don't know the grad input metas until we're in the middle of tracing the joint,
566
+ # so we set it later, while we're tracing the joint (see inner_fn() below).
567
+ # Another option would be to run our run_functionalized_fw_and_collect_metadata() function
568
+ # directly on the joint, but this would hurt compile time (adding yet another pass through the joint).
569
+ subclass_meta = SubclassMeta()
570
+
571
+ def inner_fn(fn, args, *, use_trace_joint: bool):
572
+ # Step 1: wrap tensor inputs into subclasses if necessary
573
+ all_args = wrap_tensor_subclasses_maybe_joint(
574
+ args, is_joint_structure=use_trace_joint, meta=meta
575
+ )
576
+
577
+ # Step 2: call the inner function, with our (maybe subclass) inputs
578
+ wrapped_outs = fn(*all_args)
579
+
580
+ if use_trace_joint:
581
+ # See Note: [Computing Subclass Metadata about grad_inputs]
582
+ # We also stash subclass info on our grad_inputs, if we're tracing the joint.
583
+ nonlocal subclass_meta
584
+ assert isinstance(wrapped_outs, tuple) and len(wrapped_outs) == 2
585
+ # Don't need fw outs since we already have subclass metadata on them
586
+ grad_inputs = wrapped_outs[1]
587
+ subclass_meta.grad_input_metas = create_subclass_meta(grad_inputs)
588
+
589
+ # Step 3: Unwrap any subclass outputs back into dense tensors
590
+ unwrapped_outs = unwrap_tensor_subclasses(
591
+ wrapped_outs, is_joint_structure=use_trace_joint
592
+ )
593
+ return unwrapped_outs
594
+
595
+ def joint_fn(primals, tangents):
596
+ return inner_fn(flat_fn_maybe_joint, (primals, tangents), use_trace_joint=True)
597
+
598
+ def fw_fn(*primals):
599
+ return inner_fn(flat_fn_maybe_joint, primals, use_trace_joint=False)
600
+
601
+ def metadata_fn(*primals):
602
+ return inner_fn(fw_only, primals, use_trace_joint=False)
603
+
604
+ args_unwrapped = unwrap_tensor_subclasses(
605
+ args, is_joint_structure=is_joint_structure
606
+ )
607
+
608
+ if is_joint_structure:
609
+ primals_unwrapped = args_unwrapped[0]
610
+ fn_to_trace = joint_fn
611
+ else:
612
+ primals_unwrapped = args_unwrapped
613
+ fn_to_trace = fw_fn
614
+
615
+ # Note: [Partitioner handling for Subclasses, Part 1]
616
+ # The way the partitioner works is that:
617
+ # (1) we pass in a single graph containing the joint fw/bw,
618
+ # where the # of graph outputs corresponds to # fw_outputs + # grad_inputs
619
+ # (2) The partitioner accepts an argument, num_fwd_outputs,
620
+ # and assumes that the first "num_fwd_outputs" graph outputs correspond
621
+ # to outputs of the forward graph.
622
+ # How do tensor subclasses enter the picture?
623
+ # the num_fwd_outputs in the final graph is actually non-trivial to compute,
624
+ # because it can be influenced by input mutations and intermediate bases.
625
+ # So we compute it by inspecting the current ViewAndMutationMeta object.
626
+ # However, the original ViewAndMutationMeta that we computed was created
627
+ # on the subclass -> subclass graph,
628
+ # which can have a different number of outputs than the dense -> dense graph.
629
+ # That's why we create a fresh metadata object on the dense -> dense function here,
630
+ # and plumb it back up to the partitioner.
631
+ # See Note: [Partitioner handling for Subclasses, Part 2] for more info.
632
+ meta_updated = run_functionalized_fw_and_collect_metadata(
633
+ metadata_fn,
634
+ keep_input_mutations=meta.keep_input_mutations,
635
+ is_train=meta.is_train,
636
+ )(*primals_unwrapped)
637
+
638
+ subclass_meta.fw_metadata = meta_updated
639
+
640
+ return SubclassTracingInfo(
641
+ plain_tensor_trace_fn=fn_to_trace,
642
+ plain_tensor_args=args_unwrapped,
643
+ maybe_subclass_meta=subclass_meta,
644
+ )
645
+
646
+
647
+ class PropagateUnbackedSymInts(torch.fx.Interpreter):
648
+ def run_node(self, n: torch.fx.Node):
649
+ import sympy
650
+
651
+ result = super().run_node(n)
652
+ # TODO: handle Tensor returns
653
+ if "example_value" in n.meta:
654
+ if isinstance(result, torch.SymInt) and isinstance(
655
+ result.node.expr, sympy.Symbol
656
+ ):
657
+ torch._check(result == n.meta["example_value"])
658
+
659
+ return result
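A rough sketch of driving this interpreter over an FX graph (illustrative only; a graph from plain `torch.fx.symbolic_trace` carries no "example_value" metadata, so the re-binding check above is simply skipped):

    import torch
    import torch.fx

    def double(x):
        return x * 2

    gm = torch.fx.symbolic_trace(double)
    # Executes the graph node by node via the Interpreter machinery above.
    out = PropagateUnbackedSymInts(gm).run(torch.ones(3))
    print(out)  # tensor([2., 2., 2.])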
660
+
661
+
662
+ def create_functional_call(mod, params_spec, params_len, store_orig_mod=False):
663
+ # Redundant with dynamo, but worth having in case this gets invoked elsewhere.
664
+ # https://github.com/pytorch/pytorch/issues/103569
665
+
666
+ def functional_call(*args, **kwargs):
667
+ with stateless._reparametrize_module(
668
+ mod, pytree.tree_unflatten(args[:params_len], params_spec)
669
+ ):
670
+ if isinstance(mod, torch.fx.GraphModule):
671
+ with fx_traceback.preserve_node_meta(), warnings.catch_warnings():
672
+ warnings.filterwarnings(
673
+ "ignore", "Anomaly Detection has been enabled."
674
+ )
675
+ with torch.autograd.detect_anomaly(check_nan=False):
676
+ out = PropagateUnbackedSymInts(mod).run(
677
+ *args[params_len:], **kwargs
678
+ )
679
+ else:
680
+ out = mod(*args[params_len:], **kwargs)
681
+
682
+ if not isinstance(out, (tuple, list)):
683
+ raise RuntimeError(
684
+ "Graph output must be a tuple(). This is so that we can avoid "
685
+ "pytree processing of the outputs. Please change the module to "
686
+ "have tuple outputs or use aot_module instead."
687
+ )
688
+ return out
689
+
690
+ # Note [Preserving the nn module stack metadata during export non-strict mode]
691
+ # This path is currently only used by the non-strict export flow,
692
+ # where we cannot rely on dynamo to preserve nn stack metadata in our captured graph.
693
+ # Instead, we stash the original user nn module here, and rely on `make_fx` to grab
694
+ # this stashed module and use it to track nn module stack metadata
695
+ if store_orig_mod and not hasattr(functional_call, "_orig_mod"):
696
+ functional_call._orig_mod = mod # type: ignore[attr-defined]
697
+
698
+ return functional_call
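A minimal usage sketch, assuming a toy module `M` that returns a tuple (as the RuntimeError above requires); the parameter flattening shown here is illustrative:

    import torch
    import torch.nn as nn
    import torch.utils._pytree as pytree

    class M(nn.Module):
        def __init__(self):
            super().__init__()
            self.lin = nn.Linear(3, 2)

        def forward(self, x):
            # Tuple output, as required by the wrapper above.
            return (self.lin(x),)

    mod = M()
    params = dict(mod.named_parameters())
    flat_params, params_spec = pytree.tree_flatten(params)

    fn = create_functional_call(mod, params_spec, params_len=len(flat_params))
    out = fn(*flat_params, torch.randn(4, 3))  # parameters first, then runtime inputs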
llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/utils.py ADDED
@@ -0,0 +1,226 @@
1
+ """
2
+ Contains various utils for AOTAutograd, including those for handling collections.
3
+ """
4
+
5
+ import dataclasses
6
+ import warnings
7
+ from contextlib import nullcontext
8
+ from functools import wraps
9
+ from typing import Any, Callable, List, Optional, Tuple
10
+
11
+ import torch
12
+ import torch.utils._pytree as pytree
13
+ from torch.fx.experimental._backward_state import BackwardState
14
+ from torch.fx.experimental.proxy_tensor import py_sym_types
15
+
16
+ KNOWN_TYPES = [
17
+ torch.Tensor,
18
+ BackwardState,
19
+ int,
20
+ str,
21
+ float,
22
+ bool,
23
+ type(None),
24
+ *py_sym_types,
25
+ ]
26
+
27
+ original_zip = zip
28
+
29
+
30
+ def strict_zip(*iterables, strict=True, **kwargs):
31
+ if not strict:
32
+ return original_zip(*iterables, **kwargs)
33
+
34
+ shortest_length = min(len(it) for it in iterables)
35
+ for iterable in iterables:
36
+ if len(iterable) != shortest_length:
37
+ raise ValueError(
38
+ "The iterables have different lengths and strict mode is enabled."
39
+ )
40
+
41
+ return original_zip(*iterables, **kwargs)
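This mirrors `zip(..., strict=True)` from newer Python, except that lengths are checked eagerly (so only sized iterables are accepted). A small sketch:

    list(strict_zip([1, 2], ["a", "b"]))   # [(1, 'a'), (2, 'b')]

    try:
        strict_zip([1, 2, 3], ["a", "b"])  # mismatched lengths raise immediately
    except ValueError as e:
        print(e)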
42
+
43
+
44
+ def _get_symint_hints(exprs):
45
+ """
46
+ Get the hints of a list/tuple of int/SymInt.
47
+ """
48
+ if isinstance(exprs, (list, tuple)):
49
+ return type(exprs)(_get_symint_hints(e) for e in exprs)
50
+ elif isinstance(exprs, torch.SymInt):
51
+ return exprs.node.shape_env.size_hint(exprs.node.expr)
52
+ else:
53
+ return exprs
54
+
55
+
56
+ def partial_flatten_asdict(obj: Any) -> Any:
57
+ if dataclasses.is_dataclass(obj):
58
+ return {
59
+ field.name: getattr(obj, field.name) for field in dataclasses.fields(obj)
60
+ }
61
+ elif isinstance(obj, (list, tuple)):
62
+ return obj.__class__([partial_flatten_asdict(item) for item in obj])
63
+ elif isinstance(obj, dict):
64
+ return {k: partial_flatten_asdict(v) for k, v in obj.items()}
65
+ else:
66
+ return obj
67
+
68
+
69
+ def normalize_as_list(x):
70
+ if isinstance(x, tuple):
71
+ return list(x)
72
+ elif isinstance(x, list):
73
+ return x
74
+ return [x]
75
+
76
+
77
+ def _get_autocast_states():
78
+ return [
79
+ torch.is_autocast_enabled(),
80
+ torch.is_autocast_cpu_enabled(),
81
+ torch.get_autocast_gpu_dtype(),
82
+ torch.get_autocast_cpu_dtype(),
83
+ torch.is_autocast_cache_enabled(),
84
+ ]
85
+
86
+
87
+ def make_boxed_func(f):
88
+ def g(args):
89
+ return f(*args)
90
+
91
+ g._boxed_call = True # type: ignore[attr-defined]
92
+ return g
93
+
94
+
95
+ def make_boxed_compiler(compiler):
96
+ @wraps(compiler)
97
+ def f(fx_g, inps):
98
+ out_f = compiler(fx_g, inps)
99
+ fx_g = make_boxed_func(out_f)
100
+ return fx_g
101
+
102
+ return f
103
+
104
+
105
+ def call_func_at_runtime_with_args(f, args, steal_args=False, disable_amp=False):
106
+ if not steal_args:
107
+ args = list(args)
108
+ assert isinstance(args, list)
109
+
110
+ context = torch._C._DisableAutocast if disable_amp else nullcontext
111
+ with context():
112
+ if hasattr(f, "_boxed_call"):
113
+ out = normalize_as_list(f(args))
114
+ else:
115
+ # TODO: Please remove soon
116
+ # https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670
117
+ warnings.warn(
118
+ "Your compiler for AOTAutograd is returning a function that doesn't take boxed arguments. "
119
+ "Please wrap it with functorch.compile.make_boxed_func or handle the boxed arguments yourself. "
120
+ "See https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670 for rationale."
121
+ )
122
+ out = normalize_as_list(f(*args))
123
+ return out
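A small sketch of the boxed calling convention that the warning above refers to: a boxed function takes a single list of arguments rather than positional arguments.

    f = make_boxed_func(lambda x, y: x + y)      # f now expects one list of args
    assert hasattr(f, "_boxed_call")
    out = call_func_at_runtime_with_args(f, [1, 2])
    print(out)  # [3]  (an unboxed callable still works, but triggers the warning above)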
124
+
125
+
126
+ # Inspired by autodidax (thanks!)
127
+ class PytreeThunk:
128
+ spec: Optional[pytree.TreeSpec] = None
129
+ # These are some kinda dumb microoptimizations that save about 3-4 us of overhead.
130
+ is_simple: Optional[
131
+ bool
132
+ ] = None # if the output spec is a tuple/list, we won't bother unflattening it.
133
+ is_really_simple: Optional[bool] = None # if the output spec is a LeafSpec
134
+
135
+ def set(self, spec: pytree.TreeSpec) -> None:
136
+ assert self.spec is None or self.spec == spec
137
+ assert spec is not None
138
+ self.spec: pytree.TreeSpec = spec
139
+ if self.spec.type in {tuple, list} and all(
140
+ child.is_leaf() for child in spec.children_specs
141
+ ):
142
+ self.is_simple = True
143
+ if self.spec.is_leaf():
144
+ self.is_really_simple = True
145
+
146
+ def unflatten(self, x: List[Any]) -> Any:
147
+ if self.is_really_simple:
148
+ return x[0]
149
+ if self.is_simple:
150
+ return x
151
+ assert self.spec is not None
152
+ return pytree.tree_unflatten(x, self.spec)
153
+
154
+
155
+ # Creates a function that returns flattened inputs and outputs
156
+ # Also returns the output tree spec, which is needed to recover the "unflattened"
157
+ # output tree structure later.
158
+ def create_tree_flattened_fn(fn, args, kwargs=None) -> Tuple[Callable, PytreeThunk]:
159
+ if kwargs is None:
160
+ kwargs = {}
161
+ # Save the args_spec for flat_tensor_args to unflatten while tracing
162
+ _, tensor_args_spec = pytree.tree_flatten((args, kwargs))
163
+ out_spec = PytreeThunk()
164
+
165
+ def flat_fn(*flat_args):
166
+ # The input are flattened tensor args. Prepare the args in the
167
+ # order that original function expects. Add static args as well.
168
+ # They will appear as tensor constants in the traced graph.
169
+ nonlocal out_spec
170
+ args, kwargs = pytree.tree_unflatten(flat_args, tensor_args_spec)
171
+ tree_out = fn(*args, **kwargs)
172
+ flat_out, spec = pytree.tree_flatten(tree_out)
173
+ for i in flat_out:
174
+ is_known_type = False
175
+ for j in KNOWN_TYPES:
176
+ if isinstance(i, j):
177
+ is_known_type = True
178
+ break
179
+ if not is_known_type:
180
+ raise RuntimeError(
181
+ f"Found {type(i)} in output, which is not a known type. "
182
+ "If this type holds tensors, you need to register a pytree for it. "
183
+ "See https://github.com/pytorch/functorch/issues/475 for a brief "
184
+ "explanation why. If you don't need to register a pytree, please "
185
+ "leave a comment explaining your use case and we'll make this more "
186
+ "ergonomic to deal with"
187
+ )
188
+ out_spec.set(spec)
189
+ return flat_out
190
+
191
+ # Can't use functools.wraps here because the wrapper has different
192
+ # calling convention
193
+ if hasattr(fn, "_orig_mod"):
194
+ flat_fn._orig_mod = fn._orig_mod # type: ignore[attr-defined]
195
+
196
+ return flat_fn, out_spec
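A minimal sketch of how the flattened function and the output thunk fit together (the toy function `scale_to_dict` is illustrative):

    import torch
    import torch.utils._pytree as pytree

    def scale_to_dict(x, factor):
        return {"out": x * factor}

    args, kwargs = (torch.ones(2), 3.0), {}
    flat_fn, out_spec = create_tree_flattened_fn(scale_to_dict, args, kwargs)

    flat_args, _ = pytree.tree_flatten((args, kwargs))
    flat_out = flat_fn(*flat_args)         # flat list of outputs; spec recorded on out_spec
    result = out_spec.unflatten(flat_out)  # {"out": tensor([3., 3.])}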
197
+
198
+
199
+ # This function takes in a tensor t, and returns one of t, t.view(), or t.clone().
200
+ # When tracing the joint forward + backward, for any inputs in the graph that are mutated,
201
+ # we need to clone them first (and similarly for metadata-only mutations, we need to view them first).
202
+ # The idea is that when we trace the backward, we need to pass in the *original* primals
203
+ # to autograd.grad(), before they were mutated.
204
+ # Note: when we have synthetic base inputs, we need to clone them *before* creating views off of them.
205
+ # This means that "idx" here represents the index of the (potentially) synthetic base.
206
+ # What we need to do is:
207
+ # (1) map the current (post-synthetic-base calling convention) input argument index
208
+ # to its index in the pre-synthetic-base calling convention.
209
+ # (2) There could be multiple, if this index corresponds to a synthetic base
210
+ # that has multiple input aliases.
211
+ # (3) If any of those corresponding inputs get metadata mutations, then we clone the base.
212
+ def maybe_to_fresh_input(idx, t, meta):
213
+ if not isinstance(t, torch.Tensor):
214
+ return t
215
+ if idx in meta.mutated_inp_runtime_indices:
216
+ # We only need to bother cloning mutated inputs that participate in autograd.
217
+ mutated_inp_idx = meta.mutated_inp_runtime_indices.index(idx)
218
+ if meta.input_info[idx].requires_grad and meta.input_info[idx].mutates_data:
219
+ # Make sure the primal we pass to autograd.grad()
220
+ # sees the tensor before the mutation
221
+ return t.clone()
222
+ if meta.input_info[idx] and meta.input_info[idx].mutates_metadata:
223
+ # Make sure the primal we pass to autograd.grad()
224
+ # sees the tensor before the metadata mutation
225
+ return t.view(t.shape)
226
+ return t
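A standalone illustration of the rationale (without the meta object): cloning a to-be-mutated input keeps the original primal usable as a target for autograd.grad.

    import torch

    def f(x):
        x.mul_(2)          # data mutation on the input
        return x.sum()

    primal = torch.ones(3, requires_grad=True)
    # Calling f(primal) directly would fail: in-place ops on a leaf that requires
    # grad are disallowed, and we also want grads w.r.t. the pre-mutation value.
    fresh = primal.clone()
    out = f(fresh)
    (grad,) = torch.autograd.grad(out, primal)
    print(grad)  # tensor([2., 2., 2.])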
llmeval-env/lib/python3.10/site-packages/torch/_vendor/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/_vendor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ # This file is dual licensed under the terms of the Apache License, Version
2
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
+ # for complete details.
4
+
5
+ __title__ = "packaging"
6
+ __summary__ = "Core utilities for Python packages"
7
+ __uri__ = "https://github.com/pypa/packaging"
8
+
9
+ __version__ = "23.2"
10
+
11
+ __author__ = "Donald Stufft and individual contributors"
12
+ __email__ = "[email protected]"
13
+
14
+ __license__ = "BSD-2-Clause or Apache-2.0"
15
+ __copyright__ = "2014 %s" % __author__
llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (526 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/_structures.cpython-310.pyc ADDED
Binary file (2.71 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/version.cpython-310.pyc ADDED
Binary file (14.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/_structures.py ADDED
@@ -0,0 +1,61 @@
1
+ # This file is dual licensed under the terms of the Apache License, Version
2
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
+ # for complete details.
4
+
5
+
6
+ class InfinityType:
7
+ def __repr__(self) -> str:
8
+ return "Infinity"
9
+
10
+ def __hash__(self) -> int:
11
+ return hash(repr(self))
12
+
13
+ def __lt__(self, other: object) -> bool:
14
+ return False
15
+
16
+ def __le__(self, other: object) -> bool:
17
+ return False
18
+
19
+ def __eq__(self, other: object) -> bool:
20
+ return isinstance(other, self.__class__)
21
+
22
+ def __gt__(self, other: object) -> bool:
23
+ return True
24
+
25
+ def __ge__(self, other: object) -> bool:
26
+ return True
27
+
28
+ def __neg__(self: object) -> "NegativeInfinityType":
29
+ return NegativeInfinity
30
+
31
+
32
+ Infinity = InfinityType()
33
+
34
+
35
+ class NegativeInfinityType:
36
+ def __repr__(self) -> str:
37
+ return "-Infinity"
38
+
39
+ def __hash__(self) -> int:
40
+ return hash(repr(self))
41
+
42
+ def __lt__(self, other: object) -> bool:
43
+ return True
44
+
45
+ def __le__(self, other: object) -> bool:
46
+ return True
47
+
48
+ def __eq__(self, other: object) -> bool:
49
+ return isinstance(other, self.__class__)
50
+
51
+ def __gt__(self, other: object) -> bool:
52
+ return False
53
+
54
+ def __ge__(self, other: object) -> bool:
55
+ return False
56
+
57
+ def __neg__(self: object) -> InfinityType:
58
+ return Infinity
59
+
60
+
61
+ NegativeInfinity = NegativeInfinityType()
llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/version.py ADDED
@@ -0,0 +1,563 @@
1
+ # This file is dual licensed under the terms of the Apache License, Version
2
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
+ # for complete details.
4
+ """
5
+ .. testsetup::
6
+
7
+ from packaging.version import parse, Version
8
+ """
9
+
10
+ import itertools
11
+ import re
12
+ from typing import Any, Callable, NamedTuple, Optional, SupportsInt, Tuple, Union
13
+
14
+ from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
15
+
16
+ __all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]
17
+
18
+ LocalType = Tuple[Union[int, str], ...]
19
+
20
+ CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]]
21
+ CmpLocalType = Union[
22
+ NegativeInfinityType,
23
+ Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...],
24
+ ]
25
+ CmpKey = Tuple[
26
+ int,
27
+ Tuple[int, ...],
28
+ CmpPrePostDevType,
29
+ CmpPrePostDevType,
30
+ CmpPrePostDevType,
31
+ CmpLocalType,
32
+ ]
33
+ VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
34
+
35
+
36
+ class _Version(NamedTuple):
37
+ epoch: int
38
+ release: Tuple[int, ...]
39
+ dev: Optional[Tuple[str, int]]
40
+ pre: Optional[Tuple[str, int]]
41
+ post: Optional[Tuple[str, int]]
42
+ local: Optional[LocalType]
43
+
44
+
45
+ def parse(version: str) -> "Version":
46
+ """Parse the given version string.
47
+
48
+ >>> parse('1.0.dev1')
49
+ <Version('1.0.dev1')>
50
+
51
+ :param version: The version string to parse.
52
+ :raises InvalidVersion: When the version string is not a valid version.
53
+ """
54
+ return Version(version)
55
+
56
+
57
+ class InvalidVersion(ValueError):
58
+ """Raised when a version string is not a valid version.
59
+
60
+ >>> Version("invalid")
61
+ Traceback (most recent call last):
62
+ ...
63
+ packaging.version.InvalidVersion: Invalid version: 'invalid'
64
+ """
65
+
66
+
67
+ class _BaseVersion:
68
+ _key: Tuple[Any, ...]
69
+
70
+ def __hash__(self) -> int:
71
+ return hash(self._key)
72
+
73
+ # Please keep the duplicated `isinstance` check
74
+ # in the six comparisons hereunder
75
+ # unless you find a way to avoid adding overhead function calls.
76
+ def __lt__(self, other: "_BaseVersion") -> bool:
77
+ if not isinstance(other, _BaseVersion):
78
+ return NotImplemented
79
+
80
+ return self._key < other._key
81
+
82
+ def __le__(self, other: "_BaseVersion") -> bool:
83
+ if not isinstance(other, _BaseVersion):
84
+ return NotImplemented
85
+
86
+ return self._key <= other._key
87
+
88
+ def __eq__(self, other: object) -> bool:
89
+ if not isinstance(other, _BaseVersion):
90
+ return NotImplemented
91
+
92
+ return self._key == other._key
93
+
94
+ def __ge__(self, other: "_BaseVersion") -> bool:
95
+ if not isinstance(other, _BaseVersion):
96
+ return NotImplemented
97
+
98
+ return self._key >= other._key
99
+
100
+ def __gt__(self, other: "_BaseVersion") -> bool:
101
+ if not isinstance(other, _BaseVersion):
102
+ return NotImplemented
103
+
104
+ return self._key > other._key
105
+
106
+ def __ne__(self, other: object) -> bool:
107
+ if not isinstance(other, _BaseVersion):
108
+ return NotImplemented
109
+
110
+ return self._key != other._key
111
+
112
+
113
+ # Deliberately not anchored to the start and end of the string, to make it
114
+ # easier for 3rd party code to reuse
115
+ _VERSION_PATTERN = r"""
116
+ v?
117
+ (?:
118
+ (?:(?P<epoch>[0-9]+)!)? # epoch
119
+ (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
120
+ (?P<pre> # pre-release
121
+ [-_\.]?
122
+ (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
123
+ [-_\.]?
124
+ (?P<pre_n>[0-9]+)?
125
+ )?
126
+ (?P<post> # post release
127
+ (?:-(?P<post_n1>[0-9]+))
128
+ |
129
+ (?:
130
+ [-_\.]?
131
+ (?P<post_l>post|rev|r)
132
+ [-_\.]?
133
+ (?P<post_n2>[0-9]+)?
134
+ )
135
+ )?
136
+ (?P<dev> # dev release
137
+ [-_\.]?
138
+ (?P<dev_l>dev)
139
+ [-_\.]?
140
+ (?P<dev_n>[0-9]+)?
141
+ )?
142
+ )
143
+ (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
144
+ """
145
+
146
+ VERSION_PATTERN = _VERSION_PATTERN
147
+ """
148
+ A string containing the regular expression used to match a valid version.
149
+
150
+ The pattern is not anchored at either end, and is intended for embedding in larger
151
+ expressions (for example, matching a version number as part of a file name). The
152
+ regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
153
+ flags set.
154
+
155
+ :meta hide-value:
156
+ """
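For instance, a sketch of embedding the pattern with the flags noted above:

    import re

    version_re = re.compile(
        r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE
    )
    assert version_re.match("1.0.post1+abc.5")
    assert version_re.match("not-a-version") is None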
157
+
158
+
159
+ class Version(_BaseVersion):
160
+ """This class abstracts handling of a project's versions.
161
+
162
+ A :class:`Version` instance is comparison aware and can be compared and
163
+ sorted using the standard Python interfaces.
164
+
165
+ >>> v1 = Version("1.0a5")
166
+ >>> v2 = Version("1.0")
167
+ >>> v1
168
+ <Version('1.0a5')>
169
+ >>> v2
170
+ <Version('1.0')>
171
+ >>> v1 < v2
172
+ True
173
+ >>> v1 == v2
174
+ False
175
+ >>> v1 > v2
176
+ False
177
+ >>> v1 >= v2
178
+ False
179
+ >>> v1 <= v2
180
+ True
181
+ """
182
+
183
+ _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
184
+ _key: CmpKey
185
+
186
+ def __init__(self, version: str) -> None:
187
+ """Initialize a Version object.
188
+
189
+ :param version:
190
+ The string representation of a version which will be parsed and normalized
191
+ before use.
192
+ :raises InvalidVersion:
193
+ If the ``version`` does not conform to PEP 440 in any way then this
194
+ exception will be raised.
195
+ """
196
+
197
+ # Validate the version and parse it into pieces
198
+ match = self._regex.search(version)
199
+ if not match:
200
+ raise InvalidVersion(f"Invalid version: '{version}'")
201
+
202
+ # Store the parsed out pieces of the version
203
+ self._version = _Version(
204
+ epoch=int(match.group("epoch")) if match.group("epoch") else 0,
205
+ release=tuple(int(i) for i in match.group("release").split(".")),
206
+ pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
207
+ post=_parse_letter_version(
208
+ match.group("post_l"), match.group("post_n1") or match.group("post_n2")
209
+ ),
210
+ dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
211
+ local=_parse_local_version(match.group("local")),
212
+ )
213
+
214
+ # Generate a key which will be used for sorting
215
+ self._key = _cmpkey(
216
+ self._version.epoch,
217
+ self._version.release,
218
+ self._version.pre,
219
+ self._version.post,
220
+ self._version.dev,
221
+ self._version.local,
222
+ )
223
+
224
+ def __repr__(self) -> str:
225
+ """A representation of the Version that shows all internal state.
226
+
227
+ >>> Version('1.0.0')
228
+ <Version('1.0.0')>
229
+ """
230
+ return f"<Version('{self}')>"
231
+
232
+ def __str__(self) -> str:
233
+ """A string representation of the version that can be round-tripped.
234
+
235
+ >>> str(Version("1.0a5"))
236
+ '1.0a5'
237
+ """
238
+ parts = []
239
+
240
+ # Epoch
241
+ if self.epoch != 0:
242
+ parts.append(f"{self.epoch}!")
243
+
244
+ # Release segment
245
+ parts.append(".".join(str(x) for x in self.release))
246
+
247
+ # Pre-release
248
+ if self.pre is not None:
249
+ parts.append("".join(str(x) for x in self.pre))
250
+
251
+ # Post-release
252
+ if self.post is not None:
253
+ parts.append(f".post{self.post}")
254
+
255
+ # Development release
256
+ if self.dev is not None:
257
+ parts.append(f".dev{self.dev}")
258
+
259
+ # Local version segment
260
+ if self.local is not None:
261
+ parts.append(f"+{self.local}")
262
+
263
+ return "".join(parts)
264
+
265
+ @property
266
+ def epoch(self) -> int:
267
+ """The epoch of the version.
268
+
269
+ >>> Version("2.0.0").epoch
270
+ 0
271
+ >>> Version("1!2.0.0").epoch
272
+ 1
273
+ """
274
+ return self._version.epoch
275
+
276
+ @property
277
+ def release(self) -> Tuple[int, ...]:
278
+ """The components of the "release" segment of the version.
279
+
280
+ >>> Version("1.2.3").release
281
+ (1, 2, 3)
282
+ >>> Version("2.0.0").release
283
+ (2, 0, 0)
284
+ >>> Version("1!2.0.0.post0").release
285
+ (2, 0, 0)
286
+
287
+ Includes trailing zeroes but not the epoch or any pre-release / development /
288
+ post-release suffixes.
289
+ """
290
+ return self._version.release
291
+
292
+ @property
293
+ def pre(self) -> Optional[Tuple[str, int]]:
294
+ """The pre-release segment of the version.
295
+
296
+ >>> print(Version("1.2.3").pre)
297
+ None
298
+ >>> Version("1.2.3a1").pre
299
+ ('a', 1)
300
+ >>> Version("1.2.3b1").pre
301
+ ('b', 1)
302
+ >>> Version("1.2.3rc1").pre
303
+ ('rc', 1)
304
+ """
305
+ return self._version.pre
306
+
307
+ @property
308
+ def post(self) -> Optional[int]:
309
+ """The post-release number of the version.
310
+
311
+ >>> print(Version("1.2.3").post)
312
+ None
313
+ >>> Version("1.2.3.post1").post
314
+ 1
315
+ """
316
+ return self._version.post[1] if self._version.post else None
317
+
318
+ @property
319
+ def dev(self) -> Optional[int]:
320
+ """The development number of the version.
321
+
322
+ >>> print(Version("1.2.3").dev)
323
+ None
324
+ >>> Version("1.2.3.dev1").dev
325
+ 1
326
+ """
327
+ return self._version.dev[1] if self._version.dev else None
328
+
329
+ @property
330
+ def local(self) -> Optional[str]:
331
+ """The local version segment of the version.
332
+
333
+ >>> print(Version("1.2.3").local)
334
+ None
335
+ >>> Version("1.2.3+abc").local
336
+ 'abc'
337
+ """
338
+ if self._version.local:
339
+ return ".".join(str(x) for x in self._version.local)
340
+ else:
341
+ return None
342
+
343
+ @property
344
+ def public(self) -> str:
345
+ """The public portion of the version.
346
+
347
+ >>> Version("1.2.3").public
348
+ '1.2.3'
349
+ >>> Version("1.2.3+abc").public
350
+ '1.2.3'
351
+ >>> Version("1.2.3+abc.dev1").public
352
+ '1.2.3'
353
+ """
354
+ return str(self).split("+", 1)[0]
355
+
356
+ @property
357
+ def base_version(self) -> str:
358
+ """The "base version" of the version.
359
+
360
+ >>> Version("1.2.3").base_version
361
+ '1.2.3'
362
+ >>> Version("1.2.3+abc").base_version
363
+ '1.2.3'
364
+ >>> Version("1!1.2.3+abc.dev1").base_version
365
+ '1!1.2.3'
366
+
367
+ The "base version" is the public version of the project without any pre or post
368
+ release markers.
369
+ """
370
+ parts = []
371
+
372
+ # Epoch
373
+ if self.epoch != 0:
374
+ parts.append(f"{self.epoch}!")
375
+
376
+ # Release segment
377
+ parts.append(".".join(str(x) for x in self.release))
378
+
379
+ return "".join(parts)
380
+
381
+ @property
382
+ def is_prerelease(self) -> bool:
383
+ """Whether this version is a pre-release.
384
+
385
+ >>> Version("1.2.3").is_prerelease
386
+ False
387
+ >>> Version("1.2.3a1").is_prerelease
388
+ True
389
+ >>> Version("1.2.3b1").is_prerelease
390
+ True
391
+ >>> Version("1.2.3rc1").is_prerelease
392
+ True
393
+ >>> Version("1.2.3dev1").is_prerelease
394
+ True
395
+ """
396
+ return self.dev is not None or self.pre is not None
397
+
398
+ @property
399
+ def is_postrelease(self) -> bool:
400
+ """Whether this version is a post-release.
401
+
402
+ >>> Version("1.2.3").is_postrelease
403
+ False
404
+ >>> Version("1.2.3.post1").is_postrelease
405
+ True
406
+ """
407
+ return self.post is not None
408
+
409
+ @property
410
+ def is_devrelease(self) -> bool:
411
+ """Whether this version is a development release.
412
+
413
+ >>> Version("1.2.3").is_devrelease
414
+ False
415
+ >>> Version("1.2.3.dev1").is_devrelease
416
+ True
417
+ """
418
+ return self.dev is not None
419
+
420
+ @property
421
+ def major(self) -> int:
422
+ """The first item of :attr:`release` or ``0`` if unavailable.
423
+
424
+ >>> Version("1.2.3").major
425
+ 1
426
+ """
427
+ return self.release[0] if len(self.release) >= 1 else 0
428
+
429
+ @property
430
+ def minor(self) -> int:
431
+ """The second item of :attr:`release` or ``0`` if unavailable.
432
+
433
+ >>> Version("1.2.3").minor
434
+ 2
435
+ >>> Version("1").minor
436
+ 0
437
+ """
438
+ return self.release[1] if len(self.release) >= 2 else 0
439
+
440
+ @property
441
+ def micro(self) -> int:
442
+ """The third item of :attr:`release` or ``0`` if unavailable.
443
+
444
+ >>> Version("1.2.3").micro
445
+ 3
446
+ >>> Version("1").micro
447
+ 0
448
+ """
449
+ return self.release[2] if len(self.release) >= 3 else 0
450
+
451
+
452
+ def _parse_letter_version(
453
+ letter: Optional[str], number: Union[str, bytes, SupportsInt, None]
454
+ ) -> Optional[Tuple[str, int]]:
455
+
456
+ if letter:
457
+ # We consider there to be an implicit 0 in a pre-release if there is
458
+ # not a numeral associated with it.
459
+ if number is None:
460
+ number = 0
461
+
462
+ # We normalize any letters to their lower case form
463
+ letter = letter.lower()
464
+
465
+ # We consider some words to be alternate spellings of other words and
466
+ # in those cases we want to normalize the spellings to our preferred
467
+ # spelling.
468
+ if letter == "alpha":
469
+ letter = "a"
470
+ elif letter == "beta":
471
+ letter = "b"
472
+ elif letter in ["c", "pre", "preview"]:
473
+ letter = "rc"
474
+ elif letter in ["rev", "r"]:
475
+ letter = "post"
476
+
477
+ return letter, int(number)
478
+ if not letter and number:
479
+ # We assume if we are given a number, but we are not given a letter
480
+ # then this is using the implicit post release syntax (e.g. 1.0-1)
481
+ letter = "post"
482
+
483
+ return letter, int(number)
484
+
485
+ return None
486
+
487
+
488
+ _local_version_separators = re.compile(r"[\._-]")
489
+
490
+
491
+ def _parse_local_version(local: Optional[str]) -> Optional[LocalType]:
492
+ """
493
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
494
+ """
495
+ if local is not None:
496
+ return tuple(
497
+ part.lower() if not part.isdigit() else int(part)
498
+ for part in _local_version_separators.split(local)
499
+ )
500
+ return None
501
+
502
+
503
+ def _cmpkey(
504
+ epoch: int,
505
+ release: Tuple[int, ...],
506
+ pre: Optional[Tuple[str, int]],
507
+ post: Optional[Tuple[str, int]],
508
+ dev: Optional[Tuple[str, int]],
509
+ local: Optional[LocalType],
510
+ ) -> CmpKey:
511
+
512
+ # When we compare a release version, we want to compare it with all of the
513
+ # trailing zeros removed. So we'll reverse the list, drop all the now-
514
+ # leading zeros until we come to something non-zero, then take the rest,
515
+ # re-reverse it back into the correct order, make it a tuple, and use
516
+ # that for our sorting key.
517
+ _release = tuple(
518
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
519
+ )
520
+
521
+ # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
522
+ # We'll do this by abusing the pre segment, but we _only_ want to do this
523
+ # if there is not a pre or a post segment. If we have one of those then
524
+ # the normal sorting rules will handle this case correctly.
525
+ if pre is None and post is None and dev is not None:
526
+ _pre: CmpPrePostDevType = NegativeInfinity
527
+ # Versions without a pre-release (except as noted above) should sort after
528
+ # those with one.
529
+ elif pre is None:
530
+ _pre = Infinity
531
+ else:
532
+ _pre = pre
533
+
534
+ # Versions without a post segment should sort before those with one.
535
+ if post is None:
536
+ _post: CmpPrePostDevType = NegativeInfinity
537
+
538
+ else:
539
+ _post = post
540
+
541
+ # Versions without a development segment should sort after those with one.
542
+ if dev is None:
543
+ _dev: CmpPrePostDevType = Infinity
544
+
545
+ else:
546
+ _dev = dev
547
+
548
+ if local is None:
549
+ # Versions without a local segment should sort before those with one.
550
+ _local: CmpLocalType = NegativeInfinity
551
+ else:
552
+ # Versions with a local segment need that segment parsed to implement
553
+ # the sorting rules in PEP440.
554
+ # - Alpha numeric segments sort before numeric segments
555
+ # - Alpha numeric segments sort lexicographically
556
+ # - Numeric segments sort numerically
557
+ # - Shorter versions sort before longer versions when the prefixes
558
+ # match exactly
559
+ _local = tuple(
560
+ (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
561
+ )
562
+
563
+ return epoch, _release, _pre, _post, _dev, _local
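The net effect of these choices on ordering, sketched with the Version class above:

    assert Version("1.0.dev0") < Version("1.0a0") < Version("1.0")
    assert Version("1.0") < Version("1.0.post1") < Version("1.0.post1+local")
    assert Version("1.0") == Version("1.0.0")  # trailing zeros are ignored when comparing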
llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__init__.py ADDED
@@ -0,0 +1,78 @@
1
+ """torch.multiprocessing is a wrapper around the native :mod:`multiprocessing` module.
2
+
3
+ It registers custom reducers that use shared memory to provide shared
4
+ views on the same data in different processes. Once the tensor/storage is moved
5
+ to shared_memory (see :func:`~torch.Tensor.share_memory_`), it will be possible
6
+ to send it to other processes without making any copies.
7
+
8
+ The API is 100% compatible with the original module - it's enough to change
9
+ ``import multiprocessing`` to ``import torch.multiprocessing`` to have all the
10
+ tensors sent through the queues or shared via other mechanisms, moved to shared
11
+ memory.
12
+
13
+ Because of the similarity of APIs we do not document most of this package's
14
+ contents, and we recommend referring to the very good docs of the original module.
15
+ """
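A small sketch of the zero-copy behavior described above (the start method and the toy worker are illustrative):

    import torch
    import torch.multiprocessing as mp

    def worker(q):
        t = q.get()    # arrives backed by the same shared storage
        t.add_(1)      # in-place update is visible to the parent

    if __name__ == "__main__":
        ctx = mp.get_context("spawn")
        q = ctx.Queue()
        t = torch.zeros(3)
        t.share_memory_()
        p = ctx.Process(target=worker, args=(q,))
        p.start()
        q.put(t)
        p.join()
        print(t)       # tensor([1., 1., 1.])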
16
+ import multiprocessing
17
+ import sys
18
+
19
+ import torch
20
+ from .reductions import init_reductions
21
+
22
+ __all__ = ["set_sharing_strategy", "get_sharing_strategy", "get_all_sharing_strategies"]
23
+
24
+
25
+ from multiprocessing import * # noqa: F403
26
+
27
+
28
+ __all__ += multiprocessing.__all__ # noqa: PLE0605 type: ignore[attr-defined]
29
+
30
+
31
+ # This call adds a Linux specific prctl(2) wrapper function to this module.
32
+ # See https://github.com/pytorch/pytorch/pull/14391 for more information.
33
+ torch._C._multiprocessing_init()
34
+
35
+
36
+ """Add helper function to spawn N processes and wait for completion of any of
37
+ them. This depends on `mp.get_context`, which was added in Python 3.4."""
38
+ from .spawn import (
39
+ ProcessContext,
40
+ ProcessExitedException,
41
+ ProcessRaisedException,
42
+ spawn,
43
+ SpawnContext,
44
+ start_processes,
45
+ )
46
+
47
+
48
+ if sys.platform == "darwin" or sys.platform == "win32":
49
+ _sharing_strategy = "file_system"
50
+ _all_sharing_strategies = {"file_system"}
51
+ else:
52
+ _sharing_strategy = "file_descriptor"
53
+ _all_sharing_strategies = {"file_descriptor", "file_system"}
54
+
55
+
56
+ def set_sharing_strategy(new_strategy):
57
+ """Set the strategy for sharing CPU tensors.
58
+
59
+ Args:
60
+ new_strategy (str): Name of the selected strategy. Should be one of
61
+ the values returned by :func:`get_all_sharing_strategies()`.
62
+ """
63
+ global _sharing_strategy
64
+ assert new_strategy in _all_sharing_strategies
65
+ _sharing_strategy = new_strategy
66
+
67
+
68
+ def get_sharing_strategy():
69
+ """Return the current strategy for sharing CPU tensors."""
70
+ return _sharing_strategy
71
+
72
+
73
+ def get_all_sharing_strategies():
74
+ """Return a set of sharing strategies supported on the current system."""
75
+ return _all_sharing_strategies
76
+
77
+
78
+ init_reductions()
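For example (available strategy names depend on the platform, as set up above):

    import torch.multiprocessing as mp

    print(mp.get_all_sharing_strategies())  # {'file_descriptor', 'file_system'} on Linux
    mp.set_sharing_strategy("file_system")  # "file_system" is available on every platform
    assert mp.get_sharing_strategy() == "file_system"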
llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.25 kB). View file