diff --git a/ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c9af2ce13fbc975618239b7d54e1536dd5ac71b7
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d22d5573f9a1c363406a5caaba27fa962267b596020bb774e5e0a941bd35bdf9
+size 50332828
diff --git a/ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4666413ddb6c5f306da4adac5b4fe0768ee68101
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd6970ddc989fe7653460eb102c1cb92dec1227367988731e8b574254a582c36
+size 50332749
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__init__.py b/venv/lib/python3.10/site-packages/torch/_functorch/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..10a55772ab58b21573a6eba0356ddd3080164ac7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/_functorch/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d8fe4afdbf9e8405be15570b659a8047092fb42
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/aot_autograd.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/aot_autograd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11b4e6bf79d7dd8510a4e60423f28813c63b55fb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/aot_autograd.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/autograd_function.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/autograd_function.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e534cf5bf864d1d0659c2a748c1676fb571af66d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/autograd_function.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/batch_norm_replacement.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/batch_norm_replacement.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fa1eaa9a770a8420478792352520706940235877
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/batch_norm_replacement.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/benchmark_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/benchmark_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fa2a50335ed9c97321e72e7e206648255ec4005d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/benchmark_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/compile_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/compile_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6389cf38a30d5734b33c78af375273717667b628
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/compile_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/compilers.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/compilers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bdb0ba0f2c5f8b31983becb233084e61b671eb76
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/compilers.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/eager_transforms.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/eager_transforms.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a0b4b0e7694a76ea3410238d1394b7af778c039e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/eager_transforms.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/functional_call.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/functional_call.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..89216162dd18406a55720ea9b0569fcce4792bfc
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/functional_call.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/fx_minifier.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/fx_minifier.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3a18080611bbd7d7d78de89494a5c70cc4ebe428
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/fx_minifier.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/make_functional.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/make_functional.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b9266df317ca67778d453dba79b237016b8d7ca
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/make_functional.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/partitioners.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/partitioners.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd64429c1f0c5bc0e50b63aa90f5cf282a7ca494
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/partitioners.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/pyfunctorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/pyfunctorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d0f310aa10197d37be8f3f9f219f2ae74c229a82
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/pyfunctorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/python_key.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/python_key.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48c55b124dd2b128edfaf782a64ad1fcc6414699
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/python_key.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/pytree_hacks.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/pytree_hacks.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc15493e201e00d36ed97bfaf14ef015164c712f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/pytree_hacks.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/top_operators_github_usage.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/top_operators_github_usage.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc2a458ca7baed5cb17bf4192dbd6fab8455dacc
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/top_operators_github_usage.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..850607f27f22e47ddc239e7aa81e15415ccf992e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/vmap.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/vmap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9944d0cd9b9d37535cd82a7698d6c765db24184
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/vmap.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__init__.py b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..10a55772ab58b21573a6eba0356ddd3080164ac7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aad5dc2b4914c1f69348e08a8a8ee6b5e1a04b4f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/collect_metadata_analysis.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/collect_metadata_analysis.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..41da748396ba9277189f62ede2b7d81cd4ae2477
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/collect_metadata_analysis.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/dispatch_and_compile_graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/dispatch_and_compile_graph.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..76921ec96c7dbeb3270eb68ed325ea25a8bfcff6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/dispatch_and_compile_graph.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/functional_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/functional_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..496e3c23f4ebfe9a4c308c23d2643d21f7147579
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/functional_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/input_output_analysis.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/input_output_analysis.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7e01bcdb2e892f98be0a8b8e35d5a98738e4a60
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/input_output_analysis.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/jit_compile_runtime_wrappers.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/jit_compile_runtime_wrappers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1dff1ec45cbb8bdf3823b6183901d8f02768e18
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/jit_compile_runtime_wrappers.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/logging_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/logging_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d4ce640687387208a8581496d73c127a95effacd
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/logging_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/runtime_wrappers.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/runtime_wrappers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..82f6060ec4c7ea58f3178f8947818bec3f5ba9c1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/runtime_wrappers.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/schemas.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/schemas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a479db1afd95d59a598aae2ff07a3150c4ee8ea
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/schemas.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/subclass_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/subclass_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a17396cd307baaf522f20cdceba854c0bfc3b792
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/subclass_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/traced_function_transforms.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/traced_function_transforms.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b2cfd857936a9025ff82427a006e1d24913222e8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/traced_function_transforms.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0698c8eba79309dbb0ea1d510dba70239b0a85bd
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__pycache__/utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/collect_metadata_analysis.py b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..221690798fd3df076f767051c7edc03a214518a6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
@@ -0,0 +1,626 @@
+"""
+This module is one of the analysis modules - it takes as input a function or graph
+and some preexisting properties, and returns some data that is useful for deciding
+how to further proceed with compilation or construct runtime wrappers.
+
+In particular, the analysis here constructs view and mutation metadata from running
+a functionalized version of the graph under compilation.
+""" + +import collections +import logging +from functools import wraps +from typing import Callable, DefaultDict, Dict, List + +import torch +import torch.utils._pytree as pytree +from torch import Tensor +from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode +from torch._subclasses.meta_utils import safe_is_leaf +from torch.fx.experimental.symbolic_shapes import is_concrete_int +from torch.multiprocessing.reductions import StorageWeakRef +from torch.utils._python_dispatch import ( + is_traceable_wrapper_subclass, + transform_subclass, +) +from .functional_utils import ( + are_all_mutations_hidden_from_autograd, + are_all_mutations_under_no_grad_or_inference_mode, + from_fun, + has_data_mutation, + has_metadata_mutation, + has_same_metadata, + to_fun, +) +from .schemas import ( + InputAliasInfo, + MutationType, + OutputAliasInfo, + OutputType, + ViewAndMutationMeta, +) +from .subclass_utils import create_subclass_meta + +from .utils import _get_autocast_states, KNOWN_TYPES, strict_zip + +zip = strict_zip + +log = logging.getLogger(__name__) + + +# This is a version of functionalization that is specifically designed +# for the AOTAutograd use case. +# +# Unlike functorch's variant, this doesn't use the functorch level system, +# instead it directly uses PyTorch's conventional dispatcher to hit the +# functionalization key. In particular, this means that FunctionalTensorWrapper +# can have autograd data stored directly on it. +# +# In typical AOTAutograd usage, the dispatch key order will look like: +# +# Autograd - Functionalization ~~~~> Proxy Mode - Fake Tensor +# outer tensor inner tensor +# +# Returns: +# - ViewAndMutationMeta, telling us metadata about the inputs and outputs, and +# The list of outputs from the forward, but **only** the outputs that we need +# to pass in as tangents into the backward. +# Specifically, aliased outputs from the forward get regenerated, and don't participate +# in the compiled backward function. +def run_functionalized_fw_and_collect_metadata( + f, + *, + keep_input_mutations: bool, + # TODO: refactor to kill this flag + is_train: bool = False, + pre_dispatch: bool = False, +) -> Callable[..., ViewAndMutationMeta]: + memo: Dict[Tensor, Tensor] = {} + + def _to_fun(t): + if isinstance(t, Tensor): + if t in memo: + return memo[t] + r = to_fun(t) + memo[t] = r + return r + else: + return t + + @wraps(f) + def inner(*flat_args): + # This function is meant to be run with the forward, which expects a flat list of tensor/symint/other args. + assert all(isinstance(a, tuple(KNOWN_TYPES)) for a in flat_args) + + input_info: List[InputAliasInfo] = [] + output_info: List[OutputAliasInfo] = [] + + prior_grad_enabled = torch.is_grad_enabled() + prior_autocast_states = _get_autocast_states() + + # See Note [Disabling Functionalize TLS Above Python Functionalization] + disable_above = torch._C._ExcludeDispatchKeyGuard( + torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize) + ) + + # It doesn't matter if we run this under predispatch or not because it is + # only for figuring out metadata + mode = FunctionalTensorMode(_allow_token_discovery=True) + with disable_above, mode: + # precondition: The passed in function already handles unflattening inputs + flattening outputs + flat_f_args = pytree.tree_map(_to_fun, flat_args) + flat_f_outs = f(*flat_f_args) + + if prior_autocast_states != _get_autocast_states(): + raise RuntimeError( + "AOTAutograd does not support tracing graphs that mutate the autocast state. 
" + "Dynamo will only insert autocast context managers (e.g. with torch.autocast(..)) into the graph, " + "which will unwind all of their mutations to autocast state before the graph exits. " + "If you encounter this error while using torch.compile, please file a bug." + ) + + # Inspect the state of the input tensor functional wrapper to detect input mutation info + # If inp[i] has a metadata-only mutation, then maybe_inputs_with_mutated_metadata[i] contains the updated version + for i, (arg, f_arg) in enumerate(zip(flat_args, flat_f_args)): + # NB: Mutation of non-contiguous tensor subclass input can result in a mismatch in + # strides between the functionalized arg inner tensors and non-functionalized arg inner + # tensors. This is a problem as the inner tensor stride change may not be reflected + # correctly in the outer tensor, so disallow this for now. + mutates_data = has_data_mutation(f_arg) + if ( + mutates_data + and not arg.is_contiguous() + and is_traceable_wrapper_subclass(arg) + ): + raise RuntimeError( + "Mutations on non-contiguous inputs are currently not allowed on " + "tensor subclasses" + ) + + if not isinstance(arg, Tensor): + new_arg = arg + else: + new_arg = from_fun(f_arg) + mutates_metadata = has_metadata_mutation( + f_arg, arg, check_only_storage_mutation=False + ) + if mutates_metadata and is_traceable_wrapper_subclass(arg): + raise RuntimeError( + "Metadata mutations are currently not allowed on tensor subclasses" + ) + mutates_storage_metadata = has_metadata_mutation( + f_arg, arg, check_only_storage_mutation=True + ) + mutations_hidden_from_autograd = are_all_mutations_hidden_from_autograd( + f_arg + ) + mutations_under_no_grad_or_inference_mode = ( + mutates_data + and are_all_mutations_under_no_grad_or_inference_mode(f_arg) + ) + + # Here, we're saying that if an input experienced a set call, inp.set_(other), + # then we can effectively not have to worry about whether its data was mutated. + # There are 3 cases: + # (1) We mutate inp *after* the set_() call. other is a graph intermediate. + # In this case, we're not really mutating the input storage of "inp"; + # we're mutating the storage of an intermdiate value (other), + # and slamming that storage into the input tensor. So no data mutation is necessary. + # (2) We mutate inp *after* the set_() call. other is a graph *input*. + # In this case, the data mutation will be properly handled in the runtime + # epilogue during the processing of "other" + # (3) We mutate inp *before* the set_() call. + # This case is *not* currently handled. + # TODO: discuss this in the PR. Both supporting this, and detecting + erroring out, + # seem painful to get working. + if mutates_storage_metadata: + mutates_data = False + + requires_grad = isinstance(f_arg, torch.Tensor) and f_arg.requires_grad + + input_info.append( + InputAliasInfo( + is_leaf=isinstance(arg, Tensor) and safe_is_leaf(arg), + mutates_data=mutates_data, + mutates_metadata=mutates_metadata, + mutations_hidden_from_autograd=mutations_hidden_from_autograd, + mutates_storage_metadata=mutates_storage_metadata, + mutations_under_no_grad_or_inference_mode=mutations_under_no_grad_or_inference_mode, + requires_grad=requires_grad, + keep_input_mutations=keep_input_mutations, + ) + ) + + # If a function involves creating a tensor, and returning a view of it, such that its _base is the intermediate, + # We need to make sure our graph returns the _base as a graph output, and we manually recreate the view + # to return to the user. Why? 
The backend compiler is free to (incorrectly) not set requires_grad + # on the base tensor, but we are obligated to properly set requires-gradness on the real output. + + inp_storage_refs = { + StorageWeakRef(inpt.untyped_storage()): idx + for idx, inpt in enumerate(flat_f_args) + if isinstance(inpt, Tensor) + } + + # We need inp tensor id's to be able to tell if an outputs **are** inputs. + inp_tensor_ids = {id(inpt) for inpt in flat_f_args if isinstance(inpt, Tensor)} + # We need output tensor id's to tell if any output._base` attributes **are** other outputs. + # (This is also a dict because we need to know that output's index, so we can regenerate + # the alias from it). + out_tensor_ids = {id(o): i for i, o in enumerate(flat_f_outs)} + + # Keep track of which outputs alias other outputs + out_tensor_alias_counts: DefaultDict = collections.defaultdict(int) + # This tells us, for a given group of outputs that alias each other, + # whether they e.g. all came from an unbind call + num_aliased_tensors_that_are_multi_output_views: DefaultDict = ( + collections.defaultdict(int) + ) + out_storage_to_tensors: DefaultDict = collections.defaultdict(set) + curr_storage = None + for o in flat_f_outs: + if isinstance(o, torch.Tensor): + curr_storage = StorageWeakRef(o.untyped_storage()) + out_tensor_alias_counts[curr_storage] += 1 + # Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call] + # This is an optimization on top of the "alias of intermediates" logic, + # which you can read more about under Note [AOT Autograd: outputs aliasing inputs or intermediates!] + # + # Before describing the optimization: this is important for AOTAutograd to have good + # perf around, multi-output views. HOWEVER: + # - There is a more generic change to AOTAutograd that we'd like to make, that subsumes this case, + # around using pre-dispatch tracing to partition out a graph so we can faithfully replay all + # views without having to regenerate them at runtime. + # - It's loosely described in this doc (more details will be added soon): + # https://docs.google.com/document/d/1DlfFq8TKbuAn2zyJxLfoW-X1qkkm5PLdHFtySo03QAk/edit + # - Once that change lands, we should just rip out this "optimization", since: + # (1) It will be fully unnecessary + # (2) Although it is only a few lines of code, it is a bit difficult to reason about + # its correctness with the autograd engine in all cases. + # + # + # What is this optimization? Consider the below case: + # def f(x): + # intermediate = x.mul(2) + # # x and intermediate here require grad + # o1, o2, ... o10 = intermediate.unbind(-1) + # return intermediate, o1, o2, ... o10 + # Now, the "intermediate base" handling in AOTAutograd implies that we must do the following: + # (1) return "intermediate as an extra output of the compiled graph + # (2) regenerate each aliased output off of "intermediate", **outside** of the autograd.Function. + # The reason AOTAutograd ordinarily does this is for safety: the autograd engine needs to know + # that o1 through o10 are all aliased, and if we blindly return o1 through o10 from the autograd.Function, + # this information will be hidden. + # In particular, mutating one alias might require autograd to update autograd metadata on the other aliases + # (like their grad_fn, for example, when the autograd engine needs to do view-replay). 
+ # + # However, intermediate_base logic can be bad for backward performance (we sometimes generate + # as_strided calls during the intermediate base logic, which can have a slow backward formula). + # Is it possible to find a set of conditions where it is **safe** to hide the output aliasing from autograd? + # + # For a set of outputs of the graph that alias each other, o_1...o_k, consider: + # (1) They came from the same multi-output view op, e.g. o_1, ..., o_k = intermediate.unbind(0) + # (2) If there are any other aliases of o_1 through o_k (in the example above, intermediate), + # **at most** 1 can escape from the graph (e.g. there is not some other graph input/output + # o_other, that aliases these outputs) + # (3) o_1...o_k all require_grad, they all share the same ._base, and their ._base requires grad. + # This condition is important because it's what causes slowness in the intermediate_base + # codepath of aot_autograd. Ordinarily, o_1...o_k would all get a grad_fn, and + # aot_autograd's view-replay might give each output an AsStridedBackward as its grad_fn. + # "K" AsStridedBackward calls will be *much* slower than a single UnbindBackward. + # In this setup, is it possible to mutate one of the outputs o_i in a way that would affect the autograd meta + # of the other aliases? + # + # Claim: No! Consider a few example (which I'm pretty sure cover all cases of mutation w.r.t. autograd): + # (a) What happens if we mutate any of o_1 through o_k directly? + # Autograd raises an error: + # "RuntimeError: Output 0 of UnbindBackward0 is a view and is being modified inplace. This view is + # the output of a function that returns multiple views. Such functions do not allow the output + # views to be modified inplace. You should replace the inplace operation by an out-of-place one." + # (b) What if we take a view of o_k and mutate it, o_k.view(o_k.shape).mul_(2)? + # Autograd raises the same error- the "multi-output-view"ness of an alias propagates to future views. + # (c) What if we mutate o_k under no_grad? + # Autograd raises the same error + # (d) What if we detach and mutate, e.g. o_k.detach().mul_(2)? + # Autograd allows this, *but* autograd updates all alias's grad_fn's to be error functions when accessed. + # Autograd raises the same error + # (e) What if we try to mutate another alias of o_1...o_k, that was **not** created from a multi-output view? + # We promised that there is at most **one** such alias, e.g. intermediate in the example above. + # You can mutate intermediate, but in eager mode this will change the grad_fn of o_1...o_k + # to be error fn's. + # Since intermediate was the *only* non-multi-output-alias, there are no other aliases + # of `intermediate` around that were produced by the compiled fn and have a valid grad_fn. + # + # Coming back to this optimization: + # Given that it is not possible for mutating one of these aliases to affect the autograd metadata of another alias + # without causing an error in eager mode, we will simple hide the aliasing from autograd during torch.compile + # if all of the above conditions are met. + # This has the slight downside that it's possible to write some "bad" code that autograd will raise an error on + # in eager but fail to during torch.compile, but it has the benefit that this code has much better performance. 
+ # NOTE: if and when we eventually update AOTAutograd to do the "view graph slicing" defined here: + # https://docs.google.com/document/d/1DlfFq8TKbuAn2zyJxLfoW-X1qkkm5PLdHFtySo03QAk/edit, + # then this optimization will probably matter less and might be ok to remove. + is_cur_tensor_multi_out_view = isinstance( + o, FunctionalTensor + ) and torch._functionalize_is_multi_output_view( # type: ignore[attr-defined] + o.elem + ) + if is_cur_tensor_multi_out_view: + num_aliased_tensors_that_are_multi_output_views[curr_storage] += 1 + out_storage_to_tensors[curr_storage].add(o) + + # maps the id of an intermediate base to its index in the output of the compiled forward + intermediate_base_tensor_id_to_output_idx: Dict[int, int] = {} + intermediate_bases: List[torch.Tensor] = [] + # Why Do We Care If Storage Changed? + # It's important to understand the implications of storage changes in complex scenarios. Take this example: + # + # def f(x): + # x_storage = x.untyped_storage() + # non_leaf_tensor = torch.ones(4, requires_grad=True).clone() + # + # # Using no_grad() and _unsafe_preserve_version_counter to simulate the .data = operation + # with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter(x): + # x.set_(non_leaf_tensor.untyped_storage()) + # + # out = x.view(-1) + # + # # Restoring x to its original storage, again simulating .data = operation + # with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter(x): + # x.set_(x_storage) + # + # return out + # + # In this scenario, 'x' and 'out' have different shapes and are stored at different memory addresses, aka no aliasing. + # However, due to how set_() and more specificlaly, set is functionalized, is defined to preserve eager semantics, + # the autograd engine mistakenly assumes that 'x' and 'out' are aliased, treating 'x' as 'out._base'. + # This misinterpretation leads to an 'alias_of_input' flag, causing an unnecessary as_strided() call to be generated, + # which could lead to issues later in the code. + for o in flat_f_outs: + functional_tensor_storage_changed = isinstance( + o, FunctionalTensor + ) and torch._functionalize_was_storage_changed( # type: ignore[attr-defined] + o.elem + ) + curr_storage = ( + None + if not isinstance(o, torch.Tensor) + else StorageWeakRef(o.untyped_storage()) + ) + outs_with_identical_metadata_that_require_grad = ( + [] + if not isinstance(o, Tensor) + else [ + curr + for curr in out_storage_to_tensors[curr_storage] + if has_same_metadata(o, curr) + and curr.requires_grad + and o is not curr + ] + ) + + # See Note [Accessing .grad_fn on FunctionalTensor] + # In-place operations on views will trigger a lazy rebase of the autograd graph; + # this runs during access to the .grad_fn. The rebase logic will invoke view ops + # on FunctionalTensors, so we must enable a FunctionalTensorMode here to ensure + # these op calls succeed. 
+ grad_fn = None + if isinstance(o, Tensor): + with FunctionalTensorMode(): + grad_fn = o.grad_fn + + is_result_of_custom_autograd_fn = False + # Need to check for both custom cpp (CppFunction) and python (BackwardCFunction) + # autograd fns + if type(grad_fn).__name__ == "CppFunction": + is_result_of_custom_autograd_fn = True + if isinstance(grad_fn, torch.autograd.function.BackwardCFunction): + is_result_of_custom_autograd_fn = True + + if not isinstance(o, Tensor): + output_type = OutputType.non_alias + base_idx = None + elif ( + curr_storage in inp_storage_refs + and grad_fn is not None + and is_result_of_custom_autograd_fn + ): + output_type = OutputType.custom_function_view + base_idx = None + elif ( + curr_storage in inp_storage_refs + and not functional_tensor_storage_changed + ): + base_idx = inp_storage_refs[curr_storage] + is_input_tensor = id(o) in inp_tensor_ids + num_aliased_outs = out_tensor_alias_counts[curr_storage] + num_multi_output_view_outs = ( + num_aliased_tensors_that_are_multi_output_views[curr_storage] + ) + num_aliased_outs_that_are_not_multi_output_views = ( + num_aliased_outs - num_multi_output_view_outs + ) + if ( + grad_fn is not None + and num_aliased_outs_that_are_not_multi_output_views == 0 + ): + # See Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call] + # In particular, given: + # def f(x): + # return list(x.unbind(0)) + # The main reason we ordinarily try to regenerate these output aliases outside of the + # compiled autograd.Function is because if any of the outputs are later mutated, + # autograd needs to perform view-replay to regenerate them. + # However, autograd does not allow users to mutate multi-output views + # in any way that can change the autograd metadata of other aliases. + # So we hide this aliasing from autograd here. + log.debug( + "Encountered AOTAutograd case: differentiable outputs that \ +alias each other from a multi-output view call" + ) + output_type = OutputType.non_alias + elif is_input_tensor: + output_type = OutputType.is_input + else: + output_type = OutputType.alias_of_input + + # We only need to handle the intermediate base case when both + # the intermediate base and the output require gradients. + # See Note [AOT Autograd: outputs aliasing inputs or intermediates!] + elif o._base is not None and o.requires_grad and o._base.requires_grad: + num_aliased_outs = out_tensor_alias_counts[curr_storage] + num_multi_output_view_outs = ( + num_aliased_tensors_that_are_multi_output_views[curr_storage] + ) + num_aliased_outs_that_are_not_multi_output_views = ( + num_aliased_outs - num_multi_output_view_outs + ) + # Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call] + if ( + out_tensor_alias_counts[curr_storage] == 1 + or num_aliased_outs_that_are_not_multi_output_views <= 1 + ): + # Note [Intermediate Bases Optimization] + # Normally if we have an output that aliases an intermediate, + # we need to add the extra "intermediate base" logic further down + # to prevent autograd from yelling at us if the user later tries to + # mutate that output. + # However, the common case here is if we have an output that aliases an intermediate, + # but doesn't alias any other outputs. + # In that case, autograd shouldn't have to worry about the aliasing at all + # (if that output is mutated, there are no other live aliases for autograd to worry about). + # The "intermediate bases" can hurt inductor perf by forcing more variables to become outputs. 
+ # So as an optimization, we won't do intermediate base handling in this case. + # Instead, we'll hide the aliasing from autograd using aten._unsafe_view(). + if ( + out_tensor_alias_counts[curr_storage] != 1 + and num_aliased_outs_that_are_not_multi_output_views <= 1 + ): + log.debug( + "Encountered AOTAutograd case: differentiable outputs that alias each other \ +from a multi-output view call" + ) + output_type = OutputType.unsafe_view_alias + base_idx = None + else: + # First, check if o's ._base is an existing output + maybe_existing_out_idx = out_tensor_ids.get(id(o._base), None) + if maybe_existing_out_idx is not None: + # Special case where the output is an alias of a graph intermediate, but that intermediate + # is itself also a user output. + output_type = ( + OutputType.alias_of_intermediate_base_is_user_output + ) + base_idx = maybe_existing_out_idx + else: + # Next, check if o's ._base is an intermediate base that we already returned + maybe_existing_base_output_idx = ( + intermediate_base_tensor_id_to_output_idx.get( + id(o._base), None + ) + ) + if maybe_existing_base_output_idx is not None: + output_type = OutputType.alias_of_intermediate + base_idx = maybe_existing_base_output_idx + else: + # Otherwise, take o._base and explicitly return it as an output in the compiled graph + new_out_idx = len(intermediate_bases) + base_idx = new_out_idx + # Indicate to the logic later on (when we trace the joint) + # that this particular output should get it's ._base appended to the forward graph outputs + output_type = ( + OutputType.alias_of_intermediate_save_as_output + ) + intermediate_base_tensor_id_to_output_idx[ + id(o._base) + ] = new_out_idx + intermediate_bases.append(o._base) + elif ( + # See https://github.com/pytorch/pytorch/issues/100348 for this case. + # This protects against the specific case where a user fn returns (output, output.detach()) + out_tensor_alias_counts[curr_storage] > 1 + and len(outs_with_identical_metadata_that_require_grad) > 0 + and not o.requires_grad + ): + assert len(outs_with_identical_metadata_that_require_grad) > 0 + # In theory we could use any of these tensors to regenerate the aliased outputs from, + # since they all alias each other and have identical metatadata + out_alias = outs_with_identical_metadata_that_require_grad[0] + existing_out_idx = out_tensor_ids[id(out_alias)] + output_type = OutputType.alias_of_intermediate_base_is_user_output + base_idx = existing_out_idx + else: + output_type = OutputType.non_alias + base_idx = None + + if isinstance(o, torch.Tensor): + dynamic_dims = { + i for i, s in enumerate(o.shape) if not is_concrete_int(s) + } + else: + dynamic_dims = None + out_info = OutputAliasInfo( + output_type=output_type, + raw_type=type(o), + base_idx=base_idx, + dynamic_dims=dynamic_dims, + requires_grad=isinstance(o, torch.Tensor) and o.requires_grad, + ) + output_info.append(out_info) + + # See Note [AOT Autograd: Views to avoid tangents aliasing inputs] + def view_avoid_dupes_with_primals(t): + if isinstance(t, Tensor) and is_traceable_wrapper_subclass(t): + return transform_subclass( + t, lambda _, inner_t: view_avoid_dupes_with_primals(inner_t) + ) + if isinstance(t, Tensor): + return t.view(t.shape) + return t + + # This analysis function returns *only* the outputs that are meant to be tangents to the backwards. 
+ # Anything that aliases (inputs returned in the fw due to metadata mutations, or outputs that alias inputs/intermediates) + # are *regenerated* later, and not used directly in the autograd graph + f_input_tangents = [ + inp + for inp, info in zip(flat_f_args, input_info) + if info.mutation_type == MutationType.MUTATED_OUT_GRAPH + and info.mutates_data + and info.requires_grad + ] + f_output_tangents = [ + o + for o, info in zip(flat_f_outs, output_info) + if info.output_type + in [ + OutputType.non_alias, + OutputType.unsafe_view_alias, + OutputType.custom_function_view, + ] + and issubclass(info.raw_type, torch.Tensor) + and info.requires_grad + ] + # intermediate bases are also included in the backward graph + f_tangents = f_input_tangents + f_output_tangents + intermediate_bases + traced_tangents = pytree.tree_map(from_fun, f_tangents) + traced_tangents = pytree.tree_map( + view_avoid_dupes_with_primals, traced_tangents + ) + user_outs = pytree.tree_map(from_fun, f_output_tangents) + + f_mutated_inputs = [ + inp + for inp, info in zip(flat_f_args, input_info) + if info.mutation_type == MutationType.MUTATED_OUT_GRAPH + ] + f_metadata_mutated_inputs = [ + inp for inp, info in zip(flat_f_args, input_info) if info.mutates_metadata + ] + # This logic (annoyingly) re-figures out exactly what the outputs to the compiled fw graph will be. + # When handling subclasses, we need info about **all** outputs of compiled forward graph, + # so we know precisely which graph outputs to wrap back into tensor subclasses + # Ideally we would refactor this so not have an is_train flag, and have the separate + # inference and training paths decide which inputs/output to ask for subclass info on. + # However, we currently stash indexing information on each SubclassMeta about its order + # in the graph outputs list. + f_fw_graph_outs = list(flat_f_outs) + if is_train or not keep_input_mutations: + f_fw_graph_outs = f_mutated_inputs + f_fw_graph_outs + else: + # even when "keep_input_mutations" is True, + # we never keep metadata-only mutations in the fw graph + f_fw_graph_outs = f_metadata_mutated_inputs + f_fw_graph_outs + if is_train: + f_fw_graph_outs = f_fw_graph_outs + intermediate_bases + fw_graph_outs = pytree.tree_map(from_fun, f_fw_graph_outs) + + grad_enabled_mutation = None + if torch.is_grad_enabled() != prior_grad_enabled: + grad_enabled_mutation = torch.is_grad_enabled() + torch.set_grad_enabled( + prior_grad_enabled + ) # Restore the prior state after tracing it + log.debug( + ( + "grad_mode mutation encountered in graph. 
" + "Will emit mutation epilogue, to set grad_mode=%s" + ), + grad_enabled_mutation, + ) + + metadata = ViewAndMutationMeta( + input_info=input_info, + output_info=output_info, + num_intermediate_bases=len(intermediate_bases), + keep_input_mutations=keep_input_mutations, + traced_tangents=traced_tangents, + subclass_inp_meta=create_subclass_meta(flat_args), + subclass_fw_graph_out_meta=create_subclass_meta(fw_graph_outs), + subclass_tangent_meta=create_subclass_meta(traced_tangents), + is_train=is_train, + grad_enabled_mutation=grad_enabled_mutation, + tokens=mode._tokens, + ) + return metadata + + return inner diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..9b2ba2e6aee07fd00e2452fa960bb3dd84610268 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py @@ -0,0 +1,192 @@ +""" +This module dispatches the graphs to either the forward-only or joint compilation +pathways, taking into account the AOTConfig and the collected ViewAndMutationMetadata. +""" + +from typing import Any, Callable, List, Optional, Tuple, Union + +import torch +import torch.utils._pytree as pytree +import torch.utils.dlpack +from torch import Tensor +from torch._dispatch.python import enable_python_dispatcher +from torch._dynamo.utils import lazy_format_graph_code +from torch._logging import getArtifactLogger, trace_structured +from torch._subclasses.functional_tensor import FunctionalTensorMode +from torch.fx.experimental.proxy_tensor import make_fx + +from .functional_utils import ( + assert_functional_graph, + propagate_input_mutation_stacktraces, +) +from .schemas import AOTConfig, SubclassMeta, ViewAndMutationMeta +from .traced_function_transforms import ( + aot_dispatch_subclass, + create_functionalized_fn, + create_joint, + fn_input_mutations_to_outputs, + fn_prepped_for_autograd, +) + +aot_graphs_log = getArtifactLogger(__name__, "aot_graphs") + + +def _create_graph(f, args, *, aot_config: AOTConfig) -> torch.fx.GraphModule: + # FunctionalTensorMode must be enabled here. + # See Note [Accessing .grad_fn on FunctionalTensor] + with enable_python_dispatcher(), FunctionalTensorMode( + pre_dispatch=aot_config.pre_dispatch, export=aot_config.is_export + ): + fx_g = make_fx( + f, + decomposition_table=aot_config.decompositions, + record_module_stack=True, + pre_dispatch=aot_config.pre_dispatch, + )(*args) + + return fx_g + + +def aot_dispatch_base_graph( + flat_fn, + flat_args: List[Tensor], + aot_config: AOTConfig, + *, + fw_metadata: ViewAndMutationMeta, +) -> Union[Callable, Tuple[Callable, List[Any], Optional[SubclassMeta]]]: + # aot_dispatch_base requires functionalization, but doesn't need to handle as many cases as the autograd case. 
+ # The cases that aot_dispatch_base doesn't need to handle include: + # - outputs that are aliases of graph intermediates + # - outputs that are aliases of graph inputs + # While cases that it does need to handle include: + # - input mutations (including when inputs are aliases of each other) + # - input metadata mutations + fn_to_trace = fn_input_mutations_to_outputs( + flat_fn, + fw_metadata, + keep_data_input_mutations=aot_config.keep_inference_input_mutations, + ) + + fn_to_trace, updated_flat_args = create_functionalized_fn( + fn_to_trace, + flat_args, + meta=fw_metadata, + aot_config=aot_config, + trace_joint=False, + ) + + ( + fn_to_trace, + updated_flat_args_subclasses_desugared, + maybe_subclass_meta, + ) = aot_dispatch_subclass( + fn_to_trace, + updated_flat_args, + is_joint_structure=False, + meta=fw_metadata, + fw_only=flat_fn, + ) + + fw_module = _create_graph( + fn_to_trace, + updated_flat_args_subclasses_desugared, + aot_config=aot_config, + ) + + # As long as we opted to remove input mutations, then + # there should be *NO* mutating ops in the graph at this point. + copy_count = assert_functional_graph(fw_module.graph) + + fw_module.graph.eliminate_dead_code() + fw_module.recompile() + + copy_count2 = assert_functional_graph(fw_module.graph) + propagate_input_mutation_stacktraces(fw_module.graph) + + assert copy_count == copy_count2 + + if aot_config.enable_log: + aot_graphs_log.info( + "%s", lazy_format_graph_code("Forward graph", fw_module, aot_config.aot_id) + ) + trace_structured( + "aot_forward_graph", + payload_fn=lambda: fw_module.print_readable(print_output=False), + ) + + # TODO: should factor this into a separate function for export that always only returns just the graph. + if aot_config.is_export: + assert ( + maybe_subclass_meta is None + ), "aot_export_module does not support tensor subclass inputs for now." + return fw_module + return fw_module, list(updated_flat_args_subclasses_desugared), maybe_subclass_meta + + +# Has the precondition that there +# are no duplicate arguments in flat_args (e.g., the same Tensor +# object never shows up twice. However, two tensor inputs MAY alias +# the same storage, so long as they have separate TensorImpls.) +def aot_dispatch_autograd_graph( + flat_fn, + flat_args: List[Any], + aot_config: AOTConfig, + *, + fw_metadata: ViewAndMutationMeta, +) -> Union[Callable, Tuple[Callable, List[Any], Optional[SubclassMeta]]]: + # traced_tangents corresponds to the set of outputs in the traced forward that should get grad_outputs in the traced backward. + # It includes outputs of the original forward, *and* any updated inputs due to input mutations. + # However, it does *not* include any outputs that are aliases of inputs or intermediates, or any metadata-only input mutations. 
+ traced_tangents = pytree.tree_map( + lambda x: x.detach().contiguous() if isinstance(x, Tensor) else x, + fw_metadata.traced_tangents, + ) + + joint_inputs = (flat_args, traced_tangents) + + fn_prepared_for_autograd = fn_prepped_for_autograd( + flat_fn, + fw_metadata, + ) + joint_fn_to_trace = create_joint(fn_prepared_for_autograd, aot_config=aot_config) + + joint_fn_to_trace, updated_joint_inputs = create_functionalized_fn( + joint_fn_to_trace, + joint_inputs, + meta=fw_metadata, + aot_config=aot_config, + trace_joint=True, + ) + + subclass_tracing_info = aot_dispatch_subclass( + joint_fn_to_trace, + updated_joint_inputs, + is_joint_structure=True, + meta=fw_metadata, + fw_only=flat_fn, + ) + + joint_fn_to_trace = subclass_tracing_info.plain_tensor_trace_fn + updated_joint_inputs = subclass_tracing_info.plain_tensor_args + maybe_subclass_meta = subclass_tracing_info.maybe_subclass_meta + + fx_g = _create_graph(joint_fn_to_trace, updated_joint_inputs, aot_config=aot_config) + + # There should be *NO* mutating ops in the graph at this point. + assert_functional_graph(fx_g.graph) + + # Redundant with the check above, but worth having in case tracing introduced + # a fake tensor. Unlikely. + # See Note: [Fake Modules and AOTAutograd] + torch._dynamo.utils.assert_no_fake_params_or_buffers(fx_g) + fx_g.graph.eliminate_dead_code() + fx_g.recompile() + # TODO: in AOTAutograd, we create metadata like _indices_of_inps_to_detach to detect + # when we need to manually detach() some inputs in the forward. + # Higher order ops might eventually need to do the same. + if aot_config.is_export: + assert ( + maybe_subclass_meta is None + ), "aot_export_module does not support tensor subclass inputs for now." + return fx_g + return fx_g, updated_joint_inputs, maybe_subclass_meta diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/functional_utils.py b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/functional_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ddeaa0bae7c51b4e794e8f42ef31e557ed911953 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/functional_utils.py @@ -0,0 +1,370 @@ +""" +This file contains utilities related to functionalization in AOTAutograd: +1. converting to/from functional tensors +2. detecting Tensor mutations - both metadata and Tensor value +3. regenerating/replaying views from their base +4. checking if a graph is functional i.e. whether it contains any mutation ops +""" + +import torch +from torch import Tensor +from torch._subclasses.fake_tensor import FakeTensor +from torch._subclasses.functional_tensor import FunctionalTensor +from torch.fx.experimental.symbolic_shapes import definitely_true, sym_eq +from torch.multiprocessing.reductions import StorageWeakRef +from torch.utils._python_dispatch import ( + is_traceable_wrapper_subclass, + transform_subclass, +) + + +def to_fun(t): + if isinstance(t, Tensor): + if is_traceable_wrapper_subclass(t): + # See Note [Functionalization always runs last] + # This means that if we want to "functionalize" a subclass, we need to ensure that the functional wrapper + # goes at the bottom. 
+ # recurse here, so we can support nested wrapper subclasses + out = transform_subclass(t, lambda _, inner_t: to_fun(inner_t)) + torch._mirror_autograd_meta_to(t, out) # type: ignore[attr-defined] + return out + else: + return FunctionalTensor.to_functional(t) + else: + return t + + +def sync_functional_tensor(t): + if is_traceable_wrapper_subclass(t): + attrs, ctx = t.__tensor_flatten__() # type: ignore[attr-defined] + for attr in attrs: + sync_functional_tensor(getattr(t, attr)) + else: + torch._sync(t) + + +# When subclasses are involved, t here will usually look something like: +# SubclassA(SubclassB(FunctionalTensor(_to_fun_tensor(FakeTensor)))) +def from_fun(t): + if isinstance(t, Tensor) and is_traceable_wrapper_subclass(t): + # See Note [Functionalization always runs last] + # This means that if we want to "functionalize" a subclass, we need to ensure that the functional wrapper + # goes at the bottom. + # recurse here, so we can support nested wrapper subclasses + out = transform_subclass(t, lambda _, inner_t: from_fun(inner_t)) + torch._mirror_autograd_meta_to(t, out) # type: ignore[attr-defined] + return out + + if not isinstance(t, FunctionalTensor): + # quick sanity assert + if isinstance(t, torch.Tensor): + assert not torch._is_functional_tensor(t) # type: ignore[attr-defined] + return t + sync_functional_tensor(t) + return torch._from_functional_tensor(t.elem) + + +def is_fun(t): + if isinstance(t, Tensor) and is_traceable_wrapper_subclass(t): + # See Note [Functionalization always runs last] + # This means that if we want to "functionalize" a subclass, we need to ensure that the functional wrapper + # goes at the bottom. + # recurse here, so we can support nested wrapper subclasses + t_attrs, _ = t.__tensor_flatten__() # type: ignore[attr-defined] + t_inners = [getattr(t, attr) for attr in t_attrs] + any_fun = any(is_fun(x) for x in t_inners) + all_fun = all(is_fun(x) for x in t_inners) + assert any_fun == all_fun + return any_fun + + return isinstance(t, FunctionalTensor) + + +# t here is either +# (1) A FunctionalTensor(_to_functional_tensor(FakeTensor)) +# (2) A traceable tensor subclass that holds a FunctionalTensor +# (3) Not a tensor +def has_data_mutation(t): + if is_traceable_wrapper_subclass(t): + attrs, _ = t.__tensor_flatten__() + # A tensor subclass was updated if any of its inner elements were updated + return any(has_data_mutation(getattr(t, attr)) for attr in attrs) + else: + if isinstance(t, torch.Tensor): + assert isinstance(t, FunctionalTensor) + return torch._functionalize_has_data_mutation(t.elem) # type: ignore[attr-defined] + return False + + +def are_all_mutations_hidden_from_autograd(t): + if is_traceable_wrapper_subclass(t): + attrs, _ = t.__tensor_flatten__() + # If all inner elements are mutations hidden from autograd, then it is a mutation hidden from autograd. 
+ return all( + are_all_mutations_hidden_from_autograd(getattr(t, attr)) for attr in attrs + ) + elif isinstance(t, torch.Tensor): + assert isinstance(t, FunctionalTensor) + return torch._functionalize_are_all_mutations_hidden_from_autograd(t.elem) + else: + return False + + +def are_all_mutations_under_no_grad_or_inference_mode(t): + if is_traceable_wrapper_subclass(t): + attrs, _ = t.__tensor_flatten__() + return all( + are_all_mutations_under_no_grad_or_inference_mode(getattr(t, attr)) + for attr in attrs + ) + else: + assert isinstance(t, FunctionalTensor) + return torch._functionalize_are_all_mutations_under_no_grad_or_inference_mode( + t.elem + ) + + +# f_arg here is either +# (1) A FunctionalTensor(_to_functional_tensor(FakeTensor)) +# (2) A traceable tensor subclass that holds a FunctionalTensor +# (3) Not a tensor +# Assumption: arg promises to be the "original" tensor wrapped by f_arg +# Note: "storage mutations" coming from set_() are a type of metadata mutation. So: +# - check_only_storage_mutation=True: only return true if there was a storage mutation +# - check_only_storage_mutation=Flse: return true if there was any metadata mutation (including a storage mutation) +def has_metadata_mutation(f_arg, arg, *, check_only_storage_mutation: bool): + if is_traceable_wrapper_subclass(f_arg): + attrs, _ = f_arg.__tensor_flatten__() + # A tensor subclass was updated if any of its inner elements were updated + f_inner_ts = [getattr(f_arg, attr) for attr in attrs] + inner_ts = [getattr(arg, attr) for attr in attrs] + return any( + has_metadata_mutation( + f_inner_t, + inner_t, + check_only_storage_mutation=check_only_storage_mutation, + ) + for f_inner_t, inner_t in zip(f_inner_ts, inner_ts) + ) + else: + if not isinstance(f_arg, torch.Tensor): + assert not isinstance(arg, torch.Tensor) + return False + assert isinstance(f_arg, FunctionalTensor) + assert isinstance(arg, FakeTensor) + + arg_after = torch._from_functional_tensor(f_arg.elem) + # This is true if the current tensor experienced at least one set_() call + maybe_storage_changed = torch._functionalize_was_storage_changed(f_arg.elem) # type: ignore[attr-defined] + # However, multiple set_() calls can cancel out. So we also check whether the + # storage of the tensor has changed. + # Note: if an input experienced two set_() calls that cancel out, **and** + # it experiences an data mutation, we pessimistically think that the set_() + # call is necessary here. We could in theory fix this, but this will + # hopefully never happen in user code, and is not needed for fsdp. + same_storages = StorageWeakRef(arg.untyped_storage()) == StorageWeakRef( + arg_after.untyped_storage() + ) + has_storage_metadata_mutation = maybe_storage_changed and not same_storages + if check_only_storage_mutation: + return has_storage_metadata_mutation + + # storage metadata mutation is a type of metadata mutation, so return true if we saw one + if has_storage_metadata_mutation: + return True + + maybe_metadata_mutated = torch._functionalize_has_metadata_mutation(f_arg.elem) # type: ignore[attr-defined] + # This is true if the current tensor experienced at least one metadata mutation. + # So if false, we know there was no metadata mutation + if not maybe_metadata_mutated: + return False + + # However, multi metadata mutations can cancel out. + # So we also check if the concrete sizes/strides on the tensor have changed. 
+ same_sizes = arg.shape == arg_after.shape + same_strides = arg.stride() == arg_after.stride() + same_offsets = arg.storage_offset() == arg_after.storage_offset() + has_metadata_mutation_ = maybe_metadata_mutated and not ( + same_sizes and same_strides and same_offsets + ) + # We consider a tensor to have been metadata mutated if its storage was mutated through a set_() call. + return has_metadata_mutation_ + + +def gen_alias_from_base(aliased_base_tensor, target_meta_tensor, target_requires_grad): + # Try to do view-replay if possible. + # fall back to .as_strided() if we can't. + if target_meta_tensor._base is not None: + # The base that we want to replay our view off of might have a different shape than the view's original base. + b = target_meta_tensor._base + abt = aliased_base_tensor + # Don't unnecessarily call as_strided if nothing changed; as_strided's + # backward is poorly implemented and slow + if abt is not b and ( + abt.size() != b.size() + or abt.stride() != b.stride() + or abt.storage_offset() != b.storage_offset() + ): + reshaped_base_tensor = aliased_base_tensor.as_strided( + b.size(), b.stride(), b.storage_offset() + ) + else: + reshaped_base_tensor = aliased_base_tensor + out = target_meta_tensor._view_func(reshaped_base_tensor) + # This shape mismatch can happen due to a bug in inplace/view handling in autograd. + # Try putting a breakpoint here and running + # `test/functorch/test_aotdispatch TestAOTAutograd.test_output_all_alias_types` + # Also, https://github.com/pytorch/pytorch/issues/49825 + # + # As a stopgap, we'll fall back to as_strided. + if out is not None and out.shape == target_meta_tensor.shape: + if aliased_base_tensor.requires_grad and not target_requires_grad: + out = out.detach() + elif not aliased_base_tensor.requires_grad and target_requires_grad: + out.requires_grad_(True) + return out + size = target_meta_tensor.size() + stride = target_meta_tensor.stride() + storage_offset = target_meta_tensor.storage_offset() + if aliased_base_tensor.is_complex() and not target_meta_tensor.is_complex(): + aliased_out = torch.view_as_real(aliased_base_tensor).as_strided( + size, stride, storage_offset + ) + elif not aliased_base_tensor.is_complex() and target_meta_tensor.is_complex(): + aliased_out = torch.view_as_complex(aliased_base_tensor).as_strided( + size, stride, storage_offset + ) + else: + aliased_out = aliased_base_tensor.as_strided(size, stride, storage_offset) + # For outputs aliasing inputs, we need to check if the requires-gradness has changed. + if aliased_base_tensor.requires_grad and not target_requires_grad: + aliased_out = aliased_out.detach() + elif not aliased_base_tensor.requires_grad and target_requires_grad: + aliased_out.requires_grad_(True) + # For outputs aliasing inputs, we need to check if the dtype has changed. + # as_strided() is the "most generic" view, but it does not cover cross-dtype views + if aliased_out.dtype != target_meta_tensor.dtype: + aliased_out = aliased_out.view(target_meta_tensor.dtype) + return aliased_out + + +def has_same_metadata(t1, t2): + return ( + definitely_true(sym_eq(t1.size(), t2.size())) + and definitely_true(sym_eq(t1.stride(), t2.stride())) + and definitely_true(t1.storage_offset() == t2.storage_offset()) + and t1.is_conj() == t2.is_conj() + and t1.is_neg() == t2.is_neg() + ) + + +# new_arg and arg here are either: +# (1) both a FakeTensor +# (2) both a traceable tensor subclass that holds a FakeTensor +# Pre-condition: the two args are the "old" and "new" inputs from running functionalization. 
+# When we run functionalization and wrap our inputs into FunctionalTensors, +# we can detect whether or not an input was mutated by checking to see if the inner tensor has changed +# +# Normally it would be enough just to check if arg is new_arg, which is normally enough for functionalization +# to confirm that inputs were not mutated when running the user's model with functionalization on. +# But when we have subclass inputs, we can't rely on that: +# `from_fun(to_fun(x)) is x` will return False, because the call to `from_fun` constructs +# a brand new subclass instance: we are calling __tensor_unflatten__, and going +# from Subclass(FakeTensor) to Subclass(FunctionalTensor(FakeTensor)) +def was_tensor_updated(arg, new_arg): + if is_traceable_wrapper_subclass(arg): + assert is_traceable_wrapper_subclass(new_arg) + attrs, _ = arg.__tensor_flatten__() + new_attrs, _ = new_arg.__tensor_flatten__() + assert attrs == new_attrs + # A tensor subclass was updated if any of its inner elements were updated + return any( + was_tensor_updated(getattr(arg, attr), getattr(new_arg, attr)) + for attr in attrs + ) + else: + return arg is not new_arg + + +# new_arg and arg here are either: +# (1) both a FakeTensor +# (2) both a traceable tensor subclass that holds a FakeTensor +# Pre-condition: the two args are the "old" and "new" inputs from running functionalization. +# When we run functionalization and wrap our inputs into FunctionalTensors, +# we can detect whether or not an input was mutated by checking to see if the inner tensor has changed, +# but shares storage with the old input +def was_tensor_metadata_updated(arg, new_arg): + if is_traceable_wrapper_subclass(arg): + assert is_traceable_wrapper_subclass(new_arg) + attrs, _ = arg.__tensor_flatten__() + new_attrs, _ = new_arg.__tensor_flatten__() + assert attrs == new_attrs + # A tensor subclass was updated if any of its inner elements were updated + return any( + was_tensor_metadata_updated(getattr(arg, attr), getattr(new_arg, attr)) + for attr in attrs + ) + else: + return arg is not new_arg and StorageWeakRef( + arg.untyped_storage() + ) == StorageWeakRef(new_arg.untyped_storage()) + + +# Returns the number of detected copy_ +def assert_functional_graph(fx_g: torch.fx.Graph) -> int: + placeholders = set() + copy_count = 0 + # NB: It would also be nice to verify that the mutations all happen at the + # end, but we also do some administrative views after mutations so this + # isn't actually true. (TODO: Could this cause problems for Inductor?) 
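+ # Illustrative sketch of a graph this accepts (assumed ops): every mutation
+ # has been functionalized away except for copy_ epilogues that write results
+ # back into placeholders, e.g.
+ #   def forward(self, arg0_1):
+ #       add = torch.ops.aten.add.Tensor(arg0_1, 1)
+ #       copy_ = torch.ops.aten.copy_.default(arg0_1, add)
+ #       return (add,)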
+ for n in fx_g.nodes: + if n.op == "placeholder": + placeholders.add(n) + if isinstance(n.target, torch._ops.OpOverload): + if n.target is torch.ops.aten.copy_.default: + suffix = True + # Can only copy_ into an input, and can only do so once + assert n.args[0] in placeholders + placeholders.remove(n.args[0]) + copy_count += 1 + else: + assert ( + not n.target._schema.is_mutable + ), f"aot_autograd expected to have an entirely functional graph, but found {n.format_node()}" + return copy_count + + +def propagate_input_mutation_stacktraces(fx_g: torch.fx.Graph) -> None: + placeholders = set() + for n in fx_g.nodes: + if n.op == "placeholder": + placeholders.add(n) + if isinstance(n.target, torch._ops.OpOverload): + if n.target is torch.ops.aten.copy_.default: + # Can only copy_ into an input, and can only do so once + assert n.args[0] in placeholders + placeholders.remove(n.args[0]) + copy_from_node = n.args[1] + # Pre-condition: every node has a "stack_trace" field in its meta, + # but copy_() nodes do not (since we manually added them during functionalization). + # Instead, we manually propagate here. + if "stack_trace" in copy_from_node.meta: + assert "stack_trace" not in n.meta, str(n) + n.meta["stack_trace"] = copy_from_node.meta["stack_trace"] + + +def _check_if_mutation_can_be_in_graph( + keep_input_mutations: bool, + mutates_data, + mutates_metadata, + mutations_hidden_from_autograd, + mutations_under_no_grad_or_inference_mode, + requires_grad, +): + if keep_input_mutations: + return mutates_data and ( + (not mutates_metadata and not requires_grad) + or mutations_hidden_from_autograd + or mutations_under_no_grad_or_inference_mode + ) + return False diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/input_output_analysis.py b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/input_output_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..7645c8c9f254bb01234ecbbffc9511e62e182edd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/input_output_analysis.py @@ -0,0 +1,432 @@ +""" +This module is one of the analysis modules - it takes as input a function or graph +and some preexisting properties, and returns some data that is useful for deciding +how to further proceed with compilation or construct runtime wrappers. + +In particular, the following analyses are provided: +1. Refine the view and mutation metadata collected previously - removing duplicate + inputs or mapping views to their bases. +2. We also analyze the function signature for export graphs. 
+""" + +import itertools +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +import torch.utils._pytree as pytree +from torch import Tensor +from torch._subclasses.functional_tensor import FunctionalTensor +from torch.fx.experimental.symbolic_shapes import is_concrete_int +from .schemas import ( + BackwardSignature, + GraphSignature, + InputAliasInfo, + OutputAliasInfo, + OutputType, + ViewAndMutationMeta, +) +from .utils import strict_zip + +zip = strict_zip + + +def remove_dupe_metadata( + m: ViewAndMutationMeta, + keep_arg_mask: List[bool], + add_dupe_map: List[int], +) -> ViewAndMutationMeta: + assert len(m.input_info) == len(keep_arg_mask) + # Easy invariant: the first argument should never be a dupe (it will be kept) + assert len(keep_arg_mask) > 0 and keep_arg_mask[0] + + # Filter dupe'd mutated inputs out of traced_tangents + num_data_mutations = len([x for x in m.input_info if x.mutates_data]) + other_traced_tangents = m.traced_tangents[num_data_mutations:] + inp_traced_tangents = m.traced_tangents[:num_data_mutations] + filtered_inp_traced_tangents = [ + x + for i, x in enumerate(inp_traced_tangents) + if keep_arg_mask[m.mutated_inp_runtime_indices[i]] + ] + traced_tangents = filtered_inp_traced_tangents + other_traced_tangents + + return ViewAndMutationMeta( + input_info=[x for i, x in enumerate(m.input_info) if keep_arg_mask[i]], + # For outputs that are views of inputs, we store the index of the input that the output + # was generated from. Need to update that index to account for removed dupes. + output_info=[ + OutputAliasInfo( + output_type=o.output_type, + raw_type=o.raw_type, + dynamic_dims=o.dynamic_dims, + base_idx=None if o.base_idx is None else add_dupe_map[o.base_idx], + requires_grad=o.requires_grad, + ) + for o in m.output_info + ], + num_intermediate_bases=m.num_intermediate_bases, + keep_input_mutations=m.keep_input_mutations, + traced_tangents=traced_tangents, + # We are guaranteed not to get here, since dupes are not supported today with subclass inputs. + subclass_inp_meta=[], + subclass_fw_graph_out_meta=[], + subclass_tangent_meta=[], + is_train=m.is_train, + ) + + +# Given our ViewAndMutation metadata, this fn constructs a new set of metadata, +# after adding synthetic base arguments to the function. +# Most of the work in this fn is slogging through all of the metadata corresponding to inputs, +# and updating it with our synthetic base calling convention. +# +# When config.debug_assert is set, we automatically regenerate the metadata +# and compare it to this output for sanity. +# +# In addition to the updated metadata, also return the list of input indices +# that will need to be updated in the synthetic base epilogue + + +# Given our ViewAndMutation metadata, this fn constructs a new set of metadata, +# after adding synthetic base arguments to the function. +# Most of the work in this fn is slogging through all of the metadata corresponding to inputs, +# and updating it with our synthetic base calling convention. +# +# When config.debug_assert is set, we automatically regenerate the metadata +# and compare it to this output for sanity. 
+# +# In addition to the updated metadata, also return the list of input indices +# that will need to be updated in the synthetic base epilogue +def create_synthetic_base_metadata( + m: ViewAndMutationMeta, + # Maps each outer argument idx to its inner idx (or, if this outer arg is generated from a + # synthetic base, you get a tuple of (i, TensorMeta), telling you the base tensor idx, and view metadata) + synthetic_base_info: List[Union[int, Tuple[int, torch.Tensor]]], + outer_args: List[Any], + inner_args: List[Any], +) -> Tuple[ViewAndMutationMeta, List[int]]: + # maps inner arg indices to outer arg indices + synthetic_base_to_indices: Dict[int, List[int]] = {} + for inner_idx in range(len(inner_args)): + outer_aliased_indices_of_current_base_arg = [ + outer_idx + for outer_idx, inner_idx_or_tuple in enumerate(synthetic_base_info) + if (isinstance(inner_idx_or_tuple, int) and inner_idx_or_tuple == inner_idx) + or ( + isinstance(inner_idx_or_tuple, tuple) + and inner_idx_or_tuple[0] == inner_idx + ) + ] + synthetic_base_to_indices[inner_idx] = outer_aliased_indices_of_current_base_arg + + # given the requires_grad info on mutated inputs, + # generate the requires_grad info on those same mutated inputs, but after constructing synthetic bases. + input_infos = [] + for outer_indices in synthetic_base_to_indices.values(): + # leaf-ness should be all-or-nothing for aliased tensor. + # (aka if "a" and "b" are views, then a.is_leaf == b.is_leaf) + any_leaf = any(m.input_info[x].is_leaf for x in outer_indices) + all_leaf = all(m.input_info[x].is_leaf for x in outer_indices) + assert any_leaf == all_leaf + + mutates_data = ( + True + if len(outer_indices) > 1 + else m.input_info[outer_indices[0]].mutates_data + ) + mutates_metadata = ( + False + if len(outer_indices) > 1 + else m.input_info[outer_indices[0]].mutates_metadata + ) + requires_grad = any(m.input_info[x].requires_grad for x in outer_indices) + mutations_hidden_from_autograd = all( + m.input_info[x].mutations_hidden_from_autograd for x in outer_indices + ) + mutations_under_no_grad_or_inference_mode = all( + m.input_info[x].mutations_under_no_grad_or_inference_mode + for x in outer_indices + ) + + inpt_info = InputAliasInfo( + # If len(outer_indices) > 1, then this input is a synthetic base. + # The invariant is that to the rest of aot autograd, synthetic bases only show up if + # one of their aliases gets a data mutation. And if any of their aliases get metadata + # mutations, they will be hidden from the rest of aot autograd. 
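+ # Illustrative example (assumed user code): for
+ #   base = torch.ones(4)
+ #   f(base[0:3], base[1:4])   # aliased inputs, and suppose one is data-mutated
+ # both views get replaced by one synthetic base argument, so outer_indices has
+ # two entries here and mutates_data is forced to True above.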
+ mutates_data=mutates_data, + mutates_metadata=mutates_metadata, + mutations_hidden_from_autograd=all( + m.input_info[x].mutations_hidden_from_autograd for x in outer_indices + ), + mutates_storage_metadata=False + if len(outer_indices) > 1 + else m.input_info[outer_indices[0]].mutates_storage_metadata, + mutations_under_no_grad_or_inference_mode=mutations_under_no_grad_or_inference_mode, + is_leaf=any_leaf, + requires_grad=requires_grad, + keep_input_mutations=m.keep_input_mutations, + ) + input_infos.append(inpt_info) + + # Find any inputs that fulfill the following criteria: + # (1) They are part of a synthetic base (because they alias another input, + # and at least one input experiences a data mutation) + # (2) They experience a metadata mutation + outer_aliased_arg_idx_with_metadata_mutations = [ + outer_idx + for outer_idx, inpt_info in enumerate(m.input_info) + if inpt_info.mutates_metadata + and not isinstance(synthetic_base_info[outer_idx], int) + ] + + # grab the original requires grad info on the outputs, except the ones from the mutated inputs + input_metadata_output_info = [ + OutputAliasInfo( + output_type=OutputType.alias_of_input, + raw_type=FunctionalTensor, + dynamic_dims={ + i + for i, s in enumerate(outer_args[outer_idx].shape) + if not is_concrete_int(s) + }, + base_idx=synthetic_base_info[outer_idx][0], # type: ignore[index] + requires_grad=outer_args[outer_idx].requires_grad, + ) + for outer_idx in outer_aliased_arg_idx_with_metadata_mutations + ] + existing_output_infos = [] + for o in m.output_info: + new_base_idx = ( + None + if o.base_idx is None + else ( + synthetic_base_info[o.base_idx] + if isinstance(synthetic_base_info[o.base_idx], int) + else synthetic_base_info[o.base_idx][0] # type: ignore[index] + ) + ) + # If base_idx is changed for OutputType.is_input, we need to update the output type to reflect the change + new_output_type = ( + OutputType.alias_of_input + if o.output_type == OutputType.is_input and o.base_idx != new_base_idx + else o.output_type + ) + existing_output_infos.append( + OutputAliasInfo( + output_type=new_output_type, + raw_type=o.raw_type, + dynamic_dims=o.dynamic_dims, + # Map the input idx pre-synthetic-bases to the new idx post-synthetic-bases + base_idx=new_base_idx, # type: ignore[arg-type] + requires_grad=o.requires_grad, + ) + ) + + inner_mutated_tangents = [ + x + for inner_idx, x in enumerate(inner_args) + if input_infos[inner_idx].mutates_data and input_infos[inner_idx].requires_grad + ] + + output_info = existing_output_infos + input_metadata_output_info + # Regenerate traced tangents to include mutated inputs including synthetic bases + traced_tangents = ( + inner_mutated_tangents + m.traced_tangents[len(inner_mutated_tangents) :] + ) + + return ( + ViewAndMutationMeta( + input_info=input_infos, + output_info=output_info, + num_intermediate_bases=m.num_intermediate_bases, + keep_input_mutations=m.keep_input_mutations, + traced_tangents=traced_tangents, + # We are guaranteed not to get here, since synthetic_base codepaths are not supported today with subclass inputs. 
+ subclass_inp_meta=[], + subclass_fw_graph_out_meta=[], + subclass_tangent_meta=[], + is_train=m.is_train, + ), + outer_aliased_arg_idx_with_metadata_mutations, + ) + + +def _get_last_mem_address(x): + out = x.storage_offset() + for size, stride in zip(x.size(), x.stride()): + out += (size - 1) * stride + return out + + +# Assumption: x and y are known to share a storage, and we are trying to determine +# if their memory is actually completely disjoint, based on sizes/strides/storage_offset +def _tensors_definitely_do_not_overlap(x, y): + if x is y: + return False + if x.numel() == 0 or y.numel() == 0: + return True + + # Make x always on the left + if x.storage_offset() > y.storage_offset(): + x, y = y, x + # Short-circuit in the "obvious" overlapping case: both tensors are contiguous + if x.is_contiguous() and y.is_contiguous(): + if x.storage_offset() + x.numel() > y.storage_offset(): + # definitely overlap + return False + else: + # definitely no overlap + return True + + # Short-circuit: if last memory address of x is < start of y, then not overlapping. + x_last = _get_last_mem_address(x) + if x_last < y.storage_offset(): + return True + + if x.dim() == 2 and y.dim() == 2 and x.stride(1) == 1 and y.stride(1) == 1: + # This cases is needed for the shampoo optimizer. + # All tensors are 2d (non-contiguous), have the same outer stride, and have an inner stride of 1 + # (so rows are contiguous) + if x.stride(0) == y.stride(0): + offset_delta = y.storage_offset() - x.storage_offset() + if offset_delta < x.size(1): + # definitely overlaps (row 0 of y overlaps with row 0 of x) + # Example: + # base = torch.arange(32).reshape(4, 8) + # x = base.narrow(1, 0, 4) + # x: size=(4, 4), stride=(8, 1), offset=0 + # y = base.narrow(1, 3, 4) + # y: size=(4, 4), stride=(8, 1), offset=3 + return False + x_total_elems_covered = x.stride(0) * (x.size(0) - 1) + x.size(1) + if x_total_elems_covered <= offset_delta: + # definitely does not overlap (last byte of x is before start of y) + # Example: + # x: size=(4, 4), stride=(8, 1), offset=0 (last byte is 27) + # y: size=(4, 4), stride=(8, 1), offset=28 (start byte is 28) + return True + # At this point, we want to check if the 0th row of y + # overlaps with **some** row of x. + # We can check this by shifting y backward by the shared stride, repeatedly, + # until the first row of y is before the first row of x. + # Then we can check if these rows overlap. + # We can accomplish this by modding our offset by the stride. 
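+ # Worked arithmetic for the example just below (illustrative): with
+ # x.stride(0) == 9 and y.size(1) == 4,
+ #   offset 22 -> 22 % 9 == 4, and 4 + 4 == 8  <= 9  -> no overlap
+ #   offset 24 -> 24 % 9 == 6, and 6 + 4 == 10 >  9  -> overlap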
+ offset_delta_mod = offset_delta % x.stride(0) + # Example: + # 0 1 2 3 + # 9 10 11 12 + # 18 19 20 21 + # 27 28 29 30 + # x: size=(4, 4), stride=(9, 1), offset=0 + # y: size=(4, 4), stride=(9, 1), offset=22 (this would not overlap) + # y: size=(4, 4), stride=(9, 1), offset=23 (this would not overlap) + # y: size=(4, 4), stride=(9, 1), offset=24 (this would overlap) + # y: size=(4, 4), stride=(9, 1), offset=25 (this would overlap) + # If the interval [modded_offset, modded_offset + x_size] falls entirely + # without + if offset_delta_mod + y.size(1) <= x.stride(0): + return True + else: + return False + return False + + +def compute_overlapping_inputs(fwd_inputs, aliased_input_indices): + actual_aliased_indices = set() + for j in range(len(aliased_input_indices)): + for i in range(j): + i_ = aliased_input_indices[i] + j_ = aliased_input_indices[j] + if not _tensors_definitely_do_not_overlap(fwd_inputs[i_], fwd_inputs[j_]): + actual_aliased_indices.add(i_) + actual_aliased_indices.add(j_) + return actual_aliased_indices + + +def _graph_input_names(gm): + return [node.name for node in gm.graph.nodes if node.op == "placeholder"] + + +def _graph_output_names(gm): + output_node = next(iter(reversed(gm.graph.nodes))) + assert output_node.op == "output" and len(output_node.args) == 1 + return_args = output_node.args[0] + return [getattr(return_arg, "name", None) for return_arg in return_args] + + +def create_graph_signature( + fx_g: torch.fx.GraphModule, + fw_metadata: ViewAndMutationMeta, + in_spec: pytree.TreeSpec, + out_spec: pytree.TreeSpec, + *, + user_args_flat: List[Tensor], + params_and_buffers_flat: List[Tensor], + param_names: List[str], + buffer_names: List[str], + trace_joint: bool, + num_user_fw_outs: Optional[int], + loss_index: Optional[int], +) -> GraphSignature: + # Retrieve graph input names + graph_input_names = _graph_input_names(fx_g) + # Retrieve graph output names + graph_output_names = _graph_output_names(fx_g) + + num_params_buffers = len(param_names) + len(buffer_names) + num_tokens = len(fw_metadata.tokens) + # We have enough restrictions on the graph (no de-duping, synthetic bases, etc), + # Such that # graph inps = # user inps + # params + # buffers + num_user_args = len(graph_input_names) - num_params_buffers - num_tokens + + if trace_joint: + assert num_user_fw_outs is not None + num_fw_outs = num_user_fw_outs + fw_metadata.num_mutated_inp_runtime_indices + backward_output_names = graph_output_names[num_fw_outs:] + + grad_index = itertools.count(0) + gradients_to_parameters = { + backward_output_names[next(grad_index)]: param_names[i] + for i, param in enumerate(params_and_buffers_flat) + if param.requires_grad + } + + gradients_to_user_inputs = { + backward_output_names[next(grad_index)]: graph_input_names[ + i + len(params_and_buffers_flat) + ] + for i, user_input in enumerate(user_args_flat) + if user_input.requires_grad + } + + assert len(gradients_to_parameters) + len(gradients_to_user_inputs) == len( + backward_output_names + ) + + # Check that we have fully accounted for all graph outputs + backward_signature = BackwardSignature( + gradients_to_parameters, + gradients_to_user_inputs, + graph_output_names[loss_index], + ) + else: + backward_signature = None + num_user_fw_outs = ( + len(graph_output_names) + - fw_metadata.num_mutated_inp_runtime_indices + - num_tokens + ) + + return GraphSignature.from_tracing_metadata( + in_spec=in_spec, + out_spec=out_spec, + graph_input_names=graph_input_names, + graph_output_names=graph_output_names, + 
view_mutation_metadata=fw_metadata, + named_parameters=param_names, + named_buffers=buffer_names, + num_user_inputs=num_user_args, + num_user_outputs=num_user_fw_outs, + loss_index=loss_index, + backward_signature=backward_signature, + ) diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..e2c995c7a12b55dedad1cedeeb5e4cc817ae65af --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py @@ -0,0 +1,936 @@ +""" +These are the runtime wrappers that are associated with JIT-compiling. + +This includes the forward-only and joint JIT runtime wrappers. + +This module depends heavily on the runtime wrapper building blocks defined +in `runtime_wrappers`. +""" + +import logging +from contextlib import nullcontext +from functools import wraps +from typing import Any, List, Optional + +import torch +import torch.utils.dlpack +from torch import Tensor +from torch._dynamo.utils import lazy_format_graph_code +from torch._guards import detect_fake_mode, tracing, TracingContext +from torch._logging import getArtifactLogger, trace_structured +from torch._prims_common import CUDARngStateHelper +from torch._subclasses import FakeTensor +from torch.fx.experimental._backward_state import BackwardState +from torch.fx.experimental.proxy_tensor import is_sym_node +from torch.fx.experimental.symbolic_shapes import fx_placeholder_vals +from .. import config +from .dispatch_and_compile_graph import ( + aot_dispatch_autograd_graph, + aot_dispatch_base_graph, +) +from .logging_utils import describe_input, format_guard_bug_msg, track_graph_compiling + +from .runtime_wrappers import ( + aot_dispatch_subclass_wrapper, + create_runtime_wrapper, + functionalized_rng_runtime_epilogue, +) +from .schemas import ( + AOTConfig, + MutationType, + OutputType, + SubclassMeta, + TensorAlias, + ViewAndMutationMeta, +) +from .subclass_utils import ( + compute_inner_mutated_inp_indices_from_subclass_meta, + unwrap_tensor_subclasses, + wrap_tensor_subclasses, +) + +from .utils import ( + _get_symint_hints, + call_func_at_runtime_with_args, + make_boxed_func, + normalize_as_list, + strict_zip, +) + +zip = strict_zip + +log = logging.getLogger(__name__) +aot_joint_log = getArtifactLogger(__name__, "aot_joint_graph") +aot_graphs_log = getArtifactLogger(__name__, "aot_graphs") + +aten = torch.ops.aten + + +def _compute_output_meta_with_inductor_strides(fw_module, fwd_output_strides): + out = [n.meta["val"] for n in (list(fw_module.graph.nodes)[-1].args[0])] + # will only be set for inductor + if not fwd_output_strides: + return out + with TracingContext.get().fake_mode.shape_env.suppress_guards(): + for i in range(len(out)): + if not isinstance(out[i], Tensor): + continue + if all(s1 == s2 for s1, s2 in zip(out[i].stride(), fwd_output_strides[i])): + continue + out[i] = out[i].as_strided(out[i].shape, fwd_output_strides[i]) + return out + + +def aot_dispatch_base( + flat_fn, + flat_args: List[Tensor], + aot_config: AOTConfig, + *, + fw_metadata: ViewAndMutationMeta, +): + fw_module, updated_flat_args, maybe_subclass_meta = aot_dispatch_base_graph( # type: ignore[misc] + flat_fn, flat_args, aot_config, fw_metadata=fw_metadata + ) + + disable_amp = torch._C._is_any_autocast_enabled() + context = torch._C._DisableAutocast if disable_amp else nullcontext + 
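+ # (Assumed rationale, not stated here: autocast decisions are already baked
+ # into the traced graph, so when autocast is enabled we run the compiled
+ # artifact with autocast disabled to avoid applying the casts a second time.)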
fakified_out = None + + with context(), track_graph_compiling(aot_config, "inference"): + compiler = ( + aot_config.inference_compiler + if aot_config.inference_compiler is not None + else aot_config.fw_compiler + ) + if config.functionalize_rng_ops: + # Add the seed and offset as example inputs to pass to the compiler + fake_mode = detect_fake_mode() + seed, offset = CUDARngStateHelper.get_torch_state_as_tuple(fake_mode) + updated_flat_args.extend([seed, offset]) + + if tracing_context := torch._guards.TracingContext.try_get(): + tracing_context.fw_metadata = ( + fw_metadata + if maybe_subclass_meta is None + else maybe_subclass_meta.fw_metadata + ) + + with TracingContext.report_output_strides() as fwd_output_strides: + compiled_fw = compiler(fw_module, updated_flat_args) + + # see note: [Returning Fake Tensors on First AOT Autograd Call] + if tracing_context and tracing_context.fakify_first_call: + fakified_out = _compute_output_meta_with_inductor_strides( + fw_module, fwd_output_strides + ) + + # However, create_runtime_wrapper does not expect the rng offsets in the + # output. So, we have to create another wrapper and take out the offset. As + # a result, we have to account for not boxed_call compilers as well. + if not hasattr(compiled_fw, "_boxed_call"): + compiled_fw = make_boxed_func(compiled_fw) + + # Create a wrapper to set up the rng functionalize bits + @wraps(compiled_fw) + def rng_functionalization_wrapper(args): + # see note: [Returning Fake Tensors on First AOT Autograd Call] + nonlocal fakified_out + if fakified_out is not None: + out = fakified_out + fakified_out = None + return out + + # args is a list because compiled_fw is boxed_call + if fw_metadata.is_rng_op_functionalized: + # Add the seed and offset to args + seed, offset = CUDARngStateHelper.get_torch_state_as_tuple() + args.extend([seed, offset]) + out = compiled_fw(args) + out = functionalized_rng_runtime_epilogue(fw_metadata, out) + return out + else: + return compiled_fw(args) + + if maybe_subclass_meta is not None: + compiled_fw_func = aot_dispatch_subclass_wrapper( + rng_functionalization_wrapper, + subclass_metas=fw_metadata.subclass_fw_graph_out_meta, + num_fw_outs_saved_for_bw=None, + ) + else: + compiled_fw_func = rng_functionalization_wrapper + + if not hasattr(compiled_fw_func, "_boxed_call"): + compiled_fw_func = make_boxed_func(compiled_fw_func) + + compiled_fn = create_runtime_wrapper( + compiled_fw_func, + runtime_metadata=fw_metadata, + indices_of_inps_to_detach=[], + trace_joint=False, + keep_input_mutations=aot_config.keep_inference_input_mutations, + disable_amp=disable_amp, + ) + + return compiled_fn + + +def aot_dispatch_autograd( + flat_fn, + flat_args: List[Any], + aot_config: AOTConfig, + *, + fw_metadata: ViewAndMutationMeta, +): + fw_metadata.deterministic = torch.are_deterministic_algorithms_enabled() + fx_g, joint_inputs, maybe_subclass_meta = aot_dispatch_autograd_graph( # type: ignore[misc] + flat_fn, flat_args, aot_config, fw_metadata=fw_metadata + ) + + # Copied from aot_dispatch_autograd_graph. 
+ disable_amp = torch._C._is_any_autocast_enabled() + + if aot_config.enable_log: + aot_joint_log.info( + "%s", lazy_format_graph_code("Joint graph", fx_g, aot_config.aot_id) + ) + trace_structured( + "aot_joint_graph", + payload_fn=lambda: fx_g.print_readable(print_output=False), # type: ignore[union-attr] + ) + + fakify_first_call = False + fakified_out = None + + with torch.no_grad(): + inner_meta = ( + fw_metadata + if maybe_subclass_meta is None + else maybe_subclass_meta.fw_metadata + ) + with track_graph_compiling(aot_config, "joint"): + # See Note: [Partitioner handling for Subclasses, Part 1] + # See Note: [Recomputing subclass mutation handling] + mutated_inp_runtime_indices = ( + compute_inner_mutated_inp_indices_from_subclass_meta( + fw_metadata, inner_meta + ) + ) + num_mutated_inp_runtime_indices = len(mutated_inp_runtime_indices) + num_inner_fwd_outputs = ( + num_mutated_inp_runtime_indices + + inner_meta.num_outputs + + inner_meta.num_intermediate_bases + + inner_meta.num_outputs_rng_offset + + len( + fw_metadata.tokens + ) # See Note [Side-Effectful Tokens in AOTAutograd] + ) + fw_module, bw_module = aot_config.partition_fn( + fx_g, joint_inputs, num_fwd_outputs=num_inner_fwd_outputs + ) + + fw_outs = next(n for n in fw_module.graph.nodes if n.op == "output").args[0] + # we only need to bookkeep the symints that are saved for bw, not any symints + # the user forward might have returned in its own output + fw_outs_saved_for_bw = fw_outs[num_inner_fwd_outputs:] + num_fw_outs_saved_for_bw = len(fw_outs_saved_for_bw) + symint_outs_saved_for_bw = [ + n for n in fw_outs_saved_for_bw if is_sym_node(n) + ] + fw_metadata.num_symints_saved_for_bw = len(symint_outs_saved_for_bw) + inner_meta.num_symints_saved_for_bw = len(symint_outs_saved_for_bw) + _num_symints_saved_for_bw = len(symint_outs_saved_for_bw) + + # Note [Detaching inputs that never need gradients] + # See https://github.com/pytorch/pytorch/issues/97745 + # Suppose we have a function like this that we want to compile: + # + # def f(x, y): + # return torch.mul(x, y.detach()) + # + # What gradients should we compute for x and y? + # By default, AOTAutograd will compute a gradient for **every** input that requires gradients, + # and so we'll compute: + # x_grad_input = y + # y_grad_input = None + # Does this preserve the semantics of eager mode? + # Unfortunately, no. + # Doing the above will cause autograd to **continue** to backprop the autograd tape + # that was generated from constructing y. + # + # This is **different** from what would have happened in eager mode. + # In eager mode, if we backprop through the output of this function, autograd will only traverse + # the bit of the autograd tape corresponding to "x". + # In particular, if a user had previously backpropped through y's autograd tape, + # And then they try to backprop through the output of the above function, + # then we'll hit the dreaded "Trying to backward through the graph a second time" error. + # + # You might think: If autograd sees that a gradient is None, shouldn't it stop early, + # instead of continuing the backprop through the ancestors of that node in the graph? + # + # Autograd has two passes: + # (1) a first pass that traverses the autograd graph and figures out which nodes need to be executed + # (2) a second pass that actually goes ahead and executes each node when it becomes ready, + # propagating gradients + # By the time we're executing a node and we see that it produces a None, the set of nodes to execute + # is already locked-in. 
+ # + # The fix: instead, we can recognize statically that the graph we're compiling will never contribute + # gradients to y, and prevent autograd from trying to traverse y's autograd tape at all. + # We can do this by manually detach'ing y before sending it through the `CompiledFunction`. + # + # Note that this solution is not bulletproof. + # It's possible to construct a case where eager may or may not have have tried to autograd through y, + # depending on the actual grad_outputs that were passed in during the backward. + # There is no easy fix for this: the simplest fix would be to run with `retain_graph=True`, + # allowing autograd to re-use the graph. + # + # An example of this case is: + # def f(x): + # return x.detach() * 2, x * 3 + # If we were to only backprop through outs[0], in eager, we would stop + # If we backward only on the first output, we shouldn't send a grad through x. + # But the custom autograd function doesn't know that: it will materialize zero grads for x * 3 + # and we will end up with a zero grad at x. + # If we later backprop through the second output, this will also require backprop'ing through x. + # Meaning we'll need to use `retain_graph=True` to be able to backprop through x the second time. + _indices_of_inps_to_detach = [] + bw_outs = next(n for n in bw_module.graph.nodes if n.op == "output").args[0] + + # TODO: we should apply the below "detach inputs if their gradients are statically known to be None" + # optimization even if we have subclass inputs/outputs (we do not handle this today). + # Computing which our our inputs get None gradients is a bit more complicated, + # if any of our inputs are subclasses. Why? + # (a) we need to make sure that we call .detach() on the input subclasses, since autograd sees subclasses. + # (b) The grad_outputs that we AOT computed in our backward graph are the desugared tensor tensors, + # so we need to figure out which subclass fw inputs they map to. + if maybe_subclass_meta is None: + assert ( + len(bw_outs) + == len(fw_metadata.input_info) + inner_meta.num_outputs_rng_offset + ) + for i, (bw_out) in enumerate(bw_outs): + if bw_out is None: + _indices_of_inps_to_detach.append(i) + + if aot_config.enable_log: + aot_graphs_log.info( + "%s", + lazy_format_graph_code("Forward graph", fw_module, aot_config.aot_id), + ) + aot_graphs_log.info( + "%s", + lazy_format_graph_code("Backward graph", bw_module, aot_config.aot_id), + ) + trace_structured( + "aot_forward_graph", + payload_fn=lambda: fw_module.print_readable(print_output=False), + ) + trace_structured( + "aot_backward_graph", + payload_fn=lambda: bw_module.print_readable(print_output=False), + ) + + with track_graph_compiling(aot_config, "forward"): + # flat_args at this point might still be subclasses- + # make sure to pass the unwrapped fake tensors into the compiler! 
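+ # Illustrative (hypothetical subclass): an input like
+ # MySubclass(FakeTensor_a, FakeTensor_b) shows up in joint_inputs as the two
+ # plain FakeTensors, which is the calling convention the fw_compiler expects.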
+ adjusted_flat_args = joint_inputs[0] + if config.functionalize_rng_ops: + # Update example inputs for the fw_compiler + fake_mode = detect_fake_mode() + seed, offset = CUDARngStateHelper.get_torch_state_as_tuple(fake_mode) + adjusted_flat_args.extend([seed, offset]) + # We are not clearing flat_args here because + # 1) There is a check in the debug compiler at the end + # 2) It does not matter as these are fake tensors + + if tracing_context := torch._guards.TracingContext.try_get(): + tracing_context.fw_metadata = inner_meta + + with TracingContext.report_output_strides() as fwd_output_strides: + compiled_fw_func = aot_config.fw_compiler(fw_module, adjusted_flat_args) + if not hasattr(compiled_fw_func, "_boxed_call"): + compiled_fw_func = make_boxed_func(compiled_fw_func) + + # see note: [Returning Fake Tensors on First AOT Autograd Call] + if tracing_context and tracing_context.fakify_first_call: + fakified_out = _compute_output_meta_with_inductor_strides( + fw_module, fwd_output_strides + ) + fakify_first_call = True + + if maybe_subclass_meta is not None: + # Why do we need to pass in num_fw_outs_saved_for_bw? + # See Note: [Partitioner handling for Subclasses, Part 2] + compiled_fw_func = aot_dispatch_subclass_wrapper( + compiled_fw_func, + subclass_metas=fw_metadata.subclass_fw_graph_out_meta, + num_fw_outs_saved_for_bw=num_fw_outs_saved_for_bw, + ) + if not hasattr(compiled_fw_func, "_boxed_call"): + compiled_fw_func = make_boxed_func(compiled_fw_func) + + # NB: It's important to compile backwards ahead of time, as this may + # add extra guards which we need to apply to the Dynamo cache at + # forwards + with track_graph_compiling(aot_config, "backward"): + placeholder_list = fx_placeholder_vals(bw_module) + + forward_saved_for_backwards_strides = None + if fwd_output_strides is not None: + forward_saved_for_backwards_strides = fwd_output_strides[ + inner_meta.tensors_saved_for_backwards_slice + ] + + # saved activations can have different stride to eager if + # the compiler does layout optimization. We should restride the + # tensor passed in for compiling the backward graph using the + # saved tensor's stride. + for i in range(len(placeholder_list)): + ph_arg = placeholder_list[i] + if not isinstance(ph_arg, torch.Tensor): + continue + + if forward_saved_for_backwards_strides is None: + continue + + real_stride = None + # Per all_args calling convention + j = i - len(symint_outs_saved_for_bw) + if 0 <= j < len(forward_saved_for_backwards_strides): + real_stride = forward_saved_for_backwards_strides[j] + if real_stride is None: + continue + + # Comparing ph_arg.stride() with real_stride directly may + # cause dynamic dimensions in ph_arg being specialized to static + # value. Using the hints to avoid that. + if _get_symint_hints(ph_arg.stride()) != real_stride: + # Note that here we use the stride of the real tensor to + # restride a FakeTensor. This does not cause trouble + # for dynamic shape since this code path only get + # executed if layout optimization is enabled. And we + # disable layout optimization for dynamic shape right + # now. + # + # A solution that decide stride order based on real + # tensor's stride and then apply that stride order to + # the FakeTensor does not work smoothly since some + # tensor's layout is not 'dense'. E.g. mixnet_l has a + # tensor with size [8, 64, 112, 112] and strides + # (2408448, 1, 21504, 192). The solution mentioned will + # decide a stride of (802816, 1, 7168, 64) for this + # tensor which is wrong. 
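+ # Illustrative example (assumed shapes): if the forward saves an
+ # [8, 3, 32, 32] activation and the compiler picked channels-last layout,
+ #   ph_arg.stride() == (3072, 1024, 32, 1)   # contiguous FakeTensor stride
+ #   real_stride     == (3072, 1, 96, 3)      # layout chosen by the compiler
+ # and the as_strided below restrides the placeholder to match.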
+ placeholder_list[i] = ph_arg.as_strided(ph_arg.size(), real_stride) + + compiled_bw_func = None + if len(symint_outs_saved_for_bw): + context = torch._C._DisableAutocast if disable_amp else nullcontext + with context(): + try: + compiled_bw_func = aot_config.bw_compiler( + bw_module, placeholder_list + ) + except Exception: + log.warning( + "failed to eagerly compile backwards for dynamic, suppressing in case backwards not needed", + exc_info=True, + ) + # Compiled autograd will run the bw_module in the backward pass, + # so recompilation need happen anyway if the backward pass is ever + # called. + # + # The reason we do the GraphModule recompilation here is because + # the lazy recompilation will cause issue in the backward pass + # with compiled autograd. + # + # Do the _LazyGraphModule.force_recompile here rather than when + # bw_module is first generated by the partitioner because the bw_module.recompile + # may be called in some code path later and cause the _LazyGraphModule.forward + # becomes the lazy version again. One example is when dynamic shape is enabled + # upfront, the bw_compiler will be called above which can cause extra + # graph module recompilation on bw_module. + if torch._dynamo.compiled_autograd.compiled_autograd_enabled_count: + from torch.fx._lazy_graph_module import _LazyGraphModule + + _LazyGraphModule.force_recompile(bw_module) + + saved_context = TracingContext.try_get() + + backward_state_indices = [ + idx for idx, x in enumerate(flat_args) if isinstance(x, BackwardState) + ] + assert len(backward_state_indices) <= 1 + + class CompiledFunction(torch.autograd.Function): + compiled_fw = compiled_fw_func + compiled_bw = compiled_bw_func + metadata: ViewAndMutationMeta = fw_metadata # type: ignore[assignment] + maybe_subclass_metadata: Optional[SubclassMeta] = maybe_subclass_meta + num_symints_saved_for_bw = _num_symints_saved_for_bw + _compiled_autograd_should_lift = False + _fakify_first_call = fakify_first_call + + @staticmethod + def _compiled_autograd_key(ctx): + return (ctx._autograd_function_id, *ctx.symints) + + @staticmethod + def forward(ctx, *deduped_flat_tensor_args): + args = deduped_flat_tensor_args + if backward_state_indices: + bw_state = args[backward_state_indices[0]] + assert isinstance(bw_state, BackwardState) + ctx._compiled_autograd_backward_state = bw_state + + marked_dirty_inps = [] + for i in fw_metadata.mutated_graph_handled_indices_seen_by_autograd: + arg = deduped_flat_tensor_args[i] + if not (arg.requires_grad and arg.is_leaf): # would error + ctx.mark_dirty(arg) + marked_dirty_inps.append(arg) + + if not CompiledFunction._fakify_first_call: + if CompiledFunction.metadata.is_rng_op_functionalized: + # Add the seed and offset to args + seed, offset = CUDARngStateHelper.get_torch_state_as_tuple() + args = (*args, seed, offset) + # There is a pretty complicated calling convention around what the compiled fw returns. 
+ # The full list of outputs and their relative order is: + # (*tokens, *mutated_inputs, *fw_outs, *fw_intermediate_bases, *saved_tensors, *saved_symints) + # - Note that in the synthetic bases case, mutated_inputs will correspond to an updated version + # of the original view, and not the synthetic base + + fw_outs = call_func_at_runtime_with_args( + CompiledFunction.compiled_fw, + args, + disable_amp=disable_amp, + ) + else: + nonlocal fakified_out + assert fakified_out is not None + CompiledFunction._fakify_first_call = False + fw_outs = fakified_out + fakified_out = None + + num_outputs = CompiledFunction.metadata.num_outputs + num_outputs_aliased = CompiledFunction.metadata.num_outputs_aliased + num_mutated_runtime_inps = ( + CompiledFunction.metadata.num_mutated_inp_runtime_indices + ) + num_tokens = len(CompiledFunction.metadata.tokens) + num_forward_returns = CompiledFunction.metadata.num_forward_returns + num_forward = CompiledFunction.metadata.num_forward + + # Partitioners must put symint arguments at the end separate from tensor arguments + tensors_saved_for_backwards = fw_outs[ + CompiledFunction.metadata.tensors_saved_for_backwards_slice + ] + assert all(isinstance(x, torch.Tensor) for x in tensors_saved_for_backwards) + # See Note [Detaching saved tensors in AOTAutograd] + ctx.save_for_backward( + *( + x.detach() if x._is_view() else x + for x in tensors_saved_for_backwards + ) + ) + symint_outs = fw_outs[ + CompiledFunction.metadata.symints_saved_for_backwards_slice + ] + assert all( + isinstance(x, (int, float, torch.SymInt, torch.SymFloat)) + for x in symint_outs + ), str([type(x) for x in symint_outs]) + ctx.symints = symint_outs + + raw_returns = fw_outs[0 : num_forward_returns + num_tokens] + + # Wrap all autograd.Function.forward() outputs that are aliases + # so that autograd.Function doesn't treat them as tensors + if num_mutated_runtime_inps > 0: + for i, idx in enumerate( + CompiledFunction.metadata.mutated_inp_runtime_indices + ): + # We could make this faster by only looping over inputs with metadata-only mutations + # (instead of looping over inputs with either data or metadata mutations), but there shouldn't be many. + info = CompiledFunction.metadata.input_info[idx] + if info.mutates_metadata and not info.mutates_data: + raw_returns[i] = TensorAlias(raw_returns[i]) + + if config.debug_assert: + user_mutated_inputs_raw = raw_returns[0:num_mutated_runtime_inps] + mut_inp_infos = [ + x + for x in CompiledFunction.metadata.input_info + if x.mutates_data or x.mutates_metadata + ] + assert len(user_mutated_inputs_raw) == len(mut_inp_infos) + + if CompiledFunction.metadata.num_unsafe_view_outputs > 0: + for idx in CompiledFunction.metadata.unsafe_view_out_indices: + raw_return_idx = num_mutated_runtime_inps + idx + o = raw_returns[raw_return_idx] + raw_returns[raw_return_idx] = torch.ops.aten._unsafe_view( + o, o.shape + ) + + if num_outputs_aliased > 0: + for idx in CompiledFunction.metadata.aliased_out_indices: + raw_return_idx = num_mutated_runtime_inps + idx + raw_returns[raw_return_idx] = TensorAlias( + raw_returns[raw_return_idx] + ) + + if config.debug_assert: + intermediates_raw = raw_returns[ + num_mutated_runtime_inps + num_outputs : + ] + assert not any( + isinstance(x, TensorAlias) for x in intermediates_raw + ) + + # invariant: intermediate bases always require gradients, so we don't have to + # consider marking them as non-differentiable. 
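+ # (Illustrative, assumed example of an intermediate base: if the user code does
+ #   tmp = x + 1; return tmp[0], tmp[1]
+ # the forward also returns `tmp` so both aliased outputs can be regenerated
+ # from it at runtime; those extra returns are the intermediate bases.)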
+ raw_returns_not_including_intermediate_bases = raw_returns[ + : num_mutated_runtime_inps + num_outputs + ] + raw_returns_meta = [ + x + for x in CompiledFunction.metadata.input_info + if x.mutation_type == MutationType.MUTATED_OUT_GRAPH + ] + CompiledFunction.metadata.output_info + + fw_outs_not_requiring_grad = [ + x + for (i, x) in enumerate(raw_returns_not_including_intermediate_bases) + if isinstance(x, torch.Tensor) and not raw_returns_meta[i].requires_grad + ] + ctx.mark_non_differentiable(*fw_outs_not_requiring_grad) + ctx._materialize_non_diff_grads = False + + functionalized_rng_runtime_epilogue( + CompiledFunction.metadata, + fw_outs[num_forward_returns:num_forward], + return_new_outs=False, + ) + return tuple(raw_returns) + tuple(marked_dirty_inps) + + @staticmethod + def backward(ctx, *flat_args): + # Calling convention: we expect a grad_out passed to the backward: + # - for every output of the fw that does *not* alias an input or graph intermediate + # - for every updated_input generated by the fw that does *not* alias an input (aka only data-mutations) + # - for every graph intermediate that we need to use to generate an output later. + # The other outputs in the autograd.Function.forward that do *not* show up in the backward include: + # - outputs that alias inputs or graph intermediates + # - updated inputs due to metadata-only mutations. + # We need to return them in the forward, but ensure that they all do not get gradients in the backward, + # and we filter them out here before passing the remaining grad_outputs into the compiled backward. + num_intermediate_bases = CompiledFunction.metadata.num_intermediate_bases + num_graph_handled_inputs = ( + CompiledFunction.metadata.num_mutated_graph_handled_indices_seen_by_autograd + ) + num_mutated_runtime_inps = ( + CompiledFunction.metadata.num_mutated_inp_runtime_indices + ) + expected_grad_outs = ( + CompiledFunction.metadata.num_outputs + + num_mutated_runtime_inps + + num_intermediate_bases + ) + deterministic = CompiledFunction.metadata.deterministic + global_deterministic = torch.are_deterministic_algorithms_enabled() + if deterministic is not None: + torch._check( + not (not deterministic and global_deterministic), + lambda: ( + "This compiled backward function is being run with " + "torch.use_deterministic_algorithms(True), " + "but it was previously generated during the forward function while " + "torch.use_deterministic_algorithms(False) was set." 
+ ), + ) + + if num_graph_handled_inputs > 0: + flat_args = flat_args[:-num_graph_handled_inputs] + assert len(flat_args) == expected_grad_outs + out_info = CompiledFunction.metadata.output_info + + inp_tangents, out_tangents, intermediate_base_tangents = ( + flat_args[0:num_mutated_runtime_inps], + flat_args[ + num_mutated_runtime_inps : num_mutated_runtime_inps + + CompiledFunction.metadata.num_outputs + ], + flat_args[ + num_mutated_runtime_inps + CompiledFunction.metadata.num_outputs : + ], + ) + # input_info contains info on *every* input, + # But in the backward(), we are only given grad outputs for every mutated input + # We then need to filter out the grad outputs that correspond to metadata-only mutations or don't require grad + input_info = CompiledFunction.metadata.input_info + inp_tangents_filtered = [ + x + for x, info_idx in zip( + inp_tangents, CompiledFunction.metadata.mutated_inp_runtime_indices + ) + if input_info[info_idx].mutates_data + and input_info[info_idx].requires_grad + ] + # We also need to filter out grad outputs that correspond to outputs aliasing inputs/intermediates + out_tangents_filtered = [ + x + for x, info in zip(out_tangents, out_info) + if info.output_type + in [ + OutputType.non_alias, + OutputType.unsafe_view_alias, + OutputType.custom_function_view, + ] + and issubclass(info.raw_type, torch.Tensor) + and info.requires_grad + ] + # intermediate bases always require gradients, and always participate in the backward graph. + flat_bw_args_with_grads = [ + *inp_tangents_filtered, + *out_tangents_filtered, + *intermediate_base_tangents, + ] + num_flat_bw_args_with_grads = len(flat_bw_args_with_grads) + + # sanity asserts + # metadata_only_inps = [ + # x for x, info_idx in zip(inp_tangents, mutated_inp_indices) + # if not input_info[info_idx].mutates_data + # ] + # aliased_outputs = [ + # x for x, info in zip(out_tangents, out_info) if info.output_type != OutputType.non_alias] + # assert all(x is None for x in metadata_only_inps) + # assert all(x is None for x in aliased_outputs) + + rng_args = [] + if CompiledFunction.metadata.is_rng_op_functionalized: + # Add the seed and offset to args + rng_args = CUDARngStateHelper.get_torch_state_as_tuple() + + all_args = [ + *ctx.symints, + *ctx.saved_tensors, + *flat_bw_args_with_grads, + *rng_args, + ] + del flat_bw_args_with_grads + + tangents_start_idx = ( + len(all_args) - num_flat_bw_args_with_grads - len(rng_args) + ) + tangents_end_idx = len(all_args) - len(rng_args) + + # Note: [AOTAutograd Backward Guards] + # During AOTDispatch, we eagerly create and trace out a joint fw-bw graph. + # Doing so requires us to "guess" about some of the metadata of our grad_outputs. + # + # In particular: if an output to the forward is a plain tensor or a subclass, + # its corresponding grad_output in the backward **may or may not** be + # a plain tensor or a subclass. The main cases are: + # (1) If an output is a plain tensor, its grad_out will also be a plain tensor, + # *unless* the output is used in some subclass compute later in the forward graph, + # which will cause its grad_output to become a subclass + # (2) If an output is a subclass, its grad_out will also be a subclass, + # *unless* the output of the forward did not actually participate in the gradient computation, + # in which case autograd will insert a plain tensor of zeros for the grad_output. + # We could avoid this case with `torch.autograd.Function.set_materialize_grads`, + # although this is not turned on today in AOTAutgrad and would require more work. 
+ # + # Today, we make a guess on subclass-ness based on the above examples, + # and hard-error in the backward if we guessed wrong. + # + # In the future, we should add backward guards that would allow us to + # properly handle this case instead of erroring: we would need to retrace the backward graph, + # since we might produce an entirely different trace if our grad_outputs are subclass or not. + assert ( + len(CompiledFunction.metadata.output_types) + == num_flat_bw_args_with_grads + ) + grad_output_types = [ + type(x) for x in all_args[-num_flat_bw_args_with_grads:] + ] + # In general, we can add more asserts/guards here for when we partitioned + # with incorrect assumptions about the grad_outputs. + # Normalize FakeTensor -> torch.Tensor + # - during tracing our types are FakeTensor + # - at runtime in the backward our types are torch.Tensor... + # - unless we're running compiled backward, in which case they are also FakeTensor + grad_output_types_ = [ + torch.Tensor if x is FakeTensor else x for x in grad_output_types + ] + assert ( + grad_output_types_ == CompiledFunction.metadata.output_types + ), f"""\ +We incorrectly attempted to compile the backward with incorrect subclass metadata. +If you run into this error, please file an issue. +Expected grad_output types: {str(CompiledFunction.metadata.output_types)} +Got grad_output types: {str(grad_output_types)}""" + + # TODO: figure out how to refactor the backward properly so I can use aot_dispatch_subclass_wrapper() here. + if CompiledFunction.maybe_subclass_metadata is not None: + # Get the number of tangents after unwrapping + len_tangents = len( + unwrap_tensor_subclasses( + all_args[tangents_start_idx:tangents_end_idx], + is_joint_structure=False, + ) + ) + all_args = unwrap_tensor_subclasses(all_args, is_joint_structure=False) + tangents_start_idx = len(all_args) - len_tangents - len(rng_args) + tangents_end_idx = tangents_start_idx + len_tangents + + # Make the tangents contiguous. 
Note that we must do this after subclass desugaring + # because inputs to inductor have to be contiguous + all_args = [ + t.contiguous() + if ( + (tangents_start_idx <= i < tangents_end_idx) + and (not t.is_contiguous()) + ) + else t + for i, t in enumerate(all_args) + ] + + def call_compiled_backward(): + if ctx._is_compiled_autograd_tracing(): + # For compiled autograd, run raw FX graph so that it can be inlined into the larger graph + symints = ctx._get_compiled_autograd_symints() + assert len(symints) == len(ctx.symints) + all_args[: len(symints)] = symints + if backward_state_indices: + assert ctx._compiled_autograd_backward_state.proxy is not None + all_args.append(ctx._compiled_autograd_backward_state) + context = torch._C._DisableAutocast if disable_amp else nullcontext + with context(): + out = normalize_as_list(bw_module(*all_args)) + out = functionalized_rng_runtime_epilogue( + CompiledFunction.metadata, out + ) + return tuple(out) + assert ( + not backward_state_indices + ), "BackwardState requires CompiledAutograd" + ctx.maybe_clear_saved_tensors() + if CompiledFunction.compiled_bw is None: + context = torch._C._DisableAutocast if disable_amp else nullcontext + with tracing(saved_context), context(), track_graph_compiling( + aot_config, "backward" + ): + CompiledFunction.compiled_bw = aot_config.bw_compiler( + bw_module, placeholder_list + ) + + out = call_func_at_runtime_with_args( + CompiledFunction.compiled_bw, + all_args, + steal_args=True, + disable_amp=disable_amp, + ) + + out = functionalized_rng_runtime_epilogue( + CompiledFunction.metadata, out + ) + return tuple(out) + + if torch.is_grad_enabled() and any( + t.requires_grad for t in all_args if isinstance(t, torch.Tensor) + ): + # Ensure that the graph is connected, and error if double backward is performed. + # See comment for why once_differentiable is not sufficient: + # https://github.com/pytorch/pytorch/pull/92348/files#r1072962107 + class CompiledFunctionBackward(torch.autograd.Function): + # CompiledFunctionBackward is not yet supported in dynamo skipfiles + _compiled_autograd_should_lift = False + + @staticmethod + def forward(ctx, *unused_args): + outs = call_compiled_backward() + # TODO: figure out how to refactor the backward properly so I can use aot_dispatch_subclass_wrapper() here. + if CompiledFunction.maybe_subclass_metadata is not None: + assert ( + CompiledFunction.maybe_subclass_metadata.grad_input_metas + is not None + ) + outs_wrapped = wrap_tensor_subclasses( + outs, + subclass_metas=CompiledFunction.maybe_subclass_metadata.grad_input_metas, + ) + return outs_wrapped + return outs + + @staticmethod + def backward(ctx, *args): + raise RuntimeError( + "torch.compile with aot_autograd does not currently support double backward" + ) + + CompiledFunctionBackward._compiled_autograd_key = ( # type: ignore[method-assign] + CompiledFunction._compiled_autograd_key + ) + + # Pass args even though they're unused, so that the graph is built + out = CompiledFunctionBackward.apply(*all_args) + else: + out = call_compiled_backward() + + # TODO: figure out how to refactor the backward properly so I can use aot_dispatch_subclass_wrapper() here. 
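+ # Sketch of the wrapping below (illustrative): the compiled backward produces
+ # plain, desugared grad tensors; grad_input_metas records how to reassemble
+ # them into subclass grads (e.g. two dense grads -> one subclass grad for a
+ # two-tensor style input).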
+ if CompiledFunction.maybe_subclass_metadata is not None: + assert ( + CompiledFunction.maybe_subclass_metadata.grad_input_metas + is not None + ) + outs_wrapped = wrap_tensor_subclasses( + out, + subclass_metas=CompiledFunction.maybe_subclass_metadata.grad_input_metas, + ) + return outs_wrapped + return out + + compiled_function = create_runtime_wrapper( + CompiledFunction.apply, + runtime_metadata=fw_metadata, + indices_of_inps_to_detach=_indices_of_inps_to_detach, + trace_joint=True, + keep_input_mutations=aot_config.keep_inference_input_mutations, + disable_amp=disable_amp, + ) + + if not config.debug_assert: + return compiled_function + + flat_requires_grad = [ + a.requires_grad if isinstance(a, Tensor) else None for a in flat_args + ] + + @wraps(compiled_function) + def debug_compiled_function(*args): + # TODO: Check aliasing relationships + # TODO: Check strides for metadata mutation + # (NB: ideally, this logic is factored out of this function and + # you move these debug checks there) + + # Check requires grad. Bad case is when we compiled with + # requires_grad = False, but input requires_grad = True + # (vice versa is OK; we compute a gradient and then throw + # it away when it hits the input.) + for i, a in enumerate(args): + can_require_grad = flat_requires_grad[i] + if can_require_grad is None: + assert not isinstance(a, Tensor) + elif not can_require_grad: + assert not a.requires_grad, format_guard_bug_msg( + aot_config, + f"{describe_input(i, aot_config)} would not require grad", + ) + + return compiled_function(*args) + + return debug_compiled_function diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/logging_utils.py b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/logging_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..28f82555ac974a46f94780473c9d19af81575423 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/logging_utils.py @@ -0,0 +1,135 @@ +""" +Contains utils for logging in AOTAutograd, including managing the names of the graphs under +compilation, capturing user-friendly tracebacks, and debug messages. +""" + +import collections +from contextlib import contextmanager +from typing import List, Tuple + +import torch +import torch.fx.traceback as fx_traceback + +# This is a list since looking forward, we can have this arbitrarily nested. +graph_being_compiled: List[str] = [] +# TODO: It would be nice to reset the numbering every time aot_id goes +# up, but this is annoying to do right now (because we don't know if +# an aot_id will come back from the dead), so right now this also happens +# to be a globally unique number too (at the cost of wobbling if you change +# how the graphs compile) +nth_graph: int = 0 +model_name: str = "model" + + +def set_model_name(name): + global model_name + model_name = name + + +def get_aot_compilation_context() -> Tuple[List[str], str, int]: + return list(graph_being_compiled), model_name, nth_graph + + +def get_aot_graph_name() -> str: + """ + Returns the name of the graph being compiled. 
+ """ + global model_name, graph_being_compiled, nth_graph + return f"{model_name}__{'_'.join(graph_being_compiled)}_{nth_graph}" + + +get_graph_being_compiled = get_aot_graph_name + + +@contextmanager +def track_graph_compiling(aot_config, graph_name): + global graph_being_compiled + # TODO: Don't shove the aot_id in here; set it in the context + graph_being_compiled = [f"{aot_config.aot_id}_{graph_name}"] + try: + yield + finally: + global nth_graph + nth_graph += 1 + graph_being_compiled = [] + + +# Set up hooks so that during backward the fx's stack_trace is properly set +callback_set = False + + +def setup_stacktrace_preservation_hooks(roots: List): + def iter_graph(roots): + if not roots: + return + seen = set() + q = collections.deque() # type: ignore[var-annotated] + for node in roots: + if node is not None and node not in seen: + seen.add(node) + q.append(node) + + while q: + node = q.popleft() + for fn, _idx in node.next_functions: + if fn in seen or fn is None: + continue + seen.add(fn) + q.append(fn) + + yield node + + def get_callback(saved_stack_): + def callback(): + global callback_set + fx_traceback.set_stack_trace(saved_stack_) + callback_set = False + + return callback + + def get_prehook(stack_, seq_nr): + def prehook(grad_output): + global callback_set + + if not callback_set: + torch.autograd.variable.Variable._execution_engine.queue_callback( # type: ignore[attr-defined] + get_callback(fx_traceback.format_stack()) + ) + callback_set = True + + fx_traceback.set_stack_trace(stack_) + fx_traceback.set_grad_fn_seq_nr(seq_nr) + + return prehook + + def get_posthook(special_stack_, seq_nr): + def posthook(grad_input, grad_output): + fx_traceback.set_stack_trace(special_stack_) + fx_traceback.reset_grad_fn_seq_nr() + + return posthook + + for node in iter_graph(roots): + forward_node_stack = node.metadata.get("traceback_", []) + node.register_prehook(get_prehook(forward_node_stack, node._sequence_nr())) + + special_stack = forward_node_stack.copy() + special_stack.append( + "Gradient addition node due to multiple use of tensor around:" + ) + node.register_hook(get_posthook(special_stack, node._sequence_nr())) + + +def describe_input(i, aot_config): + if i < aot_config.num_params_buffers: + return f"parameter/buffer {i}" + else: + return f"input {i - aot_config.num_params_buffers}" + + +def format_guard_bug_msg(aot_config, expected): + return ( + f"At compilation time, graph {aot_config.aot_id} was compiled under the " + f"assumption that {expected}, but at runtime this was not the case. " + "This indicates a guard bug in AOTAutograd or Dynamo, please file a bug to PyTorch." + ) diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/runtime_wrappers.py b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/runtime_wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..acb03d232bdf902452d321d0d97e266b8cf45ad6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/runtime_wrappers.py @@ -0,0 +1,1021 @@ +""" +This module defines runtime wrappers, which, based on previous analysis attempts to: +1. process the inputs and outputs +2. apply mutations +3. handle functionalized randomness +4. 
deduplicate inputs and consolidate views into their bases (see input_output_analysis) +""" + +import collections +import pprint +from functools import wraps +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.utils.dlpack +from torch import Tensor +from torch._guards import DuplicateInputs, TracingContext +from torch._prims_common import CUDARngStateHelper +from torch.multiprocessing.reductions import StorageWeakRef +from .. import config +from .collect_metadata_analysis import run_functionalized_fw_and_collect_metadata + +from .functional_utils import gen_alias_from_base +from .input_output_analysis import ( + compute_overlapping_inputs, + create_synthetic_base_metadata, + remove_dupe_metadata, +) +from .logging_utils import describe_input, format_guard_bug_msg +from .schemas import ( + AOTConfig, + InputAliasInfo, + OutputType, + SubclassCreationMeta, + TensorAlias, + ViewAndMutationMeta, +) +from .subclass_utils import ( + requires_subclass_dispatch, + unwrap_tensor_subclasses, + wrap_tensor_subclasses, +) + +from .utils import ( + call_func_at_runtime_with_args, + make_boxed_func, + partial_flatten_asdict, + strict_zip, +) + + +zip = strict_zip + + +# The wrapper created by this function handles all of the runtime aliasing and mutation "epilogue" logic +# that needs to run after the compiled function. +# +# This function accepts a trace_joint flag, indicating whether or not we're generating the runtime +# epilogue for a forward-only inference graph, or for an autograd.Function.apply function. +# This is because there are some minor differences in how we treat these cases at runtime: +# - resize_() is currently handled in the inference case, but not fully handled in the autograd case. +# - the autograd cases inserts TensorAlias wrapper objects for outputs that alias inputs +def create_runtime_wrapper( + compiled_fn, + *, + runtime_metadata: ViewAndMutationMeta, + indices_of_inps_to_detach: List[int], + trace_joint: bool, + keep_input_mutations: bool, + disable_amp: bool, +): + num_tokens = len(runtime_metadata.tokens) + + if not hasattr(compiled_fn, "_boxed_call"): + compiled_fn = make_boxed_func(compiled_fn) + + def runtime_wrapper(*args): + # Pass in effect tokens (See Note [Side-Effectful Tokens in AOTAutograd]) + args = (*[torch.tensor([])] * num_tokens, *args) + + if trace_joint: + args_ = list(args) + # See Note [Detaching inputs that never need gradients] + for idx in indices_of_inps_to_detach: + if isinstance(args_[idx], torch.Tensor): + args_[idx] = args_[idx].detach() + with torch.autograd._force_original_view_tracking(True): + all_outs = call_func_at_runtime_with_args( + compiled_fn, + args_, + disable_amp=disable_amp, + ) + else: + # When we have an inference graph, we run with torch.no_grad. 
+ # It's possible to get an inference graph with inputs that require grad, + # in which case we want to make sure autograd is disabled + # (since e.g., inductor will generate aten.addmm.out calls which autograd will complain on) + if torch.is_grad_enabled(): + with torch.no_grad(): + all_outs = call_func_at_runtime_with_args( + compiled_fn, + args, + disable_amp=disable_amp, + ) + else: + all_outs = call_func_at_runtime_with_args( + compiled_fn, + args, + disable_amp=disable_amp, + ) + + num_mutated_runtime_inps = runtime_metadata.num_mutated_inp_runtime_indices + num_intermediate_bases = runtime_metadata.num_intermediate_bases + + if keep_input_mutations and trace_joint: + num_input_mutations_handled_by_autograd = ( + runtime_metadata.num_mutated_graph_handled_indices_seen_by_autograd + ) + # autograd.Function requires us to return the mutated inputs as extra outputs to the autograd.Function.forward + if num_input_mutations_handled_by_autograd > 0: + all_outs = all_outs[:-num_input_mutations_handled_by_autograd] + + assert ( + len(all_outs) + == num_mutated_runtime_inps + + runtime_metadata.num_outputs + + num_intermediate_bases + + num_tokens + ) + + # Toss out the effect tokens (See Note [Side-Effectful Tokens in AOTAutograd]) + all_outs = all_outs[num_tokens:] + + # Step 3: After running the compiled fw, apply updates to mutated inputs + num_mutations_to_apply = runtime_metadata.num_mutated_inp_runtime_indices + if num_mutations_to_apply > 0: + updated_inputs = all_outs[:num_mutations_to_apply] + fw_outs = all_outs[num_mutations_to_apply:] + + for i, inpt_idx in enumerate(runtime_metadata.mutated_inp_runtime_indices): + meta = runtime_metadata.input_info[inpt_idx] + if not meta.mutates_data and not meta.mutates_metadata: + continue + original_inpt = args[inpt_idx] + updated_inpt = updated_inputs[i] + if meta.mutates_storage_metadata: + # mutates_storage_metadata means our input saw a x.set_(y) call. + # What if x **also** saw a data and/or a metadata mutation? + # (1) If the [meta]data mutation occurred after the set_(), + # then there is no need to copy_() the data. + # When we perform x.set_(x_updated), we are guaranteed that + # x_updated already has the final version of the data/metadata + # (2) If a data mutation occurred before the set_(). + # This case seems very difficult to support. + # TODO: discuss on the PR and decide if we want to tr to + # either support it, or detect and ban it. + if trace_joint: + assert isinstance(updated_inpt, TensorAlias) + updated_inpt = updated_inpt.alias + with torch.no_grad(): + original_inpt.set_(updated_inpt) + continue + if meta.mutates_metadata and not meta.mutates_data: + if trace_joint: + assert isinstance(updated_inpt, TensorAlias) + updated_inpt = updated_inpt.alias + # We need to grab the size/stride/storage_offset from the compiled forward, + # and use that to mutate the metadata of the input + original_inpt.as_strided_( + updated_inpt.size(), + updated_inpt.stride(), + updated_inpt.storage_offset(), + ) + else: + if meta.mutates_data and meta.mutates_metadata: + original_inpt.as_strided_( + updated_inpt.size(), + updated_inpt.stride(), + updated_inpt.storage_offset(), + ) + else: + assert meta.mutates_data + if meta.is_leaf and original_inpt.requires_grad: + # We can hit this situation in this case: + # def f(x): + # x.detach().mul_(2) + # return x + 1 + # AOTAutograd will see a mutation in the above case, and try to + # apply a copy_() here, in the epilogue. 
+ # But if x required gradients, and is a leaf, then autograd + # will yell at us for trying to mutate it. + # However, it's only possible to end up in this scenario (like the above) + # if all of the mutations to the leaf input were non-autograd-tracking mutations + # (aka mutations under no_grad(), or on detached views). + # In that case, we fully want to hide the mutation from autograd, so detaching is ok. + original_inpt.detach().copy_(updated_inpt) + else: + original_inpt.copy_(updated_inpt) + else: + fw_outs = all_outs + + # Step 4: Manually regenerate any outputs that are aliased to inputs, instead of + # compiling them. + if runtime_metadata.num_outputs_aliased > 0: + # The compiled forward also returned intermediate bases. We don't want to return them to the user. + if runtime_metadata.num_intermediate_bases > 0: + fw_outs_no_intermediate_bases = fw_outs[ + : -runtime_metadata.num_intermediate_bases + ] + intermediate_bases = fw_outs[-runtime_metadata.num_intermediate_bases :] + else: + fw_outs_no_intermediate_bases = fw_outs + intermediate_bases = [] + + assert len(fw_outs_no_intermediate_bases) == len( + runtime_metadata.output_info + ) + fw_outs_including_aliases = [] + for i, (o, info) in enumerate( + zip(fw_outs_no_intermediate_bases, runtime_metadata.output_info) + ): + if info.output_type in [ + OutputType.non_alias, + OutputType.unsafe_view_alias, + OutputType.custom_function_view, + ]: + fw_outs_including_aliases.append(o) + continue + if trace_joint: + assert isinstance(o, TensorAlias) + o_ = o.alias + else: + o_ = o + + o_grad = runtime_metadata.output_info[i].requires_grad + if info.output_type == OutputType.alias_of_input: + aliased_base_tensor = args[info.base_idx] # type: ignore[index] + regenerated_out = gen_alias_from_base( + aliased_base_tensor, o_, o_grad + ) + fw_outs_including_aliases.append(regenerated_out) + continue + elif info.output_type == OutputType.is_input: + aliased_base_tensor = args[info.base_idx] # type: ignore[index] + regenerated_out = aliased_base_tensor + fw_outs_including_aliases.append(regenerated_out) + continue + elif info.output_type == OutputType.alias_of_intermediate: + base_tensor_list = intermediate_bases + elif ( + info.output_type == OutputType.alias_of_intermediate_save_as_output + ): + base_tensor_list = intermediate_bases + else: + assert ( + info.output_type + == OutputType.alias_of_intermediate_base_is_user_output + ) + base_tensor_list = fw_outs_no_intermediate_bases + aliased_base_tensor = base_tensor_list[info.base_idx] + # TODO: handle the custom autograd function case here. + # We need a way to check whether a tensor came from a custom autograd fn from python, + # AND a way to replay that custom view fn. 
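# For intuition, what the gen_alias_from_base call below conceptually does:
# rebuild the aliased output as a fresh view of its runtime base (e.g. via
# as_strided), so that the aliasing relationship is visible to autograd.
# A minimal sketch with plain tensors, simplified from the real helper:
import torch

base = torch.arange(6.0)            # stand-in for the aliased base tensor
graph_out = base.view(2, 3)         # what the compiled graph produced
regenerated = base.as_strided(      # rebuilt outside the graph; its ._base is
    graph_out.size(),               # the runtime base, so autograd sees the alias
    graph_out.stride(),
    graph_out.storage_offset(),
)
assert regenerated._base is base
base.mul_(2)                        # mutations to the base remain visible
assert torch.equal(regenerated, base.view(2, 3))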
+ regenerated_out = gen_alias_from_base(aliased_base_tensor, o_, o_grad) + fw_outs_including_aliases.append(regenerated_out) + ret_outs = fw_outs_including_aliases + else: + ret_outs = fw_outs + + if runtime_metadata.dynamic_outputs: + for t, o in zip(ret_outs, runtime_metadata.output_info): + if o.dynamic_dims is None: + continue + if hasattr(t, "_dynamo_weak_dynamic_indices"): + t._dynamo_weak_dynamic_indices |= o.dynamic_dims + else: + t._dynamo_weak_dynamic_indices = o.dynamic_dims.copy() + if runtime_metadata.grad_enabled_mutation is not None: + torch.set_grad_enabled(runtime_metadata.grad_enabled_mutation) + return ret_outs + + return runtime_wrapper + + +# Calling convention: If we are running functionalized RNG, then outs consists +# of (user_outs, rng_offset) +def functionalized_rng_runtime_epilogue( + metadata: ViewAndMutationMeta, outs, return_new_outs=True +): + if metadata.is_rng_op_functionalized: + assert metadata.num_outputs_rng_offset == 1 + new_rng_offset = outs[-1] + CUDARngStateHelper.set_new_offset(new_rng_offset) + if return_new_outs: + user_outs = outs[:-1] + return user_outs + else: + return None + return outs + + +# This wrapper handles the AOTDispatch runtime logic for tensor subclasses. +# At runtime, we have a compiled function that knows how to operate on the domain of DenseTensor -> DenseTensor, +# But the user might have passed us some tensor subclass inputs (or expect some subclass tensor outputs). +# This function handles the wrapping and unwrapping of tensor subclasses at runtime. +def aot_dispatch_subclass_wrapper( + runtime_fn: Callable, + *, + subclass_metas: List[Union[int, SubclassCreationMeta]], + num_fw_outs_saved_for_bw: Optional[int], +) -> Callable: + def inner_fn(args): + unwrapped_args = unwrap_tensor_subclasses(args, is_joint_structure=False) + # expectation: runtime_fn is a boxed fn + unwrapped_outs = runtime_fn(unwrapped_args) + wrapped_outs = wrap_tensor_subclasses( + unwrapped_outs, + subclass_metas=subclass_metas, + num_fw_outs_saved_for_bw=num_fw_outs_saved_for_bw, + is_runtime=True, + ) + return wrapped_outs + + # box it + inner_fn._boxed_call = True # type: ignore[attr-defined] + return inner_fn + + +# MOTIVATION: +# +# When tracing functions for future execution, one must be careful not to pass +# in the same input tensor multiple times (e.g., f(x, x), as this can result +# in graphs that are ONLY valid if you later pass a new tensor in exactly the +# same way (e.g., f(y, y)). (NB: we really mean duplicate; two distinct +# tensors that alias each other is a different situation that is covered by +# aot_dispatch_deduplicated_autograd). Here are two examples: +# +# (1) Suppose you have a function: +# +# def f(x, y): +# return x + y +# +# If you make_fx(f)(x, x), you will trace out: +# +# def f(x, y): +# return y + y +# +# Oops! +# +# (2) For most tensors x and y, you can compute f's gradient with respect to +# these to inputs by saying torch.autograd.grad(f(x, y), (x, y)). However, +# if x is y, you will trace out a program that gets incorrect gradients: +# +# >>> x = torch.randn(1, requires_grad=True) +# >>> torch.autograd.grad(x + x, (x, x)) +# (tensor([2.]), tensor([2.])) +# +# In other words, the gradient is double-counted. Deduplicating the arguments +# gives you an appropriate gradient: +# +# >>> y = torch.randn(1, requires_grad=True) +# >>> torch.autograd.grad(x + y, (x, y)) +# (tensor([1.]), tensor([1.])) +# +# HOW TO DEDUPLICATE: +# +# There are a few strategies, in order of preference: +# +# 1. 
For every duplicate argument to the function, detach it into +# a separate leaf tensor, so that it is no longer duplicated. +# +# PRO: The resulting compiled graph works for any configuration +# of duplicated arguments. +# +# CON: It does not (naively) work if you mutate the metadata of inputs: +# +# def f(x, y): +# x.transpose_(0, 1) +# y.transpose_(0, 2) +# +# x = torch.randn(2, 3, 4) +# f(x, x) +# +# The ordering of the transposes inside f dictates whether or not +# you get [4, 2, 3] or [3, 4, 2]. This means that you cannot precompute +# what metadata mutations should get applied to each input; you need to +# assume they aren't duplicates (what we do today) or preserve +# the original metadata mutations exactly in order, so that they work +# for any duplicate configuration. +# +# CON: It does not (naively) work if you mutate the data of inputs. +# In particular, leaf tensors that require grad cannot be mutated, +# this makes it impossible to differentiate with respect to the original +# base. +# +# 2. For every duplicate argument to the function, remove it, so it is +# no longer part of the "true" signature: +# +# PRO: Implemented naively, it still works for metadata/data mutation. +# +# CON: The resulting compiled graph is duplicate-specialized: it only +# works if future calls duplicate arguments in exactly the same way. +# Horribly, Dynamo doesn't guard on this at the moment. But even if +# it did, you could still end up recompiling a bunch of each duplicate. +# +# Our strategy is to do (1) if we can, and do (2) otherwise, erroring if +# Dynamo's guards are not enough. In practice, this seems to cover +# everything. +# +def aot_wrapper_dedupe( + flat_fn, + flat_args: List[Tensor], + aot_config: AOTConfig, + *, + compiler_fn, + fw_metadata, +): + # Use information about whether or not flat_fn mutates its arguments + # or not to handle dupe args + + # Strategy 1: For any input that is not mutated, we can leafify it if we + # need to remove a duplicate. + leaf_flat_args = [] + args_set = set() + ok = True + + for i, a in enumerate(flat_args): + if not isinstance(a, torch.Tensor): + leaf_flat_args.append(a) + elif a not in args_set: + args_set.add(a) + leaf_flat_args.append(a) + elif ( + not fw_metadata.input_info[i].mutates_data + and not fw_metadata.input_info[i].mutates_metadata + ): + leaf_flat_args.append(a.detach().requires_grad_(a.requires_grad)) + else: + ok = False + break + + if ok: + return compiler_fn(flat_fn, leaf_flat_args, aot_config, fw_metadata=fw_metadata) + + if requires_subclass_dispatch(leaf_flat_args, fw_metadata): + raise RuntimeError( + """\ +Encountered duplicate inputs that are mutated in the graph, but at least one input/output +to the graph is a tensor subclass. This is not supported today. You can try to +remove the aliasing yourself as a workaround, or otherwise file an issue on github.""" + ) + + # export path: ban duplicate inputs for now, add later if requested. + if aot_config.is_export: + raise RuntimeError( + f"""\ +Encountered duplicated inputs that are mutated in the graph you are trying to export. +This functionality is currently not supported. If needed, please file a github issue. + +fw_metadata={str(fw_metadata)} + """ + ) + + # Strategy 2: Duplicate specialize. 
+ # + # In Haskell types, suppose you have: + # + # add_dupe_args :: DedupedArgs -> Args + # remove_dupe_args :: Args -> DedupedArgs + # + # compiler_fn + # :: (DedupedArgs -> R) -> DedupedArgs -> AOTConfig -> (DedupedArgs -> R) + # deped_compiler_fn + # :: (Args -> R) -> Args -> AOTConfig -> (Args -> R) + # + # Then the code below can be written in point-free style as: + # + # deduped_compiler_fn f a c = + # compiler_fn (f . add_dupe_args) (remove_dupe_args a) c . remove_dupe_args + # + # Suppose you have: + # + # [a, b, a, c] + # + # We want: + # + # remove_dupe_args([a, b, a, c]) == [a, b, c] + # add_dupe_args([a, b, c]) == [a, b, a, c] + # + # This is done via (respectively): + # + # seen_args = {a: 0, b: 1, c: 2} + # enumerate(add_dupe_map) = [ # how to get args from the deduped list + # (0, 0), + # (1, 1), + # (2, 0), + # (3, 2), + # ] + # keep_arg_mask = [True, True, False, True] + + seen_args: Dict[Tensor, int] = {} + keep_arg_mask = [] + # Implicitly map duped arg position (list index) to de-duped arg position + add_dupe_map: List[int] = [] + duped_arg_len = len(flat_args) + + j = 0 # index into deduped_flat_args + for t in flat_args: + if isinstance(t, torch.Tensor): + if t in seen_args: + keep_arg_mask.append(False) + add_dupe_map.append(seen_args[t]) + continue + seen_args[t] = j + + keep_arg_mask.append(True) + add_dupe_map.append(j) + j += 1 + assert ( + len(add_dupe_map) == duped_arg_len + ), f"Expects add_dupe_map to have length {duped_arg_len} but got {len(add_dupe_map)}" + + # NB: Hot path, avoid set lookups here + # TODO: Can avoid the zip here too, probably + def remove_dupe_args(args): + return [t for t, keep in zip(args, keep_arg_mask) if keep] + + def add_dupe_args(args): + return [args[add_dupe_map[i]] for i in range(duped_arg_len)] + + deduped_flat_args = remove_dupe_args(flat_args) + + # Update our input metadata to remove duped input metadata. + updated_fw_metadata = remove_dupe_metadata(fw_metadata, keep_arg_mask, add_dupe_map) + + if ( + tracing_context := TracingContext.try_get() + and aot_config.aot_autograd_arg_pos_to_source + ): + # TODO(voz): This structure is 1:1, we could consider an alternate structure like + # kept_pos:[dupe_arg_pos], however, add_dupe_map is 1:1 so we would need a new structure there, + # which feels like needless complexity for a tiny bit of efficiency at this point. 
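# A quick worked example of the dedup bookkeeping built above (keep_arg_mask /
# add_dupe_map), before the duplicate-input guards are installed below. Plain
# python objects stand in for tensors (both hash by identity):
a, b, c = object(), object(), object()
flat_args = [a, b, a, c]

seen, keep_arg_mask, add_dupe_map, j = {}, [], [], 0
for t in flat_args:
    if t in seen:
        keep_arg_mask.append(False)
        add_dupe_map.append(seen[t])
        continue
    seen[t] = j
    keep_arg_mask.append(True)
    add_dupe_map.append(j)
    j += 1

assert keep_arg_mask == [True, True, False, True]
assert add_dupe_map == [0, 1, 0, 2]
assert [t for t, keep in zip(flat_args, keep_arg_mask) if keep] == [a, b, c]   # remove_dupe_args
assert [[a, b, c][i] for i in add_dupe_map] == [a, b, a, c]                    # add_dupe_args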
+ for dupe_arg_pos, (kept_pos, keep_arg) in enumerate( + zip(add_dupe_map, keep_arg_mask) + ): + if not keep_arg: + dupe_arg_source = aot_config.aot_autograd_arg_pos_to_source[ + dupe_arg_pos + ] + kept_arg_source = aot_config.aot_autograd_arg_pos_to_source[kept_pos] + tracing_context.guards_context.aotautograd_guards.append( # type: ignore[attr-defined] + DuplicateInputs(kept_arg_source, dupe_arg_source) + ) + + @wraps(flat_fn) + def wrapped_flat_fn(*args): + return flat_fn(*add_dupe_args(args)) + + if config.debug_assert: + ref_fw_metadata = run_functionalized_fw_and_collect_metadata( + wrapped_flat_fn, + keep_input_mutations=fw_metadata.keep_input_mutations, + is_train=fw_metadata.is_train, + )(*deduped_flat_args) + assert ( + ref_fw_metadata == updated_fw_metadata + ), f"ref_metadata={str(ref_fw_metadata)}, actual_metadata={str(updated_fw_metadata)}" + + compiled_fn = compiler_fn( + wrapped_flat_fn, deduped_flat_args, aot_config, fw_metadata=updated_fw_metadata + ) + + if not hasattr(compiled_fn, "_boxed_call"): + compiled_fn = make_boxed_func(compiled_fn) + + @wraps(compiled_fn) + def wrapped_compiled_fn(args): + deduped_args = remove_dupe_args(args) + args.clear() + return compiled_fn(deduped_args) + + wrapped_compiled_fn._boxed_call = True # type: ignore[attr-defined] + + # This can be uncommented when we properly guard for duplicates, + # but right now we must not do it. + # if not config.debug_assert: + # return wrapped_compiled_fn + + @wraps(wrapped_compiled_fn) + def debugged_compiled_fn(args): + # Test that the computed remove/add arg functions are an inverse + new_args = add_dupe_args(remove_dupe_args(args)) + seen: Dict[Any, None] = {} + for i, (x, y) in enumerate(zip(new_args, args)): + seen[y] = None + assert x is y, format_guard_bug_msg( + aot_config, + f"{describe_input(i, aot_config)} would be a duplicate of " + f"{describe_input(add_dupe_map[i], aot_config)}", + ) + # This is only an error if there is metadata mutation on both of + # the duped arguments; in this case, we need to know what order + # the metadata mutation applies in. You'll get the correct result + # otherwise, because a graph that assumes distinct inputs works if + # you dupe the inputs (the gradient contributions from each input + # will get summed up appropriately.) + # + # TODO: work out how to setup this assert correctly + """ + assert len(seen) == unique_args, format_guard_bug_msg(aot_config, + f"there would be {unique_args} distinct arguments" + ) + """ + return wrapped_compiled_fn(args) + + debugged_compiled_fn._boxed_call = True # type: ignore[attr-defined] + + return debugged_compiled_fn + + +# This layer handles the situation where you have two inputs that alias each other, +# and one of the inputs is mutated. +# We need to take special care to ensure that the mutation is applied to the other aliases in the graph. +# +# pre-condition: aot_wrapper_dedup has already run. +# (This function will in theory work if there are duplicate args. +# However, the synthetic base code path is a bit sub-optimal, and running with dupe'd inputs +# would cause us to hit that path more frequently). +def aot_wrapper_synthetic_base( + flat_fn, + flat_args: List[Tensor], + aot_config: AOTConfig, + *, + fw_metadata: ViewAndMutationMeta, + # Currently, the only reason we need to plumb this bool is because + # the synthetic base code prohibits more cases in the autograd case than the inference case. 
+ needs_autograd: bool, + compiler_fn, +): + is_inference = not needs_autograd + flat_args_with_synthetic_bases, synthetic_base_info = merge_view_inputs( + flat_args, + fw_metadata.input_info, + is_inference=is_inference, + ) + # Happy path: we don't need synthetic bases + if synthetic_base_info is None: + return compiler_fn(flat_fn, flat_args, aot_config, fw_metadata=fw_metadata) + + # export path: ban synthetic bases for now, add later if requested. + if requires_subclass_dispatch(flat_args, fw_metadata): + raise RuntimeError( + """\ +Encountered aliased inputs that are mutated in the graph, but at least one input/output +to the graph is a tensor subclass. This is not supported today. You can try to +remove the aliasing yourself as a workaround, or otherwise file an issue on github.""" + ) + + if aot_config.is_export: + raise RuntimeError( + f"""\ +Encountered aliased inputs that are mutated in the graph you are trying to export. +This functionality is currently not supported. If needed, please file a github issue. + +synthetic_base_info={str(synthetic_base_info)} + +fw_metadata={str(fw_metadata)} + """ + ) + + assert len(fw_metadata.input_info) == len(synthetic_base_info) + + # Update our forward metadata to take synthetic bases into account + ( + fw_metadata_updated, + aliased_arg_idx_with_metadata_mutations, + ) = create_synthetic_base_metadata( + fw_metadata, synthetic_base_info, flat_args, flat_args_with_synthetic_bases + ) + + num_aliased_args_with_metadata_mutations = len( + aliased_arg_idx_with_metadata_mutations + ) + + def _unpack_synthetic_bases(primals: Tuple[Any, ...]) -> List[Any]: + f_args_inner = [] + for inner_idx_or_tuple in synthetic_base_info: + if isinstance(inner_idx_or_tuple, int): + f_args_inner.append(primals[inner_idx_or_tuple]) + else: + inner_base_idx, view_tensor = inner_idx_or_tuple + base = primals[inner_base_idx] + view_arg = gen_alias_from_base( + base, view_tensor, view_tensor.requires_grad + ) + f_args_inner.append(view_arg) + return f_args_inner + + @wraps(flat_fn) + def wrapped_flat_fn(*args): + unpacked_args = _unpack_synthetic_bases(args) + # This is a bit subtle. The goal of this entire function (aot_dispatch_synthetic_bases) + # is to relieve the downstream logic from having to reason about mutations on inputs that alias + # each other, by replacing aliased inputs with a synthetic base. + # One area where this breaks down a bit however is if one of those aliased inputs + # experienced a metadata mutation. + # We are now obligated to reapply the metadata mutation directly to the user's input; + # it isn't enough to apply mutations back to the synthetic base in the downstream logic. + # + # The way we handle this is by pretending that those aliased inputs that experience metadata mutations + # are additional outputs in the user's forward function. + # The downstream logic will just treat these as "user outputs that alias inputs". + # However, we will manually grab them at runtime here, use them to reapply the metadata mutation + # to the user inputs, and not return them to the user. 
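# A simplified standalone sketch of that convention (the code below collects those
# aliased args): the traced function returns the metadata-mutated alias as an extra
# output, and the runtime wrapper replays the metadata change onto the user's input
# with as_strided_ and drops the extra output. Names here are illustrative only:
import torch

def traced_fn(base):
    a = base.view(2, 2)
    a = a.t()                        # stand-in for a metadata mutation (e.g. a.t_())
    out = base.sum()
    return out, a                    # extra output: the metadata-mutated aliased input

user_inp = torch.arange(4.0)
out, mutated_alias = traced_fn(user_inp)
user_inp.as_strided_(                # replay the metadata mutation onto the real input
    mutated_alias.size(), mutated_alias.stride(), mutated_alias.storage_offset()
)
assert user_inp.shape == (2, 2) and user_inp.stride() == (1, 2)
# only `out` is handed back to the user; the extra output is dropped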
+ aliased_args_with_metadata_mutations = [ + x + for i, x in enumerate(unpacked_args) + if i in aliased_arg_idx_with_metadata_mutations + ] + if len(aliased_args_with_metadata_mutations) > 0: + return *(flat_fn(*unpacked_args)), *aliased_args_with_metadata_mutations + else: + return flat_fn(*unpacked_args) + + if config.debug_assert: + ref_fw_metadata = run_functionalized_fw_and_collect_metadata( + wrapped_flat_fn, + keep_input_mutations=fw_metadata.keep_input_mutations, + is_train=fw_metadata.is_train, + )(*flat_args_with_synthetic_bases) + assert ref_fw_metadata == fw_metadata_updated, ( + f"ref_metadata={pprint.pformat(partial_flatten_asdict(ref_fw_metadata))}, " + f"\nactual_metadata={pprint.pformat(partial_flatten_asdict(fw_metadata_updated))}" + ) + + compiled_fn = compiler_fn( + wrapped_flat_fn, + flat_args_with_synthetic_bases, + aot_config, + fw_metadata=fw_metadata_updated, + ) + + if not hasattr(compiled_fn, "_boxed_call"): + compiled_fn = make_boxed_func(compiled_fn) + + @wraps(compiled_fn) + def wrapped_compiled_fn(args): + args_with_synthetic_bases, synthetic_base_info = merge_view_inputs( + args, fw_metadata.input_info, is_inference=is_inference + ) + assert synthetic_base_info is not None + aliased_args_w_metadata_mutations = [ + args[i] for i in aliased_arg_idx_with_metadata_mutations + ] + args.clear() + outs = compiled_fn(args_with_synthetic_bases) + if num_aliased_args_with_metadata_mutations > 0: + # This code does not handle **all** input metadata mutations. + # Instead, it only handles metadata mutations on inputs that were converted into synthetic bases + # (which only happens if at least one aliased input experienced a data mutation). + # e.g: + # def f(a, b): + # a.mul_(2) + # b.t_(1, 0) + # f(x.view(2, 2), x.view(2, 2)) + mutated_metadata_inps = outs[-num_aliased_args_with_metadata_mutations:] + user_outs = outs[:-num_aliased_args_with_metadata_mutations] + for inp, mutated_inp in zip( + aliased_args_w_metadata_mutations, mutated_metadata_inps + ): + inp.as_strided_( + mutated_inp.size(), + mutated_inp.stride(), + mutated_inp.storage_offset(), + ) + return user_outs + return outs + + return wrapped_compiled_fn + + +# Note [Handling mutations on an input that aliases other inputs] +# The easiest example to show-case this edge case is here: +# +# def f(a, b): +# a.mul_(2) +# out = a + b +# return out +# b = torch.ones(...) +# a = b.view(-1) +# f(a, b) +# +# In this situation, if a and b happened to be aliased, we need to trace something different! +# Suppose we had b = a.view(-1) +# (In this case, that means that `a._base is b`) +# +# We need to ensure that the aliasing relationship between a and b is preserved. +# We do that detecting the specific situation above (mutate an input that aliases another input), +# and when we do that, we create a synthetic base argument. Then inside of the traced forward, +# we regenerate a and b off of that base. +# The complete example of the transformed function looks like this: +# +# // The traced forward takes in a synthetic base, and regenerates the aliased inputs as views +# // We could consider getting view-replay support here to minimize as_strided_scatter ops in the graph +# def traced_forward(base): +# a = base.as_strided(...) +# b = base.as_strided(...) +# a_updated = a.mul(2) +# base_updated = torch.as_strided_scatter(base, a_updated, ...) +# b_updated = base_updated.as_strided(...) 
+# out = a_updated + b_updated +# return a_updated, out +# +# def compiled_fn(a, b): +# // we detect that a is the "differentiable base" here +# base = a +# // In other situations, we might do either: +# // (1) a and b are both views off of some larger differentiable base +# // assert a._base is b._base and a._base is not None +# // base = a._base +# // (2) a and b both don't require gradients. Create a base from the storage +# // assert a._base is None and b._base is None +# // base = torch.Tensor(a.storage()) +# a_updated, out = traced_forward(base) +# a.copy_(a_updated) +# return out +# +# This function: +# (1) Merges input views into a synthetic base argument, when any of those input views are mutated +# (2) Returns metadata telling the autograd.Function how to modify their arguments properly, +# to respect the new calling convention. +# +# The calling convention is as follows. +# Any inputs that were originally views of one another get yanked, and replaced with a synthetic base. +# The argument list ordering goes [base1, ..., baseN], [arg1, ..., argN], +# Where the ordering of the bases is determined from the ordering of the original view args. +# baseA will come before baseB if the earliest original argument coming from baseA +# showed up earlier in the argument list than the earliest original argument coming from baseB. +# +# Example, given some tensors a, b, c, d +# call site: +# f(a, c.view(-1), b.view(-1), b, c, d) +# Modified argument list: +# c_base comes first because the first c view came earlier in arg list than the first b view +# a and d still show up in the modified arg list, but b and c don't- they're regenerated from their bases +# b_base = torch.Tensor(b.storage()) +# c_base = torch.Tensor(c.storage()) +# f(c_base, b_base, a, d) +def merge_view_inputs( + fwd_inputs: List[Any], + mutated_input_info: List[InputAliasInfo], + *, + # The autograd case currently has more restrictions than the inference case. + is_inference: bool, +) -> Tuple[List[Any], Optional[List[Union[int, Tuple[int, torch.Tensor]]]]]: + def _are_differentiable_views(view1, view2): + if view1 is view2: + return True + if view1._base is None and view2._base is None: + return False + if view1._base is view2._base or view1._base is view2 or view1 is view2._base: + return True + return False + + def _same_dtype_views(view1, view2): + if view1.dtype != view2.dtype: + return False + if view1._base is not None and view1.dtype != view1._base.dtype: + return False + if view2._base is not None and view2.dtype != view2._base.dtype: + return False + return True + + assert len(fwd_inputs) == len(mutated_input_info) + storage_ref_to_idx: Dict[StorageWeakRef, List[int]] = collections.defaultdict(list) + base_args = [] + other_args = [] + for i, inpt in enumerate(fwd_inputs): + if isinstance(inpt, Tensor): + storage_ref = StorageWeakRef(inpt.untyped_storage()) + storage_ref_to_idx[storage_ref].append(i) + else: + other_args.append(inpt) + # Note [Synthetic Base Info Metadata] + # This list contains metadata that tells you what the i'th argument in the inner calling convention should be. 
+ # It's either: + # - another int (corresponding to the index in the argument list of the element from the outer calling convention) + # - idx, view_tensor, where we can generate the new output with view_tensor._view_func(old_args[idx]) + # idx corresponds to which synthetic base from the outer calling context to view + inner_calling_convention_meta: Dict[int, Union[int, Tuple[int, torch.Tensor]]] = {} + for aliased_input_indices in storage_ref_to_idx.values(): + if len(aliased_input_indices) <= 1 or not any( + # We only care about mutations that affect all aliases, + # so metadata mutations on an input doesn't require us to do synthetic base handling. + mutated_input_info[inpt_idx].mutates_data + for inpt_idx in aliased_input_indices + ): + for curr_idx in aliased_input_indices: + other_args.append(fwd_inputs[curr_idx]) + continue + + # Here, we attempt to do a more complicated check to detect false aliasing + # (e.g. if all the tensors have the same storage, but don't actually overlap) + # In theory, we could have a large group of tensors that all share storages, where only *some* of them + # have overlapping memory. + # I don't bother with that case for now: here, we only bail out earlier if we detect that **every** pair + # of tensors in the current group that shares a storage is non-overlapping. + aliased_input_indices_no_false_sharing = compute_overlapping_inputs( + fwd_inputs, aliased_input_indices + ) + if len(aliased_input_indices_no_false_sharing) <= 1: + for curr_idx in aliased_input_indices: + other_args.append(fwd_inputs[curr_idx]) + continue + + # We detected an input that was mutated, AND aliases with another input. + # we need to replace this set of aliased inputs with a single synthetic base. + # For now, I'm banning a bunch of cases. We expect dynamo to properly detect these cases + # and error out. We can fix them later. + # These checks are transitive, so we don't need to check every pair. + for idx1, idx2 in zip( + aliased_input_indices, aliased_input_indices[1:], strict=False + ): + view1 = fwd_inputs[idx1] + view2 = fwd_inputs[idx2] + # The "inputs that are aliased but have different differentiable bases" case + # is more complicated and hopefully pretty rare. Not currently handled. + if not is_inference: + assert _are_differentiable_views( + view1, view2 + ), "aot_autograd() does not yet handle non-differentiable view input mutations." + # Regenerating views when reinterpreting complex / real tensors seems non-trivial, + # not handling for now + assert _same_dtype_views( + view1, view2 + ), "aot_autograd() does not yet handle input mutations on views with different dtypes." + non_none_bases = [ + fwd_inputs[i]._base + for i in aliased_input_indices + if fwd_inputs[i]._base is not None + ] + aliases_with_none_bases = [ + fwd_inputs[i] for i in aliased_input_indices if fwd_inputs[i]._base is None + ] + if len(non_none_bases) == 0: + # Case where none of the aliases have a ._base + # we generate a synthetic base without gradients, and generate views off of it + # We hit this case when we have input tensors to the graph that share a storage, + # but do not have a ._base field. + # Wondering when we hit this case? + # The _base field simply says that autograd knows about the aliasing relationship, + # but sometimes we create tensors which are aliased out of the same storage but guaranteed + # to be disjoint. 
In these cases, we will skip setting up the _base relationship + # for performance reasons (because the fact that the tensors share the same storage + # is unobservable unless you (1) do naughty things with resize_/as_strided + # or (2) look at the storage--as we are doing here.) + # One particular example of this is optimizer steps on the LSTM module: + # LSTM parameters are packed into a contiguous storage for efficiency reasons when + # calling cuDNN kernels, so when these parameters get passed to the optimizer we will + # find they share the same storage, but do not have _base set since they are all disjoint. + # + # NOTE: There is one case where this is unsafe: + # torch.Tensor(storage) will ALWAYS create a 1D tensor, which is not necessarily + # the same shape as the "actual" base that the tensor came from. + # For the most part this is fine, because we always use as_strided() + # to generate the original aliased inputs again. + # If we were to use view-replay though, this could cause the aliased views + # to have incorrect sizes. + example_idx = aliased_input_indices[0] + example_alias = fwd_inputs[example_idx] + # Note that this function is re-used at both trace time and runtime. + # At trace time, we're under a FakeMode so synthetic_base becomes a FakeTensor. + synthetic_base = torch.empty( + (0,), dtype=example_alias.dtype, device=example_alias.device + ) + # We don't actually have a convenient way of going from storage -> tensor, + # So using set_() here (we suffer some minor overhead, but this case is rare). + synthetic_base.set_(example_alias.untyped_storage()) + else: + # Case where all of the aliases require gradients, and have the same _base. + synthetic_base = non_none_bases[0] + for other_base in non_none_bases[1:]: + assert ( + other_base is synthetic_base + ), "aot_autograd() does not yet handle non-differentiable view input mutations." + for alias in aliases_with_none_bases: + assert ( + alias is synthetic_base + ), "aot_autograd() does not yet handle non-differentiable view input mutations." + base_args.append(synthetic_base) + for curr_view_idx in aliased_input_indices: + curr_view = fwd_inputs[curr_view_idx] + base_idx = len(base_args) - 1 + # We store just enough info here so that we can regenerate the view later. + # Regeneration: curr_view._view_func(args[base_idx]) + inner_calling_convention_meta[curr_view_idx] = (base_idx, curr_view) + if len(base_args) == 0: + assert len(other_args) == len(fwd_inputs) + # If no synthetic bases are necessary, just return the original inputs. + return fwd_inputs, None + else: + # Otherwise, return: + # (1) The new args according to the updated calling convention: (synthetic_bases, other_args) + # (2) Metadata telling functionalization how to generate the inner argument list given the outer calling convention. + # We post-process it into a list, where meta[i] tells you info about the i'th argument in the inner calling convention. 
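# Before the post-processing below, a concrete standalone sketch of the
# storage -> synthetic-base trick described above: two disjoint tensors carved
# directly out of one storage have no ._base link, so a 1-D base is built from
# the storage via set_() and the aliases are regenerated from it with as_strided:
import torch

params = torch.arange(8.0)                         # pretend: a packed parameter buffer
storage = params.untyped_storage()
w1 = torch.empty(0).set_(storage, 0, (4,), (1,))   # aliases the storage, ._base is None
w2 = torch.empty(0).set_(storage, 4, (4,), (1,))
assert w1._base is None and w2._base is None

synthetic_base = torch.empty((0,), dtype=w1.dtype, device=w1.device)
synthetic_base.set_(storage)                       # always a 1-D tensor over the storage
assert synthetic_base.shape == (8,)
assert torch.equal(synthetic_base.as_strided(w1.size(), w1.stride(), w1.storage_offset()), w1)
assert torch.equal(synthetic_base.as_strided(w2.size(), w2.stride(), w2.storage_offset()), w2)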
+ args_to_functionalization = base_args + other_args + arg_to_old_idx_map = {arg: i for (i, arg) in enumerate(fwd_inputs)} + for i, other_arg in enumerate(other_args): + new_idx = len(base_args) + i + old_idx = arg_to_old_idx_map[other_arg] + inner_calling_convention_meta[old_idx] = new_idx + # post process into a list + post_processed_calling_convention_meta: List[ + Union[int, Tuple[int, torch.Tensor]] + ] = [-1 for _ in range(len(inner_calling_convention_meta))] + for k, v in inner_calling_convention_meta.items(): + post_processed_calling_convention_meta[k] = v + # Quick assert: every argument in the inner calling convention should be accounted for. + for x in post_processed_calling_convention_meta: + assert x != -1 + return args_to_functionalization, post_processed_calling_convention_meta diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/schemas.py b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/schemas.py new file mode 100644 index 0000000000000000000000000000000000000000..9099ced991a88c9e96e09f87bcfd72c821b385fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/schemas.py @@ -0,0 +1,696 @@ +""" +The various dataclasses, Enums, namedtuples etc used in AOTAutograd. This includes +input/output types, metadata, config, function signatures etc. +""" + +import collections +import functools +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Callable, Dict, List, NewType, Optional, Set, Tuple, Union + +import torch +import torch.utils._pytree as pytree +from torch._guards import Source +from torch._subclasses import FakeTensor +from torch._subclasses.fake_tensor import is_fake + +from .. import config + +from .functional_utils import _check_if_mutation_can_be_in_graph +from .utils import strict_zip + +zip = strict_zip + +OutputType = Enum( + "OutputType", + ( + # output is not an alias + "non_alias", + # output aliases an input + "alias_of_input", + # output **is** an input tensor + "is_input", + # output has a ._base tensor, which is a graph intermediate. + # We need to return its ._base as a graph output, + # so its requires_grad info is populated correctly. + # Instructs the runtime code to regenerate the current output + # from a base tensor, graph_intermediates[base_idx] + "alias_of_intermediate_save_as_output", + # Same as above; but we don't need to explicitly add its ._base + # as a graph output, because it already **is** a graph output. + "alias_of_intermediate", + # Same as above; but the output's ._base is **already** a user output. + # Instructs the runtime code to regenerate the current output from + # a base tensor, user_outputs[base_idx] + "alias_of_intermediate_base_is_user_output", + # See Note [Intermediate Bases Optimization] + "unsafe_view_alias", + # output is an alias, but has a custom autograd.Function backward. + # In this case, we don't want to do view-replay, since we won't be able to replay the custom function. + # Instead, we'll treat this output "normally", and trace its backward into the graph. + "custom_function_view", + ), +) + + +# This class stores info about every user output. 
+@dataclass(frozen=True) +class OutputAliasInfo: + # Tells us if this output is: + # (1) a regular (non-aliased) output + # (2) an alias of a forward input + # (3) **is** a forward input (special case of "alias_of_input") + # (4) an alias of an intermediate (aka an alias of an output of the inner traced forward) + # (5) an alias of an intermediate, that explicitly requires returning the intermediate + # as a graph output + # (6) an alias of an intermediate, where that intermediate is also a user output + output_type: OutputType + # The raw type of the output (torch.Tensor, SymInt, etc) + raw_type: type + # If (1) above, then + # - base_idx is None + # If (2) or (3) above, then + # - Tells us that the base of this alias is user_fwd_input[base_idx] + # (This is an index into the inputs *before* we make synthetic bases) + # If (4) or (5) above, then + # - Tells us that the base of this alias is output_graph_intermediates[base_idx] + # here, this refers to the index of the *direct* traced + # If (6) above, then: + # - Tells us that the base of this alias is output_user_fwds[base_idx] + # here, this refers to the index of the *direct* traced + base_idx: Optional[int] + # If it is a Tensor, what the dynamic dims are (otherwise is None) + dynamic_dims: Optional[Set[int]] + # requires_grad + requires_grad: bool + + +class MutationType(Enum): + NOT_MUTATED = 1 + MUTATED_IN_GRAPH = 2 + MUTATED_OUT_GRAPH = 3 + + +# This class tells us info about user inputs. +@dataclass(frozen=True) +class InputAliasInfo: + is_leaf: bool + mutates_data: bool + mutates_metadata: bool + mutations_hidden_from_autograd: bool + mutations_under_no_grad_or_inference_mode: bool + mutates_storage_metadata: bool + requires_grad: bool + keep_input_mutations: bool + + def __post_init__(self): + if self.mutates_storage_metadata: + # For convenience, we guarantee that this is always true. + # In practice, If we call .set_(), then at runtime there is no need + # to additionally fix up the tensor metadata, since our runtime + # call to inp.set_(updated_inp) will already have the right metadata + assert self.mutates_metadata + + @functools.cached_property + def mutation_type(self) -> MutationType: + if (not self.mutates_data) and (not self.mutates_metadata): + return MutationType.NOT_MUTATED + + if _check_if_mutation_can_be_in_graph( + self.keep_input_mutations, + self.mutates_data, + self.mutates_metadata, + self.mutations_hidden_from_autograd, + self.mutations_under_no_grad_or_inference_mode, + self.requires_grad, + ): + return MutationType.MUTATED_IN_GRAPH + + return MutationType.MUTATED_OUT_GRAPH + + +@dataclass +class SubclassCreationMeta: + """ + Used for AOTDispatch. + This dataclass gives us the information we need to reconstruct a tensor subclass + from our flat inputs. + Why is this important? The graph that we'd like to trace out contains flat tensor inputs, + But the user's original model may have subclass inputs and outputs. + So we need to wrap/unwrap subclasses as necessary to translate between the user's + view (subclass inps/outs), and the backend compiler's view (graph with no subclass args). + + Complications arise mostly from the fact that a subclass can hold more than one inner tensor; + So for a given subclass input/output, we need to carefully track which indices map + to the subclass tensor in the corresponding "dense-tensor-only" graph. 
+ """ + + # In the inner graph that only takes in dense tensor inputs, + # this maps to the first index of "tensors that should go in this subclass wrapper" + flat_tensor_start_idx: int + # The number of tensors that live in this subclass wrapper + arg_count: int + # Stores the original subclass itself. + # This is needed because we need the autograd metadata on the original subclass + # (this is guaranteed to be a wrapper subclass that holds a fake tensor, + # so holding onto this at runtime shouldn't leak memory) + original_subclass: torch.Tensor + # meta and inner_keys are produced by the subclass's __tensor_flatten__. + # We need to keep them around along with outer_size / outer_stride to plumb them + # into __tensor_unflatten__. + meta: Any + inner_keys: List[Any] + outer_size: Tuple[int, ...] + outer_stride: Tuple[int, ...] + + def creation_fn(self, all_args, *, is_runtime: bool): + curr_args = all_args[ + self.flat_tensor_start_idx : self.flat_tensor_start_idx + self.arg_count + ] + assert len(curr_args) == len( + self.inner_keys + ), f"inner_keys: {str(self.inner_keys)}. len(curr_args): {len(curr_args)}" + # NB: Sometimes we have real inner tensors and symbolic metadata. + # TODO: Resolve this so we always have matching real / symbolic tensors / metadata. + out = type(self.original_subclass).__tensor_unflatten__( # type: ignore[attr-defined] + dict(zip(self.inner_keys, curr_args)), + self.meta, + self.outer_size, + self.outer_stride, + ) + if not is_runtime: + # After wrapping up the inner dense tensors into a subclass, we need to make sure that our new wrapper + # has correct autograd metadata, since we'll be tracing through the autograd engine with the subclass. + # We don't trace through the autograd engine at runtime though, so no need + # to compute this extra metadata then! + torch._mirror_autograd_meta_to(self.original_subclass, out) # type: ignore[attr-defined] + + return out + + def __post_init__(self): + # sanity assert to make sure we don't leak memory + assert is_fake(self.original_subclass) + + +# This class encapsulates all aliasing + mutation info we need about the forward graph +# See a more detailed overview of the edge case handling at +# https://docs.google.com/document/d/19UoIh_SVrMy_b2Sx5ZaeOJttm6P0Qmyss2rdBuyfoic/edit +@dataclass(eq=False) +class ViewAndMutationMeta: + # length = # user inputs + # This gives us info about every input, and what sort of mutation happened to it (if any) + input_info: List[InputAliasInfo] + + # length = # user outputs + # This gives us info about every output (mostly around whether it aliases other tensors) + output_info: List[OutputAliasInfo] + + # length = the number of intermediate bases appended as outputs to the end of the forward graph. + # Note: this is not necessarily the same thing as: + # len([x for x in output_info if x.output_type == OutputType.alias_of_intermediate]) + # Because outputs might share a ._base, or an output's ._base might itself be + # another user output (in both cases, we won't redundantly append bases to the end of the graph) + num_intermediate_bases: int + + # For inference only: instructs us to keep data-only input mutations directly in the graph + keep_input_mutations: bool + + # length = (# inputs w data mutations) + (# user outputs that are non_aliasing tensors) + # + (# intermediate bases) + # These are the FakeTensor (or potential SymInt) outputs that we traced from our + # metadata pass of the user's forward function. 
+ # Their only use today is to pass them as a best-guess for tangents when tracing the joint. + # Stashing them as part of our "metadata" makes it simpler if we want to run our analysis + # pass once, and re-use the output throughout AOTAutograd + traced_tangents: List[Any] + + # Each of these is a list telling us about subclasses for the inputs/outputs/grad_outs + # They are used throughout AOTDispatch to tell us how to generate a list of subclass tensors, + # Given a (potentially larger) list of plain torch tensors. + + # Taking subclass_inp_meta as an example: + # subclass_inp_meta[i] = j (an int) tells us: + # "The i'th user input is not a subclass, and corresponds to inputs[j] of the plain-tensor graph." + # subclass_inp_meta[i] = SubclassCreationMeta(flat_tensor_start_idx=3, arg_count=2) + # "The i'th user input is subclass holding two inner tensors, which are + # inputs[3] and inputs[4] of the plain-tensor graph". + + # length = # user inputs + subclass_inp_meta: List[Union[int, SubclassCreationMeta]] + # So, the full set of outputs to the forward graph looks something like: + # (*mutated_inps, *user_outs, *intermediate_bases, *saved_for_bw_tensors) + # where the first 3 of those 4 can be subclasses + # (but not saved_for_bw tensors, since these are internal to the compiler + # and not user visible, so there's no point in wrapping/unwrapping them at runtime). + # This list contains subclass information on all of the fw graph outputs + # except for saved_for_bw_tensors. + subclass_fw_graph_out_meta: List[Union[int, SubclassCreationMeta]] + # length = # backward graph inputs + subclass_tangent_meta: List[Union[int, SubclassCreationMeta]] + # TODO: we should kill this + # (need to default it to not break internal) + is_train: bool = False + + num_symints_saved_for_bw: Optional[int] = None + + # The grad_enabled mutation that will be emitted in the runtime_wrapper epilogue + # NOTE: AOTAutograd will assume that the ambient `is_grad_enabled` is the grad mode + # that is intended to be in effect prior to running the graph, in keeping with + # equivalence to eager mode. It is the responsibility of upstream graph acquisition + # to reset the grad mode to its pre-graph value prior to calling aot_autograd. + grad_enabled_mutation: Optional[bool] = None + + # Keeps track of whether `torch.use_deterministic_algorithms` was turned on + # when the forward was run. If deterministic mode was turned off during the + # forward, but is turned on during the backward call, then an error is + # raised + deterministic: Optional[bool] = None + + # Map of effect type (ex. _EffectType.ORDERED) to token. If there are + # side-effectful operators, FunctionalTensorMode will populate this + # dictionary telling us how many tokens we will need during tracing. + tokens: Dict[Any, torch.Tensor] = field(default_factory=dict) + + def __post_init__(self): + # pre-compute the indices of the inputs that are mutated. + # When keep_input_mutations is set, we don't need to worry about our epilogue + # handling data-only mutations, because we keep them directly in the graph. 
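# As an aside, a toy model of the subclass_inp_meta convention documented above:
# an int indexes straight into the dense-tensor list, while a creation-meta entry
# consumes arg_count consecutive entries starting at flat_tensor_start_idx.
# Pure-python stand-ins, not the real SubclassCreationMeta:
from dataclasses import dataclass
from typing import List, Union

@dataclass
class ToyCreationMeta:                   # stand-in for SubclassCreationMeta
    flat_tensor_start_idx: int
    arg_count: int

def rebuild_user_args(dense_args: List[str],
                      metas: List[Union[int, ToyCreationMeta]]) -> List[object]:
    out: List[object] = []
    for m in metas:
        if isinstance(m, int):
            out.append(dense_args[m])    # plain tensor: direct index
        else:                            # subclass: wrap arg_count inner tensors
            start = m.flat_tensor_start_idx
            out.append(tuple(dense_args[start:start + m.arg_count]))  # ~ __tensor_unflatten__
    return out

dense = ["t0", "t1", "t2", "t3", "t4"]
metas = [0, 1, 2, ToyCreationMeta(flat_tensor_start_idx=3, arg_count=2)]
assert rebuild_user_args(dense, metas) == ["t0", "t1", "t2", ("t3", "t4")]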
+ + mutated_inp_runtime_indices = [ + i + for i, m in enumerate(self.input_info) + if (m.mutation_type == MutationType.MUTATED_OUT_GRAPH) + ] + + mutated_graph_handled_indices = [ + i + for i, m in enumerate(self.input_info) + if m.mutation_type == MutationType.MUTATED_IN_GRAPH + ] + self.mutated_graph_handled_indices = mutated_graph_handled_indices + self.num_mutated_graph_handled_indices = len(self.mutated_graph_handled_indices) + + mutated_graph_handled_indices_seen_by_autograd = [ + i + for i in mutated_graph_handled_indices + if not self.input_info[i].mutations_hidden_from_autograd + ] + + self.mutated_graph_handled_indices_seen_by_autograd = ( + mutated_graph_handled_indices_seen_by_autograd + ) + self.num_mutated_graph_handled_indices_seen_by_autograd = len( + self.mutated_graph_handled_indices_seen_by_autograd + ) + + aliased_out_indices = [ + i + for i, m in enumerate(self.output_info) + if m.output_type + not in [ + OutputType.non_alias, + OutputType.unsafe_view_alias, + OutputType.custom_function_view, + ] + ] + unsafe_view_out_indices = [ + i + for i, m in enumerate(self.output_info) + if m.output_type is OutputType.unsafe_view_alias + ] + + # This is pre-computed in post_init for perf. + # It contains the index of every element + # of input_info that corresponds to a mutation (data or metadata or both) + self.mutated_inp_runtime_indices = mutated_inp_runtime_indices + self.num_mutated_inp_runtime_indices = len(self.mutated_inp_runtime_indices) + + # This is pre-computed for perf. + # It contains the index of every element + # of output_info that corresponds to an alias (either of an input or intermediate) + self.aliased_out_indices = aliased_out_indices + self.unsafe_view_out_indices = unsafe_view_out_indices + self.num_outputs = len(self.output_info) + self.num_outputs_non_aliased = len( + [ + x + for x in self.output_info + if x.output_type + in [ + OutputType.non_alias, + OutputType.unsafe_view_alias, + OutputType.custom_function_view, + ] + ] + ) + self.num_outputs_aliased_to_inputs = len( + [ + x + for x in self.output_info + if x.output_type + in [ + OutputType.alias_of_input, + OutputType.is_input, + ] + ] + ) + self.num_unsafe_view_outputs = len(self.unsafe_view_out_indices) + self.num_outputs_aliased_to_intermediates = len( + [ + x + for x in self.output_info + if x.output_type + in [ + OutputType.alias_of_intermediate, + OutputType.alias_of_intermediate_save_as_output, + OutputType.alias_of_intermediate_base_is_user_output, + ] + ] + ) + self.num_outputs_aliased = ( + self.num_outputs_aliased_to_inputs + + self.num_outputs_aliased_to_intermediates + ) + + self.dynamic_outputs = any(o.dynamic_dims for o in self.output_info) + # See Note: [AOTAutograd Backward Guards] + # This is pre-computed for fast asserts on the types of our grad_outputs in the backward. + # Eventually, we should kill this and replace with real backward guards. + # (we want to precompute the "runtime" types, so replace FakeTensor with torch.Tensor) + self.output_types = [ + torch.Tensor if isinstance(x, FakeTensor) else type(x) + for x in self.traced_tangents + ] + + self.is_rng_op_functionalized = config.functionalize_rng_ops + # All of the above metadata is collected by tracing the fw function. + # However, extra outputs for rng offsets behave differently. Both fwd + # and bwd graphs have their own outputs for the total consumed offsets. + # Unlike mutated inputs, we don't have to worry about sending the right + # set of tensors between fwd and bwd. 
Fwd and bwd offsets are + # independent and simpler to handle. Therefore, we track them + # separately. + self.num_outputs_rng_offset = 1 if self.is_rng_op_functionalized else 0 + + # Our forward() returns both (mutated_inputs, outputs, output_intermediate_bases, saved_tensors, saved_symints) + self.num_forward_returns = ( + self.num_mutated_inp_runtime_indices + + self.num_outputs + + self.num_intermediate_bases + ) + # In case of functionalization of rng ops, the fw_module returns one + # additional output for rng offset. This rng offset is used right + # away to advance the rng state, and is not passed on to the raw + # outputs. However, we need to know the exact boundary to identify + # which tensors to be saved for the bwd graph. num_forward captures + # this information. + self.num_forward = self.num_forward_returns + self.num_outputs_rng_offset + + @property + def tensors_saved_for_backwards_slice(self): + assert self.num_symints_saved_for_bw is not None + if self.num_symints_saved_for_bw > 0: + return slice(self.num_forward, -self.num_symints_saved_for_bw) + else: + return slice(self.num_forward, None) + + @property + def symints_saved_for_backwards_slice(self): + assert self.num_symints_saved_for_bw is not None + if self.num_symints_saved_for_bw > 0: + return slice(-self.num_symints_saved_for_bw, None) + else: + return slice(0, 0) # empty slice + + def __eq__(self, other): + if not isinstance(other, ViewAndMutationMeta): + return NotImplemented + return ( + self.input_info == other.input_info + and self.output_info == other.output_info + and self.num_intermediate_bases == other.num_intermediate_bases + and self.keep_input_mutations == other.keep_input_mutations + and self.is_rng_op_functionalized == other.is_rng_op_functionalized + and self.num_outputs_rng_offset == other.num_outputs_rng_offset + and len(self.traced_tangents) == len(other.traced_tangents) + and all( + x.shape == y.shape and x.dtype == y.dtype + for x, y, in zip(self.traced_tangents, other.traced_tangents) + ) + ) + + +@dataclass(eq=False) +class SubclassMeta: + # A copy of all forward metadata, but computed on the *dense* tensor forward (after desugaring subclasses) + # So for example, if the user had a model containing two `TwoTensor` inputs, + # Then `SubclassMeta.fw_metadata.input_infos` would have length 4 here. + fw_metadata: ViewAndMutationMeta + + # Note: [Computing Subclass Metadata about grad_inputs] + # Given a list of flattened, plain tensor grad_inputs, this tells us how to reconstruct the grad_input subclasses + # + # You might think: why not just assume that all grad_inputs will have the same subclass-ness as the original inputs? + # (AOTAutograd generally assumes other properties, e.g. that grad_outputs are contiguous) + # + # This doesn't really work though. take this example: + # + # def f(DoubleTensor, DenseTensor): + # return DoubleTensor * DenseTensor + # + # In the above example, the .grad field of *both* DoubleTensor and DenseTensor will be a DoubleTensor. + # When we trace out a joint fw-bw graph, we'll end up returning two subclasses for the two grad_inputs. + # This means that our backward graph will return 4 outputs (two dense tensors for each DoubleTensor grad_input) + # and we need to properly store the metadata that tells us how to turn these 4 outputs back into DoubleTensors. + # + # Note that this info **cannot** easily be figured out from ViewAndMutationMeta. + # We can only compute this info by tracing the entire joint and examining the grad_inputs that we computed. 
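# Concretely, continuing the example above: tracing the joint for
# f(DoubleTensor, DenseTensor) produces a backward that returns four dense tensors
# (two inner tensors per DoubleTensor grad_input), and grad_input_metas would hold
# two SubclassCreationMeta entries with arg_count=2 each, describing how to regroup
# those four dense outputs into two DoubleTensor grad_inputs.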
+ # + # See Note: [AOTAutograd Backward Guards] + # This will also eventually require us to install backward guards, + # in case we made incorrect assumptions about the subclass-ness of our grad_outputs + # + # Optional field because we don't compute for inference graphs + grad_input_metas: Optional[List[Union[int, SubclassCreationMeta]]] + + def __init__(self): + # The fields in this class get set after its construction. + pass + + +# This class exists because: +# - the autograd.Function.forward() in aot autograd returns outputs that might alias inputs +# - we only care about the metadata on those aliases, so we can regenerate them. +# We do not want them to participate in the autograd.Function. +# We do that by wrapping them in an opaque class, so the autograd.Function +# does not know to treat them as tensors. +@dataclass(frozen=True) +class TensorAlias: + alias: torch.Tensor + + +@dataclass +class BackwardSignature: + """ + Provides information about the backward section of an exported + joint forward-backward graph. + For a particular fx GraphModule, this class contains information on: + (1) A mapping from each gradient (backwards output) to the parameter + it corresponds to (forward input) + (2) A mapping from each gradient (backwards output) to the user input + it corresponds to (forward input) + (3) Which of the forward outputs corresponds to the loss, that we backprop on. + + Each string name is the `node.name` of the corresponding node in the fx graph. + """ + + gradients_to_parameters: Dict[str, str] + gradients_to_user_inputs: Dict[str, str] + loss_output: str + + +GraphOutputName = NewType("GraphOutputName", str) +GraphInputName = NewType("GraphInputName", str) +FQN = NewType("FQN", str) + + +@dataclass +class GraphSignature: + """ + Provides information about an exported module. + For a particular fx GraphModule, this class contains information on: + (1) Which graph inputs are parameters, buffers, or user inputs + (2) (for params/buffers) a mapping from the name of each graph argument + to its parameter/buffer FQN in the original nn.Module. + (3) If there are input mutations, these are represented as extra outputs + in the fx GraphModule. We provide a mapping from these + extra output names to the names of the actual inputs. + (4) The pytree metadata on how to flatten/unflatten inputs and outputs. + The corresponding FX GraphModule only accepts and returns + pytree-flattened inputs/outputs. + (5) (Optionally) if the FX is a joint forward-backward graph, we provide + a signature on the backward section of the joint graph. + """ + + parameters: List[FQN] + buffers: List[FQN] + + user_inputs: List[GraphInputName] + user_outputs: List[GraphOutputName] + inputs_to_parameters: Dict[GraphInputName, FQN] + inputs_to_buffers: Dict[GraphInputName, FQN] + + # If the user's module mutates a buffer, + # it's represented in the graph as an extra graph output. + # This dict is a mapping from + # "graph outputs that correspond to updated buffers" + # to the FQN names of those mutated buffers. 
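# For example (illustrative names): if the user's module runs `self.buf.add_(1)`,
# the updated buffer appears as an extra graph output (say "add_1"), and this dict
# would contain {"add_1": "buf"}.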
+ buffers_to_mutate: Dict[GraphOutputName, FQN] + user_inputs_to_mutate: Dict[GraphOutputName, GraphInputName] + + in_spec: pytree.TreeSpec + out_spec: pytree.TreeSpec + + backward_signature: Optional[BackwardSignature] + + input_tokens: List[GraphInputName] + output_tokens: List[GraphOutputName] + + @classmethod + def from_tracing_metadata( + cls, + *, + in_spec: pytree.TreeSpec, + out_spec: pytree.TreeSpec, + graph_input_names: List[str], + graph_output_names: List[str], + view_mutation_metadata: ViewAndMutationMeta, + named_parameters: List[str], + named_buffers: List[str], + num_user_inputs: int, + num_user_outputs: int, + loss_index: Optional[int], + backward_signature: Optional[BackwardSignature], + ) -> "GraphSignature": + graph_inputs = graph_input_names + graph_outputs = graph_output_names + parameters = list(named_parameters) + buffers = list(named_buffers) + num_tokens = len(view_mutation_metadata.tokens) + + # Calling convention assumptions: + # (1) graph inputs = (input_tokens, params, buffers, user_inputs) + # (2) graph outputs = (output_tokens, mutated_inputs, user_outs, param_gradients) + # (If we are capturing an inference graph, this convention is identical + # except that param_gradients is empty) + # See Note [Side-Effectful Tokens in AOTAutograd] for information on tokens + + # Address input calling conventions: + start, stop = 0, num_tokens + input_tokens = graph_inputs[start:stop] + + start, stop = stop, stop + len(parameters) + inputs_to_parameters = dict(zip(graph_inputs[start:stop], parameters)) + + start, stop = stop, stop + len(buffers) + inputs_to_buffers = dict( + zip( + graph_inputs[start:stop], + buffers, + ) + ) + + start, stop = stop, stop + num_user_inputs + user_inputs = graph_inputs[start:stop] + + # We should've gone through all the inputs now + assert len(graph_inputs) - stop == 0 + + # Address output calling conventions: + start, stop = 0, num_tokens + output_tokens = graph_outputs[start:stop] + + names = [*input_tokens, *parameters, *buffers, *user_inputs] + mutations = [] + for idx, input_info in enumerate(view_mutation_metadata.input_info): + if input_info.mutates_data: + # Only buffers can be mutated, not parameters + assert idx >= len(parameters) + mutations.append(names[idx + num_tokens]) + + assert len(mutations) == view_mutation_metadata.num_mutated_inp_runtime_indices + + start, stop = ( + stop, + stop + view_mutation_metadata.num_mutated_inp_runtime_indices, + ) + outputs_to_mutations = dict(zip(graph_outputs[start:stop], mutations)) + + user_inputs_to_mutate = {} + buffers_to_mutate = {} + for output_name, mutation_name in outputs_to_mutations.items(): + if mutation_name in user_inputs: + user_inputs_to_mutate[output_name] = mutation_name + else: + assert mutation_name in buffers + buffers_to_mutate[output_name] = mutation_name + + start, stop = stop, stop + num_user_outputs + user_outputs = graph_outputs[start:stop] + + unused_outputs = len(graph_outputs) - stop + if backward_signature is not None: + unused_outputs -= len(backward_signature.gradients_to_parameters) + len( + backward_signature.gradients_to_user_inputs + ) + assert unused_outputs == 0 + + return GraphSignature( + parameters=parameters, # type: ignore[arg-type] + buffers=buffers, # type: ignore[arg-type] + user_inputs=user_inputs, # type: ignore[arg-type] + user_outputs=user_outputs, # type: ignore[arg-type] + inputs_to_buffers=inputs_to_buffers, # type: ignore[arg-type] + inputs_to_parameters=inputs_to_parameters, # type: ignore[arg-type] + 
user_inputs_to_mutate=user_inputs_to_mutate, + buffers_to_mutate=buffers_to_mutate, # type: ignore[arg-type] + in_spec=in_spec, + out_spec=out_spec, + backward_signature=backward_signature, + input_tokens=input_tokens, # type: ignore[arg-type] + output_tokens=output_tokens, # type: ignore[arg-type] + ) + + +@dataclass +class AOTConfig: + """ + Configuration for AOTDispatcher + """ + + fw_compiler: Callable + bw_compiler: Callable + partition_fn: Callable + decompositions: Dict[Callable, Callable] + num_params_buffers: int + aot_id: int + keep_inference_input_mutations: bool + is_export: bool = False + no_tangents: bool = False + dynamic_shapes: bool = False + aot_autograd_arg_pos_to_source: Optional[List[Source]] = None + inference_compiler: Optional[Callable] = None + enable_log: bool = True + # this is always false outside of export. + pre_dispatch: bool = False + + def __post_init__(self): + if self.pre_dispatch: + assert self.is_export, "Can only have pre_dispatch IR for export." + + +SubclassTracingInfo = collections.namedtuple( + "SubclassTracingInfo", + ["plain_tensor_trace_fn", "plain_tensor_args", "maybe_subclass_meta"], +) diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/subclass_utils.py b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/subclass_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cee3cf6e4edab5a263a136427da0830a0dab3c4d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/subclass_utils.py @@ -0,0 +1,295 @@ +""" +This file contains utilities for tracing through __torch_dispatch__ based tensor subclasses and modes. +AOTAutograd's responsibility is to trace through all pytorch capabilities that live in the pytorch dispatcher, +and this includes tensor subclasses that implement __torch_dispatch__. +""" + +from typing import Any, List, Optional, Tuple, Union + +import torch.utils._pytree as pytree + +from torch import Tensor +from torch.utils._python_dispatch import is_traceable_wrapper_subclass + +from .schemas import MutationType, SubclassCreationMeta, ViewAndMutationMeta +from .utils import strict_zip + +zip = strict_zip + + +def requires_subclass_dispatch(args, fw_metadata: ViewAndMutationMeta) -> bool: + args_flattened = pytree.arg_tree_leaves(*args) + any_subclass_args = any( + is_traceable_wrapper_subclass(x) + for x in args_flattened + if isinstance(x, Tensor) + ) + from torch._functorch._aot_autograd.schemas import SubclassCreationMeta + + any_subclass_outputs = any( + type(x) is SubclassCreationMeta for x in fw_metadata.subclass_fw_graph_out_meta + ) + # This tells us whether or not we need to perform any unwrapping/wrapping of tensor subclasses at runtime. 
+ return any_subclass_args or any_subclass_outputs + + +# Given a flat list of arguments, some of which may be tensor subclasses, +# computes metadata about "how to reconstruct the current list of subclasses, +# if we were given their flattened dense tensors instead" +def create_subclass_meta( + curr_args: Union[List[Any], Tuple[Any, ...]], +) -> List[Union[int, SubclassCreationMeta]]: + idx = 0 + infos: List[Union[int, SubclassCreationMeta]] = [] + for a in curr_args: + if isinstance(a, Tensor) and is_traceable_wrapper_subclass(a): + attrs, meta = a.__tensor_flatten__() # type: ignore[attr-defined] + start_idx = idx + cnt = len(attrs) + curr_cnt = cnt + infos.append( + SubclassCreationMeta( + flat_tensor_start_idx=start_idx, + arg_count=curr_cnt, + original_subclass=a, + meta=meta, + inner_keys=attrs, + outer_size=a.shape, + outer_stride=a.stride(), + ) + ) + else: + infos.append(idx) + cnt = 1 + idx += cnt + return infos + + +# Output structure: +# - List[Tensor] if tracing an inference graph +# - Tuple[List[Tensor], List[Tensor]] if tracing a joint graph. +# This function effectively concats each inner list of subclass tensors +# into a (potentially longer) list of inner tensors. +# +# This function takes in a pytree of arguments and unwraps any tensor subclasses. +# Annoyingly, we can't use pytrees to perform the unwrapping, because unwrapping returns +# a list of tensors that we would then need to concat together. +# Instead, we specialize the logic for the inference vs. joint graph case. +# NOTE: this function is hot, since we unwrap tensor subclass inputs at runtime +def unwrap_tensor_subclasses(wrapped_args, *, is_joint_structure: bool): + def concat_inner_tensors_from_subclasses(xs): + xs_inner = [] + for x in xs: + if isinstance(x, Tensor) and is_traceable_wrapper_subclass(x): + attrs, _ = x.__tensor_flatten__() # type: ignore[attr-defined] + xs_inner += [getattr(x, attr) for attr in attrs] + else: + xs_inner += [x] + return xs_inner + + if is_joint_structure: + assert isinstance(wrapped_args, tuple) and len(wrapped_args) == 2 + assert isinstance(wrapped_args[0], (tuple, list)) and isinstance( + wrapped_args[1], (tuple, list) + ) + unwrapped_args_fw = concat_inner_tensors_from_subclasses(wrapped_args[0]) + unwrapped_args_tangents = concat_inner_tensors_from_subclasses(wrapped_args[1]) + unwrapped_args = (unwrapped_args_fw, unwrapped_args_tangents) + else: + assert isinstance(wrapped_args, (list, tuple)) + unwrapped_args_fw = concat_inner_tensors_from_subclasses(wrapped_args) + unwrapped_args = unwrapped_args_fw + return unwrapped_args + + +# Turns a flattened list of tensor arguments into (maybe) subclass tensors. +# This function is used both at trace time and runtime, so we have an is_runtime flag telling us which context we're in. 
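For reference, the wrapping/unwrapping in this module leans entirely on the traceable-wrapper-subclass contract (`__tensor_flatten__` / `__tensor_unflatten__`). Below is a minimal round-trip sketch, an editor's illustration rather than code from this file, assuming `x` is any tensor for which `is_traceable_wrapper_subclass(x)` returns True:

    def _flatten_unflatten_roundtrip(x: Tensor) -> Tensor:
        # __tensor_flatten__ returns the attribute names of the inner dense tensors
        # plus opaque metadata; __tensor_unflatten__ rebuilds the wrapper from them.
        assert is_traceable_wrapper_subclass(x)
        inner_names, ctx = x.__tensor_flatten__()
        inner_tensors = {name: getattr(x, name) for name in inner_names}
        # outer_size / outer_stride are passed through so the rebuilt wrapper keeps the
        # same external shape and strides (mirroring SubclassCreationMeta.creation_fn).
        return type(x).__tensor_unflatten__(inner_tensors, ctx, x.shape, x.stride())

wrap_tensor_subclasses below performs the same reconstruction, but driven by the SubclassCreationMeta records collected at trace time.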
+def wrap_tensor_subclasses( + unwrapped_args: Union[Tuple[Any, ...], List[Any]], + *, + subclass_metas: List[Union[int, SubclassCreationMeta]], + num_fw_outs_saved_for_bw: Optional[int] = None, + is_runtime: bool = False, +) -> Tuple[Any, ...]: + wrapped_args = [] + num_args_tallied = 0 + for subclass_meta in subclass_metas: + if isinstance(subclass_meta, int): + wrapped_args.append(unwrapped_args[subclass_meta]) + num_args_tallied += 1 + else: + assert isinstance(subclass_meta, SubclassCreationMeta) + wrapped_args.append( + subclass_meta.creation_fn(unwrapped_args, is_runtime=is_runtime) + ) + num_args_tallied += subclass_meta.arg_count + + # Note: [Partitioner handling for Subclasses, Part 2] + # At the beginning of AOTAutograd, we collect metadata on the inputs and outputs of the user fw, + # to figure out which inputs/outputs are subclasses, and how to reconstruct the subclasses after flattening them. + # + # When this function is called at runtime in the forward, + # we have been passed a list of (flattened) dense-tensor fw-outs, and need to reconstruct any subclass fw outs. + # + # One reasonable question that you should ask: when should the dense_tensor -> subclass_tensor wrapping happen? + # Answer: we do it **inside of our compiled autograd.Function**. + # This seems like morally the right place: autograd happens above subclass desugaring, + # so autograd should see actual tensor subclasses at runtime, and not flattened dense tensors. + # + # This causes a tricky interaction though: when we run the min-cut partitioner to divvy up the joint graph + # into a forward and backward graph, we end up with some activations that show up as extra outputs + # in the compiled forward graph, that are **not** user outputs. + # These activations are not visible to the user, and so there's no need for us to wrap them back into subclasses. + # + # On top of that, when we first computed subclass metadata (in `run_functionalized_fw_and_collect_metadata`), + # we computed subclass metadata on every forward output, but this did **not** include activations + # created by the partitioner. + # as a result, `unwrapped_args` here will correspond to (*unwrapped_user_fw_outs, *activations), + # but `subclass_metas` will only correspond to subclass metatadata on `user_fw_outs`. + # We then need to make sure that we return (*wrapped_user_fw_outs, *activations). + if num_fw_outs_saved_for_bw is not None: + assert len(unwrapped_args) == num_args_tallied + num_fw_outs_saved_for_bw, ( + f"Expected the number actual unwrapped-subclass outputs {len(unwrapped_args)} to equal " + f"the number of args calculated from subclasses ({num_args_tallied}) plus the number of " + f"additional activations saved for the backward pass ({num_fw_outs_saved_for_bw})" + ) + activations = unwrapped_args[num_args_tallied:] + if isinstance(wrapped_args, tuple) and isinstance(activations, tuple): + return wrapped_args + activations + return tuple(list(wrapped_args) + list(activations)) + else: + assert len(unwrapped_args) == num_args_tallied + return tuple(wrapped_args) + + +# Given a bunch of "dense" tensor arguments, this function (potentially) wraps them into tensor subclasses. +# This function carefully handles the inference vs. 
joint cases: +# - when is_joint_structure is True, args is (primals, tangents) +# - when is_joint_structure is False, args is [*primals] +def wrap_tensor_subclasses_maybe_joint( + unwrapped_args, *, is_joint_structure: bool, meta: ViewAndMutationMeta +) -> Union[Tuple[Any, ...], List[Any]]: + # Since this function is re-used for both inference and joint graphs, + if is_joint_structure: + assert isinstance(unwrapped_args, tuple) and len(unwrapped_args) == 2 + assert isinstance(unwrapped_args[0], (tuple, list)) and isinstance( + unwrapped_args[1], (tuple, list) + ) + primals, tangents = unwrapped_args[0], unwrapped_args[1] + wrapped_primals = wrap_tensor_subclasses( + primals, subclass_metas=meta.subclass_inp_meta + ) + wrapped_tangents = wrap_tensor_subclasses( + tangents, subclass_metas=meta.subclass_tangent_meta + ) + return (wrapped_primals, wrapped_tangents) + else: + wrapped_args = wrap_tensor_subclasses( + unwrapped_args, subclass_metas=meta.subclass_inp_meta + ) + return wrapped_args + + +# TODO: UNUSED. delete? +def create_metadata_for_subclass(meta: ViewAndMutationMeta) -> ViewAndMutationMeta: + # input infos + input_info = [] + for inp, subclass_meta in zip(meta.input_info, meta.subclass_inp_meta): + num_inps = 1 if isinstance(subclass_meta, int) else subclass_meta.arg_count + for _ in range(num_inps): + input_info.append(inp) + + # output infos + output_info = [] + subclass_out_meta_user_outs_only = meta.subclass_fw_graph_out_meta[ + meta.num_mutated_inp_runtime_indices : + ] + if meta.num_intermediate_bases > 0: + subclass_out_meta_user_outs_only = subclass_out_meta_user_outs_only[ + : -meta.num_intermediate_bases + ] + # sanity assert + assert len(meta.output_info) == len(subclass_out_meta_user_outs_only) + # Assume that the information on the output is shared by all of its inner tensors. + for out, subclass_meta in zip(meta.output_info, subclass_out_meta_user_outs_only): + num_outs = 1 if isinstance(subclass_meta, int) else subclass_meta.arg_count + for _ in range(num_outs): + output_info.append(out) + + # A bit hacky, but we don't actually care about all of the metadata here. 
+ # This metadata is used **underneath** both autograd and subclass de-sugaring, + # So all we really care about is stuff like: + # - num inputs/outputs (needed by the partitioner) + # - input mutations (**not** used today, since we don't handle input mutations inside the subclass, + # although we should handle this eventually) + # TODO: add a test case to assert we error when this happens, instead of getting silent correctness + num_intermediate_bases = None + keep_input_mutations = meta.keep_input_mutations + traced_tangents = None + subclass_inp_meta = None + subclass_fw_graph_out_meta = None + subclass_tangent_meta = None + + metadata = ViewAndMutationMeta( + input_info=input_info, # type: ignore[arg-type] + output_info=output_info, # type: ignore[arg-type] + num_intermediate_bases=num_intermediate_bases, # type: ignore[arg-type] + keep_input_mutations=keep_input_mutations, # type: ignore[arg-type] + traced_tangents=traced_tangents, # type: ignore[arg-type] + subclass_inp_meta=subclass_inp_meta, # type: ignore[arg-type] + subclass_fw_graph_out_meta=subclass_fw_graph_out_meta, # type: ignore[arg-type] + subclass_tangent_meta=subclass_tangent_meta, # type: ignore[arg-type] + ) + return metadata + + +def compute_inner_mutated_inp_indices_from_subclass_meta( + fw_metadata: ViewAndMutationMeta, + inner_metadata: ViewAndMutationMeta, +) -> List[int]: + # Note: [Recomputing subclass mutation handling] + # + # Generally, if a subclass requires grad, its components will not require grad. + # But for the purposes of tracking returned tensors, we should treat those component + # tensors as if they require grad. + # + # For example, if the subclass tensor requires grad and will be mutated in a way that + # requires us to handle the mutation outside of the graph, we need to return it + # from the forward graph. The inner_meta data won't consider the component tensors + # as if they need to be returned, because they don't require grad; but really, we + # should handle those tensors the same way we handle the subclass tensor itself; i.e. + # if we'd include the subclass tensor as part of the outputs, then we should also + # include the component tensors. + # + # To do this, we patch num_mutated_inp_runtime_indices below by expanding the inputs + # from the outer subclass tensors and propagating + + updated_input_info = [] + inner_idx = 0 + if not fw_metadata.subclass_inp_meta: + # Sometimes we don't have subclass info, e.g. 
synthetic_base codepaths + return inner_metadata.mutated_inp_runtime_indices + assert len(fw_metadata.subclass_inp_meta) == len(fw_metadata.input_info) + for outer_idx, inp_meta in enumerate(fw_metadata.subclass_inp_meta): + if isinstance(inp_meta, int): + assert outer_idx < len(fw_metadata.input_info) + if inner_metadata is not None: + assert inner_idx < len(inner_metadata.input_info) + assert ( + inner_metadata.input_info[inner_idx] + == fw_metadata.input_info[outer_idx] + ) + updated_input_info.append(fw_metadata.input_info[outer_idx]) + inner_idx += 1 + else: + for _ in range(inp_meta.arg_count): + updated_input_info.append(fw_metadata.input_info[outer_idx]) + inner_idx += 1 + if inner_metadata is not None: + assert len(inner_metadata.input_info) == len(updated_input_info) + + return [ + i + for i, inp in enumerate(updated_input_info) + if inp.mutation_type == MutationType.MUTATED_OUT_GRAPH + ] diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/traced_function_transforms.py b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/traced_function_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..73938ddd08b2d5736779e28ad1934e4deb88017c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/traced_function_transforms.py @@ -0,0 +1,698 @@ +""" +This module is responsible for transforming functions to be traced into a form +that is easier for the downstream infra (e.g. Autograd, FX, AOTAutograd analysis) +to handle. + +It does so by: +1. functionalization (including RNG functionalzation) +2. creating a joint graph when required +3. transforming mutations into extra outputs +4. dispatching subclasses +""" + +import warnings +from contextlib import nullcontext +from functools import wraps +from typing import Any, Callable, List, Tuple, Union +from unittest.mock import patch + +import torch +import torch.fx.traceback as fx_traceback +import torch.utils._pytree as pytree +from torch import Tensor +from torch._decomp.decompositions_for_rng import PhiloxStateTracker +from torch._guards import detect_fake_mode +from torch._prims_common import CUDARngStateHelper +from torch.fx.experimental.symbolic_shapes import definitely_false, sym_eq +from torch.nn.utils import stateless + +from .. import config +from .collect_metadata_analysis import run_functionalized_fw_and_collect_metadata +from .functional_utils import ( + from_fun, + has_data_mutation, + has_metadata_mutation, + is_fun, + sync_functional_tensor, + to_fun, +) +from .logging_utils import setup_stacktrace_preservation_hooks +from .schemas import ( + AOTConfig, + MutationType, + OutputType, + SubclassMeta, + SubclassTracingInfo, + ViewAndMutationMeta, +) +from .subclass_utils import ( + create_subclass_meta, + requires_subclass_dispatch, + unwrap_tensor_subclasses, + wrap_tensor_subclasses_maybe_joint, +) +from .utils import maybe_to_fresh_input + + +# This function returns a new function that returns mutated inputs as outputs. +# if keep_data_input_mutations is set, then we assume that data-only mutations +# will be left in the graph, and we only return metadata-mutated inputs as outputs. +def fn_input_mutations_to_outputs( + fn: Callable, + meta: ViewAndMutationMeta, + keep_data_input_mutations: bool, +) -> Any: + @wraps(fn) + def inner_fn(*args): + outs = fn(*args) + assert len(meta.output_info) == len(outs) + # The compiled fw will return mutated input tensors, *including* metadata-only mutation. 
+ # However, if keep_data_input_mutations is set, the compiled fw only needs to return metadata-mutated inputs. + # (because data-only input mutations are handled directly in the compiled graph) + mutated_inputs_to_return = [ + x for (i, x) in enumerate(args) if i in meta.mutated_inp_runtime_indices + ] + return *mutated_inputs_to_return, *outs + + return inner_fn + + +# This function takes in a fn with external aliasing and mutation, +# and returns a new fn with no external aliasing and mutation, +# as needed for autograd. +# The main transformations are: +# - Return mutated inputs as extra outputs +# - Clone mutated inputs that require gradients, +# because autograd will require us to pass the pre-mutated inputs into autograd.grad +# - Return intermediate bases of outputs as additional outputs, +# needed to appease autograd.Function +# The new function returns: +# (1) The updated outputs +# (2) A boolean mask of len(new_fn_outputs), +# that can be used to tell autograd.grad which outputs should get tangents +# if we trace the backward. +def fn_prepped_for_autograd( + fn: Callable, + meta: ViewAndMutationMeta, +) -> Any: + @wraps(fn) + def inner_fn(*args): + args_maybe_cloned = [ + maybe_to_fresh_input(i, t, meta) for i, t in enumerate(args) + ] + + outs = fn(*args_maybe_cloned) + assert isinstance(outs, (tuple, list)) + outs = list(outs) + assert len(meta.output_info) == len(outs) + + mutated_inputs_to_return = [ + x + for (i, x) in enumerate(args_maybe_cloned) + if i in meta.mutated_inp_runtime_indices + ] + + intermediate_bases = [] + for i, (o, info) in enumerate(zip(outs, meta.output_info)): + if info.output_type == OutputType.alias_of_intermediate_save_as_output: + intermediate_bases.append(o._base) + + assert meta.num_intermediate_bases == len(intermediate_bases) + + # the compiled forward should return (mutated_inputs, user_outs, intermediate_bases) + fw_outs_to_return = *mutated_inputs_to_return, *outs, *intermediate_bases + + # Also return a boolean mask specifying which outputs to this function will be used as tangents + mutated_inputs_grad_mask = [ + meta.input_info[meta.mutated_inp_runtime_indices[i]].mutates_data + and meta.input_info[meta.mutated_inp_runtime_indices[i]].requires_grad + for (i, x) in enumerate(mutated_inputs_to_return) + ] + + # Pass any (non-aliased) outputs in as tangents, since they'll be returned as outputs in the fw + # For outputs that are aliases of intermediates, we will have returned the output's _base as an output in the graph instead, + # which we *should* send to grad() + output_grad_mask = [ + meta.output_info[i].output_type + in [ + OutputType.non_alias, + OutputType.unsafe_view_alias, + OutputType.custom_function_view, + ] + # Also, only tensor outputs should participate in the backward + # (in particular, Symint outputs in the forward graph shouldn't get tangents) + and issubclass(meta.output_info[i].raw_type, Tensor) + and meta.output_info[i].requires_grad + for (i, x) in enumerate(outs) + ] + + intermediate_base_grad_mask = [True for _ in range(len(intermediate_bases))] + + out_grad_mask = ( + mutated_inputs_grad_mask + output_grad_mask + intermediate_base_grad_mask + ) + assert len(out_grad_mask) == len(fw_outs_to_return) + + # Take care to grab and sync the updated inputs from primals_after_cloning (the inputs we actually mutate!) 
+ # and not primals (the preserved inputs, pre-mutation, that we pass to grad()) + # This is annoying: our joint function needs to be aware of functionalization + # (syncing mutated inputs before calling autograd.grad()) + # In theory, we could make the autograd engine do this automatically, although that probably isn't any cleaner. + for arg in args_maybe_cloned: + if not isinstance(arg, Tensor): + continue + sync_functional_tensor(arg) + + return fw_outs_to_return, out_grad_mask + + return inner_fn + + +# Given a fn, computes the joint. +# NOTE: fn is expects the following behavior: +# (1) fn() needs to return a tuple of (outs, mask), +# where `mask` tells us which outputs are meant to have tangents. +# we don't know this info automatically, because we don't actually want to blindly +# compute tangents for every output that requires grad. +# Specifically, outputs that alias inputs won't participate in the backward and get tangents. +# (2) fn() cannot mutate any inputs that require gradient. +# otherwise, when we compute autograd.grad(), we will not take those input mutations into account +# (the way this is handled is that we ensure any inputs that normally get mutated are cloned first) +def create_joint(fn: Callable, *, aot_config: AOTConfig) -> Any: + def inner_fn(primals: List[Any], tangents: List[Any]): + outs, tangent_mask = fn(*primals) + assert len(tangent_mask) == len(outs) + outs_to_grad = [ + o for needs_tangent, o in zip(tangent_mask, outs) if needs_tangent + ] + assert len(outs_to_grad) == len(tangents) + + # Get the inputs that need gradients + grad_primals = [] + inputs_needs_grads = [] + # Note that we're not using primals here, + # being carefully not to pass any mutated inputs into autograd.grad() + for p in primals: + is_grad_tensor = isinstance(p, Tensor) and p.requires_grad + inputs_needs_grads.append(is_grad_tensor) + if is_grad_tensor: + grad_primals.append(p) + + # Get the outputs that need gradients + needed_outs = [] + needed_tangents = [] + for out, tangent in zip(outs_to_grad, tangents): + if isinstance(out, Tensor) and out.requires_grad: + # A bit sketchy, but fixes e.g. test_aot_autograd_exhaustive_matmul_cpu_float32 + # The issue is that we are sensitive to decomps that don't accurately maintain + # their output's _base.shape compared to eager mode, and this helps mitigate a bit. + # The not definitely_false is also sketchy; if unbacked + # symints are involved, we're just going to assume that the + # decomps setup the base shape correctly + needed_outs.append( + out + if not definitely_false(sym_eq(out.shape, tangent.shape)) + else out.view(tangent.shape) + ) + needed_tangents.append(tangent) + + setup_stacktrace_preservation_hooks([out.grad_fn for out in needed_outs]) + + if config.functionalize_rng_ops: + PhiloxStateTracker.mark_beginning_of_backward() + backward_out: Tuple[Tensor, ...] = tuple() + # Call the backwards pass + if grad_primals: + with fx_traceback.preserve_node_meta(): + # for full graph export, we always export a joint graph where we assume no tangents are needed. 
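# (In the no_tangents case the single traced output is the scalar loss, so
# autograd.grad() is called without grad_outputs and relies on the implicit
# gradient of 1.0 that autograd creates for a scalar output.)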
+ if aot_config.no_tangents: + assert len(needed_tangents) == 1 and needed_tangents[0].numel() == 1 + backward_out = torch.autograd.grad( + needed_outs, + grad_primals, + allow_unused=True, + ) + else: + backward_out = torch.autograd.grad( + needed_outs, + grad_primals, + grad_outputs=needed_tangents, + allow_unused=True, + ) + backward_out_iter = iter(backward_out) + return outs, [ + next(backward_out_iter) if i else None for i in inputs_needs_grads + ] + + def inner_fn_with_anomaly(*args): + with fx_traceback.preserve_node_meta(), warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Anomaly Detection has been enabled.") + with torch.autograd.detect_anomaly(check_nan=False): + return inner_fn(*args) + + return inner_fn_with_anomaly + + +def create_functionalized_rng_ops_wrapper(func, args, trace_joint=True) -> Any: + # Functionalization of rng ops changes the calling convention of the joint graph. + # It goes from (primals, tangents) to (seed, offset, primals, tangents) + # At runtime, we pass on the current seed and offset. This is hidden from + # the user. + fake_mode = detect_fake_mode() + if fake_mode is None: + fake_mode = nullcontext() + + def override_get_rng_state(device: Union[int, str, torch.device] = "cuda"): + out = PhiloxStateTracker.get_state_as_tensor() + return out + + def override_set_rng_state(x, device: Union[int, str, torch.device] = "cuda"): + PhiloxStateTracker.set_state_from_tensor(x) + + def append_rng_offsets(args): + if trace_joint: + # args signature before: Tuple(fwd_outputs), Tuple(bwd_outputs) + # args signature after: Tuple(fwd_outputs, new_fwd_rng_offset), Tuple(bwd_offset, new_bwd_rng_offset) + return ( + (*args[0], PhiloxStateTracker.get_updated_fwd_offset()), + (*args[1], PhiloxStateTracker.get_updated_bwd_offset()), + ) + else: + # args signature before: Tuple(fwd_outputs) + # args signature after: Tuple(fwd_outputs, new_fwd_rng_offset) + return (*args, PhiloxStateTracker.get_updated_fwd_offset()) + + def traced_joint( + primals, tangents, fwd_seed, fwd_base_offset, bwd_seed, bwd_base_offset + ): + with patch("torch.cuda.get_rng_state", override_get_rng_state), patch( + "torch.cuda.set_rng_state", override_set_rng_state + ): + return append_rng_offsets(func(primals, tangents)) + + def traced_forward(*primals_fwd_seed_fwd_base_offset): + # The signature is (*primals, seed, offset) + with patch("torch.cuda.get_rng_state", override_get_rng_state), patch( + "torch.cuda.set_rng_state", override_set_rng_state + ): + return append_rng_offsets(func(*primals_fwd_seed_fwd_base_offset[:-2])) + + if trace_joint: + # Get the current seed and offset to setup tracing. + fwd_seed, fwd_base_offset = CUDARngStateHelper.get_torch_state_as_tuple( + fake_mode + ) + bwd_seed, bwd_base_offset = CUDARngStateHelper.get_torch_state_as_tuple( + fake_mode + ) + PhiloxStateTracker.record_state(fwd_seed, fwd_base_offset, "forward") + PhiloxStateTracker.record_state(bwd_seed, bwd_base_offset, "backward") + return traced_joint, ( + *args, + fwd_seed, + fwd_base_offset, + bwd_seed, + bwd_base_offset, + ) + else: + # Get the current seed and offset to setup tracing. + fwd_seed, fwd_base_offset = CUDARngStateHelper.get_torch_state_as_tuple( + fake_mode + ) + PhiloxStateTracker.record_state(fwd_seed, fwd_base_offset, "forward") + return traced_forward, (*args, fwd_seed, fwd_base_offset) + + +# This creates the final function that we want to trace using make_fx(), +# in both aot_dispatch_autograd and aot_dispatch_base. 
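# As a conceptual illustration (not code from this file) of what functionalization
# plus the copy_() handling buys us: a user function like
#     def f(x):
#         x.mul_(2)
#         return x + 1
# is traced as the pure computation x_updated = x * 2; out = x_updated + 1, with
# x_updated either returned as an extra graph output (replayed by the runtime
# epilogue) or written back via x.copy_(x_updated) inside the graph when
# keep_input_mutations is set.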
+# Preconditions: +# - fn corresponds to the user's fw function +# - fn arguments have been flattened, duplicate arguments have been handled +# - In the returned function, the "primals" arguments *includes* synthetic bases. +# This function does the work of functionalizing the input function, +# and performing copy_() calls at the end of the function if `keep_input_mutations` is set. +# The function returned has signature that is either: +# (1) "traced_fn(primals: List[Any])" if trace_joint is False +# (2) "traced_fn(primals: List[Any], tangents: List[Any])" if trace_joint is True +# Returns a new (functionalized) function, and updated arguments to call it with. +def create_functionalized_fn( + fn, + args, + *, + meta: ViewAndMutationMeta, + aot_config: AOTConfig, + trace_joint: bool, +) -> Any: + @wraps(fn) + def _functionalized_f_helper(*args): + # See Note [Disabling Functionalize TLS Above Python Functionalization] + disable_above = torch._C._ExcludeDispatchKeyGuard( + torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize) + ) + + # See Note [Side-Effectful Tokens in AOTAutograd] + if trace_joint: + assert ( + isinstance(args, tuple) + and len(args) == 2 + and isinstance(args[0], (list, tuple)) + ) + tokens = args[0][: len(meta.tokens)] + actual_args = args[0][len(meta.tokens) :] + args = (actual_args, args[1]) + else: + tokens = args[: len(meta.tokens)] + args = args[len(meta.tokens) :] + assert all(token.numel() == 0 for token in tokens) + + with disable_above: + # Wrap inputs into functional wrappers + f_args = pytree.tree_map(to_fun, args) + f_tokens = pytree.tree_map(to_fun, tokens) + + # Populate the current FunctionalTensorMode with the tokens per + # operator. See Note [FunctionalTensorMode is Stateful] + functional_tensor_mode = ( + torch.utils._python_dispatch._detect_functional_mode() + ) + assert functional_tensor_mode is not None + for i, k in enumerate(meta.tokens.keys()): + functional_tensor_mode._tokens[k] = f_tokens[i] + + # Run the joint + f_outs = fn(*f_args) + + # Return both the tokens and the outputs + # See Note [Side-Effectful Tokens in AOTAutograd] + f_outs = (*functional_tensor_mode._tokens.values(), *f_outs) + + if trace_joint: + # We support a limited amount of mutation of graph inputs during the backward pass. + # (This is used e.g. by Float8, which needs to update buffers during the backward pass) + # Here, we perform extra checks for primals that were mutated in the **backward** + # We're doing the checks here instead of doing them with the rest of the input mutation handling because: + # - We need to detect inputs that were mutated in the backward **separately** from mutations that happened + # during the forward, because the handling is different: some input mutations from the the forward + # can be only handled in a fw-only runtime epilogue, and in theory if we wanted to handle those same + # types of mutations in the backward we would need a bw-only runtime epilogue. + # - We could in theory have our analysis pass differentiate mutations in the fw from mutations in + # the bw by running our analysis first on the fw-only graph, and then on the joint graph. This would + # require an extra round of tracing though, so it's more efficient to do in-line here. + assert ( + isinstance(args, tuple) + and len(args) == 2 + and isinstance(args[0], (list, tuple)) + ) + # Only look at mutations that happened to forward inputs (e.g. 
fw buffers that were saved for bw) + primals_before = args[0] + primals_after = pytree.tree_map(from_fun, f_args[0]) + for f_inpt, before, after, inpt_info in zip( + f_args[0], primals_before, primals_after, meta.input_info + ): + # Ban metadata mutations on fw inputs during the bw + if not inpt_info.mutates_metadata: + assert not has_metadata_mutation( + f_inpt, before, check_only_storage_mutation=False + ), "Found a graph input that had its metadata mutated in the backward. This is not supported" + # Allow data mutations on fw inputs during the bw, but only if they do not require grad + # So we can guarantee that we can keep the mutations in the graph + if has_data_mutation(f_inpt) and not inpt_info.mutates_data: + assert ( + not inpt_info.requires_grad + ), "Found a graph input that requires_grad and was mutated in the backward. This is not supported" + # Otherwise, put the mutation in the graph + before.copy_(after) + # Now that we covered mutations to *forward* inputs during the backward, + # we also need to cover mutations to *backward-only* inputs during the backward (e.g. mutation to a grad_out). + # Today, we will just error in all cases of this happening unless someone needs us to support it. + tangents_before = args[1] + tangents_after = pytree.tree_map(from_fun, f_args[1]) + for f_inpt, before, after in zip( + f_args[1], tangents_before, tangents_after + ): + assert not has_metadata_mutation( + f_inpt, before, check_only_storage_mutation=False + ) and not has_data_mutation( + f_inpt + ), "Found an input to the backward that was mutated during the backward pass. This is not supported" + + if aot_config.keep_inference_input_mutations: + # Note: This is a bit annoying. There's a layering issue here, where: + # (1) functionalization needs to operate on **synthetic base** inputs, before unpacking them into the "real" inputs. + # (2) For keep_input_mutations, we support tracing a call to copy_() directly on mutated inputs. + # However, we **only** want to support this for inputs that have data-only (and no metadata) mutations, + # because inductor (and backends in generally) would prefer not to see these (e.g. as_strided_(), resize_()). + # This makes it pretty difficult for this logic to operate on synthetic bases. + # (3) In addition, there are cases where it's significantly cheaper to perform the copy on the individual + # (unpacked) input aliases, instead of the synthetic base. + # Example case where (3) could be important: + # + # def f(x, y): + # x.mul_(2) + # y.mul_(3) + # return x, y + # a = torch.ones(1'000'000) + # x, y = out(a[0:9], a[1:10]) + # + # It would be much better to add copy_() calls into the graph for the two tiny slices, instead of materializing + # a giant "updated synthetic base" and copying into a's entire storage. + # + # For now, we are pessimistically not performing the optimization from (3); + # we will materialize an "updated" synthetic base, and copy it back to the synthetic input base. + # This allows us to factor aot autograd much more nicely, since only one area of the code needs to worry + # about synthetic bases. + for i, (inpt_old, inpt_f) in enumerate( + zip(args, f_args) if not trace_joint else zip(args[0], f_args[0]) + ): + if not isinstance(inpt_f, torch.Tensor): + continue + assert is_fun(inpt_f) + inpt_new = from_fun(inpt_f) + if meta.input_info[i].mutation_type == MutationType.MUTATED_IN_GRAPH: + # We found an input that had a (data-only) mutation. 
+ # Since keep_input_mutations is set, we need to faithfully apply a copy_() + # so the compiler will see the input mutation in the graph. + if meta.input_info[i].mutations_hidden_from_autograd: + # Hidden from autograd = run under no_grad, **and** don't bump VC + with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter( + inpt_old + ): + inpt_old.copy_(inpt_new) + elif meta.input_info[i].mutations_under_no_grad_or_inference_mode: + # Under no_grad = run under no_grad (we still bump the VC though) + # (inference_mode will also bump the VC, as long as the tensor in question + # was created outside of inference_mode) + with torch.no_grad(): + inpt_old.copy_(inpt_new) + else: + inpt_old.copy_(inpt_new) + + # When an output tensor is a functionalized mutated input, and we + # were able to move the mutation in to the graph then we can return + # the mutated input directly. This prevents duplicating the + # tensors contents. + flat_outs, outs_spec = pytree.tree_flatten(f_outs) + flat_outs = [from_fun(o) for o in flat_outs] + num_outs = len(meta.output_info) + + for i, outp in enumerate(flat_outs[:num_outs]): + info = meta.output_info[i] + if info.output_type != OutputType.is_input: + continue + + assert info.base_idx is not None + if ( + meta.input_info[info.base_idx].mutation_type + == MutationType.MUTATED_IN_GRAPH + ): + flat_outs[i] = args[info.base_idx] + return pytree.tree_unflatten(flat_outs, outs_spec) + + return pytree.tree_map(from_fun, f_outs) + + # Kinda annoying, but needed to make sure that the fx graph we trace out has "primals" + # and "tangents" as its input names (which are special-cased by the partitioner) + # TODO (tmanlaibaatar) revisit this if we ever need to turn on non-strict joint graph export + def joint_helper(primals, tangents): + return _functionalized_f_helper(primals, tangents) + + helper = joint_helper if trace_joint else _functionalized_f_helper + if config.functionalize_rng_ops: + # Setup the wrapper for functionalization of rng ops + helper, args = create_functionalized_rng_ops_wrapper(helper, args, trace_joint) + + # Additionally pass in tokens as inputs + # See Note [Side-Effectful Tokens in AOTAutograd] + additional_token_inputs = [torch.tensor([])] * len(meta.tokens) + if trace_joint: + args = ([*additional_token_inputs, *args[0]], *args[1:]) + else: + args = [*additional_token_inputs, *args] + + return helper, args + + +# Given a function operating on Subclass -> Subclass, returns an function that operates on Tensor -> Tensor +# Also returns: +# - the new set of arguments to pass into this function (now that tensor subclasses have been eliminated) +# - the updated ViewAndMutationMeta for this dense -> dense function. +# The other important arguments are: +# - flat_fn_maybe_joint: when is_joint_structure=True, this is the joint fw-bw function. +# when is_joint_structure=False, this is just the forward function. +# - fw_only: this is *always* the forward-only function. +# Why do we need this? We need to collect updated ViewAndMutationMeta on our new dense -> dense functions. +# In particular, we need this to tell the partitioner how many dense forward outputs there are. 
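As a hedged sketch of a hypothetical call site (names like `flat_fn`, `flat_args`, and `fw_only_fn` are placeholders, not identifiers from this file): the caller traces `plain_tensor_trace_fn` with `plain_tensor_args`, and, when subclass dispatch was required, treats `maybe_subclass_meta.fw_metadata` as the metadata describing the dense graph.

    def _dispatch_and_pick_metadata(flat_fn, flat_args, fw_metadata, fw_only_fn):
        info = aot_dispatch_subclass(
            flat_fn,
            flat_args,
            is_joint_structure=False,
            meta=fw_metadata,
            fw_only=fw_only_fn,
        )
        # When no subclasses are involved, maybe_subclass_meta is None and the original
        # metadata still describes the (already dense) graph.
        inner_meta = (
            fw_metadata
            if info.maybe_subclass_meta is None
            else info.maybe_subclass_meta.fw_metadata
        )
        return info.plain_tensor_trace_fn, info.plain_tensor_args, inner_meta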
+def aot_dispatch_subclass( + flat_fn_maybe_joint, + args: List[Any], + *, + is_joint_structure: bool, + meta: ViewAndMutationMeta, + fw_only: Callable, +) -> SubclassTracingInfo: + # Skip logic if we don't need to trace through any subclasses + req_subclass_dispatch = requires_subclass_dispatch(args, meta) + if not req_subclass_dispatch: + return SubclassTracingInfo( + plain_tensor_trace_fn=flat_fn_maybe_joint, + plain_tensor_args=args, + maybe_subclass_meta=None, + ) + + # TODO: add subclass guards (later PR). + + # What's going on here? We need to compute subclass metadata about the outputs of the joint (grad_inputs). + # Annoying: we don't know the grad input metas until we're in the middle of tracing the joint, + # so we set it later, while we're tracing the joint (see inner_fn() below). + # Another option would be to run our run_functionalized_fw_and_collect_metadata() function + # directly on the joint, but this would hurt compile time (adding yet another pass through the joint). + subclass_meta = SubclassMeta() + + def inner_fn(fn, args, *, use_trace_joint: bool): + # Step 1: wrap tensor inputs into subclasses if necessary + all_args = wrap_tensor_subclasses_maybe_joint( + args, is_joint_structure=use_trace_joint, meta=meta + ) + + # Step 2: call the inner function, with our (maybe subclass) inputs + wrapped_outs = fn(*all_args) + + if use_trace_joint: + # See Note: [Computing Subclass Metadata about grad_inputs] + # We also stash subclass info on our grad_inputs, if we're tracing the joint. + nonlocal subclass_meta + assert isinstance(wrapped_outs, tuple) and len(wrapped_outs) == 2 + # Don't need fw outs since we already have subclass metadata on them + grad_inputs = wrapped_outs[1] + subclass_meta.grad_input_metas = create_subclass_meta(grad_inputs) + + # Step 3: Unwrap any subclass outputs back into dense tensors + unwrapped_outs = unwrap_tensor_subclasses( + wrapped_outs, is_joint_structure=use_trace_joint + ) + return unwrapped_outs + + def joint_fn(primals, tangents): + return inner_fn(flat_fn_maybe_joint, (primals, tangents), use_trace_joint=True) + + def fw_fn(*primals): + return inner_fn(flat_fn_maybe_joint, primals, use_trace_joint=False) + + def metadata_fn(*primals): + return inner_fn(fw_only, primals, use_trace_joint=False) + + args_unwrapped = unwrap_tensor_subclasses( + args, is_joint_structure=is_joint_structure + ) + + if is_joint_structure: + primals_unwrapped = args_unwrapped[0] + fn_to_trace = joint_fn + else: + primals_unwrapped = args_unwrapped + fn_to_trace = fw_fn + + # Note: [Partitioner handling for Subclasses, Part 1] + # The way the partitioner works is that: + # (1) we pass is a single graph containing the joint fw/bw, + # where the # of graph outputs corresponds to # fw_outputs + # grad_inputs + # (2) The partitioner accepts an arguments, num_fwd_outputs, + # and assumes that the first "num_fwd_outputs" graph outputs correspond + # to outputs of the forward graph. + # How do tensor subclasses enter the picture? + # the num_fwd_outputs in the final graph is actually non-trivial to compute, + # because it can be influenced by input mutations and intermediate bases. + # So we compute it by inspecting the current ViewAndMutationMeta object. + # However, the original ViewAndMutationMeta that we computed was created + # on the subclass -> subclass graph, + # which can have a different number of outputs than the dense -> dense graph. 
+ # That's why we createa a fresh metadata object on the dense -> dense function here, + # and plumb it back up to the partitioner. + # See Note: [Partitioner handling for Subclasses, Part 2] for more info. + meta_updated = run_functionalized_fw_and_collect_metadata( + metadata_fn, + keep_input_mutations=meta.keep_input_mutations, + is_train=meta.is_train, + )(*primals_unwrapped) + + subclass_meta.fw_metadata = meta_updated + + return SubclassTracingInfo( + plain_tensor_trace_fn=fn_to_trace, + plain_tensor_args=args_unwrapped, + maybe_subclass_meta=subclass_meta, + ) + + +class PropagateUnbackedSymInts(torch.fx.Interpreter): + def run_node(self, n: torch.fx.Node): + import sympy + + result = super().run_node(n) + # TODO: handle Tensor returns + if "example_value" in n.meta: + if isinstance(result, torch.SymInt) and isinstance( + result.node.expr, sympy.Symbol + ): + torch._check(result == n.meta["example_value"]) + + return result + + +def create_functional_call(mod, params_spec, params_len, store_orig_mod=False): + # Redundant with dynamo, but worth having in case this gets invoked elsewhere. + # https://github.com/pytorch/pytorch/issues/103569 + + def functional_call(*args, **kwargs): + with stateless._reparametrize_module( + mod, pytree.tree_unflatten(args[:params_len], params_spec) + ): + if isinstance(mod, torch.fx.GraphModule): + with fx_traceback.preserve_node_meta(), warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Anomaly Detection has been enabled." + ) + with torch.autograd.detect_anomaly(check_nan=False): + out = PropagateUnbackedSymInts(mod).run( + *args[params_len:], **kwargs + ) + else: + out = mod(*args[params_len:], **kwargs) + + if not isinstance(out, (tuple, list)): + raise RuntimeError( + "Graph output must be a tuple(). This is so that we can avoid " + "pytree processing of the outputs. Please change the module to " + "have tuple outputs or use aot_module instead." + ) + return out + + # Note [Preserving the nn module stack metadata during export non-strict mode] + # This path is currently only used by the non-strict export flow, + # where we cannot rely on dynamo to preserve nn stack metadata in our captured graph. + # Instead, we stash the original user nn module here, and rely on `make_fx` to grab + # this stashed module and use it to track nn module stack metadata + if store_orig_mod and not hasattr(functional_call, "_orig_mod"): + functional_call._orig_mod = mod # type: ignore[attr-defined] + + return functional_call diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/utils.py b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8c787f219a0b8d775b477ea122d9e838c7395805 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/utils.py @@ -0,0 +1,226 @@ +""" +Contains various utils for AOTAutograd, including those for handling collections. 
+""" + +import dataclasses +import warnings +from contextlib import nullcontext +from functools import wraps +from typing import Any, Callable, List, Optional, Tuple + +import torch +import torch.utils._pytree as pytree +from torch.fx.experimental._backward_state import BackwardState +from torch.fx.experimental.proxy_tensor import py_sym_types + +KNOWN_TYPES = [ + torch.Tensor, + BackwardState, + int, + str, + float, + bool, + type(None), + *py_sym_types, +] + +original_zip = zip + + +def strict_zip(*iterables, strict=True, **kwargs): + if not strict: + return original_zip(*iterables, **kwargs) + + shortest_length = min(len(it) for it in iterables) + for iterable in iterables: + if len(iterable) != shortest_length: + raise ValueError( + "The iterables have different lengths and strict mode is enabled." + ) + + return original_zip(*iterables, **kwargs) + + +def _get_symint_hints(exprs): + """ + Get the hints of a list/tuple of int/SymInt. + """ + if isinstance(exprs, (list, tuple)): + return type(exprs)(_get_symint_hints(e) for e in exprs) + elif isinstance(exprs, torch.SymInt): + return exprs.node.shape_env.size_hint(exprs.node.expr) + else: + return exprs + + +def partial_flatten_asdict(obj: Any) -> Any: + if dataclasses.is_dataclass(obj): + return { + field.name: getattr(obj, field.name) for field in dataclasses.fields(obj) + } + elif isinstance(obj, (list, tuple)): + return obj.__class__([partial_flatten_asdict(item) for item in obj]) + elif isinstance(obj, dict): + return {k: partial_flatten_asdict(v) for k, v in obj.items()} + else: + return obj + + +def normalize_as_list(x): + if isinstance(x, tuple): + return list(x) + elif isinstance(x, list): + return x + return [x] + + +def _get_autocast_states(): + return [ + torch.is_autocast_enabled(), + torch.is_autocast_cpu_enabled(), + torch.get_autocast_gpu_dtype(), + torch.get_autocast_cpu_dtype(), + torch.is_autocast_cache_enabled(), + ] + + +def make_boxed_func(f): + def g(args): + return f(*args) + + g._boxed_call = True # type: ignore[attr-defined] + return g + + +def make_boxed_compiler(compiler): + @wraps(compiler) + def f(fx_g, inps): + out_f = compiler(fx_g, inps) + fx_g = make_boxed_func(out_f) + return fx_g + + return f + + +def call_func_at_runtime_with_args(f, args, steal_args=False, disable_amp=False): + if not steal_args: + args = list(args) + assert isinstance(args, list) + + context = torch._C._DisableAutocast if disable_amp else nullcontext + with context(): + if hasattr(f, "_boxed_call"): + out = normalize_as_list(f(args)) + else: + # TODO: Please remove soon + # https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670 + warnings.warn( + "Your compiler for AOTAutograd is returning a function that doesn't take boxed arguments. " + "Please wrap it with functorch.compile.make_boxed_func or handle the boxed arguments yourself. " + "See https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670 for rationale." + ) + out = normalize_as_list(f(*args)) + return out + + +# Inspired by autodidax (thanks!) +class PytreeThunk: + spec: Optional[pytree.TreeSpec] = None + # These are some kinda dumb microoptimizations that save about 3-4 us of overhead. + is_simple: Optional[ + bool + ] = None # if the output spec is a tuple/list, we won't bother unflattening it. 
+ is_really_simple: Optional[bool] = None # if the output spec is a LeafSpec + + def set(self, spec: pytree.TreeSpec) -> None: + assert self.spec is None or self.spec == spec + assert spec is not None + self.spec: pytree.TreeSpec = spec + if self.spec.type in {tuple, list} and all( + child.is_leaf() for child in spec.children_specs + ): + self.is_simple = True + if self.spec.is_leaf(): + self.is_really_simple = True + + def unflatten(self, x: List[Any]) -> Any: + if self.is_really_simple: + return x[0] + if self.is_simple: + return x + assert self.spec is not None + return pytree.tree_unflatten(x, self.spec) + + +# Creates a function that returns flattened inputs and outputs +# Also returns the output tree spec, which is needed to recover the "unflattened" +# output tree structure later. +def create_tree_flattened_fn(fn, args, kwargs=None) -> Tuple[Callable, PytreeThunk]: + if kwargs is None: + kwargs = {} + # Save the args_spec for flat_tensor_args to unflatten while tracing + _, tensor_args_spec = pytree.tree_flatten((args, kwargs)) + out_spec = PytreeThunk() + + def flat_fn(*flat_args): + # The input are flattened tensor args. Prepare the args in the + # order that original function expects. Add static args as well. + # They will appear as tensor constants in the traced graph. + nonlocal out_spec + args, kwargs = pytree.tree_unflatten(flat_args, tensor_args_spec) + tree_out = fn(*args, **kwargs) + flat_out, spec = pytree.tree_flatten(tree_out) + for i in flat_out: + is_known_type = False + for j in KNOWN_TYPES: + if isinstance(i, j): + is_known_type = True + break + if not is_known_type: + raise RuntimeError( + f"Found {type(i)} in output, which is not a known type. " + "If this type holds tensors, you need to register a pytree for it. " + "See https://github.com/pytorch/functorch/issues/475 for a brief " + "explanation why. If you don't need to register a pytree, please " + "leave a comment explaining your use case and we'll make this more " + "ergonomic to deal with" + ) + out_spec.set(spec) + return flat_out + + # Can't use functools.wraps here because the wrapper has different + # calling convention + if hasattr(fn, "_orig_mod"): + flat_fn._orig_mod = fn._orig_mod # type: ignore[attr-defined] + + return flat_fn, out_spec + + +# This function takes in a tensor t, and returns one of t, t.view(), or t.clone(). +# When tracing the joint forward + backward, for any inputs in the graph that are mutated, +# we need to clone them first (and similarly for metadata-only mutations, we need to view them first). +# The idea is that when we trace the backward, we need to pass in the *original* primals +# to autograd.grad(), before they were mutated. +# Note: when we have synthetic base inputs, we need to clone them *before* creating views off of them. +# This means that "idx" here represents the index of the (potentially) synthetic base. +# What we need to do is: +# (1) map the current (post-synthetic-base calling convention) input argument index +# to int index pre-synthetic-base-calling-convention. +# (2) There could be multiple, if this index corresponds to a synthetic base +# that has multiple input aliases. +# (3) If any of those corresponding inputs get metadata mutations, then we clone the base. +def maybe_to_fresh_input(idx, t, meta): + if not isinstance(t, torch.Tensor): + return t + if idx in meta.mutated_inp_runtime_indices: + # We only need to bother cloning mutated inputs that participate in autograd. 
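+        # Rough illustration (hypothetical user code, not from this file): for
+        #     def f(x):  # x requires grad
+        #         x.mul_(2)
+        #         return x + 1
+        # the joint trace receives x.clone() as the primal, so autograd.grad()
+        # sees x's value from *before* the in-place mutation.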
+ mutated_inp_idx = meta.mutated_inp_runtime_indices.index(idx) + if meta.input_info[idx].requires_grad and meta.input_info[idx].mutates_data: + # Make sure the primal we pass to autograd.grad() + # sees the tensor before the mutation + return t.clone() + if meta.input_info[idx] and meta.input_info[idx].mutates_metadata: + # Make sure the primal we pass to autograd.grad() + # sees the tensor before the metadata mutation + return t.view(t.shape) + return t diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py b/venv/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py new file mode 100644 index 0000000000000000000000000000000000000000..e53bbeabb7c199d2332d1b2d8e59af64fe960fde --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py @@ -0,0 +1,1246 @@ +# mypy: ignore-errors + +import itertools +from contextlib import nullcontext +from functools import partial, wraps +from typing import Any, Callable, Dict, List, Optional, Tuple +from unittest.mock import patch + +import torch +import torch.nn as nn +import torch.utils._pytree as pytree +import torch.utils.dlpack +from torch import Tensor +from torch._dispatch.python import enable_python_dispatcher +from torch._dynamo import compiled_autograd +from torch._dynamo.utils import dynamo_timed, preserve_rng_state +from torch._guards import detect_fake_mode +from torch._subclasses import FakeTensor, FakeTensorMode +from torch.fx.experimental.proxy_tensor import make_fx +from torch.fx.experimental.symbolic_shapes import ( + ShapeEnv +) +from torch.utils._python_dispatch import is_traceable_wrapper_subclass +from torch._decomp.decompositions_for_rng import PhiloxStateTracker, rng_decompositions +from . import config +from .partitioners import default_partition + +from ._aot_autograd.utils import ( # noqa: F401 + strict_zip, + _get_symint_hints, + KNOWN_TYPES, + partial_flatten_asdict, + normalize_as_list, + _get_autocast_states, + make_boxed_func, + make_boxed_compiler, + call_func_at_runtime_with_args, + create_tree_flattened_fn, + maybe_to_fresh_input, +) +from ._aot_autograd.logging_utils import ( # noqa: F401 + graph_being_compiled, + nth_graph, + model_name, + set_model_name, + get_aot_compilation_context, + get_aot_graph_name, + get_graph_being_compiled, + track_graph_compiling, + callback_set, + setup_stacktrace_preservation_hooks, + describe_input, + format_guard_bug_msg, +) +from ._aot_autograd.functional_utils import ( # noqa: F401 + is_fun, + to_fun, + from_fun, + sync_functional_tensor, + has_metadata_mutation, + has_data_mutation, + are_all_mutations_hidden_from_autograd, + are_all_mutations_under_no_grad_or_inference_mode, + gen_alias_from_base, + assert_functional_graph, + _check_if_mutation_can_be_in_graph, +) +from ._aot_autograd.schemas import ( # noqa: F401 + OutputType, + OutputAliasInfo, + MutationType, + InputAliasInfo, + SubclassCreationMeta, + ViewAndMutationMeta, + SubclassMeta, + TensorAlias, + BackwardSignature, + GraphOutputName, + GraphInputName, + FQN, + GraphSignature, + AOTConfig, +) +from ._aot_autograd.subclass_utils import ( # noqa: F401 + requires_subclass_dispatch, + unwrap_tensor_subclasses, + wrap_tensor_subclasses, + wrap_tensor_subclasses_maybe_joint, + create_metadata_for_subclass, +) +from ._aot_autograd.collect_metadata_analysis import ( # noqa: F401 + run_functionalized_fw_and_collect_metadata, +) +from ._aot_autograd.input_output_analysis import ( # noqa: F401 + remove_dupe_metadata, + create_synthetic_base_metadata, + 
_tensors_definitely_do_not_overlap, + compute_overlapping_inputs, + create_graph_signature, +) +from ._aot_autograd.traced_function_transforms import ( # noqa: F401 + fn_input_mutations_to_outputs, + fn_prepped_for_autograd, + create_functionalized_fn, + create_functionalized_rng_ops_wrapper, + aot_dispatch_subclass, + create_functional_call, + create_joint, +) +from ._aot_autograd.runtime_wrappers import ( # noqa: F401 + create_runtime_wrapper, + functionalized_rng_runtime_epilogue, + aot_dispatch_subclass_wrapper, + aot_wrapper_dedupe, + aot_wrapper_synthetic_base, + merge_view_inputs, +) +from ._aot_autograd.dispatch_and_compile_graph import ( # noqa: F401 + aot_dispatch_base_graph, + aot_dispatch_autograd_graph, +) +from ._aot_autograd.jit_compile_runtime_wrappers import ( # noqa: F401 + aot_dispatch_base, + aot_dispatch_autograd, +) + +zip = strict_zip + +# This global counter increments every time we compile a graph with +# AOTAutograd. You can use this to correlate runtime error messages +# with compile time (e.g., if you get an error at runtime saying +# compiled graph 3 failed, you can set a breakpoint at compile time +# for this graph number to investigate further at compile time.) +# +# NB: this is different from get_aot_compilation_context, which tracks +# each underlying graph that is compiled. In contrast, AOT_COUNTER +# corresponds to top-level invocations of aot_module/aot_function; +# one counter is allocated per entire compiled block (but this block +# may involve compiling multiple subgraphs; e.g., for forwards/backwards) +AOT_COUNTER = itertools.count() + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# AOT Autograd contains a pretty non-trivial amount of logic to handle edge cases around aliasing and mutation +# that are external to the graph (they show up as side effects in some way when you run the graph). +# +# Take a look at `test_aotdispatch.py TestAOTAutograd.test_input_mutation*` tests for some examples functions +# and what they're compiled graphs looks like. +# Below is a very long comment detailing several edge cases, and showing how AOT Autograd handles them. +# +# Note [AOT Autograd: input data mutations] +# +# If we compile a function that mutates inputs, then those input mutations are real side effects +# that a user expects to see after running the compiled graph. +# However, the graph that we want to send to a backend needs to be *entirely* functional. +# The way we reconcile this difference is that we remove the mutations completely from the graph that we compile +# but we update the graph to return (updated_inputs, user_outputs). +# In the epilogue that runs after the compiled graph is executed, we copy the updated inputs back to the originals. +# +# Example: original user code: +# def f(x): +# x.mul_(2) +# out = x.mul(3) +# return out +# +# After AOT Autograd compiles, we end up with a: +# (a) compiled graph +# (b) autograd.Function.forward() method, that executes the compiled graph +# (c) wrapper function, that calls the autograd.Function.forward() and performs the epilogue +# +# The output of (a, b, c) are all written below. 
+# +# def compiled_forward_graph(x): +# x_updated = x.mul(2) +# out = x_updated.mul(3) +# return x_updated, out +# +# # x_updated gets a gradient in the compiled backward +# def compiled_backward_graph(grad_x_updated, grad_out): +# grad_x = ... +# return grad_x +# +# def autograd.Function.forward(x): +# x_updated, out = compiled_forward_graph(x) +# return x_updated, out +# +# def compiled_wrapper(x): +# x_updated, out = autograd.Function.apply(x) +# x.copy_(x_updated) +# return out +# +# Another important thing to note is that updated inputs (due to data mutations) *do* participate +# in the compiled backward graph! Since the compiled forward graph gets N extra outputs +# (due to updated inputs showing up as graph outputs), +# The compiled backward gets an additional N inputs. +# That way, during the x.copy_(x_updated) bit in the epilogue, gradients will flow from the updated input +# back to the original input. + + +# Note [AOT Autograd: input metadata mutations] +# +# For the same reason as input mutations, we also don't put input metadata mutations in the graph. +# Instead, we return the updated version of the input (a view), and mutate the input's metadata outside of the graph +# +# Example: original user code: +# def f(x): +# x.t_() +# out = x.mul(3) +# return out +# +# AOT Autograd output (compiled graph, autograd.Function.forward(), wrapper function): +# def compiled_forward_graph(x): +# x_updated = x.t() +# out = x_updated.mul(3) +# return x_updated, out +# +# # x_updated does *not* get a gradient in the compiled backward +# def compiled_backward_graph(grad_out): +# grad_x = ... +# return grad_x +# +# def autograd.Function.forward(x): +# x_updated, out = compiled_forward_graph(x) +# return x_updated, out +# +# def compiled_wrapper(x): +# x_updated, out = autograd.Function.apply(x) +# x.as_strided_(x_updated) +# return out + + +# Note [AOT Autograd: outputs aliasing inputs or intermediates!] +# +# AOT Autograd needs special handling for outputs that alias graph inputs or intermediates! +# Why? +# (1) autograd.Function.forward() has a limitation, where views that returned in the forward cannot later be mutated. +# (2) views don't need to be compiled in the graph anyway - it's cheap to generate them outside of the compiled graph, +# in an epilogue. +# For outputs that alias inputs, we do the following: +# (a) *still* return the aliased output as a graph output +# (b) In the AOT Autograd wrapper/epilogue, we don't return that aliased output. Instead, we use it to regenerate the output. +# +# For outputs that alias *intermediates*, we do the following: +# (a) Return the output in the compiled forward, **and** return it's ._base (a graph intermediates) as an output in the forward +# (b) Use (output, graph_intermediate) to regenerate the alias, and return that to the user (instead of the compiled fw output). +# You might wonder why we return the aliased output directly in the graph (and making the graph compute it), +# only to not return it and instead generate a fresh alias off of the intermediate, +# instead of (say) just storing metadata about the size/stride of the output somewhere to generate the alias. There are two reasons: +# (1) Getting the actual alias tensor allows us to use view-replay to generate the alias, instead of an as_strided() call +# (2) Inductor (and other backends) are free to change the memory format of graph outputs, if it results in better performance. 
+# This can result in problems if a user later tries to .view() that output expecting it to have one set of strides, +# when it has a different set of strides. +# By including the view op directly in the graph, inductor takes that into account when deciding what memory format +# the graph intermediate should be. +# +# Another important thing to note is how our traced backward() graph handles aliases. +# (this applies to outputs aliasing inputs, outputs aliasing intermediates, +# *and* updated inputs returned in the compiled forward due to metadata-only mutations). +# Any outputs that alias (either inputs or intermediates) do NOT participate in the compiled backward graph +# It would be wasteful to include them in the compiled backward(), because we regenerate them eagerly +# at the end of the forward. +# +# Example: original user code: +# def f(x): +# out1 = x.t() +# intermediate = x.mul(2) +# out2 = intermediate.view(-1) +# return out1, out2 +# +# AOT Autograd output (compiled graph, autograd.Function.forward(), wrapper function): +# def compiled_forward_graph(x): +# out1 = x.t() +# intermediate = x.mul(2) +# out2 = intermediate.view(-1) +# # the compiled graph also returns the intermediate +# return out1, out2, intermediate +# +# # intermediate gets a gradient in the compiled backward. +# # both output aliases (out1 and out2) do not. +# def compiled_backward_graph(grad_intermediate): +# grad_x = ... +# return grad_x +# +# def autograd.Function.forward(x): +# out1, out2, intermediate = compiled_forward_graph(x) +# return out1, out2, intermediate +# +# def compiled_wrapper(x): +# out1, out2, intermediate = autograd.Function.apply(x) +# # regenerate out1 from the input +# out1_regenerated = out1._view_func(x) +# # regenerate out1 from the intermediate +# out2_regenerated = out2._view_func(intermediate) +# return out1_regenerated, out2_regenerated + + +# Note [AOT Autograd: mutations to inputs that alias other inputs] +# +# Another edge case that is (only partially) handled today is when an input is mutated, but itself aliases another input. +# AOT Autograd needs to **ensure** that functionalization knows that the two inputs are aliased to each other. +# That way, when the aliased input is accessed later in the graph, functionalization knows to "update" the alias +# given the mutation that occurred. +# +# This is handled by updating the calling convention: we create a "synthetic base" that becomes a new input +# in the compiled function, and we regenerate the original (aliased) inputs directly off of the base +# inside of the compiled function. +# +# This logic is fully encapsulated in aot_wrapper_synthetic_base() +# +# Example: original user code: +# def f(x, x_view): +# x.mul_(2) +# out = x * x_view +# return out +# f(x, x.view(-1)) +# +# AOT Autograd output (compiled graph, autograd.Function.forward(), wrapper function): +# def compiled_forward_graph(base) +# x = generate_x(base) +# x_view = generate_x_view(base) +# x_updated = x.mul(2) +# x_view_updated = x_updated.view(-1) +# out = x_updated * x_view_updated +# return x_updated, out +# +# # The calling convention change from (aliases) -> (base) happens +# # *outside* of the autograd.Function.forward(). +# # That means the forward() only has 1 input (base), +# # and the backward() only has 1 output (grad_base) +# def compiled_backward_graph(grad_out): +# grad_base = ... 
+# return grad_base +# +# def autograd.Function.forward(base): +# x_updated, out = compiled_forward_graph(base) +# return x_updated, out +# +# # The compiled wrapper is where we create synthetic bases. +# # The info on which inputs are mutated is also tracked *before* synthetic base creation. +# def compiled_wrapper(x, x_view): +# base = merge_view_inputs(x, x_view) +# x_updated, out = autograd.Function.apply(base) +# # x and x_view are aliased in eager mode, so this mutation to x will automatically affect x_view. +# x.copy_(x_updated) +# return out + + +# Note [AOT Autograd: Views to avoid tangents aliasing inputs] +# +# We view every forward output when creating out tangent tensors to handle the problematic +# case in which a subclass does extra aliasing between graph outputs/inputs in a way that +# is not visible above the sublass. +# +# Ordinarily, when constructing the joint function that we want to trace in AOTAutograd, +# we're guaranteed that the tangent tensors that we pass +# into the joint are distinct tensors from the primals. This is because when +# decide which forward outputs to create tangents for, we only create tangents +# for forward outputs that are not aliases of inputs (See Note +# [AOT Autograd: outputs aliasing inputs or intermediates!]). +# +# However, when wrapper tensor subclasses enter the picture, it is possible +# to have an output of the forward that is a subclass that is not an +# input / alias of an input, but one of its inner tensors is an alias! +# NestedTensor is an example: Performing an out-of-place pointwise op on a +# NestedTensor constructs a fresh NestedTensor that holds onto the input's +# offsets tensor directly. +# +# Having tangent tensors that are the same as the (primal) forward inputs, +# can cause problems during tracing as make_fx() will specialize on our +# duplicate inputs: If we passed in the same tensor for primals_1 and +# tangents_1 during tracing, make_fx() will happily sub out all usages of +# tangents_1 with primals_1 in the graph, which is not what we want. +# +# To work around this, we view every forward output when creating out tangent +# tensors so that tangents can never be the same as forward inputs even if +# forward inputs alias forward outputs. + +# Note [Side-Effectful Tokens in AOTAutograd] +# +# We allow some some side-effectful operators in +# the post-AOTAutograd (functional) graph, such as prints and torchbind operations. +# To ensure that these side-effects are compatible to future graph passes that +# assume that the graph is functional, we will thread "effect tokens" to show +# data dependence between these side-effectful operators. Practically speaking, +# effect tokens are just dummy values (torch.tensor([])). The graph would look +# like the following: +# +# def gm(self, token0, reader): +# token1, frame = with_token(ordered_effect_op, (reader,), token0) +# frame = frame * 2 +# token2, frame2 = with_token(ordered_effect_op, (reader,), token1) +# frame2 = frame2 * 2 +# return token2, frame, frame2 +# +# We will pass the token as an input to the graph, thread it through +# side-effectful operators using the `with_effects` high order operator, and then +# return the updated token as an output. +# So the signature of the graph input would look something like +# (*tokens, *params_buffers, *user_inputs), and the signature of the graph +# output would look something like (*tokens, *outputs). 
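+#
+# A rough caller-side sketch of that convention (the names `compiled_graph`,
+# `num_tokens` and `params_buffers` below are made up for illustration; they
+# are not defined in this file):
+#
+#   def runtime_wrapper(*user_inputs):
+#       tokens = [torch.tensor([]) for _ in range(num_tokens)]
+#       outs = compiled_graph(*tokens, *params_buffers, *user_inputs)
+#       # the updated tokens are dropped; only the real outputs are user-visible
+#       return outs[num_tokens:]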
+ +# +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +aot_autograd_decompositions = {} + +@dynamo_timed +def create_aot_dispatcher_function( + flat_fn, flat_args: List[Any], aot_config: AOTConfig +): + """ + Traces the forward and backward graphs of the attr:`flat_fn` to generate a + joint graph. The joint graph is an Fx graph with Aten ops. Please refer to + the tracing mechanism to understand the graph capturing details. + + The joint graph is then passed through attr:`partition_fn` to isolate the + forward and backward portions, which are then respectively compiled via the + provided attr:`fw_compiler` and attr:`bw_compiler`. + + The resulting compiled forward and backward graphs are then wrapped up in a + ``torch.autograd.Function`` object. + + The calling convention here is that the first aot_config.num_params_buffers + inputs in flat_args are parameters and buffers, and the rest are inputs. + + We use this to assume that parameters/buffer's shapes don't change. + + Note: this function is used both by aot_function and aot_export (controlled by aot_config.is_export) + When aot_config.is_export is True, we return an FX graph + metadata + When aot_config.is_export is False, we return an ordinary runtime function + """ + + # This is the main entry point. + # TODO: Chillee argues that dynamo itself should pass in fake tensors to + # the list of arguments when compiling; at the moment we do not do this + + if aot_config.decompositions is None: + aot_config.decompositions = {} + + + aot_config.decompositions = { + **aot_autograd_decompositions, + **aot_config.decompositions, + } + + if config.functionalize_rng_ops: + # Update the decompositions with functionalized random decompositions + aot_config.decompositions = { + **rng_decompositions, + **aot_config.decompositions, + } + + # Check flat_args to see if they're already fake. If so, use that fake + # mode instead. + + fake_mode = detect_fake_mode(flat_args) + if fake_mode is None: + shape_env = ShapeEnv() if aot_config.dynamic_shapes else None + fake_mode = FakeTensorMode(shape_env=shape_env) + else: + shape_env = fake_mode.shape_env + + python_dispatcher_mode = ( + enable_python_dispatcher() if shape_env is not None else nullcontext() + ) + + with torch.autograd.set_multithreading_enabled( + False + ), preserve_rng_state(), fake_mode, python_dispatcher_mode, PhiloxStateTracker(): + + def process_inputs(flat_args): + def convert(idx, x): + if shape_env is not None: + from torch._dynamo.source import ConstantSource + if isinstance(x, int): + # We always specialize on scalar values in export. 
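+                    # (Outside of export, the int is instead promoted below to a
+                    # backed SymInt whose hint is the original value, so it can
+                    # be treated symbolically during tracing.)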
+ if aot_config.is_export: + return x + source = ConstantSource(f"sym_{idx}") + return shape_env.create_symintnode( + shape_env.create_symbol(x, source), + hint=x, + source=source + ) + if not isinstance(x, torch.Tensor): + return x + if isinstance(x, FakeTensor): + assert x.fake_mode is fake_mode + return x + if is_traceable_wrapper_subclass(x): + attrs, _ = x.__tensor_flatten__() + if all(isinstance(getattr(x, attr), FakeTensor) for attr in attrs): + assert all(getattr(x, attr).fake_mode is fake_mode for attr in attrs) + return x + + + # see note [Tensor Fakification and Symbol Caching] + symbolic_context = None + source = None + if tracing_context := torch._guards.TracingContext.try_get(): + if x in tracing_context.tensor_to_context: + symbolic_context = tracing_context.tensor_to_context[x] + source = symbolic_context.tensor_source + if ( + idx < aot_config.num_params_buffers + and config.static_weight_shapes + and not symbolic_context + ): + # TODO: Ensure that this codepath is never exercised from + # Dynamo + return fake_mode.from_tensor(x, static_shapes=True) + + return fake_mode.from_tensor( + x, static_shapes=False, symbolic_context=symbolic_context, source=source + ) + return [convert(idx, x) for idx, x in enumerate(flat_args)] + + fake_flat_args = process_inputs(flat_args) + + needs_autograd = ( + any(x.requires_grad for x in fake_flat_args if isinstance(x, Tensor)) + and torch.is_grad_enabled() + ) + + with enable_python_dispatcher(): + # Patch set_rng_state as set_rng_state with fake tensors is + # nonsensical. This does not affect the collection of metadata. + with patch("torch.cuda.set_rng_state", lambda *args: None): + fw_metadata = run_functionalized_fw_and_collect_metadata( + flat_fn, + keep_input_mutations=aot_config.keep_inference_input_mutations, + is_train=needs_autograd, + pre_dispatch=aot_config.pre_dispatch, + )(*fake_flat_args) + + req_subclass_dispatch = requires_subclass_dispatch(fake_flat_args, fw_metadata) + + if needs_autograd and not any(x.requires_grad for x in fw_metadata.output_info): + # We realized that none of the outputs require grad, + # so we actually have an inference graph. + needs_autograd = False + # A bit silly: right now in the subclass codepath, our ViewAndMutationMeta + # changes depending on whether we pass in is_train / keep_input_mutations, + # so we're forced to recompute the metadata. + # TODO: refactor the subclass path of run_functionalized_fw_and_collect_metadata + # so that this is unnecessary. + if req_subclass_dispatch: + fw_metadata = run_functionalized_fw_and_collect_metadata( + flat_fn, + keep_input_mutations=aot_config.keep_inference_input_mutations and not needs_autograd, + is_train=needs_autograd, + pre_dispatch=aot_config.pre_dispatch, + )(*fake_flat_args) + else: + fw_metadata = ViewAndMutationMeta( + input_info=fw_metadata.input_info, + output_info=fw_metadata.output_info, + num_intermediate_bases=fw_metadata.num_intermediate_bases, + keep_input_mutations=aot_config.keep_inference_input_mutations and not needs_autograd, + traced_tangents=fw_metadata.traced_tangents, + subclass_inp_meta=fw_metadata.subclass_inp_meta, + subclass_fw_graph_out_meta=fw_metadata.subclass_fw_graph_out_meta, + subclass_tangent_meta=fw_metadata.subclass_tangent_meta, + is_train=needs_autograd, + ) + + + if fw_metadata.num_intermediate_bases > 0: + assert not req_subclass_dispatch, f"""\ +torch.compile is currently being used with tensor subclass inputs: +{','.join([str(type(x)) for x in fake_flat_args])}. 
We are attempting to a compile a graph with two graph outputs +that alias one another, which is currently unsupported in the subclass use case. If you run into this, +please file a github issue""" + + if aot_config.is_export: + # aot_export: ban input metadata mutations for now to keep shared code paths simpler. + # Keeping .resize_() in the graph will require some work + # Allowing it but keeping the graph functional will require some calling convention changes. + if len([x for x in fw_metadata.input_info if x.mutates_metadata]) != 0: + raise RuntimeError(f"""\ +Found an input that received a metadata mutation, through e.g. a call to `.resize_()` or `.transpose_()`. +This is currently banned in the aot_export workflow. If you need this functionality, please file a github issue. + +fw_metadata={str(fw_metadata)}""") + # In export, banning data mutations on inputs that require grad for now. + # This should be rare, and is tricky to get right. When we trace the backward, + # we currently trace with autograd.grad instead of .backward(), which makes it difficult + # to ensure that we run autograd all the way through the input **before** it saw the mutation. + if len([x for x in fw_metadata.input_info if x.requires_grad and x.mutates_data]) != 0: + raise RuntimeError(f"""\ +Found a graph input that requires gradients, and received a mutation. +This is currently banned in the aot_export workflow. If you need this functionality, please file a github issue. + +fw_metadata={str(fw_metadata)}""") + if req_subclass_dispatch: + raise RuntimeError("""\ +aot_export is not currently supported with traceable tensor subclass. +If you need this feature, please comment on """) + + # Need to decide on a strategy for functionalized RNG: toggling via global config seems bad, + # and turning it on will require a non-trivial calling convention change for any export runtime. + if config.functionalize_rng_ops: + raise RuntimeError("""\ +Functionalized RNG is not currently supported in the aot_export workflow. Please file a github issue, +or otherwise set torch._functorch.config.functionalize_rng_ops = False.""") + + # crappy version of dispatcher + # TODO: Do this properly + if needs_autograd: + # For now, aot_dispatch_autograd knows to explicitly return a graph + # when run with export, and an opaque callable otherwise. + # In theory we could factor these out, but I wanted to let the dust + # settle on how functionalized rng fits into export first. + compiler_fn = aot_dispatch_autograd_graph if aot_config.is_export else aot_dispatch_autograd + else: + # aot_dispatch_base_graph contains only the "graph bits", while aot_dispatch_base + # includes some extra work around handling a runtime epilogue. 
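+            # Summary of the four cases chosen between here and the branch above:
+            #   needs_autograd, is_export     -> aot_dispatch_autograd_graph  (returns the joint fx graph)
+            #   needs_autograd, not is_export -> aot_dispatch_autograd        (returns a compiled callable)
+            #   inference,      is_export     -> aot_dispatch_base_graph      (returns the inference fx graph)
+            #   inference,      not is_export -> aot_dispatch_base            (returns a compiled callable)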
+ compiler_fn = aot_dispatch_base_graph if aot_config.is_export else aot_dispatch_base + + compiler_fn = partial(aot_wrapper_synthetic_base, compiler_fn=compiler_fn, needs_autograd=needs_autograd) + compiler_fn = partial(aot_wrapper_dedupe, compiler_fn=compiler_fn) + # You can put more passes here + + compiled_fn = compiler_fn(flat_fn, fake_flat_args, aot_config, fw_metadata=fw_metadata) + if aot_config.is_export: + # During export, we don't get back a callable - we get back the raw fx graph + # (either a joint or an inference-only graph) + assert isinstance(compiled_fn, torch.fx.GraphModule) + return compiled_fn, fw_metadata + + if not hasattr(compiled_fn, "_boxed_call"): + compiled_fn = make_boxed_func(compiled_fn) + + return compiled_fn + + +def aot_function( + fn: Callable, + fw_compiler: Callable, + bw_compiler: Optional[Callable] = None, + partition_fn: Callable = default_partition, + decompositions: Optional[Dict] = None, + num_params_buffers: int = 0, + keep_inference_input_mutations: bool = False, + inference_compiler: Optional[Callable] = None, + *, + # Whether or not to trace with dynamic shapes + dynamic=False, + enable_log=True, +) -> Callable: + """ + Traces the forward and backward graph of :attr:`fn` using torch dispatch + mechanism, and then compiles the generated forward and backward graphs + through :attr:`fw_compiler` and :attr:`bw_compiler`. + + :func:`aot_function` traces the forward and backward graph ahead of time, + and generates a joint forward and backward graph. :attr:`partition_fn` is + then used to separate out forward and backward graphs. The partitioner + function can be used to perform optimizations such as recomputation. One can + set `decompositions` dictionary to decompose the operators into a sequence + of core or simpler operators supported by the backend compilers. + + .. warning:: + This API is experimental and likely to change. + + Args: + fn (Callable): A Python function that takes one ore more arguments. Must + return one or more Tensors. + fw_compiler (Callable): A Python function that accepts an Fx graph with + Aten ops and input args, and returns a Callable that semantically is + equivalent to the input Fx graph. + bw_compiler (Optional[Callable]): A Python function that accepts an + Fx graph with Aten ops and input args, and returns a Callable that + semantically is equivalent to the input Fx graph. Default: None + (when None, it defaults to the :attr:`fw_compiler`) + partition_fn (Callable): A Python function that takes a joint forward + and backward graph, and partitions it into separate forward and + backward graphs. + decompositions (Dict): A dictionary to define the decomposition of + larger Aten ops into simpler or core Aten ops. + inference_compiler (Optional[Callable]): A Python function that accepts an + Fx graph with Aten ops and input args, and returns a Callable that + semantically is equivalent to the input Fx graph. inference_compiler is invoked + if no autograd is needed. Default: None + (when None, it defaults to the :attr:`fw_compiler`) + Returns: + Returns a ``Callable`` that retains the eager behavior of the original + :attr:`fn`, but with forward and backward graph compiled via + :attr:`fw_compile` and :attr:`bw_compile`. + + A simple example usage of :func:`aot_function` is as follows. 
This example + will print the forward and backward graphs of the function ``fn`` + + >>> fn = lambda x : x.sin().cos() + >>> def print_compile_fn(fx_module, args): + >>> print(fx_module) + >>> return fx_module + >>> aot_fn = aot_function(fn, print_compile_fn) + >>> x = torch.randn(4, 5, requires_grad=True) + >>> aot_fn(x) + """ + + if bw_compiler is None: + bw_compiler = fw_compiler + if inference_compiler is None: + inference_compiler = fw_compiler + aot_config = AOTConfig( + fw_compiler=fw_compiler, + bw_compiler=bw_compiler, + inference_compiler=inference_compiler, + partition_fn=partition_fn, + decompositions=decompositions, + num_params_buffers=num_params_buffers, + aot_id=next(AOT_COUNTER), + keep_inference_input_mutations=keep_inference_input_mutations, + dynamic_shapes=dynamic, + aot_autograd_arg_pos_to_source=None, + is_export=False, + no_tangents=False, + enable_log=enable_log, + ) + cached_res = None + + @wraps(fn) + def returned_function(*args, **kwargs): + nonlocal cached_res + # Now flatten the tensor args + flat_args = pytree.arg_tree_leaves(*args, **kwargs) + + # Compile the function and save it in the cache + if cached_res is None: + flat_fn, out_spec = create_tree_flattened_fn(fn, args, kwargs) + + compiled_fn = create_aot_dispatcher_function( + flat_fn, + flat_args, + aot_config, + ) + cached_res = (compiled_fn, out_spec) + + cached_fn, out_spec = cached_res + out = cached_fn(flat_args) + return out_spec.unflatten(out) + + return returned_function + + +def aot_module(mod: nn.Module, *args, **kwargs) -> nn.Module: + """ + Traces the forward and backward graph of :attr:`mod` using torch dispatch + tracing mechanism. It is wrapper function, that underneath uses + :func:`aot_function` to perform tracing and compilation. + + :func:`aot_module` lifts the parameters and buffers of ``nn.Module`` as inputs + to a new callable which is then compiled through :func:`aot_function`. + + .. warning:: + This API is experimental and likely to change. + + Args: + mod (Callable): A ``nn.Module`` module. + args : args to be passed to :func:`aot_function` + kwargs : kwargs to be passed to :func:`aot_function` + + Returns: + Returns a ``nn.Module`` that retains the eager behavior of the original + :attr:`mod`, but with forward and backward graph compiled. + + """ + # See Note: [Fake Modules and AOTAutograd] + torch._dynamo.utils.assert_no_fake_params_or_buffers(mod) + + def functional_call(named_params, named_buffers, *args, **kwargs): + params_and_buffers = {**named_params, **named_buffers} + return torch.func.functional_call(mod, params_and_buffers, args, kwargs) + + named_params = dict(mod.named_parameters(remove_duplicate=False)) + named_buffers = dict(mod.named_buffers(remove_duplicate=False)) + num_params_buffers = len(named_params) + len(named_buffers) + compiled_f = aot_function( + functional_call, *args, num_params_buffers=num_params_buffers, **kwargs + ) + + class AOTModule(nn.Module): + def __init__(self): + super().__init__() + self.orig_module = mod + + def forward(self, *args, **kwargs): + return compiled_f( + named_params, + named_buffers, + *args, + **kwargs, + ) + + return AOTModule() + + +def aot_module_simplified( + mod: nn.Module, + args, + fw_compiler: Callable, + bw_compiler: Optional[Callable] = None, + partition_fn: Callable = default_partition, + decompositions: Optional[Dict] = None, + keep_inference_input_mutations=False, + inference_compiler: Optional[Callable] = None, +) -> nn.Module: + """ + This is the simplified or low overhead version of aot_module. 
For frontends + like TorchDynamo, the input functions/modules to AOT are static and have + unpacked inputs/outputs. This gives us an opportunity to remove the + (1) pytree overhead to parse inputs/outputs, + (2) AOT Autograd cache, + (3) Reading of params/buffers in every forward call + + :func:`aot_module_simplified` removes these overheads. + """ + params = { + **dict(mod.named_parameters(remove_duplicate=False)), + **dict(mod.named_buffers(remove_duplicate=False)), + } + params_flat, params_spec = pytree.tree_flatten(params) + params_flat = list(params_flat) + params_len = len(params_flat) + + functional_call = create_functional_call(mod, params_spec, params_len) + + if bw_compiler is None: + bw_compiler = fw_compiler + if inference_compiler is None: + inference_compiler = fw_compiler + + seen_sources = set() + + full_args = [] + # First, the params + full_args.extend(params_flat) + + if tracing_context := torch._guards.TracingContext.try_get(): + tracing_context.params_flat = params_flat + + aot_autograd_arg_pos_to_source = None + # Then, the params 1:1 mapped sources, if relevant. + if hasattr(mod, "_param_name_to_source"): + aot_autograd_arg_pos_to_source = [] + # We now know this came from dynamo, and (1) we care about guards, + # so setting up aot_autograd_arg_pos_to_source for downstream dedup guards + # can now be done safely. (2) Dynamo logic protects the 1:1 sizing below. + for name in params.keys(): + assert name in mod._param_name_to_source, f"{name} not found." + source = mod._param_name_to_source[name] + assert source not in seen_sources, source + seen_sources.add(source) + aot_autograd_arg_pos_to_source.append(source) + + # Next, the input args + full_args.extend(args) + + if hasattr(mod, "graph"): + # Non dynamo entrypoints can get to here... + for i, node in enumerate(mod.graph.nodes): + if node.op == "placeholder": + if hasattr(node, "_dynamo_source"): + # ... but not here! + if aot_autograd_arg_pos_to_source is None: + aot_autograd_arg_pos_to_source = [] + source = node._dynamo_source + assert source not in seen_sources, source + seen_sources.add(source) + aot_autograd_arg_pos_to_source.append(source) + + if aot_autograd_arg_pos_to_source is not None: + assert len(full_args) == len(aot_autograd_arg_pos_to_source) + + dynamic_shapes = False + for x in full_args: + if isinstance(x, FakeTensor): + dynamic_shapes = x.fake_mode.shape_env is not None + break + + aot_config = AOTConfig( + fw_compiler=fw_compiler, + bw_compiler=bw_compiler, + inference_compiler=inference_compiler, + partition_fn=partition_fn, + decompositions=decompositions, + num_params_buffers=params_len, + aot_id=next(AOT_COUNTER), + keep_inference_input_mutations=keep_inference_input_mutations, + dynamic_shapes=dynamic_shapes, + aot_autograd_arg_pos_to_source=aot_autograd_arg_pos_to_source, + is_export=False, + no_tangents=False, + ) + + with compiled_autograd.disable(): + compiled_fn = create_aot_dispatcher_function( + functional_call, + full_args, + aot_config, + ) + + # TODO: There is something deeply wrong here; compiled_fn running with + # the boxed calling convention, but aot_module_simplified somehow + # historically returned a function that was not the boxed calling + # convention. This should get fixed... 
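+    # The callable returned below closes over params_flat; each call is in effect
+    #     compiled_fn([*params_flat, *runtime_args])
+    # so callers pass only the user inputs, never the parameters/buffers.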
+ def forward(*runtime_args): + full_args = [] + full_args.extend(params_flat) + full_args.extend(runtime_args) + return compiled_fn(full_args) + + # Just for convenience + forward.zero_grad = mod.zero_grad + forward.named_parameters = mod.named_parameters + forward.named_buffers = mod.named_buffers + + return forward + + +def aot_export_module( + mod: nn.Module, + args, + *, + decompositions: Optional[Dict] = None, + # If true, we'll return a joint forward-backward graph, + # As well as metadata on the loss + gradients in the backward. + trace_joint: bool, + # If trace_joint is True, we expect your module to return a scalar loss. + # Your module can return multiple outputs, so you must specify which output the loss is. + output_loss_index: Optional[int] = None, + pre_dispatch: bool = False, + kwargs=None, +) -> Tuple[torch.fx.GraphModule, GraphSignature]: + """ + This function takes in a module, and returns: + (1) an FX graph that can be exported + (2) some metadata about the graph + + If `trace_joint=True` we will return a joint graph of the forward + backward. + + The traced FX graph will have the following properties compared to the original module: + (1) Inputs and outputs to the module will be pytree-flattened + (2) Parameters and buffers on the module will be lifted into graph inputs, + graph_inputs = (*parameters, *buffers, *user_inputs) + (3) The graph will be fully functionalized + (4) Any input mutations will be converted into additional outputs in the graph, + meaning whoever calls this graph is responsible for applying the mutations + back to the original inputs. + (5) If is_joint is provided the graph will return parameter gradients in addition to user outputs. + The graph output will look like: + graph_outputs = (*updated_inputs, *user_outputs, *param_gradients) + + There are also several restrictions on what modules can use this API. In particular: + (1) If trace_joint is specified, we expect the loss function to be **fused** + into the module forward. One of the outputs to the forward must be a scalar loss, + which is specified with `output_loss_index`. + All other outputs to the forward are presumed to not require gradients. + (2) This API cannot capture optimizers (although in theory we could build an API for this). + (3) Metadata mutations on params/buffers/inputs are banned. + (4) Data mutations on anything that requires gradients are banned (parameters) + (5) If an input is mutated, it is not allowed to alias any other inputs. + (6) Parameters must not be duplicated. + """ + if pre_dispatch and trace_joint: + raise RuntimeError("pre_dispatch is not supported when trace_joint is True.") + named_parameters = dict(mod.named_parameters(remove_duplicate=False)) + named_buffers = dict(mod.named_buffers(remove_duplicate=False)) + + params_and_buffers = { + **dict(named_parameters), + **dict(named_buffers), + } + params_and_buffers_flat, params_spec = pytree.tree_flatten(params_and_buffers) + params_and_buffers_flat = tuple(params_and_buffers_flat) + params_len = len(params_and_buffers_flat) + + kwargs = kwargs or {} + + functional_call = create_functional_call(mod, params_spec, params_len, store_orig_mod=True) + + num_fw_outs = None + + if trace_joint: + # This helper effectively just adds some extra asserts about what the backward will look like: + # Outputs must include a scalar loss, that we compute gradients w.r.t. + # We don't compute gradients w.r.t. anything else: so just in case we detach() + # and other output tensors. 
+ def fn_to_trace(*args): + nonlocal num_fw_outs + out = functional_call(*args) + if output_loss_index is None: + raise RuntimeError("""\ +If trace_joint=Trueit is required that one of your forward outputs must be a scalar loss. +You must specify the which (index) output is the loss with output_loss_index.""") + if isinstance(out, (torch.Tensor)): + out = (out,) + if not isinstance(out, (tuple, list)): + raise RuntimeError(f"Expected forward output to be either a tensor or a list/tuple of tensors. found {type(out)}") + + for i, o in enumerate(out): + # We only want to create a backward graph w.r.t. the loss that the user passed in. + # This implies that every other output should not require gradients. + # Instead of making this an error (and forcing the user to detach all other outputs + # of their forward), + # we'll automatically detach them here. + if o.requires_grad and i != output_loss_index: + raise RuntimeError(f"""\ +Found an output of the forward that requires gradients, that was not the scalar loss. +We require all outputs to the forward that are not the scalar loss to not require gradient, +because we will only compute a backward graph against the scalar loss. +You can fix this by calling .detach() on each of your forward outputs that is not the loss. +You specified that output index {output_loss_index} is the loss, but we found that +the output at index {i} requires gradients.""") + out_loss = out[output_loss_index] + num_fw_outs = len(out) + if not out_loss.requires_grad: + raise RuntimeError(f"""\ +The output at index {output_loss_index} was marked as the loss, but it does not require gradients""") + if out_loss.numel() != 1: + raise RuntimeError(f"""\ +We require the output marked as the loss (at index {output_loss_index}) to be a scalar, but it has shape {out_loss.shape}""") + return out + ctx = nullcontext + else: + # Run under no_grad, so our tracing machinery only traces an inference graph. + ctx = torch.no_grad + fn_to_trace = functional_call + + full_args = [] + # First, the params + # NB: It is REQUIRED that parameters come first, Inductor infers "fixed" + # parameters by looking at the difference in parameter count outside + # and inside AOTAutograd, and assumes the prefix of arguments are fixed + # arguments + full_args.extend(params_and_buffers_flat) + # Next, the input args + full_args.extend(args) + + with ctx(): + fx_g, metadata, in_spec, out_spec = _aot_export_function( + fn_to_trace, + full_args, + decompositions=decompositions, + num_params_buffers=params_len, + no_tangents=True, + pre_dispatch=pre_dispatch, + kwargs=kwargs, + ) + if trace_joint: + def flattened_joint(*args): + # The idea here is that the joint graph that AOTAutograd creates has some strict properties: + # (1) It accepts two arguments (primals, tangents), and pytree_flattens them + # (2) It returns a tuple of (fw_outs, gradients) + # This is a very useful convention for anyone who wants to partition the joint graph + # into a separate forward and backward graph. + # However, + # (1) for people exporting a single joint graph, it would be preferable not to have + # any pytrees in the graph. + # (2) We are guaranteed in the aot_export_module case that the forward outputs a loss, + # and there are therefore no tangents that are needed to run the joint graph. + # (3) AOTAutograd creates a grad_input for every input in the forward, + # including None's for inputs that are not grad-requiring tensors. + # we don't want these in our export graph. 
+ # and there are therefore no tangents that are needed to run the joint graph. + # This function "fixes" both of the above by removing any tangent inputs, + # and removing pytrees from the original FX graph. + fake_tangents = [None for _ in range(metadata.num_outputs + metadata.num_mutated_inp_runtime_indices)] + fw_outs, gradients = fx_g(args, fake_tangents) + assert len(gradients) == len(args) + output_gradients = [] + for i, (a, grad) in enumerate(zip(args, gradients)): + if isinstance(a, torch.Tensor) and a.requires_grad: + assert grad is not None, """\ +Found a parameter that did not receive a gradient. +"This is most likely a bug, but if this needs to be supported please comment on this Github issue: +https://github.com/pytorch/pytorch/issues/101192 +""" + output_gradients.append(grad) + else: + assert grad is None + return *fw_outs, *output_gradients + fx_g = make_fx(flattened_joint)(*full_args) + + user_args_flat = pytree.arg_tree_leaves(*args, **kwargs) + return fx_g, create_graph_signature( + fx_g, + metadata, + in_spec, + out_spec, + user_args_flat=user_args_flat, + params_and_buffers_flat=params_and_buffers_flat, + param_names=list(named_parameters.keys()), + buffer_names=list(named_buffers.keys()), + trace_joint=trace_joint, + num_user_fw_outs=num_fw_outs, + loss_index=output_loss_index, + ) + +def aot_export_joint_simple( + func: Callable, + args, + *, + trace_joint: bool, + # It looks like the main consequence of this API is that for dynamic shapes, + # it will assume that parms/buffers are static. + # With the new inferred dynamic shapes API, maybe this doesn't matter? + num_params_buffers: int = 0, + decompositions: Optional[Dict] = None, +) -> torch.fx.GraphModule: + """ + A simplified version of export. Used by higher order operators. + + This function makes a high-level "no calling convention changes" guarantee: + - If no inputs require grad (so we export an inference graph), + there are *no* calling convention change between the exported graph, and "func". + - If at least one input requires grad (so we trace out and export a joint fw-bw graph), + Then if you were partition the graph into a separate forward and backward graph, + The forward graph will have no calling convention changes compared to "func". + + The above also relies on some strong restrictions around which functions this API accepts: + (1) `args` cannot contain any pytrees (they must have been pytree_flattened already) + (2) `func` cannot mutate any inputs + (3) The outputs of `func` cannot alias any inputs. + + Note: this function is only lightly tested today. It will probably be tested more heavily by higher order ops. + """ + if trace_joint: + ctx = nullcontext + else: + # Run under no_grad, so our tracing machinery only traces an inference graph. + ctx = torch.no_grad + + with ctx(): + fx_g, metadata, in_spec, out_spec = _aot_export_function( + func, + args, + decompositions=decompositions, + ) + in_spec, _kw_in_spec = in_spec.children_specs + # At this point, we can just directly return the (joint or inference graph) that we traced. + # First though: a bunch of assertions to make sure that our graph doesn't require + # any calling convention changes compared to the original function. + # These restrictions are *in addition to* the general restrictions on export. + + # No input mutations + if len([x for x in metadata.input_info if x.mutates_data or x.mutates_metadata]) != 0: + raise RuntimeError(f"aot_export_joint_simple does not support input mutations. 
{str(metadata)}") + # No output aliasing + if len([x for x in metadata.output_info if x.output_type != OutputType.non_alias]) != 0: + raise RuntimeError(f"aot_export_joint_simple does not support outputs that alias inputs. {str(metadata)}") + # No pytrees + if in_spec.is_leaf(): + raise RuntimeError(f"aot_export_joint_simple requires inputs to be a single list/tuple. in_spec={str(in_spec)}") + if not all(child.is_leaf() for child in in_spec.children_specs): + raise RuntimeError(f"aot_export_joint_simple requires individual inputs not to be pytrees. in_spec={str(in_spec)}") + if out_spec.is_leaf(): + raise RuntimeError(f"aot_export_joint_simple requires outputs to be a single list/tuple. out_spec={str(out_spec)}") + if not all(child.is_leaf() for child in out_spec.children_specs): + raise RuntimeError(f"aot_export_joint_simple requires individual outputs not to be pytrees. out_spec={str(out_spec)}") + # TODO: we might have to temporarily patch config.functionalize_rng + # so that it doesn't run when we're exporting a higher order op. + + if config.debug_assert: + # Smoke test that after partitioning, we can run the forward without any calling convention changes. + fw_module, bw_module = aot_config.default_partition( # noqa: F821 + fx_g, args, num_fwd_outputs=len(fw_metadata.output_infos) # noqa: F821 + ) + # Attempt to run the fw_module with the original user inputs + fake_mode = detect_fake_mode(args) + if fake_mode is None: + fake_mode = FakeTensorMode() + with fake_mode: + fw_module(*args) + return fx_g + +# Private for now because we aren't providing a contract on what to return +# for joint graphs (we could when there's a clearer use case) +# In the future, we may need to add more export API's that provide their own strong guarantees. +# This is meant as a general helper function for handling various export-y use cases. +def _aot_export_function( + func: Callable, + args, + *, + num_params_buffers: int = 0, + decompositions: Optional[Dict] = None, + # If we're exporting a joint graph and we don't want any tangent inputs in the graph + # (because we are backpropping through a scalar 1 loss), + # we need to explicitly specify not to include tangents in the graph. + # It's not enough just to check that our tangent is a scalar, since we also + # need to know if it is a 1 (no need to make it a graph input), or something else + # (requiring it to be a graph input). + # We don't know this info at trace time though, so we need to make it an explicit config. + no_tangents: bool = False, + pre_dispatch: bool = False, + kwargs=None, +) -> Tuple[torch.fx.GraphModule, ViewAndMutationMeta, pytree.TreeSpec, pytree.TreeSpec]: + kwargs = kwargs or {} + + flat_fn, out_spec = create_tree_flattened_fn(func, args, kwargs) + flat_args, in_spec = pytree.tree_flatten((args, kwargs)) + + dynamic_shapes = False + for x in flat_args: + if isinstance(x, FakeTensor): + dynamic_shapes = x.fake_mode.shape_env is not None + break + + # The export use case doesn't care about several bits of AOTConfig + # (1) compilers (we just export the graph) + # (2) partitioners (export is only full graph, user can partition themselves) + aot_config = AOTConfig( + fw_compiler=None, + bw_compiler=None, + inference_compiler=None, + partition_fn=None, + decompositions=decompositions, + num_params_buffers=num_params_buffers, + aot_id=next(AOT_COUNTER), + # For now there's no use case involving keeping input mutations in the graph + # (which we can only do in the inference case anyway). + # We can add this later if we need to. 
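+        # (i.e. for export, any input mutation is always reflected as an extra
+        # graph output rather than kept in the graph.)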
+ keep_inference_input_mutations=False, + dynamic_shapes=dynamic_shapes, + aot_autograd_arg_pos_to_source=None, + is_export=True, + no_tangents=no_tangents, + pre_dispatch=pre_dispatch, + ) + + fx_g, meta = create_aot_dispatcher_function( + flat_fn, + flat_args, + aot_config, + ) + return fx_g, meta, in_spec, out_spec.spec + + +compiled_function = aot_function +compiled_module = aot_module diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/apis.py b/venv/lib/python3.10/site-packages/torch/_functorch/apis.py new file mode 100644 index 0000000000000000000000000000000000000000..46325716936c591ce559ebcd114cb77f6b3df34d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/apis.py @@ -0,0 +1,401 @@ +# NOTE: We allow Dynamo to see this file (via torch/_dynamo/trace_rules.py) so that it can +# trace through functorch transforms. +# Currently, we can't allow Dynamo to see `eager_transforms.py`/`vmap.py` as that break a lot of thing +# and there isn't a mechanism to selectively expose only some functions (eg. grad) from a file +# to Dynamo. +from torch._functorch.vmap import (vmap_impl, _check_randomness_arg, + Callable, in_dims_t, out_dims_t, _check_out_dims_is_int_or_int_pytree, + _process_batched_inputs, _chunked_vmap) +from torch._functorch.utils import exposed_in, argnums_t +import functools + +# vmap(func)(inputs) wraps all Tensor inputs to be batched in BatchedTensors, +# sends those into func, and then unwraps the output BatchedTensors. Operations +# on BatchedTensors perform the batched operations that the user is asking for. +# +# vmap's randomness behavior differs from JAX's, which would require a PRNG key +# to be passed everywhere. + + +@exposed_in('torch.func') +def vmap( + func: Callable, + in_dims: in_dims_t = 0, + out_dims: out_dims_t = 0, + randomness: str = 'error', + *, + chunk_size=None) -> Callable: + """ + vmap is the vectorizing map; ``vmap(func)`` returns a new function that + maps ``func`` over some dimension of the inputs. Semantically, vmap + pushes the map into PyTorch operations called by ``func``, effectively + vectorizing those operations. + + vmap is useful for handling batch dimensions: one can write a function + ``func`` that runs on examples and then lift it to a function that can + take batches of examples with ``vmap(func)``. vmap can also be used to + compute batched gradients when composed with autograd. + + .. note:: + :func:`torch.vmap` is aliased to :func:`torch.func.vmap` for + convenience. Use whichever one you'd like. + + Args: + func (function): A Python function that takes one or more arguments. + Must return one or more Tensors. + in_dims (int or nested structure): Specifies which dimension of the + inputs should be mapped over. ``in_dims`` should have a + structure like the inputs. If the ``in_dim`` for a particular + input is None, then that indicates there is no map dimension. + Default: 0. + out_dims (int or Tuple[int]): Specifies where the mapped dimension + should appear in the outputs. If ``out_dims`` is a Tuple, then + it should have one element per output. Default: 0. + randomness (str): Specifies whether the randomness in this + vmap should be the same or different across batches. If 'different', + the randomness for each batch will be different. If 'same', the + randomness will be the same across batches. If 'error', any calls to + random functions will error. Default: 'error'. WARNING: this flag + only applies to random PyTorch operations and does not apply to + Python's random module or numpy randomness. 
+ chunk_size (None or int): If None (default), apply a single vmap over inputs. + If not None, then compute the vmap :attr:`chunk_size` samples at a time. + Note that :attr:`chunk_size=1` is equivalent to computing the vmap with a for-loop. + If you run into memory issues computing the vmap, please try a non-None chunk_size. + + Returns: + Returns a new "batched" function. It takes the same inputs as + ``func``, except each input has an extra dimension at the index + specified by ``in_dims``. It takes returns the same outputs as + ``func``, except each output has an extra dimension at the index + specified by ``out_dims``. + + .. warning: + :func:`vmap` works best with functional-style code. Please do not + perform any side-effects in ``func``, with the exception of + in-place PyTorch operations. Examples of side-effects include mutating + Python data structures and assigning values to variables not captured + in ``func``. + + One example of using :func:`vmap` is to compute batched dot products. PyTorch + doesn't provide a batched ``torch.dot`` API; instead of unsuccessfully + rummaging through docs, use :func:`vmap` to construct a new function. + + >>> torch.dot # [D], [D] -> [] + >>> batched_dot = torch.func.vmap(torch.dot) # [N, D], [N, D] -> [N] + >>> x, y = torch.randn(2, 5), torch.randn(2, 5) + >>> batched_dot(x, y) + + :func:`vmap` can be helpful in hiding batch dimensions, leading to a simpler + model authoring experience. + + >>> batch_size, feature_size = 3, 5 + >>> weights = torch.randn(feature_size, requires_grad=True) + >>> + >>> def model(feature_vec): + >>> # Very simple linear model with activation + >>> return feature_vec.dot(weights).relu() + >>> + >>> examples = torch.randn(batch_size, feature_size) + >>> result = torch.vmap(model)(examples) + + :func:`vmap` can also help vectorize computations that were previously difficult + or impossible to batch. One example is higher-order gradient computation. + The PyTorch autograd engine computes vjps (vector-Jacobian products). + Computing a full Jacobian matrix for some function f: R^N -> R^N usually + requires N calls to ``autograd.grad``, one per Jacobian row. Using :func:`vmap`, + we can vectorize the whole computation, computing the Jacobian in a single + call to ``autograd.grad``. 
+ + >>> # Setup + >>> N = 5 + >>> f = lambda x: x ** 2 + >>> x = torch.randn(N, requires_grad=True) + >>> y = f(x) + >>> I_N = torch.eye(N) + >>> + >>> # Sequential approach + >>> jacobian_rows = [torch.autograd.grad(y, x, v, retain_graph=True)[0] + >>> for v in I_N.unbind()] + >>> jacobian = torch.stack(jacobian_rows) + >>> + >>> # vectorized gradient computation + >>> def get_vjp(v): + >>> return torch.autograd.grad(y, x, v) + >>> jacobian = torch.vmap(get_vjp)(I_N) + + :func:`vmap` can also be nested, producing an output with multiple batched dimensions + + >>> torch.dot # [D], [D] -> [] + >>> batched_dot = torch.vmap(torch.vmap(torch.dot)) # [N1, N0, D], [N1, N0, D] -> [N1, N0] + >>> x, y = torch.randn(2, 3, 5), torch.randn(2, 3, 5) + >>> batched_dot(x, y) # tensor of size [2, 3] + + If the inputs are not batched along the first dimension, ``in_dims`` specifies + the dimension that each inputs are batched along as + + >>> torch.dot # [N], [N] -> [] + >>> batched_dot = torch.vmap(torch.dot, in_dims=1) # [N, D], [N, D] -> [D] + >>> x, y = torch.randn(2, 5), torch.randn(2, 5) + >>> batched_dot(x, y) # output is [5] instead of [2] if batched along the 0th dimension + + If there are multiple inputs each of which is batched along different dimensions, + ``in_dims`` must be a tuple with the batch dimension for each input as + + >>> torch.dot # [D], [D] -> [] + >>> batched_dot = torch.vmap(torch.dot, in_dims=(0, None)) # [N, D], [D] -> [N] + >>> x, y = torch.randn(2, 5), torch.randn(5) + >>> batched_dot(x, y) # second arg doesn't have a batch dim because in_dim[1] was None + + If the input is a Python struct, ``in_dims`` must be a tuple containing a struct + matching the shape of the input: + + >>> f = lambda dict: torch.dot(dict['x'], dict['y']) + >>> x, y = torch.randn(2, 5), torch.randn(5) + >>> input = {'x': x, 'y': y} + >>> batched_dot = torch.vmap(f, in_dims=({'x': 0, 'y': None},)) + >>> batched_dot(input) + + By default, the output is batched along the first dimension. However, it can be batched + along any dimension by using ``out_dims`` + + >>> f = lambda x: x ** 2 + >>> x = torch.randn(2, 5) + >>> batched_pow = torch.vmap(f, out_dims=1) + >>> batched_pow(x) # [5, 2] + + For any function that uses kwargs, the returned function will not batch the kwargs but will + accept kwargs + + >>> x = torch.randn([2, 5]) + >>> def fn(x, scale=4.): + >>> return x * scale + >>> + >>> batched_pow = torch.vmap(fn) + >>> assert torch.allclose(batched_pow(x), x * 4) + >>> batched_pow(x, scale=x) # scale is not batched, output has shape [2, 2, 5] + + .. note:: + vmap does not provide general autobatching or handle variable-length + sequences out of the box. + """ + _check_randomness_arg(randomness) + if not (chunk_size is None or chunk_size > 0): + raise ValueError(f"vmap: chunk_size should be None or greater than 0. (got {chunk_size})") + + # @functools.wraps(func) + def wrapped(*args, **kwargs): + return vmap_impl(func, in_dims, out_dims, randomness, chunk_size, *args, **kwargs) + + return wrapped + + +def chunk_vmap( + func: Callable, + in_dims: in_dims_t = 0, + out_dims: out_dims_t = 0, + randomness: str = 'error', + chunks=2) -> Callable: + """ + chunk_vmap is the vectorizing map (vmap) using chunks of input data. It is a mix of vmap (which vectorizes + everything) and map (which executes things sequentially). ``chunk_vmap`` vectorizes the input with number of + chunks at a time. For more details about vectorizing map, see :func:`vmap`. + + .. 
note:: + Please use :func:`vmap` with ``chunk_size`` argument instead of this API. + + Args: + func (function): A Python function that takes one or more arguments. + Must return one or more Tensors. + in_dims (int or nested structure): Specifies which dimension of the + inputs should be mapped over. ``in_dims`` should have a + structure like the inputs. If the ``in_dim`` for a particular + input is None, then that indicates there is no map dimension. + Default: 0. + out_dims (int or Tuple[int]): Specifies where the mapped dimension + should appear in the outputs. If ``out_dims`` is a Tuple, then + it should have one element per output. Default: 0. + randomness (str): Specifies whether the randomness in this + vmap should be the same or different across batches. If 'different', + the randomness for each batch will be different. If 'same', the + randomness will be the same across batches. If 'error', any calls to + random functions will error. Default: 'error'. WARNING: this flag + only applies to random PyTorch operations and does not apply to + Python's random module or numpy randomness. + chunks (int): Number of chunks to use to split the input data. Default is 2. + If equals to 1 then :func:`vmap` is called. + + Returns: + Returns a new "batched" function. It takes the same inputs as + ``func``, except each input has an extra dimension at the index + specified by ``in_dims``. It takes returns the same outputs as + ``func``, except each output has an extra dimension at the index + specified by ``out_dims``. + """ + _check_randomness_arg(randomness) + + if chunks == 1: + return vmap(func, in_dims=in_dims, out_dims=out_dims, randomness=randomness) + + def _get_chunk_flat_args(flat_args_, flat_in_dims_, chunks_): + flat_args_chunks = tuple( + t.chunk(chunks_, dim=in_dim) if in_dim is not None else [t, ] * chunks_ + for t, in_dim in zip(flat_args_, flat_in_dims_) + ) + # transpose chunk dim and flatten structure + # chunks_flat_args is a list of flatten args + chunks_flat_args = zip(*flat_args_chunks) + return chunks_flat_args + + @functools.wraps(func) + def wrapped_with_chunks(*args, **kwargs): + _check_out_dims_is_int_or_int_pytree(out_dims, func) + _, flat_in_dims, flat_args, args_spec = _process_batched_inputs(in_dims, args, func) + # Chunk flat arguments + chunks_flat_args = _get_chunk_flat_args(flat_args, flat_in_dims, chunks) + + # Apply vmap on chunks + return _chunked_vmap(func, flat_in_dims, chunks_flat_args, args_spec, out_dims, randomness, **kwargs) + + return wrapped_with_chunks + + +@exposed_in("torch.func") +def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Callable: + """``grad`` operator helps computing gradients of ``func`` with respect to the + input(s) specified by ``argnums``. This operator can be nested to + compute higher-order gradients. + + Args: + func (Callable): A Python function that takes one or more arguments. + Must return a single-element Tensor. If specified ``has_aux`` equals ``True``, + function can return a tuple of single-element Tensor and other auxiliary objects: + ``(output, aux)``. + argnums (int or Tuple[int]): Specifies arguments to compute gradients with respect to. + ``argnums`` can be single integer or tuple of integers. Default: 0. + has_aux (bool): Flag indicating that ``func`` returns a tensor and other + auxiliary objects: ``(output, aux)``. Default: False. + + Returns: + Function to compute gradients with respect to its inputs. 
By default, the output of + the function is the gradient tensor(s) with respect to the first argument. + If specified ``has_aux`` equals ``True``, tuple of gradients and output auxiliary objects + is returned. If ``argnums`` is a tuple of integers, a tuple of output gradients with + respect to each ``argnums`` value is returned. + + Example of using ``grad``: + + >>> # xdoctest: +SKIP + >>> from torch.func import grad + >>> x = torch.randn([]) + >>> cos_x = grad(lambda x: torch.sin(x))(x) + >>> assert torch.allclose(cos_x, x.cos()) + >>> + >>> # Second-order gradients + >>> neg_sin_x = grad(grad(lambda x: torch.sin(x)))(x) + >>> assert torch.allclose(neg_sin_x, -x.sin()) + + When composed with ``vmap``, ``grad`` can be used to compute per-sample-gradients: + + >>> # xdoctest: +SKIP + >>> from torch.func import grad, vmap + >>> batch_size, feature_size = 3, 5 + >>> + >>> def model(weights, feature_vec): + >>> # Very simple linear model with activation + >>> assert feature_vec.dim() == 1 + >>> return feature_vec.dot(weights).relu() + >>> + >>> def compute_loss(weights, example, target): + >>> y = model(weights, example) + >>> return ((y - target) ** 2).mean() # MSELoss + >>> + >>> weights = torch.randn(feature_size, requires_grad=True) + >>> examples = torch.randn(batch_size, feature_size) + >>> targets = torch.randn(batch_size) + >>> inputs = (weights, examples, targets) + >>> grad_weight_per_example = vmap(grad(compute_loss), in_dims=(None, 0, 0))(*inputs) + + Example of using ``grad`` with ``has_aux`` and ``argnums``: + + >>> # xdoctest: +SKIP + >>> from torch.func import grad + >>> def my_loss_func(y, y_pred): + >>> loss_per_sample = (0.5 * y_pred - y) ** 2 + >>> loss = loss_per_sample.mean() + >>> return loss, (y_pred, loss_per_sample) + >>> + >>> fn = grad(my_loss_func, argnums=(0, 1), has_aux=True) + >>> y_true = torch.rand(4) + >>> y_preds = torch.rand(4, requires_grad=True) + >>> out = fn(y_true, y_preds) + >>> # > output is ((grads w.r.t y_true, grads w.r.t y_preds), (y_pred, loss_per_sample)) + + .. note:: + Using PyTorch ``torch.no_grad`` together with ``grad``. + + Case 1: Using ``torch.no_grad`` inside a function: + + >>> # xdoctest: +SKIP + >>> def f(x): + >>> with torch.no_grad(): + >>> c = x ** 2 + >>> return x - c + + In this case, ``grad(f)(x)`` will respect the inner ``torch.no_grad``. + + Case 2: Using ``grad`` inside ``torch.no_grad`` context manager: + + >>> # xdoctest: +SKIP + >>> with torch.no_grad(): + >>> grad(f)(x) + + In this case, ``grad`` will respect the inner ``torch.no_grad``, but not the + outer one. This is because ``grad`` is a "function transform": its result + should not depend on the result of a context manager outside of ``f``. + + """ + # To avoid cyclical dependency. + import torch._functorch.eager_transforms as eager_transforms + + @functools.wraps(func) + def wrapper(*args, **kwargs): + return eager_transforms.grad_impl(func, argnums, has_aux, args, kwargs) + return wrapper + + +@exposed_in("torch.func") +def grad_and_value(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Callable: + """ + Returns a function to compute a tuple of the gradient and primal, or + forward, computation. + + Args: + func (Callable): A Python function that takes one or more arguments. + Must return a single-element Tensor. If specified ``has_aux`` + equals ``True``, function can return a tuple of single-element + Tensor and other auxiliary objects: ``(output, aux)``. 
+ argnums (int or Tuple[int]): Specifies arguments to compute gradients + with respect to. ``argnums`` can be single integer or tuple of + integers. Default: 0. + has_aux (bool): Flag indicating that ``func`` returns a tensor and + other auxiliary objects: ``(output, aux)``. Default: False. + + Returns: + Function to compute a tuple of gradients with respect to its inputs + and the forward computation. By default, the output of the function is + a tuple of the gradient tensor(s) with respect to the first argument + and the primal computation. If specified ``has_aux`` equals + ``True``, tuple of gradients and tuple of the forward computation with + output auxiliary objects is returned. If ``argnums`` is a tuple of + integers, a tuple of a tuple of the output gradients with respect to + each ``argnums`` value and the forward computation is returned. + + See :func:`grad` for examples + """ + from torch._functorch import eager_transforms + + @functools.wraps(func) + def wrapper(*args, **kwargs): + return eager_transforms.grad_and_value_impl(func, argnums, has_aux, args, kwargs) + return wrapper diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/autograd_function.py b/venv/lib/python3.10/site-packages/torch/_functorch/autograd_function.py new file mode 100644 index 0000000000000000000000000000000000000000..0a4fbf81c7259c8739ad39d88195fc930b828acb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/autograd_function.py @@ -0,0 +1,659 @@ +import torch +from torch._ops import HigherOrderOperator +from torch._C._functorch import TransformType +from torch._functorch.utils import enable_single_level_autograd_function +import torch.utils._pytree as pytree +from torch._C._functorch import ( + _wrap_for_grad, + _unwrap_for_grad, + current_level, +) +from torch._functorch.vmap import ( + wrap_batched, + unwrap_batched, + restore_vmap, + _add_batch_dim, +) +from torch._functorch.apis import vmap +from torch._functorch.vmap import _broadcast_to_and_flatten +from torch.autograd.forward_ad import _set_fwd_grad_enabled +from typing import Any, NamedTuple, Tuple + +# autograd.Function technically runs before the regular PyTorch dispatcher. +# This is how features like autocast and torch_dispatch (e.g. PythonTLSSnapshot) +# work with it. One day we might decide to change this, but until then, +# we need to give the illusion that autograd.Function runs before those things. +# +# We do this by using creating a custom HigherOrderOperator that only functorch +# dispatches specially. +class CustomFunctionHigherOrderOperator(HigherOrderOperator): + def __init__(self): + super().__init__('custom_function_call') + + def __call__(self, autograd_function, *args, **kwargs): + # When custom_function_call is done dispatching through functorch, + # it should just invoke the autograd.Function. This is consistent + # with the autograd.Function behavior of being invoked before the + # PyTorch dispatcher. + # + # This will lead us into trouble later down the line, but this is + # pre-existing. There is an invariant that a function traced by + # make_fx should have the same behavior when provided the same + # Tensor. However, make_fx sees autograd.Function as a composite + # (because autograd.Function happens before the Python dispatch key) + # and only traces the forward pass. 
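+        # Descriptive note (editor's addition): the dispatch below boils down to this:
+        # if any functorch transform (grad / jvp / vmap / functionalize) is currently
+        # active, go through the HigherOrderOperator machinery so the per-transform
+        # rules registered with @custom_function_call.py_impl(...) later in this file
+        # get a chance to run; otherwise call the user's autograd.Function.apply directly.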
+ if torch._C._are_functorch_transforms_active(): + return super().__call__(autograd_function, *args, **kwargs) + return autograd_function.apply(*args, **kwargs) + + +# "custom_function_call" +# This is the mechanism for an autograd.Function that works with functorch transforms. +# It wraps an autograd.Function; interactions with functorch transforms are defined +# via PyDispatcher and HigherOrderOperator rather than through the traditional PyTorch +# dispatcher. +custom_function_call = CustomFunctionHigherOrderOperator() + + +# The grad rule for custom_function_call is to construct a new _SingleLevelFunction +# (autograd.Function that only works with a single layer (level) of functorch) that: +# - unwraps the inputs +# - redispatches to custom_function_call +# - wraps the outputs +# and whose backward pass calls the original autograd.Function's backward. +# +# Why do we need to redispatch to custom_function_call? +# ----------------------------------------------------- +# This is consistent with how ATen operators work with functorch's grad transform: +# they always redispatch to the original operator. +# Consider torch.sin, and let's say we do grad0(grad1(torch.sin))(x) +# +# grad1 will: +# - set up the autograd graph +# - unwrap the inputs +# - redispatch to at::sin (*) +# - rewrap the outputs on the return +# +# On the redispatch in (*), grad0 will: +# - set up the autograd graph +# - unwrap the inputs +# - redispatch to at::sin +# - rewrap the outputs on the return +# +# To "set up the autograd graph", we generate a _SingleLevelFunction +# and apply it. +@custom_function_call.py_impl(TransformType.Grad) +@custom_function_call.py_impl(TransformType.Jvp) +def custom_function_call_grad(interpreter, autograd_function, *operands): + Generated = generate_single_level_function(interpreter, autograd_function) + with enable_single_level_autograd_function(): + flat_out = Generated.apply(*operands) + return flat_out + + +def generate_single_level_function(interpreter, autograd_function): + level = interpreter.level() + + def forward(*operands): + unwrapped_operands = pytree.tree_map_only( + torch.Tensor, + lambda x: _unwrap_for_grad(x, level), + operands) + # Both enable_grad() and _set_fwd_grad_enabled() are necessary no matter + # the transform. _SingleLevelFunction will turn off both fwd and bwd + # gradient computation and we need to turn it back on here. + with torch.enable_grad(), _set_fwd_grad_enabled(True), interpreter.lower(): + unwrapped_output = custom_function_call(autograd_function, *unwrapped_operands) + + # See NOTE [mark_dirty object identity check] + def wrap_fn(output): + return _wrap_for_grad(output, level) + + return wrap_outputs_maintaining_identity( + unwrapped_output, + unwrapped_operands, + operands, + wrap_fn) + + def setup_context(ctx, inputs, output): + return autograd_function.setup_context(ctx, inputs, output) + + # backward is only used if the transform is TransformType.Grad + def backward(ctx, *grads): + result = autograd_function.backward(ctx, *grads) + return result + + # jvp is only used if the transform is TransformType.Jvp + def jvp(ctx, *tangents): + result = autograd_function.jvp(ctx, *tangents) + return result + + # This is the sequence of magic words to dynamically generate a Subclass with + # a given name. A Tensor's .grad_fn field has a class name that is the original + # autograd.Function's name + Backward, so we do this to generate some + # meaningful name. 
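+    # Illustrative example (editor's note): for a user-defined
+    # `class MySin(torch.autograd.Function)`, the class generated below is named
+    # 'MySinGenerated', so tensors it produces report a grad_fn along the lines of
+    # <MySinGeneratedBackward object at 0x...>.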
+ name = f'{autograd_function.__name__}Generated' + Generated = type( + name, + (torch.autograd.function._SingleLevelFunction,), + { + 'forward': staticmethod(forward), + 'backward': staticmethod(backward), + 'jvp': staticmethod(jvp), + 'setup_context': staticmethod(setup_context), + }, + ) + return Generated + +# wrap_outputs_maintaining_identity handles outputs from the vmap, +# backward (vjp), and jvp staticmethod. The way it distinguishes +# between the vmap case and the {backward, jvp} case is if the out_dims +# are specified or not. +# +# NB: we cannot use out_dims=None as the deciding factor. This because +# out_dims=None can still happen in the vmap staticmethod! What the +# user is saying in that case is that their output does not have a +# dimension that is being vmapped over, which is valid. +NO_OUT_DIMS = "not specified" + +# NOTE [mark_dirty object identity check] +# autograd.Function's ctx.mark_dirty expect a returned input +# to have the same object identity as the input. +# Mode-only functorch will greatly simplify this logic. +def wrap_outputs_maintaining_identity( + outputs, unwrapped_inputs, orig_inputs, wrap_fn, out_dims=NO_OUT_DIMS): + flat_unwrapped_inputs = pytree.arg_tree_leaves(*unwrapped_inputs) + flat_orig_inputs = pytree.arg_tree_leaves(*orig_inputs) + + unwrapped_input_to_orig_input = { + id(unwrapped): orig + for unwrapped, orig in zip(flat_unwrapped_inputs, flat_orig_inputs) + } + + flat_outputs, spec = pytree.tree_flatten(outputs) + result = [] + + out_dims_specified = out_dims != NO_OUT_DIMS + + if out_dims_specified: + flat_out_dims = _broadcast_to_and_flatten(out_dims, spec) + # _broadcast_to_and_flatten returns None if it is unable to broadcast. + # TODO: update following link from master to stable once that's out + if flat_out_dims is None: + raise RuntimeError( + f"The autograd.Function's vmap staticmethod returned an " + f"incompatible (output, out_dims) tuple. " + f"Expected out_dims={out_dims} " + f"to be compatible with the structure of `output`. " + f"out_dims has structure {pytree.tree_flatten(out_dims)[1]} " + f"but output has structure {spec}. " + f"For more details, please see " + f"https://pytorch.org/docs/master/notes/extending.func.html" + ) + + for i, output in enumerate(flat_outputs): + if not isinstance(output, torch.Tensor): + result.append(output) + continue + if id(output) in unwrapped_input_to_orig_input: + result.append(unwrapped_input_to_orig_input[id(output)]) + continue + if out_dims_specified: + result.append(wrap_fn(output, flat_out_dims[i])) # type: ignore[possibly-undefined, index] + else: + result.append(wrap_fn(output)) + + return pytree.tree_unflatten(result, spec) + + +# NOTE: [functorch vjp and autograd interaction] +# There's an edge case with the functorch vjp and autograd interaction +# that will eventually be fixed by mode-only functorch. +# The TL;DR is that there's no way to unwrap a dead GradTensorWrapper, +# so we (the framework) need to do it manually. Regular PyTorch operators +# automatically do so this is consistent. 
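+# The following sketch (illustrative) walks through the edge case: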
+# +# class MyExp(torch.autograd.Function): +# @staticmethod +# def forward(x): +# return x.exp() +# +# @staticmethod +# def setup_context(ctx, inputs, output): +# y = output +# ctx.save_for_backward(y) +# +# @staticmethod +# def backward(gy): +# y, = ctx.saved_tensors() +# return MyMul.apply(gy, y) +# +# x = torch.randn([], requires_grad=True) +# gy = torch.randn([], requires_grad=True) +# _, vjp_fn = vjp(MySin.apply, x) +# result = vjp_fn(gy) +# +# MyMul is an autograd.Function that is not shown here. +# It saves a `y` for backward (since gy requires grad). +# +# in vjp_fn(gy), we get: +# > MyMul.apply(gy, GradTensorWrapper(y, level=dead)) +# Because the y that is saved for backward by MyExp is a GradTensorWrapper +# but is now dead since we are outside the vjp context. +# +# PyTorch dispatcher operations, upon seeing a dead GradTensorWrapper, +# will automatically unwrap the GradTensorWrapper when applied. +# But since autograd.Function technically sits above the regular PyTorch +# dispatcher, it doesn't get this treatment. So we manually do +# the unwrapping to be consistent with regular PyTorch dispatcher operations. + + +class VmapInfo(NamedTuple): + batch_size: int + randomness: str + + +def has_overriden_vmap_rule(autograd_function): + return autograd_function.vmap is not torch.autograd.Function.vmap + + +def validate_vmap_returns_tuple_of_two_elements(result): + base_error_msg = ( + "Expected the vmap staticmethod to have two returns, an output " + "and out_dims with pytree structure compatible with the output. " + ) + if not isinstance(result, tuple): + raise RuntimeError(base_error_msg + f"Got a {type(result)} instead") + if not len(result) == 2: + raise RuntimeError(base_error_msg + f"Got {len(result)} returns instead") + +@custom_function_call.py_impl(TransformType.Vmap) +def custom_function_call_vmap(interpreter, autograd_function, *operands): + if autograd_function.generate_vmap_rule: + if has_overriden_vmap_rule(autograd_function): + # TODO: Update link to stable once that's out + # https://github.com/pytorch/pytorch/issues/92029 + raise RuntimeError( + f"You tried to vmap over {autograd_function.__name__}, but " + f"it has both generate_vmap_rule=True and an overriden vmap " + f"staticmethod. Please set generate_vmap_rule=False or delete " + f"the overriden vmap staticmethod to avoid ambiguity. " + f"For more details, please see " + f"https://pytorch.org/docs/master/notes/extending.func.html") + return custom_function_call_vmap_generate_rule(interpreter, autograd_function, *operands) + + if not has_overriden_vmap_rule(autograd_function): + # TODO: Update link to stable once that's out + # https://github.com/pytorch/pytorch/issues/92029 + raise RuntimeError( + f"You tried to vmap over {autograd_function.__name__}, but " + f"it does not have vmap support. Please override and implement the " + f"vmap staticmethod or set generate_vmap_rule=True. " + f"For more details, please see " + f"https://pytorch.org/docs/master/notes/extending.func.html") + + current_level = interpreter.level() + info = VmapInfo( + batch_size=interpreter.batch_size(), + randomness=interpreter.randomness(), + ) + unwrapped_operands, in_dims = unwrap_batched(operands, current_level) + + # If none of the tensors are batched at the current level, then we skip the + # current level. 
This saves the user from needing to handle this case in + # their vmap staticmethod (and is consistent with our C++ batching rule API) + if pytree.tree_all(lambda dim: dim is None, in_dims): + with interpreter.lower(): + return custom_function_call(autograd_function, *operands) + + with interpreter.lower(): + result = autograd_function.vmap(info, in_dims, *unwrapped_operands) + validate_vmap_returns_tuple_of_two_elements(result) + unwrapped_output, out_dims = result + + # See NOTE [mark_dirty object identity check] + def wrap_fn(output, out_dim): + return output if out_dim is None else _add_batch_dim(output, out_dim, current_level) + + return wrap_outputs_maintaining_identity( + unwrapped_output, + unwrapped_operands, + operands, + wrap_fn, + out_dims=out_dims) + + +def custom_function_call_vmap_generate_rule(interpreter, autograd_function, *operands): + unwrapped_operands, in_dims = unwrap_batched(operands, interpreter.level()) + vmapped_function, get_out_dims = vmapify_autograd_function( + autograd_function, in_dims, interpreter.batch_size(), interpreter.randomness()) + + with interpreter.lower(): + output = custom_function_call(vmapped_function, *unwrapped_operands) + + out_dims = get_out_dims() + return wrap_batched(output, out_dims, interpreter.level()) + + +@custom_function_call.py_impl(TransformType.Functionalize) +def custom_function_call_functionalize(interpreter, autograd_function, generate_vmap_rule, *operands): + raise RuntimeError("NYI: Functionalize rule for custom_function_call") + + +def vmapify_autograd_function(autograd_function, in_dims, batch_size, randomness): + # The following values are saved from the forward() and setup_context() + # and used in backward(). + # Why do we save the values out here instead of on the ctx object? + # - out_dims: There's no way to retrieve this from forward() + # - input_shapes, saved_tensors_bdims: I'm a bit scared of nesting + # vmap(vmap( but not completely sure if it is a problem. If we + # assigned those fields to the ctx object, the worry is that they + # get overwritten. + init_val = "not populated" + out_dims = init_val + input_shapes: Any = init_val + saved_tensors_bdims: Any = init_val + + def forward(*operands): + nonlocal out_dims + outputs, out_dims = restore_vmap( + autograd_function.forward, in_dims, batch_size, randomness)(*operands) + return outputs + + def setup_context(ctx, inputs, outputs): + input_shapes_ = None + saved_tensors_bdims_ = None + + def inner(inputs, outputs): + # wrapped_ctx.save_for_backward will: + # - unwrap batchedtensors into (tensor, bdim) + # - save_for_backward(*unwrapped_tensors) + # - assign the bdims to wrapped_ctx._pt_saved_tensors_bdims + wrapped_ctx = CtxCustomSave(ctx, current_level()) + autograd_function.setup_context(wrapped_ctx, inputs, outputs) + + # input_shapes are used for reductify later to reduce expanded gradients + # to the correct shape. + # See NOTE: [Why can't we rely on autograd to reduce expanded gradients?] + # for more details + nonlocal input_shapes_ + input_shapes_ = tuple(inp.shape if isinstance(inp, torch.Tensor) else None + for inp in inputs) + nonlocal saved_tensors_bdims_ + saved_tensors_bdims_ = wrapped_ctx._pt_saved_tensors_bdims + + # See NOTE: [Why do we need to run setup_context under a vmap?] 
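+            # Descriptive note (editor's addition): restore_vmap re-runs `inner` under
+            # a vmap over the batch dimension, so `inner` observes per-example
+            # (logically unbatched) views of `inputs` and `outputs`. That is what makes
+            # the shapes recorded in input_shapes_ and the bdims recorded by
+            # CtxCustomSave per-example quantities, which backward() and jvp() below
+            # rely on.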
+ restore_vmap( + inner, + (in_dims, out_dims), + batch_size, + randomness, + )(inputs, outputs) + + nonlocal input_shapes + input_shapes = input_shapes_ + nonlocal saved_tensors_bdims + saved_tensors_bdims = saved_tensors_bdims_ + + def jvp(ctx, *tangents): + assert out_dims != init_val + assert saved_tensors_bdims != init_val + + def jvp_no_context(saved_tensors, tangents): + wrapped_ctx = CtxWithSavedTensors(ctx, saved_tensors) + return autograd_function.jvp(wrapped_ctx, *tangents) + + tangent_in_dims = get_tangents_in_dims(in_dims, tangents) + out_tangents, out_tangents_dims = restore_vmap( + jvp_no_context, (saved_tensors_bdims, tangent_in_dims), batch_size, randomness)( + ctx.saved_tensors, tangents) + + result = reductify(out_tangents, out_tangents_dims, out_dims, batch_size) + return result + + def backward(ctx, *grad_outputs): + assert out_dims != init_val + assert input_shapes != init_val + assert saved_tensors_bdims != init_val + + def backward_no_context(inputs): + saved_tensors, grad_outputs = inputs + wrapped_ctx = CtxWithSavedTensors(ctx, saved_tensors) + return autograd_function.backward(wrapped_ctx, *grad_outputs) + + grad_ins, grad_ins_dims = restore_vmap( + backward_no_context, ((saved_tensors_bdims, out_dims),), batch_size, randomness)( + (ctx.saved_tensors, grad_outputs)) + result = reductify(grad_ins, grad_ins_dims, in_dims, batch_size, input_shapes) + return result + + name = f'Vmapped{autograd_function.__name__}' + Generated = type( + name, + (torch.autograd.Function,), + { + 'forward': staticmethod(forward), + 'backward': staticmethod(backward), + 'jvp': staticmethod(jvp), + 'setup_context': staticmethod(setup_context), + 'generate_vmap_rule': True + } + ) + + def get_out_dims(): + assert out_dims != init_val + return out_dims + + return Generated, get_out_dims + + +# tangents might be None, so we need to replace +# the corresponding in_dims with None. +def get_tangents_in_dims(input_dims, tangents): + flat_in_dims, spec = pytree.tree_flatten(input_dims) + flat_tangents = pytree.arg_tree_leaves(*tangents) + result = [None if tangent is None else in_dim + for in_dim, tangent in zip(flat_in_dims, flat_tangents)] + return pytree.tree_unflatten(result, spec) + + +# NOTE: [Why do we need to run setup_context under a vmap?] +# Consider the following autograd.Function +# +# class Sum(torch.autograd.Function): +# @staticmethod +# def forward(x): +# return x.sum() +# @staticmethod +# def setup_context(ctx, inputs, outputs): +# ctx.x_shape = inputs[0] +# @staticmethod +# def backward(ctx, gy): +# return gy.expand(ctx.x_shape) +# +# x = torch.randn(B, 4) +# in_dims = 0 +# vmap(Sum.apply, in_dims)(x) +# +# Let’s assume for a moment that we didn’t vmap setup_context in VmappedSum: +# +# class VmappedSum(torch.autograd.Function): +# @staticmethod +# def forward(x): +# return vmap(Sum.forward, in_dims)(x) +# +# @staticmethod +# def setup_context(ctx, inputs, outputs): +# Sum.setup_context(ctx, inputs, outputs) +# +# @staticmethod +# def backward(ctx, gy): +# def backward_no_context(gy): +# return gy.expand(ctx.x_shape) +# +# dims = (0,) +# gx = vmap(backward_no_context, dims)(gy) +# return gx +# +# We end up saving [B, 4] as x_shape. In the backward, gy has shape [B], +# and we’re doing: +# +# def backward_no_context(gy): +# return gy.expand([B, 4]) +# +# gx = vmap(backward_no_context, dims)(gy: "Tensor[B]") +# +# This gives us the wrong result (gx has shape [B, B, 4], but it should +# have shape [4]). 
Performing vmap over setup_context means the shape +# saved has shape [4] and leads to a correct result shape for gx. + +# Wraps a ctx object. Forwards all attr accesses to the underlying object +# except for the attrs in _pt_attrs +class WrappedCtx: + _pt_reserved_attrs: Tuple[str, ...] = ('_pt_reserved_attrs', '_pt_inner_ctx') + + def __init__(self, ctx): + if not isinstance(ctx, WrappedCtx): + reserved_attrs = type(self)._pt_reserved_attrs + for name in reserved_attrs: + if not hasattr(ctx, name): + continue + raise RuntimeError( + f'PyTorch reserves the {reserved_attrs} field on ctx. ' + 'Please name your fields on ctx something else to avoid name ' + 'collision.') + self._pt_inner_ctx = ctx + + def __getattr__(self, name): + return getattr(self._pt_inner_ctx, name) + + def __setattr__(self, name, value): + if name in type(self)._pt_reserved_attrs: + self.__dict__[name] = value + return + return setattr(self._pt_inner_ctx, name, value) + +# Wraps ctx to create a new ctx object that overrides saved_tensors. +class CtxWithSavedTensors(WrappedCtx): + _pt_reserved_attrs = ('_pt_new_saved_tensors', *WrappedCtx._pt_reserved_attrs) + + def __init__(self, ctx, new_saved_tensors): + super().__init__(ctx) + self._pt_new_saved_tensors = new_saved_tensors + + @property + def saved_tensors(self): + return self._pt_new_saved_tensors + +class CtxCustomSave(WrappedCtx): + _pt_reserved_attrs = ('_pt_saved_tensors_bdims', '_pt_current_level', + *WrappedCtx._pt_reserved_attrs) + + def __init__(self, ctx, current_level): + super().__init__(ctx) + self._pt_saved_tensors_bdims = () + self._pt_current_level = current_level + + def save_for_backward(self, *tensors): + unwrapped_tensors, bdims = unwrap_batched(tensors, self._pt_current_level) + self._pt_inner_ctx.save_for_backward(*unwrapped_tensors) + self._pt_saved_tensors_bdims = bdims + + def save_for_forward(self, *tensors): + unwrapped_tensors, bdims = unwrap_batched(tensors, self._pt_current_level) + self._pt_inner_ctx.save_for_forward(*unwrapped_tensors) + self._pt_saved_tensors_bdims = bdims + + +def reductify(grad_input, grad_input_bdim, input_bdim, batch_size, + target_shape_without_bdim_to_reduce_to=None): + if not isinstance(grad_input, tuple): + grad_input = (grad_input,) + if not isinstance(grad_input_bdim, tuple): + grad_input_bdim = (grad_input_bdim,) + if not isinstance(input_bdim, tuple): + input_bdim = (input_bdim,) + + if target_shape_without_bdim_to_reduce_to is None: + target_shape_without_bdim_to_reduce_to = len(grad_input) * (None,) + result = tuple( + reductify_leaf(gi, gi_bdim, i_bdim, batch_size, maybe_ishape) + for gi, gi_bdim, i_bdim, maybe_ishape in + zip(grad_input, grad_input_bdim, input_bdim, target_shape_without_bdim_to_reduce_to) + ) + return result + + +def reductify_leaf(grad_input, grad_input_bdim, input_bdim, batch_size, + target_shape_without_bdim_to_reduce_to=None): + if grad_input is None: + return None + + if grad_input_bdim is None and input_bdim is None: + return grad_input + + if grad_input_bdim is not None and input_bdim is None: + return grad_input.sum(grad_input_bdim) + + # NOTE: [Why can't we rely on autograd to reduce expanded gradients?] + # For reverse-mode AD, + # given a grad_input and input, it is valid for the user to return a + # grad_input that has a broadcasted shape when compared to the input. + # In this situation, autograd automatically reduces the grad_input to + # the shape of the input. + # + # However, when input_bdim is not None, we have problems. 
+ # + # [example 1] + # grad_input: Tensor[3, 4], input: Tensor[B, 4] + # We can expand grad_input to Tensor[B, 3, 4], but that isn't broadcastable + # from [B, 4]. + # + # [example 2] + # grad_input: Tensor[3, B, 4], input: Tensor[B, 4] + # We can swizzle grad_input to Tensor[B, 3, 4], but that isn't broadcastable + # from [B, 4]. + # + # This means that we need to also reduce the grad_input to the shape of the + # input. This behavior is controlled by the `target_shape_without_bdim_to_reduce_to` flag; + # if not-None then we do the reducing manually, otherwise, we do not do a reduction. + assert input_bdim is not None + + if grad_input_bdim is None: + grad_input = grad_input.unsqueeze(input_bdim) + new_shape = list(grad_input.shape) + new_shape[input_bdim] = batch_size + grad_input = grad_input.expand(new_shape) + grad_input_bdim = input_bdim + + if target_shape_without_bdim_to_reduce_to is not None: + return vmap(torch.Tensor.sum_to_size, in_dims=(grad_input_bdim, None), out_dims=input_bdim)( + grad_input, target_shape_without_bdim_to_reduce_to) + + if input_bdim != grad_input_bdim: + grad_input = grad_input.movedim(grad_input_bdim, input_bdim) + return grad_input + + +class AutogradFunctionApply(HigherOrderOperator): + def __init__(self): + super().__init__("autograd_function_apply") + + def __call__(self, fwd, bwd, *fwd_args): + saved_values = None + + class ApplyTemplate(torch.autograd.Function): + @staticmethod + def forward(ctx, *args): + nonlocal saved_values + output, saved_values = fwd(None, *args) + return output + + @staticmethod + def backward(ctx, *grad): + return bwd(None, *grad, *saved_values) + + return ApplyTemplate.apply(*fwd_args) + + +autograd_function_apply = AutogradFunctionApply() diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/batch_norm_replacement.py b/venv/lib/python3.10/site-packages/torch/_functorch/batch_norm_replacement.py new file mode 100644 index 0000000000000000000000000000000000000000..29715581837e19fab7c2ab511abf289ec35ef662 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/batch_norm_replacement.py @@ -0,0 +1,24 @@ +import torch.nn as nn +from torch._functorch.utils import exposed_in + + +def batch_norm_without_running_stats(module: nn.Module): + if isinstance(module, nn.modules.batchnorm._BatchNorm) and module.track_running_stats: + module.running_mean = None + module.running_var = None + module.num_batches_tracked = None + module.track_running_stats = False + + +@exposed_in("torch.func") +def replace_all_batch_norm_modules_(root: nn.Module) -> nn.Module: + """ + In place updates :attr:`root` by setting the ``running_mean`` and ``running_var`` to be None and + setting track_running_stats to be False for any nn.BatchNorm module in :attr:`root` + """ + # base case + batch_norm_without_running_stats(root) + + for obj in root.modules(): + batch_norm_without_running_stats(obj) + return root diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/benchmark_utils.py b/venv/lib/python3.10/site-packages/torch/_functorch/benchmark_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d77ae7aae84d53bf388d4f702b2576c2b2a503e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/benchmark_utils.py @@ -0,0 +1,195 @@ +# mypy: ignore-errors + +import contextlib +import time +import os +import json + +import torch +from torch.profiler import profile, ProfilerActivity + + +def synchronize(): + pass + + +def dump_chrome_trace(f, input, trace_filename, optimize_ctx, activities, 
num_runs=1, + devices=None, kwargs_for_f=None, kwargs_for_profiler=None): + """ + Output the chrome trace of running f(input, **kwargs_for_f) with [optimize_ctx] + [num_runs] times to [trace_filename]. + + [activities] are the activities that the profiler will record, e.g. ProfilerActivity.CUDA. + Return total runtime without the profiler + + Outputs to trace_filename + """ + + if devices is None: + devices = ["cuda"] + + global synchronize + if devices != ["cpu"] and torch.cuda.is_available(): + synchronize = torch.cuda.synchronize + + if kwargs_for_f is None: + kwargs_for_f = {} + if kwargs_for_profiler is None: + kwargs_for_profiler = {} + + with optimize_ctx: + torch.manual_seed(1337) + for _ in range(5): # warmup runs + f(input, **kwargs_for_f) + synchronize() + torch.manual_seed(1337) + t0 = time.perf_counter() + for _ in range(num_runs): + f(input, **kwargs_for_f) + synchronize() + t1 = time.perf_counter() + timing = t1 - t0 + + with profile(activities=activities, **kwargs_for_profiler) as prof: + with optimize_ctx: + synchronize() + torch.manual_seed(1337) + for _ in range(num_runs): + f(input, **kwargs_for_f) + synchronize() + prof.export_chrome_trace(trace_filename) + + return timing + + +def get_chrome_trace_events(filename): + f = open(filename) + data = json.load(f) + events = data["traceEvents"] + return events + + +def is_gpu_compute_event(event): + global gpu_pids + return "pid" in event and event["pid"] in gpu_pids and "ph" in event and event["ph"] == "X" + + +def get_sorted_gpu_events(events): + sorted_gpu_events = [] + for event in events: + if not is_gpu_compute_event(event): + continue + sorted_gpu_events.append(event) + return sorted(sorted_gpu_events, key=lambda x: x["ts"]) + + +def get_duration(sorted_gpu_events): + if len(sorted_gpu_events) == 0: + return 0 + event = sorted_gpu_events[0] + current_end_time = event["ts"] + event["dur"] + total_duration = event["dur"] + for event in sorted_gpu_events[1:]: + start_time = max(event["ts"], current_end_time) + end_time = event["ts"] + event["dur"] + total_duration = total_duration + max(end_time - start_time, 0) + current_end_time = max(current_end_time, end_time) + return total_duration + + +def get_sorted_gpu_mm_conv_events(events): + def is_mm_conv_event(event): + return "name" in event and ("gemm" in event["name"] or "conv" in event["name"] + or "cutlass" in event["name"] or "wgrad" in event["name"]) + gpu_events = get_sorted_gpu_events(events) + sorted_events = [] + for event in gpu_events: + if not is_mm_conv_event(event): + continue + sorted_events.append(event) + return sorted_events + + +gpu_pids = [] + + +def compute_utilization(filename: str, total_length: float): + """ + Process the chrome traces outputs by the pytorch profiler to compute GPU Utilization + and percent of times spent on matmul and convolution + + Args: + filename(str): Name of chrome traces file produced by pytorch profiler + + total_length(float): total length of the process without profiler in second + + Return: + tuple: (GPU Utilization, percent of time spent on matmul and convolution) + """ + events = get_chrome_trace_events(filename) + + # get pids of GPU events + global gpu_pids + gpu_pids = [] + for event in events: + if "name" not in event: + continue + if event["name"] == 'process_labels' and "GPU" in event["args"]["labels"]: + gpu_pids.append(event["pid"]) + + total_length = total_length * 1e6 + sorted_gpu_events = get_sorted_gpu_events(events) + utilization = get_duration(sorted_gpu_events) / total_length + + 
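+    # Descriptive note (editor's addition): chrome-trace "ts"/"dur" fields are in
+    # microseconds, which is why total_length (given in seconds) is scaled by 1e6
+    # above before being used as the denominator of both ratios.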
sorted_gpu_mm_conv_events = get_sorted_gpu_mm_conv_events(events) + mm_conv_utilization = get_duration(sorted_gpu_mm_conv_events) / total_length + + return utilization, mm_conv_utilization + + +def benchmark_utilization(f, input, trace_folder, optimize_ctx=None, trace_file_name="tmp_chrome_trace", num_runs=1): + """ + Benchmark the GPU Utilization and percent of time spent on matmul and convolution operations of + running f(input, **kwargs_for_f) with [optimize_ctx] [num_runs] times. + It will produce a chrome trace file in trace_folder/trace_file_name.json + + Example: + + ``` + def f(a): + return a.sum() + a = torch.rand(2**20, device="cuda") + utilization, mm_conv_utilization = benchmark_utilization(f, a, "tmp", trace_file_name = "tmp_chrome_trace") + ``` + + Args: + f: function to benchmark + + input: input to :attr:`f` + + trace_folder: name of the folder to store the chrome trace + + optimize_ctx: the context in which f will run + + trace_file_name: name of the dumped chrome trace file, default to "tmp_chrome_trace" + + num_runs: number of times to run f, excluding the warm-up runs, default to 1. + + Return: + tuple: (GPU Utilization, percent of time spent on matmul and convolution) + + """ + isExist = os.path.exists(trace_folder) + if not isExist: + os.makedirs(trace_folder) + print("create folder " + trace_folder) + + if optimize_ctx is None: + optimize_ctx = contextlib.nullcontext() + + chrome_trace_file_name = os.path.join(trace_folder, trace_file_name + ".json") + total_length = dump_chrome_trace(f, input, chrome_trace_file_name, optimize_ctx, + [ProfilerActivity.CUDA], num_runs=num_runs, devices="cuda") + utilization, mm_conv_utilization = compute_utilization(chrome_trace_file_name, total_length) + + return utilization, mm_conv_utilization diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/compile_utils.py b/venv/lib/python3.10/site-packages/torch/_functorch/compile_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a7f512a14ce232f0b398ba79732510a3b4110dba --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/compile_utils.py @@ -0,0 +1,97 @@ +# mypy: ignore-errors + + +import torch +import torch.fx as fx +from torch.utils._pytree import tree_flatten +from torch.utils import _pytree as pytree + +aten = torch.ops.aten + + +def get_aten_target(node): + if hasattr(node.target, 'overloadpacket'): + return node.target.overloadpacket + return node.target + + +rand_ops = [aten.dropout, aten._fused_dropout, aten._standard_gamma, + aten.bernoulli, aten.multinomial, aten.native_dropout, + aten.normal, aten.poisson, aten.binomial, aten.rrelu, + aten.rand_like, aten.rand, aten.randint, aten.randn, aten.randperm] + + +# return a new copy of torch.fx.graph.Graph with CSE applied to the input graph +def fx_graph_cse(fx_g: torch.fx.graph.Graph): + new_graph = fx.Graph() + env = {} # map from node in the old graph to node in the new graph + hash_env = {} # map from hash to a node in the new graph + token_map = {} # map from hash to token + for n in fx_g.nodes: + # The placeholder, output, and get_attr nodes are copied to the new graph without change + # do not CSE away random operations + if n.op == 'placeholder' or n.op == 'output' or n.op == 'get_attr' or get_aten_target(n) in rand_ops: + new_node = new_graph.node_copy(n, lambda x: env[x]) + env[n] = new_node + else: # n.op == 'call_function', should never see n.op == 'call_module' or 'call_method' + # substitute args and kwargs members to their mapping in env if exists + # specs can be 
used to reconstruct nested list/dictionaries + def substitute(arg_list): + arg_list, spec = tree_flatten(arg_list) + for i in range(len(arg_list)): + v = arg_list[i] + if isinstance(v, torch.fx.node.Node) and v in env: + arg_list[i] = env[v] + if isinstance(v, (torch.SymBool, torch.SymInt, torch.SymFloat)): + arg_list[i] = v.node + return tuple(arg_list), spec + args, args_spec = substitute(n.args) + kwargs, kwargs_spec = substitute(n.kwargs) + + # each token corresponds to a unique node + # nodes with the same token can be substituted + token = {"target": n.target, "args": args, "args_spec": args_spec, + "kwargs": kwargs, "kwargs_spec": kwargs_spec} + + # hash substituted args to a number, do not hash specs because specs are not hashable + # We need to add type into hash to avoid situations like: + # hash((primals_2, 1.0)) == hash((primals_2, 1)) + hash_arg = hash((tuple((a, type(a)) for a in args), tuple((a, type(a)) for a in kwargs))) + hash_val = (n.target, hash_arg) + + # check if a node has a substitute and can be eliminated + hash_val_in_hash_env = hash_val in hash_env + if hash_val_in_hash_env and token_map[hash_val] == token: + env[n] = hash_env[hash_val] + continue + + new_node = new_graph.node_copy(n, lambda x: env[x]) + env[n] = new_node + if not hash_val_in_hash_env: + hash_env[hash_val] = new_node + token_map[hash_val] = token + + return new_graph + + +def strip_overloads(gm): + """ + Modifies the target of graph nodes in :attr:`gm` to strip overloads. + + Args: + gm(fx.GraphModule): The input Fx graph module to be modified + """ + for node in gm.graph.nodes: + if isinstance(node.target, torch._ops.OpOverload): + node.target = node.target.overloadpacket + gm.recompile() + + +def get_placeholders(graph): + return list(filter(lambda x: x.op == 'placeholder', graph.nodes)) + +def get_outputs(graph): + for node in graph.nodes: + if node.op == 'output': + return pytree.tree_leaves(node.args[0]) + raise AssertionError("No output node found") diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/compilers.py b/venv/lib/python3.10/site-packages/torch/_functorch/compilers.py new file mode 100644 index 0000000000000000000000000000000000000000..3c0117904e148323e2a7703cf309b214bccfaac4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/compilers.py @@ -0,0 +1,441 @@ +# mypy: ignore-errors + +import copy +import logging +import os +import pickle +import random +from contextlib import contextmanager +from functools import partial +from typing import Callable, Union +import sympy + +import torch +from torch import SymInt +import torch.fx as fx +import torch.nn as nn +from torch._decomp import get_decompositions +from torch.fx.experimental.symbolic_shapes import bind_symbols + +from .aot_autograd import aot_function, aot_module, make_boxed_compiler +from .compile_utils import strip_overloads +from .partitioners import ( + default_partition, + draw_graph, + min_cut_rematerialization_partition, +) +import torch.utils._pytree as pytree + + +log = logging.getLogger(__name__) + + +# These canonicalizations are needed here (and not decompositions), as the ops +# we're trying to canonicalize to CompositeImplicitAutograd. 
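+# Descriptive note (editor's addition): concretely, _canonicalize below rewrites
+# calls to aten._to_copy into aten.to (the op the comment above refers to as
+# CompositeImplicitAutograd), which the TorchScript-based compilers in this file
+# can handle.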
+def _canonicalize(fx_g): + for node in fx_g.graph.nodes: + if node.target == torch.ops.aten._to_copy: + node.target = torch.ops.aten.to + fx_g.recompile() + return fx_g + + +@contextmanager +def _disable_jit_autocast(): + old_jit_autocast_flag = torch._C._jit_set_autocast_mode(False) + try: + yield + finally: + torch._C._jit_set_autocast_mode(old_jit_autocast_flag) + + +@make_boxed_compiler +def ts_compile(fx_g: fx.GraphModule, inps) -> Callable: + """ + Compiles the :attr:`fx_g` with Torchscript compiler. + + .. warning:: + This API is experimental and likely to change. + + Args: + fx_g(fx.GraphModule): The input Fx graph module to be compiled. + + Returns: + Torch scripted model. + """ + + with _disable_jit_autocast(): + strip_overloads(fx_g) + + for node in fx_g.graph.nodes: + if ( + node.target == torch.ops.aten._to_copy + and len(node.args) == 1 + and len(node.kwargs) == 1 + and "dtype" in node.kwargs + ): + node.target = torch.ops.aten.to + + for node in fx_g.graph.nodes: + new_kwargs = {} + for k, v in node.kwargs.items(): + if isinstance(v, torch.device): + v = v.type + new_kwargs[k] = v + node.kwargs = new_kwargs + + fx_g.graph.lint() + + fx_g.recompile() + + f = torch.jit.script(fx_g) + + torch._C._jit_pass_remove_mutation(f.graph) + + f = torch.jit.freeze(f.eval()) + f = torch.jit.optimize_for_inference(f) + if not any(isinstance(t, torch._subclasses.FakeTensor) for t in inps): + f(*inps) + return f + + +def _draw_graph_compile(fx_g, _, name, clear_meta=True): + print(fx_g.code) + draw_graph(fx_g, name, clear_meta=clear_meta) + return fx_g + + +def draw_graph_compile(name): + return make_boxed_compiler( + partial(_draw_graph_compile, name=name) + ) + + +@make_boxed_compiler +def nop(fx_g: fx.GraphModule, _) -> Callable: + """ + Returns the :attr:`fx_g` Fx graph module as it is. This is a no-op compiler + and can be used to check accuracy. + + .. warning:: + This API is experimental and likely to change. + + """ + return fx_g + +class DebugInterpreter(fx.Interpreter): + def run(self, *args): + self.symbol_mapping = bind_symbols(self.module, *args) + super().run(*args) + + def run_node(self, n): + + def subst_symint(ni): + if not isinstance(ni, SymInt): + return ni + r = sympy.expand(ni.node.expr.xreplace(self.symbol_mapping)) + assert r.is_number, r + return int(r) + + def subst_symint_tuple(nis): + return tuple(subst_symint(ni) for ni in nis) + + def check_significant_strides(a, b): + if subst_symint(a.numel()) > 0: + for idx in range(a.ndim): + if subst_symint(a.stride(idx)) != b.stride(idx) and subst_symint(a.size(idx)) > 1: + return False + return True + + def check(nv, rv, desc): + assert callable(desc) + assert nv.dtype == rv.dtype, f"{desc()}: {nv.dtype} != {rv.dtype}" + assert subst_symint_tuple(nv.size()) == rv.size(), \ + f"{desc()}: {nv.size()} aka {subst_symint_tuple(nv.size())} != {rv.size()}" + same_strides = check_significant_strides(nv, rv) + assert same_strides, f"{desc()}: {nv.stride()} aka {subst_symint_tuple(nv.stride())} != {rv.stride()}" + + r = super().run_node(n) + if 'val' in n.meta: + n_vals, n_spec = pytree.tree_flatten(n.meta['val']) + r_vals, r_spec = pytree.tree_flatten(r) + # TODO: There is some sort of problem where we record that an + # operator returned a tuple/list, and then later it turns out the + # real version of the operator returned a list/tuple. Need to + # figure out what's actually going on here, the error itself is + # harmless enough as we only getitem out the outputs. 
+ # assert n_spec == r_spec, f"{n_spec} != {r_spec}" + assert len(n_vals) == len(r_vals), f"{len(n_vals)} != {len(r_vals)}" + for i, nv, rv in zip(range(len(n_vals)), n_vals, r_vals): + if not isinstance(rv, torch.Tensor): + continue + check(nv, rv, lambda: f"output {i} where {self.symbol_mapping}") + return r + + +@make_boxed_compiler +def debug_nop(fx_g: fx.GraphModule, _) -> Callable: + """ + Returns a (slow) interpreter over the FX graph module that also checks + various debugging properties (e.g., that tracing strides matched real + strides.) + """ + return DebugInterpreter(fx_g).run + +@make_boxed_compiler +def simple_ts_compile(fx_g, _): + strip_overloads(fx_g) + f = torch.jit.script(fx_g) + f = torch.jit.freeze(f.eval()) + return f + + +def nnc_jit(f): + return aot_function(f, simple_ts_compile) + + +aten = torch.ops.aten +default_decompositions = { + aten.detach, + aten.gelu_backward, + aten.leaky_relu_backward, + aten.sigmoid_backward, + aten.threshold_backward, + aten.hardtanh_backward, + aten.hardsigmoid_backward, + aten.hardswish_backward, + aten.tanh_backward, + aten.silu_backward, + aten.elu_backward, + aten.cudnn_batch_norm, + aten.cudnn_batch_norm_backward, + aten.masked_fill.Scalar, + aten.masked_fill.Tensor, + aten.elu, + aten.leaky_relu, + aten.hardtanh, + aten.hardswish, + aten.hardsigmoid, + aten.conj_physical, + aten.is_same_size, +} + +default_decompositions = get_decompositions(default_decompositions) + + +@make_boxed_compiler +def print_compile(fx_g, _): + print(fx_g.code) + return fx_g + + +def memory_efficient_fusion( + fn: Union[Callable, nn.Module], + **kwargs, +): + """ + Wrapper function over :func:`aot_function` and :func:`aot_module` to perform + memory efficient fusion. It uses the + :func:`min_cut_rematerialization_partition` partitioner to perform efficient + recomputation. It uses NVFuser to compile the generated forward and backward + graphs. + + .. warning:: + This API is experimental and likely to change. + + Args: + fn (Union[Callable, nn.Module]): A Python function or a ``nn.Module`` + that takes one ore more arguments. Must return one or more Tensors. + **kwargs: Any other overrides you want to make to the settings + + Returns: + Returns a ``Callable`` or ``nn.Module`` that retains the eager behavior + of the original :attr:`fn`, but whose forward and backward graphs have + gone through recomputation optimizations, and the graphs have been + compiled with nvfuser. 
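+
+    Example (illustrative sketch added by the editor, not part of the original
+    docstring; assumes a CUDA device and a TorchScript/nvfuser-enabled build):
+
+        >>> # xdoctest: +SKIP
+        >>> import torch
+        >>> from torch._functorch.compilers import memory_efficient_fusion
+        >>> def f(x):
+        >>>     return torch.sin(x).cos().sum()
+        >>> fused_f = memory_efficient_fusion(f)
+        >>> x = torch.randn(1024, device="cuda", requires_grad=True)
+        >>> fused_f(x).backward()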
+ + """ + config = { + "fw_compiler": ts_compile, + "bw_compiler": ts_compile, + "partition_fn": min_cut_rematerialization_partition, + "decompositions": default_decompositions, + } + config.update(kwargs) + if isinstance(fn, torch.nn.Module): + return aot_module(fn, **config) + else: + return aot_function(fn, **config) + + +def debug_compile(fx_g, inps): + fx_g.to_folder("foo") + print( + f""" +############################################################## +# To minimize FX graph, copy and paste the below and run it # +############################################################## + +import torch +import torch.fx as fx +from functorch.compile import minifier, check_nvfuser_subprocess, check_nvfuser_correctness_subprocess + +inps = {[(i.shape, i.dtype) for i in inps]} +inps = [torch.ones(shape, dtype=dtype, device='cuda') for (shape, dtype) in inps] +from foo import FxModule +mod = FxModule().cuda() + +with torch.jit.fuser("fuser2"): + # check_nvfuser_subprocess can be replaced with check_nvfuser_correctness_subprocess + minifier(fx.symbolic_trace(mod), inps, check_nvfuser_subprocess) +""" + ) + from foo import FxModule + + FxModule().cuda()(*inps) + + return ts_compile(fx_g, inps) + + +graph_index = 0 + + +def get_inputs(input_data_path): + """ + Return a random input for the given inputs meta generated from _save_fx_default. + """ + inputs = [] + with (open(input_data_path, "rb")) as f: + inputs_meta = pickle.load(f) + inputs = [] + for meta in inputs_meta: + if len(meta) == 1: + type = meta + input = type(random.rand()) + else: + type, shape, stride, dtype, device = meta + if dtype in { + torch.int, + torch.int32, + torch.int64, + torch.bool, + torch.int, + torch.uint8, + int, + float, + }: + input = torch.randint(0, 1, shape, dtype=dtype, device=device) + else: + input = torch.rand(shape, dtype=dtype, device=device) + inputs.append(input) + return inputs + + +def _save_fx_default(current_name, folder_name, dump_example_input, gm, example_inputs): + """ + The forward, backward, and joint computation graph will be stored in + {folder_name}/{current_name}/{current_name}_forward_{graph_index}, + {folder_name}/{current_name}/{current_name}_backward_{graph_index}, and + {folder_name}/{current_name}/{current_name}_joint_{graph_index} respectively. + The input shape of the graphs will be stored in the .input files. + These files can be loaded with pickle, + and is a list of format (type, shape, stride, dtype, device). + In the case of type = int or float, it is just (type,). + For joint graph input, it is a nested list [[],[]] + where the two inner lists have the same format. + If dump_example_input is True, example_inputs will be stored in .pt file. 
+ Since each function might produce multiple graphs, + the graph_index is used to distinguish difference graphs + """ + from functorch.compile import aot_module_simplified + + def get_input_meta(args): + input_meta = [] + if len(args) > 0 and isinstance(args[0], tuple): # joint input + input_meta += get_input_meta(args[0]) + input_meta += get_input_meta(args[1]) + return input_meta + for arg in args: + if type(arg) == int or type(arg) == float: + input_meta.append((type(arg),)) + else: + input_meta.append( + (type(arg), arg.shape, arg.stride(), arg.dtype, arg.device) + ) + return input_meta + + def graph_saver_helper(gm_to_save, args, type_name): + global graph_index + if len(gm_to_save.graph.nodes) == 0: + log.log( + logging.WARNING, + "No nodes in graph {%s}_{%s}_{%s}.", + current_name, + type_name, + graph_index, + ) + return + + gm = copy.deepcopy(gm_to_save) + gm.graph.set_codegen(torch.fx.graph.CodeGen()) # remove codegen + gm.recompile() + + input_meta = get_input_meta(args) + + os.makedirs(f"{folder_name}/{current_name}", exist_ok=True) + gm.to_folder( + f"{folder_name}/{current_name}/{current_name}_{type_name}_{graph_index}" + ) + pickle.dump( + input_meta, + open( + f"{folder_name}/{current_name}/{current_name}_{type_name}_{graph_index}/{current_name}_{type_name}_{graph_index}.input", # noqa: B950 + "wb", + ), + ) # noqa: E501 + if dump_example_input: + torch.save( + args, + f"{folder_name}/{current_name}/{current_name}_{type_name}_{graph_index}/{current_name}_{type_name}_{graph_index}.pt", # noqa: B950 + ) # noqa: E501 + + def graph_saver_forward(gm, fw_args): + graph_saver_helper(gm, fw_args, "forward") + return gm + + def graph_saver_backward(gm, bw_args): + graph_saver_helper(gm, bw_args, "backward") + global graph_index + graph_index += 1 + return gm + + def graph_saver_joint(gm, joint_args): + graph_saver_helper(gm, joint_args, "joint") + return default_partition(gm, joint_args) + + return aot_module_simplified( + gm, + example_inputs, + fw_compiler=graph_saver_forward, + bw_compiler=graph_saver_backward, + partition_fn=graph_saver_joint, + decompositions=default_decompositions, + ) + + +# WARNING: This isn't tested anywhere!! +def graph_dumper_aot(current_name, folder_name, dump_example_input=False): + """ + Dump the forward, backward, and joint computation graph. + Example Usage: + save_fx_func = graph_dumper_aot(current_name, folder_name, dump_example_input = False) + optimize_ctx = torchdynamo.optimize( + save_fx_func + ) + with torch.enable_grad(): + with optimize_ctx: + result = forward_and_backward_pass(model, example_inputs) + """ + global graph_index + graph_index = 0 + return partial(_save_fx_default, current_name, folder_name, dump_example_input) diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/config.py b/venv/lib/python3.10/site-packages/torch/_functorch/config.py new file mode 100644 index 0000000000000000000000000000000000000000..9e084b04489c48e964a4b57ca9f25fbf7f1b058c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/config.py @@ -0,0 +1,48 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" +Global flags for aot autograd +""" +import os +import sys +from typing import TYPE_CHECKING + +# Converts torch rng ops to their functional philox rng equivalents. Note that +# we functionalize only CUDA rng ops today. 
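+# As a hedged sketch of how these module-level flags are usually overridden
+# (the install_config_module call at the bottom of this file is assumed to
+# provide the same `patch` helper that other torch config modules expose):
+#
+#     from torch._functorch import config as functorch_config
+#     with functorch_config.patch(debug_assert=True):
+#         ...  # run the compiled function with extra runtime asserts enabled
+#
+# The first flag, described in the comment above, controls rng functionalization: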
+functionalize_rng_ops = False + +# can be useful for debugging if we are incorrectly creating meta fake tensors +fake_tensor_allow_meta = os.environ.get("FAKE_ALLOW_META", True) + +# Enables optional asserts in hotpath code to check for errors. If +# you are seeing weird accuracy problems, try turning this on. +# This is currently off by default as it will harm tracing time, +# but it is on by default for aot_eager. +debug_assert = False + +debug_partitioner = os.environ.get("AOT_PARTITIONER_DEBUG", False) + +static_weight_shapes = True + +# Applies CSE to the graph before partitioning +cse = True + +# Restricts the amount of computation AOTAutograd can do. +max_dist_from_bw = 3 + +# Enable aggressive_recomputation in the min-cut algorithm in partitioners to reduce +# memory usage with some penalty of performance. It allows more ops to be considered +# as recomputable except random ops and compute-intensive ops. +aggressive_recomputation = False + +if TYPE_CHECKING: + from torch.utils._config_typing import * # noqa: F401, F403 + +from torch.utils._config_module import install_config_module + +# adds patch, save_config, invalid config checks, etc +install_config_module(sys.modules[__name__]) diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/deprecated.py b/venv/lib/python3.10/site-packages/torch/_functorch/deprecated.py new file mode 100644 index 0000000000000000000000000000000000000000..0272095147150874aa1012cad792305e71d322c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/deprecated.py @@ -0,0 +1,125 @@ +import torch._functorch.apis as apis +import torch._functorch.eager_transforms as _impl +import torch._functorch.make_functional as _nn_impl +from torch._functorch.vmap import in_dims_t, out_dims_t +from torch._functorch.eager_transforms import argnums_t +import torch.nn as nn +import textwrap +from typing import Any, Callable, Optional, Tuple, Union +import warnings + +""" +The APIs in this file are exposed as `functorch.*`. They are thin wrappers +around the torch.func.* APIs that have deprecation warnings -- we're trying +to move people to the torch.func.* equivalents. + +NB: We don't use *args, **kwargs in the signatures because that changes the +documentation. +""" + +def get_warning(api, new_api=None, replace_newlines=False): + if new_api is None: + new_api = f'torch.func.{api}' + warning = ( + f"We've integrated functorch into PyTorch. As the final step of the \n" + f"integration, functorch.{api} is deprecated as of PyTorch \n" + f"2.0 and will be deleted in a future version of PyTorch >= 2.3. \n" + f"Please use {new_api} instead; see the PyTorch 2.0 release notes \n" + f"and/or the torch.func migration guide for more details \n" + f"https://pytorch.org/docs/master/func.migrating.html" + ) + if replace_newlines: + warning = warning.replace("\n", "") + return warning + + +def warn_deprecated(api, new_api=None): + warning = get_warning(api, new_api, replace_newlines=True) + warnings.warn(warning, stacklevel=2) + + +def setup_docs(functorch_api, torch_func_api=None, new_api_name=None): + api_name = functorch_api.__name__ + if torch_func_api is None: + torch_func_api = getattr(_impl, api_name) + # See https://docs.python.org/3/using/cmdline.html#cmdoption-OO + if torch_func_api.__doc__ is None: + return + + warning = get_warning(api_name, new_api_name) + warning_note = "\n.. 
warning::\n\n" + textwrap.indent(warning, " ") + warning_note = textwrap.indent(warning_note, " ") + functorch_api.__doc__ = torch_func_api.__doc__ + warning_note + +def vmap( + func: Callable, + in_dims: in_dims_t = 0, + out_dims: out_dims_t = 0, + randomness: str = 'error', + *, + chunk_size=None) -> Callable: + warn_deprecated('vmap', 'torch.vmap') + return apis.vmap(func, in_dims, out_dims, randomness, chunk_size=chunk_size) + +def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Callable: + warn_deprecated('grad') + return apis.grad(func, argnums, has_aux) + +def grad_and_value(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Callable: + warn_deprecated('grad_and_value') + return apis.grad_and_value(func, argnums, has_aux) + +def vjp(func: Callable, *primals, has_aux: bool = False): + warn_deprecated('vjp') + return _impl.vjp(func, *primals, has_aux=has_aux) + +def jvp(func: Callable, primals: Any, tangents: Any, *, strict: bool = False, has_aux: bool = False): + warn_deprecated('jvp') + return _impl.jvp(func, primals, tangents, strict=strict, has_aux=has_aux) + +def jacrev(func: Callable, argnums: Union[int, Tuple[int]] = 0, *, has_aux=False, + chunk_size: Optional[int] = None, + _preallocate_and_copy=False): + warn_deprecated('jacrev') + return _impl.jacrev(func, argnums, has_aux=has_aux, chunk_size=chunk_size, + _preallocate_and_copy=_preallocate_and_copy) + +def jacfwd(func: Callable, argnums: argnums_t = 0, has_aux: bool = False, *, randomness: str = "error"): + warn_deprecated('jacfwd') + return _impl.jacfwd(func, argnums, has_aux, randomness=randomness) + +def hessian(func, argnums=0): + warn_deprecated('hessian') + return _impl.hessian(func, argnums=argnums) + +def functionalize(func: Callable, *, remove: str = 'mutations') -> Callable: + warn_deprecated('functionalize') + return _impl.functionalize(func, remove=remove) + +def make_functional(model: nn.Module, disable_autograd_tracking: bool = False): + warn_deprecated('make_functional', 'torch.func.functional_call') + return _nn_impl.make_functional(model, disable_autograd_tracking) + +def make_functional_with_buffers(model: nn.Module, disable_autograd_tracking: bool = False): + warn_deprecated('make_functional_with_buffers', 'torch.func.functional_call') + return _nn_impl.make_functional_with_buffers(model, disable_autograd_tracking) + +def combine_state_for_ensemble(models): + warn_deprecated('combine_state_for_ensemble', 'torch.func.stack_module_state') + return _nn_impl.combine_state_for_ensemble(models) + +setup_docs(vmap, apis.vmap, 'torch.vmap') +setup_docs(grad, apis.grad) +setup_docs(grad_and_value, apis.grad_and_value) +setup_docs(vjp) +setup_docs(jvp) +setup_docs(jacrev) +setup_docs(jacfwd) +setup_docs(hessian) +setup_docs(functionalize) +setup_docs(make_functional, _nn_impl.make_functional, + 'torch.func.functional_call') +setup_docs(make_functional_with_buffers, _nn_impl.make_functional, + 'torch.func.functional_call') +setup_docs(combine_state_for_ensemble, _nn_impl.combine_state_for_ensemble, + 'torch.func.stack_module_state') diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/eager_transforms.py b/venv/lib/python3.10/site-packages/torch/_functorch/eager_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..c7aad808b51a83f5dd0dca44770311fc2062266c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/eager_transforms.py @@ -0,0 +1,1640 @@ +# mypy: ignore-errors + +# Copyright (c) Facebook, Inc. and its affiliates. 
+# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Callable, Union, Tuple, List, Any, Optional +import torch +from functools import partial, wraps +import contextlib +from torch.utils._pytree import ( + tree_flatten, + tree_unflatten, + tree_map, + tree_map_only, + tree_map_, + treespec_pprint, +) +from torch.utils import _pytree as pytree +from torch.fx.experimental import const_fold +from torch.fx.experimental.proxy_tensor import make_fx +import torch.autograd.forward_ad as fwAD +from torch._subclasses.functional_tensor import FunctionalTensor + +from .vmap import doesnt_support_saved_tensors_hooks, get_chunk_sizes +from .apis import vmap + +from torch._C._functorch import ( + _wrap_for_grad, + _unwrap_for_grad, + _grad_increment_nesting, + _grad_decrement_nesting, + _jvp_increment_nesting, + _jvp_decrement_nesting, + _wrap_functional_tensor, + _unwrap_functional_tensor, + _func_decrement_nesting, + _func_increment_nesting, + _assert_wrapped_functional, + _propagate_functional_input_mutation, + set_inplace_requires_grad_allowed, + get_inplace_requires_grad_allowed, +) +from torch._functorch.utils import exposed_in, argnums_t + + +def lazy_dynamo_disable(func): + import torch._dynamo + return torch._dynamo.disable(func) + +@contextlib.contextmanager +def enable_inplace_requires_grad(enabled): + prev_state = get_inplace_requires_grad_allowed() + set_inplace_requires_grad_allowed(enabled) + try: + yield + finally: + set_inplace_requires_grad_allowed(prev_state) + + +def _vjp_treespec_compare(primals_out, cotangents): + # Revert this once #116264 gets fixed + _, primals_out_spec = tree_flatten(primals_out) + _, cotangents_spec = tree_flatten(cotangents) + # Dynamo fails to trace operator.ne below. To bypass this limitation, this + # function is not inlined. + if primals_out_spec != cotangents_spec: + raise RuntimeError( + f'Expected pytree structure of cotangents to be the same ' + f'as pytree structure of outputs to the function. 
' + f'cotangents: {treespec_pprint(cotangents_spec)}, ' + f'primal output: {treespec_pprint(primals_out_spec)}') + + +def _set_tensor_requires_grad(x): + # avoid graph-break on x.requires_grad_() + # https://github.com/pytorch/pytorch/pull/110053 + return x.requires_grad_() + +def _create_differentiable(inps, level=None): + def create_differentiable(x): + if isinstance(x, torch.Tensor): + with enable_inplace_requires_grad(True): + return _set_tensor_requires_grad(x) + raise ValueError(f'Thing passed to transform API must be Tensor, ' + f'got {type(x)}') + return tree_map(create_differentiable, inps) + + +def _undo_create_differentiable(inps, level=None): + def unwrap_tensors(x): + if isinstance(x, torch.Tensor): + return _unwrap_for_grad(x, level) + # TODO: Remove the following hack for namedtuples + if isinstance(x, tuple): + return tree_map(unwrap_tensors, tuple(x)) + + raise RuntimeError(f"Expected tensors, got unsupported type {type(x)}") + + return tree_map(unwrap_tensors, inps) + + +def _is_differentiable(maybe_tensor): + if not isinstance(maybe_tensor, torch.Tensor): + return False + return maybe_tensor.requires_grad + + +def _any_differentiable(tensor_or_tuple_of_tensors): + flat_args, _ = tree_unflatten(tensor_or_tuple_of_tensors) + return any(tuple(map(_is_differentiable, flat_args))) + + +def _wrap_tensor_for_grad(maybe_tensor, level): + if not isinstance(maybe_tensor, torch.Tensor): + return maybe_tensor + return _wrap_for_grad(maybe_tensor, level) + + +def _wrap_all_tensors(tensor_pytree, level): + return tree_map(partial(_wrap_tensor_for_grad, level=level), tensor_pytree) + + +def _as_tuple(val): + if isinstance(val, tuple): + return val + return (val,) + +# Version of autograd.grad that handles outputs that don't depend on inputs + + +def _autograd_grad(outputs, inputs, grad_outputs=None, retain_graph=False, create_graph=True): + if grad_outputs is None: + diff_outputs = tuple(out for out in outputs if out.requires_grad) + else: + result = tuple((out, go) for out, go in zip(outputs, grad_outputs) if out.requires_grad) + if len(result) == 0: + diff_outputs, grad_outputs = (), () + else: + diff_outputs, grad_outputs = zip(*result) + if len(diff_outputs) == 0: + return tuple(torch.zeros_like(inp) for inp in inputs) + grad_inputs = torch.autograd.grad(diff_outputs, inputs, grad_outputs, + retain_graph=retain_graph, + create_graph=create_graph, + allow_unused=True) + grad_inputs = tuple(torch.zeros_like(inp) if gi is None else gi + for gi, inp in zip(grad_inputs, inputs)) + return grad_inputs + +# NOTE [grad and vjp interaction with no_grad] +# +# def f(x): +# with torch.no_grad(): +# c = x ** 2 +# return x - c +# +# The thing to consider is if enable_grad is on/off before grad gets called. +# +# Case 1: enable_grad is on. +# grad(f)(x) +# In this case, `grad` should respect the inner torch.no_grad. +# +# Case 2: enable_grad is off +# with torch.no_grad(): +# grad(f)(x) +# In this case, `grad` should respect the inner torch.no_grad, but not the +# outer one. This is because `grad` is a "function transform": its result +# should not depend on the result of a context manager outside of `f`. +# +# This gives us the following desired behavior: +# - (nested) grad transforms must obey torch.no_grad inside them +# - (nested) grad transforms should not obey torch.no_grad outside them +# +# To achieve this behavior, upon entering grad/vjp: +# - we save the current ("previous") is_grad_enabled (*) +# - we unconditionally enable grad. 
+# +# Inside DynamicLayerBackFallback, when we're temporarily popping `grad` layer +# off the stack: +# - if grad_mode is disabled, then we do nothing. (there is a torch.no_grad +# active, all subsequent grad transforms must obey it). +# - if grad_mode is enabled, and the previous is_grad_enabled (*) is False, +# then we temporarily restore the previous `is_grad_enabled`. This is +# because we're crossing the boundary from a `grad` outside the +# no_grad to a `grad` inside the no_grad. +# +# NB: vjp has some interesting behavior because the vjp's callable can be called +# under a different grad_mode than the forward computation... +# +# NB: forward-mode AD: forward-mode AD doesn't respect torch.no_grad, but +# it respects c10::AutoFwGradMode. We've implemented the same logic for +# our jvp transform (it will have special handling if FwGradMode is disabled). + + +# How do we increment and decrement the nesting? I don't think we can. +@exposed_in("torch.func") +def vjp(func: Callable, *primals, has_aux: bool = False): + """ + Standing for the vector-Jacobian product, returns a tuple containing the + results of ``func`` applied to ``primals`` and a function that, when + given ``cotangents``, computes the reverse-mode Jacobian of ``func`` with + respect to ``primals`` times ``cotangents``. + + Args: + func (Callable): A Python function that takes one or more arguments. Must + return one or more Tensors. + primals (Tensors): Positional arguments to ``func`` that must all be + Tensors. The returned function will also be computing the + derivative with respect to these arguments + has_aux (bool): Flag indicating that ``func`` returns a + ``(output, aux)`` tuple where the first element is the output of + the function to be differentiated and the second element is + other auxiliary objects that will not be differentiated. + Default: False. + + Returns: + Returns a ``(output, vjp_fn)`` tuple containing the output of ``func`` + applied to ``primals`` and a function that computes the vjp of + ``func`` with respect to all ``primals`` using the cotangents passed + to the returned function. If ``has_aux is True``, then instead returns a + ``(output, vjp_fn, aux)`` tuple. + The returned ``vjp_fn`` function will return a tuple of each VJP. 
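+
+    For instance, ``has_aux=True`` can be used as follows (an illustrative
+    sketch; the function ``f`` below is arbitrary):
+
+        >>> x = torch.randn([5])
+        >>> f = lambda x: (x.sin().sum(), x.cos())
+        >>> (output, vjpfunc, aux) = torch.func.vjp(f, x, has_aux=True)
+        >>> grad = vjpfunc(torch.tensor(1.))[0]
+        >>> assert torch.allclose(grad, x.cos())
+        >>> assert torch.allclose(aux, x.cos())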
+ + When used in simple cases, :func:`vjp` behaves the same as :func:`grad` + + >>> x = torch.randn([5]) + >>> f = lambda x: x.sin().sum() + >>> (_, vjpfunc) = torch.func.vjp(f, x) + >>> grad = vjpfunc(torch.tensor(1.))[0] + >>> assert torch.allclose(grad, torch.func.grad(f)(x)) + + However, :func:`vjp` can support functions with multiple outputs by + passing in the cotangents for each of the outputs + + >>> x = torch.randn([5]) + >>> f = lambda x: (x.sin(), x.cos()) + >>> (_, vjpfunc) = torch.func.vjp(f, x) + >>> vjps = vjpfunc((torch.ones([5]), torch.ones([5]))) + >>> assert torch.allclose(vjps[0], x.cos() + -x.sin()) + + :func:`vjp` can even support outputs being Python structs + + >>> x = torch.randn([5]) + >>> f = lambda x: {'first': x.sin(), 'second': x.cos()} + >>> (_, vjpfunc) = torch.func.vjp(f, x) + >>> cotangents = {'first': torch.ones([5]), 'second': torch.ones([5])} + >>> vjps = vjpfunc(cotangents) + >>> assert torch.allclose(vjps[0], x.cos() + -x.sin()) + + The function returned by :func:`vjp` will compute the partials with + respect to each of the ``primals`` + + >>> x, y = torch.randn([5, 4]), torch.randn([4, 5]) + >>> (_, vjpfunc) = torch.func.vjp(torch.matmul, x, y) + >>> cotangents = torch.randn([5, 5]) + >>> vjps = vjpfunc(cotangents) + >>> assert len(vjps) == 2 + >>> assert torch.allclose(vjps[0], torch.matmul(cotangents, y.transpose(0, 1))) + >>> assert torch.allclose(vjps[1], torch.matmul(x.transpose(0, 1), cotangents)) + + ``primals`` are the positional arguments for ``f``. All kwargs use their + default value + + >>> x = torch.randn([5]) + >>> def f(x, scale=4.): + >>> return x * scale + >>> + >>> (_, vjpfunc) = torch.func.vjp(f, x) + >>> vjps = vjpfunc(torch.ones_like(x)) + >>> assert torch.allclose(vjps[0], torch.full(x.shape, 4.)) + + .. note:: + Using PyTorch ``torch.no_grad`` together with ``vjp``. + Case 1: Using ``torch.no_grad`` inside a function: + + >>> def f(x): + >>> with torch.no_grad(): + >>> c = x ** 2 + >>> return x - c + + In this case, ``vjp(f)(x)`` will respect the inner ``torch.no_grad``. + + Case 2: Using ``vjp`` inside ``torch.no_grad`` context manager: + + >>> # xdoctest: +SKIP(failing) + >>> with torch.no_grad(): + >>> vjp(f)(x) + + In this case, ``vjp`` will respect the inner ``torch.no_grad``, but not the + outer one. This is because ``vjp`` is a "function transform": its result + should not depend on the result of a context manager outside of ``f``. + """ + return _vjp_with_argnums(func, *primals, has_aux=has_aux) + + +@contextlib.contextmanager +def grad_increment_nesting(): + try: + grad_level = _grad_increment_nesting() + yield grad_level + finally: + _grad_decrement_nesting() + + +@doesnt_support_saved_tensors_hooks +def _vjp_with_argnums(func: Callable, *primals, argnums: Optional[argnums_t] = None, has_aux: bool = False): + # This is the same function as vjp but also accepts an argnums argument + # All args are the same as vjp except for the added argument + # argnums (Optional[int or tuple[int]]): Optional, specifies the argument(s) to compute gradients with respect to. + # If None, computes the gradients with respect to all inputs (used for vjp). Default: None + # + # WARN: Users should NOT call this function directly and should just be calling vjp. + # It is only separated so that inputs passed to jacrev but not differentiated get the correct wrappers. 
+ # + # NOTE: All error messages are produced as if vjp was being called, even if this was called by jacrev + # + # Returns the same two elements as :func:`vjp` but the function returned, vjp_fn, returns a tuple of VJPs + # for only the primal elements given by argnums. + with grad_increment_nesting() as level: + # See NOTE [grad and vjp interaction with no_grad] + with torch.enable_grad(): + primals = _wrap_all_tensors(primals, level) + # Note for the reviewer: This is extremely odd but it passes the + # assertion "len(self.block_stack) == 1" on symbolic_convert.py + # The equivalent "if argnums is None" fails for some reason + if not isinstance(argnums, int) and not argnums: + diff_primals = _create_differentiable(primals, level) + else: + diff_primals = _slice_argnums(primals, argnums, as_tuple=False) + tree_map_(partial(_create_differentiable, level=level), diff_primals) + primals_out = func(*primals) + + if has_aux: + if not (isinstance(primals_out, tuple) and len(primals_out) == 2): + raise RuntimeError( + "vjp(f, *primals): output of function f should be a tuple: (output, aux) " + "if has_aux is True" + ) + primals_out, aux = primals_out + aux = _undo_create_differentiable(aux, level) + + flat_primals_out, primals_out_spec = tree_flatten(primals_out) + assert_non_empty_tensor_output(flat_primals_out, 'vjp(f, *primals)') + flat_diff_primals, primals_spec = tree_flatten(diff_primals) + results = _undo_create_differentiable(primals_out, level) + + for primal_out in flat_primals_out: + assert isinstance(primal_out, torch.Tensor) + if primal_out.is_floating_point() or primal_out.is_complex(): + continue + raise RuntimeError("vjp(f, ...): All outputs of f must be " + "floating-point or complex Tensors, got Tensor " + f"with dtype {primal_out.dtype}") + + def wrapper(cotangents, retain_graph=True, create_graph=None): + if create_graph is None: + create_graph = torch.is_grad_enabled() + flat_cotangents, cotangents_spec = tree_flatten(cotangents) + _vjp_treespec_compare(primals_out, cotangents) + result = _autograd_grad(flat_primals_out, flat_diff_primals, flat_cotangents, + retain_graph=retain_graph, create_graph=create_graph) + return tree_unflatten(result, primals_spec) + + if has_aux: + return results, wrapper, aux + else: + return results, wrapper + + +def _safe_zero_index(x): + assert len(x) == 1 + return x[0] + +# jacrev and jacfwd don't support complex functions +# Helper function to throw appropriate error. +def error_if_complex(func_name, args, is_input): + flat_args = pytree.tree_leaves(args) + for idx, arg in enumerate(flat_args): + if isinstance(arg, torch.Tensor) and arg.dtype.is_complex: + input_or_output = ("inputs" if is_input else "outputs") + err_msg = (f"{func_name}: Expected all {input_or_output} " + f"to be real but received complex tensor at flattened input idx: {idx}") + raise RuntimeError(err_msg) + +@exposed_in("torch.func") +def jacrev(func: Callable, argnums: Union[int, Tuple[int]] = 0, *, has_aux=False, + chunk_size: Optional[int] = None, + _preallocate_and_copy=False): + """ + Computes the Jacobian of ``func`` with respect to the arg(s) at index + ``argnum`` using reverse mode autodiff + + .. note:: + Using :attr:`chunk_size=1` is equivalent to computing the jacobian + row-by-row with a for-loop i.e. the constraints of :func:`vmap` are + not applicable. 
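+
+    For example (an illustrative sketch; the shape and chunk size below are
+    arbitrary), limiting ``chunk_size`` trades speed for lower peak memory:
+
+        >>> from torch.func import jacrev
+        >>> x = torch.randn(100)
+        >>> jacobian = jacrev(torch.sin, chunk_size=25)(x)
+        >>> assert jacobian.shape == (100, 100)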
+ + Args: + func (function): A Python function that takes one or more arguments, + one of which must be a Tensor, and returns one or more Tensors + argnums (int or Tuple[int]): Optional, integer or tuple of integers, + saying which arguments to get the Jacobian with respect to. + Default: 0. + has_aux (bool): Flag indicating that ``func`` returns a + ``(output, aux)`` tuple where the first element is the output of + the function to be differentiated and the second element is + auxiliary objects that will not be differentiated. + Default: False. + chunk_size (None or int): If None (default), use the maximum chunk size + (equivalent to doing a single vmap over vjp to compute the jacobian). + If 1, then compute the jacobian row-by-row with a for-loop. + If not None, then compute the jacobian :attr:`chunk_size` rows at a time + (equivalent to doing multiple vmap over vjp). If you run into memory issues computing + the jacobian, please try to specify a non-None chunk_size. + + Returns: + Returns a function that takes in the same inputs as ``func`` and + returns the Jacobian of ``func`` with respect to the arg(s) at + ``argnums``. If ``has_aux is True``, then the returned function + instead returns a ``(jacobian, aux)`` tuple where ``jacobian`` + is the Jacobian and ``aux`` is auxiliary objects returned by ``func``. + + A basic usage with a pointwise, unary operation will give a diagonal array + as the Jacobian + + >>> from torch.func import jacrev + >>> x = torch.randn(5) + >>> jacobian = jacrev(torch.sin)(x) + >>> expected = torch.diag(torch.cos(x)) + >>> assert torch.allclose(jacobian, expected) + + If you would like to compute the output of the function as well as the + jacobian of the function, use the ``has_aux`` flag to return the output + as an auxiliary object: + + >>> from torch.func import jacrev + >>> x = torch.randn(5) + >>> + >>> def f(x): + >>> return x.sin() + >>> + >>> def g(x): + >>> result = f(x) + >>> return result, result + >>> + >>> jacobian_f, f_x = jacrev(g, has_aux=True)(x) + >>> assert torch.allclose(f_x, f(x)) + + :func:`jacrev` can be composed with vmap to produce batched + Jacobians: + + >>> from torch.func import jacrev, vmap + >>> x = torch.randn(64, 5) + >>> jacobian = vmap(jacrev(torch.sin))(x) + >>> assert jacobian.shape == (64, 5, 5) + + Additionally, :func:`jacrev` can be composed with itself to produce + Hessians + + >>> from torch.func import jacrev + >>> def f(x): + >>> return x.sin().sum() + >>> + >>> x = torch.randn(5) + >>> hessian = jacrev(jacrev(f))(x) + >>> assert torch.allclose(hessian, torch.diag(-x.sin())) + + By default, :func:`jacrev` computes the Jacobian with respect to the first + input. However, it can compute the Jacboian with respect to a different + argument by using ``argnums``: + + >>> from torch.func import jacrev + >>> def f(x, y): + >>> return x + y ** 2 + >>> + >>> x, y = torch.randn(5), torch.randn(5) + >>> jacobian = jacrev(f, argnums=1)(x, y) + >>> expected = torch.diag(2 * y) + >>> assert torch.allclose(jacobian, expected) + + Additionally, passing a tuple to ``argnums`` will compute the Jacobian + with respect to multiple arguments + + >>> from torch.func import jacrev + >>> def f(x, y): + >>> return x + y ** 2 + >>> + >>> x, y = torch.randn(5), torch.randn(5) + >>> jacobian = jacrev(f, argnums=(0, 1))(x, y) + >>> expectedX = torch.diag(torch.ones_like(x)) + >>> expectedY = torch.diag(2 * y) + >>> assert torch.allclose(jacobian[0], expectedX) + >>> assert torch.allclose(jacobian[1], expectedY) + + .. 
note:: + Using PyTorch ``torch.no_grad`` together with ``jacrev``. + Case 1: Using ``torch.no_grad`` inside a function: + + >>> def f(x): + >>> with torch.no_grad(): + >>> c = x ** 2 + >>> return x - c + + In this case, ``jacrev(f)(x)`` will respect the inner ``torch.no_grad``. + + Case 2: Using ``jacrev`` inside ``torch.no_grad`` context manager: + + >>> with torch.no_grad(): + >>> jacrev(f)(x) + + In this case, ``jacrev`` will respect the inner ``torch.no_grad``, but not the + outer one. This is because ``jacrev`` is a "function transform": its result + should not depend on the result of a context manager outside of ``f``. + """ + if not (chunk_size is None or chunk_size > 0): + raise ValueError("jacrev: `chunk_size` should be greater than 0.") + + @wraps(func) + def wrapper_fn(*args): + error_if_complex("jacrev", args, is_input=True) + vjp_out = _vjp_with_argnums(func, *args, argnums=argnums, has_aux=has_aux) + if has_aux: + output, vjp_fn, aux = vjp_out + else: + output, vjp_fn = vjp_out + + # See NOTE: [Computing jacobian with vmap and vjp for multiple outputs] + flat_output, output_spec = tree_flatten(output) + + error_if_complex("jacrev", flat_output, is_input=False) + + # NB: vjp already checks that all outputs are tensors + # Step 1: Construct grad_outputs by splitting the standard basis + flat_output_numels = tuple(out.numel() for out in flat_output) + + primals = _slice_argnums(args, argnums) + flat_primals, primals_spec = tree_flatten(primals) + + def compute_jacobian_stacked(): + # Helper function to compute chunked Jacobian + # The intermediate chunked calculation are only + # scoped at this function level. + chunked_results = [] + for flat_basis_chunk in _chunked_standard_basis_for_(flat_output, + flat_output_numels, + chunk_size=chunk_size): + if chunk_size == 1: + # sanity check. + for t in flat_basis_chunk: + assert t.size(0) == 1 + + flat_basis_chunk = tree_map(lambda t: torch.squeeze(t, 0), flat_basis_chunk) + + basis = tree_unflatten(flat_basis_chunk, output_spec) + + if chunk_size == 1: + # Behaviour with `chunk_size=1` is same as `for-loop` + # i.e. user shouldn't deal with the limitations of vmap. + chunked_result = vjp_fn(basis) + else: # chunk_size is None or chunk_size != 1 + chunked_result = vmap(vjp_fn)(basis) + + flat_results = pytree.tree_leaves(chunked_result) + + if chunk_size == 1: + flat_results = tree_map(lambda t: torch.unsqueeze(t, 0), flat_results) + + chunked_results.append(flat_results) + + if len(chunked_results) == 1: + # Short-circuit if we used a single chunk + return chunked_results[0] + + # Concatenate chunks. + flat_results = [] + # Iterate and concat the jacobians of different + # inputs. + for idx in range(len(flat_primals)): + r = tuple(r_[idx] for r_ in chunked_results) + flat_results.append(torch.cat(r, 0)) + + return flat_results + + def compute_jacobian_preallocate_and_copy(): + # Helper function to compute chunked Jacobian + # The intermediate chunked calculation are only + # scoped at this function level. + out_vec_size = sum(flat_output_numels) + + # Don't pre-allocate if we have a single chunk. + if not (chunk_size is None or chunk_size >= out_vec_size): + stacked_results = [primal.new_zeros(out_vec_size, *primal.shape) for primal in flat_primals] + + for idx, flat_basis_chunk in enumerate(_chunked_standard_basis_for_(flat_output, + flat_output_numels, + chunk_size=chunk_size)): + if chunk_size == 1: + # sanity check. 
+ for t in flat_basis_chunk: + assert t.size(0) == 1 + + flat_basis_chunk = [torch.squeeze(t, 0) for t in flat_basis_chunk] + + basis = tree_unflatten(flat_basis_chunk, output_spec) + + if chunk_size == 1: + # Behaviour with `chunk_size=1` is same as `for-loop` + # i.e. user shouldn't deal with the limitations of vmap. + chunked_result = vjp_fn(basis) + else: # chunk_size is None or chunk_size != 1 + chunked_result = vmap(vjp_fn)(basis) + + flat_results = pytree.tree_leaves(chunked_result) + + # Short-circuit if we have a single chunk. + if chunk_size is None or chunk_size >= out_vec_size: + if chunk_size == 1: # and out_vec_size == 1 + # Since we squeezed the output dim + flat_results = tree_map(lambda t: torch.unsqueeze(t, 0), flat_results) + return flat_results + + for r, sr in zip(flat_results, stacked_results): + sr[idx * chunk_size: (idx + 1) * chunk_size].copy_(r) + + return stacked_results + + if _preallocate_and_copy: + flat_jacobians_per_input = compute_jacobian_preallocate_and_copy() + else: + flat_jacobians_per_input = compute_jacobian_stacked() + + # Step 2: The returned jacobian is one big tensor per input. In this step, + # we split each Tensor by output. + flat_jacobians_per_input = [result.split(flat_output_numels, dim=0) for result in flat_jacobians_per_input] + flat_input_flat_output = [ + tuple(split.view(out.shape + primal.shape) + for split, out in zip(splits, flat_output)) + for splits, primal in zip(flat_jacobians_per_input, flat_primals) + ] + + # Step 3: Right now, `jacobian` is a List[List[Tensor]]. + # The outer List corresponds to the number of primals, + # the inner List corresponds to the number of outputs. + # We need to: + # a. Exchange the order of the outer List and inner List + # b. tree_unflatten the inner Lists (which correspond to the primals) + # c. handle the argnums=int case + # d. tree_unflatten the outer List (which corresponds to the outputs) + flat_output_flat_input = tuple(zip(*flat_input_flat_output)) + + flat_output_input = tuple(tree_unflatten(flat_input, primals_spec) + for flat_input in flat_output_flat_input) + + if isinstance(argnums, int): + flat_output_input = tuple(_safe_zero_index(flat_input) + for flat_input in flat_output_input) + output_input = tree_unflatten(flat_output_input, output_spec) + if has_aux: + return output_input, aux + return output_input + return wrapper_fn + +# NOTE: [Computing jacobian with vmap and vjp for multiple outputs] +# +# Let's consider f(x) = (x**2, x.sum()) and let x = torch.randn(3). +# It turns out we can compute the jacobian of this function with a single +# call to autograd.grad by using vmap over the correct grad_outputs. +# +# Firstly, one way to compute the jacobian is to stack x**2 and x.sum() +# into a 4D vector. E.g., use g(x) = torch.stack([x**2, x.sum()]) +# +# To get the first row of the jacobian, we call +# >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([1, 0, 0, 0])) +# To get the 2nd row of the jacobian, we call +# >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([0, 1, 0, 0])) +# and so on. +# +# Using vmap, we can vectorize all 4 of these computations into one by +# passing the standard basis for R^4 as the grad_output. +# vmap(partial(autograd.grad, g(x), x))(torch.eye(4)). +# +# Now, how do we compute the jacobian *without stacking the output*? +# We can just split the standard basis across the outputs. 
So to +# compute the jacobian of f(x), we'd use +# >>> autograd.grad(f(x), x, grad_outputs=_construct_standard_basis_for(...)) +# The grad_outputs looks like the following: +# ( torch.tensor([[1, 0, 0], +# [0, 1, 0], +# [0, 0, 1], +# [0, 0, 0]]), +# torch.tensor([[0], +# [0], +# [0], +# [1]]) ) +# +# But we're not done yet! +# >>> vmap(partial(autograd.grad(f(x), x, grad_outputs=...))) +# returns a Tensor of shape [4, 3]. We have to remember to split the +# jacobian of shape [4, 3] into two: +# - one of shape [3, 3] for the first output +# - one of shape [ 3] for the second output + + +def _chunked_standard_basis_for_(tensors, tensor_numels, chunk_size=None): + # This function: + # - constructs a N=sum(tensor_numels) standard basis. i.e. an NxN identity matrix. + # - Splits the identity matrix into chunks with each chunk size determined by `tensor_numels`. + # - Each chunk corresponds to one tensor. The chunk has the same dtype and + # device as the tensor + # + # For example, with tensor_numels = [1, 2, 1], this function returns: + # ( tensor([[1], tensor([[0, 0], tensor([[0], + # [0], [1, 0], [0], + # [0], [0, 1], [0], + # [0]]) , [0, 0]]) , [1]]) ) + # + # Precondition: tensor_numels == tuple(tensor.numel() for tensor in tensors) + # Precondition: tensors always has at least one element. + # + # See NOTE: [Computing jacobian with vmap and grad for multiple tensors] + # for context behind this function. + # NOTE: Argument `chunk_size` is used to generate chunked basis instead of + # one huge basis matrix. `chunk_size` dictates the maximum size of the + # basis matrix along dim=0. + assert len(tensors) == len(tensor_numels) + assert len(tensors) > 0 + assert chunk_size is None or chunk_size > 0 + total_numel = sum(tensor_numels) + if chunk_size and chunk_size < total_numel: + chunk_numels = get_chunk_sizes(total_numel, chunk_size) + else: # chunk_size is None or chunk_size >= total_numel + chunk_size = total_numel + chunk_numels = [total_numel] + + diag_start_indices = (0, *torch.tensor(tensor_numels).cumsum(dim=0)[:-1].neg().unbind()) + + for chunk_idx, total_numel in enumerate(chunk_numels): + chunks = tuple(tensor.new_zeros(total_numel, tensor_numel) + for tensor, tensor_numel in zip(tensors, tensor_numels)) + + for chunk, diag_start_idx in zip(chunks, diag_start_indices): + chunk.diagonal(diag_start_idx + chunk_idx * chunk_size).fill_(1) + chunks = tuple(chunk.view(total_numel, *tensor.shape) + for chunk, tensor in zip(chunks, tensors)) + yield chunks + +def _construct_standard_basis_for(tensors, tensor_numels): + for basis in _chunked_standard_basis_for_(tensors, tensor_numels, chunk_size=None): + return basis + + +def _validate_and_wrap_argnum(argnum, num_args): + if not isinstance(argnum, int): + raise RuntimeError(f'argnum must be int, got: {type(argnum)}') + if argnum >= 0 and argnum < num_args: + return argnum + if argnum < 0 and argnum >= -num_args: + return argnum + num_args + raise RuntimeError(f'Got argnum={argnum}, but only {num_args} positional inputs') + + +def _check_unique_non_empty(argnums): + if isinstance(argnums, tuple): + if len(argnums) == 0: + raise RuntimeError("argnums must be non-empty") + if len(set(argnums)) != len(argnums): + raise RuntimeError(f"argnums elements must be unique, got {argnums}") + + +def _replace_args(old_args, new_args, argnums): + if isinstance(argnums, int): + if len(new_args) != 1: + raise RuntimeError(f'new_args should be of size 1, was of size {len(new_args)}') + return tuple(new_args[0] if i == argnums else old_args[i] for i in 
range(len(old_args))) + if isinstance(argnums, tuple): + if len(new_args) != len(argnums): + raise RuntimeError( + "new_args should have the same size as argnums. " + f"Argnums size {len(argnums)}, new_args size {len(new_args)}") + + def get_right_elem(i): + return new_args[argnums.index(i)] if i in argnums else old_args[i] + + return tuple(get_right_elem(i) for i in range(len(old_args))) + raise RuntimeError(f'argnums must be int or Tuple[int, ...], got: {type(argnums)}') + + +def _validate_and_wrap_argnums(argnums, num_args): + if isinstance(argnums, int): + return _validate_and_wrap_argnum(argnums, num_args) + if isinstance(argnums, tuple): + return tuple(_validate_and_wrap_argnum(argnum, num_args) for argnum in argnums) + raise AssertionError("Should never get here") + + +def _slice_argnums(args, argnums, as_tuple=True): + if not isinstance(argnums, int) and not isinstance(argnums, tuple): + raise RuntimeError(f'argnums must be int or Tuple[int, ...], got: {type(argnums)}') + argnums = _validate_and_wrap_argnums(argnums, len(args)) + _check_unique_non_empty(argnums) + if isinstance(argnums, int): + if as_tuple: + return (args[argnums],) + else: + return args[argnums] + return tuple(args[i] for i in argnums) + + +JVP_NESTING = 0 + + +@contextlib.contextmanager +def noop(): + yield + + +def assert_flat_tuple_of_tensors(elts: Any, api: str, argname: str) -> None: + if not isinstance(elts, tuple): + raise RuntimeError( + f'{api}: Expected {argname} to be a tuple of Tensors, got {type(elts)}') + for elt in elts: + if isinstance(elt, torch.Tensor): + continue + raise RuntimeError( + f'{api}: Expected {argname} to be a tuple of Tensors, got ' + f'a tuple with an element of type {type(elt)}') + if len(elts) == 0: + raise RuntimeError( + f'{api}: Expected {argname} to be a non-empty tuple of Tensors.') + + +def assert_non_empty_tensor_output(output: List[Any], api: str) -> None: + if (len(output) == 1 and output[0] is None) or len(output) < 1: + raise RuntimeError( + f'{api}: Expected f to be a function that has non-empty output (got output = {output})' + ) + for o in output: + if not isinstance(o, torch.Tensor): + raise RuntimeError( + f'{api}: expected f(*primals) to return only tensors' + f', got unsupported type {type(o)}' + ) + + +def assert_output_is_tensor_or_tensors(output: Any, api: str) -> None: + if isinstance(output, torch.Tensor): + return + if not isinstance(output, tuple): + raise RuntimeError( + f'{api}: Expected output of f to be a Tensor or Tensors, got ' + f'{type(output)}') + if len(output) == 0: + raise RuntimeError( + f'{api}: Expected output of f to be a non-empty tuple of Tensors.') + for out in output: + if isinstance(out, torch.Tensor): + continue + raise RuntimeError( + f'{api}: Expected output of f to be a Tensor or Tensors, got ' + f'{type(out)} as an output') + + +def assert_non_empty_list_of_tensors(output: List[torch.Tensor], api: str, argname: str) -> None: + if len(output) == 0: + raise RuntimeError( + f'{api}: Expected {argname} to contain at least one Tensor.') + for out in output: + if isinstance(out, torch.Tensor): + continue + raise RuntimeError( + f'{api}: Expected {argname} to only contain Tensors, got ' + f'{type(out)}') + + +jvp_str = 'jvp(f, primals, tangents)' + + +def safe_unpack_dual(dual, strict): + if not isinstance(dual, torch.Tensor): + raise RuntimeError( + f'{jvp_str}: expected f(*args) to return only tensors' + f', got unsupported type {type(dual)}' + ) + + primal, tangent = fwAD.unpack_dual(dual) + if tangent is None: + if strict: + raise 
RuntimeError( + 'jvp(f, primals, tangents, strict=True): ' + 'The output of f is independent of ' + 'the inputs. This is not allowed with strict=True.') + tangent = torch.zeros_like(primal) + return primal, tangent + + +@exposed_in("torch.func") +def jvp(func: Callable, primals: Any, tangents: Any, *, strict: bool = False, has_aux: bool = False): + """ + Standing for the Jacobian-vector product, returns a tuple containing + the output of `func(*primals)` and the "Jacobian of ``func`` evaluated at + ``primals``" times ``tangents``. This is also known as forward-mode autodiff. + + Args: + func (function): A Python function that takes one or more arguments, + one of which must be a Tensor, and returns one or more Tensors + primals (Tensors): Positional arguments to ``func`` that must all be + Tensors. The returned function will also be computing the + derivative with respect to these arguments + tangents (Tensors): The "vector" for which Jacobian-vector-product is + computed. Must be the same structure and sizes as the inputs to + ``func``. + has_aux (bool): Flag indicating that ``func`` returns a + ``(output, aux)`` tuple where the first element is the output of + the function to be differentiated and the second element is + other auxiliary objects that will not be differentiated. + Default: False. + + Returns: + Returns a ``(output, jvp_out)`` tuple containing the output of ``func`` + evaluated at ``primals`` and the Jacobian-vector product. + If ``has_aux is True``, then instead returns a ``(output, jvp_out, aux)`` tuple. + + .. note:: + You may see this API error out with "forward-mode AD not implemented + for operator X". If so, please file a bug report and we will prioritize it. + + jvp is useful when you wish to compute gradients of a function R^1 -> R^N + + >>> from torch.func import jvp + >>> x = torch.randn([]) + >>> f = lambda x: x * torch.tensor([1., 2., 3]) + >>> value, grad = jvp(f, (x,), (torch.tensor(1.),)) + >>> assert torch.allclose(value, f(x)) + >>> assert torch.allclose(grad, torch.tensor([1., 2, 3])) + + :func:`jvp` can support functions with multiple inputs by passing in the + tangents for each of the inputs + + >>> from torch.func import jvp + >>> x = torch.randn(5) + >>> y = torch.randn(5) + >>> f = lambda x, y: (x * y) + >>> _, output = jvp(f, (x, y), (torch.ones(5), torch.ones(5))) + >>> assert torch.allclose(output, x + y) + + """ + + return _jvp_with_argnums(func, primals, tangents, argnums=None, strict=strict, has_aux=has_aux) + + +@doesnt_support_saved_tensors_hooks +def _jvp_with_argnums(func: Callable, primals: Any, tangents: Any, argnums: Optional[argnums_t], *, + strict: bool = False, has_aux: bool): + # This is the same function as jvp but also accepts an argnums argument + # Most args are the same as jvp except for the added argument + # argnums (Optional[int or tuple[int]]): Optional, specifies the argument(s) to compute gradients with respect to. + # If None, computes the gradients with respect to all inputs (used for jvp). Default: None + # Because of this, tangents must be of length argnums and matches up to the corresponding primal whose index is + # given by argnums + # + # WARN: Users should NOT call this function directly and should just be calling jvp. + # It is only separated so that inputs passed to jacfwd but not differentiated get the correct wrappers. 
+ # + # NOTE: All error messages are produced as if jvp was being called, even if this was called by jacfwd + # + # Returns the same two elements as :func:`jvp` but the returned tuple, ``jvp_out``, only has JVPs with respect to + # the primals given by argnums + if not isinstance(primals, tuple): + raise RuntimeError( + f'{jvp_str}: Expected primals to be a tuple. ' + f'E.g. it should be valid to call f(*primals).') + diff_args = primals if argnums is None else _slice_argnums(primals, argnums) + flat_primals, primals_spec = tree_flatten(diff_args) + flat_tangents, tangents_spec = tree_flatten(tangents) + if primals_spec != tangents_spec: + raise RuntimeError( + f'{jvp_str}: Expected primals and tangents to have the same python ' + f'structure. For example, if primals is a tuple of 3 tensors, ' + f'tangents also must be. Got primals with structure {primals_spec} ' + f'and tangents with structure {tangents_spec}') + assert_non_empty_list_of_tensors(flat_primals, jvp_str, 'primals') + assert_non_empty_list_of_tensors(flat_tangents, jvp_str, 'tangents') + + level = _jvp_increment_nesting() + try: + global JVP_NESTING + JVP_NESTING += 1 + with fwAD._set_fwd_grad_enabled(True): + ctx = fwAD.dual_level if JVP_NESTING == 1 else noop + with ctx(): + flat_duals = tuple(fwAD.make_dual(p, t) + for p, t in zip(flat_primals, flat_tangents)) + duals = tree_unflatten(flat_duals, primals_spec) + if argnums is not None: + primals = _wrap_all_tensors(primals, level) + duals = _replace_args(primals, duals, argnums) + result_duals = func(*duals) + if has_aux: + if not (isinstance(result_duals, tuple) and len(result_duals) == 2): + raise RuntimeError( + f"{jvp_str}: output of function f should be a tuple: (output, aux) " + "if has_aux is True" + ) + result_duals, aux = result_duals + aux = _undo_create_differentiable(aux, level) + + result_duals, spec = tree_flatten(result_duals) + assert_non_empty_tensor_output(result_duals, jvp_str) + + primals_out, tangents_out = \ + zip(*[safe_unpack_dual(dual, strict) for dual in result_duals]) + primals_out = tree_map( + partial(_undo_create_differentiable, level=level), primals_out) + tangents_out = tree_map( + partial(_undo_create_differentiable, level=level), tangents_out) + + primals_out_unflatten = tree_unflatten(primals_out, spec) + tangents_out_unflatten = tree_unflatten(tangents_out, spec) + if has_aux: + return primals_out_unflatten, tangents_out_unflatten, aux + + return primals_out_unflatten, tangents_out_unflatten + finally: + _jvp_decrement_nesting() + JVP_NESTING -= 1 + + +def safe_unflatten(tensor, dim, shape): + if len(shape) == 0: + assert tensor.shape[dim] == 1 + return tensor.squeeze(dim) + return tensor.unflatten(dim, shape) + + +@exposed_in("torch.func") +def jacfwd(func: Callable, argnums: argnums_t = 0, has_aux: bool = False, *, randomness: str = "error"): + """ + Computes the Jacobian of ``func`` with respect to the arg(s) at index + ``argnum`` using forward-mode autodiff + + Args: + func (function): A Python function that takes one or more arguments, + one of which must be a Tensor, and returns one or more Tensors + argnums (int or Tuple[int]): Optional, integer or tuple of integers, + saying which arguments to get the Jacobian with respect to. + Default: 0. + has_aux (bool): Flag indicating that ``func`` returns a + ``(output, aux)`` tuple where the first element is the output of + the function to be differentiated and the second element is + auxiliary objects that will not be differentiated. + Default: False. 
+ randomness(str): Flag indicating what type of randomness to use. + See :func:`vmap` for more detail. Allowed: "different", "same", "error". + Default: "error" + + Returns: + Returns a function that takes in the same inputs as ``func`` and + returns the Jacobian of ``func`` with respect to the arg(s) at + ``argnums``. If ``has_aux is True``, then the returned function + instead returns a ``(jacobian, aux)`` tuple where ``jacobian`` + is the Jacobian and ``aux`` is auxiliary objects returned by ``func``. + + .. note:: + You may see this API error out with "forward-mode AD not implemented + for operator X". If so, please file a bug report and we will prioritize it. + An alternative is to use :func:`jacrev`, which has better operator coverage. + + A basic usage with a pointwise, unary operation will give a diagonal array + as the Jacobian + + >>> from torch.func import jacfwd + >>> x = torch.randn(5) + >>> jacobian = jacfwd(torch.sin)(x) + >>> expected = torch.diag(torch.cos(x)) + >>> assert torch.allclose(jacobian, expected) + + :func:`jacfwd` can be composed with vmap to produce batched + Jacobians: + + >>> from torch.func import jacfwd, vmap + >>> x = torch.randn(64, 5) + >>> jacobian = vmap(jacfwd(torch.sin))(x) + >>> assert jacobian.shape == (64, 5, 5) + + If you would like to compute the output of the function as well as the + jacobian of the function, use the ``has_aux`` flag to return the output + as an auxiliary object: + + >>> from torch.func import jacfwd + >>> x = torch.randn(5) + >>> + >>> def f(x): + >>> return x.sin() + >>> + >>> def g(x): + >>> result = f(x) + >>> return result, result + >>> + >>> jacobian_f, f_x = jacfwd(g, has_aux=True)(x) + >>> assert torch.allclose(f_x, f(x)) + + Additionally, :func:`jacrev` can be composed with itself or :func:`jacrev` + to produce Hessians + + >>> from torch.func import jacfwd, jacrev + >>> def f(x): + >>> return x.sin().sum() + >>> + >>> x = torch.randn(5) + >>> hessian = jacfwd(jacrev(f))(x) + >>> assert torch.allclose(hessian, torch.diag(-x.sin())) + + By default, :func:`jacfwd` computes the Jacobian with respect to the first + input. 
However, it can compute the Jacboian with respect to a different + argument by using ``argnums``: + + >>> from torch.func import jacfwd + >>> def f(x, y): + >>> return x + y ** 2 + >>> + >>> x, y = torch.randn(5), torch.randn(5) + >>> jacobian = jacfwd(f, argnums=1)(x, y) + >>> expected = torch.diag(2 * y) + >>> assert torch.allclose(jacobian, expected) + + Additionally, passing a tuple to ``argnums`` will compute the Jacobian + with respect to multiple arguments + + >>> from torch.func import jacfwd + >>> def f(x, y): + >>> return x + y ** 2 + >>> + >>> x, y = torch.randn(5), torch.randn(5) + >>> jacobian = jacfwd(f, argnums=(0, 1))(x, y) + >>> expectedX = torch.diag(torch.ones_like(x)) + >>> expectedY = torch.diag(2 * y) + >>> assert torch.allclose(jacobian[0], expectedX) + >>> assert torch.allclose(jacobian[1], expectedY) + + """ + @wraps(func) + def wrapper_fn(*args): + error_if_complex("jacfwd", args, is_input=True) + primals = args if argnums is None else _slice_argnums(args, argnums) + flat_primals, primals_spec = tree_flatten(primals) + flat_primals_numels = tuple(p.numel() for p in flat_primals) + flat_basis = _construct_standard_basis_for(flat_primals, flat_primals_numels) + basis = tree_unflatten(flat_basis, primals_spec) + + def push_jvp(basis): + output = _jvp_with_argnums(func, args, basis, argnums=argnums, has_aux=has_aux) + # output[0] is the output of `func(*args)` + error_if_complex("jacfwd", output[0], is_input=False) + if has_aux: + _, jvp_out, aux = output + return jvp_out, aux + _, jvp_out = output + return jvp_out + + results = vmap(push_jvp, randomness=randomness)(basis) + if has_aux: + results, aux = results + # aux is in the standard basis format, e.g. NxN matrix + # We need to fetch the first element as original `func` output + flat_aux, aux_spec = tree_flatten(aux) + flat_aux = [value[0] for value in flat_aux] + aux = tree_unflatten(flat_aux, aux_spec) + + jac_outs, spec = tree_flatten(results) + # Most probably below output check can never raise an error + # as jvp should test the output before + # assert_non_empty_output(jac_outs, 'jacfwd(f, ...)(*args)') + + jac_outs_ins = tuple( + tuple( + safe_unflatten(jac_out_in, -1, primal.shape) + for primal, jac_out_in in + zip(flat_primals, jac_out.movedim(0, -1).split(flat_primals_numels, dim=-1)) + ) + for jac_out in jac_outs + ) + jac_outs_ins = tuple(tree_unflatten(jac_ins, primals_spec) for jac_ins in jac_outs_ins) + + if isinstance(argnums, int): + jac_outs_ins = tuple(jac_ins[0] for jac_ins in jac_outs_ins) + if has_aux: + return tree_unflatten(jac_outs_ins, spec), aux + return tree_unflatten(jac_outs_ins, spec) + return wrapper_fn + + +@exposed_in("torch.func") +def hessian(func, argnums=0): + """ + Computes the Hessian of ``func`` with respect to the arg(s) at index + ``argnum`` via a forward-over-reverse strategy. + + The forward-over-reverse strategy (composing ``jacfwd(jacrev(func))``) is + a good default for good performance. It is possible to compute Hessians + through other compositions of :func:`jacfwd` and :func:`jacrev` like + ``jacfwd(jacfwd(func))`` or ``jacrev(jacrev(func))``. + + Args: + func (function): A Python function that takes one or more arguments, + one of which must be a Tensor, and returns one or more Tensors + argnums (int or Tuple[int]): Optional, integer or tuple of integers, + saying which arguments to get the Hessian with respect to. + Default: 0. 
+ + Returns: + Returns a function that takes in the same inputs as ``func`` and + returns the Hessian of ``func`` with respect to the arg(s) at + ``argnums``. + + .. note:: + You may see this API error out with "forward-mode AD not implemented + for operator X". If so, please file a bug report and we will prioritize it. + An alternative is to use ``jacrev(jacrev(func))``, which has better + operator coverage. + + A basic usage with a R^N -> R^1 function gives a N x N Hessian: + + >>> from torch.func import hessian + >>> def f(x): + >>> return x.sin().sum() + >>> + >>> x = torch.randn(5) + >>> hess = hessian(f)(x) # equivalent to jacfwd(jacrev(f))(x) + >>> assert torch.allclose(hess, torch.diag(-x.sin())) + + """ + return jacfwd(jacrev(func, argnums), argnums) + + +@doesnt_support_saved_tensors_hooks +def grad_and_value_impl(func, argnums, has_aux, args, kwargs) -> Callable: + with grad_increment_nesting() as level: + output, aux, grad_input = None, None, None + # See NOTE [grad and vjp interaction with no_grad] + with torch.enable_grad(): + args = _wrap_all_tensors(args, level) + kwargs = _wrap_all_tensors(kwargs, level) + diff_args = _slice_argnums(args, argnums, as_tuple=False) + tree_map_(partial(_create_differentiable, level=level), diff_args) + + output = func(*args, **kwargs) + if has_aux: + if not (isinstance(output, tuple) and len(output) == 2): + raise RuntimeError( + "grad_and_value(f)(*args): output of function f should be a tuple: (output, aux) " + "if has_aux is True" + ) + output, aux = output + + if not isinstance(output, torch.Tensor): + raise RuntimeError('grad_and_value(f)(*args): Expected f(*args) ' + f'to return a Tensor, got {type(output)}') + if output.dim() != 0: + raise RuntimeError('grad_and_value(f)(*args): Expected f(*args) ' + 'to return a scalar Tensor, got tensor with ' + f'{output.dim()} dims. 
Maybe you wanted to ' + 'use the vjp or jacrev APIs instead?') + + flat_diff_args, spec = tree_flatten(diff_args) + + # NB: need create_graph so that backward pass isn't run in no_grad mode + flat_outputs = _as_tuple(output) + flat_grad_input = _autograd_grad(flat_outputs, flat_diff_args, create_graph=True) + grad_input = tree_unflatten(flat_grad_input, spec) + + grad_input = _undo_create_differentiable(grad_input, level) + output = _undo_create_differentiable(output, level) + if has_aux: + aux = _undo_create_differentiable(aux, level) + + if has_aux: + return grad_input, (output, aux) + return grad_input, output + + +def grad_impl(func: Callable, argnums: argnums_t, has_aux: bool, args, kwargs): + results = grad_and_value_impl(func, argnums, has_aux, args, kwargs) + if has_aux: + grad, (_, aux) = results + return grad, aux + grad, _ = results + return grad + +def _maybe_wrap_functional_tensor(maybe_tensor, level, *, _python_functionalize: bool = False): + if not isinstance(maybe_tensor, torch.Tensor): + return maybe_tensor + wrapped = _wrap_functional_tensor(maybe_tensor, level) + _assert_wrapped_functional(maybe_tensor, wrapped) + if _python_functionalize: + out = FunctionalTensor(wrapped) + torch._mirror_autograd_meta_to(maybe_tensor, out) + return out + return wrapped + + +def _wrap_all_tensors_to_functional(tensor_pytree, level, *, _python_functionalize: bool = False): + return tree_map(partial(lambda x: _maybe_wrap_functional_tensor( + x, level, _python_functionalize=_python_functionalize)), tensor_pytree) + + +def _maybe_unwrap_functional_tensor(maybe_tensor, *, reapply_views: bool): + if not isinstance(maybe_tensor, torch.Tensor): + return maybe_tensor + if isinstance(maybe_tensor, FunctionalTensor): + maybe_tensor = maybe_tensor.elem + + if not torch._is_functional_tensor(maybe_tensor): + # If it's not a functional tensor, just return it. + # This can happen if we functionalize a fn that returns a global, + # which was never wrapped properly. + return maybe_tensor + # Sync any pending updates on the output tensor + torch._sync(maybe_tensor) + return _unwrap_functional_tensor(maybe_tensor, reapply_views) + + +def _unwrap_all_tensors_from_functional(tensor_pytree, *, reapply_views: bool): + return tree_map(lambda t: _maybe_unwrap_functional_tensor(t, reapply_views=reapply_views), tensor_pytree) + + +@exposed_in("torch.func") +def functionalize(func: Callable, *, remove: str = 'mutations') -> Callable: + """ + functionalize is a transform that can be used to remove (intermediate) + mutations and aliasing from a function, while preserving the function's + semantics. + + ``functionalize(func)`` returns a new function with the same semantics + as ``func``, but with all intermediate mutations removed. + Every inplace operation performed on an intermediate tensor: + ``intermediate.foo_()`` + gets replaced by its out-of-place equivalent: + ``intermediate_updated = intermediate.foo()``. + + functionalize is useful for shipping a pytorch program off to + backends or compilers that aren't able to easily represent + mutations or aliasing operators. + + Args: + func (Callable): A Python function that takes one or more arguments. + remove (str): An optional string argument, that takes on either + the value 'mutations' or 'mutations_and_views'. + If 'mutations' is passed in then all mutating operators + will be replaced with their non-mutating equivalents. + If 'mutations_and_views' is passed in, then additionally, all aliasing + operators will be replaced with their non-aliasing equivalents. 
+ Default: 'mutations'. + + Returns: + Returns a new "functionalized" function. It takes the same inputs as + ``func``, and has the same behavior, but any mutations + (and optionally aliasing) performed on intermediate tensors + in the function will be removed. + + functionalize will also remove mutations (and views) that were performed on function inputs. + However to preserve semantics, functionalize will "fix up" the mutations after + the transform has finished running, by detecting if any tensor inputs "should have" + been mutated, and copying the new data back to the inputs if necessary. + + + Example:: + + >>> # xdoctest: +SKIP + >>> import torch + >>> from torch.fx.experimental.proxy_tensor import make_fx + >>> from torch.func import functionalize + >>> + >>> # A function that uses mutations and views, but only on intermediate tensors. + >>> def f(a): + ... b = a + 1 + ... c = b.view(-1) + ... c.add_(1) + ... return b + ... + >>> inpt = torch.randn(2) + >>> + >>> out1 = f(inpt) + >>> out2 = functionalize(f)(inpt) + >>> + >>> # semantics are the same (outputs are equivalent) + >>> print(torch.allclose(out1, out2)) + True + >>> + >>> f_traced = make_fx(f)(inpt) + >>> f_no_mutations_traced = make_fx(functionalize(f))(inpt) + >>> f_no_mutations_and_views_traced = make_fx(functionalize(f, remove='mutations_and_views'))(inpt) + >>> + >>> print(f_traced.code) + + + + def forward(self, a_1): + add = torch.ops.aten.add(a_1, 1); a_1 = None + view = torch.ops.aten.view(add, [-1]) + add_ = torch.ops.aten.add_(view, 1); view = None + return add + + >>> print(f_no_mutations_traced.code) + + + + def forward(self, a_1): + add = torch.ops.aten.add(a_1, 1); a_1 = None + view = torch.ops.aten.view(add, [-1]); add = None + add_1 = torch.ops.aten.add(view, 1); view = None + view_1 = torch.ops.aten.view(add_1, [2]); add_1 = None + return view_1 + + >>> print(f_no_mutations_and_views_traced.code) + + + + def forward(self, a_1): + add = torch.ops.aten.add(a_1, 1); a_1 = None + view_copy = torch.ops.aten.view_copy(add, [-1]); add = None + add_1 = torch.ops.aten.add(view_copy, 1); view_copy = None + view_copy_1 = torch.ops.aten.view_copy(add_1, [2]); add_1 = None + return view_copy_1 + + + >>> # A function that mutates its input tensor + >>> def f(a): + ... b = a.view(-1) + ... b.add_(1) + ... return a + ... + >>> f_no_mutations_and_views_traced = make_fx(functionalize(f, remove='mutations_and_views'))(inpt) + >>> # + >>> # All mutations and views have been removed, + >>> # but there is an extra copy_ in the graph to correctly apply the mutation to the input + >>> # after the function has completed. + >>> print(f_no_mutations_and_views_traced.code) + + + + def forward(self, a_1): + view_copy = torch.ops.aten.view_copy(a_1, [-1]) + add = torch.ops.aten.add(view_copy, 1); view_copy = None + view_copy_1 = torch.ops.aten.view_copy(add, [2]); add = None + copy_ = torch.ops.aten.copy_(a_1, view_copy_1); a_1 = None + return view_copy_1 + + + There are a few "failure modes" for functionalize that are worth calling out: + (1) Like other torch.func transforms, `functionalize()` doesn't work with functions + that directly use `.backward()`. The same is true for torch.autograd.grad. + If you want to use autograd, you can compute gradients directly + with `functionalize(grad(f))`. + (2) Like other torch.func transforms, `functionalize()` doesn't work with global state. 
+ If you call `functionalize(f)` on a function that takes views / mutations of + non-local state, functionalization will simply no-op and pass the view/mutation + calls directly to the backend. + One way to work around this is is to ensure that any non-local state creation + is wrapped into a larger function, which you then call functionalize on. + (3) `resize_()` has some limitations: functionalize will only work on programs + that use resize_()` as long as the tensor being resized is not a view. + (4) `as_strided()` has some limitations: functionalize will not work on + `as_strided()` calls that result in tensors with overlapping memory. + + + Finally, a helpful mental model for understanding functionalization is that + most user pytorch programs are writing with the public torch API. + When executed, torch operators are generally decomposed into + our internal C++ "ATen" API. + The logic for functionalization happens entirely at the level of ATen. + Functionalization knows how to take every aliasing operator in ATen, + and map it to its non-aliasing equivalent + (e.g. ``tensor.view({-1})`` -> ``at::view_copy(tensor, {-1})``), + and how to take every mutating operator in ATen, + and map it to its non-mutating equivalent + (e.g. ``tensor.add_(1)`` -> ``at::add(tensor, -1)``), + while tracking aliases and mutations out-of-line to know when to fix things up. + Information about which ATen operators are aliasing or mutating all comes from + https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/native_functions.yaml. + """ + if remove == 'mutations': + reapply_views = True + elif remove == 'mutations_and_views': + reapply_views = False + else: + raise RuntimeError( + f"functionalize(f, remove='mutations'): received invalid argument for remove={remove}." + " Valid options are:\n" + " remove='mutations': all inplace and out= operators will be removed from the program, and replaced" + " with their out-of-place equivalents.\n" + " remove='mutations_and_views': In addition to the above, all aliasing operators {view} will be" + " replaced with their non-aliasing counterparts, {view}_copy.\n" + ) + + @doesnt_support_saved_tensors_hooks + @wraps(func) + def wrapped(*args, **kwargs): + try: + func_level = _func_increment_nesting(reapply_views) + func_args = _wrap_all_tensors_to_functional(args, func_level) + func_kwargs = _wrap_all_tensors_to_functional(kwargs, func_level) + + flattened_unwrapped_args = pytree.arg_tree_leaves(*args) + flattened_wrapped_args = pytree.arg_tree_leaves(*func_args) + flattened_unwrapped_kwargs = pytree.arg_tree_leaves(**kwargs) + flattened_wrapped_kwargs = pytree.arg_tree_leaves(**func_kwargs) + + func_outputs = func(*func_args, **func_kwargs) + outputs = _unwrap_all_tensors_from_functional(func_outputs, reapply_views=reapply_views) + flat_outputs, func_out_spec = tree_flatten(outputs) + + for a in flattened_wrapped_args + flattened_wrapped_kwargs: + if isinstance(a, torch.Tensor): + # Call sync_() on the inputs, to ensure that any pending mutations have been applied. + torch._sync(a) + + # And if any mutations were applied to the inputs, we need to propagate them back to the user. 
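+            # Concretely: compare each original input tensor against its functional
+            # wrapper and, if the wrapper was mutated inside ``func``, copy the
+            # updated data back onto the original tensor (this is what produces the
+            # trailing ``copy_`` seen in the traced graphs in the docstring above).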
+ for unwrapped, wrapped in zip(flattened_unwrapped_args, flattened_wrapped_args): + if isinstance(unwrapped, torch.Tensor) and isinstance(wrapped, torch.Tensor): + _propagate_functional_input_mutation(unwrapped, wrapped) + for unwrapped, wrapped in zip(flattened_unwrapped_kwargs, flattened_wrapped_kwargs): + if isinstance(unwrapped, torch.Tensor) and isinstance(wrapped, torch.Tensor): + _propagate_functional_input_mutation(unwrapped, wrapped) + + return outputs + finally: + _func_decrement_nesting() + return wrapped + +@exposed_in("torch.func") +def linearize(func: Callable, *primals) -> Tuple[Any, Callable]: + ''' + Returns the value of ``func`` at ``primals`` and linear approximation + at ``primals``. + + Args: + func (Callable): A Python function that takes one or more arguments. + primals (Tensors): Positional arguments to ``func`` that must all be + Tensors. These are the values at which the function is linearly approximated. + + Returns: + Returns a ``(output, jvp_fn)`` tuple containing the output of ``func`` + applied to ``primals`` and a function that computes the jvp of + ``func`` evaluated at ``primals``. + + linearize is useful if jvp is to be computed multiple times at ``primals``. However, + to achieve this, linearize saves intermediate computation and has higher memory requirements + than directly applying `jvp`. So, if all the ``tangents`` are known, it maybe more efficient + to compute vmap(jvp) instead of using linearize. + + .. note:: + linearize evaluates ``func`` twice. Please file an issue for an implementation + with a single evaluation. + + Example:: + >>> import torch + >>> from torch.func import linearize + >>> def fn(x): + ... return x.sin() + ... + >>> output, jvp_fn = linearize(fn, torch.zeros(3, 3)) + >>> jvp_fn(torch.ones(3, 3)) + tensor([[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]]) + >>> + + ''' + # Note: We evaluate `fn` twice. + # Once for returning the output and other while + # tracing the graph. + # If this becomes a bottle-neck, we should update + # make_fx such that it also returns the output. + + output = func(*primals) + _, output_spec = tree_flatten(output) + + flat_primals, primals_argspec = tree_flatten(primals) + + # tangents for tracing + flat_tangents = tuple(p.new_empty(()).expand_as(p) for p in flat_primals) + + # function to trace + def trace_fn(flat_tangents): + with fwAD.dual_level(): + flat_duals = tuple(fwAD.make_dual(p, t) for p, t in zip(flat_primals, flat_tangents)) + duals = tree_unflatten(flat_duals, primals_argspec) + output = func(*duals) + tangents = tree_map_only(torch.Tensor, lambda t: fwAD.unpack_dual(t)[1], output) + + return tangents + + jvp_graph = make_fx(trace_fn)(flat_tangents) + const_folded_jvp_graph = const_fold.split_const_subgraphs(jvp_graph) + + # Hold only the meta-data regarding the primals. 
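+    # (Shape / device / dtype are all that the tangent checks below need,
+    # so the primal tensors themselves don't have to be captured here.)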
+ flat_primals_shape = tuple(p.shape for p in flat_primals) + flat_primals_device = tuple(p.device for p in flat_primals) + flat_primals_dtype = tuple(p.dtype for p in flat_primals) + + def forward_ad_checks(flat_tangents): + for idx, t in enumerate(flat_tangents): + if t.shape != flat_primals_shape[idx]: + msg = (f"tangent:{idx} with shape {t.shape} in flattened " + f"pytree doesn't match the shape {flat_primals_shape[idx]} " + "of the corresponding primal.") + raise RuntimeError(msg) + + if t.device != flat_primals_device[idx]: + msg = (f"tangent:{idx} with device {t.device} in flattened " + f"pytree doesn't match the device {flat_primals_device[idx]} " + "of the corresponding primal.") + raise RuntimeError(msg) + + if t.dtype != flat_primals_dtype[idx]: + msg = (f"tangent:{idx} with dtype {t.dtype} in flattened " + f"pytree doesn't match the dtype {flat_primals_dtype[idx]} " + "of the corresponding primal.") + raise RuntimeError(msg) + + # jvp_fn : callable to return + # It takes care of checking the argspec of tangents, + # calling the folded fx graph and unflattening fx graph output + def jvp_fn(*tangents): + flat_tangents, tangent_argspec = tree_flatten(tangents) + if tangent_argspec != primals_argspec: + raise RuntimeError(f"Expected the tangents {tangent_argspec} to have " + f"the same argspec as the primals {primals_argspec}") + + forward_ad_checks(flat_tangents) + + flat_output = const_folded_jvp_graph(*flat_tangents) + # const folded graph can return flat output, + # so transform output. + return tree_unflatten(flat_output, output_spec) + + return output, jvp_fn diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/functional_call.py b/venv/lib/python3.10/site-packages/torch/_functorch/functional_call.py new file mode 100644 index 0000000000000000000000000000000000000000..7533811ed235ba8e527306f20908faa605162228 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/functional_call.py @@ -0,0 +1,248 @@ +from collections import Counter +from typing import Any, Dict, List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +from torch import Tensor +from torch._functorch.utils import exposed_in + + +@exposed_in("torch.func") +def functional_call( + module: "torch.nn.Module", + parameter_and_buffer_dicts: Union[Dict[str, Tensor], Sequence[Dict[str, Tensor]]], + args: Union[Any, Tuple], + kwargs: Optional[Dict[str, Any]] = None, + *, + tie_weights: bool = True, + strict: bool = False, +): + r"""Performs a functional call on the module by replacing the module parameters + and buffers with the provided ones. + + .. note:: If the module has active parametrizations, passing a value in the + :attr:`parameter_and_buffer_dicts` argument with the name set to the regular parameter + name will completely disable the parametrization. + If you want to apply the parametrization function to the value passed + please set the key as ``{submodule_name}.parametrizations.{parameter_name}.original``. + + .. note:: If the module performs in-place operations on parameters/buffers, these will be reflected + in the ``parameter_and_buffer_dicts`` input. + + + Example:: + + >>> a = {'foo': torch.zeros(())} + >>> # xdoctest: +SKIP + >>> mod = Foo() # does self.foo = self.foo + 1 + >>> print(mod.foo) # tensor(0.) + >>> functional_call(mod, a, torch.ones(())) + >>> print(mod.foo) # tensor(0.) + >>> print(a['foo']) # tensor(1.) + + .. note:: If the module has tied weights, whether or not functional_call respects the tying is determined by the + tie_weights flag. 
+ + Example:: + + >>> a = {'foo': torch.zeros(())} + >>> # xdoctest: +SKIP + >>> mod = Foo() # has both self.foo and self.foo_tied which are tied. Returns x + self.foo + self.foo_tied + >>> print(mod.foo) # tensor(1.) + >>> mod(torch.zeros(())) # tensor(2.) + >>> functional_call(mod, a, torch.zeros(())) # tensor(0.) since it will change self.foo_tied too + >>> functional_call(mod, a, torch.zeros(()), tie_weights=False) # tensor(1.)--self.foo_tied is not updated + >>> new_a = {'foo': torch.zeros(()), 'foo_tied': torch.zeros(())} + >>> functional_call(mod, new_a, torch.zeros()) # tensor(0.) + + An example of passing multiple dictionaries + + .. code-block:: python + + a = ({'weight': torch.ones(1, 1)}, {'buffer': torch.zeros(1)}) # two separate dictionaries + mod = nn.Bar(1, 1) # return self.weight @ x + self.buffer + print(mod.weight) # tensor(...) + print(mod.buffer) # tensor(...) + x = torch.randn((1, 1)) + print(x) + functional_call(mod, a, x) # same as x + print(mod.weight) # same as before functional_call + + + And here is an example of applying the grad transform over the parameters + of a model. + + .. code-block:: python + + import torch + import torch.nn as nn + from torch.func import functional_call, grad + + x = torch.randn(4, 3) + t = torch.randn(4, 3) + model = nn.Linear(3, 3) + + def compute_loss(params, x, t): + y = functional_call(model, params, x) + return nn.functional.mse_loss(y, t) + + grad_weights = grad(compute_loss)(dict(model.named_parameters()), x, t) + + .. note:: If the user does not need grad tracking outside of grad transforms, they can detach all of the + parameters for better performance and memory usage + + Example:: + + >>> detached_params = {k: v.detach() for k, v in model.named_parameters()} + >>> grad_weights = grad(compute_loss)(detached_params, x, t) + >>> grad_weights.grad_fn # None--it's not tracking gradients outside of grad + + This means that the user cannot call ``grad_weight.backward()``. However, if they don't need autograd tracking + outside of the transforms, this will result in less memory usage and faster speeds. + + Args: + module (torch.nn.Module): the module to call + parameters_and_buffer_dicts (Dict[str, Tensor] or tuple of Dict[str, Tensor]): the parameters that will be used in + the module call. If given a tuple of dictionaries, they must have distinct keys so that all dictionaries can + be used together + args (Any or tuple): arguments to be passed to the module call. If not a tuple, considered a single argument. + kwargs (dict): keyword arguments to be passed to the module call + tie_weights (bool, optional): If True, then parameters and buffers tied in the original model will be treated as + tied in the reparameterized version. Therefore, if True and different values are passed for the tied + parameters and buffers, it will error. If False, it will not respect the originally tied parameters and + buffers unless the values passed for both weights are the same. Default: True. + strict (bool, optional): If True, then the parameters and buffers passed in must match the parameters and + buffers in the original module. Therefore, if True and there are any missing or unexpected keys, it will + error. Default: False. + + Returns: + Any: the result of calling ``module``. 
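+    A minimal sanity-check sketch (illustrative only): overriding a module with its
+    own parameters and buffers reproduces an ordinary call.
+
+    Example::
+
+        >>> # xdoctest: +SKIP
+        >>> model = nn.Linear(3, 3)
+        >>> state = {**dict(model.named_parameters()), **dict(model.named_buffers())}
+        >>> x = torch.randn(2, 3)
+        >>> torch.allclose(functional_call(model, state, (x,)), model(x))
+        True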
+ """ + if isinstance(parameter_and_buffer_dicts, dict): + parameters_and_buffers = parameter_and_buffer_dicts + elif isinstance(parameter_and_buffer_dicts, Sequence): + if not all(isinstance(d, dict) for d in parameter_and_buffer_dicts): + raise ValueError( + "Expected all elements of parameter_and_buffer_dicts to be dictionaries" + ) + all_keys = [k for d in parameter_and_buffer_dicts for k in d.keys()] + repeated_keys = [key for key, n in Counter(all_keys).items() if n > 1] + if len(repeated_keys) > 0: + raise ValueError( + f"{repeated_keys} appeared in multiple dictionaries; behavior of functional call is ambiguous" + ) + parameters_and_buffers = { + k: v for d in parameter_and_buffer_dicts for k, v in d.items() + } + else: + raise ValueError( + f"Expected parameter_and_buffer_dicts to be a dict, or a list/tuple of dicts, " + f"but got {type(parameter_and_buffer_dicts)}" + ) + + return nn.utils.stateless._functional_call( + module, + parameters_and_buffers, + args, + kwargs, + tie_weights=tie_weights, + strict=strict, + ) + + +@exposed_in("torch.func") +def stack_module_state( + models: List[nn.Module], +) -> Tuple[Dict[str, Any], Dict[str, Any]]: + """stack_module_state(models) -> params, buffers + + Prepares a list of torch.nn.Modules for ensembling with :func:`vmap`. + + Given a list of ``M`` ``nn.Modules`` of the same class, returns two dictionaries + that stack all of their parameters and buffers together, indexed by name. + The stacked parameters are optimizable (i.e. they are new leaf nodes in the + autograd history that are unrelated to the original parameters and can be + passed directly to an optimizer). + + Here's an example of how to ensemble over a very simple model: + + .. code-block:: python + + num_models = 5 + batch_size = 64 + in_features, out_features = 3, 3 + models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)] + data = torch.randn(batch_size, 3) + + def wrapper(params, buffers, data): + return torch.func.functional_call(model[0], (params, buffers), data) + + params, buffers = stack_module_state(models) + output = vmap(wrapper, (0, 0, None))(params, buffers, data) + + assert output.shape == (num_models, batch_size, out_features) + + When there's submodules, this follows state dict naming conventions + + .. code-block:: python + + import torch.nn as nn + class Foo(nn.Module): + def __init__(self, in_features, out_features): + super().__init__() + hidden = 4 + self.l1 = nn.Linear(in_features, hidden) + self.l2 = nn.Linear(hidden, out_features) + + def forward(self, x): + return self.l2(self.l1(x)) + + num_models = 5 + in_features, out_features = 3, 3 + models = [Foo(in_features, out_features) for i in range(num_models)] + params, buffers = stack_module_state(models) + print(list(params.keys())) # "l1.weight", "l1.bias", "l2.weight", "l2.bias" + + .. warning:: + All of the modules being stacked together must be the same (except for + the values of their parameters/buffers). For example, they should be in the + same mode (training vs eval). + """ + if len(models) == 0: + raise RuntimeError("stack_module_state: Expected at least one model, got 0.") + if not (all(m.training for m in models) or all(not m.training for m in models)): + raise RuntimeError( + "stack_module_state: Expected all models to have the same training/eval mode." + ) + model0_typ = type(models[0]) + if not all(type(m) == model0_typ for m in models): + raise RuntimeError( + "stack_module_state: Expected all models to be of the same class." 
+ ) + all_params = [dict(model.named_parameters()) for model in models] + params = { + k: construct_stacked_leaf(tuple(params[k] for params in all_params), k) + for k in all_params[0] + } + all_buffers = [dict(model.named_buffers()) for model in models] + buffers = { + k: construct_stacked_leaf(tuple(buffers[k] for buffers in all_buffers), k) + for k in all_buffers[0] + } + + return params, buffers + + +def construct_stacked_leaf( + tensors: Union[Tuple[Tensor, ...], List[Tensor]], name: str +) -> Tensor: + all_requires_grad = all(t.requires_grad for t in tensors) + none_requires_grad = all(not t.requires_grad for t in tensors) + if not all_requires_grad and not none_requires_grad: + raise RuntimeError( + f"Expected {name} from each model to have the same .requires_grad" + ) + result = torch.stack(tensors) + if all_requires_grad: + result = result.detach().requires_grad_() + return result diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/fx_minifier.py b/venv/lib/python3.10/site-packages/torch/_functorch/fx_minifier.py new file mode 100644 index 0000000000000000000000000000000000000000..33024b1ec8d8fb709d601c8ee50f4c7d28ea7fca --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/fx_minifier.py @@ -0,0 +1,445 @@ +# mypy: ignore-errors + +import torch.fx as fx +import copy +import torch +import math +import sys +from typing import Callable, List +from functools import wraps, partial +from dataclasses import dataclass +from .compile_utils import get_placeholders, get_outputs +from torch.utils._content_store import ContentStoreWriter +from torch.hub import tqdm +from torch.multiprocessing.reductions import StorageWeakRef +import os + +is_tuple = object() + +@dataclass +class LoadTensorMeta: + size: List[int] + stride: List[int] + dtype: torch.dtype + device: torch.device + +class ConcreteProp(torch.fx.Interpreter): + def __init__(self, mod, *, writer=None, skip_offload=False): + super().__init__(mod) + self.writer = writer + self.skip_offload = skip_offload + self.seen_storages = set() + + def run_node(self, n): + self.pbar.update(1) + r = super().run_node(n) + name = n.name + + if isinstance(r, torch.Tensor): + if self.writer is None: + n.meta['concrete_value'] = r + else: + if StorageWeakRef(r.untyped_storage()) in self.seen_storages: + # Refuse to offload tensors which alias other live + # tensors, because this will violate operator contracts + n.meta['concrete_value'] = None + else: + if not self.skip_offload: + self.writer.write_tensor(os.path.join("eager", name), r) + n.meta['concrete_value'] = LoadTensorMeta( + r.size(), + r.stride(), + r.dtype, + r.device + ) + self.seen_storages.add(StorageWeakRef(r.untyped_storage())) + else: + n.meta['concrete_value'] = is_tuple + + return r + + def propagate(self, *args): + with tqdm( + desc="Saving intermediates for delta debugging", + total=len(self.module.graph.nodes), + disable=self.writer is None + ) as pbar: + self.pbar = pbar + r = super().run(*args) + if not self.skip_offload: + pbar.set_description("Saved! 
To skip next time, run with --skip-saving-eager-intermediates") + return r + +def is_load_tensor_node(node): + return node.op == 'call_function' and node.target is torch.ops.debugprims.load_tensor.default + + +# inplace modifies node/inps +def _convert_node_to_placeholder(graph, node, inps): + if node.op == 'output' or node.op == "placeholder": + return False + + if is_load_tensor_node(node): + return False + + concrete_val = node.meta.get('concrete_value', None) + + if isinstance(concrete_val, torch.Tensor): + node.op = 'placeholder' + node.target = node.name + node.args = () + node.kwargs = {} + + inps.append(concrete_val) + return True + + elif concrete_val is None: + return False + + elif concrete_val is is_tuple: + r = False + for tuple_user in list(node.users): + r = _convert_node_to_placeholder(graph, tuple_user, inps) or r + # NB: We must not erase the node at this point, because + # we are iterating over the nodes and this would change + # the iteration order + # graph.erase_node(node) + return r + + elif isinstance(concrete_val, LoadTensorMeta): + node.op = 'call_function' + node.target = torch.ops.debugprims.load_tensor.default + node.args = (os.path.join("eager", node.name), concrete_val.size, concrete_val.stride) + node.kwargs = { + 'device': concrete_val.device, + 'dtype': concrete_val.dtype, + } + return True + + return False + +def create_minified_hlo_graph(minified_fx_graph, inputs): + """ + Takes minified FX graph as primary input, and ports it to HLO via StableHLO + Provides minified HLO graph as output, and archive them to local directory + """ + hlo_dir = f"{os.getcwd()}/hlo_files" + os.makedirs(hlo_dir, exists_ok=True) + + from torch_xla.stablehlo import save_torch_model_as_stablehlo + save_torch_model_as_stablehlo(minified_fx_graph, inputs, hlo_dir) + +def dump_state(fx_g, inps): + print(f""" +# Working Repro with {len(fx_g.graph.nodes)} nodes +inps = {[(i.shape, i.dtype, i.device.type) for i in inps]} +inps = [torch.zeros(())] + [torch.ones(shape, dtype=dtype, device=device) for (shape, dtype, device) in inps] +{fx_g.code} +""") + +def is_power_of_two(n): + if n == 0: + return False + return (n & (n - 1)) == 0 + +@dataclass +class ReproState: + graph: fx.Graph + inps: List[torch.Tensor] + + def __post_init__(self): + ph_nodes = get_placeholders(self.graph) + assert len(ph_nodes) == len(self.inps) + +def minifier( + fail_f: fx.GraphModule, inps, module_fails, dump_state: Callable = dump_state, *, + save_dir=None, offload_to_disk=False, skip_offload=False, skip_sanity=False, + max_granularity=None +): + """ + Minimizes a FX graph with given inputs, such that the resulting FX graph still returns True for module_fails. + + Does 2 main strategies: + 1. Truncates suffix: Removes some suffix from the graph and sets a new output. + 2. Delta Debugging: Tries replacing half of the graph with inputs. If fails, + tries replacing quarter of the graph, etc. + + >>> # xdoctest: +SKIP(failing) + >>> failing_function = fx.symbolic_trace(f) + >>> minimize(failing_function, [torch.randn(5)], lambda fx_g, inps: fx_g(*inps)) + + note: module_fails returns True if it fails. 
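+    A sketch of a boolean ``module_fails`` predicate (illustrative only; assumes the
+    graph returns a single tensor and "failing" means producing NaNs):
+
+    >>> # xdoctest: +SKIP
+    >>> def module_fails(fx_g, inps):
+    ...     return bool(torch.isnan(fx_g(*inps)).any())
+    >>> minifier(fx.symbolic_trace(f), [torch.randn(5)], module_fails)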
+ """ + assert isinstance(inps, (tuple, list)) + + failing_graph = fail_f.graph + cur_size = len(failing_graph.nodes) + + if max_granularity is not None and not is_power_of_two(max_granularity): + raise RuntimeError(f"max_granularity {max_granularity} not power of two") + + num_queries = 0 + + def deepcopy_fx_graph(fx_graph): + return fx.GraphModule(fail_f, copy.deepcopy(fx_graph)).graph + + + def graph_fails(graph, inps): + nonlocal num_queries + graph = copy.deepcopy(graph) + num_queries += 1 + mod = fx.GraphModule(fail_f, graph) + mod.graph.lint() + return module_fails(mod, inps) + + writer = None + if offload_to_disk: + writer = ContentStoreWriter(save_dir) + + ConcreteProp(fail_f, writer=writer, skip_offload=skip_offload).propagate(*inps) + if not skip_sanity and not graph_fails(failing_graph, inps): + raise RuntimeError("Input graph did not fail the tester") + print(f"Started off with {cur_size} nodes", file=sys.stderr) + + def _register_strategy(strategy: Callable, name: str): + @wraps(strategy) + def new_func(old_state: ReproState, granularity=1): + print(file=sys.stderr) + print( + f"Strategy: {name} (G: {granularity}) " + f"({len(old_state.graph.nodes)} nodes, {len(old_state.inps)} inputs)", + file=sys.stderr + ) + new_state = strategy(deepcopy_fx_graph(old_state.graph), list(old_state.inps), granularity) + if new_state is not None: + new_nodes = len(new_state.graph.nodes) + old_nodes = len(old_state.graph.nodes) + new_inps = len(new_state.inps) + old_inps = len(old_state.inps) + new_outs = len(get_outputs(new_state.graph)) + old_outs = len(get_outputs(old_state.graph)) + progress_made = False + if new_nodes < old_nodes: + progress_made = True + print(f"SUCCESS: Went from {old_nodes} to {new_nodes} nodes", file=sys.stderr) + if new_inps > old_inps: + progress_made = True + print(f"SUCCESS: Went from {old_inps} to {new_inps} inputs", file=sys.stderr) + if new_outs < old_outs: + progress_made = True + print(f"SUCCESS: Went from {old_outs} to {new_outs} outputs", file=sys.stderr) + + if not progress_made: + raise RuntimeError("Success raised but no progress made?") + + if not graph_fails(new_state.graph, new_state.inps): + print("WARNING: Something went wrong, not applying this minification", file=sys.stderr) + return None + return new_state + else: + print(f"FAIL: {name}", file=sys.stderr) + return None + + return new_func + + def register_strategy(name: str): + return partial(_register_strategy, name=name) + + @register_strategy("Truncate suffix") + def remove_suffix(cur_graph, cur_inps, granularity): + tested = set() + new_graph = fx.Graph() + env = {} + for idx, node in enumerate(cur_graph.nodes): + new_node = new_graph.node_copy(node, lambda x: env[x]) + if node.op not in ['placeholder', 'output']: + # If idx is divisible by (granularity * 2), it would have been checked already. 
+ if idx % granularity == 0 and (idx % (granularity * 2) != 0) and idx not in tested: + output_node = new_graph.output((new_node,)) + if len(new_graph.nodes) < len(cur_graph.nodes) and graph_fails(new_graph, cur_inps): + return ReproState(new_graph, cur_inps) + else: + tested.add(idx) + new_graph.erase_node(output_node) + env[node] = new_node + return None + + @register_strategy("Remove outputs") + def remove_outputs(cur_graph, cur_inps, granularity): + granularity = max(1, granularity // 2) + for idx, node in enumerate(cur_graph.nodes): + node.idx = idx + if node.op == 'output': + output = node + break + + if isinstance(output.args[0], fx.Node): + return None + + output_args = sorted(output.args[0], key=lambda x: x.idx if isinstance(x, fx.Node) else int(1e9)) + if len(output_args) == 1: + return None + + for idx in range(0, len(output_args), granularity): + output.args = (output_args[:idx] + output_args[idx + granularity:],) + if graph_fails(cur_graph, cur_inps): + return ReproState(cur_graph, cur_inps) + return None + + + def remove_unused_inputs_unchecked(cur_state: ReproState): + cur_graph = cur_state.graph + cur_inps = cur_state.inps + ph_nodes = get_placeholders(cur_graph) + assert len(ph_nodes) == len(cur_inps) + + new_inps = [] + for idx in range(len(ph_nodes)): + if len(ph_nodes[idx].users) == 0: + cur_graph.erase_node(ph_nodes[idx]) + else: + new_inps.append(cur_inps[idx]) + if len(new_inps) < len(cur_inps): + return ReproState(cur_graph, new_inps) + return None + + def remove_unused_inputs_checked(cur_state: ReproState): + new_state = remove_unused_inputs_unchecked(cur_state) + if new_state is not None and graph_fails(new_state.graph, new_state.inps): + return new_state + return None + + def _remove_unused_wrapper(cur_graph, cur_inps, granularity): + return remove_unused_inputs_checked(ReproState(cur_graph, cur_inps)) + + remove_unused_inputs = register_strategy("Remove unused inputs")(_remove_unused_wrapper) + + @register_strategy("Eliminate dead code") + def eliminate_dead_code(cur_graph, cur_inps, granularity): + if cur_graph.eliminate_dead_code() and graph_fails(cur_graph, cur_inps): + return ReproState(cur_graph, cur_inps) + return None + + + def _consolidate_placeholders(cur_graph, inps): + new_graph = fx.Graph() + env = {} + seen_non_placeholder = False + + # Move all placeholders to the front; also, if any load_tensor + # is at the front, convert it into an input (because it can be live + # all the time) + for node in cur_graph.nodes: + if node.op == 'placeholder': + new_node = new_graph.node_copy(node, lambda x: env[x]) + env[node] = new_node + elif not seen_non_placeholder and is_load_tensor_node(node): + new_node = new_graph.placeholder(node.name) + env[node] = new_node + inps.append(torch.ops.debugprims.load_tensor.default(*node.args, **node.kwargs)) + else: + seen_non_placeholder = True + + # Move everyone else + for node in cur_graph.nodes: + if node not in env: + new_node = new_graph.node_copy(node, lambda x: env[x]) + env[node] = new_node + return new_graph + + @register_strategy("Delta Debugging") + def delta_debugging(cur_graph: fx.Graph, cur_inps, granularity): + num_nodes = len(cur_graph.nodes) + for start_range in range(0, num_nodes, granularity): + is_removing = False + new_graph = deepcopy_fx_graph(cur_graph) + new_inps = cur_inps[:] + end_range = min(num_nodes, start_range + granularity) + for idx in range(start_range, end_range): + new_node = list(new_graph.nodes)[idx] + if _convert_node_to_placeholder(new_graph, new_node, new_inps): + is_removing = True 
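+            # If nothing in this window could be turned into a placeholder,
+            # there is no new candidate graph to test for this range.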
+ if not is_removing: + continue + new_graph.eliminate_dead_code() + new_graph = _consolidate_placeholders(new_graph, new_inps) + new_state = remove_unused_inputs_unchecked(ReproState(new_graph, new_inps)) + if new_state is None: + new_state = ReproState(new_graph, new_inps) + if graph_fails(new_state.graph, new_state.inps): + return ReproState(new_state.graph, new_state.inps) + + return None + + @register_strategy("Consolidate Inputs") + def consolidate_inputs(cur_graph, cur_inps, granularity): + old_len = len(cur_inps) + cur_graph = _consolidate_placeholders(cur_graph, cur_inps) + if len(cur_inps) > old_len and graph_fails(cur_graph, cur_inps): + return ReproState(cur_graph, cur_inps) + return None + + failing_state = ReproState(failing_graph, inps) + + def try_granularity(failing_state, granularity, use_non_granular): + print(f"Trying granularity {granularity}", file=sys.stderr) + + strategies = [] + num_nodes = len(failing_state.graph.nodes) + num_outputs = len(get_outputs(failing_state.graph)) + if num_outputs > num_nodes // 2: + strategies += [remove_outputs] + + if use_non_granular: + strategies += [eliminate_dead_code, remove_unused_inputs, consolidate_inputs] + + strategies += [remove_suffix, delta_debugging] + + for strategy in strategies: + new_state = strategy(failing_state, granularity) + if new_state is not None: + return new_state + return None + + while True: + dump_state(fx.GraphModule(fail_f, failing_state.graph), failing_state.inps) + granularity = int(2**(math.floor(math.log2(len(failing_state.graph.nodes))))) + if max_granularity is not None: + granularity = min(max_granularity, granularity) + new_state = try_granularity(failing_state, granularity, use_non_granular=True) + if new_state is not None: + failing_state = new_state + continue + + granularity //= 2 + has_progress = False + while granularity >= 1: + new_state = try_granularity(failing_state, granularity, use_non_granular=False) + if new_state is not None: + failing_state = new_state + has_progress = True + break + granularity //= 2 + if has_progress: + continue + + new_state = remove_outputs(failing_state, 1) + if new_state is not None: + failing_state = new_state + continue + + break + + if not graph_fails(failing_state.graph, failing_state.inps): + raise RuntimeError("Uh oh, something went wrong :( Final graph is not failing") + + print(f"Made {num_queries} queries", file=sys.stderr) + failing_fx = fx.GraphModule(fail_f, failing_state.graph) + + # If XLA debugging environment is enabled, create minified HLO graph as well + if "XLA_HLO_DEBUG" in os.environ: + create_minified_hlo_graph(failing_fx, failing_state.inps) + + dump_state(failing_fx, failing_state.inps) + print("Wrote minimal repro out to repro.py", file=sys.stderr) + return failing_fx, failing_state.inps diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/make_functional.py b/venv/lib/python3.10/site-packages/torch/_functorch/make_functional.py new file mode 100644 index 0000000000000000000000000000000000000000..711be174d82761ed6ade88afdccc3ad66290adf1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/make_functional.py @@ -0,0 +1,615 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import copy +from typing import ( + Any, + Callable, + Dict, + Iterable, + List, + NoReturn, + Sequence, + Tuple, + Type, + Union, +) + +import torch +import torch.nn as nn +from torch import Tensor +from torch.nn.utils._named_member_accessor import NamedMemberAccessor + +# Utilities to make nn.Module "functional" +# In particular the goal is to be able to provide a function that takes as input +# the parameters and evaluate the nn.Module using fixed inputs. + + +def raise_parameter_tying_error() -> NoReturn: + raise RuntimeError( + "make_functional(module): we don't yet support models that " + "do parameter tying (also sometimes known as weight sharing). " + "Please try to rewrite your model by replacing all instances of the " + "tied parameter with another and/or comment your support in " + "https://github.com/pytorch/functorch/issues/446" + ) + + +def create_names_map( + named_params: Union[Dict[str, Tensor], Iterable[Tuple[str, Tensor]]], + tied_named_params: Union[Dict[str, Tensor], Iterable[Tuple[str, Tensor]]], +) -> Dict[str, List[str]]: + """ + named_params is a dictionary of tensors: {'A': A, 'B': B} + tied_named_params is another dictionary of tensors {'A': A, 'B': B, 'B_tied': B} + with potentially tied (or 'duplicated') tensors + + This function creates a mapping from the names in named_params to the + names in tied_named_params: {'A': ['A'], 'B': ['B', 'B_tied']}. + """ + named_params = dict(named_params) + tied_named_params = dict(tied_named_params) + + tensors_dict_keys = set(named_params.keys()) + tied_tensors_dict_keys = set(tied_named_params.keys()) + assert tensors_dict_keys.issubset(tied_tensors_dict_keys) + + tensor_to_mapping: Dict[Tensor, Tuple[str, List[str]]] = {} + for key, tensor in named_params.items(): + tensor_to_mapping[tensor] = (key, []) + for key, tensor in tied_named_params.items(): + assert tensor in tensor_to_mapping + tensor_to_mapping[tensor][1].append(key) + return dict(tensor_to_mapping.values()) + + +def _extract_members( + mod: nn.Module, + named_members: Callable[..., Iterable[Tuple[str, Tensor]]], + subclass: Callable[[Tensor], Tensor], +) -> Tuple[Tuple[Tensor, ...], Tuple[str, ...], Dict[str, List[str]]]: + all_named_members = tuple(named_members(remove_duplicate=False)) + unique_named_members = tuple(named_members(remove_duplicate=True)) + names_map = create_names_map(unique_named_members, all_named_members) + + # Remove all the members in the model + memo = {} + accessor = NamedMemberAccessor(mod) + for name, p in all_named_members: + if p not in memo: + memo[p] = subclass(torch.empty_like(p, device="meta")) + replacement = memo[p] + accessor.set_tensor(name, replacement) + + if len(unique_named_members) == 0: + names, params = (), () + else: + names, params = zip(*unique_named_members) # type: ignore[assignment] + return params, names, names_map + + +def extract_weights( + mod: nn.Module, +) -> Tuple[Tuple[Tensor, ...], Tuple[str, ...], Dict[str, List[str]]]: + """ + This function removes all the Parameters from the model and + return them as a tuple as well as their original attribute names. + The weights must be re-loaded with `load_weights` before the model + can be used again. + Note that this function modifies the model in place and after this + call, mod.parameters() will be empty. 
+ """ + return _extract_members(mod, mod.named_parameters, nn.Parameter) + + +def extract_buffers( + mod: nn.Module, +) -> Tuple[Tuple[Tensor, ...], Tuple[str, ...], Dict[str, List[str]]]: + return _extract_members(mod, mod.named_buffers, lambda x: x) + + +def load_weights( + mod: nn.Module, + names: Sequence[str], + params: Sequence[Tensor], + as_params: bool = False, +) -> None: + """ + Reload a set of weights so that `mod` can be used again to perform a forward pass. + Note that the `params` are regular Tensors (that can have history) and so are left + as Tensors. This means that mod.parameters() will still be empty after this call. + """ + accessor = NamedMemberAccessor(mod) + if as_params: + params = [nn.Parameter(p) for p in params] + accessor.set_tensors(names, params) + + +def _swap_state( + mod: nn.Module, names_map: Dict[str, List[str]], elems: Iterable[Tensor] +) -> List[Tensor]: + result: List[Tensor] = [] + accessor = NamedMemberAccessor(mod) + for (_, attr_names), elem in zip(names_map.items(), elems): + for i, attr_name in enumerate(attr_names): + if i == 0: + result.append(accessor.swap_tensor(attr_name, elem)) + else: + accessor.set_tensor(attr_name, elem) + return result + + +def load_buffers( + mod: nn.Module, + names: Sequence[str], + buffers: Sequence[Tensor], + as_params: bool = False, +) -> None: + accessor = NamedMemberAccessor(mod) + accessor.set_tensors(names, buffers) + + +def load_state( + model: nn.Module, + weights: Sequence[Tensor], + weight_names: Sequence[str], + buffers: Sequence[Tensor] = (), + buffer_names: Sequence[str] = (), +) -> nn.Module: + """load_state(model, weights, weight_names, buffers=(), buffer_names=()) -> model + + load_state takes `weights` and `buffers` and assigns them to the model. + This is the inverse operation of `make_functional_deprecated_v1`. + """ + assert len(weight_names) == len(weights) + load_weights(model, weight_names, weights) + if len(buffers) > 0: + assert len(buffer_names) == len(buffers) + load_buffers(model, buffer_names, buffers) + return model + + +def make_functional_deprecated_v1(model: nn.Module): + """make_functional_deprecated_v1(model) -> weights, func, weight_names + + Given an nn.Module, make_functional_deprecated_v1 extracts the state (weights) + and returns a functional version of the model, `func`. This makes + it so that it is possible use transforms over the parameters of + `model`. + + `func` can be invoked as follows: + ``` + x = torch.randn(4, 3) + model = nn.Linear(3, 3) + weights, func, _ = make_functional_deprecated_v1(model) + func(weights, (x,)) + ``` + + And here is an example of applying the grad transform: + ``` + x = torch.randn(4, 3) + model = nn.Linear(3, 3) + weights, _, func = make_functional_deprecated_v1(model) + grad_weights = grad(func)(weights, (x,)) + ``` + + To put the state back into a model, use `load_state`. + """ + buffers = list(model.buffers()) + if len(buffers) > 0: + raise RuntimeError( + "make_functional_deprecated_v1(model): `model` has buffers. Please use " + "make_functional_with_buffers_deprecated_v1(model) instead." 
+ ) + weights, descriptors, _ = extract_weights(model) + + def fun(weights, data): + mutable_model = copy.deepcopy(model) + load_weights(mutable_model, descriptors, weights) + return mutable_model(*data) + + return weights, fun, descriptors + + +def make_functional_with_buffers_deprecated_v1(model: nn.Module): + """make_functional_with_buffers_deprecated_v1(model) -> weights, buffers, func, weight_names, buffer_names + + Given an nn.Module, make_functional_with_buffers_deprecated_v1 extracts the state (weights and buffers) + and returns a functional version of the model, `func`. + + `func` can be invoked as follows: + ``` + x = torch.randn(4, 3) + model = nn.Linear(3, 3) + weights, buffers, func, _, _ = make_functional_with_buffers_deprecated_v1(model) + func(weights, buffers, (x,)) + ``` + + And here is an example of applying the grad transform: + ``` + x = torch.randn(4, 3) + model = nn.Linear(3, 3) + weights, buffers, func, _, _ = make_functional_with_buffers_deprecated_v1(model) + func(weights, buffers, (x,)) + grad_weights = grad(func)(weights, buffers, (x,)) + ``` + + To put the state back into a model, use `load_state`. + """ + weights, weight_descriptors, _ = extract_weights(model) + buffers, buf_descriptors, _ = extract_buffers(model) + + def fun(weights, buffers, data): + mutable_model = copy.deepcopy(model) + load_weights(mutable_model, weight_descriptors, weights) + load_buffers(mutable_model, buf_descriptors, buffers) + return mutable_model(*data) + + return weights, buffers, fun, weight_descriptors, buf_descriptors + + +class FunctionalModuleWithBuffers(nn.Module): + """ + This is the callable object returned by :func:`make_functional_with_buffers`. + """ + + def __init__( + self, + stateless_model: nn.Module, + param_names: Tuple[str, ...], + buffer_names: Tuple[str, ...], + param_names_map: Dict[str, List[str]], + buffer_names_map: Dict[str, List[str]], + ) -> None: + super().__init__() + self.stateless_model = stateless_model + self.param_names = param_names + self.buffer_names = buffer_names + + self.all_names_map = dict(param_names_map) + self.all_names_map.update(buffer_names_map) + + @staticmethod + def _create_from( + model: nn.Module, disable_autograd_tracking: bool = False + ) -> Tuple["FunctionalModuleWithBuffers", Tuple[Tensor, ...], Tuple[Tensor, ...]]: + # TODO: We don't need to copy the model to create a stateless copy + model_copy = copy.deepcopy(model) + params, param_names, param_names_map = extract_weights(model_copy) + buffers, buffer_names, buffer_names_map = extract_buffers(model_copy) + if disable_autograd_tracking: + for param in params: + param.requires_grad_(False) + return ( + FunctionalModuleWithBuffers( + model_copy, param_names, buffer_names, param_names_map, buffer_names_map + ), + params, + buffers, + ) + + def forward( + self, params: Iterable[Tensor], buffers: Iterable[Tensor], *args, **kwargs + ) -> Any: + # Temporarily load the state back onto self.stateless_model + old_state = _swap_state( + self.stateless_model, + self.all_names_map, + tuple(params) + tuple(buffers), + ) + try: + return self.stateless_model(*args, **kwargs) + finally: + # Remove the loaded state on self.stateless_model + _swap_state(self.stateless_model, self.all_names_map, old_state) + + +class FunctionalModule(nn.Module): + """ + This is the callable object returned by :func:`make_functional`. 
+ """ + + def __init__( + self, + stateless_model: nn.Module, + param_names: Tuple[str, ...], + names_map: Dict[str, List[str]], + ) -> None: + super().__init__() + self.stateless_model = stateless_model + self.param_names = param_names + self.names_map = names_map + + @staticmethod + def _create_from( + model: nn.Module, disable_autograd_tracking: bool = False + ) -> Tuple["FunctionalModule", Tuple[Tensor, ...]]: + # TODO: We don't need to copy the model to create a stateless copy + model_copy = copy.deepcopy(model) + params, param_names, names_map = extract_weights(model_copy) + if disable_autograd_tracking: + for param in params: + param.requires_grad_(False) + return FunctionalModule(model_copy, param_names, names_map), params + + def forward(self, params: Iterable[Tensor], *args, **kwargs) -> Any: + # Temporarily load the state back onto self.stateless_model + old_state = _swap_state(self.stateless_model, self.names_map, params) + try: + return self.stateless_model(*args, **kwargs) + finally: + # Remove the loaded state on self.stateless_model + _swap_state(self.stateless_model, self.names_map, old_state) + + +def make_functional( + model: nn.Module, disable_autograd_tracking: bool = False +) -> Tuple[FunctionalModule, Tuple[Tensor, ...]]: + """make_functional(model, disable_autograd_tracking=False) -> func, params + + Given a ``torch.nn.Module``, :func:`make_functional` extracts the state + (params) and returns a functional version of the model, ``func``. This + makes it so that it is possible use transforms over the parameters of + ``model``. + + ``func`` can be invoked as follows: + + .. code-block:: python + + import torch + import torch.nn as nn + from functorch import make_functional + + x = torch.randn(4, 3) + model = nn.Linear(3, 3) + func, params = make_functional(model) + func(params, x) + + And here is an example of applying the grad transform over the parameters + of a model. + + .. code-block:: python + + import torch + import torch.nn as nn + from functorch import make_functional, grad + + x = torch.randn(4, 3) + t = torch.randn(4, 3) + model = nn.Linear(3, 3) + func, params = make_functional(model) + + def compute_loss(params, x, t): + y = func(params, x) + return nn.functional.mse_loss(y, t) + + grad_weights = grad(compute_loss)(params, x, t) + + If the model has any buffers, please use :func:`make_functional_with_buffers` instead. + + Args: + model (torch.nn.Module): Input model. + disable_autograd_tracking (bool): Flag to disable gradients tracking for output parameters. + The returned params are unrelated to the set of params from the original model. If False (default), + the params will have ``requires_grad=True`` on them (aka they will be trackable with regular + PyTorch autograd), matching the requires_grad-ness of the params from the original model. + Otherwise, the returned params will have ``requires_grad=False``. Default, False. + If you plan on using regular PyTorch autograd (e.g., if you want to call ``.backward()`` or + ``torch.autograd.grad()``, then set ``disable_autograd_tracking=False``. + Otherwise, if you're only planning on using functorch's gradient transforms, + then please set ``disable_autograd_tracking=True`` to avoid unnecessarily tracking + history with PyTorch autograd. + + """ + buffers = list(model.buffers()) + if len(buffers) > 0: + raise RuntimeError( + "make_functional(model): `model` has buffers. Please use " + "make_functional_with_buffers(model) instead." 
+ ) + return FunctionalModule._create_from( + model, disable_autograd_tracking=disable_autograd_tracking + ) + + +def make_functional_with_buffers( + model: nn.Module, disable_autograd_tracking: bool = False +) -> Tuple[FunctionalModuleWithBuffers, Tuple[Tensor, ...], Tuple[Tensor, ...]]: + """make_functional_with_buffers(model, disable_autograd_tracking=False) -> func, params, buffers + + Given a ``torch.nn.Module``, make_functional_with_buffers extracts the + state (params and buffers) and returns a functional version of the model + ``func`` that can be invoked like a function. + + ``func`` can be invoked as follows: + + .. code-block:: python + + import torch + import torch.nn as nn + from functorch import make_functional_with_buffers + + x = torch.randn(4, 3) + model = nn.Linear(3, 3) + func, params, buffers = make_functional_with_buffers(model) + func(params, buffers, x) + + And here is an example of applying the grad transform over the parameters + of a model: + + .. code-block:: python + + import torch + import torch.nn as nn + from functorch import make_functional_with_buffers, grad + + x = torch.randn(4, 3) + t = torch.randn(4, 3) + model = nn.Linear(3, 3) + func, params, buffers = make_functional_with_buffers(model) + + def compute_loss(params, buffers, x, t): + y = func(params, buffers, x) + return nn.functional.mse_loss(y, t) + + grad_weights = grad(compute_loss)(params, buffers, x, t) + + Args: + model (torch.nn.Module): Input model. + disable_autograd_tracking (bool): Flag to disable gradients tracking for output parameters. + The returned params are unrelated to the set of params from the original model. If False (default), + the params will have ``requires_grad=True`` on them (aka they will be trackable with regular + PyTorch autograd), matching the requires_grad-ness of the params from the original model. + Otherwise, the returned params will have ``requires_grad=False``. Default, False. + If you plan on using regular PyTorch autograd (e.g., if you want to call ``.backward()`` or + ``torch.autograd.grad()``, then set ``disable_autograd_tracking=False``. + Otherwise, if you're only planning on using functorch's gradient transforms, + then please set ``disable_autograd_tracking=True`` to avoid unnecessarily tracking + history with PyTorch autograd. + + """ + return FunctionalModuleWithBuffers._create_from( + model, disable_autograd_tracking=disable_autograd_tracking + ) + + +def transpose_stack( + tuple_of_tuple_of_tensors: Tuple[Tuple[Tensor, ...], ...] +) -> Tuple[Tensor, ...]: + tuple_of_tuple_of_tensors = tuple(zip(*tuple_of_tuple_of_tensors)) + results = tuple( + torch.stack(shards).detach() for shards in tuple_of_tuple_of_tensors + ) + return results + + +def combine_state_for_ensemble( + models: Sequence[nn.Module], +) -> Tuple[FunctionalModuleWithBuffers, Tuple[Tensor, ...], Tuple[Tensor, ...]]: + """combine_state_for_ensemble(models) -> func, params, buffers + + Prepares a list of torch.nn.Modules for ensembling with :func:`vmap`. + + Given a list of ``M`` ``nn.Modules`` of the same class, stacks all of their + parameters and buffers together to make ``params`` and ``buffers``. + Each parameter and buffer in the result will have an additional dimension + of size ``M``. + + :func:`combine_state_for_ensemble` also returns ``func``, a functional + version of one of the models in :attr:`models`. 
One cannot directly run + ``func(params, buffers, *args, **kwargs)`` directly, you probably want to + use ``vmap(func, ...)(params, buffers, *args, **kwargs)`` + + Here's an example of how to ensemble over a very simple model: + + .. code-block:: python + + num_models = 5 + batch_size = 64 + in_features, out_features = 3, 3 + models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)] + data = torch.randn(batch_size, 3) + + fmodel, params, buffers = combine_state_for_ensemble(models) + output = vmap(fmodel, (0, 0, None))(params, buffers, data) + + assert output.shape == (num_models, batch_size, out_features) + + .. warning:: + All of the modules being stacked together must be the same (except for + the values of their parameters/buffers). For example, they should be in the + same mode (training vs eval). + + This API is subject to change -- we're investigating better ways to + create ensembles and would love your feedback how to improve this. + """ + if len(models) == 0: + raise RuntimeError( + "combine_state_for_ensemble: Expected at least one model, got 0." + ) + if not (all(m.training for m in models) or all(not m.training for m in models)): + raise RuntimeError( + "combine_state_for_ensemble: Expected all models to " + "have the same training/eval mode." + ) + model0_typ = type(models[0]) + if not all(type(m) == model0_typ for m in models): + raise RuntimeError( + "combine_state_for_ensemble: Expected all models to be of the same class." + ) + funcs, params, buffers = zip( + *[make_functional_with_buffers(model) for model in models] + ) + params = transpose_stack(params) + buffers = transpose_stack(buffers) + return funcs[0], params, buffers + + +def functional_init( + model_class: Type[nn.Module], + ensemble_shape: Union[Tuple[()], Tuple[int]] = (), + device: torch.types.Device = "cpu", +): + def wrapped(*args, **kwargs): + if len(ensemble_shape) >= 2: + raise ValueError("NYI: ensemble_shape with more than 1 element") + if len(ensemble_shape) == 0: + model = model_class(*args, **kwargs).to(device) + return make_functional_deprecated_v1(model) + num_models = ensemble_shape[0] # type: ignore[misc] + if num_models <= 0: + raise ValueError(f"num_models {num_models} should be > 0") + # NB: Not very efficient, more of a POC + models = tuple( + model_class(*args, **kwargs).to(device) for _ in range(num_models) + ) + _, fn, names = make_functional_deprecated_v1(model_class(*args, **kwargs)) + weights = tuple(make_functional_deprecated_v1(model)[0] for model in models) + weights = tuple(zip(*weights)) + weights = tuple(torch.stack(shards).detach() for shards in weights) + return weights, fn, names + + return wrapped + + +def functional_init_with_buffers( + model_class: Type[nn.Module], + ensemble_shape: Union[Tuple[()], Tuple[int]] = (), + device: torch.types.Device = "cpu", +): + def wrapped(*args, **kwargs): + if len(ensemble_shape) >= 2: + raise ValueError("NYI: ensemble_shape with more than 1 element") + if len(ensemble_shape) == 0: + model = model_class(*args, **kwargs).to(device) + return make_functional_deprecated_v1(model) + num_models = ensemble_shape[0] # type: ignore[misc] + if num_models <= 0: + raise ValueError(f"num_models {num_models} should be > 0") + # NB: Not very efficient, more of a POC + models = tuple( + model_class(*args, **kwargs).to(device) for _ in range(num_models) + ) + ( + _, + _, + fn, + weight_names, + buffer_names, + ) = make_functional_with_buffers_deprecated_v1(model_class(*args, **kwargs)) + weights, buffers = zip( + *tuple( + 
make_functional_with_buffers_deprecated_v1(model)[:2] + for model in models + ) + ) + weights = tuple(zip(*weights)) + weights = tuple(torch.stack(shards).detach() for shards in weights) + buffers = tuple(zip(*buffers)) + buffers = tuple(torch.stack(shards).detach() for shards in buffers) + return weights, buffers, fn, weight_names, buffer_names + + return wrapped diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/partitioners.py b/venv/lib/python3.10/site-packages/torch/_functorch/partitioners.py new file mode 100644 index 0000000000000000000000000000000000000000..dd8f86055da1d8420130e9caeb2ab59c27d2a385 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/partitioners.py @@ -0,0 +1,981 @@ +# mypy: ignore-errors + +from torch.fx.experimental.proxy_tensor import is_sym_node, py_sym_types +from torch.fx.experimental.sym_node import magic_methods, method_to_operator +from torch.fx.experimental.symbolic_shapes import ( + hint_int, free_symbols, is_symbol_binding_fx_node, find_symbol_binding_fx_nodes +) +from torch.fx.experimental._backward_state import BackwardState +import torch +import torch.fx as fx +import operator +import math +import torch.utils._pytree as pytree +import copy +import os +import itertools +import sympy +from collections import defaultdict +from torch.fx.passes import graph_drawer +from typing import List, Optional, Set, Tuple, Union +from .compile_utils import fx_graph_cse, get_aten_target +from . import config +import functools + + +AOT_PARTITIONER_DEBUG = config.debug_partitioner + + +def must_recompute(node): + return node.meta.get("recompute", False) + +def has_recomputable_ops(fx_g): + found = False + for node in fx_g.graph.nodes: + if must_recompute(node): + return True + return False + +def has_recomputable_rng_ops(fx_g): + for node in fx_g.graph.nodes: + if must_recompute(node) and hasattr(node.target, "tags") and torch.Tag.nondeterministic_seeded in node.target.tags: + return True + return False + +def sym_node_size(node): + if isinstance(node.meta["val"], (torch.SymInt, torch.SymBool)): + return 1 + assert isinstance(node.meta["val"], torch.SymFloat) + return 4 + +class InvalidNodeBase: + def __repr__(self): + return "Invalid Node" + + +InvalidNode = InvalidNodeBase() + + +def _extract_graph_with_inputs_outputs(joint_graph, inputs, outputs): + """ + Given a graph, extracts out a subgraph that takes the specified nodes as + inputs and returns the specified outputs. + + This includes specifying non-placeholder nodes as inputs. + + The general strategy is to initialize all inputs with proxies as we + encounter them, and trace through the graph, only keeping values which take + in valid proxies. Then, all dead code is eliminated. 
+ """ + new_graph = fx.Graph() + env = {} + + # Add new placeholder nodes in the order specified by the inputs + for node in inputs: + new_node = new_graph.placeholder(node.name) + # Can't use node_copy here as we may be turning previous call_function into placeholders + new_node.meta = node.meta + env[node] = new_node + + for node in joint_graph.nodes: + if node in inputs: + continue + elif node.op == 'placeholder': + env[node] = InvalidNode + elif node.op == 'call_function': + all_args = pytree.arg_tree_leaves(*node.args, **node.kwargs) + all_args = [isinstance(env[x], InvalidNodeBase) for x in all_args if isinstance(x, fx.Node)] + if any(all_args): + env[node] = InvalidNode + continue + env[node] = new_graph.node_copy(node, lambda x: env[x]) + elif node.op == 'get_attr': + env[node] = new_graph.node_copy(node, lambda x: env[x]) + elif node.op == 'output': + pass + output_values = [] + for x in outputs: + if isinstance(x, fx.Node): + if x not in env: + raise RuntimeError(f"Node {x} couldn't be found in env") + assert not isinstance(env[x], InvalidNodeBase), f"Node {x} was invalid, but is output" + output_values.append(env[x]) + else: + output_values.append(x) + new_graph.output(output_values) + + new_graph.eliminate_dead_code() + new_graph.lint() + return new_graph + + +def _is_primal(node): + return ( + node.op == "placeholder" + and "tangents" not in node.target + and not _is_bwd_seed_offset(node) + and not _is_fwd_seed_offset(node) + ) + +def _is_tangent(node): + return node.op == "placeholder" and "tangents" in node.target + +def _is_bwd_seed_offset(node): + return node.op == "placeholder" and ("bwd_seed" in node.target or "bwd_base_offset" in node.target) + +def _is_fwd_seed_offset(node): + return node.op == "placeholder" and ("fwd_seed" in node.target or "fwd_base_offset" in node.target) + +def _is_backward_state(node): + return node.op == "placeholder" and isinstance(node.meta.get("val"), BackwardState) + + +def _extract_fwd_bwd_outputs(joint_module: fx.GraphModule, *, num_fwd_outputs): + outputs = pytree.arg_tree_leaves(*(node.args for node in joint_module.graph.nodes if node.op == 'output')) + fwd_outputs = outputs[:num_fwd_outputs] + bwd_outputs = outputs[num_fwd_outputs:] + return fwd_outputs, bwd_outputs + + +def _remove_by_name(saved_values, name): + for saved_value in saved_values: + if saved_value.name == name: + saved_values.remove(saved_value) + break + +def _placeholders(nodes): + # Avoid making an entire pass over the graph if we only care about the input placeholders + result = [] + for node in nodes: + if node.op == 'placeholder': + result.append(node) + else: + break # placeholders are all at the start of graph + return result + + +def _extract_fwd_bwd_modules(joint_module: fx.GraphModule, saved_values, saved_sym_nodes, *, num_fwd_outputs): + fwd_outputs, bwd_outputs = _extract_fwd_bwd_outputs(joint_module, num_fwd_outputs=num_fwd_outputs) + placeholders = _placeholders(joint_module.graph.nodes) + primal_inputs = [*filter(_is_primal, placeholders)] + tangent_inputs = [*filter(_is_tangent, placeholders)] + fwd_seed_offset_inputs = [*filter(_is_fwd_seed_offset, placeholders)] + bwd_seed_offset_inputs = [*filter(_is_bwd_seed_offset, placeholders)] + backward_state_inputs = [*filter(_is_backward_state, placeholders)] + + bwd_graph = _extract_graph_with_inputs_outputs( + joint_module.graph, + saved_sym_nodes + saved_values + tangent_inputs + bwd_seed_offset_inputs, + bwd_outputs + ) + + for node in _placeholders(bwd_graph.nodes): + assert node.op == 'placeholder' + # 
This is to filter out saved values that don't actually end up being used by the backwards pass + if not node.users: + _remove_by_name(saved_values, node.name) + _remove_by_name(saved_sym_nodes, node.name) + elif _is_backward_state(node): + # BackwardState is saved directly + _remove_by_name(saved_values, node.name) + assert backward_state_inputs + + + # Now that we have the finalized list of saved values, we need to ensure + # we propagate all symbols which are referenced by backwards inputs. + # These are not directly used in the graph but are required for downstream + # sizevar assignment + saved_symbols: Set[sympy.Symbol] = set() + saved_sym_nodes_binding = [] + saved_sym_nodes_derived = [] + + # Some symbols may already be bound in the directly saved_sym_nodes, + # keep track of them so we don't re-bind them + for node in saved_sym_nodes: + symbol = is_symbol_binding_fx_node(node) + if symbol: + saved_symbols.add(symbol) + saved_sym_nodes_binding.append(node) + else: + saved_sym_nodes_derived.append(node) + + # Now go through all of the prospective backward inputs and track any + # other symbols we need to bind + symbol_bindings = find_symbol_binding_fx_nodes(joint_module.graph) + for node in itertools.chain(saved_sym_nodes_derived, saved_values, tangent_inputs): + if "val" not in node.meta: + continue + new_symbols = free_symbols(node.meta["val"]) - saved_symbols + # NB: Deterministic order please! + for s in sorted(new_symbols, key=lambda s: s.name): + # NB: For well formed graphs, the symbol should always be present, + # but we also have ways to produce ill-formed graphs, e.g., direct + # make_fx usages, so don't choke in this case + if s not in symbol_bindings: + continue + saved_sym_nodes_binding.append(symbol_bindings[s]) + saved_symbols |= new_symbols + + + # Update saved_sym_nodes that are now reordered to have all bindings at + # front. This can also be used later on to figure out the position of saved + # sym nodes in the output of fwd graph. + saved_sym_nodes.clear() + saved_sym_nodes.extend(saved_sym_nodes_binding + saved_sym_nodes_derived) + + # Now, we re-generate the fwd/bwd graphs. + # NB: This might increase compilation time, but I doubt it matters + fwd_graph = _extract_graph_with_inputs_outputs( + joint_module.graph, + primal_inputs + fwd_seed_offset_inputs, + fwd_outputs + saved_values + saved_sym_nodes + ) + bwd_graph = _extract_graph_with_inputs_outputs( + joint_module.graph, + saved_sym_nodes + saved_values + tangent_inputs + bwd_seed_offset_inputs + backward_state_inputs, + bwd_outputs + ) + + fwd_module = fx._lazy_graph_module._make_graph_module(joint_module, fwd_graph) + bwd_module = fx._lazy_graph_module._make_graph_module(joint_module, bwd_graph) + return fwd_module, bwd_module + + +def default_partition( + joint_module: fx.GraphModule, _joint_inputs, *, num_fwd_outputs +) -> Tuple[fx.GraphModule, fx.GraphModule]: + """ + Partitions the :attr:`joint_module` in a manner that closely resembles the + behavior observed in the original ``.forward()`` and ``.backward()`` of the + callable, i.e., the resulting forward graph contains those operators that + are executed in the original ``.forward()`` callable passed to + :func:`aot_function`. + + The default partitioner collects the operators that are between the forward + inputs and the forward outputs. This helps in finding the tensors which have + to be stashed for the backward pass. These stashed tensors become the output + of the generated forward graph. 
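+    For instance, if the forward computes ``(x @ w).sin()``, the backward
+    needs the matmul result to form its cosine, so that intermediate typically
+    ends up being saved as an extra forward output.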
The remaining operators are then placed in + the backward graph. + + .. warning:: + This API is experimental and likely to change. + + Args: + joint_module(fx.GraphModule): The joint forward and backward graph. This + is the result of AOT Autograd tracing. + + Returns: + Returns the generated forward and backward Fx graph modules. + """ + if has_recomputable_ops(joint_module): + return min_cut_rematerialization_partition(joint_module, _joint_inputs, num_fwd_outputs=num_fwd_outputs) + primal_inputs = list(filter(_is_primal, joint_module.graph.nodes)) + fwd_seed_offset_inputs = list(filter(_is_fwd_seed_offset, joint_module.graph.nodes)) + inputs = primal_inputs + fwd_seed_offset_inputs + fwd_outputs, bwd_outputs = _extract_fwd_bwd_outputs(joint_module, num_fwd_outputs=num_fwd_outputs) + forward_only_graph = _extract_graph_with_inputs_outputs(joint_module.graph, inputs, fwd_outputs) + forward_node_names = {node.name for node in forward_only_graph.nodes if node.op != 'output'} + saved_values = [] + saved_sym_nodes = [] + + for node in joint_module.graph.nodes: + if node.name not in forward_node_names: + continue + if is_sym_node(node): + # Symints must be kept separate from tensors so that PythonFunction only calls + # save_for_backward on tensors and stashes symints in autograd .ctx + saved_sym_nodes.append(node) + elif ( + 'tensor_meta' not in node.meta + and node.op == 'call_function' + ): + # Since we can't save tuple of tensor values, we need to flatten out what we're saving + users = node.users + assert all(user.target == operator.getitem for user in users) + saved_values.extend(users) + else: + backward_usages = [n for n in node.users if n.name not in forward_node_names] + if 'tensor_meta' in node.meta and all(is_sym_node(n) for n in backward_usages): + # If we have a tensor in the forward, where only its sizes/strides are needed in the backward, + # and not the actual tensor data, + # then it will be a lot cheaper to save only the sizes/strides, and not the actual tensor. + # + # Note that saving the tensor could also cause compilation problems: + # If the user mutated an input in the forward and uses its sizes/strides in the backward, + # then we would be obligated to clone the input before saving it to appease autograd. + # (This is how we originally found this bug). + saved_sym_nodes.extend(backward_usages) + else: + saved_values.append(node) + saved_values = list(dict.fromkeys(saved_values).keys()) + saved_sym_nodes = list(dict.fromkeys(saved_sym_nodes).keys()) + + return _extract_fwd_bwd_modules(joint_module, saved_values, saved_sym_nodes=saved_sym_nodes, num_fwd_outputs=num_fwd_outputs) + + +def _prod(x): + s = 1 + for i in x: + s *= i + return s + +def _tensor_nbytes(numel, dtype): + return numel * dtype.itemsize + +def _size_of(node: fx.Node) -> int: + if 'val' in node.meta: + val = node.meta['val'] + if isinstance(val, py_sym_types): + if isinstance(val, torch.SymInt): + return 1 + else: + return 999999 + # NB: The fallback values here are meaningless, maybe we should respect + # torch._inductor.config.unbacked_symint_fallback (but this is a + # layering violation) + elif isinstance(val, (list, tuple)): + return sum(_tensor_nbytes(hint_int(n.numel(), fallback=4098), n.dtype) for n in val if isinstance(n, torch.Tensor)) + elif isinstance(val, torch.Tensor): + return _tensor_nbytes(hint_int(val.numel(), fallback=4098), val.dtype) + + raise RuntimeError(f"Unknown metadata type {type(val)}") + + # Only needed since we don't always trace with fake tensors. 
+ if 'tensor_meta' in node.meta: + metadata = node.meta['tensor_meta'] + # TODO: What is to_size_hint suppose to be? + numel = _prod(map(to_size_hint, metadata.shape)) # noqa: F821 + dtype = metadata.dtype + else: + return 0 + + return _tensor_nbytes(numel, dtype) + + +# Used for some investigative purposes +def _count_ops(graph): + from collections import defaultdict + cnt = defaultdict(int) + for node in graph.nodes: + if node.op == 'call_function': + cnt[node.target.__name__] += 1 + print(sorted(cnt.items(), key=lambda x: x[1], reverse=True)) + + +@functools.lru_cache(None) +def pointwise_ops(): + ops = [] + for attr_name in dir(torch.ops.aten): + opoverloadpacket = getattr(torch.ops.aten, attr_name) + if not isinstance(opoverloadpacket, torch._ops.OpOverloadPacket): + continue + + for overload in opoverloadpacket.overloads(): + op_overload = getattr(opoverloadpacket, overload) + if torch.Tag.pointwise in op_overload.tags: + # currently aot autograd uses packet not overload + ops.append(opoverloadpacket) + break + + return ops + +def get_depth(node, depth_map): + if node in depth_map: + return depth_map[node] + + # Base case + if node.op == "placeholder": + depth_map[node] = 0 + return depth_map[node] + + # Handle output node + if node.op == "output": + args = node.args[0] + for arg in args: + if isinstance(arg, torch.fx.node.Node): + get_depth(arg, depth_map) + return + + # Get the depth of args and set the depth of this node + arg_depths = [get_depth(arg, depth_map) for arg in node.all_input_nodes if isinstance(arg, torch.fx.node.Node)] + # factory ops like full, rand might not have any input args + if len(arg_depths) == 0: + arg_depths = [0] + depth_map[node] = max(arg_depths) + 1 + return depth_map[node] + + +def sort_depths(args, depth_map): + arg_depths = {arg: depth_map[arg] for arg in args if isinstance(arg, torch.fx.node.Node)} + return sorted(arg_depths.items(), key=lambda x: x[1], reverse=True) + + +def reordering_to_mimic_autograd_engine(gm): + """ + This pass finds the first bwd node in the graph (by looking at users of + tangents) and then reorders the graph by walking from this node to all the + way to the end of the graph. At each op in this traveral, we insert this op + in a new graph and try to bring only the relevant subgraph from the other + non-bwd edges relevant for this op. This closely mimics the behavior of + autograd engine. + + Why is this pass required in the first place? + + This is an artifact of how partitioners work today. The starting point of + partitioner is a joint graph, which is fwd and then bwd graph. In the case + of checkpointing, we keep portions of fwd graph in their original place in + the joint graph, while obtaining a bwd graph. As a result, the resulting bwd + graph has copies of recomputed fwd subgraphs followed by the original bwd + graph. If we run this naively, this leads to bad memory footprint, because + the fwd subgraphs are live for way longer duration than necessary. This pass + reorders the operations such that we prioritize the ops for the original bwd + graph while only realizing those ops from the fwd graph that are necessary + at any given point in the graph. 
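+
+    Schematically (op names here are illustrative only)::
+
+        joint graph order:  fwd_a, fwd_b, bwd_uses_a, bwd_uses_b
+        reordered order:    fwd_a, bwd_uses_a, fwd_b, bwd_uses_b
+
+    i.e. each recomputed forward op is materialized just before the backward
+    op that consumes it, which shortens its live range.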
+ """ + + new_graph = fx.Graph() + env = {} + + # Add new placeholder nodes in the order specified by the inputs + for node in gm.graph.nodes: + if node.op == "placeholder": + new_node = new_graph.placeholder(node.name) + # Can't use node_copy here as we may be turning previous call_function into placeholders + new_node.meta = node.meta + env[node] = new_node + + + order = {} + for idx, node in enumerate(gm.graph.nodes): + order[node] = idx + + # Populate depth for the nodes. Depth is the distance from the inputs. + depths = {} + output_node = next(node for node in gm.graph.nodes if node.op == "output") + get_depth(output_node, depths) + + def insert_node_in_graph(node): + if node in env: + return env[node] + + # Bias traversal towards the nodes that have higher depth - prioritizes + # critical path first. + for arg, _ in sort_depths(node.all_input_nodes, depths): + env[arg] = insert_node_in_graph(arg) + env[node] = new_graph.node_copy(node, lambda x: env[x]) + return env[node] + + # Find first bwd node in the graph + tangent_inputs = list(filter(_is_tangent, gm.graph.nodes)) + first_node_in_bwd = None + minimum_order = math.inf + for tangent in tangent_inputs: + for user in tangent.users: + if order[user] < minimum_order: + minimum_order = order[user] + first_node_in_bwd = user + assert first_node_in_bwd is not None + + # Build the graph op-by-op by starting from the node all the way to the end + for node in list(gm.graph.nodes)[order[first_node_in_bwd]:]: + insert_node_in_graph(node) + + # The output node is already built by the traversal. + new_gm = torch.fx.GraphModule(gm, new_graph) + return new_gm + + +def functionalize_rng_ops(joint_module, fw_module, bw_module, num_sym_nodes): + # During user-driven activation checkpointing, we have to ensure that a rng + # op in fwd yields the same output as the recomputed rng op in the bwd. To + # do this, we use functionalize wrappers to wrap the random ops and share + # rng state between the fwd and bwd graphs. + + # There are 3 main steps to do this + # Step 1 - Construct a mapping of rng node between the fwd and its counterpart in bwd. + # Step 2 - Modify the fwd pass such that + # 1) Replace rand with run_and_save_rng_state wrapper + # 2) Replace the users of the original op with the output[1] of this op. + # 3) Collect all the rng_state - output[0] of each op, and make them + # output nodes. Special care needs to be taken here because fwd outputs + # has symints at the very end. + # Step 3 - Modify the bwd pass such that + # 1) Add the input nodes just before the tangents for the stashed rng states + # 2) Replace rand with run_with_save_rng_state wrappers + # 3) Use the stashed states as inputs to these ops + + # Unique id to generate name + uid = itertools.count() + + def get_rng_ops(gmod): + random_nodes = {} + for node in gmod.graph.nodes: + if ( + node.op == "call_function" + and hasattr(node.target, "tags") + and torch.Tag.nondeterministic_seeded in node.target.tags + ): + random_nodes[node.name] = node + return random_nodes + + def get_device(node): + """ + Check the example value of the node outputs to find the device type. 
+ """ + if "val" not in node.meta: + return None + + candidates = node.meta["val"] + if not isinstance(candidates, tuple): + candidates = (candidates,) + + for candidate in candidates: + if isinstance(candidate, torch.Tensor): + if candidate.device.type == "cuda": + return "cuda" + + return "cpu" + + def get_sample_rng_state(device): + if device == "cuda": + return torch.cuda.get_rng_state() + return torch.get_rng_state() + + # Step 1 - Construct a mapping of rng node between the fwd and its counterpart in bwd. + joint_graph_rng_ops = get_rng_ops(joint_module) + fw_graph_rng_ops = get_rng_ops(fw_module) + bw_graph_rng_ops = get_rng_ops(bw_module) + recomputable_rng_ops_map = dict() + for node in joint_module.graph.nodes: + if ( + must_recompute(node) + and hasattr(node.target, "tags") + and torch.Tag.nondeterministic_seeded in node.target.tags + ): + base_node = joint_graph_rng_ops[node.name] + fw_node = fw_graph_rng_ops[node.name] + bw_node = bw_graph_rng_ops[node.name] + recomputable_rng_ops_map[base_node] = {"fwd": fw_node, "bwd": bw_node} + + run_and_save_rng = torch._prims.rng_prims.run_and_save_rng_state + run_with_rng_state = torch._prims.rng_prims.run_with_rng_state + + for node in bw_module.graph.nodes: + if node.op == "placeholder" and "tangent" in node.name: + bw_tangent_start_node = node + break + + + fw_rng_state_outputs = [] + for base_node, node_pair in recomputable_rng_ops_map.items(): + # Step 2 - Modify the fwd pass such that + fw_node = node_pair["fwd"] + bw_node = node_pair["bwd"] + fw_graph = fw_module.graph + with fw_graph.inserting_before(fw_node): + functional_fw_node = fw_graph.create_node( + "call_function", + run_and_save_rng, + args=(fw_node.target, *fw_node.args), + kwargs=fw_node.kwargs + ) + state = fw_graph.create_node("call_function", operator.getitem, args=(functional_fw_node, 0), kwargs={}) + rng_output = fw_graph.create_node("call_function", operator.getitem, args=(functional_fw_node, 1,), kwargs={}) + fw_node.replace_all_uses_with(rng_output) + fw_graph.erase_node(fw_node) + fw_rng_state_outputs.append(state) + + + # Step 3 - Modify the bwd pass such that + bw_graph = bw_module.graph + with bw_graph.inserting_before(bw_tangent_start_node): + state_name = f"rng_state_output_{next(uid)}" + bw_rng_state_node = bw_graph.placeholder(state_name) + bw_rng_state_node.meta["val"] = get_sample_rng_state(get_device(fw_node)) + + with bw_graph.inserting_before(bw_node): + rng_output = bw_graph.create_node( + "call_function", + run_with_rng_state, + args=(bw_rng_state_node, bw_node.target, *bw_node.args), + kwargs=bw_node.kwargs + ) + + bw_node.replace_all_uses_with(rng_output) + bw_graph.erase_node(bw_node) + + + # Add the rng states in the output of the fwd graph. AOT Autograd assumes + # that symints are at the end of forward graph outputs. So, insert the new + # rng states accordingly. + fw_output_node = next(node for node in fw_module.graph.nodes if node.op == "output") + fw_outputs = fw_output_node.args[0] + sym_node_start_idx = len(fw_outputs) - num_sym_nodes + outputs = fw_outputs[:sym_node_start_idx] + fw_rng_state_outputs + fw_outputs[sym_node_start_idx:] + fw_module.graph.output(outputs) + fw_module.graph.erase_node(fw_output_node) + fw_module.recompile() + bw_module.recompile() + return fw_module, bw_module + + +def cleanup_recompute_tags(joint_module): + """ + If there are two consecutive checkpointed blocks with no operator in + between, we would still want to stash the tensor at the boundary of + checkpointed blocks. 
The following pass makes the last output node + non-recomputable to allow for that. + """ + for node in joint_module.graph.nodes: + if must_recompute(node): + for user in node.users: + if must_recompute(user) and user.meta["recompute"] > node.meta["recompute"]: + node.meta["recompute"] = 0 + return joint_module + + +def min_cut_rematerialization_partition( + joint_module: fx.GraphModule, _joint_inputs, compiler="inductor", recomputable_ops=None, + *, num_fwd_outputs +) -> Tuple[fx.GraphModule, fx.GraphModule]: + """ + Partitions the joint graph such that the backward recomputes the forward. + Recomputing helps in trading off memory bandwidth with computation. + + To create the fwd and bwd graph, we copy the joint graph, manually set the + outputs to just original forward or backward outputs. And then we run the + resulting graphs through dead code elimination. + + .. warning:: + This API is experimental and likely to change. + + Args: + joint_module(fx.GraphModule): The joint forward and backward graph. This + is the result of AOT Autograd tracing. + _joint_inputs: The inputs to the joint graph. This is unused. + compiler: This option determines the default set of recomputable ops. + Currently, there are two options: ``nvfuser`` and ``inductor``. + recomputable_ops: This is an optional set of recomputable ops. If this + is not None, then this set of ops will be used instead of the + default set of ops. + num_fwd_outputs: The number of outputs from the forward graph. + + Returns: + Returns the generated forward and backward Fx graph modules. + """ + try: + import networkx as nx + except ImportError as e: + raise RuntimeError("Need networkx installed to perform smart recomputation " + "heuristics") from e + + joint_module.graph.eliminate_dead_code() + joint_module.recompile() + + fx_g = joint_module.graph + + # add the CSE pass + if config.cse: + cse_graph = fx_graph_cse(fx_g) + joint_module.graph = cse_graph + full_bw_graph = joint_module.graph + + graph_has_recomputable_ops = has_recomputable_ops(joint_module) + graph_has_recomputable_rng_ops = has_recomputable_rng_ops(joint_module) + if graph_has_recomputable_ops: + joint_module = cleanup_recompute_tags(joint_module) + + name_to_node = {} + for node in joint_module.graph.nodes: + name_to_node[node.name] = node + + def classify_nodes(joint_module): + required_bw_nodes = set() + for node in joint_module.graph.nodes: + if node.op == 'placeholder' and "tangents" in node.target: + required_bw_nodes.add(node) + if node in required_bw_nodes: + for user in node.users: + required_bw_nodes.add(user) + + primal_inputs = list(filter(_is_primal, joint_module.graph.nodes)) + fwd_seed_offset_inputs = list(filter(_is_fwd_seed_offset, joint_module.graph.nodes)) + inputs = primal_inputs + fwd_seed_offset_inputs + fwd_outputs, bwd_outputs = _extract_fwd_bwd_outputs(joint_module, num_fwd_outputs=num_fwd_outputs) + required_bw_nodes.update(o for o in bwd_outputs if o is not None) + forward_only_graph = _extract_graph_with_inputs_outputs(joint_module.graph, inputs, fwd_outputs) + required_fw_nodes = {name_to_node[node.name] for node in forward_only_graph.nodes + if node.op != 'output'} + unclaimed_nodes = {node for node in joint_module.graph.nodes + if node not in required_fw_nodes and node not in required_bw_nodes} + return fwd_outputs, required_fw_nodes, required_bw_nodes, unclaimed_nodes, inputs + + orig_fw_outputs, required_fw_nodes, required_bw_nodes, unclaimed_nodes, inputs = classify_nodes(joint_module) + + # networkx blows up on graphs with no required 
backward nodes + # Since there's nothing to partition anyway, and the default partitioner can "handle" + # this case, send our graph over to the default partitioner. + if len(required_bw_nodes) == 0: + return default_partition(joint_module, _joint_inputs, num_fwd_outputs=num_fwd_outputs) + + for node in reversed(joint_module.graph.nodes): + if node not in required_fw_nodes: + node.dist_from_bw = 0 + else: + node.dist_from_bw = int(1e9) + for user in node.users: + node.dist_from_bw = min(node.dist_from_bw, user.dist_from_bw + 1) + + aten = torch.ops.aten + prims = torch.ops.prims + + # compiler == "nvfuser" is the default set of recomputable ops + default_recomputable_ops = [aten.add, aten.sub, aten.div, aten.atan2, aten.mul, aten.max, aten.min, aten.pow, aten.remainder, aten.fmod, aten.__and__, aten.__or__, aten.__xor__, aten.__lshift__, aten.__rshift__, aten.eq, aten.ne, aten.ge, aten.gt, aten.le, aten.lt, aten.abs, aten.bitwise_not, aten.ceil, aten.floor, aten.frac, aten.neg, aten.relu, aten.round, aten.silu, aten.trunc, aten.log, aten.log10, aten.log1p, aten.log2, aten.lgamma, aten.exp, aten.expm1, aten.erf, aten.erfc, aten.cos, aten.acos, aten.cosh, aten.sin, aten.asin, aten.sinh, aten.tan, aten.atan, aten.tanh, aten.atanh, aten.sqrt, aten.rsqrt, aten.reciprocal, aten.sigmoid, aten.softplus, aten.threshold, aten.threshold_backward, aten.clamp, aten.where, aten.lerp, aten.addcmul, aten.gelu, aten.gelu_backward, aten.sum, aten.mean, aten._grad_sum_to_size, aten.sum_to_size, aten.amax, aten.to, aten.type_as, operator.getitem, aten.squeeze, aten.unsqueeze, aten.rsub, aten._to_copy] # noqa: E501,B950 + view_ops = [aten.squeeze, aten.unsqueeze, aten.alias] + if compiler == "inductor": + default_recomputable_ops += [prims.div, prims.convert_element_type, aten.clone, aten._to_copy, aten.full_like, prims.var, prims.sum, aten.var, aten.std, prims.broadcast_in_dim, aten.select, aten.permute, aten._unsafe_view, aten.view, aten.expand, aten.slice, aten.reshape, aten.broadcast_tensors, aten.scalar_tensor, aten.ones, aten.new_zeros, aten.lift_fresh_copy, aten.arange, aten.triu, aten.var_mean, aten.isinf, aten.any, aten.full, aten.as_strided, aten.zeros, aten.argmax, aten.maximum] # noqa: E501,B950 + view_ops += [aten.view, aten.slice, aten.permute, aten.t, prims.broadcast_in_dim, aten.expand, aten.as_strided] + # Natalia said that we should allow recomputing indexing :) + default_recomputable_ops += [aten.index] + default_recomputable_ops += view_ops + + default_recomputable_ops += pointwise_ops() + + default_recomputable_ops += [ + aten.zeros_like, + ] + + default_recomputable_ops += [ + method_to_operator(m) + for m in magic_methods + ] + + recomputable_ops = set(recomputable_ops) if recomputable_ops is not None else set(default_recomputable_ops) + + random_ops = [aten.native_dropout, aten.rand_like, aten.randn_like] + compute_intensive_ops = [aten.mm, aten.convolution, aten.convolution_backward, aten.bmm, aten.addmm, aten.upsample_bilinear2d, aten._softmax, aten._softmax_backward_data, aten.native_layer_norm, aten.native_layer_norm_backward, aten.native_batch_norm, aten.native_batch_norm_backward, aten._native_batch_norm_legit] # noqa: E501,B950 + + fusible_ops = recomputable_ops | set(random_ops) + if AOT_PARTITIONER_DEBUG: + joint_module_ops = { + str(node.target._overloadpacket) + for node in joint_module.graph.nodes + if node.op == "call_function" and hasattr(node.target, "_overloadpacket") + } + ops_ignored = joint_module_ops - {str(i) for i in recomputable_ops} + print("Ops banned from 
rematerialization: ", ops_ignored) + print() + + def is_materialized_backwards(node): + cur_nodes = {node} + while len(cur_nodes) > 0: + cur = cur_nodes.pop() + for user in cur.users: + if user not in required_fw_nodes and not is_fusible(cur, user): + return True + if user not in required_fw_nodes and get_aten_target(user) in view_ops: + cur_nodes.add(user) + + return False + + def ban_recomputation(node): + if "recompute" in node.meta: + return node.meta["recompute"] == 0 + elif config.aggressive_recomputation: + ignored_ops = random_ops + compute_intensive_ops + return (node.op == 'call_function' and get_aten_target(node) in ignored_ops) + else: + if node.op != 'call_function': + return False + if get_aten_target(node) not in recomputable_ops: + return True + if node.target == operator.getitem: + return False + if node.target in [aten.lift_fresh_copy.default, aten.lift_fresh.default]: + return False + + # If a node *must* be materialized in the backwards pass, then we + # should never recompute it. This is a pretty subtle point. In + # general, the assumption we make is that recomputing a node in the + # backwards pass is "free". However, if a node must be materialized + # in the backwards pass, then recomputing it is never free. + if is_materialized_backwards(node): + return True + + # Arbitrary hack that sometimes seems to help things. The above + # modification appears to have made this heuristic a lot less critical + # for performance. + # TODO: Investigate why this hack helps. + # TODO: Investigate the interaction with compiler assisted + # activation checkpointing. Removing the heuristic improves both + # memory footprint and speedup. + if not graph_has_recomputable_ops: + if compiler == "inductor" and node.dist_from_bw > config.max_dist_from_bw: + return True + # If the output of an op is 4x smaller (arbitrary choice), + # then we don't allow recomputation. + input_tensors_size = sum(_size_of(i) for i in node.args if isinstance(i, fx.Node)) + output_size = _size_of(node) + return (output_size * 4 < input_tensors_size) + + def is_fusible(a, b): + # We can perform "memory fusion" into a cat, but cat cannot be a + # producer to a fusion + if get_aten_target(b) == aten.cat: + return True + return get_aten_target(a) in fusible_ops and get_aten_target(b) in fusible_ops + + def is_materialized(node): + if node.op == 'placeholder': + return True + + return not all(is_fusible(node, user) for user in node.users) + + def get_node_weight(node) -> int: + mem_sz = _size_of(node) + + # Heuristic to bias towards nodes closer to the backwards pass + # Complete guess about current value + mem_sz = int(mem_sz * (1.1 ** max(min(node.dist_from_bw, 100), 1))) + # mem_sz = int(mem_sz + node.dist_from_bw) + + if is_materialized(node): + return mem_sz + else: + return mem_sz * 2 + + nx_graph = nx.DiGraph() + for node in full_bw_graph.nodes: + if node.op == 'output': + continue + + if node in required_bw_nodes: + if node not in inputs: + nx_graph.add_edge(node.name + "_in", "sink", capacity=math.inf) + continue + # If someone saves a input for backward as-is and backward + # returns that tensor as-is as a grad input, then the node x would + # be both a required_bw_node and an input. In this case we + # (1) connect x_in to to the source, (2) x_out to the sink, and + # (3) assign the proper weight to the x_in-x_out edge, so that + # x would be part of cut nodes. A case where this happens is if + # NestedTensor saves a offset tensor as part of the singleton int + # in sizes. 
+ nx_graph.add_edge(node.name + "_out", "sink", capacity=math.inf) + + if _is_primal(node) or _is_fwd_seed_offset(node): + nx_graph.add_edge("source", node.name + "_in", capacity=math.inf) + + # If a node can't be recomputed (too expensive or involves randomness), + # we prevent it from being recomputed by adding an inf edge to the source + # We only need to ban nodes in the fw pass, as those are the only ones that would be recomputed. + if ban_recomputation(node) and node in required_fw_nodes: + nx_graph.add_edge("source", node.name + "_in", capacity=math.inf) + + # Checks if a node is actually a tuple. Can be simplified to just an isinstance check if we always use faketensors. + is_non_tensor_node = (('val' not in node.meta and 'tensor_meta' not in node.meta) or + ('val' in node.meta and not isinstance(node.meta['val'], torch.Tensor))) + + if is_sym_node(node): + weight = sym_node_size(node) + elif is_non_tensor_node: + weight = 0 if isinstance(node.meta.get("val"), BackwardState) else math.inf + else: + weight = get_node_weight(node) + + # Creates the weights on the "node" edge + nx_graph.add_edge(node.name + "_in", node.name + "_out", capacity=weight) + for user in node.users: + nx_graph.add_edge(node.name + "_out", user.name + "_in", capacity=math.inf) + + try: + cut_value, partition = nx.minimum_cut(nx_graph, "source", "sink") + except Exception: + print('Failed to compute min-cut on following graph:') + print('\n'.join(nx.readwrite.edgelist.generate_edgelist(nx_graph))) + raise + + reachable, non_reachable = partition + cutset = set() + for u, nbrs in ((n, nx_graph[n]) for n in reachable): + cutset.update((u, v) for v in nbrs if v in non_reachable) + + cut_nodes = set() + for node_in, node_out in cutset: + assert node_in[:-3] == node_out[:-4] + node_name = node_in[:-3] + cut_nodes.add(node_name) + + # To make this stuff deterministic + node_idx = {node: idx for idx, node in enumerate(joint_module.graph.nodes)} + saved_values = sorted((name_to_node[node] for node in cut_nodes), key=lambda x: node_idx[x]) + # save_for_backward on tensors and stashes symints in autograd .ctx + saved_sym_nodes = list(filter(is_sym_node, saved_values)) + saved_values = list(filter(lambda n: not is_sym_node(n), saved_values)) + # NB: saved_sym_nodes will be mutated to reflect the actual saved symbols + fw_module, bw_module = _extract_fwd_bwd_modules( + joint_module, saved_values, saved_sym_nodes=saved_sym_nodes, num_fwd_outputs=num_fwd_outputs) + + if graph_has_recomputable_ops: + if graph_has_recomputable_rng_ops: + fw_module, bw_module = functionalize_rng_ops( + joint_module, fw_module, bw_module, len(saved_sym_nodes) + ) + bw_module = reordering_to_mimic_autograd_engine(bw_module) + + if AOT_PARTITIONER_DEBUG: + print("Theoretical Activations Stored: ", sum([_size_of(i) for i in saved_values]) / 1e9) + fw_module_nodes = {node.name for node in fw_module.graph.nodes if node.op == 'call_function'} + bw_module_nodes = {node.name for node in bw_module.graph.nodes if node.op == 'call_function'} + remat_nodes = fw_module_nodes & bw_module_nodes + + counts = defaultdict(int) + for node in fw_module.graph.nodes: + if node.name in remat_nodes and hasattr(node.target, '_overloadpacket'): + counts[str(node.target._overloadpacket)] += 1 + print(f"# remat/fw/bw: {len(remat_nodes)}/{len(fw_module_nodes)}/{len(bw_module_nodes)}") + print("Count of Ops Rematerialized: ", sorted(counts.items(), key=lambda x: x[1], reverse=True)) + return fw_module, bw_module + + +def draw_graph( + traced: torch.fx.GraphModule, + fname: 
str, + figname: str = "fx_graph", + clear_meta: bool = True, + prog: Union[str, List[str]] = None, + parse_stack_trace: bool = False, + dot_graph_shape: Optional[str] = None, +) -> None: + if clear_meta: + new_graph = copy.deepcopy(traced.graph) + traced = fx.GraphModule(traced, new_graph) + for node in traced.graph.nodes: + node.meta = {} + base, ext = os.path.splitext(fname) + if not ext: + ext = ".svg" + print(f"Writing FX graph to file: {base}{ext}") + g = graph_drawer.FxGraphDrawer( + traced, + figname, + parse_stack_trace=parse_stack_trace, + dot_graph_shape=dot_graph_shape, + ) + x = g.get_main_dot_graph() + write_method = getattr(x, "write_" + ext.lstrip(".")) + fname = f"{base}{ext}" + if prog is None: + write_method(fname) + else: + write_method(fname, prog=prog) + + +def draw_joint_graph( + graph: torch.fx.GraphModule, + joint_inputs, + file_name: str = "full_graph.png", + dot_graph_shape: Optional[str] = None, +): + draw_graph(graph, file_name, dot_graph_shape=dot_graph_shape) + return default_partition(graph, joint_inputs) diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/pyfunctorch.py b/venv/lib/python3.10/site-packages/torch/_functorch/pyfunctorch.py new file mode 100644 index 0000000000000000000000000000000000000000..39d8b8ba7861eda2dc2d265221717b12da4f4dee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/pyfunctorch.py @@ -0,0 +1,252 @@ +from abc import ABC, abstractmethod +import contextlib +from typing import Any, List, Tuple +import torch +import torch.utils._pytree as pytree +from torch._C._functorch import ( + TransformType, + RandomnessType, + CInterpreter, + CGradInterpreterPtr, + CFunctionalizeInterpreterPtr, + CVmapInterpreterPtr, + CJvpInterpreterPtr, + pop_dynamic_layer_stack, + push_dynamic_layer_stack, +) +from torch.autograd.forward_ad import _set_fwd_grad_enabled + +""" +This file contains the functorch integration with PyDispatcher. + +PyDispatcher does not understand functorch's DynamicLayerStack dispatching +logic because it is entirely implemented in C++ in the fallbacks for two +dispatch keys, FuncTorchDynamicLayer{Front, Back}Mode (PyDispatcher is unable +to directly reuse C++ boxed fallbacks). + +Instead of trying to hammer PyDispatcher into understanding those fallbacks, +we re-implement the logic of peeking the top of the stack for an interpreter, +selecting the interpreter to dispatch on, etc, in Python. This leads to a +simpler design. + +The main difference between C++ functorch and PyDispatcher's functorch logic +is that: +- C++ functorch needs to manually tweak dispatch keys to ping-pong between + DynamicLayerFrontMode and DynamicLayerBackMode. +- PyDispatcher's functorch logic pops an Interpreter from the top of the stack + and asks it to execute the rule associated with the Interpreter. + +In C++ we do the ping-pong because e.g. vmap rules are associated with the +batched DispatchKey, but in PyDispatcher we are able to avoid this by asking +the user to register a batching rule directly to a transform that an +interpreter then invokes. +""" + + +# FuncTorchInterpreter is the Python version of Interpreter (recall that +# the DynamicLayerStack is a stack of interpreters). +# It is a wrapper around the actual C++ Interpreter object. +# +# Keep the methods in sync with aten/src/ATen/functorch/Interpreter.h +class FuncTorchInterpreter(ABC): + def __init__(self, cptr: Any): + self._cptr = cptr + + # Process an operation. eg for vmap, this is invoking a batching rule. 
+ # Conceptually this is analogous to Interpreter::process in C++ + @abstractmethod + def process(self, op, args, kwargs): + pass + + # lower an operation from this Interpreter to the next Interpreter on the stack. + # Concretely, this involves temporarily popping the current Interpreter. + # Conceptually this is analogous to Interpreter::sendToNextInterpreter in C++ + def lower(self): + return temporarily_pop_interpreter_stack() + + def level(self): + return self._cptr.level() + + def key(self): + return self._cptr.key() + + def get_state(self): + raise NotImplementedError() + + def check_state(self, state): + return state == self.get_state() + + +@contextlib.contextmanager +def temporarily_pop_interpreter_stack(): + try: + saved = pop_dynamic_layer_stack() + yield + finally: + push_dynamic_layer_stack(saved) + + +class VmapInterpreter(FuncTorchInterpreter): + def __init__(self, cdata: CInterpreter): + assert cdata.key() == TransformType.Vmap + # NOTE: [Interpreter cdata vs cptr] + # cdata is a generic CInterpreter. We wrap it in a CVmapInterpreterPtr + # so that we can access methods specific to the vmap interpreter + self._cdata = cdata + self._cptr = CVmapInterpreterPtr(cdata) + + def process(self, op, args, kwargs): + kernel = op.functorch_table[TransformType.Vmap] + return kernel(self, *args, **kwargs) + + def batch_size(self): + return self._cptr.batchSize() + + def randomness(self): + typ = self._cptr.randomness() + if typ == RandomnessType.Error: + return "error" + elif typ == RandomnessType.Same: + return "same" + elif typ == RandomnessType.Different: + return "different" + raise RuntimeError(f"Unknown RandomnessType: {typ}") + + def get_state(self): + return (self.key().name, self.level(), self.randomness()) + + +@contextlib.contextmanager +def nested(*contexts): + with contextlib.ExitStack() as stack: + for ctx in contexts: + stack.enter_context(ctx) + yield contexts + + +class GradInterpreter(FuncTorchInterpreter): + def __init__(self, cdata: CInterpreter): + assert cdata.key() == TransformType.Grad + # See NOTE: [Interpreter cdata vs cptr] + self._cdata = cdata + self._cptr = CGradInterpreterPtr(cdata) + + def lift(self, args, kwargs): + args, kwargs = pytree.tree_map_only(torch.Tensor, self._cptr.lift, [args, kwargs]) + return args, kwargs + + def process(self, op, args, kwargs): + kernel = op.functorch_table[TransformType.Grad] + args, kwargs = self.lift(args, kwargs) + return kernel(self, *args, **kwargs) + + # GradInterpreter has custom lower because of the no_grad interaction + # See NOTE [grad and vjp interaction with no_grad] + # This logic is mirrored from C++ GradInterpreterPtr::sendToNextInterpreter + def lower(self): + prev_grad_mode = self.prev_grad_mode() + if not prev_grad_mode: + return nested(torch.no_grad(), super().lower()) + return super().lower() + + def prev_grad_mode(self): + return self._cptr.prevGradMode() + + def get_state(self): + return (self.key().name, self.level(), self.prev_grad_mode()) + + +class JvpInterpreter(FuncTorchInterpreter): + def __init__(self, cdata: CInterpreter): + assert cdata.key() == TransformType.Jvp + # See NOTE: [Interpreter cdata vs cptr] + self._cdata = cdata + self._cptr = CJvpInterpreterPtr(cdata) + + def lift(self, args, kwargs): + args, kwargs = pytree.tree_map_only(torch.Tensor, self._cptr.lift, [args, kwargs]) + return args, kwargs + + def process(self, op, args, kwargs): + kernel = op.functorch_table[TransformType.Jvp] + args, kwargs = self.lift(args, kwargs) + return kernel(self, *args, **kwargs) + + # Jvp has 
custom lower because of the no_fwd_grad interaction + # See NOTE [grad and vjp interaction with no_grad] for related info. + # This logic is mirrored from C++ JvpInterpreterPtr::sendToNextInterpreter + def lower(self): + prev_fwd_grad_mode = self.prev_fwd_grad_mode() + if not prev_fwd_grad_mode: + return nested(_set_fwd_grad_enabled(False), super().lower()) + return super().lower() + + def prev_fwd_grad_mode(self): + return self._cptr.prevFwdGradMode() + + +class FunctionalizeInterpreter(FuncTorchInterpreter): + def __init__(self, cdata: CInterpreter): + assert cdata.key() == TransformType.Functionalize + self._cdata = cdata + self._cptr = CFunctionalizeInterpreterPtr(cdata) + + def process(self, op, args, kwargs): + kernel = op.functorch_table[TransformType.Functionalize] + return kernel(self, *args, **kwargs) + + def functionalize_add_back_views(self): + return self._cptr.functionalizeAddBackViews() + + +def coerce_cinterpreter(cinterpreter: CInterpreter) -> FuncTorchInterpreter: + key = cinterpreter.key() + if key == TransformType.Grad: + return GradInterpreter(cinterpreter) + if key == TransformType.Vmap: + return VmapInterpreter(cinterpreter) + if key == TransformType.Jvp: + return JvpInterpreter(cinterpreter) + if key == TransformType.Functionalize: + return FunctionalizeInterpreter(cinterpreter) + raise RuntimeError(f"NYI: PyDispatcher has not implemented support for {key}") + + +def retrieve_current_functorch_interpreter() -> FuncTorchInterpreter: + interpreter = torch._C._functorch.peek_interpreter_stack() + assert interpreter is not None + return coerce_cinterpreter(interpreter) + + +def retrieve_all_functorch_interpreters() -> List[FuncTorchInterpreter]: + cis = torch._C._functorch.get_interpreter_stack() + if cis is None: + return [] + return [coerce_cinterpreter(ci) for ci in cis] + + +def compare_functorch_state(states: List[Tuple[Any, ...]]) -> bool: + # There are four possible cases covered here: + # 1. Current stack empty AND stack when generated not empty -> Invalidate + # 2. Current stack not empty AND stack when generated empty -> Invalidate + # 3. Current stack and generated stack empty -> Valid FX graph + # 4. Current stack and generated stack not empty -> Valid if both states match + peek = torch._C._functorch.peek_interpreter_stack() + if (peek is None and len(states) != 0) or (peek is not None and len(states) == 0): + return False + + cis = retrieve_all_functorch_interpreters() + return len(cis) == len(states) and \ + all(ci.check_state(state) for ci, state in zip(cis, states)) + + +def dispatch_functorch(op, args, kwargs): + interpreter = retrieve_current_functorch_interpreter() + # In traditional PyTorch operators, DispatchKey::FuncTorchTensorWrapper's + # unwrap_dead_tensors fallback handles unwrapping dead tensor wrappers. + # PyDispatcher sidesteps the PyTorch dispatcher when dealing with functorch + # transforms, so we manually unwrap the dead tensors here. + # This logic won't need to exist when we have mode-only functorch. + args, kwargs = pytree.tree_map_only( + torch.Tensor, torch._C._functorch.unwrap_if_dead, (args, kwargs)) + return interpreter.process(op, args, kwargs) diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/python_key.py b/venv/lib/python3.10/site-packages/torch/_functorch/python_key.py new file mode 100644 index 0000000000000000000000000000000000000000..e7c805841a62b015e6fee609d370109ddd45821a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/python_key.py @@ -0,0 +1,9 @@ +# Copyright (c) Facebook, Inc. 
and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +__all__ = ["make_fx", "dispatch_trace", "PythonKeyTracer", "pythonkey_decompose"] +from torch.fx.experimental.proxy_tensor import make_fx, dispatch_trace, PythonKeyTracer, decompose + +pythonkey_decompose = decompose diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/pytree_hacks.py b/venv/lib/python3.10/site-packages/torch/_functorch/pytree_hacks.py new file mode 100644 index 0000000000000000000000000000000000000000..8c4b50bc6ad4c3d36ec358493569a5296cd2d053 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/pytree_hacks.py @@ -0,0 +1,22 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import warnings + +# TODO: remove this file when the migration of the pytree utility is done +from torch.utils._pytree import tree_map_, treespec_pprint + + +__all__ = ["tree_map_", "treespec_pprint"] + + +with warnings.catch_warnings(): + warnings.simplefilter("always") + warnings.warn( + "torch._functorch.pytree_hacks is deprecated and will be removed in a future release. " + "Please use torch.utils._pytree instead.", + DeprecationWarning, + ) diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/top_operators_github_usage.py b/venv/lib/python3.10/site-packages/torch/_functorch/top_operators_github_usage.py new file mode 100644 index 0000000000000000000000000000000000000000..12e87e60f6befd0399b1ffb634f83f53588d436b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/top_operators_github_usage.py @@ -0,0 +1,625 @@ +# mypy: ignore-errors + +""" +From https://docs.google.com/spreadsheets/d/12R3nCOLskxPYjjiNkdqy4OdQ65eQp_htebXGODsjSeA/edit#gid=0 +Try to keep this list in sync with that. 
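+
+A quick, illustrative way to inspect the data (assuming this module is
+importable as ``torch._functorch.top_operators_github_usage``)::
+
+    from torch._functorch.top_operators_github_usage import top_torch
+    for name, count in top_torch[:5]:
+        print(f"torch.{name}: {count} usages")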
+""" +top_torch = [ + ("t", 6837449), + ("tensor", 585786), + ("mode", 462182), + ("cat", 394818), + ("max", 368038), + ("zeros", 329495), + ("load", 327756), + ("no_grad", 294694), + ("save", 265130), + ("from_numpy", 243063), + ("manual_seed", 165044), + ("ones", 153696), + ("randn", 150796), + ("stack", 133358), + ("sum", 130772), + ("arange", 98087), + ("rand", 94715), + ("mean", 88546), + ("exp", 73883), + ("zeros_like", 72831), + ("min", 72248), + ("sigmoid", 66798), + ("log", 62135), + ("matmul", 47811), + ("clamp", 45304), + ("sqrt", 44911), + ("abs", 43535), + ("tanh", 42793), + ("empty", 40311), + ("argmax", 38435), + ("bmm", 33984), + ("pow", 33571), + ("norm", 31125), + ("mm", 30995), + ("is_tensor", 29546), + ("ones_like", 29512), + ("nonzero", 28681), + ("full", 28373), + ("unsqueeze", 27911), + ("where", 26585), + ("randperm", 26450), + ("eye", 24342), + ("mul", 23236), + ("topk", 22537), + ("as_tensor", 21967), + ("sort", 21412), + ("squeeze", 20863), + ("randint", 20771), + ("linspace", 20041), + ("add", 19201), + ("transpose", 18663), + ("split", 18325), + ("gather", 17904), + ("set_grad_enabled", 16013), + ("sin", 15669), + ("cos", 15562), + ("div", 15513), + ("index_select", 14866), + ("multinomial", 14331), + ("flatten", 14267), + ("isnan", 14170), + ("randn_like", 13096), + ("eq", 12680), + ("einsum", 12480), + ("round", 12367), + ("floor", 11628), + ("allclose", 11000), + ("reshape", 10605), + ("diag", 10167), + ("chunk", 9581), + ("std", 9379), + ("set_default_tensor_type", 9281), + ("triu", 8559), + ("meshgrid", 8292), + ("set_num_threads", 8126), + ("unique", 7964), + ("full_like", 7780), + ("tril", 7538), + ("dot", 7275), + ("sign", 6943), + ("equal", 6916), + ("normal", 6750), + ("cumsum", 6556), + ("dist", 6058), + ("isfinite", 6030), + ("gt", 5935), + ("set_printoptions", 5888), + ("range", 5491), + ("empty_like", 5351), + ("flip", 5342), + ("masked_select", 5341), + ("bernoulli", 5262), + ("atan", 5253), + ("var", 5247), + ("prod", 5200), + ("erf", 5088), + ("inverse", 5072), + ("addmm", 4854), + ("logsumexp", 4582), + ("fft", 4436), + ("lt", 4421), + ("log2", 4316), + ("enable_grad", 4238), + ("rand_like", 4187), + ("argsort", 3972), + ("seed", 3932), + ("mv", 3547), + ("ger", 3309), + ("ge", 3248), + ("atan2", 3210), + ("ceil", 3202), + ("ne", 3075), + ("bincount", 3063), + ("acos", 3055), + ("rsqrt", 3031), + ("svd", 3029), + ("numel", 3003), + ("log1p", 2840), + ("unbind", 2808), + ("le", 2714), + ("isinf", 2707), + ("cross", 2646), + ("set_default_dtype", 2536), + ("argmin", 2535), + ("sparse_coo_tensor", 2489), + ("log10", 2304), + ("kthvalue", 2192), + ("set_rng_state", 2158), + ("get_rng_state", 1996), + ("get_default_dtype", 1879), + ("det", 1868), + ("qr", 1864), + ("histc", 1852), + ("symeig", 1832), + ("trace", 1801), + ("median", 1795), + ("addcmul", 1751), + ("remainder", 1717), + ("baddbmm", 1693), + ("lgamma", 1665), + ("repeat_interleave", 1598), + ("fmod", 1576), + ("reciprocal", 1575), + ("tan", 1560), + ("initial_seed", 1532), + ("take", 1529), + ("stft", 1487), + ("get_num_threads", 1477), + ("real", 1459), + ("cholesky", 1406), + ("quantize_per_tensor", 1392), + ("diag_embed", 1364), + ("lerp", 1363), + ("asin", 1345), + ("eig", 1333), + ("trunc", 1290), + ("diagonal", 1287), + ("cosh", 1279), + ("rfft", 1269), + ("cumprod", 1260), + ("addr", 1211), + ("roll", 1198), + ("narrow", 1188), + ("digamma", 1172), + ("square", 1163), + ("sinh", 1131), + ("logspace", 1084), + ("broadcast_tensors", 1070), + ("irfft", 1013), + ("frac", 997), + 
("hann_window", 994), + ("solve", 989), + ("logdet", 977), + ("expm1", 968), + ("cdist", 946), + ("addmv", 903), + ("randint_like", 888), + ("tensordot", 888), + ("ifft", 877), + ("true_divide", 854), + ("erfinv", 830), + ("addcdiv", 819), + ("addbmm", 813), + ("renorm", 781), + ("pinverse", 753), + ("isclose", 740), + ("erfc", 729), + ("is_storage", 725), + ("triangular_solve", 723), + ("rot90", 709), + ("logical_not", 686), + ("geqrf", 681), + ("slogdet", 677), + ("lu", 665), + ("hamming_window", 659), + ("orgqr", 651), + ("ormqr", 622), + ("is_floating_point", 602), + ("diagflat", 562), + ("cholesky_solve", 559), + ("tril_indices", 552), + ("chain_matmul", 551), + ("triu_indices", 548), + ("angle", 522), + ("poisson", 505), + ("matrix_power", 485), + ("unique_consecutive", 471), + ("quantize_per_channel", 465), + ("std_mean", 458), + ("bartlett_window", 447), + ("var_mean", 428), + ("lstsq", 421), + ("logical_and", 419), + ("mvlgamma", 411), + ("blackman_window", 400), + ("bitwise_not", 395), + ("cholesky_inverse", 388), + ("as_strided", 384), + ("floor_divide", 353), + ("cartesian_prod", 321), + ("lu_solve", 317), + ("set_flush_denormal", 310), + ("empty_strided", 283), + ("logical_xor", 282), + ("polygamma", 282), + ("logical_or", 280), + ("set_num_interop_threads", 278), + ("combinations", 274), + ("trapz", 270), + ("matrix_rank", 260), + ("lu_unpack", 255), + ("result_type", 244), + ("conj", 231), + ("cummax", 230), + ("lobpcg", 229), + ("bitwise_xor", 217), + ("promote_types", 213), + ("get_num_interop_threads", 211), + ("cummin", 205), + ("bitwise_and", 198), + ("dequantize", 192), + ("bitwise_or", 191), + ("imag", 191), + ("can_cast", 184), + ("istft", 180), + ("compiled_with_cxx11_abi", 159), + ("is_complex", 151), + ("block_diag", 136), + ("pca_lowrank", 124), + ("absolute", 122), + ("svd_lowrank", 108), + ("neg", 2), +] + +top_nn_functional = [ + ("nn.functional.softmax", 10522), + ("nn.functional.relu", 8572), + ("nn.functional.interpolate", 7277), + ("nn.functional.pad", 5207), + ("nn.functional.log_softmax", 4699), + ("nn.functional.normalize", 2338), + ("nn.functional.cross_entropy", 2083), + ("nn.functional.grid_sample", 1970), + ("nn.functional.one_hot", 1967), + ("nn.functional.mse_loss", 1920), + ("nn.functional.conv2d", 1593), + ("nn.functional.dropout", 1516), + ("nn.functional.softplus", 1385), + ("nn.functional.sigmoid", 1128), + ("nn.functional.linear", 1036), + ("nn.functional.gelu", 930), + ("nn.functional.avg_pool2d", 899), + ("nn.functional.max_pool2d", 876), + ("nn.functional.nll_loss", 863), + ("nn.functional.embedding", 737), + ("nn.functional.tanh", 664), + ("nn.functional.leaky_relu", 640), + ("nn.functional.adaptive_avg_pool2d", 633), + ("nn.functional.cosine_similarity", 627), + ("nn.functional.unfold", 609), + ("nn.functional.conv1d", 596), + ("nn.functional.binary_cross_entropy_with_logits", 591), + ("nn.functional.l1_loss", 571), + ("nn.functional.binary_cross_entropy", 492), + ("nn.functional.elu", 416), + ("nn.functional.batch_norm", 413), + ("nn.functional.upsample", 413), + ("nn.functional.fold", 305), + ("nn.functional.affine_grid", 298), + ("nn.functional.max_pool1d", 297), + ("nn.functional.torch", 294), + ("nn.functional.threshold", 263), + ("nn.functional.smooth_l1_loss", 262), + ("nn.functional.pairwise_distance", 253), + ("nn.functional.logsigmoid", 243), + ("nn.functional.adaptive_max_pool2d", 235), + ("nn.functional.relu6", 213), + ("nn.functional.pixel_shuffle", 209), + ("nn.functional.avg_pool3d", 203), + ("nn.functional.bilinear", 
203), + ("nn.functional.conv_transpose2d", 201), + ("nn.functional.gumbel_softmax", 197), + ("nn.functional.max_unpool2d", 196), + ("nn.functional.kl_div", 191), + ("nn.functional.hardtanh", 189), + ("nn.functional.ctc_loss", 185), + ("nn.functional.layer_norm", 178), + ("nn.functional.conv3d", 172), + ("nn.functional.max_unpool3d", 167), + ("nn.functional.hardshrink", 165), + ("nn.functional.hardswish", 156), + ("nn.functional.selu", 156), + ("nn.functional.glu", 155), + ("nn.functional.assert_int_or_pair", 150), + ("nn.functional.hardsigmoid", 146), + ("nn.functional.upsample_bilinear", 146), + ("nn.functional.max_pool3d", 140), + ("nn.functional.adaptive_avg_pool3d", 139), + ("nn.functional.instance_norm", 124), + ("nn.functional.embedding_bag", 122), + ("nn.functional.upsample_nearest", 110), + ("nn.functional.avg_pool1d", 105), + ("nn.functional.prelu", 102), + ("nn.functional.celu", 92), + ("nn.functional.dropout2d", 86), + ("nn.functional.hinge_embedding_loss", 82), + ("nn.functional.softsign", 81), + ("nn.functional.max_unpool1d", 74), + ("nn.functional.silu", 74), + ("nn.functional.softshrink", 70), + ("nn.functional.leaky_relu_", 68), + ("nn.functional.softmin", 67), + ("nn.functional.channel_shuffle", 66), + ("nn.functional.multilabel_margin_loss", 66), + ("nn.functional.dropout3d", 65), + ("nn.functional.multi_margin_loss", 65), + ("nn.functional.lp_pool2d", 64), + ("nn.functional.conv_transpose1d", 62), + ("nn.functional.triplet_margin_loss", 62), + ("nn.functional.tanhshrink", 61), + ("nn.functional.adaptive_max_pool1d", 59), + ("nn.functional.cosine_embedding_loss", 58), + ("nn.functional.multi_head_attention_forward", 58), + ("nn.functional.max_pool1d_with_indices", 53), + ("nn.functional.poisson_nll_loss", 53), + ("nn.functional.margin_ranking_loss", 52), + ("nn.functional.soft_margin_loss", 52), + ("nn.functional.adaptive_max_pool3d", 51), + ("nn.functional.group_norm", 51), + ("nn.functional.local_response_norm", 51), + ("nn.functional.multilabel_soft_margin_loss", 51), + ("nn.functional.relu_", 50), + ("nn.functional.alpha_dropout", 49), + ("nn.functional.feature_alpha_dropout", 49), + ("nn.functional.lp_pool1d", 49), + ("nn.functional.adaptive_max_pool1d_with_indices", 48), + ("nn.functional.adaptive_max_pool2d_with_indices", 48), + ("nn.functional.adaptive_max_pool3d_with_indices", 48), + ("nn.functional.fractional_max_pool2d", 48), + ("nn.functional.fractional_max_pool2d_with_indices", 48), + ("nn.functional.fractional_max_pool3d", 48), + ("nn.functional.fractional_max_pool3d_with_indices", 48), + ("nn.functional.max_pool2d_with_indices", 48), + ("nn.functional.max_pool3d_with_indices", 48), + ("nn.functional.handle_torch_function", 47), + ("nn.functional.has_torch_function", 47), + ("nn.functional.adaptive_avg_pool1d", 43), + ("nn.functional.pdist", 43), + ("nn.functional.rrelu_", 37), + ("nn.functional.elu_", 34), + ("nn.functional.boolean_dispatch", 33), + ("nn.functional.hardtanh_", 26), + ("nn.functional.triplet_margin_with_distance_loss", 23), + ("nn.functional.selu_", 20), + ("nn.functional.pixel_unshuffle", 19), + ("nn.functional.conv_transpose3d", 18), + ("nn.functional.gaussian_nll_loss", 15), + ("nn.functional.has_torch_function_unary", 15), + ("nn.functional.has_torch_function_variadic", 15), + ("nn.functional.celu_", 13), + ("nn.functional.huber_loss", 7), + ("nn.functional.mish", 4), + ("nn.functional.threshold_", 3), + ("nn.functional.grad", 2), + ("nn.functional.conv_tbc", 1), + ("nn.functional.math", 1), +] + +top_nn_module = [ + ("nn.Module", 
927129, None), + ("nn.Linear", 530688, "nn.functional.linear"), + ("nn.Sequential", 384968, None), + ("nn.Conv2d", 383320, "nn.functional.conv2d"), + ("nn.ReLU", 318877, "nn.functional.relu"), + ("nn.BatchNorm2d", 233265, "nn.functional.batch_norm"), + ("nn.Dropout", 179268, "nn.functional.dropout"), + ("nn.ModuleList", 171225, None), + ("nn.Parameter", 153291, None), + ("nn.CrossEntropyLoss", 152696, "nn.functional.cross_entropy"), + ("nn.MaxPool2d", 138619, "nn.functional.max_pool2d"), + ("nn.Embedding", 111844, "nn.functional.embedding"), + ("nn.DataParallel", 104238, None), + ("nn.MSELoss", 82954, "nn.functional.mse_loss"), + ("nn.Sigmoid", 75810, "nn.functional.sigmoid"), + ("nn.LeakyReLU", 65632, "nn.functional.leaky_relu"), + ("nn.BatchNorm1d", 65374, "nn.functional.batch_norm"), + ("nn.Softmax", 65114, "nn.functional.softmax"), + ("nn.Tanh", 59445, "nn.functional.tanh"), + ("nn.AdaptiveAvgPool2d", 59071, "nn.functional.adaptive_avg_pool2d"), + ("nn.AvgPool2d", 58377, "nn.functional.avg_pool2d"), + ("nn.ConvTranspose2d", 57524, "nn.functional.conv_transpose2d"), + ("nn.LSTM", 57411, None), + ("nn.Conv1d", 41108, "nn.functional.conv1d"), + ("nn.LayerNorm", 36089, "nn.functional.layer_norm"), + ("nn.BCELoss", 34005, "nn.functional.binary_cross_entropy"), + ("nn.Upsample", 32527, "nn.functional.interpolate"), + ("nn.BCEWithLogitsLoss", 29944, "nn.functional.binary_cross_entropy_with_logits"), + ("nn.GRU", 25421, None), + ("nn.Dropout2d", 23512, "nn.functional.dropout2d"), + ("nn.LogSoftmax", 22897, "nn.functional.log_softmax"), + ("nn.L1Loss", 22778, "nn.functional.l1_loss"), + ("nn.GroupNorm", 22183, "nn.functional.group_norm"), + ("nn.NLLLoss", 21751, "nn.functional.nll_loss"), + ("nn.Conv3d", 20874, "nn.functional.conv3d"), + ("nn.Identity", 17911, None), + ("nn.InstanceNorm2d", 16426, "nn.functional.instance_norm"), + ("nn.BatchNorm3d", 16378, "nn.functional.batch_norm"), + ("nn.PReLU", 13472, "nn.functional.prelu"), + ("nn.ReLU6", 12622, "nn.functional.relu6"), + ("nn.ELU", 12508, "nn.functional.elu"), + ("nn.LSTMCell", 10885, None), + ("nn.Flatten", 10384, "torch.flatten"), + ("nn.ModuleDict", 10255, None), + ("nn.ReflectionPad2d", 9954, "nn.functional.pad"), + ("nn.MaxPool3d", 9526, "nn.functional.max_pool3d"), + ("nn.MaxPool1d", 9154, "nn.functional.max_pool1d"), + ("nn.RNN", 9154, None), + ("nn.ZeroPad2d", 8847, "nn.functional.pad"), + ("nn.ParameterList", 7702, None), + ("nn.SyncBatchNorm", 6814, None), + ("nn.PixelShuffle", 6571, "nn.functional.pixel_shuffle"), + ("nn.SmoothL1Loss", 6517, "nn.functional.smooth_l1_loss"), + ("nn.Hardswish", 6458, "nn.functional.hardswish"), + ("nn.AdaptiveMaxPool2d", 6071, "nn.functional.adaptive_max_pool2d"), + ("nn.SELU", 6043, "nn.functional.selu"), + ("nn.ConvTranspose3d", 6039, "nn.functional.conv_transpose3d"), + ("nn.GRUCell", 5840, None), + ("nn.ReplicationPad2d", 5600, "nn.functional.pad"), + ("nn.KLDivLoss", 5541, "nn.functional.kl_div"), + ("nn.ConvTranspose1d", 5183, "nn.functional.conv_transpose1d"), + ("nn.Softplus", 5120, "nn.functional.softplus"), + ("nn.SiLU", 4895, "nn.functional.silu"), + ("nn.AvgPool3d", 4523, "nn.functional.avg_pool3d"), + ("nn.CosineSimilarity", 4058, "nn.functional.cosine_similarity"), + ("nn.GELU", 3932, "nn.functional.gelu"), + ("nn.UpsamplingBilinear2d", 3673, "nn.functional.interpolate"), + ("nn.InstanceNorm1d", 3658, "nn.functional.instance_norm"), + ("nn.Transformer", 3604, None), + ("nn.MultiheadAttention", 3435, "nn.functional.multi_head_attention_forward"), + ("nn.AvgPool1d", 3195, 
"nn.functional.avg_pool1d"), + ("nn.Dropout3d", 2964, "nn.functional.dropout3d"), + ("nn.AdaptiveAvgPool3d", 2915, "nn.functional.adaptive_avg_pool3d"), + ("nn.InstanceNorm3d", 2893, "nn.functional.instance_norm"), + ("nn.Hardtanh", 2613, "nn.functional.hardtanh"), + ("nn.MarginRankingLoss", 2568, "nn.functional.margin_ranking_loss"), + ("nn.GLU", 2526, "nn.functional.glu"), + ("nn.AdaptiveAvgPool1d", 2481, "nn.functional.adaptive_avg_pool1d"), + ("nn.EmbeddingBag", 2344, "nn.functional.embedding_bag"), + ("nn.TransformerEncoderLayer", 2292, None), + ("nn.TransformerEncoder", 2091, None), + ("nn.MaxUnpool2d", 2031, "nn.functional.max_unpool2d"), + ("nn.UpsamplingNearest2d", 2004, "nn.functional.interpolate"), + ("nn.ConstantPad1d", 1904, "nn.functional.pad"), + ("nn.ConstantPad2d", 1791, "nn.functional.pad"), + ("nn.CTCLoss", 1789, "nn.functional.ctc_loss"), + ("nn.AdaptiveMaxPool1d", 1713, "nn.functional.adaptive_max_pool1d"), + ("nn.AdaptiveLogSoftmaxWithLoss", 1665, None), + ("nn.Bilinear", 1664, "nn.functional.bilinear"), + ("nn.RNNCell", 1653, None), + ("nn.MultiLabelSoftMarginLoss", 1624, "nn.functional.multilabel_soft_margin_loss"), + ("nn.Unfold", 1452, "nn.functional.unfold"), + ("nn.RReLU", 1431, "nn.functional.rrelu"), + ("nn.CosineEmbeddingLoss", 1357, "nn.functional.cosine_embedding_loss"), + ("nn.LocalResponseNorm", 1331, "nn.functional.local_response_norm"), + ("nn.Softmax2d", 1300, "nn.functional.softmax"), + ("nn.PairwiseDistance", 1241, "nn.functional.pairwise_distance"), + ("nn.LogSigmoid", 1235, "nn.functional.logsigmoid"), + ("nn.TripletMarginLoss", 1230, "nn.functional.triplet_margin_loss"), + ("nn.RNNBase", 1133, None), + ("nn.Threshold", 1043, "nn.functional.threshold"), + ("nn.AdaptiveMaxPool3d", 1025, "nn.functional.adaptive_max_pool3d"), + ("nn.CELU", 1018, "nn.functional.celu"), + ("nn.NLLLoss2d", 966, "nn.functional.nll_loss"), + ("nn.Softsign", 877, "nn.functional.softsign"), + ("nn.ReplicationPad1d", 862, "nn.functional.pad"), + ("nn.SoftMarginLoss", 856, "nn.functional.soft_margin_loss"), + ("nn.ParameterDict", 742, None), + ("nn.ReflectionPad1d", 731, "nn.functional.pad"), + ("nn.Softshrink", 713, "nn.functional.softshrink"), + ("nn.AlphaDropout", 710, "nn.functional.alpha_dropout"), + ("nn.Tanhshrink", 681, "nn.functional.tanhshrink"), + ("nn.PoissonNLLLoss", 676, "nn.functional.poisson_nll_loss"), + ("nn.MaxUnpool3d", 660, "nn.functional.max_unpool3d"), + ("nn.Fold", 630, "nn.functional.fold"), + ("nn.MultiMarginLoss", 622, "nn.functional.multi_margin_loss"), + ("nn.TransformerDecoderLayer", 614, None), + ("nn.TransformerDecoder", 607, None), + ("nn.Hardshrink", 592, "nn.functional.hardshrink"), + ("nn.ConstantPad3d", 582, "nn.functional.pad"), + ("nn.MultiLabelMarginLoss", 580, "nn.functional.multilabel_margin_loss"), + ("nn.LPPool2d", 550, "nn.functional.lp_pool2d"), + ("nn.Softmin", 537, "nn.functional.softmin"), + ("nn.MaxUnpool1d", 518, "nn.functional.max_unpool1d"), + ("nn.FractionalMaxPool2d", 484, "nn.functional.fractional_max_pool2d"), + ("nn.Hardsigmoid", 477, "nn.functional.hardsigmoid"), + ("nn.ReplicationPad3d", 470, "nn.functional.pad"), + ("nn.HingeEmbeddingLoss", 442, "nn.functional.hinge_embedding_loss"), + ("nn.LPPool1d", 386, "nn.functional.lp_pool1d"), + ("nn.FractionalMaxPool3d", 252, "nn.functional.fractional_max_pool3d"), + ("nn.Container", 217, None), + ("nn.Unflatten", 206, "nn.functional.unflatten"), + ("nn.FeatureAlphaDropout", 136, "nn.functional.feature_alpha_dropout"), + ("nn.TripletMarginWithDistanceLoss", 107, 
"nn.functional.triplet_margin_with_distance_loss"), + ("nn.ChannelShuffle", 90, "nn.functional.channel_shuffle"), + ("nn.RNNCellBase", 88, None), + ("nn.LazyLinear", 81, "nn.functional.linear"), + ("nn.UninitializedParameter", 60, None), + ("nn.CrossMapLRN2d", 59, None), + ("nn.GaussianNLLLoss", 55, "nn.functional.gaussian_nll_loss"), + ("nn.PixelUnshuffle", 45, "nn.functional.pixel_unshuffle"), + ("nn.Mish", 31, "nn.functional.mish"), + ("nn.ReflectionPad3d", 22, "nn.functional.pad"), + ("nn.HuberLoss", 18, "nn.functional.huber_loss"), + ("nn.LazyConv2d", 15, None), + ("nn.LazyConv1d", 9, None), + ("nn.LazyConv3d", 8, None), + ("nn.LazyConvTranspose1d", 8, None), + ("nn.LazyConvTranspose2d", 8, None), + ("nn.LazyConvTranspose3d", 8, None), + ("nn.LazyBatchNorm1d", 3, None), + ("nn.LazyBatchNorm2d", 3, None), + ("nn.LazyBatchNorm3d", 3, None), + ("nn.UninitializedBuffer", 3, None), +] + +# No rankings because these are a little hard to get rankings for +method_only_ops = [ + 'bfloat16', + 'bool', + 'byte', + 'char', + 'contiguous', + 'cpu', + 'cuda', + 'detach', + 'double', + 'expand', + 'expand_as', + 'float', + 'get_device', + 'half', + 'hardshrink', + 'index_add', + 'index_copy', + 'index_fill', + 'index_put', + 'int', + 'is_contiguous', + 'is_pinned', + 'is_set_to', + 'is_shared', + 'is_signed', + 'item', + 'long', + 'masked_scatter', + 'masked_fill', + 'narrow_copy', + 'numpy', + 'pin_memory', + 'repeat', + 'reshape_as', + 'select', + 'short', + 'storage_offset', + 'sum_to_size', + 'to', + 'to_mkldnn', + 'tolist', + 'type', + 'type_as', + 'unfold', + 'view', + 'view_as', +] + + +def get_nn_functional_top_list(): + top_nn_functional_ = dict(top_nn_functional) + for _, count, functional_name in top_nn_module: + if functional_name is None: + continue + if functional_name == 'torch.flatten': + continue + if functional_name not in top_nn_functional_: + top_nn_functional_[functional_name] = count + else: + top_nn_functional_[functional_name] += count + + top_nn_functional_ = list(top_nn_functional_.items()) + top_nn_functional_.sort(key=lambda x: x[1], reverse=True) + return top_nn_functional_ + + +usage_count = {} +for k, v in get_nn_functional_top_list(): + usage_count[k] = v +for k, v in top_torch: + usage_count[k] = v diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/utils.py b/venv/lib/python3.10/site-packages/torch/_functorch/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8e3c60029978e7dabe78a9acb831bcf9757fac88 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/utils.py @@ -0,0 +1,41 @@ +import contextlib +import torch +from torch._C._functorch import ( + set_single_level_autograd_function_allowed, + get_single_level_autograd_function_allowed, + unwrap_if_dead, +) +from typing import Union, Tuple + +@contextlib.contextmanager +def enable_single_level_autograd_function(): + try: + prev_state = get_single_level_autograd_function_allowed() + set_single_level_autograd_function_allowed(True) + yield + finally: + set_single_level_autograd_function_allowed(prev_state) + +def unwrap_dead_wrappers(args): + # NB: doesn't use tree_map_only for performance reasons + result = tuple( + unwrap_if_dead(arg) if isinstance(arg, torch.Tensor) else arg + for arg in args + ) + return result + +# Allows one to expose an API in a private submodule publicly as per the definition +# in PyTorch's public api policy. 
+# +# It is a temporary solution while we figure out if it should be the long-term solution +# or if we should amend PyTorch's public api policy. The concern is that this approach +# may not be very robust because it's not clear what __module__ is used for. +# However, both numpy and jax overwrite the __module__ attribute of their APIs +# without problem, so it seems fine. +def exposed_in(module): + def wrapper(fn): + fn.__module__ = module + return fn + return wrapper + +argnums_t = Union[int, Tuple[int, ...]] diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/vmap.py b/venv/lib/python3.10/site-packages/torch/_functorch/vmap.py new file mode 100644 index 0000000000000000000000000000000000000000..1f776ca0f1815cb280b38d9672da872347a24d6d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_functorch/vmap.py @@ -0,0 +1,452 @@ +# mypy: ignore-errors + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import contextlib +import functools +import threading +from torch import Tensor +from typing import Any, Callable, Optional, Tuple, Union, List +from torch.utils._pytree import ( + tree_flatten, + tree_unflatten, + tree_map_, + _broadcast_to_and_flatten, + TreeSpec, +) +from functools import partial +import os +import itertools + +from torch._C._functorch import ( + _add_batch_dim, + _remove_batch_dim, + _vmap_decrement_nesting, + _vmap_increment_nesting, + is_batchedtensor, +) + +in_dims_t = Union[int, Tuple] +out_dims_t = Union[int, Tuple[int, ...]] + + +def doesnt_support_saved_tensors_hooks(f): + message = ( + "torch.func transforms don't yet support saved tensor hooks. " + "Please open an issue with your use case." + ) + + @functools.wraps(f) + def fn(*args, **kwargs): + with torch.autograd.graph.disable_saved_tensors_hooks(message): + return f(*args, **kwargs) + return fn + + +# Checks that all args-to-be-batched have the same batch dim size +def _validate_and_get_batch_size( + flat_in_dims: List[Optional[int]], + flat_args: List) -> int: + batch_sizes = [arg.size(in_dim) for in_dim, arg in zip(flat_in_dims, flat_args) + if in_dim is not None] + if len(batch_sizes) == 0: + raise ValueError('vmap: Expected at least one Tensor to vmap over') + if batch_sizes and any(size != batch_sizes[0] for size in batch_sizes): + raise ValueError( + f'vmap: Expected all tensors to have the same size in the mapped ' + f'dimension, got sizes {batch_sizes} for the mapped dimension') + return batch_sizes[0] + + +def _num_outputs(batched_outputs: Union[Tensor, Tuple[Tensor, ...]]) -> int: + if isinstance(batched_outputs, tuple): + return len(batched_outputs) + return 1 + +# If value is a tuple, check it has length `num_elements`. 
+# If value is not a tuple, make a tuple with `value` repeated `num_elements` times + + +def _as_tuple(value: Any, num_elements: int, error_message_lambda: Callable[[], str]) -> Tuple: + if not isinstance(value, tuple): + return (value,) * num_elements + if len(value) != num_elements: + raise ValueError(error_message_lambda()) + return value + + +def _process_batched_inputs( + in_dims: in_dims_t, args: Tuple, func: Callable +) -> Tuple[int, List[Any], List[Any], TreeSpec]: + if not isinstance(in_dims, int) and not isinstance(in_dims, tuple): + raise ValueError( + f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(): ' + f'expected `in_dims` to be int or a (potentially nested) tuple ' + f'matching the structure of inputs, got: {type(in_dims)}.') + if len(args) == 0: + raise ValueError( + f'vmap({_get_name(func)})(): got no inputs. Maybe you forgot to add ' + f'inputs, or you are trying to vmap over a function with no inputs. ' + f'The latter is unsupported.') + + flat_args, args_spec = tree_flatten(args) + flat_in_dims = _broadcast_to_and_flatten(in_dims, args_spec) + if flat_in_dims is None: + raise ValueError( + f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(): ' + f'in_dims is not compatible with the structure of `inputs`. ' + f'in_dims has structure {tree_flatten(in_dims)[1]} but inputs ' + f'has structure {args_spec}.') + + for i, (arg, in_dim) in enumerate(zip(flat_args, flat_in_dims)): + if not isinstance(in_dim, int) and in_dim is not None: + raise ValueError( + f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(): ' + f'Got in_dim={in_dim} for an input but in_dim must be either ' + f'an integer dimension or None.') + if isinstance(in_dim, int) and not isinstance(arg, Tensor): + raise ValueError( + f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(): ' + f'Got in_dim={in_dim} for an input but the input is of type ' + f'{type(arg)}. We cannot vmap over non-Tensor arguments, ' + f'please use None as the respective in_dim') + if in_dim is not None and (in_dim < -arg.dim() or in_dim >= arg.dim()): + raise ValueError( + f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(): ' + f'Got in_dim={in_dim} for some input, but that input is a Tensor ' + f'of dimensionality {arg.dim()} so expected in_dim to satisfy ' + f'-{arg.dim()} <= in_dim < {arg.dim()}.') + if in_dim is not None and in_dim < 0: + flat_in_dims[i] = in_dim % arg.dim() + + return _validate_and_get_batch_size(flat_in_dims, flat_args), flat_in_dims, flat_args, args_spec + +# Creates BatchedTensors for every Tensor in arg that should be batched. +# Returns the (potentially) batched arguments and the batch_size. + + +def _create_batched_inputs( + flat_in_dims: List[Any], flat_args: List[Any], vmap_level: int, args_spec) -> Tuple: + # See NOTE [Ignored _remove_batch_dim, _add_batch_dim] + batched_inputs = [arg if in_dim is None else + _add_batch_dim(arg, in_dim, vmap_level) + for in_dim, arg in zip(flat_in_dims, flat_args)] + return tree_unflatten(batched_inputs, args_spec) + + +def _maybe_remove_batch_dim(name, batched_output, vmap_level, batch_size, out_dim): + + if out_dim is None: + if isinstance(batched_output, torch.Tensor) and is_batchedtensor(batched_output): + raise ValueError( + f'vmap({name}, ...): `{name}` can not return a ' + f'BatchedTensor when out_dim is None' + ) + return batched_output + + # out_dim is non None + if not isinstance(batched_output, torch.Tensor): + raise ValueError(f'vmap({name}, ...): `{name}` must only return ' + f'Tensors, got type {type(batched_output)}. 
' + 'Did you mean to set out_dim= to None for output?') + + return _remove_batch_dim(batched_output, vmap_level, batch_size, out_dim) + + +# Undos the batching (and any batch dimensions) associated with the `vmap_level`. +def _unwrap_batched( + batched_outputs: Union[Tensor, Tuple[Tensor, ...]], + out_dims: out_dims_t, + vmap_level: int, batch_size: int, func: Callable) -> Tuple: + flat_batched_outputs, output_spec = tree_flatten(batched_outputs) + + def incompatible_error(): + raise ValueError( + f'vmap({_get_name(func)}, ..., out_dims={out_dims})(): ' + f'out_dims is not compatible with the structure of `outputs`. ' + f'out_dims has structure {tree_flatten(out_dims)[1]} but outputs ' + f'has structure {output_spec}.') + + if isinstance(batched_outputs, torch.Tensor): + # Some weird edge case requires us to spell out the following + # see test_out_dims_edge_case + if isinstance(out_dims, int): + flat_out_dims = [out_dims] + elif isinstance(out_dims, tuple) and len(out_dims) == 1: + flat_out_dims = out_dims + elif out_dims is None: + flat_out_dims = [out_dims] + else: + incompatible_error() + else: + flat_out_dims = _broadcast_to_and_flatten(out_dims, output_spec) + if flat_out_dims is None: + incompatible_error() + + flat_outputs = [ + _maybe_remove_batch_dim(_get_name(func), batched_output, vmap_level, batch_size, out_dim) + for batched_output, out_dim in zip(flat_batched_outputs, flat_out_dims) + ] + return tree_unflatten(flat_outputs, output_spec) + + +def _check_int_or_none(x, func, out_dims): + if isinstance(x, int): + return + if x is None: + return + raise ValueError( + f'vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must be ' + f'an int, None or a python collection of ints representing where in the outputs the ' + f'vmapped dimension should appear.') + + +def _check_out_dims_is_int_or_int_pytree(out_dims: out_dims_t, func: Callable) -> None: + if isinstance(out_dims, int): + return + tree_map_(partial(_check_int_or_none, func=func, out_dims=out_dims), out_dims) + + +def _get_name(func: Callable): + if hasattr(func, '__name__'): + return func.__name__ + + # Not all callables have __name__, in fact, only static functions/methods do. + # A callable created via functools.partial or an nn.Module, to name some + # examples, don't have a __name__. + return repr(func) + + +DECOMPOSITIONS_LOADED = False +DECOMPOSITIONS_LOCK = threading.Lock() +VMAP_DECOMPOSITIONS_LIB = None + +# torch.package, Python 3.11, and torch.jit-less environments are unhappy with +# decompositions. Only load them when needed if possible. +def lazy_load_decompositions(): + global DECOMPOSITIONS_LOADED + if DECOMPOSITIONS_LOADED: + return + + with DECOMPOSITIONS_LOCK: + if DECOMPOSITIONS_LOADED: + return + + if not (os.environ.get("PYTORCH_JIT", "1") == "1" and __debug__): + DECOMPOSITIONS_LOADED = True + return + + # use an alternate way to register an operator into the decomposition table + # _register_jit_decomposition doesn't work for some operators, e.g. 
addr, + # because the Tensor types generated cannot be unioned by torchscript + # decomp should be type OpOverload + global VMAP_DECOMPOSITIONS_LIB + VMAP_DECOMPOSITIONS_LIB = torch.library.Library("aten", "IMPL", "FuncTorchBatched") + + from torch._decomp import decomposition_table + + def _register_python_decomposition_vmap(decomp): + if decomp in decomposition_table: + VMAP_DECOMPOSITIONS_LIB.impl(decomp, decomposition_table[decomp]) + else: + raise RuntimeError(f"could not find decomposition for {decomp}") + + _register_python_decomposition_vmap(torch.ops.aten.mse_loss_backward.default) + _register_python_decomposition_vmap(torch.ops.aten.smooth_l1_loss_backward.default) + _register_python_decomposition_vmap(torch.ops.aten.huber_loss_backward.default) + _register_python_decomposition_vmap(torch.ops.aten.nll_loss_forward.default) + _register_python_decomposition_vmap(torch.ops.aten.nll_loss2d_forward.default) + _register_python_decomposition_vmap(torch.ops.aten.nll_loss_backward.default) + _register_python_decomposition_vmap(torch.ops.aten.nll_loss2d_backward.default) + _register_python_decomposition_vmap(torch.ops.aten.addr.default) + + DECOMPOSITIONS_LOADED = True + +def vmap_impl(func, in_dims, out_dims, randomness, chunk_size, *args, **kwargs): + lazy_load_decompositions() + _check_out_dims_is_int_or_int_pytree(out_dims, func) + batch_size, flat_in_dims, flat_args, args_spec = _process_batched_inputs(in_dims, args, func) + + if chunk_size is not None: + chunks_flat_args = _get_chunked_inputs(flat_args, flat_in_dims, batch_size, chunk_size) + return _chunked_vmap(func, flat_in_dims, chunks_flat_args, + args_spec, out_dims, randomness, **kwargs) + + # If chunk_size is not specified. + return _flat_vmap( + func, batch_size, flat_in_dims, flat_args, args_spec, out_dims, randomness, **kwargs + ) + +def get_chunk_sizes(total_elems, chunk_size): + n_chunks = n_chunks = total_elems // chunk_size + chunk_sizes = [chunk_size] * n_chunks + # remainder chunk + remainder = total_elems % chunk_size + if remainder != 0: + chunk_sizes.append(remainder) + return chunk_sizes + +def _get_chunked_inputs(flat_args, flat_in_dims, batch_size, chunk_size): + split_idxs = (batch_size,) + if chunk_size is not None: + chunk_sizes = get_chunk_sizes(batch_size, chunk_size) + split_idxs = tuple(itertools.accumulate(chunk_sizes)) + + flat_args_chunks = tuple( + t.tensor_split(split_idxs, dim=in_dim) if in_dim is not None else [t, ] * len(split_idxs) + for t, in_dim in zip(flat_args, flat_in_dims) + ) + + # transpose chunk dim and flatten structure + # chunks_flat_args is a list of flatten args + chunks_flat_args = zip(*flat_args_chunks) + return chunks_flat_args + + +def _flatten_chunks_output(chunks_output_): + # chunks_output is a list of chunked outputs + # flatten chunked outputs: + flat_chunks_output = [] + arg_spec = None + for output in chunks_output_: + flat_output, arg_specs = tree_flatten(output) + flat_chunks_output.append(flat_output) + if arg_spec is None: + arg_spec = arg_specs + + # transpose chunk dim and flatten structure + # flat_output_chunks is flat list of chunks + flat_output_chunks = list(zip(*flat_chunks_output)) + return flat_output_chunks, arg_spec + + +def _concat_chunked_outputs(out_dims, arg_spec, flat_output_chunks): + # concat chunks on out_dim + flat_out_dims = _broadcast_to_and_flatten(out_dims, arg_spec) + assert len(flat_out_dims) == len(flat_output_chunks) + flat_output = [] + for idx, out_dim in enumerate(flat_out_dims): + flat_output.append(torch.cat(flat_output_chunks[idx], 
dim=out_dim)) + # release tensors + flat_output_chunks[idx] = None + + return flat_output + + +# Applies vmap on chunked_input and returns concatenated output over the chunks. +def _chunked_vmap(func, flat_in_dims, chunks_flat_args, args_spec, out_dims, randomness, **kwargs): + + chunks_output = [] + rs = torch.get_rng_state() if randomness == "same" else None + for flat_args in chunks_flat_args: + batch_size = _validate_and_get_batch_size(flat_in_dims, flat_args) + + # The way we compute split the input in `_get_chunked_inputs`, + # we may get a tensor with `0` batch-size. We skip any computation + # in that case. + # Eg. + # >>> chunk_size = 1 + # >>> batch_size = 6 + # >>> t = torch.zeros(batch_size, 1) + # >>> t.tensor_split([1, 2, 3, 4, 5, 6]) + # (tensor([[0.]]), tensor([[0.]]), tensor([[0.]]), tensor([[0.]]), + # tensor([[0.]]), tensor([[0.]]), tensor([], size=(0, 1))) + if batch_size == 0: + continue + + if rs is not None: + torch.set_rng_state(rs) + chunks_output.append( + _flat_vmap( + func, batch_size, flat_in_dims, flat_args, args_spec, out_dims, randomness, **kwargs + ) + ) + + flat_output_chunks, arg_spec = _flatten_chunks_output(chunks_output) + + # chunked output tensors are held by both `flat_output_chunks` and `chunks_output`. + # eagerly remove the reference from `chunks_output`. + del chunks_output + + # concat chunks on out_dim + flat_output = _concat_chunked_outputs(out_dims, arg_spec, flat_output_chunks) + + # finally unflatten the output + return tree_unflatten(flat_output, arg_spec) + + +# Vmap refactored helper functions: +def _check_randomness_arg(randomness): + if randomness not in ['error', 'different', 'same']: + raise RuntimeError(f"Only allowed values for randomness are 'error', 'different', or 'same'. Got {randomness}") + + +@contextlib.contextmanager +def vmap_increment_nesting(batch_size, randomness): + try: + vmap_level = _vmap_increment_nesting(batch_size, randomness) + yield vmap_level + finally: + _vmap_decrement_nesting() + + +@doesnt_support_saved_tensors_hooks +def _flat_vmap(func, batch_size, flat_in_dims, flat_args, args_spec, out_dims, randomness, **kwargs): + + with vmap_increment_nesting(batch_size, randomness) as vmap_level: + batched_inputs = _create_batched_inputs(flat_in_dims, flat_args, vmap_level, args_spec) + batched_outputs = func(*batched_inputs, **kwargs) + return _unwrap_batched(batched_outputs, out_dims, vmap_level, batch_size, func) + + +# `restore_vmap` is a private helper function. It is vmap but has the following +# differences: +# - instead of returning outputs, it returns an (outputs, out_dims) tuple. +# out_dims is a pytree of same shape as outputs and contains Optional[int] +# specifying where the vmapped dimension, if it exists, is in the corresponding output. +# - does no validation on in_dims or inputs (vmap expects at least one Tensor to be vmapped). +# restore_vmap allows for no inputs to have the vmap dimension +# - does no validation on outputs (vmap expects only Tensor outputs) +# restore_vmap allows for return of arbitrary outputs (not just Tensors) +# +# The TL;DR is that restore_vmap is more general than vmap and has a slightly +# different API. The relaxations are so that we can "pause" vmap in the middle +# of its execution and then "restore" it later (this is what we do in +# the generate_vmap_rule=True implementation of autograd.Function). 
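The chunking path above is worth a concrete illustration: when a chunk_size is supplied, vmap_impl splits the mapped dimension with tensor_split, runs _flat_vmap once per chunk, skips empty chunks, replays the RNG state per chunk when randomness="same", and concatenates the per-chunk outputs along out_dims. A minimal sketch of the same behaviour through the public API, assuming a PyTorch build where torch.func.vmap exposes the chunk_size keyword; the sketch itself is not part of this file:

import torch
from torch.func import vmap

# Per-row dot products over a batch of 6 rows, evaluated 2 rows at a time.
# With chunk_size=2 the batch is split into chunks, mirroring how
# _get_chunked_inputs/_chunked_vmap above split flat_args before _flat_vmap.
x = torch.randn(6, 4)
y = torch.randn(6, 4)

chunked = vmap(torch.dot, in_dims=(0, 0), chunk_size=2)(x, y)
full = vmap(torch.dot, in_dims=(0, 0))(x, y)

torch.testing.assert_close(chunked, full)  # chunking should not change the result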
+# +# restore_vmap can be technically used in the implementation of vmap, but doing +# that refactor is a bit technically challenging because: +# - vmap couples the tensor-wrapping code with error checking +# - vmap's tensor unwrapping code is in C++; we would need to rewrite part of it +# in python because it overlaps with unwrap_batched +@doesnt_support_saved_tensors_hooks +def restore_vmap(func, in_dims, batch_size, randomness): + def inner(*args, **kwargs): + with vmap_increment_nesting(batch_size, randomness) as vmap_level: + batched_inputs = wrap_batched(args, in_dims, vmap_level) + batched_outputs = func(*batched_inputs, **kwargs) + return unwrap_batched(batched_outputs, vmap_level) + return inner + + +def wrap_batched(args, bdims, level): + flat_args, spec = tree_flatten(args) + flat_bdims = _broadcast_to_and_flatten(bdims, spec) + assert flat_bdims is not None + result = _create_batched_inputs(flat_bdims, flat_args, level, spec) + return result + + +def unwrap_batched(args, level): + flat_args, spec = tree_flatten(args) + if len(flat_args) == 0: + return args, () + result = [torch._C._functorch._unwrap_batched(arg, level) if isinstance(arg, torch.Tensor) + else (arg, None) for arg in flat_args] + output, bdims = zip(*result) + return tree_unflatten(output, spec), tree_unflatten(bdims, spec) diff --git a/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/_memory_profiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/_memory_profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1d6c52028dfa28b1c8d7101635f0f24d5b54b03 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/_memory_profiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fdaeaffabc3c6aae81ba8012e0b05d25bf28643 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/itt.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/itt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3a514dd0053afa3b64859436b628dbbc572e38a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/itt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/python_tracer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/python_tracer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb2f1a5bf73773ec0a768ec21bfbac99c0f13876 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/python_tracer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/xpu/__init__.py b/venv/lib/python3.10/site-packages/torch/xpu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4184ac8eee2d0a89991541f21a74b422f26ac1cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/xpu/__init__.py @@ -0,0 +1,485 @@ +r""" +This package introduces support for the XPU backend, specifically tailored for +Intel GPU optimization. 
+ +This package is lazily initialized, so you can always import it, and use +:func:`is_available()` to determine if your system supports XPU. +""" +import threading +import traceback +from functools import lru_cache +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch._C +from .. import device as _device +from .._utils import _dummy_type, _LazySeedTracker +from ._utils import _get_device_index +from .streams import Event, Stream + +_initialized = False +_tls = threading.local() +_initialization_lock = threading.Lock() +_queued_calls: List[ + Tuple[Callable[[], None], List[str]] +] = [] # don't invoke these until initialization occurs +_is_in_bad_fork = getattr(torch._C, "_xpu_isInBadFork", lambda: False) +_device_t = Union[_device, str, int, None] +_lazy_seed_tracker = _LazySeedTracker() +default_generators: Tuple[torch._C.Generator] = () # type: ignore[assignment] + + +def _is_compiled() -> bool: + r"""Return true if compiled with XPU support.""" + return torch._C._has_xpu + + +if _is_compiled(): + _XpuDeviceProperties = torch._C._XpuDeviceProperties + _exchange_device = torch._C._xpu_exchangeDevice + _maybe_exchange_device = torch._C._xpu_maybeExchangeDevice +else: + # Define dummy if PyTorch was compiled without XPU + _XpuDeviceProperties = _dummy_type("_XpuDeviceProperties") # type: ignore[assignment, misc] + + def _exchange_device(device: int) -> int: + raise NotImplementedError("PyTorch was compiled without XPU support") + + def _maybe_exchange_device(device: int) -> int: + raise NotImplementedError("PyTorch was compiled without XPU support") + + +@lru_cache(maxsize=1) +def device_count() -> int: + r"""Return the number of XPU devices available.""" + if not _is_compiled(): + return 0 + return torch._C._xpu_getDeviceCount() + + +def is_available() -> bool: + r"""Return a bool indicating if XPU is currently available.""" + # This function never throws. + return device_count() > 0 + + +def is_bf16_supported(): + r"""Return a bool indicating if the current XPU device supports dtype bfloat16.""" + return True + + +def is_initialized(): + r"""Return whether PyTorch's XPU state has been initialized.""" + return _initialized and not _is_in_bad_fork() + + +def _lazy_call(callable, **kwargs): + if is_initialized(): + callable() + else: + global _lazy_seed_tracker + if kwargs.get("seed_all", False): + _lazy_seed_tracker.queue_seed_all(callable, traceback.format_stack()) + elif kwargs.get("seed", False): + _lazy_seed_tracker.queue_seed(callable, traceback.format_stack()) + else: + # Don't store the actual traceback to avoid a memory cycle + _queued_calls.append((callable, traceback.format_stack())) + + +def init(): + r"""Initialize PyTorch's XPU state. + This is a Python API for lazy initialization that avoids initializing + XPU until the first time it is accessed. Does nothing if the XPU state is + already initialized. + """ + _lazy_init() + + +def _lazy_init(): + global _initialized, _queued_calls + if is_initialized() or hasattr(_tls, "is_initializing"): + return + with _initialization_lock: + # This test was protected via the GIL. Double-check whether XPU has + # already been initialized. + if is_initialized(): + return + # Stop promptly upon encountering a bad fork error. + if _is_in_bad_fork(): + raise RuntimeError( + "Cannot re-initialize XPU in forked subprocess.
To use XPU with " + "multiprocessing, you must use the 'spawn' start method" + ) + if not _is_compiled(): + raise AssertionError("Torch not compiled with XPU enabled") + # This function inits XPU backend and detects bad fork processing. + torch._C._xpu_init() + # Some of the queued calls may reentrantly call _lazy_init(); We need to + # just return without initializing in that case. + _tls.is_initializing = True + + for calls in _lazy_seed_tracker.get_calls(): + if calls: + _queued_calls.append(calls) + + try: + for queued_call, orig_traceback in _queued_calls: + try: + queued_call() + except Exception as e: + msg = ( + f"XPU call failed lazily at initialization with error: {str(e)}\n\n" + f"XPU call was originally invoked at:\n\n{''.join(orig_traceback)}" + ) + raise Exception(msg) from e + finally: + delattr(_tls, "is_initializing") + _initialized = True + + +class _DeviceGuard: + def __init__(self, index: int): + self.idx = index + self.prev_idx = -1 + + def __enter__(self): + self.prev_idx = torch.xpu._exchange_device(self.idx) + + def __exit__(self, type: Any, value: Any, traceback: Any): + self.idx = torch.xpu._maybe_exchange_device(self.prev_idx) + return False + + +class device: + r"""Context-manager that changes the selected device. + + Args: + device (torch.device or int or str): device index to select. It's a no-op if + this argument is a negative integer or ``None``. + """ + + def __init__(self, device: Any): + self.idx = _get_device_index(device, optional=True) + self.prev_idx = -1 + + def __enter__(self): + self.prev_idx = torch.xpu._exchange_device(self.idx) + + def __exit__(self, type: Any, value: Any, traceback: Any): + self.idx = torch.xpu._maybe_exchange_device(self.prev_idx) + return False + + +class device_of(device): + r"""Context-manager that changes the current device to that of given object. + + You can use both tensors and storages as arguments. If a given object is + not allocated on a XPU, this is a no-op. + + Args: + obj (Tensor or Storage): object allocated on the selected device. + """ + + def __init__(self, obj): + idx = obj.get_device() if obj.is_xpu else -1 + super().__init__(idx) + + +def set_device(device: _device_t) -> None: + r"""Set the current device. + + Args: + device (torch.device or int or str): selected device. This function is a + no-op if this argument is negative. + """ + _lazy_init() + device = _get_device_index(device) + if device >= 0: + torch._C._xpu_setDevice(device) + + +def get_device_name(device: Optional[_device_t] = None) -> str: + r"""Get the name of a device. + + Args: + device (torch.device or int or str, optional): device for which to + return the name. This function is a no-op if this argument is a + negative integer. It uses the current device, given by :func:`~torch.xpu.current_device`, + if :attr:`device` is ``None`` (default). + + Returns: + str: the name of the device + """ + return get_device_properties(device).name + + +def get_device_capability(device: Optional[_device_t] = None) -> Dict[str, Any]: + r"""Get the xpu capability of a device. + + Args: + device (torch.device or int or str, optional): device for which to + return the device capability. This function is a no-op if this + argument is a negative integer. It uses the current device, given by + :func:`~torch.xpu.current_device`, if :attr:`device` is ``None`` + (default). 
+ + Returns: + Dict[str, Any]: the xpu capability dictionary of the device + """ + prop = get_device_properties(device) + return { + "max_work_group_size": prop.max_work_group_size, + "max_num_sub_groups": prop.max_num_sub_groups, + "sub_group_sizes": prop.sub_group_sizes, + } + + +def get_device_properties(device: Optional[_device_t] = None) -> _XpuDeviceProperties: + r"""Get the properties of a device. + + Args: + device (torch.device or int or str): device for which to return the + properties of the device. + + Returns: + _XpuDeviceProperties: the properties of the device + """ + _lazy_init() + device = _get_device_index(device, optional=True) + if device < 0 or device >= device_count(): + raise AssertionError("Invalid device index") + return _get_device_properties(device) # type: ignore[name-defined] # noqa: F821 + + +def current_device() -> int: + r"""Return the index of a currently selected device.""" + _lazy_init() + return torch._C._xpu_getDevice() + + +def _get_device(device: Union[int, str, torch.device]) -> torch.device: + r"""Return the torch.device type object from the passed in device. + + Args: + device (torch.device or int or str): selected device. + """ + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device("xpu", device) + return device + + +class StreamContext: + r"""Context-manager that selects a given stream. + + All XPU kernels queued within its context will be enqueued on a selected + stream. + + Args: + Stream (Stream): selected stream. This manager is a no-op if it's + ``None``. + .. note:: Streams are per-device. + """ + cur_stream: Optional["torch.xpu.Stream"] + + def __init__(self, stream: Optional["torch.xpu.Stream"]): + self.stream = stream + self.idx = _get_device_index(None, True) + if self.idx is None: + self.idx = -1 + + def __enter__(self): + cur_stream = self.stream + if cur_stream is None or self.idx == -1: + return + self.src_prev_stream = torch.xpu.current_stream(None) + + # If the stream is not on the current device, then set the current stream on the device + if self.src_prev_stream.device != cur_stream.device: + with device(cur_stream.device): + self.dst_prev_stream = torch.xpu.current_stream(cur_stream.device) + torch.xpu.set_stream(cur_stream) + + def __exit__(self, type: Any, value: Any, traceback: Any): + cur_stream = self.stream + if cur_stream is None or self.idx == -1: + return + + # Reset the stream on the original device and destination device + if self.src_prev_stream.device != cur_stream.device: + torch.xpu.set_stream(self.dst_prev_stream) + torch.xpu.set_stream(self.src_prev_stream) + + +def stream(stream: Optional["torch.xpu.Stream"]) -> StreamContext: + r"""Wrap around the Context-manager StreamContext that selects a given stream. + + Arguments: + stream (Stream): selected stream. This manager is a no-op if it's ``None``. + """ + return StreamContext(stream) + + +def _set_stream_by_id(stream_id, device_index, device_type): + r"""set stream specified by the stream id, device index and device type + + Args: stream_id (int): not visible to the user, used to assigned to the specific stream. + device_index (int): selected device index. + device_type (int): selected device type. + """ + torch._C._xpu_setStream( + stream_id=stream_id, + device_index=device_index, + device_type=device_type, + ) + + +def set_stream(stream: Stream): + r"""Set the current stream.This is a wrapper API to set the stream. 
+ Usage of this function is discouraged in favor of the ``stream`` + context manager. + + Args: + stream (Stream): selected stream. This function is a no-op + if this argument is ``None``. + """ + if stream is None: + return + _lazy_init() + _set_stream_by_id( + stream_id=stream.stream_id, + device_index=stream.device_index, + device_type=stream.device_type, + ) + + +def current_stream(device: Optional[_device_t] = None) -> Stream: + r"""Return the currently selected :class:`Stream` for a given device. + + Args: + device (torch.device or int, optional): selected device. Returns + the currently selected :class:`Stream` for the current device, given + by :func:`~torch.xpu.current_device`, if :attr:`device` is ``None`` + (default). + """ + _lazy_init() + streamdata = torch._C._xpu_getCurrentStream( + _get_device_index(device, optional=True) + ) + return Stream( + stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2] + ) + + +def synchronize(device: _device_t = None) -> None: + r"""Wait for all kernels in all streams on a XPU device to complete. + + Args: + device (torch.device or int, optional): device for which to synchronize. + It uses the current device, given by :func:`~torch.xpu.current_device`, + if :attr:`device` is ``None`` (default). + """ + _lazy_init() + device = _get_device_index(device, optional=True) + return torch._C._xpu_synchronize(device) + + +def empty_cache() -> None: + r"""Release all unoccupied cached memory currently held by the caching + allocator so that those can be used in other XPU application. + + .. note:: + :func:`~torch.xpu.empty_cache` doesn't increase the amount of XPU + memory available for PyTorch. However, it may help reduce fragmentation + of XPU memory in certain cases. + """ + if is_initialized(): + torch._C._xpu_emptyCache() + + +def _get_generator(device: torch.device) -> torch._C.Generator: + r"""Return the XPU Generator object for the given device. + + Args: + device (torch.device): selected device. + """ + idx = device.index + if idx is None: + idx = current_device() + return torch.xpu.default_generators[idx] + + +def _set_rng_state_offset( + offset: int, device: Union[int, str, torch.device] = "xpu" +) -> None: + r"""Set the random number generator state offset of the specified GPU. + + Args: + offset (int): The desired offset + device (torch.device or int, optional): The device to set the RNG state. + Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device). + """ + final_device = _get_device(device) + + def cb(): + default_generator = _get_generator(final_device) + default_generator.set_offset(offset) + + _lazy_call(cb) + + +def _get_rng_state_offset(device: Union[int, str, torch.device] = "xpu") -> int: + r"""Return the random number generator state offset of the specified GPU. + + Args: + device (torch.device or int, optional): The device to return the RNG state offset of. + Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device). + + .. warning:: + This function eagerly initializes XPU. 
+ """ + _lazy_init() + final_device = _get_device(device) + default_generator = _get_generator(final_device) + return default_generator.get_offset() + + +from .random import * # noqa: F403 + + +__all__ = [ + "Event", + "Stream", + "StreamContext", + "current_device", + "current_stream", + "default_generators", + "device", + "device_of", + "device_count", + "empty_cache", + "get_device_capability", + "get_device_name", + "get_device_properties", + "get_rng_state", + "get_rng_state_all", + "get_stream", + "init", + "initial_seed", + "is_available", + "is_bf16_supported", + "is_initialized", + "manual_seed", + "manual_seed_all", + "seed", + "seed_all", + "set_device", + "set_rng_state", + "set_rng_state_all", + "set_stream", + "stream", + "streams", + "synchronize", +] diff --git a/venv/lib/python3.10/site-packages/torch/xpu/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/xpu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c979d03c5681dd5f98e37d43dd0d249a908d62aa Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/xpu/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/xpu/__pycache__/_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/xpu/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a57f54c08815b5b870f6da74b8a642983e1b00c0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/xpu/__pycache__/_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/xpu/__pycache__/random.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/xpu/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cd8807d2e61c06e54aef2a594c996ad4c6bccd8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/xpu/__pycache__/random.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/xpu/__pycache__/streams.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/xpu/__pycache__/streams.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d361479cd2045bfba893e13bd384e9f4164936b8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/xpu/__pycache__/streams.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/xpu/_utils.py b/venv/lib/python3.10/site-packages/torch/xpu/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8f738267459a2791a4a33ca4bec74800a58f0b9a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/xpu/_utils.py @@ -0,0 +1,39 @@ +from typing import Any + +import torch + +# The _get_device_index has been moved to torch.utils._get_device_index +from torch._utils import _get_device_index as _torch_get_device_index + + +def _get_device_index( + device: Any, optional: bool = False, allow_cpu: bool = False +) -> int: + r"""Get the device index from :attr:`device`, which can be a torch.device + object, a Python integer, or ``None``. + + If :attr:`device` is a torch.device object, returns the device index if it + is a XPU device. Note that for a XPU device without a specified index, + i.e., ``torch.device('xpu')``, this will return the current default XPU + device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``, + CPU devices will be accepted and ``-1`` will be returned in this case. + + If :attr:`device` is a Python integer, it is returned as is. 
+ + If :attr:`device` is ``None``, this will return the current default XPU + device if :attr:`optional` is ``True``. + """ + if isinstance(device, int): + return device + if isinstance(device, str): + device = torch.device(device) + if isinstance(device, torch.device): + if allow_cpu: + if device.type not in ["xpu", "cpu"]: + raise ValueError(f"Expected a xpu or cpu device, but got: {device}") + elif device.type != "xpu": + raise ValueError(f"Expected a xpu device, but got: {device}") + if not torch.jit.is_scripting(): + if isinstance(device, torch.xpu.device): + return device.idx + return _torch_get_device_index(device, optional, allow_cpu) diff --git a/venv/lib/python3.10/site-packages/torch/xpu/random.py b/venv/lib/python3.10/site-packages/torch/xpu/random.py new file mode 100644 index 0000000000000000000000000000000000000000..733c55b658cd7115a3d89530f8ff18c839dd3fa6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/xpu/random.py @@ -0,0 +1,176 @@ +from typing import Iterable, List, Union + +import torch +from .. import Tensor +from . import _lazy_call, _lazy_init, current_device, device_count + + +def get_rng_state(device: Union[int, str, torch.device] = "xpu") -> Tensor: + r"""Return the random number generator state of the specified GPU as a ByteTensor. + + Args: + device (torch.device or int, optional): The device to return the RNG state of. + Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device). + + .. warning:: + This function eagerly initializes XPU. + """ + _lazy_init() + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device("xpu", device) + idx = device.index + if idx is None: + idx = current_device() + default_generator = torch.xpu.default_generators[idx] + return default_generator.get_state() + + +def get_rng_state_all() -> List[Tensor]: + r"""Return a list of ByteTensor representing the random number states of all devices.""" + results = [] + for i in range(device_count()): + results.append(get_rng_state(i)) + return results + + +def set_rng_state( + new_state: Tensor, device: Union[int, str, torch.device] = "xpu" +) -> None: + r"""Set the random number generator state of the specified GPU. + + Args: + new_state (torch.ByteTensor): The desired state + device (torch.device or int, optional): The device to set the RNG state. + Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device). + """ + with torch._C._DisableFuncTorch(): + new_state_copy = new_state.clone(memory_format=torch.contiguous_format) + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device("xpu", device) + + def cb(): + idx = device.index + if idx is None: + idx = current_device() + default_generator = torch.xpu.default_generators[idx] + default_generator.set_state(new_state_copy) + + _lazy_call(cb) + + +def set_rng_state_all(new_states: Iterable[Tensor]) -> None: + r"""Set the random number generator state of all devices. + + Args: + new_states (Iterable of torch.ByteTensor): The desired state for each device. + """ + for i, state in enumerate(new_states): + set_rng_state(state, i) + + +def manual_seed(seed: int) -> None: + r"""Set the seed for generating random numbers for the current GPU. + + It's safe to call this function if XPU is not available; in that case, it is silently ignored. + + Args: + seed (int): The desired seed. + + .. warning:: + If you are working with a multi-GPU model, this function is insufficient + to get determinism. 
To seed all GPUs, use :func:`manual_seed_all`. + """ + seed = int(seed) + + def cb(): + idx = current_device() + default_generator = torch.xpu.default_generators[idx] + default_generator.manual_seed(seed) + + _lazy_call(cb, seed=True) + + +def manual_seed_all(seed: int) -> None: + r"""Set the seed for generating random numbers on all GPUs. + + It's safe to call this function if XPU is not available; in that case, it is silently ignored. + + Args: + seed (int): The desired seed. + """ + seed = int(seed) + + def cb(): + for i in range(device_count()): + default_generator = torch.xpu.default_generators[i] + default_generator.manual_seed(seed) + + _lazy_call(cb, seed_all=True) + + +def seed() -> None: + r"""Set the seed for generating random numbers to a random number for the current GPU. + + It's safe to call this function if XPU is not available; in that case, it is silently ignored. + + .. warning:: + If you are working with a multi-GPU model, this function will only initialize + the seed on one GPU. To initialize all GPUs, use :func:`seed_all`. + """ + + def cb(): + idx = current_device() + default_generator = torch.xpu.default_generators[idx] + default_generator.seed() + + _lazy_call(cb) + + +def seed_all() -> None: + r"""Set the seed for generating random numbers to a random number on all GPUs. + + It's safe to call this function if XPU is not available; in that case, it is silently ignored. + """ + + def cb(): + random_seed = 0 + seeded = False + for i in range(device_count()): + default_generator = torch.xpu.default_generators[i] + if not seeded: + default_generator.seed() + random_seed = default_generator.initial_seed() + seeded = True + else: + default_generator.manual_seed(random_seed) + + _lazy_call(cb) + + +def initial_seed() -> int: + r"""Return the current random seed of the current GPU. + + .. warning:: + This function eagerly initializes XPU. + """ + _lazy_init() + idx = current_device() + default_generator = torch.xpu.default_generators[idx] + return default_generator.initial_seed() + + +__all__ = [ + "get_rng_state", + "get_rng_state_all", + "set_rng_state", + "set_rng_state_all", + "manual_seed", + "manual_seed_all", + "seed", + "seed_all", + "initial_seed", +] diff --git a/venv/lib/python3.10/site-packages/torch/xpu/streams.py b/venv/lib/python3.10/site-packages/torch/xpu/streams.py new file mode 100644 index 0000000000000000000000000000000000000000..2c3c3a63d58bdb7b4bd85a25740330d309a707bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/xpu/streams.py @@ -0,0 +1,169 @@ +import ctypes + +import torch +from torch._streambase import _EventBase, _StreamBase +from .._utils import _dummy_type + + +if not hasattr(torch._C, "_XpuStreamBase"): + # Define dummy base classes + torch._C.__dict__["_XpuStreamBase"] = _dummy_type("_XpuStreamBase") + torch._C.__dict__["_XpuEventBase"] = _dummy_type("_XpuEventBase") + + +class Stream(torch._C._XpuStreamBase, _StreamBase): + r"""Wrapper around a XPU stream. + + A XPU stream is a linear sequence of execution that belongs to a specific + device, independent from other streams. + + Args: + device(torch.device or int, optional): a device on which to allocate + the stream. If :attr:`device` is ``None`` (default) or a negative + integer, this will use the current device. + priority(int, optional): priority of the stream, should be 0 or + negative, where negative numbers indicate higher priority. By default, + streams have priority 0. 
+ """ + + def __new__(cls, device=None, priority=0, **kwargs): + # setting device manager is expensive, so we avoid it unless necessary + if device is None or ("stream_id" in kwargs and "device_index" in kwargs): + return super().__new__(cls, priority=priority, **kwargs) + else: + with torch.xpu.device(device): + return super().__new__(cls, priority=priority, **kwargs) + + def wait_event(self, event): + r"""Make all future work submitted to the stream wait for an event. + + Args: + event (torch.xpu.Event): an event to wait for. + """ + event.wait(self) + + def wait_stream(self, stream): + r"""Synchronize with another stream. + + All future work submitted to this stream will wait until all kernels + submitted to a given stream at the time of call complete. + + Args: + stream (Stream): a stream to synchronize. + """ + self.wait_event(stream.record_event()) + + def record_event(self, event=None): + r"""Record an event. + + Args: + event (torch.xpu.Event, optional): event to record. If not given, a new one + will be allocated. + + Returns: + Recorded event. + """ + if event is None: + event = Event() + event.record(self) + return event + + def query(self): + r"""Check if all the work submitted has been completed. + + Returns: + A boolean indicating if all kernels in this stream are completed. + """ + return super().query() + + def synchronize(self): + r"""Wait for all the kernels in this stream to complete.""" + super().synchronize() + + @property + def _as_parameter_(self): + return ctypes.c_void_p(self.sycl_queue) + + def __eq__(self, o): + if isinstance(o, Stream): + return super().__eq__(o) + return False + + def __hash__(self): + return hash((self.sycl_queue, self.device)) + + def __repr__(self): + return f"torch.xpu.Stream(device={self.device} sycl_queue={self.sycl_queue:#x})" + + +class Event(torch._C._XpuEventBase, _EventBase): + r"""Wrapper around a XPU event. + + XPU events are synchronization markers that can be used to monitor the + device's progress, and to synchronize XPU streams. + + The underlying XPU events are lazily initialized when the event is first + recorded. After creation, only streams on the same device may record the + event. However, streams on any device can wait on the event. + + Args: + enable_timing (bool, optional): indicates if the event should measure time + (default: ``False``) + """ + + def __new__(cls, enable_timing=False): + return super().__new__(cls, enable_timing=enable_timing) + + def record(self, stream=None): + r"""Record the event in a given stream. + + Uses ``torch.xpu.current_stream()`` if no stream is specified. The + stream's device must match the event's device. + """ + if stream is None: + stream = torch.xpu.current_stream() + super().record(stream) + + def wait(self, stream=None): + r"""Make all future work submitted to the given stream wait for this event. + + Use ``torch.xpu.current_stream()`` if no stream is specified. + """ + if stream is None: + stream = torch.xpu.current_stream() + super().wait(stream) + + def query(self): + r"""Check if all work currently captured by event has completed. + + Returns: + A boolean indicating if all work currently captured by event has + completed. + """ + return super().query() + + def elapsed_time(self, end_event): + r"""Return the time elapsed. + + Time reported in milliseconds after the event was recorded and + before the end_event was recorded. + """ + return super().elapsed_time(end_event) + + def synchronize(self): + r"""Wait for the event to complete. 
+ + Waits until the completion of all work currently captured in this event. + This prevents the CPU thread from proceeding until the event completes. + """ + super().synchronize() + + @property + def _as_parameter_(self): + return ctypes.c_void_p(self.sycl_event) + + def __repr__(self): + if self.sycl_event: + return f"torch.xpu.Event(sycl_event={self.sycl_event:#x})" + else: + return "torch.xpu.Event(uninitialized)"
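Taken together, the XPU package added by this diff follows the familiar CUDA-style workflow: check availability, pick a device, issue work on streams, and coordinate via events. A minimal end-to-end sketch, assuming an XPU-enabled build with at least one device; the sketch is illustrative only and not part of the diff:

import torch

if torch.xpu.is_available():
    torch.xpu.set_device(0)

    side = torch.xpu.Stream()                    # Stream wrapper from streams.py above
    start = torch.xpu.Event(enable_timing=True)  # timing events, as in the CUDA-style API
    end = torch.xpu.Event(enable_timing=True)

    x = torch.randn(1024, 1024, device="xpu")
    with torch.xpu.stream(side):                 # StreamContext from __init__.py
        start.record()                           # records on the current (side) stream
        y = x @ x
        end.record()

    end.synchronize()                            # wait for the event before reading the timer
    print(f"matmul took {start.elapsed_time(end):.3f} ms")
    torch.xpu.synchronize()                      # drain all streams on the current device

The pattern mirrors torch.cuda usage; that elapsed_time requires both events to be created with enable_timing=True and recorded first is an assumption carried over from the CUDA API rather than something stated in this file.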