diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..10a55772ab58b21573a6eba0356ddd3080164ac7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
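For context on the _aot_autograd package added below, here is a minimal usage sketch (annotation only, not part of the diff) of how the AOTAutograd machinery these vendored files implement is typically driven; it assumes the public functorch.compile.aot_function API from upstream PyTorch, which is not defined anywhere in this diff.

# Hypothetical sketch: drive AOTAutograd so that the metadata-collection and
# graph-dispatch code added below actually runs. Assumes functorch.compile.aot_function
# is available in this PyTorch build (it is not provided by this diff).
import torch
from functorch.compile import aot_function

def fn(x):
    # A small differentiable function; AOTAutograd traces a joint forward/backward graph for it.
    return (x.sin() + 1).sum()

def show_graph(gm, example_inputs):
    # fw_compiler/bw_compiler receive a functionalized torch.fx.GraphModule plus example inputs.
    print(gm.graph)
    return gm  # returning the GraphModule runs it unmodified

x = torch.randn(4, requires_grad=True)
compiled = aot_function(fn, fw_compiler=show_graph, bw_compiler=show_graph)
compiled(x).backward()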
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/collect_metadata_analysis.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..221690798fd3df076f767051c7edc03a214518a6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
@@ -0,0 +1,626 @@
+"""
+This module is one of the analysis modules - it takes as input a function or graph
+and some preexisting properties, and returns some data that is useful for deciding
+how to further proceed with compilation or construct runtime wrappers.
+
+In particular, the analysis here constructs view and mutation metadata from running
+a functionalized version of the graph under compilation.
+""" + +import collections +import logging +from functools import wraps +from typing import Callable, DefaultDict, Dict, List + +import torch +import torch.utils._pytree as pytree +from torch import Tensor +from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode +from torch._subclasses.meta_utils import safe_is_leaf +from torch.fx.experimental.symbolic_shapes import is_concrete_int +from torch.multiprocessing.reductions import StorageWeakRef +from torch.utils._python_dispatch import ( + is_traceable_wrapper_subclass, + transform_subclass, +) +from .functional_utils import ( + are_all_mutations_hidden_from_autograd, + are_all_mutations_under_no_grad_or_inference_mode, + from_fun, + has_data_mutation, + has_metadata_mutation, + has_same_metadata, + to_fun, +) +from .schemas import ( + InputAliasInfo, + MutationType, + OutputAliasInfo, + OutputType, + ViewAndMutationMeta, +) +from .subclass_utils import create_subclass_meta + +from .utils import _get_autocast_states, KNOWN_TYPES, strict_zip + +zip = strict_zip + +log = logging.getLogger(__name__) + + +# This is a version of functionalization that is specifically designed +# for the AOTAutograd use case. +# +# Unlike functorch's variant, this doesn't use the functorch level system, +# instead it directly uses PyTorch's conventional dispatcher to hit the +# functionalization key. In particular, this means that FunctionalTensorWrapper +# can have autograd data stored directly on it. +# +# In typical AOTAutograd usage, the dispatch key order will look like: +# +# Autograd - Functionalization ~~~~> Proxy Mode - Fake Tensor +# outer tensor inner tensor +# +# Returns: +# - ViewAndMutationMeta, telling us metadata about the inputs and outputs, and +# The list of outputs from the forward, but **only** the outputs that we need +# to pass in as tangents into the backward. +# Specifically, aliased outputs from the forward get regenerated, and don't participate +# in the compiled backward function. +def run_functionalized_fw_and_collect_metadata( + f, + *, + keep_input_mutations: bool, + # TODO: refactor to kill this flag + is_train: bool = False, + pre_dispatch: bool = False, +) -> Callable[..., ViewAndMutationMeta]: + memo: Dict[Tensor, Tensor] = {} + + def _to_fun(t): + if isinstance(t, Tensor): + if t in memo: + return memo[t] + r = to_fun(t) + memo[t] = r + return r + else: + return t + + @wraps(f) + def inner(*flat_args): + # This function is meant to be run with the forward, which expects a flat list of tensor/symint/other args. + assert all(isinstance(a, tuple(KNOWN_TYPES)) for a in flat_args) + + input_info: List[InputAliasInfo] = [] + output_info: List[OutputAliasInfo] = [] + + prior_grad_enabled = torch.is_grad_enabled() + prior_autocast_states = _get_autocast_states() + + # See Note [Disabling Functionalize TLS Above Python Functionalization] + disable_above = torch._C._ExcludeDispatchKeyGuard( + torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize) + ) + + # It doesn't matter if we run this under predispatch or not because it is + # only for figuring out metadata + mode = FunctionalTensorMode(_allow_token_discovery=True) + with disable_above, mode: + # precondition: The passed in function already handles unflattening inputs + flattening outputs + flat_f_args = pytree.tree_map(_to_fun, flat_args) + flat_f_outs = f(*flat_f_args) + + if prior_autocast_states != _get_autocast_states(): + raise RuntimeError( + "AOTAutograd does not support tracing graphs that mutate the autocast state. 
" + "Dynamo will only insert autocast context managers (e.g. with torch.autocast(..)) into the graph, " + "which will unwind all of their mutations to autocast state before the graph exits. " + "If you encounter this error while using torch.compile, please file a bug." + ) + + # Inspect the state of the input tensor functional wrapper to detect input mutation info + # If inp[i] has a metadata-only mutation, then maybe_inputs_with_mutated_metadata[i] contains the updated version + for i, (arg, f_arg) in enumerate(zip(flat_args, flat_f_args)): + # NB: Mutation of non-contiguous tensor subclass input can result in a mismatch in + # strides between the functionalized arg inner tensors and non-functionalized arg inner + # tensors. This is a problem as the inner tensor stride change may not be reflected + # correctly in the outer tensor, so disallow this for now. + mutates_data = has_data_mutation(f_arg) + if ( + mutates_data + and not arg.is_contiguous() + and is_traceable_wrapper_subclass(arg) + ): + raise RuntimeError( + "Mutations on non-contiguous inputs are currently not allowed on " + "tensor subclasses" + ) + + if not isinstance(arg, Tensor): + new_arg = arg + else: + new_arg = from_fun(f_arg) + mutates_metadata = has_metadata_mutation( + f_arg, arg, check_only_storage_mutation=False + ) + if mutates_metadata and is_traceable_wrapper_subclass(arg): + raise RuntimeError( + "Metadata mutations are currently not allowed on tensor subclasses" + ) + mutates_storage_metadata = has_metadata_mutation( + f_arg, arg, check_only_storage_mutation=True + ) + mutations_hidden_from_autograd = are_all_mutations_hidden_from_autograd( + f_arg + ) + mutations_under_no_grad_or_inference_mode = ( + mutates_data + and are_all_mutations_under_no_grad_or_inference_mode(f_arg) + ) + + # Here, we're saying that if an input experienced a set call, inp.set_(other), + # then we can effectively not have to worry about whether its data was mutated. + # There are 3 cases: + # (1) We mutate inp *after* the set_() call. other is a graph intermediate. + # In this case, we're not really mutating the input storage of "inp"; + # we're mutating the storage of an intermdiate value (other), + # and slamming that storage into the input tensor. So no data mutation is necessary. + # (2) We mutate inp *after* the set_() call. other is a graph *input*. + # In this case, the data mutation will be properly handled in the runtime + # epilogue during the processing of "other" + # (3) We mutate inp *before* the set_() call. + # This case is *not* currently handled. + # TODO: discuss this in the PR. Both supporting this, and detecting + erroring out, + # seem painful to get working. + if mutates_storage_metadata: + mutates_data = False + + requires_grad = isinstance(f_arg, torch.Tensor) and f_arg.requires_grad + + input_info.append( + InputAliasInfo( + is_leaf=isinstance(arg, Tensor) and safe_is_leaf(arg), + mutates_data=mutates_data, + mutates_metadata=mutates_metadata, + mutations_hidden_from_autograd=mutations_hidden_from_autograd, + mutates_storage_metadata=mutates_storage_metadata, + mutations_under_no_grad_or_inference_mode=mutations_under_no_grad_or_inference_mode, + requires_grad=requires_grad, + keep_input_mutations=keep_input_mutations, + ) + ) + + # If a function involves creating a tensor, and returning a view of it, such that its _base is the intermediate, + # We need to make sure our graph returns the _base as a graph output, and we manually recreate the view + # to return to the user. Why? 
The backend compiler is free to (incorrectly) not set requires_grad + # on the base tensor, but we are obligated to properly set requires-gradness on the real output. + + inp_storage_refs = { + StorageWeakRef(inpt.untyped_storage()): idx + for idx, inpt in enumerate(flat_f_args) + if isinstance(inpt, Tensor) + } + + # We need inp tensor id's to be able to tell if an outputs **are** inputs. + inp_tensor_ids = {id(inpt) for inpt in flat_f_args if isinstance(inpt, Tensor)} + # We need output tensor id's to tell if any output._base` attributes **are** other outputs. + # (This is also a dict because we need to know that output's index, so we can regenerate + # the alias from it). + out_tensor_ids = {id(o): i for i, o in enumerate(flat_f_outs)} + + # Keep track of which outputs alias other outputs + out_tensor_alias_counts: DefaultDict = collections.defaultdict(int) + # This tells us, for a given group of outputs that alias each other, + # whether they e.g. all came from an unbind call + num_aliased_tensors_that_are_multi_output_views: DefaultDict = ( + collections.defaultdict(int) + ) + out_storage_to_tensors: DefaultDict = collections.defaultdict(set) + curr_storage = None + for o in flat_f_outs: + if isinstance(o, torch.Tensor): + curr_storage = StorageWeakRef(o.untyped_storage()) + out_tensor_alias_counts[curr_storage] += 1 + # Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call] + # This is an optimization on top of the "alias of intermediates" logic, + # which you can read more about under Note [AOT Autograd: outputs aliasing inputs or intermediates!] + # + # Before describing the optimization: this is important for AOTAutograd to have good + # perf around, multi-output views. HOWEVER: + # - There is a more generic change to AOTAutograd that we'd like to make, that subsumes this case, + # around using pre-dispatch tracing to partition out a graph so we can faithfully replay all + # views without having to regenerate them at runtime. + # - It's loosely described in this doc (more details will be added soon): + # https://docs.google.com/document/d/1DlfFq8TKbuAn2zyJxLfoW-X1qkkm5PLdHFtySo03QAk/edit + # - Once that change lands, we should just rip out this "optimization", since: + # (1) It will be fully unnecessary + # (2) Although it is only a few lines of code, it is a bit difficult to reason about + # its correctness with the autograd engine in all cases. + # + # + # What is this optimization? Consider the below case: + # def f(x): + # intermediate = x.mul(2) + # # x and intermediate here require grad + # o1, o2, ... o10 = intermediate.unbind(-1) + # return intermediate, o1, o2, ... o10 + # Now, the "intermediate base" handling in AOTAutograd implies that we must do the following: + # (1) return "intermediate as an extra output of the compiled graph + # (2) regenerate each aliased output off of "intermediate", **outside** of the autograd.Function. + # The reason AOTAutograd ordinarily does this is for safety: the autograd engine needs to know + # that o1 through o10 are all aliased, and if we blindly return o1 through o10 from the autograd.Function, + # this information will be hidden. + # In particular, mutating one alias might require autograd to update autograd metadata on the other aliases + # (like their grad_fn, for example, when the autograd engine needs to do view-replay). 
+ # + # However, intermediate_base logic can be bad for backward performance (we sometimes generate + # as_strided calls during the intermediate base logic, which can have a slow backward formula). + # Is it possible to find a set of conditions where it is **safe** to hide the output aliasing from autograd? + # + # For a set of outputs of the graph that alias each other, o_1...o_k, consider: + # (1) They came from the same multi-output view op, e.g. o_1, ..., o_k = intermediate.unbind(0) + # (2) If there are any other aliases of o_1 through o_k (in the example above, intermediate), + # **at most** 1 can escape from the graph (e.g. there is not some other graph input/output + # o_other, that aliases these outputs) + # (3) o_1...o_k all require_grad, they all share the same ._base, and their ._base requires grad. + # This condition is important because it's what causes slowness in the intermediate_base + # codepath of aot_autograd. Ordinarily, o_1...o_k would all get a grad_fn, and + # aot_autograd's view-replay might give each output an AsStridedBackward as its grad_fn. + # "K" AsStridedBackward calls will be *much* slower than a single UnbindBackward. + # In this setup, is it possible to mutate one of the outputs o_i in a way that would affect the autograd meta + # of the other aliases? + # + # Claim: No! Consider a few example (which I'm pretty sure cover all cases of mutation w.r.t. autograd): + # (a) What happens if we mutate any of o_1 through o_k directly? + # Autograd raises an error: + # "RuntimeError: Output 0 of UnbindBackward0 is a view and is being modified inplace. This view is + # the output of a function that returns multiple views. Such functions do not allow the output + # views to be modified inplace. You should replace the inplace operation by an out-of-place one." + # (b) What if we take a view of o_k and mutate it, o_k.view(o_k.shape).mul_(2)? + # Autograd raises the same error- the "multi-output-view"ness of an alias propagates to future views. + # (c) What if we mutate o_k under no_grad? + # Autograd raises the same error + # (d) What if we detach and mutate, e.g. o_k.detach().mul_(2)? + # Autograd allows this, *but* autograd updates all alias's grad_fn's to be error functions when accessed. + # Autograd raises the same error + # (e) What if we try to mutate another alias of o_1...o_k, that was **not** created from a multi-output view? + # We promised that there is at most **one** such alias, e.g. intermediate in the example above. + # You can mutate intermediate, but in eager mode this will change the grad_fn of o_1...o_k + # to be error fn's. + # Since intermediate was the *only* non-multi-output-alias, there are no other aliases + # of `intermediate` around that were produced by the compiled fn and have a valid grad_fn. + # + # Coming back to this optimization: + # Given that it is not possible for mutating one of these aliases to affect the autograd metadata of another alias + # without causing an error in eager mode, we will simple hide the aliasing from autograd during torch.compile + # if all of the above conditions are met. + # This has the slight downside that it's possible to write some "bad" code that autograd will raise an error on + # in eager but fail to during torch.compile, but it has the benefit that this code has much better performance. 
+ # NOTE: if and when we eventually update AOTAutograd to do the "view graph slicing" defined here: + # https://docs.google.com/document/d/1DlfFq8TKbuAn2zyJxLfoW-X1qkkm5PLdHFtySo03QAk/edit, + # then this optimization will probably matter less and might be ok to remove. + is_cur_tensor_multi_out_view = isinstance( + o, FunctionalTensor + ) and torch._functionalize_is_multi_output_view( # type: ignore[attr-defined] + o.elem + ) + if is_cur_tensor_multi_out_view: + num_aliased_tensors_that_are_multi_output_views[curr_storage] += 1 + out_storage_to_tensors[curr_storage].add(o) + + # maps the id of an intermediate base to its index in the output of the compiled forward + intermediate_base_tensor_id_to_output_idx: Dict[int, int] = {} + intermediate_bases: List[torch.Tensor] = [] + # Why Do We Care If Storage Changed? + # It's important to understand the implications of storage changes in complex scenarios. Take this example: + # + # def f(x): + # x_storage = x.untyped_storage() + # non_leaf_tensor = torch.ones(4, requires_grad=True).clone() + # + # # Using no_grad() and _unsafe_preserve_version_counter to simulate the .data = operation + # with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter(x): + # x.set_(non_leaf_tensor.untyped_storage()) + # + # out = x.view(-1) + # + # # Restoring x to its original storage, again simulating .data = operation + # with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter(x): + # x.set_(x_storage) + # + # return out + # + # In this scenario, 'x' and 'out' have different shapes and are stored at different memory addresses, aka no aliasing. + # However, due to how set_() and more specificlaly, set is functionalized, is defined to preserve eager semantics, + # the autograd engine mistakenly assumes that 'x' and 'out' are aliased, treating 'x' as 'out._base'. + # This misinterpretation leads to an 'alias_of_input' flag, causing an unnecessary as_strided() call to be generated, + # which could lead to issues later in the code. + for o in flat_f_outs: + functional_tensor_storage_changed = isinstance( + o, FunctionalTensor + ) and torch._functionalize_was_storage_changed( # type: ignore[attr-defined] + o.elem + ) + curr_storage = ( + None + if not isinstance(o, torch.Tensor) + else StorageWeakRef(o.untyped_storage()) + ) + outs_with_identical_metadata_that_require_grad = ( + [] + if not isinstance(o, Tensor) + else [ + curr + for curr in out_storage_to_tensors[curr_storage] + if has_same_metadata(o, curr) + and curr.requires_grad + and o is not curr + ] + ) + + # See Note [Accessing .grad_fn on FunctionalTensor] + # In-place operations on views will trigger a lazy rebase of the autograd graph; + # this runs during access to the .grad_fn. The rebase logic will invoke view ops + # on FunctionalTensors, so we must enable a FunctionalTensorMode here to ensure + # these op calls succeed. 
+ grad_fn = None + if isinstance(o, Tensor): + with FunctionalTensorMode(): + grad_fn = o.grad_fn + + is_result_of_custom_autograd_fn = False + # Need to check for both custom cpp (CppFunction) and python (BackwardCFunction) + # autograd fns + if type(grad_fn).__name__ == "CppFunction": + is_result_of_custom_autograd_fn = True + if isinstance(grad_fn, torch.autograd.function.BackwardCFunction): + is_result_of_custom_autograd_fn = True + + if not isinstance(o, Tensor): + output_type = OutputType.non_alias + base_idx = None + elif ( + curr_storage in inp_storage_refs + and grad_fn is not None + and is_result_of_custom_autograd_fn + ): + output_type = OutputType.custom_function_view + base_idx = None + elif ( + curr_storage in inp_storage_refs + and not functional_tensor_storage_changed + ): + base_idx = inp_storage_refs[curr_storage] + is_input_tensor = id(o) in inp_tensor_ids + num_aliased_outs = out_tensor_alias_counts[curr_storage] + num_multi_output_view_outs = ( + num_aliased_tensors_that_are_multi_output_views[curr_storage] + ) + num_aliased_outs_that_are_not_multi_output_views = ( + num_aliased_outs - num_multi_output_view_outs + ) + if ( + grad_fn is not None + and num_aliased_outs_that_are_not_multi_output_views == 0 + ): + # See Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call] + # In particular, given: + # def f(x): + # return list(x.unbind(0)) + # The main reason we ordinarily try to regenerate these output aliases outside of the + # compiled autograd.Function is because if any of the outputs are later mutated, + # autograd needs to perform view-replay to regenerate them. + # However, autograd does not allow users to mutate multi-output views + # in any way that can change the autograd metadata of other aliases. + # So we hide this aliasing from autograd here. + log.debug( + "Encountered AOTAutograd case: differentiable outputs that \ +alias each other from a multi-output view call" + ) + output_type = OutputType.non_alias + elif is_input_tensor: + output_type = OutputType.is_input + else: + output_type = OutputType.alias_of_input + + # We only need to handle the intermediate base case when both + # the intermediate base and the output require gradients. + # See Note [AOT Autograd: outputs aliasing inputs or intermediates!] + elif o._base is not None and o.requires_grad and o._base.requires_grad: + num_aliased_outs = out_tensor_alias_counts[curr_storage] + num_multi_output_view_outs = ( + num_aliased_tensors_that_are_multi_output_views[curr_storage] + ) + num_aliased_outs_that_are_not_multi_output_views = ( + num_aliased_outs - num_multi_output_view_outs + ) + # Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call] + if ( + out_tensor_alias_counts[curr_storage] == 1 + or num_aliased_outs_that_are_not_multi_output_views <= 1 + ): + # Note [Intermediate Bases Optimization] + # Normally if we have an output that aliases an intermediate, + # we need to add the extra "intermediate base" logic further down + # to prevent autograd from yelling at us if the user later tries to + # mutate that output. + # However, the common case here is if we have an output that aliases an intermediate, + # but doesn't alias any other outputs. + # In that case, autograd shouldn't have to worry about the aliasing at all + # (if that output is mutated, there are no other live aliases for autograd to worry about). + # The "intermediate bases" can hurt inductor perf by forcing more variables to become outputs. 
+ # So as an optimization, we won't do intermediate base handling in this case. + # Instead, we'll hide the aliasing from autograd using aten._unsafe_view(). + if ( + out_tensor_alias_counts[curr_storage] != 1 + and num_aliased_outs_that_are_not_multi_output_views <= 1 + ): + log.debug( + "Encountered AOTAutograd case: differentiable outputs that alias each other \ +from a multi-output view call" + ) + output_type = OutputType.unsafe_view_alias + base_idx = None + else: + # First, check if o's ._base is an existing output + maybe_existing_out_idx = out_tensor_ids.get(id(o._base), None) + if maybe_existing_out_idx is not None: + # Special case where the output is an alias of a graph intermediate, but that intermediate + # is itself also a user output. + output_type = ( + OutputType.alias_of_intermediate_base_is_user_output + ) + base_idx = maybe_existing_out_idx + else: + # Next, check if o's ._base is an intermediate base that we already returned + maybe_existing_base_output_idx = ( + intermediate_base_tensor_id_to_output_idx.get( + id(o._base), None + ) + ) + if maybe_existing_base_output_idx is not None: + output_type = OutputType.alias_of_intermediate + base_idx = maybe_existing_base_output_idx + else: + # Otherwise, take o._base and explicitly return it as an output in the compiled graph + new_out_idx = len(intermediate_bases) + base_idx = new_out_idx + # Indicate to the logic later on (when we trace the joint) + # that this particular output should get it's ._base appended to the forward graph outputs + output_type = ( + OutputType.alias_of_intermediate_save_as_output + ) + intermediate_base_tensor_id_to_output_idx[ + id(o._base) + ] = new_out_idx + intermediate_bases.append(o._base) + elif ( + # See https://github.com/pytorch/pytorch/issues/100348 for this case. + # This protects against the specific case where a user fn returns (output, output.detach()) + out_tensor_alias_counts[curr_storage] > 1 + and len(outs_with_identical_metadata_that_require_grad) > 0 + and not o.requires_grad + ): + assert len(outs_with_identical_metadata_that_require_grad) > 0 + # In theory we could use any of these tensors to regenerate the aliased outputs from, + # since they all alias each other and have identical metatadata + out_alias = outs_with_identical_metadata_that_require_grad[0] + existing_out_idx = out_tensor_ids[id(out_alias)] + output_type = OutputType.alias_of_intermediate_base_is_user_output + base_idx = existing_out_idx + else: + output_type = OutputType.non_alias + base_idx = None + + if isinstance(o, torch.Tensor): + dynamic_dims = { + i for i, s in enumerate(o.shape) if not is_concrete_int(s) + } + else: + dynamic_dims = None + out_info = OutputAliasInfo( + output_type=output_type, + raw_type=type(o), + base_idx=base_idx, + dynamic_dims=dynamic_dims, + requires_grad=isinstance(o, torch.Tensor) and o.requires_grad, + ) + output_info.append(out_info) + + # See Note [AOT Autograd: Views to avoid tangents aliasing inputs] + def view_avoid_dupes_with_primals(t): + if isinstance(t, Tensor) and is_traceable_wrapper_subclass(t): + return transform_subclass( + t, lambda _, inner_t: view_avoid_dupes_with_primals(inner_t) + ) + if isinstance(t, Tensor): + return t.view(t.shape) + return t + + # This analysis function returns *only* the outputs that are meant to be tangents to the backwards. 
+ # Anything that aliases (inputs returned in the fw due to metadata mutations, or outputs that alias inputs/intermediates) + # are *regenerated* later, and not used directly in the autograd graph + f_input_tangents = [ + inp + for inp, info in zip(flat_f_args, input_info) + if info.mutation_type == MutationType.MUTATED_OUT_GRAPH + and info.mutates_data + and info.requires_grad + ] + f_output_tangents = [ + o + for o, info in zip(flat_f_outs, output_info) + if info.output_type + in [ + OutputType.non_alias, + OutputType.unsafe_view_alias, + OutputType.custom_function_view, + ] + and issubclass(info.raw_type, torch.Tensor) + and info.requires_grad + ] + # intermediate bases are also included in the backward graph + f_tangents = f_input_tangents + f_output_tangents + intermediate_bases + traced_tangents = pytree.tree_map(from_fun, f_tangents) + traced_tangents = pytree.tree_map( + view_avoid_dupes_with_primals, traced_tangents + ) + user_outs = pytree.tree_map(from_fun, f_output_tangents) + + f_mutated_inputs = [ + inp + for inp, info in zip(flat_f_args, input_info) + if info.mutation_type == MutationType.MUTATED_OUT_GRAPH + ] + f_metadata_mutated_inputs = [ + inp for inp, info in zip(flat_f_args, input_info) if info.mutates_metadata + ] + # This logic (annoyingly) re-figures out exactly what the outputs to the compiled fw graph will be. + # When handling subclasses, we need info about **all** outputs of compiled forward graph, + # so we know precisely which graph outputs to wrap back into tensor subclasses + # Ideally we would refactor this so not have an is_train flag, and have the separate + # inference and training paths decide which inputs/output to ask for subclass info on. + # However, we currently stash indexing information on each SubclassMeta about its order + # in the graph outputs list. + f_fw_graph_outs = list(flat_f_outs) + if is_train or not keep_input_mutations: + f_fw_graph_outs = f_mutated_inputs + f_fw_graph_outs + else: + # even when "keep_input_mutations" is True, + # we never keep metadata-only mutations in the fw graph + f_fw_graph_outs = f_metadata_mutated_inputs + f_fw_graph_outs + if is_train: + f_fw_graph_outs = f_fw_graph_outs + intermediate_bases + fw_graph_outs = pytree.tree_map(from_fun, f_fw_graph_outs) + + grad_enabled_mutation = None + if torch.is_grad_enabled() != prior_grad_enabled: + grad_enabled_mutation = torch.is_grad_enabled() + torch.set_grad_enabled( + prior_grad_enabled + ) # Restore the prior state after tracing it + log.debug( + ( + "grad_mode mutation encountered in graph. 
" + "Will emit mutation epilogue, to set grad_mode=%s" + ), + grad_enabled_mutation, + ) + + metadata = ViewAndMutationMeta( + input_info=input_info, + output_info=output_info, + num_intermediate_bases=len(intermediate_bases), + keep_input_mutations=keep_input_mutations, + traced_tangents=traced_tangents, + subclass_inp_meta=create_subclass_meta(flat_args), + subclass_fw_graph_out_meta=create_subclass_meta(fw_graph_outs), + subclass_tangent_meta=create_subclass_meta(traced_tangents), + is_train=is_train, + grad_enabled_mutation=grad_enabled_mutation, + tokens=mode._tokens, + ) + return metadata + + return inner diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..9b2ba2e6aee07fd00e2452fa960bb3dd84610268 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py @@ -0,0 +1,192 @@ +""" +This module dispatches the graphs to either the forward-only or joint compilation +pathways, taking into account the AOTConfig and the collected ViewAndMutationMetadata. +""" + +from typing import Any, Callable, List, Optional, Tuple, Union + +import torch +import torch.utils._pytree as pytree +import torch.utils.dlpack +from torch import Tensor +from torch._dispatch.python import enable_python_dispatcher +from torch._dynamo.utils import lazy_format_graph_code +from torch._logging import getArtifactLogger, trace_structured +from torch._subclasses.functional_tensor import FunctionalTensorMode +from torch.fx.experimental.proxy_tensor import make_fx + +from .functional_utils import ( + assert_functional_graph, + propagate_input_mutation_stacktraces, +) +from .schemas import AOTConfig, SubclassMeta, ViewAndMutationMeta +from .traced_function_transforms import ( + aot_dispatch_subclass, + create_functionalized_fn, + create_joint, + fn_input_mutations_to_outputs, + fn_prepped_for_autograd, +) + +aot_graphs_log = getArtifactLogger(__name__, "aot_graphs") + + +def _create_graph(f, args, *, aot_config: AOTConfig) -> torch.fx.GraphModule: + # FunctionalTensorMode must be enabled here. + # See Note [Accessing .grad_fn on FunctionalTensor] + with enable_python_dispatcher(), FunctionalTensorMode( + pre_dispatch=aot_config.pre_dispatch, export=aot_config.is_export + ): + fx_g = make_fx( + f, + decomposition_table=aot_config.decompositions, + record_module_stack=True, + pre_dispatch=aot_config.pre_dispatch, + )(*args) + + return fx_g + + +def aot_dispatch_base_graph( + flat_fn, + flat_args: List[Tensor], + aot_config: AOTConfig, + *, + fw_metadata: ViewAndMutationMeta, +) -> Union[Callable, Tuple[Callable, List[Any], Optional[SubclassMeta]]]: + # aot_dispatch_base requires functionalization, but doesn't need to handle as many cases as the autograd case. 
+ # The cases that aot_dispatch_base doesn't need to handle include: + # - outputs that are aliases of graph intermediates + # - outputs that are aliases of graph inputs + # While cases that it does need to handle include: + # - input mutations (including when inputs are aliases of each other) + # - input metadata mutations + fn_to_trace = fn_input_mutations_to_outputs( + flat_fn, + fw_metadata, + keep_data_input_mutations=aot_config.keep_inference_input_mutations, + ) + + fn_to_trace, updated_flat_args = create_functionalized_fn( + fn_to_trace, + flat_args, + meta=fw_metadata, + aot_config=aot_config, + trace_joint=False, + ) + + ( + fn_to_trace, + updated_flat_args_subclasses_desugared, + maybe_subclass_meta, + ) = aot_dispatch_subclass( + fn_to_trace, + updated_flat_args, + is_joint_structure=False, + meta=fw_metadata, + fw_only=flat_fn, + ) + + fw_module = _create_graph( + fn_to_trace, + updated_flat_args_subclasses_desugared, + aot_config=aot_config, + ) + + # As long as we opted to remove input mutations, then + # there should be *NO* mutating ops in the graph at this point. + copy_count = assert_functional_graph(fw_module.graph) + + fw_module.graph.eliminate_dead_code() + fw_module.recompile() + + copy_count2 = assert_functional_graph(fw_module.graph) + propagate_input_mutation_stacktraces(fw_module.graph) + + assert copy_count == copy_count2 + + if aot_config.enable_log: + aot_graphs_log.info( + "%s", lazy_format_graph_code("Forward graph", fw_module, aot_config.aot_id) + ) + trace_structured( + "aot_forward_graph", + payload_fn=lambda: fw_module.print_readable(print_output=False), + ) + + # TODO: should factor this into a separate function for export that always only returns just the graph. + if aot_config.is_export: + assert ( + maybe_subclass_meta is None + ), "aot_export_module does not support tensor subclass inputs for now." + return fw_module + return fw_module, list(updated_flat_args_subclasses_desugared), maybe_subclass_meta + + +# Has the precondition that there +# are no duplicate arguments in flat_args (e.g., the same Tensor +# object never shows up twice. However, two tensor inputs MAY alias +# the same storage, so long as they have separate TensorImpls.) +def aot_dispatch_autograd_graph( + flat_fn, + flat_args: List[Any], + aot_config: AOTConfig, + *, + fw_metadata: ViewAndMutationMeta, +) -> Union[Callable, Tuple[Callable, List[Any], Optional[SubclassMeta]]]: + # traced_tangents corresponds to the set of outputs in the traced forward that should get grad_outputs in the traced backward. + # It includes outputs of the original forward, *and* any updated inputs due to input mutations. + # However, it does *not* include any outputs that are aliases of inputs or intermediates, or any metadata-only input mutations. 
+ traced_tangents = pytree.tree_map( + lambda x: x.detach().contiguous() if isinstance(x, Tensor) else x, + fw_metadata.traced_tangents, + ) + + joint_inputs = (flat_args, traced_tangents) + + fn_prepared_for_autograd = fn_prepped_for_autograd( + flat_fn, + fw_metadata, + ) + joint_fn_to_trace = create_joint(fn_prepared_for_autograd, aot_config=aot_config) + + joint_fn_to_trace, updated_joint_inputs = create_functionalized_fn( + joint_fn_to_trace, + joint_inputs, + meta=fw_metadata, + aot_config=aot_config, + trace_joint=True, + ) + + subclass_tracing_info = aot_dispatch_subclass( + joint_fn_to_trace, + updated_joint_inputs, + is_joint_structure=True, + meta=fw_metadata, + fw_only=flat_fn, + ) + + joint_fn_to_trace = subclass_tracing_info.plain_tensor_trace_fn + updated_joint_inputs = subclass_tracing_info.plain_tensor_args + maybe_subclass_meta = subclass_tracing_info.maybe_subclass_meta + + fx_g = _create_graph(joint_fn_to_trace, updated_joint_inputs, aot_config=aot_config) + + # There should be *NO* mutating ops in the graph at this point. + assert_functional_graph(fx_g.graph) + + # Redundant with the check above, but worth having in case tracing introduced + # a fake tensor. Unlikely. + # See Note: [Fake Modules and AOTAutograd] + torch._dynamo.utils.assert_no_fake_params_or_buffers(fx_g) + fx_g.graph.eliminate_dead_code() + fx_g.recompile() + # TODO: in AOTAutograd, we create metadata like _indices_of_inps_to_detach to detect + # when we need to manually detach() some inputs in the forward. + # Higher order ops might eventually need to do the same. + if aot_config.is_export: + assert ( + maybe_subclass_meta is None + ), "aot_export_module does not support tensor subclass inputs for now." + return fx_g + return fx_g, updated_joint_inputs, maybe_subclass_meta diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/functional_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/functional_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ddeaa0bae7c51b4e794e8f42ef31e557ed911953 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/functional_utils.py @@ -0,0 +1,370 @@ +""" +This file contains utilities related to functionalization in AOTAutograd: +1. converting to/from functional tensors +2. detecting Tensor mutations - both metadata and Tensor value +3. regenerating/replaying views from their base +4. checking if a graph is functional i.e. whether it contains any mutation ops +""" + +import torch +from torch import Tensor +from torch._subclasses.fake_tensor import FakeTensor +from torch._subclasses.functional_tensor import FunctionalTensor +from torch.fx.experimental.symbolic_shapes import definitely_true, sym_eq +from torch.multiprocessing.reductions import StorageWeakRef +from torch.utils._python_dispatch import ( + is_traceable_wrapper_subclass, + transform_subclass, +) + + +def to_fun(t): + if isinstance(t, Tensor): + if is_traceable_wrapper_subclass(t): + # See Note [Functionalization always runs last] + # This means that if we want to "functionalize" a subclass, we need to ensure that the functional wrapper + # goes at the bottom. 
+ # recurse here, so we can support nested wrapper subclasses + out = transform_subclass(t, lambda _, inner_t: to_fun(inner_t)) + torch._mirror_autograd_meta_to(t, out) # type: ignore[attr-defined] + return out + else: + return FunctionalTensor.to_functional(t) + else: + return t + + +def sync_functional_tensor(t): + if is_traceable_wrapper_subclass(t): + attrs, ctx = t.__tensor_flatten__() # type: ignore[attr-defined] + for attr in attrs: + sync_functional_tensor(getattr(t, attr)) + else: + torch._sync(t) + + +# When subclasses are involved, t here will usually look something like: +# SubclassA(SubclassB(FunctionalTensor(_to_fun_tensor(FakeTensor)))) +def from_fun(t): + if isinstance(t, Tensor) and is_traceable_wrapper_subclass(t): + # See Note [Functionalization always runs last] + # This means that if we want to "functionalize" a subclass, we need to ensure that the functional wrapper + # goes at the bottom. + # recurse here, so we can support nested wrapper subclasses + out = transform_subclass(t, lambda _, inner_t: from_fun(inner_t)) + torch._mirror_autograd_meta_to(t, out) # type: ignore[attr-defined] + return out + + if not isinstance(t, FunctionalTensor): + # quick sanity assert + if isinstance(t, torch.Tensor): + assert not torch._is_functional_tensor(t) # type: ignore[attr-defined] + return t + sync_functional_tensor(t) + return torch._from_functional_tensor(t.elem) + + +def is_fun(t): + if isinstance(t, Tensor) and is_traceable_wrapper_subclass(t): + # See Note [Functionalization always runs last] + # This means that if we want to "functionalize" a subclass, we need to ensure that the functional wrapper + # goes at the bottom. + # recurse here, so we can support nested wrapper subclasses + t_attrs, _ = t.__tensor_flatten__() # type: ignore[attr-defined] + t_inners = [getattr(t, attr) for attr in t_attrs] + any_fun = any(is_fun(x) for x in t_inners) + all_fun = all(is_fun(x) for x in t_inners) + assert any_fun == all_fun + return any_fun + + return isinstance(t, FunctionalTensor) + + +# t here is either +# (1) A FunctionalTensor(_to_functional_tensor(FakeTensor)) +# (2) A traceable tensor subclass that holds a FunctionalTensor +# (3) Not a tensor +def has_data_mutation(t): + if is_traceable_wrapper_subclass(t): + attrs, _ = t.__tensor_flatten__() + # A tensor subclass was updated if any of its inner elements were updated + return any(has_data_mutation(getattr(t, attr)) for attr in attrs) + else: + if isinstance(t, torch.Tensor): + assert isinstance(t, FunctionalTensor) + return torch._functionalize_has_data_mutation(t.elem) # type: ignore[attr-defined] + return False + + +def are_all_mutations_hidden_from_autograd(t): + if is_traceable_wrapper_subclass(t): + attrs, _ = t.__tensor_flatten__() + # If all inner elements are mutations hidden from autograd, then it is a mutation hidden from autograd. 
+        return all(
+            are_all_mutations_hidden_from_autograd(getattr(t, attr)) for attr in attrs
+        )
+    elif isinstance(t, torch.Tensor):
+        assert isinstance(t, FunctionalTensor)
+        return torch._functionalize_are_all_mutations_hidden_from_autograd(t.elem)
+    else:
+        return False
+
+
+def are_all_mutations_under_no_grad_or_inference_mode(t):
+    if is_traceable_wrapper_subclass(t):
+        attrs, _ = t.__tensor_flatten__()
+        return all(
+            are_all_mutations_under_no_grad_or_inference_mode(getattr(t, attr))
+            for attr in attrs
+        )
+    else:
+        assert isinstance(t, FunctionalTensor)
+        return torch._functionalize_are_all_mutations_under_no_grad_or_inference_mode(
+            t.elem
+        )
+
+
+# f_arg here is either
+# (1) A FunctionalTensor(_to_functional_tensor(FakeTensor))
+# (2) A traceable tensor subclass that holds a FunctionalTensor
+# (3) Not a tensor
+# Assumption: arg promises to be the "original" tensor wrapped by f_arg
+# Note: "storage mutations" coming from set_() are a type of metadata mutation. So:
+# - check_only_storage_mutation=True: only return true if there was a storage mutation
+# - check_only_storage_mutation=False: return true if there was any metadata mutation (including a storage mutation)
+def has_metadata_mutation(f_arg, arg, *, check_only_storage_mutation: bool):
+    if is_traceable_wrapper_subclass(f_arg):
+        attrs, _ = f_arg.__tensor_flatten__()
+        # A tensor subclass was updated if any of its inner elements were updated
+        f_inner_ts = [getattr(f_arg, attr) for attr in attrs]
+        inner_ts = [getattr(arg, attr) for attr in attrs]
+        return any(
+            has_metadata_mutation(
+                f_inner_t,
+                inner_t,
+                check_only_storage_mutation=check_only_storage_mutation,
+            )
+            for f_inner_t, inner_t in zip(f_inner_ts, inner_ts)
+        )
+    else:
+        if not isinstance(f_arg, torch.Tensor):
+            assert not isinstance(arg, torch.Tensor)
+            return False
+        assert isinstance(f_arg, FunctionalTensor)
+        assert isinstance(arg, FakeTensor)
+
+        arg_after = torch._from_functional_tensor(f_arg.elem)
+        # This is true if the current tensor experienced at least one set_() call
+        maybe_storage_changed = torch._functionalize_was_storage_changed(f_arg.elem)  # type: ignore[attr-defined]
+        # However, multiple set_() calls can cancel out. So we also check whether the
+        # storage of the tensor has changed.
+        # Note: if an input experienced two set_() calls that cancel out, **and**
+        # it experiences a data mutation, we pessimistically think that the set_()
+        # call is necessary here. We could in theory fix this, but this will
+        # hopefully never happen in user code, and is not needed for fsdp.
+        same_storages = StorageWeakRef(arg.untyped_storage()) == StorageWeakRef(
+            arg_after.untyped_storage()
+        )
+        has_storage_metadata_mutation = maybe_storage_changed and not same_storages
+        if check_only_storage_mutation:
+            return has_storage_metadata_mutation
+
+        # storage metadata mutation is a type of metadata mutation, so return true if we saw one
+        if has_storage_metadata_mutation:
+            return True
+
+        maybe_metadata_mutated = torch._functionalize_has_metadata_mutation(f_arg.elem)  # type: ignore[attr-defined]
+        # This is true if the current tensor experienced at least one metadata mutation.
+        # So if false, we know there was no metadata mutation
+        if not maybe_metadata_mutated:
+            return False
+
+        # However, multiple metadata mutations can cancel out.
+        # So we also check if the concrete sizes/strides on the tensor have changed.
+ same_sizes = arg.shape == arg_after.shape + same_strides = arg.stride() == arg_after.stride() + same_offsets = arg.storage_offset() == arg_after.storage_offset() + has_metadata_mutation_ = maybe_metadata_mutated and not ( + same_sizes and same_strides and same_offsets + ) + # We consider a tensor to have been metadata mutated if its storage was mutated through a set_() call. + return has_metadata_mutation_ + + +def gen_alias_from_base(aliased_base_tensor, target_meta_tensor, target_requires_grad): + # Try to do view-replay if possible. + # fall back to .as_strided() if we can't. + if target_meta_tensor._base is not None: + # The base that we want to replay our view off of might have a different shape than the view's original base. + b = target_meta_tensor._base + abt = aliased_base_tensor + # Don't unnecessarily call as_strided if nothing changed; as_strided's + # backward is poorly implemented and slow + if abt is not b and ( + abt.size() != b.size() + or abt.stride() != b.stride() + or abt.storage_offset() != b.storage_offset() + ): + reshaped_base_tensor = aliased_base_tensor.as_strided( + b.size(), b.stride(), b.storage_offset() + ) + else: + reshaped_base_tensor = aliased_base_tensor + out = target_meta_tensor._view_func(reshaped_base_tensor) + # This shape mismatch can happen due to a bug in inplace/view handling in autograd. + # Try putting a breakpoint here and running + # `test/functorch/test_aotdispatch TestAOTAutograd.test_output_all_alias_types` + # Also, https://github.com/pytorch/pytorch/issues/49825 + # + # As a stopgap, we'll fall back to as_strided. + if out is not None and out.shape == target_meta_tensor.shape: + if aliased_base_tensor.requires_grad and not target_requires_grad: + out = out.detach() + elif not aliased_base_tensor.requires_grad and target_requires_grad: + out.requires_grad_(True) + return out + size = target_meta_tensor.size() + stride = target_meta_tensor.stride() + storage_offset = target_meta_tensor.storage_offset() + if aliased_base_tensor.is_complex() and not target_meta_tensor.is_complex(): + aliased_out = torch.view_as_real(aliased_base_tensor).as_strided( + size, stride, storage_offset + ) + elif not aliased_base_tensor.is_complex() and target_meta_tensor.is_complex(): + aliased_out = torch.view_as_complex(aliased_base_tensor).as_strided( + size, stride, storage_offset + ) + else: + aliased_out = aliased_base_tensor.as_strided(size, stride, storage_offset) + # For outputs aliasing inputs, we need to check if the requires-gradness has changed. + if aliased_base_tensor.requires_grad and not target_requires_grad: + aliased_out = aliased_out.detach() + elif not aliased_base_tensor.requires_grad and target_requires_grad: + aliased_out.requires_grad_(True) + # For outputs aliasing inputs, we need to check if the dtype has changed. + # as_strided() is the "most generic" view, but it does not cover cross-dtype views + if aliased_out.dtype != target_meta_tensor.dtype: + aliased_out = aliased_out.view(target_meta_tensor.dtype) + return aliased_out + + +def has_same_metadata(t1, t2): + return ( + definitely_true(sym_eq(t1.size(), t2.size())) + and definitely_true(sym_eq(t1.stride(), t2.stride())) + and definitely_true(t1.storage_offset() == t2.storage_offset()) + and t1.is_conj() == t2.is_conj() + and t1.is_neg() == t2.is_neg() + ) + + +# new_arg and arg here are either: +# (1) both a FakeTensor +# (2) both a traceable tensor subclass that holds a FakeTensor +# Pre-condition: the two args are the "old" and "new" inputs from running functionalization. 
+# When we run functionalization and wrap our inputs into FunctionalTensors, +# we can detect whether or not an input was mutated by checking to see if the inner tensor has changed +# +# Normally it would be enough just to check if arg is new_arg, which is normally enough for functionalization +# to confirm that inputs were not mutated when running the user's model with functionalization on. +# But when we have subclass inputs, we can't rely on that: +# `from_fun(to_fun(x)) is x` will return False, because the call to `from_fun` constructs +# a brand new subclass instance: we are calling __tensor_unflatten__, and going +# from Subclass(FakeTensor) to Subclass(FunctionalTensor(FakeTensor)) +def was_tensor_updated(arg, new_arg): + if is_traceable_wrapper_subclass(arg): + assert is_traceable_wrapper_subclass(new_arg) + attrs, _ = arg.__tensor_flatten__() + new_attrs, _ = new_arg.__tensor_flatten__() + assert attrs == new_attrs + # A tensor subclass was updated if any of its inner elements were updated + return any( + was_tensor_updated(getattr(arg, attr), getattr(new_arg, attr)) + for attr in attrs + ) + else: + return arg is not new_arg + + +# new_arg and arg here are either: +# (1) both a FakeTensor +# (2) both a traceable tensor subclass that holds a FakeTensor +# Pre-condition: the two args are the "old" and "new" inputs from running functionalization. +# When we run functionalization and wrap our inputs into FunctionalTensors, +# we can detect whether or not an input was mutated by checking to see if the inner tensor has changed, +# but shares storage with the old input +def was_tensor_metadata_updated(arg, new_arg): + if is_traceable_wrapper_subclass(arg): + assert is_traceable_wrapper_subclass(new_arg) + attrs, _ = arg.__tensor_flatten__() + new_attrs, _ = new_arg.__tensor_flatten__() + assert attrs == new_attrs + # A tensor subclass was updated if any of its inner elements were updated + return any( + was_tensor_metadata_updated(getattr(arg, attr), getattr(new_arg, attr)) + for attr in attrs + ) + else: + return arg is not new_arg and StorageWeakRef( + arg.untyped_storage() + ) == StorageWeakRef(new_arg.untyped_storage()) + + +# Returns the number of detected copy_ +def assert_functional_graph(fx_g: torch.fx.Graph) -> int: + placeholders = set() + copy_count = 0 + # NB: It would also be nice to verify that the mutations all happen at the + # end, but we also do some administrative views after mutations so this + # isn't actually true. (TODO: Could this cause problems for Inductor?) 
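+ # Rough sketch of the kind of graph this accepts (hypothetical nodes, shown only
+ # for illustration): every op is functional except trailing copy_ nodes that write
+ # results back into placeholders, e.g.
+ #   %add = aten.add.Tensor(%arg0_1, 1)
+ #   %copy_ = aten.copy_.default(%arg0_1, %add)
+ # yields copy_count == 1, while any in-place op such as aten.add_.Tensor would trip
+ # the is_mutable assert below.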
+ for n in fx_g.nodes: + if n.op == "placeholder": + placeholders.add(n) + if isinstance(n.target, torch._ops.OpOverload): + if n.target is torch.ops.aten.copy_.default: + suffix = True + # Can only copy_ into an input, and can only do so once + assert n.args[0] in placeholders + placeholders.remove(n.args[0]) + copy_count += 1 + else: + assert ( + not n.target._schema.is_mutable + ), f"aot_autograd expected to have an entirely functional graph, but found {n.format_node()}" + return copy_count + + +def propagate_input_mutation_stacktraces(fx_g: torch.fx.Graph) -> None: + placeholders = set() + for n in fx_g.nodes: + if n.op == "placeholder": + placeholders.add(n) + if isinstance(n.target, torch._ops.OpOverload): + if n.target is torch.ops.aten.copy_.default: + # Can only copy_ into an input, and can only do so once + assert n.args[0] in placeholders + placeholders.remove(n.args[0]) + copy_from_node = n.args[1] + # Pre-condition: every node has a "stack_trace" field in its meta, + # but copy_() nodes do not (since we manually added them during functionalization). + # Instead, we manually propagate here. + if "stack_trace" in copy_from_node.meta: + assert "stack_trace" not in n.meta, str(n) + n.meta["stack_trace"] = copy_from_node.meta["stack_trace"] + + +def _check_if_mutation_can_be_in_graph( + keep_input_mutations: bool, + mutates_data, + mutates_metadata, + mutations_hidden_from_autograd, + mutations_under_no_grad_or_inference_mode, + requires_grad, +): + if keep_input_mutations: + return mutates_data and ( + (not mutates_metadata and not requires_grad) + or mutations_hidden_from_autograd + or mutations_under_no_grad_or_inference_mode + ) + return False diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..e2c995c7a12b55dedad1cedeeb5e4cc817ae65af --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py @@ -0,0 +1,936 @@ +""" +These are the runtime wrappers that are associated with JIT-compiling. + +This includes the forward-only and joint JIT runtime wrappers. + +This module depends heavily on the runtime wrapper building blocks defined +in `runtime_wrappers`. +""" + +import logging +from contextlib import nullcontext +from functools import wraps +from typing import Any, List, Optional + +import torch +import torch.utils.dlpack +from torch import Tensor +from torch._dynamo.utils import lazy_format_graph_code +from torch._guards import detect_fake_mode, tracing, TracingContext +from torch._logging import getArtifactLogger, trace_structured +from torch._prims_common import CUDARngStateHelper +from torch._subclasses import FakeTensor +from torch.fx.experimental._backward_state import BackwardState +from torch.fx.experimental.proxy_tensor import is_sym_node +from torch.fx.experimental.symbolic_shapes import fx_placeholder_vals +from .. 
import config +from .dispatch_and_compile_graph import ( + aot_dispatch_autograd_graph, + aot_dispatch_base_graph, +) +from .logging_utils import describe_input, format_guard_bug_msg, track_graph_compiling + +from .runtime_wrappers import ( + aot_dispatch_subclass_wrapper, + create_runtime_wrapper, + functionalized_rng_runtime_epilogue, +) +from .schemas import ( + AOTConfig, + MutationType, + OutputType, + SubclassMeta, + TensorAlias, + ViewAndMutationMeta, +) +from .subclass_utils import ( + compute_inner_mutated_inp_indices_from_subclass_meta, + unwrap_tensor_subclasses, + wrap_tensor_subclasses, +) + +from .utils import ( + _get_symint_hints, + call_func_at_runtime_with_args, + make_boxed_func, + normalize_as_list, + strict_zip, +) + +zip = strict_zip + +log = logging.getLogger(__name__) +aot_joint_log = getArtifactLogger(__name__, "aot_joint_graph") +aot_graphs_log = getArtifactLogger(__name__, "aot_graphs") + +aten = torch.ops.aten + + +def _compute_output_meta_with_inductor_strides(fw_module, fwd_output_strides): + out = [n.meta["val"] for n in (list(fw_module.graph.nodes)[-1].args[0])] + # will only be set for inductor + if not fwd_output_strides: + return out + with TracingContext.get().fake_mode.shape_env.suppress_guards(): + for i in range(len(out)): + if not isinstance(out[i], Tensor): + continue + if all(s1 == s2 for s1, s2 in zip(out[i].stride(), fwd_output_strides[i])): + continue + out[i] = out[i].as_strided(out[i].shape, fwd_output_strides[i]) + return out + + +def aot_dispatch_base( + flat_fn, + flat_args: List[Tensor], + aot_config: AOTConfig, + *, + fw_metadata: ViewAndMutationMeta, +): + fw_module, updated_flat_args, maybe_subclass_meta = aot_dispatch_base_graph( # type: ignore[misc] + flat_fn, flat_args, aot_config, fw_metadata=fw_metadata + ) + + disable_amp = torch._C._is_any_autocast_enabled() + context = torch._C._DisableAutocast if disable_amp else nullcontext + fakified_out = None + + with context(), track_graph_compiling(aot_config, "inference"): + compiler = ( + aot_config.inference_compiler + if aot_config.inference_compiler is not None + else aot_config.fw_compiler + ) + if config.functionalize_rng_ops: + # Add the seed and offset as example inputs to pass to the compiler + fake_mode = detect_fake_mode() + seed, offset = CUDARngStateHelper.get_torch_state_as_tuple(fake_mode) + updated_flat_args.extend([seed, offset]) + + if tracing_context := torch._guards.TracingContext.try_get(): + tracing_context.fw_metadata = ( + fw_metadata + if maybe_subclass_meta is None + else maybe_subclass_meta.fw_metadata + ) + + with TracingContext.report_output_strides() as fwd_output_strides: + compiled_fw = compiler(fw_module, updated_flat_args) + + # see note: [Returning Fake Tensors on First AOT Autograd Call] + if tracing_context and tracing_context.fakify_first_call: + fakified_out = _compute_output_meta_with_inductor_strides( + fw_module, fwd_output_strides + ) + + # However, create_runtime_wrapper does not expect the rng offsets in the + # output. So, we have to create another wrapper and take out the offset. As + # a result, we have to account for not boxed_call compilers as well. 
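+ # (Reminder of the boxed calling convention assumed below, sketched from the
+ # helper's intent: a boxed callable takes a single list of args instead of
+ # unpacked positional tensors, roughly `def boxed(args): return fn(*args)`, and
+ # advertises this via a `_boxed_call` attribute; make_boxed_func wraps a
+ # positional-arg callable that way.)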
+ if not hasattr(compiled_fw, "_boxed_call"): + compiled_fw = make_boxed_func(compiled_fw) + + # Create a wrapper to set up the rng functionalize bits + @wraps(compiled_fw) + def rng_functionalization_wrapper(args): + # see note: [Returning Fake Tensors on First AOT Autograd Call] + nonlocal fakified_out + if fakified_out is not None: + out = fakified_out + fakified_out = None + return out + + # args is a list because compiled_fw is boxed_call + if fw_metadata.is_rng_op_functionalized: + # Add the seed and offset to args + seed, offset = CUDARngStateHelper.get_torch_state_as_tuple() + args.extend([seed, offset]) + out = compiled_fw(args) + out = functionalized_rng_runtime_epilogue(fw_metadata, out) + return out + else: + return compiled_fw(args) + + if maybe_subclass_meta is not None: + compiled_fw_func = aot_dispatch_subclass_wrapper( + rng_functionalization_wrapper, + subclass_metas=fw_metadata.subclass_fw_graph_out_meta, + num_fw_outs_saved_for_bw=None, + ) + else: + compiled_fw_func = rng_functionalization_wrapper + + if not hasattr(compiled_fw_func, "_boxed_call"): + compiled_fw_func = make_boxed_func(compiled_fw_func) + + compiled_fn = create_runtime_wrapper( + compiled_fw_func, + runtime_metadata=fw_metadata, + indices_of_inps_to_detach=[], + trace_joint=False, + keep_input_mutations=aot_config.keep_inference_input_mutations, + disable_amp=disable_amp, + ) + + return compiled_fn + + +def aot_dispatch_autograd( + flat_fn, + flat_args: List[Any], + aot_config: AOTConfig, + *, + fw_metadata: ViewAndMutationMeta, +): + fw_metadata.deterministic = torch.are_deterministic_algorithms_enabled() + fx_g, joint_inputs, maybe_subclass_meta = aot_dispatch_autograd_graph( # type: ignore[misc] + flat_fn, flat_args, aot_config, fw_metadata=fw_metadata + ) + + # Copied from aot_dispatch_autograd_graph. 
+ disable_amp = torch._C._is_any_autocast_enabled() + + if aot_config.enable_log: + aot_joint_log.info( + "%s", lazy_format_graph_code("Joint graph", fx_g, aot_config.aot_id) + ) + trace_structured( + "aot_joint_graph", + payload_fn=lambda: fx_g.print_readable(print_output=False), # type: ignore[union-attr] + ) + + fakify_first_call = False + fakified_out = None + + with torch.no_grad(): + inner_meta = ( + fw_metadata + if maybe_subclass_meta is None + else maybe_subclass_meta.fw_metadata + ) + with track_graph_compiling(aot_config, "joint"): + # See Note: [Partitioner handling for Subclasses, Part 1] + # See Note: [Recomputing subclass mutation handling] + mutated_inp_runtime_indices = ( + compute_inner_mutated_inp_indices_from_subclass_meta( + fw_metadata, inner_meta + ) + ) + num_mutated_inp_runtime_indices = len(mutated_inp_runtime_indices) + num_inner_fwd_outputs = ( + num_mutated_inp_runtime_indices + + inner_meta.num_outputs + + inner_meta.num_intermediate_bases + + inner_meta.num_outputs_rng_offset + + len( + fw_metadata.tokens + ) # See Note [Side-Effectful Tokens in AOTAutograd] + ) + fw_module, bw_module = aot_config.partition_fn( + fx_g, joint_inputs, num_fwd_outputs=num_inner_fwd_outputs + ) + + fw_outs = next(n for n in fw_module.graph.nodes if n.op == "output").args[0] + # we only need to bookkeep the symints that are saved for bw, not any symints + # the user forward might have returned in its own output + fw_outs_saved_for_bw = fw_outs[num_inner_fwd_outputs:] + num_fw_outs_saved_for_bw = len(fw_outs_saved_for_bw) + symint_outs_saved_for_bw = [ + n for n in fw_outs_saved_for_bw if is_sym_node(n) + ] + fw_metadata.num_symints_saved_for_bw = len(symint_outs_saved_for_bw) + inner_meta.num_symints_saved_for_bw = len(symint_outs_saved_for_bw) + _num_symints_saved_for_bw = len(symint_outs_saved_for_bw) + + # Note [Detaching inputs that never need gradients] + # See https://github.com/pytorch/pytorch/issues/97745 + # Suppose we have a function like this that we want to compile: + # + # def f(x, y): + # return torch.mul(x, y.detach()) + # + # What gradients should we compute for x and y? + # By default, AOTAutograd will compute a gradient for **every** input that requires gradients, + # and so we'll compute: + # x_grad_input = y + # y_grad_input = None + # Does this preserve the semantics of eager mode? + # Unfortunately, no. + # Doing the above will cause autograd to **continue** to backprop the autograd tape + # that was generated from constructing y. + # + # This is **different** from what would have happened in eager mode. + # In eager mode, if we backprop through the output of this function, autograd will only traverse + # the bit of the autograd tape corresponding to "x". + # In particular, if a user had previously backpropped through y's autograd tape, + # And then they try to backprop through the output of the above function, + # then we'll hit the dreaded "Trying to backward through the graph a second time" error. + # + # You might think: If autograd sees that a gradient is None, shouldn't it stop early, + # instead of continuing the backprop through the ancestors of that node in the graph? + # + # Autograd has two passes: + # (1) a first pass that traverses the autograd graph and figures out which nodes need to be executed + # (2) a second pass that actually goes ahead and executes each node when it becomes ready, + # propagating gradients + # By the time we're executing a node and we see that it produces a None, the set of nodes to execute + # is already locked-in. 
+ # + # The fix: instead, we can recognize statically that the graph we're compiling will never contribute + # gradients to y, and prevent autograd from trying to traverse y's autograd tape at all. + # We can do this by manually detach'ing y before sending it through the `CompiledFunction`. + # + # Note that this solution is not bulletproof. + # It's possible to construct a case where eager may or may not have tried to autograd through y, + # depending on the actual grad_outputs that were passed in during the backward. + # There is no easy fix for this: the simplest fix would be to run with `retain_graph=True`, + # allowing autograd to re-use the graph. + # + # An example of this case is: + # def f(x): + # return x.detach() * 2, x * 3 + # If we were to only backprop through outs[0] in eager, autograd would stop at the detach(): + # we shouldn't send a grad through x. + # But the custom autograd function doesn't know that: it will materialize zero grads for x * 3 + # and we will end up with a zero grad at x. + # If we later backprop through the second output, this will also require backprop'ing through x. + # Meaning we'll need to use `retain_graph=True` to be able to backprop through x the second time. + _indices_of_inps_to_detach = [] + bw_outs = next(n for n in bw_module.graph.nodes if n.op == "output").args[0] + + # TODO: we should apply the below "detach inputs if their gradients are statically known to be None" + # optimization even if we have subclass inputs/outputs (we do not handle this today). + # Computing which of our inputs get None gradients is a bit more complicated, + # if any of our inputs are subclasses. Why? + # (a) we need to make sure that we call .detach() on the input subclasses, since autograd sees subclasses. + # (b) The grad_outputs that we AOT computed in our backward graph are the desugared dense tensors, + # so we need to figure out which subclass fw inputs they map to. + if maybe_subclass_meta is None: + assert ( + len(bw_outs) + == len(fw_metadata.input_info) + inner_meta.num_outputs_rng_offset + ) + for i, (bw_out) in enumerate(bw_outs): + if bw_out is None: + _indices_of_inps_to_detach.append(i) + + if aot_config.enable_log: + aot_graphs_log.info( + "%s", + lazy_format_graph_code("Forward graph", fw_module, aot_config.aot_id), + ) + aot_graphs_log.info( + "%s", + lazy_format_graph_code("Backward graph", bw_module, aot_config.aot_id), + ) + trace_structured( + "aot_forward_graph", + payload_fn=lambda: fw_module.print_readable(print_output=False), + ) + trace_structured( + "aot_backward_graph", + payload_fn=lambda: bw_module.print_readable(print_output=False), + ) + + with track_graph_compiling(aot_config, "forward"): + # flat_args at this point might still be subclasses- + # make sure to pass the unwrapped fake tensors into the compiler!
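+ # (Illustrative example with a hypothetical subclass: if the user passed
+ # TwoTensor(a, b) as an input, the joint graph was traced over the dense fake
+ # tensors a and b, so joint_inputs[0] already holds [a, b] rather than the
+ # wrapper itself, which is what fw_compiler expects.)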
+ adjusted_flat_args = joint_inputs[0] + if config.functionalize_rng_ops: + # Update example inputs for the fw_compiler + fake_mode = detect_fake_mode() + seed, offset = CUDARngStateHelper.get_torch_state_as_tuple(fake_mode) + adjusted_flat_args.extend([seed, offset]) + # We are not clearing flat_args here because + # 1) There is a check in the debug compiler at the end + # 2) It does not matter as these are fake tensors + + if tracing_context := torch._guards.TracingContext.try_get(): + tracing_context.fw_metadata = inner_meta + + with TracingContext.report_output_strides() as fwd_output_strides: + compiled_fw_func = aot_config.fw_compiler(fw_module, adjusted_flat_args) + if not hasattr(compiled_fw_func, "_boxed_call"): + compiled_fw_func = make_boxed_func(compiled_fw_func) + + # see note: [Returning Fake Tensors on First AOT Autograd Call] + if tracing_context and tracing_context.fakify_first_call: + fakified_out = _compute_output_meta_with_inductor_strides( + fw_module, fwd_output_strides + ) + fakify_first_call = True + + if maybe_subclass_meta is not None: + # Why do we need to pass in num_fw_outs_saved_for_bw? + # See Note: [Partitioner handling for Subclasses, Part 2] + compiled_fw_func = aot_dispatch_subclass_wrapper( + compiled_fw_func, + subclass_metas=fw_metadata.subclass_fw_graph_out_meta, + num_fw_outs_saved_for_bw=num_fw_outs_saved_for_bw, + ) + if not hasattr(compiled_fw_func, "_boxed_call"): + compiled_fw_func = make_boxed_func(compiled_fw_func) + + # NB: It's important to compile backwards ahead of time, as this may + # add extra guards which we need to apply to the Dynamo cache at + # forwards + with track_graph_compiling(aot_config, "backward"): + placeholder_list = fx_placeholder_vals(bw_module) + + forward_saved_for_backwards_strides = None + if fwd_output_strides is not None: + forward_saved_for_backwards_strides = fwd_output_strides[ + inner_meta.tensors_saved_for_backwards_slice + ] + + # saved activations can have different stride to eager if + # the compiler does layout optimization. We should restride the + # tensor passed in for compiling the backward graph using the + # saved tensor's stride. + for i in range(len(placeholder_list)): + ph_arg = placeholder_list[i] + if not isinstance(ph_arg, torch.Tensor): + continue + + if forward_saved_for_backwards_strides is None: + continue + + real_stride = None + # Per all_args calling convention + j = i - len(symint_outs_saved_for_bw) + if 0 <= j < len(forward_saved_for_backwards_strides): + real_stride = forward_saved_for_backwards_strides[j] + if real_stride is None: + continue + + # Comparing ph_arg.stride() with real_stride directly may + # cause dynamic dimensions in ph_arg being specialized to static + # value. Using the hints to avoid that. + if _get_symint_hints(ph_arg.stride()) != real_stride: + # Note that here we use the stride of the real tensor to + # restride a FakeTensor. This does not cause trouble + # for dynamic shape since this code path only get + # executed if layout optimization is enabled. And we + # disable layout optimization for dynamic shape right + # now. + # + # A solution that decide stride order based on real + # tensor's stride and then apply that stride order to + # the FakeTensor does not work smoothly since some + # tensor's layout is not 'dense'. E.g. mixnet_l has a + # tensor with size [8, 64, 112, 112] and strides + # (2408448, 1, 21504, 192). The solution mentioned will + # decide a stride of (802816, 1, 7168, 64) for this + # tensor which is wrong. 
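+ # (Small illustrative case: a saved activation traced with size (2, 3) and
+ # contiguous stride (3, 1) might actually be produced by the compiled forward
+ # with stride (1, 2); restriding the placeholder here lets the backward be
+ # compiled against the layout it will really see at runtime.)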
+ placeholder_list[i] = ph_arg.as_strided(ph_arg.size(), real_stride) + + compiled_bw_func = None + if len(symint_outs_saved_for_bw): + context = torch._C._DisableAutocast if disable_amp else nullcontext + with context(): + try: + compiled_bw_func = aot_config.bw_compiler( + bw_module, placeholder_list + ) + except Exception: + log.warning( + "failed to eagerly compile backwards for dynamic, suppressing in case backwards not needed", + exc_info=True, + ) + # Compiled autograd will run the bw_module in the backward pass, + # so recompilation need happen anyway if the backward pass is ever + # called. + # + # The reason we do the GraphModule recompilation here is because + # the lazy recompilation will cause issue in the backward pass + # with compiled autograd. + # + # Do the _LazyGraphModule.force_recompile here rather than when + # bw_module is first generated by the partitioner because the bw_module.recompile + # may be called in some code path later and cause the _LazyGraphModule.forward + # becomes the lazy version again. One example is when dynamic shape is enabled + # upfront, the bw_compiler will be called above which can cause extra + # graph module recompilation on bw_module. + if torch._dynamo.compiled_autograd.compiled_autograd_enabled_count: + from torch.fx._lazy_graph_module import _LazyGraphModule + + _LazyGraphModule.force_recompile(bw_module) + + saved_context = TracingContext.try_get() + + backward_state_indices = [ + idx for idx, x in enumerate(flat_args) if isinstance(x, BackwardState) + ] + assert len(backward_state_indices) <= 1 + + class CompiledFunction(torch.autograd.Function): + compiled_fw = compiled_fw_func + compiled_bw = compiled_bw_func + metadata: ViewAndMutationMeta = fw_metadata # type: ignore[assignment] + maybe_subclass_metadata: Optional[SubclassMeta] = maybe_subclass_meta + num_symints_saved_for_bw = _num_symints_saved_for_bw + _compiled_autograd_should_lift = False + _fakify_first_call = fakify_first_call + + @staticmethod + def _compiled_autograd_key(ctx): + return (ctx._autograd_function_id, *ctx.symints) + + @staticmethod + def forward(ctx, *deduped_flat_tensor_args): + args = deduped_flat_tensor_args + if backward_state_indices: + bw_state = args[backward_state_indices[0]] + assert isinstance(bw_state, BackwardState) + ctx._compiled_autograd_backward_state = bw_state + + marked_dirty_inps = [] + for i in fw_metadata.mutated_graph_handled_indices_seen_by_autograd: + arg = deduped_flat_tensor_args[i] + if not (arg.requires_grad and arg.is_leaf): # would error + ctx.mark_dirty(arg) + marked_dirty_inps.append(arg) + + if not CompiledFunction._fakify_first_call: + if CompiledFunction.metadata.is_rng_op_functionalized: + # Add the seed and offset to args + seed, offset = CUDARngStateHelper.get_torch_state_as_tuple() + args = (*args, seed, offset) + # There is a pretty complicated calling convention around what the compiled fw returns. 
+ # The full list of outputs and their relative order is: + # (*tokens, *mutated_inputs, *fw_outs, *fw_intermediate_bases, *saved_tensors, *saved_symints) + # - Note that in the synthetic bases case, mutated_inputs will correspond to an updated version + # of the original view, and not the synthetic base + + fw_outs = call_func_at_runtime_with_args( + CompiledFunction.compiled_fw, + args, + disable_amp=disable_amp, + ) + else: + nonlocal fakified_out + assert fakified_out is not None + CompiledFunction._fakify_first_call = False + fw_outs = fakified_out + fakified_out = None + + num_outputs = CompiledFunction.metadata.num_outputs + num_outputs_aliased = CompiledFunction.metadata.num_outputs_aliased + num_mutated_runtime_inps = ( + CompiledFunction.metadata.num_mutated_inp_runtime_indices + ) + num_tokens = len(CompiledFunction.metadata.tokens) + num_forward_returns = CompiledFunction.metadata.num_forward_returns + num_forward = CompiledFunction.metadata.num_forward + + # Partitioners must put symint arguments at the end separate from tensor arguments + tensors_saved_for_backwards = fw_outs[ + CompiledFunction.metadata.tensors_saved_for_backwards_slice + ] + assert all(isinstance(x, torch.Tensor) for x in tensors_saved_for_backwards) + # See Note [Detaching saved tensors in AOTAutograd] + ctx.save_for_backward( + *( + x.detach() if x._is_view() else x + for x in tensors_saved_for_backwards + ) + ) + symint_outs = fw_outs[ + CompiledFunction.metadata.symints_saved_for_backwards_slice + ] + assert all( + isinstance(x, (int, float, torch.SymInt, torch.SymFloat)) + for x in symint_outs + ), str([type(x) for x in symint_outs]) + ctx.symints = symint_outs + + raw_returns = fw_outs[0 : num_forward_returns + num_tokens] + + # Wrap all autograd.Function.forward() outputs that are aliases + # so that autograd.Function doesn't treat them as tensors + if num_mutated_runtime_inps > 0: + for i, idx in enumerate( + CompiledFunction.metadata.mutated_inp_runtime_indices + ): + # We could make this faster by only looping over inputs with metadata-only mutations + # (instead of looping over inputs with either data or metadata mutations), but there shouldn't be many. + info = CompiledFunction.metadata.input_info[idx] + if info.mutates_metadata and not info.mutates_data: + raw_returns[i] = TensorAlias(raw_returns[i]) + + if config.debug_assert: + user_mutated_inputs_raw = raw_returns[0:num_mutated_runtime_inps] + mut_inp_infos = [ + x + for x in CompiledFunction.metadata.input_info + if x.mutates_data or x.mutates_metadata + ] + assert len(user_mutated_inputs_raw) == len(mut_inp_infos) + + if CompiledFunction.metadata.num_unsafe_view_outputs > 0: + for idx in CompiledFunction.metadata.unsafe_view_out_indices: + raw_return_idx = num_mutated_runtime_inps + idx + o = raw_returns[raw_return_idx] + raw_returns[raw_return_idx] = torch.ops.aten._unsafe_view( + o, o.shape + ) + + if num_outputs_aliased > 0: + for idx in CompiledFunction.metadata.aliased_out_indices: + raw_return_idx = num_mutated_runtime_inps + idx + raw_returns[raw_return_idx] = TensorAlias( + raw_returns[raw_return_idx] + ) + + if config.debug_assert: + intermediates_raw = raw_returns[ + num_mutated_runtime_inps + num_outputs : + ] + assert not any( + isinstance(x, TensorAlias) for x in intermediates_raw + ) + + # invariant: intermediate bases always require gradients, so we don't have to + # consider marking them as non-differentiable. 
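+ # (Hypothetical example of the bookkeeping above, where x is an input requiring
+ # grad: for a forward returning (x.view(-1), x + 1, torch.zeros(3)), the view
+ # output is wrapped in TensorAlias, the zeros output is marked
+ # non-differentiable below, and only x + 1 participates in the backward.)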
+ raw_returns_not_including_intermediate_bases = raw_returns[ + : num_mutated_runtime_inps + num_outputs + ] + raw_returns_meta = [ + x + for x in CompiledFunction.metadata.input_info + if x.mutation_type == MutationType.MUTATED_OUT_GRAPH + ] + CompiledFunction.metadata.output_info + + fw_outs_not_requiring_grad = [ + x + for (i, x) in enumerate(raw_returns_not_including_intermediate_bases) + if isinstance(x, torch.Tensor) and not raw_returns_meta[i].requires_grad + ] + ctx.mark_non_differentiable(*fw_outs_not_requiring_grad) + ctx._materialize_non_diff_grads = False + + functionalized_rng_runtime_epilogue( + CompiledFunction.metadata, + fw_outs[num_forward_returns:num_forward], + return_new_outs=False, + ) + return tuple(raw_returns) + tuple(marked_dirty_inps) + + @staticmethod + def backward(ctx, *flat_args): + # Calling convention: we expect a grad_out passed to the backward: + # - for every output of the fw that does *not* alias an input or graph intermediate + # - for every updated_input generated by the fw that does *not* alias an input (aka only data-mutations) + # - for every graph intermediate that we need to use to generate an output later. + # The other outputs in the autograd.Function.forward that do *not* show up in the backward include: + # - outputs that alias inputs or graph intermediates + # - updated inputs due to metadata-only mutations. + # We need to return them in the forward, but ensure that they all do not get gradients in the backward, + # and we filter them out here before passing the remaining grad_outputs into the compiled backward. + num_intermediate_bases = CompiledFunction.metadata.num_intermediate_bases + num_graph_handled_inputs = ( + CompiledFunction.metadata.num_mutated_graph_handled_indices_seen_by_autograd + ) + num_mutated_runtime_inps = ( + CompiledFunction.metadata.num_mutated_inp_runtime_indices + ) + expected_grad_outs = ( + CompiledFunction.metadata.num_outputs + + num_mutated_runtime_inps + + num_intermediate_bases + ) + deterministic = CompiledFunction.metadata.deterministic + global_deterministic = torch.are_deterministic_algorithms_enabled() + if deterministic is not None: + torch._check( + not (not deterministic and global_deterministic), + lambda: ( + "This compiled backward function is being run with " + "torch.use_deterministic_algorithms(True), " + "but it was previously generated during the forward function while " + "torch.use_deterministic_algorithms(False) was set." 
+ ), + ) + + if num_graph_handled_inputs > 0: + flat_args = flat_args[:-num_graph_handled_inputs] + assert len(flat_args) == expected_grad_outs + out_info = CompiledFunction.metadata.output_info + + inp_tangents, out_tangents, intermediate_base_tangents = ( + flat_args[0:num_mutated_runtime_inps], + flat_args[ + num_mutated_runtime_inps : num_mutated_runtime_inps + + CompiledFunction.metadata.num_outputs + ], + flat_args[ + num_mutated_runtime_inps + CompiledFunction.metadata.num_outputs : + ], + ) + # input_info contains info on *every* input, + # But in the backward(), we are only given grad outputs for every mutated input + # We then need to filter out the grad outputs that correspond to metadata-only mutations or don't require grad + input_info = CompiledFunction.metadata.input_info + inp_tangents_filtered = [ + x + for x, info_idx in zip( + inp_tangents, CompiledFunction.metadata.mutated_inp_runtime_indices + ) + if input_info[info_idx].mutates_data + and input_info[info_idx].requires_grad + ] + # We also need to filter out grad outputs that correspond to outputs aliasing inputs/intermediates + out_tangents_filtered = [ + x + for x, info in zip(out_tangents, out_info) + if info.output_type + in [ + OutputType.non_alias, + OutputType.unsafe_view_alias, + OutputType.custom_function_view, + ] + and issubclass(info.raw_type, torch.Tensor) + and info.requires_grad + ] + # intermediate bases always require gradients, and always participate in the backward graph. + flat_bw_args_with_grads = [ + *inp_tangents_filtered, + *out_tangents_filtered, + *intermediate_base_tangents, + ] + num_flat_bw_args_with_grads = len(flat_bw_args_with_grads) + + # sanity asserts + # metadata_only_inps = [ + # x for x, info_idx in zip(inp_tangents, mutated_inp_indices) + # if not input_info[info_idx].mutates_data + # ] + # aliased_outputs = [ + # x for x, info in zip(out_tangents, out_info) if info.output_type != OutputType.non_alias] + # assert all(x is None for x in metadata_only_inps) + # assert all(x is None for x in aliased_outputs) + + rng_args = [] + if CompiledFunction.metadata.is_rng_op_functionalized: + # Add the seed and offset to args + rng_args = CUDARngStateHelper.get_torch_state_as_tuple() + + all_args = [ + *ctx.symints, + *ctx.saved_tensors, + *flat_bw_args_with_grads, + *rng_args, + ] + del flat_bw_args_with_grads + + tangents_start_idx = ( + len(all_args) - num_flat_bw_args_with_grads - len(rng_args) + ) + tangents_end_idx = len(all_args) - len(rng_args) + + # Note: [AOTAutograd Backward Guards] + # During AOTDispatch, we eagerly create and trace out a joint fw-bw graph. + # Doing so requires us to "guess" about some of the metadata of our grad_outputs. + # + # In particular: if an output to the forward is a plain tensor or a subclass, + # its corresponding grad_output in the backward **may or may not** be + # a plain tensor or a subclass. The main cases are: + # (1) If an output is a plain tensor, its grad_out will also be a plain tensor, + # *unless* the output is used in some subclass compute later in the forward graph, + # which will cause its grad_output to become a subclass + # (2) If an output is a subclass, its grad_out will also be a subclass, + # *unless* the output of the forward did not actually participate in the gradient computation, + # in which case autograd will insert a plain tensor of zeros for the grad_output. + # We could avoid this case with `torch.autograd.Function.set_materialize_grads`, + # although this is not turned on today in AOTAutgrad and would require more work. 
+ # + # Today, we make a guess on subclass-ness based on the above examples, + # and hard-error in the backward if we guessed wrong. + # + # In the future, we should add backward guards that would allow us to + # properly handle this case instead of erroring: we would need to retrace the backward graph, + # since we might produce an entirely different trace if our grad_outputs are subclass or not. + assert ( + len(CompiledFunction.metadata.output_types) + == num_flat_bw_args_with_grads + ) + grad_output_types = [ + type(x) for x in all_args[-num_flat_bw_args_with_grads:] + ] + # In general, we can add more asserts/guards here for when we partitioned + # with incorrect assumptions about the grad_outputs. + # Normalize FakeTensor -> torch.Tensor + # - during tracing our types are FakeTensor + # - at runtime in the backward our types are torch.Tensor... + # - unless we're running compiled backward, in which case they are also FakeTensor + grad_output_types_ = [ + torch.Tensor if x is FakeTensor else x for x in grad_output_types + ] + assert ( + grad_output_types_ == CompiledFunction.metadata.output_types + ), f"""\ +We incorrectly attempted to compile the backward with incorrect subclass metadata. +If you run into this error, please file an issue. +Expected grad_output types: {str(CompiledFunction.metadata.output_types)} +Got grad_output types: {str(grad_output_types)}""" + + # TODO: figure out how to refactor the backward properly so I can use aot_dispatch_subclass_wrapper() here. + if CompiledFunction.maybe_subclass_metadata is not None: + # Get the number of tangents after unwrapping + len_tangents = len( + unwrap_tensor_subclasses( + all_args[tangents_start_idx:tangents_end_idx], + is_joint_structure=False, + ) + ) + all_args = unwrap_tensor_subclasses(all_args, is_joint_structure=False) + tangents_start_idx = len(all_args) - len_tangents - len(rng_args) + tangents_end_idx = tangents_start_idx + len_tangents + + # Make the tangents contiguous. 
Note that we must do this after subclass desugaring + # because inputs to inductor have to be contiguous + all_args = [ + t.contiguous() + if ( + (tangents_start_idx <= i < tangents_end_idx) + and (not t.is_contiguous()) + ) + else t + for i, t in enumerate(all_args) + ] + + def call_compiled_backward(): + if ctx._is_compiled_autograd_tracing(): + # For compiled autograd, run raw FX graph so that it can be inlined into the larger graph + symints = ctx._get_compiled_autograd_symints() + assert len(symints) == len(ctx.symints) + all_args[: len(symints)] = symints + if backward_state_indices: + assert ctx._compiled_autograd_backward_state.proxy is not None + all_args.append(ctx._compiled_autograd_backward_state) + context = torch._C._DisableAutocast if disable_amp else nullcontext + with context(): + out = normalize_as_list(bw_module(*all_args)) + out = functionalized_rng_runtime_epilogue( + CompiledFunction.metadata, out + ) + return tuple(out) + assert ( + not backward_state_indices + ), "BackwardState requires CompiledAutograd" + ctx.maybe_clear_saved_tensors() + if CompiledFunction.compiled_bw is None: + context = torch._C._DisableAutocast if disable_amp else nullcontext + with tracing(saved_context), context(), track_graph_compiling( + aot_config, "backward" + ): + CompiledFunction.compiled_bw = aot_config.bw_compiler( + bw_module, placeholder_list + ) + + out = call_func_at_runtime_with_args( + CompiledFunction.compiled_bw, + all_args, + steal_args=True, + disable_amp=disable_amp, + ) + + out = functionalized_rng_runtime_epilogue( + CompiledFunction.metadata, out + ) + return tuple(out) + + if torch.is_grad_enabled() and any( + t.requires_grad for t in all_args if isinstance(t, torch.Tensor) + ): + # Ensure that the graph is connected, and error if double backward is performed. + # See comment for why once_differentiable is not sufficient: + # https://github.com/pytorch/pytorch/pull/92348/files#r1072962107 + class CompiledFunctionBackward(torch.autograd.Function): + # CompiledFunctionBackward is not yet supported in dynamo skipfiles + _compiled_autograd_should_lift = False + + @staticmethod + def forward(ctx, *unused_args): + outs = call_compiled_backward() + # TODO: figure out how to refactor the backward properly so I can use aot_dispatch_subclass_wrapper() here. + if CompiledFunction.maybe_subclass_metadata is not None: + assert ( + CompiledFunction.maybe_subclass_metadata.grad_input_metas + is not None + ) + outs_wrapped = wrap_tensor_subclasses( + outs, + subclass_metas=CompiledFunction.maybe_subclass_metadata.grad_input_metas, + ) + return outs_wrapped + return outs + + @staticmethod + def backward(ctx, *args): + raise RuntimeError( + "torch.compile with aot_autograd does not currently support double backward" + ) + + CompiledFunctionBackward._compiled_autograd_key = ( # type: ignore[method-assign] + CompiledFunction._compiled_autograd_key + ) + + # Pass args even though they're unused, so that the graph is built + out = CompiledFunctionBackward.apply(*all_args) + else: + out = call_compiled_backward() + + # TODO: figure out how to refactor the backward properly so I can use aot_dispatch_subclass_wrapper() here. 
+ if CompiledFunction.maybe_subclass_metadata is not None: + assert ( + CompiledFunction.maybe_subclass_metadata.grad_input_metas + is not None + ) + outs_wrapped = wrap_tensor_subclasses( + out, + subclass_metas=CompiledFunction.maybe_subclass_metadata.grad_input_metas, + ) + return outs_wrapped + return out + + compiled_function = create_runtime_wrapper( + CompiledFunction.apply, + runtime_metadata=fw_metadata, + indices_of_inps_to_detach=_indices_of_inps_to_detach, + trace_joint=True, + keep_input_mutations=aot_config.keep_inference_input_mutations, + disable_amp=disable_amp, + ) + + if not config.debug_assert: + return compiled_function + + flat_requires_grad = [ + a.requires_grad if isinstance(a, Tensor) else None for a in flat_args + ] + + @wraps(compiled_function) + def debug_compiled_function(*args): + # TODO: Check aliasing relationships + # TODO: Check strides for metadata mutation + # (NB: ideally, this logic is factored out of this function and + # you move these debug checks there) + + # Check requires grad. Bad case is when we compiled with + # requires_grad = False, but input requires_grad = True + # (vice versa is OK; we compute a gradient and then throw + # it away when it hits the input.) + for i, a in enumerate(args): + can_require_grad = flat_requires_grad[i] + if can_require_grad is None: + assert not isinstance(a, Tensor) + elif not can_require_grad: + assert not a.requires_grad, format_guard_bug_msg( + aot_config, + f"{describe_input(i, aot_config)} would not require grad", + ) + + return compiled_function(*args) + + return debug_compiled_function diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/runtime_wrappers.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/runtime_wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..acb03d232bdf902452d321d0d97e266b8cf45ad6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/runtime_wrappers.py @@ -0,0 +1,1021 @@ +""" +This module defines runtime wrappers, which, based on previous analysis attempts to: +1. process the inputs and outputs +2. apply mutations +3. handle functionalized randomness +4. deduplicate inputs and consolidate views into their bases (see input_output_analysis) +""" + +import collections +import pprint +from functools import wraps +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.utils.dlpack +from torch import Tensor +from torch._guards import DuplicateInputs, TracingContext +from torch._prims_common import CUDARngStateHelper +from torch.multiprocessing.reductions import StorageWeakRef +from .. 
import config +from .collect_metadata_analysis import run_functionalized_fw_and_collect_metadata + +from .functional_utils import gen_alias_from_base +from .input_output_analysis import ( + compute_overlapping_inputs, + create_synthetic_base_metadata, + remove_dupe_metadata, +) +from .logging_utils import describe_input, format_guard_bug_msg +from .schemas import ( + AOTConfig, + InputAliasInfo, + OutputType, + SubclassCreationMeta, + TensorAlias, + ViewAndMutationMeta, +) +from .subclass_utils import ( + requires_subclass_dispatch, + unwrap_tensor_subclasses, + wrap_tensor_subclasses, +) + +from .utils import ( + call_func_at_runtime_with_args, + make_boxed_func, + partial_flatten_asdict, + strict_zip, +) + + +zip = strict_zip + + +# The wrapper created by this function handles all of the runtime aliasing and mutation "epilogue" logic +# that needs to run after the compiled function. +# +# This function accepts a trace_joint flag, indicating whether or not we're generating the runtime +# epilogue for a forward-only inference graph, or for an autograd.Function.apply function. +# This is because there are some minor differences in how we treat these cases at runtime: +# - resize_() is currently handled in the inference case, but not fully handled in the autograd case. +# - the autograd cases inserts TensorAlias wrapper objects for outputs that alias inputs +def create_runtime_wrapper( + compiled_fn, + *, + runtime_metadata: ViewAndMutationMeta, + indices_of_inps_to_detach: List[int], + trace_joint: bool, + keep_input_mutations: bool, + disable_amp: bool, +): + num_tokens = len(runtime_metadata.tokens) + + if not hasattr(compiled_fn, "_boxed_call"): + compiled_fn = make_boxed_func(compiled_fn) + + def runtime_wrapper(*args): + # Pass in effect tokens (See Note [Side-Effectful Tokens in AOTAutograd]) + args = (*[torch.tensor([])] * num_tokens, *args) + + if trace_joint: + args_ = list(args) + # See Note [Detaching inputs that never need gradients] + for idx in indices_of_inps_to_detach: + if isinstance(args_[idx], torch.Tensor): + args_[idx] = args_[idx].detach() + with torch.autograd._force_original_view_tracking(True): + all_outs = call_func_at_runtime_with_args( + compiled_fn, + args_, + disable_amp=disable_amp, + ) + else: + # When we have an inference graph, we run with torch.no_grad. 
+ # It's possible to get an inference graph with inputs that require grad, + # in which case we want to make sure autograd is disabled + # (since e.g., inductor will generate aten.addmm.out calls which autograd will complain on) + if torch.is_grad_enabled(): + with torch.no_grad(): + all_outs = call_func_at_runtime_with_args( + compiled_fn, + args, + disable_amp=disable_amp, + ) + else: + all_outs = call_func_at_runtime_with_args( + compiled_fn, + args, + disable_amp=disable_amp, + ) + + num_mutated_runtime_inps = runtime_metadata.num_mutated_inp_runtime_indices + num_intermediate_bases = runtime_metadata.num_intermediate_bases + + if keep_input_mutations and trace_joint: + num_input_mutations_handled_by_autograd = ( + runtime_metadata.num_mutated_graph_handled_indices_seen_by_autograd + ) + # autograd.Function requires us to return the mutated inputs as extra outputs to the autograd.Function.forward + if num_input_mutations_handled_by_autograd > 0: + all_outs = all_outs[:-num_input_mutations_handled_by_autograd] + + assert ( + len(all_outs) + == num_mutated_runtime_inps + + runtime_metadata.num_outputs + + num_intermediate_bases + + num_tokens + ) + + # Toss out the effect tokens (See Note [Side-Effectful Tokens in AOTAutograd]) + all_outs = all_outs[num_tokens:] + + # Step 3: After running the compiled fw, apply updates to mutated inputs + num_mutations_to_apply = runtime_metadata.num_mutated_inp_runtime_indices + if num_mutations_to_apply > 0: + updated_inputs = all_outs[:num_mutations_to_apply] + fw_outs = all_outs[num_mutations_to_apply:] + + for i, inpt_idx in enumerate(runtime_metadata.mutated_inp_runtime_indices): + meta = runtime_metadata.input_info[inpt_idx] + if not meta.mutates_data and not meta.mutates_metadata: + continue + original_inpt = args[inpt_idx] + updated_inpt = updated_inputs[i] + if meta.mutates_storage_metadata: + # mutates_storage_metadata means our input saw a x.set_(y) call. + # What if x **also** saw a data and/or a metadata mutation? + # (1) If the [meta]data mutation occurred after the set_(), + # then there is no need to copy_() the data. + # When we perform x.set_(x_updated), we are guaranteed that + # x_updated already has the final version of the data/metadata + # (2) If a data mutation occurred before the set_(). + # This case seems very difficult to support. + # TODO: discuss on the PR and decide if we want to tr to + # either support it, or detect and ban it. + if trace_joint: + assert isinstance(updated_inpt, TensorAlias) + updated_inpt = updated_inpt.alias + with torch.no_grad(): + original_inpt.set_(updated_inpt) + continue + if meta.mutates_metadata and not meta.mutates_data: + if trace_joint: + assert isinstance(updated_inpt, TensorAlias) + updated_inpt = updated_inpt.alias + # We need to grab the size/stride/storage_offset from the compiled forward, + # and use that to mutate the metadata of the input + original_inpt.as_strided_( + updated_inpt.size(), + updated_inpt.stride(), + updated_inpt.storage_offset(), + ) + else: + if meta.mutates_data and meta.mutates_metadata: + original_inpt.as_strided_( + updated_inpt.size(), + updated_inpt.stride(), + updated_inpt.storage_offset(), + ) + else: + assert meta.mutates_data + if meta.is_leaf and original_inpt.requires_grad: + # We can hit this situation in this case: + # def f(x): + # x.detach().mul_(2) + # return x + 1 + # AOTAutograd will see a mutation in the above case, and try to + # apply a copy_() here, in the epilogue. 
+ # But if x required gradients, and is a leaf, then autograd + # will yell at us for trying to mutate it. + # However, it's only possible to end up in this scenario (like the above) + # if all of the mutations to the leaf input were non-autograd-tracking mutations + # (aka mutations under no_grad(), or on detached views). + # In that case, we fully want to hide the mutation from autograd, so detaching is ok. + original_inpt.detach().copy_(updated_inpt) + else: + original_inpt.copy_(updated_inpt) + else: + fw_outs = all_outs + + # Step 4: Manually regenerate any outputs that are aliased to inputs, instead of + # compiling them. + if runtime_metadata.num_outputs_aliased > 0: + # The compiled forward also returned intermediate bases. We don't want to return them to the user. + if runtime_metadata.num_intermediate_bases > 0: + fw_outs_no_intermediate_bases = fw_outs[ + : -runtime_metadata.num_intermediate_bases + ] + intermediate_bases = fw_outs[-runtime_metadata.num_intermediate_bases :] + else: + fw_outs_no_intermediate_bases = fw_outs + intermediate_bases = [] + + assert len(fw_outs_no_intermediate_bases) == len( + runtime_metadata.output_info + ) + fw_outs_including_aliases = [] + for i, (o, info) in enumerate( + zip(fw_outs_no_intermediate_bases, runtime_metadata.output_info) + ): + if info.output_type in [ + OutputType.non_alias, + OutputType.unsafe_view_alias, + OutputType.custom_function_view, + ]: + fw_outs_including_aliases.append(o) + continue + if trace_joint: + assert isinstance(o, TensorAlias) + o_ = o.alias + else: + o_ = o + + o_grad = runtime_metadata.output_info[i].requires_grad + if info.output_type == OutputType.alias_of_input: + aliased_base_tensor = args[info.base_idx] # type: ignore[index] + regenerated_out = gen_alias_from_base( + aliased_base_tensor, o_, o_grad + ) + fw_outs_including_aliases.append(regenerated_out) + continue + elif info.output_type == OutputType.is_input: + aliased_base_tensor = args[info.base_idx] # type: ignore[index] + regenerated_out = aliased_base_tensor + fw_outs_including_aliases.append(regenerated_out) + continue + elif info.output_type == OutputType.alias_of_intermediate: + base_tensor_list = intermediate_bases + elif ( + info.output_type == OutputType.alias_of_intermediate_save_as_output + ): + base_tensor_list = intermediate_bases + else: + assert ( + info.output_type + == OutputType.alias_of_intermediate_base_is_user_output + ) + base_tensor_list = fw_outs_no_intermediate_bases + aliased_base_tensor = base_tensor_list[info.base_idx] + # TODO: handle the custom autograd function case here. + # We need a way to check whether a tensor came from a custom autograd fn from python, + # AND a way to replay that custom view fn. 
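+ # (Illustrative: if the traced function returned `tmp.view(-1)` for some
+ # intermediate `tmp = x + 1`, the compiled graph returns `tmp` as an extra
+ # intermediate-base output, and the user-visible view is replayed off that
+ # base here so the returned tensor genuinely aliases it.)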
+ regenerated_out = gen_alias_from_base(aliased_base_tensor, o_, o_grad) + fw_outs_including_aliases.append(regenerated_out) + ret_outs = fw_outs_including_aliases + else: + ret_outs = fw_outs + + if runtime_metadata.dynamic_outputs: + for t, o in zip(ret_outs, runtime_metadata.output_info): + if o.dynamic_dims is None: + continue + if hasattr(t, "_dynamo_weak_dynamic_indices"): + t._dynamo_weak_dynamic_indices |= o.dynamic_dims + else: + t._dynamo_weak_dynamic_indices = o.dynamic_dims.copy() + if runtime_metadata.grad_enabled_mutation is not None: + torch.set_grad_enabled(runtime_metadata.grad_enabled_mutation) + return ret_outs + + return runtime_wrapper + + +# Calling convention: If we are running functionalized RNG, then outs consists +# of (user_outs, rng_offset) +def functionalized_rng_runtime_epilogue( + metadata: ViewAndMutationMeta, outs, return_new_outs=True +): + if metadata.is_rng_op_functionalized: + assert metadata.num_outputs_rng_offset == 1 + new_rng_offset = outs[-1] + CUDARngStateHelper.set_new_offset(new_rng_offset) + if return_new_outs: + user_outs = outs[:-1] + return user_outs + else: + return None + return outs + + +# This wrapper handles the AOTDispatch runtime logic for tensor subclasses. +# At runtime, we have a compiled function that knows how to operate on the domain of DenseTensor -> DenseTensor, +# But the user might have passed us some tensor subclass inputs (or expect some subclass tensor outputs). +# This function handles the wrapping and unwrapping of tensor subclasses at runtime. +def aot_dispatch_subclass_wrapper( + runtime_fn: Callable, + *, + subclass_metas: List[Union[int, SubclassCreationMeta]], + num_fw_outs_saved_for_bw: Optional[int], +) -> Callable: + def inner_fn(args): + unwrapped_args = unwrap_tensor_subclasses(args, is_joint_structure=False) + # expectation: runtime_fn is a boxed fn + unwrapped_outs = runtime_fn(unwrapped_args) + wrapped_outs = wrap_tensor_subclasses( + unwrapped_outs, + subclass_metas=subclass_metas, + num_fw_outs_saved_for_bw=num_fw_outs_saved_for_bw, + is_runtime=True, + ) + return wrapped_outs + + # box it + inner_fn._boxed_call = True # type: ignore[attr-defined] + return inner_fn + + +# MOTIVATION: +# +# When tracing functions for future execution, one must be careful not to pass +# in the same input tensor multiple times (e.g., f(x, x), as this can result +# in graphs that are ONLY valid if you later pass a new tensor in exactly the +# same way (e.g., f(y, y)). (NB: we really mean duplicate; two distinct +# tensors that alias each other is a different situation that is covered by +# aot_dispatch_deduplicated_autograd). Here are two examples: +# +# (1) Suppose you have a function: +# +# def f(x, y): +# return x + y +# +# If you make_fx(f)(x, x), you will trace out: +# +# def f(x, y): +# return y + y +# +# Oops! +# +# (2) For most tensors x and y, you can compute f's gradient with respect to +# these to inputs by saying torch.autograd.grad(f(x, y), (x, y)). However, +# if x is y, you will trace out a program that gets incorrect gradients: +# +# >>> x = torch.randn(1, requires_grad=True) +# >>> torch.autograd.grad(x + x, (x, x)) +# (tensor([2.]), tensor([2.])) +# +# In other words, the gradient is double-counted. Deduplicating the arguments +# gives you an appropriate gradient: +# +# >>> y = torch.randn(1, requires_grad=True) +# >>> torch.autograd.grad(x + y, (x, y)) +# (tensor([1.]), tensor([1.])) +# +# HOW TO DEDUPLICATE: +# +# There are a few strategies, in order of preference: +# +# 1. 
For every duplicate argument to the function, detach it into +# a separate leaf tensor, so that it is no longer duplicated. +# +# PRO: The resulting compiled graph works for any configuration +# of duplicated arguments. +# +# CON: It does not (naively) work if you mutate the metadata of inputs: +# +# def f(x, y): +# x.transpose_(0, 1) +# y.transpose_(0, 2) +# +# x = torch.randn(2, 3, 4) +# f(x, x) +# +# The ordering of the transposes inside f dictates whether or not +# you get [4, 2, 3] or [3, 4, 2]. This means that you cannot precompute +# what metadata mutations should get applied to each input; you need to +# assume they aren't duplicates (what we do today) or preserve +# the original metadata mutations exactly in order, so that they work +# for any duplicate configuration. +# +# CON: It does not (naively) work if you mutate the data of inputs. +# In particular, leaf tensors that require grad cannot be mutated, +# this makes it impossible to differentiate with respect to the original +# base. +# +# 2. For every duplicate argument to the function, remove it, so it is +# no longer part of the "true" signature: +# +# PRO: Implemented naively, it still works for metadata/data mutation. +# +# CON: The resulting compiled graph is duplicate-specialized: it only +# works if future calls duplicate arguments in exactly the same way. +# Horribly, Dynamo doesn't guard on this at the moment. But even if +# it did, you could still end up recompiling a bunch of each duplicate. +# +# Our strategy is to do (1) if we can, and do (2) otherwise, erroring if +# Dynamo's guards are not enough. In practice, this seems to cover +# everything. +# +def aot_wrapper_dedupe( + flat_fn, + flat_args: List[Tensor], + aot_config: AOTConfig, + *, + compiler_fn, + fw_metadata, +): + # Use information about whether or not flat_fn mutates its arguments + # or not to handle dupe args + + # Strategy 1: For any input that is not mutated, we can leafify it if we + # need to remove a duplicate. + leaf_flat_args = [] + args_set = set() + ok = True + + for i, a in enumerate(flat_args): + if not isinstance(a, torch.Tensor): + leaf_flat_args.append(a) + elif a not in args_set: + args_set.add(a) + leaf_flat_args.append(a) + elif ( + not fw_metadata.input_info[i].mutates_data + and not fw_metadata.input_info[i].mutates_metadata + ): + leaf_flat_args.append(a.detach().requires_grad_(a.requires_grad)) + else: + ok = False + break + + if ok: + return compiler_fn(flat_fn, leaf_flat_args, aot_config, fw_metadata=fw_metadata) + + if requires_subclass_dispatch(leaf_flat_args, fw_metadata): + raise RuntimeError( + """\ +Encountered duplicate inputs that are mutated in the graph, but at least one input/output +to the graph is a tensor subclass. This is not supported today. You can try to +remove the aliasing yourself as a workaround, or otherwise file an issue on github.""" + ) + + # export path: ban duplicate inputs for now, add later if requested. + if aot_config.is_export: + raise RuntimeError( + f"""\ +Encountered duplicated inputs that are mutated in the graph you are trying to export. +This functionality is currently not supported. If needed, please file a github issue. + +fw_metadata={str(fw_metadata)} + """ + ) + + # Strategy 2: Duplicate specialize. 
+ # + # In Haskell types, suppose you have: + # + # add_dupe_args :: DedupedArgs -> Args + # remove_dupe_args :: Args -> DedupedArgs + # + # compiler_fn + # :: (DedupedArgs -> R) -> DedupedArgs -> AOTConfig -> (DedupedArgs -> R) + # deped_compiler_fn + # :: (Args -> R) -> Args -> AOTConfig -> (Args -> R) + # + # Then the code below can be written in point-free style as: + # + # deduped_compiler_fn f a c = + # compiler_fn (f . add_dupe_args) (remove_dupe_args a) c . remove_dupe_args + # + # Suppose you have: + # + # [a, b, a, c] + # + # We want: + # + # remove_dupe_args([a, b, a, c]) == [a, b, c] + # add_dupe_args([a, b, c]) == [a, b, a, c] + # + # This is done via (respectively): + # + # seen_args = {a: 0, b: 1, c: 2} + # enumerate(add_dupe_map) = [ # how to get args from the deduped list + # (0, 0), + # (1, 1), + # (2, 0), + # (3, 2), + # ] + # keep_arg_mask = [True, True, False, True] + + seen_args: Dict[Tensor, int] = {} + keep_arg_mask = [] + # Implicitly map duped arg position (list index) to de-duped arg position + add_dupe_map: List[int] = [] + duped_arg_len = len(flat_args) + + j = 0 # index into deduped_flat_args + for t in flat_args: + if isinstance(t, torch.Tensor): + if t in seen_args: + keep_arg_mask.append(False) + add_dupe_map.append(seen_args[t]) + continue + seen_args[t] = j + + keep_arg_mask.append(True) + add_dupe_map.append(j) + j += 1 + assert ( + len(add_dupe_map) == duped_arg_len + ), f"Expects add_dupe_map to have length {duped_arg_len} but got {len(add_dupe_map)}" + + # NB: Hot path, avoid set lookups here + # TODO: Can avoid the zip here too, probably + def remove_dupe_args(args): + return [t for t, keep in zip(args, keep_arg_mask) if keep] + + def add_dupe_args(args): + return [args[add_dupe_map[i]] for i in range(duped_arg_len)] + + deduped_flat_args = remove_dupe_args(flat_args) + + # Update our input metadata to remove duped input metadata. + updated_fw_metadata = remove_dupe_metadata(fw_metadata, keep_arg_mask, add_dupe_map) + + if ( + tracing_context := TracingContext.try_get() + and aot_config.aot_autograd_arg_pos_to_source + ): + # TODO(voz): This structure is 1:1, we could consider an alternate structure like + # kept_pos:[dupe_arg_pos], however, add_dupe_map is 1:1 so we would need a new structure there, + # which feels like needless complexity for a tiny bit of efficiency at this point. 
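# To make the dedup bookkeeping above concrete, here is a small worked example
# (illustrative only; strings stand in for tensors) for the duplicated
# argument list [a, b, a, c] described in the comment:
#
# >>> keep_arg_mask = [True, True, False, True]
# >>> add_dupe_map = [0, 1, 0, 2]
# >>> args = ["a", "b", "a", "c"]
# >>> deduped = [t for t, keep in zip(args, keep_arg_mask) if keep]
# >>> deduped
# ['a', 'b', 'c']
# >>> [deduped[i] for i in add_dupe_map]
# ['a', 'b', 'a', 'c']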
+ for dupe_arg_pos, (kept_pos, keep_arg) in enumerate( + zip(add_dupe_map, keep_arg_mask) + ): + if not keep_arg: + dupe_arg_source = aot_config.aot_autograd_arg_pos_to_source[ + dupe_arg_pos + ] + kept_arg_source = aot_config.aot_autograd_arg_pos_to_source[kept_pos] + tracing_context.guards_context.aotautograd_guards.append( # type: ignore[attr-defined] + DuplicateInputs(kept_arg_source, dupe_arg_source) + ) + + @wraps(flat_fn) + def wrapped_flat_fn(*args): + return flat_fn(*add_dupe_args(args)) + + if config.debug_assert: + ref_fw_metadata = run_functionalized_fw_and_collect_metadata( + wrapped_flat_fn, + keep_input_mutations=fw_metadata.keep_input_mutations, + is_train=fw_metadata.is_train, + )(*deduped_flat_args) + assert ( + ref_fw_metadata == updated_fw_metadata + ), f"ref_metadata={str(ref_fw_metadata)}, actual_metadata={str(updated_fw_metadata)}" + + compiled_fn = compiler_fn( + wrapped_flat_fn, deduped_flat_args, aot_config, fw_metadata=updated_fw_metadata + ) + + if not hasattr(compiled_fn, "_boxed_call"): + compiled_fn = make_boxed_func(compiled_fn) + + @wraps(compiled_fn) + def wrapped_compiled_fn(args): + deduped_args = remove_dupe_args(args) + args.clear() + return compiled_fn(deduped_args) + + wrapped_compiled_fn._boxed_call = True # type: ignore[attr-defined] + + # This can be uncommented when we properly guard for duplicates, + # but right now we must not do it. + # if not config.debug_assert: + # return wrapped_compiled_fn + + @wraps(wrapped_compiled_fn) + def debugged_compiled_fn(args): + # Test that the computed remove/add arg functions are an inverse + new_args = add_dupe_args(remove_dupe_args(args)) + seen: Dict[Any, None] = {} + for i, (x, y) in enumerate(zip(new_args, args)): + seen[y] = None + assert x is y, format_guard_bug_msg( + aot_config, + f"{describe_input(i, aot_config)} would be a duplicate of " + f"{describe_input(add_dupe_map[i], aot_config)}", + ) + # This is only an error if there is metadata mutation on both of + # the duped arguments; in this case, we need to know what order + # the metadata mutation applies in. You'll get the correct result + # otherwise, because a graph that assumes distinct inputs works if + # you dupe the inputs (the gradient contributions from each input + # will get summed up appropriately.) + # + # TODO: work out how to setup this assert correctly + """ + assert len(seen) == unique_args, format_guard_bug_msg(aot_config, + f"there would be {unique_args} distinct arguments" + ) + """ + return wrapped_compiled_fn(args) + + debugged_compiled_fn._boxed_call = True # type: ignore[attr-defined] + + return debugged_compiled_fn + + +# This layer handles the situation where you have two inputs that alias each other, +# and one of the inputs is mutated. +# We need to take special care to ensure that the mutation is applied to the other aliases in the graph. +# +# pre-condition: aot_wrapper_dedup has already run. +# (This function will in theory work if there are duplicate args. +# However, the synthetic base code path is a bit sub-optimal, and running with dupe'd inputs +# would cause us to hit that path more frequently). +def aot_wrapper_synthetic_base( + flat_fn, + flat_args: List[Tensor], + aot_config: AOTConfig, + *, + fw_metadata: ViewAndMutationMeta, + # Currently, the only reason we need to plumb this bool is because + # the synthetic base code prohibits more cases in the autograd case than the inference case. 
+ needs_autograd: bool, + compiler_fn, +): + is_inference = not needs_autograd + flat_args_with_synthetic_bases, synthetic_base_info = merge_view_inputs( + flat_args, + fw_metadata.input_info, + is_inference=is_inference, + ) + # Happy path: we don't need synthetic bases + if synthetic_base_info is None: + return compiler_fn(flat_fn, flat_args, aot_config, fw_metadata=fw_metadata) + + # export path: ban synthetic bases for now, add later if requested. + if requires_subclass_dispatch(flat_args, fw_metadata): + raise RuntimeError( + """\ +Encountered aliased inputs that are mutated in the graph, but at least one input/output +to the graph is a tensor subclass. This is not supported today. You can try to +remove the aliasing yourself as a workaround, or otherwise file an issue on github.""" + ) + + if aot_config.is_export: + raise RuntimeError( + f"""\ +Encountered aliased inputs that are mutated in the graph you are trying to export. +This functionality is currently not supported. If needed, please file a github issue. + +synthetic_base_info={str(synthetic_base_info)} + +fw_metadata={str(fw_metadata)} + """ + ) + + assert len(fw_metadata.input_info) == len(synthetic_base_info) + + # Update our forward metadata to take synthetic bases into account + ( + fw_metadata_updated, + aliased_arg_idx_with_metadata_mutations, + ) = create_synthetic_base_metadata( + fw_metadata, synthetic_base_info, flat_args, flat_args_with_synthetic_bases + ) + + num_aliased_args_with_metadata_mutations = len( + aliased_arg_idx_with_metadata_mutations + ) + + def _unpack_synthetic_bases(primals: Tuple[Any, ...]) -> List[Any]: + f_args_inner = [] + for inner_idx_or_tuple in synthetic_base_info: + if isinstance(inner_idx_or_tuple, int): + f_args_inner.append(primals[inner_idx_or_tuple]) + else: + inner_base_idx, view_tensor = inner_idx_or_tuple + base = primals[inner_base_idx] + view_arg = gen_alias_from_base( + base, view_tensor, view_tensor.requires_grad + ) + f_args_inner.append(view_arg) + return f_args_inner + + @wraps(flat_fn) + def wrapped_flat_fn(*args): + unpacked_args = _unpack_synthetic_bases(args) + # This is a bit subtle. The goal of this entire function (aot_dispatch_synthetic_bases) + # is to relieve the downstream logic from having to reason about mutations on inputs that alias + # each other, by replacing aliased inputs with a synthetic base. + # One area where this breaks down a bit however is if one of those aliased inputs + # experienced a metadata mutation. + # We are now obligated to reapply the metadata mutation directly to the user's input; + # it isn't enough to apply mutations back to the synthetic base in the downstream logic. + # + # The way we handle this is by pretending that those aliased inputs that experience metadata mutations + # are additional outputs in the user's forward function. + # The downstream logic will just treat these as "user outputs that alias inputs". + # However, we will manually grab them at runtime here, use them to reapply the metadata mutation + # to the user inputs, and not return them to the user. 
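# To illustrate what "reapply the metadata mutation" means at runtime
# (a sketch, not this code path itself; it mirrors the as_strided_ call in
# wrapped_compiled_fn further below):
#
# >>> import torch
# >>> inp = torch.randn(2, 3)
# >>> mutated = inp.detach().t()   # stand-in for the extra graph output
# >>> _ = inp.as_strided_(mutated.size(), mutated.stride(), mutated.storage_offset())
# >>> inp.shape
# torch.Size([3, 2])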
+ aliased_args_with_metadata_mutations = [ + x + for i, x in enumerate(unpacked_args) + if i in aliased_arg_idx_with_metadata_mutations + ] + if len(aliased_args_with_metadata_mutations) > 0: + return *(flat_fn(*unpacked_args)), *aliased_args_with_metadata_mutations + else: + return flat_fn(*unpacked_args) + + if config.debug_assert: + ref_fw_metadata = run_functionalized_fw_and_collect_metadata( + wrapped_flat_fn, + keep_input_mutations=fw_metadata.keep_input_mutations, + is_train=fw_metadata.is_train, + )(*flat_args_with_synthetic_bases) + assert ref_fw_metadata == fw_metadata_updated, ( + f"ref_metadata={pprint.pformat(partial_flatten_asdict(ref_fw_metadata))}, " + f"\nactual_metadata={pprint.pformat(partial_flatten_asdict(fw_metadata_updated))}" + ) + + compiled_fn = compiler_fn( + wrapped_flat_fn, + flat_args_with_synthetic_bases, + aot_config, + fw_metadata=fw_metadata_updated, + ) + + if not hasattr(compiled_fn, "_boxed_call"): + compiled_fn = make_boxed_func(compiled_fn) + + @wraps(compiled_fn) + def wrapped_compiled_fn(args): + args_with_synthetic_bases, synthetic_base_info = merge_view_inputs( + args, fw_metadata.input_info, is_inference=is_inference + ) + assert synthetic_base_info is not None + aliased_args_w_metadata_mutations = [ + args[i] for i in aliased_arg_idx_with_metadata_mutations + ] + args.clear() + outs = compiled_fn(args_with_synthetic_bases) + if num_aliased_args_with_metadata_mutations > 0: + # This code does not handle **all** input metadata mutations. + # Instead, it only handles metadata mutations on inputs that were converted into synthetic bases + # (which only happens if at least one aliased input experienced a data mutation). + # e.g: + # def f(a, b): + # a.mul_(2) + # b.t_(1, 0) + # f(x.view(2, 2), x.view(2, 2)) + mutated_metadata_inps = outs[-num_aliased_args_with_metadata_mutations:] + user_outs = outs[:-num_aliased_args_with_metadata_mutations] + for inp, mutated_inp in zip( + aliased_args_w_metadata_mutations, mutated_metadata_inps + ): + inp.as_strided_( + mutated_inp.size(), + mutated_inp.stride(), + mutated_inp.storage_offset(), + ) + return user_outs + return outs + + return wrapped_compiled_fn + + +# Note [Handling mutations on an input that aliases other inputs] +# The easiest example to show-case this edge case is here: +# +# def f(a, b): +# a.mul_(2) +# out = a + b +# return out +# b = torch.ones(...) +# a = b.view(-1) +# f(a, b) +# +# In this situation, if a and b happened to be aliased, we need to trace something different! +# Suppose we had b = a.view(-1) +# (In this case, that means that `a._base is b`) +# +# We need to ensure that the aliasing relationship between a and b is preserved. +# We do that detecting the specific situation above (mutate an input that aliases another input), +# and when we do that, we create a synthetic base argument. Then inside of the traced forward, +# we regenerate a and b off of that base. +# The complete example of the transformed function looks like this: +# +# // The traced forward takes in a synthetic base, and regenerates the aliased inputs as views +# // We could consider getting view-replay support here to minimize as_strided_scatter ops in the graph +# def traced_forward(base): +# a = base.as_strided(...) +# b = base.as_strided(...) +# a_updated = a.mul(2) +# base_updated = torch.as_strided_scatter(base, a_updated, ...) +# b_updated = base_updated.as_strided(...) 
+# out = a_updated + b_updated +# return a_updated, out +# +# def compiled_fn(a, b): +# // we detect that a is the "differentiable base" here +# base = a +# // In other situations, we might do either: +# // (1) a and b are both views off of some larger differentiable base +# // assert a._base is b._base and a._base is not None +# // base = a._base +# // (2) a and b both don't require gradients. Create a base from the storage +# // assert a._base is None and b._base is None +# // base = torch.Tensor(a.storage()) +# a_updated, out = traced_forward(base) +# a.copy_(a_updated) +# return out +# +# This function: +# (1) Merges input views into a synthetic base argument, when any of those input views are mutated +# (2) Returns metadata telling the autograd.Function how to modify their arguments properly, +# to respect the new calling convention. +# +# The calling convention is as follows. +# Any inputs that were originally views of one another get yanked, and replaced with a synthetic base. +# The argument list ordering goes [base1, ..., baseN], [arg1, ..., argN], +# Where the ordering of the bases is determined from the ordering of the original view args. +# baseA will come before baseB if the earliest original argument coming from baseA +# showed up earlier in the argument list than the earliest original argument coming from baseB. +# +# Example, given some tensors a, b, c, d +# call site: +# f(a, c.view(-1), b.view(-1), b, c, d) +# Modified argument list: +# c_base comes first because the first c view came earlier in arg list than the first b view +# a and d still show up in the modified arg list, but b and c don't- they're regenerated from their bases +# b_base = torch.Tensor(b.storage()) +# c_base = torch.Tensor(c.storage()) +# f(c_base, b_base, a, d) +def merge_view_inputs( + fwd_inputs: List[Any], + mutated_input_info: List[InputAliasInfo], + *, + # The autograd case currently has more restrictions than the inference case. + is_inference: bool, +) -> Tuple[List[Any], Optional[List[Union[int, Tuple[int, torch.Tensor]]]]]: + def _are_differentiable_views(view1, view2): + if view1 is view2: + return True + if view1._base is None and view2._base is None: + return False + if view1._base is view2._base or view1._base is view2 or view1 is view2._base: + return True + return False + + def _same_dtype_views(view1, view2): + if view1.dtype != view2.dtype: + return False + if view1._base is not None and view1.dtype != view1._base.dtype: + return False + if view2._base is not None and view2.dtype != view2._base.dtype: + return False + return True + + assert len(fwd_inputs) == len(mutated_input_info) + storage_ref_to_idx: Dict[StorageWeakRef, List[int]] = collections.defaultdict(list) + base_args = [] + other_args = [] + for i, inpt in enumerate(fwd_inputs): + if isinstance(inpt, Tensor): + storage_ref = StorageWeakRef(inpt.untyped_storage()) + storage_ref_to_idx[storage_ref].append(i) + else: + other_args.append(inpt) + # Note [Synthetic Base Info Metadata] + # This list contains metadata that tells you what the i'th argument in the inner calling convention should be. 
+ # It's either: + # - another int (corresponding to the index in the argument list of the element from the outer calling convention) + # - idx, view_tensor, where we can generate the new output with view_tensor._view_func(old_args[idx]) + # idx corresponds to which synthetic base from the outer calling context to view + inner_calling_convention_meta: Dict[int, Union[int, Tuple[int, torch.Tensor]]] = {} + for aliased_input_indices in storage_ref_to_idx.values(): + if len(aliased_input_indices) <= 1 or not any( + # We only care about mutations that affect all aliases, + # so metadata mutations on an input doesn't require us to do synthetic base handling. + mutated_input_info[inpt_idx].mutates_data + for inpt_idx in aliased_input_indices + ): + for curr_idx in aliased_input_indices: + other_args.append(fwd_inputs[curr_idx]) + continue + + # Here, we attempt to do a more complicated check to detect false aliasing + # (e.g. if all the tensors have the same storage, but don't actually overlap) + # In theory, we could have a large group of tensors that all share storages, where only *some* of them + # have overlapping memory. + # I don't bother with that case for now: here, we only bail out earlier if we detect that **every** pair + # of tensors in the current group that shares a storage is non-overlapping. + aliased_input_indices_no_false_sharing = compute_overlapping_inputs( + fwd_inputs, aliased_input_indices + ) + if len(aliased_input_indices_no_false_sharing) <= 1: + for curr_idx in aliased_input_indices: + other_args.append(fwd_inputs[curr_idx]) + continue + + # We detected an input that was mutated, AND aliases with another input. + # we need to replace this set of aliased inputs with a single synthetic base. + # For now, I'm banning a bunch of cases. We expect dynamo to properly detect these cases + # and error out. We can fix them later. + # These checks are transitive, so we don't need to check every pair. + for idx1, idx2 in zip( + aliased_input_indices, aliased_input_indices[1:], strict=False + ): + view1 = fwd_inputs[idx1] + view2 = fwd_inputs[idx2] + # The "inputs that are aliased but have different differentiable bases" case + # is more complicated and hopefully pretty rare. Not currently handled. + if not is_inference: + assert _are_differentiable_views( + view1, view2 + ), "aot_autograd() does not yet handle non-differentiable view input mutations." + # Regenerating views when reinterpreting complex / real tensors seems non-trivial, + # not handling for now + assert _same_dtype_views( + view1, view2 + ), "aot_autograd() does not yet handle input mutations on views with different dtypes." + non_none_bases = [ + fwd_inputs[i]._base + for i in aliased_input_indices + if fwd_inputs[i]._base is not None + ] + aliases_with_none_bases = [ + fwd_inputs[i] for i in aliased_input_indices if fwd_inputs[i]._base is None + ] + if len(non_none_bases) == 0: + # Case where none of the aliases have a ._base + # we generate a synthetic base without gradients, and generate views off of it + # We hit this case when we have input tensors to the graph that share a storage, + # but do not have a ._base field. + # Wondering when we hit this case? + # The _base field simply says that autograd knows about the aliasing relationship, + # but sometimes we create tensors which are aliased out of the same storage but guaranteed + # to be disjoint. 
In these cases, we will skip setting up the _base relationship + # for performance reasons (because the fact that the tensors share the same storage + # is unobservable unless you (1) do naughty things with resize_/as_strided + # or (2) look at the storage--as we are doing here.) + # One particular example of this is optimizer steps on the LSTM module: + # LSTM parameters are packed into a contiguous storage for efficiency reasons when + # calling cuDNN kernels, so when these parameters get passed to the optimizer we will + # find they share the same storage, but do not have _base set since they are all disjoint. + # + # NOTE: There is one case where this is unsafe: + # torch.Tensor(storage) will ALWAYS create a 1D tensor, which is not necessarily + # the same shape as the "actual" base that the tensor came from. + # For the most part this is fine, because we always use as_strided() + # to generate the original aliased inputs again. + # If we were to use view-replay though, this could cause the aliased views + # to have incorrect sizes. + example_idx = aliased_input_indices[0] + example_alias = fwd_inputs[example_idx] + # Note that this function is re-used at both trace time and runtime. + # At trace time, we're under a FakeMode so synthetic_base becomes a FakeTensor. + synthetic_base = torch.empty( + (0,), dtype=example_alias.dtype, device=example_alias.device + ) + # We don't actually have a convenient way of going from storage -> tensor, + # So using set_() here (we suffer some minor overhead, but this case is rare). + synthetic_base.set_(example_alias.untyped_storage()) + else: + # Case where all of the aliases require gradients, and have the same _base. + synthetic_base = non_none_bases[0] + for other_base in non_none_bases[1:]: + assert ( + other_base is synthetic_base + ), "aot_autograd() does not yet handle non-differentiable view input mutations." + for alias in aliases_with_none_bases: + assert ( + alias is synthetic_base + ), "aot_autograd() does not yet handle non-differentiable view input mutations." + base_args.append(synthetic_base) + for curr_view_idx in aliased_input_indices: + curr_view = fwd_inputs[curr_view_idx] + base_idx = len(base_args) - 1 + # We store just enough info here so that we can regenerate the view later. + # Regeneration: curr_view._view_func(args[base_idx]) + inner_calling_convention_meta[curr_view_idx] = (base_idx, curr_view) + if len(base_args) == 0: + assert len(other_args) == len(fwd_inputs) + # If no synthetic bases are necessary, just return the original inputs. + return fwd_inputs, None + else: + # Otherwise, return: + # (1) The new args according to the updated calling convention: (synthetic_bases, other_args) + # (2) Metadata telling functionalization how to generate the inner argument list given the outer calling convention. + # We post-process it into a list, where meta[i] tells you info about the i'th argument in the inner calling convention. 
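# A minimal illustration of the synthetic-base construction above (a sketch
# using only public tensor APIs; at trace time these tensors would instead be
# FakeTensors under a FakeMode):
#
# >>> import torch
# >>> base = torch.randn(4)
# >>> a = base.view(2, 2)
# >>> synthetic_base = torch.empty((0,), dtype=a.dtype, device=a.device)
# >>> _ = synthetic_base.set_(a.untyped_storage())
# >>> a_regen = synthetic_base.as_strided(a.size(), a.stride(), a.storage_offset())
# >>> torch.equal(a_regen, a)
# True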
+ args_to_functionalization = base_args + other_args + arg_to_old_idx_map = {arg: i for (i, arg) in enumerate(fwd_inputs)} + for i, other_arg in enumerate(other_args): + new_idx = len(base_args) + i + old_idx = arg_to_old_idx_map[other_arg] + inner_calling_convention_meta[old_idx] = new_idx + # post process into a list + post_processed_calling_convention_meta: List[ + Union[int, Tuple[int, torch.Tensor]] + ] = [-1 for _ in range(len(inner_calling_convention_meta))] + for k, v in inner_calling_convention_meta.items(): + post_processed_calling_convention_meta[k] = v + # Quick assert: every argument in the inner calling convention should be accounted for. + for x in post_processed_calling_convention_meta: + assert x != -1 + return args_to_functionalization, post_processed_calling_convention_meta diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/schemas.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/schemas.py new file mode 100644 index 0000000000000000000000000000000000000000..9099ced991a88c9e96e09f87bcfd72c821b385fe --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/schemas.py @@ -0,0 +1,696 @@ +""" +The various dataclasses, Enums, namedtuples etc used in AOTAutograd. This includes +input/output types, metadata, config, function signatures etc. +""" + +import collections +import functools +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Callable, Dict, List, NewType, Optional, Set, Tuple, Union + +import torch +import torch.utils._pytree as pytree +from torch._guards import Source +from torch._subclasses import FakeTensor +from torch._subclasses.fake_tensor import is_fake + +from .. import config + +from .functional_utils import _check_if_mutation_can_be_in_graph +from .utils import strict_zip + +zip = strict_zip + +OutputType = Enum( + "OutputType", + ( + # output is not an alias + "non_alias", + # output aliases an input + "alias_of_input", + # output **is** an input tensor + "is_input", + # output has a ._base tensor, which is a graph intermediate. + # We need to return its ._base as a graph output, + # so its requires_grad info is populated correctly. + # Instructs the runtime code to regenerate the current output + # from a base tensor, graph_intermediates[base_idx] + "alias_of_intermediate_save_as_output", + # Same as above; but we don't need to explicitly add its ._base + # as a graph output, because it already **is** a graph output. + "alias_of_intermediate", + # Same as above; but the output's ._base is **already** a user output. + # Instructs the runtime code to regenerate the current output from + # a base tensor, user_outputs[base_idx] + "alias_of_intermediate_base_is_user_output", + # See Note [Intermediate Bases Optimization] + "unsafe_view_alias", + # output is an alias, but has a custom autograd.Function backward. + # In this case, we don't want to do view-replay, since we won't be able to replay the custom function. + # Instead, we'll treat this output "normally", and trace its backward into the graph. + "custom_function_view", + ), +) + + +# This class stores info about every user output. 
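# For example (illustrative only), for a user function like:
#
#     def f(x):
#         return x.view(-1), x, x + 1
#
# the three outputs would be classified, in order, as
# OutputType.alias_of_input, OutputType.is_input, and OutputType.non_alias.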
+@dataclass(frozen=True) +class OutputAliasInfo: + # Tells us if this output is: + # (1) a regular (non-aliased) output + # (2) an alias of a forward input + # (3) **is** a forward input (special case of "alias_of_input") + # (4) an alias of an intermediate (aka an alias of an output of the inner traced forward) + # (5) an alias of an intermediate, that explicitly requires returning the intermediate + # as a graph output + # (6) an alias of an intermediate, where that intermediate is also a user output + output_type: OutputType + # The raw type of the output (torch.Tensor, SymInt, etc) + raw_type: type + # If (1) above, then + # - base_idx is None + # If (2) or (3) above, then + # - Tells us that the base of this alias is user_fwd_input[base_idx] + # (This is an index into the inputs *before* we make synthetic bases) + # If (4) or (5) above, then + # - Tells us that the base of this alias is output_graph_intermediates[base_idx] + # here, this refers to the index of the *direct* traced + # If (6) above, then: + # - Tells us that the base of this alias is output_user_fwds[base_idx] + # here, this refers to the index of the *direct* traced + base_idx: Optional[int] + # If it is a Tensor, what the dynamic dims are (otherwise is None) + dynamic_dims: Optional[Set[int]] + # requires_grad + requires_grad: bool + + +class MutationType(Enum): + NOT_MUTATED = 1 + MUTATED_IN_GRAPH = 2 + MUTATED_OUT_GRAPH = 3 + + +# This class tells us info about user inputs. +@dataclass(frozen=True) +class InputAliasInfo: + is_leaf: bool + mutates_data: bool + mutates_metadata: bool + mutations_hidden_from_autograd: bool + mutations_under_no_grad_or_inference_mode: bool + mutates_storage_metadata: bool + requires_grad: bool + keep_input_mutations: bool + + def __post_init__(self): + if self.mutates_storage_metadata: + # For convenience, we guarantee that this is always true. + # In practice, If we call .set_(), then at runtime there is no need + # to additionally fix up the tensor metadata, since our runtime + # call to inp.set_(updated_inp) will already have the right metadata + assert self.mutates_metadata + + @functools.cached_property + def mutation_type(self) -> MutationType: + if (not self.mutates_data) and (not self.mutates_metadata): + return MutationType.NOT_MUTATED + + if _check_if_mutation_can_be_in_graph( + self.keep_input_mutations, + self.mutates_data, + self.mutates_metadata, + self.mutations_hidden_from_autograd, + self.mutations_under_no_grad_or_inference_mode, + self.requires_grad, + ): + return MutationType.MUTATED_IN_GRAPH + + return MutationType.MUTATED_OUT_GRAPH + + +@dataclass +class SubclassCreationMeta: + """ + Used for AOTDispatch. + This dataclass gives us the information we need to reconstruct a tensor subclass + from our flat inputs. + Why is this important? The graph that we'd like to trace out contains flat tensor inputs, + But the user's original model may have subclass inputs and outputs. + So we need to wrap/unwrap subclasses as necessary to translate between the user's + view (subclass inps/outs), and the backend compiler's view (graph with no subclass args). + + Complications arise mostly from the fact that a subclass can hold more than one inner tensor; + So for a given subclass input/output, we need to carefully track which indices map + to the subclass tensor in the corresponding "dense-tensor-only" graph. 
+ """ + + # In the inner graph that only takes in dense tensor inputs, + # this maps to the first index of "tensors that should go in this subclass wrapper" + flat_tensor_start_idx: int + # The number of tensors that live in this subclass wrapper + arg_count: int + # Stores the original subclass itself. + # This is needed because we need the autograd metadata on the original subclass + # (this is guaranteed to be a wrapper subclass that holds a fake tensor, + # so holding onto this at runtime shouldn't leak memory) + original_subclass: torch.Tensor + # meta and inner_keys are produced by the subclass's __tensor_flatten__. + # We need to keep them around along with outer_size / outer_stride to plumb them + # into __tensor_unflatten__. + meta: Any + inner_keys: List[Any] + outer_size: Tuple[int, ...] + outer_stride: Tuple[int, ...] + + def creation_fn(self, all_args, *, is_runtime: bool): + curr_args = all_args[ + self.flat_tensor_start_idx : self.flat_tensor_start_idx + self.arg_count + ] + assert len(curr_args) == len( + self.inner_keys + ), f"inner_keys: {str(self.inner_keys)}. len(curr_args): {len(curr_args)}" + # NB: Sometimes we have real inner tensors and symbolic metadata. + # TODO: Resolve this so we always have matching real / symbolic tensors / metadata. + out = type(self.original_subclass).__tensor_unflatten__( # type: ignore[attr-defined] + dict(zip(self.inner_keys, curr_args)), + self.meta, + self.outer_size, + self.outer_stride, + ) + if not is_runtime: + # After wrapping up the inner dense tensors into a subclass, we need to make sure that our new wrapper + # has correct autograd metadata, since we'll be tracing through the autograd engine with the subclass. + # We don't trace through the autograd engine at runtime though, so no need + # to compute this extra metadata then! + torch._mirror_autograd_meta_to(self.original_subclass, out) # type: ignore[attr-defined] + + return out + + def __post_init__(self): + # sanity assert to make sure we don't leak memory + assert is_fake(self.original_subclass) + + +# This class encapsulates all aliasing + mutation info we need about the forward graph +# See a more detailed overview of the edge case handling at +# https://docs.google.com/document/d/19UoIh_SVrMy_b2Sx5ZaeOJttm6P0Qmyss2rdBuyfoic/edit +@dataclass(eq=False) +class ViewAndMutationMeta: + # length = # user inputs + # This gives us info about every input, and what sort of mutation happened to it (if any) + input_info: List[InputAliasInfo] + + # length = # user outputs + # This gives us info about every output (mostly around whether it aliases other tensors) + output_info: List[OutputAliasInfo] + + # length = the number of intermediate bases appended as outputs to the end of the forward graph. + # Note: this is not necessarily the same thing as: + # len([x for x in output_info if x.output_type == OutputType.alias_of_intermediate]) + # Because outputs might share a ._base, or an output's ._base might itself be + # another user output (in both cases, we won't redundantly append bases to the end of the graph) + num_intermediate_bases: int + + # For inference only: instructs us to keep data-only input mutations directly in the graph + keep_input_mutations: bool + + # length = (# inputs w data mutations) + (# user outputs that are non_aliasing tensors) + # + (# intermediate bases) + # These are the FakeTensor (or potential SymInt) outputs that we traced from our + # metadata pass of the user's forward function. 
+ # Their only use today is to pass them as a best-guess for tangents when tracing the joint. + # Stashing them as part of our "metadata" makes it simpler if we want to run our analysis + # pass once, and re-use the output throughout AOTAutograd + traced_tangents: List[Any] + + # Each of these is a list telling us about subclasses for the inputs/outputs/grad_outs + # They are used throughout AOTDispatch to tell us how to generate a list of subclass tensors, + # Given a (potentially larger) list of plain torch tensors. + + # Taking subclass_inp_meta as an example: + # subclass_inp_meta[i] = j (an int) tells us: + # "The i'th user input is not a subclass, and corresponds to inputs[j] of the plain-tensor graph." + # subclass_inp_meta[i] = SubclassCreationMeta(flat_tensor_start_idx=3, arg_count=2) + # "The i'th user input is subclass holding two inner tensors, which are + # inputs[3] and inputs[4] of the plain-tensor graph". + + # length = # user inputs + subclass_inp_meta: List[Union[int, SubclassCreationMeta]] + # So, the full set of outputs to the forward graph looks something like: + # (*mutated_inps, *user_outs, *intermediate_bases, *saved_for_bw_tensors) + # where the first 3 of those 4 can be subclasses + # (but not saved_for_bw tensors, since these are internal to the compiler + # and not user visible, so there's no point in wrapping/unwrapping them at runtime). + # This list contains subclass information on all of the fw graph outputs + # except for saved_for_bw_tensors. + subclass_fw_graph_out_meta: List[Union[int, SubclassCreationMeta]] + # length = # backward graph inputs + subclass_tangent_meta: List[Union[int, SubclassCreationMeta]] + # TODO: we should kill this + # (need to default it to not break internal) + is_train: bool = False + + num_symints_saved_for_bw: Optional[int] = None + + # The grad_enabled mutation that will be emitted in the runtime_wrapper epilogue + # NOTE: AOTAutograd will assume that the ambient `is_grad_enabled` is the grad mode + # that is intended to be in effect prior to running the graph, in keeping with + # equivalence to eager mode. It is the responsibility of upstream graph acquisition + # to reset the grad mode to its pre-graph value prior to calling aot_autograd. + grad_enabled_mutation: Optional[bool] = None + + # Keeps track of whether `torch.use_deterministic_algorithms` was turned on + # when the forward was run. If deterministic mode was turned off during the + # forward, but is turned on during the backward call, then an error is + # raised + deterministic: Optional[bool] = None + + # Map of effect type (ex. _EffectType.ORDERED) to token. If there are + # side-effectful operators, FunctionalTensorMode will populate this + # dictionary telling us how many tokens we will need during tracing. + tokens: Dict[Any, torch.Tensor] = field(default_factory=dict) + + def __post_init__(self): + # pre-compute the indices of the inputs that are mutated. + # When keep_input_mutations is set, we don't need to worry about our epilogue + # handling data-only mutations, because we keep them directly in the graph. 
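# The comprehensions below partition inputs by InputAliasInfo.mutation_type.
# As an illustrative sketch (it assumes that keep_input_mutations=False makes
# _check_if_mutation_can_be_in_graph return False, so no mutation is kept
# in the graph):
#
# >>> info = InputAliasInfo(
# ...     is_leaf=True, mutates_data=True, mutates_metadata=False,
# ...     mutations_hidden_from_autograd=False,
# ...     mutations_under_no_grad_or_inference_mode=False,
# ...     mutates_storage_metadata=False, requires_grad=False,
# ...     keep_input_mutations=False)
# >>> info.mutation_type
# <MutationType.MUTATED_OUT_GRAPH: 3>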
+ + mutated_inp_runtime_indices = [ + i + for i, m in enumerate(self.input_info) + if (m.mutation_type == MutationType.MUTATED_OUT_GRAPH) + ] + + mutated_graph_handled_indices = [ + i + for i, m in enumerate(self.input_info) + if m.mutation_type == MutationType.MUTATED_IN_GRAPH + ] + self.mutated_graph_handled_indices = mutated_graph_handled_indices + self.num_mutated_graph_handled_indices = len(self.mutated_graph_handled_indices) + + mutated_graph_handled_indices_seen_by_autograd = [ + i + for i in mutated_graph_handled_indices + if not self.input_info[i].mutations_hidden_from_autograd + ] + + self.mutated_graph_handled_indices_seen_by_autograd = ( + mutated_graph_handled_indices_seen_by_autograd + ) + self.num_mutated_graph_handled_indices_seen_by_autograd = len( + self.mutated_graph_handled_indices_seen_by_autograd + ) + + aliased_out_indices = [ + i + for i, m in enumerate(self.output_info) + if m.output_type + not in [ + OutputType.non_alias, + OutputType.unsafe_view_alias, + OutputType.custom_function_view, + ] + ] + unsafe_view_out_indices = [ + i + for i, m in enumerate(self.output_info) + if m.output_type is OutputType.unsafe_view_alias + ] + + # This is pre-computed in post_init for perf. + # It contains the index of every element + # of input_info that corresponds to a mutation (data or metadata or both) + self.mutated_inp_runtime_indices = mutated_inp_runtime_indices + self.num_mutated_inp_runtime_indices = len(self.mutated_inp_runtime_indices) + + # This is pre-computed for perf. + # It contains the index of every element + # of output_info that corresponds to an alias (either of an input or intermediate) + self.aliased_out_indices = aliased_out_indices + self.unsafe_view_out_indices = unsafe_view_out_indices + self.num_outputs = len(self.output_info) + self.num_outputs_non_aliased = len( + [ + x + for x in self.output_info + if x.output_type + in [ + OutputType.non_alias, + OutputType.unsafe_view_alias, + OutputType.custom_function_view, + ] + ] + ) + self.num_outputs_aliased_to_inputs = len( + [ + x + for x in self.output_info + if x.output_type + in [ + OutputType.alias_of_input, + OutputType.is_input, + ] + ] + ) + self.num_unsafe_view_outputs = len(self.unsafe_view_out_indices) + self.num_outputs_aliased_to_intermediates = len( + [ + x + for x in self.output_info + if x.output_type + in [ + OutputType.alias_of_intermediate, + OutputType.alias_of_intermediate_save_as_output, + OutputType.alias_of_intermediate_base_is_user_output, + ] + ] + ) + self.num_outputs_aliased = ( + self.num_outputs_aliased_to_inputs + + self.num_outputs_aliased_to_intermediates + ) + + self.dynamic_outputs = any(o.dynamic_dims for o in self.output_info) + # See Note: [AOTAutograd Backward Guards] + # This is pre-computed for fast asserts on the types of our grad_outputs in the backward. + # Eventually, we should kill this and replace with real backward guards. + # (we want to precompute the "runtime" types, so replace FakeTensor with torch.Tensor) + self.output_types = [ + torch.Tensor if isinstance(x, FakeTensor) else type(x) + for x in self.traced_tangents + ] + + self.is_rng_op_functionalized = config.functionalize_rng_ops + # All of the above metadata is collected by tracing the fw function. + # However, extra outputs for rng offsets behave differently. Both fwd + # and bwd graphs have their own outputs for the total consumed offsets. + # Unlike mutated inputs, we don't have to worry about sending the right + # set of tensors between fwd and bwd. 
Fwd and bwd offsets are + # independent and simpler to handle. Therefore, we track them + # separately. + self.num_outputs_rng_offset = 1 if self.is_rng_op_functionalized else 0 + + # Our forward() returns both (mutated_inputs, outputs, output_intermediate_bases, saved_tensors, saved_symints) + self.num_forward_returns = ( + self.num_mutated_inp_runtime_indices + + self.num_outputs + + self.num_intermediate_bases + ) + # In case of functionalization of rng ops, the fw_module returns one + # additional output for rng offset. This rng offset is used right + # away to advance the rng state, and is not passed on to the raw + # outputs. However, we need to know the exact boundary to identify + # which tensors to be saved for the bwd graph. num_forward captures + # this information. + self.num_forward = self.num_forward_returns + self.num_outputs_rng_offset + + @property + def tensors_saved_for_backwards_slice(self): + assert self.num_symints_saved_for_bw is not None + if self.num_symints_saved_for_bw > 0: + return slice(self.num_forward, -self.num_symints_saved_for_bw) + else: + return slice(self.num_forward, None) + + @property + def symints_saved_for_backwards_slice(self): + assert self.num_symints_saved_for_bw is not None + if self.num_symints_saved_for_bw > 0: + return slice(-self.num_symints_saved_for_bw, None) + else: + return slice(0, 0) # empty slice + + def __eq__(self, other): + if not isinstance(other, ViewAndMutationMeta): + return NotImplemented + return ( + self.input_info == other.input_info + and self.output_info == other.output_info + and self.num_intermediate_bases == other.num_intermediate_bases + and self.keep_input_mutations == other.keep_input_mutations + and self.is_rng_op_functionalized == other.is_rng_op_functionalized + and self.num_outputs_rng_offset == other.num_outputs_rng_offset + and len(self.traced_tangents) == len(other.traced_tangents) + and all( + x.shape == y.shape and x.dtype == y.dtype + for x, y, in zip(self.traced_tangents, other.traced_tangents) + ) + ) + + +@dataclass(eq=False) +class SubclassMeta: + # A copy of all forward metadata, but computed on the *dense* tensor forward (after desugaring subclasses) + # So for example, if the user had a model containing two `TwoTensor` inputs, + # Then `SubclassMeta.fw_metadata.input_infos` would have length 4 here. + fw_metadata: ViewAndMutationMeta + + # Note: [Computing Subclass Metadata about grad_inputs] + # Given a list of flattened, plain tensor grad_inputs, this tells us how to reconstruct the grad_input subclasses + # + # You might think: why not just assume that all grad_inputs will have the same subclass-ness as the original inputs? + # (AOTAutograd generally assumes other properties, e.g. that grad_outputs are contiguous) + # + # This doesn't really work though. take this example: + # + # def f(DoubleTensor, DenseTensor): + # return DoubleTensor * DenseTensor + # + # In the above example, the .grad field of *both* DoubleTensor and DenseTensor will be a DoubleTensor. + # When we trace out a joint fw-bw graph, we'll end up returning two subclasses for the two grad_inputs. + # This means that our backward graph will return 4 outputs (two dense tensors for each DoubleTensor grad_input) + # and we need to properly store the metadata that tells us how to turn these 4 outputs back into DoubleTensors. + # + # Note that this info **cannot** easily be figured out from ViewAndMutationMeta. + # We can only compute this info by tracing the entire joint and examining the grad_inputs that we computed. 
+ # + # See Note: [AOTAutograd Backward Guards] + # This will also eventually require us to install backward guards, + # in case we made incorrect assumptions about the subclass-ness of our grad_outputs + # + # Optional field because we don't compute for inference graphs + grad_input_metas: Optional[List[Union[int, SubclassCreationMeta]]] + + def __init__(self): + # The fields in this class get set after its construction. + pass + + +# This class exists because: +# - the autograd.Function.forward() in aot autograd returns outputs that might alias inputs +# - we only care about the metadata on those aliases, so we can regenerate them. +# We do not want them to participate in the autograd.Function. +# We do that by wrapping them in an opaque class, so the autograd.Function +# does not know to treat them as tensors. +@dataclass(frozen=True) +class TensorAlias: + alias: torch.Tensor + + +@dataclass +class BackwardSignature: + """ + Provides information about the backward section of an exported + joint forward-backward graph. + For a particular fx GraphModule, this class contains information on: + (1) A mapping from each gradient (backwards output) to the parameter + it corresponds to (forward input) + (2) A mapping from each gradient (backwards output) to the user input + it corresponds to (forward input) + (3) Which of the forward outputs corresponds to the loss, that we backprop on. + + Each string name is the `node.name` of the corresponding node in the fx graph. + """ + + gradients_to_parameters: Dict[str, str] + gradients_to_user_inputs: Dict[str, str] + loss_output: str + + +GraphOutputName = NewType("GraphOutputName", str) +GraphInputName = NewType("GraphInputName", str) +FQN = NewType("FQN", str) + + +@dataclass +class GraphSignature: + """ + Provides information about an exported module. + For a particular fx GraphModule, this class contains information on: + (1) Which graph inputs are parameters, buffers, or user inputs + (2) (for params/buffers) a mapping from the name of each graph argument + to its parameter/buffer FQN in the original nn.Module. + (3) If there are input mutations, these are represented as extra outputs + in the fx GraphModule. We provide a mapping from these + extra output names to the names of the actual inputs. + (4) The pytree metadata on how to flatten/unflatten inputs and outputs. + The corresponding FX GraphModule only accepts and returns + pytree-flattened inputs/outputs. + (5) (Optionally) if the FX is a joint forward-backward graph, we provide + a signature on the backward section of the joint graph. + """ + + parameters: List[FQN] + buffers: List[FQN] + + user_inputs: List[GraphInputName] + user_outputs: List[GraphOutputName] + inputs_to_parameters: Dict[GraphInputName, FQN] + inputs_to_buffers: Dict[GraphInputName, FQN] + + # If the user's module mutates a buffer, + # it's represented in the graph as an extra graph output. + # This dict is a mapping from + # "graph outputs that correspond to updated buffers" + # to the FQN names of those mutated buffers. 
+ buffers_to_mutate: Dict[GraphOutputName, FQN] + user_inputs_to_mutate: Dict[GraphOutputName, GraphInputName] + + in_spec: pytree.TreeSpec + out_spec: pytree.TreeSpec + + backward_signature: Optional[BackwardSignature] + + input_tokens: List[GraphInputName] + output_tokens: List[GraphOutputName] + + @classmethod + def from_tracing_metadata( + cls, + *, + in_spec: pytree.TreeSpec, + out_spec: pytree.TreeSpec, + graph_input_names: List[str], + graph_output_names: List[str], + view_mutation_metadata: ViewAndMutationMeta, + named_parameters: List[str], + named_buffers: List[str], + num_user_inputs: int, + num_user_outputs: int, + loss_index: Optional[int], + backward_signature: Optional[BackwardSignature], + ) -> "GraphSignature": + graph_inputs = graph_input_names + graph_outputs = graph_output_names + parameters = list(named_parameters) + buffers = list(named_buffers) + num_tokens = len(view_mutation_metadata.tokens) + + # Calling convention assumptions: + # (1) graph inputs = (input_tokens, params, buffers, user_inputs) + # (2) graph outputs = (output_tokens, mutated_inputs, user_outs, param_gradients) + # (If we are capturing an inference graph, this convention is identical + # except that param_gradients is empty) + # See Note [Side-Effectful Tokens in AOTAutograd] for information on tokens + + # Address input calling conventions: + start, stop = 0, num_tokens + input_tokens = graph_inputs[start:stop] + + start, stop = stop, stop + len(parameters) + inputs_to_parameters = dict(zip(graph_inputs[start:stop], parameters)) + + start, stop = stop, stop + len(buffers) + inputs_to_buffers = dict( + zip( + graph_inputs[start:stop], + buffers, + ) + ) + + start, stop = stop, stop + num_user_inputs + user_inputs = graph_inputs[start:stop] + + # We should've gone through all the inputs now + assert len(graph_inputs) - stop == 0 + + # Address output calling conventions: + start, stop = 0, num_tokens + output_tokens = graph_outputs[start:stop] + + names = [*input_tokens, *parameters, *buffers, *user_inputs] + mutations = [] + for idx, input_info in enumerate(view_mutation_metadata.input_info): + if input_info.mutates_data: + # Only buffers can be mutated, not parameters + assert idx >= len(parameters) + mutations.append(names[idx + num_tokens]) + + assert len(mutations) == view_mutation_metadata.num_mutated_inp_runtime_indices + + start, stop = ( + stop, + stop + view_mutation_metadata.num_mutated_inp_runtime_indices, + ) + outputs_to_mutations = dict(zip(graph_outputs[start:stop], mutations)) + + user_inputs_to_mutate = {} + buffers_to_mutate = {} + for output_name, mutation_name in outputs_to_mutations.items(): + if mutation_name in user_inputs: + user_inputs_to_mutate[output_name] = mutation_name + else: + assert mutation_name in buffers + buffers_to_mutate[output_name] = mutation_name + + start, stop = stop, stop + num_user_outputs + user_outputs = graph_outputs[start:stop] + + unused_outputs = len(graph_outputs) - stop + if backward_signature is not None: + unused_outputs -= len(backward_signature.gradients_to_parameters) + len( + backward_signature.gradients_to_user_inputs + ) + assert unused_outputs == 0 + + return GraphSignature( + parameters=parameters, # type: ignore[arg-type] + buffers=buffers, # type: ignore[arg-type] + user_inputs=user_inputs, # type: ignore[arg-type] + user_outputs=user_outputs, # type: ignore[arg-type] + inputs_to_buffers=inputs_to_buffers, # type: ignore[arg-type] + inputs_to_parameters=inputs_to_parameters, # type: ignore[arg-type] + 
user_inputs_to_mutate=user_inputs_to_mutate, + buffers_to_mutate=buffers_to_mutate, # type: ignore[arg-type] + in_spec=in_spec, + out_spec=out_spec, + backward_signature=backward_signature, + input_tokens=input_tokens, # type: ignore[arg-type] + output_tokens=output_tokens, # type: ignore[arg-type] + ) + + +@dataclass +class AOTConfig: + """ + Configuration for AOTDispatcher + """ + + fw_compiler: Callable + bw_compiler: Callable + partition_fn: Callable + decompositions: Dict[Callable, Callable] + num_params_buffers: int + aot_id: int + keep_inference_input_mutations: bool + is_export: bool = False + no_tangents: bool = False + dynamic_shapes: bool = False + aot_autograd_arg_pos_to_source: Optional[List[Source]] = None + inference_compiler: Optional[Callable] = None + enable_log: bool = True + # this is always false outside of export. + pre_dispatch: bool = False + + def __post_init__(self): + if self.pre_dispatch: + assert self.is_export, "Can only have pre_dispatch IR for export." + + +SubclassTracingInfo = collections.namedtuple( + "SubclassTracingInfo", + ["plain_tensor_trace_fn", "plain_tensor_args", "maybe_subclass_meta"], +) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/subclass_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/subclass_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cee3cf6e4edab5a263a136427da0830a0dab3c4d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/subclass_utils.py @@ -0,0 +1,295 @@ +""" +This file contains utilities for tracing through __torch_dispatch__ based tensor subclasses and modes. +AOTAutograd's responsibility is to trace through all pytorch capabilities that live in the pytorch dispatcher, +and this includes tensor subclasses that implement __torch_dispatch__. +""" + +from typing import Any, List, Optional, Tuple, Union + +import torch.utils._pytree as pytree + +from torch import Tensor +from torch.utils._python_dispatch import is_traceable_wrapper_subclass + +from .schemas import MutationType, SubclassCreationMeta, ViewAndMutationMeta +from .utils import strict_zip + +zip = strict_zip + + +def requires_subclass_dispatch(args, fw_metadata: ViewAndMutationMeta) -> bool: + args_flattened = pytree.arg_tree_leaves(*args) + any_subclass_args = any( + is_traceable_wrapper_subclass(x) + for x in args_flattened + if isinstance(x, Tensor) + ) + from torch._functorch._aot_autograd.schemas import SubclassCreationMeta + + any_subclass_outputs = any( + type(x) is SubclassCreationMeta for x in fw_metadata.subclass_fw_graph_out_meta + ) + # This tells us whether or not we need to perform any unwrapping/wrapping of tensor subclasses at runtime. 
+ return any_subclass_args or any_subclass_outputs + + +# Given a flat list of arguments, some of which may be tensor subclasses, +# computes metadata about "how to reconstruct the current list of subclasses, +# if we were given their flattened dense tensors instead" +def create_subclass_meta( + curr_args: Union[List[Any], Tuple[Any, ...]], +) -> List[Union[int, SubclassCreationMeta]]: + idx = 0 + infos: List[Union[int, SubclassCreationMeta]] = [] + for a in curr_args: + if isinstance(a, Tensor) and is_traceable_wrapper_subclass(a): + attrs, meta = a.__tensor_flatten__() # type: ignore[attr-defined] + start_idx = idx + cnt = len(attrs) + curr_cnt = cnt + infos.append( + SubclassCreationMeta( + flat_tensor_start_idx=start_idx, + arg_count=curr_cnt, + original_subclass=a, + meta=meta, + inner_keys=attrs, + outer_size=a.shape, + outer_stride=a.stride(), + ) + ) + else: + infos.append(idx) + cnt = 1 + idx += cnt + return infos + + +# Output structure: +# - List[Tensor] if tracing an inference graph +# - Tuple[List[Tensor], List[Tensor]] if tracing a joint graph. +# This function effectively concats each inner list of subclass tensors +# into a (potentially longer) list of inner tensors. +# +# This function takes in a pytree of arguments and unwraps any tensor subclasses. +# Annoyingly, we can't use pytrees to perform the unwrapping, because unwrapping returns +# a list of tensors that we would then need to concat together. +# Instead, we specialize the logic for the inference vs. joint graph case. +# NOTE: this function is hot, since we unwrap tensor subclass inputs at runtime +def unwrap_tensor_subclasses(wrapped_args, *, is_joint_structure: bool): + def concat_inner_tensors_from_subclasses(xs): + xs_inner = [] + for x in xs: + if isinstance(x, Tensor) and is_traceable_wrapper_subclass(x): + attrs, _ = x.__tensor_flatten__() # type: ignore[attr-defined] + xs_inner += [getattr(x, attr) for attr in attrs] + else: + xs_inner += [x] + return xs_inner + + if is_joint_structure: + assert isinstance(wrapped_args, tuple) and len(wrapped_args) == 2 + assert isinstance(wrapped_args[0], (tuple, list)) and isinstance( + wrapped_args[1], (tuple, list) + ) + unwrapped_args_fw = concat_inner_tensors_from_subclasses(wrapped_args[0]) + unwrapped_args_tangents = concat_inner_tensors_from_subclasses(wrapped_args[1]) + unwrapped_args = (unwrapped_args_fw, unwrapped_args_tangents) + else: + assert isinstance(wrapped_args, (list, tuple)) + unwrapped_args_fw = concat_inner_tensors_from_subclasses(wrapped_args) + unwrapped_args = unwrapped_args_fw + return unwrapped_args + + +# Turns a flattened list of tensor arguments into (maybe) subclass tensors. +# This function is used both at trace time and runtime, so we have an is_runtime flag telling us which context we're in. 
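# To make the flattening bookkeeping concrete (illustrative only; ``TwoTensor``
# is a stand-in for any traceable wrapper subclass holding two inner tensors,
# e.g. the test subclass in torch.testing._internal.two_tensor):
#
# >>> import torch
# >>> args = [torch.randn(3), TwoTensor(torch.randn(3), torch.randn(3))]
# >>> len(unwrap_tensor_subclasses(args, is_joint_structure=False))
# 3
#
# i.e. the plain tensor passes through unchanged while the subclass contributes
# its two inner tensors, so its SubclassCreationMeta would record
# flat_tensor_start_idx=1 and arg_count=2 for the wrapping direction below.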
+def wrap_tensor_subclasses( + unwrapped_args: Union[Tuple[Any, ...], List[Any]], + *, + subclass_metas: List[Union[int, SubclassCreationMeta]], + num_fw_outs_saved_for_bw: Optional[int] = None, + is_runtime: bool = False, +) -> Tuple[Any, ...]: + wrapped_args = [] + num_args_tallied = 0 + for subclass_meta in subclass_metas: + if isinstance(subclass_meta, int): + wrapped_args.append(unwrapped_args[subclass_meta]) + num_args_tallied += 1 + else: + assert isinstance(subclass_meta, SubclassCreationMeta) + wrapped_args.append( + subclass_meta.creation_fn(unwrapped_args, is_runtime=is_runtime) + ) + num_args_tallied += subclass_meta.arg_count + + # Note: [Partitioner handling for Subclasses, Part 2] + # At the beginning of AOTAutograd, we collect metadata on the inputs and outputs of the user fw, + # to figure out which inputs/outputs are subclasses, and how to reconstruct the subclasses after flattening them. + # + # When this function is called at runtime in the forward, + # we have been passed a list of (flattened) dense-tensor fw-outs, and need to reconstruct any subclass fw outs. + # + # One reasonable question that you should ask: when should the dense_tensor -> subclass_tensor wrapping happen? + # Answer: we do it **inside of our compiled autograd.Function**. + # This seems like morally the right place: autograd happens above subclass desugaring, + # so autograd should see actual tensor subclasses at runtime, and not flattened dense tensors. + # + # This causes a tricky interaction though: when we run the min-cut partitioner to divvy up the joint graph + # into a forward and backward graph, we end up with some activations that show up as extra outputs + # in the compiled forward graph, that are **not** user outputs. + # These activations are not visible to the user, and so there's no need for us to wrap them back into subclasses. + # + # On top of that, when we first computed subclass metadata (in `run_functionalized_fw_and_collect_metadata`), + # we computed subclass metadata on every forward output, but this did **not** include activations + # created by the partitioner. + # as a result, `unwrapped_args` here will correspond to (*unwrapped_user_fw_outs, *activations), + # but `subclass_metas` will only correspond to subclass metatadata on `user_fw_outs`. + # We then need to make sure that we return (*wrapped_user_fw_outs, *activations). + if num_fw_outs_saved_for_bw is not None: + assert len(unwrapped_args) == num_args_tallied + num_fw_outs_saved_for_bw, ( + f"Expected the number actual unwrapped-subclass outputs {len(unwrapped_args)} to equal " + f"the number of args calculated from subclasses ({num_args_tallied}) plus the number of " + f"additional activations saved for the backward pass ({num_fw_outs_saved_for_bw})" + ) + activations = unwrapped_args[num_args_tallied:] + if isinstance(wrapped_args, tuple) and isinstance(activations, tuple): + return wrapped_args + activations + return tuple(list(wrapped_args) + list(activations)) + else: + assert len(unwrapped_args) == num_args_tallied + return tuple(wrapped_args) + + +# Given a bunch of "dense" tensor arguments, this function (potentially) wraps them into tensor subclasses. +# This function carefully handles the inference vs. 
joint cases: +# - when is_joint_structure is True, args is (primals, tangents) +# - when is_joint_structure is False, args is [*primals] +def wrap_tensor_subclasses_maybe_joint( + unwrapped_args, *, is_joint_structure: bool, meta: ViewAndMutationMeta +) -> Union[Tuple[Any, ...], List[Any]]: + # Since this function is re-used for both inference and joint graphs, + if is_joint_structure: + assert isinstance(unwrapped_args, tuple) and len(unwrapped_args) == 2 + assert isinstance(unwrapped_args[0], (tuple, list)) and isinstance( + unwrapped_args[1], (tuple, list) + ) + primals, tangents = unwrapped_args[0], unwrapped_args[1] + wrapped_primals = wrap_tensor_subclasses( + primals, subclass_metas=meta.subclass_inp_meta + ) + wrapped_tangents = wrap_tensor_subclasses( + tangents, subclass_metas=meta.subclass_tangent_meta + ) + return (wrapped_primals, wrapped_tangents) + else: + wrapped_args = wrap_tensor_subclasses( + unwrapped_args, subclass_metas=meta.subclass_inp_meta + ) + return wrapped_args + + +# TODO: UNUSED. delete? +def create_metadata_for_subclass(meta: ViewAndMutationMeta) -> ViewAndMutationMeta: + # input infos + input_info = [] + for inp, subclass_meta in zip(meta.input_info, meta.subclass_inp_meta): + num_inps = 1 if isinstance(subclass_meta, int) else subclass_meta.arg_count + for _ in range(num_inps): + input_info.append(inp) + + # output infos + output_info = [] + subclass_out_meta_user_outs_only = meta.subclass_fw_graph_out_meta[ + meta.num_mutated_inp_runtime_indices : + ] + if meta.num_intermediate_bases > 0: + subclass_out_meta_user_outs_only = subclass_out_meta_user_outs_only[ + : -meta.num_intermediate_bases + ] + # sanity assert + assert len(meta.output_info) == len(subclass_out_meta_user_outs_only) + # Assume that the information on the output is shared by all of its inner tensors. + for out, subclass_meta in zip(meta.output_info, subclass_out_meta_user_outs_only): + num_outs = 1 if isinstance(subclass_meta, int) else subclass_meta.arg_count + for _ in range(num_outs): + output_info.append(out) + + # A bit hacky, but we don't actually care about all of the metadata here. 
+ # This metadata is used **underneath** both autograd and subclass de-sugaring, + # So all we really care about is stuff like: + # - num inputs/outputs (needed by the partitioner) + # - input mutations (**not** used today, since we don't handle input mutations inside the subclass, + # although we should handle this eventually) + # TODO: add a test case to assert we error when this happens, instead of getting silent correctness + num_intermediate_bases = None + keep_input_mutations = meta.keep_input_mutations + traced_tangents = None + subclass_inp_meta = None + subclass_fw_graph_out_meta = None + subclass_tangent_meta = None + + metadata = ViewAndMutationMeta( + input_info=input_info, # type: ignore[arg-type] + output_info=output_info, # type: ignore[arg-type] + num_intermediate_bases=num_intermediate_bases, # type: ignore[arg-type] + keep_input_mutations=keep_input_mutations, # type: ignore[arg-type] + traced_tangents=traced_tangents, # type: ignore[arg-type] + subclass_inp_meta=subclass_inp_meta, # type: ignore[arg-type] + subclass_fw_graph_out_meta=subclass_fw_graph_out_meta, # type: ignore[arg-type] + subclass_tangent_meta=subclass_tangent_meta, # type: ignore[arg-type] + ) + return metadata + + +def compute_inner_mutated_inp_indices_from_subclass_meta( + fw_metadata: ViewAndMutationMeta, + inner_metadata: ViewAndMutationMeta, +) -> List[int]: + # Note: [Recomputing subclass mutation handling] + # + # Generally, if a subclass requires grad, its components will not require grad. + # But for the purposes of tracking returned tensors, we should treat those component + # tensors as if they require grad. + # + # For example, if the subclass tensor requires grad and will be mutated in a way that + # requires us to handle the mutation outside of the graph, we need to return it + # from the forward graph. The inner_meta data won't consider the component tensors + # as if they need to be returned, because they don't require grad; but really, we + # should handle those tensors the same way we handle the subclass tensor itself; i.e. + # if we'd include the subclass tensor as part of the outputs, then we should also + # include the component tensors. + # + # To do this, we patch num_mutated_inp_runtime_indices below by expanding the inputs + # from the outer subclass tensors and propagating + + updated_input_info = [] + inner_idx = 0 + if not fw_metadata.subclass_inp_meta: + # Sometimes we don't have subclass info, e.g. 
synthetic_base codepaths + return inner_metadata.mutated_inp_runtime_indices + assert len(fw_metadata.subclass_inp_meta) == len(fw_metadata.input_info) + for outer_idx, inp_meta in enumerate(fw_metadata.subclass_inp_meta): + if isinstance(inp_meta, int): + assert outer_idx < len(fw_metadata.input_info) + if inner_metadata is not None: + assert inner_idx < len(inner_metadata.input_info) + assert ( + inner_metadata.input_info[inner_idx] + == fw_metadata.input_info[outer_idx] + ) + updated_input_info.append(fw_metadata.input_info[outer_idx]) + inner_idx += 1 + else: + for _ in range(inp_meta.arg_count): + updated_input_info.append(fw_metadata.input_info[outer_idx]) + inner_idx += 1 + if inner_metadata is not None: + assert len(inner_metadata.input_info) == len(updated_input_info) + + return [ + i + for i, inp in enumerate(updated_input_info) + if inp.mutation_type == MutationType.MUTATED_OUT_GRAPH + ] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/traced_function_transforms.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/traced_function_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..73938ddd08b2d5736779e28ad1934e4deb88017c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/traced_function_transforms.py @@ -0,0 +1,698 @@ +""" +This module is responsible for transforming functions to be traced into a form +that is easier for the downstream infra (e.g. Autograd, FX, AOTAutograd analysis) +to handle. + +It does so by: +1. functionalization (including RNG functionalzation) +2. creating a joint graph when required +3. transforming mutations into extra outputs +4. dispatching subclasses +""" + +import warnings +from contextlib import nullcontext +from functools import wraps +from typing import Any, Callable, List, Tuple, Union +from unittest.mock import patch + +import torch +import torch.fx.traceback as fx_traceback +import torch.utils._pytree as pytree +from torch import Tensor +from torch._decomp.decompositions_for_rng import PhiloxStateTracker +from torch._guards import detect_fake_mode +from torch._prims_common import CUDARngStateHelper +from torch.fx.experimental.symbolic_shapes import definitely_false, sym_eq +from torch.nn.utils import stateless + +from .. import config +from .collect_metadata_analysis import run_functionalized_fw_and_collect_metadata +from .functional_utils import ( + from_fun, + has_data_mutation, + has_metadata_mutation, + is_fun, + sync_functional_tensor, + to_fun, +) +from .logging_utils import setup_stacktrace_preservation_hooks +from .schemas import ( + AOTConfig, + MutationType, + OutputType, + SubclassMeta, + SubclassTracingInfo, + ViewAndMutationMeta, +) +from .subclass_utils import ( + create_subclass_meta, + requires_subclass_dispatch, + unwrap_tensor_subclasses, + wrap_tensor_subclasses_maybe_joint, +) +from .utils import maybe_to_fresh_input + + +# This function returns a new function that returns mutated inputs as outputs. +# if keep_data_input_mutations is set, then we assume that data-only mutations +# will be left in the graph, and we only return metadata-mutated inputs as outputs. +def fn_input_mutations_to_outputs( + fn: Callable, + meta: ViewAndMutationMeta, + keep_data_input_mutations: bool, +) -> Any: + @wraps(fn) + def inner_fn(*args): + outs = fn(*args) + assert len(meta.output_info) == len(outs) + # The compiled fw will return mutated input tensors, *including* metadata-only mutation. 
+ # However, if keep_data_input_mutations is set, the compiled fw only needs to return metadata-mutated inputs. + # (because data-only input mutations are handled directly in the compiled graph) + mutated_inputs_to_return = [ + x for (i, x) in enumerate(args) if i in meta.mutated_inp_runtime_indices + ] + return *mutated_inputs_to_return, *outs + + return inner_fn + + +# This function takes in a fn with external aliasing and mutation, +# and returns a new fn with no external aliasing and mutation, +# as needed for autograd. +# The main transformations are: +# - Return mutated inputs as extra outputs +# - Clone mutated inputs that require gradients, +# because autograd will require us to pass the pre-mutated inputs into autograd.grad +# - Return intermediate bases of outputs as additional outputs, +# needed to appease autograd.Function +# The new function returns: +# (1) The updated outputs +# (2) A boolean mask of len(new_fn_outputs), +# that can be used to tell autograd.grad which outputs should get tangents +# if we trace the backward. +def fn_prepped_for_autograd( + fn: Callable, + meta: ViewAndMutationMeta, +) -> Any: + @wraps(fn) + def inner_fn(*args): + args_maybe_cloned = [ + maybe_to_fresh_input(i, t, meta) for i, t in enumerate(args) + ] + + outs = fn(*args_maybe_cloned) + assert isinstance(outs, (tuple, list)) + outs = list(outs) + assert len(meta.output_info) == len(outs) + + mutated_inputs_to_return = [ + x + for (i, x) in enumerate(args_maybe_cloned) + if i in meta.mutated_inp_runtime_indices + ] + + intermediate_bases = [] + for i, (o, info) in enumerate(zip(outs, meta.output_info)): + if info.output_type == OutputType.alias_of_intermediate_save_as_output: + intermediate_bases.append(o._base) + + assert meta.num_intermediate_bases == len(intermediate_bases) + + # the compiled forward should return (mutated_inputs, user_outs, intermediate_bases) + fw_outs_to_return = *mutated_inputs_to_return, *outs, *intermediate_bases + + # Also return a boolean mask specifying which outputs to this function will be used as tangents + mutated_inputs_grad_mask = [ + meta.input_info[meta.mutated_inp_runtime_indices[i]].mutates_data + and meta.input_info[meta.mutated_inp_runtime_indices[i]].requires_grad + for (i, x) in enumerate(mutated_inputs_to_return) + ] + + # Pass any (non-aliased) outputs in as tangents, since they'll be returned as outputs in the fw + # For outputs that are aliases of intermediates, we will have returned the output's _base as an output in the graph instead, + # which we *should* send to grad() + output_grad_mask = [ + meta.output_info[i].output_type + in [ + OutputType.non_alias, + OutputType.unsafe_view_alias, + OutputType.custom_function_view, + ] + # Also, only tensor outputs should participate in the backward + # (in particular, Symint outputs in the forward graph shouldn't get tangents) + and issubclass(meta.output_info[i].raw_type, Tensor) + and meta.output_info[i].requires_grad + for (i, x) in enumerate(outs) + ] + + intermediate_base_grad_mask = [True for _ in range(len(intermediate_bases))] + + out_grad_mask = ( + mutated_inputs_grad_mask + output_grad_mask + intermediate_base_grad_mask + ) + assert len(out_grad_mask) == len(fw_outs_to_return) + + # Take care to grab and sync the updated inputs from primals_after_cloning (the inputs we actually mutate!) 
+ # and not primals (the preserved inputs, pre-mutation, that we pass to grad()) + # This is annoying: our joint function needs to be aware of functionalization + # (syncing mutated inputs before calling autograd.grad()) + # In theory, we could make the autograd engine do this automatically, although that probably isn't any cleaner. + for arg in args_maybe_cloned: + if not isinstance(arg, Tensor): + continue + sync_functional_tensor(arg) + + return fw_outs_to_return, out_grad_mask + + return inner_fn + + +# Given a fn, computes the joint. +# NOTE: fn is expects the following behavior: +# (1) fn() needs to return a tuple of (outs, mask), +# where `mask` tells us which outputs are meant to have tangents. +# we don't know this info automatically, because we don't actually want to blindly +# compute tangents for every output that requires grad. +# Specifically, outputs that alias inputs won't participate in the backward and get tangents. +# (2) fn() cannot mutate any inputs that require gradient. +# otherwise, when we compute autograd.grad(), we will not take those input mutations into account +# (the way this is handled is that we ensure any inputs that normally get mutated are cloned first) +def create_joint(fn: Callable, *, aot_config: AOTConfig) -> Any: + def inner_fn(primals: List[Any], tangents: List[Any]): + outs, tangent_mask = fn(*primals) + assert len(tangent_mask) == len(outs) + outs_to_grad = [ + o for needs_tangent, o in zip(tangent_mask, outs) if needs_tangent + ] + assert len(outs_to_grad) == len(tangents) + + # Get the inputs that need gradients + grad_primals = [] + inputs_needs_grads = [] + # Note that we're not using primals here, + # being carefully not to pass any mutated inputs into autograd.grad() + for p in primals: + is_grad_tensor = isinstance(p, Tensor) and p.requires_grad + inputs_needs_grads.append(is_grad_tensor) + if is_grad_tensor: + grad_primals.append(p) + + # Get the outputs that need gradients + needed_outs = [] + needed_tangents = [] + for out, tangent in zip(outs_to_grad, tangents): + if isinstance(out, Tensor) and out.requires_grad: + # A bit sketchy, but fixes e.g. test_aot_autograd_exhaustive_matmul_cpu_float32 + # The issue is that we are sensitive to decomps that don't accurately maintain + # their output's _base.shape compared to eager mode, and this helps mitigate a bit. + # The not definitely_false is also sketchy; if unbacked + # symints are involved, we're just going to assume that the + # decomps setup the base shape correctly + needed_outs.append( + out + if not definitely_false(sym_eq(out.shape, tangent.shape)) + else out.view(tangent.shape) + ) + needed_tangents.append(tangent) + + setup_stacktrace_preservation_hooks([out.grad_fn for out in needed_outs]) + + if config.functionalize_rng_ops: + PhiloxStateTracker.mark_beginning_of_backward() + backward_out: Tuple[Tensor, ...] = tuple() + # Call the backwards pass + if grad_primals: + with fx_traceback.preserve_node_meta(): + # for full graph export, we always export a joint graph where we assume no tangents are needed. 
+ if aot_config.no_tangents: + assert len(needed_tangents) == 1 and needed_tangents[0].numel() == 1 + backward_out = torch.autograd.grad( + needed_outs, + grad_primals, + allow_unused=True, + ) + else: + backward_out = torch.autograd.grad( + needed_outs, + grad_primals, + grad_outputs=needed_tangents, + allow_unused=True, + ) + backward_out_iter = iter(backward_out) + return outs, [ + next(backward_out_iter) if i else None for i in inputs_needs_grads + ] + + def inner_fn_with_anomaly(*args): + with fx_traceback.preserve_node_meta(), warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Anomaly Detection has been enabled.") + with torch.autograd.detect_anomaly(check_nan=False): + return inner_fn(*args) + + return inner_fn_with_anomaly + + +def create_functionalized_rng_ops_wrapper(func, args, trace_joint=True) -> Any: + # Functionalization of rng ops changes the calling convention of the joint graph. + # It goes from (primals, tangents) to (seed, offset, primals, tangents) + # At runtime, we pass on the current seed and offset. This is hidden from + # the user. + fake_mode = detect_fake_mode() + if fake_mode is None: + fake_mode = nullcontext() + + def override_get_rng_state(device: Union[int, str, torch.device] = "cuda"): + out = PhiloxStateTracker.get_state_as_tensor() + return out + + def override_set_rng_state(x, device: Union[int, str, torch.device] = "cuda"): + PhiloxStateTracker.set_state_from_tensor(x) + + def append_rng_offsets(args): + if trace_joint: + # args signature before: Tuple(fwd_outputs), Tuple(bwd_outputs) + # args signature after: Tuple(fwd_outputs, new_fwd_rng_offset), Tuple(bwd_offset, new_bwd_rng_offset) + return ( + (*args[0], PhiloxStateTracker.get_updated_fwd_offset()), + (*args[1], PhiloxStateTracker.get_updated_bwd_offset()), + ) + else: + # args signature before: Tuple(fwd_outputs) + # args signature after: Tuple(fwd_outputs, new_fwd_rng_offset) + return (*args, PhiloxStateTracker.get_updated_fwd_offset()) + + def traced_joint( + primals, tangents, fwd_seed, fwd_base_offset, bwd_seed, bwd_base_offset + ): + with patch("torch.cuda.get_rng_state", override_get_rng_state), patch( + "torch.cuda.set_rng_state", override_set_rng_state + ): + return append_rng_offsets(func(primals, tangents)) + + def traced_forward(*primals_fwd_seed_fwd_base_offset): + # The signature is (*primals, seed, offset) + with patch("torch.cuda.get_rng_state", override_get_rng_state), patch( + "torch.cuda.set_rng_state", override_set_rng_state + ): + return append_rng_offsets(func(*primals_fwd_seed_fwd_base_offset[:-2])) + + if trace_joint: + # Get the current seed and offset to setup tracing. + fwd_seed, fwd_base_offset = CUDARngStateHelper.get_torch_state_as_tuple( + fake_mode + ) + bwd_seed, bwd_base_offset = CUDARngStateHelper.get_torch_state_as_tuple( + fake_mode + ) + PhiloxStateTracker.record_state(fwd_seed, fwd_base_offset, "forward") + PhiloxStateTracker.record_state(bwd_seed, bwd_base_offset, "backward") + return traced_joint, ( + *args, + fwd_seed, + fwd_base_offset, + bwd_seed, + bwd_base_offset, + ) + else: + # Get the current seed and offset to setup tracing. + fwd_seed, fwd_base_offset = CUDARngStateHelper.get_torch_state_as_tuple( + fake_mode + ) + PhiloxStateTracker.record_state(fwd_seed, fwd_base_offset, "forward") + return traced_forward, (*args, fwd_seed, fwd_base_offset) + + +# This creates the final function that we want to trace using make_fx(), +# in both aot_dispatch_autograd and aot_dispatch_base. 
+# Preconditions: +# - fn corresponds to the user's fw function +# - fn arguments have been flattened, duplicate arguments have been handled +# - In the returned function, the "primals" arguments *includes* synthetic bases. +# This function does the work of functionalizing the input function, +# and performing copy_() calls at the end of the function if `keep_input_mutations` is set. +# The function returned has signature that is either: +# (1) "traced_fn(primals: List[Any])" if trace_joint is False +# (2) "traced_fn(primals: List[Any], tangents: List[Any])" if trace_joint is True +# Returns a new (functionalized) function, and updated arguments to call it with. +def create_functionalized_fn( + fn, + args, + *, + meta: ViewAndMutationMeta, + aot_config: AOTConfig, + trace_joint: bool, +) -> Any: + @wraps(fn) + def _functionalized_f_helper(*args): + # See Note [Disabling Functionalize TLS Above Python Functionalization] + disable_above = torch._C._ExcludeDispatchKeyGuard( + torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize) + ) + + # See Note [Side-Effectful Tokens in AOTAutograd] + if trace_joint: + assert ( + isinstance(args, tuple) + and len(args) == 2 + and isinstance(args[0], (list, tuple)) + ) + tokens = args[0][: len(meta.tokens)] + actual_args = args[0][len(meta.tokens) :] + args = (actual_args, args[1]) + else: + tokens = args[: len(meta.tokens)] + args = args[len(meta.tokens) :] + assert all(token.numel() == 0 for token in tokens) + + with disable_above: + # Wrap inputs into functional wrappers + f_args = pytree.tree_map(to_fun, args) + f_tokens = pytree.tree_map(to_fun, tokens) + + # Populate the current FunctionalTensorMode with the tokens per + # operator. See Note [FunctionalTensorMode is Stateful] + functional_tensor_mode = ( + torch.utils._python_dispatch._detect_functional_mode() + ) + assert functional_tensor_mode is not None + for i, k in enumerate(meta.tokens.keys()): + functional_tensor_mode._tokens[k] = f_tokens[i] + + # Run the joint + f_outs = fn(*f_args) + + # Return both the tokens and the outputs + # See Note [Side-Effectful Tokens in AOTAutograd] + f_outs = (*functional_tensor_mode._tokens.values(), *f_outs) + + if trace_joint: + # We support a limited amount of mutation of graph inputs during the backward pass. + # (This is used e.g. by Float8, which needs to update buffers during the backward pass) + # Here, we perform extra checks for primals that were mutated in the **backward** + # We're doing the checks here instead of doing them with the rest of the input mutation handling because: + # - We need to detect inputs that were mutated in the backward **separately** from mutations that happened + # during the forward, because the handling is different: some input mutations from the the forward + # can be only handled in a fw-only runtime epilogue, and in theory if we wanted to handle those same + # types of mutations in the backward we would need a bw-only runtime epilogue. + # - We could in theory have our analysis pass differentiate mutations in the fw from mutations in + # the bw by running our analysis first on the fw-only graph, and then on the joint graph. This would + # require an extra round of tracing though, so it's more efficient to do in-line here. + assert ( + isinstance(args, tuple) + and len(args) == 2 + and isinstance(args[0], (list, tuple)) + ) + # Only look at mutations that happened to forward inputs (e.g. 
fw buffers that were saved for bw) + primals_before = args[0] + primals_after = pytree.tree_map(from_fun, f_args[0]) + for f_inpt, before, after, inpt_info in zip( + f_args[0], primals_before, primals_after, meta.input_info + ): + # Ban metadata mutations on fw inputs during the bw + if not inpt_info.mutates_metadata: + assert not has_metadata_mutation( + f_inpt, before, check_only_storage_mutation=False + ), "Found a graph input that had its metadata mutated in the backward. This is not supported" + # Allow data mutations on fw inputs during the bw, but only if they do not require grad + # So we can guarantee that we can keep the mutations in the graph + if has_data_mutation(f_inpt) and not inpt_info.mutates_data: + assert ( + not inpt_info.requires_grad + ), "Found a graph input that requires_grad and was mutated in the backward. This is not supported" + # Otherwise, put the mutation in the graph + before.copy_(after) + # Now that we covered mutations to *forward* inputs during the backward, + # we also need to cover mutations to *backward-only* inputs during the backward (e.g. mutation to a grad_out). + # Today, we will just error in all cases of this happening unless someone needs us to support it. + tangents_before = args[1] + tangents_after = pytree.tree_map(from_fun, f_args[1]) + for f_inpt, before, after in zip( + f_args[1], tangents_before, tangents_after + ): + assert not has_metadata_mutation( + f_inpt, before, check_only_storage_mutation=False + ) and not has_data_mutation( + f_inpt + ), "Found an input to the backward that was mutated during the backward pass. This is not supported" + + if aot_config.keep_inference_input_mutations: + # Note: This is a bit annoying. There's a layering issue here, where: + # (1) functionalization needs to operate on **synthetic base** inputs, before unpacking them into the "real" inputs. + # (2) For keep_input_mutations, we support tracing a call to copy_() directly on mutated inputs. + # However, we **only** want to support this for inputs that have data-only (and no metadata) mutations, + # because inductor (and backends in generally) would prefer not to see these (e.g. as_strided_(), resize_()). + # This makes it pretty difficult for this logic to operate on synthetic bases. + # (3) In addition, there are cases where it's significantly cheaper to perform the copy on the individual + # (unpacked) input aliases, instead of the synthetic base. + # Example case where (3) could be important: + # + # def f(x, y): + # x.mul_(2) + # y.mul_(3) + # return x, y + # a = torch.ones(1'000'000) + # x, y = out(a[0:9], a[1:10]) + # + # It would be much better to add copy_() calls into the graph for the two tiny slices, instead of materializing + # a giant "updated synthetic base" and copying into a's entire storage. + # + # For now, we are pessimistically not performing the optimization from (3); + # we will materialize an "updated" synthetic base, and copy it back to the synthetic input base. + # This allows us to factor aot autograd much more nicely, since only one area of the code needs to worry + # about synthetic bases. + for i, (inpt_old, inpt_f) in enumerate( + zip(args, f_args) if not trace_joint else zip(args[0], f_args[0]) + ): + if not isinstance(inpt_f, torch.Tensor): + continue + assert is_fun(inpt_f) + inpt_new = from_fun(inpt_f) + if meta.input_info[i].mutation_type == MutationType.MUTATED_IN_GRAPH: + # We found an input that had a (data-only) mutation. 
+ # Since keep_input_mutations is set, we need to faithfully apply a copy_() + # so the compiler will see the input mutation in the graph. + if meta.input_info[i].mutations_hidden_from_autograd: + # Hidden from autograd = run under no_grad, **and** don't bump VC + with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter( + inpt_old + ): + inpt_old.copy_(inpt_new) + elif meta.input_info[i].mutations_under_no_grad_or_inference_mode: + # Under no_grad = run under no_grad (we still bump the VC though) + # (inference_mode will also bump the VC, as long as the tensor in question + # was created outside of inference_mode) + with torch.no_grad(): + inpt_old.copy_(inpt_new) + else: + inpt_old.copy_(inpt_new) + + # When an output tensor is a functionalized mutated input, and we + # were able to move the mutation in to the graph then we can return + # the mutated input directly. This prevents duplicating the + # tensors contents. + flat_outs, outs_spec = pytree.tree_flatten(f_outs) + flat_outs = [from_fun(o) for o in flat_outs] + num_outs = len(meta.output_info) + + for i, outp in enumerate(flat_outs[:num_outs]): + info = meta.output_info[i] + if info.output_type != OutputType.is_input: + continue + + assert info.base_idx is not None + if ( + meta.input_info[info.base_idx].mutation_type + == MutationType.MUTATED_IN_GRAPH + ): + flat_outs[i] = args[info.base_idx] + return pytree.tree_unflatten(flat_outs, outs_spec) + + return pytree.tree_map(from_fun, f_outs) + + # Kinda annoying, but needed to make sure that the fx graph we trace out has "primals" + # and "tangents" as its input names (which are special-cased by the partitioner) + # TODO (tmanlaibaatar) revisit this if we ever need to turn on non-strict joint graph export + def joint_helper(primals, tangents): + return _functionalized_f_helper(primals, tangents) + + helper = joint_helper if trace_joint else _functionalized_f_helper + if config.functionalize_rng_ops: + # Setup the wrapper for functionalization of rng ops + helper, args = create_functionalized_rng_ops_wrapper(helper, args, trace_joint) + + # Additionally pass in tokens as inputs + # See Note [Side-Effectful Tokens in AOTAutograd] + additional_token_inputs = [torch.tensor([])] * len(meta.tokens) + if trace_joint: + args = ([*additional_token_inputs, *args[0]], *args[1:]) + else: + args = [*additional_token_inputs, *args] + + return helper, args + + +# Given a function operating on Subclass -> Subclass, returns an function that operates on Tensor -> Tensor +# Also returns: +# - the new set of arguments to pass into this function (now that tensor subclasses have been eliminated) +# - the updated ViewAndMutationMeta for this dense -> dense function. +# The other important arguments are: +# - flat_fn_maybe_joint: when is_joint_structure=True, this is the joint fw-bw function. +# when is_joint_structure=False, this is just the forward function. +# - fw_only: this is *always* the forward-only function. +# Why do we need this? We need to collect updated ViewAndMutationMeta on our new dense -> dense functions. +# In particular, we need this to tell the partitioner how many dense forward outputs there are. 
+def aot_dispatch_subclass( + flat_fn_maybe_joint, + args: List[Any], + *, + is_joint_structure: bool, + meta: ViewAndMutationMeta, + fw_only: Callable, +) -> SubclassTracingInfo: + # Skip logic if we don't need to trace through any subclasses + req_subclass_dispatch = requires_subclass_dispatch(args, meta) + if not req_subclass_dispatch: + return SubclassTracingInfo( + plain_tensor_trace_fn=flat_fn_maybe_joint, + plain_tensor_args=args, + maybe_subclass_meta=None, + ) + + # TODO: add subclass guards (later PR). + + # What's going on here? We need to compute subclass metadata about the outputs of the joint (grad_inputs). + # Annoying: we don't know the grad input metas until we're in the middle of tracing the joint, + # so we set it later, while we're tracing the joint (see inner_fn() below). + # Another option would be to run our run_functionalized_fw_and_collect_metadata() function + # directly on the joint, but this would hurt compile time (adding yet another pass through the joint). + subclass_meta = SubclassMeta() + + def inner_fn(fn, args, *, use_trace_joint: bool): + # Step 1: wrap tensor inputs into subclasses if necessary + all_args = wrap_tensor_subclasses_maybe_joint( + args, is_joint_structure=use_trace_joint, meta=meta + ) + + # Step 2: call the inner function, with our (maybe subclass) inputs + wrapped_outs = fn(*all_args) + + if use_trace_joint: + # See Note: [Computing Subclass Metadata about grad_inputs] + # We also stash subclass info on our grad_inputs, if we're tracing the joint. + nonlocal subclass_meta + assert isinstance(wrapped_outs, tuple) and len(wrapped_outs) == 2 + # Don't need fw outs since we already have subclass metadata on them + grad_inputs = wrapped_outs[1] + subclass_meta.grad_input_metas = create_subclass_meta(grad_inputs) + + # Step 3: Unwrap any subclass outputs back into dense tensors + unwrapped_outs = unwrap_tensor_subclasses( + wrapped_outs, is_joint_structure=use_trace_joint + ) + return unwrapped_outs + + def joint_fn(primals, tangents): + return inner_fn(flat_fn_maybe_joint, (primals, tangents), use_trace_joint=True) + + def fw_fn(*primals): + return inner_fn(flat_fn_maybe_joint, primals, use_trace_joint=False) + + def metadata_fn(*primals): + return inner_fn(fw_only, primals, use_trace_joint=False) + + args_unwrapped = unwrap_tensor_subclasses( + args, is_joint_structure=is_joint_structure + ) + + if is_joint_structure: + primals_unwrapped = args_unwrapped[0] + fn_to_trace = joint_fn + else: + primals_unwrapped = args_unwrapped + fn_to_trace = fw_fn + + # Note: [Partitioner handling for Subclasses, Part 1] + # The way the partitioner works is that: + # (1) we pass is a single graph containing the joint fw/bw, + # where the # of graph outputs corresponds to # fw_outputs + # grad_inputs + # (2) The partitioner accepts an arguments, num_fwd_outputs, + # and assumes that the first "num_fwd_outputs" graph outputs correspond + # to outputs of the forward graph. + # How do tensor subclasses enter the picture? + # the num_fwd_outputs in the final graph is actually non-trivial to compute, + # because it can be influenced by input mutations and intermediate bases. + # So we compute it by inspecting the current ViewAndMutationMeta object. + # However, the original ViewAndMutationMeta that we computed was created + # on the subclass -> subclass graph, + # which can have a different number of outputs than the dense -> dense graph. 
+ # That's why we createa a fresh metadata object on the dense -> dense function here, + # and plumb it back up to the partitioner. + # See Note: [Partitioner handling for Subclasses, Part 2] for more info. + meta_updated = run_functionalized_fw_and_collect_metadata( + metadata_fn, + keep_input_mutations=meta.keep_input_mutations, + is_train=meta.is_train, + )(*primals_unwrapped) + + subclass_meta.fw_metadata = meta_updated + + return SubclassTracingInfo( + plain_tensor_trace_fn=fn_to_trace, + plain_tensor_args=args_unwrapped, + maybe_subclass_meta=subclass_meta, + ) + + +class PropagateUnbackedSymInts(torch.fx.Interpreter): + def run_node(self, n: torch.fx.Node): + import sympy + + result = super().run_node(n) + # TODO: handle Tensor returns + if "example_value" in n.meta: + if isinstance(result, torch.SymInt) and isinstance( + result.node.expr, sympy.Symbol + ): + torch._check(result == n.meta["example_value"]) + + return result + + +def create_functional_call(mod, params_spec, params_len, store_orig_mod=False): + # Redundant with dynamo, but worth having in case this gets invoked elsewhere. + # https://github.com/pytorch/pytorch/issues/103569 + + def functional_call(*args, **kwargs): + with stateless._reparametrize_module( + mod, pytree.tree_unflatten(args[:params_len], params_spec) + ): + if isinstance(mod, torch.fx.GraphModule): + with fx_traceback.preserve_node_meta(), warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Anomaly Detection has been enabled." + ) + with torch.autograd.detect_anomaly(check_nan=False): + out = PropagateUnbackedSymInts(mod).run( + *args[params_len:], **kwargs + ) + else: + out = mod(*args[params_len:], **kwargs) + + if not isinstance(out, (tuple, list)): + raise RuntimeError( + "Graph output must be a tuple(). This is so that we can avoid " + "pytree processing of the outputs. Please change the module to " + "have tuple outputs or use aot_module instead." + ) + return out + + # Note [Preserving the nn module stack metadata during export non-strict mode] + # This path is currently only used by the non-strict export flow, + # where we cannot rely on dynamo to preserve nn stack metadata in our captured graph. + # Instead, we stash the original user nn module here, and rely on `make_fx` to grab + # this stashed module and use it to track nn module stack metadata + if store_orig_mod and not hasattr(functional_call, "_orig_mod"): + functional_call._orig_mod = mod # type: ignore[attr-defined] + + return functional_call diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8c787f219a0b8d775b477ea122d9e838c7395805 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/utils.py @@ -0,0 +1,226 @@ +""" +Contains various utils for AOTAutograd, including those for handling collections. 
+""" + +import dataclasses +import warnings +from contextlib import nullcontext +from functools import wraps +from typing import Any, Callable, List, Optional, Tuple + +import torch +import torch.utils._pytree as pytree +from torch.fx.experimental._backward_state import BackwardState +from torch.fx.experimental.proxy_tensor import py_sym_types + +KNOWN_TYPES = [ + torch.Tensor, + BackwardState, + int, + str, + float, + bool, + type(None), + *py_sym_types, +] + +original_zip = zip + + +def strict_zip(*iterables, strict=True, **kwargs): + if not strict: + return original_zip(*iterables, **kwargs) + + shortest_length = min(len(it) for it in iterables) + for iterable in iterables: + if len(iterable) != shortest_length: + raise ValueError( + "The iterables have different lengths and strict mode is enabled." + ) + + return original_zip(*iterables, **kwargs) + + +def _get_symint_hints(exprs): + """ + Get the hints of a list/tuple of int/SymInt. + """ + if isinstance(exprs, (list, tuple)): + return type(exprs)(_get_symint_hints(e) for e in exprs) + elif isinstance(exprs, torch.SymInt): + return exprs.node.shape_env.size_hint(exprs.node.expr) + else: + return exprs + + +def partial_flatten_asdict(obj: Any) -> Any: + if dataclasses.is_dataclass(obj): + return { + field.name: getattr(obj, field.name) for field in dataclasses.fields(obj) + } + elif isinstance(obj, (list, tuple)): + return obj.__class__([partial_flatten_asdict(item) for item in obj]) + elif isinstance(obj, dict): + return {k: partial_flatten_asdict(v) for k, v in obj.items()} + else: + return obj + + +def normalize_as_list(x): + if isinstance(x, tuple): + return list(x) + elif isinstance(x, list): + return x + return [x] + + +def _get_autocast_states(): + return [ + torch.is_autocast_enabled(), + torch.is_autocast_cpu_enabled(), + torch.get_autocast_gpu_dtype(), + torch.get_autocast_cpu_dtype(), + torch.is_autocast_cache_enabled(), + ] + + +def make_boxed_func(f): + def g(args): + return f(*args) + + g._boxed_call = True # type: ignore[attr-defined] + return g + + +def make_boxed_compiler(compiler): + @wraps(compiler) + def f(fx_g, inps): + out_f = compiler(fx_g, inps) + fx_g = make_boxed_func(out_f) + return fx_g + + return f + + +def call_func_at_runtime_with_args(f, args, steal_args=False, disable_amp=False): + if not steal_args: + args = list(args) + assert isinstance(args, list) + + context = torch._C._DisableAutocast if disable_amp else nullcontext + with context(): + if hasattr(f, "_boxed_call"): + out = normalize_as_list(f(args)) + else: + # TODO: Please remove soon + # https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670 + warnings.warn( + "Your compiler for AOTAutograd is returning a function that doesn't take boxed arguments. " + "Please wrap it with functorch.compile.make_boxed_func or handle the boxed arguments yourself. " + "See https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670 for rationale." + ) + out = normalize_as_list(f(*args)) + return out + + +# Inspired by autodidax (thanks!) +class PytreeThunk: + spec: Optional[pytree.TreeSpec] = None + # These are some kinda dumb microoptimizations that save about 3-4 us of overhead. + is_simple: Optional[ + bool + ] = None # if the output spec is a tuple/list, we won't bother unflattening it. 
+ is_really_simple: Optional[bool] = None # if the output spec is a LeafSpec + + def set(self, spec: pytree.TreeSpec) -> None: + assert self.spec is None or self.spec == spec + assert spec is not None + self.spec: pytree.TreeSpec = spec + if self.spec.type in {tuple, list} and all( + child.is_leaf() for child in spec.children_specs + ): + self.is_simple = True + if self.spec.is_leaf(): + self.is_really_simple = True + + def unflatten(self, x: List[Any]) -> Any: + if self.is_really_simple: + return x[0] + if self.is_simple: + return x + assert self.spec is not None + return pytree.tree_unflatten(x, self.spec) + + +# Creates a function that returns flattened inputs and outputs +# Also returns the output tree spec, which is needed to recover the "unflattened" +# output tree structure later. +def create_tree_flattened_fn(fn, args, kwargs=None) -> Tuple[Callable, PytreeThunk]: + if kwargs is None: + kwargs = {} + # Save the args_spec for flat_tensor_args to unflatten while tracing + _, tensor_args_spec = pytree.tree_flatten((args, kwargs)) + out_spec = PytreeThunk() + + def flat_fn(*flat_args): + # The input are flattened tensor args. Prepare the args in the + # order that original function expects. Add static args as well. + # They will appear as tensor constants in the traced graph. + nonlocal out_spec + args, kwargs = pytree.tree_unflatten(flat_args, tensor_args_spec) + tree_out = fn(*args, **kwargs) + flat_out, spec = pytree.tree_flatten(tree_out) + for i in flat_out: + is_known_type = False + for j in KNOWN_TYPES: + if isinstance(i, j): + is_known_type = True + break + if not is_known_type: + raise RuntimeError( + f"Found {type(i)} in output, which is not a known type. " + "If this type holds tensors, you need to register a pytree for it. " + "See https://github.com/pytorch/functorch/issues/475 for a brief " + "explanation why. If you don't need to register a pytree, please " + "leave a comment explaining your use case and we'll make this more " + "ergonomic to deal with" + ) + out_spec.set(spec) + return flat_out + + # Can't use functools.wraps here because the wrapper has different + # calling convention + if hasattr(fn, "_orig_mod"): + flat_fn._orig_mod = fn._orig_mod # type: ignore[attr-defined] + + return flat_fn, out_spec + + +# This function takes in a tensor t, and returns one of t, t.view(), or t.clone(). +# When tracing the joint forward + backward, for any inputs in the graph that are mutated, +# we need to clone them first (and similarly for metadata-only mutations, we need to view them first). +# The idea is that when we trace the backward, we need to pass in the *original* primals +# to autograd.grad(), before they were mutated. +# Note: when we have synthetic base inputs, we need to clone them *before* creating views off of them. +# This means that "idx" here represents the index of the (potentially) synthetic base. +# What we need to do is: +# (1) map the current (post-synthetic-base calling convention) input argument index +# to int index pre-synthetic-base-calling-convention. +# (2) There could be multiple, if this index corresponds to a synthetic base +# that has multiple input aliases. +# (3) If any of those corresponding inputs get metadata mutations, then we clone the base. +def maybe_to_fresh_input(idx, t, meta): + if not isinstance(t, torch.Tensor): + return t + if idx in meta.mutated_inp_runtime_indices: + # We only need to bother cloning mutated inputs that participate in autograd. 
+ mutated_inp_idx = meta.mutated_inp_runtime_indices.index(idx) + if meta.input_info[idx].requires_grad and meta.input_info[idx].mutates_data: + # Make sure the primal we pass to autograd.grad() + # sees the tensor before the mutation + return t.clone() + if meta.input_info[idx] and meta.input_info[idx].mutates_metadata: + # Make sure the primal we pass to autograd.grad() + # sees the tensor before the metadata mutation + return t.view(t.shape) + return t diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_vendor/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_vendor/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..969fd8e060db68050ac201a39ecdfedd80fd7e09 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..22809cfd5dc25792d77070c269fc8d111a12eed0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__init__.py @@ -0,0 +1,15 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "23.2" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD-2-Clause or Apache-2.0" +__copyright__ = "2014 %s" % __author__ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72e6c690b7303a5edc8c701130be098113af16a1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/_structures.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/_structures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50b56bce9d01fa168cda60a74c13b7551bdbf918 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/_structures.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/version.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..daedfd3f213453f1c0ffa38fc74d55669a619843 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/version.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/_structures.py b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/_structures.py new file mode 100644 index 0000000000000000000000000000000000000000..90a6465f9682c886363eea5327dac64bf623a6ff --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/_structures.py @@ -0,0 +1,61 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> "NegativeInfinityType": + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/version.py b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/version.py new file mode 100644 index 0000000000000000000000000000000000000000..5faab9bd0dcf28847960162b2b4f13a8a556ef20 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_vendor/packaging/version.py @@ -0,0 +1,563 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +""" +.. testsetup:: + + from packaging.version import parse, Version +""" + +import itertools +import re +from typing import Any, Callable, NamedTuple, Optional, SupportsInt, Tuple, Union + +from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType + +__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"] + +LocalType = Tuple[Union[int, str], ...] + +CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]] +CmpLocalType = Union[ + NegativeInfinityType, + Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...], +] +CmpKey = Tuple[ + int, + Tuple[int, ...], + CmpPrePostDevType, + CmpPrePostDevType, + CmpPrePostDevType, + CmpLocalType, +] +VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool] + + +class _Version(NamedTuple): + epoch: int + release: Tuple[int, ...] + dev: Optional[Tuple[str, int]] + pre: Optional[Tuple[str, int]] + post: Optional[Tuple[str, int]] + local: Optional[LocalType] + + +def parse(version: str) -> "Version": + """Parse the given version string. + + >>> parse('1.0.dev1') + + + :param version: The version string to parse. + :raises InvalidVersion: When the version string is not a valid version. 
+ """ + return Version(version) + + +class InvalidVersion(ValueError): + """Raised when a version string is not a valid version. + + >>> Version("invalid") + Traceback (most recent call last): + ... + packaging.version.InvalidVersion: Invalid version: 'invalid' + """ + + +class _BaseVersion: + _key: Tuple[Any, ...] + + def __hash__(self) -> int: + return hash(self._key) + + # Please keep the duplicated `isinstance` check + # in the six comparisons hereunder + # unless you find a way to avoid adding overhead function calls. + def __lt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key < other._key + + def __le__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key <= other._key + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key == other._key + + def __ge__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key >= other._key + + def __gt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key > other._key + + def __ne__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key != other._key + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +_VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+
+
+class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    <Version('1.0a5')>
+    >>> v2
+    <Version('1.0')>
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+    _key: CmpKey
+
+    def __init__(self, version: str) -> None:
+        """Initialize a Version object.
+
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        <Version('1.0.0')>
+        """
+        return f"<Version('{self}')>"
+
+    def __str__(self) -> str:
+        """A string representation of the version that can be rounded-tripped.
+
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
+        return self._version.epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
+        return self._version.release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
+        return self._version.pre
+
+    @property
+    def post(self) -> Optional[int]:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1.2.3+abc.dev1").public
+        '1.2.3'
+        """
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3+abc.dev1").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
+        return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(
+    letter: Optional[str], number: Union[str, bytes, SupportsInt, None]
+) -> Optional[Tuple[str, int]]:
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
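+
+
+# For illustration (editor's note, not part of the vendored module):
+# _parse_letter_version("alpha", None) returns ("a", 0),
+# _parse_letter_version("c", "2") returns ("rc", 2), and
+# _parse_letter_version(None, "1") returns ("post", 1) via the implicit
+# post-release syntax.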
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: Optional[str]) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[LocalType],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
+    # zeros until we come to something non-zero, then re-reverse it back into the
+    # correct order, turn it into a tuple, and use that as our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: CmpPrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: CmpPrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: CmpPrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: CmpLocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
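+
+
+# Editor's sketch (not part of the vendored module): the keys produced by
+# _cmpkey implement the PEP 440 ordering, assuming the comparison dunders
+# defined on Version earlier in this file, e.g.
+#
+#     ordered = ["1.0.dev0", "1.0a1", "1.0", "1.0+local", "1.0.post1"]
+#     assert sorted(map(Version, ordered)) == list(map(Version, ordered))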
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cbb1fb07ff885d5fc4d26667e5fb4a1670efb9e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__init__.py
@@ -0,0 +1,78 @@
+"""torch.multiprocessing is a wrapper around the native :mod:`multiprocessing` module.
+
+It registers custom reducers, that use shared memory to provide shared
+views on the same data in different processes. Once the tensor/storage is moved
+to shared_memory (see :func:`~torch.Tensor.share_memory_`), it will be possible
+to send it to other processes without making any copies.
+
+The API is 100% compatible with the original module - it's enough to change
+``import multiprocessing`` to ``import torch.multiprocessing`` to have all the
+tensors sent through the queues or shared via other mechanisms, moved to shared
+memory.
+
+Because the APIs are so similar, we do not document most of this package's
+contents, and we recommend referring to the excellent documentation of the
+original module.
+"""
+import multiprocessing
+import sys
+
+import torch
+from .reductions import init_reductions
+
+__all__ = ["set_sharing_strategy", "get_sharing_strategy", "get_all_sharing_strategies"]
+
+
+from multiprocessing import *  # noqa: F403
+
+
+__all__ += multiprocessing.__all__  # noqa: PLE0605 type: ignore[attr-defined]
+
+
+# This call adds a Linux specific prctl(2) wrapper function to this module.
+# See https://github.com/pytorch/pytorch/pull/14391 for more information.
+torch._C._multiprocessing_init()
+
+
+"""Add helper function to spawn N processes and wait for completion of any of
+them. This depends `mp.get_context` which was added in Python 3.4."""
+from .spawn import (
+    ProcessContext,
+    ProcessExitedException,
+    ProcessRaisedException,
+    spawn,
+    SpawnContext,
+    start_processes,
+)
+
+
+if sys.platform == "darwin" or sys.platform == "win32":
+    _sharing_strategy = "file_system"
+    _all_sharing_strategies = {"file_system"}
+else:
+    _sharing_strategy = "file_descriptor"
+    _all_sharing_strategies = {"file_descriptor", "file_system"}
+
+
+def set_sharing_strategy(new_strategy):
+    """Set the strategy for sharing CPU tensors.
+
+    Args:
+        new_strategy (str): Name of the selected strategy. Should be one of
+            the values returned by :func:`get_all_sharing_strategies()`.
+    """
+    global _sharing_strategy
+    assert new_strategy in _all_sharing_strategies
+    _sharing_strategy = new_strategy
+
+
+def get_sharing_strategy():
+    """Return the current strategy for sharing CPU tensors."""
+    return _sharing_strategy
+
+
+def get_all_sharing_strategies():
+    """Return a set of sharing strategies supported on a current system."""
+    return _all_sharing_strategies
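+
+
+# Example (editor's sketch): inspect and switch the strategy before sharing any
+# CPU tensors; "file_system" is the usual fallback when the file-descriptor
+# strategy would exhaust the process's fd limit.
+#
+#     import torch.multiprocessing as mp
+#
+#     print(mp.get_all_sharing_strategies())  # e.g. {'file_descriptor', 'file_system'}
+#     mp.set_sharing_strategy("file_system")
+#     assert mp.get_sharing_strategy() == "file_system"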
+
+
+init_reductions()
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ea670156879a7b730cf7aa498a162bbb0736285
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/_atfork.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/_atfork.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e3070df6ba5782a20459cf223a604048002594e3
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/_atfork.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/pool.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/pool.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34a233a0cb07a76a790804a5c883d3ce52118b5e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/pool.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/queue.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/queue.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08f03d4836bf1591202dc52c74bcc1078761b187
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/queue.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/reductions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/reductions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79ac5238d2a4af06a59dcfb02964c44f5bfc55f4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/reductions.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/spawn.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/spawn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a872d5c1442e2ee272b9ade569781b7591a0695
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/spawn.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/_atfork.py b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/_atfork.py
new file mode 100644
index 0000000000000000000000000000000000000000..92a3280fee78b538230dfa63862c4681c1a5b186
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/_atfork.py
@@ -0,0 +1,33 @@
+import sys
+
+__all__ = ["register_after_fork"]
+
+if sys.platform == "win32":
+    import multiprocessing.util as _util
+
+    def _register(func):
+        def wrapper(arg):
+            func()
+
+        _util.register_after_fork(_register, wrapper)
+
+else:
+    import os
+
+    def _register(func):
+        os.register_at_fork(after_in_child=func)
+
+
+def register_after_fork(func):
+    """Register a callable to be executed in the child process after a fork.
+
+    Note:
+        In python < 3.7 this will only work with processes created using the
+        ``multiprocessing`` module. In python >= 3.7 it also works with
+        ``os.fork()``.
+
+    Args:
+        func (function): Function taking no arguments to be called in the child after fork
+
+    """
+    _register(func)
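+
+
+# Example (editor's sketch): re-seed a process-local RNG in every forked child.
+#
+#     import os
+#     import random
+#
+#     register_after_fork(lambda: random.seed(os.getpid()))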
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/pool.py b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..6915203566469cfaf7170d87894ce03cc8348dd5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/pool.py
@@ -0,0 +1,52 @@
+import multiprocessing.pool
+import multiprocessing.util as util
+
+from .queue import SimpleQueue
+
+
+def clean_worker(*args, **kwargs):
+    import gc
+
+    multiprocessing.pool.worker(*args, **kwargs)
+    # Regular multiprocessing workers don't fully clean up after themselves,
+    # so we have to explicitly trigger garbage collection to make sure that all
+    # destructors are called...
+    gc.collect()
+
+
+class Pool(multiprocessing.pool.Pool):
+    """Pool implementation which uses our version of SimpleQueue.
+
+    This lets us pass tensors in shared memory across processes instead of
+    serializing the underlying data.
+    """
+
+    def _setup_queues(self):
+        self._inqueue = SimpleQueue()
+        self._outqueue = SimpleQueue()
+        self._quick_put = self._inqueue._writer.send
+        self._quick_get = self._outqueue._reader.recv
+
+    def _repopulate_pool(self):
+        """Increase the number of pool processes to the specified number.
+
+        Bring the number of pool processes up to the specified number, for use after
+        reaping workers which have exited.
+        """
+        for i in range(self._processes - len(self._pool)):
+            # changed worker -> clean_worker
+            args = (
+                self._inqueue,
+                self._outqueue,
+                self._initializer,
+                self._initargs,
+                self._maxtasksperchild,
+            )
+            if hasattr(self, "_wrap_exception"):
+                args += (self._wrap_exception,)
+            w = self.Process(target=clean_worker, args=args)
+            self._pool.append(w)
+            w.name = w.name.replace("Process", "PoolWorker")
+            w.daemon = True
+            w.start()
+            util.debug("added worker")
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/queue.py b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/queue.py
new file mode 100644
index 0000000000000000000000000000000000000000..99da145e75f1a9f6fb2467251948bc74361cbc02
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/queue.py
@@ -0,0 +1,42 @@
+import io
+import multiprocessing.queues
+import pickle
+from multiprocessing.reduction import ForkingPickler
+
+
+class ConnectionWrapper:
+    """Proxy class for _multiprocessing.Connection which uses ForkingPickler for object serialization."""
+
+    def __init__(self, conn):
+        self.conn = conn
+
+    def send(self, obj):
+        buf = io.BytesIO()
+        ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj)
+        self.send_bytes(buf.getvalue())
+
+    def recv(self):
+        buf = self.recv_bytes()
+        return pickle.loads(buf)
+
+    def __getattr__(self, name):
+        if "conn" in self.__dict__:
+            return getattr(self.conn, name)
+        raise AttributeError(f"'{type(self).__name__}' object has no attribute 'conn'")
+
+
+class Queue(multiprocessing.queues.Queue):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._reader: ConnectionWrapper = ConnectionWrapper(self._reader)
+        self._writer: ConnectionWrapper = ConnectionWrapper(self._writer)
+        self._send = self._writer.send
+        self._recv = self._reader.recv
+
+
+class SimpleQueue(multiprocessing.queues.SimpleQueue):
+    def _make_methods(self):
+        if not isinstance(self._reader, ConnectionWrapper):
+            self._reader: ConnectionWrapper = ConnectionWrapper(self._reader)
+            self._writer: ConnectionWrapper = ConnectionWrapper(self._writer)
+        super()._make_methods()  # type: ignore[misc]
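+
+
+# Editor's sketch of the round trip these wrappers implement: `send` serializes
+# with ForkingPickler (so the tensor/storage reducers registered by
+# torch.multiprocessing apply) and `recv` unpickles the received bytes.
+# Assuming torch has already been imported:
+#
+#     r, w = multiprocessing.Pipe(duplex=False)
+#     reader, writer = ConnectionWrapper(r), ConnectionWrapper(w)
+#     writer.send({"t": torch.zeros(2)})
+#     obj = reader.recv()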
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/reductions.py b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/reductions.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5eb0a6abd86f2d2036032aec894298862a322cf
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/reductions.py
@@ -0,0 +1,594 @@
+import multiprocessing
+import os
+import threading
+from multiprocessing.reduction import ForkingPickler
+from multiprocessing.util import register_after_fork
+from typing import Union
+
+import torch
+import torch.utils.hooks
+from torch._namedtensor_internals import check_serializing_named_tensor
+
+try:
+    # Early load resource_sharer to prevent a partially initialized instance
+    # from being inherited in a forked child process. The reduce_storage method
+    # requires this module indirectly through DupFd(). The built-in mp.Queue
+    # class pickles arguments in a background thread which may overlap with the
+    # fork.
+    import multiprocessing.resource_sharer
+except ImportError:
+    pass
+
+
+class StorageWeakRef:
+    r"""A weak reference to a Storage.
+
+    The cdata member is a Python number containing the integer representation of
+    the Storage pointer.
+    """
+
+    __slots__ = ["cdata", "_free_weak_ref"]
+
+    def __init__(self, storage):
+        self.cdata = storage._weak_ref()
+        # Save a direct reference to _free_weak_ref because the `torch` module
+        # might be cleared during Python shutdown before this module is cleared.
+        self._free_weak_ref = torch.Storage._free_weak_ref  # type: ignore[attr-defined]
+
+    @classmethod
+    def from_weakref(cls, cdata):
+        instance = cls.__new__(cls)
+        instance.cdata = cdata
+        instance._free_weak_ref = torch.Storage._free_weak_ref  # type: ignore[attr-defined]
+        return instance
+
+    def expired(self):
+        return torch.Storage._expired(self.cdata)  # type: ignore[attr-defined]
+
+    def __del__(self):
+        self._free_weak_ref(self.cdata)
+
+    def __hash__(self):
+        return self.cdata
+
+    def __eq__(self, other):
+        if id(self) == id(other):
+            return True
+        return self.cdata == other.cdata
+
+
+class SharedCache(dict):
+    """Dictionary from multiprocessing handles to StorageWeakRef."""
+
+    def __init__(self):
+        # free_dead_references() is called if the len exceeds the current
+        # limit. The limit scales with the number of remaining live objects.
+        self.limit = 128
+        # `fork` inherits lock state, so in case we fork while the lock is held,
+        # we register a function to reset the lock to a new object to avoid
+        # possible deadlocks, following the design of the Python multiprocessing
+        # library.
+        self._after_fork()
+        register_after_fork(self, SharedCache._after_fork)
+
+    def _after_fork(self):
+        self.lock = threading.Lock()
+
+    def get(self, key):
+        with self.lock:
+            return dict.get(self, key)
+
+    def __setitem__(self, key, storage_ref):
+        with self.lock:
+            dict.__setitem__(self, key, storage_ref)
+            if len(self) > self.limit:
+                self.free_dead_references()
+
+    def free_dead_references(self):
+        live = 0
+        for key, storage_ref in list(self.items()):
+            if storage_ref.expired():
+                del self[key]
+            else:
+                live += 1
+        self.limit = max(128, live * 2)
+
+
+# mapping from handles to StorageWeakRef objects
+shared_cache = SharedCache()
+
+
+def rebuild_event(device, handle):
+    return torch.cuda.Event.from_ipc_handle(device, handle)
+
+
+def reduce_event(event):
+    handle = event.ipc_handle()
+    return (rebuild_event, (event.device, handle))
+
+
+def rebuild_tensor(cls, storage, metadata):
+    storage_offset, size, stride, requires_grad = metadata
+    t = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
+    if cls == torch.nn.parameter.Parameter:
+        # we have to pass requires_grad into the constructor, rather than set it
+        # as an attribute later, because integer tensors are required to have
+        # requires_grad=False (or else they raise an error)
+        t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
+    else:
+        t.requires_grad = requires_grad
+    return t
+
+
+def rebuild_cuda_tensor(
+    tensor_cls,
+    tensor_size,
+    tensor_stride,
+    tensor_offset,
+    storage_cls,
+    dtype,
+    storage_device,
+    storage_handle,
+    storage_size_bytes,
+    storage_offset_bytes,
+    requires_grad,
+    ref_counter_handle,
+    ref_counter_offset,
+    event_handle,
+    event_sync_required,
+):
+    # If storage_handle is None, storage points to nullptr.
+    if storage_handle is None or storage_size_bytes == 0:
+        storage = storage_cls(0, dtype=dtype, device=storage_device, _internal=True)
+    else:
+        storage = storage_from_cache(
+            storage_cls, (storage_handle, storage_offset_bytes)
+        )
+        if storage is None:
+            torch.cuda._lazy_init()
+            storage = storage_cls._new_shared_cuda(
+                storage_device,
+                storage_handle,
+                storage_size_bytes,
+                storage_offset_bytes,
+                ref_counter_handle,
+                ref_counter_offset,
+                event_handle,
+                event_sync_required,
+            )
+            shared_cache[(storage_handle, storage_offset_bytes)] = StorageWeakRef(
+                storage
+            )
+        else:
+            # We already ref counting this Storage, but producer needs new ref-counters to be released.
+            storage_cls._release_ipc_counter(
+                ref_counter_handle, ref_counter_offset, device=storage_device
+            )
+
+    _storage = (
+        storage
+        if isinstance(storage, torch.UntypedStorage)
+        else storage._untyped_storage
+    )
+
+    t = torch._utils._rebuild_tensor(
+        torch.storage.TypedStorage(wrap_storage=_storage, dtype=dtype, _internal=True),
+        tensor_offset,
+        tensor_size,
+        tensor_stride,
+    )
+
+    if tensor_cls == torch.nn.parameter.Parameter:
+        # It is crucial for integer tensors to receive requires_grad=False
+        # as an argument in the constructor
+        t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
+    else:
+        t.requires_grad = requires_grad
+
+    return t
+
+
+def reduce_tensor(tensor):
+    if tensor.requires_grad and not tensor.is_leaf:
+        raise RuntimeError(
+            "Cowardly refusing to serialize non-leaf tensor which requires_grad, "
+            "since autograd does not support crossing process boundaries.  "
+            "If you just want to transfer the data, call detach() on the tensor "
+            "before serializing (e.g., putting it on the queue)."
+        )
+
+    check_serializing_named_tensor(tensor)
+    torch.utils.hooks.warn_if_has_hooks(tensor)
+
+    # Note [CUDA IPC and the caching allocator]
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    # When you send a CUDA tensor over IPC, you might expect that you will
+    # get out the same storage from the other end.  However, the CUDA caching
+    # allocator makes it difficult to preserve this invariant.  Consider
+    # the following situation: a tensor of size 0x100 points to offset 0x20 of
+    # a storage at 0xA100 of size 0x100.  (For simplicity, all of these
+    # sizes are given in bytes).  HOWEVER, with the caching allocator, this storage
+    # might be part of a larger cudaMalloc allocation 0xA000 of size 0x4000.
+    #
+    # When we want to send this CUDA tensor over IPC, we must send the
+    # *entire* cudaMalloc allocation, i.e., the 0xA000 region, not just
+    # the storage 0xA100 (because that is what CUDA supports).  So, on the
+    # other end, there simply isn't any way to say, "Wait, you gave me
+    # a bigger region (0xA000) than the one I wanted (0xA100)".
+    #
+    # OK, so if you sent the cudaMalloc allocation, can you just wrap that up as
+    # one storage itself? No, because this cudaMalloc allocation might contain
+    # storages of mixed types: float, bytes, double... If you make the entire
+    # allocation a single storage of a type A, we'll hit an error when constructing
+    # a tensor of type B on the storage.
+    #
+    # cudaIpcMemHandle is an identifier to access the sender cudaMalloc allocation on the
+    # receiver side. However, cudaIpcMemHandles from each device in a given process may
+    # only be opened by one context per device per other process.
+    # If we open and close a memory handle multiple times in a process, CUDA is allowed
+    # to give it a different address; similarly, once we close the memory, we're not
+    # allowed to access it (or the storage/tensor built on top of it), even if it is
+    # still live in the original process. Since we cannot map the cudaMalloc allocation
+    # to a single storage in one go, this requires us to cache the device pointer for
+    # each cudaIpcMemHandle on the C++ side so that storages of the right types can be
+    # reconstructed, while keeping the old ones alive.
+    # See [https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html]
+    #
+    # This is fine, because all we need to do is to save our position in the allocation,
+    # and reconstruct storage and tensor from it.
+    # 0xA000 ->  -------CUDA Allocation------
+    #           |                            |
+    #           |                            |
+    #           |                            |
+    #           |                            |
+    # 0xA100 ->  --------storage1 begin------
+    #           |                            |
+    # 0xA120 ->  --------tensor1 begin ------
+    #           |                            |
+    #           |                            |
+    #           |                            |
+    #           |                            |
+    #           |                            |
+    # 0xA160 ->  --------tensor1 end---------
+    #           |                            |
+    #           |                            |
+    #           |                            |
+    # 0xA200 ->  --------storage1 end--------
+    #           |                            |
+    # 0xE000 ->  --------CUDA allocation-----
+    #
+    # To send tensor1, the following information is required from sender to receiver
+    # for storage reconstruction:
+    #   1. cudaIpcMemHandle of 0xA000(which can be mapped to a basePtr in receiver process).
+    #      basePtr may not be exactly 0xA000 since it's a different process.
+    #   2. offset(0xA100) of storage1 in the CUDA allocation.
+    #   3. size of storage1(0x100).
+    #
+    # On the receiver side:
+    #   1. Get the devPtr of the MemHandle to access the memory, and reconstruct a
+    #      storage of the same type using (basePtr, offset, size).
+    #   2. Reconstruct the tensor on top of the reconstructed storage:
+    #      Tensor(size=0x040, offset=0x020, storage=Storage(data=basePtr+0xA100, size=0x0100))
+    #
+    # This strategy has a few implications:
+    #
+    # 1. When we serialize a CUDA tensor for IPC, we cannot do it all in one
+    #    go (non-compositionally), and this requires us to keep a global map
+    #    memHandle -> devPtr for each process.
+    #
+    # 2. We MUST NOT let the new IPC tensor be resizable.  Originally, a resize
+    #    of the storage beyond 0x100 would merely have caused us to do a
+    #    reallocation.  You don't really want to do this, but if you did,
+    #    all that would happen is that you would lose IPC sharing.  But if
+    #    you do this in the new world, we will happily let you write out of
+    #    bounds of your "allocation", clobbering unrelated data in the cached
+    #    allocator block.  BAD!
+    #
+    # By the way, in old versions of PyTorch, we supported this situation
+    # natively using a "storage view", which permitted multiple storages to be
+    # views on each other.  But this was the *only* use of storage views, so we
+    # eliminated it so that we could just use tensor views to implement the same
+    # thing.
+    #
+
+    # TODO: Handle distinguishing between subclass and non-subclass versions of NT better
+    # https://github.com/pytorch/pytorch/issues/110543
+    from torch.nested._internal.nested_tensor import NestedTensor
+
+    if tensor.is_nested and not isinstance(tensor, NestedTensor):
+        return reduce_nested_tensor(tensor)
+
+    if tensor.layout in {
+        torch.sparse_coo,
+        torch.sparse_csr,
+        torch.sparse_bsr,
+        torch.sparse_csc,
+        torch.sparse_bsc,
+    }:
+        return reduce_sparse_tensor(tensor)
+
+    storage = tensor._typed_storage()
+
+    if storage._untyped_storage.device.type == "cuda":
+        (
+            device,
+            handle,
+            storage_size_bytes,
+            storage_offset_bytes,
+            ref_counter_handle,
+            ref_counter_offset,
+            event_handle,
+            event_sync_required,
+        ) = storage._share_cuda_()
+        tensor_offset = tensor.storage_offset()
+        shared_cache[handle] = StorageWeakRef(storage)
+        # _backward_hooks purposely omitted here, see
+        # Note [Don't serialize hooks]
+        return (
+            rebuild_cuda_tensor,
+            (
+                type(tensor),
+                tensor.size(),
+                tensor.stride(),
+                tensor_offset,  # tensor offset in its storage
+                type(storage),
+                tensor.dtype,
+                device,
+                handle,  # identifier which CUDA allocation is the storage in.
+                storage_size_bytes,  # size(in bytes) of the storage
+                storage_offset_bytes,  # offset(in bytes) of the storage in the CUDA allocation
+                tensor.requires_grad,
+                ref_counter_handle,
+                ref_counter_offset,
+                event_handle,
+                event_sync_required,
+            ),
+        )
+
+    # _backward_hooks purposely omitted here, see Note [Don't serialize hooks]
+    metadata = (
+        tensor.storage_offset(),
+        tensor.size(),
+        tensor.stride(),
+        tensor.requires_grad,
+    )
+    return (rebuild_tensor, (type(tensor), storage, metadata))
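+
+# Editor's sketch (not part of the original file): the (rebuild_fn, args) pair
+# returned above can be invoked directly; for a plain CPU tensor this yields a
+# new tensor object viewing the same typed storage.
+#
+#     fn, args = reduce_tensor(torch.arange(4.0))
+#     t2 = fn(*args)    # rebuild_tensor(...) over the original storage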
+
+
+def rebuild_nested_tensor(
+    rebuild_buffer_func,
+    rebuild_buffer_args,
+    rebuild_sizes_func,
+    rebuild_sizes_args,
+    rebuild_strides_func,
+    rebuild_strides_args,
+    rebuild_offsets_func,
+    rebuild_offsets_args,
+):
+    buffer = rebuild_buffer_func(*rebuild_buffer_args)
+    sizes = rebuild_sizes_func(*rebuild_sizes_args)
+    strides = rebuild_strides_func(*rebuild_strides_args)
+    offsets = rebuild_offsets_func(*rebuild_offsets_args)
+    return torch._nested_view_from_buffer_copy(buffer, sizes, strides, offsets)
+
+
+def reduce_nested_tensor(nt):
+    rebuild_buffer_func, rebuild_buffer_args = reduce_tensor(nt.values())
+    rebuild_sizes_func, rebuild_sizes_args = reduce_tensor(nt._nested_tensor_size())
+    rebuild_strides_func, rebuild_strides_args = reduce_tensor(
+        nt._nested_tensor_strides()
+    )
+    rebuild_offsets_func, rebuild_offsets_args = reduce_tensor(
+        nt._nested_tensor_storage_offsets()
+    )
+
+    return (
+        rebuild_nested_tensor,
+        (
+            rebuild_buffer_func,
+            rebuild_buffer_args,
+            rebuild_sizes_func,
+            rebuild_sizes_args,
+            rebuild_strides_func,
+            rebuild_strides_args,
+            rebuild_offsets_func,
+            rebuild_offsets_args,
+        ),
+    )
+
+
+def rebuild_sparse_coo_tensor(
+    rebuild_indices_func,
+    rebuild_indices_args,
+    rebuild_values_func,
+    rebuild_values_args,
+    shape,
+    is_coalesced,
+):
+    indices = rebuild_indices_func(*rebuild_indices_args)
+    values = rebuild_values_func(*rebuild_values_args)
+    return torch.sparse_coo_tensor(indices, values, shape, is_coalesced=is_coalesced)
+
+
+def rebuild_sparse_compressed_tensor(
+    rebuild_compressed_indices_func,
+    rebuild_compressed_indices_args,
+    rebuild_plain_indices_func,
+    rebuild_plain_indices_args,
+    rebuild_values_func,
+    rebuild_values_args,
+    shape,
+    layout,
+):
+    compressed_indices = rebuild_compressed_indices_func(
+        *rebuild_compressed_indices_args
+    )
+    plain_indices = rebuild_plain_indices_func(*rebuild_plain_indices_args)
+    values = rebuild_values_func(*rebuild_values_args)
+    return torch.sparse_compressed_tensor(
+        compressed_indices, plain_indices, values, shape, layout=layout
+    )
+
+
+def reduce_sparse_tensor(sparse):
+    if sparse.layout is torch.sparse_coo:
+        rebuild_indices_func, rebuild_indices_args = reduce_tensor(sparse._indices())
+        rebuild_values_func, rebuild_values_args = reduce_tensor(sparse._values())
+        return (
+            rebuild_sparse_coo_tensor,
+            (
+                rebuild_indices_func,
+                rebuild_indices_args,
+                rebuild_values_func,
+                rebuild_values_args,
+                sparse.shape,
+                sparse.is_coalesced(),
+            ),
+        )
+    else:
+        if sparse.layout in {torch.sparse_csr, torch.sparse_bsr}:
+            compressed_indices = sparse.crow_indices()
+            plain_indices = sparse.col_indices()
+        elif sparse.layout in {torch.sparse_csc, torch.sparse_bsc}:
+            compressed_indices = sparse.ccol_indices()
+            plain_indices = sparse.row_indices()
+        else:
+            raise NotImplementedError(sparse.layout)
+        (
+            rebuild_compressed_indices_func,
+            rebuild_compressed_indices_args,
+        ) = reduce_tensor(compressed_indices)
+        rebuild_plain_indices_func, rebuild_plain_indices_args = reduce_tensor(
+            plain_indices
+        )
+        rebuild_values_func, rebuild_values_args = reduce_tensor(sparse.values())
+        return (
+            rebuild_sparse_compressed_tensor,
+            (
+                rebuild_compressed_indices_func,
+                rebuild_compressed_indices_args,
+                rebuild_plain_indices_func,
+                rebuild_plain_indices_args,
+                rebuild_values_func,
+                rebuild_values_args,
+                sparse.shape,
+                sparse.layout,
+            ),
+        )
+
+
+def fd_id(fd):
+    # Returns a tuple which uniquely identifies a file descriptor. On macOS,
+    # this doesn't work with shared memory handles, which is why we don't
+    # support the "file_descriptor" sharing method on that platform.
+    stat = os.fstat(fd)
+    return (stat.st_ino, stat.st_dev)
+
+
+def storage_from_cache(cls, key):
+    storage_ref = shared_cache.get(key)
+    if storage_ref is None:
+        return None
+    return torch.UntypedStorage._new_with_weak_ptr(storage_ref.cdata)
+
+
+def rebuild_storage_fd(cls, df, size):
+    fd = df.detach()
+    try:
+        storage = storage_from_cache(cls, fd_id(fd))
+        if storage is not None:
+            return storage
+        storage = cls._new_shared_fd_cpu(fd, size)
+        shared_cache[fd_id(fd)] = StorageWeakRef(storage)
+        return storage
+    finally:
+        os.close(fd)
+
+
+def rebuild_storage_filename(cls, manager, handle, size, dtype=None):
+    storage: Union[torch.TypedStorage, torch.UntypedStorage] = storage_from_cache(
+        cls, handle
+    )
+    if storage is not None:
+        return storage._shared_decref()
+    if dtype is None:
+        storage = torch.UntypedStorage._new_shared_filename_cpu(manager, handle, size)
+    else:
+        byte_size = size * torch._utils._element_size(dtype)
+        untyped_storage: torch.UntypedStorage = (
+            torch.UntypedStorage._new_shared_filename_cpu(manager, handle, byte_size)
+        )
+        storage = torch.TypedStorage(
+            wrap_storage=untyped_storage, dtype=dtype, _internal=True
+        )
+    shared_cache[handle] = StorageWeakRef(storage)
+    return storage._shared_decref()
+
+
+def rebuild_storage_empty(cls):
+    return cls()
+
+
+def rebuild_typed_storage(storage, dtype):
+    return torch.storage.TypedStorage(wrap_storage=storage, dtype=dtype, _internal=True)
+
+
+# Use for torch.storage.TypedStorage
+def reduce_typed_storage(storage):
+    return (rebuild_typed_storage, (storage._untyped_storage, storage.dtype))
+
+
+def rebuild_typed_storage_child(storage, storage_type):
+    return storage_type(wrap_storage=storage, _internal=True)
+
+
+# Use for child classes of torch.storage.TypedStorage, like torch.FloatStorage
+def reduce_typed_storage_child(storage):
+    return (rebuild_typed_storage_child, (storage._untyped_storage, type(storage)))
+
+
+def reduce_storage(storage):
+    from . import get_sharing_strategy
+
+    if storage.is_cuda:
+        raise RuntimeError(
+            "Cannot pickle CUDA storage; try pickling a CUDA tensor instead"
+        )
+    elif get_sharing_strategy() == "file_system":
+        metadata = storage._share_filename_cpu_()
+        cache_key = metadata[1]
+        rebuild = rebuild_storage_filename
+        if isinstance(storage, torch.TypedStorage):
+            metadata += (storage.dtype,)
+        storage._shared_incref()
+    elif storage.size() == 0:
+        # This is special cased because Empty tensors
+        # (with size 0) cannot be mmapped.
+        return (rebuild_storage_empty, (type(storage),))
+    else:
+        fd, size = storage._share_fd_cpu_()
+        df = multiprocessing.reduction.DupFd(fd)
+        cache_key = fd_id(fd)
+        metadata = (df, size)
+        rebuild = rebuild_storage_fd  # type: ignore[assignment]
+
+    shared_cache[cache_key] = StorageWeakRef(storage)
+    return (rebuild, (type(storage),) + metadata)
+
+
+def init_reductions():
+    ForkingPickler.register(torch.cuda.Event, reduce_event)
+
+    for t in torch._storage_classes:
+        if t.__name__ == "UntypedStorage":
+            ForkingPickler.register(t, reduce_storage)
+        else:
+            ForkingPickler.register(t, reduce_typed_storage_child)
+
+    ForkingPickler.register(torch.storage.TypedStorage, reduce_typed_storage)
+
+    for t in torch._tensor_classes:
+        ForkingPickler.register(t, reduce_tensor)
+
+    # TODO: Maybe this should be in tensor_classes? :)
+    ForkingPickler.register(torch.Tensor, reduce_tensor)
+    ForkingPickler.register(torch.nn.parameter.Parameter, reduce_tensor)
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/spawn.py b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/spawn.py
new file mode 100644
index 0000000000000000000000000000000000000000..fed869c9ae26469b03b48ca9d9de260312501c1d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/multiprocessing/spawn.py
@@ -0,0 +1,281 @@
+import logging
+import multiprocessing
+import multiprocessing.connection
+import os
+import pickle
+import signal
+import sys
+import tempfile
+import time
+import warnings
+from typing import Optional
+
+from . import _prctl_pr_set_pdeathsig  # type: ignore[attr-defined]
+
+log = logging.getLogger(__name__)
+
+
+class ProcessException(Exception):
+    __slots__ = ["error_index", "error_pid"]
+
+    def __init__(self, msg: str, error_index: int, pid: int):
+        super().__init__(msg)
+        self.msg = msg
+        self.error_index = error_index
+        self.pid = pid
+
+    def __reduce__(self):
+        return type(self), (self.msg, self.error_index, self.pid)
+
+
+class ProcessRaisedException(ProcessException):
+    """Exception raised when a process failed due to an exception raised by the code."""
+
+    def __init__(
+        self,
+        msg: str,
+        error_index: int,
+        error_pid: int,
+    ):
+        super().__init__(msg, error_index, error_pid)
+
+
+class ProcessExitedException(ProcessException):
+    """Exception raised when a process failed due to signal or exited with a specific code."""
+
+    __slots__ = ["exit_code"]
+
+    def __init__(
+        self,
+        msg: str,
+        error_index: int,
+        error_pid: int,
+        exit_code: int,
+        signal_name: Optional[str] = None,
+    ):
+        super().__init__(msg, error_index, error_pid)
+        self.exit_code = exit_code
+        self.signal_name = signal_name
+
+    def __reduce__(self):
+        return (
+            type(self),
+            (self.msg, self.error_index, self.pid, self.exit_code, self.signal_name),
+        )
+
+
+def _wrap(fn, i, args, error_file):
+    # prctl(2) is a Linux specific system call.
+    # On other systems the following function call has no effect.
+    # This is set to ensure that non-daemonic child processes can
+    # terminate if their parent terminates before they do.
+    _prctl_pr_set_pdeathsig(signal.SIGINT)
+
+    try:
+        fn(i, *args)
+    except KeyboardInterrupt:
+        pass  # SIGINT; Killed by parent, do nothing
+    except Exception:
+        # Propagate exception to parent process, keeping original traceback
+        import traceback
+
+        with open(error_file, "wb") as fh:
+            pickle.dump(traceback.format_exc(), fh)
+        sys.exit(1)
+
+
+class ProcessContext:
+    def __init__(self, processes, error_files):
+        self.error_files = error_files
+        self.processes = processes
+        self.sentinels = {
+            process.sentinel: index for index, process in enumerate(processes)
+        }
+
+    def pids(self):
+        return [int(process.pid) for process in self.processes]
+
+    def join(self, timeout=None):
+        r"""Join one or more processes within spawn context.
+
+        Attempt to join one or more processes in this spawn context.
+        If one of them exited with a non-zero exit status, this function
+        kills the remaining processes and raises an exception with the cause
+        of the first process exiting.
+
+        Returns ``True`` if all processes have been joined successfully,
+        ``False`` if there are more processes that need to be joined.
+
+        Args:
+            timeout (float): Wait this long before giving up on waiting.
+        """
+        # Ensure this function can be called even when we're done.
+        if len(self.sentinels) == 0:
+            return True
+
+        # Wait for any process to fail or all of them to succeed.
+        ready = multiprocessing.connection.wait(
+            self.sentinels.keys(),
+            timeout=timeout,
+        )
+
+        error_index = None
+        for sentinel in ready:
+            index = self.sentinels.pop(sentinel)
+            process = self.processes[index]
+            process.join()
+            if process.exitcode != 0:
+                error_index = index
+                break
+
+        # Return if there was no error.
+        if error_index is None:
+            # Return whether or not all processes have been joined.
+            return len(self.sentinels) == 0
+
+        # Assume failure. Terminate processes that are still alive.
+        # Try SIGTERM then SIGKILL if the process isn't going down.
+        # The reason is that Python signal handling is limited to the main
+        # thread, and if that thread is stuck in C/C++ land it won't get a
+        # chance to handle the signal. We have seen processes get stuck and
+        # not handle SIGTERM for this reason.
+        timeout: int = 30
+        for process in self.processes:
+            if process.is_alive():
+                log.warning("Terminating process %s via signal SIGTERM", process.pid)
+                process.terminate()
+        end = time.monotonic() + timeout
+        for process in self.processes:
+            time_to_wait = max(0, end - time.monotonic())
+            process.join(time_to_wait)
+        for process in self.processes:
+            if process.is_alive():
+                log.warning(
+                    "Unable to shutdown process %s via SIGTERM , forcefully exiting via SIGKILL",
+                    process.pid,
+                )
+                process.kill()
+            process.join()
+
+        # The file will only be created if the process crashed.
+        failed_process = self.processes[error_index]
+        if not os.access(self.error_files[error_index], os.R_OK):
+            exitcode = self.processes[error_index].exitcode
+            if exitcode < 0:
+                try:
+                    name = signal.Signals(-exitcode).name
+                except ValueError:
+                    name = f""
+                raise ProcessExitedException(
+                    "process %d terminated with signal %s" % (error_index, name),
+                    error_index=error_index,
+                    error_pid=failed_process.pid,
+                    exit_code=exitcode,
+                    signal_name=name,
+                )
+            else:
+                raise ProcessExitedException(
+                    "process %d terminated with exit code %d" % (error_index, exitcode),
+                    error_index=error_index,
+                    error_pid=failed_process.pid,
+                    exit_code=exitcode,
+                )
+
+        with open(self.error_files[error_index], "rb") as fh:
+            original_trace = pickle.load(fh)
+        msg = "\n\n-- Process %d terminated with the following error:\n" % error_index
+        msg += original_trace
+        raise ProcessRaisedException(msg, error_index, failed_process.pid)
+
+
+class SpawnContext(ProcessContext):
+    def __init__(self, processes, error_files):
+        warnings.warn("SpawnContext is renamed to ProcessContext since 1.4 release.")
+        super().__init__(processes, error_files)
+
+
+# Note: [start_processes]
+# mp.start_processes handles both start_method='spawn' and 'fork'. It's supposed to be a
+# more generalized API than mp.spawn. Currently we only document mp.spawn as it's the
+# CUDA compatible start_method. However, in environments like IPython notebooks, 'fork'
+# works better than 'spawn'. Every helper function we created for mp.spawn is general
+# enough that backends like XLA can reuse them in Colab notebooks as well.
+# For now we only add this API; we can consider adding it to the documentation as
+# needed in the future.
+def start_processes(
+    fn, args=(), nprocs=1, join=True, daemon=False, start_method="spawn"
+):
+    mp = multiprocessing.get_context(start_method)
+    error_files = []
+    processes = []
+    for i in range(nprocs):
+        # Each process is assigned a file to write tracebacks to.  We
+        # use the file being non-empty to indicate an exception
+        # occurred (vs an expected shutdown).  Note: this previously
+        # used a multiprocessing.Queue but that can be prone to
+        # deadlocks, so we went with a simpler solution for a one-shot
+        # message between processes.
+        tf = tempfile.NamedTemporaryFile(
+            prefix="pytorch-errorfile-", suffix=".pickle", delete=False
+        )
+        tf.close()
+        os.unlink(tf.name)
+        process = mp.Process(
+            target=_wrap,
+            args=(fn, i, args, tf.name),
+            daemon=daemon,
+        )
+        process.start()
+        error_files.append(tf.name)
+        processes.append(process)
+
+    context = ProcessContext(processes, error_files)
+    if not join:
+        return context
+
+    # Loop on join until it returns True or raises an exception.
+    while not context.join():
+        pass
+
+
+def spawn(fn, args=(), nprocs=1, join=True, daemon=False, start_method="spawn"):
+    r"""Spawns ``nprocs`` processes that run ``fn`` with ``args``.
+
+    If one of the processes exits with a non-zero exit status, the
+    remaining processes are killed and an exception is raised with the
+    cause of termination. In the case an exception was caught in the
+    child process, it is forwarded and its traceback is included in
+    the exception raised in the parent process.
+
+    Args:
+        fn (function): Function is called as the entrypoint of the
+            spawned process. This function must be defined at the top
+            level of a module so it can be pickled and spawned. This
+            is a requirement imposed by multiprocessing.
+
+            The function is called as ``fn(i, *args)``, where ``i`` is
+            the process index and ``args`` is the passed through tuple
+            of arguments.
+
+        args (tuple): Arguments passed to ``fn``.
+        nprocs (int): Number of processes to spawn.
+        join (bool): Perform a blocking join on all processes.
+        daemon (bool): The spawned processes' daemon flag. If set to True,
+                       daemonic processes will be created.
+        start_method (str): (deprecated) this method will always use ``spawn``
+                               as the start method. To use a different start method
+                               use ``start_processes()``.
+
+    Returns:
+        None if ``join`` is ``True``,
+        :class:`~ProcessContext` if ``join`` is ``False``
+
+    """
+    if start_method != "spawn":
+        msg = (
+            "This method only supports start_method=spawn (got: %s).\n"
+            "To use a different start_method use:\n\t\t"
+            " torch.multiprocessing.start_processes(...)" % start_method
+        )
+        warnings.warn(msg)
+    return start_processes(fn, args, nprocs, join, daemon, start_method="spawn")
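+
+
+# Example usage (editor's sketch; the worker function and nprocs are illustrative):
+#
+#     import torch.multiprocessing as mp
+#
+#     def worker(i, msg):
+#         print(f"worker {i}: {msg}")
+#
+#     if __name__ == "__main__":
+#         mp.spawn(worker, args=("hello",), nprocs=2)  # blocks until both workers exit
+#         # or, e.g. in a notebook where fork is preferable:
+#         # mp.start_processes(worker, args=("hello",), nprocs=2, start_method="fork")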
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccdad48eca97dccf5c5930a86ec09c58d1a4ce00
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/__init__.py
@@ -0,0 +1,68 @@
+import os.path as _osp
+import torch
+
+from .throughput_benchmark import ThroughputBenchmark
+from .cpp_backtrace import get_cpp_backtrace
+from .backend_registration import rename_privateuse1_backend, generate_methods_for_privateuse1_backend
+from . import deterministic
+from . import collect_env
+import weakref
+import copyreg
+
+def set_module(obj, mod):
+    """
+    Set the module attribute on a Python object for nicer printing.
+    """
+    if not isinstance(mod, str):
+        raise TypeError("The mod argument should be a string")
+    obj.__module__ = mod
+
+if torch._running_with_deploy():
+    # not valid inside torch_deploy interpreter, no paths exists for frozen modules
+    cmake_prefix_path = None
+else:
+    cmake_prefix_path = _osp.join(_osp.dirname(_osp.dirname(__file__)), 'share', 'cmake')
+
+def swap_tensors(t1, t2):
+    """
+    This function swaps the content of the two Tensor objects.
+    At a high level, this will make t1 have the content of t2 while preserving
+    its identity.
+
+    This will not work if t1 and t2 have different slots.
+    """
+    # Ensure there are no weakrefs
+    if weakref.getweakrefs(t1):
+        raise RuntimeError("Cannot swap t1 because it has weakref associated with it")
+    if weakref.getweakrefs(t2):
+        raise RuntimeError("Cannot swap t2 because it has weakref associated with it")
+    t1_slots = set(copyreg._slotnames(t1.__class__))  # type: ignore[attr-defined]
+    t2_slots = set(copyreg._slotnames(t2.__class__))  # type: ignore[attr-defined]
+    if t1_slots != t2_slots:
+        raise RuntimeError("Cannot swap t1 and t2 if they have different slots")
+
+    def swap_attr(name):
+        tmp = getattr(t1, name)
+        setattr(t1, name, (getattr(t2, name)))
+        setattr(t2, name, tmp)
+
+    # Swap the types
+    # Note that this will fail if there are mismatched slots
+    swap_attr("__class__")
+
+    # Swap the dynamic attributes
+    swap_attr("__dict__")
+
+    # Swap the slots
+    for slot in t1_slots:
+        if hasattr(t1, slot) and hasattr(t2, slot):
+            swap_attr(slot)
+        elif hasattr(t1, slot):
+            setattr(t2, slot, (getattr(t1, slot)))
+            delattr(t1, slot)
+        elif hasattr(t2, slot):
+            setattr(t1, slot, (getattr(t2, slot)))
+            delattr(t2, slot)
+
+    # Swap the at::Tensor they point to
+    torch._C._swap_tensor_impl(t1, t2)
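+
+
+# Example (editor's sketch): after the swap, each Python object carries the
+# other's type, attributes and underlying at::Tensor.
+#
+#     a, b = torch.zeros(2), torch.ones(3)
+#     swap_tensors(a, b)
+#     assert a.shape == (3,) and b.shape == (2,)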
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_content_store.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_content_store.py
new file mode 100644
index 0000000000000000000000000000000000000000..f36837ed674e9e21511fb6a22834cab0ca1a0602
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_content_store.py
@@ -0,0 +1,238 @@
+# This module provides a FAST (on GPU) content addressable store for storages
+# (and tensors on top of them) with VERY WEAK portability guarantees (e.g.,
+# don't expect CPU/CUDA to address to the same hash, don't expect it to be
+# portable across devices) that is NOT cryptographically secure.  In return,
+# we are able to hash 40G of tensor data on GPU in less than a second,
+# compared to running SHA-1 on the CPU, which would take a minute or so.  The primary
+# use case is for efficiently snapshotting intermediate tensor data for
+# offline debugging, but it's been put in this module in case you think of
+# another use case for it.  The hash function could be replaced with a
+# straight reimplementation of SHA-1, which would give us much stronger
+# portability guarantees.
+#
+# WARNING: THERE IS NO BC/FC GUARANTEE FOR THIS FORMAT!  If you need to format
+# shift the result, consider packing it into a single torch.save object
+# with traditional view sharing.
+#
+# Because of the weak portability guarantees, you can only write to the
+# content store from a single process; we don't provide any capability
+# of "reopening" a content store to add more things to it.  But we don't
+# assume that you can keep all of the tensors you want to add to the store
+# in memory at once, because you probably can't!  Nor do we assume that
+# you know a priori whether or not two storages can be deduplicated.
+#
+# Note: only storages are content-addressed; tensors are name addressed
+#
+# Note: our padding strategy means that [1, 0] and [1] int16 tensors would
+# map to the same (padded) storage.  We think this will be immaterial for most
+# users.
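+#
+# A minimal sketch of intended use (editor's note; the path and tensor `t` are
+# illustrative):
+#
+#     writer = ContentStoreWriter("/tmp/snapshots")
+#     h = writer.write_storage(t.untyped_storage())  # returns the content hash,
+#                                                    # deduplicating repeats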
+
+import ctypes
+import functools
+import hashlib
+import os.path
+import struct
+from collections import defaultdict
+from typing import Dict, Optional, Set
+
+import torch
+import torch._prims as prims
+import torch._utils
+import torch.nn.functional as F
+from torch._C import default_generator
+
+from torch.multiprocessing.reductions import StorageWeakRef
+
+
+def lazy_compile(**compile_kwargs):
+    """Lazily wrap a function with torch.compile on the first call
+
+    This avoids eagerly importing dynamo.
+    """
+
+    def decorate_fn(fn):
+        @functools.wraps(fn)
+        def compile_hook(*args, **kwargs):
+            compiled_fn = torch.compile(fn, **compile_kwargs)
+            globals()[fn.__name__] = functools.wraps(fn)(compiled_fn)
+            return compiled_fn(*args, **kwargs)
+
+        return compile_hook
+
+    return decorate_fn
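+
+# Example (editor's sketch): compilation is deferred to the first call, after
+# which the compiled function replaces the module-level name.
+#
+#     @lazy_compile(dynamic=True)
+#     def double(x):
+#         return x * 2
+#
+#     double(torch.ones(3))  # torch.compile runs here; later calls hit the compiled fn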
+
+
+# Use of torch.compile is mandatory for (1) good memory usage
+# and (2) xor_sum implementation.  This is our first instance of
+# using PT2 to implement a kernel in PyTorch; if we get AOT capabilities
+# it would be good to apply it here.
+@lazy_compile(dynamic=True)
+def hash_storage_kernel(x):
+    # The randint calls are carefully written to hit things we
+    # have lowerings for in inductor.  Lack of unsigned 32-bit integer
+    # is a pain.
+    a = torch.randint(
+        -(2**31), 2**31, x.shape, device=x.device, dtype=torch.int32
+    ).abs()
+    a = ((a % (2**31 - 1)) + 1).long()
+    b = (
+        torch.randint(-(2**31), 2**31, x.shape, device=x.device, dtype=torch.int32)
+        .abs()
+        .long()
+    )
+    # This is a standard shift-multiply universal hash family
+    # plus xor sum hash, using Philox to generate random numbers.
+    # Our Philox RNG is not deterministic across devices so
+    # don't use this for stable hashing.
+    #
+    # This assumes fixed length so you're also obligated to bucket
+    # by the length of tensor as well
+    return prims.xor_sum((a * x + b).int(), [0])
+
+
+# Returns a hex digest of the data in the storage.  Guaranteed to be
+# SHA-1 if stable_hash=True, otherwise it will be consistent within a single
+# process run but not necessarily across processes.
+def hash_storage(storage: torch.UntypedStorage, *, stable_hash: bool = False) -> str:
+    import torch._dynamo
+    from torch._dynamo.utils import is_compile_supported
+
+    device_type = storage.device.type
+    if stable_hash or not is_compile_supported(device_type):
+        cpu_storage = storage.cpu()
+        # TODO: make storage support buffer protocol so this isn't
+        # necessary
+        buf = (ctypes.c_byte * cpu_storage.nbytes()).from_address(
+            cpu_storage.data_ptr()
+        )
+        sha1 = hashlib.sha1()
+        sha1.update(buf)
+        return sha1.hexdigest()
+
+    # TODO: factor this into a random utility
+    if device_type == "cpu":
+        generator = default_generator
+    elif device_type == "cuda":
+        import torch.cuda
+
+        generator = torch.cuda.default_generators[storage.device.index]
+    else:
+        raise AssertionError(f"unhandled device type {device_type}")
+    state = generator.get_state()
+    try:
+        generator.manual_seed(0)
+        x = torch.empty(0, dtype=torch.uint8, device=storage.device).set_(storage)  # type: ignore[call-overload]
+        # The dtype-casting view cannot be compiled, and so the
+        # padding/reshaping also needs to be done externally even
+        # though it could be profitably fused
+        pad = -x.numel() % 4
+        if pad > 0:
+            x = F.pad(x, (0, pad), "constant", 0)
+        x = x.view(torch.int32)
+        # We run the 32-bit hash five times with differing parameters to
+        # reduce chance of collision
+        ITER = 5
+        cs = [hash_storage_kernel(x).item() for _ in range(ITER)]
+        return struct.pack(">" + "i" * ITER, *cs).hex()
+    finally:
+        generator.set_state(state)
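+
+# Usage sketch (the helper below is illustrative and not called anywhere in
+# this module): identical bytes hash to identical digests, so a tensor and a
+# clone of it produce the same stable hash.
+def _example_hash_storage():
+    x = torch.arange(16, dtype=torch.int64)
+    a = hash_storage(x.untyped_storage(), stable_hash=True)
+    b = hash_storage(x.clone().untyped_storage(), stable_hash=True)
+    assert a == b
+    return a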
+
+
+class ContentStoreWriter:
+    # Structure:
+    #   storages/
+    #     00/
+    #       0000..00
+    #   tensors/
+    #     name
+    def __init__(self, loc: str, stable_hash: bool = False) -> None:
+        self.loc: str = loc
+        self.seen_storage_hashes: Set[str] = set()
+        self.stable_hash = stable_hash
+
+    # TODO: offer some sort of non-blocking API to speed things up
+    def write_storage(self, storage: torch.UntypedStorage) -> str:
+        h = hash_storage(storage, stable_hash=self.stable_hash)
+        if h in self.seen_storage_hashes:
+            return h
+        # TODO: consider not using torch.save for this; we don't actually
+        # need any metadata for the storage
+        subfolder = os.path.join(self.loc, "storages")
+        os.makedirs(subfolder, exist_ok=True)
+        target = os.path.join(subfolder, h)
+        if os.path.exists(target):
+            return h
+        torch.save(storage, target)
+        self.seen_storage_hashes.add(h)
+        return h
+
+    def compute_tensor_metadata(self, t: torch.Tensor, h=None):
+        if h is None:
+            h = hash_storage(t.untyped_storage(), stable_hash=self.stable_hash)
+        return (
+            t.dtype,
+            h,
+            t.storage_offset(),
+            tuple(t.shape),
+            t.stride(),
+            torch._utils.get_tensor_metadata(t),
+        )
+
+    def write_tensor(self, name: str, t: torch.Tensor) -> None:
+        storage = t.untyped_storage()
+        h = self.write_storage(storage)
+        # TODO: Support more advanced snapshotting of requires_grad/grad/etc
+        d, f = os.path.split(name)
+        payload = self.compute_tensor_metadata(t, h=h)
+        subfolder = os.path.join(self.loc, "tensors", d)
+        os.makedirs(subfolder, exist_ok=True)
+        torch.save(payload, os.path.join(subfolder, f))
+
+
+class ContentStoreReader:
+    def __init__(self, loc: str, *, cache=True) -> None:
+        self.loc = loc
+        self.storage_cache: Optional[
+            Dict[Optional[torch.device], Dict[str, StorageWeakRef]]
+        ] = None
+        if cache:
+            self.storage_cache = defaultdict(dict)
+
+    def read_storage(self, h: str, *, device=None) -> torch.UntypedStorage:
+        if device is not None:
+            device = torch.device(device)
+        ws = (
+            self.storage_cache[device].get(h)
+            if self.storage_cache is not None
+            else None
+        )
+        s: Optional[torch.UntypedStorage]
+        if ws is not None:
+            s = torch.UntypedStorage._new_with_weak_ptr(ws.cdata)
+            if s is not None:
+                return s
+        s = torch.load(
+            os.path.join(self.loc, "storages", h),
+            weights_only=True,
+            map_location=device,
+        )._untyped_storage
+        assert s is not None
+        if self.storage_cache is not None:
+            self.storage_cache[device][h] = StorageWeakRef(s)
+        return s
+
+    def read_tensor_metadata(self, name: str):
+        fn = os.path.join(self.loc, "tensors", name)
+        if not os.path.exists(fn):
+            raise FileNotFoundError(fn)
+        return torch.load(fn, weights_only=True)
+
+    def read_tensor(self, name: str, *, device=None) -> torch.Tensor:
+        dtype, h, storage_offset, size, stride, metadata = self.read_tensor_metadata(
+            name
+        )
+        storage = self.read_storage(h, device=device)
+        t = torch.tensor([], dtype=dtype, device=storage.device)
+        t.set_(storage, storage_offset, size, stride)
+        torch._utils.set_tensor_metadata(t, metadata)
+        return t
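+
+# End-to-end sketch (the directory and tensor name below are made up): write a
+# tensor into a content store on disk, then read it back by name.
+def _example_content_store_roundtrip(loc="/tmp/content_store_demo"):
+    writer = ContentStoreWriter(loc, stable_hash=True)
+    t = torch.randn(4, 4)
+    writer.write_tensor("debug/t0", t)
+    reader = ContentStoreReader(loc)
+    t2 = reader.read_tensor("debug/t0")
+    assert torch.equal(t, t2)
+    return t2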
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_device.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_device.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4909e54c267c8daac6dd37c52196c9870178140
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_device.py
@@ -0,0 +1,91 @@
+from typing import Optional
+import torch
+from torch.overrides import TorchFunctionMode
+from torch.utils._contextlib import context_decorator
+import functools
+
+CURRENT_DEVICE: Optional[torch.device] = None
+
+@functools.lru_cache(1)
+def _device_constructors():
+    return {
+        # standard ones
+        torch.empty,
+        torch.empty_permuted,
+        torch.empty_strided,
+        torch.empty_quantized,
+        torch.ones,
+        torch.arange,
+        torch.bartlett_window,
+        torch.blackman_window,
+        torch.eye,
+        torch.fft.fftfreq,
+        torch.fft.rfftfreq,
+        torch.full,
+        torch.fill,
+        torch.hamming_window,
+        torch.hann_window,
+        torch.kaiser_window,
+        torch.linspace,
+        torch.logspace,
+        torch.nested.nested_tensor,
+        # This function doesn't actually take a device argument
+        # torch.normal,
+        torch.ones,
+        torch.rand,
+        torch.randn,
+        torch.randint,
+        torch.randperm,
+        torch.range,
+        torch.sparse_coo_tensor,
+        torch.sparse_compressed_tensor,
+        torch.sparse_csr_tensor,
+        torch.sparse_csc_tensor,
+        torch.sparse_bsr_tensor,
+        torch.sparse_bsc_tensor,
+        torch.tril_indices,
+        torch.triu_indices,
+        torch.vander,
+        torch.zeros,
+        torch.asarray,
+        # weird ones
+        torch.tensor,
+        torch.as_tensor,
+        torch.scalar_tensor,
+        torch.asarray,
+    }
+
+# NB: This is directly called from C++ in torch/csrc/Device.cpp
+class DeviceContext(TorchFunctionMode):
+    def __init__(self, device):
+        self.device = torch.device(device)
+
+    def __enter__(self):
+        global CURRENT_DEVICE
+        self.old_device = CURRENT_DEVICE
+        CURRENT_DEVICE = self.device
+        return super().__enter__()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        global CURRENT_DEVICE
+        CURRENT_DEVICE = self.old_device
+        return super().__exit__(exc_type, exc_val, exc_tb)
+
+    def __torch_function__(self, func, types, args=(), kwargs=None):
+        kwargs = kwargs or {}
+        if func in _device_constructors() and kwargs.get('device') is None:
+            kwargs['device'] = self.device
+        return func(*args, **kwargs)
+
+# NB: This is directly called from C++ in torch/csrc/Device.cpp
+def device_decorator(device, func):
+    return context_decorator(lambda: device, func)
+
+def set_device(device):
+    """
+    Set the default device inside of the wrapped function by decorating it with this function.
+
+    If you would like to use this as a context manager, use device as a
+    context manager directly, e.g., ``with torch.device(device)``.
+    """
+    return lambda func: device_decorator(torch.device(device), func)
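+
+# Illustrative sketch of the two entry points above (the helper names below are
+# made up): DeviceContext used directly as a mode, and set_device used as a
+# decorator.  Factory calls that omit `device=` pick up the chosen default.
+def _example_default_device_usage():
+    with DeviceContext("cpu"):
+        a = torch.empty(3)          # device filled in by the mode
+
+    @set_device("cpu")
+    def _make_zeros():
+        return torch.zeros(2, 2)    # also constructed on the chosen device
+
+    return a.device, _make_zeros().device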
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_foreach_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_foreach_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a9a6a15cada17805f12f36ea8c932a33f4606b3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_foreach_utils.py
@@ -0,0 +1,47 @@
+from typing import List, Dict, Tuple, Optional
+
+import torch
+from torch import Tensor
+from torch.autograd.grad_mode import no_grad
+from typing_extensions import TypeAlias
+
+def _get_foreach_kernels_supported_devices() -> List[str]:
+    r"""Return the device type list that supports foreach kernels."""
+    return ["cuda", "xpu", torch._C._get_privateuse1_backend_name()]
+
+def _get_fused_kernels_supported_devices() -> List[str]:
+    r"""Return the device type list that supports fused kernels in optimizer."""
+    return ["cuda", "xpu", torch._C._get_privateuse1_backend_name()]
+
+TensorListList: TypeAlias = List[List[Optional[Tensor]]]
+Indices: TypeAlias = List[int]
+
+# This util function splits tensors into groups by device and dtype, which is useful before sending
+# tensors off to a foreach implementation, which requires tensors to be on one device and dtype.
+# If tensorlistlist contains more than one tensorlist, the following assumptions are made BUT NOT verified:
+#   - tensorlists CAN be None
+#   - all tensors in the first specified list cannot be None
+#   - given an index i, all specified tensorlist[i]s match in dtype and device
+# with_indices (bool, optional): whether to track previous indices as the last list per dictionary entry.
+#   It comes in handy if there are Nones or literals in the tensorlists that are getting scattered out.
+#   Whereas mutating a tensor in the resulting split-up tensorlists WILL propagate changes back to the
+#   original input tensorlists, changing up Nones/literals WILL NOT propagate, and manual propagation
+#   may be necessary. Check out torch/optim/sgd.py for an example.
+@no_grad()
+def _group_tensors_by_device_and_dtype(
+    tensorlistlist: TensorListList,
+    with_indices: bool = False,
+) -> Dict[Tuple[torch.device, torch.dtype], Tuple[TensorListList, Indices]]:
+    return {
+        (device, getattr(torch, str_dtype)): value
+        for (device, str_dtype), value in
+        torch._C._group_tensors_by_device_and_dtype(tensorlistlist, with_indices).items()
+    }
+
+
+def _device_has_foreach_support(device: torch.device) -> bool:
+    return device.type in (_get_foreach_kernels_supported_devices() + ["cpu"]) and not torch.jit.is_scripting()
+
+
+def _has_foreach_support(tensors: List[Tensor], device: torch.device) -> bool:
+    return _device_has_foreach_support(device) and all(t is None or type(t) == torch.Tensor for t in tensors)
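+
+# Sketch of how the grouping util is typically consumed (the tensors below are
+# made up): params and grads that share a (device, dtype) bucket come back
+# together, ready to be handed to a foreach kernel.
+def _example_group_by_device_and_dtype():
+    params = [torch.zeros(2), torch.zeros(2, dtype=torch.float64)]
+    grads = [torch.ones(2), torch.ones(2, dtype=torch.float64)]
+    grouped = _group_tensors_by_device_and_dtype([params, grads])
+    for (device, dtype), ((bucket_params, bucket_grads), _indices) in grouped.items():
+        assert all(p.device == device and p.dtype == dtype for p in bucket_params)
+    return grouped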
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_freeze.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_freeze.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7be90a4baee6d0f8e70d6d12a197eb160146975
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_freeze.py
@@ -0,0 +1,289 @@
+"""
+Freeze Python packages.
+
+Freezing makes it possible to ship arbitrary Python modules as part of a C++
+library. The Python source of the module is compiled to bytecode and written
+to `.c` files, to be imported by Python's built-in FrozenImporter.
+
+In a normal Python installation, FrozenImporter is only used to bootstrap the
+initialization of the import machinery. Python's importers are defined in
+Python (see `_bootstrap.py` and `_bootstrap_external.py`) but need to be
+retrieved before any importers are available. Freezing the module bytecode
+resolves this circular dependency.
+
+This script will freeze the Python standard library. It produces two things:
+- Bytecode files: A set of `.c` files that define C variables containing Python bytecode.
+- Main file: A `main.c` file listing all of these modules in the right form to be
+  consumed by FrozenImporter.
+
+A library that wishes to use these modules can make them available to the local
+Python instance by extending `PyImport_FrozenModules` appropriately (see
+https://docs.python.org/3/c-api/import.html#c.PyImport_FrozenModules).
+"""
+
+import argparse
+import functools
+import itertools
+import marshal
+import os
+import types
+from dataclasses import dataclass
+from pathlib import Path
+from typing import List
+
+
+PATH_MARKER = ""
+MAIN_INCLUDES = """#include 
+
+"""
+
+MAIN_PREFIX_TEMPLATE = """
+// Compiled standard library modules. These should be appended to the existing
+// `PyImport_FrozenModules` that ships with CPython.
+struct _frozen {}[] = {{
+"""
+
+FAKE_PREFIX = MAIN_PREFIX_TEMPLATE.format("_PyImport_FrozenModules")
+
+MAIN_SUFFIX = """\
+    {0, 0, 0} /* sentinel */
+};
+"""
+
+# Exclude some standard library modules to:
+# 1. Slim down the final frozen lib.
+# 2. Remove functionality we don't want to support.
+DENY_LIST = [
+    # Interface to unix databases
+    "dbm",
+    # ncurses bindings (terminal interfaces)
+    "curses",
+    # Tcl/Tk GUI
+    "tkinter",
+    "tkinter",
+    # Tests for the standard library
+    "test",
+    "tests",
+    "idle_test",
+    "__phello__.foo.py",
+    # importlib frozen modules. These are already baked into CPython.
+    "_bootstrap.py",
+    "_bootstrap_external.py",
+]
+
+NUM_BYTECODE_FILES = 5
+
+
+def indent_msg(fn):
+    @functools.wraps(fn)
+    def wrapper(*args, **kwargs):
+        args[0].indent += 1
+        ret = fn(*args, **kwargs)
+        args[0].indent -= 1
+        return ret
+
+    return wrapper
+
+
+@dataclass
+class FrozenModule:
+    # The fully qualified module name, e.g. 'foo.bar.baz'
+    module_name: str
+    # The name of the C variable that holds the bytecode, e.g. 'M_foo__bar__baz'
+    c_name: str
+    # The size of the C variable. Negative if this module is a package.
+    size: int
+    # The frozen bytecode
+    bytecode: bytes
+
+
+class Freezer:
+    def __init__(self, verbose: bool):
+        self.frozen_modules: List[FrozenModule] = []
+        self.indent: int = 0
+        self.verbose: bool = verbose
+
+    def msg(self, path: Path, code: str):
+        if not self.verbose:
+            return
+        # P: package dir
+        # F: python file
+        # S: skipped (not a package dir)
+        # X: skipped (deny-listed)
+        # N: skipped (not a python file)
+        for i in range(self.indent):
+            print("    ", end="")
+        print(f"{code} {path}")
+
+    def write_bytecode(self, install_root):
+        """
+        Write the `.c` files containing the frozen bytecode.
+
+        Shard the frozen modules evenly across the files.
+        """
+        bytecode_file_names = [f"bytecode_{i}.c" for i in range(NUM_BYTECODE_FILES)]
+        bytecode_files = [
+            open(os.path.join(install_root, name), "w") for name in bytecode_file_names
+        ]
+        it = itertools.cycle(bytecode_files)
+        for m in self.frozen_modules:
+            self.write_frozen(m, next(it))
+
+        for f in bytecode_files:
+            f.close()
+
+    def write_main(self, install_root, oss, symbol_name):
+        """Write the `main.c` file containing a table enumerating all the frozen modules."""
+        with open(os.path.join(install_root, "main.c"), "w") as outfp:
+            outfp.write(MAIN_INCLUDES)
+            for m in self.frozen_modules:
+                outfp.write(f"extern unsigned char {m.c_name}[];\n")
+
+            outfp.write(MAIN_PREFIX_TEMPLATE.format(symbol_name))
+            for m in self.frozen_modules:
+                outfp.write(f'\t{{"{m.module_name}", {m.c_name}, {m.size}}},\n')
+            outfp.write(MAIN_SUFFIX)
+            if oss:
+                outfp.write(FAKE_PREFIX)
+                outfp.write(MAIN_SUFFIX)
+
+    def write_frozen(self, m: FrozenModule, outfp):
+        """Write a single frozen module's bytecode out to a C variable."""
+        outfp.write(f"unsigned char {m.c_name}[] = {{")
+        for i in range(0, len(m.bytecode), 16):
+            outfp.write("\n\t")
+            for c in bytes(m.bytecode[i : i + 16]):
+                outfp.write("%d," % c)
+        outfp.write("\n};\n")
+
+    def compile_path(self, path: Path, top_package_path: Path):
+        """Entry point for compiling a Path object."""
+        if path.is_dir():
+            self.compile_package(path, top_package_path)
+        else:
+            self.compile_file(path, top_package_path)
+
+    @indent_msg
+    def compile_package(self, path: Path, top_package_path: Path):
+        """Compile all the files within a Python package dir."""
+        assert path.is_dir()
+        if path.name in DENY_LIST:
+            self.msg(path, "X")
+            return
+
+        # Python packages are directories that have __init__.py in them.
+        is_package_dir = any(child.name == "__init__.py" for child in path.iterdir())
+        if not is_package_dir:
+            self.msg(path, "S")
+            return
+
+        self.msg(path, "P")
+        # Recursively compile all children in this dir
+        for child in path.iterdir():
+            self.compile_path(child, top_package_path)
+
+    def get_module_qualname(self, file_path: Path, top_package_path: Path) -> List[str]:
+        # `path` looks like 'Lib/foo/bar/baz.py'
+
+        # chop off 'Lib/' to get something that represents a Python module hierarchy.
+        # e.g. 'foo/bar/baz.py', which maps to 'foo.bar.baz'
+        normalized_path = file_path.relative_to(top_package_path.parent)
+
+        if normalized_path.name == "__init__.py":
+            # Special handling for `__init__.py`. In this case, this file
+            # specifies that the containing directory should be treated as a package.
+            # For 'foo/bar/baz/__init__.py':
+            # - The module name is 'baz'
+            module_basename = normalized_path.parent.name
+            # - The parent is foo.bar (need to shave off the 'baz')
+            module_parent = normalized_path.parent.parent.parts
+        else:
+            module_basename = normalized_path.stem
+            module_parent = normalized_path.parent.parts
+        return list(module_parent) + [module_basename]
+
+    def compile_string(self, file_content: str) -> types.CodeType:
+        # Instead of passing the real build-time path to 'compile', we pass in
+        # a marker. This prevents the build-time path from being leaked to
+        # runtime; that path may not be available at runtime anyway.
+        # Setting the path to a marker makes sure it's a hard error rather
+        # than a flaky error when the inspect module tries to retrieve Python
+        # source code during torchscripting.
+        path_marker = PATH_MARKER
+        return compile(file_content, path_marker, "exec")
+
+    @indent_msg
+    def compile_file(self, path: Path, top_package_path: Path):
+        """
+        Compile a Python source file to frozen bytecode.
+
+        Append the result to `self.frozen_modules`.
+        """
+        assert path.is_file()
+        if path.suffix != ".py":
+            self.msg(path, "N")
+            return
+
+        if path.name in DENY_LIST:
+            self.msg(path, "X")
+            return
+
+        self.msg(path, "F")
+        module_qualname = self.get_module_qualname(path, top_package_path)
+        module_mangled_name = "__".join(module_qualname)
+        c_name = "M_" + module_mangled_name
+
+        with open(path) as src_file:
+            co = self.compile_string(src_file.read())
+
+        bytecode = marshal.dumps(co)
+        size = len(bytecode)
+        if path.name == "__init__.py":
+            # Python packages are signified by negative size.
+            size = -size
+        self.frozen_modules.append(
+            FrozenModule(".".join(module_qualname), c_name, size, bytecode)
+        )
+
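+# Programmatic usage sketch (the directory paths are hypothetical): freeze a
+# single package directory and emit the generated C files, mirroring what
+# main() below does for each path given on the command line.
+def _example_freeze_package(package_dir: str, out_dir: str) -> None:
+    freezer = Freezer(verbose=True)
+    freezer.compile_path(Path(package_dir), Path(package_dir))
+    os.makedirs(out_dir, exist_ok=True)
+    freezer.write_bytecode(out_dir)
+    freezer.write_main(out_dir, oss=True, symbol_name="_PyImport_FrozenModules_torch")
+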
+
+def main() -> None:
+    parser = argparse.ArgumentParser(description="Compile py source")
+    parser.add_argument("paths", nargs="*", help="Paths to freeze.")
+    parser.add_argument("--verbose", action="store_true", help="Print debug logs")
+    parser.add_argument(
+        "--install-dir", "--install_dir", help="Root directory for all output files"
+    )
+    parser.add_argument(
+        "--oss",
+        action="store_true",
+        help="If it's OSS build, add a fake _PyImport_FrozenModules",
+    )
+    parser.add_argument(
+        "--symbol-name",
+        "--symbol_name",
+        help="The name of the frozen module array symbol to generate",
+        default="_PyImport_FrozenModules_torch",
+    )
+
+    args = parser.parse_args()
+
+    f = Freezer(args.verbose)
+
+    for p in args.paths:
+        path = Path(p)
+        if path.is_dir() and not Path.exists(path / "__init__.py"):
+            # this 'top level path p' is a standard directory containing modules,
+            # not a module itself
+            # each 'mod' could be a dir containing __init__.py or .py file
+            # NB: sorted to make sure this is deterministic
+            for mod in sorted(path.glob("*")):
+                f.compile_path(mod, mod)
+        else:
+            f.compile_path(path, path)
+
+    f.write_bytecode(args.install_dir)
+    f.write_main(args.install_dir, args.oss, args.symbol_name)
+
+
+if __name__ == "__main__":
+    main()  # pragma: no cover
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_mode_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_mode_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6e3cbb5e9403cb3dda102a85665da15a4f10482
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_mode_utils.py
@@ -0,0 +1,10 @@
+import torch
+from typing import TypeVar
+
+T = TypeVar('T')
+
+# Returns True if every mode in `modes` is the same mode.
+def all_same_mode(modes):
+    return all(mode == modes[0] for mode in modes)
+
+no_dispatch = torch._C._DisableTorchDispatch
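+
+
+# Sketch (illustrative helper, not used elsewhere): no_dispatch() is entered as
+# a context manager so the enclosed ops bypass any active __torch_dispatch__
+# handlers, exactly as _python_dispatch.py does when fixing up storage aliasing.
+def _example_no_dispatch():
+    with no_dispatch():
+        return torch.ones(2) + 1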
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_python_dispatch.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_python_dispatch.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c774f2d0e16dfb1de6813a6abcdd30d4e251d33
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_python_dispatch.py
@@ -0,0 +1,495 @@
+import contextlib
+from typing import Optional, Union, List, Set, Dict, Any
+
+import warnings
+from dataclasses import dataclass
+import torch
+import torchgen
+from torch._C import _len_torch_dispatch_stack, _get_dispatch_stack_at, \
+    _pop_torch_dispatch_stack, _push_on_torch_dispatch_stack, DispatchKey
+
+
+# TODO: Limitations and things about enable_torch_dispatch_mode we should fix before exposing it:
+# - We need a better user-facing api for _DisableTorchDispatch that
+#   is able to selectively disable __torch_dispatch__ of a particular class.
+# - It doesn't work with the tensor constructors (torch.tensor, torch.Tensor)
+# - Better name (see https://github.com/pytorch/pytorch/pull/63496#discussion_r694091694)
+
+class TorchDispatchMode:
+    """
+    A ``TorchDispatchMode`` allows you to override the meaning of all
+    ``__torch_dispatch__`` overrideable functions within a dynamic scope,
+    without having to actually create a tensor subclass or manually
+    monkey-patch functions in the PyTorch API.  Some common situations
+    where you should use a mode:
+
+        * You want to override the meaning of factory functions, or other
+          functions that do not otherwise take a tensor as an argument
+          (these cannot be overridden with tensor subclasses).
+
+        * You want to override the behavior of all functions without needing
+          to wrap your inputs in tensor subclasses; e.g., if you are just
+          interested in logging intermediate computations.
+
+        * You want to control the order of execution of various tensor
+          subclasses explicitly, rather than implicitly via the return of
+          ``NotImplemented``.
+
+    Independent subclasses of :class:`TorchDispatchMode` are compositional:
+    modes can be pushed onto a stack using ``with MyMode():``.
+    When you call functions in the PyTorch API inside your
+    ``__torch_dispatch__`` implementation, by default, they will forward on to
+    the next mode on the mode stack.  If you want to recursively call back into
+    your current ``__torch_dispatch__`` implementation, either explicitly
+    invoke ``self.__torch_dispatch__(...)``, or use the context manager
+    ``__torch_dispatch__(self)`` to make PyTorch
+    API self-referential (beware of infinite loops, in this case!)
+    """
+
+    def __init__(self, _dispatch_key=None):
+        if _dispatch_key is not None:
+            assert isinstance(_dispatch_key, torch._C.DispatchKey)
+            self.__dict__['_dispatch_key'] = _dispatch_key
+
+    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
+        raise NotImplementedError()
+
+    def __enter__(self):
+        _push_mode(self)
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        mb_dk_or_mode_key = self.__dict__.get("_dispatch_key", None)
+        if mb_dk_or_mode_key is None:
+            # Today, mode keys are not used at all in the per-dispatch-key-mode logic (for pre-dispatch)
+            # We should probably revisit this.
+            mb_dk_or_mode_key = self.__dict__.get("_mode_key", None)
+        _pop_mode(mb_dk_or_mode_key)
+
+    @classmethod
+    def push(cls, *args, **kwargs):
+        warnings.warn("`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`")
+        instance = cls(*args, **kwargs)
+        return instance
+
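+# A minimal sketch of a custom mode (the class and helper below are
+# illustrative, not part of the public API): log every dispatched op, then
+# forward to the default behavior.
+class _LoggingMode(TorchDispatchMode):
+    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
+        kwargs = kwargs or {}
+        print(f"dispatch: {func}")
+        return func(*args, **kwargs)
+
+
+def _example_logging_mode():
+    with _LoggingMode():
+        return torch.rand(2) + torch.rand(2)
+
+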
+def _get_current_dispatch_mode():
+    stack_len = _len_torch_dispatch_stack()
+    # Return a user mode on the stack if there are any
+    if stack_len > 0:
+        return _get_dispatch_stack_at(stack_len - 1)
+    return None
+
+
+def _detect_functional_mode():
+    from torch._ops import _get_dispatch_mode_pre_dispatch
+    pre_dispatch_functional_mode = _get_dispatch_mode_pre_dispatch(torch._C._TorchDispatchModeKey.FUNCTIONAL)
+    post_dispatch_functional_mode = torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FUNCTIONAL)
+
+    assert (pre_dispatch_functional_mode is None) or (post_dispatch_functional_mode is None)
+
+    if pre_dispatch_functional_mode is None:
+        return post_dispatch_functional_mode
+
+    return pre_dispatch_functional_mode
+
+def _unset_infra_mode(key):
+    from torch._ops import unset_mode_pre_dispatch, _get_dispatch_mode_pre_dispatch
+    pre_dispatch_mode = _get_dispatch_mode_pre_dispatch(key)
+    post_dispatch_mode = torch._C._get_dispatch_mode(key)
+    if pre_dispatch_mode and post_dispatch_mode:
+        raise AssertionError("Can't have active infra mode on both pre and post dispatch mode stack")
+
+    if pre_dispatch_mode:
+        mode = unset_mode_pre_dispatch(key)
+        return mode
+    if post_dispatch_mode:
+        return torch._C._unset_dispatch_mode(key)
+
+
+@contextlib.contextmanager
+def _disable_infra_mode(key):
+    assert key in (torch._C._TorchDispatchModeKey.FUNCTIONAL, torch._C._TorchDispatchModeKey.PROXY)
+    mode_unset = _unset_infra_mode(key)
+    try:
+        yield mode_unset
+    finally:
+        if mode_unset is not None:
+            _push_mode(mode_unset)
+
+
+def _get_current_dispatch_mode_stack():
+    stack_len = _len_torch_dispatch_stack()
+    return [_get_dispatch_stack_at(i) for i in range(stack_len)]
+
+
+def _push_mode(mode):
+    k = mode._dispatch_key if hasattr(mode, "_dispatch_key") else None
+    assert k is None or k == torch._C.DispatchKey.PreDispatch
+    if k is None:
+        _push_on_torch_dispatch_stack(mode)
+        return
+
+    from torch._ops import get_cached_ops, _set_mode_pre_dispatch
+    # See Note [Not Caching Per-Dispatch-Key Mode Handlers]
+    # Clear the cache of every op that has been used so far, for this particular key.
+    ks = torch._C._functionality_to_backend_keys(k)
+    for op in get_cached_ops():
+        for key in ks:
+            op._uncache_dispatch(key)
+    _set_mode_pre_dispatch(mode)
+
+
+def _pop_mode(k: Optional[Union[DispatchKey, torch._C._TorchDispatchModeKey]] = None):
+    if k == torch._C.DispatchKey.PreDispatch:  # type: ignore[attr-defined]
+        from torch._ops import _pop_mode_from_pre_dispatch
+        return _pop_mode_from_pre_dispatch()
+
+    if k is None or isinstance(k, torch._C._TorchDispatchModeKey):
+        return _pop_torch_dispatch_stack(k)
+
+@contextlib.contextmanager
+def _pop_mode_temporarily(k: Optional[DispatchKey] = None):
+    old = _pop_mode(k)
+    try:
+        yield old
+    finally:
+        _push_mode(old)
+
+@contextlib.contextmanager
+def _disable_current_modes():
+    from torch._ops import _len_torch_dispatch_stack_pre_dispatch, _pop_mode_from_pre_dispatch
+    from torch._subclasses.functional_tensor import FunctionalTensorMode
+    from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode
+    mode_len_pre_dispatch = _len_torch_dispatch_stack_pre_dispatch()
+    old_pre_dispatch_modes = [_pop_mode_from_pre_dispatch() for _ in range(mode_len_pre_dispatch)]
+
+    has_proxy_mode_in_pre_dispatch = False
+    has_functional_mode_in_pre_dispatch = False
+
+    for i in old_pre_dispatch_modes:
+        if isinstance(i, ProxyTorchDispatchMode):
+            has_proxy_mode_in_pre_dispatch = True
+        if isinstance(i, FunctionalTensorMode):
+            has_functional_mode_in_pre_dispatch = True
+
+    mode_len = _len_torch_dispatch_stack()
+    old_modes = [_pop_mode() for _ in range(mode_len)]
+
+    for old in old_modes:
+        if isinstance(old, FunctionalTensorMode) and has_functional_mode_in_pre_dispatch:
+            raise AssertionError("Can't have FunctionalMode available both in PreDispatch and Python Key")
+        if isinstance(old, ProxyTorchDispatchMode) and has_proxy_mode_in_pre_dispatch:
+            raise AssertionError("Can't have ProxyTorchDispatchMode available both in PreDispatch and Python Key")
+
+    # Manually disable proxy and fake modes, if any are active
+    try:
+        yield old_pre_dispatch_modes + old_modes
+    finally:
+        for mode in reversed(old_modes):
+            _push_mode(mode)
+        for mode in reversed(old_pre_dispatch_modes):
+            _push_mode(mode)
+
+
+class BaseTorchDispatchMode(TorchDispatchMode):
+    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
+        if kwargs is None:
+            kwargs = {}
+        return func(*args, **kwargs)
+
+def is_traceable_wrapper_subclass(t):
+    """
+    Returns whether or not a tensor subclass that implements __torch_dispatch__
+    is 'traceable' with torch.compile.
+    In order for a tensor subclass to support TorchDispatchMode-style tracing in PT2,
+    it must implement two magic methods: __tensor_flatten__ and __tensor_unflatten__.
+    It is also expected to obey some restrictions around traceability and aliasing:
+        * The subclass's __torch_dispatch__() implementation should desugar into pytorch
+            dispatcher operations that can be traced into a graph.
+        * The subclass should use return_and_correct_aliasing(). This is needed today to make
+            sure that torch.compile does the right thing in a few cases around input mutation
+            and output aliasing.
+
+    Expected magic method signatures:
+        attrs, ctx = t.__tensor_flatten__()
+            attrs: list of attribute name strings for inner tensors
+            ctx: dict containing any other subclass-specific metadata needed for unflattening
+
+        t = MySubClass.__tensor_unflatten__(inner_tensors, ctx, outer_size, outer_stride)
+            inner_tensors: dict mapping attribute name -> tensor for each inner tensor
+            ctx: dict with subclass metadata in the form that __tensor_flatten__() produces
+            outer_size: expected (possibly symbolic) size that the returned subclass
+                instance should have. Note that this arg is useful for certain subclasses
+                that require the shape info to be constructed. In most cases, this arg can be
+                safely ignored.
+            outer_stride: expected (possibly symbolic) stride that the returned subclass
+                instance should have. Note that this arg is useful for certain subclasses
+                that require the stride info to be constructed. In most cases, this arg can be
+                safely ignored.
+    """
+    is_subclass = isinstance(t, torch.Tensor) and type(t) != torch.Tensor
+    return is_subclass and hasattr(t, "__tensor_flatten__") and hasattr(t, "__tensor_unflatten__")
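+
+# A toy subclass satisfying the contract above (illustrative only; it wraps a
+# single inner tensor and unwraps it on every dispatch):
+class _ToyWrapperTensor(torch.Tensor):
+    @staticmethod
+    def __new__(cls, inner):
+        return torch.Tensor._make_wrapper_subclass(
+            cls, inner.shape, dtype=inner.dtype, device=inner.device
+        )
+
+    def __init__(self, inner):
+        self._inner = inner
+
+    def __tensor_flatten__(self):
+        return ["_inner"], None
+
+    @staticmethod
+    def __tensor_unflatten__(inner_tensors, ctx, outer_size, outer_stride):
+        return _ToyWrapperTensor(inner_tensors["_inner"])
+
+    @classmethod
+    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
+        kwargs = kwargs or {}
+        unwrapped = [a._inner if isinstance(a, _ToyWrapperTensor) else a for a in args]
+        return func(*unwrapped, **kwargs)
+
+# is_traceable_wrapper_subclass(_ToyWrapperTensor(torch.ones(2))) returns True.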
+
+def transform_subclass(t, callback, outer_size=None, outer_stride=None):
+    """
+    Given a traceable, wrapper tensor subclass ``t`` that implements
+    ``__torch_dispatch__`` and holds some inner tensors,
+    and a callback of type ``Callable[[str, torch.Tensor], torch.Tensor]``,
+    `transform_subclass` will construct a fresh instance of the wrapper tensor subclass.
+    It will do so by grabbing each inner tensor attribute from the wrapper,
+    passing them into ``callback`` to get a transformed tensor,
+    and putting each transformed tensor into the fresh tensor subclass instance.
+
+    Note: this function will not handle ensuring that the fresh subclass
+    gets the same (autograd, and aliasing) metadata as the original tensor.
+    This is generally handled in other subsystems like AOTAutograd.
+    """
+    outer_size = outer_size if outer_size is not None else t.size()
+    outer_stride = outer_stride if outer_stride is not None else t.stride()
+
+    attrs, ctx = t.__tensor_flatten__()
+    transformed_tensors_dict = {}
+    for attr in attrs:
+        transformed_tensors_dict[attr] = callback(attr, getattr(t, attr))
+    sub = type(t).__tensor_unflatten__(
+        transformed_tensors_dict, ctx, outer_size, outer_stride
+    )
+
+    # NB: Purposefully guard here to simplify the inner / outer symbols.
+    # Using sym_eq() for symbolic comparison can result in an expression that's too
+    # difficult to guard on, so we use == here.
+    assert sub.shape == outer_size, \
+        f"Expected return value from {type(t)}__tensor_unflatten__() to have " \
+        f"shape equal to {outer_size}, but got: {sub.shape}"
+    assert sub.stride() == outer_stride, \
+        f"Expected return value from {type(t)}__tensor_unflatten__() to have " \
+        f"stride equal to {outer_stride}, but got: {sub.stride()}"
+
+    return sub
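+
+# Sketch with the toy wrapper above: the callback receives each inner attribute
+# name and tensor, and its result is packed into a fresh wrapper instance.
+def _example_transform_subclass():
+    t = _ToyWrapperTensor(torch.randn(3))
+    return transform_subclass(t, lambda attr, inner: inner.detach())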
+
+def _correct_storage_aliasing(func, schema_info, args, outs):
+    """
+    Given: an OpOverload, a SchemaInfo (cached information from torchgen about schema),
+    and the inputs/outputs to the OpOverload,
+    this function checks to see if func is a view operator
+    (by checking if any of the outputs in the op's schema
+     are immutable aliases of inputs).
+    If so, this function manually aliases the storage of the output tensor
+    with its corresponding input tensor alias.
+    It does this by unsafely overwriting the storage field of the output tensor
+    to be the same storage as the input.
+    """
+    assert isinstance(func, torch._ops.OpOverload)
+    assert isinstance(args, tuple)
+    assert isinstance(outs, (list, tuple))
+    flat_outs = torch.utils._pytree.tree_leaves(outs)
+
+    def alias_non_inplace_storage(arg, ret):
+        # This is hopefully a reasonable assert:
+        # subclasses that rely on this API for output aliasing
+        # should always return wrapper tensor subclasses for us to manually alias.
+        # in theory if a subclass that needs this API wants to sometimes return
+        # plain tensors, we could remove the assert and just not perform the aliasing,
+        # but it seems safer to learn more about this case first.
+        if is_traceable_wrapper_subclass(arg) or is_traceable_wrapper_subclass(ret):
+            ret_list = ret if isinstance(ret, list) else [ret]
+            for r in ret_list:
+                assert type(arg) == type(r), f"""Called {str(func)} with input of type {type(arg)}
+and output of type {type(ret)}. But expected types to match."""
+        # Need to run under no_dispatch, because we explicitly do **not**
+        # want our subclass to intercept the set_() call.
+        # instead, our subclass should directly have its storage swapped out.
+        with torch.utils._mode_utils.no_dispatch():
+            # See Note: [Fake Tensor Dispatch Keys]
+            # we're borrowing the way it modifies dispatch key TLS.
+            meta_in_tls = torch._C._meta_in_tls_dispatch_include()
+            torch._C._set_meta_in_tls_dispatch_include(True)
+            try:
+                # directly calling this overload, and passing ret.shape, because we **explicitly**
+                # don't want to reset the sizes on ret, if the storage implies a size change.
+                # Why?
+                # The purpose of this API is *not* to change the size/strides of our output; we assume it's already correct.
+                # We just want to "fix up" the storage aliasing, without modifying our output's metadata.
+                # Example: out = inp.expand(inp.shape[0], inp.shape[0])
+                #     This requires swapping the storage of out to be the same as inp,
+                #     but we do *not* want it to change the sizes/strides that were computed for out.
+
+                if isinstance(ret, list):
+                    for r in ret:
+                        torch.ops.aten.set_.source_Storage_storage_offset(
+                            r, arg.untyped_storage(), r.storage_offset(), r.shape, r.stride())
+                else:
+                    assert isinstance(ret, torch.Tensor), f"type: {type(ret)}"
+                    torch.ops.aten.set_.source_Storage_storage_offset(
+                        ret, arg.untyped_storage(), ret.storage_offset(), ret.shape, ret.stride()
+                    )
+            finally:
+                torch._C._set_meta_in_tls_dispatch_include(meta_in_tls)
+
+    def is_read_only_alias_match(arg, ret):
+        shared_aliases = arg.alias_set & ret.alias_set
+        return len(shared_aliases) > 0 and not arg.is_write
+
+    num_args = len(func._schema.arguments)
+    num_returns = len(func._schema.returns)
+    for arg_idx in range(num_args):
+        for return_idx in range(num_returns):
+            if is_read_only_alias_match(schema_info.args[arg_idx], schema_info.outs[return_idx]):
+                alias_non_inplace_storage(args[arg_idx], outs[return_idx])
+
+# This abstracts over the fact that in return_and_correct_aliasing,
+# we sometimes use torchgen schema parsing (for aten ops, since torchscript's schema parsing is sometimes buggy),
+# and sometimes use torchscript schema parsing (for custom ops, for which torchgen parsing is untested).
+@dataclass
+class AliasInfo:
+    alias_set: Set[str]
+    is_write: bool
+    name: Optional[str]
+
+@dataclass
+class SchemaInfo:
+    args: List[AliasInfo]
+    outs: List[AliasInfo]
+
+# Can't import torch._ops.OpOverload due to circular reference
+parsed_schema_map: Dict[Any, SchemaInfo] = {}
+
+# Given an OpOverload, returns schema information on it.
+# This is cached for efficiency, since it can involve running torchgen
+def get_alias_info(func) -> SchemaInfo:
+    if func in parsed_schema_map:
+        return parsed_schema_map[func]
+    # For ATen ops: use torchgen (since torchscript parser doesn't handle alias annotations
+    # properly for some ops that output tensorlists)
+    if func.namespace == "aten":
+        torchgen_schema_str = str(func._schema)
+        assert torchgen_schema_str.startswith("aten::")
+        # remove the aten:: namespace, which is added by the torchscript parser,
+        # and torchgen doesn't know how to handle
+        torchgen_schema_str = torchgen_schema_str[6:]
+        import re
+        # the torchscript parser ends up converting int[2]=1 into int[2]=[1, 1],
+        # which torchgen chokes on.
+        torchgen_schema_str = re.sub(r'=\[[0, ]+\]', '=0', torchgen_schema_str)
+        torchgen_schema_str = re.sub(r'=\[[1, ]+\]', '=1', torchgen_schema_str)
+        # for aten::rot90
+        torchgen_schema_str = torchgen_schema_str.replace("=[0, 1]", "=[0,1]")
+        torchgen_schema = torchgen.model.FunctionSchema.parse(torchgen_schema_str)
+        arg_schemas = [AliasInfo(
+            alias_set=set() if a.annotation is None else set(a.annotation.alias_set),
+            is_write=a.annotation is not None and a.annotation.is_write,
+            name=a.name,
+        ) for a in torchgen_schema.arguments.flat_all]
+        out_schemas = [AliasInfo(
+            alias_set=set() if a.annotation is None else set(a.annotation.alias_set),
+            is_write=a.annotation is not None and a.annotation.is_write,
+            name=a.name,
+        ) for a in torchgen_schema.returns]
+    else:
+        # For non-aten ops, torchgen is untested so we rely on torchscript schema parsing
+        arg_schemas = [AliasInfo(
+            alias_set=set() if a.alias_info is None else set(a.alias_info.before_set),
+            is_write=a.alias_info is not None and a.alias_info.is_write,
+            name=a.name,
+        ) for a in func._schema.arguments]
+        out_schemas = [AliasInfo(
+            alias_set=set() if a.alias_info is None else set(a.alias_info.before_set),
+            is_write=a.alias_info is not None and a.alias_info.is_write,
+            name=a.name,
+        ) for a in func._schema.returns]
+    schema_info = SchemaInfo(args=arg_schemas, outs=out_schemas)
+    parsed_schema_map[func] = schema_info
+    return schema_info
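+
+# Sketch: the cached alias info for an in-place op.  For aten.add_.Tensor the
+# schema is "add_.Tensor(Tensor(a!) self, ...) -> Tensor(a!)", so the first
+# argument and the return share a written-to alias set.
+def _example_get_alias_info():
+    info = get_alias_info(torch.ops.aten.add_.Tensor)
+    assert info.args[0].is_write
+    assert info.outs[0].alias_set == info.args[0].alias_set
+    return info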
+
+def return_and_correct_aliasing(func, args, kwargs, out):
+    """
+    This function should be used by wrapper tensor ``__torch_dispatch__`` subclasses
+    that would like to work with torch.compile. It ensures that the subclass
+    properly implements the aliasing behavior of every op,
+    which is needed for correctness in AOTAutograd.
+    This function will handle:
+
+        * When we see a view op, we will alias the storages of any
+          input and output tensor subclasses
+
+        * When we see an inplace or out= op, we will directly
+          return the corresponding input tensor, instead of returning
+          a (potentially) fresh output tensor.
+    """
+
+    # Caching here because torchgen parsing is definitely not fast, and this function is called
+    # once for every op in the graph during functionalization.
+    schema_info = get_alias_info(func)
+
+    def get_write_alias(x):
+        if len(x.alias_set) == 0:
+            return None
+        alias_set = list(x.alias_set)
+        # torchscript allows for complicated alias sets, but our dispatcher ops only really involve simple aliasing
+        assert len(alias_set) == 1
+        if x.is_write:
+            return alias_set[0]
+        return None
+
+    def get_arg_from_alias(output_alias, schema_info, args, kwargs):
+        new_args, new_kwargs = torch.fx.operator_schemas.normalize_function(func, args=args, kwargs=kwargs)
+
+        arg_indices = [
+            i for i, a in enumerate(schema_info.args)
+            if output_alias in a.alias_set
+        ]
+        # For any dispatcher op with an output alias, we expect it to map to exactly one alias in the schema's input arguments.
+        assert len(arg_indices) == 1
+        idx = arg_indices[0]
+        arg_info = schema_info.args[idx]
+        if arg_info.name is not None and arg_info.name in new_kwargs:
+            return new_kwargs[arg_info.name]
+        return new_args[idx]
+
+    # Fix up the storages of any outs so that they point to the same storage as the input,
+    # if func is a view op.
+    _correct_storage_aliasing(func, schema_info, args, (out,) if not isinstance(out, tuple) else out)
+
+    # For inplace_view ops in particular, we'll try hard to make sure that the wrapper subclass's
+    # metadata is set correctly.
+    if torch.Tag.inplace_view in func.tags:
+        # no_dispatch() to make sure that we secretly change the metadata on the wrapper,
+        # but don't end up dispatching the op anywhere else.
+        mutated_args = [x for i, x in enumerate(args) if get_write_alias(schema_info.args[i]) is not None]
+        # Assumption: we have a very small number of inplace_view ops that follow a strict schema:
+        # there is only a single argument that gets its metadata mutated.
+        assert len(mutated_args) == 1
+        # This check exists because we generally *do* want to update the metadata of any wrapper subclasses,
+        # but FunctionalTensor is special: it overrides all size/stride calls to plumb to the inner tensor.
+        # so we don't actually need to update the metadata (and attempting to do so causes errors)
+        from torch._subclasses.functional_tensor import FunctionalTensor
+        if not isinstance(mutated_args[0], FunctionalTensor):
+            with torch.utils._mode_utils.no_dispatch():
+                # See Note: [Fake Tensor Dispatch Keys]
+                # we're borrowing the way it modifies dispatch key TLS.
+                meta_in_tls = torch._C._meta_in_tls_dispatch_include()
+                torch._C._set_meta_in_tls_dispatch_include(True)
+                try:
+                    func(*args, **kwargs)
+                finally:
+                    torch._C._set_meta_in_tls_dispatch_include(meta_in_tls)
+
+    # Next: we need to make sure to return inputs directly, if the output is a mutable alias (e.g. add_()).
+
+    # simple case: none of our outputs have mutable aliases, so we can return the output as-is
+    if not any(get_write_alias(r) is not None for r in schema_info.outs):
+        return out
+
+    # simplifying assumption: we don't have **any** ops with return types like "-> (Tensor(a!), Tensor)"
+    if not all(get_write_alias(r) is not None for r in schema_info.outs):
+        raise RuntimeError("Unsupported schema: " + str(func._schema))
+
+    if len(func._schema.returns) == 1:
+        return get_arg_from_alias(get_write_alias(schema_info.outs[0]), schema_info, args, kwargs)
+
+    # In the multi-return case, all aten ops return a tuple / list, so cast accordingly.
+    outs_to_return = type(out)([
+        get_arg_from_alias(get_write_alias(schema_info.outs[i]), schema_info, args, kwargs)
+        if get_write_alias(r) is not None else o
+        for ((i, r), o) in zip(enumerate(schema_info.outs), out)
+    ])
+    return outs_to_return
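+
+# Sketch of the intended call pattern from a wrapper subclass (the unwrap/wrap
+# helpers named here are hypothetical; a real subclass supplies its own):
+#
+#     @classmethod
+#     def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
+#         kwargs = kwargs or {}
+#         raw_out = func(*unwrap(args), **unwrap(kwargs))
+#         return return_and_correct_aliasing(func, args, kwargs, wrap(raw_out))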
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_pytree.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_pytree.py
new file mode 100644
index 0000000000000000000000000000000000000000..861e8875d4bdc44c06c62963b3bd172d62d77b69
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_pytree.py
@@ -0,0 +1,1550 @@
+"""
+Contains utility functions for working with nested python data structures.
+
+A *pytree* is a nested Python data structure. It is a tree in the sense that
+nodes are Python collections (e.g., list, tuple, dict) and the leaves are
+Python values. Furthermore, a pytree should not contain reference cycles.
+
+pytrees are useful for working with nested collections of Tensors. For example,
+one can use `tree_map` to map a function over all Tensors inside some nested
+collection of Tensors and `tree_leaves` to get a flat list of all Tensors
+inside some nested collection. pytrees are helpful for implementing nested
+collection support for PyTorch APIs.
+
+This pytree implementation is not very performant due to Python overhead.
+To improve performance, parts of the implementation can be moved to C++.
+"""
+
+import dataclasses
+import importlib
+import json
+import sys
+import threading
+import types
+import warnings
+from collections import defaultdict, deque, namedtuple, OrderedDict
+from typing import (
+    Any,
+    Callable,
+    cast,
+    DefaultDict,
+    Deque,
+    Dict,
+    FrozenSet,
+    Generic,
+    Hashable,
+    Iterable,
+    List,
+    Mapping,
+    NamedTuple,
+    Optional,
+    OrderedDict as GenericOrderedDict,
+    overload,
+    Protocol,
+    Sequence,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+)
+
+
+__all__ = [
+    "PyTree",
+    "Context",
+    "FlattenFunc",
+    "UnflattenFunc",
+    "DumpableContext",
+    "ToDumpableContextFn",
+    "FromDumpableContextFn",
+    "TreeSpec",
+    "LeafSpec",
+    "keystr",
+    "key_get",
+    "register_pytree_node",
+    "tree_flatten",
+    "tree_flatten_with_path",
+    "tree_unflatten",
+    "tree_leaves",
+    "tree_leaves_with_path",
+    "tree_structure",
+    "tree_map",
+    "tree_map_with_path",
+    "tree_map_",
+    "tree_map_only",
+    "tree_map_only_",
+    "tree_all",
+    "tree_any",
+    "tree_all_only",
+    "tree_any_only",
+    "treespec_dumps",
+    "treespec_loads",
+    "treespec_pprint",
+]
+
+
+T = TypeVar("T")
+S = TypeVar("S")
+U = TypeVar("U")
+R = TypeVar("R")
+
+
+DEFAULT_TREESPEC_SERIALIZATION_PROTOCOL = 1
+NO_SERIALIZED_TYPE_NAME_FOUND = "NO_SERIALIZED_TYPE_NAME_FOUND"
+
+
+class KeyEntry(Protocol):
+    def __hash__(self) -> int:
+        ...
+
+    def __eq__(self, other: object) -> bool:
+        ...
+
+    def __str__(self) -> str:
+        ...
+
+    def get(self, parent: Any) -> Any:
+        ...
+
+
+Context = Any
+PyTree = Any
+FlattenFunc = Callable[[PyTree], Tuple[List[Any], Context]]
+UnflattenFunc = Callable[[Iterable[Any], Context], PyTree]
+DumpableContext = Any  # Any json dumpable text
+ToDumpableContextFn = Callable[[Context], DumpableContext]
+FromDumpableContextFn = Callable[[DumpableContext], Context]
+ToStrFunc = Callable[["TreeSpec", List[str]], str]
+MaybeFromStrFunc = Callable[[str], Optional[Tuple[Any, Context, str]]]
+KeyPath = Tuple[KeyEntry, ...]
+FlattenWithKeysFunc = Callable[[PyTree], Tuple[List[Tuple[KeyEntry, Any]], Any]]
+
+
+# A NodeDef holds two callables:
+# - flatten_fn should take the collection and return a flat list of values.
+#   It can also return some context that is used in reconstructing the
+#   collection.
+# - unflatten_fn should take a flat list of values and some context
+#   (returned by flatten_fn). It returns the collection by reconstructing
+#   it from the list and the context.
+# - flatten_with_keys_fn, which is a callable that takes a
+#   pytree and returns a list of (keypath, value) pairs and a context.
+class NodeDef(NamedTuple):
+    type: Type[Any]
+    flatten_fn: FlattenFunc
+    unflatten_fn: UnflattenFunc
+    flatten_with_keys_fn: Optional[FlattenWithKeysFunc]
+
+
+_NODE_REGISTRY_LOCK = threading.Lock()
+SUPPORTED_NODES: Dict[Type[Any], NodeDef] = {}
+
+
+# _SerializeNodeDef holds the following:
+# - typ: the type of the node (e.g., "Dict", "List", etc)
+# - serialized_type_name: the fully qualified name of the type, e.g. "collections.OrderedDict"
+# - to_dumpable_context takes a TreeSpec, and returns a serialized string format of the
+#   context, and the version number
+# - from_dumpable_context takes in a string representation of the context, and the
+#   version, and returns the deserialized context
+class _SerializeNodeDef(NamedTuple):
+    typ: Type[Any]
+    serialized_type_name: str
+    to_dumpable_context: Optional[ToDumpableContextFn]
+    from_dumpable_context: Optional[FromDumpableContextFn]
+
+
+SUPPORTED_SERIALIZED_TYPES: Dict[Type[Any], _SerializeNodeDef] = {}
+SERIALIZED_TYPE_TO_PYTHON_TYPE: Dict[str, Type[Any]] = {}
+
+
+def register_pytree_node(
+    cls: Type[Any],
+    flatten_fn: FlattenFunc,
+    unflatten_fn: UnflattenFunc,
+    *,
+    serialized_type_name: Optional[str] = None,
+    to_dumpable_context: Optional[ToDumpableContextFn] = None,
+    from_dumpable_context: Optional[FromDumpableContextFn] = None,
+    flatten_with_keys_fn: Optional[FlattenWithKeysFunc] = None,
+) -> None:
+    """Register a container-like type as pytree node.
+
+    Args:
+        cls: the type to register
+        flatten_fn: A callable that takes a pytree and returns a flattened
+            representation of the pytree and additional context to represent the
+            flattened pytree.
+        unflatten_fn: A callable that takes a flattened version of the pytree,
+            additional context, and returns an unflattened pytree.
+        serialized_type_name: A keyword argument used to specify the fully qualified
+            name used when serializing the tree spec.
+        to_dumpable_context: An optional keyword argument to custom specify how
+            to convert the context of the pytree to a custom json dumpable
+            representation. This is used for json serialization, which is being
+            used in torch.export right now.
+        from_dumpable_context: An optional keyword argument to custom specify how
+            to convert the custom json dumpable representation of the context
+            back to the original context. This is used for json deserialization,
+            which is being used in torch.export right now.
+        flatten_with_keys_fn: An optional keyword argument to specify how to
+            access each pytree leaf's keypath when flattening and tree-mapping.
+            Like ``flatten_fn``, but in place of a List[leaf], it should return
+            a List[(keypath, leaf)].
+    """
+    with _NODE_REGISTRY_LOCK:
+        if cls in SUPPORTED_NODES:
+            raise ValueError(f"{cls} is already registered as pytree node.")
+
+    _private_register_pytree_node(
+        cls,
+        flatten_fn,
+        unflatten_fn,
+        serialized_type_name=serialized_type_name,
+        to_dumpable_context=to_dumpable_context,
+        from_dumpable_context=from_dumpable_context,
+        flatten_with_keys_fn=flatten_with_keys_fn,
+    )
+
+    try:
+        from . import _cxx_pytree as cxx
+    except ImportError:
+        pass
+    else:
+        cxx._private_register_pytree_node(
+            cls,
+            flatten_fn,
+            unflatten_fn,
+            serialized_type_name=serialized_type_name,
+            to_dumpable_context=to_dumpable_context,
+            from_dumpable_context=from_dumpable_context,
+        )
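+
+# Registration sketch for a small custom container (the class and serialized
+# name below are made up): flatten to the field values, with no extra context
+# needed to rebuild the instance.
+class _Point2D:
+    def __init__(self, x, y):
+        self.x, self.y = x, y
+
+
+def _example_register_point2d():
+    register_pytree_node(
+        _Point2D,
+        lambda p: ([p.x, p.y], None),
+        lambda values, ctx: _Point2D(*values),
+        serialized_type_name="example._Point2D",
+    )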
+
+
+def _register_pytree_node(
+    cls: Type[Any],
+    flatten_fn: FlattenFunc,
+    unflatten_fn: UnflattenFunc,
+    to_str_fn: Optional[ToStrFunc] = None,  # deprecated
+    maybe_from_str_fn: Optional[MaybeFromStrFunc] = None,  # deprecated
+    *,
+    serialized_type_name: Optional[str] = None,
+    to_dumpable_context: Optional[ToDumpableContextFn] = None,
+    from_dumpable_context: Optional[FromDumpableContextFn] = None,
+    flatten_with_keys_fn: Optional[FlattenWithKeysFunc] = None,
+) -> None:
+    """Register a container-like type as pytree node for the Python pytree only.
+
+    Args:
+        cls: the type to register
+        flatten_fn: A callable that takes a pytree and returns a flattened
+            representation of the pytree and additional context to represent the
+            flattened pytree.
+        unflatten_fn: A callable that takes a flattened version of the pytree,
+            additional context, and returns an unflattened pytree.
+        serialized_type_name: A keyword argument used to specify the fully qualified
+            name used when serializing the tree spec.
+        to_dumpable_context: An optional keyword argument that specifies how to
+            convert the context of the pytree to a custom JSON-dumpable
+            representation. This is used for JSON serialization, which is
+            currently used by torch.export.
+        from_dumpable_context: An optional keyword argument that specifies how to
+            convert the custom JSON-dumpable representation of the context
+            back to the original context. This is used for JSON deserialization,
+            which is currently used by torch.export.
+        flatten_with_keys_fn: An optional keyword argument to specify how to
+            access each pytree leaf's keypath when flattening and tree-mapping.
+            Like ``flatten_fn``, but in place of a List[leaf], it should return
+            a List[(keypath, leaf)].
+    """
+    warnings.warn(
+        "torch.utils._pytree._register_pytree_node is deprecated. "
+        "Please use torch.utils._pytree.register_pytree_node instead.",
+        stacklevel=2,
+    )
+
+    if to_str_fn is not None or maybe_from_str_fn is not None:
+        warnings.warn(
+            "to_str_fn and maybe_from_str_fn is deprecated. "
+            "Please use to_dumpable_context and from_dumpable_context instead."
+        )
+
+    _private_register_pytree_node(
+        cls,
+        flatten_fn,
+        unflatten_fn,
+        serialized_type_name=serialized_type_name,
+        to_dumpable_context=to_dumpable_context,
+        from_dumpable_context=from_dumpable_context,
+        flatten_with_keys_fn=flatten_with_keys_fn,
+    )
+
+
+def _private_register_pytree_node(
+    cls: Type[Any],
+    flatten_fn: FlattenFunc,
+    unflatten_fn: UnflattenFunc,
+    *,
+    serialized_type_name: Optional[str] = None,
+    to_dumpable_context: Optional[ToDumpableContextFn] = None,
+    from_dumpable_context: Optional[FromDumpableContextFn] = None,
+    flatten_with_keys_fn: Optional[FlattenWithKeysFunc] = None,
+) -> None:
+    """This is an internal function that is used to register a pytree node type
+    for the Python pytree only. End-users should use :func:`register_pytree_node`
+    instead.
+    """
+    with _NODE_REGISTRY_LOCK:
+        if cls in SUPPORTED_NODES:
+            # TODO: change this warning to an error after OSS/internal stabilize
+            warnings.warn(
+                f"{cls} is already registered as pytree node. "
+                "Overwriting the previous registration.",
+            )
+
+        node_def = NodeDef(cls, flatten_fn, unflatten_fn, flatten_with_keys_fn)
+        SUPPORTED_NODES[cls] = node_def
+
+        if (to_dumpable_context is None) ^ (from_dumpable_context is None):
+            raise ValueError(
+                f"Both to_dumpable_context and from_dumpable_context for {cls} must "
+                "be None or registered."
+            )
+
+        if serialized_type_name is None:
+            serialized_type_name = NO_SERIALIZED_TYPE_NAME_FOUND
+
+        serialize_node_def = _SerializeNodeDef(
+            cls,
+            serialized_type_name,
+            to_dumpable_context,
+            from_dumpable_context,
+        )
+        SUPPORTED_SERIALIZED_TYPES[cls] = serialize_node_def
+        SERIALIZED_TYPE_TO_PYTHON_TYPE[serialized_type_name] = cls
+
+
+@dataclasses.dataclass(frozen=True)
+class SequenceKey(Generic[T]):
+    idx: int
+
+    def __str__(self) -> str:
+        return f"[{self.idx!r}]"
+
+    def get(self, sequence: Sequence[T]) -> T:
+        return sequence[self.idx]
+
+
+K = TypeVar("K", bound=Hashable)
+
+
+@dataclasses.dataclass(frozen=True)
+class MappingKey(Generic[K, T]):
+    key: K
+
+    def __str__(self) -> str:
+        return f"[{self.key!r}]"
+
+    def get(self, mapping: Mapping[K, T]) -> T:
+        return mapping[self.key]
+
+
+@dataclasses.dataclass(frozen=True)
+class GetAttrKey:
+    name: str
+
+    def __str__(self) -> str:
+        return f".{self.name}"
+
+    def get(self, obj: Any) -> Any:
+        return getattr(obj, self.name)
+
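+# Example (editor's note): the key classes above are the entries that make up a
+# key path. Each one knows how to fetch its child via ``.get()``, so a full path
+# can be walked entry by entry (see ``key_get`` at the bottom of this file).
+#
+#   obj = {"weights": [10, 20]}
+#   MappingKey("weights").get(obj)        # -> [10, 20]
+#   SequenceKey(0).get(obj["weights"])    # -> 10
+#   str(MappingKey("weights"))            # -> "['weights']"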
+
+def _tuple_flatten(d: Tuple[Any, ...]) -> Tuple[List[Any], Context]:
+    return list(d), None
+
+
+def _tuple_flatten_with_keys(
+    d: Tuple[Any, ...]
+) -> Tuple[List[Tuple[KeyEntry, Any]], Context]:
+    values, context = _tuple_flatten(d)
+    return [(SequenceKey(i), v) for i, v in enumerate(values)], context
+
+
+def _tuple_unflatten(values: Iterable[Any], context: Context) -> Tuple[Any, ...]:
+    return tuple(values)
+
+
+def _list_flatten(d: List[Any]) -> Tuple[List[Any], Context]:
+    return d, None
+
+
+def _list_flatten_with_keys(d: List[Any]) -> Tuple[List[Tuple[KeyEntry, Any]], Context]:
+    values, context = _list_flatten(d)
+    return [(SequenceKey(i), v) for i, v in enumerate(values)], context
+
+
+def _list_unflatten(values: Iterable[Any], context: Context) -> List[Any]:
+    return list(values)
+
+
+def _dict_flatten(d: Dict[Any, Any]) -> Tuple[List[Any], Context]:
+    return list(d.values()), list(d.keys())
+
+
+def _dict_flatten_with_keys(
+    d: Dict[Any, Any]
+) -> Tuple[List[Tuple[KeyEntry, Any]], Context]:
+    values, context = _dict_flatten(d)
+    return [(MappingKey(k), v) for k, v in zip(context, values)], context
+
+
+def _dict_unflatten(values: Iterable[Any], context: Context) -> Dict[Any, Any]:
+    return dict(zip(context, values))
+
+
+def _namedtuple_flatten(d: NamedTuple) -> Tuple[List[Any], Context]:
+    return list(d), type(d)
+
+
+def _namedtuple_flatten_with_keys(
+    d: NamedTuple,
+) -> Tuple[List[Tuple[KeyEntry, Any]], Context]:
+    values, context = _namedtuple_flatten(d)
+    return (
+        [(GetAttrKey(field), v) for field, v in zip(context._fields, values)],
+        context,
+    )
+
+
+def _namedtuple_unflatten(values: Iterable[Any], context: Context) -> NamedTuple:
+    return cast(NamedTuple, context(*values))
+
+
+def _namedtuple_serialize(context: Context) -> DumpableContext:
+    json_namedtuple = {
+        "class_name": context.__name__,
+        "fields": context._fields,
+    }
+    return json_namedtuple
+
+
+def _namedtuple_deserialize(dumpable_context: DumpableContext) -> Context:
+    class_name = dumpable_context["class_name"]
+    assert isinstance(class_name, str)
+    context = namedtuple(class_name, dumpable_context["fields"])  # type: ignore[misc]
+    return context
+
+
+def _ordereddict_flatten(d: GenericOrderedDict[Any, Any]) -> Tuple[List[Any], Context]:
+    return list(d.values()), list(d.keys())
+
+
+def _ordereddict_flatten_with_keys(
+    d: GenericOrderedDict[Any, Any]
+) -> Tuple[List[Tuple[KeyEntry, Any]], Context]:
+    values, context = _ordereddict_flatten(d)
+    return [(MappingKey(k), v) for k, v in zip(context, values)], context
+
+
+def _ordereddict_unflatten(
+    values: Iterable[Any],
+    context: Context,
+) -> GenericOrderedDict[Any, Any]:
+    return OrderedDict((key, value) for key, value in zip(context, values))
+
+
+_odict_flatten = _ordereddict_flatten
+_odict_unflatten = _ordereddict_unflatten
+
+
+def _defaultdict_flatten(d: DefaultDict[Any, Any]) -> Tuple[List[Any], Context]:
+    values, dict_context = _dict_flatten(d)
+    return values, [d.default_factory, dict_context]
+
+
+def _defaultdict_flatten_with_keys(
+    d: DefaultDict[Any, Any]
+) -> Tuple[List[Tuple[KeyEntry, Any]], Context]:
+    values, context = _defaultdict_flatten(d)
+    _, dict_context = context
+    return [(MappingKey(k), v) for k, v in zip(dict_context, values)], context
+
+
+def _defaultdict_unflatten(
+    values: Iterable[Any],
+    context: Context,
+) -> DefaultDict[Any, Any]:
+    default_factory, dict_context = context
+    return defaultdict(default_factory, _dict_unflatten(values, dict_context))
+
+
+def _defaultdict_serialize(context: Context) -> DumpableContext:
+    default_factory, dict_context = context
+    json_defaultdict = {
+        "default_factory_module": default_factory.__module__,
+        "default_factory_name": default_factory.__qualname__,
+        "dict_context": dict_context,
+    }
+    return json_defaultdict
+
+
+def _defaultdict_deserialize(dumpable_context: DumpableContext) -> Context:
+    assert isinstance(dumpable_context, dict)
+    assert set(dumpable_context) == {
+        "default_factory_module",
+        "default_factory_name",
+        "dict_context",
+    }
+
+    default_factory_module = dumpable_context["default_factory_module"]
+    default_factory_name = dumpable_context["default_factory_name"]
+    assert isinstance(default_factory_module, str)
+    assert isinstance(default_factory_name, str)
+    module = importlib.import_module(default_factory_module)
+    default_factory = getattr(module, default_factory_name)
+
+    dict_context = dumpable_context["dict_context"]
+    return [default_factory, dict_context]
+
+
+def _deque_flatten(d: Deque[Any]) -> Tuple[List[Any], Context]:
+    return list(d), d.maxlen
+
+
+def _deque_flatten_with_keys(
+    d: Deque[Any],
+) -> Tuple[List[Tuple[KeyEntry, Any]], Context]:
+    values, context = _deque_flatten(d)
+    return [(SequenceKey(i), v) for i, v in enumerate(values)], context
+
+
+def _deque_unflatten(values: Iterable[Any], context: Context) -> Deque[Any]:
+    return deque(values, maxlen=context)
+
+
+_private_register_pytree_node(
+    tuple,
+    _tuple_flatten,
+    _tuple_unflatten,
+    serialized_type_name="builtins.tuple",
+    flatten_with_keys_fn=_tuple_flatten_with_keys,
+)
+_private_register_pytree_node(
+    list,
+    _list_flatten,
+    _list_unflatten,
+    serialized_type_name="builtins.list",
+    flatten_with_keys_fn=_list_flatten_with_keys,
+)
+_private_register_pytree_node(
+    dict,
+    _dict_flatten,
+    _dict_unflatten,
+    serialized_type_name="builtins.dict",
+    flatten_with_keys_fn=_dict_flatten_with_keys,
+)
+_private_register_pytree_node(
+    namedtuple,  # type: ignore[arg-type]
+    _namedtuple_flatten,
+    _namedtuple_unflatten,
+    serialized_type_name="collections.namedtuple",
+    to_dumpable_context=_namedtuple_serialize,
+    from_dumpable_context=_namedtuple_deserialize,
+    flatten_with_keys_fn=_namedtuple_flatten_with_keys,
+)
+_private_register_pytree_node(
+    OrderedDict,
+    _ordereddict_flatten,
+    _ordereddict_unflatten,
+    serialized_type_name="collections.OrderedDict",
+    flatten_with_keys_fn=_ordereddict_flatten_with_keys,
+)
+_private_register_pytree_node(
+    defaultdict,
+    _defaultdict_flatten,
+    _defaultdict_unflatten,
+    serialized_type_name="collections.defaultdict",
+    to_dumpable_context=_defaultdict_serialize,
+    from_dumpable_context=_defaultdict_deserialize,
+    flatten_with_keys_fn=_defaultdict_flatten_with_keys,
+)
+_private_register_pytree_node(
+    deque,
+    _deque_flatten,
+    _deque_unflatten,
+    serialized_type_name="collections.deque",
+    flatten_with_keys_fn=_deque_flatten_with_keys,
+)
+
+
+STANDARD_DICT_TYPES: FrozenSet[type] = frozenset(
+    {dict, OrderedDict, defaultdict},
+)
+BUILTIN_TYPES: FrozenSet[type] = frozenset(
+    {tuple, list, dict, namedtuple, OrderedDict, defaultdict, deque},  # type: ignore[arg-type]
+)
+
+
+# h/t https://stackoverflow.com/questions/2166818/how-to-check-if-an-object-is-an-instance-of-a-namedtuple
+def _is_namedtuple_instance(tree: Any) -> bool:
+    typ = type(tree)
+    bases = typ.__bases__
+    if len(bases) != 1 or bases[0] != tuple:
+        return False
+    fields = getattr(typ, "_fields", None)
+    if not isinstance(fields, tuple):
+        return False
+    return all(type(entry) == str for entry in fields)
+
+
+def _get_node_type(tree: Any) -> Any:
+    if _is_namedtuple_instance(tree):
+        return namedtuple
+    return type(tree)
+
+
+# A leaf is defined as anything that is not a Node.
+def _is_leaf(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]] = None) -> bool:
+    return (is_leaf is not None and is_leaf(tree)) or _get_node_type(
+        tree
+    ) not in SUPPORTED_NODES
+
+
+# A TreeSpec represents the structure of a pytree. It holds:
+# "type": the type of root Node of the pytree
+# context: some context that is useful in unflattening the pytree
+# children_specs: specs for each child of the root Node
+# num_leaves: the number of leaves
+@dataclasses.dataclass
+class TreeSpec:
+    type: Any
+    context: Context
+    children_specs: List["TreeSpec"]
+
+    num_nodes: int = dataclasses.field(init=False)
+    num_leaves: int = dataclasses.field(init=False)
+    num_children: int = dataclasses.field(init=False)
+
+    def __post_init__(self) -> None:
+        self.num_nodes = 1 + sum(spec.num_nodes for spec in self.children_specs)
+        self.num_leaves = sum(spec.num_leaves for spec in self.children_specs)
+        self.num_children = len(self.children_specs)
+
+    def __repr__(self, indent: int = 0) -> str:
+        repr_prefix: str = f"TreeSpec({self.type.__name__}, {self.context}, ["
+        children_specs_str: str = ""
+        if self.num_children > 0:
+            indent += 2
+            children_specs_str += self.children_specs[0].__repr__(indent)
+            children_specs_str += "," if self.num_children > 1 else ""
+            children_specs_str += ",".join(
+                [
+                    "\n" + " " * indent + child.__repr__(indent)
+                    for child in self.children_specs[1:]
+                ]
+            )
+        repr_suffix: str = f"{children_specs_str}])"
+        return repr_prefix + repr_suffix
+
+    def is_leaf(self) -> bool:
+        return self.num_nodes == 1 and self.num_leaves == 1
+
+    def _flatten_up_to_helper(self, tree: PyTree, subtrees: List[PyTree]) -> None:
+        if self.is_leaf():
+            subtrees.append(tree)
+            return
+
+        node_type = _get_node_type(tree)
+        if self.type not in BUILTIN_TYPES:
+            # Always require custom node types to match exactly
+            if node_type != self.type:
+                raise ValueError(
+                    f"Type mismatch; "
+                    f"expected {self.type!r}, but got {node_type!r}.",
+                )
+            flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
+            child_pytrees, context = flatten_fn(tree)
+            if len(child_pytrees) != self.num_children:
+                raise ValueError(
+                    f"Node arity mismatch; "
+                    f"expected {self.num_children}, but got {len(child_pytrees)}.",
+                )
+            if context != self.context:
+                raise ValueError(
+                    f"Node context mismatch for custom node type {self.type!r}.",
+                )
+        else:
+            # For builtin dictionary types, we allow some flexibility
+            # Otherwise, we require exact matches
+            both_standard_dict = (
+                self.type in STANDARD_DICT_TYPES and node_type in STANDARD_DICT_TYPES
+            )
+            if node_type != self.type and not both_standard_dict:
+                raise ValueError(
+                    f"Node type mismatch; "
+                    f"expected {self.type!r}, but got {node_type!r}.",
+                )
+            if len(tree) != self.num_children:
+                raise ValueError(
+                    f"Node arity mismatch; "
+                    f"expected {self.num_children}, but got {len(tree)}.",
+                )
+
+            if both_standard_dict:  # dictionary types are compatible with each other
+                dict_context = (
+                    self.context
+                    if self.type is not defaultdict
+                    # ignore mismatch of `default_factory` for defaultdict
+                    else self.context[1]
+                )
+                expected_keys = dict_context
+                got_key_set = set(tree)
+                expected_key_set = set(expected_keys)
+                if got_key_set != expected_key_set:
+                    missing_keys = expected_key_set.difference(got_key_set)
+                    extra_keys = got_key_set.difference(expected_key_set)
+                    message = ""
+                    if missing_keys:
+                        message += f"; missing key(s): {missing_keys}"
+                    if extra_keys:
+                        message += f"; extra key(s): {extra_keys}"
+                    raise ValueError(f"Node keys mismatch{message}.")
+                child_pytrees = [tree[key] for key in expected_keys]
+            else:
+                flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
+                child_pytrees, context = flatten_fn(tree)
+                if (
+                    context != self.context
+                    and self.type is not deque  # ignore mismatch of `maxlen` for deque
+                ):
+                    raise ValueError(
+                        f"Node context mismatch for node type {self.type!r}; "
+                        f"expected {self.context!r}, but got {context!r}.",  # namedtuple type mismatch
+                    )
+
+        for child_pytree, child_spec in zip(child_pytrees, self.children_specs):
+            child_spec._flatten_up_to_helper(child_pytree, subtrees)
+
+    def flatten_up_to(self, tree: PyTree) -> List[PyTree]:
+        subtrees: List[PyTree] = []
+        self._flatten_up_to_helper(tree, subtrees)
+        return subtrees
+
+    def unflatten(self, leaves: Iterable[Any]) -> PyTree:
+        if not isinstance(leaves, (list, tuple)):
+            leaves = list(leaves)
+        if len(leaves) != self.num_leaves:
+            raise ValueError(
+                f"treespec.unflatten(leaves): `leaves` has length {len(leaves)} "
+                f"but the spec refers to a pytree that holds {self.num_leaves} "
+                f"items ({self}).",
+            )
+        if self.is_leaf():
+            return leaves[0]
+
+        unflatten_fn = SUPPORTED_NODES[self.type].unflatten_fn
+
+        # Recursively unflatten the children
+        start = 0
+        end = 0
+        child_pytrees = []
+        for child_spec in self.children_specs:
+            end += child_spec.num_leaves
+            child_pytrees.append(child_spec.unflatten(leaves[start:end]))
+            start = end
+
+        return unflatten_fn(child_pytrees, self.context)
+
+
+class LeafSpec(TreeSpec):
+    def __init__(self) -> None:
+        super().__init__(None, None, [])
+
+    def __post_init__(self) -> None:
+        self.num_nodes = 1
+        self.num_leaves = 1
+        self.num_children = 0
+
+    def __repr__(self, indent: int = 0) -> str:
+        return "*"
+
+
+# All leaves are equivalent, so represent with a single object to save on
+# object construction time
+_LEAF_SPEC = LeafSpec()
+
+
+def _tree_flatten_helper(
+    tree: PyTree,
+    leaves: List[Any],
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> TreeSpec:
+    if _is_leaf(tree, is_leaf=is_leaf):
+        leaves.append(tree)
+        return _LEAF_SPEC
+
+    node_type = _get_node_type(tree)
+    flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
+    child_pytrees, context = flatten_fn(tree)
+
+    # Recursively flatten the children
+    children_specs = [
+        _tree_flatten_helper(child, leaves, is_leaf=is_leaf) for child in child_pytrees
+    ]
+
+    return TreeSpec(node_type, context, children_specs)
+
+
+def tree_flatten(
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> Tuple[List[Any], TreeSpec]:
+    """Flattens a pytree into a list of values and a TreeSpec that can be used
+    to reconstruct the pytree.
+    """
+    leaves: List[Any] = []
+    spec = _tree_flatten_helper(tree, leaves, is_leaf=is_leaf)
+    return leaves, spec
+
+
+def tree_unflatten(leaves: Iterable[Any], treespec: TreeSpec) -> PyTree:
+    """Given a list of values and a TreeSpec, builds a pytree.
+    This is the inverse operation of `tree_flatten`.
+    """
+    if not isinstance(treespec, TreeSpec):
+        raise TypeError(
+            f"tree_unflatten(leaves, treespec): Expected `treespec` to be "
+            f"instance of TreeSpec but got item of type {type(treespec)}.",
+        )
+    return treespec.unflatten(leaves)
+
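+# Example (editor's sketch, kept as a comment): round-tripping a nested container
+# through tree_flatten/tree_unflatten. Leaves are returned in a deterministic
+# order and the TreeSpec holds everything needed to rebuild the original tree.
+#
+#   leaves, spec = tree_flatten({"a": [1, 2], "b": (3,)})
+#   # leaves == [1, 2, 3]
+#   tree_unflatten(leaves, spec)   # -> {"a": [1, 2], "b": (3,)}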
+
+def _tree_leaves_helper(
+    tree: PyTree,
+    leaves: List[Any],
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> None:
+    if _is_leaf(tree, is_leaf=is_leaf):
+        leaves.append(tree)
+        return
+
+    node_type = _get_node_type(tree)
+    flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
+    child_pytrees, _ = flatten_fn(tree)
+
+    # Recursively flatten the children
+    for child in child_pytrees:
+        _tree_leaves_helper(child, leaves, is_leaf=is_leaf)
+
+
+def tree_leaves(
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> List[Any]:
+    """Get a list of leaves of a pytree."""
+    leaves: List[Any] = []
+    _tree_leaves_helper(tree, leaves, is_leaf=is_leaf)
+    return leaves
+
+
+def tree_structure(
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> TreeSpec:
+    """Get the TreeSpec for a pytree."""
+    return tree_flatten(tree, is_leaf=is_leaf)[1]
+
+
+def tree_map(
+    func: Callable[..., Any],
+    tree: PyTree,
+    *rests: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> PyTree:
+    """Map a multi-input function over pytree args to produce a new pytree.
+
+    See also :func:`tree_map_`.
+
+    >>> tree_map(lambda x: x + 1, {'x': 7, 'y': (42, 64)})
+    {'x': 8, 'y': (43, 65)}
+    >>> tree_map(lambda x: x is None, {'x': 7, 'y': (42, 64), 'z': None})
+    {'x': False, 'y': (False, False), 'z': True}
+
+    If multiple inputs are given, the structure of the tree is taken from the first input;
+    subsequent inputs need only have ``tree`` as a prefix:
+
+    >>> tree_map(lambda x, y: [x] + y, [5, 6], [[7, 9], [1, 2]])
+    [[5, 7, 9], [6, 1, 2]]
+
+    Args:
+        func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the
+            corresponding leaves of the pytrees.
+        tree (pytree): A pytree to be mapped over, with each leaf providing the first positional
+            argument to function ``func``.
+        rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as
+            ``tree`` or has ``tree`` as a prefix.
+        is_leaf (callable, optional): An extra leaf predicate function that will be called at each
+            flattening step. The function should have a single argument with signature
+            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
+            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
+            node is a leaf. If the function is not specified, the default pytree registry will be used.
+
+    Returns:
+        A new pytree with the same structure as ``tree`` but with the value at each leaf given by
+        ``func(x, *xs)`` where ``x`` is the value at the corresponding leaf in ``tree`` and ``xs``
+        is the tuple of values at corresponding nodes in ``rests``.
+    """
+    leaves, treespec = tree_flatten(tree, is_leaf=is_leaf)
+    flat_args = [leaves] + [treespec.flatten_up_to(r) for r in rests]
+    return treespec.unflatten(map(func, *flat_args))
+
+
+def tree_map_(
+    func: Callable[..., Any],
+    tree: PyTree,
+    *rests: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> PyTree:
+    """Like :func:`tree_map`, but do an inplace call on each leaf and return the original tree.
+
+    See also :func:`tree_map`.
+
+    Args:
+        func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the
+            corresponding leaves of the pytrees.
+        tree (pytree): A pytree to be mapped over, with each leaf providing the first positional
+            argument to function ``func``.
+        rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as
+            ``tree`` or has ``tree`` as a prefix.
+        is_leaf (callable, optional): An extra leaf predicate function that will be called at each
+            flattening step. The function should have a single argument with signature
+            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
+            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
+            node is a leaf. If the function is not specified, the default pytree registry will be used.
+
+    Returns:
+        The original ``tree``, with each leaf modified in place by the side effect of
+        ``func(x, *xs)`` (not its return value), where ``x`` is the value at the corresponding leaf
+        in ``tree`` and ``xs`` is the tuple of values at corresponding nodes in ``rests``.
+    """
+    leaves, treespec = tree_flatten(tree, is_leaf=is_leaf)
+    flat_args = [leaves] + [treespec.flatten_up_to(r) for r in rests]
+    tuple(map(func, *flat_args))  # consume and exhaust the iterable
+    return tree
+
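+# Example (editor's sketch, kept as a comment): tree_map_ is called only for the
+# side effect of ``func`` and returns the original tree object unchanged.
+#
+#   seen = []
+#   tree = {"a": 1, "b": (2, 3)}
+#   tree_map_(seen.append, tree) is tree   # -> True
+#   # seen == [1, 2, 3]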
+
+Type2 = Tuple[Type[T], Type[S]]
+Type3 = Tuple[Type[T], Type[S], Type[U]]
+if sys.version_info >= (3, 10):
+    TypeAny = Union[Type[Any], Tuple[Type[Any], ...], types.UnionType]
+else:
+    TypeAny = Union[Type[Any], Tuple[Type[Any], ...]]
+
+Fn2 = Callable[[Union[T, S]], R]
+Fn3 = Callable[[Union[T, S, U]], R]
+Fn = Callable[[T], R]
+FnAny = Callable[[Any], R]
+
+MapOnlyFn = Callable[[T], Callable[[Any], Any]]
+
+
+# These specializations help with type inference on the lambda passed to this
+# function
+@overload
+def map_only(__type_or_types_or_pred: Type2[T, S]) -> MapOnlyFn[Fn2[T, S, Any]]:
+    ...
+
+
+@overload
+def map_only(__type_or_types_or_pred: Type3[T, S, U]) -> MapOnlyFn[Fn3[T, S, U, Any]]:
+    ...
+
+
+@overload
+def map_only(__type_or_types_or_pred: Type[T]) -> MapOnlyFn[Fn[T, Any]]:
+    ...
+
+
+# This specialization is needed for the implementations below that call
+# map_only() with a general type-or-predicate argument.
+@overload
+def map_only(__type_or_types_or_pred: TypeAny) -> MapOnlyFn[FnAny[Any]]:
+    ...
+
+
+@overload
+def map_only(__type_or_types_or_pred: Callable[[Any], bool]) -> MapOnlyFn[FnAny[Any]]:
+    ...
+
+
+def map_only(
+    __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]]
+) -> MapOnlyFn[FnAny[Any]]:
+    """
+    Suppose you are writing a tree_map over tensors, leaving everything
+    else unchanged.  Ordinarily you would have to write:
+
+        def go(t):
+            if isinstance(t, Tensor):
+                return ...
+            else:
+                return t
+
+    With this function, you only need to write:
+
+        @map_only(Tensor)
+        def go(t):
+            return ...
+
+    You can also directly use 'tree_map_only'
+    """
+    if isinstance(__type_or_types_or_pred, (type, tuple)) or (
+        sys.version_info >= (3, 10)
+        and isinstance(__type_or_types_or_pred, types.UnionType)
+    ):
+
+        def pred(x: Any) -> bool:
+            return isinstance(x, __type_or_types_or_pred)  # type: ignore[arg-type]
+
+    elif callable(__type_or_types_or_pred):
+        pred = __type_or_types_or_pred  # type: ignore[assignment]
+    else:
+        raise TypeError("Argument must be a type, a tuple of types, or a callable.")
+
+    def wrapper(func: Callable[[T], Any]) -> Callable[[Any], Any]:
+        # @functools.wraps(func)  # torch dynamo doesn't support this yet
+        def wrapped(x: T) -> Any:
+            if pred(x):
+                return func(x)
+            return x
+
+        return wrapped
+
+    return wrapper
+
+
+@overload
+def tree_map_only(
+    __type_or_types_or_pred: Type[T],
+    func: Fn[T, Any],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> PyTree:
+    ...
+
+
+@overload
+def tree_map_only(
+    __type_or_types_or_pred: Type2[T, S],
+    func: Fn2[T, S, Any],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> PyTree:
+    ...
+
+
+@overload
+def tree_map_only(
+    __type_or_types_or_pred: Type3[T, S, U],
+    func: Fn3[T, S, U, Any],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> PyTree:
+    ...
+
+
+@overload
+def tree_map_only(
+    __type_or_types_or_pred: Callable[[Any], bool],
+    func: FnAny[Any],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> PyTree:
+    ...
+
+
+def tree_map_only(
+    __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]],
+    func: FnAny[Any],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> PyTree:
+    return tree_map(map_only(__type_or_types_or_pred)(func), tree, is_leaf=is_leaf)
+
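+# Example (editor's sketch, kept as a comment): tree_map_only applies ``func``
+# only to leaves matching the given type (or predicate) and passes every other
+# leaf through unchanged.
+#
+#   tree_map_only(int, lambda x: x * 10, {"n": 3, "name": "x"})
+#   #   -> {"n": 30, "name": "x"}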
+
+@overload
+def tree_map_only_(
+    __type_or_types_or_pred: Type[T],
+    func: Fn[T, Any],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> PyTree:
+    ...
+
+
+@overload
+def tree_map_only_(
+    __type_or_types_or_pred: Type2[T, S],
+    func: Fn2[T, S, Any],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> PyTree:
+    ...
+
+
+@overload
+def tree_map_only_(
+    __type_or_types_or_pred: Type3[T, S, U],
+    func: Fn3[T, S, U, Any],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> PyTree:
+    ...
+
+
+@overload
+def tree_map_only_(
+    __type_or_types_or_pred: Callable[[Any], bool],
+    func: FnAny[Any],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> PyTree:
+    ...
+
+
+def tree_map_only_(
+    __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]],
+    func: FnAny[Any],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> PyTree:
+    return tree_map_(map_only(__type_or_types_or_pred)(func), tree, is_leaf=is_leaf)
+
+
+def tree_all(
+    pred: Callable[[Any], bool],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> bool:
+    flat_args = tree_leaves(tree, is_leaf=is_leaf)
+    return all(map(pred, flat_args))
+
+
+def tree_any(
+    pred: Callable[[Any], bool],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> bool:
+    flat_args = tree_leaves(tree, is_leaf=is_leaf)
+    return any(map(pred, flat_args))
+
+
+@overload
+def tree_all_only(
+    __type_or_types: Type[T],
+    pred: Fn[T, bool],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> bool:
+    ...
+
+
+@overload
+def tree_all_only(
+    __type_or_types: Type2[T, S],
+    pred: Fn2[T, S, bool],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> bool:
+    ...
+
+
+@overload
+def tree_all_only(
+    __type_or_types: Type3[T, S, U],
+    pred: Fn3[T, S, U, bool],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> bool:
+    ...
+
+
+def tree_all_only(
+    __type_or_types: TypeAny,
+    pred: FnAny[bool],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> bool:
+    flat_args = tree_leaves(tree, is_leaf=is_leaf)
+    return all(pred(x) for x in flat_args if isinstance(x, __type_or_types))
+
+
+@overload
+def tree_any_only(
+    __type_or_types: Type[T],
+    pred: Fn[T, bool],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> bool:
+    ...
+
+
+@overload
+def tree_any_only(
+    __type_or_types: Type2[T, S],
+    pred: Fn2[T, S, bool],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> bool:
+    ...
+
+
+@overload
+def tree_any_only(
+    __type_or_types: Type3[T, S, U],
+    pred: Fn3[T, S, U, bool],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> bool:
+    ...
+
+
+def tree_any_only(
+    __type_or_types: TypeAny,
+    pred: FnAny[bool],
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> bool:
+    flat_args = tree_leaves(tree, is_leaf=is_leaf)
+    return any(pred(x) for x in flat_args if isinstance(x, __type_or_types))
+
+
+# Broadcasts a pytree to the provided TreeSpec and returns the flattened
+# values. If this is not possible, then this function returns None.
+#
+# For example, given pytree=0 and spec=TreeSpec(list, None, [LeafSpec(), LeafSpec()]),
+# this function would return [0, 0]. This is useful for part of the vmap implementation:
+# a user can pass in vmap(fn, in_dims)(*inputs). `in_dims` should be
+# broadcastable to the tree structure of `inputs` and we use
+# _broadcast_to_and_flatten to check this.
+def _broadcast_to_and_flatten(
+    tree: PyTree,
+    treespec: TreeSpec,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> Optional[List[Any]]:
+    assert isinstance(treespec, TreeSpec)
+
+    if _is_leaf(tree, is_leaf=is_leaf):
+        return [tree] * treespec.num_leaves
+    if treespec.is_leaf():
+        return None
+    node_type = _get_node_type(tree)
+    if node_type != treespec.type:
+        return None
+
+    flatten_fn = SUPPORTED_NODES[node_type].flatten_fn
+    child_pytrees, ctx = flatten_fn(tree)
+
+    # Check if the Node is different from the spec
+    if len(child_pytrees) != treespec.num_children or ctx != treespec.context:
+        return None
+
+    # Recursively flatten the children
+    result: List[Any] = []
+    for child, child_spec in zip(child_pytrees, treespec.children_specs):
+        flat = _broadcast_to_and_flatten(child, child_spec, is_leaf=is_leaf)
+        if flat is not None:
+            result += flat
+        else:
+            return None
+
+    return result
+
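+# Example (editor's sketch, kept as a comment) of the broadcasting described above:
+#
+#   spec = tree_structure([1, 2])               # TreeSpec(list, None, [*, *])
+#   _broadcast_to_and_flatten(0, spec)          # -> [0, 0]
+#   _broadcast_to_and_flatten([0, 1], spec)     # -> [0, 1]
+#   _broadcast_to_and_flatten((0, 1), spec)     # -> None (tuple does not match list)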
+
+@dataclasses.dataclass
+class _TreeSpecSchema:
+    """
+    _TreeSpecSchema is the schema used to serialize the TreeSpec.
+    It contains the following fields:
+    - type: A string name of the type. null for the case of a LeafSpec.
+    - context: Any format which is JSON-dumpable.
+    - children_spec: A list of serialized child specs.
+    """
+
+    type: Optional[str]
+    context: DumpableContext
+    children_spec: List["_TreeSpecSchema"]
+
+
+class _ProtocolFn(NamedTuple):
+    treespec_to_json: Callable[[TreeSpec], DumpableContext]
+    json_to_treespec: Callable[[DumpableContext], TreeSpec]
+
+
+_SUPPORTED_PROTOCOLS: Dict[int, _ProtocolFn] = {}
+
+
+def _treespec_to_json(treespec: TreeSpec) -> _TreeSpecSchema:
+    if treespec.is_leaf():
+        return _TreeSpecSchema(None, None, [])
+
+    if treespec.type not in SUPPORTED_SERIALIZED_TYPES:
+        raise NotImplementedError(
+            f"Serializing {treespec.type} in pytree is not registered.",
+        )
+
+    serialize_node_def = SUPPORTED_SERIALIZED_TYPES[treespec.type]
+
+    serialized_type_name = serialize_node_def.serialized_type_name
+
+    if serialized_type_name == NO_SERIALIZED_TYPE_NAME_FOUND:
+        raise NotImplementedError(
+            f"No registered serialization name for {treespec.type} found. "
+            "Please update your _register_pytree_node call with a `serialized_type_name` kwarg."
+        )
+
+    if serialize_node_def.to_dumpable_context is None:
+        try:
+            serialized_context = json.dumps(treespec.context)
+        except TypeError as e:
+            raise TypeError(
+                "Unable to serialize context. "
+                "Please make the context json dump-able, or register a "
+                "custom serializer using _register_pytree_node."
+            ) from e
+    else:
+        serialized_context = serialize_node_def.to_dumpable_context(treespec.context)
+
+    child_schemas = [_treespec_to_json(child) for child in treespec.children_specs]
+
+    return _TreeSpecSchema(serialized_type_name, serialized_context, child_schemas)
+
+
+def _json_to_treespec(json_schema: DumpableContext) -> TreeSpec:
+    if (
+        json_schema["type"] is None
+        and json_schema["context"] is None
+        and len(json_schema["children_spec"]) == 0
+    ):
+        return _LEAF_SPEC
+
+    if json_schema["type"] not in SERIALIZED_TYPE_TO_PYTHON_TYPE:
+        raise NotImplementedError(
+            f'Deserializing {json_schema["type"]} in pytree is not registered.',
+        )
+
+    typ = SERIALIZED_TYPE_TO_PYTHON_TYPE[json_schema["type"]]
+    serialize_node_def = SUPPORTED_SERIALIZED_TYPES[typ]
+
+    if serialize_node_def.from_dumpable_context is None:
+        try:
+            context = json.loads(json_schema["context"])
+        except TypeError as ex:
+            raise TypeError(
+                "Unable to deserialize context. "
+                "Please make the context json load-able, or register a "
+                "custom serializer using _register_pytree_node.",
+            ) from ex
+    else:
+        context = serialize_node_def.from_dumpable_context(json_schema["context"])
+
+    children_specs = []
+    for child_string in json_schema["children_spec"]:
+        children_specs.append(_json_to_treespec(child_string))
+
+    return TreeSpec(typ, context, children_specs)
+
+
+_SUPPORTED_PROTOCOLS[1] = _ProtocolFn(_treespec_to_json, _json_to_treespec)
+
+
+def treespec_dumps(treespec: TreeSpec, protocol: Optional[int] = None) -> str:
+    if not isinstance(treespec, TreeSpec):
+        raise TypeError(
+            f"treespec_dumps(treespec, protocol): Expected `treespec` to be instance of "
+            f"TreeSpec but got item of type {type(treespec)}.",
+        )
+
+    if protocol is None:
+        protocol = DEFAULT_TREESPEC_SERIALIZATION_PROTOCOL
+
+    if protocol in _SUPPORTED_PROTOCOLS:
+        json_spec = _SUPPORTED_PROTOCOLS[protocol].treespec_to_json(treespec)
+    else:
+        raise ValueError(
+            f"Unknown protocol {protocol}. "
+            f"Available protocols: {list(_SUPPORTED_PROTOCOLS.keys())}",
+        )
+
+    str_spec = json.dumps((protocol, dataclasses.asdict(json_spec)))
+    return str_spec
+
+
+def treespec_loads(serialized: str) -> TreeSpec:
+    protocol, json_schema = json.loads(serialized)
+
+    if protocol in _SUPPORTED_PROTOCOLS:
+        return _SUPPORTED_PROTOCOLS[protocol].json_to_treespec(json_schema)
+    raise ValueError(
+        f"Unknown protocol {protocol}. "
+        f"Available protocols: {list(_SUPPORTED_PROTOCOLS.keys())}",
+    )
+
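+# Example (editor's sketch, kept as a comment): a TreeSpec serializes to a JSON
+# string and round-trips through treespec_dumps/treespec_loads, which is how
+# torch.export records input structure.
+#
+#   spec = tree_structure({"a": (1, 2)})
+#   treespec_loads(treespec_dumps(spec)) == spec   # -> True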
+
+class _DummyLeaf:
+    def __repr__(self) -> str:
+        return "*"
+
+
+def treespec_pprint(treespec: TreeSpec) -> str:
+    dummy_tree = tree_unflatten(
+        [_DummyLeaf() for _ in range(treespec.num_leaves)],
+        treespec,
+    )
+    return repr(dummy_tree)
+
+
+# TODO(angelayi): remove this function after OSS/internal stabilize
+def pytree_to_str(treespec: TreeSpec) -> str:
+    warnings.warn("pytree_to_str is deprecated. Please use treespec_dumps")
+    return treespec_dumps(treespec)
+
+
+# TODO(angelayi): remove this function after OSS/internal stabilize
+def str_to_pytree(json: str) -> TreeSpec:
+    warnings.warn("str_to_pytree is deprecated. Please use treespec_loads")
+    return treespec_loads(json)
+
+
+def arg_tree_leaves(*args: PyTree, **kwargs: PyTree) -> List[Any]:
+    """Get a flat list of arguments to this function
+
+    A slightly faster version of tree_leaves((args, kwargs))
+    """
+    leaves: List[Any] = []
+    for a in args:
+        _tree_leaves_helper(a, leaves)
+    for a in kwargs.values():
+        _tree_leaves_helper(a, leaves)
+    return leaves
+
+
+def tree_flatten_with_path(
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> Tuple[List[Tuple[KeyPath, Any]], TreeSpec]:
+    """Flattens a pytree like :func:`tree_flatten`, but also returns each leaf's key path.
+
+    Args:
+        tree: a pytree to flatten. If it contains a custom type, that type must be
+            registered with an appropriate `tree_flatten_with_path_fn` when registered
+            with :func:`register_pytree_node`.
+        is_leaf: An extra leaf predicate function that will be called at each
+            flattening step. The function should have a single argument with signature
+            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
+            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
+            node is a leaf. If the function is not specified, the default pytree registry will be used.
+    Returns:
+        A tuple where the first element is a list of (key path, leaf) pairs, and the
+        second element is a :class:`TreeSpec` representing the structure of the flattened
+        tree.
+    """
+    _, treespec = tree_flatten(tree, is_leaf)
+    return list(_generate_key_paths((), tree, is_leaf)), treespec
+
+
+def tree_leaves_with_path(
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> List[Tuple[KeyPath, Any]]:
+    """Gets the leaves of a pytree like ``tree_leaves`` and returns each leaf's key path.
+
+    Args:
+        tree: a pytree. If it contains a custom type, that type must be
+            registered with an appropriate `tree_flatten_with_path_fn` when registered
+            with :func:`register_pytree_node`.
+        is_leaf: An extra leaf predicate function that will be called at each
+            flattening step. The function should have a single argument with signature
+            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
+            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
+            node is a leaf. If the function is not specified, the default pytree registry will be used.
+    Returns:
+        A list of (key path, leaf) pairs.
+    """
+    return list(_generate_key_paths((), tree, is_leaf))
+
+
+def _generate_key_paths(
+    key_path: KeyPath,
+    tree: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> Iterable[Tuple[KeyPath, Any]]:
+    if is_leaf and is_leaf(tree):
+        yield key_path, tree
+        return
+
+    node_type = _get_node_type(tree)
+    handler = SUPPORTED_NODES.get(node_type)
+    if not handler:
+        # This is a leaf
+        yield key_path, tree
+        return
+
+    flatten_with_keys = handler.flatten_with_keys_fn
+    if flatten_with_keys:
+        key_children, _ = flatten_with_keys(tree)
+        for k, c in key_children:
+            yield from _generate_key_paths((*key_path, k), c, is_leaf)
+    else:
+        # We registered this pytree but didn't add a flatten_with_keys_fn, complain.
+        raise ValueError(
+            f"Did not find a flatten_with_keys_fn for type: {node_type}. "
+            "Please pass a flatten_with_keys_fn argument to register_pytree_node."
+        )
+
+
+def tree_map_with_path(
+    func: Callable[..., Any],
+    tree: PyTree,
+    *rests: PyTree,
+    is_leaf: Optional[Callable[[PyTree], bool]] = None,
+) -> PyTree:
+    """Like :func:`tree_map`, but the provided callable takes an additional key path argument.
+
+    Args:
+        func: A function that takes ``2 + len(rests)`` arguments, to be applied at the
+            corresponding leaves of the pytrees. The first positional argument
+            to ``func`` is the key path of the leaf in question. The second
+            positional argument is the value of the leaf.
+        tree: A pytree to be mapped over, with each leaf providing the second positional
+            argument (after its key path) to function ``func``.
+        rests: A tuple of pytrees, each of which has the same structure as
+            ``tree`` or has ``tree`` as a prefix.
+        is_leaf: An extra leaf predicate function that will be called at each
+            flattening step. The function should have a single argument with signature
+            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
+            as a leaf. Otherwise, the default pytree registry will be used to determine whether a
+            node is a leaf. If the function is not specified, the default pytree registry will be used.
+
+    Returns:
+        A new pytree with the same structure as ``tree`` but with the value at each leaf given by
+        ``func(keypath, x, *xs)`` where ``keypath`` is the key path at the
+        corresponding leaf in ``tree``, ``x`` is the value at that leaf, and
+        ``xs`` is the tuple of values at corresponding nodes in ``rests``.
+    """
+    keypath_leaves, treespec = tree_flatten_with_path(tree, is_leaf)
+    keypath_leaves = list(zip(*keypath_leaves))
+    all_keypath_leaves = keypath_leaves + [treespec.flatten_up_to(r) for r in rests]
+    return treespec.unflatten(func(*xs) for xs in zip(*all_keypath_leaves))
+
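+# Example (editor's sketch, kept as a comment): key paths from the *_with_path
+# helpers combine with keystr/key_get below. The tree is illustrative.
+#
+#   tree = {"w": [1, 2]}
+#   tree_map_with_path(lambda kp, x: f"{keystr(kp)}={x}", tree)
+#   #   -> {"w": ["['w'][0]=1", "['w'][1]=2"]}
+#   key_get(tree, (MappingKey("w"), SequenceKey(1)))   # -> 2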
+
+def keystr(kp: KeyPath) -> str:
+    """Given a key path, return a pretty-printed representation."""
+    return "".join([str(k) for k in kp])
+
+
+def key_get(obj: Any, kp: KeyPath) -> Any:
+    """Given an object and a key path, return the value at the key path."""
+    for k in kp:
+        obj = k.get(obj)
+    return obj
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_triton.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_triton.py
new file mode 100644
index 0000000000000000000000000000000000000000..865b34c28b3377266567b25bc44ae53f9927b7d1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_triton.py
@@ -0,0 +1,103 @@
+import functools
+import hashlib
+import os
+
+from torch._dynamo.device_interface import get_interface_for_device
+
+
+@functools.lru_cache(None)
+def has_triton_package() -> bool:
+    try:
+        import triton
+
+        return triton is not None
+    except ImportError:
+        return False
+
+
+@functools.lru_cache(None)
+def has_triton() -> bool:
+    def cuda_extra_check(device_interface):
+        return device_interface.Worker.get_device_properties().major >= 7
+
+    triton_supported_devices = {"cuda": cuda_extra_check}
+
+    def is_device_compatible_with_triton():
+        for device, extra_check in triton_supported_devices.items():
+            device_interface = get_interface_for_device(device)
+            if device_interface.is_available() and extra_check(device_interface):
+                return True
+        return False
+
+    return is_device_compatible_with_triton() and has_triton_package()
+
+
+@functools.lru_cache(None)
+def triton_backend_hash():
+    from triton.common.backend import get_backend, get_cuda_version_key
+
+    import torch
+
+    if torch.version.hip:
+        # Does not work with ROCm
+        return None
+
+    if not torch.cuda.is_available():
+        return None
+
+    backend = get_backend("cuda")
+    if backend is None:
+        return get_cuda_version_key()
+    else:
+        return backend.get_version_key()
+
+
+@functools.lru_cache
+def triton_key():
+    import pkgutil
+
+    import triton
+
+    TRITON_PATH = os.path.dirname(os.path.abspath(triton.__file__))
+    contents = []
+    # This is redundant. Doing it to be consistent with upstream.
+    # frontend
+    with open(os.path.join(TRITON_PATH, "compiler", "compiler.py"), "rb") as f:
+        contents += [hashlib.sha256(f.read()).hexdigest()]
+
+    # compiler
+    compiler_path = os.path.join(TRITON_PATH, "compiler")
+    backends_path = os.path.join(TRITON_PATH, "compiler", "backends")
+    for lib in pkgutil.iter_modules([compiler_path, backends_path]):
+        with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f:  # type: ignore[call-arg, union-attr, arg-type]
+            contents += [hashlib.sha256(f.read()).hexdigest()]
+    # backend
+    libtriton_hash = hashlib.sha256()
+    with open(os.path.join(TRITON_PATH, "_C/libtriton.so"), "rb") as f:
+        while True:
+            chunk = f.read(1024**2)
+            if not chunk:
+                break
+            libtriton_hash.update(chunk)
+    contents.append(libtriton_hash.hexdigest())
+    # language
+    language_path = os.path.join(TRITON_PATH, "language")
+    for lib in pkgutil.iter_modules([language_path]):
+        with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f:  # type: ignore[call-arg, union-attr, arg-type]
+            contents += [hashlib.sha256(f.read()).hexdigest()]
+    from triton import __version__
+
+    return f"{__version__}" + "-".join(contents)
+
+
+@functools.lru_cache(None)
+def triton_hash_with_backend():
+    import torch
+
+    if torch.version.hip:
+        # Does not work with ROCm
+        return None
+
+    backend_hash = triton_backend_hash()
+    key = f"{triton_key()}-{backend_hash}"
+    return hashlib.sha256(key.encode("utf-8")).hexdigest()
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_typing_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_typing_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd1b6ca5785ff5d90dfed7cf3c152dfb17c616f7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_typing_utils.py
@@ -0,0 +1,13 @@
+"""Miscellaneous utilities to aid with typing."""
+
+from typing import Optional, TypeVar
+
+# Helper to turn Optional[T] into T when we know None either isn't
+# possible or should trigger an exception.
+T = TypeVar("T")
+
+
+def not_none(obj: Optional[T]) -> T:
+    if obj is None:
+        raise TypeError("Invariant encountered: value was None when it should not be")
+    return obj
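+
+
+# Example (editor's note, illustrative name): ``not_none`` unwraps an Optional
+# that is known to be set, failing loudly otherwise.
+#
+#   index = not_none(maybe_index)   # raises TypeError if maybe_index is None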
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_zip.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_zip.py
new file mode 100644
index 0000000000000000000000000000000000000000..f37ddb44987889fdf6730b800592e13652e46aed
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_zip.py
@@ -0,0 +1,85 @@
+import argparse
+import glob
+import os
+from pathlib import Path
+from zipfile import ZipFile
+
+# Exclude some standard library modules to:
+# 1. Slim down the final zipped file size
+# 2. Remove functionality we don't want to support.
+DENY_LIST = [
+    # Interface to unix databases
+    "dbm",
+    # ncurses bindings (terminal interfaces)
+    "curses",
+    # Tcl/Tk GUI
+    "tkinter",
+    "tkinter",
+    # Tests for the standard library
+    "test",
+    "tests",
+    "idle_test",
+    "__phello__.foo.py",
+    # importlib frozen modules. These are already baked into CPython.
+    "_bootstrap.py",
+    "_bootstrap_external.py",
+]
+
+strip_file_dir = ""
+
+
+def remove_prefix(text, prefix):
+    if text.startswith(prefix):
+        return text[len(prefix) :]
+    return text
+
+
+def write_to_zip(file_path, strip_file_path, zf, prepend_str=""):
+    stripped_file_path = prepend_str + remove_prefix(file_path, strip_file_dir + "/")
+    path = Path(stripped_file_path)
+    if path.name in DENY_LIST:
+        return
+    zf.write(file_path, stripped_file_path)
+
+
+def main() -> None:
+    global strip_file_dir
+    parser = argparse.ArgumentParser(description="Zip py source")
+    parser.add_argument("paths", nargs="*", help="Paths to zip.")
+    parser.add_argument(
+        "--install-dir", "--install_dir", help="Root directory for all output files"
+    )
+    parser.add_argument(
+        "--strip-dir",
+        "--strip_dir",
+        help="The absolute directory we want to remove from zip",
+    )
+    parser.add_argument(
+        "--prepend-str",
+        "--prepend_str",
+        help="A string to prepend onto all paths of a file in the zip",
+        default="",
+    )
+    parser.add_argument("--zip-name", "--zip_name", help="Output zip name")
+
+    args = parser.parse_args()
+
+    zip_file_name = args.install_dir + "/" + args.zip_name
+    strip_file_dir = args.strip_dir
+    prepend_str = args.prepend_str
+    zf = ZipFile(zip_file_name, mode="w")
+
+    for p in sorted(args.paths):
+        if os.path.isdir(p):
+            files = glob.glob(p + "/**/*.py", recursive=True)
+            for file_path in sorted(files):
+                # strip the absolute path
+                write_to_zip(
+                    file_path, strip_file_dir + "/", zf, prepend_str=prepend_str
+                )
+        else:
+            write_to_zip(p, strip_file_dir + "/", zf, prepend_str=prepend_str)
+
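+# Example invocation (editor's sketch; all paths are illustrative):
+#   python -m torch.utils._zip /abs/src/pkg /abs/src/extra.py \
+#       --install-dir /tmp/out --strip-dir /abs/src --zip-name python.zip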
+
+if __name__ == "__main__":
+    main()  # pragma: no cover
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/bundled_inputs.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/bundled_inputs.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a95c7828843cde842e59929edcdb602bf85ccfc
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/bundled_inputs.py
@@ -0,0 +1,471 @@
+#!/usr/bin/env python3
+from typing import Any, TypeVar, Optional, Tuple, List, NamedTuple, Union, Sequence, Dict, Callable
+import textwrap
+import torch
+from torch._C import TupleType, ListType
+from torch.jit._recursive import wrap_cpp_module
+
+
+T = TypeVar("T")
+
+MAX_RAW_TENSOR_SIZE = 16
+
+class InflatableArg(NamedTuple):
+    """Helper type for bundled inputs.
+
+    'value' is the compressed/deflated input that is stored in the model. Value
+    must be of the same type as the argument to the function that it is a deflated
+    input for.
+
+    'fmt' is a formattable code string that is executed to inflate the compressed data into
+    the appropriate input. It can use 'value' as an input to the format str. It must result
+    in a value of the same type as 'value'.
+
+    'fmt_fn' is a formattable function code string that is executed to inflate the compressed
+    data into the appropriate input. It must result in a value of the same type as 'value'.
+    The function name should be the formattable part of the string.
+
+    Note: Only top level InflatableArgs can be inflated. i.e. you cannot place
+    an inflatable arg inside of some other structure. You should instead create
+    an inflatable arg such that the fmt code string returns the full structure
+    of your input.
+    """
+
+    value: Any
+    fmt: str = "{}"
+    fmt_fn: str = ""
+
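+# Example (editor's sketch, illustrative values): store a tiny tensor in the
+# model and let ``fmt`` inflate it to the real input shape when the bundled
+# input is materialized.
+#
+#   InflatableArg(value=torch.zeros(1, 3), fmt="{}.expand(8, 3)")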
+
+def bundle_inputs(
+        model: torch.jit.ScriptModule,
+        inputs: Union[Optional[Sequence[Tuple[Any, ...]]], Dict[Callable, Optional[Sequence[Tuple[Any, ...]]]]],
+        info: Optional[Union[List[str], Dict[Callable, List[str]]]] = None,
+        *,
+        _receive_inflate_expr: Optional[List[str]] = None,
+) -> torch.jit.ScriptModule:
+    """Create and return a copy of the specified model with inputs attached.
+
+    The original model is not mutated or changed in any way.
+
+    Models with bundled inputs can be invoked in a uniform manner by
+    benchmarking and code coverage tools.
+
+    If inputs is passed in as a list then the inputs will be bundled for 'forward'.
+    If inputs is instead passed in as a map then all the methods specified in the map
+    will have their corresponding inputs bundled. Info should match whichever type is
+    chosen for the inputs.
+
+    The returned model will support the following methods:
+
+        `get_all_bundled_inputs_for_<function_name>() -> List[Tuple[Any, ...]]`
+            Returns a list of tuples suitable for passing to the model like
+            `for inp in model.get_all_bundled_inputs_for_foo(): model.foo(*inp)`
+
+        `get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str, List[str]]]`
+            Returns a dictionary mapping function names to a metadata dictionary.
+            This nested dictionary maps preset strings like:
+                'get_inputs_function_name' -> the name of a function attribute in this model that can be
+                    run to get back a list of inputs corresponding to that function.
+                'info' -> the user provided extra information about the bundled inputs
+
+    If forward has bundled inputs then the following functions will also be defined on the returned module:
+
+        `get_all_bundled_inputs() -> List[Tuple[Any, ...]]`
+            Returns a list of tuples suitable for passing to the model like
+            `for inp in model.get_all_bundled_inputs(): model(*inp)`
+
+        `get_num_bundled_inputs() -> int`
+            Equivalent to `len(model.get_all_bundled_inputs())`,
+            but slightly easier to call from C++.
+
+    Inputs can be specified in one of two ways:
+
+      - The model can define `_generate_bundled_inputs_for_<function_name>`.
+        If the user chooses this method, the corresponding entry in `inputs` should map to None
+
+      - The `inputs` argument to this function can be a dictionary mapping functions to a
+        list of inputs, of the same form that will be returned by get_all_bundled_inputs_for_<function_name>.
+        Alternatively if only bundling inputs for forward the map can be omitted and a singular list of inputs
+        can be provided instead.
+
+        The type of the inputs is List[Tuple[Any, ...]]. The outer list corresponds with a
+        list of inputs, the inner tuple is the list of args that together make up one input.
+        For inputs of functions that take one arg, this will be a tuple of length one. The Any, ...
+        is the actual data that makes up the args, e.g. a tensor.
+
+    Info is an optional parameter that maps functions to a list of strings providing extra information about that
+    function's bundled inputs. Alternatively, if only bundling inputs for forward, the map can be omitted and
+    a single list of information strings can be provided instead. This could be descriptions, expected outputs, etc.
+        - Ex: info={model.forward : ['man eating icecream', 'an airplane', 'a dog']}
+
+    This function will attempt to optimize arguments so that (e.g.)
+    arguments like `torch.zeros(1000)` will be represented compactly.
+    Only top-level arguments will be optimized.
+    Tensors in lists or tuples will not.
+    """
+    if not isinstance(model, torch.jit.ScriptModule):
+        raise Exception("Only ScriptModule is supported.")
+
+    ignored_methods, ignored_attrs = _get_bundled_inputs_attributes_and_methods(model)
+    clone = torch._C._hack_do_not_use_clone_module_with_class(  # type: ignore[attr-defined]
+        model._c,
+        ignored_methods,
+        ignored_attrs,
+    )
+
+    # The above cloning function returns a torch._C.ScriptModule and we need a torch.jit.ScriptModule.
+    # Fortunately there's a function in _recursive that does exactly that conversion.
+    cloned_module = wrap_cpp_module(clone)
+    if isinstance(inputs, dict):
+        assert isinstance(info, dict) or info is None
+        augment_many_model_functions_with_bundled_inputs(cloned_module, inputs, _receive_inflate_expr, info)
+    else:
+        assert isinstance(info, list) or info is None
+        augment_model_with_bundled_inputs(cloned_module, inputs, _receive_inflate_expr, info)
+    return cloned_module
+
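+# A minimal end-to-end sketch of bundle_inputs (illustrative only; `MyModule`
+# and the example tensors are assumptions, not part of this file). The list
+# form bundles inputs for 'forward'; the dict form can target other methods:
+#
+#     >>> m = torch.jit.script(MyModule())
+#     >>> bundled = bundle_inputs(
+#     ...     m,
+#     ...     inputs=[(torch.zeros(4),), (torch.ones(4),)],
+#     ...     info=['all zeros', 'all ones'],
+#     ... )
+#     >>> for inp in bundled.get_all_bundled_inputs():
+#     ...     bundled(*inp)
+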
+def augment_model_with_bundled_inputs(
+        model: torch.jit.ScriptModule,
+        inputs: Optional[Sequence[Tuple[Any, ...]]] = None,
+        _receive_inflate_expr: Optional[List[str]] = None,  # For debugging.
+        info: Optional[List[str]] = None,  # Optional argument to provide info about forward or its inputs
+        skip_size_check=False,
+) -> None:
+    """Add bundled sample inputs to a model for the forward function.
+
+    Models with bundled inputs can be invoked in a uniform manner by
+    benchmarking and code coverage tools.
+
+    Augmented models will support the following methods:
+
+        `get_all_bundled_inputs() -> List[Tuple[Any, ...]]`
+            Returns a list of tuples suitable for passing to the model like
+            `for inp in model.get_all_bundled_inputs(): model(*inp)`
+
+        `get_num_bundled_inputs() -> int`
+            Equivalent to `len(model.get_all_bundled_inputs())`,
+            but slightly easier to call from C++.
+
+        `get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str, List[str]]]`
+            Returns a dictionary mapping function names to a metadata dictionary.
+            This nested dictionary maps preset strings like:
+                'get_inputs_function_name' -> the name of a function attribute in this model that can be
+                    run to get back a list of inputs corresponding to that function.
+                'info' -> the user provided extra information about the bundled inputs
+
+    Inputs can be specified in one of two ways:
+
+      - The model can define `_generate_bundled_inputs_for_forward`.
+        If the user chooses this method, inputs should be None.
+
+      - `inputs` is a list of inputs of form List[Tuple[Any, ...]]. A list of tuples where the elements
+        of each tuple are the args that make up one input.
+    """
+    if not isinstance(model, torch.jit.ScriptModule):
+        raise Exception("Only ScriptModule is supported.")
+
+    forward: Callable = model.forward
+
+    # Sometimes forward won't have a name attached so just in case
+    if not hasattr(forward, "__name__"):
+        forward.__name__ = 'forward'
+    augment_many_model_functions_with_bundled_inputs(
+        model,
+        inputs={forward : inputs},
+        _receive_inflate_expr=_receive_inflate_expr,
+        info={forward : info} if info else None,
+        skip_size_check=skip_size_check,
+    )
+
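+# A minimal sketch of in-place augmentation (illustrative; `MyModule` is an
+# assumed scriptable nn.Module). Unlike bundle_inputs, the module is mutated:
+#
+#     >>> m = torch.jit.script(MyModule())
+#     >>> augment_model_with_bundled_inputs(m, inputs=[(torch.ones(2, 2),)])
+#     >>> m.get_num_bundled_inputs()
+#     1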
+
+def augment_many_model_functions_with_bundled_inputs(
+        model: torch.jit.ScriptModule,
+        inputs: Dict[Callable, Optional[Sequence[Tuple[Any, ...]]]],
+        _receive_inflate_expr: Optional[List[str]] = None,  # For debugging.
+        info: Optional[Dict[Callable, List[str]]] = None,  # Optional argument to provide info about the function or its inputs
+        skip_size_check=False,
+) -> None:
+    """Add bundled sample inputs to a model for an arbitrary list of public functions.
+
+    Models with bundled inputs can be invoked in a uniform manner by
+    benchmarking and code coverage tools.
+
+    Augmented models will support the following methods:
+
+        `get_all_bundled_inputs_for_<function_name>() -> List[Tuple[Any, ...]]`
+            Returns a list of tuples suitable for passing to the model like
+            `for inp in model.get_all_bundled_inputs_for_foo(): model.foo(*inp)`
+
+        `get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str, List[str]]]`
+            Returns a dictionary mapping function names to a metadata dictionary.
+            This nested dictionary maps preset strings like:
+                'get_inputs_function_name' -> the name of a function attribute in this model that can be
+                    run to get back a list of inputs corresponding to that function.
+                'info' -> the user provided extra information about the bundled inputs
+
+    If forward has bundled inputs then these following functions are also defined:
+
+        `get_all_bundled_inputs() -> List[Tuple[Any, ...]]`
+            Returns a list of tuples suitable for passing to the model like
+            `for inp in model.get_all_bundled_inputs(): model(*inp)`
+
+        `get_num_bundled_inputs() -> int`
+            Equivalent to `len(model.get_all_bundled_inputs())`,
+            but slightly easier to call from C++.
+
+    Inputs can be specified in one of two ways:
+
+      - The model can define `_generate_bundled_inputs_for_<function_name>`.
+        If the user chooses this method, inputs[<function>] should map to None.
+
+      - The `inputs` argument to this function can be a dictionary mapping functions to a
+        list of inputs, of the same form that will be returned by get_all_bundled_inputs_for_<function_name>.
+        The type of the inputs is List[Tuple[Any, ...]]. The outer list corresponds with a
+        list of inputs, the inner tuple is the list of args that together make up one input.
+        For inputs of functions that take one arg, this will be a tuple of length one. The Any, ...
+        is the actual data that makes up the args, e.g. a tensor.
+
+    Info is an optional parameter that maps functions to a list of strings providing extra information about that
+    function's bundled inputs. This could be descriptions, expected outputs, etc.
+        - Ex: info={model.forward : ['man eating icecream', 'an airplane', 'a dog']}
+
+    This function will attempt to optimize arguments so that (e.g.)
+    arguments like `torch.zeros(1000)` will be represented compactly.
+    Only top-level arguments will be optimized.
+    Tensors in lists or tuples will not.
+    """
+    if not isinstance(model, torch.jit.ScriptModule):
+        raise Exception("Only ScriptModule is supported.")
+
+    if not inputs:
+        raise Exception("Please provide inputs for at least 1 function")
+
+    if hasattr(model, "get_all_bundled_inputs") or hasattr(model, "get_bundled_inputs_functions_and_info"):
+        raise Exception(
+            "Models can only be augmented with bundled inputs once. "
+            "This Model seems to have already been augmented with "
+            "bundled inputs. Please start afresh with one that "
+            "doesn't have bundled inputs.",
+        )
+
+    get_bundled_inputs_functions_and_info_template = ""
+
+    for function, input_list in inputs.items():
+        if hasattr(function, "__name__"):
+            function_name = function.__name__
+        else:
+            if hasattr(function, "name"):
+                function_name = function.name  # type: ignore[attr-defined]
+            else:
+                raise Exception(
+                    'At least one of your functions has no "name" attribute; please ensure all have one, e.g. m.foo.name = "foo"')
+
+
+        if input_list is not None and not isinstance(input_list, Sequence):
+            raise TypeError(f"Error inputs for function {function_name} is not a Sequence")
+
+        function_arg_types = [arg.type for arg in function.schema.arguments[1:]]  # type: ignore[attr-defined]
+        deflated_inputs_type: ListType = ListType(TupleType(function_arg_types))
+        model._c._register_attribute(f"_bundled_inputs_deflated_{function_name}", deflated_inputs_type, [])
+
+        if hasattr(model, "_generate_bundled_inputs_for_" + function_name):
+            if input_list is not None:
+                raise Exception(
+                    "inputs[{name}] is not None, but _generate_bundled_inputs_for_{name} is already defined".format(
+                        name=function_name
+                    )
+                )
+            # Model author already defined _generate_bundled_inputs_for_<function_name>.
+        elif input_list is None or len(input_list) == 0:
+            raise Exception(
+                "inputs for {name} must be specified if _generate_bundled_inputs_for_{name} is not already defined".format(
+                    name=function_name,
+                )
+            )
+        else:
+            # Iterate over the inputs and args in each input.
+            # Accumulate `deflated_inputs` as (possibly) compressed values
+            # and `parts` to be joined into the expression that unpacks them.
+            deflated_inputs = []
+            parts = []
+            for inp_idx, args in enumerate(input_list):
+                if not isinstance(args, Tuple) and not isinstance(args, List):  # type: ignore[arg-type]
+                    raise TypeError(
+                        f"Error bundled input for function {function_name} idx: {inp_idx} is not a Tuple or a List"
+                    )
+                deflated_args = []
+                parts.append("(")
+                for arg_idx, arg in enumerate(args):
+                    inflate_helper_fn_name = _get_inflate_helper_fn_name(arg_idx, inp_idx, function_name)
+                    deflated, inflater, helper_definition = _inflate_expr(
+                        arg,
+                        f"deflated[{inp_idx}][{arg_idx}]",
+                        inflate_helper_fn_name,
+                        skip_size_check=skip_size_check,
+                    )
+                    deflated_args.append(deflated)
+                    parts.append(f"    {inflater},")
+                    if helper_definition:
+                        model.define(textwrap.dedent(helper_definition))
+                deflated_inputs.append(tuple(deflated_args))
+                parts.append("),")
+            parts.append("")
+            expr = "\n".join(parts)
+
+            # Back-channel return this expr for debugging.
+            if _receive_inflate_expr is not None:
+                _receive_inflate_expr.append(expr)
+            setattr(model, f"_bundled_inputs_deflated_{function_name}", deflated_inputs)
+            definition = textwrap.dedent("""
+                def _generate_bundled_inputs_for_{name}(self):
+                    deflated = self._bundled_inputs_deflated_{name}
+                    return [
+                {expr}
+                    ]
+                """).format(expr=expr, name=function_name)
+            model.define(definition)
+
+        # Define get_all_bundled_inputs_for_<function_name> that caches the generated inputs.
+        model.define(textwrap.dedent("""
+            def get_all_bundled_inputs_for_{name}(self):
+                all_inputs = self._generate_bundled_inputs_for_{name}()
+                assert all_inputs is not None
+                return all_inputs
+            """).format(name=function_name))
+
+        # Add to the high level helper methods
+        inputs_info = repr(info[function]) if info and function in info else '[]'
+        get_bundled_inputs_functions_and_info_template += f"""
+            temp_dict : Dict[str,List[str]] = {{}}
+            info: List[str] = {inputs_info}
+
+            temp_dict['info'] = info
+            temp_dict['get_inputs_function_name'] = ['get_all_bundled_inputs_for_{function_name}']
+            all_inputs['{function_name}'] = temp_dict
+            """
+
+        # To ensure backwards compatibility and a streamlined API for forward, these wrappers are provided:
+        if function_name == 'forward':
+            model.define(textwrap.dedent("""
+                def get_all_bundled_inputs(self):
+                    return self.get_all_bundled_inputs_for_forward()
+                """))
+            model.define(textwrap.dedent("""
+                def get_num_bundled_inputs(self):
+                    return len(self.get_all_bundled_inputs_for_forward())
+                """))
+
+    # Define some high level helper methods that act on all bundled inputs
+    model.define(textwrap.dedent(f"""
+        def get_bundled_inputs_functions_and_info(self):
+            all_inputs : Dict[str, Dict[str,List[str]]] = {{}}
+            {get_bundled_inputs_functions_and_info_template}
+            return all_inputs
+        """))
+
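+# After augmentation, get_bundled_inputs_functions_and_info() returns a nested
+# dict per bundled function, roughly of this shape (a sketch; the 'forward' key
+# and the info strings depend on what the caller bundled):
+#
+#     {
+#         'forward': {
+#             'get_inputs_function_name': ['get_all_bundled_inputs_for_forward'],
+#             'info': ['all zeros', 'all ones'],
+#         },
+#     }
+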
+def _inflate_expr(
+    arg: T, ref: str, inflate_helper_fn_name: str, skip_size_check: bool = False
+) -> Tuple[Union[T, torch.Tensor], str, Optional[str]]:
+    # Allow custom inflation expressions for any object.
+    # For example, calling custom image-decoding ops.
+    # Or just use "{}" as the format string to ignore size limits.
+    if isinstance(arg, InflatableArg):
+        if arg.fmt_fn:
+            if arg.fmt not in ["{}", ""]:
+                raise Exception(
+                    f"Bundled input argument at position '{ref}' has "
+                    f"both arg.fmt_fn => \n{arg.fmt_fn} "
+                    f"\n and arg.fmt  => {arg.fmt}. "
+                    "Please choose `arg.fmt` if the deflater is straightforward or "
+                    "`arg.fmt_fn` if you need a function."
+                )
+
+            helper_definition = arg.fmt_fn.format(inflate_helper_fn_name)
+            expr = f"self.{inflate_helper_fn_name}({ref})"
+
+            return arg.value, expr, helper_definition
+        else:
+            return arg.value, arg.fmt.format(ref), None
+
+    if isinstance(arg, torch.Tensor):
+        # Small-storage tensors can just be saved directly.
+        if arg._typed_storage().size() <= MAX_RAW_TENSOR_SIZE or skip_size_check:
+            return arg, ref, None
+        # Small contiguous tensors can be cloned to have small storage.
+        # TODO: Should we do this even for non-contiguous tensors?
+        if arg.is_contiguous() and arg.numel() <= MAX_RAW_TENSOR_SIZE:
+            return arg.clone(), ref, None
+        # Example inputs commonly come from torch.zeros, torch.ones, or torch.full.
+        # These can be represented compactly.
+        for fmt in [torch.contiguous_format, torch.channels_last]:
+            if arg.is_contiguous(memory_format=fmt) and (arg == arg.flatten()[0]).all().item():
+                return (arg.flatten()[0].clone().expand(*arg.size()),
+                        f"{ref}.contiguous(memory_format={fmt})", None)
+        # Prevent big tensors from being bundled by default.
+        # TODO: Provide more useful diagnostics.
+        raise Exception(
+            f"Bundled input argument at position '{ref}' is "
+            f"a tensor with storage size {arg._typed_storage().size()}. "
+            f"You probably don't want to bundle this as an input. "
+        )
+    else:
+        return arg, ref, None
+
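+# Rough illustration of the compaction above (assuming the tensor's storage
+# exceeds MAX_RAW_TENSOR_SIZE): a constant tensor is deflated to a tensor backed
+# by a 1-element storage expanded to the original size, plus an expression that
+# restores a contiguous tensor at inflation time:
+#
+#     >>> deflated, expr, _ = _inflate_expr(torch.zeros(1000), "deflated[0][0]", "_unused_helper")
+#     >>> deflated._typed_storage().size()
+#     1
+#     >>> expr
+#     'deflated[0][0].contiguous(memory_format=torch.contiguous_format)'
+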
+def _get_bundled_inputs_attributes_and_methods(script_module: torch.jit.ScriptModule) -> Tuple[List[str], List[str]]:
+    methods: List[str] = []
+    attributes: List[str] = []
+
+    # Has bundled inputs for forward
+    if hasattr(script_module, 'get_all_bundled_inputs'):
+        methods.append('get_all_bundled_inputs')
+        methods.append('get_num_bundled_inputs')
+        methods.append('run_on_bundled_input')
+
+    if hasattr(script_module, 'get_bundled_inputs_functions_and_info'):
+        methods.append('get_bundled_inputs_functions_and_info')
+        all_info = script_module.get_bundled_inputs_functions_and_info()
+        for function_name in all_info:
+            methods.append("get_all_bundled_inputs_for_" + function_name)
+            methods.append("_generate_bundled_inputs_for_" + function_name)
+            attributes.append("_bundled_inputs_deflated_" + function_name)
+
+            bundled_inputs_fn = getattr(
+                script_module,
+                f"get_all_bundled_inputs_for_{function_name}"
+            )
+            num_bundled_inputs: int = len(bundled_inputs_fn())
+
+            # Check inflate helper functions for each function, argument and bundled input
+            func = getattr(script_module, function_name)
+            for arg_idx in range(len(func.schema.arguments) - 1):
+                for input_idx in range(num_bundled_inputs):
+                    helper_fn_name = _get_inflate_helper_fn_name(
+                        arg_idx=arg_idx,
+                        input_idx=input_idx,
+                        function_name=function_name
+                    )
+                    # if the arg has an InflatableArg with fmt_fn, add the helper function name
+                    if hasattr(script_module, helper_fn_name):
+                        methods.append(helper_fn_name)
+
+    return (methods, attributes)
+
+
+def _get_inflate_helper_fn_name(
+    arg_idx: int,
+    input_idx: int,
+    function_name: str,
+) -> str:
+    return f"_inflate_helper_for_{function_name}_input_{input_idx}_arg_{arg_idx}"
+
+
+
+def bundle_randn(*size, dtype=None):
+    """Generate a tensor that will be inflated with torch.randn."""
+    stub = torch.zeros(1, dtype=dtype).expand(*size)
+    return InflatableArg(value=stub, fmt="torch.randn_like({})")
+
+
+def bundle_large_tensor(t):
+    """Wrap a tensor to allow bundling regardless of size."""
+    return InflatableArg(value=t, fmt="{}")
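+
+
+# Sketch of how these wrappers plug into the bundling APIs above (illustrative;
+# `m` is assumed to be an already scripted module whose forward takes one tensor):
+#
+#     >>> augment_model_with_bundled_inputs(m, inputs=[(bundle_randn(4, 8),)])
+#     >>> # At inflation time the stub is replaced via torch.randn_like(...),
+#     >>> # while bundle_large_tensor(t) would bundle t verbatim regardless of size.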
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/deterministic.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/deterministic.py
new file mode 100644
index 0000000000000000000000000000000000000000..98a6d30b067bb496e2a1a77e142974a98494997a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/deterministic.py
@@ -0,0 +1,21 @@
+import sys
+import types
+
+import torch
+
+
+class _Deterministic(types.ModuleType):
+    @property
+    def fill_uninitialized_memory(self):
+        """
+        Whether to fill uninitialized memory with a known value when
+        :meth:`torch.use_deterministic_algorithms()` is set to ``True``.
+        """
+        return torch._C._get_deterministic_fill_uninitialized_memory()
+
+    @fill_uninitialized_memory.setter
+    def fill_uninitialized_memory(self, mode):
+        return torch._C._set_deterministic_fill_uninitialized_memory(mode)
+
+
+sys.modules[__name__].__class__ = _Deterministic
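+
+
+# A minimal usage sketch (the values shown assume PyTorch defaults): the property
+# defined above becomes a module-level attribute thanks to the __class__ swap:
+#
+#     >>> import torch
+#     >>> torch.use_deterministic_algorithms(True)
+#     >>> torch.utils.deterministic.fill_uninitialized_memory
+#     True
+#     >>> torch.utils.deterministic.fill_uninitialized_memory = False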
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/show_pickle.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/show_pickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..e83bed48e66699cba9aa915417919ee5e568ddbf
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/show_pickle.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python3
+import sys
+import pickle
+import struct
+import pprint
+import zipfile
+import fnmatch
+from typing import Any, IO, BinaryIO, Union
+
+__all__ = ["FakeObject", "FakeClass", "DumpUnpickler", "main"]
+
+class FakeObject:
+    def __init__(self, module, name, args):
+        self.module = module
+        self.name = name
+        self.args = args
+        # NOTE: We don't distinguish between state never set and state set to None.
+        self.state = None
+
+    def __repr__(self):
+        state_str = "" if self.state is None else f"(state={self.state!r})"
+        return f"{self.module}.{self.name}{self.args!r}{state_str}"
+
+    def __setstate__(self, state):
+        self.state = state
+
+    @staticmethod
+    def pp_format(printer, obj, stream, indent, allowance, context, level):
+        if not obj.args and obj.state is None:
+            stream.write(repr(obj))
+            return
+        if obj.state is None:
+            stream.write(f"{obj.module}.{obj.name}")
+            printer._format(obj.args, stream, indent + 1, allowance + 1, context, level)
+            return
+        if not obj.args:
+            stream.write(f"{obj.module}.{obj.name}()(state=\n")
+            indent += printer._indent_per_level
+            stream.write(" " * indent)
+            printer._format(obj.state, stream, indent, allowance + 1, context, level + 1)
+            stream.write(")")
+            return
+        raise Exception("Need to implement")
+
+
+class FakeClass:
+    def __init__(self, module, name):
+        self.module = module
+        self.name = name
+        self.__new__ = self.fake_new  # type: ignore[assignment]
+
+    def __repr__(self):
+        return f"{self.module}.{self.name}"
+
+    def __call__(self, *args):
+        return FakeObject(self.module, self.name, args)
+
+    def fake_new(self, *args):
+        return FakeObject(self.module, self.name, args[1:])
+
+
+class DumpUnpickler(pickle._Unpickler):  # type: ignore[name-defined]
+    def __init__(
+            self,
+            file,
+            *,
+            catch_invalid_utf8=False,
+            **kwargs):
+        super().__init__(file, **kwargs)
+        self.catch_invalid_utf8 = catch_invalid_utf8
+
+    def find_class(self, module, name):
+        return FakeClass(module, name)
+
+    def persistent_load(self, pid):
+        return FakeObject("pers", "obj", (pid,))
+
+    dispatch = dict(pickle._Unpickler.dispatch)  # type: ignore[attr-defined]
+
+    # Custom objects in TorchScript are able to return invalid UTF-8 strings
+    # from their pickle (__getstate__) functions.  Install a custom loader
+    # for strings that catches the decode exception and replaces it with
+    # a sentinel object.
+    def load_binunicode(self):
+        strlen, = struct.unpack(" sys.maxsize:
+            raise Exception("String too long.")
+        str_bytes = self.read(strlen)  # type: ignore[attr-defined]
+        obj: Any
+        try:
+            obj = str(str_bytes, "utf-8", "surrogatepass")
+        except UnicodeDecodeError as exn:
+            if not self.catch_invalid_utf8:
+                raise
+            obj = FakeObject("builtin", "UnicodeDecodeError", (str(exn),))
+        self.append(obj)  # type: ignore[attr-defined]
+    dispatch[pickle.BINUNICODE[0]] = load_binunicode  # type: ignore[assignment]
+
+    @classmethod
+    def dump(cls, in_stream, out_stream):
+        value = cls(in_stream).load()
+        pprint.pprint(value, stream=out_stream)
+        return value
+
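+# A minimal sketch of the two usual entry points (file names are placeholders):
+# programmatic dumping of a pickle stream, or the CLI handled by main() below,
+# which also understands `archive.zip@member.pkl` style paths:
+#
+#     >>> with open("data.pkl", "rb") as f:
+#     ...     DumpUnpickler.dump(f, sys.stdout)
+#
+#     $ python -m torch.utils.show_pickle model.pt@*/data.pkl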
+
+def main(argv, output_stream=None):
+    if len(argv) != 2:
+        # Don't spam stderr if not using stdout.
+        if output_stream is not None:
+            raise Exception("Pass argv of length 2.")
+        sys.stderr.write("usage: show_pickle PICKLE_FILE\n")
+        sys.stderr.write("  PICKLE_FILE can be any of:\n")
+        sys.stderr.write("    path to a pickle file\n")
+        sys.stderr.write("    file.zip@member.pkl\n")
+        sys.stderr.write("    file.zip@*/pattern.*\n")
+        sys.stderr.write("      (shell glob pattern for members)\n")
+        sys.stderr.write("      (only first match will be shown)\n")
+        return 2
+
+    fname = argv[1]
+    handle: Union[IO[bytes], BinaryIO]
+    if "@" not in fname:
+        with open(fname, "rb") as handle:
+            DumpUnpickler.dump(handle, output_stream)
+    else:
+        zfname, mname = fname.split("@", 1)
+        with zipfile.ZipFile(zfname) as zf:
+            if "*" not in mname:
+                with zf.open(mname) as handle:
+                    DumpUnpickler.dump(handle, output_stream)
+            else:
+                found = False
+                for info in zf.infolist():
+                    if fnmatch.fnmatch(info.filename, mname):
+                        with zf.open(info) as handle:
+                            DumpUnpickler.dump(handle, output_stream)
+                        found = True
+                        break
+                if not found:
+                    raise Exception(f"Could not find member matching {mname} in {zfname}")
+
+
+if __name__ == "__main__":
+    # This hack works on every version of Python I've tested.
+    # I've tested on the following versions:
+    #   3.7.4
+    if True:
+        pprint.PrettyPrinter._dispatch[FakeObject.__repr__] = FakeObject.pp_format  # type: ignore[attr-defined]
+
+    sys.exit(main(sys.argv))
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/throughput_benchmark.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/throughput_benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..5607fadee9e9c6d854491a9517fea4256ebe34f6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/throughput_benchmark.py
@@ -0,0 +1,159 @@
+
+import torch._C
+
+
+def format_time(time_us=None, time_ms=None, time_s=None):
+    """Define time formatting."""
+    assert sum([time_us is not None, time_ms is not None, time_s is not None]) == 1
+
+    US_IN_SECOND = 1e6
+    US_IN_MS = 1e3
+
+    if time_us is None:
+        if time_ms is not None:
+            time_us = time_ms * US_IN_MS
+        elif time_s is not None:
+            time_us = time_s * US_IN_SECOND
+        else:
+            raise AssertionError("Shouldn't reach here :)")
+
+    if time_us >= US_IN_SECOND:
+        return f'{time_us / US_IN_SECOND:.3f}s'
+    if time_us >= US_IN_MS:
+        return f'{time_us / US_IN_MS:.3f}ms'
+    return f'{time_us:.3f}us'
+
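+# Worked examples (not part of the original module):
+#
+#     >>> format_time(time_us=1500)
+#     '1.500ms'
+#     >>> format_time(time_s=2)
+#     '2.000s'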
+
+class ExecutionStats:
+    def __init__(self, c_stats, benchmark_config):
+        self._c_stats = c_stats
+        self.benchmark_config = benchmark_config
+
+    @property
+    def latency_avg_ms(self):
+        return self._c_stats.latency_avg_ms
+
+    @property
+    def num_iters(self):
+        return self._c_stats.num_iters
+
+    @property
+    def iters_per_second(self):
+        """Return total number of iterations per second across all calling threads."""
+        return self.num_iters / self.total_time_seconds
+
+    @property
+    def total_time_seconds(self):
+        return self.num_iters * (
+            self.latency_avg_ms / 1000.0) / self.benchmark_config.num_calling_threads
+
+    def __str__(self):
+        return '\n'.join([
+            "Average latency per example: " + format_time(time_ms=self.latency_avg_ms),
+            f"Total number of iterations: {self.num_iters}",
+            f"Total number of iterations per second (across all threads): {self.iters_per_second:.2f}",
+            "Total time: " + format_time(time_s=self.total_time_seconds)
+        ])
+
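+# Worked example for the derived stats above (numbers are made up): with
+# num_iters=1000, latency_avg_ms=2.0, and num_calling_threads=4:
+#
+#     total_time_seconds = 1000 * (2.0 / 1000.0) / 4 = 0.5
+#     iters_per_second   = 1000 / 0.5 = 2000.0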
+
+class ThroughputBenchmark:
+    """
+    This class is a wrapper around a C++ component throughput_benchmark::ThroughputBenchmark.
+
+    This wrapper on the throughput_benchmark::ThroughputBenchmark component is responsible
+    for executing a PyTorch module (nn.Module or ScriptModule) under an
+    inference-server-like load. It can emulate multiple calling threads to a single
+    provided module. In the future we plan to enhance this component to support inter-
+    and intra-op parallelism as well as multiple models running in a single process.
+
+    Please note that even though nn.Module is supported, it might incur an overhead
+    from the need to hold the GIL every time we execute Python code or pass around
+    inputs as Python objects. As soon as you have a ScriptModule version of your
+    model for inference deployment it is better to switch to using it in this
+    benchmark.
+
+    Example::
+
+        >>> # xdoctest: +SKIP("undefined vars")
+        >>> from torch.utils import ThroughputBenchmark
+        >>> bench = ThroughputBenchmark(my_module)
+        >>> # Pre-populate benchmark's data set with the inputs
+        >>> for input in inputs:
+        ...     # Both args and kwargs work, same as any PyTorch Module / ScriptModule
+        ...     bench.add_input(input[0], x2=input[1])
+        >>> # Inputs supplied above are randomly used during the execution
+        >>> stats = bench.benchmark(
+        ...     num_calling_threads=4,
+        ...     num_warmup_iters = 100,
+        ...     num_iters = 1000,
+        ... )
+        >>> print("Avg latency (ms): {}".format(stats.latency_avg_ms))
+        >>> print("Number of iterations: {}".format(stats.num_iters))
+    """
+
+    def __init__(self, module):
+        if isinstance(module, torch.jit.ScriptModule):
+            self._benchmark = torch._C.ThroughputBenchmark(module._c)
+        else:
+            self._benchmark = torch._C.ThroughputBenchmark(module)
+
+    def run_once(self, *args, **kwargs):
+        """
+        Run the module once with the given args/kwargs and return the prediction.
+
+        This is useful for testing that the benchmark actually runs the module you
+        want it to run. The arguments are forwarded to the underlying module in the
+        same form as those registered via the add_input() method.
+        """
+        return self._benchmark.run_once(*args, **kwargs)
+
+    def add_input(self, *args, **kwargs):
+        """
+        Store a single input to a module into the benchmark memory and keep it there.
+
+        During the benchmark execution every thread is going to pick up a
+        random input from all the inputs ever supplied to the benchmark via
+        this function.
+        """
+        self._benchmark.add_input(*args, **kwargs)
+
+    def benchmark(
+            self,
+            num_calling_threads=1,
+            num_warmup_iters=10,
+            num_iters=100,
+            profiler_output_path=""):
+        """
+        Run a benchmark on the module.
+
+        Args:
+            num_warmup_iters (int): Warmup iters are used to make sure we run a module
+                a few times before actually measuring things. This way we avoid cold
+                caches and any other similar problems. This is the number of warmup
+                iterations for each of the threads, counted separately.
+
+            num_iters (int): Number of iterations the benchmark should run with.
+                This number is separate from the warmup iterations. Also, the number is
+                shared across all the threads. Once num_iters iterations across all
+                the threads have been reached, we will stop execution, though the total
+                number of iterations might be slightly larger. The actual count is
+                reported as stats.num_iters, where stats is the result of this function.
+
+            profiler_output_path (str): Location to save the Autograd Profiler trace.
+                If not empty, the Autograd Profiler will be enabled for the main benchmark
+                execution (but not the warmup phase). The full trace will be saved
+                into the file path provided by this argument.
+
+
+        This function returns a BenchmarkExecutionStats object which is defined via pybind11.
+        It currently has two fields:
+            - num_iters - number of actual iterations the benchmark has made
+            - avg_latency_ms - average time it took to infer on one input example, in milliseconds
+        """
+        config = torch._C.BenchmarkConfig()
+        config.num_calling_threads = num_calling_threads
+        config.num_warmup_iters = num_warmup_iters
+        config.num_iters = num_iters
+        config.profiler_output_path = profiler_output_path
+        c_stats = self._benchmark.benchmark(config)
+        return ExecutionStats(c_stats, config)