diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48d836e14fbd7b87c84cbe9d4ff2e59e6dc0f863 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..671c4c5c33f3d7aedb9f68b9692b16f3830f7792 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/gen_example.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/gen_example.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5a086e693828414ae0605d40d96d44d1e5b0105 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/gen_example.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/logging.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e262803632521adc2fcafc49accd9c3491b2111 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/logging.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d737548c3d480d11e722ad5ae076cebe9f2523c4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__init__.py @@ -0,0 +1,52 @@ +import glob +import importlib +from os.path import basename, dirname, isfile, join + +import torch +from torch._export.db.case import ( + _EXAMPLE_CASES, + _EXAMPLE_CONFLICT_CASES, + _EXAMPLE_REWRITE_CASES, + SupportLevel, +) + + +modules = glob.glob(join(dirname(__file__), "*.py")) +__all__ = [ + basename(f)[:-3] for f in modules if isfile(f) and not f.endswith("__init__.py") +] + +# Import all module in the current directory. +from . 
import * # noqa: F403 + + +def all_examples(): + return _EXAMPLE_CASES + + +if len(_EXAMPLE_CONFLICT_CASES) > 0: + + def get_name(case): + model = case.model + if isinstance(model, torch.nn.Module): + model = type(model) + return model.__name__ + + msg = "Error on conflict export case name.\n" + for case_name, cases in _EXAMPLE_CONFLICT_CASES.items(): + msg += f"Case name {case_name} is associated with multiple cases:\n " + msg += f"[{','.join(map(get_name, cases))}]\n" + + raise RuntimeError(msg) + + +def filter_examples_by_support_level(support_level: SupportLevel): + return { + key: val + for key, val in all_examples().items() + if val.support_level == support_level + } + + +def get_rewrite_cases(case): + return _EXAMPLE_REWRITE_CASES.get(case.name, []) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/autograd_function.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/autograd_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b5e4351e771a011d66af5ec92736ecbc9c6935a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/autograd_function.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nonlocal_variables.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nonlocal_variables.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c999689d96942607d05dba5e4f023013d6325eb7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nonlocal_variables.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_value_example.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_value_example.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..872abb560516dd0a46218cc547b17f58aece1a7d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_value_example.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_contains.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_contains.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b36c514921d90e9116bd620f3bcba944d693b2cd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_contains.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/model_attr_mutation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/model_attr_mutation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc9b8d46e445b9dd617a3f932f4ea7525630d334 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/model_attr_mutation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/optional_input.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/optional_input.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc6882f491b408183383e73d0784bb826766deed Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/optional_input.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/scalar_output.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/scalar_output.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d41f49b7e2d303f32c61311ef0177c739575a3f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/scalar_output.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/torch_sym_min.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/torch_sym_min.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ec2b7be69616519b5248db19f7c00e5864f0a60 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/torch_sym_min.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/assume_constant_result.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/assume_constant_result.py new file mode 100644 index 0000000000000000000000000000000000000000..664aab8b64da2b239daaa2d78c068a1d7397c4a4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/assume_constant_result.py @@ -0,0 +1,24 @@ +import torch +import torch._dynamo as torchdynamo + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2), torch.tensor(4)), + tags={"torch.escape-hatch"}, +) +class AssumeConstantResult(torch.nn.Module): + """ + Applying `assume_constant_result` decorator to burn make non-tracable code as constant. + """ + + def __init__(self): + super().__init__() + + @torchdynamo.assume_constant_result + def get_item(self, y): + return y.int().item() + + def forward(self, x, y): + return x[: self.get_item(y)] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_class_method.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_class_method.py new file mode 100644 index 0000000000000000000000000000000000000000..68dd3772684d1c8ea784a5d74214895dedeeb530 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_class_method.py @@ -0,0 +1,46 @@ +import torch + +from torch._export.db.case import export_case +from functorch.experimental.control_flow import cond + + +class MySubModule(torch.nn.Module): + def foo(self, x): + return x.cos() + + def forward(self, x): + return self.foo(x) + + +@export_case( + example_inputs=(torch.ones(3),), + tags={ + "torch.cond", + "torch.dynamic-shape", + }, +) +class CondBranchClassMethod(torch.nn.Module): + """ + The branch functions (`true_fn` and `false_fn`) passed to cond() must follow these rules: + - both branches must take the same args, which must also match the branch args passed to cond. + - both branches must return a single tensor + - returned tensor must have the same tensor metadata, e.g. 
shape and dtype + - branch function can be free function, nested function, lambda, class methods + - branch function can not have closure variables + - no inplace mutations on inputs or global variables + + + This example demonstrates using class method in cond(). + + NOTE: If the `pred` is test on a dim with batch size < 2, it will be specialized. + """ + + def __init__(self): + super().__init__() + self.subm = MySubModule() + + def bar(self, x): + return x.sin() + + def forward(self, x): + return cond(x.shape[0] <= 2, self.subm.forward, self.bar, [x]) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_value_example.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_value_example.py new file mode 100644 index 0000000000000000000000000000000000000000..3844c7227a365ceb157222c91d179296e73a3522 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_value_example.py @@ -0,0 +1,30 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.tensor(4), torch.randn(5, 5)), + tags={ + "torch.dynamic-value", + "torch.escape-hatch", + }, +) +class ConstrainAsValueExample(torch.nn.Module): + """ + If the value is not known at tracing time, you can provide hint so that we + can trace further. Please look at constrain_as_value and constrain_as_size APIs. + constrain_as_value is used for values that don't need to be used for constructing + tensor. + """ + + def __init__(self): + super().__init__() + + def forward(self, x, y): + a = x.item() + torch._constrain_as_value(a, min=0, max=5) + + if a < 6: + return y.sin() + return y.cos() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_constructor.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_constructor.py new file mode 100644 index 0000000000000000000000000000000000000000..51b8dd57252529079411cf2db2b4a14a4b905634 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_constructor.py @@ -0,0 +1,19 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"torch.dynamic-shape"}, +) +class DynamicShapeConstructor(torch.nn.Module): + """ + Tensor constructors should be captured with dynamic shape inputs rather + than being baked in with static shape. + """ + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.ones(x.shape[0] * 2) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_slicing.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_slicing.py new file mode 100644 index 0000000000000000000000000000000000000000..ee45ffb288368dc35ffb76e385bcee20ced22235 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_slicing.py @@ -0,0 +1,20 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"torch.dynamic-shape"}, +) +class DynamicShapeSlicing(torch.nn.Module): + """ + Slices with dynamic shape arguments should be captured into the graph + rather than being baked in. 
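A minimal sketch of how a dynamic-shape case like the one above might be exported, assuming a PyTorch build that provides `torch.export.export` and `torch.export.Dim`; the module and dimension names here are made up for illustration:

    import torch

    class SliceWithDynamicBounds(torch.nn.Module):
        def forward(self, x):
            # Both slice bounds are derived from x.shape, so they should appear in
            # the exported graph as expressions over sym_size, not as constants.
            return x[: x.shape[0] - 2, x.shape[1] - 1 :: 2]

    batch = torch.export.Dim("batch", min=3)
    ep = torch.export.export(
        SliceWithDynamicBounds(),
        (torch.ones(3, 2),),
        dynamic_shapes={"x": {0: batch}},
    )
    print(ep.graph)  # expect the slice end to be computed from sym_size(x, 0) - 2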
+ """ + + def __init__(self): + super().__init__() + + def forward(self, x): + return x[: x.shape[0] - 2, x.shape[1] - 1 :: 2] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_view.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_view.py new file mode 100644 index 0000000000000000000000000000000000000000..b763a4ec0ae3480a322dbd9b73664944c5e2d8bb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_view.py @@ -0,0 +1,22 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(10, 10),), + tags={"torch.dynamic-shape"}, +) +class DynamicShapeView(torch.nn.Module): + """ + Dynamic shapes should be propagated to view arguments instead of being + baked into the exported graph. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + new_x_shape = x.size()[:-1] + (2, 5) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/fn_with_kwargs.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/fn_with_kwargs.py new file mode 100644 index 0000000000000000000000000000000000000000..6182a747955561fc8bba1a4e3c3e6187e987c135 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/fn_with_kwargs.py @@ -0,0 +1,32 @@ +import torch + +from torch._export.db.case import export_case, ExportArgs, SupportLevel + + +@export_case( + example_inputs=ExportArgs( + torch.randn(4), + (torch.randn(4), torch.randn(4)), + *[torch.randn(4), torch.randn(4)], + mykw0=torch.randn(4), + input0=torch.randn(4), input1=torch.randn(4) + ), + tags={"python.data-structure"}, + support_level=SupportLevel.SUPPORTED, +) +class FnWithKwargs(torch.nn.Module): + """ + Keyword arguments are not supported at the moment. + """ + def __init__(self): + super().__init__() + + def forward(self, pos0, tuple0, *myargs, mykw0, **mykwargs): + out = pos0 + for arg in tuple0: + out = out * arg + for arg in myargs: + out = out * arg + out = out * mykw0 + out = out * mykwargs["input0"] * mykwargs["input1"] + return out diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py new file mode 100644 index 0000000000000000000000000000000000000000..16b7e54613d7fe405d28878ff45bd3a6ce5a3f4a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py @@ -0,0 +1,21 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"torch.dynamic-shape", "python.data-structure", "python.assert"}, +) +class ListContains(torch.nn.Module): + """ + List containment relation can be checked on a dynamic shape or constants. 
+ """ + def __init__(self): + super().__init__() + + def forward(self, x): + assert x.size(-1) in [6, 2] + assert x.size(0) not in [4, 5, 6] + assert "monkey" not in ["cow", "pig"] + return x + x diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py new file mode 100644 index 0000000000000000000000000000000000000000..b4d76cc67eda8cbb3306f27b2315ae35c7517aa2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py @@ -0,0 +1,25 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"python.object-model"}, + support_level=SupportLevel.NOT_SUPPORTED_YET, +) +class ModelAttrMutation(torch.nn.Module): + """ + Attribute mutation is not supported. + """ + + def __init__(self): + super().__init__() + self.attr_list = [torch.ones(3, 2), torch.ones(3, 2)] + + def recreate_list(self): + return [torch.zeros(3, 2), torch.zeros(3, 2)] + + def forward(self, x): + self.attr_list = self.recreate_list() + return x.sum() + self.attr_list[0].sum() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py new file mode 100644 index 0000000000000000000000000000000000000000..743a357fc13ca984369cdddadf31bb4ee27e9109 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py @@ -0,0 +1,29 @@ +from enum import Enum + +import torch + +from torch._export.db.case import export_case + + +class Animal(Enum): + COW = "moo" + + +@export_case( + example_inputs=(torch.ones(3, 2),), +) +class SpecializedAttribute(torch.nn.Module): + """ + Model attributes are specialized. + """ + + def __init__(self): + super().__init__() + self.a = "moo" + self.b = 4 + + def forward(self, x): + if self.a == Animal.COW.value: + return x * x + self.b + else: + raise ValueError("bad") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/static_for_loop.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/static_for_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..9d030b6e82aa5f4c89c3e1c37622e53c1c4675f7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/static_for_loop.py @@ -0,0 +1,22 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"python.control-flow"}, +) +class StaticForLoop(torch.nn.Module): + """ + A for loop with constant number of iterations should be unrolled in the exported graph. 
+ """ + + def __init__(self): + super().__init__() + + def forward(self, x): + ret = [] + for i in range(10): # constant + ret.append(i + x) + return ret diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/torch_sym_min.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/torch_sym_min.py new file mode 100644 index 0000000000000000000000000000000000000000..b9f4dd8f8496ccfd6c81b7007a96d9a05e6ffce5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/torch_sym_min.py @@ -0,0 +1,17 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"torch.operator"}, + support_level=SupportLevel.NOT_SUPPORTED_YET, +) +class TorchSymMin(torch.nn.Module): + """ + torch.sym_min operator is not supported in export. + """ + + def forward(self, x): + return x.sum() + torch.sym_min(x.size(0), 100) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ea64250f25855678acf501aff95d918a39660dd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/node_metadata.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/node_metadata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..035f6506941eb7c8a42da6b7015786dd7bac276a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/node_metadata.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/proxy_value.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/proxy_value.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fedd311fddacd12dd01868122a9d54e4f7929035 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/proxy_value.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/node_metadata.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/node_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..4aa9b8093c370dd565dfb7fb44e4b22474446af0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/node_metadata.py @@ -0,0 +1,32 @@ +from typing import Any, Dict, Set + + +NodeMetadataValue = Any + + +PROTECTED_KEYS: Set[str] = { + "val", + "stack_trace", + "nn_module_stack", + "debug_handle", + "tensor_meta", +} + + +class NodeMetadata: + def __init__(self, data: Dict[str, Any]) -> None: + self.data: Dict[str, Any] = data.copy() + + def __getitem__(self, key: str) -> NodeMetadataValue: + return self.data[key] + + def __setitem__(self, key: str, value: NodeMetadataValue) -> NodeMetadataValue: + 
if key in PROTECTED_KEYS: + raise RuntimeError(f"Could not override node key: {key}") + self.data[key] = value + + def __contains__(self, key: str) -> bool: + return key in self.data + + def copy(self) -> "NodeMetadata": + return NodeMetadata(self.data.copy()) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/proxy_value.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/proxy_value.py new file mode 100644 index 0000000000000000000000000000000000000000..66592d48a45efca0851e51df19d07f6346d8a335 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/proxy_value.py @@ -0,0 +1,41 @@ +# pyre-strict +from typing import Union + +import torch + + +class ProxyValue: + # pyre-ignore + def __init__(self, data, proxy: Union[torch.fx.Proxy, torch.fx.Node]): + # pyre-ignore + self.data = data + self.proxy_or_node = proxy + + @property + def node(self) -> torch.fx.Node: + if isinstance(self.proxy_or_node, torch.fx.Node): + return self.proxy_or_node + assert isinstance(self.proxy_or_node, torch.fx.Proxy) + return self.proxy_or_node.node + + @property + def proxy(self) -> torch.fx.Proxy: + if not isinstance(self.proxy_or_node, torch.fx.Proxy): + raise RuntimeError( + f"ProxyValue doesn't have attached Proxy object. Node: {self.proxy_or_node.format_node()}" + ) + return self.proxy_or_node + + def to_tensor(self) -> torch.Tensor: + assert isinstance(self.data, torch.Tensor) + return self.data + + def is_tensor(self) -> bool: + return isinstance(self.data, torch.Tensor) + + # pyre-ignore + def __iter__(self): + yield from self.data + + def __bool__(self) -> bool: + return bool(self.data) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aa9ce2ac03c23600c86ff02e38a2a4bfeefef9e2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__init__.py @@ -0,0 +1 @@ +from .replace_view_ops_with_view_copy_ops_pass import ReplaceViewOpsWithViewCopyOpsPass diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5143a5fb6e9377f4f87c13b533994320995e313c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41cc98b7f5ae65de8e67fc8ad7833a48f2e98c8a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b497dd232ebe2292342555f1236b2f6f4f330e2 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99c7ad78088ae5c2324cac7320b378f715c11164 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constants_pass.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constants_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a85be48ba2f7738a90f6d4a226fe5eef6678dd66 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constants_pass.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0f23cffea73048ef8b519ee9031b9c5402b0a7d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_set_grad_with_hop_pass.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_set_grad_with_hop_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5726f74c16fe154050c93cd83503703612b4220 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_set_grad_with_hop_pass.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_sym_size_ops_pass.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_sym_size_ops_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d536afbda373166f874d77bba975079c47b92d71 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_sym_size_ops_pass.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fc3ba75f0b6a18c3030c69b902f28522643e27f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py new file mode 100644 index 
0000000000000000000000000000000000000000..7eedd1498c94be9d09922cd33c0191ae5bb354d0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py @@ -0,0 +1,231 @@ +import math +import operator +import traceback +from functools import partial +from typing import Callable, Dict, List, NamedTuple, Set + +import sympy + +import torch +import torch.fx +from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse, ProxyValue, PassResult +from torch.utils._sympy.value_ranges import ValueRanges +from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols + + +__all__ = ["InputDim"] + + +class InputDim(NamedTuple): + input_name: str + dim: int + + +def _convert_to_int(val): + # Convert simple sympy Integers into concrete int + if val == sympy.oo: + return math.inf + if val == -sympy.oo: + return -math.inf + if isinstance(val, sympy.Integer): + return int(val) + raise RuntimeError( + "Export constraints cannot be non-integer expressions" + ) + + +def _convert_range_to_int(range: ValueRanges): + assert isinstance(range, ValueRanges) + min_val = _convert_to_int(range.lower) + max_val = _convert_to_int(range.upper) + return min_val, max_val + + +class _AddRuntimeAssertionsForInlineConstraintsPass(_ExportPassBaseDeprecatedDoNotUse): + def __init__( + self, + range_constraints: Dict[sympy.Symbol, ValueRanges], + ): + super().__init__() + self.range_constraints: Dict[sympy.Symbol, ValueRanges] = range_constraints + self._asserts_generated_unbacked_symbols: Set[sympy.Symbol] = set() + self.counter = 0 + + def _assert_range_constraint(self, proxy, lower, upper, assert_msg): + if lower > -math.inf: + self._insert_assert_async(operator.ge, proxy, lower, assert_msg) + + if upper < math.inf: + self._insert_assert_async(operator.le, proxy, upper, assert_msg) + + def _insert_assert_async(self, operator, lower, upper, assert_msg): + """ + Inserts assert_async call_function nodes in the graph. This function is + called **during** the interpreter-based pass. + """ + self.counter += 1 + cmp = super().call_operator(operator, (lower, upper), {}, self._create_dummy_node_metadata()) + cmp_tensor = super().call_operator(torch.ops.aten.scalar_tensor.default, (cmp,), {}, self._create_dummy_node_metadata()) + super().call_operator( + torch.ops.aten._assert_async.msg, + (cmp_tensor, assert_msg), + {}, + self._create_dummy_node_metadata(), + ) + + def call_operator(self, op, args, kwargs, meta) -> ProxyValue: + ret = super().call_operator(op, args, kwargs, meta) + if "val" not in meta: + return ret + + val = meta["val"] + + # In general, we may have to deal the case such as: ret[1].shape[0]. + # We need first find out what symbols require assertion, then we need to follow the path + # from ret to the symbol, construct the proxies along the way and construct the messages + # piece-wise at the same time. + # + # We use post-order traversal to collect all the proxies callbacks needed, construct + # the error message callbacks, and at the top-level traversal tree we execute all the callbacks. + # We need the callbacks because, in order to call the function to create a proxy for shape[0], we + # need the proxy for shape, which further requires the proxy for ret[1], etc. 
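        # Illustrative note (symbol name and bounds below are hypothetical): for a
        # returned tensor whose shape[0] is an unbacked symint u0 with inline
        # constraint [0, 5], the callbacks constructed below end up emitting a
        # chain roughly like
        #   %sym_size = aten.sym_size.int(%ret, 0)
        #   %ge       = operator.ge(%sym_size, 0)
        #   %ge_t     = aten.scalar_tensor(%ge)
        #   %assert   = aten._assert_async.msg(%ge_t, "... is outside of inline constraint [0, 5].")
        # plus a matching operator.le chain for the upper bound.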
+ def add_assertions(val): + call_backs: List[Callable] = [] + messages: List[str] = [] + if isinstance(val, (torch.SymInt, torch.SymFloat, torch.SymBool)): + symbol = val.node.expr + if symbol in self.existing_inline_assertions: + return call_backs, messages + if isinstance(symbol, sympy.Symbol) and free_unbacked_symbols(symbol): + if symbol in self._asserts_generated_unbacked_symbols: + return call_backs, messages + # We only care about unbacked symints for these inline + # constraints, which are prefixed with 'u' + constraint = self.range_constraints[symbol] + min_val, max_val = _convert_range_to_int(constraint) + assert_msg = f" is outside of inline constraint [{min_val}, {max_val}]." + call_backs.append( + partial(self._assert_range_constraint, lower=min_val, upper=max_val) + ) + messages.append(assert_msg) + self._asserts_generated_unbacked_symbols.add(symbol) + + elif isinstance(val, torch.Tensor): + for i, sym in enumerate(val.shape): + cbs, msgs = add_assertions(sym) + for cb, msg in zip(cbs, msgs): + def sym_size_cb(proxy, assert_msg, dim): + dim_proxy = super( + _AddRuntimeAssertionsForInlineConstraintsPass, + self + ).call_operator( + torch.ops.aten.sym_size.int, + (proxy, dim), + {}, + self._create_dummy_node_metadata(), + ) + cb(proxy=dim_proxy, assert_msg=assert_msg) + call_backs.append(partial(sym_size_cb, dim=i)) + messages.append(f".shape[{i}]" + msg) + return call_backs, messages + + callbacks, messages = add_assertions(val) + for cb, msg in zip(callbacks, messages): + cb(proxy=ret, assert_msg=f"{ret.node}" + msg) + return ret + + def call(self, graph_module): + self.existing_inline_assertions = _get_existing_inline_assertions( + graph_module, self.range_constraints + ) + + # Add runtime asserts for inline constraints + val = super().call(graph_module) + + # Sometimes this pass would return a wrong graph where we have mismatched + # node names in signature. Before we fix it, let's just skip it. + if self.counter == 0 and type(self) is _AddRuntimeAssertionsForInlineConstraintsPass: + return PassResult(graph_module, False) + + # Populate the stack trace with dummy vals to respect IR + for node in val.graph_module.graph.nodes: + if not node.meta.get("stack_trace", None): + node.meta["stack_trace"] = "".join(traceback.format_stack(limit=1)) + + return PassResult(val.graph_module, val.modified) + + +def _get_existing_inline_assertions( + graph_module: torch.fx.GraphModule, + range_constraints: Dict[sympy.Symbol, ValueRanges], +) -> Dict[sympy.Symbol, ValueRanges]: + existing_inline_assertions: Dict[sympy.Symbol, ValueRanges] = {} + + for module in graph_module.modules(): + if not isinstance(module, torch.fx.GraphModule): + continue + + # Find all the existing inline assertions. 
They will look something like: + # %_local_scalar_dense = call_function[target=torch.ops.aten._local_scalar_dense.default](args = (%arg1_1,), kwargs = {}) + # %ge = call_function[target=operator.ge](args = (%_local_scalar_dense, 0), kwargs = {}) + # %scalar_tensor = call_function[target=torch.ops.aten.scalar_tensor.default](args = (%ge,), kwargs = {}) + # %_assert_async = call_function[target=torch.ops.aten._assert_async.msg](args = (%scalar_tensor, "..."), kwargs = {}) + for node in module.graph.nodes: + if node.target != torch.ops.aten._assert_async.msg: + continue + + scalar_tensor_arg = node.args[0] + if not ( + scalar_tensor_arg.op == "call_function" and + scalar_tensor_arg.target == torch.ops.aten.scalar_tensor.default + ): + continue + + compare_arg = scalar_tensor_arg.args[0] + if not ( + compare_arg.op == "call_function" and + compare_arg.target in (operator.le, operator.ge) and + len(compare_arg.args) == 2 + ): + continue + + compare_op = compare_arg.target + maybe_symint_arg, compare_int = compare_arg.args + + # x >= 0 will sometimes be canonicalized to -x <= 0, so in some + # cases the operation before the comparison is to multiply by -1. We + # can undo the canonicalization here + if ( + maybe_symint_arg.op == "call_function" and + maybe_symint_arg.target == operator.mul and + maybe_symint_arg.args[0] == -1 + ): + maybe_symint_arg = maybe_symint_arg.args[1] + compare_op = operator.ge + compare_int = -1 * compare_int + + if not ( + "val" in maybe_symint_arg.meta and + isinstance(maybe_symint_arg.meta["val"], torch.SymInt) + ): + continue + + symint = maybe_symint_arg.meta["val"].node.expr + if not isinstance(symint, sympy.Symbol): + continue + + if symint not in range_constraints: + raise RuntimeError(f"Unable to find symint {symint} in {range_constraints}") + + found_range = existing_inline_assertions.get(symint, ValueRanges(-math.inf, math.inf)) + + if compare_arg.target == operator.le: + existing_inline_assertions[symint] = ValueRanges( + lower=found_range.lower, upper=compare_int + ) + elif compare_arg.target == operator.ge: + existing_inline_assertions[symint] = ValueRanges( + lower=compare_int, upper=found_range.upper + ) + + return existing_inline_assertions diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/collect_tracepoints_pass.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/collect_tracepoints_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..6a2b9c674859f4eefd56033cf37536a1b532ae65 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/collect_tracepoints_pass.py @@ -0,0 +1,66 @@ +import operator + +import torch + +from torch.export.exported_program import ConstantArgument, TensorArgument +from torch.fx.passes.infra.pass_base import PassBase, PassResult + +__all__ = ["CollectTracepointsPass"] + + +class CollectTracepointsPass(PassBase): + """ + Performs constant folding and constant propagation. + """ + + def __init__(self, specs, sig) -> None: + super().__init__() + self.specs = specs + self.sig = sig + + def call(self, gm): + def get_arg_spec(arg): + if isinstance(arg, torch.fx.Node): + if isinstance(arg.meta.get("val"), torch.Tensor): + return TensorArgument(name=arg.name) + else: + raise AssertionError( + "Symint input is not implemented yet for submodule call signature." 
+ ) + else: + return ConstantArgument(value=arg) + + for module in gm.modules(): + if not isinstance(module, torch.fx.GraphModule): + continue + for node in module.graph.nodes: + if node.op != "call_function": + continue + if node.target == torch.ops.higher_order._export_tracepoint: + for i, arg in enumerate(node.args): + kind = node.kwargs["kind"] + if kind == "module_call_inputs": + self.specs[node.kwargs["path"]].inputs.append( + get_arg_spec(arg) + ) + elif kind == "module_call_outputs": + self.specs[node.kwargs["path"]].outputs.append( + get_arg_spec(arg) + ) + else: + raise AssertionError(f"Unknown tracepoint kind: {kind}") + if isinstance(arg, torch.fx.Node): + for user in node.users: + assert user.op == "call_function" + assert user.target == operator.getitem + assert isinstance(user.args[1], int) + if user.args[1] == i: + user.replace_all_uses_with(arg) + self.sig.replace_all_uses(user.name, arg.name) + break + users = list(node.users) + for user in users: + assert len(user.users) == 0 + gm.graph.erase_node(user) + gm.graph.erase_node(node) + return PassResult(gm, True) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/functionalize_side_effectful_ops_pass.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/functionalize_side_effectful_ops_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..5fcf5adaca5b0b478db87e71633f5136b54969b2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/functionalize_side_effectful_ops_pass.py @@ -0,0 +1,94 @@ +import copy +from typing import Dict, Optional, Tuple, List + +import torch +from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse, PassResult, Argument +from torch._export.pass_infra.node_metadata import NodeMetadata +from torch._export.pass_infra.proxy_value import ProxyValue +from torch._ops import OpOverload + +aten = torch.ops.aten + +_NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS: Dict[OpOverload, OpOverload] = { + aten.sym_constrain_range.default: aten._functional_sym_constrain_range, + aten._assert_async.msg: aten._functional_assert_async.msg, +} + + +class _FunctionalizeSideEffectfulOpsPass(_ExportPassBaseDeprecatedDoNotUse): + """ + Functionalize ops with side effect in graph module by replacing the op with + functional version of it. A new dependency token (`dep_token`) will be + created and propagated through functional ops to output. + For example: + ``` + def f(x): + sym_constrain_range(x.shape[0], min=1, max=3) + return x.add(3) + ``` + Will be transformed to: + ``` + def f(x): + dep_token0 = _make_dep_token() + dep_token1 = _functional_sym_constrain_range( + x.shape[0], min=1, max=3, dep_token=dep_token0 + ) + + return x.add(3), dep_token1 + ``` + """ + + def __init__(self) -> None: + super().__init__() + self._dep_token: Optional[ProxyValue] = None + self._next_dep_token_index: Optional[int] = None + + def call(self, graph_module: torch.fx.GraphModule) -> PassResult: + # Early return if no non-functional assertions. 
+ if not any( + n.target in _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS + for n in graph_module.graph.nodes + ): + return PassResult(graph_module=graph_module, modified=False) + + gm = copy.deepcopy(graph_module) + self._dep_token = None + self._next_dep_token_index = None + return super().call(gm) + + def call_operator( + self, + op: OpOverload, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + meta: NodeMetadata, + ) -> ProxyValue: + if op not in _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS: + return super().call_operator(op, args, kwargs, meta) + + if self._dep_token is None: + self._dep_token = super().call_operator( + aten._make_dep_token, + args=(), + kwargs={}, + meta=self._create_dummy_node_metadata(), + ) + self._dep_token.node.name = "dep_token0" + self._next_dep_token_index = 1 + + self._dep_token = super().call_operator( + _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS[op], + args=args, + kwargs={**kwargs, "dep_token": self._dep_token}, + meta=meta, + ) + assert self._next_dep_token_index is not None + self._dep_token.node.name = f"dep_token{self._next_dep_token_index}" + self._next_dep_token_index += 1 + + return self._dep_token + + def output(self, results: List[Argument], meta: NodeMetadata) -> ProxyValue: + assert self._dep_token is not None + + return super().output(results=(*results, self._dep_token), meta=meta) # type: ignore[arg-type] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/lift_constants_pass.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/lift_constants_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..5f93eabdc2b5d8cea145ebc8399ccc3e2c5a7816 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/lift_constants_pass.py @@ -0,0 +1,248 @@ +import collections +from typing import Any, Dict, Union + +import torch +from torch._export.verifier import SpecViolationError +from torch._guards import detect_fake_mode +from torch.export.exported_program import ( + ArgumentSpec, + CustomObjArgument, + ExportGraphSignature, + InputKind, + InputSpec, + TensorArgument, +) + + +class ConstantAttrMap(collections.abc.MutableMapping): + """A mapping class that understands how to use module constants (tensors and + ScriptObjects) as keys. We store tensors normally, but ScriptObjects are + stored by hash, because different torch.ScriptObjects can point to the same + underlying value (but we guarantee that they will `hash()` to the same value + if that's the case). + """ + + def __init__(self): + # Underlying dict that we use to implement this mapping. + self._constant_attrs: Dict[Union[int, torch.Tensor], Any] = {} + # Map from the hash(ScriptObject) to the ScriptObject itself. Used for + # APIs like `__iter__` that should look like they're returning the + # original ScriptObjects. 
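        # Illustrative usage (not part of the original file): plain tensors are
        # stored as keys directly, while ScriptObjects are keyed by hash() so that
        # distinct references to the same underlying object collapse into one
        # entry, e.g.
        #   attrs = ConstantAttrMap()
        #   w = torch.ones(3, 2)
        #   attrs[w] = "submod.frozen_weight"   # hypothetical fully-qualified name
        #   assert w in attrs and attrs[w] == "submod.frozen_weight"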
+ self._script_object_map: Dict[int, torch.ScriptObject] = {} + + def __getitem__(self, key: Union[torch.Tensor, torch.ScriptObject]) -> Any: + real_key = hash(key) if isinstance(key, torch.ScriptObject) else key + assert isinstance(real_key, (int, torch.Tensor)) + return self._constant_attrs[real_key] + + def __setitem__( + self, key: Union[torch.Tensor, torch.ScriptObject], value: Any + ) -> None: + if isinstance(key, torch.ScriptObject): + self._constant_attrs[hash(key)] = value + self._script_object_map[hash(key)] = key + elif isinstance(key, torch.Tensor): + self._constant_attrs[key] = value + else: + raise TypeError( + f"Expected key to be a tensor or ScriptObject, got {type(key)}" + ) + + def __delitem__(self, key): + real_key = hash(key) if isinstance(key, torch.ScriptObject) else key + + del self._constant_attrs[real_key] + + def __iter__(self): + for key in self._constant_attrs: + if isinstance(key, int): + yield self._script_object_map[key] + else: + yield key + + def __len__(self): + return len(self._constant_attrs) + + def __contains__(self, key: object) -> bool: + real_key = hash(key) if isinstance(key, torch.ScriptObject) else key + return real_key in self._constant_attrs + + +def get_constant_fqn(node: torch.fx.Node, constant_name: str) -> str: + # The FQN of the constant tensor in the state dict should + # correspond to the module where the constant tensor was + # originally used. + parent_fqn = list(node.meta["nn_module_stack"].values())[-1][0] + if len(parent_fqn) > 0: + return f"{parent_fqn}.{constant_name}" + else: + return constant_name + + +def lift_constants_pass( + gm: torch.fx.GraphModule, + graph_signature: ExportGraphSignature, + constant_attrs: ConstantAttrMap, +) -> Dict[str, Union[torch.Tensor, torch._C.ScriptObject]]: + """ + Takes a graph module, graph signature, and modifies them implace to lift any + constants (tensors or custom classes) as inputs to the graph. Returns a + dictionary of names to constants. + + Arguments: + gm (torch.fx.GraphModule): The graph module containing the graph and constants to lift. + graph_signature (ExportGraphSignature): This graph signature will be + mutated to add additional CONSTANT_TENSOR and CUSTOM_OBJ inputs. + constant_attrs (ConstantAttr): A mapping from a constant value to its + fully-qualified path in `gm`. This is used to maintain consistent + location of constants between the original module and the exported + version. + + Returns: + A dictionary of fqn => constant value. + """ + all_constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] = {} + + inputs = graph_signature.input_specs + num_custom_obj = sum( + input_specs.kind == InputKind.CUSTOM_OBJ for input_specs in inputs + ) + num_tensor_constants = sum( + input_specs.kind == InputKind.CONSTANT_TENSOR for input_specs in inputs + ) + + fake_mode = detect_fake_mode( + tuple(node.meta["val"] for node in gm.graph.nodes if node.op == "placeholder") + ) + + first_user_input_loc, first_user_input = 0, None + for node in gm.graph.nodes: + if node.op == "placeholder" and node.name in graph_signature.user_inputs: + first_user_input = node + break + first_user_input_loc += 1 + + lifted_objs = ConstantAttrMap() + for node in gm.graph.nodes: + if node.op == "get_attr": + constant_val = getattr(gm, node.target) + if constant_val in lifted_objs: + # We already lifted this constant elsewhere. Just rewrite uses + # of this get_attr to point to the already-existing placeholder + # node. 
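                # (Illustrative: if two submodules hold get_attr references to the
                # same tensor object, the second get_attr is simply rewired to the
                # placeholder created when the first reference was lifted.)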
+ const_placeholder_node = lifted_objs[constant_val] + node.replace_all_uses_with(const_placeholder_node) + gm.graph.erase_node(node) + continue + + # For ScriptObject and Tensor constants: + # First check if the constant was an attribute on some module by + # consulting `constant_attrs` map. If it is, use the fqn that keeps + # its location consistent with the eager module. + # + # If it's not in the `constant_attrs` map, that means it's an inline + # constant (e.g. x + torch.tensor(0)), and thus did not have a + # specific location in the eager module. In that case, just generate + # some name and attach it to the module in which it was used. + if isinstance(constant_val, torch.ScriptObject): + constant_kind = InputKind.CUSTOM_OBJ + constant_fqn = constant_attrs.get(constant_val) + if constant_fqn is not None: + _, _, constant_name = constant_fqn.rpartition(".") + else: + constant_name = f"_lifted_custom_obj{num_custom_obj}" + constant_fqn = get_constant_fqn(node, constant_name) + num_custom_obj += 1 + elif isinstance(constant_val, torch.Tensor): + constant_kind = InputKind.CONSTANT_TENSOR + constant_fqn = constant_attrs.get(constant_val) + if constant_fqn is not None: + _, _, constant_name = constant_fqn.rpartition(".") + else: + constant_name = f"_lifted_tensor_constant{num_tensor_constants}" + constant_fqn = get_constant_fqn(node, constant_name) + num_tensor_constants += 1 + elif isinstance(constant_val, torch.fx.GraphModule): + continue + elif "LoweredBackendModule" in type(constant_val).__name__: + continue + else: + raise SpecViolationError( + f"getattr node {node} referencing unsupported type {type(constant_val)}" + ) + + with gm.graph.inserting_before(first_user_input): + # Insert the constant node before the first user input + const_placeholder_node = gm.graph.placeholder(constant_name) + # match target name with its node name in case there is name collision + # and suffix is added to node name in fx + const_placeholder_node.target = const_placeholder_node.name + + for k, v in node.meta.items(): + const_placeholder_node.meta[k] = v + + input_spec_arg: ArgumentSpec + if isinstance(constant_val, torch.Tensor): + if fake_mode is not None: + const_placeholder_node.meta["val"] = fake_mode.from_tensor( + constant_val, static_shapes=True + ) + const_placeholder_node.meta["val"].constant = constant_val + else: + const_placeholder_node.meta["val"] = constant_val + input_spec_arg = TensorArgument(name=const_placeholder_node.name) + elif isinstance(constant_val, torch._C.ScriptObject): + class_fqn = constant_val._type().qualified_name() # type: ignore[attr-defined] + const_placeholder_node.meta["val"] = CustomObjArgument( + constant_fqn, class_fqn + ) + input_spec_arg = CustomObjArgument( + name=const_placeholder_node.name, class_fqn=class_fqn + ) + else: + raise SpecViolationError( + f"tried to lift unsupported type {type(constant_val)} from node {node.format_node()}" + ) + + lifted_objs[constant_val] = const_placeholder_node + node.replace_all_uses_with(const_placeholder_node) + gm.graph.erase_node(node) + + # Add the constant as a buffer to the graph signature + graph_signature.input_specs.insert( + first_user_input_loc, + InputSpec( + kind=constant_kind, + arg=input_spec_arg, + target=constant_fqn, + ), + ) + all_constants[constant_fqn] = constant_val + first_user_input_loc += 1 + + return all_constants + + +def rewrite_script_object_meta( + gm: torch.fx.GraphModule, +) -> Dict[str, Union[torch.Tensor, torch.ScriptObject]]: + """When tracing, we produce a graph with an actual 
ScriptObject in the + meta["val"]. Eventually we want to change this behavior, when FakeMode infra + for ScriptObjects lands. + + For now, we rewrie meta["val"] to be a placeholder CustomObjArgument + """ + constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] = {} + for node in gm.graph.nodes: + if "val" not in node.meta or not isinstance( + node.meta["val"], torch.ScriptObject + ): + continue + + old_meta = node.meta["val"] + class_fqn = old_meta._type().qualified_name() # type: ignore[attr-defined] + new_meta = CustomObjArgument(node.name, class_fqn) + constants[node.name] = old_meta + node.meta["val"] = new_meta + + return constants diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py new file mode 100644 index 0000000000000000000000000000000000000000..adcc708e554830b430db0d4374f4494482ce0b39 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py @@ -0,0 +1,26 @@ +import torch +from torch.fx.passes.infra.pass_base import PassBase, PassResult + + +class _RemoveRuntimeAssertionsPass(PassBase): + """ + Remove runtime assertions inserted by the + _AddRuntimeAssertionsForInlineConstraintsPass. + """ + + def call(self, graph_module) -> PassResult: + modified = False + for module in graph_module.modules(): + if not isinstance(module, torch.fx.GraphModule): + continue + for node in module.graph.nodes: + if node.target == torch.ops.aten._assert_async.msg: + assert_async_node = node + if len(assert_async_node.users) > 0: + continue + module.graph.erase_node(assert_async_node) + # the upstream scalar_tensor <- {le, ge} <- sym_size + # linear chain of nodes of nodes is removed by the + # downstream dead code elimination + modified = True + return PassResult(graph_module, modified) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_set_grad_with_hop_pass.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_set_grad_with_hop_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..97af59b700a792694a83b923b8c27b692356907f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_set_grad_with_hop_pass.py @@ -0,0 +1,141 @@ +import torch +from torch._higher_order_ops.wrap import wrap_with_set_grad_enabled + +from ..utils import ( + node_inline_, + node_replace_, + nodes_filter, + nodes_first, + nodes_map, + sequential_split, +) + + +def _is_set_grad_enabled_node(node: torch.fx.Node): + return ( + node + and node.op == "call_function" + and node.target == torch._C._set_grad_enabled + ) + + +def _is_set_grad_enabled_sub_mod(node: torch.fx.Node, omit_if_same_with_ambient=False): + if node.op == "call_module": + assert isinstance(node.target, str) + subgm = getattr(node.graph.owning_module, node.target) + first_non_ph = nodes_first( + subgm.graph.nodes, lambda node: node.op != "placeholder" + ) + if ( + first_non_ph + and first_non_ph.op == "call_function" + and first_non_ph.target == torch._C._set_grad_enabled + ): + return ( + first_non_ph.args[0] != torch.is_grad_enabled() + if omit_if_same_with_ambient + else True + ) + return False + + +def _replace_with_hop(node: torch.fx.Node): + assert node.op == "call_module" + graph: torch.fx.Graph = node.graph + gm: torch.fx.GraphModule = graph.owning_module + assert isinstance(node.target, str) + sub_gm = getattr(gm, node.target) + sub_graph = 
sub_gm.graph + set_grad_nodes = nodes_filter(sub_graph.nodes, _is_set_grad_enabled_node) + if len(set_grad_nodes) > 0: + assert len(set_grad_nodes) == 1 + set_grad_node = set_grad_nodes[0] + enable_grad_val = set_grad_node.args[0] + with graph.inserting_before(node): + get_attr_node = graph.get_attr(node.target) + output_node = next(iter(reversed(sub_gm.graph.nodes)), None) + if output_node is not None: + assert len(output_node.args) == 1 + output_args = output_node.args[0] + if isinstance(output_args, (tuple, list)): + call_func_node = graph.call_function( + wrap_with_set_grad_enabled, + (enable_grad_val, get_attr_node, *node.args), + {}, + ) + # Create the metadata + call_func_node.meta["val"] = tuple( + arg.meta["val"] for arg in output_args + ) + node_replace_(node, call_func_node, delete_old=True) + + # Rename the name of getitem nodes to the actual name of its contents + # for passing verifier and better readability, also propagate metadata + for get_item_node in call_func_node.users.keys(): + idx: int = get_item_node.args[1] + output_node = output_args[idx] + get_item_node._rename(output_node.name) + get_item_node.meta = output_node.meta + pass + + elif isinstance(output_args, torch.fx.Node): + call_func_node = graph.create_node( + "call_function", + wrap_with_set_grad_enabled, + (enable_grad_val, get_attr_node, *node.args), + {}, + output_args.name, + ) + call_func_node.meta = output_args.meta + node_replace_(node, call_func_node, delete_old=True) + else: + raise NotImplementedError( + f"repalce_set_grad_with_hop_pass doesnt' support output type {type(output_args)}" + ) + else: + raise NotImplementedError( + "Cannot replace a call_module with a hop if it has no output. This module will gets DCEed." + ) + sub_graph.erase_node(set_grad_node) + + +def _remove_set_grad_and_inline(node: torch.fx.Node): + assert node.op == "call_module" + graph: torch.fx.Graph = node.graph + gm: torch.fx.GraphModule = graph.owning_module + assert isinstance(node.target, str) + sub_gm = getattr(gm, node.target) + sub_graph = sub_gm.graph + nodes_map( + sub_graph.nodes, + lambda n: sub_graph.erase_node(n) if _is_set_grad_enabled_node(n) else n, + ) + node_inline_(node) + + +def replace_set_grad_with_hop_pass(gm: torch.fx.GraphModule): + # If there is no set_grad_enabled node, return the original graph module + need_replacing = False + for node in gm.graph.nodes: + if _is_set_grad_enabled_node(node): + need_replacing = True + + if not need_replacing: + return gm + + new_gm = sequential_split(gm, _is_set_grad_enabled_node) + + def _maybe_inline_or_replace_with_hop(node: torch.fx.Node): + if _is_set_grad_enabled_sub_mod(node, omit_if_same_with_ambient=True): + _replace_with_hop(node) + else: + _remove_set_grad_and_inline(node) + + nodes_map( + list(new_gm.graph.nodes), + lambda node: _maybe_inline_or_replace_with_hop(node) + if node.op == "call_module" + else node, + ) + new_gm.graph.lint() + return new_gm diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_sym_size_ops_pass.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_sym_size_ops_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..109a96d7b4bd3672660b1271b4d72e7fbb6b982f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_sym_size_ops_pass.py @@ -0,0 +1,18 @@ +from typing import Dict + +import torch + +replacements: Dict[torch._ops.OpOverloadPacket, torch._ops.OpOverload] = { + torch.ops.aten.sym_size: torch.ops.aten.sym_size.int, + 
torch.ops.aten.sym_stride: torch.ops.aten.sym_stride.int, + torch.ops.aten.sym_numel: torch.ops.aten.sym_numel.default, +} + + +def _replace_sym_size_ops_pass(gm: torch.fx.GraphModule): + for module in gm.modules(): + if not isinstance(module, torch.fx.GraphModule): + continue + for node in module.graph.nodes: + if node.target in replacements: + node.target = replacements[node.target] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_view_ops_with_view_copy_ops_pass.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_view_ops_with_view_copy_ops_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..f32b442733eb98d49aea4d766ef0e727243ebeeb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_view_ops_with_view_copy_ops_pass.py @@ -0,0 +1,71 @@ +from typing import Dict, Optional, Set + +import torch +from torch._ops import OpOverload, OpOverloadPacket, HigherOrderOperator +from torch._export.error import InternalError +from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse + + +__all__ = ["ReplaceViewOpsWithViewCopyOpsPass"] + + +_NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS: Dict[OpOverload, OpOverload] = { + torch.ops.aten._unsafe_view.default: torch.ops.aten.view_copy.default, +} + +# TODO (tmanlaibaatar) remove this after https://github.com/pytorch/pytorch/pull/100749 +_BLACK_LISTED_OPS: Set[OpOverloadPacket] = { + torch.ops.aten.sym_size, + torch.ops.aten.sym_stride, + torch.ops.aten.sym_numel, +} + +def is_view_op(schema: torch._C.FunctionSchema) -> bool: + if len(schema.arguments) == 0: + return False + alias_info = schema.arguments[0].alias_info + return (alias_info is not None) and (not alias_info.is_write) + + +def get_view_copy_of_view_op(schema: torch._C.FunctionSchema) -> Optional[OpOverload]: + if is_view_op(schema) and schema.name.startswith("aten::"): + view_op_name = schema.name.split("::")[1] + view_op_overload = ( + schema.overload_name + if schema.overload_name != "" + else "default" + ) + view_copy_op_name = view_op_name + "_copy" + if not hasattr(torch.ops.aten, view_copy_op_name): + raise InternalError(f"{schema.name} is missing a view_copy variant") + + view_copy_op_overload_packet = getattr(torch.ops.aten, view_copy_op_name) + + if not hasattr(view_copy_op_overload_packet, view_op_overload): + raise InternalError(f"{schema.name} is missing a view_copy variant") + + return getattr(view_copy_op_overload_packet, view_op_overload) + + return None + + +class ReplaceViewOpsWithViewCopyOpsPass(_ExportPassBaseDeprecatedDoNotUse): + """ + Our backend expects pure functional operators. For efficiency + purposes, we keep view ops around while functionalizing the exported + program. This pass replaces view ops with view copy ops for backends that + need AOT memory planning. 
+ """ + def call_operator(self, op, args, kwargs, meta): + if op in _NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS: + return super().call_operator( + (_NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS[op]), args, kwargs, meta + ) + + if op in _BLACK_LISTED_OPS or isinstance(op, HigherOrderOperator): + return super().call_operator(op, args, kwargs, meta) + + if view_copy_op := get_view_copy_of_view_op(op._schema): + return super().call_operator(view_copy_op, args, kwargs, meta) + + return super().call_operator(op, args, kwargs, meta) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..10a55772ab58b21573a6eba0356ddd3080164ac7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/benchmark_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/benchmark_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4ff79cbc0db02c33b8754d26a81504f665445fc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/benchmark_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/eager_transforms.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/eager_transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9dd4506fe4feccdb305646c2b0c62914178a309 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/eager_transforms.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/vmap.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/vmap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2bdee9d300246b69ad7004901b4fbcfcf698401 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/__pycache__/vmap.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/input_output_analysis.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/input_output_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..7645c8c9f254bb01234ecbbffc9511e62e182edd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/input_output_analysis.py @@ -0,0 +1,432 @@ +""" +This module is one of the analysis modules - it takes as input a function or graph +and some preexisting properties, and returns some data that is useful for deciding +how to further proceed with compilation or construct runtime wrappers. + +In particular, the following analyses are provided: +1. Refine the view and mutation metadata collected previously - removing duplicate + inputs or mapping views to their bases. +2. We also analyze the function signature for export graphs. 
+""" + +import itertools +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +import torch.utils._pytree as pytree +from torch import Tensor +from torch._subclasses.functional_tensor import FunctionalTensor +from torch.fx.experimental.symbolic_shapes import is_concrete_int +from .schemas import ( + BackwardSignature, + GraphSignature, + InputAliasInfo, + OutputAliasInfo, + OutputType, + ViewAndMutationMeta, +) +from .utils import strict_zip + +zip = strict_zip + + +def remove_dupe_metadata( + m: ViewAndMutationMeta, + keep_arg_mask: List[bool], + add_dupe_map: List[int], +) -> ViewAndMutationMeta: + assert len(m.input_info) == len(keep_arg_mask) + # Easy invariant: the first argument should never be a dupe (it will be kept) + assert len(keep_arg_mask) > 0 and keep_arg_mask[0] + + # Filter dupe'd mutated inputs out of traced_tangents + num_data_mutations = len([x for x in m.input_info if x.mutates_data]) + other_traced_tangents = m.traced_tangents[num_data_mutations:] + inp_traced_tangents = m.traced_tangents[:num_data_mutations] + filtered_inp_traced_tangents = [ + x + for i, x in enumerate(inp_traced_tangents) + if keep_arg_mask[m.mutated_inp_runtime_indices[i]] + ] + traced_tangents = filtered_inp_traced_tangents + other_traced_tangents + + return ViewAndMutationMeta( + input_info=[x for i, x in enumerate(m.input_info) if keep_arg_mask[i]], + # For outputs that are views of inputs, we store the index of the input that the output + # was generated from. Need to update that index to account for removed dupes. + output_info=[ + OutputAliasInfo( + output_type=o.output_type, + raw_type=o.raw_type, + dynamic_dims=o.dynamic_dims, + base_idx=None if o.base_idx is None else add_dupe_map[o.base_idx], + requires_grad=o.requires_grad, + ) + for o in m.output_info + ], + num_intermediate_bases=m.num_intermediate_bases, + keep_input_mutations=m.keep_input_mutations, + traced_tangents=traced_tangents, + # We are guaranteed not to get here, since dupes are not supported today with subclass inputs. + subclass_inp_meta=[], + subclass_fw_graph_out_meta=[], + subclass_tangent_meta=[], + is_train=m.is_train, + ) + + +# Given our ViewAndMutation metadata, this fn constructs a new set of metadata, +# after adding synthetic base arguments to the function. +# Most of the work in this fn is slogging through all of the metadata corresponding to inputs, +# and updating it with our synthetic base calling convention. +# +# When config.debug_assert is set, we automatically regenerate the metadata +# and compare it to this output for sanity. +# +# In addition to the updated metadata, also return the list of input indices +# that will need to be updated in the synthetic base epilogue + + +# Given our ViewAndMutation metadata, this fn constructs a new set of metadata, +# after adding synthetic base arguments to the function. +# Most of the work in this fn is slogging through all of the metadata corresponding to inputs, +# and updating it with our synthetic base calling convention. +# +# When config.debug_assert is set, we automatically regenerate the metadata +# and compare it to this output for sanity. 
+# +# In addition to the updated metadata, also return the list of input indices +# that will need to be updated in the synthetic base epilogue +def create_synthetic_base_metadata( + m: ViewAndMutationMeta, + # Maps each outer argument idx to its inner idx (or, if this outer arg is generated from a + # synthetic base, you get a tuple of (i, TensorMeta), telling you the base tensor idx, and view metadata) + synthetic_base_info: List[Union[int, Tuple[int, torch.Tensor]]], + outer_args: List[Any], + inner_args: List[Any], +) -> Tuple[ViewAndMutationMeta, List[int]]: + # maps inner arg indices to outer arg indices + synthetic_base_to_indices: Dict[int, List[int]] = {} + for inner_idx in range(len(inner_args)): + outer_aliased_indices_of_current_base_arg = [ + outer_idx + for outer_idx, inner_idx_or_tuple in enumerate(synthetic_base_info) + if (isinstance(inner_idx_or_tuple, int) and inner_idx_or_tuple == inner_idx) + or ( + isinstance(inner_idx_or_tuple, tuple) + and inner_idx_or_tuple[0] == inner_idx + ) + ] + synthetic_base_to_indices[inner_idx] = outer_aliased_indices_of_current_base_arg + + # given the requires_grad info on mutated inputs, + # generate the requires_grad info on those same mutated inputs, but after constructing synthetic bases. + input_infos = [] + for outer_indices in synthetic_base_to_indices.values(): + # leaf-ness should be all-or-nothing for aliased tensor. + # (aka if "a" and "b" are views, then a.is_leaf == b.is_leaf) + any_leaf = any(m.input_info[x].is_leaf for x in outer_indices) + all_leaf = all(m.input_info[x].is_leaf for x in outer_indices) + assert any_leaf == all_leaf + + mutates_data = ( + True + if len(outer_indices) > 1 + else m.input_info[outer_indices[0]].mutates_data + ) + mutates_metadata = ( + False + if len(outer_indices) > 1 + else m.input_info[outer_indices[0]].mutates_metadata + ) + requires_grad = any(m.input_info[x].requires_grad for x in outer_indices) + mutations_hidden_from_autograd = all( + m.input_info[x].mutations_hidden_from_autograd for x in outer_indices + ) + mutations_under_no_grad_or_inference_mode = all( + m.input_info[x].mutations_under_no_grad_or_inference_mode + for x in outer_indices + ) + + inpt_info = InputAliasInfo( + # If len(outer_indices) > 1, then this input is a synthetic base. + # The invariant is that to the rest of aot autograd, synthetic bases only show up if + # one of their aliases gets a data mutation. And if any of their aliases get metadata + # mutations, they will be hidden from the rest of aot autograd. 
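+            # (Illustrative sketch of the invariant above, not taken from the
+            # original comment: if outer args `a` and `a.view(-1)` share storage
+            # and `a` receives a data mutation, both outer indices map to a single
+            # inner base arg, so len(outer_indices) == 2 and mutates_data below is
+            # forced to True for that synthetic base.)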
+ mutates_data=mutates_data, + mutates_metadata=mutates_metadata, + mutations_hidden_from_autograd=all( + m.input_info[x].mutations_hidden_from_autograd for x in outer_indices + ), + mutates_storage_metadata=False + if len(outer_indices) > 1 + else m.input_info[outer_indices[0]].mutates_storage_metadata, + mutations_under_no_grad_or_inference_mode=mutations_under_no_grad_or_inference_mode, + is_leaf=any_leaf, + requires_grad=requires_grad, + keep_input_mutations=m.keep_input_mutations, + ) + input_infos.append(inpt_info) + + # Find any inputs that fulfill the following criteria: + # (1) They are part of a synthetic base (because they alias another input, + # and at least one input experiences a data mutation) + # (2) They experience a metadata mutation + outer_aliased_arg_idx_with_metadata_mutations = [ + outer_idx + for outer_idx, inpt_info in enumerate(m.input_info) + if inpt_info.mutates_metadata + and not isinstance(synthetic_base_info[outer_idx], int) + ] + + # grab the original requires grad info on the outputs, except the ones from the mutated inputs + input_metadata_output_info = [ + OutputAliasInfo( + output_type=OutputType.alias_of_input, + raw_type=FunctionalTensor, + dynamic_dims={ + i + for i, s in enumerate(outer_args[outer_idx].shape) + if not is_concrete_int(s) + }, + base_idx=synthetic_base_info[outer_idx][0], # type: ignore[index] + requires_grad=outer_args[outer_idx].requires_grad, + ) + for outer_idx in outer_aliased_arg_idx_with_metadata_mutations + ] + existing_output_infos = [] + for o in m.output_info: + new_base_idx = ( + None + if o.base_idx is None + else ( + synthetic_base_info[o.base_idx] + if isinstance(synthetic_base_info[o.base_idx], int) + else synthetic_base_info[o.base_idx][0] # type: ignore[index] + ) + ) + # If base_idx is changed for OutputType.is_input, we need to update the output type to reflect the change + new_output_type = ( + OutputType.alias_of_input + if o.output_type == OutputType.is_input and o.base_idx != new_base_idx + else o.output_type + ) + existing_output_infos.append( + OutputAliasInfo( + output_type=new_output_type, + raw_type=o.raw_type, + dynamic_dims=o.dynamic_dims, + # Map the input idx pre-synthetic-bases to the new idx post-synthetic-bases + base_idx=new_base_idx, # type: ignore[arg-type] + requires_grad=o.requires_grad, + ) + ) + + inner_mutated_tangents = [ + x + for inner_idx, x in enumerate(inner_args) + if input_infos[inner_idx].mutates_data and input_infos[inner_idx].requires_grad + ] + + output_info = existing_output_infos + input_metadata_output_info + # Regenerate traced tangents to include mutated inputs including synthetic bases + traced_tangents = ( + inner_mutated_tangents + m.traced_tangents[len(inner_mutated_tangents) :] + ) + + return ( + ViewAndMutationMeta( + input_info=input_infos, + output_info=output_info, + num_intermediate_bases=m.num_intermediate_bases, + keep_input_mutations=m.keep_input_mutations, + traced_tangents=traced_tangents, + # We are guaranteed not to get here, since synthetic_base codepaths are not supported today with subclass inputs. 
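+            # Given the guarantee above, the subclass metadata fields below are
+            # simply reset to their empty defaults.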
+ subclass_inp_meta=[], + subclass_fw_graph_out_meta=[], + subclass_tangent_meta=[], + is_train=m.is_train, + ), + outer_aliased_arg_idx_with_metadata_mutations, + ) + + +def _get_last_mem_address(x): + out = x.storage_offset() + for size, stride in zip(x.size(), x.stride()): + out += (size - 1) * stride + return out + + +# Assumption: x and y are known to share a storage, and we are trying to determine +# if their memory is actually completely disjoint, based on sizes/strides/storage_offset +def _tensors_definitely_do_not_overlap(x, y): + if x is y: + return False + if x.numel() == 0 or y.numel() == 0: + return True + + # Make x always on the left + if x.storage_offset() > y.storage_offset(): + x, y = y, x + # Short-circuit in the "obvious" overlapping case: both tensors are contiguous + if x.is_contiguous() and y.is_contiguous(): + if x.storage_offset() + x.numel() > y.storage_offset(): + # definitely overlap + return False + else: + # definitely no overlap + return True + + # Short-circuit: if last memory address of x is < start of y, then not overlapping. + x_last = _get_last_mem_address(x) + if x_last < y.storage_offset(): + return True + + if x.dim() == 2 and y.dim() == 2 and x.stride(1) == 1 and y.stride(1) == 1: + # This cases is needed for the shampoo optimizer. + # All tensors are 2d (non-contiguous), have the same outer stride, and have an inner stride of 1 + # (so rows are contiguous) + if x.stride(0) == y.stride(0): + offset_delta = y.storage_offset() - x.storage_offset() + if offset_delta < x.size(1): + # definitely overlaps (row 0 of y overlaps with row 0 of x) + # Example: + # base = torch.arange(32).reshape(4, 8) + # x = base.narrow(1, 0, 4) + # x: size=(4, 4), stride=(8, 1), offset=0 + # y = base.narrow(1, 3, 4) + # y: size=(4, 4), stride=(8, 1), offset=3 + return False + x_total_elems_covered = x.stride(0) * (x.size(0) - 1) + x.size(1) + if x_total_elems_covered <= offset_delta: + # definitely does not overlap (last byte of x is before start of y) + # Example: + # x: size=(4, 4), stride=(8, 1), offset=0 (last byte is 27) + # y: size=(4, 4), stride=(8, 1), offset=28 (start byte is 28) + return True + # At this point, we want to check if the 0th row of y + # overlaps with **some** row of x. + # We can check this by shifting y backward by the shared stride, repeatedly, + # until the first row of y is before the first row of x. + # Then we can check if these rows overlap. + # We can accomplish this by modding our offset by the stride. 
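+            # (Illustrative arithmetic, consistent with the example below: with
+            # x.stride(0) == 9 and offset_delta == 22, 22 % 9 == 4, i.e. row 0 of y
+            # starts 4 elements into one stride-period of x, which is what the
+            # interval check below inspects.)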
+ offset_delta_mod = offset_delta % x.stride(0) + # Example: + # 0 1 2 3 + # 9 10 11 12 + # 18 19 20 21 + # 27 28 29 30 + # x: size=(4, 4), stride=(9, 1), offset=0 + # y: size=(4, 4), stride=(9, 1), offset=22 (this would not overlap) + # y: size=(4, 4), stride=(9, 1), offset=23 (this would not overlap) + # y: size=(4, 4), stride=(9, 1), offset=24 (this would overlap) + # y: size=(4, 4), stride=(9, 1), offset=25 (this would overlap) + # If the interval [modded_offset, modded_offset + x_size] falls entirely + # without + if offset_delta_mod + y.size(1) <= x.stride(0): + return True + else: + return False + return False + + +def compute_overlapping_inputs(fwd_inputs, aliased_input_indices): + actual_aliased_indices = set() + for j in range(len(aliased_input_indices)): + for i in range(j): + i_ = aliased_input_indices[i] + j_ = aliased_input_indices[j] + if not _tensors_definitely_do_not_overlap(fwd_inputs[i_], fwd_inputs[j_]): + actual_aliased_indices.add(i_) + actual_aliased_indices.add(j_) + return actual_aliased_indices + + +def _graph_input_names(gm): + return [node.name for node in gm.graph.nodes if node.op == "placeholder"] + + +def _graph_output_names(gm): + output_node = next(iter(reversed(gm.graph.nodes))) + assert output_node.op == "output" and len(output_node.args) == 1 + return_args = output_node.args[0] + return [getattr(return_arg, "name", None) for return_arg in return_args] + + +def create_graph_signature( + fx_g: torch.fx.GraphModule, + fw_metadata: ViewAndMutationMeta, + in_spec: pytree.TreeSpec, + out_spec: pytree.TreeSpec, + *, + user_args_flat: List[Tensor], + params_and_buffers_flat: List[Tensor], + param_names: List[str], + buffer_names: List[str], + trace_joint: bool, + num_user_fw_outs: Optional[int], + loss_index: Optional[int], +) -> GraphSignature: + # Retrieve graph input names + graph_input_names = _graph_input_names(fx_g) + # Retrieve graph output names + graph_output_names = _graph_output_names(fx_g) + + num_params_buffers = len(param_names) + len(buffer_names) + num_tokens = len(fw_metadata.tokens) + # We have enough restrictions on the graph (no de-duping, synthetic bases, etc), + # Such that # graph inps = # user inps + # params + # buffers + num_user_args = len(graph_input_names) - num_params_buffers - num_tokens + + if trace_joint: + assert num_user_fw_outs is not None + num_fw_outs = num_user_fw_outs + fw_metadata.num_mutated_inp_runtime_indices + backward_output_names = graph_output_names[num_fw_outs:] + + grad_index = itertools.count(0) + gradients_to_parameters = { + backward_output_names[next(grad_index)]: param_names[i] + for i, param in enumerate(params_and_buffers_flat) + if param.requires_grad + } + + gradients_to_user_inputs = { + backward_output_names[next(grad_index)]: graph_input_names[ + i + len(params_and_buffers_flat) + ] + for i, user_input in enumerate(user_args_flat) + if user_input.requires_grad + } + + assert len(gradients_to_parameters) + len(gradients_to_user_inputs) == len( + backward_output_names + ) + + # Check that we have fully accounted for all graph outputs + backward_signature = BackwardSignature( + gradients_to_parameters, + gradients_to_user_inputs, + graph_output_names[loss_index], + ) + else: + backward_signature = None + num_user_fw_outs = ( + len(graph_output_names) + - fw_metadata.num_mutated_inp_runtime_indices + - num_tokens + ) + + return GraphSignature.from_tracing_metadata( + in_spec=in_spec, + out_spec=out_spec, + graph_input_names=graph_input_names, + graph_output_names=graph_output_names, + 
view_mutation_metadata=fw_metadata, + named_parameters=param_names, + named_buffers=buffer_names, + num_user_inputs=num_user_args, + num_user_outputs=num_user_fw_outs, + loss_index=loss_index, + backward_signature=backward_signature, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/logging_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/logging_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..28f82555ac974a46f94780473c9d19af81575423 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/logging_utils.py @@ -0,0 +1,135 @@ +""" +Contains utils for logging in AOTAutograd, including managing the names of the graphs under +compilation, capturing user-friendly tracebacks, and debug messages. +""" + +import collections +from contextlib import contextmanager +from typing import List, Tuple + +import torch +import torch.fx.traceback as fx_traceback + +# This is a list since looking forward, we can have this arbitrarily nested. +graph_being_compiled: List[str] = [] +# TODO: It would be nice to reset the numbering every time aot_id goes +# up, but this is annoying to do right now (because we don't know if +# an aot_id will come back from the dead), so right now this also happens +# to be a globally unique number too (at the cost of wobbling if you change +# how the graphs compile) +nth_graph: int = 0 +model_name: str = "model" + + +def set_model_name(name): + global model_name + model_name = name + + +def get_aot_compilation_context() -> Tuple[List[str], str, int]: + return list(graph_being_compiled), model_name, nth_graph + + +def get_aot_graph_name() -> str: + """ + Returns the name of the graph being compiled. + """ + global model_name, graph_being_compiled, nth_graph + return f"{model_name}__{'_'.join(graph_being_compiled)}_{nth_graph}" + + +get_graph_being_compiled = get_aot_graph_name + + +@contextmanager +def track_graph_compiling(aot_config, graph_name): + global graph_being_compiled + # TODO: Don't shove the aot_id in here; set it in the context + graph_being_compiled = [f"{aot_config.aot_id}_{graph_name}"] + try: + yield + finally: + global nth_graph + nth_graph += 1 + graph_being_compiled = [] + + +# Set up hooks so that during backward the fx's stack_trace is properly set +callback_set = False + + +def setup_stacktrace_preservation_hooks(roots: List): + def iter_graph(roots): + if not roots: + return + seen = set() + q = collections.deque() # type: ignore[var-annotated] + for node in roots: + if node is not None and node not in seen: + seen.add(node) + q.append(node) + + while q: + node = q.popleft() + for fn, _idx in node.next_functions: + if fn in seen or fn is None: + continue + seen.add(fn) + q.append(fn) + + yield node + + def get_callback(saved_stack_): + def callback(): + global callback_set + fx_traceback.set_stack_trace(saved_stack_) + callback_set = False + + return callback + + def get_prehook(stack_, seq_nr): + def prehook(grad_output): + global callback_set + + if not callback_set: + torch.autograd.variable.Variable._execution_engine.queue_callback( # type: ignore[attr-defined] + get_callback(fx_traceback.format_stack()) + ) + callback_set = True + + fx_traceback.set_stack_trace(stack_) + fx_traceback.set_grad_fn_seq_nr(seq_nr) + + return prehook + + def get_posthook(special_stack_, seq_nr): + def posthook(grad_input, grad_output): + fx_traceback.set_stack_trace(special_stack_) + fx_traceback.reset_grad_fn_seq_nr() + + return posthook + 
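+    # The loop below walks the autograd graph reachable from `roots` and attaches
+    # the pre/post hooks defined above, so that each backward node temporarily
+    # restores the stack trace that was recorded for the corresponding forward node.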
+ for node in iter_graph(roots): + forward_node_stack = node.metadata.get("traceback_", []) + node.register_prehook(get_prehook(forward_node_stack, node._sequence_nr())) + + special_stack = forward_node_stack.copy() + special_stack.append( + "Gradient addition node due to multiple use of tensor around:" + ) + node.register_hook(get_posthook(special_stack, node._sequence_nr())) + + +def describe_input(i, aot_config): + if i < aot_config.num_params_buffers: + return f"parameter/buffer {i}" + else: + return f"input {i - aot_config.num_params_buffers}" + + +def format_guard_bug_msg(aot_config, expected): + return ( + f"At compilation time, graph {aot_config.aot_id} was compiled under the " + f"assumption that {expected}, but at runtime this was not the case. " + "This indicates a guard bug in AOTAutograd or Dynamo, please file a bug to PyTorch." + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py new file mode 100644 index 0000000000000000000000000000000000000000..e53bbeabb7c199d2332d1b2d8e59af64fe960fde --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py @@ -0,0 +1,1246 @@ +# mypy: ignore-errors + +import itertools +from contextlib import nullcontext +from functools import partial, wraps +from typing import Any, Callable, Dict, List, Optional, Tuple +from unittest.mock import patch + +import torch +import torch.nn as nn +import torch.utils._pytree as pytree +import torch.utils.dlpack +from torch import Tensor +from torch._dispatch.python import enable_python_dispatcher +from torch._dynamo import compiled_autograd +from torch._dynamo.utils import dynamo_timed, preserve_rng_state +from torch._guards import detect_fake_mode +from torch._subclasses import FakeTensor, FakeTensorMode +from torch.fx.experimental.proxy_tensor import make_fx +from torch.fx.experimental.symbolic_shapes import ( + ShapeEnv +) +from torch.utils._python_dispatch import is_traceable_wrapper_subclass +from torch._decomp.decompositions_for_rng import PhiloxStateTracker, rng_decompositions +from . 
import config +from .partitioners import default_partition + +from ._aot_autograd.utils import ( # noqa: F401 + strict_zip, + _get_symint_hints, + KNOWN_TYPES, + partial_flatten_asdict, + normalize_as_list, + _get_autocast_states, + make_boxed_func, + make_boxed_compiler, + call_func_at_runtime_with_args, + create_tree_flattened_fn, + maybe_to_fresh_input, +) +from ._aot_autograd.logging_utils import ( # noqa: F401 + graph_being_compiled, + nth_graph, + model_name, + set_model_name, + get_aot_compilation_context, + get_aot_graph_name, + get_graph_being_compiled, + track_graph_compiling, + callback_set, + setup_stacktrace_preservation_hooks, + describe_input, + format_guard_bug_msg, +) +from ._aot_autograd.functional_utils import ( # noqa: F401 + is_fun, + to_fun, + from_fun, + sync_functional_tensor, + has_metadata_mutation, + has_data_mutation, + are_all_mutations_hidden_from_autograd, + are_all_mutations_under_no_grad_or_inference_mode, + gen_alias_from_base, + assert_functional_graph, + _check_if_mutation_can_be_in_graph, +) +from ._aot_autograd.schemas import ( # noqa: F401 + OutputType, + OutputAliasInfo, + MutationType, + InputAliasInfo, + SubclassCreationMeta, + ViewAndMutationMeta, + SubclassMeta, + TensorAlias, + BackwardSignature, + GraphOutputName, + GraphInputName, + FQN, + GraphSignature, + AOTConfig, +) +from ._aot_autograd.subclass_utils import ( # noqa: F401 + requires_subclass_dispatch, + unwrap_tensor_subclasses, + wrap_tensor_subclasses, + wrap_tensor_subclasses_maybe_joint, + create_metadata_for_subclass, +) +from ._aot_autograd.collect_metadata_analysis import ( # noqa: F401 + run_functionalized_fw_and_collect_metadata, +) +from ._aot_autograd.input_output_analysis import ( # noqa: F401 + remove_dupe_metadata, + create_synthetic_base_metadata, + _tensors_definitely_do_not_overlap, + compute_overlapping_inputs, + create_graph_signature, +) +from ._aot_autograd.traced_function_transforms import ( # noqa: F401 + fn_input_mutations_to_outputs, + fn_prepped_for_autograd, + create_functionalized_fn, + create_functionalized_rng_ops_wrapper, + aot_dispatch_subclass, + create_functional_call, + create_joint, +) +from ._aot_autograd.runtime_wrappers import ( # noqa: F401 + create_runtime_wrapper, + functionalized_rng_runtime_epilogue, + aot_dispatch_subclass_wrapper, + aot_wrapper_dedupe, + aot_wrapper_synthetic_base, + merge_view_inputs, +) +from ._aot_autograd.dispatch_and_compile_graph import ( # noqa: F401 + aot_dispatch_base_graph, + aot_dispatch_autograd_graph, +) +from ._aot_autograd.jit_compile_runtime_wrappers import ( # noqa: F401 + aot_dispatch_base, + aot_dispatch_autograd, +) + +zip = strict_zip + +# This global counter increments every time we compile a graph with +# AOTAutograd. You can use this to correlate runtime error messages +# with compile time (e.g., if you get an error at runtime saying +# compiled graph 3 failed, you can set a breakpoint at compile time +# for this graph number to investigate further at compile time.) +# +# NB: this is different from get_aot_compilation_context, which tracks +# each underlying graph that is compiled. 
In contrast, AOT_COUNTER +# corresponds to top-level invocations of aot_module/aot_function; +# one counter is allocated per entire compiled block (but this block +# may involve compiling multiple subgraphs; e.g., for forwards/backwards) +AOT_COUNTER = itertools.count() + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# AOT Autograd contains a pretty non-trivial amount of logic to handle edge cases around aliasing and mutation +# that are external to the graph (they show up as side effects in some way when you run the graph). +# +# Take a look at `test_aotdispatch.py TestAOTAutograd.test_input_mutation*` tests for some examples functions +# and what they're compiled graphs looks like. +# Below is a very long comment detailing several edge cases, and showing how AOT Autograd handles them. +# +# Note [AOT Autograd: input data mutations] +# +# If we compile a function that mutates inputs, then those input mutations are real side effects +# that a user expects to see after running the compiled graph. +# However, the graph that we want to send to a backend needs to be *entirely* functional. +# The way we reconcile this difference is that we remove the mutations completely from the graph that we compile +# but we update the graph to return (updated_inputs, user_outputs). +# In the epilogue that runs after the compiled graph is executed, we copy the updated inputs back to the originals. +# +# Example: original user code: +# def f(x): +# x.mul_(2) +# out = x.mul(3) +# return out +# +# After AOT Autograd compiles, we end up with a: +# (a) compiled graph +# (b) autograd.Function.forward() method, that executes the compiled graph +# (c) wrapper function, that calls the autograd.Function.forward() and performs the epilogue +# +# The output of (a, b, c) are all written below. +# +# def compiled_forward_graph(x): +# x_updated = x.mul(2) +# out = x_updated.mul(3) +# return x_updated, out +# +# # x_updated gets a gradient in the compiled backward +# def compiled_backward_graph(grad_x_updated, grad_out): +# grad_x = ... +# return grad_x +# +# def autograd.Function.forward(x): +# x_updated, out = compiled_forward_graph(x) +# return x_updated, out +# +# def compiled_wrapper(x): +# x_updated, out = autograd.Function.apply(x) +# x.copy_(x_updated) +# return out +# +# Another important thing to note is that updated inputs (due to data mutations) *do* participate +# in the compiled backward graph! Since the compiled forward graph gets N extra outputs +# (due to updated inputs showing up as graph outputs), +# The compiled backward gets an additional N inputs. +# That way, during the x.copy_(x_updated) bit in the epilogue, gradients will flow from the updated input +# back to the original input. + + +# Note [AOT Autograd: input metadata mutations] +# +# For the same reason as input mutations, we also don't put input metadata mutations in the graph. 
+# Instead, we return the updated version of the input (a view), and mutate the input's metadata outside of the graph +# +# Example: original user code: +# def f(x): +# x.t_() +# out = x.mul(3) +# return out +# +# AOT Autograd output (compiled graph, autograd.Function.forward(), wrapper function): +# def compiled_forward_graph(x): +# x_updated = x.t() +# out = x_updated.mul(3) +# return x_updated, out +# +# # x_updated does *not* get a gradient in the compiled backward +# def compiled_backward_graph(grad_out): +# grad_x = ... +# return grad_x +# +# def autograd.Function.forward(x): +# x_updated, out = compiled_forward_graph(x) +# return x_updated, out +# +# def compiled_wrapper(x): +# x_updated, out = autograd.Function.apply(x) +# x.as_strided_(x_updated) +# return out + + +# Note [AOT Autograd: outputs aliasing inputs or intermediates!] +# +# AOT Autograd needs special handling for outputs that alias graph inputs or intermediates! +# Why? +# (1) autograd.Function.forward() has a limitation, where views that returned in the forward cannot later be mutated. +# (2) views don't need to be compiled in the graph anyway - it's cheap to generate them outside of the compiled graph, +# in an epilogue. +# For outputs that alias inputs, we do the following: +# (a) *still* return the aliased output as a graph output +# (b) In the AOT Autograd wrapper/epilogue, we don't return that aliased output. Instead, we use it to regenerate the output. +# +# For outputs that alias *intermediates*, we do the following: +# (a) Return the output in the compiled forward, **and** return it's ._base (a graph intermediates) as an output in the forward +# (b) Use (output, graph_intermediate) to regenerate the alias, and return that to the user (instead of the compiled fw output). +# You might wonder why we return the aliased output directly in the graph (and making the graph compute it), +# only to not return it and instead generate a fresh alias off of the intermediate, +# instead of (say) just storing metadata about the size/stride of the output somewhere to generate the alias. There are two reasons: +# (1) Getting the actual alias tensor allows us to use view-replay to generate the alias, instead of an as_strided() call +# (2) Inductor (and other backends) are free to change the memory format of graph outputs, if it results in better performance. +# This can result in problems if a user later tries to .view() that output expecting it to have one set of strides, +# when it has a different set of strides. +# By including the view op directly in the graph, inductor takes that into account when deciding what memory format +# the graph intermediate should be. +# +# Another important thing to note is how our traced backward() graph handles aliases. +# (this applies to outputs aliasing inputs, outputs aliasing intermediates, +# *and* updated inputs returned in the compiled forward due to metadata-only mutations). +# Any outputs that alias (either inputs or intermediates) do NOT participate in the compiled backward graph +# It would be wasteful to include them in the compiled backward(), because we regenerate them eagerly +# at the end of the forward. 
+# +# Example: original user code: +# def f(x): +# out1 = x.t() +# intermediate = x.mul(2) +# out2 = intermediate.view(-1) +# return out1, out2 +# +# AOT Autograd output (compiled graph, autograd.Function.forward(), wrapper function): +# def compiled_forward_graph(x): +# out1 = x.t() +# intermediate = x.mul(2) +# out2 = intermediate.view(-1) +# # the compiled graph also returns the intermediate +# return out1, out2, intermediate +# +# # intermediate gets a gradient in the compiled backward. +# # both output aliases (out1 and out2) do not. +# def compiled_backward_graph(grad_intermediate): +# grad_x = ... +# return grad_x +# +# def autograd.Function.forward(x): +# out1, out2, intermediate = compiled_forward_graph(x) +# return out1, out2, intermediate +# +# def compiled_wrapper(x): +# out1, out2, intermediate = autograd.Function.apply(x) +# # regenerate out1 from the input +# out1_regenerated = out1._view_func(x) +# # regenerate out1 from the intermediate +# out2_regenerated = out2._view_func(intermediate) +# return out1_regenerated, out2_regenerated + + +# Note [AOT Autograd: mutations to inputs that alias other inputs] +# +# Another edge case that is (only partially) handled today is when an input is mutated, but itself aliases another input. +# AOT Autograd needs to **ensure** that functionalization knows that the two inputs are aliased to each other. +# That way, when the aliased input is accessed later in the graph, functionalization knows to "update" the alias +# given the mutation that occurred. +# +# This is handled by updating the calling convention: we create a "synthetic base" that becomes a new input +# in the compiled function, and we regenerate the original (aliased) inputs directly off of the base +# inside of the compiled function. +# +# This logic is fully encapsulated in aot_wrapper_synthetic_base() +# +# Example: original user code: +# def f(x, x_view): +# x.mul_(2) +# out = x * x_view +# return out +# f(x, x.view(-1)) +# +# AOT Autograd output (compiled graph, autograd.Function.forward(), wrapper function): +# def compiled_forward_graph(base) +# x = generate_x(base) +# x_view = generate_x_view(base) +# x_updated = x.mul(2) +# x_view_updated = x_updated.view(-1) +# out = x_updated * x_view_updated +# return x_updated, out +# +# # The calling convention change from (aliases) -> (base) happens +# # *outside* of the autograd.Function.forward(). +# # That means the forward() only has 1 input (base), +# # and the backward() only has 1 output (grad_base) +# def compiled_backward_graph(grad_out): +# grad_base = ... +# return grad_base +# +# def autograd.Function.forward(base): +# x_updated, out = compiled_forward_graph(base) +# return x_updated, out +# +# # The compiled wrapper is where we create synthetic bases. +# # The info on which inputs are mutated is also tracked *before* synthetic base creation. +# def compiled_wrapper(x, x_view): +# base = merge_view_inputs(x, x_view) +# x_updated, out = autograd.Function.apply(base) +# # x and x_view are aliased in eager mode, so this mutation to x will automatically affect x_view. +# x.copy_(x_updated) +# return out + + +# Note [AOT Autograd: Views to avoid tangents aliasing inputs] +# +# We view every forward output when creating out tangent tensors to handle the problematic +# case in which a subclass does extra aliasing between graph outputs/inputs in a way that +# is not visible above the sublass. 
+# +# Ordinarily, when constructing the joint function that we want to trace in AOTAutograd, +# we're guaranteed that the tangent tensors that we pass +# into the joint are distinct tensors from the primals. This is because when +# decide which forward outputs to create tangents for, we only create tangents +# for forward outputs that are not aliases of inputs (See Note +# [AOT Autograd: outputs aliasing inputs or intermediates!]). +# +# However, when wrapper tensor subclasses enter the picture, it is possible +# to have an output of the forward that is a subclass that is not an +# input / alias of an input, but one of its inner tensors is an alias! +# NestedTensor is an example: Performing an out-of-place pointwise op on a +# NestedTensor constructs a fresh NestedTensor that holds onto the input's +# offsets tensor directly. +# +# Having tangent tensors that are the same as the (primal) forward inputs, +# can cause problems during tracing as make_fx() will specialize on our +# duplicate inputs: If we passed in the same tensor for primals_1 and +# tangents_1 during tracing, make_fx() will happily sub out all usages of +# tangents_1 with primals_1 in the graph, which is not what we want. +# +# To work around this, we view every forward output when creating out tangent +# tensors so that tangents can never be the same as forward inputs even if +# forward inputs alias forward outputs. + +# Note [Side-Effectful Tokens in AOTAutograd] +# +# We allow some some side-effectful operators in +# the post-AOTAutograd (functional) graph, such as prints and torchbind operations. +# To ensure that these side-effects are compatible to future graph passes that +# assume that the graph is functional, we will thread "effect tokens" to show +# data dependence between these side-effectful operators. Practically speaking, +# effect tokens are just dummy values (torch.tensor([])). The graph would look +# like the following: +# +# def gm(self, token0, reader): +# token1, frame = with_token(ordered_effect_op, (reader,), token0) +# frame = frame * 2 +# token2, frame2 = with_token(ordered_effect_op, (reader,), token1) +# frame2 = frame2 * 2 +# return token2, frame, frame2 +# +# We will pass the token as an input to the graph, thread it through +# side-effectful operators using the `with_effects` high order operator, and then +# return the updated token as an output. +# So the signature of the graph input would look something like +# (*tokens, *params_buffers, *user_inputs), and the signature of the graph +# output would look something like (*tokens, *outputs). + +# +# +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +aot_autograd_decompositions = {} + +@dynamo_timed +def create_aot_dispatcher_function( + flat_fn, flat_args: List[Any], aot_config: AOTConfig +): + """ + Traces the forward and backward graphs of the attr:`flat_fn` to generate a + joint graph. The joint graph is an Fx graph with Aten ops. Please refer to + the tracing mechanism to understand the graph capturing details. + + The joint graph is then passed through attr:`partition_fn` to isolate the + forward and backward portions, which are then respectively compiled via the + provided attr:`fw_compiler` and attr:`bw_compiler`. + + The resulting compiled forward and backward graphs are then wrapped up in a + ``torch.autograd.Function`` object. 
+ + The calling convention here is that the first aot_config.num_params_buffers + inputs in flat_args are parameters and buffers, and the rest are inputs. + + We use this to assume that parameters/buffer's shapes don't change. + + Note: this function is used both by aot_function and aot_export (controlled by aot_config.is_export) + When aot_config.is_export is True, we return an FX graph + metadata + When aot_config.is_export is False, we return an ordinary runtime function + """ + + # This is the main entry point. + # TODO: Chillee argues that dynamo itself should pass in fake tensors to + # the list of arguments when compiling; at the moment we do not do this + + if aot_config.decompositions is None: + aot_config.decompositions = {} + + + aot_config.decompositions = { + **aot_autograd_decompositions, + **aot_config.decompositions, + } + + if config.functionalize_rng_ops: + # Update the decompositions with functionalized random decompositions + aot_config.decompositions = { + **rng_decompositions, + **aot_config.decompositions, + } + + # Check flat_args to see if they're already fake. If so, use that fake + # mode instead. + + fake_mode = detect_fake_mode(flat_args) + if fake_mode is None: + shape_env = ShapeEnv() if aot_config.dynamic_shapes else None + fake_mode = FakeTensorMode(shape_env=shape_env) + else: + shape_env = fake_mode.shape_env + + python_dispatcher_mode = ( + enable_python_dispatcher() if shape_env is not None else nullcontext() + ) + + with torch.autograd.set_multithreading_enabled( + False + ), preserve_rng_state(), fake_mode, python_dispatcher_mode, PhiloxStateTracker(): + + def process_inputs(flat_args): + def convert(idx, x): + if shape_env is not None: + from torch._dynamo.source import ConstantSource + if isinstance(x, int): + # We always specialize on scalar values in export. + if aot_config.is_export: + return x + source = ConstantSource(f"sym_{idx}") + return shape_env.create_symintnode( + shape_env.create_symbol(x, source), + hint=x, + source=source + ) + if not isinstance(x, torch.Tensor): + return x + if isinstance(x, FakeTensor): + assert x.fake_mode is fake_mode + return x + if is_traceable_wrapper_subclass(x): + attrs, _ = x.__tensor_flatten__() + if all(isinstance(getattr(x, attr), FakeTensor) for attr in attrs): + assert all(getattr(x, attr).fake_mode is fake_mode for attr in attrs) + return x + + + # see note [Tensor Fakification and Symbol Caching] + symbolic_context = None + source = None + if tracing_context := torch._guards.TracingContext.try_get(): + if x in tracing_context.tensor_to_context: + symbolic_context = tracing_context.tensor_to_context[x] + source = symbolic_context.tensor_source + if ( + idx < aot_config.num_params_buffers + and config.static_weight_shapes + and not symbolic_context + ): + # TODO: Ensure that this codepath is never exercised from + # Dynamo + return fake_mode.from_tensor(x, static_shapes=True) + + return fake_mode.from_tensor( + x, static_shapes=False, symbolic_context=symbolic_context, source=source + ) + return [convert(idx, x) for idx, x in enumerate(flat_args)] + + fake_flat_args = process_inputs(flat_args) + + needs_autograd = ( + any(x.requires_grad for x in fake_flat_args if isinstance(x, Tensor)) + and torch.is_grad_enabled() + ) + + with enable_python_dispatcher(): + # Patch set_rng_state as set_rng_state with fake tensors is + # nonsensical. This does not affect the collection of metadata. 
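+            # This functionalized run over the fake inputs produces the
+            # ViewAndMutationMeta consumed below: which inputs are mutated,
+            # which outputs alias inputs or intermediates, and so on.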
+ with patch("torch.cuda.set_rng_state", lambda *args: None): + fw_metadata = run_functionalized_fw_and_collect_metadata( + flat_fn, + keep_input_mutations=aot_config.keep_inference_input_mutations, + is_train=needs_autograd, + pre_dispatch=aot_config.pre_dispatch, + )(*fake_flat_args) + + req_subclass_dispatch = requires_subclass_dispatch(fake_flat_args, fw_metadata) + + if needs_autograd and not any(x.requires_grad for x in fw_metadata.output_info): + # We realized that none of the outputs require grad, + # so we actually have an inference graph. + needs_autograd = False + # A bit silly: right now in the subclass codepath, our ViewAndMutationMeta + # changes depending on whether we pass in is_train / keep_input_mutations, + # so we're forced to recompute the metadata. + # TODO: refactor the subclass path of run_functionalized_fw_and_collect_metadata + # so that this is unnecessary. + if req_subclass_dispatch: + fw_metadata = run_functionalized_fw_and_collect_metadata( + flat_fn, + keep_input_mutations=aot_config.keep_inference_input_mutations and not needs_autograd, + is_train=needs_autograd, + pre_dispatch=aot_config.pre_dispatch, + )(*fake_flat_args) + else: + fw_metadata = ViewAndMutationMeta( + input_info=fw_metadata.input_info, + output_info=fw_metadata.output_info, + num_intermediate_bases=fw_metadata.num_intermediate_bases, + keep_input_mutations=aot_config.keep_inference_input_mutations and not needs_autograd, + traced_tangents=fw_metadata.traced_tangents, + subclass_inp_meta=fw_metadata.subclass_inp_meta, + subclass_fw_graph_out_meta=fw_metadata.subclass_fw_graph_out_meta, + subclass_tangent_meta=fw_metadata.subclass_tangent_meta, + is_train=needs_autograd, + ) + + + if fw_metadata.num_intermediate_bases > 0: + assert not req_subclass_dispatch, f"""\ +torch.compile is currently being used with tensor subclass inputs: +{','.join([str(type(x)) for x in fake_flat_args])}. We are attempting to a compile a graph with two graph outputs +that alias one another, which is currently unsupported in the subclass use case. If you run into this, +please file a github issue""" + + if aot_config.is_export: + # aot_export: ban input metadata mutations for now to keep shared code paths simpler. + # Keeping .resize_() in the graph will require some work + # Allowing it but keeping the graph functional will require some calling convention changes. + if len([x for x in fw_metadata.input_info if x.mutates_metadata]) != 0: + raise RuntimeError(f"""\ +Found an input that received a metadata mutation, through e.g. a call to `.resize_()` or `.transpose_()`. +This is currently banned in the aot_export workflow. If you need this functionality, please file a github issue. + +fw_metadata={str(fw_metadata)}""") + # In export, banning data mutations on inputs that require grad for now. + # This should be rare, and is tricky to get right. When we trace the backward, + # we currently trace with autograd.grad instead of .backward(), which makes it difficult + # to ensure that we run autograd all the way through the input **before** it saw the mutation. + if len([x for x in fw_metadata.input_info if x.requires_grad and x.mutates_data]) != 0: + raise RuntimeError(f"""\ +Found a graph input that requires gradients, and received a mutation. +This is currently banned in the aot_export workflow. If you need this functionality, please file a github issue. + +fw_metadata={str(fw_metadata)}""") + if req_subclass_dispatch: + raise RuntimeError("""\ +aot_export is not currently supported with traceable tensor subclass. 
+If you need this feature, please comment on """) + + # Need to decide on a strategy for functionalized RNG: toggling via global config seems bad, + # and turning it on will require a non-trivial calling convention change for any export runtime. + if config.functionalize_rng_ops: + raise RuntimeError("""\ +Functionalized RNG is not currently supported in the aot_export workflow. Please file a github issue, +or otherwise set torch._functorch.config.functionalize_rng_ops = False.""") + + # crappy version of dispatcher + # TODO: Do this properly + if needs_autograd: + # For now, aot_dispatch_autograd knows to explicitly return a graph + # when run with export, and an opaque callable otherwise. + # In theory we could factor these out, but I wanted to let the dust + # settle on how functionalized rng fits into export first. + compiler_fn = aot_dispatch_autograd_graph if aot_config.is_export else aot_dispatch_autograd + else: + # aot_dispatch_base_graph contains only the "graph bits", while aot_dispatch_base + # includes some extra work around handling a runtime epilogue. + compiler_fn = aot_dispatch_base_graph if aot_config.is_export else aot_dispatch_base + + compiler_fn = partial(aot_wrapper_synthetic_base, compiler_fn=compiler_fn, needs_autograd=needs_autograd) + compiler_fn = partial(aot_wrapper_dedupe, compiler_fn=compiler_fn) + # You can put more passes here + + compiled_fn = compiler_fn(flat_fn, fake_flat_args, aot_config, fw_metadata=fw_metadata) + if aot_config.is_export: + # During export, we don't get back a callable - we get back the raw fx graph + # (either a joint or an inference-only graph) + assert isinstance(compiled_fn, torch.fx.GraphModule) + return compiled_fn, fw_metadata + + if not hasattr(compiled_fn, "_boxed_call"): + compiled_fn = make_boxed_func(compiled_fn) + + return compiled_fn + + +def aot_function( + fn: Callable, + fw_compiler: Callable, + bw_compiler: Optional[Callable] = None, + partition_fn: Callable = default_partition, + decompositions: Optional[Dict] = None, + num_params_buffers: int = 0, + keep_inference_input_mutations: bool = False, + inference_compiler: Optional[Callable] = None, + *, + # Whether or not to trace with dynamic shapes + dynamic=False, + enable_log=True, +) -> Callable: + """ + Traces the forward and backward graph of :attr:`fn` using torch dispatch + mechanism, and then compiles the generated forward and backward graphs + through :attr:`fw_compiler` and :attr:`bw_compiler`. + + :func:`aot_function` traces the forward and backward graph ahead of time, + and generates a joint forward and backward graph. :attr:`partition_fn` is + then used to separate out forward and backward graphs. The partitioner + function can be used to perform optimizations such as recomputation. One can + set `decompositions` dictionary to decompose the operators into a sequence + of core or simpler operators supported by the backend compilers. + + .. warning:: + This API is experimental and likely to change. + + Args: + fn (Callable): A Python function that takes one ore more arguments. Must + return one or more Tensors. + fw_compiler (Callable): A Python function that accepts an Fx graph with + Aten ops and input args, and returns a Callable that semantically is + equivalent to the input Fx graph. + bw_compiler (Optional[Callable]): A Python function that accepts an + Fx graph with Aten ops and input args, and returns a Callable that + semantically is equivalent to the input Fx graph. 
Default: None + (when None, it defaults to the :attr:`fw_compiler`) + partition_fn (Callable): A Python function that takes a joint forward + and backward graph, and partitions it into separate forward and + backward graphs. + decompositions (Dict): A dictionary to define the decomposition of + larger Aten ops into simpler or core Aten ops. + inference_compiler (Optional[Callable]): A Python function that accepts an + Fx graph with Aten ops and input args, and returns a Callable that + semantically is equivalent to the input Fx graph. inference_compiler is invoked + if no autograd is needed. Default: None + (when None, it defaults to the :attr:`fw_compiler`) + Returns: + Returns a ``Callable`` that retains the eager behavior of the original + :attr:`fn`, but with forward and backward graph compiled via + :attr:`fw_compile` and :attr:`bw_compile`. + + A simple example usage of :func:`aot_function` is as follows. This example + will print the forward and backward graphs of the function ``fn`` + + >>> fn = lambda x : x.sin().cos() + >>> def print_compile_fn(fx_module, args): + >>> print(fx_module) + >>> return fx_module + >>> aot_fn = aot_function(fn, print_compile_fn) + >>> x = torch.randn(4, 5, requires_grad=True) + >>> aot_fn(x) + """ + + if bw_compiler is None: + bw_compiler = fw_compiler + if inference_compiler is None: + inference_compiler = fw_compiler + aot_config = AOTConfig( + fw_compiler=fw_compiler, + bw_compiler=bw_compiler, + inference_compiler=inference_compiler, + partition_fn=partition_fn, + decompositions=decompositions, + num_params_buffers=num_params_buffers, + aot_id=next(AOT_COUNTER), + keep_inference_input_mutations=keep_inference_input_mutations, + dynamic_shapes=dynamic, + aot_autograd_arg_pos_to_source=None, + is_export=False, + no_tangents=False, + enable_log=enable_log, + ) + cached_res = None + + @wraps(fn) + def returned_function(*args, **kwargs): + nonlocal cached_res + # Now flatten the tensor args + flat_args = pytree.arg_tree_leaves(*args, **kwargs) + + # Compile the function and save it in the cache + if cached_res is None: + flat_fn, out_spec = create_tree_flattened_fn(fn, args, kwargs) + + compiled_fn = create_aot_dispatcher_function( + flat_fn, + flat_args, + aot_config, + ) + cached_res = (compiled_fn, out_spec) + + cached_fn, out_spec = cached_res + out = cached_fn(flat_args) + return out_spec.unflatten(out) + + return returned_function + + +def aot_module(mod: nn.Module, *args, **kwargs) -> nn.Module: + """ + Traces the forward and backward graph of :attr:`mod` using torch dispatch + tracing mechanism. It is wrapper function, that underneath uses + :func:`aot_function` to perform tracing and compilation. + + :func:`aot_module` lifts the parameters and buffers of ``nn.Module`` as inputs + to a new callable which is then compiled through :func:`aot_function`. + + .. warning:: + This API is experimental and likely to change. + + Args: + mod (Callable): A ``nn.Module`` module. + args : args to be passed to :func:`aot_function` + kwargs : kwargs to be passed to :func:`aot_function` + + Returns: + Returns a ``nn.Module`` that retains the eager behavior of the original + :attr:`mod`, but with forward and backward graph compiled. 
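+
+    A minimal illustrative sketch (mirroring the :func:`aot_function` example
+    above; ``print_compile_fn`` is a stand-in compiler that just prints the
+    traced graph):
+
+    >>> mod = nn.Linear(4, 5)
+    >>> def print_compile_fn(fx_module, args):
+    >>>     print(fx_module)
+    >>>     return fx_module
+    >>> aot_mod = aot_module(mod, print_compile_fn)
+    >>> out = aot_mod(torch.randn(3, 4, requires_grad=True))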
+ + """ + # See Note: [Fake Modules and AOTAutograd] + torch._dynamo.utils.assert_no_fake_params_or_buffers(mod) + + def functional_call(named_params, named_buffers, *args, **kwargs): + params_and_buffers = {**named_params, **named_buffers} + return torch.func.functional_call(mod, params_and_buffers, args, kwargs) + + named_params = dict(mod.named_parameters(remove_duplicate=False)) + named_buffers = dict(mod.named_buffers(remove_duplicate=False)) + num_params_buffers = len(named_params) + len(named_buffers) + compiled_f = aot_function( + functional_call, *args, num_params_buffers=num_params_buffers, **kwargs + ) + + class AOTModule(nn.Module): + def __init__(self): + super().__init__() + self.orig_module = mod + + def forward(self, *args, **kwargs): + return compiled_f( + named_params, + named_buffers, + *args, + **kwargs, + ) + + return AOTModule() + + +def aot_module_simplified( + mod: nn.Module, + args, + fw_compiler: Callable, + bw_compiler: Optional[Callable] = None, + partition_fn: Callable = default_partition, + decompositions: Optional[Dict] = None, + keep_inference_input_mutations=False, + inference_compiler: Optional[Callable] = None, +) -> nn.Module: + """ + This is the simplified or low overhead version of aot_module. For frontends + like TorchDynamo, the input functions/modules to AOT are static and have + unpacked inputs/outputs. This gives us an opportunity to remove the + (1) pytree overhead to parse inputs/outputs, + (2) AOT Autograd cache, + (3) Reading of params/buffers in every forward call + + :func:`aot_module_simplified` removes these overheads. + """ + params = { + **dict(mod.named_parameters(remove_duplicate=False)), + **dict(mod.named_buffers(remove_duplicate=False)), + } + params_flat, params_spec = pytree.tree_flatten(params) + params_flat = list(params_flat) + params_len = len(params_flat) + + functional_call = create_functional_call(mod, params_spec, params_len) + + if bw_compiler is None: + bw_compiler = fw_compiler + if inference_compiler is None: + inference_compiler = fw_compiler + + seen_sources = set() + + full_args = [] + # First, the params + full_args.extend(params_flat) + + if tracing_context := torch._guards.TracingContext.try_get(): + tracing_context.params_flat = params_flat + + aot_autograd_arg_pos_to_source = None + # Then, the params 1:1 mapped sources, if relevant. + if hasattr(mod, "_param_name_to_source"): + aot_autograd_arg_pos_to_source = [] + # We now know this came from dynamo, and (1) we care about guards, + # so setting up aot_autograd_arg_pos_to_source for downstream dedup guards + # can now be done safely. (2) Dynamo logic protects the 1:1 sizing below. + for name in params.keys(): + assert name in mod._param_name_to_source, f"{name} not found." + source = mod._param_name_to_source[name] + assert source not in seen_sources, source + seen_sources.add(source) + aot_autograd_arg_pos_to_source.append(source) + + # Next, the input args + full_args.extend(args) + + if hasattr(mod, "graph"): + # Non dynamo entrypoints can get to here... + for i, node in enumerate(mod.graph.nodes): + if node.op == "placeholder": + if hasattr(node, "_dynamo_source"): + # ... but not here! 
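+                    # Placeholders created by Dynamo carry a `_dynamo_source`;
+                    # record them positionally so the downstream guard dedup
+                    # logic can map each graph input back to its original source.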
+ if aot_autograd_arg_pos_to_source is None: + aot_autograd_arg_pos_to_source = [] + source = node._dynamo_source + assert source not in seen_sources, source + seen_sources.add(source) + aot_autograd_arg_pos_to_source.append(source) + + if aot_autograd_arg_pos_to_source is not None: + assert len(full_args) == len(aot_autograd_arg_pos_to_source) + + dynamic_shapes = False + for x in full_args: + if isinstance(x, FakeTensor): + dynamic_shapes = x.fake_mode.shape_env is not None + break + + aot_config = AOTConfig( + fw_compiler=fw_compiler, + bw_compiler=bw_compiler, + inference_compiler=inference_compiler, + partition_fn=partition_fn, + decompositions=decompositions, + num_params_buffers=params_len, + aot_id=next(AOT_COUNTER), + keep_inference_input_mutations=keep_inference_input_mutations, + dynamic_shapes=dynamic_shapes, + aot_autograd_arg_pos_to_source=aot_autograd_arg_pos_to_source, + is_export=False, + no_tangents=False, + ) + + with compiled_autograd.disable(): + compiled_fn = create_aot_dispatcher_function( + functional_call, + full_args, + aot_config, + ) + + # TODO: There is something deeply wrong here; compiled_fn running with + # the boxed calling convention, but aot_module_simplified somehow + # historically returned a function that was not the boxed calling + # convention. This should get fixed... + def forward(*runtime_args): + full_args = [] + full_args.extend(params_flat) + full_args.extend(runtime_args) + return compiled_fn(full_args) + + # Just for convenience + forward.zero_grad = mod.zero_grad + forward.named_parameters = mod.named_parameters + forward.named_buffers = mod.named_buffers + + return forward + + +def aot_export_module( + mod: nn.Module, + args, + *, + decompositions: Optional[Dict] = None, + # If true, we'll return a joint forward-backward graph, + # As well as metadata on the loss + gradients in the backward. + trace_joint: bool, + # If trace_joint is True, we expect your module to return a scalar loss. + # Your module can return multiple outputs, so you must specify which output the loss is. + output_loss_index: Optional[int] = None, + pre_dispatch: bool = False, + kwargs=None, +) -> Tuple[torch.fx.GraphModule, GraphSignature]: + """ + This function takes in a module, and returns: + (1) an FX graph that can be exported + (2) some metadata about the graph + + If `trace_joint=True` we will return a joint graph of the forward + backward. + + The traced FX graph will have the following properties compared to the original module: + (1) Inputs and outputs to the module will be pytree-flattened + (2) Parameters and buffers on the module will be lifted into graph inputs, + graph_inputs = (*parameters, *buffers, *user_inputs) + (3) The graph will be fully functionalized + (4) Any input mutations will be converted into additional outputs in the graph, + meaning whoever calls this graph is responsible for applying the mutations + back to the original inputs. + (5) If is_joint is provided the graph will return parameter gradients in addition to user outputs. + The graph output will look like: + graph_outputs = (*updated_inputs, *user_outputs, *param_gradients) + + There are also several restrictions on what modules can use this API. In particular: + (1) If trace_joint is specified, we expect the loss function to be **fused** + into the module forward. One of the outputs to the forward must be a scalar loss, + which is specified with `output_loss_index`. + All other outputs to the forward are presumed to not require gradients. 
+ (2) This API cannot capture optimizers (although in theory we could build an API for this). + (3) Metadata mutations on params/buffers/inputs are banned. + (4) Data mutations on anything that requires gradients are banned (parameters) + (5) If an input is mutated, it is not allowed to alias any other inputs. + (6) Parameters must not be duplicated. + """ + if pre_dispatch and trace_joint: + raise RuntimeError("pre_dispatch is not supported when trace_joint is True.") + named_parameters = dict(mod.named_parameters(remove_duplicate=False)) + named_buffers = dict(mod.named_buffers(remove_duplicate=False)) + + params_and_buffers = { + **dict(named_parameters), + **dict(named_buffers), + } + params_and_buffers_flat, params_spec = pytree.tree_flatten(params_and_buffers) + params_and_buffers_flat = tuple(params_and_buffers_flat) + params_len = len(params_and_buffers_flat) + + kwargs = kwargs or {} + + functional_call = create_functional_call(mod, params_spec, params_len, store_orig_mod=True) + + num_fw_outs = None + + if trace_joint: + # This helper effectively just adds some extra asserts about what the backward will look like: + # Outputs must include a scalar loss, that we compute gradients w.r.t. + # We don't compute gradients w.r.t. anything else: so just in case we detach() + # and other output tensors. + def fn_to_trace(*args): + nonlocal num_fw_outs + out = functional_call(*args) + if output_loss_index is None: + raise RuntimeError("""\ +If trace_joint=Trueit is required that one of your forward outputs must be a scalar loss. +You must specify the which (index) output is the loss with output_loss_index.""") + if isinstance(out, (torch.Tensor)): + out = (out,) + if not isinstance(out, (tuple, list)): + raise RuntimeError(f"Expected forward output to be either a tensor or a list/tuple of tensors. found {type(out)}") + + for i, o in enumerate(out): + # We only want to create a backward graph w.r.t. the loss that the user passed in. + # This implies that every other output should not require gradients. + # Instead of making this an error (and forcing the user to detach all other outputs + # of their forward), + # we'll automatically detach them here. + if o.requires_grad and i != output_loss_index: + raise RuntimeError(f"""\ +Found an output of the forward that requires gradients, that was not the scalar loss. +We require all outputs to the forward that are not the scalar loss to not require gradient, +because we will only compute a backward graph against the scalar loss. +You can fix this by calling .detach() on each of your forward outputs that is not the loss. +You specified that output index {output_loss_index} is the loss, but we found that +the output at index {i} requires gradients.""") + out_loss = out[output_loss_index] + num_fw_outs = len(out) + if not out_loss.requires_grad: + raise RuntimeError(f"""\ +The output at index {output_loss_index} was marked as the loss, but it does not require gradients""") + if out_loss.numel() != 1: + raise RuntimeError(f"""\ +We require the output marked as the loss (at index {output_loss_index}) to be a scalar, but it has shape {out_loss.shape}""") + return out + ctx = nullcontext + else: + # Run under no_grad, so our tracing machinery only traces an inference graph. 
+ ctx = torch.no_grad + fn_to_trace = functional_call + + full_args = [] + # First, the params + # NB: It is REQUIRED that parameters come first, Inductor infers "fixed" + # parameters by looking at the difference in parameter count outside + # and inside AOTAutograd, and assumes the prefix of arguments are fixed + # arguments + full_args.extend(params_and_buffers_flat) + # Next, the input args + full_args.extend(args) + + with ctx(): + fx_g, metadata, in_spec, out_spec = _aot_export_function( + fn_to_trace, + full_args, + decompositions=decompositions, + num_params_buffers=params_len, + no_tangents=True, + pre_dispatch=pre_dispatch, + kwargs=kwargs, + ) + if trace_joint: + def flattened_joint(*args): + # The idea here is that the joint graph that AOTAutograd creates has some strict properties: + # (1) It accepts two arguments (primals, tangents), and pytree_flattens them + # (2) It returns a tuple of (fw_outs, gradients) + # This is a very useful convention for anyone who wants to partition the joint graph + # into a separate forward and backward graph. + # However, + # (1) for people exporting a single joint graph, it would be preferable not to have + # any pytrees in the graph. + # (2) We are guaranteed in the aot_export_module case that the forward outputs a loss, + # and there are therefore no tangents that are needed to run the joint graph. + # (3) AOTAutograd creates a grad_input for every input in the forward, + # including None's for inputs that are not grad-requiring tensors. + # we don't want these in our export graph. + # and there are therefore no tangents that are needed to run the joint graph. + # This function "fixes" both of the above by removing any tangent inputs, + # and removing pytrees from the original FX graph. + fake_tangents = [None for _ in range(metadata.num_outputs + metadata.num_mutated_inp_runtime_indices)] + fw_outs, gradients = fx_g(args, fake_tangents) + assert len(gradients) == len(args) + output_gradients = [] + for i, (a, grad) in enumerate(zip(args, gradients)): + if isinstance(a, torch.Tensor) and a.requires_grad: + assert grad is not None, """\ +Found a parameter that did not receive a gradient. +"This is most likely a bug, but if this needs to be supported please comment on this Github issue: +https://github.com/pytorch/pytorch/issues/101192 +""" + output_gradients.append(grad) + else: + assert grad is None + return *fw_outs, *output_gradients + fx_g = make_fx(flattened_joint)(*full_args) + + user_args_flat = pytree.arg_tree_leaves(*args, **kwargs) + return fx_g, create_graph_signature( + fx_g, + metadata, + in_spec, + out_spec, + user_args_flat=user_args_flat, + params_and_buffers_flat=params_and_buffers_flat, + param_names=list(named_parameters.keys()), + buffer_names=list(named_buffers.keys()), + trace_joint=trace_joint, + num_user_fw_outs=num_fw_outs, + loss_index=output_loss_index, + ) + +def aot_export_joint_simple( + func: Callable, + args, + *, + trace_joint: bool, + # It looks like the main consequence of this API is that for dynamic shapes, + # it will assume that parms/buffers are static. + # With the new inferred dynamic shapes API, maybe this doesn't matter? + num_params_buffers: int = 0, + decompositions: Optional[Dict] = None, +) -> torch.fx.GraphModule: + """ + A simplified version of export. Used by higher order operators. 
+ + This function makes a high-level "no calling convention changes" guarantee: + - If no inputs require grad (so we export an inference graph), + there are *no* calling convention change between the exported graph, and "func". + - If at least one input requires grad (so we trace out and export a joint fw-bw graph), + Then if you were partition the graph into a separate forward and backward graph, + The forward graph will have no calling convention changes compared to "func". + + The above also relies on some strong restrictions around which functions this API accepts: + (1) `args` cannot contain any pytrees (they must have been pytree_flattened already) + (2) `func` cannot mutate any inputs + (3) The outputs of `func` cannot alias any inputs. + + Note: this function is only lightly tested today. It will probably be tested more heavily by higher order ops. + """ + if trace_joint: + ctx = nullcontext + else: + # Run under no_grad, so our tracing machinery only traces an inference graph. + ctx = torch.no_grad + + with ctx(): + fx_g, metadata, in_spec, out_spec = _aot_export_function( + func, + args, + decompositions=decompositions, + ) + in_spec, _kw_in_spec = in_spec.children_specs + # At this point, we can just directly return the (joint or inference graph) that we traced. + # First though: a bunch of assertions to make sure that our graph doesn't require + # any calling convention changes compared to the original function. + # These restrictions are *in addition to* the general restrictions on export. + + # No input mutations + if len([x for x in metadata.input_info if x.mutates_data or x.mutates_metadata]) != 0: + raise RuntimeError(f"aot_export_joint_simple does not support input mutations. {str(metadata)}") + # No output aliasing + if len([x for x in metadata.output_info if x.output_type != OutputType.non_alias]) != 0: + raise RuntimeError(f"aot_export_joint_simple does not support outputs that alias inputs. {str(metadata)}") + # No pytrees + if in_spec.is_leaf(): + raise RuntimeError(f"aot_export_joint_simple requires inputs to be a single list/tuple. in_spec={str(in_spec)}") + if not all(child.is_leaf() for child in in_spec.children_specs): + raise RuntimeError(f"aot_export_joint_simple requires individual inputs not to be pytrees. in_spec={str(in_spec)}") + if out_spec.is_leaf(): + raise RuntimeError(f"aot_export_joint_simple requires outputs to be a single list/tuple. out_spec={str(out_spec)}") + if not all(child.is_leaf() for child in out_spec.children_specs): + raise RuntimeError(f"aot_export_joint_simple requires individual outputs not to be pytrees. out_spec={str(out_spec)}") + # TODO: we might have to temporarily patch config.functionalize_rng + # so that it doesn't run when we're exporting a higher order op. + + if config.debug_assert: + # Smoke test that after partitioning, we can run the forward without any calling convention changes. + fw_module, bw_module = aot_config.default_partition( # noqa: F821 + fx_g, args, num_fwd_outputs=len(fw_metadata.output_infos) # noqa: F821 + ) + # Attempt to run the fw_module with the original user inputs + fake_mode = detect_fake_mode(args) + if fake_mode is None: + fake_mode = FakeTensorMode() + with fake_mode: + fw_module(*args) + return fx_g + +# Private for now because we aren't providing a contract on what to return +# for joint graphs (we could when there's a clearer use case) +# In the future, we may need to add more export API's that provide their own strong guarantees. 
+# This is meant as a general helper function for handling various export-y use cases. +def _aot_export_function( + func: Callable, + args, + *, + num_params_buffers: int = 0, + decompositions: Optional[Dict] = None, + # If we're exporting a joint graph and we don't want any tangent inputs in the graph + # (because we are backpropping through a scalar 1 loss), + # we need to explicitly specify not to include tangents in the graph. + # It's not enough just to check that our tangent is a scalar, since we also + # need to know if it is a 1 (no need to make it a graph input), or something else + # (requiring it to be a graph input). + # We don't know this info at trace time though, so we need to make it an explicit config. + no_tangents: bool = False, + pre_dispatch: bool = False, + kwargs=None, +) -> Tuple[torch.fx.GraphModule, ViewAndMutationMeta, pytree.TreeSpec, pytree.TreeSpec]: + kwargs = kwargs or {} + + flat_fn, out_spec = create_tree_flattened_fn(func, args, kwargs) + flat_args, in_spec = pytree.tree_flatten((args, kwargs)) + + dynamic_shapes = False + for x in flat_args: + if isinstance(x, FakeTensor): + dynamic_shapes = x.fake_mode.shape_env is not None + break + + # The export use case doesn't care about several bits of AOTConfig + # (1) compilers (we just export the graph) + # (2) partitioners (export is only full graph, user can partition themselves) + aot_config = AOTConfig( + fw_compiler=None, + bw_compiler=None, + inference_compiler=None, + partition_fn=None, + decompositions=decompositions, + num_params_buffers=num_params_buffers, + aot_id=next(AOT_COUNTER), + # For now there's no use case involving keeping input mutations in the graph + # (which we can only do in the inference case anyway). + # We can add this later if we need to. + keep_inference_input_mutations=False, + dynamic_shapes=dynamic_shapes, + aot_autograd_arg_pos_to_source=None, + is_export=True, + no_tangents=no_tangents, + pre_dispatch=pre_dispatch, + ) + + fx_g, meta = create_aot_dispatcher_function( + flat_fn, + flat_args, + aot_config, + ) + return fx_g, meta, in_spec, out_spec.spec + + +compiled_function = aot_function +compiled_module = aot_module diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/apis.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/apis.py new file mode 100644 index 0000000000000000000000000000000000000000..46325716936c591ce559ebcd114cb77f6b3df34d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/apis.py @@ -0,0 +1,401 @@ +# NOTE: We allow Dynamo to see this file (via torch/_dynamo/trace_rules.py) so that it can +# trace through functorch transforms. +# Currently, we can't allow Dynamo to see `eager_transforms.py`/`vmap.py` as that break a lot of thing +# and there isn't a mechanism to selectively expose only some functions (eg. grad) from a file +# to Dynamo. +from torch._functorch.vmap import (vmap_impl, _check_randomness_arg, + Callable, in_dims_t, out_dims_t, _check_out_dims_is_int_or_int_pytree, + _process_batched_inputs, _chunked_vmap) +from torch._functorch.utils import exposed_in, argnums_t +import functools + +# vmap(func)(inputs) wraps all Tensor inputs to be batched in BatchedTensors, +# sends those into func, and then unwraps the output BatchedTensors. Operations +# on BatchedTensors perform the batched operations that the user is asking for. +# +# vmap's randomness behavior differs from JAX's, which would require a PRNG key +# to be passed everywhere. 
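+#
+# A hedged illustration of the wrapping described above (the shapes are
+# purely illustrative): each row of ``xs`` reaches ``torch.sum`` as a plain
+# [3]-shaped tensor, and the batch dimension is reassembled on the way out.
+#
+#   xs = torch.randn(8, 3)
+#   ys = torch.vmap(torch.sum)(xs)   # ys.shape == torch.Size([8])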
+ + +@exposed_in('torch.func') +def vmap( + func: Callable, + in_dims: in_dims_t = 0, + out_dims: out_dims_t = 0, + randomness: str = 'error', + *, + chunk_size=None) -> Callable: + """ + vmap is the vectorizing map; ``vmap(func)`` returns a new function that + maps ``func`` over some dimension of the inputs. Semantically, vmap + pushes the map into PyTorch operations called by ``func``, effectively + vectorizing those operations. + + vmap is useful for handling batch dimensions: one can write a function + ``func`` that runs on examples and then lift it to a function that can + take batches of examples with ``vmap(func)``. vmap can also be used to + compute batched gradients when composed with autograd. + + .. note:: + :func:`torch.vmap` is aliased to :func:`torch.func.vmap` for + convenience. Use whichever one you'd like. + + Args: + func (function): A Python function that takes one or more arguments. + Must return one or more Tensors. + in_dims (int or nested structure): Specifies which dimension of the + inputs should be mapped over. ``in_dims`` should have a + structure like the inputs. If the ``in_dim`` for a particular + input is None, then that indicates there is no map dimension. + Default: 0. + out_dims (int or Tuple[int]): Specifies where the mapped dimension + should appear in the outputs. If ``out_dims`` is a Tuple, then + it should have one element per output. Default: 0. + randomness (str): Specifies whether the randomness in this + vmap should be the same or different across batches. If 'different', + the randomness for each batch will be different. If 'same', the + randomness will be the same across batches. If 'error', any calls to + random functions will error. Default: 'error'. WARNING: this flag + only applies to random PyTorch operations and does not apply to + Python's random module or numpy randomness. + chunk_size (None or int): If None (default), apply a single vmap over inputs. + If not None, then compute the vmap :attr:`chunk_size` samples at a time. + Note that :attr:`chunk_size=1` is equivalent to computing the vmap with a for-loop. + If you run into memory issues computing the vmap, please try a non-None chunk_size. + + Returns: + Returns a new "batched" function. It takes the same inputs as + ``func``, except each input has an extra dimension at the index + specified by ``in_dims``. It takes returns the same outputs as + ``func``, except each output has an extra dimension at the index + specified by ``out_dims``. + + .. warning: + :func:`vmap` works best with functional-style code. Please do not + perform any side-effects in ``func``, with the exception of + in-place PyTorch operations. Examples of side-effects include mutating + Python data structures and assigning values to variables not captured + in ``func``. + + One example of using :func:`vmap` is to compute batched dot products. PyTorch + doesn't provide a batched ``torch.dot`` API; instead of unsuccessfully + rummaging through docs, use :func:`vmap` to construct a new function. + + >>> torch.dot # [D], [D] -> [] + >>> batched_dot = torch.func.vmap(torch.dot) # [N, D], [N, D] -> [N] + >>> x, y = torch.randn(2, 5), torch.randn(2, 5) + >>> batched_dot(x, y) + + :func:`vmap` can be helpful in hiding batch dimensions, leading to a simpler + model authoring experience. 
+ + >>> batch_size, feature_size = 3, 5 + >>> weights = torch.randn(feature_size, requires_grad=True) + >>> + >>> def model(feature_vec): + >>> # Very simple linear model with activation + >>> return feature_vec.dot(weights).relu() + >>> + >>> examples = torch.randn(batch_size, feature_size) + >>> result = torch.vmap(model)(examples) + + :func:`vmap` can also help vectorize computations that were previously difficult + or impossible to batch. One example is higher-order gradient computation. + The PyTorch autograd engine computes vjps (vector-Jacobian products). + Computing a full Jacobian matrix for some function f: R^N -> R^N usually + requires N calls to ``autograd.grad``, one per Jacobian row. Using :func:`vmap`, + we can vectorize the whole computation, computing the Jacobian in a single + call to ``autograd.grad``. + + >>> # Setup + >>> N = 5 + >>> f = lambda x: x ** 2 + >>> x = torch.randn(N, requires_grad=True) + >>> y = f(x) + >>> I_N = torch.eye(N) + >>> + >>> # Sequential approach + >>> jacobian_rows = [torch.autograd.grad(y, x, v, retain_graph=True)[0] + >>> for v in I_N.unbind()] + >>> jacobian = torch.stack(jacobian_rows) + >>> + >>> # vectorized gradient computation + >>> def get_vjp(v): + >>> return torch.autograd.grad(y, x, v) + >>> jacobian = torch.vmap(get_vjp)(I_N) + + :func:`vmap` can also be nested, producing an output with multiple batched dimensions + + >>> torch.dot # [D], [D] -> [] + >>> batched_dot = torch.vmap(torch.vmap(torch.dot)) # [N1, N0, D], [N1, N0, D] -> [N1, N0] + >>> x, y = torch.randn(2, 3, 5), torch.randn(2, 3, 5) + >>> batched_dot(x, y) # tensor of size [2, 3] + + If the inputs are not batched along the first dimension, ``in_dims`` specifies + the dimension that each inputs are batched along as + + >>> torch.dot # [N], [N] -> [] + >>> batched_dot = torch.vmap(torch.dot, in_dims=1) # [N, D], [N, D] -> [D] + >>> x, y = torch.randn(2, 5), torch.randn(2, 5) + >>> batched_dot(x, y) # output is [5] instead of [2] if batched along the 0th dimension + + If there are multiple inputs each of which is batched along different dimensions, + ``in_dims`` must be a tuple with the batch dimension for each input as + + >>> torch.dot # [D], [D] -> [] + >>> batched_dot = torch.vmap(torch.dot, in_dims=(0, None)) # [N, D], [D] -> [N] + >>> x, y = torch.randn(2, 5), torch.randn(5) + >>> batched_dot(x, y) # second arg doesn't have a batch dim because in_dim[1] was None + + If the input is a Python struct, ``in_dims`` must be a tuple containing a struct + matching the shape of the input: + + >>> f = lambda dict: torch.dot(dict['x'], dict['y']) + >>> x, y = torch.randn(2, 5), torch.randn(5) + >>> input = {'x': x, 'y': y} + >>> batched_dot = torch.vmap(f, in_dims=({'x': 0, 'y': None},)) + >>> batched_dot(input) + + By default, the output is batched along the first dimension. However, it can be batched + along any dimension by using ``out_dims`` + + >>> f = lambda x: x ** 2 + >>> x = torch.randn(2, 5) + >>> batched_pow = torch.vmap(f, out_dims=1) + >>> batched_pow(x) # [5, 2] + + For any function that uses kwargs, the returned function will not batch the kwargs but will + accept kwargs + + >>> x = torch.randn([2, 5]) + >>> def fn(x, scale=4.): + >>> return x * scale + >>> + >>> batched_pow = torch.vmap(fn) + >>> assert torch.allclose(batched_pow(x), x * 4) + >>> batched_pow(x, scale=x) # scale is not batched, output has shape [2, 2, 5] + + .. note:: + vmap does not provide general autobatching or handle variable-length + sequences out of the box. 
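+
+    The ``chunk_size`` argument trades peak memory for extra iterations. A
+    hedged sketch (the sizes below are illustrative only):
+
+    >>> xs = torch.randn(1000, 512)
+    >>> out = torch.vmap(lambda x: x.softmax(dim=0), chunk_size=64)(xs)
+    >>> out.shape  # same result as chunk_size=None, computed 64 rows at a time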
+ """ + _check_randomness_arg(randomness) + if not (chunk_size is None or chunk_size > 0): + raise ValueError(f"vmap: chunk_size should be None or greater than 0. (got {chunk_size})") + + # @functools.wraps(func) + def wrapped(*args, **kwargs): + return vmap_impl(func, in_dims, out_dims, randomness, chunk_size, *args, **kwargs) + + return wrapped + + +def chunk_vmap( + func: Callable, + in_dims: in_dims_t = 0, + out_dims: out_dims_t = 0, + randomness: str = 'error', + chunks=2) -> Callable: + """ + chunk_vmap is the vectorizing map (vmap) using chunks of input data. It is a mix of vmap (which vectorizes + everything) and map (which executes things sequentially). ``chunk_vmap`` vectorizes the input with number of + chunks at a time. For more details about vectorizing map, see :func:`vmap`. + + .. note:: + Please use :func:`vmap` with ``chunk_size`` argument instead of this API. + + Args: + func (function): A Python function that takes one or more arguments. + Must return one or more Tensors. + in_dims (int or nested structure): Specifies which dimension of the + inputs should be mapped over. ``in_dims`` should have a + structure like the inputs. If the ``in_dim`` for a particular + input is None, then that indicates there is no map dimension. + Default: 0. + out_dims (int or Tuple[int]): Specifies where the mapped dimension + should appear in the outputs. If ``out_dims`` is a Tuple, then + it should have one element per output. Default: 0. + randomness (str): Specifies whether the randomness in this + vmap should be the same or different across batches. If 'different', + the randomness for each batch will be different. If 'same', the + randomness will be the same across batches. If 'error', any calls to + random functions will error. Default: 'error'. WARNING: this flag + only applies to random PyTorch operations and does not apply to + Python's random module or numpy randomness. + chunks (int): Number of chunks to use to split the input data. Default is 2. + If equals to 1 then :func:`vmap` is called. + + Returns: + Returns a new "batched" function. It takes the same inputs as + ``func``, except each input has an extra dimension at the index + specified by ``in_dims``. It takes returns the same outputs as + ``func``, except each output has an extra dimension at the index + specified by ``out_dims``. 
+ """ + _check_randomness_arg(randomness) + + if chunks == 1: + return vmap(func, in_dims=in_dims, out_dims=out_dims, randomness=randomness) + + def _get_chunk_flat_args(flat_args_, flat_in_dims_, chunks_): + flat_args_chunks = tuple( + t.chunk(chunks_, dim=in_dim) if in_dim is not None else [t, ] * chunks_ + for t, in_dim in zip(flat_args_, flat_in_dims_) + ) + # transpose chunk dim and flatten structure + # chunks_flat_args is a list of flatten args + chunks_flat_args = zip(*flat_args_chunks) + return chunks_flat_args + + @functools.wraps(func) + def wrapped_with_chunks(*args, **kwargs): + _check_out_dims_is_int_or_int_pytree(out_dims, func) + _, flat_in_dims, flat_args, args_spec = _process_batched_inputs(in_dims, args, func) + # Chunk flat arguments + chunks_flat_args = _get_chunk_flat_args(flat_args, flat_in_dims, chunks) + + # Apply vmap on chunks + return _chunked_vmap(func, flat_in_dims, chunks_flat_args, args_spec, out_dims, randomness, **kwargs) + + return wrapped_with_chunks + + +@exposed_in("torch.func") +def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Callable: + """``grad`` operator helps computing gradients of ``func`` with respect to the + input(s) specified by ``argnums``. This operator can be nested to + compute higher-order gradients. + + Args: + func (Callable): A Python function that takes one or more arguments. + Must return a single-element Tensor. If specified ``has_aux`` equals ``True``, + function can return a tuple of single-element Tensor and other auxiliary objects: + ``(output, aux)``. + argnums (int or Tuple[int]): Specifies arguments to compute gradients with respect to. + ``argnums`` can be single integer or tuple of integers. Default: 0. + has_aux (bool): Flag indicating that ``func`` returns a tensor and other + auxiliary objects: ``(output, aux)``. Default: False. + + Returns: + Function to compute gradients with respect to its inputs. By default, the output of + the function is the gradient tensor(s) with respect to the first argument. + If specified ``has_aux`` equals ``True``, tuple of gradients and output auxiliary objects + is returned. If ``argnums`` is a tuple of integers, a tuple of output gradients with + respect to each ``argnums`` value is returned. 
+ + Example of using ``grad``: + + >>> # xdoctest: +SKIP + >>> from torch.func import grad + >>> x = torch.randn([]) + >>> cos_x = grad(lambda x: torch.sin(x))(x) + >>> assert torch.allclose(cos_x, x.cos()) + >>> + >>> # Second-order gradients + >>> neg_sin_x = grad(grad(lambda x: torch.sin(x)))(x) + >>> assert torch.allclose(neg_sin_x, -x.sin()) + + When composed with ``vmap``, ``grad`` can be used to compute per-sample-gradients: + + >>> # xdoctest: +SKIP + >>> from torch.func import grad, vmap + >>> batch_size, feature_size = 3, 5 + >>> + >>> def model(weights, feature_vec): + >>> # Very simple linear model with activation + >>> assert feature_vec.dim() == 1 + >>> return feature_vec.dot(weights).relu() + >>> + >>> def compute_loss(weights, example, target): + >>> y = model(weights, example) + >>> return ((y - target) ** 2).mean() # MSELoss + >>> + >>> weights = torch.randn(feature_size, requires_grad=True) + >>> examples = torch.randn(batch_size, feature_size) + >>> targets = torch.randn(batch_size) + >>> inputs = (weights, examples, targets) + >>> grad_weight_per_example = vmap(grad(compute_loss), in_dims=(None, 0, 0))(*inputs) + + Example of using ``grad`` with ``has_aux`` and ``argnums``: + + >>> # xdoctest: +SKIP + >>> from torch.func import grad + >>> def my_loss_func(y, y_pred): + >>> loss_per_sample = (0.5 * y_pred - y) ** 2 + >>> loss = loss_per_sample.mean() + >>> return loss, (y_pred, loss_per_sample) + >>> + >>> fn = grad(my_loss_func, argnums=(0, 1), has_aux=True) + >>> y_true = torch.rand(4) + >>> y_preds = torch.rand(4, requires_grad=True) + >>> out = fn(y_true, y_preds) + >>> # > output is ((grads w.r.t y_true, grads w.r.t y_preds), (y_pred, loss_per_sample)) + + .. note:: + Using PyTorch ``torch.no_grad`` together with ``grad``. + + Case 1: Using ``torch.no_grad`` inside a function: + + >>> # xdoctest: +SKIP + >>> def f(x): + >>> with torch.no_grad(): + >>> c = x ** 2 + >>> return x - c + + In this case, ``grad(f)(x)`` will respect the inner ``torch.no_grad``. + + Case 2: Using ``grad`` inside ``torch.no_grad`` context manager: + + >>> # xdoctest: +SKIP + >>> with torch.no_grad(): + >>> grad(f)(x) + + In this case, ``grad`` will respect the inner ``torch.no_grad``, but not the + outer one. This is because ``grad`` is a "function transform": its result + should not depend on the result of a context manager outside of ``f``. + + """ + # To avoid cyclical dependency. + import torch._functorch.eager_transforms as eager_transforms + + @functools.wraps(func) + def wrapper(*args, **kwargs): + return eager_transforms.grad_impl(func, argnums, has_aux, args, kwargs) + return wrapper + + +@exposed_in("torch.func") +def grad_and_value(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Callable: + """ + Returns a function to compute a tuple of the gradient and primal, or + forward, computation. + + Args: + func (Callable): A Python function that takes one or more arguments. + Must return a single-element Tensor. If specified ``has_aux`` + equals ``True``, function can return a tuple of single-element + Tensor and other auxiliary objects: ``(output, aux)``. + argnums (int or Tuple[int]): Specifies arguments to compute gradients + with respect to. ``argnums`` can be single integer or tuple of + integers. Default: 0. + has_aux (bool): Flag indicating that ``func`` returns a tensor and + other auxiliary objects: ``(output, aux)``. Default: False. + + Returns: + Function to compute a tuple of gradients with respect to its inputs + and the forward computation. 
By default, the output of the function is + a tuple of the gradient tensor(s) with respect to the first argument + and the primal computation. If specified ``has_aux`` equals + ``True``, tuple of gradients and tuple of the forward computation with + output auxiliary objects is returned. If ``argnums`` is a tuple of + integers, a tuple of a tuple of the output gradients with respect to + each ``argnums`` value and the forward computation is returned. + + See :func:`grad` for examples + """ + from torch._functorch import eager_transforms + + @functools.wraps(func) + def wrapper(*args, **kwargs): + return eager_transforms.grad_and_value_impl(func, argnums, has_aux, args, kwargs) + return wrapper diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/autograd_function.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/autograd_function.py new file mode 100644 index 0000000000000000000000000000000000000000..0a4fbf81c7259c8739ad39d88195fc930b828acb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/autograd_function.py @@ -0,0 +1,659 @@ +import torch +from torch._ops import HigherOrderOperator +from torch._C._functorch import TransformType +from torch._functorch.utils import enable_single_level_autograd_function +import torch.utils._pytree as pytree +from torch._C._functorch import ( + _wrap_for_grad, + _unwrap_for_grad, + current_level, +) +from torch._functorch.vmap import ( + wrap_batched, + unwrap_batched, + restore_vmap, + _add_batch_dim, +) +from torch._functorch.apis import vmap +from torch._functorch.vmap import _broadcast_to_and_flatten +from torch.autograd.forward_ad import _set_fwd_grad_enabled +from typing import Any, NamedTuple, Tuple + +# autograd.Function technically runs before the regular PyTorch dispatcher. +# This is how features like autocast and torch_dispatch (e.g. PythonTLSSnapshot) +# work with it. One day we might decide to change this, but until then, +# we need to give the illusion that autograd.Function runs before those things. +# +# We do this by using creating a custom HigherOrderOperator that only functorch +# dispatches specially. +class CustomFunctionHigherOrderOperator(HigherOrderOperator): + def __init__(self): + super().__init__('custom_function_call') + + def __call__(self, autograd_function, *args, **kwargs): + # When custom_function_call is done dispatching through functorch, + # it should just invoke the autograd.Function. This is consistent + # with the autograd.Function behavior of being invoked before the + # PyTorch dispatcher. + # + # This will lead us into trouble later down the line, but this is + # pre-existing. There is an invariant that a function traced by + # make_fx should have the same behavior when provided the same + # Tensor. However, make_fx sees autograd.Function as a composite + # (because autograd.Function happens before the Python dispatch key) + # and only traces the forward pass. + if torch._C._are_functorch_transforms_active(): + return super().__call__(autograd_function, *args, **kwargs) + return autograd_function.apply(*args, **kwargs) + + +# "custom_function_call" +# This is the mechanism for an autograd.Function that works with functorch transforms. +# It wraps an autograd.Function; interactions with functorch transforms are defined +# via PyDispatcher and HigherOrderOperator rather than through the traditional PyTorch +# dispatcher. 
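+#
+# From the user's point of view this machinery is reached through an ordinary
+# autograd.Function. A hedged sketch (``MySquare`` is illustrative only):
+#
+#   class MySquare(torch.autograd.Function):
+#       generate_vmap_rule = True
+#
+#       @staticmethod
+#       def forward(x):
+#           return x * x
+#
+#       @staticmethod
+#       def setup_context(ctx, inputs, output):
+#           ctx.save_for_backward(inputs[0])
+#
+#       @staticmethod
+#       def backward(ctx, gy):
+#           x, = ctx.saved_tensors
+#           return 2 * x * gy
+#
+#   # Under functorch transforms the call below routes through
+#   # custom_function_call; in plain eager it is just MySquare.apply.
+#   torch.vmap(MySquare.apply)(torch.randn(3))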
+custom_function_call = CustomFunctionHigherOrderOperator() + + +# The grad rule for custom_function_call is to construct a new _SingleLevelFunction +# (autograd.Function that only works with a single layer (level) of functorch) that: +# - unwraps the inputs +# - redispatches to custom_function_call +# - wraps the outputs +# and whose backward pass calls the original autograd.Function's backward. +# +# Why do we need to redispatch to custom_function_call? +# ----------------------------------------------------- +# This is consistent with how ATen operators work with functorch's grad transform: +# they always redispatch to the original operator. +# Consider torch.sin, and let's say we do grad0(grad1(torch.sin))(x) +# +# grad1 will: +# - set up the autograd graph +# - unwrap the inputs +# - redispatch to at::sin (*) +# - rewrap the outputs on the return +# +# On the redispatch in (*), grad0 will: +# - set up the autograd graph +# - unwrap the inputs +# - redispatch to at::sin +# - rewrap the outputs on the return +# +# To "set up the autograd graph", we generate a _SingleLevelFunction +# and apply it. +@custom_function_call.py_impl(TransformType.Grad) +@custom_function_call.py_impl(TransformType.Jvp) +def custom_function_call_grad(interpreter, autograd_function, *operands): + Generated = generate_single_level_function(interpreter, autograd_function) + with enable_single_level_autograd_function(): + flat_out = Generated.apply(*operands) + return flat_out + + +def generate_single_level_function(interpreter, autograd_function): + level = interpreter.level() + + def forward(*operands): + unwrapped_operands = pytree.tree_map_only( + torch.Tensor, + lambda x: _unwrap_for_grad(x, level), + operands) + # Both enable_grad() and _set_fwd_grad_enabled() are necessary no matter + # the transform. _SingleLevelFunction will turn off both fwd and bwd + # gradient computation and we need to turn it back on here. + with torch.enable_grad(), _set_fwd_grad_enabled(True), interpreter.lower(): + unwrapped_output = custom_function_call(autograd_function, *unwrapped_operands) + + # See NOTE [mark_dirty object identity check] + def wrap_fn(output): + return _wrap_for_grad(output, level) + + return wrap_outputs_maintaining_identity( + unwrapped_output, + unwrapped_operands, + operands, + wrap_fn) + + def setup_context(ctx, inputs, output): + return autograd_function.setup_context(ctx, inputs, output) + + # backward is only used if the transform is TransformType.Grad + def backward(ctx, *grads): + result = autograd_function.backward(ctx, *grads) + return result + + # jvp is only used if the transform is TransformType.Jvp + def jvp(ctx, *tangents): + result = autograd_function.jvp(ctx, *tangents) + return result + + # This is the sequence of magic words to dynamically generate a Subclass with + # a given name. A Tensor's .grad_fn field has a class name that is the original + # autograd.Function's name + Backward, so we do this to generate some + # meaningful name. + name = f'{autograd_function.__name__}Generated' + Generated = type( + name, + (torch.autograd.function._SingleLevelFunction,), + { + 'forward': staticmethod(forward), + 'backward': staticmethod(backward), + 'jvp': staticmethod(jvp), + 'setup_context': staticmethod(setup_context), + }, + ) + return Generated + +# wrap_outputs_maintaining_identity handles outputs from the vmap, +# backward (vjp), and jvp staticmethod. The way it distinguishes +# between the vmap case and the {backward, jvp} case is if the out_dims +# are specified or not. 
+# +# NB: we cannot use out_dims=None as the deciding factor. This because +# out_dims=None can still happen in the vmap staticmethod! What the +# user is saying in that case is that their output does not have a +# dimension that is being vmapped over, which is valid. +NO_OUT_DIMS = "not specified" + +# NOTE [mark_dirty object identity check] +# autograd.Function's ctx.mark_dirty expect a returned input +# to have the same object identity as the input. +# Mode-only functorch will greatly simplify this logic. +def wrap_outputs_maintaining_identity( + outputs, unwrapped_inputs, orig_inputs, wrap_fn, out_dims=NO_OUT_DIMS): + flat_unwrapped_inputs = pytree.arg_tree_leaves(*unwrapped_inputs) + flat_orig_inputs = pytree.arg_tree_leaves(*orig_inputs) + + unwrapped_input_to_orig_input = { + id(unwrapped): orig + for unwrapped, orig in zip(flat_unwrapped_inputs, flat_orig_inputs) + } + + flat_outputs, spec = pytree.tree_flatten(outputs) + result = [] + + out_dims_specified = out_dims != NO_OUT_DIMS + + if out_dims_specified: + flat_out_dims = _broadcast_to_and_flatten(out_dims, spec) + # _broadcast_to_and_flatten returns None if it is unable to broadcast. + # TODO: update following link from master to stable once that's out + if flat_out_dims is None: + raise RuntimeError( + f"The autograd.Function's vmap staticmethod returned an " + f"incompatible (output, out_dims) tuple. " + f"Expected out_dims={out_dims} " + f"to be compatible with the structure of `output`. " + f"out_dims has structure {pytree.tree_flatten(out_dims)[1]} " + f"but output has structure {spec}. " + f"For more details, please see " + f"https://pytorch.org/docs/master/notes/extending.func.html" + ) + + for i, output in enumerate(flat_outputs): + if not isinstance(output, torch.Tensor): + result.append(output) + continue + if id(output) in unwrapped_input_to_orig_input: + result.append(unwrapped_input_to_orig_input[id(output)]) + continue + if out_dims_specified: + result.append(wrap_fn(output, flat_out_dims[i])) # type: ignore[possibly-undefined, index] + else: + result.append(wrap_fn(output)) + + return pytree.tree_unflatten(result, spec) + + +# NOTE: [functorch vjp and autograd interaction] +# There's an edge case with the functorch vjp and autograd interaction +# that will eventually be fixed by mode-only functorch. +# The TL;DR is that there's no way to unwrap a dead GradTensorWrapper, +# so we (the framework) need to do it manually. Regular PyTorch operators +# automatically do so this is consistent. +# +# class MyExp(torch.autograd.Function): +# @staticmethod +# def forward(x): +# return x.exp() +# +# @staticmethod +# def setup_context(ctx, inputs, output): +# y = output +# ctx.save_for_backward(y) +# +# @staticmethod +# def backward(gy): +# y, = ctx.saved_tensors() +# return MyMul.apply(gy, y) +# +# x = torch.randn([], requires_grad=True) +# gy = torch.randn([], requires_grad=True) +# _, vjp_fn = vjp(MySin.apply, x) +# result = vjp_fn(gy) +# +# MyMul is an autograd.Function that is not shown here. +# It saves a `y` for backward (since gy requires grad). +# +# in vjp_fn(gy), we get: +# > MyMul.apply(gy, GradTensorWrapper(y, level=dead)) +# Because the y that is saved for backward by MyExp is a GradTensorWrapper +# but is now dead since we are outside the vjp context. +# +# PyTorch dispatcher operations, upon seeing a dead GradTensorWrapper, +# will automatically unwrap the GradTensorWrapper when applied. +# But since autograd.Function technically sits above the regular PyTorch +# dispatcher, it doesn't get this treatment. 
So we manually do +# the unwrapping to be consistent with regular PyTorch dispatcher operations. + + +class VmapInfo(NamedTuple): + batch_size: int + randomness: str + + +def has_overriden_vmap_rule(autograd_function): + return autograd_function.vmap is not torch.autograd.Function.vmap + + +def validate_vmap_returns_tuple_of_two_elements(result): + base_error_msg = ( + "Expected the vmap staticmethod to have two returns, an output " + "and out_dims with pytree structure compatible with the output. " + ) + if not isinstance(result, tuple): + raise RuntimeError(base_error_msg + f"Got a {type(result)} instead") + if not len(result) == 2: + raise RuntimeError(base_error_msg + f"Got {len(result)} returns instead") + +@custom_function_call.py_impl(TransformType.Vmap) +def custom_function_call_vmap(interpreter, autograd_function, *operands): + if autograd_function.generate_vmap_rule: + if has_overriden_vmap_rule(autograd_function): + # TODO: Update link to stable once that's out + # https://github.com/pytorch/pytorch/issues/92029 + raise RuntimeError( + f"You tried to vmap over {autograd_function.__name__}, but " + f"it has both generate_vmap_rule=True and an overriden vmap " + f"staticmethod. Please set generate_vmap_rule=False or delete " + f"the overriden vmap staticmethod to avoid ambiguity. " + f"For more details, please see " + f"https://pytorch.org/docs/master/notes/extending.func.html") + return custom_function_call_vmap_generate_rule(interpreter, autograd_function, *operands) + + if not has_overriden_vmap_rule(autograd_function): + # TODO: Update link to stable once that's out + # https://github.com/pytorch/pytorch/issues/92029 + raise RuntimeError( + f"You tried to vmap over {autograd_function.__name__}, but " + f"it does not have vmap support. Please override and implement the " + f"vmap staticmethod or set generate_vmap_rule=True. " + f"For more details, please see " + f"https://pytorch.org/docs/master/notes/extending.func.html") + + current_level = interpreter.level() + info = VmapInfo( + batch_size=interpreter.batch_size(), + randomness=interpreter.randomness(), + ) + unwrapped_operands, in_dims = unwrap_batched(operands, current_level) + + # If none of the tensors are batched at the current level, then we skip the + # current level. 
This saves the user from needing to handle this case in + # their vmap staticmethod (and is consistent with our C++ batching rule API) + if pytree.tree_all(lambda dim: dim is None, in_dims): + with interpreter.lower(): + return custom_function_call(autograd_function, *operands) + + with interpreter.lower(): + result = autograd_function.vmap(info, in_dims, *unwrapped_operands) + validate_vmap_returns_tuple_of_two_elements(result) + unwrapped_output, out_dims = result + + # See NOTE [mark_dirty object identity check] + def wrap_fn(output, out_dim): + return output if out_dim is None else _add_batch_dim(output, out_dim, current_level) + + return wrap_outputs_maintaining_identity( + unwrapped_output, + unwrapped_operands, + operands, + wrap_fn, + out_dims=out_dims) + + +def custom_function_call_vmap_generate_rule(interpreter, autograd_function, *operands): + unwrapped_operands, in_dims = unwrap_batched(operands, interpreter.level()) + vmapped_function, get_out_dims = vmapify_autograd_function( + autograd_function, in_dims, interpreter.batch_size(), interpreter.randomness()) + + with interpreter.lower(): + output = custom_function_call(vmapped_function, *unwrapped_operands) + + out_dims = get_out_dims() + return wrap_batched(output, out_dims, interpreter.level()) + + +@custom_function_call.py_impl(TransformType.Functionalize) +def custom_function_call_functionalize(interpreter, autograd_function, generate_vmap_rule, *operands): + raise RuntimeError("NYI: Functionalize rule for custom_function_call") + + +def vmapify_autograd_function(autograd_function, in_dims, batch_size, randomness): + # The following values are saved from the forward() and setup_context() + # and used in backward(). + # Why do we save the values out here instead of on the ctx object? + # - out_dims: There's no way to retrieve this from forward() + # - input_shapes, saved_tensors_bdims: I'm a bit scared of nesting + # vmap(vmap( but not completely sure if it is a problem. If we + # assigned those fields to the ctx object, the worry is that they + # get overwritten. + init_val = "not populated" + out_dims = init_val + input_shapes: Any = init_val + saved_tensors_bdims: Any = init_val + + def forward(*operands): + nonlocal out_dims + outputs, out_dims = restore_vmap( + autograd_function.forward, in_dims, batch_size, randomness)(*operands) + return outputs + + def setup_context(ctx, inputs, outputs): + input_shapes_ = None + saved_tensors_bdims_ = None + + def inner(inputs, outputs): + # wrapped_ctx.save_for_backward will: + # - unwrap batchedtensors into (tensor, bdim) + # - save_for_backward(*unwrapped_tensors) + # - assign the bdims to wrapped_ctx._pt_saved_tensors_bdims + wrapped_ctx = CtxCustomSave(ctx, current_level()) + autograd_function.setup_context(wrapped_ctx, inputs, outputs) + + # input_shapes are used for reductify later to reduce expanded gradients + # to the correct shape. + # See NOTE: [Why can't we rely on autograd to reduce expanded gradients?] + # for more details + nonlocal input_shapes_ + input_shapes_ = tuple(inp.shape if isinstance(inp, torch.Tensor) else None + for inp in inputs) + nonlocal saved_tensors_bdims_ + saved_tensors_bdims_ = wrapped_ctx._pt_saved_tensors_bdims + + # See NOTE: [Why do we need to run setup_context under a vmap?] 
+ restore_vmap( + inner, + (in_dims, out_dims), + batch_size, + randomness, + )(inputs, outputs) + + nonlocal input_shapes + input_shapes = input_shapes_ + nonlocal saved_tensors_bdims + saved_tensors_bdims = saved_tensors_bdims_ + + def jvp(ctx, *tangents): + assert out_dims != init_val + assert saved_tensors_bdims != init_val + + def jvp_no_context(saved_tensors, tangents): + wrapped_ctx = CtxWithSavedTensors(ctx, saved_tensors) + return autograd_function.jvp(wrapped_ctx, *tangents) + + tangent_in_dims = get_tangents_in_dims(in_dims, tangents) + out_tangents, out_tangents_dims = restore_vmap( + jvp_no_context, (saved_tensors_bdims, tangent_in_dims), batch_size, randomness)( + ctx.saved_tensors, tangents) + + result = reductify(out_tangents, out_tangents_dims, out_dims, batch_size) + return result + + def backward(ctx, *grad_outputs): + assert out_dims != init_val + assert input_shapes != init_val + assert saved_tensors_bdims != init_val + + def backward_no_context(inputs): + saved_tensors, grad_outputs = inputs + wrapped_ctx = CtxWithSavedTensors(ctx, saved_tensors) + return autograd_function.backward(wrapped_ctx, *grad_outputs) + + grad_ins, grad_ins_dims = restore_vmap( + backward_no_context, ((saved_tensors_bdims, out_dims),), batch_size, randomness)( + (ctx.saved_tensors, grad_outputs)) + result = reductify(grad_ins, grad_ins_dims, in_dims, batch_size, input_shapes) + return result + + name = f'Vmapped{autograd_function.__name__}' + Generated = type( + name, + (torch.autograd.Function,), + { + 'forward': staticmethod(forward), + 'backward': staticmethod(backward), + 'jvp': staticmethod(jvp), + 'setup_context': staticmethod(setup_context), + 'generate_vmap_rule': True + } + ) + + def get_out_dims(): + assert out_dims != init_val + return out_dims + + return Generated, get_out_dims + + +# tangents might be None, so we need to replace +# the corresponding in_dims with None. +def get_tangents_in_dims(input_dims, tangents): + flat_in_dims, spec = pytree.tree_flatten(input_dims) + flat_tangents = pytree.arg_tree_leaves(*tangents) + result = [None if tangent is None else in_dim + for in_dim, tangent in zip(flat_in_dims, flat_tangents)] + return pytree.tree_unflatten(result, spec) + + +# NOTE: [Why do we need to run setup_context under a vmap?] +# Consider the following autograd.Function +# +# class Sum(torch.autograd.Function): +# @staticmethod +# def forward(x): +# return x.sum() +# @staticmethod +# def setup_context(ctx, inputs, outputs): +# ctx.x_shape = inputs[0] +# @staticmethod +# def backward(ctx, gy): +# return gy.expand(ctx.x_shape) +# +# x = torch.randn(B, 4) +# in_dims = 0 +# vmap(Sum.apply, in_dims)(x) +# +# Let’s assume for a moment that we didn’t vmap setup_context in VmappedSum: +# +# class VmappedSum(torch.autograd.Function): +# @staticmethod +# def forward(x): +# return vmap(Sum.forward, in_dims)(x) +# +# @staticmethod +# def setup_context(ctx, inputs, outputs): +# Sum.setup_context(ctx, inputs, outputs) +# +# @staticmethod +# def backward(ctx, gy): +# def backward_no_context(gy): +# return gy.expand(ctx.x_shape) +# +# dims = (0,) +# gx = vmap(backward_no_context, dims)(gy) +# return gx +# +# We end up saving [B, 4] as x_shape. In the backward, gy has shape [B], +# and we’re doing: +# +# def backward_no_context(gy): +# return gy.expand([B, 4]) +# +# gx = vmap(backward_no_context, dims)(gy: "Tensor[B]") +# +# This gives us the wrong result (gx has shape [B, B, 4], but it should +# have shape [4]). 
Performing vmap over setup_context means the shape +# saved has shape [4] and leads to a correct result shape for gx. + +# Wraps a ctx object. Forwards all attr accesses to the underlying object +# except for the attrs in _pt_attrs +class WrappedCtx: + _pt_reserved_attrs: Tuple[str, ...] = ('_pt_reserved_attrs', '_pt_inner_ctx') + + def __init__(self, ctx): + if not isinstance(ctx, WrappedCtx): + reserved_attrs = type(self)._pt_reserved_attrs + for name in reserved_attrs: + if not hasattr(ctx, name): + continue + raise RuntimeError( + f'PyTorch reserves the {reserved_attrs} field on ctx. ' + 'Please name your fields on ctx something else to avoid name ' + 'collision.') + self._pt_inner_ctx = ctx + + def __getattr__(self, name): + return getattr(self._pt_inner_ctx, name) + + def __setattr__(self, name, value): + if name in type(self)._pt_reserved_attrs: + self.__dict__[name] = value + return + return setattr(self._pt_inner_ctx, name, value) + +# Wraps ctx to create a new ctx object that overrides saved_tensors. +class CtxWithSavedTensors(WrappedCtx): + _pt_reserved_attrs = ('_pt_new_saved_tensors', *WrappedCtx._pt_reserved_attrs) + + def __init__(self, ctx, new_saved_tensors): + super().__init__(ctx) + self._pt_new_saved_tensors = new_saved_tensors + + @property + def saved_tensors(self): + return self._pt_new_saved_tensors + +class CtxCustomSave(WrappedCtx): + _pt_reserved_attrs = ('_pt_saved_tensors_bdims', '_pt_current_level', + *WrappedCtx._pt_reserved_attrs) + + def __init__(self, ctx, current_level): + super().__init__(ctx) + self._pt_saved_tensors_bdims = () + self._pt_current_level = current_level + + def save_for_backward(self, *tensors): + unwrapped_tensors, bdims = unwrap_batched(tensors, self._pt_current_level) + self._pt_inner_ctx.save_for_backward(*unwrapped_tensors) + self._pt_saved_tensors_bdims = bdims + + def save_for_forward(self, *tensors): + unwrapped_tensors, bdims = unwrap_batched(tensors, self._pt_current_level) + self._pt_inner_ctx.save_for_forward(*unwrapped_tensors) + self._pt_saved_tensors_bdims = bdims + + +def reductify(grad_input, grad_input_bdim, input_bdim, batch_size, + target_shape_without_bdim_to_reduce_to=None): + if not isinstance(grad_input, tuple): + grad_input = (grad_input,) + if not isinstance(grad_input_bdim, tuple): + grad_input_bdim = (grad_input_bdim,) + if not isinstance(input_bdim, tuple): + input_bdim = (input_bdim,) + + if target_shape_without_bdim_to_reduce_to is None: + target_shape_without_bdim_to_reduce_to = len(grad_input) * (None,) + result = tuple( + reductify_leaf(gi, gi_bdim, i_bdim, batch_size, maybe_ishape) + for gi, gi_bdim, i_bdim, maybe_ishape in + zip(grad_input, grad_input_bdim, input_bdim, target_shape_without_bdim_to_reduce_to) + ) + return result + + +def reductify_leaf(grad_input, grad_input_bdim, input_bdim, batch_size, + target_shape_without_bdim_to_reduce_to=None): + if grad_input is None: + return None + + if grad_input_bdim is None and input_bdim is None: + return grad_input + + if grad_input_bdim is not None and input_bdim is None: + return grad_input.sum(grad_input_bdim) + + # NOTE: [Why can't we rely on autograd to reduce expanded gradients?] + # For reverse-mode AD, + # given a grad_input and input, it is valid for the user to return a + # grad_input that has a broadcasted shape when compared to the input. + # In this situation, autograd automatically reduces the grad_input to + # the shape of the input. + # + # However, when input_bdim is not None, we have problems. 
+ # + # [example 1] + # grad_input: Tensor[3, 4], input: Tensor[B, 4] + # We can expand grad_input to Tensor[B, 3, 4], but that isn't broadcastable + # from [B, 4]. + # + # [example 2] + # grad_input: Tensor[3, B, 4], input: Tensor[B, 4] + # We can swizzle grad_input to Tensor[B, 3, 4], but that isn't broadcastable + # from [B, 4]. + # + # This means that we need to also reduce the grad_input to the shape of the + # input. This behavior is controlled by the `target_shape_without_bdim_to_reduce_to` flag; + # if not-None then we do the reducing manually, otherwise, we do not do a reduction. + assert input_bdim is not None + + if grad_input_bdim is None: + grad_input = grad_input.unsqueeze(input_bdim) + new_shape = list(grad_input.shape) + new_shape[input_bdim] = batch_size + grad_input = grad_input.expand(new_shape) + grad_input_bdim = input_bdim + + if target_shape_without_bdim_to_reduce_to is not None: + return vmap(torch.Tensor.sum_to_size, in_dims=(grad_input_bdim, None), out_dims=input_bdim)( + grad_input, target_shape_without_bdim_to_reduce_to) + + if input_bdim != grad_input_bdim: + grad_input = grad_input.movedim(grad_input_bdim, input_bdim) + return grad_input + + +class AutogradFunctionApply(HigherOrderOperator): + def __init__(self): + super().__init__("autograd_function_apply") + + def __call__(self, fwd, bwd, *fwd_args): + saved_values = None + + class ApplyTemplate(torch.autograd.Function): + @staticmethod + def forward(ctx, *args): + nonlocal saved_values + output, saved_values = fwd(None, *args) + return output + + @staticmethod + def backward(ctx, *grad): + return bwd(None, *grad, *saved_values) + + return ApplyTemplate.apply(*fwd_args) + + +autograd_function_apply = AutogradFunctionApply() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/batch_norm_replacement.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/batch_norm_replacement.py new file mode 100644 index 0000000000000000000000000000000000000000..29715581837e19fab7c2ab511abf289ec35ef662 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/batch_norm_replacement.py @@ -0,0 +1,24 @@ +import torch.nn as nn +from torch._functorch.utils import exposed_in + + +def batch_norm_without_running_stats(module: nn.Module): + if isinstance(module, nn.modules.batchnorm._BatchNorm) and module.track_running_stats: + module.running_mean = None + module.running_var = None + module.num_batches_tracked = None + module.track_running_stats = False + + +@exposed_in("torch.func") +def replace_all_batch_norm_modules_(root: nn.Module) -> nn.Module: + """ + In place updates :attr:`root` by setting the ``running_mean`` and ``running_var`` to be None and + setting track_running_stats to be False for any nn.BatchNorm module in :attr:`root` + """ + # base case + batch_norm_without_running_stats(root) + + for obj in root.modules(): + batch_norm_without_running_stats(obj) + return root diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/benchmark_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/benchmark_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d77ae7aae84d53bf388d4f702b2576c2b2a503e9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/benchmark_utils.py @@ -0,0 +1,195 @@ +# mypy: ignore-errors + +import contextlib +import time +import os +import json + +import torch +from torch.profiler import profile, ProfilerActivity + + +def synchronize(): + pass + + +def dump_chrome_trace(f, input, 
trace_filename, optimize_ctx, activities, num_runs=1, + devices=None, kwargs_for_f=None, kwargs_for_profiler=None): + """ + Output the chrome trace of running f(input, **kwargs_for_f) with [optimize_ctx] + [num_runs] times to [trace_filename]. + + [activities] are the activities that the profiler will record, e.g. ProfilerActivity.CUDA. + Return total runtime without the profiler + + Outputs to trace_filename + """ + + if devices is None: + devices = ["cuda"] + + global synchronize + if devices != ["cpu"] and torch.cuda.is_available(): + synchronize = torch.cuda.synchronize + + if kwargs_for_f is None: + kwargs_for_f = {} + if kwargs_for_profiler is None: + kwargs_for_profiler = {} + + with optimize_ctx: + torch.manual_seed(1337) + for _ in range(5): # warmup runs + f(input, **kwargs_for_f) + synchronize() + torch.manual_seed(1337) + t0 = time.perf_counter() + for _ in range(num_runs): + f(input, **kwargs_for_f) + synchronize() + t1 = time.perf_counter() + timing = t1 - t0 + + with profile(activities=activities, **kwargs_for_profiler) as prof: + with optimize_ctx: + synchronize() + torch.manual_seed(1337) + for _ in range(num_runs): + f(input, **kwargs_for_f) + synchronize() + prof.export_chrome_trace(trace_filename) + + return timing + + +def get_chrome_trace_events(filename): + f = open(filename) + data = json.load(f) + events = data["traceEvents"] + return events + + +def is_gpu_compute_event(event): + global gpu_pids + return "pid" in event and event["pid"] in gpu_pids and "ph" in event and event["ph"] == "X" + + +def get_sorted_gpu_events(events): + sorted_gpu_events = [] + for event in events: + if not is_gpu_compute_event(event): + continue + sorted_gpu_events.append(event) + return sorted(sorted_gpu_events, key=lambda x: x["ts"]) + + +def get_duration(sorted_gpu_events): + if len(sorted_gpu_events) == 0: + return 0 + event = sorted_gpu_events[0] + current_end_time = event["ts"] + event["dur"] + total_duration = event["dur"] + for event in sorted_gpu_events[1:]: + start_time = max(event["ts"], current_end_time) + end_time = event["ts"] + event["dur"] + total_duration = total_duration + max(end_time - start_time, 0) + current_end_time = max(current_end_time, end_time) + return total_duration + + +def get_sorted_gpu_mm_conv_events(events): + def is_mm_conv_event(event): + return "name" in event and ("gemm" in event["name"] or "conv" in event["name"] + or "cutlass" in event["name"] or "wgrad" in event["name"]) + gpu_events = get_sorted_gpu_events(events) + sorted_events = [] + for event in gpu_events: + if not is_mm_conv_event(event): + continue + sorted_events.append(event) + return sorted_events + + +gpu_pids = [] + + +def compute_utilization(filename: str, total_length: float): + """ + Process the chrome traces outputs by the pytorch profiler to compute GPU Utilization + and percent of times spent on matmul and convolution + + Args: + filename(str): Name of chrome traces file produced by pytorch profiler + + total_length(float): total length of the process without profiler in second + + Return: + tuple: (GPU Utilization, percent of time spent on matmul and convolution) + """ + events = get_chrome_trace_events(filename) + + # get pids of GPU events + global gpu_pids + gpu_pids = [] + for event in events: + if "name" not in event: + continue + if event["name"] == 'process_labels' and "GPU" in event["args"]["labels"]: + gpu_pids.append(event["pid"]) + + total_length = total_length * 1e6 + sorted_gpu_events = get_sorted_gpu_events(events) + utilization = get_duration(sorted_gpu_events) 
/ total_length + + sorted_gpu_mm_conv_events = get_sorted_gpu_mm_conv_events(events) + mm_conv_utilization = get_duration(sorted_gpu_mm_conv_events) / total_length + + return utilization, mm_conv_utilization + + +def benchmark_utilization(f, input, trace_folder, optimize_ctx=None, trace_file_name="tmp_chrome_trace", num_runs=1): + """ + Benchmark the GPU Utilization and percent of time spent on matmul and convolution operations of + running f(input, **kwargs_for_f) with [optimize_ctx] [num_runs] times. + It will produce a chrome trace file in trace_folder/trace_file_name.json + + Example: + + ``` + def f(a): + return a.sum() + a = torch.rand(2**20, device="cuda") + utilization, mm_conv_utilization = benchmark_utilization(f, a, "tmp", trace_file_name = "tmp_chrome_trace") + ``` + + Args: + f: function to benchmark + + input: input to :attr:`f` + + trace_folder: name of the folder to store the chrome trace + + optimize_ctx: the context in which f will run + + trace_file_name: name of the dumped chrome trace file, default to "tmp_chrome_trace" + + num_runs: number of times to run f, excluding the warm-up runs, default to 1. + + Return: + tuple: (GPU Utilization, percent of time spent on matmul and convolution) + + """ + isExist = os.path.exists(trace_folder) + if not isExist: + os.makedirs(trace_folder) + print("create folder " + trace_folder) + + if optimize_ctx is None: + optimize_ctx = contextlib.nullcontext() + + chrome_trace_file_name = os.path.join(trace_folder, trace_file_name + ".json") + total_length = dump_chrome_trace(f, input, chrome_trace_file_name, optimize_ctx, + [ProfilerActivity.CUDA], num_runs=num_runs, devices="cuda") + utilization, mm_conv_utilization = compute_utilization(chrome_trace_file_name, total_length) + + return utilization, mm_conv_utilization diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/compile_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/compile_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a7f512a14ce232f0b398ba79732510a3b4110dba --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/compile_utils.py @@ -0,0 +1,97 @@ +# mypy: ignore-errors + + +import torch +import torch.fx as fx +from torch.utils._pytree import tree_flatten +from torch.utils import _pytree as pytree + +aten = torch.ops.aten + + +def get_aten_target(node): + if hasattr(node.target, 'overloadpacket'): + return node.target.overloadpacket + return node.target + + +rand_ops = [aten.dropout, aten._fused_dropout, aten._standard_gamma, + aten.bernoulli, aten.multinomial, aten.native_dropout, + aten.normal, aten.poisson, aten.binomial, aten.rrelu, + aten.rand_like, aten.rand, aten.randint, aten.randn, aten.randperm] + + +# return a new copy of torch.fx.graph.Graph with CSE applied to the input graph +def fx_graph_cse(fx_g: torch.fx.graph.Graph): + new_graph = fx.Graph() + env = {} # map from node in the old graph to node in the new graph + hash_env = {} # map from hash to a node in the new graph + token_map = {} # map from hash to token + for n in fx_g.nodes: + # The placeholder, output, and get_attr nodes are copied to the new graph without change + # do not CSE away random operations + if n.op == 'placeholder' or n.op == 'output' or n.op == 'get_attr' or get_aten_target(n) in rand_ops: + new_node = new_graph.node_copy(n, lambda x: env[x]) + env[n] = new_node + else: # n.op == 'call_function', should never see n.op == 'call_module' or 'call_method' + # substitute args and kwargs members to their 
mapping in env if exists + # specs can be used to reconstruct nested list/dictionaries + def substitute(arg_list): + arg_list, spec = tree_flatten(arg_list) + for i in range(len(arg_list)): + v = arg_list[i] + if isinstance(v, torch.fx.node.Node) and v in env: + arg_list[i] = env[v] + if isinstance(v, (torch.SymBool, torch.SymInt, torch.SymFloat)): + arg_list[i] = v.node + return tuple(arg_list), spec + args, args_spec = substitute(n.args) + kwargs, kwargs_spec = substitute(n.kwargs) + + # each token corresponds to a unique node + # nodes with the same token can be substituted + token = {"target": n.target, "args": args, "args_spec": args_spec, + "kwargs": kwargs, "kwargs_spec": kwargs_spec} + + # hash substituted args to a number, do not hash specs because specs are not hashable + # We need to add type into hash to avoid situations like: + # hash((primals_2, 1.0)) == hash((primals_2, 1)) + hash_arg = hash((tuple((a, type(a)) for a in args), tuple((a, type(a)) for a in kwargs))) + hash_val = (n.target, hash_arg) + + # check if a node has a substitute and can be eliminated + hash_val_in_hash_env = hash_val in hash_env + if hash_val_in_hash_env and token_map[hash_val] == token: + env[n] = hash_env[hash_val] + continue + + new_node = new_graph.node_copy(n, lambda x: env[x]) + env[n] = new_node + if not hash_val_in_hash_env: + hash_env[hash_val] = new_node + token_map[hash_val] = token + + return new_graph + + +def strip_overloads(gm): + """ + Modifies the target of graph nodes in :attr:`gm` to strip overloads. + + Args: + gm(fx.GraphModule): The input Fx graph module to be modified + """ + for node in gm.graph.nodes: + if isinstance(node.target, torch._ops.OpOverload): + node.target = node.target.overloadpacket + gm.recompile() + + +def get_placeholders(graph): + return list(filter(lambda x: x.op == 'placeholder', graph.nodes)) + +def get_outputs(graph): + for node in graph.nodes: + if node.op == 'output': + return pytree.tree_leaves(node.args[0]) + raise AssertionError("No output node found") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/compilers.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/compilers.py new file mode 100644 index 0000000000000000000000000000000000000000..3c0117904e148323e2a7703cf309b214bccfaac4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/compilers.py @@ -0,0 +1,441 @@ +# mypy: ignore-errors + +import copy +import logging +import os +import pickle +import random +from contextlib import contextmanager +from functools import partial +from typing import Callable, Union +import sympy + +import torch +from torch import SymInt +import torch.fx as fx +import torch.nn as nn +from torch._decomp import get_decompositions +from torch.fx.experimental.symbolic_shapes import bind_symbols + +from .aot_autograd import aot_function, aot_module, make_boxed_compiler +from .compile_utils import strip_overloads +from .partitioners import ( + default_partition, + draw_graph, + min_cut_rematerialization_partition, +) +import torch.utils._pytree as pytree + + +log = logging.getLogger(__name__) + + +# These canonicalizations are needed here (and not decompositions), as the ops +# we're trying to canonicalize to CompositeImplicitAutograd. 
+def _canonicalize(fx_g): + for node in fx_g.graph.nodes: + if node.target == torch.ops.aten._to_copy: + node.target = torch.ops.aten.to + fx_g.recompile() + return fx_g + + +@contextmanager +def _disable_jit_autocast(): + old_jit_autocast_flag = torch._C._jit_set_autocast_mode(False) + try: + yield + finally: + torch._C._jit_set_autocast_mode(old_jit_autocast_flag) + + +@make_boxed_compiler +def ts_compile(fx_g: fx.GraphModule, inps) -> Callable: + """ + Compiles the :attr:`fx_g` with Torchscript compiler. + + .. warning:: + This API is experimental and likely to change. + + Args: + fx_g(fx.GraphModule): The input Fx graph module to be compiled. + + Returns: + Torch scripted model. + """ + + with _disable_jit_autocast(): + strip_overloads(fx_g) + + for node in fx_g.graph.nodes: + if ( + node.target == torch.ops.aten._to_copy + and len(node.args) == 1 + and len(node.kwargs) == 1 + and "dtype" in node.kwargs + ): + node.target = torch.ops.aten.to + + for node in fx_g.graph.nodes: + new_kwargs = {} + for k, v in node.kwargs.items(): + if isinstance(v, torch.device): + v = v.type + new_kwargs[k] = v + node.kwargs = new_kwargs + + fx_g.graph.lint() + + fx_g.recompile() + + f = torch.jit.script(fx_g) + + torch._C._jit_pass_remove_mutation(f.graph) + + f = torch.jit.freeze(f.eval()) + f = torch.jit.optimize_for_inference(f) + if not any(isinstance(t, torch._subclasses.FakeTensor) for t in inps): + f(*inps) + return f + + +def _draw_graph_compile(fx_g, _, name, clear_meta=True): + print(fx_g.code) + draw_graph(fx_g, name, clear_meta=clear_meta) + return fx_g + + +def draw_graph_compile(name): + return make_boxed_compiler( + partial(_draw_graph_compile, name=name) + ) + + +@make_boxed_compiler +def nop(fx_g: fx.GraphModule, _) -> Callable: + """ + Returns the :attr:`fx_g` Fx graph module as it is. This is a no-op compiler + and can be used to check accuracy. + + .. warning:: + This API is experimental and likely to change. + + """ + return fx_g + +class DebugInterpreter(fx.Interpreter): + def run(self, *args): + self.symbol_mapping = bind_symbols(self.module, *args) + super().run(*args) + + def run_node(self, n): + + def subst_symint(ni): + if not isinstance(ni, SymInt): + return ni + r = sympy.expand(ni.node.expr.xreplace(self.symbol_mapping)) + assert r.is_number, r + return int(r) + + def subst_symint_tuple(nis): + return tuple(subst_symint(ni) for ni in nis) + + def check_significant_strides(a, b): + if subst_symint(a.numel()) > 0: + for idx in range(a.ndim): + if subst_symint(a.stride(idx)) != b.stride(idx) and subst_symint(a.size(idx)) > 1: + return False + return True + + def check(nv, rv, desc): + assert callable(desc) + assert nv.dtype == rv.dtype, f"{desc()}: {nv.dtype} != {rv.dtype}" + assert subst_symint_tuple(nv.size()) == rv.size(), \ + f"{desc()}: {nv.size()} aka {subst_symint_tuple(nv.size())} != {rv.size()}" + same_strides = check_significant_strides(nv, rv) + assert same_strides, f"{desc()}: {nv.stride()} aka {subst_symint_tuple(nv.stride())} != {rv.stride()}" + + r = super().run_node(n) + if 'val' in n.meta: + n_vals, n_spec = pytree.tree_flatten(n.meta['val']) + r_vals, r_spec = pytree.tree_flatten(r) + # TODO: There is some sort of problem where we record that an + # operator returned a tuple/list, and then later it turns out the + # real version of the operator returned a list/tuple. Need to + # figure out what's actually going on here, the error itself is + # harmless enough as we only getitem out the outputs. 
+ # assert n_spec == r_spec, f"{n_spec} != {r_spec}" + assert len(n_vals) == len(r_vals), f"{len(n_vals)} != {len(r_vals)}" + for i, nv, rv in zip(range(len(n_vals)), n_vals, r_vals): + if not isinstance(rv, torch.Tensor): + continue + check(nv, rv, lambda: f"output {i} where {self.symbol_mapping}") + return r + + +@make_boxed_compiler +def debug_nop(fx_g: fx.GraphModule, _) -> Callable: + """ + Returns a (slow) interpreter over the FX graph module that also checks + various debugging properties (e.g., that tracing strides matched real + strides.) + """ + return DebugInterpreter(fx_g).run + +@make_boxed_compiler +def simple_ts_compile(fx_g, _): + strip_overloads(fx_g) + f = torch.jit.script(fx_g) + f = torch.jit.freeze(f.eval()) + return f + + +def nnc_jit(f): + return aot_function(f, simple_ts_compile) + + +aten = torch.ops.aten +default_decompositions = { + aten.detach, + aten.gelu_backward, + aten.leaky_relu_backward, + aten.sigmoid_backward, + aten.threshold_backward, + aten.hardtanh_backward, + aten.hardsigmoid_backward, + aten.hardswish_backward, + aten.tanh_backward, + aten.silu_backward, + aten.elu_backward, + aten.cudnn_batch_norm, + aten.cudnn_batch_norm_backward, + aten.masked_fill.Scalar, + aten.masked_fill.Tensor, + aten.elu, + aten.leaky_relu, + aten.hardtanh, + aten.hardswish, + aten.hardsigmoid, + aten.conj_physical, + aten.is_same_size, +} + +default_decompositions = get_decompositions(default_decompositions) + + +@make_boxed_compiler +def print_compile(fx_g, _): + print(fx_g.code) + return fx_g + + +def memory_efficient_fusion( + fn: Union[Callable, nn.Module], + **kwargs, +): + """ + Wrapper function over :func:`aot_function` and :func:`aot_module` to perform + memory efficient fusion. It uses the + :func:`min_cut_rematerialization_partition` partitioner to perform efficient + recomputation. It uses NVFuser to compile the generated forward and backward + graphs. + + .. warning:: + This API is experimental and likely to change. + + Args: + fn (Union[Callable, nn.Module]): A Python function or a ``nn.Module`` + that takes one ore more arguments. Must return one or more Tensors. + **kwargs: Any other overrides you want to make to the settings + + Returns: + Returns a ``Callable`` or ``nn.Module`` that retains the eager behavior + of the original :attr:`fn`, but whose forward and backward graphs have + gone through recomputation optimizations, and the graphs have been + compiled with nvfuser. 
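+
+    A minimal usage sketch (illustrative only; the function ``f`` below is an
+    arbitrary example and assumes a CUDA device with the NVFuser TorchScript
+    backend available):
+
+    >>> # xdoctest: +SKIP
+    >>> from torch._functorch.compilers import memory_efficient_fusion
+    >>>
+    >>> def f(x):
+    >>>     return torch.sin(x).cos().sum()
+    >>>
+    >>> x = torch.randn(1024, device="cuda", requires_grad=True)
+    >>> fused_f = memory_efficient_fusion(f)
+    >>> fused_f(x).backward()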
+ + """ + config = { + "fw_compiler": ts_compile, + "bw_compiler": ts_compile, + "partition_fn": min_cut_rematerialization_partition, + "decompositions": default_decompositions, + } + config.update(kwargs) + if isinstance(fn, torch.nn.Module): + return aot_module(fn, **config) + else: + return aot_function(fn, **config) + + +def debug_compile(fx_g, inps): + fx_g.to_folder("foo") + print( + f""" +############################################################## +# To minimize FX graph, copy and paste the below and run it # +############################################################## + +import torch +import torch.fx as fx +from functorch.compile import minifier, check_nvfuser_subprocess, check_nvfuser_correctness_subprocess + +inps = {[(i.shape, i.dtype) for i in inps]} +inps = [torch.ones(shape, dtype=dtype, device='cuda') for (shape, dtype) in inps] +from foo import FxModule +mod = FxModule().cuda() + +with torch.jit.fuser("fuser2"): + # check_nvfuser_subprocess can be replaced with check_nvfuser_correctness_subprocess + minifier(fx.symbolic_trace(mod), inps, check_nvfuser_subprocess) +""" + ) + from foo import FxModule + + FxModule().cuda()(*inps) + + return ts_compile(fx_g, inps) + + +graph_index = 0 + + +def get_inputs(input_data_path): + """ + Return a random input for the given inputs meta generated from _save_fx_default. + """ + inputs = [] + with (open(input_data_path, "rb")) as f: + inputs_meta = pickle.load(f) + inputs = [] + for meta in inputs_meta: + if len(meta) == 1: + type = meta + input = type(random.rand()) + else: + type, shape, stride, dtype, device = meta + if dtype in { + torch.int, + torch.int32, + torch.int64, + torch.bool, + torch.int, + torch.uint8, + int, + float, + }: + input = torch.randint(0, 1, shape, dtype=dtype, device=device) + else: + input = torch.rand(shape, dtype=dtype, device=device) + inputs.append(input) + return inputs + + +def _save_fx_default(current_name, folder_name, dump_example_input, gm, example_inputs): + """ + The forward, backward, and joint computation graph will be stored in + {folder_name}/{current_name}/{current_name}_forward_{graph_index}, + {folder_name}/{current_name}/{current_name}_backward_{graph_index}, and + {folder_name}/{current_name}/{current_name}_joint_{graph_index} respectively. + The input shape of the graphs will be stored in the .input files. + These files can be loaded with pickle, + and is a list of format (type, shape, stride, dtype, device). + In the case of type = int or float, it is just (type,). + For joint graph input, it is a nested list [[],[]] + where the two inner lists have the same format. + If dump_example_input is True, example_inputs will be stored in .pt file. 
+ Since each function might produce multiple graphs, + the graph_index is used to distinguish difference graphs + """ + from functorch.compile import aot_module_simplified + + def get_input_meta(args): + input_meta = [] + if len(args) > 0 and isinstance(args[0], tuple): # joint input + input_meta += get_input_meta(args[0]) + input_meta += get_input_meta(args[1]) + return input_meta + for arg in args: + if type(arg) == int or type(arg) == float: + input_meta.append((type(arg),)) + else: + input_meta.append( + (type(arg), arg.shape, arg.stride(), arg.dtype, arg.device) + ) + return input_meta + + def graph_saver_helper(gm_to_save, args, type_name): + global graph_index + if len(gm_to_save.graph.nodes) == 0: + log.log( + logging.WARNING, + "No nodes in graph {%s}_{%s}_{%s}.", + current_name, + type_name, + graph_index, + ) + return + + gm = copy.deepcopy(gm_to_save) + gm.graph.set_codegen(torch.fx.graph.CodeGen()) # remove codegen + gm.recompile() + + input_meta = get_input_meta(args) + + os.makedirs(f"{folder_name}/{current_name}", exist_ok=True) + gm.to_folder( + f"{folder_name}/{current_name}/{current_name}_{type_name}_{graph_index}" + ) + pickle.dump( + input_meta, + open( + f"{folder_name}/{current_name}/{current_name}_{type_name}_{graph_index}/{current_name}_{type_name}_{graph_index}.input", # noqa: B950 + "wb", + ), + ) # noqa: E501 + if dump_example_input: + torch.save( + args, + f"{folder_name}/{current_name}/{current_name}_{type_name}_{graph_index}/{current_name}_{type_name}_{graph_index}.pt", # noqa: B950 + ) # noqa: E501 + + def graph_saver_forward(gm, fw_args): + graph_saver_helper(gm, fw_args, "forward") + return gm + + def graph_saver_backward(gm, bw_args): + graph_saver_helper(gm, bw_args, "backward") + global graph_index + graph_index += 1 + return gm + + def graph_saver_joint(gm, joint_args): + graph_saver_helper(gm, joint_args, "joint") + return default_partition(gm, joint_args) + + return aot_module_simplified( + gm, + example_inputs, + fw_compiler=graph_saver_forward, + bw_compiler=graph_saver_backward, + partition_fn=graph_saver_joint, + decompositions=default_decompositions, + ) + + +# WARNING: This isn't tested anywhere!! +def graph_dumper_aot(current_name, folder_name, dump_example_input=False): + """ + Dump the forward, backward, and joint computation graph. + Example Usage: + save_fx_func = graph_dumper_aot(current_name, folder_name, dump_example_input = False) + optimize_ctx = torchdynamo.optimize( + save_fx_func + ) + with torch.enable_grad(): + with optimize_ctx: + result = forward_and_backward_pass(model, example_inputs) + """ + global graph_index + graph_index = 0 + return partial(_save_fx_default, current_name, folder_name, dump_example_input) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/config.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/config.py new file mode 100644 index 0000000000000000000000000000000000000000..9e084b04489c48e964a4b57ca9f25fbf7f1b058c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/config.py @@ -0,0 +1,48 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" +Global flags for aot autograd +""" +import os +import sys +from typing import TYPE_CHECKING + +# Converts torch rng ops to their functional philox rng equivalents. Note that +# we functionalize only CUDA rng ops today. 
+functionalize_rng_ops = False + +# can be useful for debugging if we are incorrectly creating meta fake tensors +fake_tensor_allow_meta = os.environ.get("FAKE_ALLOW_META", True) + +# Enables optional asserts in hotpath code to check for errors. If +# you are seeing weird accuracy problems, try turning this on. +# This is currently off by default as it will harm tracing time, +# but it is on by default for aot_eager. +debug_assert = False + +debug_partitioner = os.environ.get("AOT_PARTITIONER_DEBUG", False) + +static_weight_shapes = True + +# Applies CSE to the graph before partitioning +cse = True + +# Restricts the amount of computation AOTAutograd can do. +max_dist_from_bw = 3 + +# Enable aggressive_recomputation in the min-cut algorithm in partitioners to reduce +# memory usage with some penalty of performance. It allows more ops to be considered +# as recomputable except random ops and compute-intensive ops. +aggressive_recomputation = False + +if TYPE_CHECKING: + from torch.utils._config_typing import * # noqa: F401, F403 + +from torch.utils._config_module import install_config_module + +# adds patch, save_config, invalid config checks, etc +install_config_module(sys.modules[__name__]) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/deprecated.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/deprecated.py new file mode 100644 index 0000000000000000000000000000000000000000..0272095147150874aa1012cad792305e71d322c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/deprecated.py @@ -0,0 +1,125 @@ +import torch._functorch.apis as apis +import torch._functorch.eager_transforms as _impl +import torch._functorch.make_functional as _nn_impl +from torch._functorch.vmap import in_dims_t, out_dims_t +from torch._functorch.eager_transforms import argnums_t +import torch.nn as nn +import textwrap +from typing import Any, Callable, Optional, Tuple, Union +import warnings + +""" +The APIs in this file are exposed as `functorch.*`. They are thin wrappers +around the torch.func.* APIs that have deprecation warnings -- we're trying +to move people to the torch.func.* equivalents. + +NB: We don't use *args, **kwargs in the signatures because that changes the +documentation. +""" + +def get_warning(api, new_api=None, replace_newlines=False): + if new_api is None: + new_api = f'torch.func.{api}' + warning = ( + f"We've integrated functorch into PyTorch. As the final step of the \n" + f"integration, functorch.{api} is deprecated as of PyTorch \n" + f"2.0 and will be deleted in a future version of PyTorch >= 2.3. \n" + f"Please use {new_api} instead; see the PyTorch 2.0 release notes \n" + f"and/or the torch.func migration guide for more details \n" + f"https://pytorch.org/docs/master/func.migrating.html" + ) + if replace_newlines: + warning = warning.replace("\n", "") + return warning + + +def warn_deprecated(api, new_api=None): + warning = get_warning(api, new_api, replace_newlines=True) + warnings.warn(warning, stacklevel=2) + + +def setup_docs(functorch_api, torch_func_api=None, new_api_name=None): + api_name = functorch_api.__name__ + if torch_func_api is None: + torch_func_api = getattr(_impl, api_name) + # See https://docs.python.org/3/using/cmdline.html#cmdoption-OO + if torch_func_api.__doc__ is None: + return + + warning = get_warning(api_name, new_api_name) + warning_note = "\n.. 
warning::\n\n" + textwrap.indent(warning, " ") + warning_note = textwrap.indent(warning_note, " ") + functorch_api.__doc__ = torch_func_api.__doc__ + warning_note + +def vmap( + func: Callable, + in_dims: in_dims_t = 0, + out_dims: out_dims_t = 0, + randomness: str = 'error', + *, + chunk_size=None) -> Callable: + warn_deprecated('vmap', 'torch.vmap') + return apis.vmap(func, in_dims, out_dims, randomness, chunk_size=chunk_size) + +def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Callable: + warn_deprecated('grad') + return apis.grad(func, argnums, has_aux) + +def grad_and_value(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Callable: + warn_deprecated('grad_and_value') + return apis.grad_and_value(func, argnums, has_aux) + +def vjp(func: Callable, *primals, has_aux: bool = False): + warn_deprecated('vjp') + return _impl.vjp(func, *primals, has_aux=has_aux) + +def jvp(func: Callable, primals: Any, tangents: Any, *, strict: bool = False, has_aux: bool = False): + warn_deprecated('jvp') + return _impl.jvp(func, primals, tangents, strict=strict, has_aux=has_aux) + +def jacrev(func: Callable, argnums: Union[int, Tuple[int]] = 0, *, has_aux=False, + chunk_size: Optional[int] = None, + _preallocate_and_copy=False): + warn_deprecated('jacrev') + return _impl.jacrev(func, argnums, has_aux=has_aux, chunk_size=chunk_size, + _preallocate_and_copy=_preallocate_and_copy) + +def jacfwd(func: Callable, argnums: argnums_t = 0, has_aux: bool = False, *, randomness: str = "error"): + warn_deprecated('jacfwd') + return _impl.jacfwd(func, argnums, has_aux, randomness=randomness) + +def hessian(func, argnums=0): + warn_deprecated('hessian') + return _impl.hessian(func, argnums=argnums) + +def functionalize(func: Callable, *, remove: str = 'mutations') -> Callable: + warn_deprecated('functionalize') + return _impl.functionalize(func, remove=remove) + +def make_functional(model: nn.Module, disable_autograd_tracking: bool = False): + warn_deprecated('make_functional', 'torch.func.functional_call') + return _nn_impl.make_functional(model, disable_autograd_tracking) + +def make_functional_with_buffers(model: nn.Module, disable_autograd_tracking: bool = False): + warn_deprecated('make_functional_with_buffers', 'torch.func.functional_call') + return _nn_impl.make_functional_with_buffers(model, disable_autograd_tracking) + +def combine_state_for_ensemble(models): + warn_deprecated('combine_state_for_ensemble', 'torch.func.stack_module_state') + return _nn_impl.combine_state_for_ensemble(models) + +setup_docs(vmap, apis.vmap, 'torch.vmap') +setup_docs(grad, apis.grad) +setup_docs(grad_and_value, apis.grad_and_value) +setup_docs(vjp) +setup_docs(jvp) +setup_docs(jacrev) +setup_docs(jacfwd) +setup_docs(hessian) +setup_docs(functionalize) +setup_docs(make_functional, _nn_impl.make_functional, + 'torch.func.functional_call') +setup_docs(make_functional_with_buffers, _nn_impl.make_functional, + 'torch.func.functional_call') +setup_docs(combine_state_for_ensemble, _nn_impl.combine_state_for_ensemble, + 'torch.func.stack_module_state') diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/eager_transforms.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/eager_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..c7aad808b51a83f5dd0dca44770311fc2062266c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/eager_transforms.py @@ -0,0 +1,1640 @@ +# mypy: ignore-errors + +# Copyright (c) Facebook, Inc. 
and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Callable, Union, Tuple, List, Any, Optional +import torch +from functools import partial, wraps +import contextlib +from torch.utils._pytree import ( + tree_flatten, + tree_unflatten, + tree_map, + tree_map_only, + tree_map_, + treespec_pprint, +) +from torch.utils import _pytree as pytree +from torch.fx.experimental import const_fold +from torch.fx.experimental.proxy_tensor import make_fx +import torch.autograd.forward_ad as fwAD +from torch._subclasses.functional_tensor import FunctionalTensor + +from .vmap import doesnt_support_saved_tensors_hooks, get_chunk_sizes +from .apis import vmap + +from torch._C._functorch import ( + _wrap_for_grad, + _unwrap_for_grad, + _grad_increment_nesting, + _grad_decrement_nesting, + _jvp_increment_nesting, + _jvp_decrement_nesting, + _wrap_functional_tensor, + _unwrap_functional_tensor, + _func_decrement_nesting, + _func_increment_nesting, + _assert_wrapped_functional, + _propagate_functional_input_mutation, + set_inplace_requires_grad_allowed, + get_inplace_requires_grad_allowed, +) +from torch._functorch.utils import exposed_in, argnums_t + + +def lazy_dynamo_disable(func): + import torch._dynamo + return torch._dynamo.disable(func) + +@contextlib.contextmanager +def enable_inplace_requires_grad(enabled): + prev_state = get_inplace_requires_grad_allowed() + set_inplace_requires_grad_allowed(enabled) + try: + yield + finally: + set_inplace_requires_grad_allowed(prev_state) + + +def _vjp_treespec_compare(primals_out, cotangents): + # Revert this once #116264 gets fixed + _, primals_out_spec = tree_flatten(primals_out) + _, cotangents_spec = tree_flatten(cotangents) + # Dynamo fails to trace operator.ne below. To bypass this limitation, this + # function is not inlined. + if primals_out_spec != cotangents_spec: + raise RuntimeError( + f'Expected pytree structure of cotangents to be the same ' + f'as pytree structure of outputs to the function. 
' + f'cotangents: {treespec_pprint(cotangents_spec)}, ' + f'primal output: {treespec_pprint(primals_out_spec)}') + + +def _set_tensor_requires_grad(x): + # avoid graph-break on x.requires_grad_() + # https://github.com/pytorch/pytorch/pull/110053 + return x.requires_grad_() + +def _create_differentiable(inps, level=None): + def create_differentiable(x): + if isinstance(x, torch.Tensor): + with enable_inplace_requires_grad(True): + return _set_tensor_requires_grad(x) + raise ValueError(f'Thing passed to transform API must be Tensor, ' + f'got {type(x)}') + return tree_map(create_differentiable, inps) + + +def _undo_create_differentiable(inps, level=None): + def unwrap_tensors(x): + if isinstance(x, torch.Tensor): + return _unwrap_for_grad(x, level) + # TODO: Remove the following hack for namedtuples + if isinstance(x, tuple): + return tree_map(unwrap_tensors, tuple(x)) + + raise RuntimeError(f"Expected tensors, got unsupported type {type(x)}") + + return tree_map(unwrap_tensors, inps) + + +def _is_differentiable(maybe_tensor): + if not isinstance(maybe_tensor, torch.Tensor): + return False + return maybe_tensor.requires_grad + + +def _any_differentiable(tensor_or_tuple_of_tensors): + flat_args, _ = tree_unflatten(tensor_or_tuple_of_tensors) + return any(tuple(map(_is_differentiable, flat_args))) + + +def _wrap_tensor_for_grad(maybe_tensor, level): + if not isinstance(maybe_tensor, torch.Tensor): + return maybe_tensor + return _wrap_for_grad(maybe_tensor, level) + + +def _wrap_all_tensors(tensor_pytree, level): + return tree_map(partial(_wrap_tensor_for_grad, level=level), tensor_pytree) + + +def _as_tuple(val): + if isinstance(val, tuple): + return val + return (val,) + +# Version of autograd.grad that handles outputs that don't depend on inputs + + +def _autograd_grad(outputs, inputs, grad_outputs=None, retain_graph=False, create_graph=True): + if grad_outputs is None: + diff_outputs = tuple(out for out in outputs if out.requires_grad) + else: + result = tuple((out, go) for out, go in zip(outputs, grad_outputs) if out.requires_grad) + if len(result) == 0: + diff_outputs, grad_outputs = (), () + else: + diff_outputs, grad_outputs = zip(*result) + if len(diff_outputs) == 0: + return tuple(torch.zeros_like(inp) for inp in inputs) + grad_inputs = torch.autograd.grad(diff_outputs, inputs, grad_outputs, + retain_graph=retain_graph, + create_graph=create_graph, + allow_unused=True) + grad_inputs = tuple(torch.zeros_like(inp) if gi is None else gi + for gi, inp in zip(grad_inputs, inputs)) + return grad_inputs + +# NOTE [grad and vjp interaction with no_grad] +# +# def f(x): +# with torch.no_grad(): +# c = x ** 2 +# return x - c +# +# The thing to consider is if enable_grad is on/off before grad gets called. +# +# Case 1: enable_grad is on. +# grad(f)(x) +# In this case, `grad` should respect the inner torch.no_grad. +# +# Case 2: enable_grad is off +# with torch.no_grad(): +# grad(f)(x) +# In this case, `grad` should respect the inner torch.no_grad, but not the +# outer one. This is because `grad` is a "function transform": its result +# should not depend on the result of a context manager outside of `f`. +# +# This gives us the following desired behavior: +# - (nested) grad transforms must obey torch.no_grad inside them +# - (nested) grad transforms should not obey torch.no_grad outside them +# +# To achieve this behavior, upon entering grad/vjp: +# - we save the current ("previous") is_grad_enabled (*) +# - we unconditionally enable grad. 
+# +# Inside DynamicLayerBackFallback, when we're temporarily popping `grad` layer +# off the stack: +# - if grad_mode is disabled, then we do nothing. (there is a torch.no_grad +# active, all subsequent grad transforms must obey it). +# - if grad_mode is enabled, and the previous is_grad_enabled (*) is False, +# then we temporarily restore the previous `is_grad_enabled`. This is +# because we're crossing the boundary from a `grad` outside the +# no_grad to a `grad` inside the no_grad. +# +# NB: vjp has some interesting behavior because the vjp's callable can be called +# under a different grad_mode than the forward computation... +# +# NB: forward-mode AD: forward-mode AD doesn't respect torch.no_grad, but +# it respects c10::AutoFwGradMode. We've implemented the same logic for +# our jvp transform (it will have special handling if FwGradMode is disabled). + + +# How do we increment and decrement the nesting? I don't think we can. +@exposed_in("torch.func") +def vjp(func: Callable, *primals, has_aux: bool = False): + """ + Standing for the vector-Jacobian product, returns a tuple containing the + results of ``func`` applied to ``primals`` and a function that, when + given ``cotangents``, computes the reverse-mode Jacobian of ``func`` with + respect to ``primals`` times ``cotangents``. + + Args: + func (Callable): A Python function that takes one or more arguments. Must + return one or more Tensors. + primals (Tensors): Positional arguments to ``func`` that must all be + Tensors. The returned function will also be computing the + derivative with respect to these arguments + has_aux (bool): Flag indicating that ``func`` returns a + ``(output, aux)`` tuple where the first element is the output of + the function to be differentiated and the second element is + other auxiliary objects that will not be differentiated. + Default: False. + + Returns: + Returns a ``(output, vjp_fn)`` tuple containing the output of ``func`` + applied to ``primals`` and a function that computes the vjp of + ``func`` with respect to all ``primals`` using the cotangents passed + to the returned function. If ``has_aux is True``, then instead returns a + ``(output, vjp_fn, aux)`` tuple. + The returned ``vjp_fn`` function will return a tuple of each VJP. 
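+
+        For example, with ``has_aux=True`` the auxiliary object is passed
+        through unchanged alongside the output and ``vjp_fn`` (an illustrative
+        sketch; the lambda below is arbitrary):
+
+        >>> x = torch.randn([5])
+        >>> f = lambda x: (x.sin(), {'cos': x.cos()})
+        >>> (output, vjpfunc, aux) = torch.func.vjp(f, x, has_aux=True)
+        >>> vjps = vjpfunc(torch.ones([5]))
+        >>> assert torch.allclose(vjps[0], x.cos())
+        >>> assert torch.allclose(aux['cos'], x.cos())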
+ + When used in simple cases, :func:`vjp` behaves the same as :func:`grad` + + >>> x = torch.randn([5]) + >>> f = lambda x: x.sin().sum() + >>> (_, vjpfunc) = torch.func.vjp(f, x) + >>> grad = vjpfunc(torch.tensor(1.))[0] + >>> assert torch.allclose(grad, torch.func.grad(f)(x)) + + However, :func:`vjp` can support functions with multiple outputs by + passing in the cotangents for each of the outputs + + >>> x = torch.randn([5]) + >>> f = lambda x: (x.sin(), x.cos()) + >>> (_, vjpfunc) = torch.func.vjp(f, x) + >>> vjps = vjpfunc((torch.ones([5]), torch.ones([5]))) + >>> assert torch.allclose(vjps[0], x.cos() + -x.sin()) + + :func:`vjp` can even support outputs being Python structs + + >>> x = torch.randn([5]) + >>> f = lambda x: {'first': x.sin(), 'second': x.cos()} + >>> (_, vjpfunc) = torch.func.vjp(f, x) + >>> cotangents = {'first': torch.ones([5]), 'second': torch.ones([5])} + >>> vjps = vjpfunc(cotangents) + >>> assert torch.allclose(vjps[0], x.cos() + -x.sin()) + + The function returned by :func:`vjp` will compute the partials with + respect to each of the ``primals`` + + >>> x, y = torch.randn([5, 4]), torch.randn([4, 5]) + >>> (_, vjpfunc) = torch.func.vjp(torch.matmul, x, y) + >>> cotangents = torch.randn([5, 5]) + >>> vjps = vjpfunc(cotangents) + >>> assert len(vjps) == 2 + >>> assert torch.allclose(vjps[0], torch.matmul(cotangents, y.transpose(0, 1))) + >>> assert torch.allclose(vjps[1], torch.matmul(x.transpose(0, 1), cotangents)) + + ``primals`` are the positional arguments for ``f``. All kwargs use their + default value + + >>> x = torch.randn([5]) + >>> def f(x, scale=4.): + >>> return x * scale + >>> + >>> (_, vjpfunc) = torch.func.vjp(f, x) + >>> vjps = vjpfunc(torch.ones_like(x)) + >>> assert torch.allclose(vjps[0], torch.full(x.shape, 4.)) + + .. note:: + Using PyTorch ``torch.no_grad`` together with ``vjp``. + Case 1: Using ``torch.no_grad`` inside a function: + + >>> def f(x): + >>> with torch.no_grad(): + >>> c = x ** 2 + >>> return x - c + + In this case, ``vjp(f)(x)`` will respect the inner ``torch.no_grad``. + + Case 2: Using ``vjp`` inside ``torch.no_grad`` context manager: + + >>> # xdoctest: +SKIP(failing) + >>> with torch.no_grad(): + >>> vjp(f)(x) + + In this case, ``vjp`` will respect the inner ``torch.no_grad``, but not the + outer one. This is because ``vjp`` is a "function transform": its result + should not depend on the result of a context manager outside of ``f``. + """ + return _vjp_with_argnums(func, *primals, has_aux=has_aux) + + +@contextlib.contextmanager +def grad_increment_nesting(): + try: + grad_level = _grad_increment_nesting() + yield grad_level + finally: + _grad_decrement_nesting() + + +@doesnt_support_saved_tensors_hooks +def _vjp_with_argnums(func: Callable, *primals, argnums: Optional[argnums_t] = None, has_aux: bool = False): + # This is the same function as vjp but also accepts an argnums argument + # All args are the same as vjp except for the added argument + # argnums (Optional[int or tuple[int]]): Optional, specifies the argument(s) to compute gradients with respect to. + # If None, computes the gradients with respect to all inputs (used for vjp). Default: None + # + # WARN: Users should NOT call this function directly and should just be calling vjp. + # It is only separated so that inputs passed to jacrev but not differentiated get the correct wrappers. 
+ # + # NOTE: All error messages are produced as if vjp was being called, even if this was called by jacrev + # + # Returns the same two elements as :func:`vjp` but the function returned, vjp_fn, returns a tuple of VJPs + # for only the primal elements given by argnums. + with grad_increment_nesting() as level: + # See NOTE [grad and vjp interaction with no_grad] + with torch.enable_grad(): + primals = _wrap_all_tensors(primals, level) + # Note for the reviewer: This is extremely odd but it passes the + # assertion "len(self.block_stack) == 1" on symbolic_convert.py + # The equivalent "if argnums is None" fails for some reason + if not isinstance(argnums, int) and not argnums: + diff_primals = _create_differentiable(primals, level) + else: + diff_primals = _slice_argnums(primals, argnums, as_tuple=False) + tree_map_(partial(_create_differentiable, level=level), diff_primals) + primals_out = func(*primals) + + if has_aux: + if not (isinstance(primals_out, tuple) and len(primals_out) == 2): + raise RuntimeError( + "vjp(f, *primals): output of function f should be a tuple: (output, aux) " + "if has_aux is True" + ) + primals_out, aux = primals_out + aux = _undo_create_differentiable(aux, level) + + flat_primals_out, primals_out_spec = tree_flatten(primals_out) + assert_non_empty_tensor_output(flat_primals_out, 'vjp(f, *primals)') + flat_diff_primals, primals_spec = tree_flatten(diff_primals) + results = _undo_create_differentiable(primals_out, level) + + for primal_out in flat_primals_out: + assert isinstance(primal_out, torch.Tensor) + if primal_out.is_floating_point() or primal_out.is_complex(): + continue + raise RuntimeError("vjp(f, ...): All outputs of f must be " + "floating-point or complex Tensors, got Tensor " + f"with dtype {primal_out.dtype}") + + def wrapper(cotangents, retain_graph=True, create_graph=None): + if create_graph is None: + create_graph = torch.is_grad_enabled() + flat_cotangents, cotangents_spec = tree_flatten(cotangents) + _vjp_treespec_compare(primals_out, cotangents) + result = _autograd_grad(flat_primals_out, flat_diff_primals, flat_cotangents, + retain_graph=retain_graph, create_graph=create_graph) + return tree_unflatten(result, primals_spec) + + if has_aux: + return results, wrapper, aux + else: + return results, wrapper + + +def _safe_zero_index(x): + assert len(x) == 1 + return x[0] + +# jacrev and jacfwd don't support complex functions +# Helper function to throw appropriate error. +def error_if_complex(func_name, args, is_input): + flat_args = pytree.tree_leaves(args) + for idx, arg in enumerate(flat_args): + if isinstance(arg, torch.Tensor) and arg.dtype.is_complex: + input_or_output = ("inputs" if is_input else "outputs") + err_msg = (f"{func_name}: Expected all {input_or_output} " + f"to be real but received complex tensor at flattened input idx: {idx}") + raise RuntimeError(err_msg) + +@exposed_in("torch.func") +def jacrev(func: Callable, argnums: Union[int, Tuple[int]] = 0, *, has_aux=False, + chunk_size: Optional[int] = None, + _preallocate_and_copy=False): + """ + Computes the Jacobian of ``func`` with respect to the arg(s) at index + ``argnum`` using reverse mode autodiff + + .. note:: + Using :attr:`chunk_size=1` is equivalent to computing the jacobian + row-by-row with a for-loop i.e. the constraints of :func:`vmap` are + not applicable. 
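+
+        For example, a chunked Jacobian can be requested as follows (an
+        illustrative sketch; any positive ``chunk_size`` is valid):
+
+        >>> from torch.func import jacrev
+        >>> x = torch.randn(5)
+        >>> jacobian = jacrev(torch.sin, chunk_size=2)(x)
+        >>> assert torch.allclose(jacobian, torch.diag(torch.cos(x)))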
+ + Args: + func (function): A Python function that takes one or more arguments, + one of which must be a Tensor, and returns one or more Tensors + argnums (int or Tuple[int]): Optional, integer or tuple of integers, + saying which arguments to get the Jacobian with respect to. + Default: 0. + has_aux (bool): Flag indicating that ``func`` returns a + ``(output, aux)`` tuple where the first element is the output of + the function to be differentiated and the second element is + auxiliary objects that will not be differentiated. + Default: False. + chunk_size (None or int): If None (default), use the maximum chunk size + (equivalent to doing a single vmap over vjp to compute the jacobian). + If 1, then compute the jacobian row-by-row with a for-loop. + If not None, then compute the jacobian :attr:`chunk_size` rows at a time + (equivalent to doing multiple vmap over vjp). If you run into memory issues computing + the jacobian, please try to specify a non-None chunk_size. + + Returns: + Returns a function that takes in the same inputs as ``func`` and + returns the Jacobian of ``func`` with respect to the arg(s) at + ``argnums``. If ``has_aux is True``, then the returned function + instead returns a ``(jacobian, aux)`` tuple where ``jacobian`` + is the Jacobian and ``aux`` is auxiliary objects returned by ``func``. + + A basic usage with a pointwise, unary operation will give a diagonal array + as the Jacobian + + >>> from torch.func import jacrev + >>> x = torch.randn(5) + >>> jacobian = jacrev(torch.sin)(x) + >>> expected = torch.diag(torch.cos(x)) + >>> assert torch.allclose(jacobian, expected) + + If you would like to compute the output of the function as well as the + jacobian of the function, use the ``has_aux`` flag to return the output + as an auxiliary object: + + >>> from torch.func import jacrev + >>> x = torch.randn(5) + >>> + >>> def f(x): + >>> return x.sin() + >>> + >>> def g(x): + >>> result = f(x) + >>> return result, result + >>> + >>> jacobian_f, f_x = jacrev(g, has_aux=True)(x) + >>> assert torch.allclose(f_x, f(x)) + + :func:`jacrev` can be composed with vmap to produce batched + Jacobians: + + >>> from torch.func import jacrev, vmap + >>> x = torch.randn(64, 5) + >>> jacobian = vmap(jacrev(torch.sin))(x) + >>> assert jacobian.shape == (64, 5, 5) + + Additionally, :func:`jacrev` can be composed with itself to produce + Hessians + + >>> from torch.func import jacrev + >>> def f(x): + >>> return x.sin().sum() + >>> + >>> x = torch.randn(5) + >>> hessian = jacrev(jacrev(f))(x) + >>> assert torch.allclose(hessian, torch.diag(-x.sin())) + + By default, :func:`jacrev` computes the Jacobian with respect to the first + input. However, it can compute the Jacboian with respect to a different + argument by using ``argnums``: + + >>> from torch.func import jacrev + >>> def f(x, y): + >>> return x + y ** 2 + >>> + >>> x, y = torch.randn(5), torch.randn(5) + >>> jacobian = jacrev(f, argnums=1)(x, y) + >>> expected = torch.diag(2 * y) + >>> assert torch.allclose(jacobian, expected) + + Additionally, passing a tuple to ``argnums`` will compute the Jacobian + with respect to multiple arguments + + >>> from torch.func import jacrev + >>> def f(x, y): + >>> return x + y ** 2 + >>> + >>> x, y = torch.randn(5), torch.randn(5) + >>> jacobian = jacrev(f, argnums=(0, 1))(x, y) + >>> expectedX = torch.diag(torch.ones_like(x)) + >>> expectedY = torch.diag(2 * y) + >>> assert torch.allclose(jacobian[0], expectedX) + >>> assert torch.allclose(jacobian[1], expectedY) + + .. 
note:: + Using PyTorch ``torch.no_grad`` together with ``jacrev``. + Case 1: Using ``torch.no_grad`` inside a function: + + >>> def f(x): + >>> with torch.no_grad(): + >>> c = x ** 2 + >>> return x - c + + In this case, ``jacrev(f)(x)`` will respect the inner ``torch.no_grad``. + + Case 2: Using ``jacrev`` inside ``torch.no_grad`` context manager: + + >>> with torch.no_grad(): + >>> jacrev(f)(x) + + In this case, ``jacrev`` will respect the inner ``torch.no_grad``, but not the + outer one. This is because ``jacrev`` is a "function transform": its result + should not depend on the result of a context manager outside of ``f``. + """ + if not (chunk_size is None or chunk_size > 0): + raise ValueError("jacrev: `chunk_size` should be greater than 0.") + + @wraps(func) + def wrapper_fn(*args): + error_if_complex("jacrev", args, is_input=True) + vjp_out = _vjp_with_argnums(func, *args, argnums=argnums, has_aux=has_aux) + if has_aux: + output, vjp_fn, aux = vjp_out + else: + output, vjp_fn = vjp_out + + # See NOTE: [Computing jacobian with vmap and vjp for multiple outputs] + flat_output, output_spec = tree_flatten(output) + + error_if_complex("jacrev", flat_output, is_input=False) + + # NB: vjp already checks that all outputs are tensors + # Step 1: Construct grad_outputs by splitting the standard basis + flat_output_numels = tuple(out.numel() for out in flat_output) + + primals = _slice_argnums(args, argnums) + flat_primals, primals_spec = tree_flatten(primals) + + def compute_jacobian_stacked(): + # Helper function to compute chunked Jacobian + # The intermediate chunked calculation are only + # scoped at this function level. + chunked_results = [] + for flat_basis_chunk in _chunked_standard_basis_for_(flat_output, + flat_output_numels, + chunk_size=chunk_size): + if chunk_size == 1: + # sanity check. + for t in flat_basis_chunk: + assert t.size(0) == 1 + + flat_basis_chunk = tree_map(lambda t: torch.squeeze(t, 0), flat_basis_chunk) + + basis = tree_unflatten(flat_basis_chunk, output_spec) + + if chunk_size == 1: + # Behaviour with `chunk_size=1` is same as `for-loop` + # i.e. user shouldn't deal with the limitations of vmap. + chunked_result = vjp_fn(basis) + else: # chunk_size is None or chunk_size != 1 + chunked_result = vmap(vjp_fn)(basis) + + flat_results = pytree.tree_leaves(chunked_result) + + if chunk_size == 1: + flat_results = tree_map(lambda t: torch.unsqueeze(t, 0), flat_results) + + chunked_results.append(flat_results) + + if len(chunked_results) == 1: + # Short-circuit if we used a single chunk + return chunked_results[0] + + # Concatenate chunks. + flat_results = [] + # Iterate and concat the jacobians of different + # inputs. + for idx in range(len(flat_primals)): + r = tuple(r_[idx] for r_ in chunked_results) + flat_results.append(torch.cat(r, 0)) + + return flat_results + + def compute_jacobian_preallocate_and_copy(): + # Helper function to compute chunked Jacobian + # The intermediate chunked calculation are only + # scoped at this function level. + out_vec_size = sum(flat_output_numels) + + # Don't pre-allocate if we have a single chunk. + if not (chunk_size is None or chunk_size >= out_vec_size): + stacked_results = [primal.new_zeros(out_vec_size, *primal.shape) for primal in flat_primals] + + for idx, flat_basis_chunk in enumerate(_chunked_standard_basis_for_(flat_output, + flat_output_numels, + chunk_size=chunk_size)): + if chunk_size == 1: + # sanity check. 
+ for t in flat_basis_chunk: + assert t.size(0) == 1 + + flat_basis_chunk = [torch.squeeze(t, 0) for t in flat_basis_chunk] + + basis = tree_unflatten(flat_basis_chunk, output_spec) + + if chunk_size == 1: + # Behaviour with `chunk_size=1` is same as `for-loop` + # i.e. user shouldn't deal with the limitations of vmap. + chunked_result = vjp_fn(basis) + else: # chunk_size is None or chunk_size != 1 + chunked_result = vmap(vjp_fn)(basis) + + flat_results = pytree.tree_leaves(chunked_result) + + # Short-circuit if we have a single chunk. + if chunk_size is None or chunk_size >= out_vec_size: + if chunk_size == 1: # and out_vec_size == 1 + # Since we squeezed the output dim + flat_results = tree_map(lambda t: torch.unsqueeze(t, 0), flat_results) + return flat_results + + for r, sr in zip(flat_results, stacked_results): + sr[idx * chunk_size: (idx + 1) * chunk_size].copy_(r) + + return stacked_results + + if _preallocate_and_copy: + flat_jacobians_per_input = compute_jacobian_preallocate_and_copy() + else: + flat_jacobians_per_input = compute_jacobian_stacked() + + # Step 2: The returned jacobian is one big tensor per input. In this step, + # we split each Tensor by output. + flat_jacobians_per_input = [result.split(flat_output_numels, dim=0) for result in flat_jacobians_per_input] + flat_input_flat_output = [ + tuple(split.view(out.shape + primal.shape) + for split, out in zip(splits, flat_output)) + for splits, primal in zip(flat_jacobians_per_input, flat_primals) + ] + + # Step 3: Right now, `jacobian` is a List[List[Tensor]]. + # The outer List corresponds to the number of primals, + # the inner List corresponds to the number of outputs. + # We need to: + # a. Exchange the order of the outer List and inner List + # b. tree_unflatten the inner Lists (which correspond to the primals) + # c. handle the argnums=int case + # d. tree_unflatten the outer List (which corresponds to the outputs) + flat_output_flat_input = tuple(zip(*flat_input_flat_output)) + + flat_output_input = tuple(tree_unflatten(flat_input, primals_spec) + for flat_input in flat_output_flat_input) + + if isinstance(argnums, int): + flat_output_input = tuple(_safe_zero_index(flat_input) + for flat_input in flat_output_input) + output_input = tree_unflatten(flat_output_input, output_spec) + if has_aux: + return output_input, aux + return output_input + return wrapper_fn + +# NOTE: [Computing jacobian with vmap and vjp for multiple outputs] +# +# Let's consider f(x) = (x**2, x.sum()) and let x = torch.randn(3). +# It turns out we can compute the jacobian of this function with a single +# call to autograd.grad by using vmap over the correct grad_outputs. +# +# Firstly, one way to compute the jacobian is to stack x**2 and x.sum() +# into a 4D vector. E.g., use g(x) = torch.stack([x**2, x.sum()]) +# +# To get the first row of the jacobian, we call +# >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([1, 0, 0, 0])) +# To get the 2nd row of the jacobian, we call +# >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([0, 1, 0, 0])) +# and so on. +# +# Using vmap, we can vectorize all 4 of these computations into one by +# passing the standard basis for R^4 as the grad_output. +# vmap(partial(autograd.grad, g(x), x))(torch.eye(4)). +# +# Now, how do we compute the jacobian *without stacking the output*? +# We can just split the standard basis across the outputs. 
So to +# compute the jacobian of f(x), we'd use +# >>> autograd.grad(f(x), x, grad_outputs=_construct_standard_basis_for(...)) +# The grad_outputs looks like the following: +# ( torch.tensor([[1, 0, 0], +# [0, 1, 0], +# [0, 0, 1], +# [0, 0, 0]]), +# torch.tensor([[0], +# [0], +# [0], +# [1]]) ) +# +# But we're not done yet! +# >>> vmap(partial(autograd.grad(f(x), x, grad_outputs=...))) +# returns a Tensor of shape [4, 3]. We have to remember to split the +# jacobian of shape [4, 3] into two: +# - one of shape [3, 3] for the first output +# - one of shape [ 3] for the second output + + +def _chunked_standard_basis_for_(tensors, tensor_numels, chunk_size=None): + # This function: + # - constructs a N=sum(tensor_numels) standard basis. i.e. an NxN identity matrix. + # - Splits the identity matrix into chunks with each chunk size determined by `tensor_numels`. + # - Each chunk corresponds to one tensor. The chunk has the same dtype and + # device as the tensor + # + # For example, with tensor_numels = [1, 2, 1], this function returns: + # ( tensor([[1], tensor([[0, 0], tensor([[0], + # [0], [1, 0], [0], + # [0], [0, 1], [0], + # [0]]) , [0, 0]]) , [1]]) ) + # + # Precondition: tensor_numels == tuple(tensor.numel() for tensor in tensors) + # Precondition: tensors always has at least one element. + # + # See NOTE: [Computing jacobian with vmap and grad for multiple tensors] + # for context behind this function. + # NOTE: Argument `chunk_size` is used to generate chunked basis instead of + # one huge basis matrix. `chunk_size` dictates the maximum size of the + # basis matrix along dim=0. + assert len(tensors) == len(tensor_numels) + assert len(tensors) > 0 + assert chunk_size is None or chunk_size > 0 + total_numel = sum(tensor_numels) + if chunk_size and chunk_size < total_numel: + chunk_numels = get_chunk_sizes(total_numel, chunk_size) + else: # chunk_size is None or chunk_size >= total_numel + chunk_size = total_numel + chunk_numels = [total_numel] + + diag_start_indices = (0, *torch.tensor(tensor_numels).cumsum(dim=0)[:-1].neg().unbind()) + + for chunk_idx, total_numel in enumerate(chunk_numels): + chunks = tuple(tensor.new_zeros(total_numel, tensor_numel) + for tensor, tensor_numel in zip(tensors, tensor_numels)) + + for chunk, diag_start_idx in zip(chunks, diag_start_indices): + chunk.diagonal(diag_start_idx + chunk_idx * chunk_size).fill_(1) + chunks = tuple(chunk.view(total_numel, *tensor.shape) + for chunk, tensor in zip(chunks, tensors)) + yield chunks + +def _construct_standard_basis_for(tensors, tensor_numels): + for basis in _chunked_standard_basis_for_(tensors, tensor_numels, chunk_size=None): + return basis + + +def _validate_and_wrap_argnum(argnum, num_args): + if not isinstance(argnum, int): + raise RuntimeError(f'argnum must be int, got: {type(argnum)}') + if argnum >= 0 and argnum < num_args: + return argnum + if argnum < 0 and argnum >= -num_args: + return argnum + num_args + raise RuntimeError(f'Got argnum={argnum}, but only {num_args} positional inputs') + + +def _check_unique_non_empty(argnums): + if isinstance(argnums, tuple): + if len(argnums) == 0: + raise RuntimeError("argnums must be non-empty") + if len(set(argnums)) != len(argnums): + raise RuntimeError(f"argnums elements must be unique, got {argnums}") + + +def _replace_args(old_args, new_args, argnums): + if isinstance(argnums, int): + if len(new_args) != 1: + raise RuntimeError(f'new_args should be of size 1, was of size {len(new_args)}') + return tuple(new_args[0] if i == argnums else old_args[i] for i in 
range(len(old_args))) + if isinstance(argnums, tuple): + if len(new_args) != len(argnums): + raise RuntimeError( + "new_args should have the same size as argnums. " + f"Argnums size {len(argnums)}, new_args size {len(new_args)}") + + def get_right_elem(i): + return new_args[argnums.index(i)] if i in argnums else old_args[i] + + return tuple(get_right_elem(i) for i in range(len(old_args))) + raise RuntimeError(f'argnums must be int or Tuple[int, ...], got: {type(argnums)}') + + +def _validate_and_wrap_argnums(argnums, num_args): + if isinstance(argnums, int): + return _validate_and_wrap_argnum(argnums, num_args) + if isinstance(argnums, tuple): + return tuple(_validate_and_wrap_argnum(argnum, num_args) for argnum in argnums) + raise AssertionError("Should never get here") + + +def _slice_argnums(args, argnums, as_tuple=True): + if not isinstance(argnums, int) and not isinstance(argnums, tuple): + raise RuntimeError(f'argnums must be int or Tuple[int, ...], got: {type(argnums)}') + argnums = _validate_and_wrap_argnums(argnums, len(args)) + _check_unique_non_empty(argnums) + if isinstance(argnums, int): + if as_tuple: + return (args[argnums],) + else: + return args[argnums] + return tuple(args[i] for i in argnums) + + +JVP_NESTING = 0 + + +@contextlib.contextmanager +def noop(): + yield + + +def assert_flat_tuple_of_tensors(elts: Any, api: str, argname: str) -> None: + if not isinstance(elts, tuple): + raise RuntimeError( + f'{api}: Expected {argname} to be a tuple of Tensors, got {type(elts)}') + for elt in elts: + if isinstance(elt, torch.Tensor): + continue + raise RuntimeError( + f'{api}: Expected {argname} to be a tuple of Tensors, got ' + f'a tuple with an element of type {type(elt)}') + if len(elts) == 0: + raise RuntimeError( + f'{api}: Expected {argname} to be a non-empty tuple of Tensors.') + + +def assert_non_empty_tensor_output(output: List[Any], api: str) -> None: + if (len(output) == 1 and output[0] is None) or len(output) < 1: + raise RuntimeError( + f'{api}: Expected f to be a function that has non-empty output (got output = {output})' + ) + for o in output: + if not isinstance(o, torch.Tensor): + raise RuntimeError( + f'{api}: expected f(*primals) to return only tensors' + f', got unsupported type {type(o)}' + ) + + +def assert_output_is_tensor_or_tensors(output: Any, api: str) -> None: + if isinstance(output, torch.Tensor): + return + if not isinstance(output, tuple): + raise RuntimeError( + f'{api}: Expected output of f to be a Tensor or Tensors, got ' + f'{type(output)}') + if len(output) == 0: + raise RuntimeError( + f'{api}: Expected output of f to be a non-empty tuple of Tensors.') + for out in output: + if isinstance(out, torch.Tensor): + continue + raise RuntimeError( + f'{api}: Expected output of f to be a Tensor or Tensors, got ' + f'{type(out)} as an output') + + +def assert_non_empty_list_of_tensors(output: List[torch.Tensor], api: str, argname: str) -> None: + if len(output) == 0: + raise RuntimeError( + f'{api}: Expected {argname} to contain at least one Tensor.') + for out in output: + if isinstance(out, torch.Tensor): + continue + raise RuntimeError( + f'{api}: Expected {argname} to only contain Tensors, got ' + f'{type(out)}') + + +jvp_str = 'jvp(f, primals, tangents)' + + +def safe_unpack_dual(dual, strict): + if not isinstance(dual, torch.Tensor): + raise RuntimeError( + f'{jvp_str}: expected f(*args) to return only tensors' + f', got unsupported type {type(dual)}' + ) + + primal, tangent = fwAD.unpack_dual(dual) + if tangent is None: + if strict: + raise 
RuntimeError( + 'jvp(f, primals, tangents, strict=True): ' + 'The output of f is independent of ' + 'the inputs. This is not allowed with strict=True.') + tangent = torch.zeros_like(primal) + return primal, tangent + + +@exposed_in("torch.func") +def jvp(func: Callable, primals: Any, tangents: Any, *, strict: bool = False, has_aux: bool = False): + """ + Standing for the Jacobian-vector product, returns a tuple containing + the output of `func(*primals)` and the "Jacobian of ``func`` evaluated at + ``primals``" times ``tangents``. This is also known as forward-mode autodiff. + + Args: + func (function): A Python function that takes one or more arguments, + one of which must be a Tensor, and returns one or more Tensors + primals (Tensors): Positional arguments to ``func`` that must all be + Tensors. The returned function will also be computing the + derivative with respect to these arguments + tangents (Tensors): The "vector" for which Jacobian-vector-product is + computed. Must be the same structure and sizes as the inputs to + ``func``. + has_aux (bool): Flag indicating that ``func`` returns a + ``(output, aux)`` tuple where the first element is the output of + the function to be differentiated and the second element is + other auxiliary objects that will not be differentiated. + Default: False. + + Returns: + Returns a ``(output, jvp_out)`` tuple containing the output of ``func`` + evaluated at ``primals`` and the Jacobian-vector product. + If ``has_aux is True``, then instead returns a ``(output, jvp_out, aux)`` tuple. + + .. note:: + You may see this API error out with "forward-mode AD not implemented + for operator X". If so, please file a bug report and we will prioritize it. + + jvp is useful when you wish to compute gradients of a function R^1 -> R^N + + >>> from torch.func import jvp + >>> x = torch.randn([]) + >>> f = lambda x: x * torch.tensor([1., 2., 3]) + >>> value, grad = jvp(f, (x,), (torch.tensor(1.),)) + >>> assert torch.allclose(value, f(x)) + >>> assert torch.allclose(grad, torch.tensor([1., 2, 3])) + + :func:`jvp` can support functions with multiple inputs by passing in the + tangents for each of the inputs + + >>> from torch.func import jvp + >>> x = torch.randn(5) + >>> y = torch.randn(5) + >>> f = lambda x, y: (x * y) + >>> _, output = jvp(f, (x, y), (torch.ones(5), torch.ones(5))) + >>> assert torch.allclose(output, x + y) + + """ + + return _jvp_with_argnums(func, primals, tangents, argnums=None, strict=strict, has_aux=has_aux) + + +@doesnt_support_saved_tensors_hooks +def _jvp_with_argnums(func: Callable, primals: Any, tangents: Any, argnums: Optional[argnums_t], *, + strict: bool = False, has_aux: bool): + # This is the same function as jvp but also accepts an argnums argument + # Most args are the same as jvp except for the added argument + # argnums (Optional[int or tuple[int]]): Optional, specifies the argument(s) to compute gradients with respect to. + # If None, computes the gradients with respect to all inputs (used for jvp). Default: None + # Because of this, tangents must be of length argnums and matches up to the corresponding primal whose index is + # given by argnums + # + # WARN: Users should NOT call this function directly and should just be calling jvp. + # It is only separated so that inputs passed to jacfwd but not differentiated get the correct wrappers. 
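+    # For example (illustrative values, not from the source): with primals = (x, y, z)
+    # and argnums = (0, 2), `tangents` is expected to be a 2-tuple (t_x, t_z) lining up
+    # with x and z; the non-selected primal y is still wrapped for the jvp level, but no
+    # dual/tangent is attached to it.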
+ # + # NOTE: All error messages are produced as if jvp was being called, even if this was called by jacfwd + # + # Returns the same two elements as :func:`jvp` but the returned tuple, ``jvp_out``, only has JVPs with respect to + # the primals given by argnums + if not isinstance(primals, tuple): + raise RuntimeError( + f'{jvp_str}: Expected primals to be a tuple. ' + f'E.g. it should be valid to call f(*primals).') + diff_args = primals if argnums is None else _slice_argnums(primals, argnums) + flat_primals, primals_spec = tree_flatten(diff_args) + flat_tangents, tangents_spec = tree_flatten(tangents) + if primals_spec != tangents_spec: + raise RuntimeError( + f'{jvp_str}: Expected primals and tangents to have the same python ' + f'structure. For example, if primals is a tuple of 3 tensors, ' + f'tangents also must be. Got primals with structure {primals_spec} ' + f'and tangents with structure {tangents_spec}') + assert_non_empty_list_of_tensors(flat_primals, jvp_str, 'primals') + assert_non_empty_list_of_tensors(flat_tangents, jvp_str, 'tangents') + + level = _jvp_increment_nesting() + try: + global JVP_NESTING + JVP_NESTING += 1 + with fwAD._set_fwd_grad_enabled(True): + ctx = fwAD.dual_level if JVP_NESTING == 1 else noop + with ctx(): + flat_duals = tuple(fwAD.make_dual(p, t) + for p, t in zip(flat_primals, flat_tangents)) + duals = tree_unflatten(flat_duals, primals_spec) + if argnums is not None: + primals = _wrap_all_tensors(primals, level) + duals = _replace_args(primals, duals, argnums) + result_duals = func(*duals) + if has_aux: + if not (isinstance(result_duals, tuple) and len(result_duals) == 2): + raise RuntimeError( + f"{jvp_str}: output of function f should be a tuple: (output, aux) " + "if has_aux is True" + ) + result_duals, aux = result_duals + aux = _undo_create_differentiable(aux, level) + + result_duals, spec = tree_flatten(result_duals) + assert_non_empty_tensor_output(result_duals, jvp_str) + + primals_out, tangents_out = \ + zip(*[safe_unpack_dual(dual, strict) for dual in result_duals]) + primals_out = tree_map( + partial(_undo_create_differentiable, level=level), primals_out) + tangents_out = tree_map( + partial(_undo_create_differentiable, level=level), tangents_out) + + primals_out_unflatten = tree_unflatten(primals_out, spec) + tangents_out_unflatten = tree_unflatten(tangents_out, spec) + if has_aux: + return primals_out_unflatten, tangents_out_unflatten, aux + + return primals_out_unflatten, tangents_out_unflatten + finally: + _jvp_decrement_nesting() + JVP_NESTING -= 1 + + +def safe_unflatten(tensor, dim, shape): + if len(shape) == 0: + assert tensor.shape[dim] == 1 + return tensor.squeeze(dim) + return tensor.unflatten(dim, shape) + + +@exposed_in("torch.func") +def jacfwd(func: Callable, argnums: argnums_t = 0, has_aux: bool = False, *, randomness: str = "error"): + """ + Computes the Jacobian of ``func`` with respect to the arg(s) at index + ``argnum`` using forward-mode autodiff + + Args: + func (function): A Python function that takes one or more arguments, + one of which must be a Tensor, and returns one or more Tensors + argnums (int or Tuple[int]): Optional, integer or tuple of integers, + saying which arguments to get the Jacobian with respect to. + Default: 0. + has_aux (bool): Flag indicating that ``func`` returns a + ``(output, aux)`` tuple where the first element is the output of + the function to be differentiated and the second element is + auxiliary objects that will not be differentiated. + Default: False. 
+ randomness(str): Flag indicating what type of randomness to use. + See :func:`vmap` for more detail. Allowed: "different", "same", "error". + Default: "error" + + Returns: + Returns a function that takes in the same inputs as ``func`` and + returns the Jacobian of ``func`` with respect to the arg(s) at + ``argnums``. If ``has_aux is True``, then the returned function + instead returns a ``(jacobian, aux)`` tuple where ``jacobian`` + is the Jacobian and ``aux`` is auxiliary objects returned by ``func``. + + .. note:: + You may see this API error out with "forward-mode AD not implemented + for operator X". If so, please file a bug report and we will prioritize it. + An alternative is to use :func:`jacrev`, which has better operator coverage. + + A basic usage with a pointwise, unary operation will give a diagonal array + as the Jacobian + + >>> from torch.func import jacfwd + >>> x = torch.randn(5) + >>> jacobian = jacfwd(torch.sin)(x) + >>> expected = torch.diag(torch.cos(x)) + >>> assert torch.allclose(jacobian, expected) + + :func:`jacfwd` can be composed with vmap to produce batched + Jacobians: + + >>> from torch.func import jacfwd, vmap + >>> x = torch.randn(64, 5) + >>> jacobian = vmap(jacfwd(torch.sin))(x) + >>> assert jacobian.shape == (64, 5, 5) + + If you would like to compute the output of the function as well as the + jacobian of the function, use the ``has_aux`` flag to return the output + as an auxiliary object: + + >>> from torch.func import jacfwd + >>> x = torch.randn(5) + >>> + >>> def f(x): + >>> return x.sin() + >>> + >>> def g(x): + >>> result = f(x) + >>> return result, result + >>> + >>> jacobian_f, f_x = jacfwd(g, has_aux=True)(x) + >>> assert torch.allclose(f_x, f(x)) + + Additionally, :func:`jacrev` can be composed with itself or :func:`jacrev` + to produce Hessians + + >>> from torch.func import jacfwd, jacrev + >>> def f(x): + >>> return x.sin().sum() + >>> + >>> x = torch.randn(5) + >>> hessian = jacfwd(jacrev(f))(x) + >>> assert torch.allclose(hessian, torch.diag(-x.sin())) + + By default, :func:`jacfwd` computes the Jacobian with respect to the first + input. 
However, it can compute the Jacboian with respect to a different + argument by using ``argnums``: + + >>> from torch.func import jacfwd + >>> def f(x, y): + >>> return x + y ** 2 + >>> + >>> x, y = torch.randn(5), torch.randn(5) + >>> jacobian = jacfwd(f, argnums=1)(x, y) + >>> expected = torch.diag(2 * y) + >>> assert torch.allclose(jacobian, expected) + + Additionally, passing a tuple to ``argnums`` will compute the Jacobian + with respect to multiple arguments + + >>> from torch.func import jacfwd + >>> def f(x, y): + >>> return x + y ** 2 + >>> + >>> x, y = torch.randn(5), torch.randn(5) + >>> jacobian = jacfwd(f, argnums=(0, 1))(x, y) + >>> expectedX = torch.diag(torch.ones_like(x)) + >>> expectedY = torch.diag(2 * y) + >>> assert torch.allclose(jacobian[0], expectedX) + >>> assert torch.allclose(jacobian[1], expectedY) + + """ + @wraps(func) + def wrapper_fn(*args): + error_if_complex("jacfwd", args, is_input=True) + primals = args if argnums is None else _slice_argnums(args, argnums) + flat_primals, primals_spec = tree_flatten(primals) + flat_primals_numels = tuple(p.numel() for p in flat_primals) + flat_basis = _construct_standard_basis_for(flat_primals, flat_primals_numels) + basis = tree_unflatten(flat_basis, primals_spec) + + def push_jvp(basis): + output = _jvp_with_argnums(func, args, basis, argnums=argnums, has_aux=has_aux) + # output[0] is the output of `func(*args)` + error_if_complex("jacfwd", output[0], is_input=False) + if has_aux: + _, jvp_out, aux = output + return jvp_out, aux + _, jvp_out = output + return jvp_out + + results = vmap(push_jvp, randomness=randomness)(basis) + if has_aux: + results, aux = results + # aux is in the standard basis format, e.g. NxN matrix + # We need to fetch the first element as original `func` output + flat_aux, aux_spec = tree_flatten(aux) + flat_aux = [value[0] for value in flat_aux] + aux = tree_unflatten(flat_aux, aux_spec) + + jac_outs, spec = tree_flatten(results) + # Most probably below output check can never raise an error + # as jvp should test the output before + # assert_non_empty_output(jac_outs, 'jacfwd(f, ...)(*args)') + + jac_outs_ins = tuple( + tuple( + safe_unflatten(jac_out_in, -1, primal.shape) + for primal, jac_out_in in + zip(flat_primals, jac_out.movedim(0, -1).split(flat_primals_numels, dim=-1)) + ) + for jac_out in jac_outs + ) + jac_outs_ins = tuple(tree_unflatten(jac_ins, primals_spec) for jac_ins in jac_outs_ins) + + if isinstance(argnums, int): + jac_outs_ins = tuple(jac_ins[0] for jac_ins in jac_outs_ins) + if has_aux: + return tree_unflatten(jac_outs_ins, spec), aux + return tree_unflatten(jac_outs_ins, spec) + return wrapper_fn + + +@exposed_in("torch.func") +def hessian(func, argnums=0): + """ + Computes the Hessian of ``func`` with respect to the arg(s) at index + ``argnum`` via a forward-over-reverse strategy. + + The forward-over-reverse strategy (composing ``jacfwd(jacrev(func))``) is + a good default for good performance. It is possible to compute Hessians + through other compositions of :func:`jacfwd` and :func:`jacrev` like + ``jacfwd(jacfwd(func))`` or ``jacrev(jacrev(func))``. + + Args: + func (function): A Python function that takes one or more arguments, + one of which must be a Tensor, and returns one or more Tensors + argnums (int or Tuple[int]): Optional, integer or tuple of integers, + saying which arguments to get the Hessian with respect to. + Default: 0. 
+ + Returns: + Returns a function that takes in the same inputs as ``func`` and + returns the Hessian of ``func`` with respect to the arg(s) at + ``argnums``. + + .. note:: + You may see this API error out with "forward-mode AD not implemented + for operator X". If so, please file a bug report and we will prioritize it. + An alternative is to use ``jacrev(jacrev(func))``, which has better + operator coverage. + + A basic usage with a R^N -> R^1 function gives a N x N Hessian: + + >>> from torch.func import hessian + >>> def f(x): + >>> return x.sin().sum() + >>> + >>> x = torch.randn(5) + >>> hess = hessian(f)(x) # equivalent to jacfwd(jacrev(f))(x) + >>> assert torch.allclose(hess, torch.diag(-x.sin())) + + """ + return jacfwd(jacrev(func, argnums), argnums) + + +@doesnt_support_saved_tensors_hooks +def grad_and_value_impl(func, argnums, has_aux, args, kwargs) -> Callable: + with grad_increment_nesting() as level: + output, aux, grad_input = None, None, None + # See NOTE [grad and vjp interaction with no_grad] + with torch.enable_grad(): + args = _wrap_all_tensors(args, level) + kwargs = _wrap_all_tensors(kwargs, level) + diff_args = _slice_argnums(args, argnums, as_tuple=False) + tree_map_(partial(_create_differentiable, level=level), diff_args) + + output = func(*args, **kwargs) + if has_aux: + if not (isinstance(output, tuple) and len(output) == 2): + raise RuntimeError( + "grad_and_value(f)(*args): output of function f should be a tuple: (output, aux) " + "if has_aux is True" + ) + output, aux = output + + if not isinstance(output, torch.Tensor): + raise RuntimeError('grad_and_value(f)(*args): Expected f(*args) ' + f'to return a Tensor, got {type(output)}') + if output.dim() != 0: + raise RuntimeError('grad_and_value(f)(*args): Expected f(*args) ' + 'to return a scalar Tensor, got tensor with ' + f'{output.dim()} dims. 
Maybe you wanted to ' + 'use the vjp or jacrev APIs instead?') + + flat_diff_args, spec = tree_flatten(diff_args) + + # NB: need create_graph so that backward pass isn't run in no_grad mode + flat_outputs = _as_tuple(output) + flat_grad_input = _autograd_grad(flat_outputs, flat_diff_args, create_graph=True) + grad_input = tree_unflatten(flat_grad_input, spec) + + grad_input = _undo_create_differentiable(grad_input, level) + output = _undo_create_differentiable(output, level) + if has_aux: + aux = _undo_create_differentiable(aux, level) + + if has_aux: + return grad_input, (output, aux) + return grad_input, output + + +def grad_impl(func: Callable, argnums: argnums_t, has_aux: bool, args, kwargs): + results = grad_and_value_impl(func, argnums, has_aux, args, kwargs) + if has_aux: + grad, (_, aux) = results + return grad, aux + grad, _ = results + return grad + +def _maybe_wrap_functional_tensor(maybe_tensor, level, *, _python_functionalize: bool = False): + if not isinstance(maybe_tensor, torch.Tensor): + return maybe_tensor + wrapped = _wrap_functional_tensor(maybe_tensor, level) + _assert_wrapped_functional(maybe_tensor, wrapped) + if _python_functionalize: + out = FunctionalTensor(wrapped) + torch._mirror_autograd_meta_to(maybe_tensor, out) + return out + return wrapped + + +def _wrap_all_tensors_to_functional(tensor_pytree, level, *, _python_functionalize: bool = False): + return tree_map(partial(lambda x: _maybe_wrap_functional_tensor( + x, level, _python_functionalize=_python_functionalize)), tensor_pytree) + + +def _maybe_unwrap_functional_tensor(maybe_tensor, *, reapply_views: bool): + if not isinstance(maybe_tensor, torch.Tensor): + return maybe_tensor + if isinstance(maybe_tensor, FunctionalTensor): + maybe_tensor = maybe_tensor.elem + + if not torch._is_functional_tensor(maybe_tensor): + # If it's not a functional tensor, just return it. + # This can happen if we functionalize a fn that returns a global, + # which was never wrapped properly. + return maybe_tensor + # Sync any pending updates on the output tensor + torch._sync(maybe_tensor) + return _unwrap_functional_tensor(maybe_tensor, reapply_views) + + +def _unwrap_all_tensors_from_functional(tensor_pytree, *, reapply_views: bool): + return tree_map(lambda t: _maybe_unwrap_functional_tensor(t, reapply_views=reapply_views), tensor_pytree) + + +@exposed_in("torch.func") +def functionalize(func: Callable, *, remove: str = 'mutations') -> Callable: + """ + functionalize is a transform that can be used to remove (intermediate) + mutations and aliasing from a function, while preserving the function's + semantics. + + ``functionalize(func)`` returns a new function with the same semantics + as ``func``, but with all intermediate mutations removed. + Every inplace operation performed on an intermediate tensor: + ``intermediate.foo_()`` + gets replaced by its out-of-place equivalent: + ``intermediate_updated = intermediate.foo()``. + + functionalize is useful for shipping a pytorch program off to + backends or compilers that aren't able to easily represent + mutations or aliasing operators. + + Args: + func (Callable): A Python function that takes one or more arguments. + remove (str): An optional string argument, that takes on either + the value 'mutations' or 'mutations_and_views'. + If 'mutations' is passed in then all mutating operators + will be replaced with their non-mutating equivalents. + If 'mutations_and_views' is passed in, then additionally, all aliasing + operators will be replaced with their non-aliasing equivalents. 
+ Default: 'mutations'. + + Returns: + Returns a new "functionalized" function. It takes the same inputs as + ``func``, and has the same behavior, but any mutations + (and optionally aliasing) performed on intermediate tensors + in the function will be removed. + + functionalize will also remove mutations (and views) that were performed on function inputs. + However to preserve semantics, functionalize will "fix up" the mutations after + the transform has finished running, by detecting if any tensor inputs "should have" + been mutated, and copying the new data back to the inputs if necessary. + + + Example:: + + >>> # xdoctest: +SKIP + >>> import torch + >>> from torch.fx.experimental.proxy_tensor import make_fx + >>> from torch.func import functionalize + >>> + >>> # A function that uses mutations and views, but only on intermediate tensors. + >>> def f(a): + ... b = a + 1 + ... c = b.view(-1) + ... c.add_(1) + ... return b + ... + >>> inpt = torch.randn(2) + >>> + >>> out1 = f(inpt) + >>> out2 = functionalize(f)(inpt) + >>> + >>> # semantics are the same (outputs are equivalent) + >>> print(torch.allclose(out1, out2)) + True + >>> + >>> f_traced = make_fx(f)(inpt) + >>> f_no_mutations_traced = make_fx(functionalize(f))(inpt) + >>> f_no_mutations_and_views_traced = make_fx(functionalize(f, remove='mutations_and_views'))(inpt) + >>> + >>> print(f_traced.code) + + + + def forward(self, a_1): + add = torch.ops.aten.add(a_1, 1); a_1 = None + view = torch.ops.aten.view(add, [-1]) + add_ = torch.ops.aten.add_(view, 1); view = None + return add + + >>> print(f_no_mutations_traced.code) + + + + def forward(self, a_1): + add = torch.ops.aten.add(a_1, 1); a_1 = None + view = torch.ops.aten.view(add, [-1]); add = None + add_1 = torch.ops.aten.add(view, 1); view = None + view_1 = torch.ops.aten.view(add_1, [2]); add_1 = None + return view_1 + + >>> print(f_no_mutations_and_views_traced.code) + + + + def forward(self, a_1): + add = torch.ops.aten.add(a_1, 1); a_1 = None + view_copy = torch.ops.aten.view_copy(add, [-1]); add = None + add_1 = torch.ops.aten.add(view_copy, 1); view_copy = None + view_copy_1 = torch.ops.aten.view_copy(add_1, [2]); add_1 = None + return view_copy_1 + + + >>> # A function that mutates its input tensor + >>> def f(a): + ... b = a.view(-1) + ... b.add_(1) + ... return a + ... + >>> f_no_mutations_and_views_traced = make_fx(functionalize(f, remove='mutations_and_views'))(inpt) + >>> # + >>> # All mutations and views have been removed, + >>> # but there is an extra copy_ in the graph to correctly apply the mutation to the input + >>> # after the function has completed. + >>> print(f_no_mutations_and_views_traced.code) + + + + def forward(self, a_1): + view_copy = torch.ops.aten.view_copy(a_1, [-1]) + add = torch.ops.aten.add(view_copy, 1); view_copy = None + view_copy_1 = torch.ops.aten.view_copy(add, [2]); add = None + copy_ = torch.ops.aten.copy_(a_1, view_copy_1); a_1 = None + return view_copy_1 + + + There are a few "failure modes" for functionalize that are worth calling out: + (1) Like other torch.func transforms, `functionalize()` doesn't work with functions + that directly use `.backward()`. The same is true for torch.autograd.grad. + If you want to use autograd, you can compute gradients directly + with `functionalize(grad(f))`. + (2) Like other torch.func transforms, `functionalize()` doesn't work with global state. 
+ If you call `functionalize(f)` on a function that takes views / mutations of + non-local state, functionalization will simply no-op and pass the view/mutation + calls directly to the backend. + One way to work around this is is to ensure that any non-local state creation + is wrapped into a larger function, which you then call functionalize on. + (3) `resize_()` has some limitations: functionalize will only work on programs + that use resize_()` as long as the tensor being resized is not a view. + (4) `as_strided()` has some limitations: functionalize will not work on + `as_strided()` calls that result in tensors with overlapping memory. + + + Finally, a helpful mental model for understanding functionalization is that + most user pytorch programs are writing with the public torch API. + When executed, torch operators are generally decomposed into + our internal C++ "ATen" API. + The logic for functionalization happens entirely at the level of ATen. + Functionalization knows how to take every aliasing operator in ATen, + and map it to its non-aliasing equivalent + (e.g. ``tensor.view({-1})`` -> ``at::view_copy(tensor, {-1})``), + and how to take every mutating operator in ATen, + and map it to its non-mutating equivalent + (e.g. ``tensor.add_(1)`` -> ``at::add(tensor, -1)``), + while tracking aliases and mutations out-of-line to know when to fix things up. + Information about which ATen operators are aliasing or mutating all comes from + https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/native_functions.yaml. + """ + if remove == 'mutations': + reapply_views = True + elif remove == 'mutations_and_views': + reapply_views = False + else: + raise RuntimeError( + f"functionalize(f, remove='mutations'): received invalid argument for remove={remove}." + " Valid options are:\n" + " remove='mutations': all inplace and out= operators will be removed from the program, and replaced" + " with their out-of-place equivalents.\n" + " remove='mutations_and_views': In addition to the above, all aliasing operators {view} will be" + " replaced with their non-aliasing counterparts, {view}_copy.\n" + ) + + @doesnt_support_saved_tensors_hooks + @wraps(func) + def wrapped(*args, **kwargs): + try: + func_level = _func_increment_nesting(reapply_views) + func_args = _wrap_all_tensors_to_functional(args, func_level) + func_kwargs = _wrap_all_tensors_to_functional(kwargs, func_level) + + flattened_unwrapped_args = pytree.arg_tree_leaves(*args) + flattened_wrapped_args = pytree.arg_tree_leaves(*func_args) + flattened_unwrapped_kwargs = pytree.arg_tree_leaves(**kwargs) + flattened_wrapped_kwargs = pytree.arg_tree_leaves(**func_kwargs) + + func_outputs = func(*func_args, **func_kwargs) + outputs = _unwrap_all_tensors_from_functional(func_outputs, reapply_views=reapply_views) + flat_outputs, func_out_spec = tree_flatten(outputs) + + for a in flattened_wrapped_args + flattened_wrapped_kwargs: + if isinstance(a, torch.Tensor): + # Call sync_() on the inputs, to ensure that any pending mutations have been applied. + torch._sync(a) + + # And if any mutations were applied to the inputs, we need to propagate them back to the user. 
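+            # (Concretely: copy any updated values from the functional wrappers back onto
+            # the original input tensors, for positional and keyword arguments alike; this
+            # is the "fix up" of input mutations described in the docstring above.)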
+ for unwrapped, wrapped in zip(flattened_unwrapped_args, flattened_wrapped_args): + if isinstance(unwrapped, torch.Tensor) and isinstance(wrapped, torch.Tensor): + _propagate_functional_input_mutation(unwrapped, wrapped) + for unwrapped, wrapped in zip(flattened_unwrapped_kwargs, flattened_wrapped_kwargs): + if isinstance(unwrapped, torch.Tensor) and isinstance(wrapped, torch.Tensor): + _propagate_functional_input_mutation(unwrapped, wrapped) + + return outputs + finally: + _func_decrement_nesting() + return wrapped + +@exposed_in("torch.func") +def linearize(func: Callable, *primals) -> Tuple[Any, Callable]: + ''' + Returns the value of ``func`` at ``primals`` and linear approximation + at ``primals``. + + Args: + func (Callable): A Python function that takes one or more arguments. + primals (Tensors): Positional arguments to ``func`` that must all be + Tensors. These are the values at which the function is linearly approximated. + + Returns: + Returns a ``(output, jvp_fn)`` tuple containing the output of ``func`` + applied to ``primals`` and a function that computes the jvp of + ``func`` evaluated at ``primals``. + + linearize is useful if jvp is to be computed multiple times at ``primals``. However, + to achieve this, linearize saves intermediate computation and has higher memory requirements + than directly applying `jvp`. So, if all the ``tangents`` are known, it maybe more efficient + to compute vmap(jvp) instead of using linearize. + + .. note:: + linearize evaluates ``func`` twice. Please file an issue for an implementation + with a single evaluation. + + Example:: + >>> import torch + >>> from torch.func import linearize + >>> def fn(x): + ... return x.sin() + ... + >>> output, jvp_fn = linearize(fn, torch.zeros(3, 3)) + >>> jvp_fn(torch.ones(3, 3)) + tensor([[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]]) + >>> + + ''' + # Note: We evaluate `fn` twice. + # Once for returning the output and other while + # tracing the graph. + # If this becomes a bottle-neck, we should update + # make_fx such that it also returns the output. + + output = func(*primals) + _, output_spec = tree_flatten(output) + + flat_primals, primals_argspec = tree_flatten(primals) + + # tangents for tracing + flat_tangents = tuple(p.new_empty(()).expand_as(p) for p in flat_primals) + + # function to trace + def trace_fn(flat_tangents): + with fwAD.dual_level(): + flat_duals = tuple(fwAD.make_dual(p, t) for p, t in zip(flat_primals, flat_tangents)) + duals = tree_unflatten(flat_duals, primals_argspec) + output = func(*duals) + tangents = tree_map_only(torch.Tensor, lambda t: fwAD.unpack_dual(t)[1], output) + + return tangents + + jvp_graph = make_fx(trace_fn)(flat_tangents) + const_folded_jvp_graph = const_fold.split_const_subgraphs(jvp_graph) + + # Hold only the meta-data regarding the primals. 
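+    # (Shape, device and dtype are all that `forward_ad_checks` below needs in order to
+    # validate the tangents later passed to `jvp_fn`.)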
+ flat_primals_shape = tuple(p.shape for p in flat_primals) + flat_primals_device = tuple(p.device for p in flat_primals) + flat_primals_dtype = tuple(p.dtype for p in flat_primals) + + def forward_ad_checks(flat_tangents): + for idx, t in enumerate(flat_tangents): + if t.shape != flat_primals_shape[idx]: + msg = (f"tangent:{idx} with shape {t.shape} in flattened " + f"pytree doesn't match the shape {flat_primals_shape[idx]} " + "of the corresponding primal.") + raise RuntimeError(msg) + + if t.device != flat_primals_device[idx]: + msg = (f"tangent:{idx} with device {t.device} in flattened " + f"pytree doesn't match the device {flat_primals_device[idx]} " + "of the corresponding primal.") + raise RuntimeError(msg) + + if t.dtype != flat_primals_dtype[idx]: + msg = (f"tangent:{idx} with dtype {t.dtype} in flattened " + f"pytree doesn't match the dtype {flat_primals_dtype[idx]} " + "of the corresponding primal.") + raise RuntimeError(msg) + + # jvp_fn : callable to return + # It takes care of checking the argspec of tangents, + # calling the folded fx graph and unflattening fx graph output + def jvp_fn(*tangents): + flat_tangents, tangent_argspec = tree_flatten(tangents) + if tangent_argspec != primals_argspec: + raise RuntimeError(f"Expected the tangents {tangent_argspec} to have " + f"the same argspec as the primals {primals_argspec}") + + forward_ad_checks(flat_tangents) + + flat_output = const_folded_jvp_graph(*flat_tangents) + # const folded graph can return flat output, + # so transform output. + return tree_unflatten(flat_output, output_spec) + + return output, jvp_fn diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/functional_call.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/functional_call.py new file mode 100644 index 0000000000000000000000000000000000000000..7533811ed235ba8e527306f20908faa605162228 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/functional_call.py @@ -0,0 +1,248 @@ +from collections import Counter +from typing import Any, Dict, List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +from torch import Tensor +from torch._functorch.utils import exposed_in + + +@exposed_in("torch.func") +def functional_call( + module: "torch.nn.Module", + parameter_and_buffer_dicts: Union[Dict[str, Tensor], Sequence[Dict[str, Tensor]]], + args: Union[Any, Tuple], + kwargs: Optional[Dict[str, Any]] = None, + *, + tie_weights: bool = True, + strict: bool = False, +): + r"""Performs a functional call on the module by replacing the module parameters + and buffers with the provided ones. + + .. note:: If the module has active parametrizations, passing a value in the + :attr:`parameter_and_buffer_dicts` argument with the name set to the regular parameter + name will completely disable the parametrization. + If you want to apply the parametrization function to the value passed + please set the key as ``{submodule_name}.parametrizations.{parameter_name}.original``. + + .. note:: If the module performs in-place operations on parameters/buffers, these will be reflected + in the ``parameter_and_buffer_dicts`` input. + + + Example:: + + >>> a = {'foo': torch.zeros(())} + >>> # xdoctest: +SKIP + >>> mod = Foo() # does self.foo = self.foo + 1 + >>> print(mod.foo) # tensor(0.) + >>> functional_call(mod, a, torch.ones(())) + >>> print(mod.foo) # tensor(0.) + >>> print(a['foo']) # tensor(1.) + + .. 
note:: If the module has tied weights, whether or not functional_call respects the tying is determined by the + tie_weights flag. + + Example:: + + >>> a = {'foo': torch.zeros(())} + >>> # xdoctest: +SKIP + >>> mod = Foo() # has both self.foo and self.foo_tied which are tied. Returns x + self.foo + self.foo_tied + >>> print(mod.foo) # tensor(1.) + >>> mod(torch.zeros(())) # tensor(2.) + >>> functional_call(mod, a, torch.zeros(())) # tensor(0.) since it will change self.foo_tied too + >>> functional_call(mod, a, torch.zeros(()), tie_weights=False) # tensor(1.)--self.foo_tied is not updated + >>> new_a = {'foo': torch.zeros(()), 'foo_tied': torch.zeros(())} + >>> functional_call(mod, new_a, torch.zeros()) # tensor(0.) + + An example of passing multiple dictionaries + + .. code-block:: python + + a = ({'weight': torch.ones(1, 1)}, {'buffer': torch.zeros(1)}) # two separate dictionaries + mod = nn.Bar(1, 1) # return self.weight @ x + self.buffer + print(mod.weight) # tensor(...) + print(mod.buffer) # tensor(...) + x = torch.randn((1, 1)) + print(x) + functional_call(mod, a, x) # same as x + print(mod.weight) # same as before functional_call + + + And here is an example of applying the grad transform over the parameters + of a model. + + .. code-block:: python + + import torch + import torch.nn as nn + from torch.func import functional_call, grad + + x = torch.randn(4, 3) + t = torch.randn(4, 3) + model = nn.Linear(3, 3) + + def compute_loss(params, x, t): + y = functional_call(model, params, x) + return nn.functional.mse_loss(y, t) + + grad_weights = grad(compute_loss)(dict(model.named_parameters()), x, t) + + .. note:: If the user does not need grad tracking outside of grad transforms, they can detach all of the + parameters for better performance and memory usage + + Example:: + + >>> detached_params = {k: v.detach() for k, v in model.named_parameters()} + >>> grad_weights = grad(compute_loss)(detached_params, x, t) + >>> grad_weights.grad_fn # None--it's not tracking gradients outside of grad + + This means that the user cannot call ``grad_weight.backward()``. However, if they don't need autograd tracking + outside of the transforms, this will result in less memory usage and faster speeds. + + Args: + module (torch.nn.Module): the module to call + parameters_and_buffer_dicts (Dict[str, Tensor] or tuple of Dict[str, Tensor]): the parameters that will be used in + the module call. If given a tuple of dictionaries, they must have distinct keys so that all dictionaries can + be used together + args (Any or tuple): arguments to be passed to the module call. If not a tuple, considered a single argument. + kwargs (dict): keyword arguments to be passed to the module call + tie_weights (bool, optional): If True, then parameters and buffers tied in the original model will be treated as + tied in the reparameterized version. Therefore, if True and different values are passed for the tied + parameters and buffers, it will error. If False, it will not respect the originally tied parameters and + buffers unless the values passed for both weights are the same. Default: True. + strict (bool, optional): If True, then the parameters and buffers passed in must match the parameters and + buffers in the original module. Therefore, if True and there are any missing or unexpected keys, it will + error. Default: False. + + Returns: + Any: the result of calling ``module``. 
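+
+        A minimal end-to-end sketch (illustrative; assumes a plain ``nn.Linear`` with no
+        buffers, so the state dict below is just its parameters):
+
+            >>> # xdoctest: +SKIP
+            >>> import torch
+            >>> import torch.nn as nn
+            >>> from torch.func import functional_call
+            >>> model = nn.Linear(3, 3)
+            >>> state = {**dict(model.named_parameters()), **dict(model.named_buffers())}
+            >>> x = torch.randn(2, 3)
+            >>> functional_call(model, state, (x,)).shape
+            torch.Size([2, 3])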
+ """ + if isinstance(parameter_and_buffer_dicts, dict): + parameters_and_buffers = parameter_and_buffer_dicts + elif isinstance(parameter_and_buffer_dicts, Sequence): + if not all(isinstance(d, dict) for d in parameter_and_buffer_dicts): + raise ValueError( + "Expected all elements of parameter_and_buffer_dicts to be dictionaries" + ) + all_keys = [k for d in parameter_and_buffer_dicts for k in d.keys()] + repeated_keys = [key for key, n in Counter(all_keys).items() if n > 1] + if len(repeated_keys) > 0: + raise ValueError( + f"{repeated_keys} appeared in multiple dictionaries; behavior of functional call is ambiguous" + ) + parameters_and_buffers = { + k: v for d in parameter_and_buffer_dicts for k, v in d.items() + } + else: + raise ValueError( + f"Expected parameter_and_buffer_dicts to be a dict, or a list/tuple of dicts, " + f"but got {type(parameter_and_buffer_dicts)}" + ) + + return nn.utils.stateless._functional_call( + module, + parameters_and_buffers, + args, + kwargs, + tie_weights=tie_weights, + strict=strict, + ) + + +@exposed_in("torch.func") +def stack_module_state( + models: List[nn.Module], +) -> Tuple[Dict[str, Any], Dict[str, Any]]: + """stack_module_state(models) -> params, buffers + + Prepares a list of torch.nn.Modules for ensembling with :func:`vmap`. + + Given a list of ``M`` ``nn.Modules`` of the same class, returns two dictionaries + that stack all of their parameters and buffers together, indexed by name. + The stacked parameters are optimizable (i.e. they are new leaf nodes in the + autograd history that are unrelated to the original parameters and can be + passed directly to an optimizer). + + Here's an example of how to ensemble over a very simple model: + + .. code-block:: python + + num_models = 5 + batch_size = 64 + in_features, out_features = 3, 3 + models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)] + data = torch.randn(batch_size, 3) + + def wrapper(params, buffers, data): + return torch.func.functional_call(model[0], (params, buffers), data) + + params, buffers = stack_module_state(models) + output = vmap(wrapper, (0, 0, None))(params, buffers, data) + + assert output.shape == (num_models, batch_size, out_features) + + When there's submodules, this follows state dict naming conventions + + .. code-block:: python + + import torch.nn as nn + class Foo(nn.Module): + def __init__(self, in_features, out_features): + super().__init__() + hidden = 4 + self.l1 = nn.Linear(in_features, hidden) + self.l2 = nn.Linear(hidden, out_features) + + def forward(self, x): + return self.l2(self.l1(x)) + + num_models = 5 + in_features, out_features = 3, 3 + models = [Foo(in_features, out_features) for i in range(num_models)] + params, buffers = stack_module_state(models) + print(list(params.keys())) # "l1.weight", "l1.bias", "l2.weight", "l2.bias" + + .. warning:: + All of the modules being stacked together must be the same (except for + the values of their parameters/buffers). For example, they should be in the + same mode (training vs eval). + """ + if len(models) == 0: + raise RuntimeError("stack_module_state: Expected at least one model, got 0.") + if not (all(m.training for m in models) or all(not m.training for m in models)): + raise RuntimeError( + "stack_module_state: Expected all models to have the same training/eval mode." + ) + model0_typ = type(models[0]) + if not all(type(m) == model0_typ for m in models): + raise RuntimeError( + "stack_module_state: Expected all models to be of the same class." 
+ ) + all_params = [dict(model.named_parameters()) for model in models] + params = { + k: construct_stacked_leaf(tuple(params[k] for params in all_params), k) + for k in all_params[0] + } + all_buffers = [dict(model.named_buffers()) for model in models] + buffers = { + k: construct_stacked_leaf(tuple(buffers[k] for buffers in all_buffers), k) + for k in all_buffers[0] + } + + return params, buffers + + +def construct_stacked_leaf( + tensors: Union[Tuple[Tensor, ...], List[Tensor]], name: str +) -> Tensor: + all_requires_grad = all(t.requires_grad for t in tensors) + none_requires_grad = all(not t.requires_grad for t in tensors) + if not all_requires_grad and not none_requires_grad: + raise RuntimeError( + f"Expected {name} from each model to have the same .requires_grad" + ) + result = torch.stack(tensors) + if all_requires_grad: + result = result.detach().requires_grad_() + return result diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/fx_minifier.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/fx_minifier.py new file mode 100644 index 0000000000000000000000000000000000000000..33024b1ec8d8fb709d601c8ee50f4c7d28ea7fca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/fx_minifier.py @@ -0,0 +1,445 @@ +# mypy: ignore-errors + +import torch.fx as fx +import copy +import torch +import math +import sys +from typing import Callable, List +from functools import wraps, partial +from dataclasses import dataclass +from .compile_utils import get_placeholders, get_outputs +from torch.utils._content_store import ContentStoreWriter +from torch.hub import tqdm +from torch.multiprocessing.reductions import StorageWeakRef +import os + +is_tuple = object() + +@dataclass +class LoadTensorMeta: + size: List[int] + stride: List[int] + dtype: torch.dtype + device: torch.device + +class ConcreteProp(torch.fx.Interpreter): + def __init__(self, mod, *, writer=None, skip_offload=False): + super().__init__(mod) + self.writer = writer + self.skip_offload = skip_offload + self.seen_storages = set() + + def run_node(self, n): + self.pbar.update(1) + r = super().run_node(n) + name = n.name + + if isinstance(r, torch.Tensor): + if self.writer is None: + n.meta['concrete_value'] = r + else: + if StorageWeakRef(r.untyped_storage()) in self.seen_storages: + # Refuse to offload tensors which alias other live + # tensors, because this will violate operator contracts + n.meta['concrete_value'] = None + else: + if not self.skip_offload: + self.writer.write_tensor(os.path.join("eager", name), r) + n.meta['concrete_value'] = LoadTensorMeta( + r.size(), + r.stride(), + r.dtype, + r.device + ) + self.seen_storages.add(StorageWeakRef(r.untyped_storage())) + else: + n.meta['concrete_value'] = is_tuple + + return r + + def propagate(self, *args): + with tqdm( + desc="Saving intermediates for delta debugging", + total=len(self.module.graph.nodes), + disable=self.writer is None + ) as pbar: + self.pbar = pbar + r = super().run(*args) + if not self.skip_offload: + pbar.set_description("Saved! 
To skip next time, run with --skip-saving-eager-intermediates") + return r + +def is_load_tensor_node(node): + return node.op == 'call_function' and node.target is torch.ops.debugprims.load_tensor.default + + +# inplace modifies node/inps +def _convert_node_to_placeholder(graph, node, inps): + if node.op == 'output' or node.op == "placeholder": + return False + + if is_load_tensor_node(node): + return False + + concrete_val = node.meta.get('concrete_value', None) + + if isinstance(concrete_val, torch.Tensor): + node.op = 'placeholder' + node.target = node.name + node.args = () + node.kwargs = {} + + inps.append(concrete_val) + return True + + elif concrete_val is None: + return False + + elif concrete_val is is_tuple: + r = False + for tuple_user in list(node.users): + r = _convert_node_to_placeholder(graph, tuple_user, inps) or r + # NB: We must not erase the node at this point, because + # we are iterating over the nodes and this would change + # the iteration order + # graph.erase_node(node) + return r + + elif isinstance(concrete_val, LoadTensorMeta): + node.op = 'call_function' + node.target = torch.ops.debugprims.load_tensor.default + node.args = (os.path.join("eager", node.name), concrete_val.size, concrete_val.stride) + node.kwargs = { + 'device': concrete_val.device, + 'dtype': concrete_val.dtype, + } + return True + + return False + +def create_minified_hlo_graph(minified_fx_graph, inputs): + """ + Takes minified FX graph as primary input, and ports it to HLO via StableHLO + Provides minified HLO graph as output, and archive them to local directory + """ + hlo_dir = f"{os.getcwd()}/hlo_files" + os.makedirs(hlo_dir, exists_ok=True) + + from torch_xla.stablehlo import save_torch_model_as_stablehlo + save_torch_model_as_stablehlo(minified_fx_graph, inputs, hlo_dir) + +def dump_state(fx_g, inps): + print(f""" +# Working Repro with {len(fx_g.graph.nodes)} nodes +inps = {[(i.shape, i.dtype, i.device.type) for i in inps]} +inps = [torch.zeros(())] + [torch.ones(shape, dtype=dtype, device=device) for (shape, dtype, device) in inps] +{fx_g.code} +""") + +def is_power_of_two(n): + if n == 0: + return False + return (n & (n - 1)) == 0 + +@dataclass +class ReproState: + graph: fx.Graph + inps: List[torch.Tensor] + + def __post_init__(self): + ph_nodes = get_placeholders(self.graph) + assert len(ph_nodes) == len(self.inps) + +def minifier( + fail_f: fx.GraphModule, inps, module_fails, dump_state: Callable = dump_state, *, + save_dir=None, offload_to_disk=False, skip_offload=False, skip_sanity=False, + max_granularity=None +): + """ + Minimizes a FX graph with given inputs, such that the resulting FX graph still returns True for module_fails. + + Does 2 main strategies: + 1. Truncates suffix: Removes some suffix from the graph and sets a new output. + 2. Delta Debugging: Tries replacing half of the graph with inputs. If fails, + tries replacing quarter of the graph, etc. + + >>> # xdoctest: +SKIP(failing) + >>> failing_function = fx.symbolic_trace(f) + >>> minimize(failing_function, [torch.randn(5)], lambda fx_g, inps: fx_g(*inps)) + + note: module_fails returns True if it fails. 
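+
+    In other words, a fuller sketch of the expected call shape (illustrative; ``f`` below
+    is a placeholder for the failing function being traced):
+
+    >>> # xdoctest: +SKIP
+    >>> def module_fails(fx_g, inps):
+    ...     try:
+    ...         fx_g(*inps)
+    ...         return False
+    ...     except Exception:
+    ...         return True
+    >>> minified_fx, minified_inps = minifier(fx.symbolic_trace(f), [torch.randn(5)], module_fails)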
+ """ + assert isinstance(inps, (tuple, list)) + + failing_graph = fail_f.graph + cur_size = len(failing_graph.nodes) + + if max_granularity is not None and not is_power_of_two(max_granularity): + raise RuntimeError(f"max_granularity {max_granularity} not power of two") + + num_queries = 0 + + def deepcopy_fx_graph(fx_graph): + return fx.GraphModule(fail_f, copy.deepcopy(fx_graph)).graph + + + def graph_fails(graph, inps): + nonlocal num_queries + graph = copy.deepcopy(graph) + num_queries += 1 + mod = fx.GraphModule(fail_f, graph) + mod.graph.lint() + return module_fails(mod, inps) + + writer = None + if offload_to_disk: + writer = ContentStoreWriter(save_dir) + + ConcreteProp(fail_f, writer=writer, skip_offload=skip_offload).propagate(*inps) + if not skip_sanity and not graph_fails(failing_graph, inps): + raise RuntimeError("Input graph did not fail the tester") + print(f"Started off with {cur_size} nodes", file=sys.stderr) + + def _register_strategy(strategy: Callable, name: str): + @wraps(strategy) + def new_func(old_state: ReproState, granularity=1): + print(file=sys.stderr) + print( + f"Strategy: {name} (G: {granularity}) " + f"({len(old_state.graph.nodes)} nodes, {len(old_state.inps)} inputs)", + file=sys.stderr + ) + new_state = strategy(deepcopy_fx_graph(old_state.graph), list(old_state.inps), granularity) + if new_state is not None: + new_nodes = len(new_state.graph.nodes) + old_nodes = len(old_state.graph.nodes) + new_inps = len(new_state.inps) + old_inps = len(old_state.inps) + new_outs = len(get_outputs(new_state.graph)) + old_outs = len(get_outputs(old_state.graph)) + progress_made = False + if new_nodes < old_nodes: + progress_made = True + print(f"SUCCESS: Went from {old_nodes} to {new_nodes} nodes", file=sys.stderr) + if new_inps > old_inps: + progress_made = True + print(f"SUCCESS: Went from {old_inps} to {new_inps} inputs", file=sys.stderr) + if new_outs < old_outs: + progress_made = True + print(f"SUCCESS: Went from {old_outs} to {new_outs} outputs", file=sys.stderr) + + if not progress_made: + raise RuntimeError("Success raised but no progress made?") + + if not graph_fails(new_state.graph, new_state.inps): + print("WARNING: Something went wrong, not applying this minification", file=sys.stderr) + return None + return new_state + else: + print(f"FAIL: {name}", file=sys.stderr) + return None + + return new_func + + def register_strategy(name: str): + return partial(_register_strategy, name=name) + + @register_strategy("Truncate suffix") + def remove_suffix(cur_graph, cur_inps, granularity): + tested = set() + new_graph = fx.Graph() + env = {} + for idx, node in enumerate(cur_graph.nodes): + new_node = new_graph.node_copy(node, lambda x: env[x]) + if node.op not in ['placeholder', 'output']: + # If idx is divisible by (granularity * 2), it would have been checked already. 
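+                # (Hence the check below only tries odd multiples of `granularity`; even
+                # multiples were already covered by the earlier pass with granularity * 2.)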
+ if idx % granularity == 0 and (idx % (granularity * 2) != 0) and idx not in tested: + output_node = new_graph.output((new_node,)) + if len(new_graph.nodes) < len(cur_graph.nodes) and graph_fails(new_graph, cur_inps): + return ReproState(new_graph, cur_inps) + else: + tested.add(idx) + new_graph.erase_node(output_node) + env[node] = new_node + return None + + @register_strategy("Remove outputs") + def remove_outputs(cur_graph, cur_inps, granularity): + granularity = max(1, granularity // 2) + for idx, node in enumerate(cur_graph.nodes): + node.idx = idx + if node.op == 'output': + output = node + break + + if isinstance(output.args[0], fx.Node): + return None + + output_args = sorted(output.args[0], key=lambda x: x.idx if isinstance(x, fx.Node) else int(1e9)) + if len(output_args) == 1: + return None + + for idx in range(0, len(output_args), granularity): + output.args = (output_args[:idx] + output_args[idx + granularity:],) + if graph_fails(cur_graph, cur_inps): + return ReproState(cur_graph, cur_inps) + return None + + + def remove_unused_inputs_unchecked(cur_state: ReproState): + cur_graph = cur_state.graph + cur_inps = cur_state.inps + ph_nodes = get_placeholders(cur_graph) + assert len(ph_nodes) == len(cur_inps) + + new_inps = [] + for idx in range(len(ph_nodes)): + if len(ph_nodes[idx].users) == 0: + cur_graph.erase_node(ph_nodes[idx]) + else: + new_inps.append(cur_inps[idx]) + if len(new_inps) < len(cur_inps): + return ReproState(cur_graph, new_inps) + return None + + def remove_unused_inputs_checked(cur_state: ReproState): + new_state = remove_unused_inputs_unchecked(cur_state) + if new_state is not None and graph_fails(new_state.graph, new_state.inps): + return new_state + return None + + def _remove_unused_wrapper(cur_graph, cur_inps, granularity): + return remove_unused_inputs_checked(ReproState(cur_graph, cur_inps)) + + remove_unused_inputs = register_strategy("Remove unused inputs")(_remove_unused_wrapper) + + @register_strategy("Eliminate dead code") + def eliminate_dead_code(cur_graph, cur_inps, granularity): + if cur_graph.eliminate_dead_code() and graph_fails(cur_graph, cur_inps): + return ReproState(cur_graph, cur_inps) + return None + + + def _consolidate_placeholders(cur_graph, inps): + new_graph = fx.Graph() + env = {} + seen_non_placeholder = False + + # Move all placeholders to the front; also, if any load_tensor + # is at the front, convert it into an input (because it can be live + # all the time) + for node in cur_graph.nodes: + if node.op == 'placeholder': + new_node = new_graph.node_copy(node, lambda x: env[x]) + env[node] = new_node + elif not seen_non_placeholder and is_load_tensor_node(node): + new_node = new_graph.placeholder(node.name) + env[node] = new_node + inps.append(torch.ops.debugprims.load_tensor.default(*node.args, **node.kwargs)) + else: + seen_non_placeholder = True + + # Move everyone else + for node in cur_graph.nodes: + if node not in env: + new_node = new_graph.node_copy(node, lambda x: env[x]) + env[node] = new_node + return new_graph + + @register_strategy("Delta Debugging") + def delta_debugging(cur_graph: fx.Graph, cur_inps, granularity): + num_nodes = len(cur_graph.nodes) + for start_range in range(0, num_nodes, granularity): + is_removing = False + new_graph = deepcopy_fx_graph(cur_graph) + new_inps = cur_inps[:] + end_range = min(num_nodes, start_range + granularity) + for idx in range(start_range, end_range): + new_node = list(new_graph.nodes)[idx] + if _convert_node_to_placeholder(new_graph, new_node, new_inps): + is_removing = True 
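+            # If nothing in this window could be converted into a placeholder, there is
+            # no new candidate graph to test; move on to the next window.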
+ if not is_removing: + continue + new_graph.eliminate_dead_code() + new_graph = _consolidate_placeholders(new_graph, new_inps) + new_state = remove_unused_inputs_unchecked(ReproState(new_graph, new_inps)) + if new_state is None: + new_state = ReproState(new_graph, new_inps) + if graph_fails(new_state.graph, new_state.inps): + return ReproState(new_state.graph, new_state.inps) + + return None + + @register_strategy("Consolidate Inputs") + def consolidate_inputs(cur_graph, cur_inps, granularity): + old_len = len(cur_inps) + cur_graph = _consolidate_placeholders(cur_graph, cur_inps) + if len(cur_inps) > old_len and graph_fails(cur_graph, cur_inps): + return ReproState(cur_graph, cur_inps) + return None + + failing_state = ReproState(failing_graph, inps) + + def try_granularity(failing_state, granularity, use_non_granular): + print(f"Trying granularity {granularity}", file=sys.stderr) + + strategies = [] + num_nodes = len(failing_state.graph.nodes) + num_outputs = len(get_outputs(failing_state.graph)) + if num_outputs > num_nodes // 2: + strategies += [remove_outputs] + + if use_non_granular: + strategies += [eliminate_dead_code, remove_unused_inputs, consolidate_inputs] + + strategies += [remove_suffix, delta_debugging] + + for strategy in strategies: + new_state = strategy(failing_state, granularity) + if new_state is not None: + return new_state + return None + + while True: + dump_state(fx.GraphModule(fail_f, failing_state.graph), failing_state.inps) + granularity = int(2**(math.floor(math.log2(len(failing_state.graph.nodes))))) + if max_granularity is not None: + granularity = min(max_granularity, granularity) + new_state = try_granularity(failing_state, granularity, use_non_granular=True) + if new_state is not None: + failing_state = new_state + continue + + granularity //= 2 + has_progress = False + while granularity >= 1: + new_state = try_granularity(failing_state, granularity, use_non_granular=False) + if new_state is not None: + failing_state = new_state + has_progress = True + break + granularity //= 2 + if has_progress: + continue + + new_state = remove_outputs(failing_state, 1) + if new_state is not None: + failing_state = new_state + continue + + break + + if not graph_fails(failing_state.graph, failing_state.inps): + raise RuntimeError("Uh oh, something went wrong :( Final graph is not failing") + + print(f"Made {num_queries} queries", file=sys.stderr) + failing_fx = fx.GraphModule(fail_f, failing_state.graph) + + # If XLA debugging environment is enabled, create minified HLO graph as well + if "XLA_HLO_DEBUG" in os.environ: + create_minified_hlo_graph(failing_fx, failing_state.inps) + + dump_state(failing_fx, failing_state.inps) + print("Wrote minimal repro out to repro.py", file=sys.stderr) + return failing_fx, failing_state.inps diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/make_functional.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/make_functional.py new file mode 100644 index 0000000000000000000000000000000000000000..711be174d82761ed6ade88afdccc3ad66290adf1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/make_functional.py @@ -0,0 +1,615 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import copy +from typing import ( + Any, + Callable, + Dict, + Iterable, + List, + NoReturn, + Sequence, + Tuple, + Type, + Union, +) + +import torch +import torch.nn as nn +from torch import Tensor +from torch.nn.utils._named_member_accessor import NamedMemberAccessor + +# Utilities to make nn.Module "functional" +# In particular the goal is to be able to provide a function that takes as input +# the parameters and evaluate the nn.Module using fixed inputs. + + +def raise_parameter_tying_error() -> NoReturn: + raise RuntimeError( + "make_functional(module): we don't yet support models that " + "do parameter tying (also sometimes known as weight sharing). " + "Please try to rewrite your model by replacing all instances of the " + "tied parameter with another and/or comment your support in " + "https://github.com/pytorch/functorch/issues/446" + ) + + +def create_names_map( + named_params: Union[Dict[str, Tensor], Iterable[Tuple[str, Tensor]]], + tied_named_params: Union[Dict[str, Tensor], Iterable[Tuple[str, Tensor]]], +) -> Dict[str, List[str]]: + """ + named_params is a dictionary of tensors: {'A': A, 'B': B} + tied_named_params is another dictionary of tensors {'A': A, 'B': B, 'B_tied': B} + with potentially tied (or 'duplicated') tensors + + This function creates a mapping from the names in named_params to the + names in tied_named_params: {'A': ['A'], 'B': ['B', 'B_tied']}. + """ + named_params = dict(named_params) + tied_named_params = dict(tied_named_params) + + tensors_dict_keys = set(named_params.keys()) + tied_tensors_dict_keys = set(tied_named_params.keys()) + assert tensors_dict_keys.issubset(tied_tensors_dict_keys) + + tensor_to_mapping: Dict[Tensor, Tuple[str, List[str]]] = {} + for key, tensor in named_params.items(): + tensor_to_mapping[tensor] = (key, []) + for key, tensor in tied_named_params.items(): + assert tensor in tensor_to_mapping + tensor_to_mapping[tensor][1].append(key) + return dict(tensor_to_mapping.values()) + + +def _extract_members( + mod: nn.Module, + named_members: Callable[..., Iterable[Tuple[str, Tensor]]], + subclass: Callable[[Tensor], Tensor], +) -> Tuple[Tuple[Tensor, ...], Tuple[str, ...], Dict[str, List[str]]]: + all_named_members = tuple(named_members(remove_duplicate=False)) + unique_named_members = tuple(named_members(remove_duplicate=True)) + names_map = create_names_map(unique_named_members, all_named_members) + + # Remove all the members in the model + memo = {} + accessor = NamedMemberAccessor(mod) + for name, p in all_named_members: + if p not in memo: + memo[p] = subclass(torch.empty_like(p, device="meta")) + replacement = memo[p] + accessor.set_tensor(name, replacement) + + if len(unique_named_members) == 0: + names, params = (), () + else: + names, params = zip(*unique_named_members) # type: ignore[assignment] + return params, names, names_map + + +def extract_weights( + mod: nn.Module, +) -> Tuple[Tuple[Tensor, ...], Tuple[str, ...], Dict[str, List[str]]]: + """ + This function removes all the Parameters from the model and + return them as a tuple as well as their original attribute names. + The weights must be re-loaded with `load_weights` before the model + can be used again. + Note that this function modifies the model in place and after this + call, mod.parameters() will be empty. 
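A quick round trip illustrates the contract described above: ``extract_weights`` swaps every parameter for a meta placeholder and hands back the original tensors, and ``load_weights`` re-attaches them so the module is usable again. A hedged sketch, assuming the private import path shown in this diff's file header:

```
import torch
import torch.nn as nn
from torch._functorch.make_functional import extract_weights, load_weights

model = nn.Linear(3, 3)
x = torch.randn(4, 3)
expected = model(x)

params, names, _ = extract_weights(model)  # model now holds meta placeholders
load_weights(model, names, params)         # put the extracted tensors back
assert torch.allclose(model(x), expected)
```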
+ """ + return _extract_members(mod, mod.named_parameters, nn.Parameter) + + +def extract_buffers( + mod: nn.Module, +) -> Tuple[Tuple[Tensor, ...], Tuple[str, ...], Dict[str, List[str]]]: + return _extract_members(mod, mod.named_buffers, lambda x: x) + + +def load_weights( + mod: nn.Module, + names: Sequence[str], + params: Sequence[Tensor], + as_params: bool = False, +) -> None: + """ + Reload a set of weights so that `mod` can be used again to perform a forward pass. + Note that the `params` are regular Tensors (that can have history) and so are left + as Tensors. This means that mod.parameters() will still be empty after this call. + """ + accessor = NamedMemberAccessor(mod) + if as_params: + params = [nn.Parameter(p) for p in params] + accessor.set_tensors(names, params) + + +def _swap_state( + mod: nn.Module, names_map: Dict[str, List[str]], elems: Iterable[Tensor] +) -> List[Tensor]: + result: List[Tensor] = [] + accessor = NamedMemberAccessor(mod) + for (_, attr_names), elem in zip(names_map.items(), elems): + for i, attr_name in enumerate(attr_names): + if i == 0: + result.append(accessor.swap_tensor(attr_name, elem)) + else: + accessor.set_tensor(attr_name, elem) + return result + + +def load_buffers( + mod: nn.Module, + names: Sequence[str], + buffers: Sequence[Tensor], + as_params: bool = False, +) -> None: + accessor = NamedMemberAccessor(mod) + accessor.set_tensors(names, buffers) + + +def load_state( + model: nn.Module, + weights: Sequence[Tensor], + weight_names: Sequence[str], + buffers: Sequence[Tensor] = (), + buffer_names: Sequence[str] = (), +) -> nn.Module: + """load_state(model, weights, weight_names, buffers=(), buffer_names=()) -> model + + load_state takes `weights` and `buffers` and assigns them to the model. + This is the inverse operation of `make_functional_deprecated_v1`. + """ + assert len(weight_names) == len(weights) + load_weights(model, weight_names, weights) + if len(buffers) > 0: + assert len(buffer_names) == len(buffers) + load_buffers(model, buffer_names, buffers) + return model + + +def make_functional_deprecated_v1(model: nn.Module): + """make_functional_deprecated_v1(model) -> weights, func, weight_names + + Given an nn.Module, make_functional_deprecated_v1 extracts the state (weights) + and returns a functional version of the model, `func`. This makes + it so that it is possible use transforms over the parameters of + `model`. + + `func` can be invoked as follows: + ``` + x = torch.randn(4, 3) + model = nn.Linear(3, 3) + weights, func, _ = make_functional_deprecated_v1(model) + func(weights, (x,)) + ``` + + And here is an example of applying the grad transform: + ``` + x = torch.randn(4, 3) + model = nn.Linear(3, 3) + weights, _, func = make_functional_deprecated_v1(model) + grad_weights = grad(func)(weights, (x,)) + ``` + + To put the state back into a model, use `load_state`. + """ + buffers = list(model.buffers()) + if len(buffers) > 0: + raise RuntimeError( + "make_functional_deprecated_v1(model): `model` has buffers. Please use " + "make_functional_with_buffers_deprecated_v1(model) instead." 
+ ) + weights, descriptors, _ = extract_weights(model) + + def fun(weights, data): + mutable_model = copy.deepcopy(model) + load_weights(mutable_model, descriptors, weights) + return mutable_model(*data) + + return weights, fun, descriptors + + +def make_functional_with_buffers_deprecated_v1(model: nn.Module): + """make_functional_with_buffers_deprecated_v1(model) -> weights, buffers, func, weight_names, buffer_names + + Given an nn.Module, make_functional_with_buffers_deprecated_v1 extracts the state (weights and buffers) + and returns a functional version of the model, `func`. + + `func` can be invoked as follows: + ``` + x = torch.randn(4, 3) + model = nn.Linear(3, 3) + weights, buffers, func, _, _ = make_functional_with_buffers_deprecated_v1(model) + func(weights, buffers, (x,)) + ``` + + And here is an example of applying the grad transform: + ``` + x = torch.randn(4, 3) + model = nn.Linear(3, 3) + weights, buffers, func, _, _ = make_functional_with_buffers_deprecated_v1(model) + func(weights, buffers, (x,)) + grad_weights = grad(func)(weights, buffers, (x,)) + ``` + + To put the state back into a model, use `load_state`. + """ + weights, weight_descriptors, _ = extract_weights(model) + buffers, buf_descriptors, _ = extract_buffers(model) + + def fun(weights, buffers, data): + mutable_model = copy.deepcopy(model) + load_weights(mutable_model, weight_descriptors, weights) + load_buffers(mutable_model, buf_descriptors, buffers) + return mutable_model(*data) + + return weights, buffers, fun, weight_descriptors, buf_descriptors + + +class FunctionalModuleWithBuffers(nn.Module): + """ + This is the callable object returned by :func:`make_functional_with_buffers`. + """ + + def __init__( + self, + stateless_model: nn.Module, + param_names: Tuple[str, ...], + buffer_names: Tuple[str, ...], + param_names_map: Dict[str, List[str]], + buffer_names_map: Dict[str, List[str]], + ) -> None: + super().__init__() + self.stateless_model = stateless_model + self.param_names = param_names + self.buffer_names = buffer_names + + self.all_names_map = dict(param_names_map) + self.all_names_map.update(buffer_names_map) + + @staticmethod + def _create_from( + model: nn.Module, disable_autograd_tracking: bool = False + ) -> Tuple["FunctionalModuleWithBuffers", Tuple[Tensor, ...], Tuple[Tensor, ...]]: + # TODO: We don't need to copy the model to create a stateless copy + model_copy = copy.deepcopy(model) + params, param_names, param_names_map = extract_weights(model_copy) + buffers, buffer_names, buffer_names_map = extract_buffers(model_copy) + if disable_autograd_tracking: + for param in params: + param.requires_grad_(False) + return ( + FunctionalModuleWithBuffers( + model_copy, param_names, buffer_names, param_names_map, buffer_names_map + ), + params, + buffers, + ) + + def forward( + self, params: Iterable[Tensor], buffers: Iterable[Tensor], *args, **kwargs + ) -> Any: + # Temporarily load the state back onto self.stateless_model + old_state = _swap_state( + self.stateless_model, + self.all_names_map, + tuple(params) + tuple(buffers), + ) + try: + return self.stateless_model(*args, **kwargs) + finally: + # Remove the loaded state on self.stateless_model + _swap_state(self.stateless_model, self.all_names_map, old_state) + + +class FunctionalModule(nn.Module): + """ + This is the callable object returned by :func:`make_functional`. 
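The swap-in/run/swap-out pattern in ``forward`` above is the same idea that the newer ``torch.func.functional_call`` API exposes publicly (these ``make_functional`` helpers are its deprecated precursors). A minimal sketch for comparison:

```
import torch
import torch.nn as nn
from torch.func import functional_call

model = nn.Linear(3, 3)
x = torch.randn(4, 3)

# One dict of name -> tensor covers both parameters and buffers.
state = {**dict(model.named_parameters()), **dict(model.named_buffers())}
out = functional_call(model, state, (x,))
assert torch.allclose(out, model(x))
```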
+ """ + + def __init__( + self, + stateless_model: nn.Module, + param_names: Tuple[str, ...], + names_map: Dict[str, List[str]], + ) -> None: + super().__init__() + self.stateless_model = stateless_model + self.param_names = param_names + self.names_map = names_map + + @staticmethod + def _create_from( + model: nn.Module, disable_autograd_tracking: bool = False + ) -> Tuple["FunctionalModule", Tuple[Tensor, ...]]: + # TODO: We don't need to copy the model to create a stateless copy + model_copy = copy.deepcopy(model) + params, param_names, names_map = extract_weights(model_copy) + if disable_autograd_tracking: + for param in params: + param.requires_grad_(False) + return FunctionalModule(model_copy, param_names, names_map), params + + def forward(self, params: Iterable[Tensor], *args, **kwargs) -> Any: + # Temporarily load the state back onto self.stateless_model + old_state = _swap_state(self.stateless_model, self.names_map, params) + try: + return self.stateless_model(*args, **kwargs) + finally: + # Remove the loaded state on self.stateless_model + _swap_state(self.stateless_model, self.names_map, old_state) + + +def make_functional( + model: nn.Module, disable_autograd_tracking: bool = False +) -> Tuple[FunctionalModule, Tuple[Tensor, ...]]: + """make_functional(model, disable_autograd_tracking=False) -> func, params + + Given a ``torch.nn.Module``, :func:`make_functional` extracts the state + (params) and returns a functional version of the model, ``func``. This + makes it so that it is possible use transforms over the parameters of + ``model``. + + ``func`` can be invoked as follows: + + .. code-block:: python + + import torch + import torch.nn as nn + from functorch import make_functional + + x = torch.randn(4, 3) + model = nn.Linear(3, 3) + func, params = make_functional(model) + func(params, x) + + And here is an example of applying the grad transform over the parameters + of a model. + + .. code-block:: python + + import torch + import torch.nn as nn + from functorch import make_functional, grad + + x = torch.randn(4, 3) + t = torch.randn(4, 3) + model = nn.Linear(3, 3) + func, params = make_functional(model) + + def compute_loss(params, x, t): + y = func(params, x) + return nn.functional.mse_loss(y, t) + + grad_weights = grad(compute_loss)(params, x, t) + + If the model has any buffers, please use :func:`make_functional_with_buffers` instead. + + Args: + model (torch.nn.Module): Input model. + disable_autograd_tracking (bool): Flag to disable gradients tracking for output parameters. + The returned params are unrelated to the set of params from the original model. If False (default), + the params will have ``requires_grad=True`` on them (aka they will be trackable with regular + PyTorch autograd), matching the requires_grad-ness of the params from the original model. + Otherwise, the returned params will have ``requires_grad=False``. Default, False. + If you plan on using regular PyTorch autograd (e.g., if you want to call ``.backward()`` or + ``torch.autograd.grad()``, then set ``disable_autograd_tracking=False``. + Otherwise, if you're only planning on using functorch's gradient transforms, + then please set ``disable_autograd_tracking=True`` to avoid unnecessarily tracking + history with PyTorch autograd. + + """ + buffers = list(model.buffers()) + if len(buffers) > 0: + raise RuntimeError( + "make_functional(model): `model` has buffers. Please use " + "make_functional_with_buffers(model) instead." 
+ ) + return FunctionalModule._create_from( + model, disable_autograd_tracking=disable_autograd_tracking + ) + + +def make_functional_with_buffers( + model: nn.Module, disable_autograd_tracking: bool = False +) -> Tuple[FunctionalModuleWithBuffers, Tuple[Tensor, ...], Tuple[Tensor, ...]]: + """make_functional_with_buffers(model, disable_autograd_tracking=False) -> func, params, buffers + + Given a ``torch.nn.Module``, make_functional_with_buffers extracts the + state (params and buffers) and returns a functional version of the model + ``func`` that can be invoked like a function. + + ``func`` can be invoked as follows: + + .. code-block:: python + + import torch + import torch.nn as nn + from functorch import make_functional_with_buffers + + x = torch.randn(4, 3) + model = nn.Linear(3, 3) + func, params, buffers = make_functional_with_buffers(model) + func(params, buffers, x) + + And here is an example of applying the grad transform over the parameters + of a model: + + .. code-block:: python + + import torch + import torch.nn as nn + from functorch import make_functional_with_buffers, grad + + x = torch.randn(4, 3) + t = torch.randn(4, 3) + model = nn.Linear(3, 3) + func, params, buffers = make_functional_with_buffers(model) + + def compute_loss(params, buffers, x, t): + y = func(params, buffers, x) + return nn.functional.mse_loss(y, t) + + grad_weights = grad(compute_loss)(params, buffers, x, t) + + Args: + model (torch.nn.Module): Input model. + disable_autograd_tracking (bool): Flag to disable gradients tracking for output parameters. + The returned params are unrelated to the set of params from the original model. If False (default), + the params will have ``requires_grad=True`` on them (aka they will be trackable with regular + PyTorch autograd), matching the requires_grad-ness of the params from the original model. + Otherwise, the returned params will have ``requires_grad=False``. Default, False. + If you plan on using regular PyTorch autograd (e.g., if you want to call ``.backward()`` or + ``torch.autograd.grad()``, then set ``disable_autograd_tracking=False``. + Otherwise, if you're only planning on using functorch's gradient transforms, + then please set ``disable_autograd_tracking=True`` to avoid unnecessarily tracking + history with PyTorch autograd. + + """ + return FunctionalModuleWithBuffers._create_from( + model, disable_autograd_tracking=disable_autograd_tracking + ) + + +def transpose_stack( + tuple_of_tuple_of_tensors: Tuple[Tuple[Tensor, ...], ...] +) -> Tuple[Tensor, ...]: + tuple_of_tuple_of_tensors = tuple(zip(*tuple_of_tuple_of_tensors)) + results = tuple( + torch.stack(shards).detach() for shards in tuple_of_tuple_of_tensors + ) + return results + + +def combine_state_for_ensemble( + models: Sequence[nn.Module], +) -> Tuple[FunctionalModuleWithBuffers, Tuple[Tensor, ...], Tuple[Tensor, ...]]: + """combine_state_for_ensemble(models) -> func, params, buffers + + Prepares a list of torch.nn.Modules for ensembling with :func:`vmap`. + + Given a list of ``M`` ``nn.Modules`` of the same class, stacks all of their + parameters and buffers together to make ``params`` and ``buffers``. + Each parameter and buffer in the result will have an additional dimension + of size ``M``. + + :func:`combine_state_for_ensemble` also returns ``func``, a functional + version of one of the models in :attr:`models`. 
One cannot directly run + ``func(params, buffers, *args, **kwargs)`` directly, you probably want to + use ``vmap(func, ...)(params, buffers, *args, **kwargs)`` + + Here's an example of how to ensemble over a very simple model: + + .. code-block:: python + + num_models = 5 + batch_size = 64 + in_features, out_features = 3, 3 + models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)] + data = torch.randn(batch_size, 3) + + fmodel, params, buffers = combine_state_for_ensemble(models) + output = vmap(fmodel, (0, 0, None))(params, buffers, data) + + assert output.shape == (num_models, batch_size, out_features) + + .. warning:: + All of the modules being stacked together must be the same (except for + the values of their parameters/buffers). For example, they should be in the + same mode (training vs eval). + + This API is subject to change -- we're investigating better ways to + create ensembles and would love your feedback how to improve this. + """ + if len(models) == 0: + raise RuntimeError( + "combine_state_for_ensemble: Expected at least one model, got 0." + ) + if not (all(m.training for m in models) or all(not m.training for m in models)): + raise RuntimeError( + "combine_state_for_ensemble: Expected all models to " + "have the same training/eval mode." + ) + model0_typ = type(models[0]) + if not all(type(m) == model0_typ for m in models): + raise RuntimeError( + "combine_state_for_ensemble: Expected all models to be of the same class." + ) + funcs, params, buffers = zip( + *[make_functional_with_buffers(model) for model in models] + ) + params = transpose_stack(params) + buffers = transpose_stack(buffers) + return funcs[0], params, buffers + + +def functional_init( + model_class: Type[nn.Module], + ensemble_shape: Union[Tuple[()], Tuple[int]] = (), + device: torch.types.Device = "cpu", +): + def wrapped(*args, **kwargs): + if len(ensemble_shape) >= 2: + raise ValueError("NYI: ensemble_shape with more than 1 element") + if len(ensemble_shape) == 0: + model = model_class(*args, **kwargs).to(device) + return make_functional_deprecated_v1(model) + num_models = ensemble_shape[0] # type: ignore[misc] + if num_models <= 0: + raise ValueError(f"num_models {num_models} should be > 0") + # NB: Not very efficient, more of a POC + models = tuple( + model_class(*args, **kwargs).to(device) for _ in range(num_models) + ) + _, fn, names = make_functional_deprecated_v1(model_class(*args, **kwargs)) + weights = tuple(make_functional_deprecated_v1(model)[0] for model in models) + weights = tuple(zip(*weights)) + weights = tuple(torch.stack(shards).detach() for shards in weights) + return weights, fn, names + + return wrapped + + +def functional_init_with_buffers( + model_class: Type[nn.Module], + ensemble_shape: Union[Tuple[()], Tuple[int]] = (), + device: torch.types.Device = "cpu", +): + def wrapped(*args, **kwargs): + if len(ensemble_shape) >= 2: + raise ValueError("NYI: ensemble_shape with more than 1 element") + if len(ensemble_shape) == 0: + model = model_class(*args, **kwargs).to(device) + return make_functional_deprecated_v1(model) + num_models = ensemble_shape[0] # type: ignore[misc] + if num_models <= 0: + raise ValueError(f"num_models {num_models} should be > 0") + # NB: Not very efficient, more of a POC + models = tuple( + model_class(*args, **kwargs).to(device) for _ in range(num_models) + ) + ( + _, + _, + fn, + weight_names, + buffer_names, + ) = make_functional_with_buffers_deprecated_v1(model_class(*args, **kwargs)) + weights, buffers = zip( + *tuple( + 
make_functional_with_buffers_deprecated_v1(model)[:2] + for model in models + ) + ) + weights = tuple(zip(*weights)) + weights = tuple(torch.stack(shards).detach() for shards in weights) + buffers = tuple(zip(*buffers)) + buffers = tuple(torch.stack(shards).detach() for shards in buffers) + return weights, buffers, fn, weight_names, buffer_names + + return wrapped diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/partitioners.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/partitioners.py new file mode 100644 index 0000000000000000000000000000000000000000..dd8f86055da1d8420130e9caeb2ab59c27d2a385 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/partitioners.py @@ -0,0 +1,981 @@ +# mypy: ignore-errors + +from torch.fx.experimental.proxy_tensor import is_sym_node, py_sym_types +from torch.fx.experimental.sym_node import magic_methods, method_to_operator +from torch.fx.experimental.symbolic_shapes import ( + hint_int, free_symbols, is_symbol_binding_fx_node, find_symbol_binding_fx_nodes +) +from torch.fx.experimental._backward_state import BackwardState +import torch +import torch.fx as fx +import operator +import math +import torch.utils._pytree as pytree +import copy +import os +import itertools +import sympy +from collections import defaultdict +from torch.fx.passes import graph_drawer +from typing import List, Optional, Set, Tuple, Union +from .compile_utils import fx_graph_cse, get_aten_target +from . import config +import functools + + +AOT_PARTITIONER_DEBUG = config.debug_partitioner + + +def must_recompute(node): + return node.meta.get("recompute", False) + +def has_recomputable_ops(fx_g): + found = False + for node in fx_g.graph.nodes: + if must_recompute(node): + return True + return False + +def has_recomputable_rng_ops(fx_g): + for node in fx_g.graph.nodes: + if must_recompute(node) and hasattr(node.target, "tags") and torch.Tag.nondeterministic_seeded in node.target.tags: + return True + return False + +def sym_node_size(node): + if isinstance(node.meta["val"], (torch.SymInt, torch.SymBool)): + return 1 + assert isinstance(node.meta["val"], torch.SymFloat) + return 4 + +class InvalidNodeBase: + def __repr__(self): + return "Invalid Node" + + +InvalidNode = InvalidNodeBase() + + +def _extract_graph_with_inputs_outputs(joint_graph, inputs, outputs): + """ + Given a graph, extracts out a subgraph that takes the specified nodes as + inputs and returns the specified outputs. + + This includes specifying non-placeholder nodes as inputs. + + The general strategy is to initialize all inputs with proxies as we + encounter them, and trace through the graph, only keeping values which take + in valid proxies. Then, all dead code is eliminated. 
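The env-threading idiom that the extraction helper above relies on can be seen in isolation: every node is copied into a fresh graph through a mapping from old nodes to their copies, and only then is the output emitted. A small sketch on a toy function (the function and shapes are made up for illustration):

```
import torch
import torch.fx as fx

def f(x, y):
    return (x + y).relu()

gm = fx.symbolic_trace(f)

new_graph = fx.Graph()
env = {}
for node in gm.graph.nodes:
    if node.op == "output":
        # The single returned value is looked up through env, as above.
        new_graph.output(env[node.args[0]])
    else:
        env[node] = new_graph.node_copy(node, lambda n: env[n])
new_graph.lint()

copied = fx.GraphModule(gm, new_graph)
assert torch.allclose(copied(torch.ones(2), torch.ones(2)), f(torch.ones(2), torch.ones(2)))
```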
+ """ + new_graph = fx.Graph() + env = {} + + # Add new placeholder nodes in the order specified by the inputs + for node in inputs: + new_node = new_graph.placeholder(node.name) + # Can't use node_copy here as we may be turning previous call_function into placeholders + new_node.meta = node.meta + env[node] = new_node + + for node in joint_graph.nodes: + if node in inputs: + continue + elif node.op == 'placeholder': + env[node] = InvalidNode + elif node.op == 'call_function': + all_args = pytree.arg_tree_leaves(*node.args, **node.kwargs) + all_args = [isinstance(env[x], InvalidNodeBase) for x in all_args if isinstance(x, fx.Node)] + if any(all_args): + env[node] = InvalidNode + continue + env[node] = new_graph.node_copy(node, lambda x: env[x]) + elif node.op == 'get_attr': + env[node] = new_graph.node_copy(node, lambda x: env[x]) + elif node.op == 'output': + pass + output_values = [] + for x in outputs: + if isinstance(x, fx.Node): + if x not in env: + raise RuntimeError(f"Node {x} couldn't be found in env") + assert not isinstance(env[x], InvalidNodeBase), f"Node {x} was invalid, but is output" + output_values.append(env[x]) + else: + output_values.append(x) + new_graph.output(output_values) + + new_graph.eliminate_dead_code() + new_graph.lint() + return new_graph + + +def _is_primal(node): + return ( + node.op == "placeholder" + and "tangents" not in node.target + and not _is_bwd_seed_offset(node) + and not _is_fwd_seed_offset(node) + ) + +def _is_tangent(node): + return node.op == "placeholder" and "tangents" in node.target + +def _is_bwd_seed_offset(node): + return node.op == "placeholder" and ("bwd_seed" in node.target or "bwd_base_offset" in node.target) + +def _is_fwd_seed_offset(node): + return node.op == "placeholder" and ("fwd_seed" in node.target or "fwd_base_offset" in node.target) + +def _is_backward_state(node): + return node.op == "placeholder" and isinstance(node.meta.get("val"), BackwardState) + + +def _extract_fwd_bwd_outputs(joint_module: fx.GraphModule, *, num_fwd_outputs): + outputs = pytree.arg_tree_leaves(*(node.args for node in joint_module.graph.nodes if node.op == 'output')) + fwd_outputs = outputs[:num_fwd_outputs] + bwd_outputs = outputs[num_fwd_outputs:] + return fwd_outputs, bwd_outputs + + +def _remove_by_name(saved_values, name): + for saved_value in saved_values: + if saved_value.name == name: + saved_values.remove(saved_value) + break + +def _placeholders(nodes): + # Avoid making an entire pass over the graph if we only care about the input placeholders + result = [] + for node in nodes: + if node.op == 'placeholder': + result.append(node) + else: + break # placeholders are all at the start of graph + return result + + +def _extract_fwd_bwd_modules(joint_module: fx.GraphModule, saved_values, saved_sym_nodes, *, num_fwd_outputs): + fwd_outputs, bwd_outputs = _extract_fwd_bwd_outputs(joint_module, num_fwd_outputs=num_fwd_outputs) + placeholders = _placeholders(joint_module.graph.nodes) + primal_inputs = [*filter(_is_primal, placeholders)] + tangent_inputs = [*filter(_is_tangent, placeholders)] + fwd_seed_offset_inputs = [*filter(_is_fwd_seed_offset, placeholders)] + bwd_seed_offset_inputs = [*filter(_is_bwd_seed_offset, placeholders)] + backward_state_inputs = [*filter(_is_backward_state, placeholders)] + + bwd_graph = _extract_graph_with_inputs_outputs( + joint_module.graph, + saved_sym_nodes + saved_values + tangent_inputs + bwd_seed_offset_inputs, + bwd_outputs + ) + + for node in _placeholders(bwd_graph.nodes): + assert node.op == 'placeholder' + # 
This is to filter out saved values that don't actually end up being used by the backwards pass + if not node.users: + _remove_by_name(saved_values, node.name) + _remove_by_name(saved_sym_nodes, node.name) + elif _is_backward_state(node): + # BackwardState is saved directly + _remove_by_name(saved_values, node.name) + assert backward_state_inputs + + + # Now that we have the finalized list of saved values, we need to ensure + # we propagate all symbols which are referenced by backwards inputs. + # These are not directly used in the graph but are required for downstream + # sizevar assignment + saved_symbols: Set[sympy.Symbol] = set() + saved_sym_nodes_binding = [] + saved_sym_nodes_derived = [] + + # Some symbols may already be bound in the directly saved_sym_nodes, + # keep track of them so we don't re-bind them + for node in saved_sym_nodes: + symbol = is_symbol_binding_fx_node(node) + if symbol: + saved_symbols.add(symbol) + saved_sym_nodes_binding.append(node) + else: + saved_sym_nodes_derived.append(node) + + # Now go through all of the prospective backward inputs and track any + # other symbols we need to bind + symbol_bindings = find_symbol_binding_fx_nodes(joint_module.graph) + for node in itertools.chain(saved_sym_nodes_derived, saved_values, tangent_inputs): + if "val" not in node.meta: + continue + new_symbols = free_symbols(node.meta["val"]) - saved_symbols + # NB: Deterministic order please! + for s in sorted(new_symbols, key=lambda s: s.name): + # NB: For well formed graphs, the symbol should always be present, + # but we also have ways to produce ill-formed graphs, e.g., direct + # make_fx usages, so don't choke in this case + if s not in symbol_bindings: + continue + saved_sym_nodes_binding.append(symbol_bindings[s]) + saved_symbols |= new_symbols + + + # Update saved_sym_nodes that are now reordered to have all bindings at + # front. This can also be used later on to figure out the position of saved + # sym nodes in the output of fwd graph. + saved_sym_nodes.clear() + saved_sym_nodes.extend(saved_sym_nodes_binding + saved_sym_nodes_derived) + + # Now, we re-generate the fwd/bwd graphs. + # NB: This might increase compilation time, but I doubt it matters + fwd_graph = _extract_graph_with_inputs_outputs( + joint_module.graph, + primal_inputs + fwd_seed_offset_inputs, + fwd_outputs + saved_values + saved_sym_nodes + ) + bwd_graph = _extract_graph_with_inputs_outputs( + joint_module.graph, + saved_sym_nodes + saved_values + tangent_inputs + bwd_seed_offset_inputs + backward_state_inputs, + bwd_outputs + ) + + fwd_module = fx._lazy_graph_module._make_graph_module(joint_module, fwd_graph) + bwd_module = fx._lazy_graph_module._make_graph_module(joint_module, bwd_graph) + return fwd_module, bwd_module + + +def default_partition( + joint_module: fx.GraphModule, _joint_inputs, *, num_fwd_outputs +) -> Tuple[fx.GraphModule, fx.GraphModule]: + """ + Partitions the :attr:`joint_module` in a manner that closely resembles the + behavior observed in the original ``.forward()`` and ``.backward()`` of the + callable, i.e., the resulting forward graph contains those operators that + are executed in the original ``.forward()`` callable passed to + :func:`aot_function`. + + The default partitioner collects the operators that are between the forward + inputs and the forward outputs. This helps in finding the tensors which have + to be stashed for the backward pass. These stashed tensors become the output + of the generated forward graph. 
The remaining operators are then placed in + the backward graph. + + .. warning:: + This API is experimental and likely to change. + + Args: + joint_module(fx.GraphModule): The joint forward and backward graph. This + is the result of AOT Autograd tracing. + + Returns: + Returns the generated forward and backward Fx graph modules. + """ + if has_recomputable_ops(joint_module): + return min_cut_rematerialization_partition(joint_module, _joint_inputs, num_fwd_outputs=num_fwd_outputs) + primal_inputs = list(filter(_is_primal, joint_module.graph.nodes)) + fwd_seed_offset_inputs = list(filter(_is_fwd_seed_offset, joint_module.graph.nodes)) + inputs = primal_inputs + fwd_seed_offset_inputs + fwd_outputs, bwd_outputs = _extract_fwd_bwd_outputs(joint_module, num_fwd_outputs=num_fwd_outputs) + forward_only_graph = _extract_graph_with_inputs_outputs(joint_module.graph, inputs, fwd_outputs) + forward_node_names = {node.name for node in forward_only_graph.nodes if node.op != 'output'} + saved_values = [] + saved_sym_nodes = [] + + for node in joint_module.graph.nodes: + if node.name not in forward_node_names: + continue + if is_sym_node(node): + # Symints must be kept separate from tensors so that PythonFunction only calls + # save_for_backward on tensors and stashes symints in autograd .ctx + saved_sym_nodes.append(node) + elif ( + 'tensor_meta' not in node.meta + and node.op == 'call_function' + ): + # Since we can't save tuple of tensor values, we need to flatten out what we're saving + users = node.users + assert all(user.target == operator.getitem for user in users) + saved_values.extend(users) + else: + backward_usages = [n for n in node.users if n.name not in forward_node_names] + if 'tensor_meta' in node.meta and all(is_sym_node(n) for n in backward_usages): + # If we have a tensor in the forward, where only its sizes/strides are needed in the backward, + # and not the actual tensor data, + # then it will be a lot cheaper to save only the sizes/strides, and not the actual tensor. + # + # Note that saving the tensor could also cause compilation problems: + # If the user mutated an input in the forward and uses its sizes/strides in the backward, + # then we would be obligated to clone the input before saving it to appease autograd. + # (This is how we originally found this bug). + saved_sym_nodes.extend(backward_usages) + else: + saved_values.append(node) + saved_values = list(dict.fromkeys(saved_values).keys()) + saved_sym_nodes = list(dict.fromkeys(saved_sym_nodes).keys()) + + return _extract_fwd_bwd_modules(joint_module, saved_values, saved_sym_nodes=saved_sym_nodes, num_fwd_outputs=num_fwd_outputs) + + +def _prod(x): + s = 1 + for i in x: + s *= i + return s + +def _tensor_nbytes(numel, dtype): + return numel * dtype.itemsize + +def _size_of(node: fx.Node) -> int: + if 'val' in node.meta: + val = node.meta['val'] + if isinstance(val, py_sym_types): + if isinstance(val, torch.SymInt): + return 1 + else: + return 999999 + # NB: The fallback values here are meaningless, maybe we should respect + # torch._inductor.config.unbacked_symint_fallback (but this is a + # layering violation) + elif isinstance(val, (list, tuple)): + return sum(_tensor_nbytes(hint_int(n.numel(), fallback=4098), n.dtype) for n in val if isinstance(n, torch.Tensor)) + elif isinstance(val, torch.Tensor): + return _tensor_nbytes(hint_int(val.numel(), fallback=4098), val.dtype) + + raise RuntimeError(f"Unknown metadata type {type(val)}") + + # Only needed since we don't always trace with fake tensors. 
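For context on how a partition function like ``default_partition`` above is consumed: it is handed to AOT Autograd, which passes the resulting forward and backward modules to the respective compilers. A hedged sketch, assuming the public ``functorch.compile`` entry points re-export these helpers:

```
import torch
from functorch.compile import aot_function, default_partition

def show(name):
    def compiler(gm, example_inputs):
        print(f"{name} graph:")
        gm.graph.print_tabular()
        return gm  # returning the GraphModule runs it eagerly
    return compiler

def f(x, y):
    return (x * y).sin().sum()

compiled = aot_function(f, show("forward"), show("backward"),
                        partition_fn=default_partition)
out = compiled(torch.randn(4, requires_grad=True), torch.randn(4, requires_grad=True))
out.backward()
```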
+ if 'tensor_meta' in node.meta: + metadata = node.meta['tensor_meta'] + # TODO: What is to_size_hint suppose to be? + numel = _prod(map(to_size_hint, metadata.shape)) # noqa: F821 + dtype = metadata.dtype + else: + return 0 + + return _tensor_nbytes(numel, dtype) + + +# Used for some investigative purposes +def _count_ops(graph): + from collections import defaultdict + cnt = defaultdict(int) + for node in graph.nodes: + if node.op == 'call_function': + cnt[node.target.__name__] += 1 + print(sorted(cnt.items(), key=lambda x: x[1], reverse=True)) + + +@functools.lru_cache(None) +def pointwise_ops(): + ops = [] + for attr_name in dir(torch.ops.aten): + opoverloadpacket = getattr(torch.ops.aten, attr_name) + if not isinstance(opoverloadpacket, torch._ops.OpOverloadPacket): + continue + + for overload in opoverloadpacket.overloads(): + op_overload = getattr(opoverloadpacket, overload) + if torch.Tag.pointwise in op_overload.tags: + # currently aot autograd uses packet not overload + ops.append(opoverloadpacket) + break + + return ops + +def get_depth(node, depth_map): + if node in depth_map: + return depth_map[node] + + # Base case + if node.op == "placeholder": + depth_map[node] = 0 + return depth_map[node] + + # Handle output node + if node.op == "output": + args = node.args[0] + for arg in args: + if isinstance(arg, torch.fx.node.Node): + get_depth(arg, depth_map) + return + + # Get the depth of args and set the depth of this node + arg_depths = [get_depth(arg, depth_map) for arg in node.all_input_nodes if isinstance(arg, torch.fx.node.Node)] + # factory ops like full, rand might not have any input args + if len(arg_depths) == 0: + arg_depths = [0] + depth_map[node] = max(arg_depths) + 1 + return depth_map[node] + + +def sort_depths(args, depth_map): + arg_depths = {arg: depth_map[arg] for arg in args if isinstance(arg, torch.fx.node.Node)} + return sorted(arg_depths.items(), key=lambda x: x[1], reverse=True) + + +def reordering_to_mimic_autograd_engine(gm): + """ + This pass finds the first bwd node in the graph (by looking at users of + tangents) and then reorders the graph by walking from this node to all the + way to the end of the graph. At each op in this traveral, we insert this op + in a new graph and try to bring only the relevant subgraph from the other + non-bwd edges relevant for this op. This closely mimics the behavior of + autograd engine. + + Why is this pass required in the first place? + + This is an artifact of how partitioners work today. The starting point of + partitioner is a joint graph, which is fwd and then bwd graph. In the case + of checkpointing, we keep portions of fwd graph in their original place in + the joint graph, while obtaining a bwd graph. As a result, the resulting bwd + graph has copies of recomputed fwd subgraphs followed by the original bwd + graph. If we run this naively, this leads to bad memory footprint, because + the fwd subgraphs are live for way longer duration than necessary. This pass + reorders the operations such that we prioritize the ops for the original bwd + graph while only realizing those ops from the fwd graph that are necessary + at any given point in the graph. 
+ """ + + new_graph = fx.Graph() + env = {} + + # Add new placeholder nodes in the order specified by the inputs + for node in gm.graph.nodes: + if node.op == "placeholder": + new_node = new_graph.placeholder(node.name) + # Can't use node_copy here as we may be turning previous call_function into placeholders + new_node.meta = node.meta + env[node] = new_node + + + order = {} + for idx, node in enumerate(gm.graph.nodes): + order[node] = idx + + # Populate depth for the nodes. Depth is the distance from the inputs. + depths = {} + output_node = next(node for node in gm.graph.nodes if node.op == "output") + get_depth(output_node, depths) + + def insert_node_in_graph(node): + if node in env: + return env[node] + + # Bias traversal towards the nodes that have higher depth - prioritizes + # critical path first. + for arg, _ in sort_depths(node.all_input_nodes, depths): + env[arg] = insert_node_in_graph(arg) + env[node] = new_graph.node_copy(node, lambda x: env[x]) + return env[node] + + # Find first bwd node in the graph + tangent_inputs = list(filter(_is_tangent, gm.graph.nodes)) + first_node_in_bwd = None + minimum_order = math.inf + for tangent in tangent_inputs: + for user in tangent.users: + if order[user] < minimum_order: + minimum_order = order[user] + first_node_in_bwd = user + assert first_node_in_bwd is not None + + # Build the graph op-by-op by starting from the node all the way to the end + for node in list(gm.graph.nodes)[order[first_node_in_bwd]:]: + insert_node_in_graph(node) + + # The output node is already built by the traversal. + new_gm = torch.fx.GraphModule(gm, new_graph) + return new_gm + + +def functionalize_rng_ops(joint_module, fw_module, bw_module, num_sym_nodes): + # During user-driven activation checkpointing, we have to ensure that a rng + # op in fwd yields the same output as the recomputed rng op in the bwd. To + # do this, we use functionalize wrappers to wrap the random ops and share + # rng state between the fwd and bwd graphs. + + # There are 3 main steps to do this + # Step 1 - Construct a mapping of rng node between the fwd and its counterpart in bwd. + # Step 2 - Modify the fwd pass such that + # 1) Replace rand with run_and_save_rng_state wrapper + # 2) Replace the users of the original op with the output[1] of this op. + # 3) Collect all the rng_state - output[0] of each op, and make them + # output nodes. Special care needs to be taken here because fwd outputs + # has symints at the very end. + # Step 3 - Modify the bwd pass such that + # 1) Add the input nodes just before the tangents for the stashed rng states + # 2) Replace rand with run_with_save_rng_state wrappers + # 3) Use the stashed states as inputs to these ops + + # Unique id to generate name + uid = itertools.count() + + def get_rng_ops(gmod): + random_nodes = {} + for node in gmod.graph.nodes: + if ( + node.op == "call_function" + and hasattr(node.target, "tags") + and torch.Tag.nondeterministic_seeded in node.target.tags + ): + random_nodes[node.name] = node + return random_nodes + + def get_device(node): + """ + Check the example value of the node outputs to find the device type. 
+ """ + if "val" not in node.meta: + return None + + candidates = node.meta["val"] + if not isinstance(candidates, tuple): + candidates = (candidates,) + + for candidate in candidates: + if isinstance(candidate, torch.Tensor): + if candidate.device.type == "cuda": + return "cuda" + + return "cpu" + + def get_sample_rng_state(device): + if device == "cuda": + return torch.cuda.get_rng_state() + return torch.get_rng_state() + + # Step 1 - Construct a mapping of rng node between the fwd and its counterpart in bwd. + joint_graph_rng_ops = get_rng_ops(joint_module) + fw_graph_rng_ops = get_rng_ops(fw_module) + bw_graph_rng_ops = get_rng_ops(bw_module) + recomputable_rng_ops_map = dict() + for node in joint_module.graph.nodes: + if ( + must_recompute(node) + and hasattr(node.target, "tags") + and torch.Tag.nondeterministic_seeded in node.target.tags + ): + base_node = joint_graph_rng_ops[node.name] + fw_node = fw_graph_rng_ops[node.name] + bw_node = bw_graph_rng_ops[node.name] + recomputable_rng_ops_map[base_node] = {"fwd": fw_node, "bwd": bw_node} + + run_and_save_rng = torch._prims.rng_prims.run_and_save_rng_state + run_with_rng_state = torch._prims.rng_prims.run_with_rng_state + + for node in bw_module.graph.nodes: + if node.op == "placeholder" and "tangent" in node.name: + bw_tangent_start_node = node + break + + + fw_rng_state_outputs = [] + for base_node, node_pair in recomputable_rng_ops_map.items(): + # Step 2 - Modify the fwd pass such that + fw_node = node_pair["fwd"] + bw_node = node_pair["bwd"] + fw_graph = fw_module.graph + with fw_graph.inserting_before(fw_node): + functional_fw_node = fw_graph.create_node( + "call_function", + run_and_save_rng, + args=(fw_node.target, *fw_node.args), + kwargs=fw_node.kwargs + ) + state = fw_graph.create_node("call_function", operator.getitem, args=(functional_fw_node, 0), kwargs={}) + rng_output = fw_graph.create_node("call_function", operator.getitem, args=(functional_fw_node, 1,), kwargs={}) + fw_node.replace_all_uses_with(rng_output) + fw_graph.erase_node(fw_node) + fw_rng_state_outputs.append(state) + + + # Step 3 - Modify the bwd pass such that + bw_graph = bw_module.graph + with bw_graph.inserting_before(bw_tangent_start_node): + state_name = f"rng_state_output_{next(uid)}" + bw_rng_state_node = bw_graph.placeholder(state_name) + bw_rng_state_node.meta["val"] = get_sample_rng_state(get_device(fw_node)) + + with bw_graph.inserting_before(bw_node): + rng_output = bw_graph.create_node( + "call_function", + run_with_rng_state, + args=(bw_rng_state_node, bw_node.target, *bw_node.args), + kwargs=bw_node.kwargs + ) + + bw_node.replace_all_uses_with(rng_output) + bw_graph.erase_node(bw_node) + + + # Add the rng states in the output of the fwd graph. AOT Autograd assumes + # that symints are at the end of forward graph outputs. So, insert the new + # rng states accordingly. + fw_output_node = next(node for node in fw_module.graph.nodes if node.op == "output") + fw_outputs = fw_output_node.args[0] + sym_node_start_idx = len(fw_outputs) - num_sym_nodes + outputs = fw_outputs[:sym_node_start_idx] + fw_rng_state_outputs + fw_outputs[sym_node_start_idx:] + fw_module.graph.output(outputs) + fw_module.graph.erase_node(fw_output_node) + fw_module.recompile() + bw_module.recompile() + return fw_module, bw_module + + +def cleanup_recompute_tags(joint_module): + """ + If there are two consecutive checkpointed blocks with no operator in + between, we would still want to stash the tensor at the boundary of + checkpointed blocks. 
The following pass makes the last output node + non-recomputable to allow for that. + """ + for node in joint_module.graph.nodes: + if must_recompute(node): + for user in node.users: + if must_recompute(user) and user.meta["recompute"] > node.meta["recompute"]: + node.meta["recompute"] = 0 + return joint_module + + +def min_cut_rematerialization_partition( + joint_module: fx.GraphModule, _joint_inputs, compiler="inductor", recomputable_ops=None, + *, num_fwd_outputs +) -> Tuple[fx.GraphModule, fx.GraphModule]: + """ + Partitions the joint graph such that the backward recomputes the forward. + Recomputing helps in trading off memory bandwidth with computation. + + To create the fwd and bwd graph, we copy the joint graph, manually set the + outputs to just original forward or backward outputs. And then we run the + resulting graphs through dead code elimination. + + .. warning:: + This API is experimental and likely to change. + + Args: + joint_module(fx.GraphModule): The joint forward and backward graph. This + is the result of AOT Autograd tracing. + _joint_inputs: The inputs to the joint graph. This is unused. + compiler: This option determines the default set of recomputable ops. + Currently, there are two options: ``nvfuser`` and ``inductor``. + recomputable_ops: This is an optional set of recomputable ops. If this + is not None, then this set of ops will be used instead of the + default set of ops. + num_fwd_outputs: The number of outputs from the forward graph. + + Returns: + Returns the generated forward and backward Fx graph modules. + """ + try: + import networkx as nx + except ImportError as e: + raise RuntimeError("Need networkx installed to perform smart recomputation " + "heuristics") from e + + joint_module.graph.eliminate_dead_code() + joint_module.recompile() + + fx_g = joint_module.graph + + # add the CSE pass + if config.cse: + cse_graph = fx_graph_cse(fx_g) + joint_module.graph = cse_graph + full_bw_graph = joint_module.graph + + graph_has_recomputable_ops = has_recomputable_ops(joint_module) + graph_has_recomputable_rng_ops = has_recomputable_rng_ops(joint_module) + if graph_has_recomputable_ops: + joint_module = cleanup_recompute_tags(joint_module) + + name_to_node = {} + for node in joint_module.graph.nodes: + name_to_node[node.name] = node + + def classify_nodes(joint_module): + required_bw_nodes = set() + for node in joint_module.graph.nodes: + if node.op == 'placeholder' and "tangents" in node.target: + required_bw_nodes.add(node) + if node in required_bw_nodes: + for user in node.users: + required_bw_nodes.add(user) + + primal_inputs = list(filter(_is_primal, joint_module.graph.nodes)) + fwd_seed_offset_inputs = list(filter(_is_fwd_seed_offset, joint_module.graph.nodes)) + inputs = primal_inputs + fwd_seed_offset_inputs + fwd_outputs, bwd_outputs = _extract_fwd_bwd_outputs(joint_module, num_fwd_outputs=num_fwd_outputs) + required_bw_nodes.update(o for o in bwd_outputs if o is not None) + forward_only_graph = _extract_graph_with_inputs_outputs(joint_module.graph, inputs, fwd_outputs) + required_fw_nodes = {name_to_node[node.name] for node in forward_only_graph.nodes + if node.op != 'output'} + unclaimed_nodes = {node for node in joint_module.graph.nodes + if node not in required_fw_nodes and node not in required_bw_nodes} + return fwd_outputs, required_fw_nodes, required_bw_nodes, unclaimed_nodes, inputs + + orig_fw_outputs, required_fw_nodes, required_bw_nodes, unclaimed_nodes, inputs = classify_nodes(joint_module) + + # networkx blows up on graphs with no required 
backward nodes + # Since there's nothing to partition anyway, and the default partitioner can "handle" + # this case, send our graph over to the default partitioner. + if len(required_bw_nodes) == 0: + return default_partition(joint_module, _joint_inputs, num_fwd_outputs=num_fwd_outputs) + + for node in reversed(joint_module.graph.nodes): + if node not in required_fw_nodes: + node.dist_from_bw = 0 + else: + node.dist_from_bw = int(1e9) + for user in node.users: + node.dist_from_bw = min(node.dist_from_bw, user.dist_from_bw + 1) + + aten = torch.ops.aten + prims = torch.ops.prims + + # compiler == "nvfuser" is the default set of recomputable ops + default_recomputable_ops = [aten.add, aten.sub, aten.div, aten.atan2, aten.mul, aten.max, aten.min, aten.pow, aten.remainder, aten.fmod, aten.__and__, aten.__or__, aten.__xor__, aten.__lshift__, aten.__rshift__, aten.eq, aten.ne, aten.ge, aten.gt, aten.le, aten.lt, aten.abs, aten.bitwise_not, aten.ceil, aten.floor, aten.frac, aten.neg, aten.relu, aten.round, aten.silu, aten.trunc, aten.log, aten.log10, aten.log1p, aten.log2, aten.lgamma, aten.exp, aten.expm1, aten.erf, aten.erfc, aten.cos, aten.acos, aten.cosh, aten.sin, aten.asin, aten.sinh, aten.tan, aten.atan, aten.tanh, aten.atanh, aten.sqrt, aten.rsqrt, aten.reciprocal, aten.sigmoid, aten.softplus, aten.threshold, aten.threshold_backward, aten.clamp, aten.where, aten.lerp, aten.addcmul, aten.gelu, aten.gelu_backward, aten.sum, aten.mean, aten._grad_sum_to_size, aten.sum_to_size, aten.amax, aten.to, aten.type_as, operator.getitem, aten.squeeze, aten.unsqueeze, aten.rsub, aten._to_copy] # noqa: E501,B950 + view_ops = [aten.squeeze, aten.unsqueeze, aten.alias] + if compiler == "inductor": + default_recomputable_ops += [prims.div, prims.convert_element_type, aten.clone, aten._to_copy, aten.full_like, prims.var, prims.sum, aten.var, aten.std, prims.broadcast_in_dim, aten.select, aten.permute, aten._unsafe_view, aten.view, aten.expand, aten.slice, aten.reshape, aten.broadcast_tensors, aten.scalar_tensor, aten.ones, aten.new_zeros, aten.lift_fresh_copy, aten.arange, aten.triu, aten.var_mean, aten.isinf, aten.any, aten.full, aten.as_strided, aten.zeros, aten.argmax, aten.maximum] # noqa: E501,B950 + view_ops += [aten.view, aten.slice, aten.permute, aten.t, prims.broadcast_in_dim, aten.expand, aten.as_strided] + # Natalia said that we should allow recomputing indexing :) + default_recomputable_ops += [aten.index] + default_recomputable_ops += view_ops + + default_recomputable_ops += pointwise_ops() + + default_recomputable_ops += [ + aten.zeros_like, + ] + + default_recomputable_ops += [ + method_to_operator(m) + for m in magic_methods + ] + + recomputable_ops = set(recomputable_ops) if recomputable_ops is not None else set(default_recomputable_ops) + + random_ops = [aten.native_dropout, aten.rand_like, aten.randn_like] + compute_intensive_ops = [aten.mm, aten.convolution, aten.convolution_backward, aten.bmm, aten.addmm, aten.upsample_bilinear2d, aten._softmax, aten._softmax_backward_data, aten.native_layer_norm, aten.native_layer_norm_backward, aten.native_batch_norm, aten.native_batch_norm_backward, aten._native_batch_norm_legit] # noqa: E501,B950 + + fusible_ops = recomputable_ops | set(random_ops) + if AOT_PARTITIONER_DEBUG: + joint_module_ops = { + str(node.target._overloadpacket) + for node in joint_module.graph.nodes + if node.op == "call_function" and hasattr(node.target, "_overloadpacket") + } + ops_ignored = joint_module_ops - {str(i) for i in recomputable_ops} + print("Ops banned from 
rematerialization: ", ops_ignored) + print() + + def is_materialized_backwards(node): + cur_nodes = {node} + while len(cur_nodes) > 0: + cur = cur_nodes.pop() + for user in cur.users: + if user not in required_fw_nodes and not is_fusible(cur, user): + return True + if user not in required_fw_nodes and get_aten_target(user) in view_ops: + cur_nodes.add(user) + + return False + + def ban_recomputation(node): + if "recompute" in node.meta: + return node.meta["recompute"] == 0 + elif config.aggressive_recomputation: + ignored_ops = random_ops + compute_intensive_ops + return (node.op == 'call_function' and get_aten_target(node) in ignored_ops) + else: + if node.op != 'call_function': + return False + if get_aten_target(node) not in recomputable_ops: + return True + if node.target == operator.getitem: + return False + if node.target in [aten.lift_fresh_copy.default, aten.lift_fresh.default]: + return False + + # If a node *must* be materialized in the backwards pass, then we + # should never recompute it. This is a pretty subtle point. In + # general, the assumption we make is that recomputing a node in the + # backwards pass is "free". However, if a node must be materialized + # in the backwards pass, then recomputing it is never free. + if is_materialized_backwards(node): + return True + + # Arbitrary hack that sometimes seems to help things. The above + # modification appears to have made this heuristic a lot less critical + # for performance. + # TODO: Investigate why this hack helps. + # TODO: Investigate the interaction with compiler assisted + # activation checkpointing. Removing the heuristic improves both + # memory footprint and speedup. + if not graph_has_recomputable_ops: + if compiler == "inductor" and node.dist_from_bw > config.max_dist_from_bw: + return True + # If the output of an op is 4x smaller (arbitrary choice), + # then we don't allow recomputation. + input_tensors_size = sum(_size_of(i) for i in node.args if isinstance(i, fx.Node)) + output_size = _size_of(node) + return (output_size * 4 < input_tensors_size) + + def is_fusible(a, b): + # We can perform "memory fusion" into a cat, but cat cannot be a + # producer to a fusion + if get_aten_target(b) == aten.cat: + return True + return get_aten_target(a) in fusible_ops and get_aten_target(b) in fusible_ops + + def is_materialized(node): + if node.op == 'placeholder': + return True + + return not all(is_fusible(node, user) for user in node.users) + + def get_node_weight(node) -> int: + mem_sz = _size_of(node) + + # Heuristic to bias towards nodes closer to the backwards pass + # Complete guess about current value + mem_sz = int(mem_sz * (1.1 ** max(min(node.dist_from_bw, 100), 1))) + # mem_sz = int(mem_sz + node.dist_from_bw) + + if is_materialized(node): + return mem_sz + else: + return mem_sz * 2 + + nx_graph = nx.DiGraph() + for node in full_bw_graph.nodes: + if node.op == 'output': + continue + + if node in required_bw_nodes: + if node not in inputs: + nx_graph.add_edge(node.name + "_in", "sink", capacity=math.inf) + continue + # If someone saves a input for backward as-is and backward + # returns that tensor as-is as a grad input, then the node x would + # be both a required_bw_node and an input. In this case we + # (1) connect x_in to to the source, (2) x_out to the sink, and + # (3) assign the proper weight to the x_in-x_out edge, so that + # x would be part of cut nodes. A case where this happens is if + # NestedTensor saves a offset tensor as part of the singleton int + # in sizes. 
+ nx_graph.add_edge(node.name + "_out", "sink", capacity=math.inf) + + if _is_primal(node) or _is_fwd_seed_offset(node): + nx_graph.add_edge("source", node.name + "_in", capacity=math.inf) + + # If a node can't be recomputed (too expensive or involves randomness), + # we prevent it from being recomputed by adding an inf edge to the source + # We only need to ban nodes in the fw pass, as those are the only ones that would be recomputed. + if ban_recomputation(node) and node in required_fw_nodes: + nx_graph.add_edge("source", node.name + "_in", capacity=math.inf) + + # Checks if a node is actually a tuple. Can be simplified to just an isinstance check if we always use faketensors. + is_non_tensor_node = (('val' not in node.meta and 'tensor_meta' not in node.meta) or + ('val' in node.meta and not isinstance(node.meta['val'], torch.Tensor))) + + if is_sym_node(node): + weight = sym_node_size(node) + elif is_non_tensor_node: + weight = 0 if isinstance(node.meta.get("val"), BackwardState) else math.inf + else: + weight = get_node_weight(node) + + # Creates the weights on the "node" edge + nx_graph.add_edge(node.name + "_in", node.name + "_out", capacity=weight) + for user in node.users: + nx_graph.add_edge(node.name + "_out", user.name + "_in", capacity=math.inf) + + try: + cut_value, partition = nx.minimum_cut(nx_graph, "source", "sink") + except Exception: + print('Failed to compute min-cut on following graph:') + print('\n'.join(nx.readwrite.edgelist.generate_edgelist(nx_graph))) + raise + + reachable, non_reachable = partition + cutset = set() + for u, nbrs in ((n, nx_graph[n]) for n in reachable): + cutset.update((u, v) for v in nbrs if v in non_reachable) + + cut_nodes = set() + for node_in, node_out in cutset: + assert node_in[:-3] == node_out[:-4] + node_name = node_in[:-3] + cut_nodes.add(node_name) + + # To make this stuff deterministic + node_idx = {node: idx for idx, node in enumerate(joint_module.graph.nodes)} + saved_values = sorted((name_to_node[node] for node in cut_nodes), key=lambda x: node_idx[x]) + # save_for_backward on tensors and stashes symints in autograd .ctx + saved_sym_nodes = list(filter(is_sym_node, saved_values)) + saved_values = list(filter(lambda n: not is_sym_node(n), saved_values)) + # NB: saved_sym_nodes will be mutated to reflect the actual saved symbols + fw_module, bw_module = _extract_fwd_bwd_modules( + joint_module, saved_values, saved_sym_nodes=saved_sym_nodes, num_fwd_outputs=num_fwd_outputs) + + if graph_has_recomputable_ops: + if graph_has_recomputable_rng_ops: + fw_module, bw_module = functionalize_rng_ops( + joint_module, fw_module, bw_module, len(saved_sym_nodes) + ) + bw_module = reordering_to_mimic_autograd_engine(bw_module) + + if AOT_PARTITIONER_DEBUG: + print("Theoretical Activations Stored: ", sum([_size_of(i) for i in saved_values]) / 1e9) + fw_module_nodes = {node.name for node in fw_module.graph.nodes if node.op == 'call_function'} + bw_module_nodes = {node.name for node in bw_module.graph.nodes if node.op == 'call_function'} + remat_nodes = fw_module_nodes & bw_module_nodes + + counts = defaultdict(int) + for node in fw_module.graph.nodes: + if node.name in remat_nodes and hasattr(node.target, '_overloadpacket'): + counts[str(node.target._overloadpacket)] += 1 + print(f"# remat/fw/bw: {len(remat_nodes)}/{len(fw_module_nodes)}/{len(bw_module_nodes)}") + print("Count of Ops Rematerialized: ", sorted(counts.items(), key=lambda x: x[1], reverse=True)) + return fw_module, bw_module + + +def draw_graph( + traced: torch.fx.GraphModule, + fname: 
str, + figname: str = "fx_graph", + clear_meta: bool = True, + prog: Union[str, List[str]] = None, + parse_stack_trace: bool = False, + dot_graph_shape: Optional[str] = None, +) -> None: + if clear_meta: + new_graph = copy.deepcopy(traced.graph) + traced = fx.GraphModule(traced, new_graph) + for node in traced.graph.nodes: + node.meta = {} + base, ext = os.path.splitext(fname) + if not ext: + ext = ".svg" + print(f"Writing FX graph to file: {base}{ext}") + g = graph_drawer.FxGraphDrawer( + traced, + figname, + parse_stack_trace=parse_stack_trace, + dot_graph_shape=dot_graph_shape, + ) + x = g.get_main_dot_graph() + write_method = getattr(x, "write_" + ext.lstrip(".")) + fname = f"{base}{ext}" + if prog is None: + write_method(fname) + else: + write_method(fname, prog=prog) + + +def draw_joint_graph( + graph: torch.fx.GraphModule, + joint_inputs, + file_name: str = "full_graph.png", + dot_graph_shape: Optional[str] = None, +): + draw_graph(graph, file_name, dot_graph_shape=dot_graph_shape) + return default_partition(graph, joint_inputs) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/pyfunctorch.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/pyfunctorch.py new file mode 100644 index 0000000000000000000000000000000000000000..39d8b8ba7861eda2dc2d265221717b12da4f4dee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/pyfunctorch.py @@ -0,0 +1,252 @@ +from abc import ABC, abstractmethod +import contextlib +from typing import Any, List, Tuple +import torch +import torch.utils._pytree as pytree +from torch._C._functorch import ( + TransformType, + RandomnessType, + CInterpreter, + CGradInterpreterPtr, + CFunctionalizeInterpreterPtr, + CVmapInterpreterPtr, + CJvpInterpreterPtr, + pop_dynamic_layer_stack, + push_dynamic_layer_stack, +) +from torch.autograd.forward_ad import _set_fwd_grad_enabled + +""" +This file contains the functorch integration with PyDispatcher. + +PyDispatcher does not understand functorch's DynamicLayerStack dispatching +logic because it is entirely implemented in C++ in the fallbacks for two +dispatch keys, FuncTorchDynamicLayer{Front, Back}Mode (PyDispatcher is unable +to directly reuse C++ boxed fallbacks). + +Instead of trying to hammer PyDispatcher into understanding those fallbacks, +we re-implement the logic of peeking the top of the stack for an interpreter, +selecting the interpreter to dispatch on, etc, in Python. This leads to a +simpler design. + +The main difference between C++ functorch and PyDispatcher's functorch logic +is that: +- C++ functorch needs to manually tweak dispatch keys to ping-pong between + DynamicLayerFrontMode and DynamicLayerBackMode. +- PyDispatcher's functorch logic pops an Interpreter from the top of the stack + and asks it to execute the rule associated with the Interpreter. + +In C++ we do the ping-pong because e.g. vmap rules are associated with the +batched DispatchKey, but in PyDispatcher we are able to avoid this by asking +the user to register a batching rule directly to a transform that an +interpreter then invokes. +""" + + +# FuncTorchInterpreter is the Python version of Interpreter (recall that +# the DynamicLayerStack is a stack of interpreters). +# It is a wrapper around the actual C++ Interpreter object. +# +# Keep the methods in sync with aten/src/ATen/functorch/Interpreter.h +class FuncTorchInterpreter(ABC): + def __init__(self, cptr: Any): + self._cptr = cptr + + # Process an operation. eg for vmap, this is invoking a batching rule. 
+ # Conceptually this is analogous to Interpreter::process in C++ + @abstractmethod + def process(self, op, args, kwargs): + pass + + # lower an operation from this Interpreter to the next Interpreter on the stack. + # Concretely, this involves temporarily popping the current Interpreter. + # Conceptually this is analogous to Interpreter::sendToNextInterpreter in C++ + def lower(self): + return temporarily_pop_interpreter_stack() + + def level(self): + return self._cptr.level() + + def key(self): + return self._cptr.key() + + def get_state(self): + raise NotImplementedError() + + def check_state(self, state): + return state == self.get_state() + + +@contextlib.contextmanager +def temporarily_pop_interpreter_stack(): + try: + saved = pop_dynamic_layer_stack() + yield + finally: + push_dynamic_layer_stack(saved) + + +class VmapInterpreter(FuncTorchInterpreter): + def __init__(self, cdata: CInterpreter): + assert cdata.key() == TransformType.Vmap + # NOTE: [Interpreter cdata vs cptr] + # cdata is a generic CInterpreter. We wrap it in a CVmapInterpreterPtr + # so that we can access methods specific to the vmap interpreter + self._cdata = cdata + self._cptr = CVmapInterpreterPtr(cdata) + + def process(self, op, args, kwargs): + kernel = op.functorch_table[TransformType.Vmap] + return kernel(self, *args, **kwargs) + + def batch_size(self): + return self._cptr.batchSize() + + def randomness(self): + typ = self._cptr.randomness() + if typ == RandomnessType.Error: + return "error" + elif typ == RandomnessType.Same: + return "same" + elif typ == RandomnessType.Different: + return "different" + raise RuntimeError(f"Unknown RandomnessType: {typ}") + + def get_state(self): + return (self.key().name, self.level(), self.randomness()) + + +@contextlib.contextmanager +def nested(*contexts): + with contextlib.ExitStack() as stack: + for ctx in contexts: + stack.enter_context(ctx) + yield contexts + + +class GradInterpreter(FuncTorchInterpreter): + def __init__(self, cdata: CInterpreter): + assert cdata.key() == TransformType.Grad + # See NOTE: [Interpreter cdata vs cptr] + self._cdata = cdata + self._cptr = CGradInterpreterPtr(cdata) + + def lift(self, args, kwargs): + args, kwargs = pytree.tree_map_only(torch.Tensor, self._cptr.lift, [args, kwargs]) + return args, kwargs + + def process(self, op, args, kwargs): + kernel = op.functorch_table[TransformType.Grad] + args, kwargs = self.lift(args, kwargs) + return kernel(self, *args, **kwargs) + + # GradInterpreter has custom lower because of the no_grad interaction + # See NOTE [grad and vjp interaction with no_grad] + # This logic is mirrored from C++ GradInterpreterPtr::sendToNextInterpreter + def lower(self): + prev_grad_mode = self.prev_grad_mode() + if not prev_grad_mode: + return nested(torch.no_grad(), super().lower()) + return super().lower() + + def prev_grad_mode(self): + return self._cptr.prevGradMode() + + def get_state(self): + return (self.key().name, self.level(), self.prev_grad_mode()) + + +class JvpInterpreter(FuncTorchInterpreter): + def __init__(self, cdata: CInterpreter): + assert cdata.key() == TransformType.Jvp + # See NOTE: [Interpreter cdata vs cptr] + self._cdata = cdata + self._cptr = CJvpInterpreterPtr(cdata) + + def lift(self, args, kwargs): + args, kwargs = pytree.tree_map_only(torch.Tensor, self._cptr.lift, [args, kwargs]) + return args, kwargs + + def process(self, op, args, kwargs): + kernel = op.functorch_table[TransformType.Jvp] + args, kwargs = self.lift(args, kwargs) + return kernel(self, *args, **kwargs) + + # Jvp has 
custom lower because of the no_fwd_grad interaction + # See NOTE [grad and vjp interaction with no_grad] for related info. + # This logic is mirrored from C++ JvpInterpreterPtr::sendToNextInterpreter + def lower(self): + prev_fwd_grad_mode = self.prev_fwd_grad_mode() + if not prev_fwd_grad_mode: + return nested(_set_fwd_grad_enabled(False), super().lower()) + return super().lower() + + def prev_fwd_grad_mode(self): + return self._cptr.prevFwdGradMode() + + +class FunctionalizeInterpreter(FuncTorchInterpreter): + def __init__(self, cdata: CInterpreter): + assert cdata.key() == TransformType.Functionalize + self._cdata = cdata + self._cptr = CFunctionalizeInterpreterPtr(cdata) + + def process(self, op, args, kwargs): + kernel = op.functorch_table[TransformType.Functionalize] + return kernel(self, *args, **kwargs) + + def functionalize_add_back_views(self): + return self._cptr.functionalizeAddBackViews() + + +def coerce_cinterpreter(cinterpreter: CInterpreter) -> FuncTorchInterpreter: + key = cinterpreter.key() + if key == TransformType.Grad: + return GradInterpreter(cinterpreter) + if key == TransformType.Vmap: + return VmapInterpreter(cinterpreter) + if key == TransformType.Jvp: + return JvpInterpreter(cinterpreter) + if key == TransformType.Functionalize: + return FunctionalizeInterpreter(cinterpreter) + raise RuntimeError(f"NYI: PyDispatcher has not implemented support for {key}") + + +def retrieve_current_functorch_interpreter() -> FuncTorchInterpreter: + interpreter = torch._C._functorch.peek_interpreter_stack() + assert interpreter is not None + return coerce_cinterpreter(interpreter) + + +def retrieve_all_functorch_interpreters() -> List[FuncTorchInterpreter]: + cis = torch._C._functorch.get_interpreter_stack() + if cis is None: + return [] + return [coerce_cinterpreter(ci) for ci in cis] + + +def compare_functorch_state(states: List[Tuple[Any, ...]]) -> bool: + # There are four possible cases covered here: + # 1. Current stack empty AND stack when generated not empty -> Invalidate + # 2. Current stack not empty AND stack when generated empty -> Invalidate + # 3. Current stack and generated stack empty -> Valid FX graph + # 4. Current stack and generated stack not empty -> Valid if both states match + peek = torch._C._functorch.peek_interpreter_stack() + if (peek is None and len(states) != 0) or (peek is not None and len(states) == 0): + return False + + cis = retrieve_all_functorch_interpreters() + return len(cis) == len(states) and \ + all(ci.check_state(state) for ci, state in zip(cis, states)) + + +def dispatch_functorch(op, args, kwargs): + interpreter = retrieve_current_functorch_interpreter() + # In traditional PyTorch operators, DispatchKey::FuncTorchTensorWrapper's + # unwrap_dead_tensors fallback handles unwrapping dead tensor wrappers. + # PyDispatcher sidesteps the PyTorch dispatcher when dealing with functorch + # transforms, so we manually unwrap the dead tensors here. + # This logic won't need to exist when we have mode-only functorch. 
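+    # Editor's sketch of the dispatch flow handled below (names mirror the
+    # interpreter classes defined earlier in this file): the interpreter at the
+    # top of the DynamicLayerStack processes the op by looking up the Python
+    # rule registered for its transform, e.g.
+    #     kernel = op.functorch_table[TransformType.Vmap]
+    #     kernel(interpreter, *args, **kwargs)
+    # (see VmapInterpreter.process / GradInterpreter.process above).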
+ args, kwargs = pytree.tree_map_only( + torch.Tensor, torch._C._functorch.unwrap_if_dead, (args, kwargs)) + return interpreter.process(op, args, kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/python_key.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/python_key.py new file mode 100644 index 0000000000000000000000000000000000000000..e7c805841a62b015e6fee609d370109ddd45821a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/python_key.py @@ -0,0 +1,9 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +__all__ = ["make_fx", "dispatch_trace", "PythonKeyTracer", "pythonkey_decompose"] +from torch.fx.experimental.proxy_tensor import make_fx, dispatch_trace, PythonKeyTracer, decompose + +pythonkey_decompose = decompose diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/pytree_hacks.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/pytree_hacks.py new file mode 100644 index 0000000000000000000000000000000000000000..8c4b50bc6ad4c3d36ec358493569a5296cd2d053 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/pytree_hacks.py @@ -0,0 +1,22 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import warnings + +# TODO: remove this file when the migration of the pytree utility is done +from torch.utils._pytree import tree_map_, treespec_pprint + + +__all__ = ["tree_map_", "treespec_pprint"] + + +with warnings.catch_warnings(): + warnings.simplefilter("always") + warnings.warn( + "torch._functorch.pytree_hacks is deprecated and will be removed in a future release. " + "Please use torch.utils._pytree instead.", + DeprecationWarning, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/top_operators_github_usage.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/top_operators_github_usage.py new file mode 100644 index 0000000000000000000000000000000000000000..12e87e60f6befd0399b1ffb634f83f53588d436b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/top_operators_github_usage.py @@ -0,0 +1,625 @@ +# mypy: ignore-errors + +""" +From https://docs.google.com/spreadsheets/d/12R3nCOLskxPYjjiNkdqy4OdQ65eQp_htebXGODsjSeA/edit#gid=0 +Try to keep this list in sync with that. 
+""" +top_torch = [ + ("t", 6837449), + ("tensor", 585786), + ("mode", 462182), + ("cat", 394818), + ("max", 368038), + ("zeros", 329495), + ("load", 327756), + ("no_grad", 294694), + ("save", 265130), + ("from_numpy", 243063), + ("manual_seed", 165044), + ("ones", 153696), + ("randn", 150796), + ("stack", 133358), + ("sum", 130772), + ("arange", 98087), + ("rand", 94715), + ("mean", 88546), + ("exp", 73883), + ("zeros_like", 72831), + ("min", 72248), + ("sigmoid", 66798), + ("log", 62135), + ("matmul", 47811), + ("clamp", 45304), + ("sqrt", 44911), + ("abs", 43535), + ("tanh", 42793), + ("empty", 40311), + ("argmax", 38435), + ("bmm", 33984), + ("pow", 33571), + ("norm", 31125), + ("mm", 30995), + ("is_tensor", 29546), + ("ones_like", 29512), + ("nonzero", 28681), + ("full", 28373), + ("unsqueeze", 27911), + ("where", 26585), + ("randperm", 26450), + ("eye", 24342), + ("mul", 23236), + ("topk", 22537), + ("as_tensor", 21967), + ("sort", 21412), + ("squeeze", 20863), + ("randint", 20771), + ("linspace", 20041), + ("add", 19201), + ("transpose", 18663), + ("split", 18325), + ("gather", 17904), + ("set_grad_enabled", 16013), + ("sin", 15669), + ("cos", 15562), + ("div", 15513), + ("index_select", 14866), + ("multinomial", 14331), + ("flatten", 14267), + ("isnan", 14170), + ("randn_like", 13096), + ("eq", 12680), + ("einsum", 12480), + ("round", 12367), + ("floor", 11628), + ("allclose", 11000), + ("reshape", 10605), + ("diag", 10167), + ("chunk", 9581), + ("std", 9379), + ("set_default_tensor_type", 9281), + ("triu", 8559), + ("meshgrid", 8292), + ("set_num_threads", 8126), + ("unique", 7964), + ("full_like", 7780), + ("tril", 7538), + ("dot", 7275), + ("sign", 6943), + ("equal", 6916), + ("normal", 6750), + ("cumsum", 6556), + ("dist", 6058), + ("isfinite", 6030), + ("gt", 5935), + ("set_printoptions", 5888), + ("range", 5491), + ("empty_like", 5351), + ("flip", 5342), + ("masked_select", 5341), + ("bernoulli", 5262), + ("atan", 5253), + ("var", 5247), + ("prod", 5200), + ("erf", 5088), + ("inverse", 5072), + ("addmm", 4854), + ("logsumexp", 4582), + ("fft", 4436), + ("lt", 4421), + ("log2", 4316), + ("enable_grad", 4238), + ("rand_like", 4187), + ("argsort", 3972), + ("seed", 3932), + ("mv", 3547), + ("ger", 3309), + ("ge", 3248), + ("atan2", 3210), + ("ceil", 3202), + ("ne", 3075), + ("bincount", 3063), + ("acos", 3055), + ("rsqrt", 3031), + ("svd", 3029), + ("numel", 3003), + ("log1p", 2840), + ("unbind", 2808), + ("le", 2714), + ("isinf", 2707), + ("cross", 2646), + ("set_default_dtype", 2536), + ("argmin", 2535), + ("sparse_coo_tensor", 2489), + ("log10", 2304), + ("kthvalue", 2192), + ("set_rng_state", 2158), + ("get_rng_state", 1996), + ("get_default_dtype", 1879), + ("det", 1868), + ("qr", 1864), + ("histc", 1852), + ("symeig", 1832), + ("trace", 1801), + ("median", 1795), + ("addcmul", 1751), + ("remainder", 1717), + ("baddbmm", 1693), + ("lgamma", 1665), + ("repeat_interleave", 1598), + ("fmod", 1576), + ("reciprocal", 1575), + ("tan", 1560), + ("initial_seed", 1532), + ("take", 1529), + ("stft", 1487), + ("get_num_threads", 1477), + ("real", 1459), + ("cholesky", 1406), + ("quantize_per_tensor", 1392), + ("diag_embed", 1364), + ("lerp", 1363), + ("asin", 1345), + ("eig", 1333), + ("trunc", 1290), + ("diagonal", 1287), + ("cosh", 1279), + ("rfft", 1269), + ("cumprod", 1260), + ("addr", 1211), + ("roll", 1198), + ("narrow", 1188), + ("digamma", 1172), + ("square", 1163), + ("sinh", 1131), + ("logspace", 1084), + ("broadcast_tensors", 1070), + ("irfft", 1013), + ("frac", 997), + 
("hann_window", 994), + ("solve", 989), + ("logdet", 977), + ("expm1", 968), + ("cdist", 946), + ("addmv", 903), + ("randint_like", 888), + ("tensordot", 888), + ("ifft", 877), + ("true_divide", 854), + ("erfinv", 830), + ("addcdiv", 819), + ("addbmm", 813), + ("renorm", 781), + ("pinverse", 753), + ("isclose", 740), + ("erfc", 729), + ("is_storage", 725), + ("triangular_solve", 723), + ("rot90", 709), + ("logical_not", 686), + ("geqrf", 681), + ("slogdet", 677), + ("lu", 665), + ("hamming_window", 659), + ("orgqr", 651), + ("ormqr", 622), + ("is_floating_point", 602), + ("diagflat", 562), + ("cholesky_solve", 559), + ("tril_indices", 552), + ("chain_matmul", 551), + ("triu_indices", 548), + ("angle", 522), + ("poisson", 505), + ("matrix_power", 485), + ("unique_consecutive", 471), + ("quantize_per_channel", 465), + ("std_mean", 458), + ("bartlett_window", 447), + ("var_mean", 428), + ("lstsq", 421), + ("logical_and", 419), + ("mvlgamma", 411), + ("blackman_window", 400), + ("bitwise_not", 395), + ("cholesky_inverse", 388), + ("as_strided", 384), + ("floor_divide", 353), + ("cartesian_prod", 321), + ("lu_solve", 317), + ("set_flush_denormal", 310), + ("empty_strided", 283), + ("logical_xor", 282), + ("polygamma", 282), + ("logical_or", 280), + ("set_num_interop_threads", 278), + ("combinations", 274), + ("trapz", 270), + ("matrix_rank", 260), + ("lu_unpack", 255), + ("result_type", 244), + ("conj", 231), + ("cummax", 230), + ("lobpcg", 229), + ("bitwise_xor", 217), + ("promote_types", 213), + ("get_num_interop_threads", 211), + ("cummin", 205), + ("bitwise_and", 198), + ("dequantize", 192), + ("bitwise_or", 191), + ("imag", 191), + ("can_cast", 184), + ("istft", 180), + ("compiled_with_cxx11_abi", 159), + ("is_complex", 151), + ("block_diag", 136), + ("pca_lowrank", 124), + ("absolute", 122), + ("svd_lowrank", 108), + ("neg", 2), +] + +top_nn_functional = [ + ("nn.functional.softmax", 10522), + ("nn.functional.relu", 8572), + ("nn.functional.interpolate", 7277), + ("nn.functional.pad", 5207), + ("nn.functional.log_softmax", 4699), + ("nn.functional.normalize", 2338), + ("nn.functional.cross_entropy", 2083), + ("nn.functional.grid_sample", 1970), + ("nn.functional.one_hot", 1967), + ("nn.functional.mse_loss", 1920), + ("nn.functional.conv2d", 1593), + ("nn.functional.dropout", 1516), + ("nn.functional.softplus", 1385), + ("nn.functional.sigmoid", 1128), + ("nn.functional.linear", 1036), + ("nn.functional.gelu", 930), + ("nn.functional.avg_pool2d", 899), + ("nn.functional.max_pool2d", 876), + ("nn.functional.nll_loss", 863), + ("nn.functional.embedding", 737), + ("nn.functional.tanh", 664), + ("nn.functional.leaky_relu", 640), + ("nn.functional.adaptive_avg_pool2d", 633), + ("nn.functional.cosine_similarity", 627), + ("nn.functional.unfold", 609), + ("nn.functional.conv1d", 596), + ("nn.functional.binary_cross_entropy_with_logits", 591), + ("nn.functional.l1_loss", 571), + ("nn.functional.binary_cross_entropy", 492), + ("nn.functional.elu", 416), + ("nn.functional.batch_norm", 413), + ("nn.functional.upsample", 413), + ("nn.functional.fold", 305), + ("nn.functional.affine_grid", 298), + ("nn.functional.max_pool1d", 297), + ("nn.functional.torch", 294), + ("nn.functional.threshold", 263), + ("nn.functional.smooth_l1_loss", 262), + ("nn.functional.pairwise_distance", 253), + ("nn.functional.logsigmoid", 243), + ("nn.functional.adaptive_max_pool2d", 235), + ("nn.functional.relu6", 213), + ("nn.functional.pixel_shuffle", 209), + ("nn.functional.avg_pool3d", 203), + ("nn.functional.bilinear", 
203), + ("nn.functional.conv_transpose2d", 201), + ("nn.functional.gumbel_softmax", 197), + ("nn.functional.max_unpool2d", 196), + ("nn.functional.kl_div", 191), + ("nn.functional.hardtanh", 189), + ("nn.functional.ctc_loss", 185), + ("nn.functional.layer_norm", 178), + ("nn.functional.conv3d", 172), + ("nn.functional.max_unpool3d", 167), + ("nn.functional.hardshrink", 165), + ("nn.functional.hardswish", 156), + ("nn.functional.selu", 156), + ("nn.functional.glu", 155), + ("nn.functional.assert_int_or_pair", 150), + ("nn.functional.hardsigmoid", 146), + ("nn.functional.upsample_bilinear", 146), + ("nn.functional.max_pool3d", 140), + ("nn.functional.adaptive_avg_pool3d", 139), + ("nn.functional.instance_norm", 124), + ("nn.functional.embedding_bag", 122), + ("nn.functional.upsample_nearest", 110), + ("nn.functional.avg_pool1d", 105), + ("nn.functional.prelu", 102), + ("nn.functional.celu", 92), + ("nn.functional.dropout2d", 86), + ("nn.functional.hinge_embedding_loss", 82), + ("nn.functional.softsign", 81), + ("nn.functional.max_unpool1d", 74), + ("nn.functional.silu", 74), + ("nn.functional.softshrink", 70), + ("nn.functional.leaky_relu_", 68), + ("nn.functional.softmin", 67), + ("nn.functional.channel_shuffle", 66), + ("nn.functional.multilabel_margin_loss", 66), + ("nn.functional.dropout3d", 65), + ("nn.functional.multi_margin_loss", 65), + ("nn.functional.lp_pool2d", 64), + ("nn.functional.conv_transpose1d", 62), + ("nn.functional.triplet_margin_loss", 62), + ("nn.functional.tanhshrink", 61), + ("nn.functional.adaptive_max_pool1d", 59), + ("nn.functional.cosine_embedding_loss", 58), + ("nn.functional.multi_head_attention_forward", 58), + ("nn.functional.max_pool1d_with_indices", 53), + ("nn.functional.poisson_nll_loss", 53), + ("nn.functional.margin_ranking_loss", 52), + ("nn.functional.soft_margin_loss", 52), + ("nn.functional.adaptive_max_pool3d", 51), + ("nn.functional.group_norm", 51), + ("nn.functional.local_response_norm", 51), + ("nn.functional.multilabel_soft_margin_loss", 51), + ("nn.functional.relu_", 50), + ("nn.functional.alpha_dropout", 49), + ("nn.functional.feature_alpha_dropout", 49), + ("nn.functional.lp_pool1d", 49), + ("nn.functional.adaptive_max_pool1d_with_indices", 48), + ("nn.functional.adaptive_max_pool2d_with_indices", 48), + ("nn.functional.adaptive_max_pool3d_with_indices", 48), + ("nn.functional.fractional_max_pool2d", 48), + ("nn.functional.fractional_max_pool2d_with_indices", 48), + ("nn.functional.fractional_max_pool3d", 48), + ("nn.functional.fractional_max_pool3d_with_indices", 48), + ("nn.functional.max_pool2d_with_indices", 48), + ("nn.functional.max_pool3d_with_indices", 48), + ("nn.functional.handle_torch_function", 47), + ("nn.functional.has_torch_function", 47), + ("nn.functional.adaptive_avg_pool1d", 43), + ("nn.functional.pdist", 43), + ("nn.functional.rrelu_", 37), + ("nn.functional.elu_", 34), + ("nn.functional.boolean_dispatch", 33), + ("nn.functional.hardtanh_", 26), + ("nn.functional.triplet_margin_with_distance_loss", 23), + ("nn.functional.selu_", 20), + ("nn.functional.pixel_unshuffle", 19), + ("nn.functional.conv_transpose3d", 18), + ("nn.functional.gaussian_nll_loss", 15), + ("nn.functional.has_torch_function_unary", 15), + ("nn.functional.has_torch_function_variadic", 15), + ("nn.functional.celu_", 13), + ("nn.functional.huber_loss", 7), + ("nn.functional.mish", 4), + ("nn.functional.threshold_", 3), + ("nn.functional.grad", 2), + ("nn.functional.conv_tbc", 1), + ("nn.functional.math", 1), +] + +top_nn_module = [ + ("nn.Module", 
927129, None), + ("nn.Linear", 530688, "nn.functional.linear"), + ("nn.Sequential", 384968, None), + ("nn.Conv2d", 383320, "nn.functional.conv2d"), + ("nn.ReLU", 318877, "nn.functional.relu"), + ("nn.BatchNorm2d", 233265, "nn.functional.batch_norm"), + ("nn.Dropout", 179268, "nn.functional.dropout"), + ("nn.ModuleList", 171225, None), + ("nn.Parameter", 153291, None), + ("nn.CrossEntropyLoss", 152696, "nn.functional.cross_entropy"), + ("nn.MaxPool2d", 138619, "nn.functional.max_pool2d"), + ("nn.Embedding", 111844, "nn.functional.embedding"), + ("nn.DataParallel", 104238, None), + ("nn.MSELoss", 82954, "nn.functional.mse_loss"), + ("nn.Sigmoid", 75810, "nn.functional.sigmoid"), + ("nn.LeakyReLU", 65632, "nn.functional.leaky_relu"), + ("nn.BatchNorm1d", 65374, "nn.functional.batch_norm"), + ("nn.Softmax", 65114, "nn.functional.softmax"), + ("nn.Tanh", 59445, "nn.functional.tanh"), + ("nn.AdaptiveAvgPool2d", 59071, "nn.functional.adaptive_avg_pool2d"), + ("nn.AvgPool2d", 58377, "nn.functional.avg_pool2d"), + ("nn.ConvTranspose2d", 57524, "nn.functional.conv_transpose2d"), + ("nn.LSTM", 57411, None), + ("nn.Conv1d", 41108, "nn.functional.conv1d"), + ("nn.LayerNorm", 36089, "nn.functional.layer_norm"), + ("nn.BCELoss", 34005, "nn.functional.binary_cross_entropy"), + ("nn.Upsample", 32527, "nn.functional.interpolate"), + ("nn.BCEWithLogitsLoss", 29944, "nn.functional.binary_cross_entropy_with_logits"), + ("nn.GRU", 25421, None), + ("nn.Dropout2d", 23512, "nn.functional.dropout2d"), + ("nn.LogSoftmax", 22897, "nn.functional.log_softmax"), + ("nn.L1Loss", 22778, "nn.functional.l1_loss"), + ("nn.GroupNorm", 22183, "nn.functional.group_norm"), + ("nn.NLLLoss", 21751, "nn.functional.nll_loss"), + ("nn.Conv3d", 20874, "nn.functional.conv3d"), + ("nn.Identity", 17911, None), + ("nn.InstanceNorm2d", 16426, "nn.functional.instance_norm"), + ("nn.BatchNorm3d", 16378, "nn.functional.batch_norm"), + ("nn.PReLU", 13472, "nn.functional.prelu"), + ("nn.ReLU6", 12622, "nn.functional.relu6"), + ("nn.ELU", 12508, "nn.functional.elu"), + ("nn.LSTMCell", 10885, None), + ("nn.Flatten", 10384, "torch.flatten"), + ("nn.ModuleDict", 10255, None), + ("nn.ReflectionPad2d", 9954, "nn.functional.pad"), + ("nn.MaxPool3d", 9526, "nn.functional.max_pool3d"), + ("nn.MaxPool1d", 9154, "nn.functional.max_pool1d"), + ("nn.RNN", 9154, None), + ("nn.ZeroPad2d", 8847, "nn.functional.pad"), + ("nn.ParameterList", 7702, None), + ("nn.SyncBatchNorm", 6814, None), + ("nn.PixelShuffle", 6571, "nn.functional.pixel_shuffle"), + ("nn.SmoothL1Loss", 6517, "nn.functional.smooth_l1_loss"), + ("nn.Hardswish", 6458, "nn.functional.hardswish"), + ("nn.AdaptiveMaxPool2d", 6071, "nn.functional.adaptive_max_pool2d"), + ("nn.SELU", 6043, "nn.functional.selu"), + ("nn.ConvTranspose3d", 6039, "nn.functional.conv_transpose3d"), + ("nn.GRUCell", 5840, None), + ("nn.ReplicationPad2d", 5600, "nn.functional.pad"), + ("nn.KLDivLoss", 5541, "nn.functional.kl_div"), + ("nn.ConvTranspose1d", 5183, "nn.functional.conv_transpose1d"), + ("nn.Softplus", 5120, "nn.functional.softplus"), + ("nn.SiLU", 4895, "nn.functional.silu"), + ("nn.AvgPool3d", 4523, "nn.functional.avg_pool3d"), + ("nn.CosineSimilarity", 4058, "nn.functional.cosine_similarity"), + ("nn.GELU", 3932, "nn.functional.gelu"), + ("nn.UpsamplingBilinear2d", 3673, "nn.functional.interpolate"), + ("nn.InstanceNorm1d", 3658, "nn.functional.instance_norm"), + ("nn.Transformer", 3604, None), + ("nn.MultiheadAttention", 3435, "nn.functional.multi_head_attention_forward"), + ("nn.AvgPool1d", 3195, 
"nn.functional.avg_pool1d"), + ("nn.Dropout3d", 2964, "nn.functional.dropout3d"), + ("nn.AdaptiveAvgPool3d", 2915, "nn.functional.adaptive_avg_pool3d"), + ("nn.InstanceNorm3d", 2893, "nn.functional.instance_norm"), + ("nn.Hardtanh", 2613, "nn.functional.hardtanh"), + ("nn.MarginRankingLoss", 2568, "nn.functional.margin_ranking_loss"), + ("nn.GLU", 2526, "nn.functional.glu"), + ("nn.AdaptiveAvgPool1d", 2481, "nn.functional.adaptive_avg_pool1d"), + ("nn.EmbeddingBag", 2344, "nn.functional.embedding_bag"), + ("nn.TransformerEncoderLayer", 2292, None), + ("nn.TransformerEncoder", 2091, None), + ("nn.MaxUnpool2d", 2031, "nn.functional.max_unpool2d"), + ("nn.UpsamplingNearest2d", 2004, "nn.functional.interpolate"), + ("nn.ConstantPad1d", 1904, "nn.functional.pad"), + ("nn.ConstantPad2d", 1791, "nn.functional.pad"), + ("nn.CTCLoss", 1789, "nn.functional.ctc_loss"), + ("nn.AdaptiveMaxPool1d", 1713, "nn.functional.adaptive_max_pool1d"), + ("nn.AdaptiveLogSoftmaxWithLoss", 1665, None), + ("nn.Bilinear", 1664, "nn.functional.bilinear"), + ("nn.RNNCell", 1653, None), + ("nn.MultiLabelSoftMarginLoss", 1624, "nn.functional.multilabel_soft_margin_loss"), + ("nn.Unfold", 1452, "nn.functional.unfold"), + ("nn.RReLU", 1431, "nn.functional.rrelu"), + ("nn.CosineEmbeddingLoss", 1357, "nn.functional.cosine_embedding_loss"), + ("nn.LocalResponseNorm", 1331, "nn.functional.local_response_norm"), + ("nn.Softmax2d", 1300, "nn.functional.softmax"), + ("nn.PairwiseDistance", 1241, "nn.functional.pairwise_distance"), + ("nn.LogSigmoid", 1235, "nn.functional.logsigmoid"), + ("nn.TripletMarginLoss", 1230, "nn.functional.triplet_margin_loss"), + ("nn.RNNBase", 1133, None), + ("nn.Threshold", 1043, "nn.functional.threshold"), + ("nn.AdaptiveMaxPool3d", 1025, "nn.functional.adaptive_max_pool3d"), + ("nn.CELU", 1018, "nn.functional.celu"), + ("nn.NLLLoss2d", 966, "nn.functional.nll_loss"), + ("nn.Softsign", 877, "nn.functional.softsign"), + ("nn.ReplicationPad1d", 862, "nn.functional.pad"), + ("nn.SoftMarginLoss", 856, "nn.functional.soft_margin_loss"), + ("nn.ParameterDict", 742, None), + ("nn.ReflectionPad1d", 731, "nn.functional.pad"), + ("nn.Softshrink", 713, "nn.functional.softshrink"), + ("nn.AlphaDropout", 710, "nn.functional.alpha_dropout"), + ("nn.Tanhshrink", 681, "nn.functional.tanhshrink"), + ("nn.PoissonNLLLoss", 676, "nn.functional.poisson_nll_loss"), + ("nn.MaxUnpool3d", 660, "nn.functional.max_unpool3d"), + ("nn.Fold", 630, "nn.functional.fold"), + ("nn.MultiMarginLoss", 622, "nn.functional.multi_margin_loss"), + ("nn.TransformerDecoderLayer", 614, None), + ("nn.TransformerDecoder", 607, None), + ("nn.Hardshrink", 592, "nn.functional.hardshrink"), + ("nn.ConstantPad3d", 582, "nn.functional.pad"), + ("nn.MultiLabelMarginLoss", 580, "nn.functional.multilabel_margin_loss"), + ("nn.LPPool2d", 550, "nn.functional.lp_pool2d"), + ("nn.Softmin", 537, "nn.functional.softmin"), + ("nn.MaxUnpool1d", 518, "nn.functional.max_unpool1d"), + ("nn.FractionalMaxPool2d", 484, "nn.functional.fractional_max_pool2d"), + ("nn.Hardsigmoid", 477, "nn.functional.hardsigmoid"), + ("nn.ReplicationPad3d", 470, "nn.functional.pad"), + ("nn.HingeEmbeddingLoss", 442, "nn.functional.hinge_embedding_loss"), + ("nn.LPPool1d", 386, "nn.functional.lp_pool1d"), + ("nn.FractionalMaxPool3d", 252, "nn.functional.fractional_max_pool3d"), + ("nn.Container", 217, None), + ("nn.Unflatten", 206, "nn.functional.unflatten"), + ("nn.FeatureAlphaDropout", 136, "nn.functional.feature_alpha_dropout"), + ("nn.TripletMarginWithDistanceLoss", 107, 
"nn.functional.triplet_margin_with_distance_loss"), + ("nn.ChannelShuffle", 90, "nn.functional.channel_shuffle"), + ("nn.RNNCellBase", 88, None), + ("nn.LazyLinear", 81, "nn.functional.linear"), + ("nn.UninitializedParameter", 60, None), + ("nn.CrossMapLRN2d", 59, None), + ("nn.GaussianNLLLoss", 55, "nn.functional.gaussian_nll_loss"), + ("nn.PixelUnshuffle", 45, "nn.functional.pixel_unshuffle"), + ("nn.Mish", 31, "nn.functional.mish"), + ("nn.ReflectionPad3d", 22, "nn.functional.pad"), + ("nn.HuberLoss", 18, "nn.functional.huber_loss"), + ("nn.LazyConv2d", 15, None), + ("nn.LazyConv1d", 9, None), + ("nn.LazyConv3d", 8, None), + ("nn.LazyConvTranspose1d", 8, None), + ("nn.LazyConvTranspose2d", 8, None), + ("nn.LazyConvTranspose3d", 8, None), + ("nn.LazyBatchNorm1d", 3, None), + ("nn.LazyBatchNorm2d", 3, None), + ("nn.LazyBatchNorm3d", 3, None), + ("nn.UninitializedBuffer", 3, None), +] + +# No rankings because these are a little hard to get rankings for +method_only_ops = [ + 'bfloat16', + 'bool', + 'byte', + 'char', + 'contiguous', + 'cpu', + 'cuda', + 'detach', + 'double', + 'expand', + 'expand_as', + 'float', + 'get_device', + 'half', + 'hardshrink', + 'index_add', + 'index_copy', + 'index_fill', + 'index_put', + 'int', + 'is_contiguous', + 'is_pinned', + 'is_set_to', + 'is_shared', + 'is_signed', + 'item', + 'long', + 'masked_scatter', + 'masked_fill', + 'narrow_copy', + 'numpy', + 'pin_memory', + 'repeat', + 'reshape_as', + 'select', + 'short', + 'storage_offset', + 'sum_to_size', + 'to', + 'to_mkldnn', + 'tolist', + 'type', + 'type_as', + 'unfold', + 'view', + 'view_as', +] + + +def get_nn_functional_top_list(): + top_nn_functional_ = dict(top_nn_functional) + for _, count, functional_name in top_nn_module: + if functional_name is None: + continue + if functional_name == 'torch.flatten': + continue + if functional_name not in top_nn_functional_: + top_nn_functional_[functional_name] = count + else: + top_nn_functional_[functional_name] += count + + top_nn_functional_ = list(top_nn_functional_.items()) + top_nn_functional_.sort(key=lambda x: x[1], reverse=True) + return top_nn_functional_ + + +usage_count = {} +for k, v in get_nn_functional_top_list(): + usage_count[k] = v +for k, v in top_torch: + usage_count[k] = v diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8e3c60029978e7dabe78a9acb831bcf9757fac88 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/utils.py @@ -0,0 +1,41 @@ +import contextlib +import torch +from torch._C._functorch import ( + set_single_level_autograd_function_allowed, + get_single_level_autograd_function_allowed, + unwrap_if_dead, +) +from typing import Union, Tuple + +@contextlib.contextmanager +def enable_single_level_autograd_function(): + try: + prev_state = get_single_level_autograd_function_allowed() + set_single_level_autograd_function_allowed(True) + yield + finally: + set_single_level_autograd_function_allowed(prev_state) + +def unwrap_dead_wrappers(args): + # NB: doesn't use tree_map_only for performance reasons + result = tuple( + unwrap_if_dead(arg) if isinstance(arg, torch.Tensor) else arg + for arg in args + ) + return result + +# Allows one to expose an API in a private submodule publicly as per the definition +# in PyTorch's public api policy. 
+# +# It is a temporary solution while we figure out if it should be the long-term solution +# or if we should amend PyTorch's public api policy. The concern is that this approach +# may not be very robust because it's not clear what __module__ is used for. +# However, both numpy and jax overwrite the __module__ attribute of their APIs +# without problem, so it seems fine. +def exposed_in(module): + def wrapper(fn): + fn.__module__ = module + return fn + return wrapper + +argnums_t = Union[int, Tuple[int, ...]] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_functorch/vmap.py b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/vmap.py new file mode 100644 index 0000000000000000000000000000000000000000..1f776ca0f1815cb280b38d9672da872347a24d6d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_functorch/vmap.py @@ -0,0 +1,452 @@ +# mypy: ignore-errors + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import contextlib +import functools +import threading +from torch import Tensor +from typing import Any, Callable, Optional, Tuple, Union, List +from torch.utils._pytree import ( + tree_flatten, + tree_unflatten, + tree_map_, + _broadcast_to_and_flatten, + TreeSpec, +) +from functools import partial +import os +import itertools + +from torch._C._functorch import ( + _add_batch_dim, + _remove_batch_dim, + _vmap_decrement_nesting, + _vmap_increment_nesting, + is_batchedtensor, +) + +in_dims_t = Union[int, Tuple] +out_dims_t = Union[int, Tuple[int, ...]] + + +def doesnt_support_saved_tensors_hooks(f): + message = ( + "torch.func transforms don't yet support saved tensor hooks. " + "Please open an issue with your use case." + ) + + @functools.wraps(f) + def fn(*args, **kwargs): + with torch.autograd.graph.disable_saved_tensors_hooks(message): + return f(*args, **kwargs) + return fn + + +# Checks that all args-to-be-batched have the same batch dim size +def _validate_and_get_batch_size( + flat_in_dims: List[Optional[int]], + flat_args: List) -> int: + batch_sizes = [arg.size(in_dim) for in_dim, arg in zip(flat_in_dims, flat_args) + if in_dim is not None] + if len(batch_sizes) == 0: + raise ValueError('vmap: Expected at least one Tensor to vmap over') + if batch_sizes and any(size != batch_sizes[0] for size in batch_sizes): + raise ValueError( + f'vmap: Expected all tensors to have the same size in the mapped ' + f'dimension, got sizes {batch_sizes} for the mapped dimension') + return batch_sizes[0] + + +def _num_outputs(batched_outputs: Union[Tensor, Tuple[Tensor, ...]]) -> int: + if isinstance(batched_outputs, tuple): + return len(batched_outputs) + return 1 + +# If value is a tuple, check it has length `num_elements`. 
+# If value is not a tuple, make a tuple with `value` repeated `num_elements` times + + +def _as_tuple(value: Any, num_elements: int, error_message_lambda: Callable[[], str]) -> Tuple: + if not isinstance(value, tuple): + return (value,) * num_elements + if len(value) != num_elements: + raise ValueError(error_message_lambda()) + return value + + +def _process_batched_inputs( + in_dims: in_dims_t, args: Tuple, func: Callable +) -> Tuple[int, List[Any], List[Any], TreeSpec]: + if not isinstance(in_dims, int) and not isinstance(in_dims, tuple): + raise ValueError( + f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(): ' + f'expected `in_dims` to be int or a (potentially nested) tuple ' + f'matching the structure of inputs, got: {type(in_dims)}.') + if len(args) == 0: + raise ValueError( + f'vmap({_get_name(func)})(): got no inputs. Maybe you forgot to add ' + f'inputs, or you are trying to vmap over a function with no inputs. ' + f'The latter is unsupported.') + + flat_args, args_spec = tree_flatten(args) + flat_in_dims = _broadcast_to_and_flatten(in_dims, args_spec) + if flat_in_dims is None: + raise ValueError( + f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(): ' + f'in_dims is not compatible with the structure of `inputs`. ' + f'in_dims has structure {tree_flatten(in_dims)[1]} but inputs ' + f'has structure {args_spec}.') + + for i, (arg, in_dim) in enumerate(zip(flat_args, flat_in_dims)): + if not isinstance(in_dim, int) and in_dim is not None: + raise ValueError( + f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(): ' + f'Got in_dim={in_dim} for an input but in_dim must be either ' + f'an integer dimension or None.') + if isinstance(in_dim, int) and not isinstance(arg, Tensor): + raise ValueError( + f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(): ' + f'Got in_dim={in_dim} for an input but the input is of type ' + f'{type(arg)}. We cannot vmap over non-Tensor arguments, ' + f'please use None as the respective in_dim') + if in_dim is not None and (in_dim < -arg.dim() or in_dim >= arg.dim()): + raise ValueError( + f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(): ' + f'Got in_dim={in_dim} for some input, but that input is a Tensor ' + f'of dimensionality {arg.dim()} so expected in_dim to satisfy ' + f'-{arg.dim()} <= in_dim < {arg.dim()}.') + if in_dim is not None and in_dim < 0: + flat_in_dims[i] = in_dim % arg.dim() + + return _validate_and_get_batch_size(flat_in_dims, flat_args), flat_in_dims, flat_args, args_spec + +# Creates BatchedTensors for every Tensor in arg that should be batched. +# Returns the (potentially) batched arguments and the batch_size. + + +def _create_batched_inputs( + flat_in_dims: List[Any], flat_args: List[Any], vmap_level: int, args_spec) -> Tuple: + # See NOTE [Ignored _remove_batch_dim, _add_batch_dim] + batched_inputs = [arg if in_dim is None else + _add_batch_dim(arg, in_dim, vmap_level) + for in_dim, arg in zip(flat_in_dims, flat_args)] + return tree_unflatten(batched_inputs, args_spec) + + +def _maybe_remove_batch_dim(name, batched_output, vmap_level, batch_size, out_dim): + + if out_dim is None: + if isinstance(batched_output, torch.Tensor) and is_batchedtensor(batched_output): + raise ValueError( + f'vmap({name}, ...): `{name}` can not return a ' + f'BatchedTensor when out_dim is None' + ) + return batched_output + + # out_dim is non None + if not isinstance(batched_output, torch.Tensor): + raise ValueError(f'vmap({name}, ...): `{name}` must only return ' + f'Tensors, got type {type(batched_output)}. 
' + 'Did you mean to set out_dim= to None for output?') + + return _remove_batch_dim(batched_output, vmap_level, batch_size, out_dim) + + +# Undos the batching (and any batch dimensions) associated with the `vmap_level`. +def _unwrap_batched( + batched_outputs: Union[Tensor, Tuple[Tensor, ...]], + out_dims: out_dims_t, + vmap_level: int, batch_size: int, func: Callable) -> Tuple: + flat_batched_outputs, output_spec = tree_flatten(batched_outputs) + + def incompatible_error(): + raise ValueError( + f'vmap({_get_name(func)}, ..., out_dims={out_dims})(): ' + f'out_dims is not compatible with the structure of `outputs`. ' + f'out_dims has structure {tree_flatten(out_dims)[1]} but outputs ' + f'has structure {output_spec}.') + + if isinstance(batched_outputs, torch.Tensor): + # Some weird edge case requires us to spell out the following + # see test_out_dims_edge_case + if isinstance(out_dims, int): + flat_out_dims = [out_dims] + elif isinstance(out_dims, tuple) and len(out_dims) == 1: + flat_out_dims = out_dims + elif out_dims is None: + flat_out_dims = [out_dims] + else: + incompatible_error() + else: + flat_out_dims = _broadcast_to_and_flatten(out_dims, output_spec) + if flat_out_dims is None: + incompatible_error() + + flat_outputs = [ + _maybe_remove_batch_dim(_get_name(func), batched_output, vmap_level, batch_size, out_dim) + for batched_output, out_dim in zip(flat_batched_outputs, flat_out_dims) + ] + return tree_unflatten(flat_outputs, output_spec) + + +def _check_int_or_none(x, func, out_dims): + if isinstance(x, int): + return + if x is None: + return + raise ValueError( + f'vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must be ' + f'an int, None or a python collection of ints representing where in the outputs the ' + f'vmapped dimension should appear.') + + +def _check_out_dims_is_int_or_int_pytree(out_dims: out_dims_t, func: Callable) -> None: + if isinstance(out_dims, int): + return + tree_map_(partial(_check_int_or_none, func=func, out_dims=out_dims), out_dims) + + +def _get_name(func: Callable): + if hasattr(func, '__name__'): + return func.__name__ + + # Not all callables have __name__, in fact, only static functions/methods do. + # A callable created via functools.partial or an nn.Module, to name some + # examples, don't have a __name__. + return repr(func) + + +DECOMPOSITIONS_LOADED = False +DECOMPOSITIONS_LOCK = threading.Lock() +VMAP_DECOMPOSITIONS_LIB = None + +# torch.package, Python 3.11, and torch.jit-less environments are unhappy with +# decompositions. Only load them when needed if possible. +def lazy_load_decompositions(): + global DECOMPOSITIONS_LOADED + if DECOMPOSITIONS_LOADED: + return + + with DECOMPOSITIONS_LOCK: + if DECOMPOSITIONS_LOADED: + return + + if not (os.environ.get("PYTORCH_JIT", "1") == "1" and __debug__): + DECOMPOSITIONS_LOADED = True + return + + # use an alternate way to register an operator into the decomposition table + # _register_jit_decomposition doesn't work for some operators, e.g. 
addr, + # because the Tensor types generated cannot be unioned by torchscript + # decomp should be type OpOverload + global VMAP_DECOMPOSITIONS_LIB + VMAP_DECOMPOSITIONS_LIB = torch.library.Library("aten", "IMPL", "FuncTorchBatched") + + from torch._decomp import decomposition_table + + def _register_python_decomposition_vmap(decomp): + if decomp in decomposition_table: + VMAP_DECOMPOSITIONS_LIB.impl(decomp, decomposition_table[decomp]) + else: + raise RuntimeError(f"could not find decomposition for {decomp}") + + _register_python_decomposition_vmap(torch.ops.aten.mse_loss_backward.default) + _register_python_decomposition_vmap(torch.ops.aten.smooth_l1_loss_backward.default) + _register_python_decomposition_vmap(torch.ops.aten.huber_loss_backward.default) + _register_python_decomposition_vmap(torch.ops.aten.nll_loss_forward.default) + _register_python_decomposition_vmap(torch.ops.aten.nll_loss2d_forward.default) + _register_python_decomposition_vmap(torch.ops.aten.nll_loss_backward.default) + _register_python_decomposition_vmap(torch.ops.aten.nll_loss2d_backward.default) + _register_python_decomposition_vmap(torch.ops.aten.addr.default) + + DECOMPOSITIONS_LOADED = True + +def vmap_impl(func, in_dims, out_dims, randomness, chunk_size, *args, **kwargs): + lazy_load_decompositions() + _check_out_dims_is_int_or_int_pytree(out_dims, func) + batch_size, flat_in_dims, flat_args, args_spec = _process_batched_inputs(in_dims, args, func) + + if chunk_size is not None: + chunks_flat_args = _get_chunked_inputs(flat_args, flat_in_dims, batch_size, chunk_size) + return _chunked_vmap(func, flat_in_dims, chunks_flat_args, + args_spec, out_dims, randomness, **kwargs) + + # If chunk_size is not specified. + return _flat_vmap( + func, batch_size, flat_in_dims, flat_args, args_spec, out_dims, randomness, **kwargs + ) + +def get_chunk_sizes(total_elems, chunk_size): + n_chunks = n_chunks = total_elems // chunk_size + chunk_sizes = [chunk_size] * n_chunks + # remainder chunk + remainder = total_elems % chunk_size + if remainder != 0: + chunk_sizes.append(remainder) + return chunk_sizes + +def _get_chunked_inputs(flat_args, flat_in_dims, batch_size, chunk_size): + split_idxs = (batch_size,) + if chunk_size is not None: + chunk_sizes = get_chunk_sizes(batch_size, chunk_size) + split_idxs = tuple(itertools.accumulate(chunk_sizes)) + + flat_args_chunks = tuple( + t.tensor_split(split_idxs, dim=in_dim) if in_dim is not None else [t, ] * len(split_idxs) + for t, in_dim in zip(flat_args, flat_in_dims) + ) + + # transpose chunk dim and flatten structure + # chunks_flat_args is a list of flatten args + chunks_flat_args = zip(*flat_args_chunks) + return chunks_flat_args + + +def _flatten_chunks_output(chunks_output_): + # chunks_output is a list of chunked outputs + # flatten chunked outputs: + flat_chunks_output = [] + arg_spec = None + for output in chunks_output_: + flat_output, arg_specs = tree_flatten(output) + flat_chunks_output.append(flat_output) + if arg_spec is None: + arg_spec = arg_specs + + # transpose chunk dim and flatten structure + # flat_output_chunks is flat list of chunks + flat_output_chunks = list(zip(*flat_chunks_output)) + return flat_output_chunks, arg_spec + + +def _concat_chunked_outputs(out_dims, arg_spec, flat_output_chunks): + # concat chunks on out_dim + flat_out_dims = _broadcast_to_and_flatten(out_dims, arg_spec) + assert len(flat_out_dims) == len(flat_output_chunks) + flat_output = [] + for idx, out_dim in enumerate(flat_out_dims): + flat_output.append(torch.cat(flat_output_chunks[idx], 
dim=out_dim)) + # release tensors + flat_output_chunks[idx] = None + + return flat_output + + +# Applies vmap on chunked_input and returns concatenated output over the chunks. +def _chunked_vmap(func, flat_in_dims, chunks_flat_args, args_spec, out_dims, randomness, **kwargs): + + chunks_output = [] + rs = torch.get_rng_state() if randomness == "same" else None + for flat_args in chunks_flat_args: + batch_size = _validate_and_get_batch_size(flat_in_dims, flat_args) + + # The way we compute split the input in `_get_chunked_inputs`, + # we may get a tensor with `0` batch-size. We skip any computation + # in that case. + # Eg. + # >>> chunk_size = 1 + # >>> batch_size = 6 + # >>> t = torch.zeros(batch_size, 1) + # >>> t.tensor_split([1, 2, 3, 4, 5, 6]) + # (tensor([[0.]]), tensor([[0.]]), tensor([[0.]]), tensor([[0.]]), + # tensor([[0.]]), tensor([[0.]]), tensor([], size=(0, 1))) + if batch_size == 0: + continue + + if rs is not None: + torch.set_rng_state(rs) + chunks_output.append( + _flat_vmap( + func, batch_size, flat_in_dims, flat_args, args_spec, out_dims, randomness, **kwargs + ) + ) + + flat_output_chunks, arg_spec = _flatten_chunks_output(chunks_output) + + # chunked output tensors are held by both `flat_output_chunks` and `chunks_output`. + # eagerly remove the reference from `chunks_output`. + del chunks_output + + # concat chunks on out_dim + flat_output = _concat_chunked_outputs(out_dims, arg_spec, flat_output_chunks) + + # finally unflatten the output + return tree_unflatten(flat_output, arg_spec) + + +# Vmap refactored helper functions: +def _check_randomness_arg(randomness): + if randomness not in ['error', 'different', 'same']: + raise RuntimeError(f"Only allowed values for randomness are 'error', 'different', or 'same'. Got {randomness}") + + +@contextlib.contextmanager +def vmap_increment_nesting(batch_size, randomness): + try: + vmap_level = _vmap_increment_nesting(batch_size, randomness) + yield vmap_level + finally: + _vmap_decrement_nesting() + + +@doesnt_support_saved_tensors_hooks +def _flat_vmap(func, batch_size, flat_in_dims, flat_args, args_spec, out_dims, randomness, **kwargs): + + with vmap_increment_nesting(batch_size, randomness) as vmap_level: + batched_inputs = _create_batched_inputs(flat_in_dims, flat_args, vmap_level, args_spec) + batched_outputs = func(*batched_inputs, **kwargs) + return _unwrap_batched(batched_outputs, out_dims, vmap_level, batch_size, func) + + +# `restore_vmap` is a private helper function. It is vmap but has the following +# differences: +# - instead of returning outputs, it returns an (outputs, out_dims) tuple. +# out_dims is a pytree of same shape as outputs and contains Optional[int] +# specifying where the vmapped dimension, if it exists, is in the corresponding output. +# - does no validation on in_dims or inputs (vmap expects at least one Tensor to be vmapped). +# restore_vmap allows for no inputs to have the vmap dimension +# - does no validation on outputs (vmap expects only Tensor outputs) +# restore_vmap allows for return of arbitrary outputs (not just Tensors) +# +# The TL;DR is that restore_vmap is more general than vmap and has a slightly +# different API. The relaxations are so that we can "pause" vmap in the middle +# of its execution and then "restore" it later (this is what we do in +# the generate_vmap_rule=True implementation of autograd.Function). 
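+# Illustrative call (editor's note, not part of the original file): for a
+# tensor `x` of shape (B, N) one might write
+#     out, out_dims = restore_vmap(torch.sin, (0,), B, "error")(x)
+# where `out` is the underlying tensor (the batch dimension is still present)
+# and `out_dims` mirrors the output pytree, holding the position of the batch
+# dimension (here 0) or None for outputs that were not vmapped.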
+# +# restore_vmap can be technically used in the implementation of vmap, but doing +# that refactor is a bit technically challenging because: +# - vmap couples the tensor-wrapping code with error checking +# - vmap's tensor unwrapping code is in C++; we would need to rewrite part of it +# in python because it overlaps with unwrap_batched +@doesnt_support_saved_tensors_hooks +def restore_vmap(func, in_dims, batch_size, randomness): + def inner(*args, **kwargs): + with vmap_increment_nesting(batch_size, randomness) as vmap_level: + batched_inputs = wrap_batched(args, in_dims, vmap_level) + batched_outputs = func(*batched_inputs, **kwargs) + return unwrap_batched(batched_outputs, vmap_level) + return inner + + +def wrap_batched(args, bdims, level): + flat_args, spec = tree_flatten(args) + flat_bdims = _broadcast_to_and_flatten(bdims, spec) + assert flat_bdims is not None + result = _create_batched_inputs(flat_bdims, flat_args, level, spec) + return result + + +def unwrap_batched(args, level): + flat_args, spec = tree_flatten(args) + if len(flat_args) == 0: + return args, () + result = [torch._C._functorch._unwrap_batched(arg, level) if isinstance(arg, torch.Tensor) + else (arg, None) for arg in flat_args] + output, bdims = zip(*result) + return tree_unflatten(output, spec), tree_unflatten(bdims, spec)
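The chunking helpers above (`_get_chunked_inputs`, `_chunked_vmap`) back the `chunk_size` argument accepted by `vmap_impl` and exposed by the public `torch.func.vmap`. A minimal sketch of that behaviour, using only public API; the function `f` and tensor `x` are made-up names for the example:

import torch
from torch.func import vmap

def f(x):
    # f sees a single unbatched sample of shape (3,)
    return x.sin() + x.sum()

x = torch.randn(6, 3)  # a batch of 6 samples along dim 0
# chunk_size=2 splits the batch into chunks of two samples before mapping
# (the _chunked_vmap path above); the result matches plain vmap or a Python loop.
out = vmap(f, in_dims=0, out_dims=0, chunk_size=2)(x)
assert out.shape == (6, 3)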