diff --git a/ckpts/universal/global_step120/zero/15.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/15.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..fc5c6444da1eec6e452375ad0db06d4478a4eb58 --- /dev/null +++ b/ckpts/universal/global_step120/zero/15.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75c0eececb9fd5a8fbac12870f7f9459bac43404e6851bc728d7d104ee4037e5 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/15.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/15.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..2a7a1613146f61f13371a5014921ca2364464643 --- /dev/null +++ b/ckpts/universal/global_step120/zero/15.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a8379030427fb2efc50d6a3163ea8c99d70f29b80392233a0a4e6cafa4ce60f +size 33555627 diff --git a/ckpts/universal/global_step120/zero/15.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step120/zero/15.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..2e383624c1af95bee53ffd5de2bf204f933a9538 --- /dev/null +++ b/ckpts/universal/global_step120/zero/15.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b915f5d379be5bc0206384f4550e01ae4d44761ee5d63b9e775aa9c7ec49187e +size 33555533 diff --git a/ckpts/universal/global_step120/zero/21.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/21.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..051ddee7bd279d55fd5f60babed2420970ebc18d --- /dev/null +++ b/ckpts/universal/global_step120/zero/21.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ef5663fc4992676357ab082014a4b6f14c7e388c385f2779948ed1de0212f74 +size 16778411 diff --git a/ckpts/universal/global_step120/zero/21.attention.dense.weight/fp32.pt b/ckpts/universal/global_step120/zero/21.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..8d24e29748ac021f422f1ba05ed6d8d6609c8535 --- /dev/null +++ b/ckpts/universal/global_step120/zero/21.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2250056ed84e089995ac97203e2b6cfde6272c7c5dd35a6d44f9f146c9b0d43d +size 16778317 diff --git a/ckpts/universal/global_step120/zero/4.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/4.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..b5b9122dc35f16ccc0e5c5a38ea7758ea41bd17f --- /dev/null +++ b/ckpts/universal/global_step120/zero/4.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba07ef3cd53856a7271abf8c504c9cd767078a8f30a9d8cf383c1687ecb7d433 +size 16778396 diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__init__.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d737548c3d480d11e722ad5ae076cebe9f2523c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__init__.py @@ -0,0 +1,52 @@ +import glob +import importlib +from os.path import basename, 
dirname, isfile, join + +import torch +from torch._export.db.case import ( + _EXAMPLE_CASES, + _EXAMPLE_CONFLICT_CASES, + _EXAMPLE_REWRITE_CASES, + SupportLevel, +) + + +modules = glob.glob(join(dirname(__file__), "*.py")) +__all__ = [ + basename(f)[:-3] for f in modules if isfile(f) and not f.endswith("__init__.py") +] + +# Import all modules in the current directory. +from . import * # noqa: F403 + + +def all_examples(): + return _EXAMPLE_CASES + + +if len(_EXAMPLE_CONFLICT_CASES) > 0: + + def get_name(case): + model = case.model + if isinstance(model, torch.nn.Module): + model = type(model) + return model.__name__ + + msg = "Error on conflicting export case names.\n" + for case_name, cases in _EXAMPLE_CONFLICT_CASES.items(): + msg += f"Case name {case_name} is associated with multiple cases:\n " + msg += f"[{','.join(map(get_name, cases))}]\n" + + raise RuntimeError(msg) + + +def filter_examples_by_support_level(support_level: SupportLevel): + return { + key: val + for key, val in all_examples().items() + if val.support_level == support_level + } + + +def get_rewrite_cases(case): + return _EXAMPLE_REWRITE_CASES.get(case.name, []) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de988d32dd110ecc46c014c3117026aaafa4afd8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/assume_constant_result.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/assume_constant_result.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d1d6167b316bbcc6aef194977984dd7893632a9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/assume_constant_result.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/autograd_function.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/autograd_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7580a248b51b3e1ac576667305cb8837db838dec Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/autograd_function.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/class_method.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/class_method.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e981363ec0627482acda5e5f18aac299da57e1b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/class_method.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_class_method.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_class_method.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..856119cb94d38843e582f3c9a2a6ab2cff3b7221 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_class_method.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nested_function.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nested_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ae46d63a4fb910c88a2dbb0dc5000ee4e54100c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nested_function.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nonlocal_variables.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nonlocal_variables.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90305e6f86943f8fcb553a09e4b9fa76fb5a1cf6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nonlocal_variables.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_closed_over_variable.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_closed_over_variable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..723f948481cac09ab313016fc4d8731717f36ce3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_closed_over_variable.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_operands.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_operands.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f587a5b23c5661c9ff058b39cff0fead60055be6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_operands.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_predicate.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_predicate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbb0a8b0325eb1631a67d3104132f66d1bef5d08 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_predicate.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_size_example.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_size_example.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..addf3f1b25fcfb45f8dbb6616cc63037e1125c15 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_size_example.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_value_example.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_value_example.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5100c645fa60cb40836e1913a5c6002c6aa53beb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_value_example.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/decorator.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/decorator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d32519c842c9e64e5fe83fde919d6809729e333b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/decorator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dictionary.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dictionary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27496263d4004f1f0e32e2476795c5788ad1a2af Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dictionary.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_assert.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_assert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7ffe50e7c5b17d98d6ab96e04a460325ad5b58f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_assert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_constructor.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_constructor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2194f52be8c3af94fec2cc4b0655028d63929277 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_constructor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_if_guard.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_if_guard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59da43d2666ba1b360541d1b4974b93850a07857 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_if_guard.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_map.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_map.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfcf1042fc02f82faacf0d15f8b2c35ec0a4b341 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_map.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_round.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_round.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e78ebcb5a5a89b26f30973e4676a1b4ed25073ad Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_round.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_slicing.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_slicing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7150774cb36ed57e982bbaca7f874efff5ff417e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_slicing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_view.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_view.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16364ea127d6bb13b7c65ffb68fb3bc56f420bb2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_view.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/fn_with_kwargs.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/fn_with_kwargs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8d8c3367ab632525052e471ba551de37563080f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/fn_with_kwargs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_contains.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_contains.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2c939beaf0cccf0e27139b67356aeac577a4b25 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_contains.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_unpack.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_unpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2fe0be6a60144ef3a28c784c8166cdbbfa25d44 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_unpack.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/model_attr_mutation.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/model_attr_mutation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5607be84b17b0a8110237f5e1313632bcfc573cb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/model_attr_mutation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/nested_function.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/nested_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf91052aba281c15a5bc254ece708025d1388826 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/nested_function.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/null_context_manager.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/null_context_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05db4759e158dc8df01ac0f9723baffdc4885e01 
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/null_context_manager.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/optional_input.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/optional_input.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c99a0119d1ee868a9bf63fac2c7a54fdbb67380 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/optional_input.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/pytree_flatten.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/pytree_flatten.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..288b53c1ad85f76d51d64b5cbfba81bfdb4176ba Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/pytree_flatten.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/scalar_output.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/scalar_output.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aef40656b8d7157fd220e7165e4ba0274ee72ad4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/scalar_output.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/specialized_attribute.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/specialized_attribute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f220d00e71fd014044535e84fb65be0b2018116d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/specialized_attribute.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/static_for_loop.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/static_for_loop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3eddc132432e516957543d984721a5996998fd99 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/static_for_loop.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/static_if.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/static_if.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44d3e020c40e2eee0d67af864e8710795e97cff1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/static_if.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/tensor_setattr.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/tensor_setattr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69b4016fa4fa9dde5072137f4bfe6850df10091c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/tensor_setattr.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/torch_sym_min.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/torch_sym_min.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90f5daf4a6a7c6484e5229bd871af5f7d581e2a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/torch_sym_min.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/type_reflection_method.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/type_reflection_method.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7443ee1ce54f647d837904db28161c6366e43a55 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/type_reflection_method.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/user_input_mutation.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/user_input_mutation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62e956147ed994cd28373d353b0c38142bf5203d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/user_input_mutation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/assume_constant_result.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/assume_constant_result.py new file mode 100644 index 0000000000000000000000000000000000000000..664aab8b64da2b239daaa2d78c068a1d7397c4a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/assume_constant_result.py @@ -0,0 +1,24 @@ +import torch +import torch._dynamo as torchdynamo + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2), torch.tensor(4)), + tags={"torch.escape-hatch"}, +) +class AssumeConstantResult(torch.nn.Module): + """ + Apply the `assume_constant_result` decorator to mark non-traceable code so its result is burned into the graph as a constant. + """ + + def __init__(self): + super().__init__() + + @torchdynamo.assume_constant_result + def get_item(self, y): + return y.int().item() + + def forward(self, x, y): + return x[: self.get_item(y)] diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py new file mode 100644 index 0000000000000000000000000000000000000000..77c629559d21eb6390c00ce8143d773d16f5710f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py @@ -0,0 +1,29 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 4),), +) +class ClassMethod(torch.nn.Module): + """ + Class methods are inlined during tracing. 
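+ + Illustrative sketch (added; `ep` is a hypothetical name): exporting this module + resolves the classmethod call at trace time, for example: + ep = torch.export.export(ClassMethod(), (torch.ones(3, 4),)) + print(ep.graph) # only the underlying tensor ops remain; no method call is left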
+ """ + + @classmethod + def method(cls, x): + return x + 1 + + def __init__(self): + super().__init__() + self.linear = torch.nn.Linear(4, 2) + + def forward(self, x): + x = self.linear(x) + return self.method(x) * self.__class__.method(x) * type(self).method(x) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_class_method.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_class_method.py new file mode 100644 index 0000000000000000000000000000000000000000..68dd3772684d1c8ea784a5d74214895dedeeb530 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_class_method.py @@ -0,0 +1,46 @@ +import torch + +from torch._export.db.case import export_case +from functorch.experimental.control_flow import cond + + +class MySubModule(torch.nn.Module): + def foo(self, x): + return x.cos() + + def forward(self, x): + return self.foo(x) + + +@export_case( + example_inputs=(torch.ones(3),), + tags={ + "torch.cond", + "torch.dynamic-shape", + }, +) +class CondBranchClassMethod(torch.nn.Module): + """ + The branch functions (`true_fn` and `false_fn`) passed to cond() must follow these rules: + - both branches must take the same args, which must also match the branch args passed to cond. + - both branches must return a single tensor + - returned tensor must have the same tensor metadata, e.g. shape and dtype + - branch function can be free function, nested function, lambda, class methods + - branch function can not have closure variables + - no inplace mutations on inputs or global variables + + + This example demonstrates using class method in cond(). + + NOTE: If the `pred` is test on a dim with batch size < 2, it will be specialized. + """ + + def __init__(self): + super().__init__() + self.subm = MySubModule() + + def bar(self, x): + return x.sin() + + def forward(self, x): + return cond(x.shape[0] <= 2, self.subm.forward, self.bar, [x]) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nested_function.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nested_function.py new file mode 100644 index 0000000000000000000000000000000000000000..bd8a1db034256fd305ae8924254070ac212e9248 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nested_function.py @@ -0,0 +1,44 @@ +import torch + +from torch._export.db.case import export_case +from functorch.experimental.control_flow import cond + + +@export_case( + example_inputs=(torch.ones(3),), + tags={ + "torch.cond", + "torch.dynamic-shape", + }, +) +class CondBranchNestedFunction(torch.nn.Module): + """ + The branch functions (`true_fn` and `false_fn`) passed to cond() must follow these rules: + - both branches must take the same args, which must also match the branch args passed to cond. + - both branches must return a single tensor + - returned tensor must have the same tensor metadata, e.g. shape and dtype + - branch function can be free function, nested function, lambda, class methods + - branch function can not have closure variables + - no inplace mutations on inputs or global variables + + This example demonstrates using nested function in cond(). + + NOTE: If the `pred` is test on a dim with batch size < 2, it will be specialized. 
+ """ + def __init__(self): + super().__init__() + + def forward(self, x): + def true_fn(x): + def inner_true_fn(y): + return x + y + + return inner_true_fn(x) + + def false_fn(x): + def inner_false_fn(y): + return x - y + + return inner_false_fn(x) + + return cond(x.shape[0] < 10, true_fn, false_fn, [x]) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nonlocal_variables.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nonlocal_variables.py new file mode 100644 index 0000000000000000000000000000000000000000..38905b57e31243e10e52193ab36a8503ba4991f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nonlocal_variables.py @@ -0,0 +1,63 @@ +import torch + +from torch._export.db.case import export_case +from functorch.experimental.control_flow import cond + + +@export_case( + example_inputs=(torch.ones(6),), + tags={ + "torch.cond", + "torch.dynamic-shape", + }, +) +class CondBranchNonlocalVariables(torch.nn.Module): + """ + The branch functions (`true_fn` and `false_fn`) passed to cond() must follow these rules: + - both branches must take the same args, which must also match the branch args passed to cond. + - both branches must return a single tensor + - returned tensor must have the same tensor metadata, e.g. shape and dtype + - branch function can be free function, nested function, lambda, class methods + - branch function can not have closure variables + - no inplace mutations on inputs or global variables + + This example demonstrates how to rewrite code to avoid capturing closure variables in branch functions. + + The code below will not work because capturing closure variables is not supported. + ``` + my_tensor_var = x + 100 + my_primitive_var = 3.14 + + def true_fn(y): + nonlocal my_tensor_var, my_primitive_var + return y + my_tensor_var + my_primitive_var + + def false_fn(y): + nonlocal my_tensor_var, my_primitive_var + return y - my_tensor_var - my_primitive_var + + return cond(x.shape[0] > 5, true_fn, false_fn, [x]) + ``` + + NOTE: If the `pred` is test on a dim with batch size < 2, it will be specialized. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + my_tensor_var = x + 100 + my_primitive_var = 3.14 + + def true_fn(x, y, z): + return x + y + z + + def false_fn(x, y, z): + return x - y - z + + return cond( + x.shape[0] > 5, + true_fn, + false_fn, + [x, my_tensor_var, torch.tensor(my_primitive_var)], + ) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_operands.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_operands.py new file mode 100644 index 0000000000000000000000000000000000000000..a05e584100c958a124f9cfc59c489b417f5d3214 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_operands.py @@ -0,0 +1,39 @@ +import torch + +from torch._export.db.case import export_case +from torch.export import Dim +from functorch.experimental.control_flow import cond + +x = torch.randn(3, 2) +y = torch.ones(2) +dim0_x = Dim("dim0_x") + +@export_case( + example_inputs=(x, y), + tags={ + "torch.cond", + "torch.dynamic-shape", + }, + extra_inputs=(torch.randn(2, 2), torch.ones(2)), + dynamic_shapes={"x": {0: dim0_x}, "y": None}, +) +class CondOperands(torch.nn.Module): + """ + The operands passed to cond() must be: + - a list of tensors + - match arguments of `true_fn` and `false_fn` + + NOTE: If the `pred` is test on a dim with batch size < 2, it will be specialized. 
+ """ + + def __init__(self): + super().__init__() + + def forward(self, x, y): + def true_fn(x, y): + return x + y + + def false_fn(x, y): + return x - y + + return cond(x.shape[0] > 2, true_fn, false_fn, [x, y]) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_predicate.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_predicate.py new file mode 100644 index 0000000000000000000000000000000000000000..fd02e2484c54678712593f7c9fa28344e5574375 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_predicate.py @@ -0,0 +1,29 @@ +import torch + +from torch._export.db.case import export_case +from functorch.experimental.control_flow import cond + + +@export_case( + example_inputs=(torch.ones(6, 4, 3),), + tags={ + "torch.cond", + "torch.dynamic-shape", + }, +) +class CondPredicate(torch.nn.Module): + """ + The conditional statement (aka predicate) passed to cond() must be one of the following: + - torch.Tensor with a single element + - boolean expression + + NOTE: If the `pred` is test on a dim with batch size < 2, it will be specialized. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + pred = x.dim() > 2 and x.shape[2] > 10 + + return cond(pred, lambda x: x.cos(), lambda y: y.sin(), [x]) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_size_example.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_size_example.py new file mode 100644 index 0000000000000000000000000000000000000000..1af4b22dc988816c011aa2eb085f97c9850d257a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_size_example.py @@ -0,0 +1,27 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.tensor(4),), + tags={ + "torch.dynamic-value", + "torch.escape-hatch", + }, +) +class ConstrainAsSizeExample(torch.nn.Module): + """ + If the value is not known at tracing time, you can provide hint so that we + can trace further. Please look at constrain_as_value and constrain_as_size APIs + constrain_as_size is used for values that NEED to be used for constructing + tensor. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + a = x.item() + torch._constrain_as_size(a, min=0, max=5) + return torch.ones((a, 5)) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/decorator.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..39eff84af34812e1a31006c698652ec6dc2bbd20 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/decorator.py @@ -0,0 +1,26 @@ +import functools + +import torch + +from torch._export.db.case import export_case + + +def test_decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + 1 + + return wrapper + + +@export_case( + example_inputs=(torch.ones(3, 2), torch.ones(3, 2)), +) +class Decorator(torch.nn.Module): + """ + Decorators calls are inlined into the exported function during tracing. 
+ """ + + @test_decorator + def forward(self, x, y): + return x + y diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py new file mode 100644 index 0000000000000000000000000000000000000000..382b444d7f8a285e85c4f5530f01972918a6d96f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py @@ -0,0 +1,21 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2), torch.tensor(4)), + tags={"python.data-structure"}, +) +class Dictionary(torch.nn.Module): + """ + Dictionary structures are inlined and flattened along tracing. + """ + def __init__(self): + super().__init__() + + def forward(self, x, y): + elements = {} + elements["x2"] = x * x + y = y * elements["x2"] + return {"y": y} diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_assert.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_assert.py new file mode 100644 index 0000000000000000000000000000000000000000..ec95df0bd97dda4e673e7898a1072db8215f8310 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_assert.py @@ -0,0 +1,22 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"python.assert"}, +) +class DynamicShapeAssert(torch.nn.Module): + """ + A basic usage of python assertion. + """ + def __init__(self): + super().__init__() + + def forward(self, x): + # assertion with error message + assert x.shape[0] > 2, f"{x.shape[0]} is greater than 2" + # assertion without error message + assert x.shape[0] > 1 + return x diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_constructor.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_constructor.py new file mode 100644 index 0000000000000000000000000000000000000000..51b8dd57252529079411cf2db2b4a14a4b905634 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_constructor.py @@ -0,0 +1,19 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"torch.dynamic-shape"}, +) +class DynamicShapeConstructor(torch.nn.Module): + """ + Tensor constructors should be captured with dynamic shape inputs rather + than being baked in with static shape. + """ + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.ones(x.shape[0] * 2) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_if_guard.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_if_guard.py new file mode 100644 index 0000000000000000000000000000000000000000..45c8d36bee1fa7ed0102809a6871fbfa76628696 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_if_guard.py @@ -0,0 +1,21 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2, 2),), + tags={"torch.dynamic-shape", "python.control-flow"}, +) +class DynamicShapeIfGuard(torch.nn.Module): + """ + `if` statement with backed dynamic shape predicate will be specialized into + one particular branch and generate a guard. However, export will fail if the + the dimension is marked as dynamic shape from higher level API. 
+ """ + + def forward(self, x): + if x.shape[0] == 3: + return x.cos() + + return x.sin() diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_map.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_map.py new file mode 100644 index 0000000000000000000000000000000000000000..5be0003fd170abb49afc80544229177d4b8b8de4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_map.py @@ -0,0 +1,23 @@ +import torch + +from torch._export.db.case import export_case +from functorch.experimental.control_flow import map + + +@export_case( + example_inputs=(torch.ones(3, 2), torch.ones(2)), + tags={"torch.dynamic-shape", "torch.map"}, +) +class DynamicShapeMap(torch.nn.Module): + """ + functorch map() maps a function over the first tensor dimension. + """ + + def __init__(self): + super().__init__() + + def forward(self, xs, y): + def body(x, y): + return x + y + + return map(body, xs, y) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_slicing.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_slicing.py new file mode 100644 index 0000000000000000000000000000000000000000..ee45ffb288368dc35ffb76e385bcee20ced22235 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_slicing.py @@ -0,0 +1,20 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"torch.dynamic-shape"}, +) +class DynamicShapeSlicing(torch.nn.Module): + """ + Slices with dynamic shape arguments should be captured into the graph + rather than being baked in. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + return x[: x.shape[0] - 2, x.shape[1] - 1 :: 2] diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_view.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_view.py new file mode 100644 index 0000000000000000000000000000000000000000..b763a4ec0ae3480a322dbd9b73664944c5e2d8bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_view.py @@ -0,0 +1,22 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(10, 10),), + tags={"torch.dynamic-shape"}, +) +class DynamicShapeView(torch.nn.Module): + """ + Dynamic shapes should be propagated to view arguments instead of being + baked into the exported graph. 
+ """ + + def __init__(self): + super().__init__() + + def forward(self, x): + new_x_shape = x.size()[:-1] + (2, 5) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/fn_with_kwargs.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/fn_with_kwargs.py new file mode 100644 index 0000000000000000000000000000000000000000..6182a747955561fc8bba1a4e3c3e6187e987c135 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/fn_with_kwargs.py @@ -0,0 +1,32 @@ +import torch + +from torch._export.db.case import export_case, ExportArgs, SupportLevel + + +@export_case( + example_inputs=ExportArgs( + torch.randn(4), + (torch.randn(4), torch.randn(4)), + *[torch.randn(4), torch.randn(4)], + mykw0=torch.randn(4), + input0=torch.randn(4), input1=torch.randn(4) + ), + tags={"python.data-structure"}, + support_level=SupportLevel.SUPPORTED, +) +class FnWithKwargs(torch.nn.Module): + """ + Keyword arguments are not supported at the moment. + """ + def __init__(self): + super().__init__() + + def forward(self, pos0, tuple0, *myargs, mykw0, **mykwargs): + out = pos0 + for arg in tuple0: + out = out * arg + for arg in myargs: + out = out * arg + out = out * mykw0 + out = out * mykwargs["input0"] * mykwargs["input1"] + return out diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/list_unpack.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/list_unpack.py new file mode 100644 index 0000000000000000000000000000000000000000..a5bd7fbd8edf523d4d6d11250bc9f8c8986653fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/list_unpack.py @@ -0,0 +1,27 @@ +from typing import List + +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=([torch.ones(3, 2), torch.tensor(4), torch.tensor(5)],), + tags={"python.control-flow", "python.data-structure"}, +) +class ListUnpack(torch.nn.Module): + """ + Lists are treated as static construct, therefore unpacking should be + erased after tracing. + """ + + def __init__(self): + super().__init__() + + def forward(self, args: List[torch.Tensor]): + """ + Lists are treated as static construct, therefore unpacking should be + erased after tracing. + """ + x, *y = args + return x + y[0] diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py new file mode 100644 index 0000000000000000000000000000000000000000..b4d76cc67eda8cbb3306f27b2315ae35c7517aa2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py @@ -0,0 +1,25 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"python.object-model"}, + support_level=SupportLevel.NOT_SUPPORTED_YET, +) +class ModelAttrMutation(torch.nn.Module): + """ + Attribute mutation is not supported. 
+ """ + + def __init__(self): + super().__init__() + self.attr_list = [torch.ones(3, 2), torch.ones(3, 2)] + + def recreate_list(self): + return [torch.zeros(3, 2), torch.zeros(3, 2)] + + def forward(self, x): + self.attr_list = self.recreate_list() + return x.sum() + self.attr_list[0].sum() diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/nested_function.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/nested_function.py new file mode 100644 index 0000000000000000000000000000000000000000..58b946f94a0c28447501a1d1a1fd4c98405d49d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/nested_function.py @@ -0,0 +1,27 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2), torch.ones(2)), + tags={"python.closure"}, +) +class NestedFunction(torch.nn.Module): + """ + Nested functions are traced through. Side effects on global captures + are not supported though. + """ + def __init__(self): + super().__init__() + + def forward(self, a, b): + x = a + b + z = a - b + + def closure(y): + nonlocal x + x += 1 + return x * y + z + + return closure(x) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/pytree_flatten.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/pytree_flatten.py new file mode 100644 index 0000000000000000000000000000000000000000..0d799b2a609acc2b626e70f5c9beb131784f4e6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/pytree_flatten.py @@ -0,0 +1,20 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel +from torch.utils import _pytree as pytree + + +@export_case( + example_inputs=({1: torch.randn(3, 2), 2: torch.randn(3, 2)},), + support_level=SupportLevel.SUPPORTED, +) +class PytreeFlatten(torch.nn.Module): + """ + Pytree from PyTorch can be captured by TorchDynamo. + """ + def __init__(self): + super().__init__() + + def forward(self, x): + y, spec = pytree.tree_flatten(x) + return y[0] + 1 diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/scalar_output.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/scalar_output.py new file mode 100644 index 0000000000000000000000000000000000000000..d3fc2b0ec36a5f9296aceb3146be74f07d5e5ac2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/scalar_output.py @@ -0,0 +1,23 @@ +import torch + +from torch._export.db.case import export_case +from torch.export import Dim + +x = torch.ones(3, 2) +dim1_x = Dim("dim1_x") + +@export_case( + example_inputs=(x,), + tags={"torch.dynamic-shape"}, + dynamic_shapes={"x": {1: dim1_x}}, +) +class ScalarOutput(torch.nn.Module): + """ + Returning scalar values from the graph is supported, in addition to Tensor + outputs. Symbolic shapes are captured and rank is specialized. 
+ """ + def __init__(self): + super().__init__() + + def forward(self, x): + return x.shape[1] + 1 diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/static_for_loop.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/static_for_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..9d030b6e82aa5f4c89c3e1c37622e53c1c4675f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/static_for_loop.py @@ -0,0 +1,22 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"python.control-flow"}, +) +class StaticForLoop(torch.nn.Module): + """ + A for loop with constant number of iterations should be unrolled in the exported graph. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + ret = [] + for i in range(10): # constant + ret.append(i + x) + return ret diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/static_if.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/static_if.py new file mode 100644 index 0000000000000000000000000000000000000000..c258e430f7ea0fa4a5b58ef9d6988e936fbb0f3f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/static_if.py @@ -0,0 +1,23 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2, 2),), + tags={"python.control-flow"}, +) +class StaticIf(torch.nn.Module): + """ + `if` statement with static predicate value should be traced through with the + taken branch. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + if len(x.shape) == 3: + return x + torch.ones(1, 1, 1) + + return x diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/torch_sym_min.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/torch_sym_min.py new file mode 100644 index 0000000000000000000000000000000000000000..b9f4dd8f8496ccfd6c81b7007a96d9a05e6ffce5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/torch_sym_min.py @@ -0,0 +1,17 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"torch.operator"}, + support_level=SupportLevel.NOT_SUPPORTED_YET, +) +class TorchSymMin(torch.nn.Module): + """ + torch.sym_min operator is not supported in export. 
+ """ + + def forward(self, x): + return x.sum() + torch.sym_min(x.size(0), 100) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/user_input_mutation.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/user_input_mutation.py new file mode 100644 index 0000000000000000000000000000000000000000..2bb16cd64a56fce4c4ccfdbb257f32f11514439c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/user_input_mutation.py @@ -0,0 +1,18 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"torch.mutation"}, + support_level=SupportLevel.SUPPORTED, +) +class UserInputMutation(torch.nn.Module): + """ + Directly mutate user input in forward + """ + + def forward(self, x): + x.mul_(2) + return x.cos() diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86fba52b80a82b79d62fef3f2794080d3b9c55bb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/effects.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/effects.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07b3ade315b0bda59f12436b1b06c79b628c7284 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/effects.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/map.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/map.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2be8a5963036b02a2e4497062d6f11941132a3d3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/map.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/out_dtype.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/out_dtype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37437fe70eec06d2a54a7639efb884fcd2ea0903 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/out_dtype.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/strict_mode.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/strict_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23f327138298a138a2832f32c47c7436769560e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/strict_mode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/torchbind.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/torchbind.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5b7f9cad7aaf176edfef356526789800f941f37 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/torchbind.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/while_loop.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/while_loop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..702132febb10e8c28bb6c2acd8302f9c0c32b211 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/while_loop.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/wrap.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/wrap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8aad42712d9dc433a01770c385295755236535f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/wrap.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/torchbind.py b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/torchbind.py new file mode 100644 index 0000000000000000000000000000000000000000..6ca866ee3d8b99dbd4b73b9790845eff995b23e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/torchbind.py @@ -0,0 +1,95 @@ +from contextlib import contextmanager + +import torch +from torch._C import DispatchKey # @manual +from torch._functorch._aot_autograd.utils import KNOWN_TYPES +from torch._higher_order_ops.utils import autograd_not_implemented +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree +from torch.fx.node import has_side_effect +from torch.utils import _pytree as pytree + +# The call_torchbind operator represents a method invocation on a torchbind +# object. The calling convention is: +# call_torchbind(self: ScriptObject, method_name: str, *method_args, **method_kwargs) +# We do not expect users to write this operator directly. Instead it will be +# emitted by Dynamo when tracing encounters a torchbind object. +call_torchbind = HigherOrderOperator("call_torchbind") + +# Register this operator as side-effectful with FX. +# TODO: this is not really sufficient. While passes (hopefully) check +# Node.is_impure() and make good decisions, we also assume we can execute the +# graph as many times as we want without changing behavior, which is NOT true of +# ops that mutate torchbind object state. +has_side_effect(call_torchbind) + +_orig_scriptmethod_call = torch.ScriptMethod.__call__ + + +def torchbind_method_redispatch(self, *args, **kwargs): + if isinstance(self.raw_owner, torch.ScriptObject): + return call_torchbind(self.raw_owner, self.name, *args, **kwargs) + return _orig_scriptmethod_call(self, *args, **kwargs) + + +@contextmanager +def enable_torchbind_tracing(): + """Context manager that acts as a feature flag to enable torchbind tracing + behavior. Once torchbind tracing has been stabilized, we can remove this and + turn it on by default. + """ + try: + KNOWN_TYPES.append(torch.ScriptObject) + torch.ScriptMethod.__call__ = torchbind_method_redispatch # type: ignore[method-assign] + yield + finally: + assert ( + KNOWN_TYPES.pop() is torch.ScriptObject + ), "Someone else messed with KNOWN_TYPES during tracing, exploding." 
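+ # Restore the original ScriptMethod.__call__ so later calls bypass the tracing redispatch.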
+ torch.ScriptMethod.__call__ = _orig_scriptmethod_call # type: ignore[method-assign] + + +@call_torchbind.py_impl(DispatchKey.CompositeExplicitAutograd) +def call_torchbind_impl(obj, method, *args, **kwargs): + return _orig_scriptmethod_call(getattr(obj, method), *args, **kwargs) + + +@call_torchbind.py_impl(ProxyTorchDispatchMode) +def inner(mode, *args, **kwargs): + if mode.enable_tracing: + proxy_args = pytree.tree_map(mode.tracer.unwrap_proxy, args) + proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs) + + out_proxy = mode.tracer.create_proxy( + "call_function", + call_torchbind, + proxy_args, + proxy_kwargs, + ) + out = call_torchbind_impl(*args, **kwargs) + + return track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer) + else: + return call_torchbind(*args, **kwargs) + + +# TODO: currently we just run the C++ implementation with fake tensors. +# But we should make it possible to register a fake torchbind implementation. +@call_torchbind.py_impl(FakeTensorMode) +def call_torchbind_fake(mode, *args, **kwargs): + with mode: + return call_torchbind_impl(*args, **kwargs) + + +call_torchbind.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(call_torchbind, deferred_error=True) +) + + +@call_torchbind.py_functionalize_impl +def call_torchbind_func(ctx, *args, **kwargs): + args = ctx.unwrap_tensors(args) + with ctx.redispatch_to_next(): + return ctx.wrap_tensors(call_torchbind(*args, **kwargs)) diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/utils.py b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ce2ec46460e96b312de7ed2a040d68fa0e0d4db8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/utils.py @@ -0,0 +1,186 @@ +from contextlib import contextmanager +from dataclasses import dataclass +from typing import Any, Callable + +import torch +import torch.fx.traceback as fx_traceback +import torch.utils._pytree as pytree +from torch._ops import HigherOrderOperator +from torch.fx.experimental.proxy_tensor import make_fx +from torch.multiprocessing.reductions import StorageWeakRef + + +@dataclass +class UnsupportedAliasMutationException(RuntimeError): + reason: str + + +def autograd_not_implemented_inner( + operator: HigherOrderOperator, delayed_error: bool, *args: Any, **kwargs: Any +) -> Any: + """If autograd is enabled and any of the arguments require grad, this will either + raise an error or return a DelayedError, depending on the value of delayed_error. 
+
+    Args:
+        operator: The HigherOrderOperator to call with *args and **kwargs
+        delayed_error: If True, return a DelayedError instead of raising an error
+        args: The flattened operands to the HigherOrderOperator
+        kwargs: The keyword arguments to the HigherOrderOperator
+
+    Raises:
+        RuntimeError: If autograd is enabled, any of the arguments require
+            grad, and delayed_error is False
+    """
+    with torch._C._AutoDispatchBelowAutograd():
+        result = operator(*args, **kwargs)
+        flat_operands = pytree.arg_tree_leaves(*args)
+        if torch.is_grad_enabled() and any(
+            f.requires_grad for f in flat_operands if isinstance(f, torch.Tensor)
+        ):
+            if delayed_error:
+                err_fn = torch._C._functions.DelayedError(
+                    f"Autograd not implemented for {str(operator)}",
+                    1,
+                )
+
+                def fake_requires_grad(tensor):
+                    if torch.is_floating_point(tensor) or torch.is_complex(tensor):
+                        tensor = tensor.detach()
+                        tensor.requires_grad = True
+                    return tensor
+
+                return pytree.tree_map_only(
+                    torch.Tensor, lambda x: err_fn(fake_requires_grad(x)), result
+                )
+            else:
+                raise RuntimeError(f"Autograd not implemented for {str(operator)}")
+        return result
+
+
+def autograd_not_implemented(op: HigherOrderOperator, deferred_error: bool) -> Callable:
+    def inner(*args, **kwargs):
+        return autograd_not_implemented_inner(op, deferred_error, *args, **kwargs)
+
+    return inner
+
+
+def _maybe_run_with_interpreter(fn):
+    maybe_interpreted_fn = fn
+    if isinstance(fn, torch.fx.GraphModule) and fx_traceback.has_preserved_node_meta():
+        # Running the graph with an interpreter is needed to propagate the stack_trace
+        def graph_with_interpreter(*args):
+            with fx_traceback.preserve_node_meta():
+                return torch.fx.Interpreter(fn).run(*args)
+
+        maybe_interpreted_fn = graph_with_interpreter
+    return maybe_interpreted_fn
+
+
+# We'll use the current decomposition table to make sure operators in subgraphs are
+# decomposed properly.
+# We also may need to run with the interpreter to propagate the stack_trace.
+def reenter_make_fx(fn, pre_dispatch=False):
+    decomp_table = torch.fx.experimental.proxy_tensor.CURRENT_DECOMPOSITION_TABLE
+    return make_fx(
+        _maybe_run_with_interpreter(fn),
+        decomposition_table=decomp_table,
+        pre_dispatch=pre_dispatch,
+    )
+
+
+@contextmanager
+def _set_compilation_env():
+    _old_is_tracing = torch.fx._symbolic_trace._is_fx_tracing_flag
+    try:
+        # We need to turn off the is_fx_tracing_flag. Remove this flag check from
+        # Dynamo once we are confident fx tracing works with Dynamo.
+        torch.fx._symbolic_trace._is_fx_tracing_flag = False
+        yield
+    finally:
+        torch.fx._symbolic_trace._is_fx_tracing_flag = _old_is_tracing
+
+
+def _has_potential_branch_input_mutation(branch, inputs, pre_dispatch=False):
+    """
+    Dispatch-trace the branch with inputs and check whether the resulting
+    graph has a mutable op on the input. This is a bit restrictive, as the
+    branch must be traceable.
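+    For example, a branch like `lambda x: x.add_(1)` would be flagged, since
+    the traced graph calls a mutable op (aten.add_) on a placeholder node.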
+ """ + try: + gm = make_fx(branch, pre_dispatch=pre_dispatch)(*inputs) + except UnsupportedAliasMutationException: + # this can happen when nested cond_op is + # functionalized + return True + except Exception as e: + raise e + + def _detect_input_mutation(gm): + input_nodes = set() + for node in gm.graph.nodes: + if node.op == "placeholder": + input_nodes.add(node) + if node.op == "call_function": + target = node.target + if ( + isinstance(target, torch._ops.OpOverload) + and target._schema.is_mutable + ): + for arg in node.args: + if arg in input_nodes: + return True + + for _, module in gm.named_children(): + if isinstance(module, torch.fx.GraphModule): + if _detect_input_mutation(module): + return True + + return False + + return _detect_input_mutation(gm) + + +def _has_potential_branch_input_alias(branch, inputs, pre_dispatch=False): + """ + Dispatch-trace the branch with inputs and check if + producing graph has output aliasing the branch input. This is + bit restrictive as the branch must be traceable. + """ + try: + gm = make_fx(branch, pre_dispatch=pre_dispatch)(*inputs) + except UnsupportedAliasMutationException: + # this can happen when nested cond_op is + # functionalized + return True + except Exception as e: + raise e + + def _detect_input_alias(gm): + input_storages = set() + for node in gm.graph.nodes: + # We need to check existence of "val" because we reuse the logic here + # for map operator, where num_mapped_args is a scalar + # and doesn't have a "val" meta. + if node.op == "placeholder" and "val" in node.meta: + input_storages.add(StorageWeakRef(node.meta["val"]._typed_storage())) + if node.op == "output": + + def check_alias(out): + if out is not None and "val" in out.meta: + out_storage = StorageWeakRef(out.meta["val"]._typed_storage()) + return out_storage in input_storages + return False + + if any(pytree.tree_leaves(pytree.tree_map(check_alias, node.args))): + return True + + for _, module in gm.named_children(): + if isinstance(module, torch.fx.GraphModule) and _detect_input_alias(module): + return True + + return False + + return _detect_input_alias(gm) diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/wrap.py b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/wrap.py new file mode 100644 index 0000000000000000000000000000000000000000..f288c350f0ee298927bf0c4581b598e23e0e5e6a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/wrap.py @@ -0,0 +1,183 @@ +import inspect +import logging + +import torch +from torch._ops import HigherOrderOperator +from torch.utils.checkpoint import checkpoint, uid +import torch._dynamo.config + +log = logging.getLogger(__name__) + + + +# Used for testing the HigherOrderOperator mechanism +class Wrap(HigherOrderOperator): + def __init__(self): + super().__init__("wrap") + + def __call__(self, func, *args, **kwargs): + # Dynamo already traces the body of HigherOrderOp beforehand when it + # so no need to trace into it. + import torch._dynamo # noqa: F401 + from torch._dynamo import disable + + @disable + def wrapper(): + result = func(*args, **kwargs) + return result + + return wrapper() + +wrap = Wrap() + +class WrapWithSetGradEnabled(HigherOrderOperator): + def __init__(self): + super().__init__("wrap_with_set_grad_enabled") + + def __call__(self, enable_grad, wrapped_func, *args, **kwargs): + # Dynamo already traces the body of HigherOrderOp beforehand when it + # so no need to trace into it. 
+        import torch._dynamo  # noqa: F401
+        from torch._dynamo import disable
+
+        @disable
+        def wrapper():
+            with torch.set_grad_enabled(enable_grad):
+                return wrapped_func(*args, **kwargs)
+
+        return wrapper()
+
+wrap_with_set_grad_enabled = WrapWithSetGradEnabled()
+
+class WrapActivationCheckpoint(HigherOrderOperator):
+    """
+    This operator is used to wrap torch.utils.checkpoint. It prevents
+    TorchDynamo from looking into saved tensor hooks and directly passes
+    control to AOT Autograd, which is ok with tracing saved tensor hooks. As a
+    result of AOT tracing the torch.utils.checkpoint code, we get a backward
+    graph with recomputed forward nodes.
+
+    However, we might deprecate this operator soon. The difficulty arises in
+    the functionalization of rng ops. Today, there are two different
+    functionalizations of rng ops - one at AOT Autograd and the other at
+    Inductor - and they are difficult to map to each other. The rng states
+    also complicate pattern matching in Inductor. Due to the ease of
+    implementation, we are currently inclined towards functionalization at the
+    Inductor level, which means that duplication/recomputation is done as a
+    compiler pass in the partitioners. See TagActivationCheckpoint for more
+    information.
+    """
+    def __init__(self):
+        super().__init__("wrap_activation_checkpoint")
+
+    def __call__(self, function, *args, **kwargs):
+        # use_reentrant is set to False because this op is going to be traced,
+        # and we ensure that AOT Autograd traces through the non-reentrant
+        # version of checkpointing.
+        import torch.fx.traceback as fx_traceback
+        from torch.fx import Interpreter
+        kwargs["use_reentrant"] = False
+        kwargs["preserve_rng_state"] = False
+        # Using an interpreter allows preservation of metadata through the torch.compile stack.
+        with fx_traceback.preserve_node_meta():
+            return checkpoint(Interpreter(function).run, *args, **kwargs)
+
+wrap_activation_checkpoint = WrapActivationCheckpoint()
+
+class TagActivationCheckpoint(HigherOrderOperator):
+    """
+    This operator is supposed to be used only with the torch.compile stack. It
+    accepts an FX graph module which needs to be checkpointed. This operator
+    adds a "recompute" tag to the nodes of the FX graph that should be
+    recomputed.
+
+    The goal is to:
+    1. Avoid using Dynamo to trace through saved tensor hooks.
+    2. For the selective checkpointing case, let AOTAutograd trace through
+       saved tensor hooks, but with special logic in a TorchDispatchMode that
+       overrides the usual saved_tensor_hooks fn logic in order to tag the nodes.
+    3. Rely on the partitioners to actually duplicate the nodes.
+    This sits well in the torch.compile stack, because by the time the graph
+    reaches the partitioner, Inductor has already run its functionalization of
+    rng ops (by setting a fixed seed for each random op, see
+    `replace_random_passes`). Therefore, the duplication of nodes, by design,
+    respects the rng states in the forward and the recomputed forward in
+    backward.
+    """
+
+    def __init__(self):
+        super().__init__("tag_activation_checkpoint")
+
+    @staticmethod
+    def divide_kwargs(kwargs):
+        """
+        The checkpoint fn can receive kwargs mixed between the checkpointed fn
+        and the checkpoint fn itself. For example
+        >> def gn(x, y, z=None):
+        >>     a = torch.matmul(x, y)
+        >>     if z is not None:
+        >>         return torch.matmul(a, z)
+        >>     return a
+        >> def fn(x, y, z):
+        >>     return torch.cos(checkpoint(gn, x, y, use_reentrant=False, z=z))
+        In the above case, z belongs to the checkpointed function gn, but
+        use_reentrant belongs to the checkpoint function. This function splits
+        the kwargs into checkpoint_kwargs and gmod_kwargs (or
+        checkpointed_fn_kwargs).
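+        For the example above, divide_kwargs({"z": z, "use_reentrant": False})
+        would return ({"use_reentrant": False}, {"z": z}).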
+        The split is based on the signature of the checkpoint function, so it
+        is deterministic from run to run; this helps debuggability and is not
+        required for correctness.
+        """
+        ckpt_signature = inspect.signature(checkpoint)
+        checkpoint_keys = set()
+        for name in ckpt_signature.parameters:
+            if name in ("function", "args", "kwargs"):
+                continue
+            checkpoint_keys.add(name)
+
+        # `preserve_rng_state` is not a regular kwarg
+        checkpoint_keys.add("preserve_rng_state")
+
+        checkpoint_kwargs = {
+            name: kwargs[name] for name in kwargs.keys() if name in checkpoint_keys
+        }
+        gmod_kwargs = {
+            name: kwargs[name] for name in kwargs.keys() if name not in checkpoint_keys
+        }
+        return checkpoint_kwargs, gmod_kwargs
+
+    def tag_nodes(self, gmod):
+        unique_graph_id = next(uid)
+        for node in gmod.graph.nodes:
+            if node.op in ("call_function", "call_method", "call_module"):
+                node.meta["recompute"] = unique_graph_id
+        return gmod
+
+    def __call__(self, gmod, *args, **kwargs):
+        import torch.fx.traceback as fx_traceback
+        from torch.fx import Interpreter
+        if "_checkpoint_context_fn" in gmod.meta:
+            assert torch._dynamo.config._experimental_support_context_fn_in_torch_utils_checkpoint, \
+                "Passing context_fn to torch.utils.checkpoint is currently not supported under torch.compile"
+            log.warning("""
+Detected that context_fn is passed to torch.utils.checkpoint under torch.compile.
+Please make sure the checkpointed region does not contain in-place ops (e.g. torch.relu_).
+""")
+            # use_reentrant is set to False because this op is going to be traced,
+            # and we ensure that AOT Autograd traces through the non-reentrant
+            # version of checkpointing.
+            kwargs["use_reentrant"] = False
+            # preserve_rng_state is set to False because we want to prevent AOTAutograd from tracing
+            # through the `torch.random.fork_rng` op (which is not supported yet under CUDA).
+            # This doesn't mean that we don't preserve RNG state. Instead, we will always preserve RNG
+            # state regardless of this flag (by doing RNG functionalization via `replace_random_passes`
+            # in Inductor instead of in AOTAutograd).
+            kwargs["preserve_rng_state"] = False
+            kwargs["context_fn"] = gmod.meta["_checkpoint_context_fn"]
+            # We first tag all nodes as "recompute" in this graph, and then we undo the "recompute" tag
+            # for specific nodes in _CachingTorchDispatchMode in torch/utils/checkpoint.py.
+            gmod = self.tag_nodes(gmod)
+            # Using an interpreter allows preservation of metadata through the torch.compile stack.
+            with fx_traceback.preserve_node_meta():
+                return checkpoint(Interpreter(gmod).run, *args, **kwargs)
+        else:
+            gmod = self.tag_nodes(gmod)
+            # Using an interpreter allows preservation of metadata through the torch.compile stack.
+            # TODO: We want to use the same `checkpoint(Interpreter(gmod).run, *args, **kwargs)` here
+            # as in the `context_fn != None` case, but that depends on in-place op support in
+            # TorchDispatchMode + torch.compile. (For details on the in-place op issue, run the
+            # `test_compile_selective_checkpoint_inplace_op` unit test.)
+            with fx_traceback.preserve_node_meta():
+                return Interpreter(gmod).run(*args)
+
+tag_activation_checkpoint = TagActivationCheckpoint()
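As a quick sanity check of the wrap operator added above, here is a minimal usage sketch (not part of the diff; the torch.sin body and the tensor shape are illustrative assumptions, not anything the patch itself exercises). Called eagerly, Wrap.__call__ simply runs the function under torch._dynamo.disable:

import torch
from torch._higher_order_ops.wrap import wrap

def body(x):
    # Any traceable function works here; torch.sin is an arbitrary choice.
    return torch.sin(x)

x = torch.randn(3)
out = wrap(body, x)  # eagerly equivalent to body(x), with Dynamo disabled
assert torch.allclose(out, torch.sin(x))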