diff --git a/ckpts/universal/global_step120/zero/1.word_embeddings.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/1.word_embeddings.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..a6589231165f532dc1c27ca57a9f3a3d87b85b86 --- /dev/null +++ b/ckpts/universal/global_step120/zero/1.word_embeddings.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69bc159dc60ca0fa15d189e9ea5a5afe2fdb1fd1843f43d423706e3ef48e0582 +size 415237419 diff --git a/ckpts/universal/global_step120/zero/10.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/10.input_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..3a24a94c9ec726faa7eec96a64d1c9e54c6127fe --- /dev/null +++ b/ckpts/universal/global_step120/zero/10.input_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17eaa0cccd5fef6a55674225f15d60f3ae6ea4c4a93b3558422135b52d01ee46 +size 9372 diff --git a/ckpts/universal/global_step120/zero/10.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/10.input_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..4d3e022fcd66eaf8743f334894dee4cd16ad76ee --- /dev/null +++ b/ckpts/universal/global_step120/zero/10.input_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1dc0c4eb90385736831009786614e41bbc14440018e0ca45547e827d894162a +size 9387 diff --git a/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..89b32f28657b3a529c738833923ebaeefdd7d7df --- /dev/null +++ b/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc37920f0413b3804caac3df9bab38fd1ba77bb527e0a092ca06fa4c268c1dd9 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..71ba09ef333a01c7506dc7db8921e029ea0957a9 --- /dev/null +++ b/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bed44ee6d530f1d12e0af63406c2de39ddcd545f5234db79633ad832537d5fae +size 33555627 diff --git a/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..ad13569040c7813b0230fd64b5a882c8e966fefc --- /dev/null +++ b/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a2890cb784a3591974935bb944c6b8f4ad401da0bc8fce465a4714a8c04f911 +size 33555533 diff --git a/ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..4c62eb72217e00ba7f70ed2a72ee6e4a5bb33468 --- /dev/null +++ 
b/ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7532df6a0350673df2366907c0091363072a8a4b9f12bbf9a9793c7c8e5f64e +size 9387 diff --git a/ckpts/universal/global_step120/zero/4.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/4.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..6bb39dd8622ae8063555c7f2b19d34942bfe0693 --- /dev/null +++ b/ckpts/universal/global_step120/zero/4.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbc8c554279d3831a4aaba7844cb92c0de84e792dc6406c762163ca7974c1a59 +size 9372 diff --git a/ckpts/universal/global_step120/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..2df6add0dced4300dd0d6c4ba9f180ebfd9924cc --- /dev/null +++ b/ckpts/universal/global_step120/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d48d36ff4c24763c96242ad3ff413431a60d7ccb26095c441b892dfffdcc692 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/7.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step120/zero/7.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..edfef6aa972e8f41006ccac3e716b48ffd2cfc10 --- /dev/null +++ b/ckpts/universal/global_step120/zero/7.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45ee432c24153973ef44a9001e0b6398de0380a298b6676ed86d7897b1f2c23d +size 33555533 diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/comptime.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/comptime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4a1c29861e8c64274f792e8bb47e9f31dddbdf6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/comptime.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/device_interface.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/device_interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..859ba20377cc8f3aac4c17afac84737b841a69ba Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/device_interface.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/eval_frame.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/eval_frame.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ce9dbed6d778508257372b4eac2d5dd9b778cb6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/eval_frame.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/exc.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/exc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69f655f54bc461cc2680b1fc841e0a223566f534 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/exc.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/external_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/external_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..059ffbff081d37d93da55aa337a967be7ac5ba8b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/external_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/funcname_cache.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/funcname_cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41290396d37ede7fe967284a49241361143656d3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/funcname_cache.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c924d57e96ef50e1f6425a5a752abd9a1485563 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/logging.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d934c82ee91a709665326efd77f2db62e902b81a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/logging.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/polyfill.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/polyfill.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9476bb49fc9ae2f0be881f6e7a0c331f5bd70e6e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/polyfill.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/profiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1bfa3fd5d77a3200da0c5950b52cb6e22218a04 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/profiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/tensor_version_op.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/tensor_version_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..311fb499817a8f1514d50b4bbd2b2f16c91a5540 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/tensor_version_op.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_minifier_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_minifier_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..124dfd085a32f1964984808337f9e97942a06555 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_minifier_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/trace_rules.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/trace_rules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a32aab972cdd7fd2e0088abc0970642eaea4f118 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/trace_rules.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..726ab662de963f193f6aa12ab4b434f6b8866780 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/onnxrt.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/onnxrt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..156ec7146514f5eff2158eb820cb7a4c26c90c4d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/onnxrt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/torchxla.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/torchxla.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56b53113247d65594eba818fa54dd71e96979d52 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/torchxla.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/common.py b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/common.py new file mode 100644 index 0000000000000000000000000000000000000000..0932025fd2ce4e86cc73be5287f25ff319648b4c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/common.py @@ -0,0 +1,112 @@ +# mypy: ignore-errors + +import contextlib +import functools +import logging +from unittest.mock import patch + +import torch +from torch._dynamo import disable +from torch._dynamo.utils import counters, defake +from torch._functorch.aot_autograd import aot_module_simplified +from torch.utils._python_dispatch import _disable_current_modes + +log = logging.getLogger(__name__) + + +def aot_autograd(**kwargs): + def compiler_fn(gm: torch.fx.GraphModule, example_inputs): + # Hack to get around circular import problems with aot_eager_decomp_partition + if callable(kwargs.get("decompositions")): + kwargs["decompositions"] = kwargs["decompositions"]() + + # NB: dont delete counter increment + counters["aot_autograd"]["total"] += 1 + use_fallback = False + + if use_fallback: + log.debug("Unable to use AOT Autograd because graph has mutation") + counters["aot_autograd"]["not_ok"] += 1 + return gm + + # OK attempt to compile + + def _wrapped_bw_compiler(*args, **kwargs): + # stop TorchDynamo from trying to compile our generated backwards pass + return disable(disable(bw_compiler)(*args, **kwargs)) + + bw_compiler = kwargs.get("bw_compiler") or kwargs["fw_compiler"] + kwargs["bw_compiler"] = _wrapped_bw_compiler + kwargs["inference_compiler"] = ( + 
kwargs.get("inference_compiler") or kwargs["fw_compiler"] + ) + + from functorch.compile import nop + + from torch._inductor.debug import enable_aot_logging + + # debug asserts slow down compile time noticeably, + # So only default them on when the aot_eager backend is used. + if kwargs.get("fw_compiler", None) == nop: + patch_config = patch("functorch.compile.config.debug_assert", True) + else: + patch_config = contextlib.nullcontext() + + try: + # NB: NOT cloned! + with enable_aot_logging(), patch_config: + cg = aot_module_simplified(gm, example_inputs, **kwargs) + counters["aot_autograd"]["ok"] += 1 + return disable(cg) + except Exception: + counters["aot_autograd"]["not_ok"] += 1 + raise + + return compiler_fn + + +def mem_efficient_fusion_kwargs(use_decomps): + from functorch.compile import ( + default_decompositions, + min_cut_rematerialization_partition, + ts_compile, + ) + + kwargs = { + # these are taken from memory_efficient_fusion() + "fw_compiler": ts_compile, + "bw_compiler": ts_compile, + "partition_fn": min_cut_rematerialization_partition, + } + + if use_decomps: + kwargs["decompositions"] = default_decompositions + + return kwargs + + +def fake_tensor_unsupported(fn): + """ + Decorator for backends that need real inputs. We swap out fake + tensors for zero tensors. + """ + + @functools.wraps(fn) + def wrapper(model, inputs, **kwargs): + with _disable_current_modes(): + inputs = list(map(defake, inputs)) + return fn(model, inputs, **kwargs) + + return wrapper + + +def device_from_inputs(example_inputs) -> torch.device: + for x in example_inputs: + if hasattr(x, "device"): + return x.device + + +def dtype_from_inputs(example_inputs) -> torch.dtype: + for x in example_inputs: + if hasattr(x, "dtype"): + return x.dtype diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/cudagraphs.py b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/cudagraphs.py new file mode 100644 index 0000000000000000000000000000000000000000..d3ab13d7d6ea92ccf83ed246ef3d1e634784a8d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/cudagraphs.py @@ -0,0 +1,239 @@ +# mypy: ignore-errors + +import functools +import operator +from collections import defaultdict +from typing import Dict, List, Optional + +import torch +from torch._dynamo.backends.debugging import boxed_nop +from torch._inductor.cudagraph_trees import cudagraphify_impl +from torch._inductor.cudagraph_utils import ( + BoxedDeviceIndex, + check_multiple_devices_or_any_cpu_nodes, + get_mutation_stack_trace, +) +from torch._inductor.utils import ( + BoxedBool, + count_tangents, + has_incompatible_cudagraph_ops, + num_fw_fixed_arguments, + output_node, +) +from torch.multiprocessing.reductions import StorageWeakRef +from .common import aot_autograd +from .registry import register_backend + +perf_log = torch._logging.getArtifactLogger(__name__, "perf_hints") + + +def find_input_mutations(g): + def meta_fk(meta): + return meta["val"] if "val" in meta else meta["fake_result"] + + inputs = defaultdict(set) + input_idx = 0 + mutated_inputs = set() + for n in g.nodes: + if n.op == "placeholder": + if isinstance(meta_fk(n.meta), torch.Tensor): + inputs[StorageWeakRef(meta_fk(n.meta)._typed_storage())].add(input_idx) + input_idx += 1 + elif n.op == "call_function": + if n.target is operator.getitem: + continue + schema = n.target._schema + for i, arg in enumerate(schema.arguments): + if i < len(n.args): + argument = n.args[i] + else: + if arg.name not in n.kwargs: + continue + argument = 
n.kwargs[arg.name] + mut_arg = False + if arg.alias_info: + if arg.alias_info.is_write: + mut_arg = True + if mut_arg: + # TODO: not correct for args that contain tensors in a struct + # like list + mutated_inputs |= inputs[ + StorageWeakRef(meta_fk(argument.meta)._typed_storage()) + ] + + # TODO: error on unrecognized nodes + return mutated_inputs + + +def get_device_node_mapping(gm: torch.fx.GraphModule): + device_node_mapping: Dict[torch.device, torch.fx.Node] = {} + for n in gm.graph.nodes: + t = n.meta.get("val", None) + if isinstance(t, torch.Tensor) and t.device not in device_node_mapping: + device_node_mapping[t.device] = n + return device_node_mapping + + +def check_for_mutation(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]: + mutation_indices = find_input_mutations(aot_model.graph) - set(range(num_fixed)) + if not mutation_indices: + return None + + return get_mutation_stack_trace(aot_model, mutation_indices) + + +def check_for_skip(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]: + if mut_skip := check_for_mutation(aot_model, num_fixed): + return mut_skip + + if skip := check_multiple_devices_or_any_cpu_nodes( + get_device_node_mapping(aot_model) + ): + return skip + + if has_incompatible_cudagraph_ops(aot_model): + return "skipping cudagraphs due to incompatible op" + + return None + + +def get_device_index(gm) -> int: + device = next(iter(get_device_node_mapping(gm))) + assert device.type == "cuda" + return device.index + + +def get_stack_traces(gm) -> List[Optional[str]]: + output = output_node(gm) + assert len(output.args) == 1 + return [ + (arg.stack_trace if isinstance(arg, torch.fx.node.Node) else None) + for arg in output.args[0] + ] + + +def cudagraphs(dynamo_model, dynamo_inputs): + do_cudagraphs = BoxedBool(True) + boxed_device_index = BoxedDeviceIndex(None) + + def forward_cudagraphs(aot_model, aot_inputs, is_inference=False): + interp = boxed_nop(aot_model, aot_inputs) + fixed = num_fw_fixed_arguments(len(dynamo_inputs), len(aot_inputs)) + if skip_msg := check_for_skip(aot_model, fixed): + BoxedBool.disable(do_cudagraphs) + perf_log.warning("skipping cudagraphs due to %s", skip_msg) + return interp + + boxed_device_index.set(get_device_index(aot_model)) + + out = cudagraphify_impl( + interp, + aot_inputs, + range(fixed), + device_index=boxed_device_index.value, + is_backward=False, + is_inference=False, + stack_traces=get_stack_traces(aot_model), + ) + out._boxed_call = True + return out + + def backward_cudagraphs(aot_model, aot_inputs): + interp = boxed_nop(aot_model, aot_inputs) + if not do_cudagraphs: + return aot_model + + fixed = count_tangents(aot_model) + if skip_msg := check_for_skip(aot_model, fixed): + perf_log.warning("skipping cudagraphs due to %s", skip_msg) + + # See [Backward Generation Handling] + manager = torch._inductor.cudagraph_trees.get_manager( + boxed_device_index.value, create_if_none_exists=False + ) + assert manager is not None + + def fn(inputs): + manager.set_to_running_backward() + return aot_model(inputs) + + fn._boxed_call = True + return fn + + out = cudagraphify_impl( + interp, + aot_inputs, + range(fixed), + device_index=get_device_index(aot_model), + is_backward=True, + is_inference=False, + stack_traces=get_stack_traces(aot_model), + ) + out._boxed_call = True + return out + + aot_cudagraphs = aot_autograd( + fw_compiler=forward_cudagraphs, + bw_compiler=backward_cudagraphs, + inference_compiler=functools.partial(forward_cudagraphs, is_inference=True), + 
keep_inference_input_mutations=torch._dynamo.config.cudagraph_backend_keep_input_mutation, + ) + return aot_cudagraphs(dynamo_model, dynamo_inputs) + + +class CudagraphsBackend: + compiler_name = "cudagraphs" + + @staticmethod + def reset(): + from torch._inductor.cudagraph_trees import reset_cudagraph_trees + + reset_cudagraph_trees() + + @staticmethod + def __call__(model, inputs): + return cudagraphs(model, inputs) + + +# aot_cudagraphs only applies CUDA graphs to the graph. It is also helpful +# for debugging and can serve as a perf baseline. +register_backend(name="cudagraphs", compiler_fn=CudagraphsBackend()) + + +def cudagraphs_inner(model, inputs, copy_outputs=True, copy_inputs=True): + """This isn't registered as a backend, but is used in some benchmarks""" + assert isinstance(inputs, (list, tuple)) + if copy_inputs: + static_inputs = [torch.zeros_like(x) for x in inputs] + else: + static_inputs = list(inputs) + + # warmup + torch.cuda.synchronize() + stream = torch.cuda.Stream() + stream.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(stream): + model(*inputs) + stream.synchronize() + torch.cuda.current_stream().wait_stream(stream) + torch.cuda.synchronize() + + # record + graph = torch.cuda.CUDAGraph() + with torch.cuda.graph(graph, stream=stream): + static_outputs = model(*static_inputs) + if not isinstance(static_outputs, (list, tuple)): + static_outputs = (static_outputs,) + + def run(*new_inputs): + assert len(static_inputs) == len(new_inputs) + if copy_inputs: + for dst, src in zip(static_inputs, new_inputs): + dst.copy_(src) + graph.replay() + if copy_outputs: + return [x.clone() for x in static_outputs] + else: + return static_outputs + + return run diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/debugging.py b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/debugging.py new file mode 100644 index 0000000000000000000000000000000000000000..41315e7a8950f2c781a141a352faf65a5c384851 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/debugging.py @@ -0,0 +1,289 @@ +# mypy: ignore-errors + +import dataclasses +import functools +from importlib import import_module +from typing import Any, List, Optional + +from functorch.compile import min_cut_rematerialization_partition + +import torch +from torch import _guards +from torch._functorch.compilers import ts_compile +from .common import aot_autograd +from .registry import register_debug_backend as register_backend + +""" +This file contains TorchDynamo backends intended for debugging uses. +""" + + +@register_backend +def eager(gm, fake_tensor_inputs): + return gm + + +@register_backend +def pre_dispatch_eager(gm, fake_tensor_inputs): + from torch.fx.experimental.proxy_tensor import make_fx + + def runnable_gm(*args): + return torch.fx.Interpreter(gm).run(*args) + + pre_dispatch_gm = make_fx(runnable_gm, pre_dispatch=True)(*fake_tensor_inputs) + pre_dispatch_gm.print_readable() + + return pre_dispatch_gm + + +@register_backend +def eager_debug(gm, fake_tensor_inputs): + from torch._subclasses.schema_check_mode import SchemaCheckMode + + # We could add more debugging bits here. + # Right now, this backend can be used to check for and error on + # custom dispatcher ops that have incorrect schemas. 
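# ----------------------------------------------------------------------------
# [Editor's sketch -- not part of this diff] The debugging backends defined in
# this file ("eager", "eager_debug", "aot_eager", ...) are selected by name
# through torch.compile.  A minimal, hedged usage example, assuming a recent
# PyTorch build:
#
#     import torch
#
#     def f(x):
#         return torch.relu(x).sum()
#
#     f_eager = torch.compile(f, backend="eager")      # Dynamo capture only, no compiler
#     f_aot = torch.compile(f, backend="aot_eager")    # adds AOT Autograd with a nop compiler
#     x = torch.randn(8, requires_grad=True)
#     f_aot(x).backward()                              # also exercises the nop-compiled backward
# ----------------------------------------------------------------------------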
+ def inner(*args): + with SchemaCheckMode(): + return torch.fx.Interpreter(gm).run(*args) + + return inner + + +@register_backend(name="ts") +def torchscript(gm, fake_tensor_inputs): + return torch.jit.script(gm) + + +# used boxed call to discard inputs when they are no longer needed +def boxed_nop(fx_g, example_inputs): + def run(args): + return torch.fx.Interpreter(fx_g).boxed_run(args) + + run._boxed_call = True + return run + + +# Useful for debugging purpose +# aot_eager uses AOT Autograd backend with nop compiler. It is helpful in debugging. +aot_eager = aot_autograd( + fw_compiler=boxed_nop, partition_fn=min_cut_rematerialization_partition +) +register_backend(name="aot_eager", compiler_fn=aot_eager) + +aot_eager_default_partitioner = aot_autograd(fw_compiler=boxed_nop) +register_backend( + name="aot_eager_default_partitioner", compiler_fn=aot_eager_default_partitioner +) + +# Uses TorchInductor AOT Autograd decomps and partitioner to isolate aot vs +# inductor problems. +# aot_eager_decomp_partition just replaces the inductor compiler with nop to help +# isolate inductor vs aot_eager errors +aot_eager_decomp_partition = aot_autograd( + # these are taken from memory_efficient_fusion() + fw_compiler=boxed_nop, + bw_compiler=boxed_nop, + # NB: lambda here is to delay import of inductor + decompositions=lambda: import_module( + "torch._inductor.compile_fx" + ).select_decomp_table(), + partition_fn=functools.partial( + min_cut_rematerialization_partition, compiler="inductor" + ), +) +register_backend( + name="aot_eager_decomp_partition", compiler_fn=aot_eager_decomp_partition +) + +# AOT Autograd with torchscript backend. Default partitioner. +# aot_ts uses torchscript backend. We can use this with both nnc and nvfuser +# by using the relevant fuser with torch.jit.fuser(...) +aot_ts = aot_autograd(fw_compiler=ts_compile) +register_backend(name="aot_ts", compiler_fn=aot_ts) + +# These buggy backends are used for inducing bugs so that we can test +# our repro extraction / minifier scripts + + +class ReluCompileError(Exception): + pass + + +class TestingOnlyCompileError(Exception): + pass + + +@register_backend +def relu_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs): + for node in gm.graph.nodes: + if node.target == torch.relu: + raise ReluCompileError() + return gm + + +@register_backend +def relu_runtime_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs): + for node in gm.graph.nodes: + if node.target == torch.relu: + node.target = torch._assert + node.args = (False, "ReluRuntimeError") + gm.recompile() + return gm + + +@register_backend +def relu_accuracy_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs): + for node in gm.graph.nodes: + if node.target == torch.relu: + node.target = torch.add + node.args = (node.args[0], 1) + gm.recompile() + + return gm + + +@register_backend +def non_leaf_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs): + # Require at least one non-trivial thing in the graph, + # see https://github.com/pytorch/pytorch/issues/102898 + for node in gm.graph.nodes: + if node.op == "call_function": + break + else: + return gm + for t in example_inputs: + if not t.is_leaf: + raise TestingOnlyCompileError() + return gm + + +@dataclasses.dataclass +class ExplainOutput: + """ + This is the output of :func:`torch._dynamo.explain()` + There is no reason to create this class directly. 
+ """ + + graphs: List[torch.fx.GraphModule] + graph_count: int + graph_break_count: int + break_reasons: List[ + Any + ] # Type is GraphCompileReason but doesn't matter for this purpose + op_count: int + ops_per_graph: Optional[List[torch.fx.Node]] = None + out_guards: Optional[List[_guards.Guard]] = None + compile_times: Optional[str] = None + + def __str__(self): + output = f"Graph Count: {self.graph_count}\n" + output += f"Graph Break Count: {self.graph_break_count}\n" + output += f"Op Count: {self.op_count}\n" + + output += "Break Reasons:\n" + for idx, break_reason in enumerate(self.break_reasons): + output += f" Break Reason {idx+1}:\n" + output += f" Reason: {break_reason.reason}\n" + output += " User Stack:\n" + for frame_summary in break_reason.user_stack: + output += f" {frame_summary}\n" + + if self.ops_per_graph is not None: + output += "Ops per Graph:\n" + for idx, ops in enumerate(self.ops_per_graph): + output += f" Ops {idx+1}:\n" + for op in ops: + output += f" {op}\n" + + if self.out_guards is not None: + output += "Out Guards:\n" + for i, guard in enumerate(self.out_guards): + output += f" Guard {i+1}:\n" + output += f" {str(guard)}" + + if self.compile_times is not None: + output += f"Compile Times: {self.compile_times}\n" + return output + + +def _explain_graph_detail( + gm: torch.fx.GraphModule, graphs, op_count, ops_per_graph, break_reasons +): + """ + This function is a utility which processes a torch.fx.GraphModule and + accumulates information about its ops, graph breaks, and other details. It + is intended to be used by the ExplainWithBackend class and + `torch._dynamo.explain()` to provide details from Dynamo's graph capture. + + Parameters: + gm (torch.fx.GraphModule): The GraphModule to be processed. + graphs (list): A list that accumulates all the GraphModules processed. + op_count (int): The total count of operations in all GraphModules processed so far. + ops_per_graph (list): A list that accumulates the operations of each GraphModule. + break_reasons (list): A list that accumulates the reasons for breaks in each GraphModule. + + Returns: + tuple: A tuple containing the processed GraphModule, the updated lists of graphs, + operations per graph, and break reasons, and the updated operation count. + """ + graphs.append(gm) + ops = [node.target for node in gm.graph.nodes if node.op == "call_function"] + op_count += len(ops) + ops_per_graph.append(ops) + if gm.compile_subgraph_reason.graph_break: + break_reasons.append(gm.compile_subgraph_reason) + + return gm, graphs, op_count, ops_per_graph, break_reasons + + +class ExplainWithBackend: + """ + This class is intended to be used as a backend for `torch.compile`. It is + composable with other backends. When used in this way, it accumulates + information about graph breaks, ops, and other info and provides a string + representation summarizing this information. + + Attributes: + backend (str): The name of the backend to use for optimization. + graphs (list): A list of the graphs captured by TorchDynamo. + op_count (int): The total number of operations in all optimized graphs. + break_reasons (list): A list of graph break reasons with stack traces. 
+ + Example Usage: + def fn(x): + x = torch.sigmoid(x) + return x + + torch._dynamo.reset() + eb = ExplainWithBackend("inductor") + optimized_fn = torch.compile(fn, backend=eb) + result = optimized_fn(torch.randn(5)) + print(eb.output()) + """ + + def __init__(self, backend): + from .registry import lookup_backend + + self.backend = lookup_backend(backend) + self.graphs = [] + self.op_count = 0 + self.break_reasons = [] + + def __call__(self, gm: torch.fx.GraphModule, example_inputs): + gm, self.graphs, self.op_count, _, self.break_reasons = _explain_graph_detail( + gm, self.graphs, self.op_count, [], self.break_reasons + ) + return self.backend(gm, example_inputs) + + def output(self) -> ExplainOutput: + graph_count = len(self.graphs) + output = ExplainOutput( + self.graphs, + graph_count, + graph_count - 1, + self.break_reasons, + self.op_count, + ) + + return output diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/distributed.py b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..291c024dbe9d82904779e3cf2addaa0cf88341dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/distributed.py @@ -0,0 +1,612 @@ +# mypy: ignore-errors + +import logging +import traceback +from dataclasses import dataclass, field +from typing import Any, List, Optional +from unittest import mock + +import torch +from torch import fx +from torch._dynamo.output_graph import GraphCompileReason +from torch._dynamo.utils import deepcopy_to_fake_tensor, detect_fake_mode +from torch._logging import trace_structured +from torch.fx.node import Node + +# Regular log messages should go through 'log'. +# ddp_graph_log is a separate artifact logger reserved for dumping graphs. +# See docs/source/logging.rst for more info. 
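# ----------------------------------------------------------------------------
# [Editor's sketch -- not part of this diff] The DDPOptimizer defined later in
# this file is applied automatically when a DistributedDataParallel-wrapped
# module is compiled with torch._dynamo.config.optimize_ddp enabled (its
# default).  A minimal single-process illustration, assuming a gloo-capable
# environment; the address, port, and backend choice are arbitrary:
#
#     import os
#     import torch
#     import torch.distributed as dist
#     from torch.nn.parallel import DistributedDataParallel as DDP
#
#     os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
#     os.environ.setdefault("MASTER_PORT", "29500")
#     dist.init_process_group("gloo", rank=0, world_size=1)
#
#     model = DDP(torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU()))
#     compiled = torch.compile(model, backend="aot_eager")
#     compiled(torch.randn(2, 8)).sum().backward()   # graph splits follow DDP bucket caps
#     dist.destroy_process_group()
# ----------------------------------------------------------------------------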
+log = logging.getLogger(__name__) +ddp_graph_log = torch._logging.getArtifactLogger(__name__, "ddp_graphs") + + +def args_str(args): + # a debug helper + if torch.is_tensor(args): + return f"T[{args.shape}]" + elif isinstance(args, tuple): + return f"tuple({', '.join([args_str(x) for x in args])})" + elif isinstance(args, list): + return f"list({', '.join([args_str(x) for x in args])})" + else: + return str(args) + + +@dataclass +class Bucket: + size: int = 0 + params: List[str] = field(default_factory=list) + nodes: List[fx.Node] = field(default_factory=list) + + # param_ids is just used for unit testing + param_ids: List = field(default_factory=list) + + # keep track of any buckets that were extended for logging purposes + opcount_increased_to_capture_external_output: int = 0 + paramsize_before_opcount_increase: int = 0 + + +def bucket_has_external_output(bucket: Bucket) -> bool: + nodes_in_bucket = set() + # we want to iterate in reverse order, but clumsi-luckily the bucket.nodes list was already created backwards + # so we don't reverse it here + for node in bucket.nodes: + # assume node.op != output, since those are filtered in the original iteration + nodes_in_bucket.add(node) + for user in node.users: + if user not in nodes_in_bucket: + return True + return False + + +def pretty_print_buckets(buckets: List[Bucket], bucket_bytes_cap: int): + headers = ("Index", "Size (b)", "Param Names") + rows = [] + extended_buckets = [] + for idx, bucket in enumerate(reversed(buckets)): + if len(bucket.params) > 0: + rows.append((idx, bucket.size, bucket.params[0])) + for param in bucket.params[1:]: + rows.append((None, None, param)) + if bucket.opcount_increased_to_capture_external_output > 0: + extended_buckets.append( + ( + idx, + bucket.opcount_increased_to_capture_external_output, + bucket.size - bucket.paramsize_before_opcount_increase, + ) + ) + + if len(rows): + log.info( + "\nDDPOptimizer used bucket cap %s and created %d buckets. Enable debug logs for detailed bucket info.", + bucket_bytes_cap, + len(buckets), + ) + + if len(extended_buckets): + log.warning( + "Some buckets were extended beyond their requested parameter capacities" + " in order to ensure each subgraph has an output node, required for fx graph partitioning." + " This can be the case when a subgraph would have only contained nodes performing inplace mutation," + " and returning no logical outputs. This should not be a problem, unless it results in too few graph" + " partitions for optimal DDP performance." + ) + + try: + from tabulate import tabulate + + log.debug( + "\nDDPOptimizer produced the following bucket assignments:\n%s", + tabulate(rows, headers=headers, tablefmt="simple_grid"), + ) + + if len(extended_buckets): + log.warning( + "DDPOptimizer extended these buckets to ensure per-subgraph output nodes:\n%s", + tabulate( + extended_buckets, + headers=("Index", "Extra Ops", "Extra Param Size (b)"), + tablefmt="simple_grid", + ), + ) + except ImportError: + log.debug( + "Please `pip install tabulate` in order to display ddp bucket sizes and diagnostic information." 
+ ) + else: + log.debug("DDPOptimizer captured no parameters and did not split this graph.") + + +def has_higher_order_op(gm): + # Check if there is a higher order op in the graph + for node in gm.graph.nodes: + if node.op == "get_attr": + maybe_param = getattr(gm, node.target) + if isinstance(maybe_param, torch.fx.GraphModule): + return True + return False + + +# 3 (lazy compile): Replace submodules with lazily compiling submodule +class SubmoduleReplacer(torch.fx.interpreter.Interpreter): + def __init__(self, module, compiler): + super().__init__(module) + self.compiler = compiler + + def lazily_compiled_submod(self, input_mod): + """ + Create a wrapper around submodules which: + - lazily compiles each of the partitioned submodules using the user-provided compiler + - unpacks singleton tuples/lists into flat arg + """ + + class LazilyCompiledModule(torch.nn.Module): + def __init__(self, submod, compiler, unwrap_singleton_tuple): + super().__init__() + self.submod = submod + self.compiler = compiler + self.compiled = False + self.unwrap_singleton_tuple = unwrap_singleton_tuple + + def forward(self, *args): + if not self.compiled: + # First compile with args as example_inputs + # These args will be fakeified if using Inductor/AOTAutograd + new_submod = self.compiler(self.submod, args) + del self.submod + self.submod = new_submod + self.compiled = True + self.compiler = None + + x = self.submod(*args) + # we must let 'input_mod' return a tuple, to make AOT happy. + # (aot_autograd compile_fn literally requires that the output of a graph it compiles is a tuple). + # however, we don't acutally want this tuple to be returned, since the fx logic that calls the submod + # will again wrap outputs from the submod in a tuple. So we unwrap it, and count on it being re-wrapped + if self.unwrap_singleton_tuple and isinstance(x, (tuple, list)): + return x[0] + return x + + unwrap_singleton_tuple = False + for sn in input_mod.graph.nodes: + if sn.op == "output": + if not isinstance(sn.args[0], tuple): + unwrap_singleton_tuple = True + sn.args = (sn.args,) + + input_mod.recompile() + input_mod.compile_subgraph_reason = GraphCompileReason( + "DDPOptimizer intentional graph-break (See Note [DDPOptimizer])." + " Set `torch._dynamo.config.optimize_ddp = False` to disable.", + [ + # it's close to useless to get a real stacktrace here, and quite verbose. + traceback.FrameSummary(__file__, 0, DDPOptimizer), + ], + ) + wrapper = LazilyCompiledModule( + input_mod, + self.compiler, + unwrap_singleton_tuple, + ) + return wrapper + + # We replace the submodules with lazy submodules which compile + # the corresponding submodules when they are run with real values + # Always returns `None` - we do not need to propagate values in order + # to replace submodules. + def run_node(self, n: Node) -> Any: + if n.op == "call_module": + real_mod = self.fetch_attr(n.target) + + ddp_graph_log.debug("\n---%s graph---\n%s", n.target, real_mod.graph) + + assert len(n.kwargs) == 0, "We assume only args for these modules" + lazily_compiled_submod = self.lazily_compiled_submod(real_mod) + + # We update the original (outer) graph with a call into the compiled module + # instead of the uncompiled one. 
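# ----------------------------------------------------------------------------
# [Editor's sketch -- not part of this diff] The LazilyCompiledModule wrapper
# above defers compilation of each partitioned submodule until its first
# forward call.  The pattern, reduced to its core (class name here is
# illustrative, not the real wrapper):
#
#     class LazyCompiled(torch.nn.Module):
#         def __init__(self, submod, compiler):
#             super().__init__()
#             self.submod, self.compiler, self.compiled = submod, compiler, False
#
#         def forward(self, *args):
#             if not self.compiled:
#                 # the first real args double as example inputs for the compiler
#                 self.submod = self.compiler(self.submod, args)
#                 self.compiled, self.compiler = True, None
#             return self.submod(*args)
# ----------------------------------------------------------------------------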
+ self.module.delete_submodule(n.target) + n.target = "compiled_" + n.target + self.module.add_submodule(n.target, lazily_compiled_submod) + + +# 3 (no lazy compile): compile each of the partitioned submodules using the user-provided compiler +class SubmodCompiler(torch.fx.interpreter.Interpreter): + def __init__(self, module, compiler, fake_mode): + super().__init__(module) + self.compiler = compiler + self.fake_mode = fake_mode + + def compile_submod(self, input_mod, args, kwargs): + """ + Compile the submodule, + using a wrapper to make sure its output is always a tuple, + which is required by AotAutograd based compilers + """ + assert len(kwargs) == 0, "We assume only args for these modules" + + class WrapperModule(torch.nn.Module): + def __init__(self, submod, unwrap_singleton_tuple): + super().__init__() + self.submod = submod + self.unwrap_singleton_tuple = unwrap_singleton_tuple + + def forward(self, *args): + x = self.submod(*args) + # TODO(whc) + # for some reason the isinstance check is necessary if I split one node per submod + # - even though I supposedly wrapped the output in a tuple in those cases, the real + # compiled module was still returning a tensor + if self.unwrap_singleton_tuple and isinstance(x, (tuple, list)): + return x[0] + return x + + unwrap_singleton_tuple = False + for sn in input_mod.graph.nodes: + if sn.op == "output": + if not isinstance(sn.args[0], tuple): + unwrap_singleton_tuple = True + sn.args = (sn.args,) + + input_mod.recompile() + input_mod.compile_subgraph_reason = GraphCompileReason( + "DDPOptimizer intentional graph-break (See Note [DDPOptimizer])." + " Set `torch._dynamo.config.optimize_ddp = False` to disable.", + [ + # it's close to useless to get a real stacktrace here, and quite verbose. + traceback.FrameSummary(__file__, 0, DDPOptimizer), + ], + ) + + wrapper = WrapperModule( + self.compiler(input_mod, args), + unwrap_singleton_tuple, + ) + return wrapper + + # Note: + # + # The way distributed works today around fake tensors can be somewhat confusing. + # Some of these codepaths are shared in both runtime, and compile time. The presence + # of a fake_mode, read off of fake tensor inputs, dictates how we will operate. + # + # A few things to keep in mind: + # + # 1) We invoke `compile_submod` with a real module. The output of that gets stored + # on the graph via `self.module.add_submodule(n.target, compiled_submod_real)`. + # + # 2) When running a call_module targeted node, if we have a fake_mode, we fakify the + # module we got from self.fetch_attr(n.target). Regardless of fake_mode, we then execute it. + # + # 3) Fake tensors should always be around during compile time. + # + # 4) Fake tensors should never be around at runtime. + # + # 5) We end up with a compilation mode that takes a real submodule and fake tensors, + # to match what aot_autograd expects. 
See Note: [Fake Modules and AOTAutograd] + def run_node(self, n: Node) -> Any: + args, kwargs = self.fetch_args_kwargs_from_env(n) + new_args = [] + assert self.fake_mode + for arg in args: + if isinstance(arg, torch.Tensor) and not isinstance( + arg, torch._subclasses.FakeTensor + ): + new_args.append(torch._dynamo.utils.to_fake_tensor(arg, self.fake_mode)) + else: + new_args.append(arg) + + log.debug("run_node %s, %s got args %s", n.op, n.target, args_str(args)) + assert isinstance(args, tuple) + assert isinstance(kwargs, dict) + + if n.op == "call_module": + real_mod = self.fetch_attr(n.target) + if self.fake_mode: + curr_submod = deepcopy_to_fake_tensor(real_mod, self.fake_mode) + else: + curr_submod = real_mod + + ddp_graph_log.debug("\n---%s graph---\n%s", n.target, curr_submod.graph) + + # When calling the compiler on the submod, inputs (new_args) are expected to + # be FakeTensors already since Dynamo would have made them FakeTensors in the + # non-DDP flow. However, the parameters are _not_ expected to be FakeTensors, + # since this wrapping happens during compilation + + # Note: Returning Fake Tensors on First AOT Autograd Call + # + # Inductor will optimize strides of outputs when it deems it profitable. + # For instance, converting to channels last. When we split the graph here + # into multiple inductor compilations, we need to make sure that the + # output strides of one compilation is appropriately passed to the subsequent + # compilations. However, the mapping from inductor output to dynamo output + # is non-trivial due to aot_autograd's deduping, de-aliasing, mutation, re-writing, + # subclass handling, etc. In order to replay all this logic we set a flag such that + # the first invocation of inductor in aot_autograd will return Fake Tensors with + # appropriate strides. Then, all of aot autograd's runtime logic is replayed. + # This gives us the appropriately strided outputs here which will reflect runtime strides. + + class FakeifyFirstAOTInvocationGuard: + def __init__(self): + self.tc = torch._guards.TracingContext.try_get() + assert self.tc + torch._guards.TracingContext.try_get().fakify_first_call = True + + def __del__(self): + self.tc.fakify_first_call = False + + # For aot_eager and other backends, tracing context is not set + has_tracing_context = torch._guards.TracingContext.try_get() is not None + if has_tracing_context: + g = FakeifyFirstAOTInvocationGuard() + + from torch._dynamo.utils import counters + + init = counters["aot_autograd"]["total"] + compiled_submod_real = self.compile_submod(real_mod, new_args, kwargs) + + # TODO - better way of doing this? + # Only aot autograd handles fakifying first call + invoked_aot_autograd = init != counters["aot_autograd"]["total"] + + # We update the original (outer) graph with a call into the compiled module + # instead of the uncompiled one. 
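# ----------------------------------------------------------------------------
# [Editor's sketch -- not part of this diff] The fakification step above boils
# down to: real tensors flowing between split submodules are replaced by
# FakeTensors so the next submodule can be compiled without touching real
# data.  In isolation, using the mode-level API closely related to the
# torch._dynamo.utils.to_fake_tensor helper used above (illustrative only):
#
#     import torch
#     from torch._subclasses.fake_tensor import FakeTensorMode
#
#     fake_mode = FakeTensorMode()
#     real = torch.randn(4, 4)
#     fake = fake_mode.from_tensor(real)   # same shape/dtype/device metadata, no real data
#     assert isinstance(fake, torch._subclasses.FakeTensor)
# ----------------------------------------------------------------------------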
+ self.module.delete_submodule(n.target) + n.target = "compiled_" + n.target + self.module.add_submodule(n.target, compiled_submod_real) + + # Finally, we have to produce inputs for use compiling the next submodule, + # and these need to be FakeTensors, so we execute the module under fake_mode + # Because parameters are not fake we patch fake tensor mode to allow non fake inputs + with self.fake_mode, mock.patch.object( + self.fake_mode, "allow_non_fake_inputs", True + ): + if has_tracing_context and invoked_aot_autograd: + out = compiled_submod_real(*new_args, **kwargs) + # output should be fake or subclass + assert all( + (not isinstance(t, torch.Tensor) or type(t) is not torch.Tensor) + for t in (out if isinstance(out, (list, tuple)) else [out]) + ) + return out + else: + return curr_submod(*new_args, **kwargs) + else: + # placeholder or output nodes don't need to get compiled, just executed + return getattr(self, n.op)(n.target, new_args, kwargs) + + +class DDPOptimizer: + + """Note [DDPOptimizer] + DDPOptimizer applies when dynamo compiles models wrapped in DistributedDataParallel (DDP), + breaking the dynamo graph into chunks to compile separately, with the breaks aligning to + the boundaries of gradient-allreduce buckets chosen by DDP. + + Background/Motivation + - DDP uses allreduce collectives to synchronize partial gradients computed on different workers + - DDP groups gradient allreduces into 'buckets' to optimize communication efficiency of all-reduce + - Parameters grouped into buckets are assumed to be adjacent in time, so they become ready + at around the same time during backward and thus can share the same allreduce efficiently + - Allreduces must overlap with backward compute for optimal training performance + - DDP schedules allreduces using 'hooks' fired from the c++ autograd engine in pytorch, which + operates when individual grads become 'ready' + - Dynamo+AOTAutograd produces a single fused graph that runs 'atomically' from the perspective of the + autograd engine, such that all gradients become 'ready' at the same time. Hooks fire after the whole + fused backward function executes, preventing any overlap of compute and communication + + Algorithm + - DDPOptimizer starts off with an FX graph traced by dynamo which represents forward. It can traverse + this graph in reverse order to determine the true order that gradients will become ready during backward. + - Parameter sizes are counted in reverse order, up to a bucket size limit, at which point a new bucket is started + and a graph break introduced + - Each of the subgraphs is compiled by the compiler provided to dynamo by the user, and then fused back together + into an outer module that is returned to the user + + Notes + - It would be better to enforce (by adding an API to DDP) that the bucket splits chosen here are used by DDP, + and that DDP does not need to detect or optimize bucket order by observing execution at runtime, as it does + in eager. + - If Dynamo can't capture a whole graph for the portion of the model wrapped by DDP, this algorithm will currently + produce splits that do not necessarily align with the buckets used by DDP. This should result in performance + degradation approaching the baseline case where graph-splits are not used, but not worse. 
+ - If the backend compiler fails to compile a single subgraph, it will execute eagerly despite the rest of the + subgraphs being compiled + - DDP has a 'parameters_and_buffers_to_ignore' field, which DDPOptimizer attempts to honor by reading markers + left by DDP on individual parameters. In cases where other transformations, such as reparameterization, are + also used, the ignore markers could be lost. If DDPOptimizer fails to ignore a parameter ignored by DDP, + it is not catastrophic but could impact performance by choosing sub-optimal bucket splits. + - DDPOptimizer always ignores all buffers, regardless of their ignore flag, since buffers do not require gradients, + and therefore aren't allreduced by DDP. (They are broadcast during forward, but this is not covered by + DDPOptimizer) + + Debugging + - Generally, it is easiest to debug DDPOptimizer in a single process program, using pdb. + - In many cases, the log messages are helpful (they show bucket size assignments)- + just set TORCH_LOGS env to include any of 'dynamo', 'distributed', or 'dist_ddp'. + - See `benchmarks/dynamo/distributed.py` for a simple harness that will run a toy model or a torchbench model + in a single process (or with torchrun, in multiple processes) + + Args: + bucket_bytes_cap (int): Controls the size of buckets, in bytes, used to determine graphbreaks. Should be + set to match the equivalent parameter on the original DDP module. + + backend_compile_fn (callable): A dynamo compiler function, to be invoked to compile each subgraph. + + first_bucket_cap (int): Controls the size of the first bucket. Should match DDP's first bucket cap. DDP + special-cases the first bucket size since it is sometimes optimal to start a small allreduce early. + + """ + + def __init__( + self, + bucket_bytes_cap: int, + backend_compile_fn, + first_bucket_cap: Optional[int] = None, + ): + if first_bucket_cap is not None: + self.first_bucket_cap = first_bucket_cap + elif torch.distributed.is_available(): + # this constant comes from C10D lib which is not always built + self.first_bucket_cap = torch.distributed._DEFAULT_FIRST_BUCKET_BYTES + else: + self.first_bucket_cap = bucket_bytes_cap + + self.bucket_bytes_cap = bucket_bytes_cap + assert ( + self.first_bucket_cap <= self.bucket_bytes_cap + ), "First bucket should be smaller/equal to other buckets to get comms warmed up ASAP" + + self.backend_compile_fn = backend_compile_fn + + def _ignore_parameter(self, parameter): + return hasattr(parameter, "_ddp_ignored") and parameter._ddp_ignored + + def compile_fn(self, gm: fx.GraphModule, example_inputs: List[torch.Tensor]): + """ + Implements graph splitting, first determining a set of of buckets by counting + parameter sizes in reverse graph order, then invoking the user/backend compiler + to compile each subgraph. Finally, stiches compiled graphs into one graphmodule + and returns its callable. + """ + if has_higher_order_op(gm): + # This indicates presence of a higher order op. For now, we + # have no way to break the higher order op into two buckets. + # Allowing higher order ops in the graph also requires + # changes in the split_module, becuase graph splitter + # currently assumes that all the args of all ops are + # tensors, but in the case of higher order ops, it could be + # a graph module. As a workaround, we are shortcircuiting + raise NotImplementedError( + "DDPOptimizer backend: Found a higher order op in the graph. " + "This is not supported. Please turn off DDP optimizer using " + "torch._dynamo.config.optimize_ddp=False. 
Note that this can " + "cause performance degradation because there will be one bucket " + "for the entire Dynamo graph. Please refer to this issue - " + "https://github.com/pytorch/pytorch/issues/104674." + ) + + # 1: compute the partition map according to DDP bucket logic + buckets = [Bucket()] # (size, param_names) + for node in reversed(gm.graph.nodes): + if node.op in ("output", "placeholder"): + continue + + if ( + buckets[0].size >= self.bucket_bytes_cap + or len(buckets) == 1 + and buckets[0].size >= self.first_bucket_cap + ): + if bucket_has_external_output(buckets[0]): + buckets.insert(0, Bucket()) + else: + # continue building this bucket past the point of filling its parameter capacity, + # to increase chances it contains at least one node that is either a global output or + # passed as input to a subsequent graph + + if buckets[0].opcount_increased_to_capture_external_output == 0: + buckets[0].paramsize_before_opcount_increase = buckets[0].size + buckets[0].opcount_increased_to_capture_external_output += 1 + + if node.op == "call_module": + target = gm.get_submodule(node.target) + for name, param in target.named_parameters(): + if param.requires_grad and not self._ignore_parameter(param): + buckets[0].size += param.untyped_storage().nbytes() + buckets[0].params.append(f"{node.target}_{name}") + buckets[0].param_ids.append(id(param)) + elif node.op == "get_attr": + maybe_param = getattr(gm, node.target) + if maybe_param.requires_grad and not self._ignore_parameter( + maybe_param + ): + buckets[0].size += maybe_param.untyped_storage().nbytes() + buckets[0].params.append(node.target) + buckets[0].param_ids.append(id(maybe_param)) + + # All nodes have to be mapped to a bucket, even if they don't have their own params + # Ignored params still end up in buckets, we just don't count them towards the capacity + buckets[0].nodes.append(node) + + if len(buckets) > 1 and buckets[0].size == 0: + # we collected a small preamble graph with ops that don't include parameters, fuse it back + buckets[1].nodes.extend(buckets[0].nodes) + assert len(buckets[0].params) == 0, "Params should be empty if size is 0" + del buckets[0] + + # stash buckets for testing/debugging purposes + self.buckets = buckets + pretty_print_buckets(buckets, self.bucket_bytes_cap) + + if len(buckets) == 1: + # bypass split/fuse logic if there is only one bucket + return self.backend_compile_fn(gm, example_inputs) + + # 2: partition the graphmodule according to bucket capacity + partition_map = {} + for idx, b in enumerate(buckets): + for node in b.nodes: + partition_map[node] = idx + + split_gm = fx.passes.split_module.split_module( + gm, None, lambda node: partition_map[node] + ) + + debug_str = ( + f"\n---orig graph---\n{gm.graph}\n" + + f"\n---split graph---\n{split_gm.graph}\n" + ) + for name, module in split_gm.named_modules(): + if "." not in name and len(name): + # only print the submod graphs, not their children + debug_str += f"\n---{name} graph---\n{module.graph}\n" + debug_str += "\n---------------\n" + ddp_graph_log.debug(debug_str) + + trace_structured( + "optimize_ddp_split_graph", + payload_fn=lambda: split_gm.print_readable(print_output=False), + ) + for name, module in split_gm.named_modules(): + if "." 
not in name and len(name): + trace_structured( + "optimize_ddp_split_child", + lambda: {"name": name}, + payload_fn=lambda: module.print_readable(print_output=False), + ) + + # NOTE, we want to enable `optimize_ddp_lazy_compile` by default as soon as possible, + # becuase it will fix stride mismatch errors (see motivation: https://github.com/pytorch/pytorch/pull/114154). + # However, lazy compile currently causes shape mismatch in other cases (`test_graph_split_inductor_transpose`) + # and we need to fix them before we can enable it by default. + if not torch._dynamo.config.optimize_ddp_lazy_compile: + # Today, optimize_ddp=True and keep_output_stride=False can lead to silent + # correctness issues. The problem is that ddp_optimizer works by partitioning + # the dynamo graph, sending each subgraph through aot autograd to inductor, + # and creates example inputs by eagerly interpreting each subgraph to get + # an output that with the same metadata that we'd get from eager mode. + # This is a problem though, for torch._inductor.config.keep_output_stride. + # The above config can cause the outputs of the first graph to have + # **different** strides from eager, causing the inputs that we pass + # to the second graph to be wrong. + # To really fix this, we would need to faithfully ask inductor + # what the outputs to each graph it expects are. + fake_mode = detect_fake_mode(example_inputs) + if fake_mode is None: + fake_mode = torch._subclasses.fake_tensor.FakeTensorMode() + + if torch._dynamo.config.optimize_ddp_lazy_compile: + submod_compiler = SubmoduleReplacer(split_gm, self.backend_compile_fn) + else: + submod_compiler = SubmodCompiler( + split_gm, self.backend_compile_fn, fake_mode + ) + submod_compiler.run(*example_inputs) + split_gm.recompile() + + ddp_graph_log.debug( + "\n---final graph---\n%s\n---------------\n", split_gm.graph + ) + return split_gm diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/inductor.py b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/inductor.py new file mode 100644 index 0000000000000000000000000000000000000000..fcdc8ba39c146a81a6712876995de05ebd7ba221 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/inductor.py @@ -0,0 +1,16 @@ +# mypy: ignore-errors + +import sys + +from torch._dynamo import register_backend + + +@register_backend +def inductor(*args, **kwargs): + if sys.platform == "win32": + raise RuntimeError("Windows not yet supported for inductor") + + # do import here to avoid loading inductor into memory when it is not used + from torch._inductor.compile_fx import compile_fx + + return compile_fx(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/onnxrt.py b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/onnxrt.py new file mode 100644 index 0000000000000000000000000000000000000000..cfaa053b4cb10227b1253d249cbcb70a6aa1dd0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/onnxrt.py @@ -0,0 +1,37 @@ +# mypy: ignore-errors + +# This backend is maintained by ONNX team. To direct issues +# to the right people, please tag related GitHub issues with `module: onnx`. 
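# ----------------------------------------------------------------------------
# [Editor's sketch -- not part of this diff] Typical guarded use of this
# backend, per the support check this file itself points users at:
#
#     import torch
#
#     if torch.onnx.is_onnxrt_backend_supported():
#         compiled = torch.compile(lambda x: x.sin(), backend="onnxrt")
#         print(compiled(torch.randn(3)))
#     else:
#         print("onnxrt backend unavailable: install numpy, onnx, onnxscript, onnxruntime-training")
# ----------------------------------------------------------------------------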
+# +# Maintainers' Github IDs: wschin, thiagocrepaldi, BowenBao, abock +from torch.onnx._internal.onnxruntime import ( + is_onnxrt_backend_supported, + torch_compile_backend, +) +from .registry import register_backend + + +def has_onnxruntime(): + # FIXME(abock): update test/dynamo/test_backends.py to call is_onnxrt_backend_supported() + return is_onnxrt_backend_supported() + + +if is_onnxrt_backend_supported(): + register_backend(name="onnxrt", compiler_fn=torch_compile_backend) +else: + + def information_displaying_backend(*args, **kwargs): + raise ImportError( + "onnxrt is not registered as a backend. " + "Please make sure all dependencies such as " + "numpy, onnx, onnxscript, and onnxruntime-training are installed. " + "Suggested procedure to fix dependency problem:\n" + " (1) pip or conda install numpy onnx onnxscript onnxruntime-training.\n" + " (2) Open a new python terminal.\n" + " (3) Call the API `torch.onnx.is_onnxrt_backend_supported()`:\n" + " (4) If it returns `True`, then you can use `onnxrt` backend.\n" + " (5) If it returns `False`, please execute the package importing section in " + "torch/onnx/_internal/onnxruntime.py under pdb line-by-line to see which import fails." + ) + + register_backend(name="onnxrt", compiler_fn=information_displaying_backend) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/registry.py b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..13cb47a50354e042f37da408db1b6135625bca41 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/registry.py @@ -0,0 +1,115 @@ +# mypy: ignore-errors + +import functools +import sys +from typing import Callable, Dict, List, Optional, Protocol, Sequence, Tuple + +import torch +from torch import fx + + +class CompiledFn(Protocol): + def __call__(self, *args: torch.Tensor) -> Tuple[torch.Tensor, ...]: + ... + + +CompilerFn = Callable[[fx.GraphModule, List[torch.Tensor]], CompiledFn] + +_BACKENDS: Dict[str, CompilerFn] = dict() + + +def register_backend( + compiler_fn: Optional[CompilerFn] = None, + name: Optional[str] = None, + tags: Sequence[str] = (), +): + """ + Decorator to add a given compiler to the registry to allow calling + `torch.compile` with string shorthand. Note: for projects not + imported by default, it might be easier to pass a function directly + as a backend and not use a string. 
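# Minimal sketch of the two wiring styles this docstring describes: register a
# compiler under a string name, or skip the registry and hand the callable
# straight to torch.compile. The toy backend below just prints the FX graph.
import torch
from torch._dynamo import register_backend

@register_backend
def print_and_run_eager(gm, example_inputs):
    gm.graph.print_tabular()      # inspect the captured FX graph
    return gm.forward             # hand execution back to eager

opt_by_name = torch.compile(lambda x: x * 2, backend="print_and_run_eager")
opt_by_callable = torch.compile(lambda x: x * 2, backend=print_and_run_eager)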
+ + Args: + compiler_fn: Callable taking a FX graph and fake tensor inputs + name: Optional name, defaults to `compiler_fn.__name__` + tags: Optional set of string tags to categorize backend with + """ + if compiler_fn is None: + # @register_backend(name="") syntax + return functools.partial(register_backend, name=name, tags=tags) + assert callable(compiler_fn) + name = name or compiler_fn.__name__ + assert name not in _BACKENDS, f"duplicate name: {name}" + _BACKENDS[name] = compiler_fn + compiler_fn._tags = tuple(tags) + return compiler_fn + + +register_debug_backend = functools.partial(register_backend, tags=("debug",)) +register_experimental_backend = functools.partial( + register_backend, tags=("experimental",) +) + + +def lookup_backend(compiler_fn): + """Expand backend strings to functions""" + if isinstance(compiler_fn, str): + if compiler_fn not in _BACKENDS: + _lazy_import() + if compiler_fn not in _BACKENDS: + _lazy_import_entry_point(compiler_fn) + if compiler_fn not in _BACKENDS: + from ..exc import InvalidBackend + + raise InvalidBackend(name=compiler_fn) + compiler_fn = _BACKENDS[compiler_fn] + return compiler_fn + + +def list_backends(exclude_tags=("debug", "experimental")) -> List[str]: + """ + Return valid strings that can be passed to: + + torch.compile(..., backend="name") + """ + _lazy_import() + exclude_tags = set(exclude_tags or ()) + return sorted( + [ + name + for name, backend in _BACKENDS.items() + if not exclude_tags.intersection(backend._tags) + ] + ) + + +@functools.lru_cache(None) +def _lazy_import(): + from .. import backends + from ..utils import import_submodule + + import_submodule(backends) + + from ..repro.after_dynamo import dynamo_minifier_backend + + assert dynamo_minifier_backend is not None + + +@functools.lru_cache(None) +def _lazy_import_entry_point(backend_name: str): + from importlib.metadata import entry_points + + compiler_fn = None + group_name = "torch_dynamo_backends" + if sys.version_info < (3, 10): + backend_eps = entry_points() + eps = [ep for ep in backend_eps.get(group_name, ()) if ep.name == backend_name] + if len(eps) > 0: + compiler_fn = eps[0].load() + else: + backend_eps = entry_points(group=group_name) + if backend_name in backend_eps.names: + compiler_fn = backend_eps[backend_name].load() + + if compiler_fn is not None and backend_name not in list_backends(tuple()): + register_backend(compiler_fn=compiler_fn, name=backend_name) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/tensorrt.py b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/tensorrt.py new file mode 100644 index 0000000000000000000000000000000000000000..1868919ea7621be35555c41c985e900614d50e63 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/tensorrt.py @@ -0,0 +1,14 @@ +# mypy: ignore-errors + +# import torch # type: ignore[import] +# from .common import device_from_inputs, fake_tensor_unsupported # type: ignore[import] +# from .registry import register_backend # type: ignore[import] + +""" +Placeholder for TensorRT backend for dynamo via torch-tensorrt +""" + +# @register_backend +# def tensorrt(gm, example_inputs): +# import torch_tensorrt # type: ignore[import] +# pass diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/torchxla.py b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/torchxla.py new file mode 100644 index 0000000000000000000000000000000000000000..6b5645d4b906e1781c44a31af078677e8f718abd --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/torchxla.py @@ -0,0 +1,75 @@ +# mypy: ignore-errors + +import logging +import warnings + +from functorch.compile import make_boxed_func + +from ..backends.common import aot_autograd +from .registry import register_backend, register_experimental_backend + +log = logging.getLogger(__name__) + + +@register_experimental_backend +def torchxla_trivial(gm, fake_tensor_inputs): + return gm + + +@register_experimental_backend +def torchxla_trace_once(model, fake_tensor_inputs): + warnings.warn( + "This backend will be deprecated in 2.2, please use `openxla` backend instead" + ) + + return xla_backend_helper(model, fake_tensor_inputs) + + +@register_backend +def openxla_eval(model, fake_tensor_inputs): + return xla_backend_helper(model, fake_tensor_inputs, boxed=False) + + +def openxla_eval_boxed(model, fake_tensor_inputs): + return xla_backend_helper(model, fake_tensor_inputs, boxed=True) + + +def xla_backend_helper(model, fake_tensor_inputs, boxed=False): + try: + import torch_xla.core.dynamo_bridge as bridge + except ImportError as e: + raise ImportError( + "Please follow the instruction in https://github.com/pytorch/xla#pytorchxla to install torch_xla" + ) from e + + compiled_graph = None + + def fwd(*args): + nonlocal model + nonlocal compiled_graph + if compiled_graph is None: + compiled_graph = bridge.extract_compiled_graph(model, args) + del model + return compiled_graph(*args) + + return make_boxed_func(fwd) if boxed else fwd + + +aot_torchxla_trivial = aot_autograd( + fw_compiler=torchxla_trivial, +) +register_experimental_backend( + name="aot_torchxla_trivial", compiler_fn=aot_torchxla_trivial +) + +aot_torchxla_trace_once = aot_autograd( + fw_compiler=torchxla_trace_once, +) +register_experimental_backend( + name="aot_torchxla_trace_once", compiler_fn=aot_torchxla_trace_once +) + +openxla = aot_autograd( + fw_compiler=openxla_eval_boxed, +) +register_backend(name="openxla", compiler_fn=openxla) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py new file mode 100644 index 0000000000000000000000000000000000000000..a4fe7b1736b2dbc2b3b1026b2c7d306787db5eef --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py @@ -0,0 +1,172 @@ +# mypy: ignore-errors + +import functools +import importlib +import logging +import os +import tempfile + +import torch +from .common import device_from_inputs, fake_tensor_unsupported + +from .registry import register_backend + +log = logging.getLogger(__name__) + + +@register_backend +@fake_tensor_unsupported +def tvm(gm, example_inputs, *, scheduler=None, trials=20000): + import tvm # type: ignore[import] + from tvm import relay # type: ignore[import] + from tvm.contrib import graph_executor # type: ignore[import] + + jit_mod = torch.jit.trace(gm, example_inputs) + device = device_from_inputs(example_inputs) + shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)] + example_outputs = gm(*example_inputs) + if len(example_outputs) == 0: + log.warning("Explicitly fall back to eager due to zero output") + return gm.forward + mod, params = relay.frontend.from_pytorch(jit_mod, shape_list) + if device.type == "cuda": + dev = tvm.cuda(device.index) + target = tvm.target.cuda() + else: + dev = tvm.cpu(0) + target = tvm.target.Target(llvm_target()) + + if scheduler is None: + scheduler = os.environ.get("TVM_SCHEDULER", None) + + if scheduler == "auto_scheduler": + from tvm 
import auto_scheduler + + log_file = tempfile.NamedTemporaryFile() + + if not os.path.exists(log_file): + tasks, task_weights = auto_scheduler.extract_tasks( + mod["main"], params, target + ) + for task in tasks: + print(task.compute_dag) + else: + print("No tasks") + if len(tasks) != 0: + tuner = auto_scheduler.TaskScheduler(tasks, task_weights) + if not os.path.exists(log_file): + assert trials > 0 + tune_option = auto_scheduler.TuningOptions( + num_measure_trials=trials, + measure_callbacks=[auto_scheduler.RecordToFile(log_file)], + early_stopping=2000, + ) + try: + tuner.tune(tune_option) + except Exception: + if os.path.exists(log_file): + os.unlink(log_file) + raise + + with auto_scheduler.ApplyHistoryBest(log_file): + with tvm.transform.PassContext( + opt_level=3, config={"relay.backend.use_auto_scheduler": True} + ): + lib = relay.build(mod, target=target, params=params) + elif scheduler == "meta_schedule": + from tvm import meta_schedule as ms + + with tempfile.TemporaryDirectory() as work_dir: + if device.type != "cuda": + # meta_schedule needs num-cores to be specified + # here we use the maximum core count + target = tvm.target.Target( + f"{llvm_target()} --num-cores {ms.utils.cpu_count(logical=False)}" + ) + # TODO(shingjan): This could be replaced by tvm.contrib.torch.optimize_torch + # once USE_PT_TVMDSOOP is updated and turned on by default in TVM. + database = ms.relay_integration.tune_relay( + mod=mod, + target=target, + work_dir=work_dir, + max_trials_global=20000, + num_trials_per_iter=64, + params=params, + strategy="evolutionary", + ) + lib = ms.relay_integration.compile_relay( + database=database, + mod=mod, + target=target, + params=params, + ) + elif scheduler == "default" or not scheduler: + # no autotuning + with tvm.transform.PassContext(opt_level=10): + lib = relay.build(mod, target=target, params=params) + else: + raise NotImplementedError( + "This tuning option is invalid/not implemented for torchdynamo's TVM-related backend. " + "There are three available options: default, auto_scheduler and meta_schedule." + ) + m = graph_executor.GraphModule(lib["default"](dev)) + + def to_torch_tensor(nd_tensor): + """A helper function to transfer a NDArray to torch.tensor.""" + if nd_tensor.dtype == "bool": + # DLPack does not support boolean so it can't be handled by + # torch.utils.dlpack.from_pack. Workaround by going through + # numpy, although this brings additional data copy overhead. 
+ return torch.from_numpy(nd_tensor.numpy()) + return torch.utils.dlpack.from_dlpack(nd_tensor.to_dlpack()) + + def to_tvm_tensor(torch_tensor): + """A helper function to transfer a torch.tensor to NDArray.""" + if torch_tensor.dtype == torch.bool: + # same reason as above, fallback to numpy conversion which + # could introduce data copy overhead + return tvm.nd.array(torch_tensor.cpu().numpy()) + return tvm.nd.from_dlpack(torch_tensor) + + def exec_tvm(*i_args): + args = [a.contiguous() for a in i_args] + shape_info, _ = m.get_input_info() + active_inputs = {name for name, _ in shape_info.items()} + for idx, arg in enumerate(args, 0): + if arg.dim() != 0: + if arg.requires_grad: + arg = arg.detach() + inp_name = f"inp_{idx}" + if inp_name not in active_inputs: + log.warning( + "input %s skipped as not found in tvm's runtime library", + inp_name, + ) + continue + m.set_input( + inp_name, + to_tvm_tensor(arg), + ) + m.run() + return [to_torch_tensor(m.get_output(i)) for i in range(m.get_num_outputs())] + + return exec_tvm + + +tvm_meta_schedule = functools.partial(tvm, scheduler="meta_schedule") +tvm_auto_scheduler = functools.partial(tvm, scheduler="auto_scheduler") + + +def has_tvm(): + try: + importlib.import_module("tvm") + return True + except ImportError: + return False + + +@functools.lru_cache(None) +def llvm_target(): + if "avx512" in open("/proc/cpuinfo").read(): + return "llvm -mcpu=skylake-avx512" + return "llvm -mcpu=core-avx2" diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py b/venv/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17481be90e17b746f4578efa058b45bedf78df8f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cd8cb75486f8018efd04f5dc7f1f0fec9ef1a39 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_dynamo.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_dynamo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47e5c3d47700a065a03635d36a585d080f75a243 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_dynamo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py b/venv/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py new file mode 100644 index 0000000000000000000000000000000000000000..d6d567f0c1c93d5b469b2ce9be8d29749fc03c85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py @@ -0,0 +1,932 @@ +import argparse +import copy +import functools +import io +import logging +import os +import shutil +import subprocess +import sys +import textwrap 
+import uuid +from importlib import import_module +from tempfile import TemporaryFile +from typing import Any, Callable, Dict, Union + +import torch +import torch.fx as fx +import torch.nn as nn +from torch._dynamo.debug_utils import ( + _cuda_system_info_comment, + AccuracyError, + backend_accuracy_fails, + BuckTargetWriter, + cast_to_fp64, + extra_imports, + generate_config_string, + helper_for_dump_minify, + InputReader, + InputWriter, + MAX_CONSTANT_NUMEL_INLINE, + minifier_dir, + NNModuleToString, + NopInputReader, + same_two_models, +) +from torch._dynamo.utils import clone_inputs, counters, same +from torch.fx.experimental.proxy_tensor import make_fx +from torch.fx.experimental.symbolic_shapes import ( + fx_placeholder_targets, + has_free_symbols, +) +from torch.hub import tqdm + +from .. import config + +log = logging.getLogger(__name__) + + +inductor_config = import_module("torch._inductor.config") +use_buck = inductor_config.is_fbcode() + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# MAIN ENTRY POINT +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def wrap_compiler_debug(unconfigured_compiler_fn, compiler_name: str): + """ + Minifier for Fx Graph modules after Aot Autograd has finished. We wrap both + forward and backward call separately with the backend compiler_fn - like + inductor or nvfuser. Intercepting after Aot Autograd presents neat + abstraction, where all the params are lifted as graph inputs, making it easy + to save the graph as a string. + """ + + @functools.wraps(unconfigured_compiler_fn) + def debug_wrapper(gm, example_inputs, **kwargs): + from torch._subclasses import FakeTensorMode + + compiler_fn = functools.partial(unconfigured_compiler_fn, **kwargs) + + from torch._functorch.aot_autograd import get_aot_graph_name + + graph_name = get_aot_graph_name() + + # TODO: why do we need to deepcopy the original graph? + orig_graph = copy.deepcopy(gm.graph) + assert config.repro_after in ("dynamo", "aot", None) + + try: + # Call the compiler_fn - which is either aot_autograd or inductor + # with fake inputs + inner_compiled_fn = compiler_fn(gm, example_inputs) + except Exception as e: + # TODO: Failures here are troublesome because no real inputs, + # need a different serialization strategy + if config.repro_after == "aot": + if config.repro_level == 1: + dump_compiler_graph_state( + fx.GraphModule(gm, orig_graph), + example_inputs, + compiler_name, + ) + elif config.repro_level == 2: + dump_to_minify( + fx.GraphModule(gm, orig_graph), + example_inputs, + compiler_name, + ) + log.error("CompilerError") + raise + + # We may run regular PyTorch compute that may trigger Dynamo, do NOT + # recursively attempt to accuracy minify in that case! + def deferred_for_real_inputs(real_inputs): + # This is a bit obscure: if we recursively try to accuracy minify + # the SAME function, this would trigger. But most of the time + # we should never hit this branch + if config.repro_after != "aot": + return inner_compiled_fn(real_inputs) + with config.patch(repro_after=None): + return inner_debug_fn(real_inputs) + + def inner_debug_fn(real_inputs): + """ + Aot Autograd fw_compiler and bw_compiler can have fake tensors. So, + example_inputs can be fake tensors. We can call compiler_fn (which is + inductor or nvfuser) with fake tensors but the actually compiled_fn + should be called with real tensors. Therefore, the actual invocation + is deferred. 
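# Hedged sketch of the metadata-capture step used a few lines below: real
# inputs are converted to FakeTensors so their shape/stride/dtype survive even
# after the compiled function consumes (clears) the real input list.
import torch
from torch._subclasses import FakeTensorMode

real_inputs = [torch.randn(2, 3), 7]
fake_mode = FakeTensorMode()
copy_tensor_attrs = [
    fake_mode.from_tensor(x) if isinstance(x, torch.Tensor) else x
    for x in real_inputs
]
# copy_tensor_attrs[0] has the same metadata as real_inputs[0], but no real storage.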
+ """ + # Copy the tensor attrs like shape, stride etc by converting to Fake Tensor + # because inductor clears the tensor list in its codegen. And example_inputs + # are available only for the first invocation. + fake_mode = FakeTensorMode() + copy_tensor_attrs = [ + fake_mode.from_tensor(x) if isinstance(x, torch.Tensor) else x + for x in real_inputs + ] + if config.repro_level == 3: + # Always dump the original module in case we have segfaults + dump_to_minify( + fx.GraphModule(gm, orig_graph), real_inputs, compiler_name + ) + + if config.repro_level == 4: + if compiler_name != "inductor": + raise NotImplementedError( + "Accuracy minification is supported for inductor only" + ) + if backend_aot_accuracy_fails(gm, real_inputs, compiler_fn): + log.warning( + "Accuracy failed for the AOT Autograd graph %s", graph_name + ) + dump_compiler_graph_state( + fx.GraphModule(gm, orig_graph), + real_inputs, + f"{compiler_name}_accuracy", + ) + dump_to_minify( + fx.GraphModule(gm, orig_graph), + real_inputs, + f"{compiler_name}_accuracy", + ) + raise AccuracyError("Bad accuracy detected") + else: + # Call the compiled function with real inputs + return inner_compiled_fn(real_inputs) + else: + try: + # Call the compiled function with real inputs + out = inner_compiled_fn(real_inputs) + # sync cuda kernels to ensure IMA detection + for arg in example_inputs: + if isinstance(arg, torch.Tensor) and arg.is_cuda: + torch.cuda.synchronize() + break + return out + except Exception as e: + if config.repro_level == 1: + dump_compiler_graph_state( + fx.GraphModule(gm, orig_graph), + copy_tensor_attrs, + compiler_name, + ) + elif config.repro_level == 2: + dump_to_minify( + fx.GraphModule(gm, orig_graph), + copy_tensor_attrs, + compiler_name, + ) + raise + + if config.repro_after == "aot": + compiled_fn = deferred_for_real_inputs + compiled_fn._boxed_call = True # type: ignore[attr-defined] + return compiled_fn + else: + return inner_compiled_fn + + return debug_wrapper + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# DUMP REPROS +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def generate_compiler_repro_string(gm, args, *, stable_output=False, save_dir=None): + model_str = textwrap.dedent( + f""" +import torch +from torch import tensor, device +import torch.fx as fx +from torch._dynamo.testing import rand_strided +from math import inf +import torch._inductor.inductor_prims + +{generate_config_string(stable_output=stable_output)} + +isolate_fails_code_str = None + +{extra_imports} + + """ + ) + if not stable_output: + model_str += f"# torch version: {torch.version.__version__}\n" + if hasattr(torch.version, "cuda"): + model_str += f"# torch cuda version: {torch.version.cuda}\n" + if hasattr(torch.version, "git_version"): + model_str += f"# torch git version: {torch.version.git_version}\n\n\n" + model_str += _cuda_system_info_comment() + + model_str += NNModuleToString.convert(gm) + + # get hint shape/stride when dynamic shape enabled + def hint_if_symint(x): + return tuple(i.node.hint if isinstance(i, torch.SymInt) else i for i in x) + + writer = InputWriter(save_dir) + for placeholder, arg in zip(fx_placeholder_targets(gm), args): + if isinstance(arg, (int, torch.SymInt)): + writer.symint(placeholder, arg) + elif isinstance(arg, torch.Tensor): + # TODO: improve these names with FQN + writer.tensor(placeholder, arg) + else: + raise TypeError(f"arg is neither SymInt/int nor torch.Tensor, {arg}") + + model_str += "\n".join(writer.lines()) + "\n" + + 
model_str += "mod = Repro()\n" + return model_str + + +def save_graph_repro( + fd, + gm, + args, + compiler_name, + *, + stable_output=False, + save_dir=None, + command="run", + accuracy=None, + tracing_mode=None, + check_str=None, +): + fd.write( + generate_compiler_repro_string( + gm, + args, + stable_output=stable_output, + save_dir=save_dir, + ) + ) + if accuracy is None: + accuracy = "_accuracy" in compiler_name + if tracing_mode is None: + tracing_mode = "real" + if any(has_free_symbols(a) for a in args): + tracing_mode = "symbolic" + fd.write("if __name__ == '__main__':\n") + fd.write(" from torch._dynamo.repro.after_aot import run_repro\n") + fd.write( + f" with torch.no_grad():\n" + f" run_repro(mod, load_args, accuracy={accuracy!r}, command={command!r}, " + f"save_dir={save_dir!r}, tracing_mode={tracing_mode!r}, check_str={check_str!r}" + ")\n" + ) + + +def dump_compiler_graph_state(gm, args, compiler_name, *, accuracy=None): + subdir = os.path.join(minifier_dir(), "checkpoints") + if not os.path.exists(subdir): + os.makedirs(subdir, exist_ok=True) + file_name = os.path.join(subdir, f"{len(gm.graph.nodes)}.py") + log.warning( + "Writing checkpoint with %s nodes to %s", len(gm.graph.nodes), file_name + ) + with open(file_name, "w") as fd: + save_graph_repro( + fd, gm, args, compiler_name, save_dir=subdir, accuracy=accuracy + ) + curdir = os.getcwd() + repro_path = os.path.join(curdir, "repro.py") + try: + shutil.copyfile(file_name, repro_path) + log.warning("Copying repro file for convenience to %s", repro_path) + if use_buck: + BuckTargetWriter(file_name).write() + except OSError: + log.warning("No write permissions for %s", repro_path) + pass + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# DUMP MINIFIER +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def dump_to_minify(gm, args, compiler_name: str): + out = io.StringIO() + # TODO: factor this out + subdir = os.path.join(minifier_dir(), "checkpoints") + if not os.path.exists(subdir): + os.makedirs(subdir, exist_ok=True) + save_graph_repro(out, gm, args, compiler_name, save_dir=subdir, command="minify") + return helper_for_dump_minify(out.getvalue()) + + +def isolate_fails( + fx_g, + args, + compiler_name: str, + env=None, + save_dir=None, + accuracy=None, + tracing_mode=None, + check_str=None, +): + if env is None: + env = {} + subdir = os.path.join(os.getcwd(), "isolate") + if not os.path.exists(subdir): + os.makedirs(subdir, exist_ok=True) + file_name = os.path.join(subdir, f"{str(uuid.uuid4())[:5]}.py") + with open(file_name, "w") as fd: + save_graph_repro( + fd, + fx_g, + args, + compiler_name, + save_dir=save_dir, + command="minifier-query", + accuracy=accuracy, + tracing_mode=tracing_mode, + check_str=check_str, + ) + # with open(file_name, "r") as fd: + # print(fd.read()) + new_env = os.environ.copy() + new_env = {**new_env, **env} + stdout, stderr = TemporaryFile(), TemporaryFile() + + if use_buck: + cmd = BuckTargetWriter(file_name).write(print_msg=False) + else: + cmd = ["python", file_name] + + p = subprocess.Popen( + cmd, + cwd=subdir, + stdout=stdout, + stderr=stderr, + env=new_env, + ) + p.wait() + + stdout.seek(0) + stderr.seek(0) + print( + textwrap.indent(stdout.read().decode("utf-8"), prefix=">> "), file=sys.stdout + ) + print( + textwrap.indent(stderr.read().decode("utf-8"), prefix=">> "), file=sys.stderr + ) + # print(f"Isolated test failed - {file_name}") + return p.returncode != 0 + + +# 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# MINIFIER TOOLS +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def inductor_fails(fx_g, args, check_str=None): + has_cuda = False + for arg in args: + if isinstance(arg, torch.Tensor) and arg.is_cuda: + has_cuda = True + break + + def sync(): + if has_cuda: + # Ensures that segfaults are surfaced + torch.cuda.synchronize() + + from torch._inductor.compile_fx import compile_fx_inner + + try: + result = fx_g(*args) + assert isinstance(result, (tuple, list)) + assert not any(isinstance(x, (tuple, list)) for x in result) + except Exception: + return False + + sync() + + try: + compile_mod = compile_fx_inner(fx_g, args) + compile_mod(args) + sync() + except Exception as e: + if check_str is not None and check_str not in repr(e): + return False + print(repr(e)) + return True + return False + + +def inductor_accuracy_fails( + fx_g, args, check_str=None, *, require_fp64=False, ignore_non_fp=False +): + from torch._inductor.compile_fx import compile_fx_inner + + return backend_aot_accuracy_fails( + fx_g, + args, + compile_fx_inner, + require_fp64=require_fp64, + ignore_non_fp=ignore_non_fp, + ) + + +backend_aot_accuracy_fails = functools.partial(backend_accuracy_fails, only_fwd=True) + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# REPRO MAIN +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def repro_common(options, mod, load_args): + # Invariant for graphs we generate with the repro script + assert not any(mod.named_parameters()) + for n, b in mod.named_buffers(): + if b.numel() > MAX_CONSTANT_NUMEL_INLINE: + log.warning( + "Constant %s was not serialized, generated random data instead. " + "If you think this is affecting you, please comment on " + "https://github.com/pytorch/pytorch/issues/100468", + n, + ) + + if not hasattr(load_args, "_version"): + log.warning( + "load_args does not have a _version attribute, please file a bug to PyTorch " + "and describe how you generate this repro script" + ) + else: + if load_args._version > 0: + log.warning( + "load_args is version %s, but this version of PyTorch only supports " + "version 0. We will try to run it anyway but there may be an incompatibility; " + "if so, try upgrading your version of PyTorch.", + load_args._version, + ) + + nop_reader = NopInputReader() + load_args(nop_reader) + + with tqdm(desc="Loading inputs", total=nop_reader.total) as pbar: + input_reader = InputReader(save_dir=options.save_dir, pbar=pbar) + load_args(input_reader) + args = input_reader.args + + # Turn mod into a GraphModule the slow way + # TODO: speed this up + mod = make_fx(mod, tracing_mode=options.tracing_mode)(*args) + + torch._inductor.config.generate_intermediate_hooks = True + + return mod, args + + +ACCURACY_FAILS: Dict[str, Callable[[nn.Module, Any], bool]] = { + "": inductor_fails, + # This might look inverted but it's not. 
strict_accuracy means "we will + # minify any time we see anything that diverges", whereas accuracy is more + # conservative, and will only minify if there is a meaningful fp64 + # divergence + "accuracy": functools.partial( + inductor_accuracy_fails, require_fp64=True, ignore_non_fp=True + ), + "strict_accuracy": inductor_accuracy_fails, +} + + +def repro_minifier_query(options, mod, load_args): + mod, args = repro_common(options, mod, load_args) + fail_fn = functools.partial( + ACCURACY_FAILS[options.accuracy], check_str=options.check_str + ) + if fail_fn(mod, args): + sys.exit(1) + else: + sys.exit(0) + + +def repro_minify(options, mod, load_args): + from functorch.compile import minifier + + mod, args = repro_common(options, mod, load_args) + compiler_name = "inductor_accuracy" if options.accuracy != "" else "inductor" + + favored_device = 1 if torch.cuda.device_count() >= 2 else 0 + env_variables = {"CUDA_VISIBLE_DEVICES": str(favored_device)} + + module_fails: Any + if options.isolate: + module_fails = functools.partial( + isolate_fails, + env=env_variables, + compiler_name=compiler_name, + save_dir=options.save_dir, + accuracy=options.accuracy, + tracing_mode=options.tracing_mode, + ) + else: + module_fails = ACCURACY_FAILS[options.accuracy] + + minifier( + mod, + args, + module_fails=functools.partial(module_fails, check_str=options.check_str), + dump_state=functools.partial( + dump_compiler_graph_state, compiler_name=compiler_name + ), + save_dir=options.save_dir, + offload_to_disk=options.offload_to_disk, + skip_offload=options.skip_saving_eager_intermediates, + skip_sanity=options.skip_sanity, + max_granularity=options.max_granularity, + ) + + +def repro_analyze(options, mod, load_args): + from torch._inductor.compile_fx import compile_fx_inner + from torch._inductor.hooks import intermediate_hook + + mod, args = repro_common(options, mod, load_args) + + # TODO: The logic for cloning inputs/models here is intentionally + # modeled off of run_fwd_maybe_bwd, but arguably it is better not to + # clone inputs (as you are doubling your effective GPU memory usage). + # It is certainly faster though! It probably makes sense to let the + # user specify the offload strategy. 
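# Hedged sketch of the clone-vs-memory tradeoff the TODO above refers to:
# clone_inputs (imported at the top of this file) copies every tensor, keeping
# the originals reusable across runs at the cost of roughly double the memory.
import torch
from torch._dynamo.utils import clone_inputs

args = [torch.randn(1024, 1024)]
args_copy = clone_inputs(args)     # safe to hand to a run that consumes its inputs
assert args_copy[0].data_ptr() != args[0].data_ptr()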
+ + with tqdm(desc="Compiling"): + compiled = compile_fx_inner(mod, args) + total = counters["inductor"]["intermediate_hooks"] + + known_names = set() + + def save_hook(name, val): + known_names.add(name) + if not options.skip_saving_inductor_intermediates: + writer.write_tensor(os.path.join("inductor", name), val) + pbar.update(1) # type: ignore[has-type] + + writer = torch.utils._content_store.ContentStoreWriter( + options.save_dir, stable_hash=options.stable_hash + ) + reader = torch.utils._content_store.ContentStoreReader(options.save_dir) + + new_args = clone_inputs(args) + with intermediate_hook(save_hook), tqdm( + desc="Saving inductor intermediates", total=total + ) as pbar: + compiled(new_args) + assert not new_args + + def compare_tuples(tuple1, tuple2): + diff_indices = [i for i in range(len(tuple1)) if tuple1[i] != tuple2[i]] + diff_values = [(tuple1[i], tuple2[i]) for i in diff_indices] + + if not diff_values: + return None + else: + return " and ".join(f"{a} != {b}" for a, b in diff_values) + + def check_hook(name, val): + meta = writer.compute_tensor_metadata(val) + meta2 = reader.read_tensor_metadata(os.path.join("inductor", name)) + reason = compare_tuples(meta, meta2) + if reason is not None: + pbar.write(f"NONDETERMINISTIC INDUCTOR at {name} ({reason})") + pbar.update(1) + + if not options.skip_check_deterministic: + new_args = clone_inputs(args) + with intermediate_hook(check_hook), tqdm( + desc="Checking inductor determinism", total=total + ) as pbar: + compiled(new_args) + assert not new_args + + class WriterInterp(fx.Interpreter): + def __init__(self, mod, subdir): + super().__init__(mod) + self.subdir = subdir + + def run_node(self, n): + r = super().run_node(n) + name = n.name + if name in known_names: + pbar.update(1) + writer.write_tensor(os.path.join(self.subdir, name), r) + return r + + # NB: the module cast doesn't actually do anything, since there are no + # parameters/buffers on the module + if not options.skip_saving_float64_intermediates: + new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args)) + with tqdm(desc="Saving float64 intermediates", total=total) as pbar: + WriterInterp(new_mod, "float64").boxed_run(new_args) + assert not new_args + + class ExactReaderInterp(fx.Interpreter): + def run_node(self, n): + r = super().run_node(n) + name = n.name + if name in known_names: + meta = writer.compute_tensor_metadata(r) + meta2 = reader.read_tensor_metadata(os.path.join("float64", name)) + reason = compare_tuples(meta, meta2) + if reason is not None: + pbar.write(f"NONDETERMINISTIC FLOAT64 at {name} ({reason})") + pbar.update(1) + return r + + # TODO: check eager determinism + + if not options.skip_check_deterministic: + new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args)) + with tqdm(desc="Checking float64 determinism", total=total) as pbar: + ExactReaderInterp(new_mod).boxed_run(new_args) + assert not new_args + + # Now that we've saved everything, interp through the eager graph + # and do comparisons + class ReaderInterp(fx.Interpreter): + def run_node(self, n): + r = super().run_node(n) + name = n.name + if name in known_names: + inductor = reader.read_tensor(os.path.join("inductor", name)) + float64 = reader.read_tensor(os.path.join("float64", name)) + logged = False + + def log_error(msg, *args): + nonlocal logged + logged = True + pbar.write(f"DIVERGED at {name}: {msg % args}") + + if not same( + r, + inductor, + float64, + tol=torch._dynamo.config.repro_tolerance, + equal_nan=True, + log_error=log_error, + ): + 
assert logged + pbar.update(1) + return r + + with tqdm(desc="Checking divergence", total=total) as pbar: + ReaderInterp(mod).boxed_run(args) + assert not args + + +def repro_run(options, mod, load_args): + from torch._inductor.compile_fx import compile_fx_inner + + mod, args = repro_common(options, mod, load_args) + + from torch.cuda import synchronize + + compiled = compile_fx_inner(mod, args) + + if options.accuracy != "": + # We don't really respect --accuracy vs --strict-accuracy here, it + # seems counterintuitive + if not same_two_models(mod, compiled, args, only_fwd=True): + raise AccuracyError("Bad accuracy detected") + else: + need_sync = False + for arg in args: + if isinstance(arg, torch.Tensor) and arg.is_cuda: + need_sync = True + break + ref = compiled(list(args)) + if need_sync: + synchronize() # ensure segfaults are surfaced + return lambda: compiled(list(args)) + + +# TODO: lazily load the inputs or something, rather than cloning them +def run_repro( + mod, + load_args, + *, + command="run", + accuracy: Union[bool, str] = "", + save_dir=None, + tracing_mode=None, + patch_code=None, + check_str=None, + **kwargs, +): + for k in kwargs: + log.warning( + "Unrecognized kwarg %s; perhaps this repro was made on a newer version of PyTorch", + k, + ) + + if accuracy is True: + accuracy = "accuracy" + elif accuracy is False: + accuracy = "" + + if patch_code is not None: + log.warning( + "patch_code no longer works on this version of PyTorch, silently ignoring" + ) + + parser = argparse.ArgumentParser( + description=f"""\ +An after_aot repro script, typically triggering a bug in PyTorch Inductor. +When run with no arguments, this script defaults to running '{command}'. +Extra flags may be available; to find out more, try '{command} --help'. +There are also alternate subcommands available, see below. + +default settings on this script: + {accuracy=} + {tracing_mode=} + {save_dir=} + {check_str=} +""", + formatter_class=argparse.RawTextHelpFormatter, + ) + + def common_flags(parser): + accuracy_group = parser.add_mutually_exclusive_group() + accuracy_group.add_argument( + "--no-accuracy", + dest="accuracy", + action="store_const", + const="", + default=accuracy, + help="do not test accuracy, just run the module and see if it errors", + ) + accuracy_group.add_argument( + "--accuracy", + action="store_const", + const="accuracy", + default=accuracy, + help="""\ +test if the RMSE between the compiled module and the fp64 reference is greater +than eager and the fp64 reference. This is usually more reliable than the +standard allclose test, as we expect numeric differences from compiling, often +improving accuracy over eager. RMSE test allows for compiled module to +diverge greatly from eager, as long as this divergence moves it closer to the +'true' mathematical value of the network. Caveats: (1) double precision can +still suffer from rounding error, so it is not a perfect reference (see for +example 'Herbie: Automatically Improving Floating Point Accuracy') for +approaches that detect the necessary working precision and compute it in +arbitrary precision floating point; unfortunately, this is not practical for +tensor computation; (2) if there are not enough samples in the output being +compared, we may get unlucky and have an unlucky greater RMSE than eager; this +could be overcome by applying a more rigorous statistical test at some +p-value, which we leave for future work. 
+""", + ) + accuracy_group.add_argument( + "--strict-accuracy", + dest="accuracy", + action="store_const", + const="strict_accuracy", + default=accuracy, + help="""\ +by default, when doing accuracy minification we will reject reductions which +change the divergence from a floating point divergence to a integral/boolean +divergence. This is because some operations like ReLU involve temporarily +sharp boundaries that smooth out again afterwards; without requiring +divergence on floating point, the minifier will often fixate on divergent +boolean tensor even though this is not the true source of the divergence. +However, rejecting these reductions makes it more difficult for the minifier +to make process. Using this option will let the minifier progress for ALL +divergences--you just might not end up with a useful repro in the end.""", + ) + + parser.add_argument( + "--save-dir", + type=str, + default=save_dir, + metavar="DIR", + help="directory where saved inputs live", + ) + parser.add_argument( + "--no-save-dir", + dest="save_dir", + action="store_const", + const=None, + help="don't use any directory for saved inputs", + ) + parser.add_argument( + "--tracing-mode", + type=str, + metavar="{real,fake,symbolic}", + default=tracing_mode, + help="how to trace the repro module into a GraphModule with metadata", + ) + + subparsers = parser.add_subparsers( + dest="command", metavar="{run,minify,analyze}", required=True + ) + + parser_run = subparsers.add_parser( + "run", + help="just run the repro", + ) + common_flags(parser_run) + + parser_minify = subparsers.add_parser( + "minify", help="run the minifier on the repro" + ) + common_flags(parser_minify) + parser_minify_isolate = parser_minify.add_mutually_exclusive_group() + parser_minify_isolate.add_argument( + "--isolate", + action="store_true", + default=True, + help="run in separate processes to avoid interference (default)", + ) + parser_minify_isolate.add_argument( + "--no-isolate", + dest="isolate", + action="store_false", + help="speed up by running all compilation in same process", + ) + parser_minify.add_argument( + "--skip-saving-eager-intermediates", + action="store_true", + help="skip saving eager intermediates on --minify", + ) + # TODO: make this an option for --analyze too + parser_minify.add_argument( + "--offload-to-disk", + action="store_true", + help="during minification, offload delta debugging intermediates to disk. 
Use if you're OOMing", + ) + parser_minify.add_argument( + "--skip-sanity", + action="store_true", + help="skip sanity check at beginning of minification on original graph", + ) + parser_minify.add_argument( + "--max-granularity", + type=int, + default=None, + help="start at this granularity and work down; must be power of 2", + ) + parser_minify.add_argument( + "--check-str", + type=str, + default=check_str, + help="require minified program to fail with error containing this string", + ) + + parser_analyze = subparsers.add_parser( + "analyze", help="run the accuracy analyzer on the repro" + ) + common_flags(parser_analyze) + parser_analyze.add_argument( + "--skip-saving-inductor-intermediates", + action="store_true", + help="skip saving inductor intermediates on --analyze", + ) + parser_analyze.add_argument( + "--skip-saving-float64-intermediates", + action="store_true", + help="skip saving float64 intermediates", + ) + parser_analyze.add_argument( + "--skip-check-deterministic", + action="store_true", + help="skip checking that the network is deterministic", + ) + parser_analyze.add_argument( + "--stable-hash", + action="store_true", + help="use SHA-1 checksum instead of fast (but possibly unsound) hash", + ) + + # Run the repro in the context of minification, inverting exit code meaning + parser_minifier_query = subparsers.add_parser( + "minifier-query", + ) + common_flags(parser_minifier_query) + parser_minifier_query.add_argument( + "--check-str", + type=str, + default=check_str, + help="require minified program to fail with error containing this string", + ) + + args = None + if len(sys.argv) <= 1: + args = [command, *sys.argv[1:]] + + options = parser.parse_args(args) + COMMAND_FNS = { + "minify": repro_minify, + "analyze": repro_analyze, + "minifier-query": repro_minifier_query, + "run": repro_run, + } + return COMMAND_FNS[options.command](options, mod, load_args) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/repro/after_dynamo.py b/venv/lib/python3.10/site-packages/torch/_dynamo/repro/after_dynamo.py new file mode 100644 index 0000000000000000000000000000000000000000..2028a9a4c12eabbb84e20ff1a4c2527e98dad883 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/repro/after_dynamo.py @@ -0,0 +1,566 @@ +import argparse +import copy +import functools +import logging +import os +import shutil +import sys +import textwrap +from importlib import import_module +from typing import Union + +import torch +import torch.fx as fx + +from torch._dynamo.debug_utils import ( + AccuracyError, + backend_accuracy_fails, + BUCK_CMD_PREFIX, + BuckTargetWriter, + extra_imports, + generate_config_string, + helper_for_dump_minify, + InputReader, + InputWriter, + minifier_dir, + NNModuleToString, + NopInputReader, + run_fwd_maybe_bwd, + same_two_models, +) +from torch.fx.experimental.symbolic_shapes import fx_placeholder_targets +from torch.hub import tqdm + +from .. import config +from ..backends.registry import lookup_backend, register_debug_backend +from ..debug_utils import clone_inputs_retaining_gradness + +log = logging.getLogger(__name__) + + +inductor_config = import_module("torch._inductor.config") +use_buck = inductor_config.is_fbcode() + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# MAIN ENTRY POINT +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def wrap_backend_debug(unconfigured_compiler_fn, compiler_name: str): + """ + A minifier decorator that wraps the TorchDynamo produced Fx graph modules. 
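# Hedged sketch of how this wrapper is typically activated from user code,
# using the config knobs the wrapper consults (repro_after / repro_level).
import torch
import torch._dynamo

torch._dynamo.config.repro_after = "dynamo"   # intercept at the Dynamo-produced Fx graph
torch._dynamo.config.repro_level = 2          # on a compile failure, dump a minifier launcher script

model = torch.nn.Linear(8, 8)                 # stand-in model for illustration
compiled = torch.compile(model, backend="inductor")
# compiled(torch.randn(2, 8))                 # a backend crash here would now produce repro artifacts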
+ As opposed to wrap_compiler_debug, this wrapper intercepts at the + TorchDynamo produced Fx Graph Module. This makes it backend-agnostic to some + level, e.g., it is useful for minifying issues related to Aot Autograd + tracing. If an error is found, we minify and save the minified repro in + repro.tar.gz. + """ + + @functools.wraps(unconfigured_compiler_fn) + def debug_wrapper(gm, example_inputs, **kwargs): + compiler_fn = functools.partial(unconfigured_compiler_fn, **kwargs) + assert config.repro_after in ("dynamo", "aot", None) + + if config.repro_after == "dynamo": + + def add_paths(exc): + exc.minifier_path = os.path.join(minifier_dir(), "minifier_launcher.py") + if use_buck: + exc.buck_command = " ".join( + BUCK_CMD_PREFIX + + [BuckTargetWriter(exc.minifier_path).cmd_line_path] + ) + + if config.repro_level == 3: + dump_to_minify_after_dynamo(gm, example_inputs, compiler_name) + + # Check for either accuracy (level 4) or other type of failures. + if config.repro_level == 4: + # Check Accuracy + compiled_gm = compiler_fn(copy.deepcopy(gm), example_inputs) + if backend_accuracy_fails(gm, example_inputs, compiler_fn): + log.warning( + "Accuracy failed for the TorchDynamo produced graph. Creating script to minify the error." + ) + dump_to_minify_after_dynamo( + fx.GraphModule(gm, copy.deepcopy(gm.graph)), + example_inputs, + compiler_name, + ) + exc = AccuracyError("Bad accuracy detected.") + add_paths(exc) + raise exc + else: + try: + compiled_gm = compiler_fn(copy.deepcopy(gm), example_inputs) + run_fwd_maybe_bwd(compiled_gm, example_inputs) + except Exception as exc: + log.warning( + "Compiled Fx GraphModule failed. Creating script to minify the error." + ) + if config.repro_level == 1: + dump_state_fn = functools.partial( + dump_backend_state, compiler_name=compiler_name + ) + dump_state_fn( + fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs + ) + elif config.repro_level == 2: + dump_to_minify_after_dynamo( + fx.GraphModule(gm, copy.deepcopy(gm.graph)), + example_inputs, + compiler_name, + ) + add_paths(exc) + raise + else: + compiled_gm = compiler_fn(gm, example_inputs) + + return compiled_gm + + debug_wrapper._torchdynamo_orig_callable = unconfigured_compiler_fn # type: ignore[attr-defined] + if hasattr(unconfigured_compiler_fn, "compiler_name"): + debug_wrapper.__name__ = unconfigured_compiler_fn.compiler_name + if hasattr(unconfigured_compiler_fn, "get_compiler_config"): + debug_wrapper.get_compiler_config = unconfigured_compiler_fn.get_compiler_config # type: ignore[attr-defined] + return debug_wrapper + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# REPRO DUMPERS +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def generate_dynamo_fx_repro_string( + gm, + args, + compiler_name, + check_accuracy=False, + *, + stable_output=False, + save_dir=None, + command="run", +): + """ + Generate a repro string for backend-agnostic minified version. 
+ """ + + model_str = NNModuleToString.convert(gm) + + # TODO: Figure out why torch.compile'd hash isn't work on this codepath + writer = InputWriter(save_dir, stable_hash=True) + for placeholder, arg in zip(fx_placeholder_targets(gm), args): + if isinstance(arg, (int, torch.SymInt)): + writer.symint(placeholder, arg) + elif isinstance(arg, torch.Tensor): + # TODO: improve these names with FQN + writer.tensor(placeholder, arg) + else: + raise TypeError(f"arg is neither SymInt/int nor torch.Tensor, {arg}") + load_args = "\n".join(writer.lines()) + + return textwrap.dedent( + f""" +from math import inf +import torch +from torch import tensor, device +import torch.fx as fx +import torch._dynamo +from torch._dynamo.testing import rand_strided +from torch._dynamo.debug_utils import run_fwd_maybe_bwd + +{generate_config_string(stable_output=stable_output)} + +{extra_imports} + +{model_str} +mod = Repro() + +{load_args} + +if __name__ == '__main__': + from torch._dynamo.repro.after_dynamo import run_repro + run_repro(mod, load_args, accuracy={check_accuracy!r}, command={command!r}, + save_dir={save_dir!r}, autocast={torch.is_autocast_enabled()!r}, backend={compiler_name!r}) +""" + ) + + +def dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy=False): + """ + Saves the repro to a repro.py file + """ + curdir = os.getcwd() + subdir = os.path.join(os.getcwd(), "checkpoints") + if not os.path.exists(subdir): + os.makedirs(subdir, exist_ok=True) + file_name = os.path.join(subdir, f"minified_{len(gm.graph.nodes)}_nodes.py") + log.warning( + "Writing checkpoint with %s nodes to %s", len(gm.graph.nodes), file_name + ) + + with open(file_name, "w") as fd: + fd.write( + generate_dynamo_fx_repro_string( + gm, args, compiler_name, check_accuracy, save_dir=subdir + ) + ) + latest_repro = os.path.join(curdir, "repro.py") + log.warning("Copying %s to %s for convenience", file_name, latest_repro) + + if use_buck: + BuckTargetWriter(latest_repro).write() + + shutil.copyfile(file_name, latest_repro) + + +def dump_backend_state(gm, args, compiler_name, check_accuracy=False): + """ + Dumps the dynamo graph to repro the issue. + 1) It tries to convert Fx GraphModule to a string. If we can, it writes to a + repro.py file. + 2) If we can't convert Fx GraphModule to a string, we use to_folder to save + the module and save a tar file. + """ + assert NNModuleToString.can_convert_to_string(gm) + return dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy) + # return dump_backend_repro_as_tarfile(gm, args, compiler_name) + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# MINIFIER DUMPER +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def dump_to_minify_after_dynamo(gm, args, compiler_name): + # TODO: factor this out + subdir = os.path.join(minifier_dir(), "checkpoints") + if not os.path.exists(subdir): + os.makedirs(subdir, exist_ok=True) + helper_for_dump_minify( + generate_dynamo_fx_repro_string( + gm, + args, + compiler_name, + check_accuracy=config.repro_level == 4, + save_dir=subdir, + command="minify", + ) + ) + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# MINIFIER BACKENDS +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +@register_debug_backend +def dynamo_minifier_backend(gm, example_inputs, compiler_name): + from functorch.compile import minifier + + compiler_fn = lookup_backend(compiler_name) + + # TODO: It's inconsistent to pass SymInt inputs but REAL tensors. 
+ # We should pass ints and look at the GraphModule placeholders + # to resolve them to SymInt (if necessary) + example_inputs = [ + i.node.hint if isinstance(i, torch.SymInt) else i for i in example_inputs + ] + + try: + compiled_gm = compiler_fn(gm, example_inputs) + run_fwd_maybe_bwd(compiled_gm, example_inputs) + raise ValueError("No issue was detected") + except Exception as exc: + orig_failure = str(exc) + log.warning( + "Compiled Fx GraphModule failed. Creating script to minify the error." + ) + dump_state_fn = functools.partial( + dump_backend_state, compiler_name=compiler_name + ) + dump_state_fn(fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs) + fails_fn = functools.partial( + backend_fails, + compiler_fn=compiler_fn, + orig_failure=orig_failure, + ) + minifier( + gm, + example_inputs, + module_fails=fails_fn, + dump_state=dump_state_fn, + ) + return gm + + +@register_debug_backend +def dynamo_accuracy_minifier_backend(gm, example_inputs, compiler_name): + from functorch.compile import minifier + + compiler_fn = lookup_backend(compiler_name) + + # Set the eval mode to remove randomness. + gm.eval() + + # Check Accuracy + if backend_accuracy_fails( + gm, example_inputs, compiler_fn, only_fwd=config.repro_forward_only + ): + log.warning("Accuracy failed for the TorchDynamo produced graph") + dump_state_fn = functools.partial( + dump_backend_state, compiler_name=compiler_name, check_accuracy=True + ) + fails_fn = functools.partial( + backend_accuracy_fails, + compiler_fn=compiler_fn, + only_fwd=config.repro_forward_only, + ) + dump_state_fn(fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs) + minifier( + gm, + example_inputs, + module_fails=fails_fn, + dump_state=dump_state_fn, + ) + else: + log.error("Input graph does not fail accuracy testing") + return gm + + +def backend_fails(gm, example_inputs, compiler_fn, orig_failure): + """ + Minifier uses this function to identify if the minified graph module fails + with the same error. + + One caveat is that minifier can potentially go into a wrong direction when + the resulting graph module fails for a different reason. To avoid this, we + save the string for the original exception and check similarity between new + and old exception. They can be somewhat different in some cases, when the + exception string depends on the failing node information. So, we have a + loose similarity metric to guide the minifier path. + """ + from difflib import SequenceMatcher + + try: + # Run the original gm to check eager validity + run_fwd_maybe_bwd(gm, clone_inputs_retaining_gradness(example_inputs)) + compiled_gm = compiler_fn(gm, example_inputs) + run_fwd_maybe_bwd(compiled_gm, clone_inputs_retaining_gradness(example_inputs)) + return False + except Exception as e: + new_failure = str(e) + if SequenceMatcher(None, orig_failure, new_failure).ratio() > 0.5: + return True + return False + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# REPRO MAIN +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def run_load_args(options, mod, load_args): + if not hasattr(load_args, "_version"): + log.warning( + "load_args does not have a _version attribute, please file a bug to PyTorch " + "and describe how you generate this repro script" + ) + else: + if load_args._version > 0: + log.warning( + "load_args is version %s, but this version of PyTorch only supports " + "version 0. 
We will try to run it anyway but there may be an incompatibility; " + "if so, try upgrading your version of PyTorch.", + load_args._version, + ) + + nop_reader = NopInputReader() + load_args(nop_reader) + + with tqdm(desc="Loading inputs", total=nop_reader.total) as pbar: + input_reader = InputReader(save_dir=options.save_dir, pbar=pbar) + load_args(input_reader) + args = input_reader.args + + return args + + +def repro_minify(options, mod, load_args): + args = run_load_args(options, mod, load_args) + + # Setup debug minifier compiler + if not options.accuracy: + compiler_fn = lookup_backend("dynamo_minifier_backend") + else: + compiler_fn = lookup_backend("dynamo_accuracy_minifier_backend") + + if options.backend is None: + raise RuntimeError( + "Compiler name is None - this likely means that a custom compiler " + "was called by torchdynamo. Please remove this error, import your " + "custom compiler function, and replace the backend=None " + "line in run_repro to backend=" + ) + + dynamo_minifier_backend = functools.partial( + compiler_fn, + compiler_name=options.backend, + ) + opt_mod = torch._dynamo.optimize(dynamo_minifier_backend)(mod) + + with torch.cuda.amp.autocast(enabled=options.autocast): + opt_mod(*args) + + +def repro_run(options, mod, load_args): + opt_mod = torch._dynamo.optimize(options.backend)(mod) + + if options.accuracy != "": + mod.eval() + opt_mod.eval() + + with torch.cuda.amp.autocast(enabled=options.autocast): + # TODO: disable clone + args = run_load_args(options, mod, load_args) + assert same_two_models(mod, mod, args), "Eager itself failed" + if not same_two_models(mod, opt_mod, args): + raise AccuracyError("Dynamo failed") + else: + with torch.cuda.amp.autocast(enabled=options.autocast): + args = run_load_args(options, mod, load_args) + ref = run_fwd_maybe_bwd( + mod, args, only_fwd=options.only_fwd, disable_clone=True + ) + del args + + args = run_load_args(options, mod, load_args) + res = run_fwd_maybe_bwd( + opt_mod, args, only_fwd=options.only_fwd, disable_clone=True + ) + + +def run_repro( + mod, + load_args, + *, + command="run", + accuracy: Union[bool, str] = "", + save_dir=None, + autocast=False, + backend="inductor", + **kwargs, +): + for k in kwargs: + log.warning( + "Unrecognized kwarg %s; perhaps this repro was made on a newer version of PyTorch", + k, + ) + + if accuracy is True: + accuracy = "accuracy" + elif accuracy is False: + accuracy = "" + + parser = argparse.ArgumentParser( + description=f"""\ +An after_dynamo repro script, typically triggering a bug in Dynamo or +AOTAutograd. When run with no arguments, this script defaults to running +'{command}'. Extra flags may be available; to find out more, try '{command} +--help'. There are also alternate subcommands available, see below. 
+ +default settings on this script: + {accuracy=} + {save_dir=} +""", + formatter_class=argparse.RawTextHelpFormatter, + ) + + def common_flags(parser): + accuracy_group = parser.add_mutually_exclusive_group() + accuracy_group.add_argument( + "--no-accuracy", + dest="accuracy", + action="store_const", + const="", + default=accuracy, + help="do not test accuracy, just run the module and see if it errors", + ) + accuracy_group.add_argument( + "--accuracy", + action="store_const", + const="accuracy", + default=accuracy, + help="test accuracy", + ) + parser.add_argument( + "--save-dir", + type=str, + default=save_dir, + metavar="DIR", + help="directory where saved inputs live", + ) + parser.add_argument( + "--no-save-dir", + dest="save_dir", + action="store_const", + const=None, + help="don't use any directory for saved inputs", + ) + parser.add_argument( + "--no-isolate", + dest="isolate", + action="store_false", + default=False, + help="no isolate (doesn't do anything for after_dynamo)", + ) + parser.add_argument( + "--autocast", + default=autocast, + action="store_true", + help="use torch.cuda.amp.autocast", + ) + parser.add_argument( + "--no-autocast", + dest="autocast", + action="store_false", + help="don't use torch.cuda.amp.autocast", + ) + parser.add_argument( + "--backend", + type=str, + default=backend, + metavar="BACKEND", + help="torch.compile backend to use", + ) + + subparsers = parser.add_subparsers( + dest="command", metavar="{run,minify}", required=True + ) + + parser_run = subparsers.add_parser( + "run", + help="just run the repro", + ) + common_flags(parser_run) + parser_run.add_argument( + "--only-fwd", + action="store_true", + help="don't run backwards compilation for testing", + ) + + parser_minify = subparsers.add_parser( + "minify", help="run the minifier on the repro" + ) + common_flags(parser_minify) + + args = None + if len(sys.argv) <= 1: + args = [command, *sys.argv[1:]] + + options = parser.parse_args(args) + COMMAND_FNS = { + "minify": repro_minify, + "run": repro_run, + } + COMMAND_FNS[options.command](options, mod, load_args) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__init__.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..258079768428fda57c7109b60285ef9699d9a86d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__init__.py @@ -0,0 +1,151 @@ +# mypy: ignore-errors + +from .base import VariableTracker +from .builtin import BuiltinVariable +from .constant import ConstantVariable, EnumVariable +from .ctx_manager import ( + ContextWrappingVariable, + DeterministicAlgorithmsVariable, + DisabledSavedTensorsHooksVariable, + GradIncrementNestingCtxManagerVariable, + GradInplaceRequiresGradCtxManagerVariable, + GradModeVariable, + InferenceModeVariable, + StreamContextVariable, + StreamVariable, + VmapIncrementNestingCtxManagerVariable, + WithExitFunctionVariable, +) +from .dicts import ( + ConstDictVariable, + CustomizedDictVariable, + DataClassVariable, + DefaultDictVariable, + SetVariable, +) +from .distributed import BackwardHookVariable +from .functions import ( + FunctoolsPartialVariable, + NestedUserFunctionVariable, + SkipFunctionVariable, + UserFunctionVariable, + UserMethodVariable, +) +from .higher_order_ops import ( + FunctorchHigherOrderVariable, + TorchHigherOrderOperatorVariable, +) +from .iter import ( + CountIteratorVariable, + CycleIteratorVariable, + IteratorVariable, + ItertoolsVariable, + 
RepeatIteratorVariable, +) +from .lazy import LazyVariableTracker +from .lists import ( + BaseListVariable, + ListIteratorVariable, + ListVariable, + NamedTupleVariable, + RangeVariable, + RestrictedListSubclassVariable, + SliceVariable, + TupleIteratorVariable, + TupleVariable, +) +from .misc import ( + AutogradFunctionContextVariable, + AutogradFunctionVariable, + ClosureVariable, + DeletedVariable, + GetAttrVariable, + InspectSignatureVariable, + LambdaVariable, + MethodWrapperVariable, + NewCellVariable, + NewGlobalVariable, + NumpyVariable, + PythonModuleVariable, + StringFormatVariable, + SuperVariable, + TypingVariable, + UnknownVariable, +) +from .nn_module import NNModuleVariable, UnspecializedNNModuleVariable +from .sdpa import SDPAParamsVariable +from .tensor import ( + FakeItemVariable, + NumpyNdarrayVariable, + SymNodeVariable, + TensorVariable, + UnspecializedPythonVariable, + UntypedStorageVariable, +) +from .torch import TorchCtxManagerClassVariable, TorchInGraphFunctionVariable +from .user_defined import ( + RemovableHandleVariable, + UserDefinedClassVariable, + UserDefinedObjectVariable, +) + +__all__ = [ + "AutogradFunctionContextVariable", + "AutogradFunctionVariable", + "BackwardHookVariable", + "BaseListVariable", + "BuiltinVariable", + "ClosureVariable", + "ConstantVariable", + "ConstDictVariable", + "ContextWrappingVariable", + "CountIteratorVariable", + "CustomizedDictVariable", + "CycleIteratorVariable", + "DataClassVariable", + "DefaultDictVariable", + "DeletedVariable", + "DeterministicAlgorithmsVariable", + "EnumVariable", + "FakeItemVariable", + "GetAttrVariable", + "GradModeVariable", + "InspectSignatureVariable", + "IteratorVariable", + "ItertoolsVariable", + "LambdaVariable", + "LazyVariableTracker", + "ListIteratorVariable", + "ListVariable", + "NamedTupleVariable", + "NestedUserFunctionVariable", + "NewCellVariable", + "NewGlobalVariable", + "NNModuleVariable", + "NumpyNdarrayVariable", + "NumpyVariable", + "PythonModuleVariable", + "RangeVariable", + "RemovableHandleVariable", + "RepeatIteratorVariable", + "RestrictedListSubclassVariable", + "SDPAParamsVariable", + "SkipFunctionVariable", + "SliceVariable", + "StringFormatVariable", + "SuperVariable", + "TensorVariable", + "TorchCtxManagerClassVariable", + "TorchInGraphFunctionVariable", + "TupleVariable", + "UnknownVariable", + "UnspecializedNNModuleVariable", + "UnspecializedPythonVariable", + "UntypedStorageVariable", + "UserDefinedClassVariable", + "UserDefinedObjectVariable", + "UserFunctionVariable", + "UserMethodVariable", + "VariableTracker", + "WithExitFunctionVariable", +] diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..558edf809bf8cc401dea0d128753befa77068f15 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd50bd7f1f086541472a30ac68e8057ed14ae45b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93291117b4eaf4cbdcd460099dabcf0b91f61bea Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builtin.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builtin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fb2cd9ed1960a7df124e174d7f732000e97ea22 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builtin.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39bafe700dbfb0a2b36c92866d3b5cb5ad7d4caf Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc6f2ee927cf66b92ec6fcd02637fad694638f28 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/dicts.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/dicts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96af6e06793421eb13da677bc00ed8653148e5c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/dicts.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/distributed.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/distributed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..362eb7f21a9bcc7d7d44fb77e4f003b7aff71633 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/distributed.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/functions.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e566e6793449fa4cb35579b8fe0b1081410b0fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/functions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/higher_order_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/higher_order_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b220eaf66127cd9cbada2fa3483547a7e85a56c Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/higher_order_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/iter.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/iter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9acf34441b4f7f3df7829a10507d3b6e51e97dbc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/iter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lazy.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lazy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a09949ac26858dedeba5aac1a3020e0c02d2c4d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lazy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lists.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lists.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78ab30ad7fe589b8c85443c1983a919002533f2e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lists.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/misc.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a909b13aaea3624bf0a6da299150d93c8370861 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/misc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/nn_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/nn_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..037ac1f82ab9bfe86ca01d58e9fa6c780836946f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/nn_module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/optimizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5435a52bb06d246ef6cb7b79fdc78a61723058cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/optimizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/sdpa.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/sdpa.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33e0284a609c2fcd319083293ebd78e797b40e20 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/sdpa.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/tensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31a205b1840b0485bd6ae46e67d2c890c76e73d7 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/torch.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/torch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76024d9a8ed00d204ff83c5d54849114645b95b9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/torch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/torch_function.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/torch_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d53d2f6e3aeb59428b03a610f9c55cfe4eed2e7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/torch_function.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/user_defined.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/user_defined.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e5b75161e41c3444a843f8cb40270b04deb463e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/user_defined.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/base.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/base.py new file mode 100644 index 0000000000000000000000000000000000000000..a28f2c8cb0aaef0e268bf7aa9f905d024c3b4f08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/base.py @@ -0,0 +1,420 @@ +# mypy: ignore-errors + +import collections +from enum import Enum +from typing import Any, Callable, Dict, List + +from .. import variables +from ..current_scope_id import current_scope_id +from ..exc import unimplemented +from ..source import AttrSource, Source +from ..utils import identity, istype + + +class MutableLocalSource(Enum): + """ + If the VariableTracker.mutable_local represents a Variable that: + - already existed that Dynamo began tracking while introspection (Existing) + - is a new variable that is created during Dynamo introspection (Local) + """ + + Existing = 0 + Local = 1 + + +class ParentsTracker: + """ + This is a perf optimization to limit the number of objects we need to visit in tx.replace_all. + This must be a seperate object so that it is not cloned in apply. + """ + + def __init__(self): + # logically this is a set, but we use a dict to ensure deterministic ordering + self.parents: Dict[ParentsTracker, bool] = dict() + + def add(self, parent): + self.parents[parent] = True + + def recursive_parents(self): + rv = dict(self.parents) + worklist = list(self.parents) + while worklist: + for parent in worklist.pop().parents: + if parent not in rv: + assert isinstance(parent, ParentsTracker) + rv[parent] = True + worklist.append(parent) + return rv.keys() + + +class MutableLocalBase: + """ + Base class for Variable.mutable_local + """ + + def __init__(self, typ: MutableLocalSource): + # In HigherOrderOperator tracing, we need to distinguish + # between MutableLocals inside the HigherOrderOperator and + # ones outside it. For example, it is not safe to mutate + # `a` in the following example because it was constructed + # in a different scope. 
+ # + # def f(x): + # a = 1 + # def g(x): + # nonlocal a + # a = 2 + # return x + # return wrap(g, x) + a + # + # We use self.scope to distinguish this. + # scope == 0: The object was an existing variable + # scope == 1: The object was created while Dynamo + # was introspecting a function + # (and no HigherOrderOps were involved) + # scope >= 2: The object was created through + # Dynamo introspection of a HigherOrderOp. + # The exact number corresponds to the level + # of nested HigherOrderOps. + if typ is MutableLocalSource.Existing: + self.scope = 0 + elif typ is MutableLocalSource.Local: + self.scope = current_scope_id() + else: + unimplemented(f"Unsupported MutableLocalSource: {typ}") + + +class MutableLocal(MutableLocalBase): + """ + Marker used to indicate this (list, iter, etc) was constructed in + local scope and can be mutated safely in analysis without leaking + state. + """ + + def __init__(self): + super().__init__(MutableLocalSource.Local) + + def __hash__(self): + return id(self) + + def __eq__(self, other): + return self is other + + +def _is_top_level_scope(scope_id): + return scope_id == 1 + + +def is_side_effect_safe(m: MutableLocalBase): + scope_id = current_scope_id() + + # In the top-level scope (if no HigherOrderOperators are involved), + # we are allowed to modify variables created in this scope as well + # as existing variables. + if _is_top_level_scope(scope_id): + return True + # Otherwise, only allow local mutation of variables created in the current scope + return m.scope == scope_id + + +class VariableTrackerMeta(type): + def __call__(cls, *args, **kwargs): + """Call __post_init__""" + obj = type.__call__(cls, *args, **kwargs) + obj.__post_init__(*args, **kwargs) + return obj + + def __instancecheck__(cls, instance) -> bool: + """Make isinstance work with LazyVariableTracker""" + if type.__instancecheck__( + variables.LazyVariableTracker, instance + ) and cls not in ( + VariableTracker, + variables.LazyVariableTracker, + ): + instance = instance.realize() + return type.__instancecheck__(cls, instance) + + +class VariableTracker(metaclass=VariableTrackerMeta): + """ + Base class for tracked locals and stack values + + VariableTracker instances are immutable and should be copied in + order to change them. 
+ """ + + # fields to leave unmodified in apply() + _nonvar_fields = { + "value", + "guards", + "source", + "mutable_local", + "parents_tracker", + "user_code_variable_name", + } + + def clone(self, **kwargs): + """Shallow copy with some (optional) changes""" + args = dict(self.__dict__) + args.update(kwargs) + return self.__class__(**args) + + @classmethod + def copy(cls, value): + """Deeper (but not full) copy, leaving FX and user objects alone""" + return cls.apply(identity, value) + + @classmethod + def apply( + cls, + fn: Callable[["VariableTracker"], "VariableTracker"], + value, + cache=None, + skip_fn=lambda _: False, # Whether we should skip applying to this var + ): + """ + Walk this object and call fn on all the VariableTracker + instances + """ + if cache is None: + cache = dict() + + idx = id(value) + if idx in cache: + return cache[idx][0] + + if isinstance(value, VariableTracker): + if not skip_fn(value): + + def update_object_dict(v): + changed = False + rv = v.__dict__ + for key in rv.keys(): + if key not in v._nonvar_fields: + prior = rv[key] + rv[key] = cls.apply(fn, prior, cache, skip_fn) + changed = changed or prior is not rv[key] + + return v + + value = value.unwrap() + was_realized = value.is_realized() + result = fn(update_object_dict(value)) + if not was_realized and value.is_realized(): + # running fn() resulted in value getting realized, + # which means we missed updating the contents of result + result = update_object_dict(result.unwrap()) + else: + result = fn(value) + if result is not None: + result = result.unwrap() + elif istype(value, list): + result = [cls.apply(fn, v, cache, skip_fn) for v in value] + elif istype(value, tuple): + result = tuple(cls.apply(fn, v, cache, skip_fn) for v in value) + elif istype(value, (dict, collections.OrderedDict)): + result = { + k: cls.apply(fn, v, cache, skip_fn) for k, v in list(value.items()) + } + else: + result = value + + # save `value` to keep it alive and ensure id() isn't reused + cache[idx] = (result, value) + return result + + def __repr__(self): + return f"{self.__class__.__name__}()" + + def python_type(self): + """ + Abstract method to be implemented by subclasses of VariableTracker. + + This method should return the type represented by the instance of the subclass. + The purpose is to provide a standardized way to retrieve the Python type information + of the variable being tracked. + + Returns: + type: The Python type (such as int, str, list, etc.) of the variable tracked by + the subclass. If the type cannot be determined or is not relevant, + leaving it undefined or invoking super() is always sound. + + Note: + This is an abstract method and may be overridden in subclasses. + + Example: + class SetVariable(VariableTracker): + def python_type(self): + return set + + Raises: + NotImplementedError: If the method is not implemented in a subclass. 
+ """ + raise NotImplementedError(f"{self} has no type") + + def as_python_constant(self): + """For constants""" + raise NotImplementedError(f"{self} is not a constant") + + def guard_as_python_constant(self): + """Similar to as_python_constant(), but add ID_MATCH guards to try to force things to become constants""" + try: + return self.as_python_constant() + except NotImplementedError as e: + unimplemented(str(e)) + + def is_python_constant(self): + try: + self.as_python_constant() + return True + except NotImplementedError: + return False + + def make_guard(self, fn): + if self.source: + return self.source.make_guard(fn) + raise NotImplementedError() + + def const_getattr(self, tx, name: str) -> Any: + """getattr(self, name) returning a python constant""" + raise NotImplementedError() + + def var_getattr(self, tx, name: str) -> "VariableTracker": + """getattr(self, name) returning a new variable""" + value = self.const_getattr(tx, name) + if not variables.ConstantVariable.is_literal(value): + raise NotImplementedError() + source = None + if self.source: + source = AttrSource(self.source, name) + return variables.ConstantVariable.create(value, source=source) + + def is_proxy(self): + try: + self.as_proxy() + return True + except NotImplementedError: + return False + + def as_proxy(self): + raise NotImplementedError(str(self)) + + def maybe_fx_node(self): + try: + proxy = self.as_proxy() + import torch.fx + + if isinstance(proxy, torch.fx.Proxy): + return proxy.node + return None + except NotImplementedError: + return None + + def reconstruct(self, codegen): + raise NotImplementedError() + + def can_reconstruct(self, tx): + """If it is possible to reconstruct the Python object this + VariableTracker represents.""" + assert tx is tx.output.root_tx, "Only root tx can reconstruct" + try: + from ..codegen import PyCodegen + + cg = PyCodegen(tx) + self.reconstruct(cg) + return True + except NotImplementedError: + return False + + def unpack_var_sequence(self, tx) -> List["VariableTracker"]: + raise NotImplementedError() + + def has_unpack_var_sequence(self, tx) -> bool: + try: + self.unpack_var_sequence(tx) + return True + except NotImplementedError: + return False + + def inspect_parameter_names(self) -> List[str]: + unimplemented(f"inspect_parameter_names: {self}") + + def call_hasattr(self, tx, name: str) -> "VariableTracker": + unimplemented(f"hasattr {self.__class__.__name__} {name}") + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + unimplemented(f"call_function {self} {args} {kwargs}") + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name == "__len__" and self.has_unpack_var_sequence(tx): + assert not (args or kwargs) + return variables.ConstantVariable.create(len(self.unpack_var_sequence(tx))) + elif ( + name == "__getattr__" + and len(args) == 1 + and args[0].is_python_constant() + and not kwargs + ): + return self.var_getattr(tx, args[0].as_python_constant()) + raise unimplemented(f"call_method {self} {name} {args} {kwargs}") + + def rename(self, tx, name): + return self + + def realize(self) -> "VariableTracker": + """Used by LazyVariableTracker to build the real VariableTracker""" + return self + + def recursive_realize(self): + """Realize all objects under this""" + return VariableTracker.apply(lambda x: x.realize(), self) + + def unwrap(self) -> "VariableTracker": + """Used by LazyVariableTracker to 
return the real VariableTracker if it already exists""" + return self + + def is_realized(self): + """Used by LazyVariableTracker to indicate an unrealized node""" + return True + + def __init__( + self, + *, + source: Source = None, + mutable_local: MutableLocal = None, + parents_tracker: ParentsTracker = None, + ): + super().__init__() + self.source = source + self.mutable_local = mutable_local + self.parents_tracker = parents_tracker + + def __post_init__(self, *args, **kwargs): + if self.parents_tracker is None: + self.parents_tracker = ParentsTracker() + # visit children 1 level deep and ensure parent is set properly + VariableTracker.apply( + lambda node: node.parents_tracker.add(self.parents_tracker), + [v for k, v in self.__dict__.items() if k not in self._nonvar_fields], + skip_fn=lambda _: True, + ) + + +def typestr(*objs): + if len(objs) == 1: + (obj,) = objs + if isinstance(obj, VariableTracker): + return str(obj) + else: + return type(obj).__name__ + else: + return " ".join(map(typestr, objs)) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/builder.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..5d25732f64225015c485c65bdb81c18e3c49e32e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/builder.py @@ -0,0 +1,1976 @@ +# mypy: ignore-errors + +import abc +import collections +import contextlib +import dataclasses +import enum +import functools +import inspect +import itertools +import logging +import operator +import re +import sys +import types +from typing import List, NamedTuple, Optional, Union + +from torch.utils._sympy.value_ranges import ValueRanges + +try: + import numpy as np +except ModuleNotFoundError: + np = None + +import torch + +from torch import SymInt +from torch._guards import GuardSource, TracingContext +from torch._ops import HigherOrderOperator +from torch._streambase import _EventBase, _StreamBase +from torch._subclasses.fake_tensor import FakeTensor, is_fake, maybe_get_fake_mode +from torch._subclasses.meta_utils import is_sparse_any +from torch.fx.experimental._backward_state import BackwardState +from torch.fx.experimental.symbolic_shapes import ( + _constrain_range_for_size, + DimDynamic, + RelaxedUnspecConstraint, + StatefulSymbolicContext, + SubclassSymbolicContext, + SymbolicContext, +) +from torch.fx.immutable_collections import immutable_list +from torch.utils._python_dispatch import is_traceable_wrapper_subclass +from torch.utils.weak import TensorWeakRef +from .. 
import config, mutation_guard, replay_record, trace_rules + +from ..device_interface import get_registered_device_interfaces +from ..exc import InternalTorchDynamoError, unimplemented +from ..guards import GuardBuilder, install_guard, make_dupe_guard +from ..side_effects import SideEffects +from ..source import ( + AttrSource, + ConstantSource, + ConstDictKeySource, + ConvertIntSource, + GetItemSource, + is_constant_source, + is_from_defaults, + LocalSource, + NumpyTensorSource, + RandomValueSource, + Source, + TupleIteratorGetItemSource, +) +from ..trace_rules import is_callable_allowed, is_numpy +from ..utils import ( + build_checkpoint_variable, + clone_input, + common_constant_types, + get_fake_value, + get_static_address_type, + is_function_or_wrapper, + is_namedtuple, + is_typing, + is_utils_checkpoint, + istype, + odict_values, + preserve_rng_state, + tensor_always_has_static_shape, + tuple_iterator, + tuple_iterator_getitem, + tuple_iterator_len, + unwrap_with_attr_name_if_wrapper, + wrap_fake_exception, +) + +from .base import MutableLocal, typestr, VariableTracker +from .constant import ConstantVariable, EnumVariable +from .ctx_manager import ( + AutocastModeVariable, + EventVariable, + NullContextVariable, + PreserveVersionContextVariable, + StreamContextVariable, + StreamVariable, +) +from .dicts import ( + ConstDictVariable, + DataClassVariable, + DefaultDictVariable, + HFPretrainedConfigVariable, + PythonSysModulesVariable, + SetVariable, +) +from .distributed import ( + DeviceMeshVariable, + PlacementClassVariable, + PlacementVariable, + ProcessGroupVariable, +) +from .functions import ( + CollectiveFunctionRewriteVariable, + FunctoolsPartialVariable, + TritonKernelVariable, + UserMethodVariable, +) +from .higher_order_ops import TorchHigherOrderOperatorVariable +from .iter import ItertoolsVariable +from .lazy import LazyVariableTracker +from .lists import ( + BaseListVariable, + ListVariable, + NamedTupleVariable, + RangeVariable, + RestrictedListSubclassVariable, + SizeVariable, + SliceVariable, + TupleIteratorVariable, + TupleVariable, +) +from .misc import ( + AutogradFunctionContextVariable, + AutogradFunctionVariable, + ComptimeVariable, + DebuggingVariable, + GetAttrVariable, + GetSetDescriptorVariable, + InspectSignatureVariable, + LambdaVariable, + MethodWrapperVariable, + NumpyVariable, + PythonModuleVariable, + SavedTensorBox, + TypingVariable, +) +from .nn_module import FSDPManagedNNModuleVariable, UnspecializedNNModuleVariable +from .optimizer import OptimizerVariable + +from .sdpa import SDPAParamsVariable +from .tensor import ( + NumpyNdarrayVariable, + SymNodeVariable, + TensorSubclassVariable, + TensorVariable, + UnspecializedPythonVariable, +) +from .torch import TorchCtxManagerClassVariable, TorchInGraphFunctionVariable +from .torch_function import build_torch_function_fn, TensorWithTFOverrideVariable +from .user_defined import ( + KeyedJaggedTensorVariable, + UserDefinedClassVariable, + UserDefinedObjectVariable, +) + + +log = logging.getLogger(__name__) + + +DimList = List + + +class _missing: + pass + + +@dataclasses.dataclass +class GraphArg: + source: Source + # TODO: storing a SymInt here but not a FakeTensor is a pretty strange + # thing to do. Probably should have example (which stores an int) and + # fake_example + _example: Union[TensorWeakRef, torch.SymInt] + is_unspecialized: bool + fake_tensor: Optional[torch._subclasses.fake_tensor.FakeTensor] + # UnspecializedPythonVariable often masquerades as a tensor. 
+ # We MUST NOT generate shape guard code + # that actually tries to access tensor properties on these values. + # is_tensor lets us tell if this graph arg actually is a tensor + # or not. + is_tensor: bool = True + # Sometimes, the Tensor we pass to example is freshly allocated (smh). + # Then we cannot only keep a weak reference to it. This lets you + # stash a strong reference too. + example_strong_ref: Optional[torch.Tensor] = None + + @property + def example(self): + if isinstance(self._example, TensorWeakRef): + r = self._example() + assert r is not None + return r + else: + return self._example + + def __post_init__(self): + if isinstance(self._example, torch.Tensor): + self._example = TensorWeakRef(self._example) + assert is_fake(self.fake_tensor) + + def reconstruct(self, codegen): + self.source.reconstruct(codegen) + + def erase(self): + self._example = None + self.example_strong_ref = None + + def __eq__(self, other): + return self.source.name() == other.source.name() + + +class BackwardStateGraphArg(GraphArg): + def __init__(self): + super().__init__( + source=None, + _example=BackwardState(), + is_unspecialized=False, + fake_tensor=None, + is_tensor=False, + ) + + def reconstruct(self, codegen): + assert codegen.tx.output.backward_state_var + codegen.load_import_from(BackwardState.__module__, "BackwardState") + codegen.call_function(0, True) + codegen.dup_top() + codegen.store(codegen.tx.output.backward_state_var) + + +@dataclasses.dataclass +class FrameStateSizeEntry: + scalar: Optional[int] + size: Optional[List[int]] + + +class VariableBuilder: + """Wrap a python value in a VariableTracker() instance""" + + def __init__( + self, + tx, + source: Source, + ): + assert ( + source is not None + ), "Consider SourcelessBuilder for ephemeral objects, usually objects created locally." + assert TracingContext.try_get() is not None, "Expected active TracingContext" + super().__init__() + self.tx = tx + self.source = source + self.name = source.name() + + def __call__(self, value): + if value in self.tx.output.side_effects: + side_effect_result = self.tx.output.side_effects[value] + dup_guard = make_dupe_guard(self.source, side_effect_result.source) + if dup_guard: + self.install_guards(dup_guard) + return side_effect_result + vt = self._wrap(value) + vt.source = self.source + if self._can_lift_attrs_to_inputs(vt): + vt = self.tx.output.side_effects.track_object_existing(value, vt) + return vt + + def _can_lift_attrs_to_inputs(self, vt): + if type(vt) in [ + TensorVariable, + TensorWithTFOverrideVariable, + UserDefinedObjectVariable, + NumpyNdarrayVariable, + ]: + return True + return False + + @staticmethod + @functools.lru_cache(None) + def _common_constants(): + return { + # We zero-one specialize shapes, so specialize these constants + # too + 0, + 1, + # NB: There used to be more constants here, but honestly it was + # pretty confusing. Note we specialize floats by default, and + # DON'T specialize ints by default. 
This all only matters with + # dynamic_shapes + } + + def get_source(self): + return self.source + + def install_guards(self, *guards): + source = self.get_source() + if ( + isinstance(source, ConstantSource) + or source.guard_source() == GuardSource.CONSTANT + ): + return None + install_guard(*[source.make_guard(guard) for guard in guards], skip=1) + return {} + + def set_source_and_track_mutable(self, value, var): + assert isinstance(var, VariableTracker) + var.source = self.source + return self.tx.output.side_effects.track_mutable(value, var) + + @classmethod + @functools.lru_cache(None) + def _type_dispatch(cls): + # NB: Careful not to close over self to avoid ref cycle from lru_cache + entries = [ + ( + ( + torch.Tensor, + torch.nn.Parameter, + torch._subclasses.FakeTensor, + torch._subclasses.functional_tensor.FunctionalTensor, + ), + cls.wrap_tensor, + ), + ( + (tuple, list, odict_values, collections.deque, torch.Size), + cls.wrap_listlike, + ), + (tuple_iterator, cls.wrap_tuple_iterator), + ((slice, range), cls.wrap_slice_range), + (tuple(common_constant_types), cls.wrap_literal), + ] + + if config.trace_numpy and np: + entries.append((np.ndarray, cls.wrap_numpy_ndarray)) + + result = {} + for ts, fn in entries: + for t in ts if isinstance(ts, tuple) else (ts,): + assert t not in result + result[t] = fn + + return result + + @classmethod + @functools.lru_cache(None) + def _id_dispatch(cls): + from ..comptime import comptime + + entries = [ + ( + inspect.signature, + lambda self, value: LambdaVariable( + InspectSignatureVariable.create, + source=self.source, + **self.install_guards(GuardBuilder.CLOSURE_MATCH), + ), + ), + (comptime, lambda self, value: ComptimeVariable()), + ( + dataclasses.fields, + lambda self, value: LambdaVariable( + _dataclasses_fields_lambda, + source=self.source, + **self.install_guards(GuardBuilder.FUNCTION_MATCH), + ), + ), + ] + + result = {} + for ts, fn in entries: + for t in ts if isinstance(ts, (tuple, list)) else (ts,): + assert t not in result + result[id(t)] = fn + + return result + + def _wrap(self, value): + # import here to avoid circular dependencies + from torch.utils._triton import has_triton + + if has_triton(): + from triton.runtime.autotuner import Autotuner + from triton.runtime.jit import JITFunction + else: + + class JITFunction: + pass + + class Autotuner: + pass + + # Handle exact type() match + type_dispatch = self._type_dispatch().get(type(value)) + if type_dispatch is not None: + return type_dispatch(self, value) + + # Handle exact id() match + id_dispatch = self._id_dispatch().get(id(value)) + if id_dispatch is not None: + return id_dispatch(self, value) + + # Note - There are some nested values where types mismatch! + # We want to get those out and wrap those. + value = inspect.getattr_static(value, "_torchdynamo_inline", value) + + # Everything else (NB: order matters!) + if is_traceable_wrapper_subclass(value) or istype( + value, config.traceable_tensor_subclasses + ): + return self.wrap_tensor(value) + elif is_namedtuple(value): + return self.wrap_listlike(value) + + elif value is torch.utils._pytree.SUPPORTED_NODES: + # For SUPPORTED_NODES, we guard on the dictionary version (PEP509) + # under the assumption that the values themselves don't change. 
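The SUPPORTED_NODES branch above guards the registry by its dictionary version (PEP 509's per-dict version tag, which changes whenever the dict is mutated) rather than by comparing values. As a loose, pure-Python approximation of what such a guard buys you, the sketch below snapshots the dict's identity-level contents and re-checks them later; the `dict_fingerprint` and `make_dict_guard` helpers are invented for this illustration and are not part of Dynamo, whose real guard consults the interpreter-level version tag instead of walking the dict.

```python
# Illustrative sketch only: a pure-Python stand-in for a PEP 509 style
# "dict version" guard. It snapshots key/value identities so that any change
# in membership, or in which object a key maps to, invalidates the guard.
from typing import Any, Dict, Tuple


def dict_fingerprint(d: Dict[Any, Any]) -> Tuple[Tuple[int, int], ...]:
    # Identity-based snapshot of the dict's current contents.
    return tuple((id(k), id(v)) for k, v in d.items())


def make_dict_guard(d: Dict[Any, Any]):
    expected = dict_fingerprint(d)

    def guard() -> bool:
        # True while the dict still "looks the same" as when we traced.
        return dict_fingerprint(d) == expected

    return guard


registry = {"leaf": object(), "node": object()}
guard = make_dict_guard(registry)
assert guard()                 # nothing changed yet
registry["extra"] = object()
assert not guard()             # membership changed -> guard fails, recompile
```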
+ self.install_guards(GuardBuilder.DICT_VERSION) + result = { + ConstantVariable.create(k): UserDefinedObjectVariable( + v, + source=GetItemSource( + self.get_source(), ConstDictKeySource(self.get_source(), i) + ), + ) + for i, (k, v) in enumerate(value.items()) + } + return ConstDictVariable(result, type(value)) + elif value is sys.modules: + self.install_guards(GuardBuilder.FUNCTION_MATCH) + return PythonSysModulesVariable(source=self.source) + elif istype(value, (dict, collections.defaultdict, collections.OrderedDict)): + if not value and self.get_source().is_nn_module(): + # It is faster to guard on 'false' property than to guard + # on actual dict keys, but we can't do this fast guard in general because + # it omits a crucial type check that ensures the value is actually still a dict at runtime. + + # Why is this OK for (specialized) nnmodules? We set up a setattr hook + # to check for module property mutations, which does a reasonable, + # but not completely secure job ensuring a property wasn't changed. + self.install_guards(GuardBuilder.BOOL_FALSE) + else: + self.install_guards(GuardBuilder.DICT_LENGTH) + + # Optimisation for the common case strings, ints, etc + all_const = all(ConstantVariable.is_literal(k) for k in value.keys()) + if all_const: + self.install_guards(GuardBuilder.DICT_CONST_KEYS) + + # We need all the keys to be hashable. We do this within the + # _HashableTracker class in dicts.py + def build_key_value(i, k, v): + if all_const: + key = ConstantVariable.create(k) + source_key = k + else: + source_key = ConstDictKeySource(self.get_source(), i) + key = LazyVariableTracker.create(k, source_key) + + source_value = GetItemSource(self.get_source(), source_key) + value = LazyVariableTracker.create(v, source_value) + + return key, value + + result = dict( + build_key_value(i, k, v) for i, (k, v) in enumerate(value.items()) + ) + + if istype(value, collections.defaultdict): + factory_source = AttrSource(self.source, "default_factory") + result = DefaultDictVariable( + result, + type(value), + default_factory=VariableBuilder(self.tx, factory_source)( + value.default_factory + ), + source=self.source, + ) + else: + result = ConstDictVariable(result, type(value), source=self.source) + + return self.set_source_and_track_mutable(value, result) + elif isinstance(value, torch.nn.Module): + return self.wrap_module(value) + elif ConstantVariable.is_literal(value): # non-atomic literals + return self.wrap_literal(value) + elif istype(value, frozenset) and ( + ConstantVariable.is_literal(x) for x in value + ): + # For frozenset, we can guard by object ID instead of value + # equality, this allows us to handle non-literal values + self.install_guards(GuardBuilder.ID_MATCH) + return ConstantVariable.create(value=value, source=self.source) + elif isinstance(value, enum.Enum): + self.install_guards(GuardBuilder.ID_MATCH) + return EnumVariable(value=value, source=self.source) + elif DebuggingVariable.is_reorderable_logging_function(value): + # Put this above builtin_callable so that print() can be handled + # along with other builtin debugging functions + self.install_guards(GuardBuilder.BUILTIN_MATCH) + return DebuggingVariable(value, source=self.source) + elif is_utils_checkpoint(value): + return build_checkpoint_variable(source=self.source) + elif isinstance(value, functools.partial): + func_src = AttrSource(self.get_source(), "func") + func_obj = VariableBuilder(self.tx, func_src)(value.func) + + args = [] + args_source = AttrSource(self.get_source(), "args") + for i, arg in 
enumerate(value.args): + args.append( + VariableBuilder(self.tx, GetItemSource(args_source, i))(arg) + ) + + keywords = {} + keywords_source = AttrSource(self.get_source(), "keywords") + for k, v in value.keywords.items(): + if not ConstantVariable.is_literal(k): + unimplemented("functools.partial with non-literal keyword") + keywords[k] = VariableBuilder( + self.tx, GetItemSource(keywords_source, k) + )(v) + + install_guard( + self.get_source().make_guard(GuardBuilder.TYPE_MATCH), + keywords_source.make_guard(GuardBuilder.DICT_KEYS), + args_source.make_guard(GuardBuilder.SEQUENCE_LENGTH), + ) + return FunctoolsPartialVariable(func_obj, args, keywords) + elif is_typing(value): + # typing.List, typing.Mapping, etc. + self.install_guards(GuardBuilder.ID_MATCH) + return TypingVariable( + value, + source=self.source, + ) + elif np is not None and isinstance(value, np.generic): + # numpy array scalars: convert to 0D arrays + return self.wrap_numpy_ndarray(np.asarray(value)) + elif is_numpy(value): + assert np + self.install_guards( + GuardBuilder.FUNCTION_MATCH + if callable(value) + else GuardBuilder.TYPE_MATCH + ) + return NumpyVariable(value, source=self.source) + # NB: These can't be put in type_dispatch, they have to run later + elif CollectiveFunctionRewriteVariable.can_rewrite(value): + self.install_guards(GuardBuilder.FUNCTION_MATCH) + return CollectiveFunctionRewriteVariable.create( + self.tx, + value, + source=self.source, + ) + elif istype(value, torch.autograd.function.FunctionMeta): + self.install_guards(GuardBuilder.FUNCTION_MATCH) + return AutogradFunctionVariable( + value, + source=self.source, + ) + elif isinstance(value, torch.autograd.function.FunctionCtx): + saved_tensors_source = AttrSource(self.source, "saved_tensors") + install_guard( + self.source.make_guard(GuardBuilder.TYPE_MATCH), + saved_tensors_source.make_guard(GuardBuilder.SEQUENCE_LENGTH), + ) + saved_tensors = [ + VariableBuilder(self.tx, GetItemSource(saved_tensors_source, n))(v) + for n, v in enumerate(value.saved_tensors) + ] + return self.tx.output.side_effects.track_object_existing( + value, + AutogradFunctionContextVariable( + value, + source=self.source, + saved_tensors=SavedTensorBox(saved_tensors), + ), + ) + elif ( + isinstance(value, types.MethodType) + and istype( + getattr(value, "__self__", None), torch.autograd.function.FunctionMeta + ) + and getattr(value, "__name__", "") == "apply" + and value == getattr(value.__self__, "apply", None) + ): + # handle aliased autograd function `apply` calls + self.install_guards(GuardBuilder.FUNCTION_MATCH) + return GetAttrVariable( + AutogradFunctionVariable( + value.__self__, source=AttrSource(self.source, member="__self__") + ), + "apply", + ) + elif callable(value) and trace_rules.lookup_callable(value) is not None: + if is_callable_allowed(value): + self.tx.output.has_user_defined_allowed_in_graph = True + return trace_rules.lookup_callable(value).create_with_source( + value, source=self.source + ) + elif np and isinstance(value, np.number): + return self.wrap_unspecialized_primitive(value) + elif DataClassVariable.is_matching_object(value): + self.install_guards(GuardBuilder.TYPE_MATCH) + return DataClassVariable.wrap(self, value) + elif HFPretrainedConfigVariable.is_matching_object(value): + self.install_guards(GuardBuilder.TYPE_MATCH) + return HFPretrainedConfigVariable(value) + elif isinstance(value, HigherOrderOperator): + self.install_guards(GuardBuilder.TYPE_MATCH, GuardBuilder.NAME_MATCH) + return TorchHigherOrderOperatorVariable.make(value, 
source=self.source) + elif isinstance(value, torch.cuda.StreamContext): + self.install_guards(GuardBuilder.ID_MATCH) + stream_source = AttrSource(self.source, "stream") + stream_var = VariableBuilder(self.tx, stream_source)(value.stream) + return StreamContextVariable.create(self.tx, stream_var) + elif isinstance(value, _StreamBase): + self.install_guards(GuardBuilder.ID_MATCH) + return StreamVariable( + None, + value, + value.device, + source=self.source, + ) + elif isinstance(value, (torch._C._SDPAParams)): + self.install_guards(GuardBuilder.TYPE_MATCH) + return SDPAParamsVariable.create(self.tx, value, self.source) + elif isinstance(value, _EventBase): + self.install_guards(GuardBuilder.ID_MATCH) + return EventVariable( + None, + value, + source=self.source, + ) + elif ( + isinstance(value, torch._C._TensorMeta) + and value in config.traceable_tensor_subclasses + ): + return TensorSubclassVariable(value, source=self.source) + elif ( + istype(value, contextlib.nullcontext) + and inspect.getattr_static(value, "enter_result", None) is None + ): + self.install_guards(GuardBuilder.TYPE_MATCH) + return NullContextVariable(source=self.source) + elif KeyedJaggedTensorVariable.is_matching_object(value): + self.install_guards(GuardBuilder.TYPE_MATCH) + result = KeyedJaggedTensorVariable(value, source=self.source) + # TODO: this doing it manually is bad + return self.tx.output.side_effects.track_object_existing(value, result) + elif isinstance(value, torch.optim.Optimizer): + self.install_guards(GuardBuilder.TYPE_MATCH) + return OptimizerVariable(value, source=self.source) + elif ProcessGroupVariable.is_process_group(value): + self.install_guards(GuardBuilder.ID_MATCH) + return ProcessGroupVariable(value, source=self.source) + elif DeviceMeshVariable.is_device_mesh(value): + # TODO: see if we need to add custom guard instead of a simple ID_MATCH + self.install_guards(GuardBuilder.ID_MATCH) + return DeviceMeshVariable(value, source=self.source) + elif PlacementClassVariable.is_placement_type(value): + # TODO: see if we need to add custom guard instead of a simple ID_MATCH + self.install_guards(GuardBuilder.ID_MATCH) + return PlacementClassVariable(value, source=self.source) + elif PlacementVariable.is_placement(value): + # TODO: see if we need to add custom guard instead of a simple ID_MATCH + self.install_guards(GuardBuilder.ID_MATCH) + return PlacementVariable( + value, + source=self.source, + ) + elif istype(value, type) and value in itertools.__dict__.values(): + self.install_guards(GuardBuilder.FUNCTION_MATCH) + return ItertoolsVariable(value, source=self.source) + elif isinstance(value, torch.SymBool): + # Note: the idea here is to re-use the infra we've built for SymInt by simulating the + # user provided SymBool with a SymInt in dynamo. + + # Concretely, + # 1. We create a SymInt in dynamo's shape_env, whose source is constructed as ConvertIntSource(self.source). + # so that guards on the SymInts can be effectively applied on the original SymBool in user program. + # 2. We create a SymBool based on the SymInt in dynamo's ShapeEnv. Because the original user program + # depends on the value being a SymBool. This allows dynamo to interpret the user's program correctly. 
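The comment above describes re-using the SymInt machinery for a user-provided SymBool: the bool is re-derived as `new_symint == 1`, so any guard Dynamo records ends up on the integer symbol and therefore constrains the original bool. The toy sketch below shows just that encoding; the `FakeSymInt` class is invented for the example and only stands in for a ShapeEnv-backed symbol, it is not Dynamo's ShapeEnv API.

```python
# Toy sketch of the SymBool-as-SymInt encoding. A plain object with a recorded
# guard list stands in for a ShapeEnv symbol: the bool the user sees is derived
# as (symbol == 1), so the guard we rely on is expressed on the integer.
from dataclasses import dataclass, field
from typing import List, Tuple


@dataclass
class FakeSymInt:
    hint: int                                   # concrete value seen at trace time
    guards: List[str] = field(default_factory=list)

    def eq(self, other: int) -> bool:
        self.guards.append(f"s == {other}")     # record the guard we depended on
        return self.hint == other


def wrap_symbool(value: bool) -> Tuple[FakeSymInt, bool]:
    sym = FakeSymInt(hint=int(value))           # encode the bool as a 0/1 symbol
    derived_bool = sym.eq(1)                    # the program-visible bool
    return sym, derived_bool


sym, as_bool = wrap_symbool(True)
assert as_bool is True
assert sym.guards == ["s == 1"]                 # the guard lives on the int symbol
```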
+ + value_hint = value.node.require_hint() + new_source = ConvertIntSource(self.source) + + new_symint = self.tx.output.shape_env.create_unspecified_symint_and_symbol( + int(value_hint), + new_source, + dynamic_dim=DimDynamic.DYNAMIC, + ) + + sym_node_proxy = self.tx.output.root_tracer.create_graph_input( + re.sub(r"[^a-zA-Z0-9]+", "_", self.name), + type(new_symint), + source=new_source, + ) + + sym_node_proxy.node.meta["grapharg"] = GraphArg( + new_source, + new_symint, + False, + None, + is_tensor=False, + example_strong_ref=new_symint, + ) + self.tx.output.bound_symbols.add(new_symint.node.expr) + self.tx.output.tracked_fakes.append( + TrackedFake(new_symint, new_source, None) + ) + return SymNodeVariable( + sym_node_proxy, + new_symint == 1, + ) + elif isinstance(value, (JITFunction, Autotuner)): + self.install_guards(GuardBuilder.ID_MATCH) + return TritonKernelVariable( + value, + None, # No kernel idx provided + None, # No grid provided + source=self.source, + ) + elif isinstance(value, torch.amp.autocast_mode.autocast): + self.install_guards(GuardBuilder.ID_MATCH) + return AutocastModeVariable( + target_values=[ + value.device, + value.fast_dtype, + value._enabled, + value._cache_enabled, + ], + source=self.source, + ) + elif TorchCtxManagerClassVariable.is_matching_cls(value): + self.install_guards(GuardBuilder.FUNCTION_MATCH) + return TorchCtxManagerClassVariable(value, source=self.source) + elif is_function_or_wrapper(value): + value, attr_name = unwrap_with_attr_name_if_wrapper(value) + # For these wrappers, Dynamo points to the wrapped function, + # so source needs to be updated as well. + if attr_name is not None: + self.source = AttrSource(self.source, attr_name) + return trace_rules.lookup(value).create_with_source( + value, source=self.source + ) + # Don't use istype, since some python modules are not subclasses of types.ModuleType directly. + # E.g, type(torch.ops) -> , + # type(torch.backends.cudnn) -> + elif isinstance(value, (types.ModuleType, replay_record.DummyModule)): + self.install_guards(GuardBuilder.FUNCTION_MATCH) + return PythonModuleVariable( + value, + source=self.source, + ) + elif isinstance(value, types.MethodType) and isinstance( + value.__self__, (torch.nn.Module, torch.utils._pytree.TreeSpec) + ): + # don't let MethodTypes fall through to UserDefinedObject, + # which doesn't support 'CALL_FUNCTION' + + # TODO(whc): Why do we limit this to methods on NNModules? + # I don't have a good reason for this, but it preserves the existing behavior + # for MBartForConditionalGeneration, which generates many graph breaks and OOMs otherwise. + # I suspect we probably want to relax this check and dig deeper there. + + # In order to construct a MethodVariable in Dynamo, we start with an actual method obj from python, + # but need to separately wrap its underlying `__func__` and its `self` argument. We wrap `self` here + # and then `__func__` gets wrapped inside UserMethodVariable. 
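The bound-method branch above leans on CPython's standard decomposition of a method object into `__self__` and `__func__`; wrapping the two halves separately is sound because calling the bound method is equivalent to calling `__func__` with `__self__` prepended. A small stand-alone illustration of that invariant (the `Scaler` class is made up for the example):

```python
# Plain-Python illustration of the bound-method decomposition relied on above.
import types


class Scaler:
    def __init__(self, factor: int) -> None:
        self.factor = factor

    def scale(self, x: int) -> int:
        return self.factor * x


s = Scaler(3)
bound = s.scale                            # a types.MethodType object
assert isinstance(bound, types.MethodType)
assert bound.__self__ is s                 # the receiver, wrapped on its own
assert bound.__func__ is Scaler.scale      # the plain function, wrapped separately
# Calling the bound method == calling __func__ with __self__ as the first arg.
assert bound(5) == bound.__func__(bound.__self__, 5) == 15
```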
+ self_obj = VariableBuilder( + self.tx, source=AttrSource(self.source, "__self__") + )(value.__self__) + assert self_obj and isinstance( + self_obj, VariableTracker + ), "Failed to produce a valid self obj" + self.install_guards(GuardBuilder.FUNCTION_MATCH) + return UserMethodVariable( + value.__func__, + self_obj, + source=self.source, + ) + elif isinstance(value, types.GetSetDescriptorType): + self.install_guards(GuardBuilder.FUNCTION_MATCH) + return GetSetDescriptorVariable(value) + elif isinstance(value, types.MethodWrapperType): + self.install_guards(GuardBuilder.FUNCTION_MATCH) + return MethodWrapperVariable(value) + elif issubclass(type(value), type): + if value in (torch.utils.hooks.BackwardHook, torch.nn.Parameter): + # TODO(jansel): combine this case with the one above + return trace_rules.lookup(value).create_with_source( + value, source=self.source + ) + if value is torch.autograd._unsafe_preserve_version_counter: + self.install_guards(GuardBuilder.FUNCTION_MATCH) + return PreserveVersionContextVariable.constructor(self.tx) + # This is a userdefined class, so install an ID_MATCH even if its a + # global variable. + self.install_guards(GuardBuilder.ID_MATCH) + return UserDefinedClassVariable( + value, + source=self.source, + ) + elif RestrictedListSubclassVariable.is_matching_cls(type(value)): + self.install_guards(GuardBuilder.SEQUENCE_LENGTH) + return self.set_source_and_track_mutable( + value, + RestrictedListSubclassVariable( + [ + LazyVariableTracker.create( + value=value[i], source=GetItemSource(self.source, i) + ) + for i in range(len(value)) + ], + user_cls=type(value), + user_cls_source=AttrSource(self.source, "__class__"), + ), + ) + else: + self.install_guards(GuardBuilder.TYPE_MATCH) + result = UserDefinedObjectVariable(value, source=self.source) + if not SideEffects.cls_supports_mutation_side_effects(type(value)): + # don't allow STORE_ATTR mutation with custom __setattr__ + return result + return self.tx.output.side_effects.track_object_existing(value, result) + + def wrap_listlike(self, value: Union[tuple, list, odict_values, NamedTuple]): + if config.specialize_int and type(value) is torch.Size: + self.install_guards(GuardBuilder.CONSTANT_MATCH) + return ConstantVariable.create(value=value) + # One can index a tensor with a list/tuple. Therefore, we need to + # have a stricter match. 
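The "stricter match" noted above comes from the fact that list-likes participate in tensor indexing, where both their type and their contents matter: a list of indices triggers advanced indexing, while the same values as a tuple are treated like multi-dimensional indexing. A short demonstration, assuming only that torch is importable:

```python
# Why list-likes need a stricter guard than a bare type check: the same two
# integers index a tensor very differently as a list versus as a tuple.
import torch

t = torch.arange(6).reshape(2, 3)   # tensor([[0, 1, 2], [3, 4, 5]])

rows = t[[0, 1]]   # advanced indexing: gathers rows 0 and 1 -> shape (2, 3)
elem = t[(0, 1)]   # same as t[0, 1]: selects a single element -> scalar tensor

assert rows.shape == (2, 3)
assert elem.item() == 1
```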
+ self.install_guards(GuardBuilder.SEQUENCE_LENGTH) + + for item in value: + if item is value: + unimplemented("list elements are pointing to the list itself") + + output = [ + LazyVariableTracker.create(item, source=GetItemSource(self.get_source(), i)) + for i, item in enumerate(value) + ] + + result = BaseListVariable.cls_for_instance(value)( + output, mutable_local=MutableLocal() + ) + if istype(value, list): + return self.set_source_and_track_mutable(value, result) + return result + + def wrap_tuple_iterator(self, value: tuple_iterator): + self.install_guards(GuardBuilder.TUPLE_ITERATOR_LEN) + output = [ + VariableBuilder(self.tx, TupleIteratorGetItemSource(self.get_source(), i))( + tuple_iterator_getitem(value, i) + ) + for i in range(tuple_iterator_len(value)) + ] + result = TupleIteratorVariable( + output, mutable_local=MutableLocal(), source=self.source + ) + + return self.set_source_and_track_mutable(value, result) + + def wrap_slice_range(self, value: Union[slice, range]): + items = [ + VariableBuilder(self.tx, AttrSource(self.get_source(), k))( + getattr(value, k) + ) + for k in ("start", "stop", "step") + ] + self.install_guards(GuardBuilder.TYPE_MATCH) + if isinstance(value, slice): + return SliceVariable(items, source=self.source) + else: + return RangeVariable(items, source=self.source) + + def wrap_module(self, value: torch.nn.Module): + from ..eval_frame import OptimizedModule + + if istype(value, OptimizedModule): + self.install_guards(GuardBuilder.TYPE_MATCH) + self.source = AttrSource(self.source, "_orig_mod") + return self.wrap_module(value._orig_mod) + + if ( + isinstance(value, (torch.nn.RNN, torch.nn.GRU, torch.nn.LSTM)) + and not config.allow_rnn + ): + unimplemented("TorchDynamo purposely graph breaks on RNN, GRU, LSTMs") + if mutation_guard.is_dynamic_nn_module(value): + # created dynamically, don't specialize on it + self.install_guards(GuardBuilder.TYPE_MATCH) + result = UnspecializedNNModuleVariable(value, source=self.source) + if not SideEffects.cls_supports_mutation_side_effects(type(value)): + # don't allow STORE_ATTR mutation with custom __setattr__ + return result + return self.tx.output.side_effects.track_object_existing(value, result) + elif issubclass( + value.__class__, torch.nn.parallel.distributed.DistributedDataParallel + ): + self.install_guards(GuardBuilder.TYPE_MATCH) + return UnspecializedNNModuleVariable(value) + elif getattr(value, "_is_fsdp_managed_module", False): + # See note [Dynamo treats FSDP wrapped modules as UnspecializedNNModule] + # in fully_sharded_data_parallel.py for more information + + # we can't do this assert inside FSDP constructor, + # since we don't know yet whether dynamo will be used + assert getattr( + value, "_fsdp_use_orig_params", False + ), "Dynamo only supports FSDP with use_orig_params=True" + + # Note on FSDP guarding + # 1. We expect FSDP wrapping mutates an nn module irreversably (no way to de-wrap). + # 2. Eager FSDP already assumes (requires, but without enforcement) that users don't mutate their + # model parameters/structure after FSDP wrapping, because FSDP wouldn't notice or update its FlatParams. + # + # Due to (1), once we enter this path we expect not to go back nor have to guard on type + # or _is_fsdp_managed_module. + # + # TODO(whc) We could add a guard on the opposite case, where a user compiled/ran + # pre-FSDP-wrapped model, then wrapped, to ensure that we recompile with the FSDP handling. 
+ # + # Due to (2), we skip guards on inner contents of fsdp_managed modules, by using FSDPNNModuleSource as the + # guard source. This behavior is gated on config.skip_fsdp_guards. + # + # ID_MATCH is required to disambiguate cases as simple as a unit test that constructs 2 models and wraps + # them differently with different FSDP configs. (test_dynamo_distributed.py -k test_fsdp_aot_eager) + self.install_guards(GuardBuilder.TYPE_MATCH, GuardBuilder.ID_MATCH) + return FSDPManagedNNModuleVariable(value, source=self.get_source()) + else: + return self.tx.output.register_attr_or_module( + value, + self.name, + source=self.get_source(), + # Guards are added inside register_attr_or_module + ) + + def wrap_literal(self, value): + unspec = not config.specialize_int + if unspec and type(value) is int: + # unspecializing int by default, but still + # specialize for the following conditions + if not TracingContext.get().force_unspec_int_unbacked_size_like and ( + value in self._common_constants() + # Assume integers from global variables want to be specialized + or not self.source.guard_source().is_local() + # Assume that integers that came from NN modules want to be + # specialized (as we don't expect users to be changing the + # NN modules on the fly) + or self.source.guard_source().is_nn_module() + or is_from_defaults(self.source) + ): + self.install_guards(GuardBuilder.CONSTANT_MATCH) + return ConstantVariable.create(value=value, source=self.source) + else: + return self.wrap_unspecialized_primitive(value) + else: + self.install_guards(GuardBuilder.CONSTANT_MATCH) + return ConstantVariable.create(value=value) + + def assert_not_wrapped_by_this_graph(self, value: torch.Tensor): + if is_fake(value) and maybe_get_fake_mode(value) is self.tx.fake_mode: + raise InternalTorchDynamoError( + "Cannot wrap a Tensor that has already been", + "wrapped by this instance of Dynamo", + ) + + def wrap_tensor(self, value: torch.Tensor): + source = self.get_source() + + # We cannot already be tracking the tensor, which implies + # it would have already been wrapped + assert value not in self.tx.output.side_effects + + if ( + source.guard_source().is_nn_module() + or get_static_address_type(value) is not None + ) and not source.guard_source().is_fsdp_module(): + self.assert_not_wrapped_by_this_graph(value) + return self.tx.output.register_attr_or_module( + value, self.name, source=source + ) + + if is_constant_source(source): + self.assert_not_wrapped_by_this_graph(value) + return self.tx.output.register_attr_or_module( + value, + re.sub(r"[^a-zA-Z0-9]+", "_", self.name), + source=source, + # Guards are added inside register_attr_or_module + ) + + if type(value) in config.traceable_tensor_subclasses: + # Ordinarily, we would fakeify a tensor so that it can get dynamic + # shapes and be computed on without triggering actual operations. + # However, how can we fakeify a tensor subclass? Ordinary + # inheritance (nor multiple inheritance) won't work work. + # + # Instead, our plan is to *manually simulate* the tensor subclass + # inheriting from a fake tensor with dynamo. This means our + # data representation for a tensor subclass will be a fake tensor + # + tensor subclass type + any extra data the subclass may have + # been storing on the tensor. 
Because all Python accesses are + # mediated through TensorWithTFOverrideVariable, we can ensure + # that we dispatch differently, e.g., according to + # __torch_function__ + # + # To simplify things for now, the __dict__ tracking bits haven't + # been implemented yet, but they can be added into this design at + # a later point in time. + subclass_type = type(value) + else: + assert type(value) in ( + torch.Tensor, + torch.nn.Parameter, + torch._subclasses.fake_tensor.FakeTensor, + torch._subclasses.functional_tensor.FunctionalTensor, + ) or is_traceable_wrapper_subclass(value), type(value) + subclass_type = None + + # NB: this just says we accessed a tensor from the same source again + # (e.g., a tensor lives in a global foo, and we LOAD_GLOBAL it twice). + # This is distinct from two distinct sources mapping to the same + # Tensor (per id())! No guard is necessary here. See below for the + # other case. + is_duplicate_tensor = source in self.tx.output.input_source_to_var + if is_duplicate_tensor: + return self.tx.output.input_source_to_var[source] + + # By this point, we should have deduplicated all tensors + self.assert_not_wrapped_by_this_graph(value) + + # tx.output has multiple tracers if we're introspecting HigherOrderOperator. + # When we've discovered an untracked tensor, then we actually need + # to get Dynamo to track the tensor (which is what this function does) + # and put it as a graph input on the root tracer. Later on, + # if the input is actually used in the body of the HigherOrderOperator, + # then the relevant SubgraphTracer will lift it to being an input of + # the subgraph. + # See NOTE [HigherOrderOperator tracing design] for more details. + + tensor_proxy = self.tx.output.root_tracer.create_graph_input( + re.sub(r"[^a-zA-Z0-9]+", "_", self.name), type(value), source=source + ) + options = {} + if type(value) in config.traceable_tensor_subclasses: + options["torch_function_fn"] = build_torch_function_fn( + self.tx, value, self.source + ) + self.install_guards(GuardBuilder.TYPE_MATCH) + + if ( + isinstance(value, torch.Tensor) + and value.is_nested + and not isinstance(value, torch.nested._internal.nested_tensor.NestedTensor) + ): + unimplemented("torch.compile does not support strided NestedTensor") + + if is_sparse_any(value): + unimplemented( + f"torch.compile does not support sparse Tensor with {value.layout} layout" + ) + + tensor_variable = wrap_fx_proxy( + tx=self.tx, + proxy=tensor_proxy, + example_value=value, + subclass_type=subclass_type, + source=source, + **options, + ) + + self.install_guards( + functools.partial( + GuardBuilder.TENSOR_MATCH, + value=value + if isinstance(source, NumpyTensorSource) + else TensorWeakRef(value), + ) + ) + + # We install TYPE_MATCH guards for traceable wrapper subclass object, + # and recursively install corresponding guard for each inner attribute. 
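Editor's note: the traceable_tensor_subclasses branch above is what lets a __torch_function__ tensor subclass be wrapped as a TensorWithTFOverrideVariable instead of graph-breaking. A minimal sketch of how a subclass would be opted in from user code; the ScaledTensor class is illustrative (not part of this patch), and whether a given subclass traces cleanly depends on its __torch_function__ body.

import torch
import torch._dynamo

class ScaledTensor(torch.Tensor):
    # Hypothetical pass-through subclass, used only for illustration.
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        return super().__torch_function__(func, types, args, kwargs or {})

# Registering the type makes wrap_tensor take the subclass_type path above
# (build_torch_function_fn then captures the subclass's __torch_function__).
torch._dynamo.config.traceable_tensor_subclasses.add(ScaledTensor)

@torch.compile(backend="eager")
def double(x):
    return x * 2

double(torch.randn(4).as_subclass(ScaledTensor))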
+ if is_traceable_wrapper_subclass(value): + self.install_guards(GuardBuilder.TYPE_MATCH) + attrs, _ = value.__tensor_flatten__() + for attr in attrs: + inner_value = getattr(value, attr) + inner_source = AttrSource(self.source, attr) + VariableBuilder(self.tx, inner_source)(inner_value).recursive_realize() + + self.tx.output.input_source_to_var[source] = tensor_variable + assert "tensor_dict" not in tensor_proxy.node.meta + tensor_proxy.node.meta["tensor_dict"] = value.__dict__.copy() + + # Note: this information is conveyed via subclass_type now + fake_tensor_value = tensor_variable.proxy.node.meta["example_value"] + if maybe_get_fake_mode(fake_tensor_value) is not self.tx.fake_mode: + raise InternalTorchDynamoError("Wrapped Tensor must be this graph's fake") + + grapharg = GraphArg(source, value, False, fake_tensor_value) + tensor_proxy.node.meta["grapharg"] = grapharg + self.tx.output.add_symbol_bindings(grapharg) + return tensor_variable + + def wrap_numpy_ndarray(self, value): + assert np is not None + assert isinstance(value, np.ndarray) + + source = NumpyTensorSource(self.get_source()) + + from torch._numpy import _util + + readonly = not value.flags.writeable + if readonly: + try: + value.flags.writeable = True + except ValueError: + # One can not easily make nditer elements writable, + # but warning is not the end of the world + assert isinstance(value.base, np.nditer) + pass + + try: + tensor_value = _util._try_convert_to_tensor(value) + if readonly: + from torch._prims_common import clone_preserve_strides + + tensor_value = clone_preserve_strides(tensor_value) + except NotImplementedError as e: + # failed to convert to tensor, graph break + unimplemented(str(e)) + + # We do this because we want the full behavior of guarding the numpy ndarray as if it were + # a tensor. It's a little annoying to make a VT to throw out, but there's so many side effects here + # that there's not another great way to do this atm. + # This creates the right graphargs, as well as registration for guards in tensor names and shape env. + VariableBuilder(self.tx, source)(tensor_value).recursive_realize() + proxy = self.tx.output.root_tracer.create_graph_input( + re.sub(r"[^a-zA-Z0-9]+", "_", self.name), type(tensor_value), source=source + ) + options = {"source": source} + numpy_ndarray_variable = wrap_fx_proxy_cls( + target_cls=NumpyNdarrayVariable, + tx=self.tx, + proxy=proxy, + example_value=tensor_value, + **options, + ) + + self.tx.output.input_source_to_var[source] = numpy_ndarray_variable + example_value = numpy_ndarray_variable.proxy.node.meta["example_value"] + + # is_unspecialized should be true because we are wrapping a np.ndarray as argument input, and it needs to be + # converted to a tensor. 
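Editor's note: wrap_numpy_ndarray above is the NumPy-interop entry point: the ndarray argument is converted to a tensor, registered as a graph input with is_unspecialized=True, and traced through torch._numpy. A rough usage sketch, assuming a torch.compile build with NumPy tracing enabled.

import numpy as np
import torch

@torch.compile(backend="eager")
def np_fn(a):
    # `a` is wrapped into a NumpyNdarrayVariable; the actual graph input is the
    # converted tensor produced via _util._try_convert_to_tensor above.
    return np.sin(a) * 2.0

out = np_fn(np.arange(4.0))  # an ndarray comes back at the Python boundary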
+ grapharg = GraphArg( + source, + tensor_value, + is_unspecialized=True, + fake_tensor=example_value, + is_tensor=True, + example_strong_ref=tensor_value, + ) + proxy.node.meta["grapharg"] = grapharg + + return numpy_ndarray_variable + + def wrap_unspecialized_primitive(self, value): + if self.name in self.tx.output.unspec_variable_map: + return self.tx.output.unspec_variable_map[self.name] + else: + shape_env = self.tx.output.shape_env + if TracingContext.get().force_unspec_int_unbacked_size_like and isinstance( + value, int + ): + wrapped_value = shape_env.create_unbacked_symint() + _constrain_range_for_size(wrapped_value) + self.tx.output.bound_symbols.add(wrapped_value.node.expr) + self.tx.output.tracked_fakes.append( + TrackedFake(wrapped_value, self.source, None) + ) + + # NB: We do not do float. For motivation, see + # https://docs.google.com/document/d/1INSCdYu1PxXcr43HrD82OudeEuS-qxQe1yZmLg2wy6A/edit + # but the general idea is that we generate kernels that can + # take unspecialized floats and use them in sizevar computation + elif ( + isinstance(value, int) + and not is_constant_source(self.get_source()) + and not isinstance(self.get_source(), RandomValueSource) + ): + if torch._dynamo.config.specialize_int: + # If specialize_int is False, also return + # a constant (but this should have been handled + # in the caller, TBH) + self.install_guards(GuardBuilder.CONSTANT_MATCH) + return ConstantVariable.create(value=value, source=self.source) + + name = self.source.name() + if name not in self.tx.output.frame_state: + # Note - this essentially means that if this name gets reused as a tensor, + # it will start fully dynamic. That should always be a safe option, and not awfully inefficient. + # Alternatively, if we want to improve pef here, we can add a third state of unset, but I am not + # sure that is necessary for now. 
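Editor's note: the frame_state bookkeeping that follows implements "automatic dynamic" for plain Python ints: the first observed value is recorded, and a later call with a different value clears the entry so the recompile wraps the int as an unspecified SymInt instead of re-specializing. A hedged sketch of the observable behavior under the default config (specialize_int=False, assume_static_by_default=True, automatic_dynamic_shapes=True).

import torch

@torch.compile(backend="eager")
def scale(x, k):
    return x * k

x = torch.randn(4)
scale(x, 2)  # first call: frame_state records scalar=2; k is specialized (CONSTANT_MATCH)
scale(x, 3)  # value changed -> scalar reset to None -> recompile creates a SymInt for k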
+ frame_state_entry = FrameStateSizeEntry(scalar=value, size=None) + else: + frame_state_entry = self.tx.output.frame_state[name] + if frame_state_entry.scalar != value: + log.debug( + "automatic dynamic int %s val %s != %s", + name, + value, + frame_state_entry.scalar, + ) + frame_state_entry.scalar = None + self.tx.output.frame_state[name] = frame_state_entry + + # TODO: This should be dynamic, as we in general do not + # know if bare integers are actually going to be sizevars + # and it is inappropriate to eagerly duck size them with + # real sizevars + if ( + config.automatic_dynamic_shapes and frame_state_entry.scalar is None + ) or not config.assume_static_by_default: + dynamic_dim = DimDynamic.DYNAMIC + else: # assume_static_by_default + # TODO: dynamic_dim = DimDynamic.STATIC should work but + # for some reason it doesn't + self.install_guards(GuardBuilder.CONSTANT_MATCH) + return ConstantVariable.create(value=value) + + wrapped_value = shape_env.create_unspecified_symint_and_symbol( + value, + source=self.source, + dynamic_dim=dynamic_dim, + ) + self.tx.output.bound_symbols.add(wrapped_value.node.expr) + + self.tx.output.tracked_fakes.append( + TrackedFake(wrapped_value, self.source, None) + ) + else: + wrapped_value = torch.tensor(value) + if not isinstance(self.get_source(), RandomValueSource): + install_guard(self.get_source().make_guard(GuardBuilder.TYPE_MATCH)) + options = {"source": self.get_source()} + if isinstance(wrapped_value, torch.Tensor): + options.update({"raw_value": value}) + + proxy = self.tx.output.root_tracer.create_graph_input( + re.sub(r"[^a-zA-Z0-9]+", "_", self.name), + type(wrapped_value), + source=self.get_source(), + ) + + unspec_var = wrap_fx_proxy_cls( + UnspecializedPythonVariable, + tx=self.tx, + proxy=proxy, + example_value=wrapped_value, + **options, + ) + self.tx.output.unspec_variable_map[self.name] = unspec_var + if not is_constant_source(self.get_source()): + if self.tx.export and not isinstance(self.get_source(), LocalSource): + raise AssertionError( + "Dynamo attempts to add additional input during export: value={}, source={}".format( + wrapped_value, self.get_source() + ) + ) + fake_tensor_value = None + if isinstance(unspec_var, ConstantVariable): + example_value = unspec_var.value + else: + example_value = unspec_var.proxy.node.meta["example_value"] + if is_fake(example_value): + fake_tensor_value = example_value + assert fake_tensor_value.fake_mode is self.tx.fake_mode, ( + f"fake mode ({fake_tensor_value.fake_mode}) from fake tensor metadata doesn't match mode" + "({self.tx.fake_mode}) from InstructionTranslator" + ) + + proxy.node.meta["grapharg"] = GraphArg( + self.get_source(), + wrapped_value, + isinstance(wrapped_value, torch.Tensor), + fake_tensor_value, + is_tensor=False, + example_strong_ref=wrapped_value, + ) + return unspec_var + + +def _dataclasses_fields_lambda(obj): + if isinstance(obj, UserDefinedObjectVariable): + value = obj.value + elif isinstance(obj, DataClassVariable): + value = obj.user_cls + else: + unimplemented(f"Dataclass fields handling fails for type {obj}") + items = [] + for field in dataclasses.fields(value): + source = None + if obj.source: + source = GetItemSource( + AttrSource(obj.source, "__dataclass_fields__"), field.name + ) + items.append(UserDefinedObjectVariable(field, source=source)) + return TupleVariable(items) + + +def wrap_fx_proxy(tx, proxy, example_value=None, subclass_type=None, **options): + kwargs = { + "tx": tx, + "proxy": proxy, + "example_value": example_value, + "subclass_type": 
subclass_type, + **options, + } + if subclass_type is None: + return wrap_fx_proxy_cls(target_cls=TensorVariable, **kwargs) + else: + result = wrap_fx_proxy_cls(target_cls=TensorWithTFOverrideVariable, **kwargs) + result.install_global(tx) + return result + + +# Note: Unfortunate split due to some gross classes existing that subclass TensorVariable +# Should be compositional instead +# +# This is a horribly complicated function that does too many things, to +# explain what it does, let's first talk about the classic usage wrap_fx_proxy +# for a TensorVariable. There are two primary modes of use: +# +# 1. Wrapping a pre-existing Tensor. In this case, example_value is set +# to the pre-existing Tensor. (Note that this example_value will NOT +# be the final example_value we put into node.meta['example_value'], +# instead it is converted into a fake tensor using +# wrap_to_fake_tensor_and_record and registered as a graph input.) +# +# 2. "Wrapping" the result of some Tensor operation Dynamo traced over. In +# this case, example_value is None (and we are going to figure it out +# ourselves using FakeTensors, via get_fake_value, which will run +# the operation represented by the (singular!) FX node referenced by +# the passed in proxy.) +# +# The expectation is you end up with a Tensor output, and everything is +# straightforwardly traced into the graph. +# +# In all cases, the returned `TensorVariable` subclass will have an `example_value` +# and that `example_value` must be a `FakeTensor` produced by the currently running +# instance of Dynamo. +# +# Upon closer inspection, you may notice that there are a slurry of non-Tensor +# output cases. What gives? Well, we sometimes trace operations into the +# graph that don't involve tensors. +# +# * Some operators return tuples; we need to recursively handle their +# contents +# +# * Some operators have side effects that will affect subsequent AOTAutograd +# tracing but don't otherwise return anything. +# +# * Some operators return symbolic ints/floats/bools which can go in the +# graph and be traced (but only if they're actually symbolic! If they're +# static you don't want to put them in the graph, which means you +# shouldn't call this function.) +# +# The common theme is that you only use this function WHEN YOU ARE TRACING +# SOMETHING INTO THE GRAPH. This is sort of obvious, because you can't call +# this function without a proxy. +def wrap_fx_proxy_cls( + target_cls, tx, proxy, example_value=None, subclass_type=None, **options +): + from ..symbolic_convert import InstructionTranslatorBase + + assert isinstance(tx, InstructionTranslatorBase) + if "guards" in options and options["guards"] is not None: + tx.output.guards.update(options["guards"]) + + assert "example_value" not in proxy.node.meta, f"{proxy.node.meta['example_value']}" + + initial_example_value = example_value + + def _clone_input(value): + if isinstance(value, torch.Tensor): + # tensor subclasses will not be converted to FakeTensors and need to be cloned + if not ( + isinstance(value, FakeTensor) + or ( + # Is functional tensor fakeified by this instance of Dynamo + torch._is_functional_tensor(value) + and maybe_get_fake_mode(value) is tx.fake_mode + ) + or value.is_nested + ): + # NB: ensure strides are preserved + value = clone_input(value) + + return value + + with preserve_rng_state(): + if example_value is None: + # only allow_non_graph_fake in this instance because we handle the non-fake + # cases properly below. 
+ example_value = get_fake_value(proxy.node, tx, allow_non_graph_fake=True) + + # Handle recursive calls here + elif maybe_get_fake_mode(example_value) is tx.fake_mode: + pass + + elif isinstance(example_value, torch.Tensor): + if tx.export: + # The legacy behavior for real value cache with subclasses was + # to perform a clone WITHOUT preserving the subclass. It's + # not entirely clear this is what you actually want though. + with torch._C.DisableTorchFunctionSubclass(): + proxy.tracer.real_value_cache[proxy.node] = _clone_input( + example_value + ) + # NB: If we're ignoring subclass, then the expectation is you will + # take the returned TensorVariable and wrap it into a more + # accurate TensorVariable that is able to track subclass-ness; + # otherwise this is wrong! + kwargs = { + "is_tensor": target_cls + in (TensorVariable, TensorWithTFOverrideVariable), + } + assert "source" in options and options["source"] is not None + kwargs["source"] = options["source"] + example_value = wrap_to_fake_tensor_and_record( + example_value, tx=tx, **kwargs + ) + if isinstance(example_value, torch.Tensor) and ( + maybe_get_fake_mode(example_value) is not tx.fake_mode + ): + raise InternalTorchDynamoError( + "`example_value` needs to be a `FakeTensor`" + f"wrapped by this instance of Dynamo. Found: {example_value}" + ) + + if isinstance(example_value, torch.Tensor): + is_parameter = isinstance(example_value, torch.nn.Parameter) + + # NB: In most (all?) cases, this does not actually do a clone. + # (WARNING: this means that if we mutate metadata on the fake + # tensor, the stored example value will update too!) + example_value = _clone_input(example_value) + proxy.node.meta["example_value"] = example_value + specialized_props = target_cls.specialize(example_value) + # TODO: not sure about this fake mode test + if ( + isinstance(example_value, torch._subclasses.fake_tensor.FakeTensor) + and example_value.fake_mode is tx.fake_mode + ): + tensor_type = subclass_type if subclass_type else torch.Tensor + specialized_props["class_type"] = ( + torch.nn.Parameter if is_parameter else tensor_type + ) + + options.update(specialized_props) + return target_cls(proxy, **options) + elif ( + hasattr(proxy.node.target, "__name__") + and proxy.node.target.__name__ == "set_state" + and isinstance(proxy.node.target.__self__, torch._C.Generator) + or proxy.node.target == torch.random.set_rng_state + ): + return TorchInGraphFunctionVariable(proxy.node.target) + elif ( + proxy.node.target == torch._C._DisableFuncTorch + or proxy.node.target == torch.cuda._is_in_bad_fork + ): + return UserDefinedObjectVariable(example_value) + elif istype(example_value, torch.Size) and all( + isinstance(x, int) for x in example_value + ): + sizes = [ConstantVariable.create(x) for x in example_value] + return SizeVariable(sizes, **options) + elif isinstance(example_value, (tuple, list)): + proxy.node.meta["example_value"] = example_value + unpacked = [] + for i, val in enumerate(example_value): + if val is None: + # nn.MultiheadAttention() can return None, see issue #175 + unpacked.append( + ConstantVariable.create(None, **options), + ) + else: + unpacked.append( + wrap_fx_proxy_cls( + target_cls, + tx, + proxy.tracer.create_proxy( + "call_function", operator.getitem, (proxy, i), {} + ), + example_value=val, + **options, + ) + ) + if isinstance(example_value, torch.Size): + # NB: Keep the old proxy around. 
See SizeVariable for an + # explanation why + return SizeVariable(unpacked, proxy, **options) + elif istype(example_value, tuple): + return TupleVariable(unpacked, **options) + elif istype(example_value, (list, immutable_list)): + return ListVariable(unpacked, mutable_local=MutableLocal(), **options) + else: + assert example_value.__class__.__module__ == "torch.return_types" or hasattr( + example_value, "_fields" + ), f"expected {example_value.__class__.__module__} == torch.return_types or named tuple but got {type(example_value)}" + return NamedTupleVariable(unpacked, example_value.__class__, **options) + elif example_value is None or proxy.node.target is torch.manual_seed: + return ConstantVariable.create(None, **options) + elif isinstance(example_value, (torch.SymInt, torch.SymFloat, torch.SymBool)): + proxy.node.meta["example_value"] = example_value + return SymNodeVariable(proxy, example_value, **options) + elif ( + inspect.isclass(proxy.node.target) + and issubclass(proxy.node.target, _StreamBase) + ) or proxy.node.target in [ + device_interface.current_stream + for _, device_interface in get_registered_device_interfaces() + ]: + proxy.node.meta["example_value"] = example_value + return StreamVariable(proxy, example_value, example_value.device, **options) + elif ( + inspect.isclass(proxy.node.target) and issubclass(proxy.node.target, _EventBase) + ) or proxy.node.target in [ + device_interface.Event + for _, device_interface in get_registered_device_interfaces() + ]: + proxy.node.meta["example_value"] = example_value + return EventVariable(proxy, example_value, **options) + elif proxy.node.target == "query" and proxy.node.op == "call_method": + proxy.node.meta["example_value"] = example_value + return ConstantVariable(example_value, **options) + elif ( + example_value is not None + and isinstance(example_value, _EventBase) + and proxy.node.target == "record_event" + and proxy.node.op == "call_method" + ): + proxy.node.meta["example_value"] = example_value + return EventVariable(proxy, example_value, **options) + elif isinstance(example_value, int) and proxy.node.target in [ + torch.sym_int, + getattr, + operator.getitem, + torch._utils._element_size, + torch.seed, + operator.mod, + torch._C._functorch._vmap_increment_nesting, + torch._C._functorch._vmap_decrement_nesting, + torch._functorch.vmap._validate_and_get_batch_size, + torch._C._functorch._grad_increment_nesting, + torch._C._functorch._grad_decrement_nesting, + # some mac builds are missing torch.distributed.get_rank() + getattr(torch.distributed, "get_rank", _missing), + getattr(torch.distributed, "get_world_size", _missing), + # This always wants to be in the graph, even if the constraint + # results in a constant int + torch._constrain_as_value, + torch._constrain_as_size, + ]: + proxy.node.meta["example_value"] = example_value + return ConstantVariable.create(example_value, **options) + elif isinstance(example_value, torch.backends.cuda.SDPAParams): + from .sdpa import SDPAParamsVariable + + proxy.node.meta["example_value"] = example_value + return SDPAParamsVariable(proxy, **options) + elif isinstance(example_value, bool) and proxy.node.target in [ + torch.backends.cuda.can_use_flash_attention, + torch.backends.cuda.can_use_efficient_attention, + ]: + proxy.node.meta["example_value"] = example_value + return ConstantVariable.create(example_value, **options) + else: + unimplemented( + "torch.* op returned non-Tensor " + + f"{typestr(example_value)} {proxy.node.op} {proxy.node.target}" + ) + + +# Tracks the sources of all 
fake tensors we wrap in Dynamo. +# Used by shape guard computation. +@dataclasses.dataclass +class TrackedFake: + fake: Union[FakeTensor, SymInt] + source: Source + # Is None when fake is SymInt + symbolic_context: Optional[SymbolicContext] + + def __hash__(self) -> int: + return hash((self.fake, self.source.name())) + + def __eq__(self, other: object) -> bool: + if isinstance(other, TrackedFake): + return self.fake is other.fake and self.source.name() == other.source.name() + return False + + +# Performs automatic dynamic dim determination. +# Returns a SymbolicContext +def _automatic_dynamic( + e, tx, source, static_shapes, outer_only=False +) -> SymbolicContext: + # strided NT not supported + if e.is_nested and not isinstance( + e, torch.nested._internal.nested_tensor.NestedTensor + ): + unimplemented("torch.compile does not support strided NestedTensor") + + name = source.name() + prior_policy = tx.output.tracing_context.tensor_to_context.get(e, None) + shape_env_to_source_to_symbol_cache = ( + prior_policy.shape_env_to_source_to_symbol_cache if prior_policy else None + ) + + # Get base context if the tensor is a view + view_base_context: Optional[SymbolicContext] = None + if e._is_view(): + base_source = AttrSource(source, "_base") + view_base_context = _automatic_dynamic(e._base, tx, base_source, static_shapes) + + if is_traceable_wrapper_subclass(e) and not outer_only: + # Get symbolic context for outer tensor + outer_context = _automatic_dynamic( + e, tx, source, static_shapes, outer_only=True + ) + + # Get symbolic contexts for inner tensors + attrs, _ = type(e).__tensor_flatten__(e) + inner_contexts = {} # mapping from attr -> symbolic context + for attr in attrs: + inner_tensor = getattr(e, attr) + inner_source = AttrSource(source, attr) + inner_context = _automatic_dynamic( + inner_tensor, tx, inner_source, static_shapes + ) + inner_contexts[attr] = inner_context + + return SubclassSymbolicContext( + dynamic_sizes=outer_context.dynamic_sizes, + constraint_sizes=outer_context.constraint_sizes, + view_base_context=view_base_context, + tensor_source=outer_context.tensor_source, + shape_env_to_source_to_symbol_cache=outer_context.shape_env_to_source_to_symbol_cache, + inner_contexts=inner_contexts, + ) + + if static_shapes: + return StatefulSymbolicContext( + dynamic_sizes=[DimDynamic.STATIC] * e.dim(), + constraint_sizes=[None] * e.dim(), + view_base_context=view_base_context, + tensor_source=source, + shape_env_to_source_to_symbol_cache=shape_env_to_source_to_symbol_cache, + ) + + # We preserve the dynamism of inputs. For example, when users call + # make_fx(torch.cond, tracing_mode="symbolic")(*args), inputs have SymInt sizes. + from torch.fx.experimental.symbolic_shapes import is_nested_int + + if any(isinstance(s, SymInt) and not is_nested_int(s) for s in e.size()): + return StatefulSymbolicContext( + dynamic_sizes=[ + DimDynamic.DYNAMIC if isinstance(s, SymInt) else DimDynamic.STATIC + for s in e.size() + ], + constraint_sizes=[None] * e.dim(), + view_base_context=view_base_context, + tensor_source=source, + shape_env_to_source_to_symbol_cache=shape_env_to_source_to_symbol_cache, + ) + + # Prep for automatic dynamic + frame_state_entry = None + if name not in tx.output.frame_state: + # If there is no entry for this source, add the tensor to frame state with its current static size. 
+ # E.g., {} -> {"x": [2, 4]} + frame_state_entry = FrameStateSizeEntry(None, None) + frame_state_entry.size = list(e.size()) + else: + frame_state_entry = tx.output.frame_state[name] + if frame_state_entry.size is not None: + if e.ndim != len(frame_state_entry.size): + # If there is already an entry, and the dim mismatches, replace the frame state entry with None. + # E.g. {"x": [2, 3, 4]} -> {"x": None} + log.debug( + "automatic dynamic %s dim %s != %s", + name, + e.ndim, + frame_state_entry.size, + ) + frame_state_entry.size = None + else: + # If there is already an entry, and the dim matches, for every size in the frame state which + # disagrees with the current static size, replace it with None. E.g., {"x": [2, 3]} -> {"x": [2, None]} + for i, dim in enumerate(frame_state_entry.size): + if dim is not None and e.size()[i] != dim: + log.debug( + "automatic dynamic %s size(%s) %s != %s", + name, + i, + e.size(i), + dim, + ) + frame_state_entry.size[i] = None + + # TODO: index export_constraints ahead of time so we don't have to + # do a linear scan every time here + t_id = id(e) + dim2constraint = {} + + def update_dim2constraint(dim, constraint_range, debug_name): + if dim in dim2constraint: + from torch.fx.experimental.symbolic_shapes import StrictMinMaxConstraint + + old_constraint_range, old_debug_name = dim2constraint[dim] + new_constraint_range = StrictMinMaxConstraint( + vr=constraint_range.vr & old_constraint_range.vr, + warn_only=False, + ) + # It is possible for (non-None) old_debug_name and debug_name to be different + # but this will only happen the corresponding Dims can be derived equal. + new_debug_name = old_debug_name or debug_name + dim2constraint[dim] = new_constraint_range, new_debug_name + else: + dim2constraint[dim] = constraint_range, debug_name + + if tx.output.export_constraints: + for constraint in tx.output.export_constraints: + if constraint.t_id == t_id: + update_dim2constraint( + constraint.dim, constraint.constraint_range, constraint.debug_name + ) + if constraint.shared is not None and constraint.shared.t_id == t_id: + # We process constraint ranges for each shared dimension separately + # so that we can directly check range constraint violations on them + # without looking up which other shared dimensions have this info. + # In other words, for this t_id, we will have processed all of its + # constraint ranges, no matter where / how they were specified, by + # by the end of this loop. 
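Editor's note: the same frame_state mechanism drives automatic dynamic shapes for tensors, matching the examples in the comments above ({} -> {"x": [2, 4]}; a disagreeing dim then becomes None). A small sketch of the resulting behavior with default settings.

import torch

@torch.compile(backend="eager")
def total(x):
    return x.sum()

total(torch.randn(2, 4))  # frame_state entry for x records size [2, 4]; compiled with static shapes
total(torch.randn(2, 8))  # dim 1 disagrees -> entry becomes [2, None];
                          # the recompile treats that dim as DimDynamic.DYNAMIC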
+ update_dim2constraint( + constraint.shared.dim, + constraint.constraint_range, + constraint.debug_name, + ) + + dynamic_dims = [] + constraint_dims = [] + for i in range(e.dim()): + # NB: mark dynamic has precedence over static + marked_dynamic = i in getattr(e, "_dynamo_dynamic_indices", set()) + marked_weak_dynamic = i in getattr(e, "_dynamo_weak_dynamic_indices", set()) + marked_static = i in getattr(e, "_dynamo_static_indices", set()) + + # NB: both static and dynamic have precedence over + automatic_dynamic = config.automatic_dynamic_shapes and ( + frame_state_entry.size is None or frame_state_entry.size[i] is None + ) + + # Reflect the user directive in the frame_state + # For dynamic, apply None always + if frame_state_entry.size and marked_dynamic: + log.debug("automatic dynamic %s marked dynamic", name) + frame_state_entry.size[i] = None + + # We will process constraints first, as they will imply that we + # have a dynamic dimension + # Precedence: export constraints > eager constraints + constraint = dim2constraint.get(i) + if constraint is None: + if marked_dynamic and not config.allow_ignore_mark_dynamic: + if hasattr(e, "_dynamo_dynamic_range"): + dim_range = [ + dr for dr in e._dynamo_dynamic_range if dr.dim == i + ].pop() + if dim_range.min is None and dim_range.max is None: + constraint_dim = RelaxedUnspecConstraint(warn_only=False) + else: + from torch.fx.experimental.symbolic_shapes import ( + StrictMinMaxConstraint, + ) + + constraint_dim = StrictMinMaxConstraint( + vr=ValueRanges(lower=dim_range.min, upper=dim_range.max), + warn_only=False, + ) + else: + constraint_dim = RelaxedUnspecConstraint(warn_only=False) + + elif not marked_static and automatic_dynamic: + constraint_dim = RelaxedUnspecConstraint(warn_only=True) + else: + constraint_dim = None + else: + constraint_dim, debug_name = constraint + if debug_name is not None: + dim_name = f"{name}.size()[{i}]" + tx.output.shape_env.source_name_to_debug_name[dim_name] = debug_name + constraint_dims.append(constraint_dim) + + # Now, figure out if the dim is dynamic/duck/static + if ( + constraint_dim is not None + or marked_dynamic + or marked_weak_dynamic + or is_nested_int(e.shape[i]) + ): + # NB: We could assert static_shapes is False here, but it + # seems better to allow the user to override symbolic_context in this + # case + dynamic = DimDynamic.DYNAMIC + elif static_shapes or config.assume_static_by_default or marked_static: + dynamic = DimDynamic.STATIC + else: + dynamic = DimDynamic.DUCK + + dynamic_dims.append(dynamic) + + tx.output.frame_state[name] = frame_state_entry + + return StatefulSymbolicContext( + dynamic_sizes=dynamic_dims, + constraint_sizes=constraint_dims, + view_base_context=view_base_context, + tensor_source=source, + shape_env_to_source_to_symbol_cache=shape_env_to_source_to_symbol_cache, + ) + + +# See note [Tensor Fakification and Symbol Caching] +def wrap_to_fake_tensor_and_record( + e, tx, *, source: Optional[Source], is_tensor: bool, parent_context=None +): + if ( + type(e) in (torch.Tensor, torch.nn.Parameter, FakeTensor) + or isinstance(e, torch.Tensor) + or is_traceable_wrapper_subclass(e) + ): + assert source is not None + static_shapes, reason = tensor_always_has_static_shape( + e, is_tensor, guard_source=source.guard_source() + ) + + if not parent_context: + symbolic_context = _automatic_dynamic(e, tx, source, static_shapes) + else: + # Parent contexts are passed in when we are recursively creating + # fake tensors for subclasses. 
A better design would be not to create a + # parent/child relationship, but to recursively call _automatic_dynamic + # as we recursively call wrap_to_fake_tensor_and_record. This runs + # into bugs around how meta_utils knows and works to create fake tensors + # with tensor subclasses. Ideally, dynamo would drive both the recursive + # wrap_to_fake_tensor_and_record and _automatic_dynamic policy creation. + assert isinstance(source, AttrSource) + inner_context_name = source.member + symbolic_context = parent_context.inner_contexts[inner_context_name] + + log.debug( + "wrap_to_fake %s %s %s %s", + source.name(), + tuple(e.shape), + symbolic_context, + type(e), + ) + fake_e = wrap_fake_exception( + lambda: tx.fake_mode.from_tensor( + e, + source=source, + symbolic_context=symbolic_context, + ) + ) + + if is_traceable_wrapper_subclass(fake_e): + attrs, _ = fake_e.__tensor_flatten__() + for attr in attrs: + fake_inner = getattr(fake_e, attr) + inner = getattr(e, attr) + inner_source = AttrSource(source, attr) + wrap_to_fake_tensor_and_record( + inner, + tx, + source=inner_source, + is_tensor=isinstance(fake_inner, torch.Tensor), + parent_context=symbolic_context, + ) + + tx.output.tracing_context.tensor_to_context[e] = symbolic_context + tx.output.tensor_weakref_to_sizes_strides[e] = { + "size": fake_e.size(), + "stride": fake_e.stride(), + } + + if ( + is_tensor + and not (static_shapes and source.is_nn_module()) + and not is_constant_source(source) + ): + tx.output.tracked_fakes.append( + TrackedFake(fake_e, source, symbolic_context) + ) + tx.output.tracked_fakes_id_to_source[id(e)].append(source) + + return fake_e + else: + return e + + +class SourcelessBuilder: + """ + Like builder, but stateless and does not require a source. Useful for simple type->VT objects, or objects + that are being created/evaporated during inlining (ex: consider a locally made list of tensors we then iterate over + .), such a list should not show up as an artifact from inputs, nor in reconstruction, nor in the graph. However, + there may be reasons to represent it as a ListVariable internally. + + NOTE - Objects produced here are born UNGUARDED due to the nature of sources! + + NOTE - This class is very new! It will have some rough edges, but it was created to stem the bleeding of giant + if/else type->VariableTracker trees that were cropping up all over dynamo. + """ + + def __call__(self, tx, value) -> VariableTracker: + if isinstance(value, VariableTracker): + # This is always valid to call, and useful for recursive calls. + return value + if isinstance(value, dataclasses._HAS_DEFAULT_FACTORY_CLASS): + return UserDefinedObjectVariable(value) + if ConstantVariable.is_literal(value): + return SourcelessBuilder.wrap_constant_literal(value) + elif callable(value) and trace_rules.lookup_callable(value) is not None: + if is_callable_allowed(value): + self.tx.output.has_user_defined_allowed_in_graph = True + return trace_rules.lookup_callable(value)(value) + elif is_function_or_wrapper(value): + return trace_rules.lookup(value)(value) + elif isinstance(value, enum.Enum): + return EnumVariable(value) + elif isinstance(value, (type, abc.ABCMeta)): + return UserDefinedClassVariable(value) + elif isinstance(value, dict): + items = {self(tx, k): self(tx, v) for k, v in value.items()} + return ConstDictVariable(items, mutable_local=MutableLocal()) + elif isinstance(value, set): + # Nb. value is a set here so the iteration below is non-deterministic! 
+ return SetVariable( + [self(tx, x) for x in value], mutable_local=MutableLocal() + ) + elif isinstance(value, (tuple, list)): + cls = BaseListVariable.cls_for(type(value)) + return cls([self(tx, x) for x in value], mutable_local=MutableLocal()) + elif isinstance(value, types.MethodWrapperType): + return MethodWrapperVariable(value) + elif PlacementVariable.is_placement(value): + return PlacementVariable(value) + elif DeviceMeshVariable.is_device_mesh(value): + return DeviceMeshVariable(value) + unimplemented(f"Unexpected type in sourceless builder {type(value)}") + + @staticmethod + def wrap_constant_literal(value): + assert ConstantVariable.is_literal(value) + return ConstantVariable.create(value=value) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/builtin.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/builtin.py new file mode 100644 index 0000000000000000000000000000000000000000..7f9625d196fa3cfc712ee0270a887667595d1b87 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/builtin.py @@ -0,0 +1,1748 @@ +# mypy: ignore-errors + +import contextlib +import functools +import inspect +import itertools +import logging +import math +import operator +import types +from collections import defaultdict, OrderedDict +from typing import Dict, List + +import torch +from torch import sym_float, sym_int + +from .. import config, polyfill, variables +from ..exc import ( + AttributeMutationError, + unimplemented, + Unsupported, + UserError, + UserErrorType, +) +from ..guards import GuardBuilder, install_guard +from ..replay_record import DummyModule +from ..source import AttrSource, GetItemSource, is_constant_source, TypeSource +from ..utils import ( + check_constant_args, + check_numpy_ndarray_args, + check_unspec_python_args, + extract_fake_example_value, + get_fake_value, + guard_if_dyn, + istype, + numpy_operator_wrapper, + proxy_args_kwargs, + tensortype_to_dtype, +) +from .base import MutableLocal, typestr, VariableTracker +from .constant import ConstantVariable +from .ctx_manager import EventVariable, StreamVariable +from .dicts import ( + ConstDictVariable, + DefaultDictVariable, + DictView, + is_hashable, + SetVariable, +) +from .lists import ( + BaseListVariable, + ListIteratorVariable, + ListVariable, + SizeVariable, + TupleIteratorVariable, + TupleVariable, +) +from .tensor import ( + FakeItemVariable, + SymNodeVariable, + TensorVariable, + UnspecializedPythonVariable, +) +from .user_defined import UserDefinedVariable + +log = logging.getLogger(__name__) + + +IN_PLACE_DESUGARING_MAP = { + operator.iadd: operator.add, + operator.isub: operator.sub, + operator.imul: operator.mul, + operator.ifloordiv: operator.floordiv, + operator.itruediv: operator.truediv, + operator.imod: operator.mod, + operator.imatmul: operator.imatmul, + operator.ilshift: operator.lshift, + operator.irshift: operator.rshift, + operator.ipow: operator.pow, + operator.iand: operator.and_, + operator.ior: operator.or_, + operator.ixor: operator.xor, +} + + +def _polyfill_call_impl(name): + """Create a BuiltinVariable.call_{name} method that inlines through polyfill.{name}""" + + def call_fn(self, tx, *args, **kwargs): + return tx.inline_user_function_return( + variables.UserFunctionVariable(fn), args, kwargs + ) + + fn = getattr(polyfill, name) + call_fn.__name__ = f"call_{name}" + return call_fn + + +class BuiltinVariable(VariableTracker): + _SENTINEL = object() + + @classmethod + def create_with_source(cls, value, source): + 
install_guard(source.make_guard(GuardBuilder.BUILTIN_MATCH)) + return BuiltinVariable(value, source=source) + + @staticmethod + @functools.lru_cache(None) + def _constant_fold_functions(): + fns = { + abs, + all, + any, + bool, + callable, + chr, + divmod, + float, + getattr, + int, + len, + max, + min, + ord, + pow, + repr, + round, + str, + str.format, + sum, + type, + operator.abs, + operator.pos, + operator.neg, + operator.not_, + operator.truth, + operator.invert, + operator.pow, + operator.mul, + operator.matmul, + operator.floordiv, + operator.truediv, + operator.mod, + operator.add, + operator.sub, + operator.getitem, + operator.length_hint, + operator.lshift, + operator.rshift, + operator.and_, + operator.or_, + operator.xor, + operator.ipow, + operator.imul, + operator.imatmul, + operator.ifloordiv, + operator.itruediv, + operator.imod, + operator.iadd, + operator.isub, + operator.ilshift, + operator.irshift, + operator.iand, + operator.ixor, + operator.ior, + operator.index, + } + fns.update(x for x in math.__dict__.values() if isinstance(x, type(math.sqrt))) + return fns + + def can_constant_fold_through(self): + return self.fn in self._constant_fold_functions() + + @staticmethod + @functools.lru_cache(None) + def _fx_graph_functions(): + fns = { + operator.abs, + operator.pos, + operator.neg, + operator.not_, + operator.invert, + operator.pow, + operator.mul, + operator.matmul, + operator.floordiv, + operator.truediv, + operator.mod, + operator.add, + operator.lt, + operator.gt, + operator.ge, + operator.le, + operator.ne, + operator.eq, + operator.sub, + operator.getitem, + operator.length_hint, + operator.lshift, + operator.rshift, + operator.and_, + operator.or_, + operator.xor, + operator.ipow, + operator.imul, + operator.imatmul, + operator.ifloordiv, + operator.itruediv, + operator.imod, + operator.iadd, + operator.isub, + operator.ilshift, + operator.irshift, + operator.iand, + operator.ixor, + operator.ior, + } + return fns + + @staticmethod + @functools.lru_cache(None) + def _binops(): + # function -> ([forward name, reverse name, in-place name], in-place op) + fns = { + operator.add: (["__add__", "__radd__", "__iadd__"], operator.iadd), + operator.sub: (["__sub__", "__rsub__", "__isub__"], operator.isub), + operator.mul: (["__mul__", "__rmul__", "__imul__"], operator.imul), + operator.truediv: ( + ["__truediv__", "__rtruediv__", "__itruediv__"], + operator.itruediv, + ), + operator.floordiv: ( + ["__floordiv__", "__rfloordiv__", "__ifloordiv__"], + operator.ifloordiv, + ), + operator.mod: (["__mod__", "__rmod__", "__imod__"], operator.imod), + pow: (["__pow__", "__rpow__", "__ipow__"], operator.ipow), + operator.pow: (["__pow__", "__rpow__", "__ipow__"], operator.ipow), + operator.lshift: ( + ["__lshift__", "__rlshift__", "__ilshift__"], + operator.ilshift, + ), + operator.rshift: ( + ["__rshift__", "__rrshift__", "__irshift__"], + operator.irshift, + ), + # NB: The follow binary operators are not supported for now, since the + # corresponding magic methods aren't defined on SymInt / SymFloat: + # operator.matmul + # divmod + # operator.and_ + # operator.or_ + # operator.xor + } + return fns + + @staticmethod + @functools.lru_cache(None) + def _binop_handlers(): + # Multiple dispatch mechanism defining custom binop behavior for certain type + # combinations. Handlers are attempted in order, and will be used if the type checks + # match. 
They are expected to have the signature: + # fn(tx, arg0: VariableTracker, arg1: VariableTracker, options) -> VariableTracker + + # Override table contains: op_fn -> [list of handlers] + op_handlers = {} + for ( + op, + (magic_method_names, in_place_op), + ) in BuiltinVariable._binops().items(): + op_handlers[op] = [] + op_handlers[in_place_op] = [] + + forward_name, reverse_name, inplace_name = magic_method_names + + # User-defined args (highest precedence) + def user_defined_handler( + tx, + a, + b, + options, + forward_name=forward_name, + reverse_name=reverse_name, + ): + # Manually handle reversing logic if needed (e.g. call __radd__) + + # TODO: If we expand this to handle tensor args, we need to manually + # handle cases like this: + # + # class A(int): + # def __radd__(self, other): + # print("woof") + # torch.randn(3) + A(3) + # + # In this example, A.__radd__() is not called -> nothing is printed, because + # Tensor.__add__ only does a subtype test against int, ignoring the subclass. + # To be fully correct, we should not call A.__radd__() here, and there may be + # other cases to reason about and add exceptions for. + if isinstance(a, UserDefinedVariable): + return a.call_method(tx, forward_name, [b], {}) + else: + return b.call_method(tx, reverse_name, [a], {}) + + op_handlers[op].append( + ((UserDefinedVariable, VariableTracker), user_defined_handler) + ) + op_handlers[op].append( + ((VariableTracker, UserDefinedVariable), user_defined_handler) + ) + + def user_defined_inplace_handler( + tx, a, b, options, forward_name=inplace_name + ): + return a.call_method(tx, forward_name, [b], {}) + + op_handlers[in_place_op].append( + ((UserDefinedVariable, VariableTracker), user_defined_inplace_handler) + ) + op_handlers[in_place_op].append( + ((VariableTracker, UserDefinedVariable), user_defined_inplace_handler) + ) + + # Dynamic shape args + def dynamic_handler(tx, a, b, options, fn=op): + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_function", fn, *proxy_args_kwargs([a, b], {}) + ), + **options, + ) + + op_handlers[op].append( + ((SymNodeVariable, VariableTracker), dynamic_handler) + ) + op_handlers[op].append( + ((VariableTracker, SymNodeVariable), dynamic_handler) + ) + + # NB: Prefer out-of-place op when calling in-place op to generate valid graph + op_handlers[in_place_op].append( + ((SymNodeVariable, VariableTracker), dynamic_handler) + ) + op_handlers[in_place_op].append( + ((VariableTracker, SymNodeVariable), dynamic_handler) + ) + + # Special cases - lower precedence but still prefer these over constant folding + + # List-like addition (e.g. [1, 2] + [3, 4]) + def tuple_add_handler(tx, a, b, options): + return TupleVariable(a.items + list(b.unpack_var_sequence(tx)), **options) + + def size_add_handler(tx, a, b, options): + return SizeVariable(a.items + list(b.unpack_var_sequence(tx)), **options) + + list_like_addition_handlers = [ + # NB: Prefer the tuple-specific logic over base logic because of + # some SizeVariable weirdness. Specifically, the tuple-specific logic + # drops the subclass type (e.g. SizeVariable) and returns TupleVariables. 
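Editor's note: the user_defined_handler above is what routes `a + b` to `__radd__` when only the right operand is a user-defined object. A speculative sketch (the Accum class is illustrative, and richer user types may still graph break).

import torch

class Accum:
    def __init__(self, total):
        self.total = total

    def __radd__(self, other):
        # Reached via the reverse path: b.call_method(tx, "__radd__", [a])
        return Accum(self.total + other)

@torch.compile(backend="eager")
def f(x, acc):
    return (1 + acc).total + x

f(torch.randn(3), Accum(2))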
+ ( + (SizeVariable, SizeVariable), + size_add_handler, + ), + ( + (TupleVariable, TupleVariable), + tuple_add_handler, + ), + ( + (TupleVariable, ConstantVariable), + tuple_add_handler, + ), + ( + (ConstantVariable, TupleVariable), + lambda tx, a, b, options: TupleVariable( + list(a.unpack_var_sequence(tx)) + b.items, **options + ), + ), + ( + (BaseListVariable, BaseListVariable), + lambda tx, a, b, options: type(a)(a.items + b.items, **options), + ), + ] + op_handlers[operator.add].extend(list_like_addition_handlers) + + def list_iadd_handler(tx, a, b, _): + if not a.mutable_local or not b.has_unpack_var_sequence(tx): + # Handler doesn't apply + return None + + seq = b.unpack_var_sequence(tx) + tx.output.side_effects.mutation(a) + a.items.extend(seq) + return a + + list_like_iadd_handlers = [ + ( + (ListVariable, VariableTracker), + list_iadd_handler, + ), + ( + (TupleVariable, TupleVariable), + tuple_add_handler, + ), + ( + (TupleVariable, ConstantVariable), + tuple_add_handler, + ), + ] + op_handlers[operator.iadd].extend(list_like_iadd_handlers) + + # List-like expansion (e.g. [1, 2, 3] * 3) + def expand_list_like(tx, lst, const, options): + return lst.__class__( + items=lst.items * const.as_python_constant(), + mutable_local=MutableLocal(), + **options, + ) + + list_like_expansion_handlers = [ + ((ListVariable, ConstantVariable), expand_list_like), + ((TupleVariable, ConstantVariable), expand_list_like), + ( + (ConstantVariable, ListVariable), + lambda tx, a, b, options: expand_list_like(tx, b, a, options), + ), + ( + (ConstantVariable, TupleVariable), + lambda tx, a, b, options: expand_list_like(tx, b, a, options), + ), + ] + op_handlers[operator.mul].extend(list_like_expansion_handlers) + + return op_handlers + + @staticmethod + def _find_binop_handler(op, a, b): + handlers = BuiltinVariable._binop_handlers() + if op not in handlers: + return None + + # Return first handler that matches the type checks + for (type1, type2), handler in handlers[op]: + if isinstance(a, type1) and isinstance(b, type2): + return handler + + return None + + def can_insert_in_graph(self): + return self.fn in self._fx_graph_functions() + + def __init__(self, fn, **kwargs): + super().__init__(**kwargs) + self.fn = fn + + def __str__(self): + if self.fn is None: + name = "None" + else: + name = self.fn.__name__ + + return f"{self.__class__.__name__}({name})" + + def python_type(self): + return type(self.fn) + + def as_python_constant(self): + return self.fn + + def as_proxy(self): + DTYPE = { + bool: torch.bool, + int: torch.int64, + float: torch.float64, + } + if self.fn in DTYPE: + return DTYPE[self.fn] + return super().as_proxy() + + def reconstruct(self, codegen): + name = self.fn.__name__ + assert self.fn.__module__ == "builtins" + assert name not in codegen.tx.f_globals, "shadowed global" + codegen.append_output(codegen.create_load_global(name, False, add=True)) + + def constant_args(self, *args, **kwargs): + return check_constant_args(args, kwargs) + + def tensor_args(self, *args, **kwargs): + return any( + isinstance(i, variables.TensorVariable) + for i in itertools.chain(args, kwargs.values()) + ) and not any( + isinstance(i, variables.GetAttrVariable) + for i in itertools.chain(args, kwargs.values()) + ) + + def python_and_tensor_constant_only(self, *args, **kwargs): + tensor_args = [] + non_tensor_args = [] + for i in itertools.chain(args, kwargs.values()): + if isinstance(i, variables.TensorVariable): + tensor_args.append(i) + else: + non_tensor_args.append(i) + return all( + 
is_constant_source(t.source) if t.source is not None else False + for t in tensor_args + ) and self.constant_args(*non_tensor_args) + + def unspec_python_args(self, *args, **kwargs): + return check_unspec_python_args(args, kwargs) + + @staticmethod + def unwrap_unspec_args_kwargs(args, kwargs): + return [x.as_python_constant() for x in args], { + k: v.as_python_constant() for k, v in kwargs.items() + } + + def has_constant_handler(self, args, kwargs): + constant_args = check_constant_args(args, kwargs) + unspec_python_args = self.unspec_python_args(*args, **kwargs) + return self.can_constant_fold_through() and ( + constant_args or unspec_python_args + ) + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from . import UserFunctionVariable + from .builder import wrap_fx_proxy, wrap_fx_proxy_cls + + args = [v.realize() for v in args] + kwargs = {k: v.realize() for k, v in kwargs.items()} + assert isinstance(args, (list, tuple)) + assert isinstance(kwargs, dict) + tensor_args = self.tensor_args(*args, **kwargs) + + # args[0] is list and args[1] is unspec + if self.fn is operator.getitem and not isinstance( + args[0], variables.TensorVariable + ): + tensor_args = False + + if ( + self.can_insert_in_graph() + and tensor_args + and not ( + self.fn is operator.getitem + and isinstance(args[0], ConstDictVariable) + and isinstance(args[1], variables.TensorVariable) + ) + ): + try: + fn = self.fn + + # Constant fold for constant tensor and python constants + if tensor_args and self.python_and_tensor_constant_only( + *args, **kwargs + ): + from ..bytecode_transformation import unique_id + from .functions import invoke_and_store_as_constant + + return invoke_and_store_as_constant( + tx, fn, unique_id(fn.__name__), args, kwargs + ) + + if self.fn in IN_PLACE_DESUGARING_MAP and isinstance( + args[0], variables.ConstantVariable + ): + # In-place operators like += usually mustate tensor + # values, but in the edge case of immutable values they + # re-bind the variable. + # + # The easiest way to keep the graph consistent in this + # scenario is to de-sugar eagerly. + fn, args = IN_PLACE_DESUGARING_MAP[self.fn], [args[0], args[1]] + + if self.fn is operator.getitem and isinstance(args[1], SymNodeVariable): + # Standard indexing will force specialization due to + # __index__. 
Rewrite as a regular torch op which will + # trace fine + fn, args = torch.select, [ + args[0], + variables.ConstantVariable.create(0), + args[1], + ] + + # Interaction between ndarray and tensors: + # We prefer the tensor op whenever there are tensors involved + if check_numpy_ndarray_args(args, kwargs) and not any( + type(arg) == variables.TensorVariable for arg in args + ): + proxy = tx.output.create_proxy( + "call_function", + numpy_operator_wrapper(self.fn), + *proxy_args_kwargs(args, kwargs), + ) + + return wrap_fx_proxy_cls(variables.NumpyNdarrayVariable, tx, proxy) + + proxy = tx.output.create_proxy( + "call_function", + fn, + *proxy_args_kwargs(args, kwargs), + ) + if any(isinstance(arg, FakeItemVariable) for arg in args): + return wrap_fx_proxy_cls( + FakeItemVariable, + tx, + proxy, + ) + elif self.unspec_python_args(*args, **kwargs): + _args, _kwargs = self.unwrap_unspec_args_kwargs(args, kwargs) + raw_value = self.fn(*_args, **_kwargs) + + need_unwrap = any( + x.need_unwrap + for x in itertools.chain(args, kwargs.values()) + if isinstance(x, variables.UnspecializedPythonVariable) + ) + + return wrap_fx_proxy_cls( + UnspecializedPythonVariable, + tx, + proxy, + raw_value=raw_value, + need_unwrap=need_unwrap, + ) + elif all(isinstance(x, SymNodeVariable) for x in args): + return SymNodeVariable.create(tx, proxy, None) + else: + # Work around for vision_maskrcnn due to precision difference + # specialize the dividend when float divide by tensor + if self.fn is operator.truediv and isinstance( + args[0], variables.UnspecializedPythonVariable + ): + args[0] = args[0].convert_to_constant(tx) + return wrap_fx_proxy(tx, proxy) + + except NotImplementedError: + unimplemented(f"partial tensor op: {self} {args} {kwargs}") + + # Handle cases like int(torch.seed()) + # Also handle sym_float to sym_int cases + if self.fn in (int, float) and isinstance( + args[0], (SymNodeVariable, variables.TensorVariable) + ): + if isinstance(args[0], variables.TensorVariable): + item = args[0].call_method(tx, "item", [], {}) + else: + item = args[0] + fn_ = sym_int if self.fn is int else sym_float + out = wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + fn_, + (item.as_proxy(),), + {}, + ), + ) + return out + + # Handle `str` on a user defined function + if self.fn == str and args and isinstance(args[0], (UserFunctionVariable)): + return variables.ConstantVariable.create(value=str(args[0].fn)) + + # Handle binary ops (e.g. __add__ / __radd__, __iadd__, etc.) + # NB: Tensor args are handled above and not here + if len(kwargs) == 0 and len(args) == 2: + # Try to find a handler for the arg types; otherwise, fall through to constant handler + binop_handler = BuiltinVariable._find_binop_handler( + self.fn, args[0], args[1] + ) + if binop_handler: + res = binop_handler(tx, args[0], args[1], {}) + if res is not None: + return res + + handler = getattr(self, f"call_{self.fn.__name__}", None) + + if handler: + try: + result = handler(tx, *args, **kwargs) + if result is not None: + return result + except TypeError: + # Check if binding is bad. inspect signature bind is expensive. + # So check only when handler call fails. 
+ try: + inspect.signature(handler).bind(tx, *args, **kwargs) + except TypeError as e: + has_constant_handler = self.has_constant_handler(args, kwargs) + if not has_constant_handler: + log.warning( + "incorrect arg count %s %s and no constant handler", + handler, + e, + ) + unimplemented(f"invalid handler args {handler} {args} {kwargs}") + else: + raise + except Unsupported as exc: + has_constant_handler = self.has_constant_handler(args, kwargs) + if not has_constant_handler: + raise + # Actually, we will handle this just fine + exc.remove_from_stats() + + # NB: call to has_constant_handler is deliberately delayed post generic + # handler because has_constant_handler calls as_python_constant + # internally which realizes LazyVariableTracker for ConstantVariables, + # unnecessarily putting guards on objects which might not actually be used. + has_constant_handler = self.has_constant_handler(args, kwargs) + if has_constant_handler: + from .builder import SourcelessBuilder + + # constant fold + return SourcelessBuilder()( + tx, + self.as_python_constant()( + *[x.as_python_constant() for x in args], + **{k: v.as_python_constant() for k, v in kwargs.items()}, + ), + ) + + return super().call_function(tx, args, kwargs) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if self.fn == dict and name == "fromkeys": + return BuiltinVariable.call_custom_dict_fromkeys(tx, dict, *args, **kwargs) + if self.fn == itertools.chain and name == "from_iterable": + assert len(args) == 1 + assert len(kwargs) == 0 + obj = args[0] + items = [] + for item in obj.unpack_var_sequence(tx): + items.extend(item.unpack_var_sequence(tx)) + return variables.TupleVariable(items) + + return super().call_method(tx, name, args, kwargs) + + def _call_min_max(self, tx, *args): + if len(args) == 1 and args[0].has_unpack_var_sequence(tx): + # expand iterable + items = args[0].unpack_var_sequence(tx) + return self._call_min_max_seq(tx, items) + elif len(args) == 2: + return self._call_min_max_binary(tx, args[0], args[1]) + elif len(args) > 2: + return self._call_min_max_seq(tx, args) + + def _call_min_max_seq(self, tx, items): + assert len(items) > 0 + if len(items) == 1: + return items[0] + + return functools.reduce(functools.partial(self._call_min_max_binary, tx), items) + + def _call_min_max_binary(self, tx, a, b): + if self.tensor_args(a, b): + if not isinstance(a, variables.TensorVariable): + a, b = b, a + assert isinstance(a, variables.TensorVariable) + + # result of an item call is a scalar convert to a tensor + if isinstance(a, FakeItemVariable): + a = variables.TorchInGraphFunctionVariable(torch.tensor).call_function( + tx, [a], {} + ) + + # Dynamic input does not get resolved, rather, gets stored as call_function + if isinstance(a, SymNodeVariable) or isinstance(b, SymNodeVariable): + from .builder import wrap_fx_proxy_cls + + return wrap_fx_proxy_cls( + type(a), + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + self.fn, + *proxy_args_kwargs([a, b], {}), + ), + ) + + # convert min/max to torch ops + if b.is_python_constant(): + if isinstance(a, variables.NumpyNdarrayVariable): + import numpy as np + + fn = variables.NumpyVariable(np.clip) + else: + fn = variables.TorchInGraphFunctionVariable(torch.clamp) + kwargs = {"min": b} if (self.fn is max) else {"max": b} + result = fn.call_function(tx, [a], kwargs) + else: + if isinstance(a, variables.NumpyNdarrayVariable): + import numpy as np + + fn = {max: np.maximum, min: 
np.minimum}[self.fn] + fn = variables.NumpyVariable(fn) + else: + fn = {max: torch.maximum, min: torch.minimum}[self.fn] + fn = variables.TorchInGraphFunctionVariable(fn) + result = fn.call_function(tx, [a, b], {}) + + # return unspec if both a, b are unspec or const + if all( + isinstance( + i, + ( + variables.UnspecializedPythonVariable, + variables.ConstantVariable, + ), + ) + for i in [a, b] + ): + if any(isinstance(val, FakeItemVariable) for val in [a, b]): + return variables.FakeItemVariable.from_tensor_variable(result) + + if b.is_python_constant(): + raw_b = b.as_python_constant() + else: + raw_b = b.raw_value + if self.fn is max: + raw_res = max(a.raw_value, raw_b) + else: + raw_res = min(a.raw_value, raw_b) + + need_unwrap = any( + x.need_unwrap + for x in [a, b] + if isinstance(x, variables.UnspecializedPythonVariable) + ) + return variables.UnspecializedPythonVariable.from_tensor_variable( + result, raw_res, need_unwrap + ) + # otherwise return tensor + else: + return result + elif isinstance(a, SymNodeVariable) or isinstance(b, SymNodeVariable): + fn = torch.sym_max if self.fn is max else torch.sym_min + proxy = tx.output.create_proxy( + "call_function", fn, *proxy_args_kwargs([a, b], {}) + ) + return SymNodeVariable.create(tx, proxy, None) + + call_min = _call_min_max + call_max = _call_min_max + + def call_abs(self, tx, arg: "VariableTracker"): + # Call arg.__abs__() + abs_method = BuiltinVariable(getattr).call_function( + tx, [arg, ConstantVariable.create("__abs__")], {} + ) + return abs_method.call_function(tx, [], {}) + + def call_pos(self, tx, arg: "VariableTracker"): + # Call arg.__pos__() + pos_method = BuiltinVariable(getattr).call_function( + tx, [arg, ConstantVariable.create("__pos__")], {} + ) + return pos_method.call_function(tx, [], {}) + + def call_round(self, tx, arg, *args, **kwargs): + # Call arg.__round__() + round_method = BuiltinVariable(getattr).call_function( + tx, [arg, ConstantVariable.create("__round__")], {} + ) + return round_method.call_function(tx, args, kwargs) + + def call_range(self, tx, *args): + if self.unspec_python_args(*args) or self.constant_args(*args): + return variables.RangeVariable(args) + elif self._dynamic_args(*args): + args = [ + variables.ConstantVariable.create(guard_if_dyn(arg)) for arg in args + ] + return variables.RangeVariable(args) + # None no-ops this handler and lets the driving function proceed + return None + + def _dynamic_args(self, *args, **kwargs): + return any(isinstance(x, SymNodeVariable) for x in args) or any( + isinstance(x, SymNodeVariable) for x in kwargs.values() + ) + + def call_slice(self, tx, *args): + return variables.SliceVariable(args) + + def _dyn_proxy(self, tx, *args, **kwargs): + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_function", self.fn, *proxy_args_kwargs(args, kwargs) + ), + ) + + def _call_iter_tuple_list(self, tx, obj=None, *args, **kwargs): + if self._dynamic_args(*args, **kwargs): + return self._dyn_proxy(tx, *args, **kwargs) + + if isinstance(obj, variables.IteratorVariable): + # For non-list iterators, we will guard on vars that + # determine the control flow + return obj + + cls = variables.BaseListVariable.cls_for(self.fn) + if obj is None: + return cls( + [], + mutable_local=MutableLocal(), + ) + elif obj.has_unpack_var_sequence(tx): + if obj.source and not is_constant_source(obj.source): + if isinstance(obj, TupleIteratorVariable): + install_guard( + obj.source.make_guard(GuardBuilder.TUPLE_ITERATOR_LEN) + ) + else: + 
install_guard(obj.source.make_guard(GuardBuilder.SEQUENCE_LENGTH)) + + return cls( + list(obj.unpack_var_sequence(tx)), + mutable_local=MutableLocal(), + ) + + def call_iter(self, tx, obj, *args, **kwargs): + # Handle the case where we are iterating over a tuple, list or iterator + ret = self._call_iter_tuple_list(tx, obj, *args, **kwargs) + + if ret is None: + # If the object doesn't implement a __iter__ method, it will be an error in eager mode when calling iter on it anyway. + # If the object implements a __iter__ method, inlining effectively forwards the call to another iter call + # (e.g. when __iter__ just returns iter(self.list)) or return a user-defined iterator. + return obj.call_method(tx, "__iter__", args, kwargs) + return ret + + call_tuple = _call_iter_tuple_list + call_list = _call_iter_tuple_list + + def call_callable(self, tx, arg): + from .functions import BaseUserFunctionVariable + + if isinstance( + arg, (variables.UserDefinedClassVariable, BaseUserFunctionVariable) + ): + return variables.ConstantVariable.create(True) + elif isinstance(arg, UserDefinedVariable): + return variables.ConstantVariable.create(callable(arg.value)) + elif isinstance(arg, (ConstantVariable, SymNodeVariable, TensorVariable)): + return variables.ConstantVariable.create(False) + + def call_cast(self, _, *args, **kwargs): + if len(args) == 2: + return args[1] + + unimplemented(f"unsupported args to builtin cast(): {args} {kwargs}") + + def call_dict(self, tx, *args, **kwargs): + return BuiltinVariable.call_custom_dict(tx, dict, *args, **kwargs) + + @staticmethod + def call_custom_dict(tx, user_cls, *args, **kwargs): + if not kwargs: + if not args: + args = ({},) + assert len(args) == 1 + arg = args[0] + if isinstance(arg, dict): + return ConstDictVariable(arg, user_cls, mutable_local=MutableLocal()) + elif isinstance(arg, variables.ConstDictVariable): + return arg.clone(user_cls=user_cls, mutable_local=MutableLocal()) + elif isinstance( + arg, + ( + ListVariable, + TupleVariable, + ListIteratorVariable, + ), + ): + items = dict( + x.unpack_var_sequence(tx) for x in arg.unpack_var_sequence(tx) + ) + return ConstDictVariable(items, user_cls, mutable_local=MutableLocal()) + elif not args and kwargs: + items = {ConstantVariable.create(k): v for k, v in kwargs.items()} + return variables.ConstDictVariable( + items, user_cls=user_cls, mutable_local=MutableLocal() + ) + unimplemented(f"{user_cls.__name__}(): {args} {kwargs}") + + @staticmethod + def call_custom_dict_fromkeys(tx, user_cls, *args, **kwargs): + assert user_cls in {dict, OrderedDict, defaultdict} + if kwargs: + # Only `OrderedDict.fromkeys` accepts `value` passed by keyword + assert user_cls is OrderedDict + assert len(args) == 1 and len(kwargs) == 1 and "value" in kwargs + args = (*args, kwargs.pop("value")) + if len(args) == 0: + raise UserError(TypeError, "fromkeys expected at least 1 argument, got 0") + if len(args) == 1: + args = (*args, ConstantVariable.create(None)) + assert len(args) == 2 + arg, value = args + DictVariableType = ( + ConstDictVariable if user_cls is not defaultdict else DefaultDictVariable + ) + + if isinstance(arg, dict): + arg = [ConstantVariable.create(k) for k in arg.keys()] + return DictVariableType( + dict.fromkeys(arg, value), user_cls, mutable_local=MutableLocal() + ) + elif arg.has_unpack_var_sequence(tx) and all( + is_hashable(v) for v in arg.unpack_var_sequence(tx) + ): + keys = arg.unpack_var_sequence(tx) + return DictVariableType( + dict.fromkeys(keys, value), user_cls, mutable_local=MutableLocal() + ) + 
unimplemented(f"{user_cls.__name__}.fromkeys(): {args} {kwargs}") + + def call_set(self, tx, *args, **kwargs): + # Can we merge this implementation and call_dict's one? + assert not kwargs + if not args: + return SetVariable([], mutable_local=MutableLocal()) + assert len(args) == 1 + arg = args[0] + if isinstance(arg, variables.SetVariable): + return arg.clone(mutable_local=MutableLocal()) + elif arg.has_unpack_var_sequence(tx): + items = arg.unpack_var_sequence(tx) + return SetVariable(items, mutable_local=MutableLocal()) + else: + unimplemented(f"set(): {args} {kwargs}") + + def call_zip(self, tx, *args, **kwargs): + if kwargs: + assert len(kwargs) == 1 and "strict" in kwargs + if all(x.has_unpack_var_sequence(tx) for x in args): + unpacked = [arg.unpack_var_sequence(tx) for arg in args] + if kwargs.pop("strict", False) and len(unpacked) > 0: + if not all(len(u) == len(unpacked[0]) for u in unpacked): + raise UserError( + ValueError, + "zip() has one argument of len differing from others", + ) + items = [variables.TupleVariable(list(item)) for item in zip(*unpacked)] + return variables.TupleVariable(items) + + def call_enumerate(self, tx, *args): + if len(args) == 1: + start = 0 + else: + assert len(args) == 2 + assert isinstance(args[1], variables.ConstantVariable) + start = args[1].as_python_constant() + if args[0].has_unpack_var_sequence(tx): + items = [ + variables.TupleVariable( + [variables.ConstantVariable.create(idx), var], + ) + for idx, var in enumerate(args[0].unpack_var_sequence(tx), start) + ] + return variables.TupleVariable(items) + + def call_len(self, tx, *args, **kwargs): + return args[0].call_method(tx, "__len__", args[1:], kwargs) + + def call_getitem(self, tx, *args, **kwargs): + return args[0].call_method(tx, "__getitem__", args[1:], kwargs) + + def call_isinstance(self, tx, arg, isinstance_type): + try: + arg_type = arg.python_type() + except NotImplementedError: + unimplemented( + f"isinstance({arg}, {isinstance_type}): can't determine type of {arg}" + ) + + isinstance_type = isinstance_type.as_python_constant() + + if isinstance(arg, variables.TensorVariable) and arg.dtype is not None: + + def _tensor_isinstance(tensor_var, tensor_type): + def check_type(ty): + if ty not in tensortype_to_dtype: + return issubclass(arg.python_type(), ty) + + dtypes = tensortype_to_dtype[ty] + return arg.dtype in dtypes + + if type(tensor_type) is tuple: + return any(check_type(ty) for ty in tensor_type) + else: + return check_type(tensor_type) + + return variables.ConstantVariable.create( + _tensor_isinstance(arg, isinstance_type) + ) + # UserDefinedObject with C extensions can have torch.Tensor attributes, + # so break graph. 
+ if isinstance(arg, variables.UserDefinedObjectVariable) and isinstance( + arg.value, types.MemberDescriptorType + ): + unimplemented( + f"isinstance called on UserDefinedClass {arg} {isinstance_type}" + ) + # handle __instancecheck__ defined in user class + if ( + isinstance(arg, variables.UserDefinedObjectVariable) + and "__instancecheck__" in isinstance_type.__class__.__dict__ + ): + return variables.ConstantVariable.create( + isinstance_type.__class__.__instancecheck__(isinstance_type, arg.value) + ) + + try: + val = issubclass(arg_type, isinstance_type) + except TypeError: + val = arg_type is isinstance_type + return variables.ConstantVariable.create(val) + + def call_issubclass(self, tx, left_ty, right_ty): + """Checks if first arg is subclass of right arg""" + left_ty = left_ty.as_python_constant() + right_ty = right_ty.as_python_constant() + + return variables.ConstantVariable(issubclass(left_ty, right_ty)) + + def call_super(self, tx, a, b): + return variables.SuperVariable(a, b) + + def call_next(self, tx, arg): + if isinstance( + arg, (variables.ListIteratorVariable, variables.IteratorVariable) + ): + val, next_iter = arg.next_variables(tx) + return val + elif isinstance(arg, variables.BaseListVariable): + return arg.items[0] + + def call_hasattr(self, tx, obj, attr): + if attr.is_python_constant(): + name = attr.as_python_constant() + return obj.call_hasattr(tx, name) + + def call_map(self, tx, fn, seq): + if seq.has_unpack_var_sequence(tx): + items = [fn.call_function(tx, [x], {}) for x in seq.unpack_var_sequence(tx)] + return variables.TupleVariable(items) + + def call_sum(self, tx, seq, start=_SENTINEL): + # Special case for sum on tuple of floats and ints + if isinstance(seq, (variables.ListVariable, variables.TupleVariable)) and all( + isinstance(x, variables.ConstantVariable) + and isinstance(x.value, (int, float)) + for x in seq.items + ): + if start is self._SENTINEL: + return variables.ConstantVariable.create( + sum(x.value for x in seq.items), + ) + if isinstance(start, variables.ConstantVariable) and isinstance( + start.value, (int, float) + ): + return variables.ConstantVariable.create( + sum((x.value for x in seq.items), start=start.value), + ) + if seq.has_unpack_var_sequence(tx): + if start is self._SENTINEL: + start = variables.ConstantVariable.create(0) + items = seq.unpack_var_sequence(tx) + return BuiltinVariable(functools.reduce).call_function( + tx, + [ + BuiltinVariable(operator.add), + variables.TupleVariable(items), + start, + ], + {}, + ) + + def call_reduce(self, tx, function, iterable, initial=_SENTINEL): + if iterable.has_unpack_var_sequence(tx): + items = iterable.unpack_var_sequence(tx) + if initial is self._SENTINEL: + value, items = items[0], items[1:] + else: + value = initial + for element in items: + value = function.call_function(tx, [value, element], {}) + return value + + def call_getattr( + self, tx, obj: VariableTracker, name_var: VariableTracker, default=None + ): + from .. import trace_rules + from . import ( + ConstantVariable, + GetAttrVariable, + PythonModuleVariable, + TorchInGraphFunctionVariable, + UserFunctionVariable, + ) + from .builder import SourcelessBuilder, VariableBuilder + + name = name_var.as_python_constant() + + if not name_var.is_python_constant(): + unimplemented("non-const getattr() name") + + if tx.output.side_effects.is_attribute_mutation(obj): + try: + # re-read a pending side effect? 
+ return tx.output.side_effects.load_attr(obj, name) + except KeyError: + pass + + if default is not None: + hasattr_var = self.call_hasattr(tx, obj, name_var) + assert hasattr_var.as_python_constant() in (True, False) + if not hasattr_var.as_python_constant(): + return default + + options = {} + if obj.source: + source = AttrSource(obj.source, name) + options["source"] = source + else: + source = None + + if name == "__bases__": + try: + value = obj.as_python_constant() + if isinstance(value, type): + bases = value.__bases__ + if source is not None: + tuple_args = [ + VariableBuilder(tx, GetItemSource(source, i))(b) + for i, b in enumerate(bases) + ] + else: + tuple_args = [SourcelessBuilder()(tx, b) for b in bases] + + return variables.TupleVariable(tuple_args, **options) + except NotImplementedError: + pass + + if isinstance(obj, variables.NNModuleVariable): + return obj.var_getattr(tx, name) + elif isinstance( + obj, + ( + variables.TensorVariable, + variables.NamedTupleVariable, + variables.ConstantVariable, + variables.UserDefinedClassVariable, + variables.UserDefinedObjectVariable, + ), + ): + try: + return obj.var_getattr(tx, name) + except NotImplementedError: + return GetAttrVariable(obj, name, **options) + elif isinstance(obj, TorchInGraphFunctionVariable): + # Get OpOverload from an OpOverloadPacket, e.g., torch.ops.aten.add.default. + member = getattr(obj.value, name) + if isinstance( + member, (torch._ops.OpOverloadPacket, torch._ops.OpOverload) + ) and trace_rules.is_aten_op_or_tensor_method(member): + return TorchInGraphFunctionVariable(member, **options) + elif isinstance(obj, (PythonModuleVariable, DummyModule)): + if obj.is_torch: + member = getattr(obj.value, name) + else: + member = obj.value.__dict__[name] + + if config.replay_record_enabled: + tx.exec_recorder.record_module_access(obj.value, name, member) + + if source is not None: + return VariableBuilder(tx, source)(member) + else: + return SourcelessBuilder()(tx, member) + elif istype(obj, UserFunctionVariable) and name in ("__name__", "__module__"): + return ConstantVariable.create(getattr(obj.fn, name)) + else: + try: + return obj.var_getattr(tx, name) + except NotImplementedError: + return GetAttrVariable(obj, name, **options) + + def call_setattr( + self, tx, obj: VariableTracker, name_var: VariableTracker, val: VariableTracker + ): + from .distributed import PlacementVariable + + if isinstance( + obj, + ( + variables.DataClassVariable, + variables.CustomizedDictVariable, + PlacementVariable, + ), + ): + return obj.call_method(tx, "__setattr__", [name_var, val], {}) + elif ( + tx.output.side_effects.is_attribute_mutation(obj) + and name_var.is_python_constant() + ): + name = name_var.as_python_constant() + if isinstance(obj, variables.TensorVariable): + from .builder import wrap_fx_proxy + + if name == "requires_grad": + # TODO(voz): Make it work properly + unimplemented( + "mutating requires_grad can introduce a new leaf from non-leaf or vice versa in " + "the middle of the graph, which aot_autograd does not currently know how to handle. " + ) + if name == "data": + # Remove the old reference in tracked fakes - if we don't do this + # new .data value size and shape differences will cause + # tracked fakes to produce incorrect guards. This is sound because the TensorVariable + # coming out of set_() below will be a new one, and get + # installed in tracked fakes. 
+ to_remove = [] + for tf in tx.output.tracked_fakes: + if tf.source == obj.source: + to_remove.append(tf) + for tf in to_remove: + tx.output.tracked_fakes.remove(tf) + + # Step 1 - disable grads + with dynamo_disable_grad(tx), torch.no_grad(): + # Step 2 - call `set_` + out = wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_function", + torch.Tensor.set_, + *proxy_args_kwargs([obj, val], {}), + ), + ) + + # Step 3 - drop the version counter - this is a step required to get + # .data setting to play correctly with the autograd engine. + # Essentially, dynamo is trying to faithfully preserve the (absurd) + # behavior of .data= from eager mode + def _lower_version_count_by_1(x): + version = x._version + if version > 0: + version = version - 1 + torch._C._autograd._unsafe_set_version_counter(x, version) + return x + + tx.output.create_proxy( + "call_function", + _lower_version_count_by_1, + (out.as_proxy(),), + {}, + ) + _lower_version_count_by_1(obj.as_proxy().node.meta["example_value"]) + # This handles options prop, guards and ends with a clone + # Step 4 - replace all references to the current object with the new one + return out + + tx.output.side_effects.store_attr(obj, name, val) + return val + elif isinstance(obj, variables.UserDefinedObjectVariable): + unimplemented( + f"setattr(UserDefinedObjectVariable) {type(obj.value).__setattr__}" + ) + elif isinstance(obj, variables.NNModuleVariable): + if not tx.output.is_root_tracer(): + raise AttributeMutationError( + "Can't inplace modify module params/buffers inside HigherOrderOp" + ) + if name_var.is_python_constant() and isinstance( + val, variables.TensorVariable + ): + assigning_fake_val = get_fake_value(val.as_proxy().node, tx) + + try: + getattr_var = obj.var_getattr(tx, name_var.as_python_constant()) + except AttributeError: + getattr_var = None + + if isinstance(getattr_var, variables.TensorVariable): + # get_fake_val will get the same fake tensor + existing_fake_attr = get_fake_value(getattr_var.as_proxy().node, tx) + + # same tensor identity, setattr is a no-op + mod_setattr = inspect.getattr_static(obj.module_type, "__setattr__") + if ( + existing_fake_attr is assigning_fake_val + and mod_setattr is torch.nn.Module.__setattr__ + ): + return getattr_var + + obj.convert_to_unspecialized(tx) + # FIXME (tmanlaibaatar) this is an utter hack to unblock HuggingFace export + # Export generally doesn't want to allow mutations on objects directly, + # but we don't have a good way to do this right now. For now, we make it an undefined + # behaviour and just set attributes directly on the PretrainedConfig object + # for now.
+ elif isinstance(obj, variables.dicts.HFPretrainedConfigVariable) and tx.export: + if name_var.is_python_constant() and isinstance( + val, variables.ConstantVariable + ): + setattr( + obj.obj, name_var.as_python_constant(), val.as_python_constant() + ) + return ConstantVariable(None) + + def call_delattr(self, tx, obj: VariableTracker, name_var: VariableTracker): + return self.call_setattr(tx, obj, name_var, variables.DeletedVariable()) + + def call_type(self, tx, obj: VariableTracker): + from .builder import SourcelessBuilder, VariableBuilder + + try: + py_type = obj.python_type() + except NotImplementedError as error: + raise UserError( + UserErrorType.INVALID_INPUT, + str(error), + case_name="unknown_python_type", + ) from None + + if obj.source is None: + return SourcelessBuilder()(tx, py_type) + else: + return VariableBuilder(tx, TypeSource(obj.source))(py_type) + + def call_reversed(self, tx, obj: VariableTracker): + if obj.has_unpack_var_sequence(tx): + items = list(reversed(obj.unpack_var_sequence(tx))) + return variables.TupleVariable(items) + + def call_sorted(self, tx, obj: VariableTracker, **kwargs): + if ( + obj.has_unpack_var_sequence(tx) + and not isinstance(obj, variables.TensorVariable) + and all(x.is_python_constant() for x in obj.unpack_var_sequence(tx)) + ): + function = kwargs.pop("key", None) + reverse = kwargs.pop( + "reverse", ConstantVariable.create(False) + ).as_python_constant() + assert len(kwargs) == 0 + if function: + items = sorted( + obj.unpack_var_sequence(tx), + key=lambda x: function.call_function( + tx, [x], {} + ).as_python_constant(), + reverse=reverse, + ) + else: + items = sorted( + obj.unpack_var_sequence(tx), + key=lambda x: x.as_python_constant(), + reverse=reverse, + ) + return variables.ListVariable(items) + + def call_chain(self, tx, *args): + if all(obj.has_unpack_var_sequence(tx) for obj in args): + items = [] + for obj in args: + items.extend(obj.unpack_var_sequence(tx)) + return variables.TupleVariable(items) + + def call_islice(self, tx, iterable, *args): + if iterable.has_unpack_var_sequence(tx) and all( + x.is_python_constant() for x in args + ): + const_args = [x.as_python_constant() for x in args] + items = iterable.unpack_var_sequence(tx) + items = list(itertools.islice(items, *const_args)) + return variables.TupleVariable(items) + + # neg is a constant fold function, so we only get here if constant fold is not valid + def call_neg(self, tx, a): + if isinstance(a, SymNodeVariable): + return SymNodeVariable.create( + tx, + (operator.neg)(a.as_proxy()), + sym_num=None, + ) + # None no-ops this handler and lets the driving function proceed + return None + + def call_format(self, tx, _format_string, *args, **kwargs): + format_string = _format_string.as_python_constant() + return variables.StringFormatVariable.create(format_string, args, kwargs) + + def call_id(self, tx, *args): + if len(args) > 0 and isinstance(args[0], variables.NNModuleVariable): + nn_mod_variable = args[0] + mod = tx.output.get_submodule(nn_mod_variable.module_key) + return variables.ConstantVariable.create(id(mod)) + else: + unimplemented(f"call_id with args {args}") + + def call_deepcopy(self, tx, x): + unimplemented(f"copy.deepcopy {repr(x)}") + + def _comparison(self, tx, left, right): + """ + Used to implement comparison operators for different types. + For example, list1 < list2 is implemented differently from tensor1 < tensor2 + """ + from . 
import ( + BaseListVariable, + ConstantVariable, + NNModuleVariable, + TensorVariable, + UserDefinedObjectVariable, + UserFunctionVariable, + ) + from .lists import SizeVariable + from .tensor import ( + supported_const_comparison_ops, + supported_tensor_comparison_ops, + ) + + op = self.fn + + def _unimplemented(): + unimplemented(f"comparison {typestr(left)} {op} {typestr(right)}") + + if ( + all( + isinstance(x, (NNModuleVariable, ConstantVariable)) + for x in [left, right] + ) + and op in supported_const_comparison_ops.values() + ): + left = ( + tx.output.get_submodule(left.module_key) + if isinstance(left, NNModuleVariable) + else left.as_python_constant() + ) + right = ( + tx.output.get_submodule(right.module_key) + if isinstance(right, NNModuleVariable) + else right.as_python_constant() + ) + return ConstantVariable.create(op(left, right)) + + if isinstance(left, UserFunctionVariable): + if op not in supported_const_comparison_ops.values(): + _unimplemented() + if not isinstance(right, UserFunctionVariable): + _unimplemented() + return ConstantVariable.create(op(left.fn, right.fn)) + + # Note, we have a rare BaseListVariable subtype mismatch with valid comparison + # x = torch.randn([3, 3]) + # x.size() == (3, 3) # True + # (3, 3) == x.size() # True + if isinstance(left, (SizeVariable, TupleVariable)) and isinstance( + right, (TupleVariable, SizeVariable) + ): + return BaseListVariable.list_compare(tx, op, left, right) + + if isinstance(left, BaseListVariable): + if not type(left) == type(right): # Mismatch in BaseListVariable subclasses + _unimplemented() + return BaseListVariable.list_compare(tx, op, left, right) + + # If they implement set semantics (e.g. SetVariable or DictKeys) + if hasattr(left, "set_items") and hasattr(right, "set_items"): + return ConstantVariable.create(op(left.set_items, right.set_items)) + + if isinstance(left, TensorVariable) or isinstance(right, TensorVariable): + from .builder import wrap_fx_proxy_cls + + if op in [operator.is_, operator.is_not]: + is_result = ( + isinstance(left, TensorVariable) + and isinstance(right, TensorVariable) + and id(extract_fake_example_value(left.as_proxy().node)) + == id(extract_fake_example_value(right.as_proxy().node)) + ) + if op is operator.is_: + return ConstantVariable.create(is_result) + else: + return ConstantVariable.create(not is_result) + + if op not in supported_tensor_comparison_ops.values(): + _unimplemented() + if ( + isinstance(left, TensorVariable) + and isinstance(right, TensorVariable) + and (left.size and right.size) is not None + and left.size != right.size + ): + try: + torch.broadcast_shapes(left.size, right.size) + except RuntimeError: + # not broadcastable, can't be compared + _unimplemented() + tensor_cls = left if isinstance(left, TensorVariable) else right + proxy = tx.output.create_proxy( + "call_function", op, (left.as_proxy(), right.as_proxy()), {} + ) + return wrap_fx_proxy_cls( + type(tensor_cls), # handle Ndarrays and Tensors + tx, + proxy, + ) + + if isinstance(left, SymNodeVariable) or isinstance(right, SymNodeVariable): + if op not in supported_tensor_comparison_ops.values(): + _unimplemented() + + proxy = tx.output.create_proxy( + "call_function", op, (left.as_proxy(), right.as_proxy()), {} + ) + return SymNodeVariable.create( + tx, + proxy, + sym_num=None, + ) + + if isinstance(left, UserDefinedObjectVariable) and isinstance( + right, UserDefinedObjectVariable + ): + return ConstantVariable.create(op(left.value, right.value)) + + if isinstance(left, (StreamVariable, EventVariable)) 
or isinstance( + right, (StreamVariable, EventVariable) + ): + if type(left) == type(right) and op is operator.eq: + return ConstantVariable(op(left.value, right.value)) + + if isinstance(right, ConstantVariable) or isinstance( + left, ConstantVariable + ): + return ConstantVariable(op(left.value, right.value)) + + if op.__name__.startswith("is_"): + # If the two objects are of different type, we can safely return False and True for `is` and `is not`, respectively + if type(left) is not type(right): + return ConstantVariable.create(op.__name__ != "is_") + + if isinstance(left, BuiltinVariable) and isinstance(right, BuiltinVariable): + return ConstantVariable.create(op(left.fn, right.fn)) + + _unimplemented() + + def call_and_(self, tx, a, b): + # Rely on constant_handler + if isinstance(a, ConstantVariable) and isinstance(b, ConstantVariable): + return None + if isinstance(a, (SymNodeVariable, ConstantVariable)) and isinstance( + b, (SymNodeVariable, ConstantVariable) + ): + return SymNodeVariable.create( + tx, + tx.output.create_proxy( + "call_function", operator.and_, *proxy_args_kwargs([a, b], {}) + ), + sym_num=None, + ) + if hasattr(a, "set_items") and hasattr(b, "set_items"): + return SetVariable(list(a.set_items & b.set_items)) + # None no-ops this handler and lets the driving function proceed + + def call_or_(self, tx, a, b): + # Rely on constant_handler + if isinstance(a, ConstantVariable) and isinstance(b, ConstantVariable): + return None + if isinstance(a, (SymNodeVariable, ConstantVariable)) and isinstance( + b, (SymNodeVariable, ConstantVariable) + ): + return SymNodeVariable.create( + tx, + tx.output.create_proxy( + "call_function", operator.or_, *proxy_args_kwargs([a, b], {}) + ), + sym_num=None, + ) + if hasattr(a, "set_items") and hasattr(b, "set_items"): + return SetVariable(list(a.set_items | b.set_items)) + # None no-ops this handler and lets the driving function proceed + return None + + def call_not_(self, tx, a): + if isinstance(a, SymNodeVariable): + return SymNodeVariable.create( + tx, + tx.output.create_proxy( + "call_function", operator.not_, *proxy_args_kwargs([a], {}) + ), + sym_num=None, + ) + + # Unwrap the underlying ConstDictVariable + if isinstance(a, DictView): + a = a.dv_dict + if isinstance(a, (ListVariable, ConstDictVariable)): + return ConstantVariable.create(len(a.items) == 0) + + return None + + call_eq = _comparison + call_gt = _comparison + call_lt = _comparison + call_ge = _comparison + call_le = _comparison + call_ne = _comparison + call_is_ = _comparison + call_is_not = _comparison + + call_all = _polyfill_call_impl("all") + call_any = _polyfill_call_impl("any") + + +@contextlib.contextmanager +def dynamo_disable_grad(tx): + from . import GradModeVariable + + org_value = torch.is_grad_enabled() + gmv = GradModeVariable.create(tx, False) + try: + gmv.enter(tx) + yield + finally: + gmv.exit(tx) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/constant.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/constant.py new file mode 100644 index 0000000000000000000000000000000000000000..86d2e3422285ee7e67ff76175fa6bf0b4ce3c365 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/constant.py @@ -0,0 +1,213 @@ +# mypy: ignore-errors + +import operator +from typing import Dict, List + +import torch +from torch._dynamo.source import GetItemSource + +from .. 
import variables +from ..exc import unimplemented, UserError, UserErrorType +from ..guards import GuardBuilder, install_guard +from ..utils import common_constant_types, istype, np +from .base import typestr, VariableTracker + +_type_to_assert_reason = { + # NB - We CAN have ConstantVariable.create(set) because of how sets interact with guards. + # A locally created set should always become a SetVariable, as the items in the set will already either be sourced + # from somewhere else, or unsourced. An input set would imply sources derived from set contents. For example, an + # input list's contents will have a source like some_list[0], some_list[1][1], etc. For a set, arbitrary access is + # not possible. This is a solvable problem, but one we have not taken on yet. As such, input sets are not allowed to + # become SetVariables. The solution here is to create a ConstantSetVariable that is more like a ConstantVariable. + # As this does not exist, we cannot add sets to this invariant. + list: "List types must use ListVariable.", + dict: "Dict types must use ConstDictVariable.", + torch.Tensor: "Tensor types must use TensorVariable.", + torch.SymInt: "SymInts must use SymNodeVariable. " + "If the underlying value is static, we will create a ConstantVariable and specialize.", + torch.SymFloat: "SymInts must use SymNodeVariable", +} + + +class ConstantVariable(VariableTracker): + @staticmethod + def create(value, **kwargs) -> VariableTracker: + source = kwargs.get("source", None) + is_literal = ConstantVariable.is_literal(value) + if not is_literal: + for disallowed_type, reason in _type_to_assert_reason.items(): + assert not isinstance(value, disallowed_type), reason + + # Routing for list and tuple literals. + if is_literal and isinstance(value, (list, tuple)): + items = [] + for i, x in enumerate(value): + item_source = GetItemSource(source, i) if source else None + if item_source: + install_guard(item_source.make_guard(GuardBuilder.CONSTANT_MATCH)) + items.append( + ConstantVariable.create( + x, + source=item_source, + ) + ) + return variables.BaseListVariable.cls_for(type(value))(items, **kwargs) + + return ConstantVariable(value, **kwargs) + + def __init__(self, value, **kwargs): + super().__init__(**kwargs) + if not ConstantVariable.is_literal(value): + for disallowed_type, reason in _type_to_assert_reason.items(): + assert not isinstance(value, disallowed_type), reason + + assert not isinstance( + value, (list, tuple) + ), "ConstantVariable(list) is banned - please create a ListVariable(items)" + if np is not None and isinstance(value, np.number): + self.value = value.item() + else: + self.value = value + + def as_proxy(self): + return self.value + + def __str__(self): + return f"ConstantVariable({type(self.value).__name__}: {repr(self.value)})" + + def python_type(self): + return type(self.value) + + def as_python_constant(self): + return self.value + + @property + def items(self): + """ + Need this when adding a BaseListVariable and a ConstantVariable together. + Happens in detectron2. 
+ """ + return self.unpack_var_sequence(tx=None) + + def getitem_const(self, arg: VariableTracker): + return ConstantVariable.create( + self.value[arg.as_python_constant()], + ) + + @staticmethod + def is_literal(obj): + if type(obj) in common_constant_types: + return True + # The structure within is_literal get routed to variables.BaseListVariable + if type(obj) in (list, tuple, set, frozenset, torch.Size): + return all(ConstantVariable.is_literal(x) for x in obj) + return False + + def unpack_var_sequence(self, tx): + try: + return [ConstantVariable.create(x) for x in self.as_python_constant()] + except TypeError as e: + raise NotImplementedError from e + + def const_getattr(self, tx, name): + if isinstance(self.value, type): + raise UserError( + UserErrorType.ANTI_PATTERN, + "Can't access members of type(obj) for a generated custom object. " + "Please use __class__ instead", + case_name="type_reflection_method", + ) + member = getattr(self.value, name) + if callable(member): + raise NotImplementedError() + return member + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from .tensor import SymNodeVariable + + if name == "format" and istype(self.value, str): + return variables.BuiltinVariable(str.format).call_function( + tx, [self, *args], kwargs + ) + + if any(isinstance(x, SymNodeVariable) for x in args): + # Promote to SymNodeVariable for operations involving dynamic shapes. + return variables.SymNodeVariable(self.as_proxy(), self.value).call_method( + tx, name, args, kwargs + ) + + try: + const_args = [a.as_python_constant() for a in args] + const_kwargs = {k: v.as_python_constant() for k, v in kwargs.items()} + except NotImplementedError: + return super().call_method(tx, name, args, kwargs) + + def has_arith_binop(num_ty): + return ( + isinstance(self.value, num_ty) + and hasattr(operator, name) + and len(args) == 1 + and args[0].is_python_constant() + ) + + if isinstance(self.value, str) and name in str.__dict__.keys(): + method = getattr(self.value, name) + return ConstantVariable.create(method(*const_args, **const_kwargs)) + elif has_arith_binop(int) or has_arith_binop(float): + op = getattr(operator, name) + add_target = const_args[0] + if isinstance(add_target, (torch.SymInt, torch.SymFloat)): + from .tensor import SymNodeVariable + + # Addition between a non sym and sym makes a sym + # sym_num = tx.output.register_attr_or_module( + # add_target, f"sym_shape_{add_target}", source=None + # ) + proxy = tx.output.create_proxy( + "call_function", op, (self.value, add_target), {} + ) + return SymNodeVariable.create(tx, proxy, add_target) + return ConstantVariable.create(op(self.value, add_target)) + elif name == "__len__" and not (args or kwargs): + return ConstantVariable.create(len(self.value)) + elif name == "__contains__" and len(args) == 1 and args[0].is_python_constant(): + assert not kwargs + search = args[0].as_python_constant() + result = search in self.value + return ConstantVariable.create(result) + + unimplemented(f"const method call {typestr(self.value)}.{name}") + + def call_hasattr(self, tx, name: str) -> "VariableTracker": + result = hasattr(self.value, name) + return variables.ConstantVariable.create(result) + + +class EnumVariable(VariableTracker): + def __init__(self, value, **kwargs): + super().__init__(**kwargs) + self.value = value + + def as_proxy(self): + return self.value + + def __str__(self): + return f"EnumVariable({type(self.value)})" + + def python_type(self): + 
return type(self.value) + + def as_python_constant(self): + return self.value + + def const_getattr(self, tx, name): + member = getattr(self.value, name) + if callable(member): + raise NotImplementedError() + return member diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/ctx_manager.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/ctx_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..548d13246eba7f4e6a4f08a619cffb573db98dc1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/ctx_manager.py @@ -0,0 +1,825 @@ +# mypy: ignore-errors + +import dataclasses +import inspect +from typing import Callable, Dict, List, Optional + +import torch._C +from torch._guards import Guard + +from .. import variables +from ..bytecode_transformation import create_call_function, create_instruction +from ..device_interface import get_interface_for_device +from ..exc import unimplemented, Unsupported +from ..guards import GuardBuilder, install_guard +from ..source import AttrSource, GlobalStateSource +from .base import VariableTracker +from .functions import ( + NestedUserFunctionVariable, + UserFunctionVariable, + UserMethodVariable, + WrappedUserFunctionVariable, + WrappedUserMethodVariable, +) + + +@dataclasses.dataclass +class ContextMangerState: + """ + Mutating `self` in VariableTracker is not allowed because we copy + them. This is a mutable container pointed to by context managers + that won't get copied, so it is safe to mutate. + """ + + cleanup_fn: Optional[Callable] = None + proxy: Optional[torch.fx.Proxy] = None + + def cleanup(self): + if self.cleanup_fn is not None: + self.cleanup_fn() + self.cleanup_fn = None + + def cleanup_assert(self): + assert self.cleanup_fn, "multiple exits?" 
+ self.cleanup() + + +class ContextWrappingVariable(VariableTracker): + _nonvar_fields = { + "cm_obj", + "target_values", + "initial_values", + "state", + *VariableTracker._nonvar_fields, + } + + def __init__(self, target_values, initial_values=None, *, state=None, **kwargs): + super().__init__(**kwargs) + self.target_values = target_values + self.initial_values = initial_values + self.state = ContextMangerState() if state is None else state + + def enter(self, tx): + self._call_func(tx, self.target_values) + self.set_cleanup_hook(tx) + return variables.ConstantVariable.create(None) + + def set_cleanup_hook(self, tx, fn=None): + if fn is None: + + def fn(): + self._call_func(tx, self.initial_values) + + self.state.cleanup_fn = fn + tx.output.add_cleanup_hook(self.state.cleanup) + + def exit(self, tx, *args): + self.state.cleanup_assert() + return variables.ConstantVariable.create(None) + + def reconstruct(self, codegen): + codegen( + AttrSource(codegen.tx.import_source(self.module_name()), self.fn_name()) + ) + + def module_name(self): + raise NotImplementedError("module_name called on base") + + def fn_name(self): + raise NotImplementedError("fn_name called on base") + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + assert len(args) == 1 + if isinstance(args[0], NestedUserFunctionVariable): + args[0] = UserFunctionVariable(args[0].get_function()) + assert isinstance(args[0], (UserMethodVariable, UserFunctionVariable)) + + if isinstance(args[0], UserMethodVariable): + return WrappedUserMethodVariable(args[0], self) + + if isinstance(args[0], UserFunctionVariable): + return WrappedUserFunctionVariable(args[0], self) + + +class GenericContextWrappingVariable(ContextWrappingVariable): + def __init__(self, target_values, initial_values=None, *, cm_obj=None, **kwargs): + assert cm_obj is not None + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + self.cm_obj = cm_obj + + def enter(self, tx): + source = None if self.source is None else AttrSource(self.source, "__enter__") + try: + return variables.UserMethodVariable( + self.cm_obj.__enter__.__func__, + variables.UserDefinedObjectVariable(self.cm_obj), + source=source, + ).call_function(tx, [], {}) + except Unsupported as e: + raise unimplemented( + f"Unsupported context manager {self.cm_obj}'s __enter__ function" + ) from e + + def exit(self, tx, *args): + source = None if self.source is None else AttrSource(self.source, "__exit__") + try: + x = variables.UserMethodVariable( + self.cm_obj.__exit__.__func__, + variables.UserDefinedObjectVariable(self.cm_obj), + source=source, + ).call_function( + tx, + [ + variables.ConstantVariable.create(None), + variables.ConstantVariable.create(None), + variables.ConstantVariable.create(None), + ], + {}, + ) + except Unsupported as e: + raise unimplemented( + f"Unsupported context manager {self.cm_obj}'s __exit__ function" + ) from e + + tx.generic_context_manager_depth -= 1 + return x + + +class GradInplaceRequiresGradCtxManagerVariable(ContextWrappingVariable): + """represents torch grad requries grad""" + + @staticmethod + def create(tx, target_values, **kwargs): + return GradInplaceRequiresGradCtxManagerVariable( + target_values=target_values, + initial_values=None, + **kwargs, + ) + + def enter(self, tx): + [enabled] = self.target_values + self.prev_state = torch._C._functorch.get_inplace_requires_grad_allowed() + torch._C._functorch.set_inplace_requires_grad_allowed(enabled) + 
self.set_cleanup_hook( + tx, + lambda: torch._C._functorch.set_inplace_requires_grad_allowed( + self.prev_state + ), + ) + self.state.proxy = tx.output.create_node( + "call_function", + torch._C._functorch.set_inplace_requires_grad_allowed, + (enabled,), + {}, + ) + return variables.ConstantVariable.create(None) + + def exit(self, tx, *args): + self.state.cleanup() + tx.output.create_node( + "call_function", + torch._C._functorch.set_inplace_requires_grad_allowed, + (self.prev_state,), + {}, + ) + return variables.ConstantVariable.create(None) + + +class GradIncrementNestingCtxManagerVariable(ContextWrappingVariable): + """represents torch.func.grad increment/decrement nesting""" + + # A guard is needed as the grad level is baked into the torch FX graph + # This is fine if grad is only called from within the function + # being compiled. But the FX graph may be invalid in the case of a grad + # call from eager that calls the compiled function, as the grad levels + # may be different. + _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.FUNCTORCH_STACK_MATCH) + + @staticmethod + def create(tx, **kwargs): + var = GradIncrementNestingCtxManagerVariable( + target_values=None, + initial_values=None, + **kwargs, + ) + return var + + def enter(self, tx): + install_guard(self._guards_singleton) + grad_level = torch._C._functorch._grad_increment_nesting() + self.set_cleanup_hook(tx, lambda: torch._C._functorch._grad_decrement_nesting()) + self.state.proxy = tx.output.create_node( + "call_function", + torch._C._functorch._grad_increment_nesting, + (), + {}, + ) + return variables.ConstantVariable.create(grad_level) + + def exit(self, tx, *args): + self.state.cleanup() + tx.output.create_node( + "call_function", torch._C._functorch._grad_decrement_nesting, (), {} + ) + return variables.ConstantVariable.create(None) + + +class VmapIncrementNestingCtxManagerVariable(ContextWrappingVariable): + """represents torch VMap increment/decrement nesting""" + + # A guard is needed as the vmap level is baked into the torch FX graph + # generated. This is fine if vmap is only called from within the function + # being compiled. But the FX graph may be invalid in the case of a vmap + # call from eager that calls the compiled function, as the vmap levels + # may be different. 
+ _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.FUNCTORCH_STACK_MATCH) + + @staticmethod + def create(tx, target_values, **kwargs): + var = VmapIncrementNestingCtxManagerVariable( + target_values=target_values, + initial_values=None, + **kwargs, + ) + return var + + def enter(self, tx): + install_guard(self._guards_singleton) + batch_size, randomness = self.target_values + vmap_level = torch._C._functorch._vmap_increment_nesting(batch_size, randomness) + self.set_cleanup_hook(tx, lambda: torch._C._functorch._vmap_decrement_nesting()) + self.state.proxy = tx.output.create_node( + "call_function", + torch._C._functorch._vmap_increment_nesting, + (batch_size, randomness), + {}, + ) + return variables.ConstantVariable.create(vmap_level) + + def exit(self, tx, *args): + self.state.cleanup() + tx.output.create_node( + "call_function", torch._C._functorch._vmap_decrement_nesting, (), {} + ) + return variables.ConstantVariable.create(None) + + +class GradModeVariable(ContextWrappingVariable): + """represents torch.{no_grad,enable_grad,set_grad_mode}()""" + + _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.GRAD_MODE) + + @staticmethod + def create(tx, target_value, initialized=False, **kwargs): + var = GradModeVariable( + target_values=[target_value], + initial_values=[torch.is_grad_enabled()], + **kwargs, + ) + if initialized: + var._call_func(tx, var.target_values) + return var + + def __init__(self, target_values, initial_values=None, initialized=True, **kwargs): + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + install_guard(self._guards_singleton) + + def enter(self, tx): + self._call_func(tx, self.target_values) + return variables.ConstantVariable.create(None) + + def exit(self, tx, *args): + self._call_func(tx, self.initial_values) + return variables.ConstantVariable.create(None) + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ): + self._call_func(tx, self.initial_values) # undo eager initialization + return super().call_function(tx, args, kwargs) + + def _call_func(self, tx, values): + assert len(values) == 1 + value = values[0] + # Coalesce grad mode mutations + if torch.is_grad_enabled() != value: + tx.output.create_node( + "call_function", torch._C._set_grad_enabled, (value,), {} + ) + torch._C._set_grad_enabled(value) + + def module_name(self): + return "torch" + + def fn_name(self): + return "set_grad_enabled" + + +class InferenceModeVariable(ContextWrappingVariable): + @staticmethod + def create(tx, target_value, **kwargs): + var = InferenceModeVariable( + [target_value], initial_values=torch.is_inference_mode_enabled(), **kwargs + ) + return var + + def __init__( + self, + target_values, + initial_values=None, + **kwargs, + ): + if initial_values is None: + # This must be called here since function defaults are evaluated at import time + initial_values = torch.is_inference_mode_enabled() + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + self.target_values = target_values + + def exit(self, tx, *args): + self.state.cleanup_assert() + tx.output.create_node( + "call_function", + torch.autograd.grad_mode._exit_inference_mode, + (self.state.proxy,), + {}, + ) + + def enter(self, tx): + ctx = torch.autograd.grad_mode._enter_inference_mode(*self.target_values) + self.set_cleanup_hook( + tx, lambda: torch.autograd.grad_mode._exit_inference_mode(ctx) + ) + self.state.proxy = tx.output.create_node( + "call_function", + 
torch.autograd.grad_mode._enter_inference_mode, + (*self.target_values,), + {}, + ) + + def module_name(self): + return "torch" + + def fn_name(self): + return "inference_mode" + + +class TorchFunctionDisableVariable(ContextWrappingVariable): + """represents whether torch function overrides are enabled or not""" + + _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.TORCH_FUNCTION_STATE) + + @staticmethod + def create(tx, **kwargs): + var = TorchFunctionDisableVariable( + target_values=[False], + initial_values=[tx.output.torch_function_enabled], + **kwargs, + ) + # mlazos: I think this is here to make sure we don't reinvoke on clone() + var._call_func(tx, [False]) + var.set_cleanup_hook(tx) + return var + + def __init__(self, target_values, initial_values=None, **kwargs): + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + install_guard(self._guards_singleton) + + def enter(self, tx): + return variables.ConstantVariable.create(None) + + def _call_func(self, tx, values): + assert len(values) == 1 + tx.output.set_torch_function_state(values[0]) + + +class DeterministicAlgorithmsVariable(ContextWrappingVariable): + """represents torch.{are_deterministic_algorithms_enabled,use_deterministic_algorithms}()""" + + _guards_singleton = Guard( + GlobalStateSource(), GuardBuilder.DETERMINISTIC_ALGORITHMS + ) + + @staticmethod + def create(tx, target_value, **kwargs): + var = DeterministicAlgorithmsVariable( + target_values=[target_value], + initial_values=[torch.are_deterministic_algorithms_enabled()], + **kwargs, + ) + var._call_func(tx, [target_value]) + var.set_cleanup_hook(tx) + return var + + def __init__(self, target_values, initial_values=None, **kwargs): + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + install_guard(self._guards_singleton) + + def enter(self, tx): + return variables.ConstantVariable.create(None) + + def _call_func(self, tx, values): + assert len(values) == 1 + value = values[0] + tx.output.create_node( + "call_function", torch._C._set_deterministic_algorithms, (value,), {} + ), + torch._C._set_deterministic_algorithms(value) + + def module_name(self): + return "torch" + + def fn_name(self): + return "use_deterministic_algorithms" + + +class DisabledSavedTensorsHooksVariable(ContextWrappingVariable): + """represents torch.autograd.graph.disable_saved_tensors_hook.""" + + @staticmethod + def create(tx, target_value, **kwargs): + var = DisabledSavedTensorsHooksVariable( + target_values=[target_value], + initial_values=[ + torch._C._autograd._saved_tensors_hooks_get_disabled_error_message() + ], + **kwargs, + ) + var._call_func(tx, [target_value]) + var.set_cleanup_hook(tx) + return var + + def __init__(self, target_values, initial_values=None, **kwargs): + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + + def enter(self, tx): + return variables.ConstantVariable.create(None) + + def _call_func(self, tx, values): + assert len(values) == 1 + value = values[0] + if value is not None: + # Disable `saved_tensors_hooks` with message (`value`) + # OR + # we are exiting this context and restoring the previous message. + tx.output.create_node( + "call_function", + torch._C._autograd._saved_tensors_hooks_disable, + (value,), + {}, + ) + torch._C._autograd._saved_tensors_hooks_disable(value) + else: + # We are exiting this context and if prev_message was None, we re-enable `saved_tensors_hooks`. 
+ tx.output.create_node( + "call_function", torch._C._autograd._saved_tensors_hooks_enable, (), {} + ) + torch._C._autograd._saved_tensors_hooks_enable() + + def module_name(self): + return "torch.autograd.graph" + + def fn_name(self): + return "disable_saved_tensors_hooks" + + +class AutocastModeVariable(ContextWrappingVariable): + @staticmethod + def create(func, args, kwargs): + assert func in [ + torch.amp.autocast_mode.autocast, + torch.cuda.amp.autocast, + torch.cpu.amp.autocast, + ] + # device_type : str, + # dtype : Optional[_dtype] = None, + # enabled : bool = True, + # cache_enabled : Optional[bool] = None):cache_enabled + bound_args = inspect.signature(func).bind(*args, **kwargs) + bound_args.apply_defaults() + target_values = [] + kwargs.clear() + + for key in ["device_type", "dtype", "enabled", "cache_enabled"]: + if key == "device_type" and func in [ + torch.cuda.amp.autocast, + torch.cpu.amp.autocast, + ]: + arg = "cuda" if func is torch.cuda.amp.autocast else "cpu" + else: + arg = bound_args.arguments[key] + if isinstance(arg, VariableTracker): + target_values.append(arg.as_python_constant()) + else: + target_values.append(arg) + + var = AutocastModeVariable(target_values, initial_values=None, **kwargs) + return var + + def __init__(self, target_values, initial_values=None, **kwargs): + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + self.target_values = target_values + + def exit(self, tx, *args): + self.state.cleanup_assert() + tx.output.create_node( + "call_function", torch.amp._exit_autocast, (self.state.proxy,), {} + ) + + def enter(self, tx): + ctx = torch.amp._enter_autocast(*self.target_values) + self.set_cleanup_hook(tx, lambda: torch.amp._exit_autocast(ctx)) + self.state.proxy = tx.output.create_node( + "call_function", torch.amp._enter_autocast, (*self.target_values,), {} + ) + + def module_name(self): + return "torch.amp.autocast_mode" + + def fn_name(self): + return "autocast" + + +class NullContextVariable(ContextWrappingVariable): + """ + This class represents Python contextlib.nullcontext. + It's used as a placeholder for other context managers that Dynamo doesn't + support yet, e.g, torch.autograd.profiler.record_function. 
+ """ + + def __init__(self, target_values=None, **kwargs): + super().__init__(target_values=target_values, **kwargs) + + def enter(self, tx): + return variables.ConstantVariable.create(None) + + def exit(self, tx, *args): + return variables.ConstantVariable.create(None) + + def module_name(self): + return "contextlib" + + def fn_name(self): + return "nullcontext" + + +class StreamContextVariable(ContextWrappingVariable): + @staticmethod + def create(tx, target_value, **kwargs): + from .builder import wrap_fx_proxy_cls + + current_stream_method = get_interface_for_device( + target_value.device + ).current_stream + current_stream = wrap_fx_proxy_cls( + StreamVariable, + tx, + tx.output.create_proxy( + "call_function", + current_stream_method, + (None,), + {}, + ), + ) + return StreamContextVariable( + target_values=[target_value], + initial_values=[current_stream], + device=target_value.device, + **kwargs, + ) + + def __init__(self, target_values, device, initial_values=None, **kwargs): + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + self.device = device + self.set_stream = get_interface_for_device(self.device).set_stream + self.set_stream_id = get_interface_for_device(self.device)._set_stream_by_id + + def enter(self, tx): + # stream generated inside the traced function + if self.target_values[0].as_proxy() is not None: + tx.output.create_proxy( + "call_function", + self.set_stream, + (self.target_values[0].as_proxy(),), + {}, + ) + # stream passed from outside the traced function + else: + stream = self.target_values[0].value + tx.output.create_proxy( + "call_function", + self.set_stream_id, + (stream.stream_id, stream.device_index, stream.device_type), + {}, + ) + self.set_stream(self.target_values[0].value) + self.set_cleanup_hook(tx, lambda: self.set_stream(self.initial_values[0].value)) + + def exit(self, tx, *args): + tx.output.create_proxy( + "call_function", + self.set_stream, + (self.initial_values[0].as_proxy(),), + {}, + ) + self.state.cleanup_assert() + + +class PreserveVersionContextVariable(ContextWrappingVariable): + """ + Wraps torch.autograd._unsafe_preserve_version_counter + """ + + @staticmethod + def constructor(tx): + return variables.LambdaVariable( + lambda tensor: PreserveVersionContextVariable( + tensor, + tensor.var_getattr(tx, "_version"), + ) + ) + + def __init__(self, tensor, prev_version, **kwargs): + kwargs.setdefault("target_values", None) + super().__init__(**kwargs) + self.tensor = tensor + self.prev_version = prev_version + + def enter(self, tx): + pass + + def exit(self, tx, *args): + from ..tensor_version_op import _unsafe_set_version_counter + + return variables.TorchInGraphFunctionVariable( + _unsafe_set_version_counter + ).call_function(tx, [self.tensor, self.prev_version], {}) + + def reconstruct(self, codegen): + unimplemented( + "torch.autograd._unsafe_preserve_version_counter with graph break" + ) + + +class StreamVariable(VariableTracker): + def __init__(self, proxy, value, device, **kwargs): + if proxy is not None and "example_value" in proxy.node.meta: + assert proxy.node.meta["example_value"] == value + assert ( + value.device.type == device.type + ), "stream value is not equal to the passed device" + super().__init__(**kwargs) + self.proxy = proxy + self.value = value + self.device = device + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + assert hasattr(self.value, name), f"no stream method found 
named {name}" + assert name in [ + "wait_stream", + "synchronize", + "query", + "record_event", + "wait_event", + ], f" unsupported stream method {name}" + + from ..utils import proxy_args_kwargs + from .builder import wrap_fx_proxy_cls + + if name in ("wait_stream", "synchronize", "wait_event"): + tx.output.create_proxy( + "call_method", name, *proxy_args_kwargs([self] + args, kwargs) + ) + return variables.ConstantVariable(None) + elif name == "query": + return wrap_fx_proxy_cls( + target_cls=variables.ConstantVariable, + tx=tx, + proxy=tx.output.create_proxy( + "call_method", name, *proxy_args_kwargs([self] + args, kwargs) + ), + ) + elif name == "record_event": + return wrap_fx_proxy_cls( + target_cls=EventVariable, + tx=tx, + proxy=tx.output.create_proxy( + "call_method", name, *proxy_args_kwargs([self] + args, kwargs) + ), + ) + else: + unimplemented(self.device + " stream method " + name + " unsupported") + + def as_proxy(self): + return self.proxy + + def reconstruct(self, codegen): + # If we got here, this stream is fully subsumed by the graph - this means it is + # not an input or global + assert not self.source + # Since we just proved that - for other such structures, like lists and dicts, reconstruction + # is fine and sound according to dynamo principles of treating collectives. However, + # streams are special in that we want to preserve the identity of the stream as the same as in the graph + # Normally, we would do this via codegen for the proxy mapping to an output - we cannot do this yet, as we do not + # yet have a plan for how we want to handle the case where the stream is used as an input or an output. Pending + # design, to unblock current work, we lift the stream into a global and then codegen bytecode to load it from there. + prefix = f"_stream_{self.device}" + name = codegen.tx.output.install_global_by_id(prefix, self.value) + codegen.append_output( + codegen.create_load_global(name, push_null=False, add=True) + ) + + +class EventVariable(VariableTracker): + def __init__(self, proxy, value, **kwargs): + if proxy is not None and "example_value" in proxy.node.meta: + assert proxy.node.meta["example_value"] == value + super().__init__(**kwargs) + self.proxy = proxy + self.value = value + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from ..utils import proxy_args_kwargs + from .builder import wrap_fx_proxy_cls + + if name in ("wait", "record", "synchronize"): + tx.output.create_proxy( + "call_method", name, *proxy_args_kwargs([self] + args, kwargs) + ) + return variables.ConstantVariable(None) + elif name == "query": + return wrap_fx_proxy_cls( + target_cls=variables.ConstantVariable, + tx=tx, + proxy=tx.output.create_proxy( + "call_method", name, *proxy_args_kwargs([self] + args, kwargs) + ), + ) + else: + unimplemented(f"event method {name} unsupported") + + def as_proxy(self): + return self.proxy + + +class WithExitFunctionVariable(VariableTracker): + def __init__(self, ctx: ContextWrappingVariable, target, **kwargs): + super().__init__(**kwargs) + assert isinstance(ctx, ContextWrappingVariable) + self.ctx = ctx + self.target = target + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + assert not kwargs + return self.ctx.exit(tx, *args) + + def reconstruct(self, codegen): + # Note here we reconstruct the context manager rather than the + # exit function. 
The handler generated by BlockStackEntry + # will re-enter the context in the resume function. + codegen( + AttrSource( + codegen.tx.import_source(self.ctx.module_name()), self.ctx.fn_name() + ) + ) + + if codegen.tx.output.partial_convert: + codegen.extend_output( + [codegen.create_load_const(val) for val in self.ctx.target_values] + ) + codegen.extend_output( + create_call_function(len(self.ctx.target_values), True) + ) + codegen.append_output(create_instruction("SETUP_WITH", target=self.target)) + codegen.append_output(create_instruction("POP_TOP")) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/dicts.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/dicts.py new file mode 100644 index 0000000000000000000000000000000000000000..6fe3a35e15e4593f8f8ea148e0175bc82d16f8c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/dicts.py @@ -0,0 +1,897 @@ +# mypy: ignore-errors + +import collections +import dataclasses +import functools +import inspect +import sys +from typing import Dict, List, Optional + +from torch._subclasses.fake_tensor import is_fake + +from .. import variables +from ..bytecode_transformation import ( + create_call_function, + create_call_method, + create_instruction, +) +from ..eval_frame import skip_code + +from ..exc import unimplemented +from ..guards import GuardBuilder, install_guard +from ..source import AttrSource, GetItemSource +from ..utils import dict_keys, dict_values, istype, specialize_symnode +from .base import MutableLocal, VariableTracker +from .constant import ConstantVariable + +# [Adding a new supported class within the keys of ConstDictVarialble] +# - Add its tracker type to is_hashable +# - (perhaps) Define how it is compared in _HashableTracker._eq_impl + + +def is_hashable(x): + if isinstance(x, variables.TensorVariable): + # Tensors are hashable if they have an example_value (a fake tensor) + # Most VT's should have one. + # It'd be nice if at some point we could assert that they all have one + return x.as_proxy().node.meta.get("example_value") is not None + elif isinstance(x, variables.TupleVariable): + return all(is_hashable(e) for e in x.items) + else: + return isinstance( + x, + ( + variables.BuiltinVariable, + variables.SymNodeVariable, + variables.ConstantVariable, + variables.EnumVariable, + variables.user_defined.UserDefinedClassVariable, + variables.UserFunctionVariable, + variables.SkipFunctionVariable, + variables.misc.NumpyVariable, + variables.NNModuleVariable, + variables.MethodWrapperVariable, + variables.TorchInGraphFunctionVariable, + variables.TypingVariable, + variables.FunctoolsPartialVariable, + ), + ) + + +class ConstDictVariable(VariableTracker): + class _HashableTracker: + """ + Auxiliary opaque internal class that wraps a VariableTracker and makes it hashable + This should not be seen or touched by anything outside of ConstDictVariable and its children + Note that it's also fine to put VTs into dictionaries and sets, but doing so does not take into account aliasing + """ + + def __init__(self, vt): + # We specialize SymNodes + vt = specialize_symnode(vt) + # TODO Temorarily remove to figure out what keys are we breaking on + # and add proper support for them + if not is_hashable(vt): + unimplemented(f"Dict key of type {type(vt)}. 
Key: {vt}") + self.vt = vt + + @property + def underlying_value(self): + if isinstance(self.vt, variables.TensorVariable): + x = self.vt.as_proxy().node.meta["example_value"] + elif isinstance(self.vt, variables.TupleVariable): + Hashable = ConstDictVariable._HashableTracker + x = tuple(Hashable(e).underlying_value for e in self.vt.items) + elif isinstance(self.vt, variables.NNModuleVariable): + return self.vt.module + elif isinstance(self.vt, variables.UserFunctionVariable): + return self.vt.get_function() + else: + x = self.vt.as_python_constant() + return x + + def __hash__(self): + return hash(self.underlying_value) + + @staticmethod + def _eq_impl(a, b): + # TODO: Put this in utils and share it between variables/builtin.py and here + if type(a) != type(b): + return False + elif isinstance(a, tuple): + Hashable = ConstDictVariable._HashableTracker + return len(a) == len(b) and all( + Hashable._eq_impl(u, v) for u, v in zip(a, b) + ) + elif is_fake(a): + return a is b + else: + return a == b + + def __eq__(self, other: "ConstDictVariable._HashableTracker") -> bool: + Hashable = ConstDictVariable._HashableTracker + assert isinstance(other, Hashable) or ConstantVariable.is_literal( + other + ), type(other) + if isinstance(other, Hashable): + return Hashable._eq_impl(self.underlying_value, other.underlying_value) + + # constant + return Hashable._eq_impl(self.underlying_value, other) + + def __init__( + self, items: Dict[VariableTracker, VariableTracker], user_cls=dict, **kwargs + ): + super().__init__(**kwargs) + + Hashable = ConstDictVariable._HashableTracker + + # Keys will just be HashableTrackers when cloning, in any other case they'll be VariableTrackers + assert all( + isinstance(x, (VariableTracker, Hashable)) + and isinstance(v, VariableTracker) + for x, v in items.items() + ) + + def make_hashable(key): + return key if isinstance(key, Hashable) else Hashable(key) + + self.items = {make_hashable(x): v for x, v in items.items()} + self.user_cls = user_cls + + def as_proxy(self): + return {k.vt.as_proxy(): v.as_proxy() for k, v in self.items.items()} + + def as_python_constant(self): + return { + k.vt.as_python_constant(): v.as_python_constant() + for k, v in self.items.items() + } + + def keys_as_python_constant(self): + return {k.vt.as_python_constant(): v for k, v in self.items.items()} + + def python_type(self): + return self.user_cls + + def __contains__(self, vt): + assert isinstance(vt, VariableTracker) + Hashable = ConstDictVariable._HashableTracker + return is_hashable(vt) and Hashable(vt) in self.items + + def reconstruct(self, codegen): + # instructions to load collections.OrderedDict if necessary + if self.user_cls is collections.OrderedDict: + codegen.extend_output( + [ + codegen.create_load_python_module(collections, True), + codegen.create_load_attr("OrderedDict"), + ] + ) + # instructions to build the dict keys and values + for key, value in self.items.items(): + codegen(key.vt) + codegen(value) + # BUILD_MAP and calling collections.OrderedDict if necessary + if self.user_cls is collections.OrderedDict: + codegen.extend_output( + [ + create_instruction("BUILD_MAP", arg=len(self.items)), + *create_call_function(1, False), + ] + ) + # BUILD_MAP only if user_cls is dict + else: + codegen.append_output(create_instruction("BUILD_MAP", arg=len(self.items))) + + def getitem_const(self, arg: VariableTracker): + key = ConstDictVariable._HashableTracker(arg) + if key not in self.items: + raise KeyError(arg.value) + return self.items[key] + + def call_method( + self, + tx, + 
name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from . import ( + BuiltinVariable, + ConstantVariable, + ListIteratorVariable, + ListVariable, + TupleVariable, + ) + + Hashable = ConstDictVariable._HashableTracker + + arg_hashable = args and is_hashable(args[0]) + + if name == "__getitem__": + assert len(args) == 1 + return self.getitem_const(args[0]) + elif name == "items": + assert not (args or kwargs) + return TupleVariable( + [TupleVariable([k.vt, v]) for k, v in self.items.items()] + ) + elif name == "keys": + assert not (args or kwargs) + return DictKeys(self) + elif name == "values": + assert not (args or kwargs) + return DictValues(self) + elif name == "copy": + assert not (args or kwargs) + return self.clone(items=self.items.copy(), mutable_local=MutableLocal()) + elif name == "__len__": + assert not (args or kwargs) + return ConstantVariable.create(len(self.items)) + elif name == "__setitem__" and arg_hashable and self.mutable_local: + assert not kwargs and len(args) == 2 + tx.output.side_effects.mutation(self) + self.items[Hashable(args[0])] = args[1] + return ConstantVariable.create(None) + elif name in ("pop", "get") and len(args) in (1, 2) and args[0] not in self: + # missing item, return the default value + if len(args) == 1: + return ConstantVariable(None) + else: + return args[1] + elif name == "pop" and arg_hashable and self.mutable_local: + tx.output.side_effects.mutation(self) + return self.items.pop(Hashable(args[0])) + elif name == "clear": + tx.output.side_effects.mutation(self) + self.items.clear() + return ConstantVariable.create(None) + elif ( + name == "update" + and len(args) == 1 + and isinstance( + args[0], + ( + ConstDictVariable, + ListVariable, + TupleVariable, + ListIteratorVariable, + ), + ) + and self.mutable_local + ): + tx.output.side_effects.mutation(self) + if isinstance(args[0], ConstDictVariable): + dict_vt = args[0] + else: + dict_vt = BuiltinVariable.call_custom_dict(tx, dict, args[0]) + self.items.update(dict_vt.items) + # Wrap strings + kwargs = { + Hashable(ConstantVariable.create(k)): v for k, v in kwargs.items() + } + self.items.update(kwargs) + return ConstantVariable.create(None) + elif name in ("get", "__getattr__") and args[0] in self: + return self.getitem_const(args[0]) + elif name == "__contains__" and len(args) == 1: + return ConstantVariable.create(args[0] in self) + else: + return super().call_method(tx, name, args, kwargs) + + def unpack_var_sequence(self, tx): + return [x.vt for x in self.items.keys()] + + +class DefaultDictVariable(ConstDictVariable): + def __init__(self, items, user_cls, default_factory=None, **kwargs): + super().__init__(items, user_cls, **kwargs) + assert user_cls is collections.defaultdict + self.default_factory = default_factory + + def is_python_constant(self): + # Return false for unsupported defaults. This ensures that a bad handler + # path is not taken in BuiltinVariable for getitem. 
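A minimal sketch of the eager defaultdict behavior that DefaultDictVariable mirrors: __getitem__ on a missing key calls default_factory, stores the result, and returns it, while a missing factory raises KeyError:
import collections

dd = collections.defaultdict(list)
dd["missing"].append(1)        # default_factory() result is stored, then returned
print(dd)                      # defaultdict(<class 'list'>, {'missing': [1]})

bare = collections.defaultdict(None)
try:
    bare["missing"]
except KeyError:
    print("KeyError when default_factory is None")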
+ if self.default_factory not in [list, tuple, dict] and not self.items: + return False + return super().is_python_constant() + + @staticmethod + def is_supported_arg(arg): + if isinstance(arg, variables.BuiltinVariable): + return arg.fn in [list, tuple, dict] + else: + return isinstance(arg, variables.functions.BaseUserFunctionVariable) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name == "__getitem__": + assert len(args) == 1 + + if args[0] in self: + return self.getitem_const(args[0]) + else: + if self.default_factory is None: + raise KeyError(f"{args[0]}") + else: + default_var = self.default_factory.call_function(tx, [], {}) + super().call_method( + tx, "__setitem__", (args[0], default_var), kwargs + ) + return default_var + else: + return super().call_method(tx, name, args, kwargs) + + +class SetVariable(ConstDictVariable): + """We model a sets as dictonary with None values""" + + def __init__( + self, + items: List[VariableTracker], + **kwargs, + ): + items = dict.fromkeys(items, SetVariable._default_value()) + super().__init__(items, **kwargs) + + @property + def set_items(self): + return set(self.items.keys()) + + @staticmethod + def _default_value(): + # Variable to fill in he keys of the dictinary + return ConstantVariable.create(None) + + def as_proxy(self): + return {k.vt.as_proxy() for k in self.set_items} + + def python_type(self): + return set + + def as_python_constant(self): + return {k.vt.as_python_constant() for k in self.set_items} + + def reconstruct(self, codegen): + codegen.foreach([x.vt for x in self.set_items]) + codegen.append_output(create_instruction("BUILD_SET", arg=len(self.set_items))) + + def call_method( + self, + tx, + name, + args: List[VariableTracker], + kwargs: Dict[str, VariableTracker], + ) -> "VariableTracker": + # We foward the calls to the dictionary model + if name == "add": + assert not kwargs + assert len(args) == 1 + name = "__setitem__" + args = (args[0], SetVariable._default_value()) + elif name == "pop": + assert not kwargs + assert not args + # Choose an item at random and pop it via the Dict.pop method + result = self.set_items.pop().vt + super().call_method(tx, name, (result,), kwargs) + return result + return super().call_method(tx, name, args, kwargs) + + def getitem_const(self, arg: VariableTracker): + raise RuntimeError("Illegal to getitem on a set") + + +class DictView(VariableTracker): + """ + Models _PyDictViewObject + + This is an "abstract" class. 
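A minimal sketch of the representation SetVariable uses, modeling a set as a dict whose keys are the elements and whose values are all None:
items = ["a", "b", "a"]
backing = dict.fromkeys(items, None)   # {'a': None, 'b': None}: dedup, insertion order kept
print(list(backing))                   # ['a', 'b']
backing["c"] = None                    # set.add lowered to dict.__setitem__ with the default value
elem = next(iter(backing))             # set.pop: take some element...
backing.pop(elem)                      # ...and remove it through the dict model
print(list(backing))                   # ['b', 'c']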
Subclasses will override kv and the items method + """ + + kv: Optional[str] = None + + def __init__(self, dv_dict: ConstDictVariable, **kwargs): + super().__init__(**kwargs) + assert self.kv in ("keys", "values") + assert isinstance(dv_dict, ConstDictVariable) + self.dv_dict = dv_dict + + @property + def view_items(self): + return getattr(self.dv_dict.items, self.kv)() + + @property + def view_items_vt(self): + # Returns an iterable of the unpacked items + # Implement in the subclasses + raise NotImplementedError() + + def unpack_var_sequence(self, tx): + def unwrap(x): + return x.vt if self.kv == "keys" else x + + return [unwrap(x) for x in self.view_items] + + def reconstruct(self, codegen): + codegen(self.dv_dict) + codegen.extend_output( + [ + create_instruction("LOAD_METHOD", argval=self.kv), + *create_call_method(0), + ] + ) + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if name == "__len__": + return self.dv_dict.call_method(tx, name, args, kwargs) + return super().call_method(tx, name, args, kwargs) + + +class DictKeys(DictView): + kv = "keys" + + @property + def set_items(self): + return set(self.view_items) + + @property + def view_items_vt(self): + # Returns an iterable of the unpacked items + return [x.vt for x in self.view_items] + + def python_type(self): + return dict_keys + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if name == "__contains__": + return self.dv_dict.call_method(tx, name, args, kwargs) + return super().call_method(tx, name, args, kwargs) + + +class DictValues(DictView): + # DictValues is an iterable but cannot be compared. + kv = "values" + + @property + def view_items_vt(self): + return list(self.view_items) + + def python_type(self): + return dict_values + + +def _is_matching_transformers_cls(cls) -> bool: + mod = sys.modules.get("transformers.file_utils") + return mod is not None and issubclass(cls, mod.ModelOutput) + + +def _is_matching_diffusers_cls(cls) -> bool: + mod = sys.modules.get("diffusers.utils") + return mod is not None and issubclass(cls, mod.BaseOutput) + + +def _call_hasattr_customobj(self, tx, name: str) -> "VariableTracker": + """Shared method between DataClassVariable and CustomizedDictVariable where items are attrs""" + if name in self.items or hasattr(self.user_cls, name): + return ConstantVariable(True) + elif istype(self.mutable_local, MutableLocal) and self.source is None: + # Something created locally can't have any extra fields on it + return ConstantVariable(False) + elif self.mutable_local is None and self.source: + # Maybe add a guard + try: + example = tx.output.root_tx.get_example_value(self.source) + install_guard( + AttrSource(self.source, name).make_guard(GuardBuilder.HASATTR) + ) + return ConstantVariable(hasattr(example, name)) + except KeyError: + pass + unimplemented( + f"hasattr({self.__class__.__name__}, {name}) {self.mutable_local} {self.source}" + ) + + +class DataClassVariable(ConstDictVariable): + """ + This is a bit of a hack to deal with + transformers.file_utils.ModelOutput() from huggingface. + + ModelOutput causes trouble because it a a mix of a dataclass and a + OrderedDict and it calls super() methods implemented in C. 
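A minimal sketch of the plain-Python dict-view behavior that DictView/DictKeys/DictValues model: views are live windows onto the dict, keys support membership tests, and __len__ is answered by the underlying dict:
d = {"a": 1, "b": 2}
keys, values = d.keys(), d.values()
print("a" in keys)              # True (DictKeys forwards __contains__ to the dict)
print(list(values))             # [1, 2]
d["c"] = 3
print(len(keys), len(values))   # 3 3 (the views track later mutations)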
+ """ + + # ModelOutput() excludes None, though generic datclasses don't + include_none = False + + @staticmethod + @functools.lru_cache(None) + def _patch_once(): + try: + from transformers.file_utils import ModelOutput + + for obj in ModelOutput.__dict__.values(): + if callable(obj): + skip_code(obj.__code__) + except ImportError: + pass + + try: + from diffusers.utils import BaseOutput + + for obj in BaseOutput.__dict__.values(): + if callable(obj): + skip_code(obj.__code__) + except ImportError: + pass + + @staticmethod + def is_matching_cls(cls): + return _is_matching_transformers_cls(cls) or _is_matching_diffusers_cls(cls) + + @classmethod + def is_matching_object(cls, obj): + return cls.is_matching_cls(type(obj)) + + @classmethod + def create(cls, user_cls, args, kwargs, options): + DataClassVariable._patch_once() + + skip_code(user_cls.__init__.__code__) + keys = [f.name for f in dataclasses.fields(user_cls)] + bound = inspect.signature(user_cls).bind(*args, **kwargs) + bound.apply_defaults() + assert set(bound.arguments.keys()) == set(keys) + items = {} + for key in keys: + val = bound.arguments[key] + key = ConstantVariable.create(key) + if isinstance(val, VariableTracker): + items[key] = val + else: + if cls.include_none: + assert variables.ConstantVariable.is_literal(val) + items[key] = variables.ConstantVariable.create(val) + else: + assert val is None, f"unexpected {val}" + + if len(items) == 1 and not isinstance(items[keys[0]], variables.TensorVariable): + unimplemented("DataClassVariable iterator constructor") + # TODO(jansel): implement unpacking logic in ModelOutput.__post_init__ + + return cls(items, user_cls, **options) + + @classmethod + def wrap(cls, builder, obj): + user_cls = type(obj) + keys = [f.name for f in dataclasses.fields(user_cls)] + + excluded = [] + items = {} + for key in keys: + # __init__ function of a dataclass might not have yet defined the key + if hasattr(obj, key): + val = getattr(obj, key) + var = builder.__class__( + tx=builder.tx, source=AttrSource(builder.source, key) + )(val) + if val is not None or cls.include_none: + key = ConstantVariable.create(key) + items[key] = var + else: + excluded.append(var) + return cls(items, user_cls) + + def __init__(self, items, user_cls, **options): + super().__init__(items, user_cls, **options) + assert self.is_matching_cls(user_cls) + + def as_proxy(self): + raise NotImplementedError() + + def reconstruct(self, codegen): + codegen.extend_output([codegen._create_load_const(self.user_cls)]) + # All the keys are just wrapped strings + d = self.keys_as_python_constant() + codegen.foreach(d.values()) + keys = tuple(d.keys()) + codegen.extend_output(codegen.create_call_function_kw(len(keys), keys, True)) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name == "__getitem__": + assert not kwargs and len(args) == 1 + val = args[0] + if val.python_type() == str: + return self.getitem_const(val) + else: + return self.call_method(tx, "to_tuple", [], {}).call_method( + tx, "__getitem__", args, kwargs + ) + elif name == "to_tuple": + assert not (args or kwargs) + return variables.TupleVariable(list(self.items.values())) + elif name == "__setattr__": + name = "__setitem__" + return super().call_method(tx, name, args, kwargs) + + def var_getattr(self, tx, name: str) -> "VariableTracker": + name_vt = ConstantVariable.create(name) + if name_vt in self: + return self.call_method(tx, "__getitem__", [name_vt], {}) + elif not 
self.include_none: + defaults = {f.name: f.default for f in dataclasses.fields(self.user_cls)} + if name in defaults: + assert variables.ConstantVariable.is_literal(defaults[name]) + return variables.ConstantVariable.create(defaults[name]) + super().var_getattr(tx, name) + + call_hasattr = _call_hasattr_customobj + + +class CustomizedDictVariable(ConstDictVariable): + @staticmethod + def is_matching_cls(cls): + # True if using default OrderedDict.__init__ and did not implement __post_init__ + if ( + issubclass(cls, collections.OrderedDict) + and cls.__init__ is collections.OrderedDict.__init__ + and not hasattr(cls, "__post_init__") + ): + return True + # hack for HF usecase: + # assume dataclass annotation for ModelOutput subclass + # assume self.create is AA to ModelOutput.__post_init__ + return _is_matching_transformers_cls(cls) or _is_matching_diffusers_cls(cls) + + @classmethod + def is_matching_object(cls, obj): + return cls.is_matching_cls(type(obj)) + + # called from user_defined.py + # when is_matching_cls(cls) is true + @classmethod + def create(cls, user_cls, args, kwargs, options): + # avoid tracing when returning ModelOutput from forward func + for attr_name in ("__init__", "__post_init__", "__setattr__", "__setitem__"): + if hasattr(user_cls, attr_name): + fn = getattr(user_cls, attr_name) + assert callable(fn), f"expect callable attr {attr_name}" + if hasattr(fn, "__code__"): + skip_code(fn.__code__) + + if dataclasses.is_dataclass(user_cls): + # @dataclass CustomDict(a=1, b=2) + bound = inspect.signature(user_cls).bind(*args, **kwargs) + bound.apply_defaults() + + def make_var(x): + if isinstance(x, VariableTracker): + return x + elif ConstantVariable.is_literal(x): + return ConstantVariable.create(x) + else: + unimplemented( + "expect VariableTracker or ConstantVariable.is_literal" + ) + + items = { + ConstantVariable.create(k): make_var(v) + for k, v in bound.arguments.items() + } + elif not args: + # CustomDict(a=1, b=2) in the general (non-dataclass) case. 
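A minimal sketch of the argument-binding pattern used by DataClassVariable.create and the dataclass branch of CustomizedDictVariable.create; the Output class here is a hypothetical stand-in, not a real ModelOutput subclass:
import dataclasses
import inspect

@dataclasses.dataclass
class Output:                  # hypothetical stand-in
    loss: float = None
    logits: int = None

bound = inspect.signature(Output).bind(1.5, logits=7)
bound.apply_defaults()
print(dict(bound.arguments))   # {'loss': 1.5, 'logits': 7}
print([f.name for f in dataclasses.fields(Output)])   # ['loss', 'logits']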
+ items = {ConstantVariable.create(k): v for k, v in kwargs.items()} + elif len(args) == 1 and isinstance(args[0], ConstDictVariable) and not kwargs: + # CustomDict({'a': 1, 'b': 2}) + items = args[0].items + else: + unimplemented("custom dict init with args/kwargs unimplemented") + + return cls(items, user_cls, **options) + + # called from builder.py + @classmethod + def wrap(cls, builder, obj): + raise NotImplementedError() + + def __init__(self, items, user_cls, **options): + super().__init__(items, user_cls, **options) + assert self.is_matching_cls(user_cls) + + def as_proxy(self): + raise NotImplementedError() + + # 'RETURN_VALUE triggered compile' + # called from torch/_dynamo/codegen.py + def reconstruct(self, codegen): + codegen.extend_output([codegen._create_load_const(self.user_cls)]) + # All the keys are just wrapped strings + d = self.keys_as_python_constant() + codegen.foreach(d.values()) + keys = tuple(d.keys()) + codegen.extend_output(codegen.create_call_function_kw(len(keys), keys, True)) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + fn = getattr(self.user_cls, name) + source = None if self.source is None else AttrSource(self.source, name) + + if hasattr(fn, "__objclass__") and fn.__objclass__ in ( + dict, + collections.OrderedDict, + ): + # for python dict method without overridden + return super().call_method(tx, name, args, kwargs) + elif name in ("__getitem__", "to_tuple", "__setitem__", "__setattr__"): + # for user overridden method + return tx.inline_user_function_return( + variables.UserFunctionVariable(fn, source=source), + [self] + list(args), + kwargs, + ) + + unimplemented("custom dict: call_method unimplemented name=%s", name) + + def var_getattr(self, tx, name: str) -> "VariableTracker": + name_vt = ConstantVariable.create(name) + if name_vt in self: + return self.call_method(tx, "__getitem__", [name_vt], {}) + super().var_getattr(tx, name) + + call_hasattr = _call_hasattr_customobj + + +@functools.lru_cache(None) +def _install_PretrainedConfig_patch(): + import transformers + + # We need to monkeypatch transformers here, sadly. + # TODO(voz): Upstream to transformers lib + + def _dynamo_overriden_transformers_eq(self, other): + if not hasattr(other, "__dict__"): + return False + return self.__dict__ == other.__dict__ + + transformers.configuration_utils.PretrainedConfig.__eq__ = ( + _dynamo_overriden_transformers_eq + ) + + +class HFPretrainedConfigVariable(VariableTracker): + """ + Hack for HuggingFace PretrainedConfig + """ + + @staticmethod + def is_matching_cls(cls): + mod = sys.modules.get("transformers.configuration_utils") + is_match = mod is not None and issubclass(cls, mod.PretrainedConfig) + + # Lazily install monkeypatch the first time we see it in dynamo + if is_match: + _install_PretrainedConfig_patch() + return is_match + + @classmethod + def is_matching_object(cls, obj): + return cls.is_matching_cls(type(obj)) + + def __init__(self, obj, **kwargs): + super().__init__(**kwargs) + self.obj = obj + assert self.is_matching_cls(type(obj)) + + def var_getattr(self, tx, name: str) -> "VariableTracker": + from . import ConstantVariable + + return ConstantVariable.create(getattr(self.obj, name)) + + def call_hasattr(self, tx, name: str) -> "VariableTracker": + return variables.ConstantVariable.create(hasattr(self.obj, name)) + + +class PythonSysModulesVariable(VariableTracker): + """Special case for sys.modules. 
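A minimal sketch of the run-once idiom behind _patch_once and _install_PretrainedConfig_patch: functools.lru_cache(None) on a zero-argument function caches the first call, so the monkeypatching body executes at most once per process:
import functools

@functools.lru_cache(None)
def install_patch():           # hypothetical stand-in for the patch installers
    print("patching...")       # side effect runs a single time
    return True

install_patch()
install_patch()                # served from the cache; nothing printed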
+ + Without this we will guard on the exact set of modules imported in the + lifetime of the python program. + """ + + def python_type(self): + return dict + + def reconstruct(self, codegen): + codegen.extend_output( + [ + codegen.create_load_python_module(sys, True), + codegen.create_load_attr("modules"), + ] + ) + + def call_method( + self, tx, name, args: List[VariableTracker], kwargs: Dict[str, VariableTracker] + ): + from .builder import VariableBuilder + + if name == "__getitem__": + return self.call_getitem(tx, *args, **kwargs) + elif name == "get": + return self.call_get(tx, *args, **kwargs) + elif name == "__contains__": + return self.call_contains(tx, *args, **kwargs) + + # Fallback to dict implementation + real_dict = VariableBuilder(tx, self.source)(sys.modules) + return real_dict.call_method(tx, name, args, kwargs) + + def _contains_helper(self, tx, key: VariableTracker): + k = key.as_python_constant() + has_key = k in sys.modules + install_guard( + self.make_guard( + functools.partial(GuardBuilder.DICT_CONTAINS, key=k, invert=not has_key) + ) + ) + return k, has_key + + def call_contains(self, tx, key: VariableTracker): + k, has_key = self._contains_helper(tx, key) + return ConstantVariable.create(value=has_key) + + def call_get( + self, tx, key: VariableTracker, default: Optional[VariableTracker] = None + ): + from .builder import VariableBuilder + + k, has_key = self._contains_helper(tx, key) + + if has_key: + return VariableBuilder( + tx, + GetItemSource(self.source, k), + )(sys.modules[k]) + + if default is not None: + return default + + return ConstantVariable.create(value=None) + + def call_getitem(self, tx, key: VariableTracker): + from .builder import VariableBuilder + + k, has_key = self._contains_helper(tx, key) + return VariableBuilder( + tx, + GetItemSource(self.source, k), + )(sys.modules[k]) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/distributed.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..bf17a87014826588143f8b978511c8638ec0aaa5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/distributed.py @@ -0,0 +1,388 @@ +# mypy: ignore-errors +import functools +import inspect +from typing import Dict, List + +import torch +from ...fx.experimental._backward_state import BackwardState +from .. import compiled_autograd, variables +from .._trace_wrapped_higher_order_op import trace_wrapped +from ..exc import unimplemented +from ..external_utils import call_module_hooks_from_backward_state +from ..guards import GuardBuilder, install_guard +from ..source import AttrSource, GlobalSource +from ..utils import istype +from .base import VariableTracker +from .constant import ConstantVariable + + +class DistributedVariable(VariableTracker): + """ + The base distributed variable that encapsulates common methods + for the distributed objects (i.e. ProcessGroup, DeviceMesh, etc.). + Concrete distributed objects could inherit this class and add object + specific logic. + + i.e. It provides the check on the distributed package existance + and hold the tracking value for the corresponding distributed object. 
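A minimal sketch of the plain-dict behavior on sys.modules that PythonSysModulesVariable special-cases, guarding only on the single key seen (DICT_CONTAINS) rather than the full, ever-changing module table:
import sys

print("os" in sys.modules)                                  # the call_contains path
print(sys.modules.get("definitely_not_imported") is None)   # the call_get default path
print(sys.modules["sys"].version_info >= (3,))              # the call_getitem path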
+ """ + + def __init__(self, value, **kwargs): + super().__init__(**kwargs) + if not DistributedVariable.is_available(): + unimplemented("torch.distributed package is not available!") + self.value = value + + def python_type(self): + return type(self.value) + + @staticmethod + def is_available(): + # check if the distributed package is available or not + return torch.distributed.is_available() + + +def is_from_local(value): + if not DistributedVariable.is_available(): + return False + from torch.distributed._tensor import DTensor + + return inspect.isfunction(value) and value is DTensor.from_local + + +def is_constant_pg_functions(value): + if not DistributedVariable.is_available(): + return False + + from torch.distributed.distributed_c10d import ( + _get_group_size_by_name, + _get_group_tag, + _rank_not_in_group, + _resolve_group_name_by_ranks_and_tag, + get_process_group_ranks, + ) + + constant_processgroup_functions = [ + _get_group_size_by_name, + _get_group_tag, + _rank_not_in_group, + get_process_group_ranks, + _resolve_group_name_by_ranks_and_tag, + ] + + return inspect.isfunction(value) and value in constant_processgroup_functions + + +class PlacementClassVariable(DistributedVariable): + @staticmethod + def is_placement_type(value): + # we can't rely on importing/accessing torch distributed, it is not always built. + if not DistributedVariable.is_available(): + return False + + from torch.distributed._tensor.placement_types import Placement + + return type(value) is type and issubclass(value, Placement) + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + if ( + inspect.getattr_static(self.value, "__new__", None) in (object.__new__,) + and self.source + ): + # NOTE: we don't need to track mutations to the placement class as they + # suppose to be immutable. + new_obj = object.__new__(self.value) + var = PlacementVariable(new_obj) + if inspect.getattr_static(self.value, "__init__", None): + var.call_method(tx, "__init__", args, kwargs) + return var + + return super().call_function(tx, args, kwargs) + + +class PlacementVariable(DistributedVariable): + @staticmethod + def is_placement(value): + # we can't rely on importing/accessing torch distributed, it is not always built. + if not DistributedVariable.is_available(): + return False + + from torch.distributed._tensor.placement_types import Placement + + return isinstance(value, Placement) + + def as_python_constant(self): + return self.value + + def var_getattr(self, tx, name: str) -> VariableTracker: + if name == "dim": + return ConstantVariable.create(self.value.dim) + return super().var_getattr(tx, name) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from . import ConstantVariable + + # Placement types dynamo tracking only allows following methods + # and __setattr__ is for case like `Shard(dim)` and methods. + # Methods in the list must satisfy: + # 1. Input arguments are constants and do not need to be guarded on; + # 2. Output is constant with respect to their inputs + constant_fold_functions = [ + "__init__", + "__setattr__", + "is_shard", + "is_partial", + "is_replicate", + ] + + if name in constant_fold_functions: + try: + value_type = type(self.value) + assert ( + inspect.getattr_static(value_type, "__getattr__", None) is None + ), "no custom getattr allowed!" 
+ method = inspect.getattr_static(value_type, name) + except AttributeError: + method = None + if method is object.__init__: + return ConstantVariable.create(None) + + args = [x.as_python_constant() for x in args] + kwargs = {k: v.as_python_constant() for k, v in kwargs.items()} + if name == "__setattr__": + method(self.value, *args, **kwargs) + return self + constant_val = method(self.value, *args, **kwargs) + return ConstantVariable.create(constant_val) + + return super().call_method(tx, name, args, kwargs) + + +class DeviceMeshVariable(DistributedVariable): + @staticmethod + def is_device_mesh(value): + # we can't rely on importing/accessing torch distributed, it is not always built. + if not DistributedVariable.is_available(): + return False + + from torch.distributed.device_mesh import DeviceMesh + + return istype(value, DeviceMesh) + + def as_python_constant(self): + return self.value + + def var_getattr(self, tx, name: str) -> VariableTracker: + if name == "ndim": + return ConstantVariable.create(self.value.ndim) + return super().var_getattr(tx, name) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name == "size": + const_args = [x.as_python_constant() for x in args] + const_kwargs = {k: v.as_python_constant() for k, v in kwargs.items()} + return ConstantVariable.create(self.value.size(*const_args, **const_kwargs)) + if name == "get_coordinate": + return ConstantVariable.create(self.value.get_coordinate()) + if name == "get_group": + return ConstantVariable.create(self.value.get_group()) + if name == "_get_or_create_default_group": + return ProcessGroupVariable(self.value._get_or_create_default_group()) + return super().call_method(tx, name, args, kwargs) + + +class ProcessGroupVariable(DistributedVariable): + """ + We don't want a ProcessGroup object to end up in our output graph. + + But it's common for dynamo to intercept a PG that is then used to get info like + rank() or world_size(), as well as passed to utility functions in distributed_c10d + which desugar it into plain types like a ranklist and tag. + + For convenience and proper guarding, we construct a variable type. + + TODO: make it possible to use ProcessGroupVariable as input to simple functions + like _expand_group without dynamo complaining about making a proxy for it. + It is not a tensor-like type, and we don't want a proxy- but dynamo assumes + torch library functions are dealing with tensor-like types and would have proxies + for their args. + TODO: should we make this inherit VT instead of UDOV? Do we want any of the default behaviors + or just graph-break whenever one of our special cases is not hit? + """ + + def as_python_constant(self): + return self.value + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name == "rank": + return variables.ConstantVariable.create(self.value.rank()) + if name == "size": + return variables.ConstantVariable.create(self.value.size()) + + return super().call_method(tx, name, args, kwargs) + + def var_getattr(self, tx, name): + if name == "group_name": + return variables.ConstantVariable.create(self.value.group_name) + if name in ["rank", "size"]: + return variables.LambdaVariable( + lambda *args, **kwargs: self.call_method(tx, name, args, kwargs) + ) + # TODO should this just raise unimplemented? 
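A minimal sketch of the availability guard used throughout distributed.py, and of the rank/size queries that ProcessGroupVariable folds to constants when a default group exists:
import torch

def describe_world():
    if not torch.distributed.is_available():   # torch may be built without c10d
        return "distributed package not built"
    import torch.distributed as dist
    if not dist.is_initialized():
        return "no default process group initialized"
    pg = dist.group.WORLD
    return f"rank {pg.rank()} of {pg.size()}"  # the values ProcessGroupVariable constant-folds

print(describe_world())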
+ return super().var_getattr(tx, name) + + @staticmethod + def is_process_group(value): + # we can't rely on importing/accessing torch distributed, it is not always built. + if not DistributedVariable.is_available(): + return False + from torch._C._distributed_c10d import ProcessGroup + from torch.testing._internal.distributed.fake_pg import FakeProcessGroup + + return istype(value, (ProcessGroup, FakeProcessGroup)) + + @staticmethod + def get_global_pg_variable(): + """ + Make a ProcessGroupVariable from torch.distributed.group.WORLD and + intall guards. + """ + import torch.distributed as dist + + source = AttrSource( + AttrSource( + base=AttrSource( + base=GlobalSource(global_name="torch"), + member="distributed", + get_static=False, + ), + member="group", + get_static=False, + ), + member="WORLD", + get_static=False, + ) + install_guard(source.make_guard(GuardBuilder.ID_MATCH)) + return ProcessGroupVariable( + dist.group.WORLD, + source=source, + ) + + +class BackwardHookVariable(VariableTracker): + """ + Handles torch.utils.hooks.BackwardHook for module-level backward + hooks. + """ + + @staticmethod + def create( + tx, + module: VariableTracker, + user_hooks: VariableTracker, + user_pre_hooks: VariableTracker, + ): + if not compiled_autograd.compiled_autograd_enabled: + unimplemented("module-level backwards hooks require compiled autograd") + + def _in_graph_bw_hooks(bw_state: BackwardState): + """ + Rather than installing the user hooks in the graph (which + don't survive AotAutograd), we install hooks that will call + trace_wrapped in the backward pass that CompiledAutograd + can turn into actual hook calls. + """ + return torch.utils.hooks.BackwardHook( + None, + ( + functools.partial( + trace_wrapped, + fn=call_module_hooks_from_backward_state, + bw_state=bw_state, + hooks_name=user_hooks_name, + module_name=module_name, + ), + ), + ( + functools.partial( + trace_wrapped, + fn=call_module_hooks_from_backward_state, + bw_state=bw_state, + hooks_name=user_pre_hooks_name, + module_name=module_name, + ), + ), + ) + + module_name, bw_state_proxy = tx.output.add_backward_state_hook(module) + user_pre_hooks_name, _ = tx.output.add_backward_state_hook(user_pre_hooks) + user_hooks_name, _ = tx.output.add_backward_state_hook(user_hooks) + proxy = tx.output.create_proxy( + "call_function", + _in_graph_bw_hooks, + (bw_state_proxy,), + {}, + ) + proxy.node.meta["example_value"] = torch.utils.hooks.BackwardHook(None, (), ()) + return BackwardHookVariable(proxy, module, user_hooks, user_pre_hooks) + + def __init__( + self, + proxy: torch.fx.Proxy, + module: VariableTracker, + user_hooks: VariableTracker, + user_pre_hooks: VariableTracker, + **options, + ): + super().__init__(**options) + self.proxy = proxy + self.module = module + self.user_hooks = user_hooks + self.user_pre_hooks = user_pre_hooks + + def as_proxy(self): + return self.proxy + + def call_method( + self, + tx, + name, + args: List[VariableTracker], + kwargs: Dict[str, VariableTracker], + ) -> VariableTracker: + if name in ("setup_input_hook", "setup_output_hook"): + return self._setup_hook(tx, name, *args, **kwargs) + return super().call_method(tx, name, args, kwargs) + + def _setup_hook(self, tx, hook_method_name, args): + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_method", + hook_method_name, + (self.as_proxy(), args.as_proxy()), + {}, + ), + ) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/functions.py 
b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..38876055c138a357aed255589cef8dfbb7238c30 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/functions.py @@ -0,0 +1,947 @@ +# mypy: ignore-errors + +import collections +import functools +import inspect +import itertools +import types +from typing import Dict, List, Optional, TYPE_CHECKING, Union + +import torch + +from .. import variables +from ..bytecode_transformation import create_call_function, create_rot_n +from ..exc import unimplemented, Unsupported +from ..guards import GuardBuilder, install_guard +from ..source import AttrSource, ConstantSource, DefaultsSource, GetItemSource +from ..utils import check_constant_args, get_first_attr, identity, istype, make_cell +from .base import MutableLocal, typestr, VariableTracker +from .constant import ConstantVariable +from .distributed import ProcessGroupVariable + +if TYPE_CHECKING: + from torch._guards import Source + + +def wrap_bound_arg(tx, val, source=None): + # Source propagation is best effort since not every object we encounter has a source to begin with. + if isinstance(val, VariableTracker): + return val + elif not source: + from torch._dynamo.variables.builder import SourcelessBuilder + + return SourcelessBuilder()(tx, val) + else: + # Create a lazy variable to avoid guarding on __defaults__ unless really + # needed. + return variables.LazyVariableTracker.create(val, source) + + +def wrap_args_kwargs(tx, result): + for k, v in list(result.items()): + if isinstance(v, (tuple, dict)): + # args/kwargs + result[k] = wrap_bound_arg(tx, v) + + +def init_cellvars(parent, result, code): + closure_cells = dict() + side_effects = parent.output.side_effects + + # for name in itertools.chain(code.co_cellvars, code.co_freevars): + for name in code.co_cellvars: + closure_cells[name] = side_effects.track_cell_new() + if name in result: + side_effects.store_cell(closure_cells[name], result.pop(name)) + + return closure_cells + + +def _create_nested_fn( + code, f_globals, name, defaults, closure, kwdefaults, annotations +): + from types import FunctionType + + func = FunctionType(code, f_globals, name, defaults, closure) + func.__kwdefaults__ = kwdefaults + + if isinstance(annotations, tuple): + from itertools import pairwise + + annotations = dict(pairwise(annotations)) + + # TypeError: __annotations__ must be set to a dict object + assert annotations is None or isinstance(annotations, dict) + func.__annotations__ = annotations + + return func + + +class BaseUserFunctionVariable(VariableTracker): + def get_filename(self): + return self.get_code().co_filename + + def get_name(self): + return self.get_code().co_name + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + return tx.inline_user_function_return( + self, list(self.self_args()) + list(args), kwargs + ) + + def call_hasattr(self, tx, name: str) -> VariableTracker: + result = False + + try: + result = hasattr(self.get_function(), name) + except NotImplementedError: + if name == "__name__" and isinstance(self, NestedUserFunctionVariable): + result = True + return variables.ConstantVariable.create(result) + + def inspect_parameter_names(self): + return list(inspect.signature(self.get_function()).parameters) + + def closure_vars(self, tx): + return {} + + +class UserFunctionVariable(BaseUserFunctionVariable): + """Some unsupported user-defined global 
function""" + + @classmethod + def create_with_source(cls, value, source): + install_guard(source.make_guard(GuardBuilder.CLOSURE_MATCH)) + return cls( + value, + source=source, + ) + + def __init__(self, fn, is_constant=False, **kwargs): + super().__init__(**kwargs) + if getattr(fn, "_dynamo_marked_constant", False): + # This method should be treated as a constant for the purposes of compilation + self.is_constant = True + else: + self.is_constant = False + + assert isinstance( + fn, (types.FunctionType, torch.jit.ScriptFunction) + ), f"expected FunctionType found {typestr(fn)} {fn}" + # unpack @torch._dynamo.optimize()(fn) wrapped function + fn = inspect.getattr_static(fn, "_torchdynamo_inline", fn) + # unpack torch.jit.script_if_tracing + if inspect.getattr_static(fn, "__script_if_tracing_wrapper", False): + fn = inspect.getattr_static(fn, "__original_fn", fn) + self.fn: types.FunctionType = fn + + def as_python_constant(self): + if istype(self, UserFunctionVariable): + return self.fn + # subclasses (such as methods) usually aren't a constant + return super().as_python_constant() + + def self_args(self): + return [] + + def get_function(self): + return self.fn + + def get_code(self): + return self.fn.__code__ + + def python_type(self): + return types.FunctionType + + def has_self(self): + return getattr(self.fn, "__self__", None) is not None + + def get_globals(self): + return self.fn.__globals__ + + def bind_args(self, parent, args, kwargs): + assert not self.is_constant + tx = parent.output.root_tx + wrap = functools.partial(wrap_bound_arg, tx=tx) + + fn: types.FunctionType = self.fn + defaults = fn.__defaults__ or [] + defaults_sources = [ + None if self.source is None else DefaultsSource(self.source, idx) + for idx, _ in enumerate(defaults) + ] + fake_func = types.FunctionType( + fn.__code__, + fn.__globals__, + fn.__name__, + tuple( + [ + wrap(val=arg, source=source) + for arg, source in zip(defaults, defaults_sources) + ] + ), + fn.__closure__, + ) + if fn.__kwdefaults__: + kwdefaults_sources = { + k: None + if self.source is None + else DefaultsSource(self.source, k, is_kw=True) + for k in fn.__kwdefaults__ + } + fake_func.__kwdefaults__ = { + k: wrap(val=v, source=kwdefaults_sources[k]) + for k, v in fn.__kwdefaults__.items() + } + + bound = inspect.signature(fake_func).bind(*args, **kwargs) + bound.apply_defaults() + result = dict(bound.arguments.items()) + + wrap_args_kwargs(tx, result) + closure_cells = init_cellvars(parent, result, fn.__code__) + closure = self.fn.__closure__ or () + assert len(closure) == len(self.fn.__code__.co_freevars) + for idx, name, cell in zip( + itertools.count(), self.fn.__code__.co_freevars, closure + ): + if name == "__class__": + source = AttrSource(self.source, "__class__") if self.source else None + result[name] = variables.UserDefinedClassVariable( + cell.cell_contents, + source=source, + ) + else: + var = tx.match_nested_cell(name, cell) + if var is not None: + # optimization for cleaner codegen + result[name] = var + elif self.source: + from .builder import VariableBuilder + + side_effects = parent.output.side_effects + if cell in side_effects: + out = side_effects[cell] + else: + closure_cell = GetItemSource( + AttrSource(self.source, "__closure__"), idx + ) + closure_cell_contents = AttrSource( + closure_cell, "cell_contents" + ) + try: + contents_var = VariableBuilder( + parent, closure_cell_contents + )(cell.cell_contents) + except ValueError: + # Cell has not yet been assigned + contents_var = variables.DeletedVariable() + + if ( + 
closure_cell_contents.name() + not in tx.mutated_closure_cell_contents + ): + # Optimistically don't allocate the cell, to + # reduce the number of side effects. This is + # important for cond, as without it, any accesses + # to closures create side effects and cond doesn't + # support side effects. If we're wrong and this + # closure cell gets written to, we will restart + # the analysis with this cell's name in the + # mutated list here + result[name] = contents_var + continue + + # cells are written to with "cell_contents", + # so the source should just be the closure_cell, not its contents + out = side_effects.track_cell_existing(closure_cell, cell) + side_effects.store_cell( + out, + contents_var, + ) + + result[name] = out + + else: + from .builder import SourcelessBuilder + + result[name] = SourcelessBuilder()(tx, cell.cell_contents) + + return result, closure_cells + + def export_freevars(self, parent, child): + pass + + def call_hasattr(self, tx, name: str) -> VariableTracker: + result = hasattr(self.fn, name) + return variables.ConstantVariable.create(result) + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + if self.is_constant: + return invoke_and_store_as_constant( + tx, self.fn, self.get_name(), args, kwargs + ) + + return super().call_function(tx, args, kwargs) + + +class UserMethodVariable(UserFunctionVariable): + """Some unsupported user-defined method""" + + def __init__(self, fn, obj, **kwargs): + super().__init__(fn=fn, **kwargs) + self.obj = obj + + def __str__(self): + return f"{self.__class__.__name__}({self.fn}, {self.obj})" + + def self_args(self): + return [self.obj] + + def python_type(self): + return types.MethodType + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + # For nn.Module methods, redirecting to NNModuleVariable.call_method for optimized solution + # rather than simple inlining. E.g, putting `call_method` op in FX graph for `forward` method + # since we ensure `forward` of allowed modules can be traced by AOT safely. + # Note this is not only for allowed modules, as user customized modules can extend from + # allowed modules but using parent's `forward` method, which is also covered by this branch. + + # If we are tracing the higher order op, we want Dynamo to step inside + # the module call so that Dynamo can see the underlying parameters and + # buffers and raise them as inputs to the graph. The is_root_tracer + # check bypasses the if condition for non-root tracers and directly + # calls the super().call_function at the end, which is basically + # equivalent of inlining the method. 
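A minimal sketch of the CPython closure layout that bind_args walks above: co_freevars lines up positionally with __closure__, and each cell exposes its value through cell_contents:
def outer():
    captured = 42
    def inner():
        return captured
    return inner

fn = outer()
print(fn.__code__.co_freevars)                     # ('captured',)
print([c.cell_contents for c in fn.__closure__])   # [42]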
+ if tx.output.is_root_tracer() and isinstance( + self.obj, variables.NNModuleVariable + ): + module_attr = getattr(self.fn, "__module__", "") + if ( + module_attr is not None + and module_attr.startswith("torch.nn.") + or self.is_constant + ): + return self.obj.call_method( + tx, self.fn.__name__, args, kwargs, constant=self.is_constant + ) + return super().call_function(tx, args, kwargs) + + def inspect_parameter_names(self): + return super().inspect_parameter_names()[1:] + + +class WrappedUserMethodVariable(UserMethodVariable): + def __init__(self, wrapped, context, **kwargs): + kwargs.pop("fn", None) + kwargs.pop("obj", None) + super().__init__(wrapped.fn, wrapped.obj, **kwargs) + self.wrapped = wrapped + self.context = context + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + self.context.enter(tx) + result = super().call_function(tx, args, kwargs) + self.context.exit(tx) + return result + + +class WrappedUserFunctionVariable(UserFunctionVariable): + def __init__(self, wrapped, context, **kwargs): + kwargs.pop("fn", None) + kwargs.pop("obj", None) + super().__init__(wrapped.fn, **kwargs) + self.wrapped = wrapped + self.context = context + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + self.context.enter(tx) + result = super().call_function(tx, args, kwargs) + self.context.exit(tx) + return result + + +def invoke_and_store_as_constant(tx, fn, name, args, kwargs): + def convert(x): + if isinstance(x, variables.TensorVariable): + return x.get_real_value() + return x.as_python_constant() + + args = [convert(x) for x in args] + kwargs = {k: convert(v) for k, v in kwargs.items()} + res = fn(*args, **kwargs) + return tx.output.register_attr_or_module( + res, + name, + source=ConstantSource(name), + ) + + +class NestedUserFunctionVariable(BaseUserFunctionVariable): + _nonvar_fields = { + "closure_scope", + "f_globals", + *BaseUserFunctionVariable._nonvar_fields, + } + + def __init__( + self, + fn_name, + code, + f_globals, + defaults, + kwdefaults, + annotations, + closure, + closure_scope, + wrapped_reconstructible=None, + **kwargs, + ): + super().__init__(**kwargs) + assert isinstance(fn_name.as_python_constant(), str) + assert isinstance(code.as_python_constant(), types.CodeType) + assert isinstance(f_globals, dict) + self.fn_name = fn_name + self.code = code + self.f_globals = f_globals + self.defaults = defaults + self.kwdefaults = kwdefaults + self.annotations = annotations + self.closure = closure + if closure is None: + closure_scope = None + self.closure_scope = closure_scope + # Either a source or a VT with .can_reconstruct() == True + self.wrapped_reconstructible: Optional[ + Union[Source, VariableTracker] + ] = wrapped_reconstructible + + def self_args(self): + return [] + + def get_code(self): + return self.code.as_python_constant() + + def get_function(self): + if self.closure: + raise NotImplementedError() + func = types.FunctionType( + self.code.as_python_constant(), + self.f_globals, + self.fn_name.as_python_constant(), + ) + if self.defaults: + func.__defaults__ = self.defaults.as_python_constant() + if self.kwdefaults: + func.__kwdefaults__ = self.kwdefaults.as_python_constant() + if self.annotations: + annotations = self.annotations.as_python_constant() + if isinstance(annotations, tuple): + from itertools import pairwise + + annotations = dict(pairwise(annotations)) + + # TypeError: __annotations__ must be set 
to a dict object + assert isinstance(annotations, dict) + func.__annotations__ = annotations + return func + + def has_closure(self): + return self.closure is not None + + def has_self(self): + return False + + def get_globals(self): + return self.f_globals + + def bind_args(self, parent, args, kwargs): + from .misc import InlinedClosureVariable + + code = self.get_code() + func = types.FunctionType( + code, + self.f_globals, + self.fn_name.as_python_constant(), + tuple(self.defaults.items) if self.defaults else None, + tuple(make_cell(None) for _ in range(len(self.get_code().co_freevars))), + ) + if self.kwdefaults: + func.__kwdefaults__ = self.kwdefaults.keys_as_python_constant() + bound = inspect.signature(func).bind(*args, **kwargs) + bound.apply_defaults() + result = dict(bound.arguments.items()) + wrap_args_kwargs(parent.output.root_tx, result) + closure_cells = init_cellvars(parent, result, code) + + for idx, name in enumerate(code.co_freevars): + cell = self.closure.items[idx] + assert getattr(cell, name, name) == name + assert name not in result + if isinstance(cell, InlinedClosureVariable): + # InlinedClosureVariable's are created from LOAD_CLOSURE's from + # InliningInstructionTranslators when the variable name is not found in closure_cells. + # They should remain outside of closure_cells, so that our callee (the + # InliningInstructionTranslator that traces `func`) handles + # the cell correctly - that is, the cell's contents are treated as if they + # are local variables, like in UserFunctionVariable's bind_args for freevars. + cand = parent + while cand and name not in cand.symbolic_locals: + cand = cand.parent + if cand is None: + raise RuntimeError( + f"Couldn't find {name} in the symbolic_locals of the inline interpreter stack" + ) + result[name] = cand.symbolic_locals[name] + else: + closure_cells[name] = self.closure.items[idx] + + return result, closure_cells + + def export_freevars(self, parent, child): + code = self.get_code() + for var in code.co_freevars: + if var in child.symbolic_locals: + parent.symbolic_locals[var] = child.symbolic_locals[var] + + def reconstruct(self, codegen): + codegen.load_import_from(__name__, "_create_nested_fn") + codegen(self.code) + codegen.extend_output([codegen._create_load_const(self.f_globals)]) + codegen(ConstantVariable.create(self.code.value.co_name)) + + if self.defaults: + codegen(self.defaults) + else: + codegen.extend_output([codegen.create_load_const(None)]) + + if self.closure: + codegen(self.closure) + else: + codegen.extend_output([codegen.create_load_const(None)]) + + if self.kwdefaults: + codegen(self.kwdefaults) + else: + codegen.extend_output([codegen.create_load_const(None)]) + + if self.annotations: + try: + annotations = self.annotations.as_python_constant() + codegen.extend_output([codegen._create_load_const(annotations)]) + except NotImplementedError: + codegen(self.annotations) + else: + codegen.extend_output([codegen.create_load_const(None)]) + + codegen.extend_output(create_call_function(7, push_null=True)) + + if self.wrapped_reconstructible: + codegen.load_import_from("functools", "wraps") + codegen(self.wrapped_reconstructible) + codegen.extend_output(create_call_function(1, True)) + codegen.extend_output(create_rot_n(2)) + codegen.extend_output(create_call_function(1, True)) + + +class SkipFunctionVariable(VariableTracker): + def __init__(self, value, reason=None, **kwargs): + super().__init__(**kwargs) + self.value = value + self.reason = reason + + def python_type(self): + return type(self.value) + + def 
as_python_constant(self): + return self.value + + @classmethod + def create_with_source(cls, value, source): + install_guard(source.make_guard(GuardBuilder.FUNCTION_MATCH)) + return cls( + value, + source=source, + ) + + @staticmethod + @functools.lru_cache(None) + def fold_through_function_to_wrapper(): + return { + collections.namedtuple: variables.UserDefinedClassVariable, + } + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + if inspect.getattr_static(self.value, "_torchdynamo_disable", False): + unimplemented(f"call torch._dynamo.disable() wrapped function {self.value}") + # Fold through the functions(e.g, collections.namedtuple) + # that inputs & outputs are all python constants + elif ( + self.value in self.fold_through_function_to_wrapper().keys() + and check_constant_args(args, kwargs) + ): + value = self.value( + *[x.as_python_constant() for x in args], + **{k: v.as_python_constant() for k, v in kwargs.items()}, + ) + return self.fold_through_function_to_wrapper().get(self.value)( + value, mutable_local=MutableLocal() + ) + elif ( + self.value is functools.wraps + and not kwargs + and len(args) == 1 + and ( + args[0].source is not None or args[0].can_reconstruct(tx.output.root_tx) + ) + ): + + def wraps(fn): + if isinstance(fn, variables.NestedUserFunctionVariable): + if args[0].source: + reconstructible = args[0].source + else: + reconstructible = args[0] + return fn.clone(wrapped_reconstructible=reconstructible) + unimplemented(f"functools.wraps({fn})") + + return variables.LambdaVariable(wraps) + else: + try: + path = inspect.getfile(self.value) + except TypeError: + path = f"Builtin {self.value.__name__}" + msg = f"'skip function {self.value.__qualname__} in file {path}'" + msg += f"', {self.reason}'" if self.reason else "" + unimplemented(msg) + + +def _traceable_collective_remaps(): + # We can't rely on importing from distributed, since it's not always built + if torch.distributed.is_available(): + from torch.distributed._functional_collectives import ( + traceable_collective_remaps, + ) + + return traceable_collective_remaps + return {} + + +def _traceable_collectives_source(tx, fn): + assert torch.distributed.is_available(), "Illegal invocation." + assert fn in _traceable_collective_remaps().values() + + inner_name = fn.__name__ + path_source = tx.import_source("torch.distributed._functional_collectives") + return AttrSource(path_source, inner_name) + + +class CollectiveFunctionRewriteVariable(UserFunctionVariable): + """ + Some of the torch.distributed.* collective APIs are possible to rewrite to 'traceable' collectives. + + This class provides both a way to check if a function is remappable, and perform the remapping. + + In the case that a function is 'remappable' but only for some combinations of call-time arguments, + we check the args at `call_function` time and fall back to graph-breaking if needed. This is no worse + than status-quo as we currently graph-break on all distributed.* collectives. 
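A minimal sketch (all_reduce here is a hypothetical stand-in, not the real torch.distributed API) of the normalization step in CollectiveFunctionRewriteVariable.call_function, which folds positional arguments into keyword arguments via the original signature before remapping:
import inspect

def all_reduce(tensor, op="sum", group=None, async_op=False):   # hypothetical stand-in
    return tensor

sig = inspect.signature(all_reduce)
kwargs = dict(sig.bind("T", "max", async_op=False).arguments)
print(kwargs)                        # {'tensor': 'T', 'op': 'max', 'async_op': False}
print(kwargs.get("group") is None)   # the branch that then substitutes the global WORLD group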
+ """ + + def __init__(self, fn, *, replacement_var, **kwargs): + super().__init__(fn, **kwargs) + assert isinstance(replacement_var, UserFunctionVariable) + self.replacement_var = replacement_var + + @staticmethod + def create(tx, old_fn, source, **options): + new_fn, new_source = CollectiveFunctionRewriteVariable.rewrite(tx, old_fn) + return CollectiveFunctionRewriteVariable( + old_fn, + replacement_var=UserFunctionVariable(new_fn, source=new_source, **options), + source=source, + **options, + ) + + @staticmethod + def can_rewrite(variable): + return ( + inspect.isfunction(variable) and variable in _traceable_collective_remaps() + ) + + @staticmethod + def rewrite(tx, fn): + new_fn = _traceable_collective_remaps()[fn] + return new_fn, _traceable_collectives_source(tx, new_fn) + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + # call_function must check any unsupported arguments and graph-break. + # It's safe to assume args/kwargs from orig_fn map 1:1 to args/kwargs of remapped_fn, + # since that's the contract for putting a mapping in `traceable_collective_remaps` + import torch.distributed as dist + from torch.distributed._functional_collectives import REDUCE_OP_TO_STR + + # Merge args into kwargs so positional and keyword args + # can be processed the same way. + signature = inspect.signature(self.fn) + kwargs = dict(signature.bind(*args, **kwargs).arguments) + args = () + + if "async_op" in kwargs and kwargs["async_op"].as_python_constant(): + unimplemented( + f"CollectiveFunctionRewriteVariable can't support async_op=True for {self.fn}" + ) + + if kwargs.get("group") is None or kwargs["group"].value is None: + kwargs["group"] = ProcessGroupVariable.get_global_pg_variable() + + if self.fn == dist.all_reduce: + reduce_op_var = kwargs.get("op") + reduce_op = ( + reduce_op_var.value + if reduce_op_var is not None + else signature.parameters["op"].default + ) + if reduce_op not in REDUCE_OP_TO_STR: + raise ValueError(f"Unsupported all_reduce op: {reduce_op}") + kwargs["op"] = variables.ConstantVariable.create( + REDUCE_OP_TO_STR[reduce_op] + ) + return self.replacement_var.call_function(tx, args, kwargs) + + +class FunctoolsPartialVariable(VariableTracker): + def __init__(self, func: VariableTracker, args, keywords, **kwargs): + super().__init__(**kwargs) + self.func = func + assert isinstance(args, list) + self.args = args + assert isinstance(keywords, dict) + self.keywords = keywords + + def reconstruct(self, codegen): + codegen.load_import_from("functools", "partial") + codegen(self.func) + if self.args: + codegen.foreach(self.args) + if not self.keywords: + codegen.extend_output(create_call_function(len(self.args) + 1, True)) + return + + codegen.foreach(self.keywords.values()) + keys = tuple(self.keywords.keys()) + codegen.extend_output( + codegen.create_call_function_kw(len(keys) + len(self.args) + 1, keys, True) + ) + + def get_function(self): + return self.as_python_constant() + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + merged_args = self.args + args + merged_kwargs = {**self.keywords, **kwargs} + return self.func.call_function(tx, merged_args, merged_kwargs) + + def call_hasattr(self, tx, name: str) -> VariableTracker: + # functools.partial uses slots, so attributes are constant + return variables.ConstantVariable.create( + hasattr(functools.partial(identity), name) + ) + + def as_python_constant(self): + return 
functools.partial( + self.func.as_python_constant(), + *[arg.as_python_constant() for arg in self.args], + **{k: v.as_python_constant() for k, v in self.keywords.items()}, + ) + + def guard_as_python_constant(self): + """Similar to as_python_constant(), but adds ID_MATCH guards to try to force things to become constants""" + return functools.partial( + self.func.guard_as_python_constant(), + *[v.guard_as_python_constant() for v in self.args], + **{k: v.guard_as_python_constant() for k, v in self.keywords.items()}, + ) + + +class TritonKernelVariable(VariableTracker): + def __init__(self, kernel, kernel_idx, grid, **kwargs): + from triton.runtime.autotuner import Autotuner + + from torch._higher_order_ops.triton_kernel_wrap import kernel_side_table + + super().__init__(**kwargs) + + assert kernel is not None + + self.kernel = kernel + self.kernel_idx = kernel_side_table.add_kernel(kernel) + + assert kernel_idx is None or self.kernel_idx == kernel_idx + + self.grid = grid + + if isinstance(kernel, Autotuner): + # We only support configs and keys arguments of triton.autotune + # Make sure other arguments are defaulted + defaults = inspect.signature(Autotuner.__init__).parameters + + # Newer versions of triton changed the attribute names from warmup to num_warmups and rep to num_reps. + # The call to get_first_attr is to maintain backward-compatibility. + if ( + ( + "warmup" in defaults + and defaults["warmup"].default + != get_first_attr(kernel, "num_warmups", "warmup") + ) + or ( + "rep" in defaults + and defaults["rep"].default + != get_first_attr(kernel, "num_reps", "rep") + ) + or ( + "prune_configs_by" in defaults + and defaults["prune_configs_by"].default + != kernel.early_config_prune + ) + # Set via reset_to_zero argument + or len(kernel.reset_idx) != 0 + or len(kernel.restore_idx) != 0 + ): + raise Unsupported( + "Only configs and keys are supported for triton.autotune" + ) + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from triton.runtime.autotuner import Autotuner + + from .constant import ConstantVariable + from .dicts import ConstDictVariable + from .lists import BaseListVariable + + if self.grid is None: + raise Unsupported("Triton kernels should always be called with a grid") + + # Both for the grid's meta and for the kernel, we need the combined + # args and kwargs normalized + names = ( + variables.ConstantVariable.create(name) for name in self.kernel.arg_names + ) + kwargs = {variables.ConstantVariable.create(k): v for k, v in kwargs.items()} + normalized_args = {**dict(zip(names, args)), **kwargs} + + configs = ( + [config.kwargs for config in self.kernel.configs] + if isinstance(self.kernel, Autotuner) + else [{}] + ) + grids = [] + for config_args in configs: + # If the grid is a function, then let's execute it and convert it to + # a list + grid = self.grid + if isinstance(grid, (NestedUserFunctionVariable, UserFunctionVariable)): + # Populate the special "meta" argument to call the grid function + config_args = { + ConstantVariable.create(k): ConstantVariable.create(v) + for k, v in config_args.items() + } + meta = ConstDictVariable({**normalized_args, **config_args}, dict) + grid = grid.call_function(tx, [meta], {}) + + # Now, the grid must be a list either originally or through the above + # modification + if isinstance(grid, BaseListVariable): + grids.append(grid.as_proxy()) + else: + unimplemented(f"grid for the triton kernel is {type(grid)}") + + for i in range(len(grids)): + if not 
isinstance(grids[i], tuple): + raise Unsupported("Only tuple grids are supported") + # inductor expects all grids to be 3-tuple so lets make it + if len(grids[i]) == 1: + grids[i] = (grids[i][0], 1, 1) + elif len(grids[i]) == 2: + grids[i] = (grids[i][0], grids[i][1], 1) + elif len(grids[i]) > 3: + raise Unsupported("Grid can have at most rank 3") + + assert len(grids) != 0 + if len(set(grids)) == 1: + # If there's only one unique grid, lets simplify + grids = [grids[0]] + + from torch._higher_order_ops.triton_kernel_wrap import ( + triton_kernel_wrapper_mutation, + ) + + # Combine args and kwargs and pass as a dict so that if user defined triton + # kernel uses variables as 'grid' or 'kernel', it does not conflict with + # parameters of the wrapper function + meta = ConstDictVariable(normalized_args, dict) + tx.output.create_proxy( + "call_function", + triton_kernel_wrapper_mutation, + (), + { + "kernel_idx": self.kernel_idx, + "grid": grids, + "kwargs": meta.as_proxy(), + }, + ) + + return variables.ConstantVariable( + None, + ) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name == "__getitem__": + # __getitem__ should only be called if we don't already have a grid + # Only grid needs to be passed + if self.grid is not None or len(args) != 1: + raise Unsupported( + "Triton kernels should be called with only a single grid" + ) + + return TritonKernelVariable( + kernel=self.kernel, + kernel_idx=self.kernel_idx, + grid=args[0], + ) + elif name == "run": + if "grid" not in kwargs: + raise Unsupported("Triton kernel requires to be called with a grid") + grid = kwargs.pop("grid") + kwargs.pop("warmup", None) + # rewrite kernel.run(*args, grid=grid) to kernel[grid](*args) + return TritonKernelVariable( + kernel=self.kernel, kernel_idx=self.kernel_idx, grid=grid + ).call_function(tx, args, kwargs) + + # Bail out to parent's implementation + return super().call_method(tx, name, args, kwargs) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/higher_order_ops.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/higher_order_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..cc54daf551c1b88eecbab8d1f771576666d6b798 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/higher_order_ops.py @@ -0,0 +1,1660 @@ +# mypy: ignore-errors + +import contextlib +import functools +import logging +import types + +from typing import Dict, List, Optional + +import torch._C +import torch.fx +import torch.nn +import torch.onnx.operators +from torch._dynamo.utils import deepcopy_to_fake_tensor, get_fake_value, get_real_value +from torch._dynamo.variables.base import VariableTracker +from torch._dynamo.variables.builtin import BuiltinVariable +from torch._dynamo.variables.functions import UserFunctionVariable +from torch._dynamo.variables.tensor import SymNodeVariable +from torch._guards import Source +from torch.fx.passes.shape_prop import _extract_tensor_metadata +from torch.utils import _pytree as pytree + +from ..exc import ( + UncapturedHigherOrderOpError, + unimplemented, + Unsupported, + UserError, + UserErrorType, +) +from ..source import AttrSource, FSDPNNModuleSource, GetItemSource, NNModuleSource +from ..utils import proxy_args_kwargs +from .dicts import ConstDictVariable +from .lists import ListVariable, TupleVariable +from .nn_module import NNModuleVariable, UnspecializedNNModuleVariable + + +log = logging.getLogger(__name__) + 
+ +def raise_hard_error_if_graph_break(reason): + def deco(fn): + @functools.wraps(fn) + def graph_break_as_hard_error(*args, **kwargs): + try: + return fn(*args, **kwargs) + except Unsupported as e: + msg = " Scroll up to find out what causes the graph break." + raise UncapturedHigherOrderOpError(reason + msg) from e + + return graph_break_as_hard_error + + return deco + + +@contextlib.contextmanager +def dynamo_enable_grad(tx, enable=True): + from . import GradModeVariable + + org_value = torch.is_grad_enabled() + try: + GradModeVariable.create(tx, enable, initialized=True) + yield + finally: + GradModeVariable.create(tx, org_value, initialized=True) + + +def only_consist_of(var, types, allow_none=False): + if isinstance(var, types): + return True + if allow_none and var.is_python_constant() and var.as_python_constant() is None: + return True + if isinstance(var, (TupleVariable, ListVariable)): + return all(only_consist_of(item, types, allow_none) for item in var.items) + if isinstance(var, ConstDictVariable): + return all( + only_consist_of(item, types, allow_none) for item in var.items.values() + ) + return False + + +# A more read-able syntax sugar for creating a UserFunctionVariable for f +# and run call_function on it. Make it return a function to preserve the calling +# convention of the original f. +def _make_inlined(tx, f): + assert callable(f), "Expect f to be a python callable." + + def inline_call(*args, **kwargs): + return UserFunctionVariable(f).call_function(tx, args, kwargs) + + return inline_call + + +def _call_function_and_unflatten_output(tx, fn, args, kwargs, ret_vt, ret_treespec): + from .builder import wrap_fx_proxy + + flat_example_value = pytree.tree_map_only( + torch.fx.Proxy, + lambda a: a.node.meta["example_value"], + ret_vt.as_proxy(), + ) + + # Store the invocation as a call + flat_variable = wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + fn, + args=args, + kwargs=kwargs, + ), + example_value=flat_example_value, + ) + + # Transform variable back into a list (previously made into a tuple by + # speculate_subgraph function) so as to respect the pytree API typing. + flat_list_variable = BuiltinVariable(list).call_function(tx, [flat_variable], {}) + return ( + _make_inlined(tx, pytree.tree_unflatten)(flat_list_variable, ret_treespec) + if ret_treespec + else flat_variable + ) + + +def _assert_tensors_nonaliasing(inputs, outputs): + input_tensor_ids = { + id(t) for t in pytree.tree_leaves(inputs) if isinstance(t, torch.Tensor) + } + output_tensor_ids = { + id(t) for t in pytree.tree_leaves(outputs) if isinstance(t, torch.Tensor) + } + assert input_tensor_ids.isdisjoint( + output_tensor_ids + ), "inputs to function body cannot alias outputs" + + +def validate_args_and_maybe_create_graph_inputs( + sub_args, + tracer, + tx, + set_subgraph_inputs, + description, +): + from . 
import AutogradFunctionContextVariable, ConstantVariable, EnumVariable + from .builder import wrap_fx_proxy_cls + + assert tracer.parent is not None + + if set_subgraph_inputs == "flatten_manual": + flat_args, tree_spec = _make_inlined(tx, pytree.tree_flatten)( + ListVariable(sub_args) + ).unpack_var_sequence(tx) + + flat_inputs = validate_args_and_maybe_create_graph_inputs( + flat_args.unpack_var_sequence(tx), + tracer, + tx, + set_subgraph_inputs="manual", + description=description, + ) + + return _make_inlined(tx, pytree.tree_unflatten)( + ListVariable(flat_inputs), tree_spec + ).unpack_var_sequence(tx) + else: + args = [] + for a in sub_args: + assert isinstance(a, VariableTracker) + if set_subgraph_inputs == "automatic": + args.append(a) + continue + + if isinstance(a, (ConstantVariable, EnumVariable)): + # This arg is not used in the body of the higher order op. + # Currently, this new input is added to make the calls + # happy, which expect a fixed number of arguments. In + # future, we can clean this up. + tracer.create_graph_input("const") + new_arg = a + # Weird special case, we probably want to delete it or fold it + # into the next case (of `a` being placeable into a graph) + elif isinstance(a, AutogradFunctionContextVariable): + tracer.create_graph_input(a.as_proxy().node.name) + new_arg = a + # If `a` can be put into a graph + elif a.maybe_fx_node() is not None: + node = a.maybe_fx_node() + new_proxy = tracer.create_graph_input(node.name) + example_value = ( + node.meta["example_value"] if "example_value" in node.meta else None + ) + new_arg = wrap_fx_proxy_cls( + target_cls=type(a), + tx=tx, + proxy=new_proxy, + example_value=example_value, + ) + # If `a` cannot be put into a graph + else: + # HOPs work much better if they use speculate_subgraph(set_subgraph_inputs="automatic"). + raise unimplemented( + f"{description} with body that accepts non-Tensors as input. " + f"Got: {a.python_type()}" + ) + args.append(new_arg) + return args + + +# This helper function is used to make sure two graphs share the same input signature. For example, +# in torch.cond, two branches might lift different set of tensors as inputs. This function helps to +# dedup the inputs and modify the graphs to take the same set of inputs. +def _merge_graph_inputs( + l_graph, l_lifted_freevars, l_name, r_graph, r_lifted_freevars, r_name +): + def dedup_and_sort_lifted_freevars(l_lifted_freevars, r_lifted_freevars): + # The nn module attributes are guaranteed to be registered into the top-level graph module during + # higher order op speculation. Therefore, get_attr nodes in two branches with the same + # target refer to the same attribute and we can safely deduplicate them with their target. + # + # Note: ideally, dynamo should just create a single proxy for the same attribute of a nn module. But + # true_branch and false_branch belong to two separate tracing contexts, they may register the same + # attribute to top level seperately. This creates two get_attr proxies for the same attribute + # that have different meta data such as stack_trace (one stack trace for the true_branch, + # and the other for false_branch). It seems better to discard the proxy explicitly in cond + # than make dynamo create a single proxy for the same get_attr target. 
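+        # As a rough illustration (the attribute name here is hypothetical): if both
+        # branches read the same module parameter, each branch tracer lifts its own
+        # get_attr proxy, e.g. two proxies whose node.target is "L__self___linear_weight".
+        # shared_getattrs below pairs those proxies by target so the merged signature
+        # lists that parameter only once.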
+ def shared_getattrs(l_lifted_proxies, r_lifted_proxies): + true_targets = { + proxy.node.target: proxy + for proxy in l_lifted_proxies + if proxy.node.op == "get_attr" + } + l_shared_getattrs = {} + r_shared_getattrs = {} + + for false_proxy in r_lifted_proxies: + if ( + false_proxy.node.op == "get_attr" + and false_proxy.node.target in true_targets + ): + true_proxy = true_targets[false_proxy.node.target] + l_shared_getattrs[true_proxy] = true_proxy + r_shared_getattrs[false_proxy] = true_proxy + return l_shared_getattrs, r_shared_getattrs + + l_shared_getattrs, r_shared_getattrs = shared_getattrs( + l_lifted_freevars.keys(), r_lifted_freevars.keys() + ) + + l_shared_freevars = (l_lifted_freevars.keys() & r_lifted_freevars.keys()).union( + l_shared_getattrs.keys() + ) + r_shared_freevars = (l_lifted_freevars.keys() & r_lifted_freevars.keys()).union( + r_shared_getattrs.keys() + ) + unique_l_freevars = l_lifted_freevars.keys() - l_shared_freevars + unique_r_freevars = r_lifted_freevars.keys() - r_shared_freevars + + def _sort_by_name(vars): + return sorted(vars, key=lambda var: var.node.name) + + return ( + list(_sort_by_name(list(l_shared_freevars))), + list(_sort_by_name(list(r_shared_freevars))), + list(_sort_by_name(list(unique_l_freevars))), + list(_sort_by_name(list(unique_r_freevars))), + ) + + (l_shared, r_shared, unique_l, unique_r) = dedup_and_sort_lifted_freevars( + l_lifted_freevars, r_lifted_freevars + ) + + # Let's say we capture cond(pred, true_fn, false_fn, (x,)) + # With set_subgraph_inputs set to automatic, + # true_fn has lifted variables x, a, b, c + # false_fn has lifted variables x, a, b, d + # Then fixup_branch_inps makes sure both branches have the same signature, i.e.: + # - true_fn(x, a, b, c_true_branch, d_false_branch) + # - false_fn(x, a, b, c_true_branch, d_false_branch) + # + # More formally, the signature has three parts in the following order: + # 1. used in both branches: x, a, b + # 2. only used in the true branch: c, suffixed with _true_branch + # 3. only used in the false branch: d, suffixed with _false_branch + # Within each part, we re-order the nodes by name to have a deterministic ordering for testing. + def fixup_branch_inps(graph, lifted_freevars, shared, unique_l, unique_r): + def _insert_or_replace_phs(new_args, name_suffix): + for arg in new_args: + new_ph = graph.placeholder(arg.node.name + name_suffix) + # Override with new_ph if there exists an old placeholder. + if arg in lifted_freevars: + old_ph = lifted_freevars[arg].node + old_ph.replace_all_uses_with(new_ph) + # replace_all_uses_with doesn't clean users. Clean it manually so that we can erase it. + old_ph.users = {} + graph.erase_node(old_ph) + + first_not_ph_node = next( + node for node in graph.nodes if node.op != "placeholder" + ) + with graph.inserting_before(first_not_ph_node): + _insert_or_replace_phs(shared, "") + _insert_or_replace_phs(unique_l, "_" + l_name) + _insert_or_replace_phs(unique_r, "_" + r_name) + + fixup_branch_inps(l_graph, l_lifted_freevars, l_shared, unique_l, unique_r) + fixup_branch_inps(r_graph, r_lifted_freevars, r_shared, unique_l, unique_r) + return l_graph, r_graph, l_shared, r_shared, unique_l, unique_r + + +# See NOTE [HigherOrderOperator tracing design] for details of the design +def speculate_subgraph( + tx, + f, + sub_args, + sub_kwargs, + description, + *, + # source_target is the .value of HigherOrderOpVariable and is the + # target of the proxy that we created for the higherOrderOperator. 
+ source_target=None, + always_restore=False, + enable_grad=None, + # NOTE [argument `set_subgraph_inputs`] + # set_subgraph_inputs controls how to construct subgraphs' placeholders from sub_args. + # 1. if your HOP supports arbitrary inputs, use set_subgraph_inputs="automatic" (most recommended). + # 2. if your HOP supports only Tensor and symnode inputs, use set_subgraph_inputs="flatten_manual" (recommended). + # If sub_args contain a pytree structure (e.g. dict/list/tuple/set), the sub_args will be flattened first. + # Then the flattened args are manually set as the subgraph's placeholders. + # 3. if your HOP must preserve inputs that are not tensor or symnode as placeholders, e.g. AutogradFunctionContextVariable, + # use set_subgraph_inputs="manual" (not recommended). We do not recommend it in general because it has the + # restriction that the user needs to manually control how to create placeholders and VariableTrackers for the args. + set_subgraph_inputs="automatic", + restore_side_effects=True, + should_flatten_outputs=False, + # Pass in an originating tracer - this is needed for preserving context + # across fwd-bwd for autograd.Function + tracer=None, +): + if sub_kwargs is None: + sub_kwargs = {} + + assert set_subgraph_inputs in { + "automatic", + "flatten_manual", + "manual", + }, "Please use one of the supported set_subgraph_inputs options." + + # See NOTE [argument `set_subgraph_inputs`] + if sub_kwargs and set_subgraph_inputs != "automatic": + unimplemented("Use `set_subgraph_inputs=automatic` when passing `sub_kwargs`.") + + try: + f, sub_args, sub_kwargs = VariableTracker.apply( + # ensure guards on args get installed in parent subgraph + lambda x: x.realize(), + (f, sub_args, sub_kwargs), + ) + + with tx.output.subtracer(source_target, tracer) as subtracer: + args = validate_args_and_maybe_create_graph_inputs( + sub_args, subtracer, tx, set_subgraph_inputs, description + ) + + validate_args_and_maybe_create_graph_inputs( + sub_kwargs.values(), + subtracer, + tx, + set_subgraph_inputs="automatic", + description=description, + ) + + autograd_ctx = ( + dynamo_enable_grad(tx, enable_grad) + if enable_grad is not None + else contextlib.nullcontext() + ) + + # For handling side effects, we can make an argument that we don't + # have to do anything here. The side effects infra does a good job + # of graph breaking if we mutate any nonlocal or global variable + # while subtracing. As a result, if tracing succeeds, the side effects + # data structure will only contain read-only data structures that + # are put there for tracking purposes. + # But on the other hand, there is an argument that if we ever write + # a new side effect in Dynamo which does not go through the side + # effect infra, we can end up in a bad state. + # Therefore we restore the side effects after tracing. The catch is + # that we have to specially handle tensor variables. If we have seen a + # nonlocal variable tensor during subtracing, we want to keep + # track of that tensor, so that later subtracing or the root tracer + # itself does not create a new proxy for the already observed tensor + # variable. 
+ if restore_side_effects: + prev_side_effects = tx.output.side_effects.clone() + + with autograd_ctx: + output = f.call_function(tx, args, sub_kwargs) + + if restore_side_effects: + new_side_effects = tx.output.side_effects.clone() + prev_side_effects.track_tensor_variables_from_runahead_side_effects( + new_side_effects + ) + tx.output.side_effects = prev_side_effects + + treespec = None + if should_flatten_outputs: + # Flatten the speculated subgraph output. + output, treespec = _make_inlined(tx, pytree.tree_flatten)( + output + ).unpack_var_sequence(tx) + # Actually, transform the list (returned by flatten) into a tuple + # for dynamo consistency. + output = BuiltinVariable(tuple).call_function(tx, [output], {}) + + # Register output to graph + # Modeled off of compile_and_call_fx_graph + # TODO: support pytree output + # We check always_restore because we dont use the output or side effects of always_restore code, + # like bwd. + if always_restore: + # Nothing left to do here + return (output, treespec), tx.output.graph, subtracer.lifted_freevars + else: + from . import TensorVariable + + if not only_consist_of(output, TensorVariable, allow_none=True): + unimplemented( + "HigherOrderOperator body's output must consist of tensors only" + ) + + # The output proxies might not belong to this SubgraphTracer + # (if they are free variables that were never lifted) + # so lift them here. + output_proxies = output.as_proxy() + output_proxies = pytree.tree_map( + subtracer.maybe_lift_tracked_freevar_to_input, output_proxies + ) + + tx.output.create_node( + "output", + "output", + (subtracer.create_arg((output_proxies,))), + {}, + ) + graph = tx.output.graph + graph.lint() + lifted_freevars = subtracer.lifted_freevars + + return ( + (output, treespec), + graph, + lifted_freevars, + ) + + except Unsupported as ex: + f_name = f"{type(f).__name__}" + if isinstance(f, UserFunctionVariable): + f_name = f.get_name() + msg = ( + f"speculate_subgraph: while introspecting {description}, we were unable " + f"to trace function `{f_name}` into a single graph. This means " + f"that Dynamo was unable to prove safety for this API and will " + f"fall back to eager-mode PyTorch, which could lead to a slowdown." 
+ ) + log.info(msg) + log.info(ex) + raise ex + + +def make_attr(tx, name): + node = tx.output.create_proxy( + "get_attr", + name, + (), + {}, + ) + return node + + +def add_subgraph(tx, source, name, gm): + next_name = None + i = 0 + while not next_name: + candidate = f"{name}_{i}" + if candidate in tx.output.nn_modules: + i += 1 + else: + next_name = candidate + + gm.__name__ = next_name + if source.guard_source().is_fsdp_module(): + src = FSDPNNModuleSource(GetItemSource(source, next_name)) + else: + src = NNModuleSource(GetItemSource(source, next_name)) + gm.torchdynamo_force_dynamic = False + tx.output.register_attr_or_module(gm, next_name, source=src) + return next_name + + +class TorchHigherOrderOperatorVariable(VariableTracker): + def __init__(self, value, source: Optional[Source] = None, **kwargs): + super().__init__(**kwargs) + self.value = value + self.source = source + + @staticmethod + def make(value, source=None, **kwargs): + if value.__name__ == "cond": + return CondHigherOrderVariable(value, source, **kwargs) + elif value.__name__ == "while_loop": + return WhileLoopHigherOrderVariable(value, source, **kwargs) + elif value.__name__ in ("map", "map_impl"): + return MapHigherOrderVariable(value, source, **kwargs) + elif value.__name__ == "executorch_call_delegate": + return ExecutorchCallDelegateHigherOrderVariable(value, source, **kwargs) + elif value.__name__ == "out_dtype": + return OutDtypeHigherOrderVariable(value, source, **kwargs) + elif value is torch._functorch.eager_transforms.grad_impl: + return FunctorchGradHigherOrderVariable(value, source, **kwargs) + elif value.__name__ == "wrap": + return WrapHigherOrderVariable(value, source, **kwargs) + elif value.__name__ in ( + "wrap_activation_checkpoint", + "tag_activation_checkpoint", + ): + return CheckpointHigherOrderVariable(value, source, **kwargs) + elif value.__name__ == "_export_tracepoint": + return ExportTracepointHigherOrderVariable(value, source, **kwargs) + elif value.__name__ == "trace_wrapped": + return TraceWrappedHigherOrderOperatorVariable(value, source, **kwargs) + elif value.__name__ == "strict_mode": + return StrictModeHigherOrderVariable(value, source, **kwargs) + else: + unimplemented(f"HigherOrderOperator {value.__name__}") + + def call_function( + self, tx, args: List[VariableTracker], kwargs: Dict[str, VariableTracker] + ) -> VariableTracker: + unimplemented(f"HigherOrderOperator {self.value.__name__}") + + +class CondHigherOrderVariable(TorchHigherOrderOperatorVariable): + @raise_hard_error_if_graph_break( + reason="Cond doesn't work unless it is captured completely with torch.compile." + ) + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from . 
import ( + ConstantVariable, + ListVariable, + NestedUserFunctionVariable, + TensorVariable, + UserFunctionVariable, + ) + + args, kwargs = VariableTracker.apply(lambda x: x.realize(), (args, kwargs)) + + for i, k in enumerate(["pred", "true_fn", "false_fn", "operands"]): + if v := kwargs.pop(k, None): + assert i == len( + args + ), "did not provide the right number of non-keyword args" + args.append(v) + + if kwargs: + unimplemented(f"torch.cond: Got unexpected kwargs: {list(kwargs.keys())}") + + # TODO(voz): Support fake tensor dispatch for recursive + # ops - see torch/dispatch/_dispatcher.py + if len(args) != 4: + unimplemented( + f"Expected 4 arguments but got {len(args)}.\n" + f"Usage: cond(pred, true_fn, false_fn, operands)", + ) + # predicate + if type(args[0]) not in (ConstantVariable, TensorVariable, SymNodeVariable): + unimplemented( + f"Expected pred to be bool or a boolean tensor with single " + f"item but got {str(type(args[0]))} " + f"with original python type {str(args[0].python_type())}.", + ) + + # operands + if not isinstance(args[3], (ListVariable, TupleVariable)): + unimplemented( + f"Expected a tuple but got {args[3].python_type()}", + ) + operands = args[3].unpack_var_sequence(tx) + if not only_consist_of(args[3], (TensorVariable,)): + unimplemented( + "Expect operands to be a tuple of pytrees that only consists of tensor leaves." + ) + + # branches + assert isinstance( + args[1], + ( + UserFunctionVariable, + NestedUserFunctionVariable, + NNModuleVariable, + UnspecializedNNModuleVariable, + ), + ), str( + type(args[1]) + ) # true_fn + + assert isinstance( + args[2], + ( + UserFunctionVariable, + NestedUserFunctionVariable, + NNModuleVariable, + UnspecializedNNModuleVariable, + ), + ), str( + type(args[2]) + ) # false_fn + + # Our strategy for tracing the true/false branches of cond + # are to checkpoint our graphstate, run the true branch, + # roll it back to the checkpoint, and run the false + # branch, and then merge the graphstates. Well, perhaps + # "merge" is too strong a word: we mostly assert that + # the resulting graphstates have to be the same. + # + # We only permit guards to diverge (we union the guards from + # both branches). In particular, this means that side + # effects are NOT permitted inside true/false branches; this + # would be difficult to implement, because of the path + # explosion problem. 
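+        # As an illustrative sketch (argument names assumed, functions hypothetical),
+        # the user code this handles looks roughly like:
+        #
+        #     def f(pred, x):
+        #         return torch.cond(pred, lambda x: x.sin(), lambda x: x.cos(), (x,))
+        #
+        # Each branch callable is speculated below as its own subgraph; the two graphs
+        # are then merged into a single torch.ops.higher_order.cond call with a shared
+        # input signature.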
+ + def speculate_branch(branch): + # NB: 0 is predicate + ix = 1 if branch else 2 + # TODO: Support kwargs + ( + (ret_val, ret_treespec), + ret_graph, + ret_lifted_freevars, + ) = speculate_subgraph( + tx, + args[ix], + operands, + {}, + "cond", + source_target=self.value, + should_flatten_outputs=True, + ) + + if not only_consist_of(ret_val, (TensorVariable,)): + unimplemented( + "Expected branches to return a possibly nested list/tuple/dict of tensors but it consists of non tensors.", + ) + return ret_val, ret_treespec, ret_graph, ret_lifted_freevars + + (true_r, true_treespec, true_graph, true_lifted_freevars) = speculate_branch( + True + ) + true_nn_modules = dict(tx.output.nn_modules) + + ( + false_r, + false_treespec, + false_graph, + false_lifted_freevars, + ) = speculate_branch(False) + false_nn_modules = dict(tx.output.nn_modules) + + same_treespec = _make_inlined(tx, pytree.TreeSpec.__eq__)( + true_treespec, false_treespec + ) + if not same_treespec.as_python_constant(): + unimplemented("Expected branches to return the same pytree structure.") + + def diff_meta(tensor_vars1, tensor_vars2): + assert all( + isinstance(var, TensorVariable) for var in tensor_vars1 + tensor_vars2 + ) + all_diffs = [] + for i, (var1, var2) in enumerate(zip(tensor_vars1, tensor_vars2)): + # We check the meta data associated with meta["example_value"] + meta1 = _extract_tensor_metadata( + var1.proxy.node.meta["example_value"], include_contiguity=False + ) + meta2 = _extract_tensor_metadata( + var2.proxy.node.meta["example_value"], include_contiguity=False + ) + if meta1 != meta2: + all_diffs.append((f"pair{i}:", meta1, meta2)) + return all_diffs + + if diffs := diff_meta( + true_r.unpack_var_sequence(tx), false_r.unpack_var_sequence(tx) + ): + unimplemented( + f"Expected branches to return tensors with same metadata. [(tensor_pair, difference)...]:{diffs}" + ) + + ( + true_graph, + false_graph, + true_shared, + false_shared, + unique_true, + unique_false, + ) = _merge_graph_inputs( + true_graph, + true_lifted_freevars, + "true_branch", + false_graph, + false_lifted_freevars, + "false_branch", + ) + + true_name = add_subgraph( + tx, + self.source, + "cond_true", + torch.fx.GraphModule(true_nn_modules, true_graph), + ) + false_name = add_subgraph( + tx, + self.source, + "cond_false", + torch.fx.GraphModule(false_nn_modules, false_graph), + ) + + true_node = make_attr(tx, true_name) + false_node = make_attr(tx, false_name) + + p_args = ( + args[0].as_proxy(), + true_node, + false_node, + # We pick true_shared but it shouldn't matter + true_shared + unique_true + unique_false, + ) + + return _call_function_and_unflatten_output( + tx, torch.ops.higher_order.cond, p_args, {}, true_r, true_treespec + ) + + +class WhileLoopHigherOrderVariable(TorchHigherOrderOperatorVariable): + @raise_hard_error_if_graph_break( + reason="while_loop doesn't work unless it is captured completely with torch.compile." + ) + def call_function( + self, tx, args: List[VariableTracker], kwargs: Dict[str, VariableTracker] + ) -> VariableTracker: + from . 
import NestedUserFunctionVariable, TensorVariable, UserFunctionVariable + + args, kwargs = VariableTracker.apply(lambda x: x.realize(), (args, kwargs)) + + for i, k in enumerate(["cond_fn", "body_fn", "operands"]): + if v := kwargs.pop(k, None): + assert i == len( + args + ), "did not provide the right number of non-keyword args" + args.append(v) + + if kwargs: + unimplemented( + f"torch.while_loop: Got unexpected kwargs: {list(kwargs.keys())}" + ) + + if len(args) != 3: + unimplemented( + f"Expected 3 arguments but got {len(args)}.\n" + f"Usage: while_loop(cond_fn, body_fn, operands)", + ) + + def _check_supported_callable(fn_var): + assert isinstance( + fn_var, + ( + UserFunctionVariable, + NestedUserFunctionVariable, + NNModuleVariable, + UnspecializedNNModuleVariable, + ), + ), str(type(fn_var)) + + _check_supported_callable(args[0]) + _check_supported_callable(args[1]) + + # operands + if not isinstance(args[2], (ListVariable, TupleVariable)): + unimplemented( + f"Expected a tuple but got {args[2].python_type()}", + ) + + operands = args[2].unpack_var_sequence(tx) + if not only_consist_of(args[2], (TensorVariable,)): + unimplemented( + "Expect operands to be a tuple of pytrees that only consists of tensor leaves." + ) + + ( + (cond_r, cond_treespec), + cond_graph, + cond_lifted_freevars, + ) = speculate_subgraph( + tx, args[0], operands, {}, "while_loop", source_target=self.value + ) + cond_nn_modules = dict(tx.output.nn_modules) + if not isinstance(cond_r, TensorVariable): + unimplemented( + f"Expected cond_fn to return a tensor but got {cond_r.python_type()}", + ) + + cond_r_meta = _extract_tensor_metadata( + cond_r.proxy.node.meta["example_value"], include_contiguity=False + ) + if not cond_r_meta.dtype == torch.bool or not cond_r_meta.shape == torch.Size( + [] + ): + unimplemented( + f"Expected cond_fn to return a tensor with shape (,) but got {cond_r_meta.shape}" + ) + + ( + (body_r, body_treespec), + body_graph, + body_lifted_freevars, + ) = speculate_subgraph( + tx, + args[1], + operands, + {}, + "while_loop", + source_target=self.value, + should_flatten_outputs=True, + ) + body_nn_modules = dict(tx.output.nn_modules) + + ( + cond_graph, + body_graph, + cond_shared, + body_shared, + cond_unique, + body_unique, + ) = _merge_graph_inputs( + cond_graph, + cond_lifted_freevars, + "cond_fn", + body_graph, + body_lifted_freevars, + "body_fn", + ) + # We pick cond_shared but it shouldn't matter + merged_input = tuple(cond_shared + cond_unique + body_unique) + + cond_name = add_subgraph( + tx, + self.source, + "cond_fn", + torch.fx.GraphModule(cond_nn_modules, cond_graph), + ) + body_name = add_subgraph( + tx, + self.source, + "body_fn", + torch.fx.GraphModule(body_nn_modules, body_graph), + ) + + cond_node = make_attr(tx, cond_name) + body_node = make_attr(tx, body_name) + + p_args = ( + cond_node, + body_node, + merged_input, + ) + + return _call_function_and_unflatten_output( + tx, torch.ops.higher_order.while_loop, p_args, {}, body_r, body_treespec + ) + + +def non_single_tensor_return_unsupported(api, ret): + from . import TensorVariable + + if not isinstance(ret, TensorVariable): + raise Unsupported( + f"{api} over function that returns something " f"other than one Tensor" + ) + + +class MapHigherOrderVariable(TorchHigherOrderOperatorVariable): + def call_function( + self, tx, args: List[VariableTracker], kwargs: Dict[str, VariableTracker] + ) -> VariableTracker: + from . 
import NestedUserFunctionVariable, TensorVariable, UserFunctionVariable + from .builder import wrap_fx_proxy_cls + + if len(kwargs) > 0: + unimplemented( + "torch.ops.higher_order.map: kwargs are not supported in the map operator." + ) + + assert type(args[0].realize()) in ( + UserFunctionVariable, + NestedUserFunctionVariable, + ) + assert type(args[1].realize()) is TensorVariable + + sample_shape = get_fake_value(args[1].as_proxy().node, tx).size() + + if len(sample_shape) < 1 or sample_shape[0] == 0: + unimplemented( + "map() operator doesn't support scalar or zero-sized tensors during tracing." + ) + + # To get the example output from map() we will need to provide at least one sample to + # the loop body. In our case we will always use xs[0], and our map() won't support zero + # sized tensor during tracing. + first_dim = wrap_fx_proxy_cls( + target_cls=TensorVariable, tx=tx, proxy=args[1].as_proxy()[0] + ) + + # TODO: Support kwargs + ( + (body_r, body_spec), + body_graph, + body_lifted_freevars, + ) = speculate_subgraph( + tx, + args[0], + [ + first_dim, + *args[2:], + ], + {}, + "torch.ops.higher_order.map", + source_target=self.value, + set_subgraph_inputs="flatten_manual", + should_flatten_outputs=True, + ) + + body_nn_modules = dict(tx.output.nn_modules) + + body_name = add_subgraph( + tx, + self.source, + "map_body", + torch.fx.GraphModule(body_nn_modules, body_graph), + ) + + body_node = make_attr(tx, body_name) + + p_args = ( + body_node, + [args[1].as_proxy()], + [arg.as_proxy() for arg in args[2:]] + list(body_lifted_freevars.keys()), + ) + return _call_function_and_unflatten_output( + tx, torch.ops.higher_order.map_impl, p_args, {}, body_r, body_spec + ) + + +class ExecutorchCallDelegateHigherOrderVariable(TorchHigherOrderOperatorVariable): + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from .builder import wrap_fx_proxy + + # This is operator for delegation within Executorch which calls a + # specific function in the given lowered module with the given + # operators. The actual operator is defined in the Executorch codebase. + # This is a bad hierarchical violation since + # executorch_call_delegate sits at a higher level than dynamo, but + # there's no real solution to this issue yet. + if len(kwargs) > 0: + unimplemented( + "executorch_call_delegate: kwargs arguments were not enabled." + ) + lowered_module = tx.output.get_submodule(args[0].module_key) + + lowered_node = make_attr(tx, args[0].module_key) + + p_args = tuple(arg.as_proxy() for arg in args[1:]) + real_sub_args = pytree.tree_map_only( + torch.fx.Proxy, lambda a: get_real_value(a.node, tx.output), p_args + ) + + example_res = lowered_module.original_module.module()(*real_sub_args) + + # NOTE [Guaranteeing the 1-1 correspondence of FakeTensors and real tensors]: + # executorch modules promise not to alias inputs and outputs. + # Thus, output FakeTensors will correctly not alias input FakeTensors. 
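+        # For example (hypothetical case), a lowered module whose forward simply
+        # returned one of its inputs unchanged would trip the id()-based disjointness
+        # assertion below, since the same real tensor would appear among both the
+        # inputs and the outputs.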
+ _assert_tensors_nonaliasing(real_sub_args, example_res) + + example_value = deepcopy_to_fake_tensor(example_res, tx.fake_mode) + + p_args = (lowered_node,) + p_args + + # Store the invocation as a call + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + self.value, + args=tuple(p_args), + kwargs={}, + ), + example_value=example_value, + ) + + +class FunctorchGradHigherOrderVariable(TorchHigherOrderOperatorVariable): + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from . import ConstantVariable + from .builder import wrap_fx_proxy + + # TODO: Support `fn` with kwargs. + if not torch._dynamo.config.capture_func_transforms: + unimplemented( + "torch.func.grad capture is disabled, " + "it can be turned on by setting " + "`torch._dynamo.config.capture_func_transforms=True`" + ) + # [NOTE] Here we are (roughly) modelling the following + # + # grad_fn = torch.func.grad(fn, argnums=.., has_aux=..) + # grad_output = grad_fn(x) + grad_args = (args[0], args[1], args[2]) + + # get arguments + func, argnums, has_aux = grad_args + kwargs = args[4].items + if len(kwargs) > 0: + # Since speculate_subgraph doesn't support kwargs, we can't handle this for now. + unimplemented( + "torch.func.grad: kwargs arguments are currently unsupported." + ) + + # Trace through the `func` + # NOTE [HACK: Enable autograd while tracing function] + # `torch.func.grad` should not be affected by `no_grad` outside of `grad`. + # So, we enable_grad right before the function to which `grad` is applied + # (the parts explicitly disabled with `no_grad` inside the function are still disabled). + # Eg. + # def f(x): + # with no_grad(): # This will disable grad tracking under it. + # y = x * 2 + # + # return x ** 2 - y # grad tracking should be enabled irrespective of outside `no_grad`. + # + # with no_grad(): # This will not disable grad tracking inside of grad(f). + # grad_o = torch.func.grad(f)(x) + # TODO: Support kwargs + (body_r, _), body_graph, body_lifted_freevars = speculate_subgraph( + tx, + func, + args[3].items, + {}, + "torch.func.grad", + source_target=self.value, + # See NOTE [HACK: Enable autograd while tracing function] + enable_grad=True, + set_subgraph_inputs="manual", + ) + + body_name = add_subgraph( + tx, + self.source, + "grad_body", + torch.fx.GraphModule(tx.output.nn_modules, body_graph), + ) + body_node = make_attr(tx, body_name) + grad_proxy_args = ( + body_node, + *(arg.as_proxy() for arg in grad_args[1:]), + ) + + # Model `grad_fn = grad(fn, *grad_args, **grad_kwargs)` + grad_fn = tx.output.create_proxy( + "call_function", + torch.func.grad, + args=tuple(grad_proxy_args), + kwargs={}, + name="grad_proxy", + ) + + # Pass lifted freevars to the call to `grad_fn` + args = args[3].items + grad_fn_args = tuple(arg.as_proxy() for arg in args) + tuple( + body_lifted_freevars + ) + + # Call grad_fn with inputs. + # grad_output = grad_fn(*grad_fn_args, **grad_fn_kwargs) + grad_output = grad_fn(*grad_fn_args) + + # `grad_fn(*grad_fn_args, **grad_fn_kwargs)` + # Output of grad_fn is + # For has_aux=False, Tuple[gradients of inputs indicated by argnums]. + # For has_aux=True, Tuple[Tuple[gradients of inputs indicated by argnums], aux values] + # NOTE: example_value should match `grad_output`. 
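+        # As a rough guide to the expected structure (illustrative only):
+        #   grad(f, argnums=0)(x)                       -> dx
+        #   grad(f, argnums=(0, 1))(x, y)               -> (dx, dy)
+        #   grad(f, argnums=(0, 1), has_aux=True)(x, y) -> ((dx, dy), aux)
+        # The example_value computed below mirrors that nesting.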
+ def _from_args(idx): + return args[idx].as_proxy().node.meta["example_value"].contiguous() + + def to_python_ints(argnums): + if not isinstance(argnums, (ConstantVariable, TupleVariable)): + raise UserError( + UserErrorType.INVALID_INPUT, + f"argnums is expected to be int or tuple of ints. Got {argnums}.", + ) + + if isinstance(argnums, ConstantVariable): + if not isinstance(argnums.value, (int, tuple)): + raise UserError( + UserErrorType.INVALID_INPUT, + f"argnums is expected to be int or tuple of ints. Got {argnums}.", + ) + return argnums.value + else: + const_vars = argnums.unpack_var_sequence(tx) + if not all( + isinstance(var, ConstantVariable) and isinstance(var.value, int) + for var in const_vars + ): + raise UserError( + UserErrorType.INVALID_INPUT, + f"argnums is expected to contain int only. Got {const_vars}.", + ) + return tuple(var.value for var in const_vars) + + argnums_v = to_python_ints(argnums) + example_value = pytree.tree_map(_from_args, argnums_v) + + if has_aux.value: + # case : has_aux = True + # NOTE: Currently speculate subgraph allows body_r to be + # Tensor or Tuple/List of Tensor. + # Since `grad` expects output with has_aux + # to be (output, aux), only valid output currently is + # (output, some_tensor) + body_r_proxy = body_r.as_proxy() + aux = body_r_proxy[1].node.meta["example_value"] + example_value = (example_value, aux) + + fx_proxy = wrap_fx_proxy(tx=tx, proxy=grad_output, example_value=example_value) + + # Call contiguous on all the computed grads. + if not has_aux.value: + if isinstance(argnums_v, int): + return fx_proxy.call_method(tx, "contiguous", (), {}) + else: + grads = fx_proxy + items = [] + for idx in range(len(argnums_v)): + proxy = grads.call_method( + tx, "__getitem__", (ConstantVariable.create(idx),), {} + ).call_method(tx, "contiguous", (), {}) + items.append(proxy) + return TupleVariable(items) + else: # case: has_aux.value = True + # fx_proxy -> Tuple(grads, aux) + grads = fx_proxy.call_method( + tx, "__getitem__", (ConstantVariable.create(0),), {} + ) + aux = fx_proxy.call_method( + tx, "__getitem__", (ConstantVariable.create(1),), {} + ) + if isinstance(argnums_v, int): + return TupleVariable([grads.call_method(tx, "contiguous", (), {}), aux]) + else: + items = [] + for idx in range(len(argnums_v)): + proxy = grads.call_method( + tx, "__getitem__", (ConstantVariable.create(idx),), {} + ).call_method(tx, "contiguous", (), {}) + items.append(proxy) + return TupleVariable([TupleVariable(items), aux]) + + +class FunctorchHigherOrderVariable(UserFunctionVariable): + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + if not torch._dynamo.config.capture_func_transforms: + name = self.get_name() + assert name in ("grad_impl", "vmap_impl") + fn = name.split("_")[0] + unimplemented( + f"torch.func.{fn} capture is disabled, " + "it can be turned on by setting " + "`torch._dynamo.config.capture_func_transforms=True`" + ) + return super().call_function(tx, args, kwargs) + + +class WrapHigherOrderVariable(TorchHigherOrderOperatorVariable): + def create_wrapped_node(self, tx, args, kwargs, description): + # See NOTE [HigherOrderOperator tracing design] for more details + + ( + (body_r, treespec), + body_graph, + body_lifted_freevars, + ) = speculate_subgraph( + tx, + args[0], # function + [*args[1:]], + kwargs, + description, + source_target=self.value, + should_flatten_outputs=True, + ) + + body_gmod = torch.fx.GraphModule(tx.output.nn_modules, body_graph) + body_name = 
add_subgraph( + tx, + self.source, + "wrap_body", + body_gmod, + ) + + body_node = make_attr(tx, body_name) + + # Since, we call `speculate_subgraph` with `set_subgraph_inputs="automatic`, + # all the arguments are lifted. + lifted_args = tuple(arg for arg in body_lifted_freevars.keys()) + + proxy_args = (body_node,) + lifted_args + example_value = pytree.tree_map_only( + torch.fx.Proxy, + lambda a: a.node.meta["example_value"], + body_r.as_proxy(), + ) + + return proxy_args, {}, example_value, body_r, treespec, body_gmod + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + # This flattens the kwargs into lifted args + p_args, p_kwargs, example_value, body_r, treespec, _ = self.create_wrapped_node( + tx, args, kwargs, "wrap" + ) + + if len(p_kwargs) > 0: + unimplemented("kwargs should have been flattened into lifted args") + + return _call_function_and_unflatten_output( + tx, self.value, tuple(p_args), p_kwargs, body_r, treespec + ) + + +class OutDtypeHigherOrderVariable(TorchHigherOrderOperatorVariable): + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from .builder import wrap_fx_proxy + + if len(kwargs) > 0: + unimplemented("out_dtype does not handle kwargs") + + p_args = tuple(arg.as_proxy() for arg in args) + op = p_args[0] + output_dtype = p_args[1] + fake_sub_args = pytree.tree_map_only( + torch.fx.Proxy, lambda a: a.node.meta["example_value"], p_args[2:] + ) + # This is a simplified implementation of this operator just for tracing. + # Actual implementation may also first promote the arguments + example_value = op(*fake_sub_args).to(dtype=output_dtype) + + # Store the invocation as a call + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + self.value, + args=tuple(p_args), + kwargs={}, + ), + example_value=example_value, + ) + + +class StrictModeHigherOrderVariable(TorchHigherOrderOperatorVariable): + @raise_hard_error_if_graph_break( + reason="strict_mode HOO doesn't work unless it is captured completely with torch.compile." 
+ ) + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from .builder import wrap_fx_proxy + + callable = args[0] + + unpacked_sequence = args[1].unpack_var_sequence(tx) + # TODO (tmanlaibaatar) support pytree here + for arg in unpacked_sequence: + if isinstance(arg, (ListVariable, TupleVariable, ConstDictVariable)): + unimplemented("strict_mode HOO only works for flat inputs for now") + + if kwargs: + unimplemented( + f"strict_mode HOO received unexpected kwargs: {list(kwargs.keys())}" + ) + + ( + (ret_val, ret_treespec), + ret_graph, + ret_lifted_freevars, + ) = speculate_subgraph( + tx, + args[0], + unpacked_sequence, + {}, + "strict_mode", + source_target=self.value, + should_flatten_outputs=True, + ) + + strict_mode_nn_modules = dict(tx.output.nn_modules) + + strict_mode_name = add_subgraph( + tx, + self.source, + "strict_mode_body", + torch.fx.GraphModule(strict_mode_nn_modules, ret_graph), + ) + + strict_mode_node = make_attr(tx, strict_mode_name) + p_args = ( + strict_mode_node, + tuple(arg for arg in ret_lifted_freevars.keys()), + ) + + flat_example_value = pytree.tree_map_only( + torch.fx.Proxy, + lambda a: a.node.meta["example_value"], + ret_val.as_proxy(), + ) + + # Store the invocation as a call + flat_variable = wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + torch.ops.higher_order.strict_mode, + args=tuple(p_args), + kwargs={}, + ), + example_value=flat_example_value, + ) + + return _call_function_and_unflatten_output( + tx, torch.ops.higher_order.strict_mode, p_args, {}, ret_val, ret_treespec + ) + + +class CheckpointHigherOrderVariable(WrapHigherOrderVariable): + def call_function( + self, tx, args: List[VariableTracker], kwargs: Dict[str, VariableTracker] + ) -> VariableTracker: + from torch._higher_order_ops.wrap import TagActivationCheckpoint + from torch.utils.checkpoint import noop_context_fn + from .builder import wrap_fx_proxy + + context_fn = None + if "context_fn" in kwargs and kwargs["context_fn"] != noop_context_fn: + ctx = kwargs.pop("context_fn") + if isinstance(ctx, torch._dynamo.variables.UserFunctionVariable): + context_fn = ctx.fn + elif isinstance( + ctx, torch._dynamo.variables.functions.FunctoolsPartialVariable + ): + context_fn = ctx.as_python_constant() + else: + raise NotImplementedError( + f"checkpoint not implemented for {type(ctx)} context_fn" + ) + + checkpoint_kwargs, gmod_kwargs = TagActivationCheckpoint.divide_kwargs(kwargs) + + # Here we use checkpoint_kwargs (and not gmod kwargs). gmod_kwargs are + # already flattened above and managed inside the fx graph. + ( + p_args, + _, + example_value, + body_r, + treespec, + checkpointed_gmod, + ) = self.create_wrapped_node( + tx, args, gmod_kwargs, "torch.utils.checkpoint.checkpoint" + ) + if context_fn is not None: + checkpointed_gmod.meta["_checkpoint_context_fn"] = context_fn + + _, checkpoint_kwargs = proxy_args_kwargs([], checkpoint_kwargs) + + # Store the invocation as a call + variable = wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + self.value, + args=tuple(p_args), + kwargs=checkpoint_kwargs, + ), + example_value=example_value, + ) + + if treespec is None: + return variable + + # Transform variable back into a list (previously made into a tuple by + # speculate_subgraph function) so as to respect the pytree API typing. 
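+        # For instance (a hypothetical case): if the checkpointed function returned a
+        # dict such as {"out": y}, speculate_subgraph flattened it into a tuple of
+        # leaves plus a treespec; the list conversion and tree_unflatten below rebuild
+        # the original structure from the traced outputs.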
+ variable = BuiltinVariable(list).call_function(tx, [variable], {}) + + return _make_inlined(tx, pytree.tree_unflatten)(variable, treespec) + + +class ExportTracepointHigherOrderVariable(TorchHigherOrderOperatorVariable): + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from .builder import wrap_fx_proxy + + p_args = tuple(arg.as_proxy() for arg in args) + p_kwargs = {key: arg.as_proxy() for key, arg in kwargs.items()} + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + self.value, + args=p_args, + kwargs=p_kwargs, + ), + example_value=None, + ) + + +class TraceWrappedHigherOrderOperatorVariable(TorchHigherOrderOperatorVariable): + """ + Handles torch._dynamo._trace_wrapped_higher_order_op.inner_trace + by unwrapping the higher order op and inlining through it. This op + is created by dynamo to survive through AotAutograd, then unwrapped + here in the call to dynamo from compiled autograd. + """ + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + kwargs = dict(kwargs) + fn = kwargs.pop("fn") + return fn.call_function(tx, args, kwargs) + + +class AutogradFunctionApplyVariable(VariableTracker): + def __init__(self, fwd_graph, bwd_graph, parent_source, **kwargs): + super().__init__(**kwargs) + self.fwd_graph = fwd_graph + self.bwd_graph = bwd_graph + self.parent_source = parent_source + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from . import ( + AutogradFunctionContextVariable, + UserDefinedClassVariable, + UserFunctionVariable, + UserMethodVariable, + ) + from .builder import wrap_fx_proxy + + """ + Consider the following: + class MySin(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x.sin() + @staticmethod + def backward(ctx, grad): + x, = ctx.saved_tensors + return grad * x.cos() + We want the resulting graphs to look like: + def fwd(ctx, x): + # (output, saved tensors / attrs) + return (x.sin(), [x]) + # bwd(ctx, grad0, grad1, ..., gradn, *saved_tensors_or_attrs) + def bwd(ctx, grad, x): + return grad * x.cos() + To accomplish this, we're going to: + 1. Construct a ctx object + 2. (fwd_out, _), fwd_graph, fwd_freevars = speculate_subgraph on MySin.forward (manually_set_inputs=True) + 3. (bwd_out, _), bwd_graph, bwd_freevars = speculate_subgraph on MySin.backward, while manually setting + the ctx and grad inputs. + 4. Manually rewriting the fwd graph's output to be (output, stuff_that_gets_used in bwd_graph) + Getting from 3 to 4 is pretty elegant: stuff_that_gets_used in bwd graph is + just the bwd_freevars returned from speculate_subgraph, assuming MySin.backward + doesn't capture any arguments. + All these steps work if MySin.backward doesn't capture any values. This is a + limitation in general that we should check for. 
+ """ + + prev_side_effects = tx.output.side_effects.clone() + fwd_tracer = torch._dynamo.output_graph.SubgraphTracer( + tx.output, + parent=tx.output.current_tracer, + source_target="autograd.Function", + ) + + fwd_src = AttrSource(self.parent_source, member="forward") + ctx = AutogradFunctionContextVariable.create(tx) + if isinstance(self.fwd_graph, types.FunctionType): + fwd_fn = UserFunctionVariable(self.fwd_graph, source=fwd_src) + fwd_args = [ctx, *args] + elif isinstance(self.fwd_graph, types.MethodType): + fwd_fn = UserMethodVariable( + self.fwd_graph.__func__, + UserDefinedClassVariable(self.fwd_graph.__class__), + source=fwd_src, + ) + fwd_args = [fwd_fn.obj, ctx, *args] + else: + unimplemented("non-function or method") + + # Speculate subgraph on the fwd + (fwd_out, _), fwd_graph, fwd_freevars = speculate_subgraph( + tx, + fwd_fn, + fwd_args, + kwargs, + "autograd.Function", + set_subgraph_inputs="manual", + restore_side_effects=False, + tracer=fwd_tracer, + ) + + if fwd_freevars: + unimplemented("NYI") + + if ctx.mutable_local in tx.output.side_effects.store_attr_mutations: + if ( + "_materialize_non_diff_grads" + in tx.output.side_effects.store_attr_mutations[ctx.mutable_local] + ): + unimplemented("NYI") + + bwd_tracer = torch._dynamo.output_graph.SubgraphTracer( + tx.output, + parent=fwd_tracer, + source_target="autograd.Function", + ) + + # Speculate subgraph on the backward. We make the + # bwd tracer a child of the fwd tracer, because backward may rely on + # tensors/attrs created in the fwd tracer. + + from .lists import BaseListVariable + + if isinstance(fwd_out, BaseListVariable): + bwd_args = [ctx, *fwd_out.items] + else: + bwd_args = [ctx, fwd_out] + + bwd_src = AttrSource(self.parent_source, member="backward") + if isinstance(self.bwd_graph, types.FunctionType): + bwd_fn = UserFunctionVariable(self.bwd_graph, source=bwd_src) + elif isinstance(self.bwd_graph, types.MethodType): + bwd_fn = UserMethodVariable( + self.bwd_graph.__func__, + UserDefinedClassVariable(self.bwd_graph.__class__), + source=bwd_src, + ) + bwd_args = [bwd_fn.obj, *bwd_args] + else: + unimplemented("non-function or method") + + with tx.output.subtracer(fwd_fn, fwd_tracer), tx.strict_translation_mode(): + (bwd_out, _), bwd_graph, bwd_freevars = speculate_subgraph( + tx, + bwd_fn, + bwd_args, + kwargs, + "autograd.Function", + enable_grad=False, + set_subgraph_inputs="manual", + restore_side_effects=False, + tracer=bwd_tracer, + ) + + # TODO: assert that bwd_graph didn't capture values that were + # not created inside fwd_graph. 
+ + # TODO(oulgen): Ideally, we would not do a linear search for output + # node but as things currently are there could be nodes after the + # output node + # This is bug prone as if there's code after the output node, then + # graph.output will append the output at the very end + # This might be a behavior difference + + # Rewrite the output of fwd_graph to (output, stuff_necessary_for_bwd) + for node in fwd_graph.nodes: + if node.op == "output": + fwd_graph.erase_node(node) + break + + new_fwd_graph_outputs = (fwd_out.as_proxy(), list(bwd_freevars.keys())) + new_fwd_graph_outputs = pytree.tree_map(lambda x: x.node, new_fwd_graph_outputs) + fwd_graph.output(new_fwd_graph_outputs) + + # Store fwd_body + fwd_nn_modules = tx.copy_graphstate().output.nn_modules + fwd_name = add_subgraph( + tx, + fwd_src, + "fwd_body", + torch.fx.GraphModule(fwd_nn_modules.nn_modules, fwd_graph), + ) + + fwd_node = make_attr(tx, fwd_name) + + # Store bwd_body + bwd_nn_modules = tx.copy_graphstate().output.nn_modules + bwd_name = add_subgraph( + tx, + bwd_src, + "bwd_body", + torch.fx.GraphModule(bwd_nn_modules.nn_modules, bwd_graph), + ) + + bwd_node = make_attr(tx, bwd_name) + + tx.output.side_effects = prev_side_effects + + p_args = (fwd_node, bwd_node, *(arg.as_proxy() for arg in args)) + example_value = pytree.tree_map_only( + torch.fx.Proxy, + lambda a: a.node.meta["example_value"], + fwd_out.as_proxy(), + ) + + # Store the invocation as a call + from torch._functorch.autograd_function import autograd_function_apply + + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + autograd_function_apply, + args=p_args, + kwargs={}, + ), + example_value=example_value, + ) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/iter.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/iter.py new file mode 100644 index 0000000000000000000000000000000000000000..0e2d3167f9780c160391fc7e88c1d725e67a01b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/iter.py @@ -0,0 +1,260 @@ +# mypy: ignore-errors + +MAX_CYCLE = 3000 + +import itertools +import operator + +from typing import Dict, List, Optional + +from .. 
import polyfill, variables +from ..exc import unimplemented + +from .base import MutableLocal, VariableTracker +from .constant import ConstantVariable + + +class ItertoolsVariable(VariableTracker): + def __init__(self, value, **kwargs): + super().__init__(**kwargs) + self.value = value + + def __repr__(self): + return f"ItertoolsVariable({self.value})" + + def python_type(self): + return type(self.value) + + def as_python_constant(self): + return self.value + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + if ( + self.value is itertools.product + and not kwargs + and all(arg.has_unpack_var_sequence(tx) for arg in args) + ): + seqs = [arg.unpack_var_sequence(tx) for arg in args] + items = [] + for item in itertools.product(*seqs): + items.append(variables.TupleVariable(list(item))) + return variables.ListIteratorVariable(items, mutable_local=MutableLocal()) + elif ( + self.value is itertools.chain + and not kwargs + and all(arg.has_unpack_var_sequence(tx) for arg in args) + ): + seqs = [arg.unpack_var_sequence(tx) for arg in args] + items = list(itertools.chain.from_iterable(seqs)) + return variables.ListIteratorVariable(items, mutable_local=MutableLocal()) + elif self.value is itertools.accumulate: + from .builtin import BuiltinVariable + + if any(key not in ["initial", "func"] for key in kwargs.keys()): + unimplemented( + "Unsupported kwargs for itertools.accumulate: " + f"{','.join(set(kwargs.keys()) - {'initial', 'func'})}" + ) + + acc = kwargs.get("initial") + + if len(args) in [1, 2] and args[0].has_unpack_var_sequence(tx): + seq = args[0].unpack_var_sequence(tx) + + if "func" in kwargs and len(args) == 1: + func = kwargs["func"].call_function + elif len(args) == 2: + func = args[1].call_function + elif len(args) == 1: + # Default to operator.add + func = BuiltinVariable(operator.add).call_function + else: + unimplemented( + "itertools.accumulate can only accept one of: `func` kwarg, pos 2 arg" + ) + else: + unimplemented("Unsupported arguments for itertools.accumulate") + + items = [] + if acc is not None: + items.append(acc) + for item in seq: + if acc is None: + acc = item + else: + try: + acc = func(tx, [acc, item], {}) + except Exception: + raise unimplemented( # noqa: TRY200 + f"Unexpected failure in invoking function during accumulate. 
Failed running func {func}({item}{acc})" + ) + items.append(acc) + + return variables.ListIteratorVariable(items, mutable_local=MutableLocal()) + elif ( + self.value is itertools.combinations + and not kwargs + and len(args) == 2 + and args[0].has_unpack_var_sequence(tx) + and args[1].is_python_constant() + ): + iterable = args[0].unpack_var_sequence(tx) + r = args[1].as_python_constant() + + items = [] + for item in itertools.combinations(iterable, r): + items.append(variables.TupleVariable(list(item))) + return variables.ListIteratorVariable(items, mutable_local=MutableLocal()) + elif self.value is itertools.groupby: + if any(kw != "key" for kw in kwargs.keys()): + unimplemented( + "Unsupported kwargs for itertools.groupby: " + f"{','.join(set(kwargs.keys()) - {'key'})}" + ) + + def retrieve_const_key(key): + if isinstance(key, variables.SymNodeVariable): + return key.evaluate_expr() + elif isinstance(key, variables.ConstantVariable): + return key.as_python_constant() + else: + raise unimplemented( + "Unsupported key type for itertools.groupby: " + str(type(key)) + ) + + if len(args) == 1 and args[0].has_unpack_var_sequence(tx): + seq = args[0].unpack_var_sequence(tx) + keyfunc = ( + ( + lambda x: ( + retrieve_const_key( + kwargs.get("key").call_function(tx, [x], {}) + ) + ) + ) + if "key" in kwargs + else None + ) + else: + unimplemented("Unsupported arguments for itertools.groupby") + + result = [] + try: + for k, v in itertools.groupby(seq, key=keyfunc): + result.append( + variables.TupleVariable( + [ + variables.ConstantVariable.create(k) + if variables.ConstantVariable.is_literal(k) + else k, + variables.ListIteratorVariable( + list(v), mutable_local=MutableLocal() + ), + ], + mutable_local=MutableLocal(), + ) + ) + except Exception: + raise unimplemented( # noqa: TRY200 + "Unexpected failure when calling itertools.groupby" + ) + return variables.ListIteratorVariable(result, mutable_local=MutableLocal()) + elif self.value is itertools.repeat: + if len(args) < 2: + return variables.RepeatIteratorVariable( + *args, mutable_local=MutableLocal() + ) + + from .builder import SourcelessBuilder + + return tx.inline_user_function_return( + SourcelessBuilder()(tx, polyfill.repeat), args, kwargs + ) + elif self.value is itertools.count: + return variables.CountIteratorVariable(*args, mutable_local=MutableLocal()) + elif self.value is itertools.cycle: + return variables.CycleIteratorVariable(*args, mutable_local=MutableLocal()) + else: + return super().call_function(tx, args, kwargs) + + +class IteratorVariable(VariableTracker): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def next_variables(self, tx): + unimplemented("abstract method, must implement") + + +class RepeatIteratorVariable(IteratorVariable): + def __init__(self, item: VariableTracker, **kwargs): + super().__init__(**kwargs) + self.item = item + + # Repeat needs no mutation, clone self + def next_variables(self, tx): + return self.item, self + + +class CountIteratorVariable(IteratorVariable): + def __init__(self, item: int = 0, step: int = 1, **kwargs): + super().__init__(**kwargs) + if not isinstance(item, VariableTracker): + item = ConstantVariable.create(item) + if not isinstance(step, VariableTracker): + step = ConstantVariable.create(step) + self.item = item + self.step = step + + def next_variables(self, tx): + assert self.mutable_local + tx.output.side_effects.mutation(self) + next_item = self.item.call_method(tx, "__add__", [self.step], {}) + self.item = next_item + return self.item, self + + +class 
CycleIteratorVariable(IteratorVariable): + def __init__( + self, + iterator: IteratorVariable, + saved: List[VariableTracker] = None, + saved_index: int = 0, + item: Optional[VariableTracker] = None, + **kwargs, + ): + if saved is None: + saved = [] + super().__init__(**kwargs) + self.iterator = iterator + self.saved = saved + self.saved_index = saved_index + self.item = item + + def next_variables(self, tx): + assert self.mutable_local + + if self.iterator is not None: + try: + new_item, _ = self.iterator.next_variables(tx) + if len(self.saved) > MAX_CYCLE: + unimplemented( + "input iterator to itertools.cycle has too many items" + ) + tx.output.side_effects.mutation(self) + self.saved.append(new_item) + self.item = new_item + if self.item is None: + return self.next_variables(tx) + return self.item, self + except StopIteration: + self.iterator = None + return self.next_variables(tx) + elif len(self.saved) > 0: + tx.output.side_effects.mutation(self) + self.saved_index = (self.saved_index + 1) % len(self.saved) + return self.item, self + else: + raise StopIteration diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/lazy.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/lazy.py new file mode 100644 index 0000000000000000000000000000000000000000..d5846b08233ac29626b2f4322394dab6bd24d303 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/lazy.py @@ -0,0 +1,106 @@ +# mypy: ignore-errors + +import functools +from typing import Optional + +from .base import VariableTracker + + +class LazyCache: + """Container to cache the real VariableTracker""" + + def __init__(self, value, source): + assert source + self.value = value + self.source = source + self.vt: Optional[VariableTracker] = None + + def realize(self, parents_tracker): + assert self.vt is None + from ..symbolic_convert import InstructionTranslator + from .builder import VariableBuilder + + tx = InstructionTranslator.current_tx() + self.vt = VariableBuilder(tx, self.source)(self.value) + self.vt.parents_tracker.add(parents_tracker) + del self.value + del self.source + + +class LazyVariableTracker(VariableTracker): + """ + A structure that defers the creation of the actual VariableTracker + for a given underlying value until it is accessed. + + The `realize` function invokes VariableBuilder to produce the real object. + Once a LazyVariableTracker has been realized, internal bookkeeping will + prevent double realization. + + This object should be utilized for processing containers, or objects that + reference other objects where we may not want to take on creating all the + VariableTrackers right away. 
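+
+     Rough usage sketch (illustrative only; names as defined below):
+
+         lazy = LazyVariableTracker.create(value, source)
+         # ... later, the first real access forces construction ...
+         vt = lazy.realize()  # runs VariableBuilder once and caches the result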
+ """ + + _nonvar_fields = {"_cache", *VariableTracker._nonvar_fields} + + @staticmethod + def create(value, source, **options): + return LazyVariableTracker(LazyCache(value, source), source=source, **options) + + def __init__(self, _cache, **kwargs): + assert isinstance(_cache, LazyCache) + super().__init__(**kwargs) + self._cache = _cache + + def realize(self) -> VariableTracker: + """Force construction of the real VariableTracker""" + if self._cache.vt is None: + self._cache.realize(self.parents_tracker) + return self._cache.vt + + def unwrap(self): + """Return the real VariableTracker if it already exists""" + if self.is_realized(): + return self._cache.vt + return self + + def is_realized(self): + return self._cache.vt is not None + + def clone(self, **kwargs): + assert kwargs.get("_cache", self._cache) is self._cache + if kwargs.get("source", self.source) is not self.source: + self.realize() + return VariableTracker.clone(self.unwrap(), **kwargs) + + def __str__(self): + if self.is_realized(): + return self.unwrap().__str__() + return VariableTracker.__str__(self.unwrap()) + + def __getattr__(self, item): + return getattr(self.realize(), item) + + # most methods are auto-generated below, these are the ones we want to exclude + apply = VariableTracker.apply + copy = VariableTracker.copy + __post_init__ = VariableTracker.__post_init__ + __repr__ = VariableTracker.__repr__ + + +def _create_realize_and_forward(name): + @functools.wraps(getattr(VariableTracker, name)) + def realize_and_forward(self, *args, **kwargs): + return getattr(self.realize(), name)(*args, **kwargs) + + return realize_and_forward + + +def _populate(): + for name, value in VariableTracker.__dict__.items(): + if name not in LazyVariableTracker.__dict__: + if callable(value): + setattr(LazyVariableTracker, name, _create_realize_and_forward(name)) + + +_populate() diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/lists.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/lists.py new file mode 100644 index 0000000000000000000000000000000000000000..5d36a80024eea20f9194e175dce272943fef03ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/lists.py @@ -0,0 +1,811 @@ +# mypy: ignore-errors + +import collections +import functools +import inspect +import operator +import types +from typing import Dict, List, Optional + +import torch +import torch.fx +from ..._guards import Source + +from .. 
import polyfill, variables +from ..bytecode_transformation import create_call_function, create_instruction +from ..exc import unimplemented +from ..source import AttrSource, GetItemSource +from ..utils import ( + get_fake_value, + guard_if_dyn, + is_namedtuple, + istype, + iter_contains, + namedtuple_fields, + odict_values, +) +from .base import MutableLocal, VariableTracker +from .constant import ConstantVariable +from .functions import UserFunctionVariable, UserMethodVariable + + +class BaseListVariable(VariableTracker): + @staticmethod + def cls_for_instance(obj): + if is_namedtuple(obj): + return functools.partial(NamedTupleVariable, tuple_cls=type(obj)) + return BaseListVariable.cls_for(type(obj)) + + @staticmethod + def cls_for(obj): + return { + iter: ListIteratorVariable, + list: ListVariable, + slice: SliceVariable, + torch.Size: SizeVariable, + tuple: TupleVariable, + odict_values: ListVariable, + torch.nn.ParameterList: ListVariable, + torch.nn.ModuleList: ListVariable, + collections.deque: DequeVariable, + }[obj] + + def __init__( + self, + items: List[VariableTracker], + **kwargs, + ): + super().__init__(**kwargs) + assert isinstance(items, list) + assert all(isinstance(x, VariableTracker) for x in items) + self.items: List[VariableTracker] = items + + def _as_proxy(self): + return [x.as_proxy() for x in self.items] + + def modified(self, items, **kwargs): + return type(self)(items, **kwargs) + + @property + def value(self): + return self.as_python_constant() + + def as_python_constant(self): + return self.python_type()([x.as_python_constant() for x in self.items]) + + def as_proxy(self): + assert self.python_type() is not SizeVariable + return self.python_type()(self._as_proxy()) + + def getitem_const(self, arg: VariableTracker): + from .tensor import SymNodeVariable + + if isinstance(arg, SymNodeVariable): + index = arg.sym_num + else: + index = arg.as_python_constant() + + if isinstance(index, slice): + if self.source is not None: + return self.clone( + items=self.items[index], + source=GetItemSource(self.source, index), + mutable_local=MutableLocal() if self.mutable_local else None, + ) + else: + return self.clone( + items=self.items[index], + mutable_local=MutableLocal() if self.mutable_local else None, + ) + else: + assert isinstance(index, (int, torch.SymInt)) + return self.items[index] + + def unpack_var_sequence(self, tx): + return list(self.items) + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if name == "__getitem__": + from .tensor import TensorVariable + + assert not kwargs and len(args) == 1 + if isinstance(args[0], TensorVariable): + value = get_fake_value(args[0].as_proxy().node, tx) + if value.constant is not None and value.constant.numel() == 1: + value = variables.ConstantVariable.create(value.constant.item()) + else: + unimplemented("__getitem__ with non-constant tensor") + else: + value = args[0] + return self.getitem_const(value) + elif name == "__contains__": + assert len(args) == 1 + assert not kwargs + return iter_contains(self.items, args[0], tx) + elif name == "index": + from .builder import SourcelessBuilder + + return tx.inline_user_function_return( + SourcelessBuilder()(tx, polyfill.index), [self] + list(args), kwargs + ) + + return super().call_method(tx, name, args, kwargs) + + @staticmethod + def list_compare(tx, op, left, right): + from .builtin import BuiltinVariable + + eq_result = BaseListVariable.list_eq(tx, left, right) + if op is operator.eq: 
+ return eq_result + elif op is operator.ne: + return BuiltinVariable(operator.not_).call_function(tx, [eq_result], {}) + else: + unimplemented(f"list_compare {left} {op} {right}") + + @staticmethod + def list_eq(tx, left, right): + from .builtin import BuiltinVariable + + # Most list-like variables implement comparison ops the same way, + # so they can re-use this helper. + # There are quirks though, like how `tuple([2]) == torch.Size([2])`, + # but `tuple([2]) != list([2])` + if len(left.items) != len(right.items): + return ConstantVariable.create(False) + if len(left.items) == 0: + return ConstantVariable.create(True) + + # Generic list comparison works by iterating over left aka self and right the compared-to list. + # If we hit here, their lengths are the same and they cannot be expressed as python constants. + # So, we iterate over the zipped list items. + comps = [] + for l, r in zip(left.items, right.items): + comp = BuiltinVariable(operator.eq).call_function(tx, [l, r], {}) + if comp.is_python_constant() and not comp.as_python_constant(): + # early exit in false case + return comp + comps.append(comp) + + return functools.reduce( + lambda a, b: BuiltinVariable(operator.and_).call_function(tx, [a, b], {}), + comps, + ) + + +class RangeVariable(BaseListVariable): + def __init__(self, items, **kwargs): + items_to_map = items + start = variables.ConstantVariable.create(0) + stop = None + step = variables.ConstantVariable.create(1) + + if len(items_to_map) == 1: + (stop,) = items_to_map + elif len(items_to_map) == 2: + start, stop = items_to_map + elif len(items_to_map) == 3: + start, stop, step = items_to_map + else: + raise AssertionError() + + assert stop is not None + super().__init__([start, stop, step], **kwargs) + + def python_type(self): + return range + + def as_python_constant(self): + return range(*[x.as_python_constant() for x in self.items]) + + def as_proxy(self): + return self.python_type()(*self._as_proxy()) + + def unpack_var_sequence(self, tx): + return [variables.ConstantVariable.create(x) for x in self.as_python_constant()] + + def reconstruct(self, codegen): + assert "range" not in codegen.tx.f_globals + codegen.append_output(codegen.create_load_python_module(range, True)) + codegen.foreach(self.items) + codegen.extend_output(create_call_function(3, False)) + + def var_getattr(self, tx, name): + fields = ["start", "stop", "step"] + if name not in fields: + unimplemented(f"range.{name}") + return self.items[fields.index(name)] + + +class CommonListMethodsVariable(BaseListVariable): + """ + Implement methods common to List and other List-like things + """ + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if name == "append" and self.mutable_local: + assert not kwargs + (arg,) = args + tx.output.side_effects.mutation(self) + self.items.append(arg) + return ConstantVariable.create(None) + elif ( + name == "extend" + and self.mutable_local + and args + and args[0].has_unpack_var_sequence(tx) + ): + assert not kwargs + (arg,) = args + seq = arg.unpack_var_sequence(tx) + tx.output.side_effects.mutation(self) + self.items.extend(seq) + return ConstantVariable.create(None) + elif name == "insert" and self.mutable_local: + assert not kwargs + idx, value = args + const_idx = idx.as_python_constant() + tx.output.side_effects.mutation(self) + self.items.insert(const_idx, value) + return ConstantVariable.create(None) + elif name == "pop" and self.mutable_local: + assert not kwargs + 
tx.output.side_effects.mutation(self) + return self.items.pop(*[a.as_python_constant() for a in args]) + elif name == "clear" and self.mutable_local: + assert not kwargs and not args + tx.output.side_effects.mutation(self) + self.items.clear() + return ConstantVariable.create(None) + elif ( + name == "__setitem__" + and self.mutable_local + and args + and args[0].is_python_constant() + ): + assert not kwargs + key, value = args + tx.output.side_effects.mutation(self) + if isinstance(key, SliceVariable): + self.items[key.as_python_constant()] = list(value.items) + else: + self.items[key.as_python_constant()] = value + return ConstantVariable.create(None) + elif name == "copy": + # List copy() doesn't have args and kwargs + assert not kwargs + assert not args + items = list(self.items) + return self.modified(items, mutable_local=MutableLocal()) + else: + return super().call_method(tx, name, args, kwargs) + + +class ListVariable(CommonListMethodsVariable): + def python_type(self): + return list + + def reconstruct(self, codegen): + codegen.foreach(self.items) + codegen.append_output(create_instruction("BUILD_LIST", arg=len(self.items))) + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if ( + name == "__setitem__" + and self.mutable_local + and args + and args[0].is_python_constant() + ): + assert not kwargs + key, value = args + tx.output.side_effects.mutation(self) + if isinstance(key, SliceVariable): + if not value.has_unpack_var_sequence(tx): + unimplemented( + f"Missing dynamo support for expanding {value} into a list for slice assignment." + ) + self.items[key.as_python_constant()] = value.unpack_var_sequence(tx) + else: + self.items[key.as_python_constant()] = value + return ConstantVariable.create(None) + else: + return super().call_method(tx, name, args, kwargs) + + def call_hasattr(self, tx, name: str) -> "VariableTracker": + if self.python_type() is not list: + return super().call_hasattr(tx, name) + return variables.ConstantVariable.create(hasattr([], name)) + + +class DequeVariable(CommonListMethodsVariable): + def python_type(self): + return collections.deque + + def reconstruct(self, codegen): + assert "deque" not in codegen.tx.f_globals + codegen.append_output( + codegen.create_load_python_module(collections.deque, True) + ) + codegen.foreach(self.items) + codegen.extend_output(create_call_function(len(self.items), False)) + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if ( + name == "__setitem__" + and self.mutable_local + and args + and args[0].is_python_constant() + ): + assert not kwargs + key, value = args + assert key.is_python_constant() and isinstance( + key.as_python_constant(), int + ) + tx.output.side_effects.mutation(self) + self.items[key.as_python_constant()] = value + return ConstantVariable.create(None) + elif name == "extendleft" and self.mutable_local: + assert not kwargs + + (arg,) = args + prefix = arg.unpack_var_sequence(tx) + prefix.reverse() + tx.output.side_effects.mutation(self) + self.items = prefix + list(self.items) + return ConstantVariable.create(None) + elif name == "popleft" and self.mutable_local: + assert not args + assert not kwargs + item = self.items[0] + tx.output.side_effects.mutation(self) + self.items = self.items[1:] + return item + elif name == "appendleft" and self.mutable_local: + assert not kwargs + tx.output.side_effects.mutation(self) + 
self.items = [args[0]] + list(self.items) + return ConstantVariable.create(None) + else: + return super().call_method(tx, name, args, kwargs) + + +class TupleVariable(BaseListVariable): + def python_type(self): + return tuple + + def reconstruct(self, codegen): + codegen.foreach(self.items) + codegen.append_output(create_instruction("BUILD_TUPLE", arg=len(self.items))) + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + return super().call_method(tx, name, args, kwargs) + + def call_hasattr(self, tx, name: str) -> "VariableTracker": + if self.python_type() is not tuple: + return super().call_hasattr(tx, name) + return variables.ConstantVariable.create(hasattr((), name)) + + +class SizeVariable(TupleVariable): + """torch.Size(...)""" + + def __init__( + self, + items: List[VariableTracker], + proxy: Optional[torch.fx.Proxy] = None, + **kwargs, + ): + self.proxy = proxy + super().__init__(items, **kwargs) + + def python_type(self): + return torch.Size + + def as_proxy(self): + if self.proxy is not None: + return self.proxy + + # torch.Size needs special handling. Normally, we pun a list-like + # container to directly contain Proxy/Node objects from FX, and FX + # knows to look inside containers (via map_aggregate). But torch.Size + # is weird; although it subclasses from tuple, it doesn't allow + # members which aren't int-like (rejecting Proxy and Node). This + # means we can't use the normal representation trick + # torch.Size([proxy0, proxy1]). I looked into seeing if I could + # relax torch.Size in PyTorch proper, but if torch.Size constructor + # sees a type that it doesn't recognize, it will try to call + # __index__() on it, so there is no BC way to actually change this + # behavior (though it occurs to me that I could have just added a + # YOLO no checking alternate constructor.) + # + # To work around this problem, I represent a torch.Size proxy as + # a straight up proxy, that would have been constructed by taking + # the constituent proxies as arguments. This trick can be generally + # used for any construct that we need a proxy for but we can't + # directly represent as an aggregate; I don't see very many examples + # of this in torchdynamo though! + + # Look for a proxy. 
If there are none, do the legacy behavior + tracer = None + proxies = self._as_proxy() + for proxy in proxies: + if isinstance(proxy, torch.fx.Proxy): + tracer = proxy.tracer + break + + if tracer is None: + return torch.Size(proxies) + + proxy = tracer.create_proxy("call_function", torch.Size, (proxies,), {}) + proxy.node.meta["example_value"] = torch.Size( + [ + p.node.meta["example_value"] if not isinstance(p, int) else p + for p in proxies + ] + ) + return proxy + + def reconstruct(self, codegen): + codegen.load_import_from("torch", "Size") + codegen.foreach(self.items) + build_torch_size = [ + create_instruction("BUILD_TUPLE", arg=len(self.items)), + ] + create_call_function(1, True) + codegen.extend_output(build_torch_size) + + def unpack_var_sequence(self, tx): + return list(self.items) + + def numel(self, tx): + from .builtin import BuiltinVariable + from .tensor import SymNodeVariable + + const_result = 1 + sym_sizes = [] + + for v in self.items: + if isinstance(v, ConstantVariable): + const_result *= v.value + else: + assert isinstance(v, SymNodeVariable), type(v) + # Delay proxy calls until we know it will be necessary + sym_sizes.append(v) + + result = ConstantVariable.create(const_result) + if sym_sizes and const_result == 1: + # Skip multiplying by 1 + result, *sym_sizes = sym_sizes + + if not sym_sizes or const_result == 0: + return result + + mul = BuiltinVariable(operator.mul) + for v in sym_sizes: + result = mul.call_function(tx, [result, v], {}) + return result + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if name == "__getitem__": + assert not kwargs and len(args) == 1 + out = self.get_item_dyn(tx, args[0]) + return out + elif name == "numel": + assert not args and not kwargs + return self.numel(tx) + + return super().call_method(tx, name, args, kwargs) + + def get_item_dyn(self, tx, arg: VariableTracker): + from .tensor import SymNodeVariable + + if isinstance(arg, SymNodeVariable): + index = arg.sym_num + else: + index = arg.as_python_constant() + if isinstance(index, slice): + return SizeVariable(self.items[index]) + else: + assert isinstance(index, (int, torch.SymInt)) + return self.items[index] + + +class NamedTupleVariable(TupleVariable): + def __init__(self, items, tuple_cls, **kwargs): + super().__init__(items, **kwargs) + self.tuple_cls = tuple_cls + + def python_type(self): + return self.tuple_cls + + def as_python_constant(self): + return self.python_type()(*[x.as_python_constant() for x in self.items]) + + def as_proxy(self): + assert self.python_type() is not SizeVariable + return self.python_type()(*self._as_proxy()) + + def reconstruct(self, codegen): + create_fn = getattr(self.tuple_cls, "_make", self.tuple_cls) + codegen.append_output(codegen._create_load_const(create_fn)) + codegen.foreach(self.items) + codegen.extend_output( + [ + create_instruction("BUILD_TUPLE", arg=len(self.items)), + ] + + create_call_function(1, True) + ) + + def var_getattr(self, tx, name): + def check_and_create_method(): + method = inspect.getattr_static(self.tuple_cls, name, None) + if isinstance(method, classmethod): + # We need the unbounded cls method to avoid the inline __self__ + return UserMethodVariable( + method.__func__, + variables.UserDefinedClassVariable(self.tuple_cls), + ) + elif isinstance(method, staticmethod): + return UserFunctionVariable(method.__func__) + elif inspect.isfunction(method): + return UserMethodVariable(method, self) + else: + return None + + 
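+ # Example (informal): for Point = collections.namedtuple("Point", "x y"),
+ # accessing "x" resolves to self.items[0] via the fields lookup below, while
+ # a non-field name such as "_replace" goes through check_and_create_method()
+ # above and comes back as a UserMethodVariable bound to this variable.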
fields = namedtuple_fields(self.tuple_cls) + if name not in fields: + method = check_and_create_method() + if not method: + super().var_getattr(tx, name) + return method + return self.items[fields.index(name)] + + def call_hasattr(self, tx, name: str) -> "VariableTracker": + fields = namedtuple_fields(self.tuple_cls) + return variables.ConstantVariable.create(name in fields) + + +class SliceVariable(BaseListVariable): + def __init__(self, items, **kwargs): + items_to_map = items + start, stop, step = [variables.ConstantVariable.create(None)] * 3 + + if len(items_to_map) == 1: + (stop,) = items_to_map + elif len(items_to_map) == 2: + start, stop = items_to_map + elif len(items_to_map) == 3: + start, stop, step = items_to_map + else: + raise AssertionError() + + if isinstance(start, variables.TensorVariable) or isinstance( + stop, variables.TensorVariable + ): + unimplemented("Dynamic slicing on data-dependent value is not supported") + + super().__init__([start, stop, step], **kwargs) + + def as_proxy(self): + return slice(*self._as_proxy()) + + def python_type(self): + return slice + + def as_python_constant(self): + return slice(*[guard_if_dyn(x) for x in self.items]) + + def reconstruct(self, codegen): + codegen.foreach(self.items) + codegen.append_output(create_instruction("BUILD_SLICE", arg=len(self.items))) + + def var_getattr(self, tx, name): + fields = ["start", "stop", "step"] + if name not in fields: + unimplemented(f"slice.{name}") + return self.items[fields.index(name)] + + +class ListIteratorVariable(VariableTracker): + def __init__(self, items, index: int = 0, **kwargs): + super().__init__(**kwargs) + assert isinstance(items, list) + # Removing this check as it slows things down too much + # https://github.com/pytorch/pytorch/pull/87533#issuecomment-1287574492 + + # assert all(isinstance(x, VariableTracker) for x in items) + self.items = items + self.index = index + + def __repr__(self): + return f"{self.__class__.__name__}(length={len(self.items)}, index={repr(self.index)})" + + def next_variables(self, tx): + assert self.mutable_local + old_index = self.index + if old_index >= len(self.items): + raise StopIteration() + tx.output.side_effects.mutation(self) + self.index += 1 + return self.items[old_index], self + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ): + if name == "__contains__": + assert len(args) == 1 + assert not kwargs + return iter_contains(self.items[self.index :], args[0], tx) + + return super().call_method(tx, name, args, kwargs) + + def as_python_constant(self): + if self.index > 0: + raise NotImplementedError() + return iter([x.as_python_constant() for x in self.items]) + + def unpack_var_sequence(self, tx): + return list(self.items[self.index :]) + + def reconstruct(self, codegen): + remaining_items = self.items[self.index :] + codegen.foreach(remaining_items) + codegen.extend_output( + [ + create_instruction("BUILD_TUPLE", arg=len(remaining_items)), + create_instruction("GET_ITER"), + ] + ) + + +class TupleIteratorVariable(ListIteratorVariable): + pass + + +class RestrictedListSubclassVariable(ListVariable): + """ + This is a special case of UserDefinedObjectVariable where: + 1) The user subclasses list + 2) None of the list methods are overriden, merely some new methods are added + + In these cases, we can prevent graph breaks by not using the general + UserDefinedObjectVariable machinery and instead treating it like + a ListVariable. 
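+
+     For example (illustrative), a subclass along the lines of
+
+         class MyList(list):
+             def second(self):
+                 return self[1]
+
+     only adds a new method and overrides nothing on list, which is the kind
+     of class _is_non_conflicting_subclass below is meant to accept.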
+ """ + + _nonvar_fields = {"user_cls", "user_cls_source", *ListVariable._nonvar_fields} + _allowed_names = { + "__call__", + "__module__", + "__dict__", + "__doc__", + "__name__", + "__qualname__", + } + _disallowed_names = { + "__getattribute__", + "__getattr__", + "__setattr__", + } + + @classmethod + def _is_non_conflicting_subclass( + cls, + user_cls: type, + python_cls: type, + ): + """Ensures user_cls inherits from python_cls (e.g. list) and does not override any methods on python_cls""" + if ( + not istype(user_cls, type) + or user_cls.__bases__ != (python_cls,) + or user_cls.__mro__ != (user_cls, python_cls, object) + ): + return False # not subclass + return not any( + hasattr(python_cls, name) or name in cls._disallowed_names + for name in set(user_cls.__dict__.keys()) - cls._allowed_names + ) + + @classmethod + def is_matching_cls(cls, user_cls: type): + return cls._is_non_conflicting_subclass(user_cls, list) + + def __init__(self, items, *, user_cls: type, user_cls_source: Source, **kwargs): + super().__init__(items=items, **kwargs) + self.user_cls = user_cls + self.user_cls_source = user_cls_source + assert istype(user_cls, type) + assert isinstance(user_cls_source, Source) + + def python_type(self): + return self.user_cls + + def as_proxy(self): + return [x.as_proxy() for x in self.items] + + def as_python_constant(self): + raise NotImplementedError() + + def is_python_constant(self): + return False + + @property + def value(self): + raise AttributeError("value") + + def modified(self, items, **kwargs): + return type(self)( + items, + user_cls=self.user_cls, + user_cls_source=self.user_cls_source, + **kwargs, + ) + + def reconstruct(self, codegen): + codegen(self.user_cls_source) + super().reconstruct(codegen) + codegen.extend_output(create_call_function(1, True)) + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if name in self.user_cls.__dict__: + method = self.user_cls.__dict__[name] + if isinstance(method, types.FunctionType): + # inline the method + source = AttrSource(self.user_cls_source, name) + return UserMethodVariable(method, self, source=source).call_function( + tx, args, kwargs + ) + unimplemented( + f"RestrictedListSubclassVariable method {self.user_cls.__name__}.{name}" + ) + return super().call_method(tx, name, args, kwargs) + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + return self.call_method(tx, "__call__", args, kwargs) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/misc.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..c7b8635733e2f90a831d098a24da228054555eab --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/misc.py @@ -0,0 +1,886 @@ +# mypy: ignore-errors + +import collections +import dataclasses +import functools +import inspect +import itertools +import sys +import types +from typing import Dict, List + +import torch._C +import torch._numpy as tnp +import torch.utils._pytree as pytree +from .. 
import config, variables +from ..bytecode_transformation import create_call_function, create_instruction +from ..exc import unimplemented +from ..guards import GuardBuilder, install_guard +from ..source import AttrSource, GetItemSource, ODictGetItemSource, TypeSource +from ..utils import ( + check_constant_args, + check_unspec_python_args, + identity, + is_tensor_base_attr_getter, + proxy_args_kwargs, +) +from .base import VariableTracker +from .functions import NestedUserFunctionVariable, UserFunctionVariable +from .user_defined import UserDefinedObjectVariable + + +class SuperVariable(VariableTracker): + def __init__(self, typevar, objvar=None, specialized=False, **kwargs): + super().__init__(**kwargs) + # typevar is the fist argument to super(). In the case where no argument + # is provided to super(), it is the __class__ object where + # the super() function is being called + self.typevar = typevar + # objvar here must be an instance or subtype of typevar. + # In the case where super() is called without arguments, it is the first argument + # to the current function where super() is called from (self for regular method, + # cls for a classmethod) + self.objvar = objvar + self.specialized = specialized # directly get attr from self.typevar if true + + def reconstruct(self, codegen): + codegen(variables.BuiltinVariable(super)) + codegen(self.typevar) + if self.objvar is not None: + codegen(self.objvar) + codegen.extend_output(create_call_function(2, True)) + else: + codegen.extend_output(create_call_function(1, True)) + + def _resolved_getattr_and_source(self, tx, name): + assert self.objvar, "1-arg super not implemented" + if self.specialized: + return getattr(self.typevar.as_python_constant(), name) + search_type = self.typevar.as_python_constant() + + # The rest of this function does two things: + # - Walk the mro to find where the attribute comes from to be + # able to provide accurate source + # - Call the getattr to get the object + + # Find the class object, where the function lives. + # When objvar is "self", use type(self), when objvar is "cls", use it as-is + type_to_use = self.objvar.python_type() + type_to_use_source = ( + TypeSource(self.objvar.source) if self.objvar.source else None + ) + if issubclass(type_to_use, type): + type_to_use = self.objvar.value + type_to_use_source = self.objvar.source + + source = None + if self.objvar.source is not None: + # Walk the mro tuple to find out the actual class where the + # attribute resides. + search_mro = type_to_use.__mro__ + start_index = search_mro.index(search_type) + 1 + for index in range(start_index, len(search_mro)): + if hasattr(search_mro[index], name): + # Equivalent of something like type(L['self']).__mro__[1].attr_name + source = AttrSource( + GetItemSource(AttrSource(type_to_use_source, "__mro__"), index), + name, + ) + break + + # TODO(jansel): there is a small chance this could trigger user code, prevent that + return getattr(super(search_type, type_to_use), name), source + + def var_getattr(self, tx, name: str) -> "VariableTracker": + # Check if getattr is a constant. If not, delay the actual work by + # wrapping the result in GetAttrVariable. Mostly super is called with a + # method, so most of the work is delayed to call_function. + # + # We could have just implemented a const_getattr. However, super is + # special when it comes to finding sources. Compared to other VTs, super + # requires the attr name to walk the mro and find the actual source (and + # not just AttrSource). 
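+ # Informal example: with
+ #     class A:
+ #         FLAG = True
+ #     class B(A):
+ #         def f(self):
+ #             return super().FLAG
+ # the mro walk in _resolved_getattr_and_source finds A.FLAG, and since it is
+ # a literal we return a ConstantVariable here (guarded when a source is
+ # available); non-literal results, the common method case, are wrapped in
+ # GetAttrVariable and resolved later in call_method.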
+ value, source = self._resolved_getattr_and_source(self, name) + if not variables.ConstantVariable.is_literal(value): + return GetAttrVariable(self, name) + if source: + install_guard(source.make_guard(GuardBuilder.CONSTANT_MATCH)) + return variables.ConstantVariable.create(value, source=source) + return variables.ConstantVariable.create(value) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + inner_fn, source = self._resolved_getattr_and_source(self, name) + + if inner_fn is object.__init__: + return LambdaVariable(identity) + elif inner_fn is torch.nn.Module.__init__: + objvar = self.objvar + from ..side_effects import AttributeMutationNew + + if ( + isinstance(objvar, variables.UserDefinedObjectVariable) + and isinstance(objvar.mutable_local, AttributeMutationNew) + and not (args or kwargs) + ): + tx.output.side_effects.store_attr( + objvar, + "__call_nn_module_init", + variables.ConstantVariable.create(True), + ) + return variables.ConstantVariable.create(None) + else: + unimplemented("super() nn.Module.__init__") + elif isinstance(inner_fn, types.FunctionType): + return variables.UserFunctionVariable( + inner_fn, source=source + ).call_function(tx, [self.objvar] + args, kwargs) + elif isinstance(inner_fn, types.MethodType): + return variables.UserMethodVariable( + inner_fn.__func__, self.objvar, source=source + ).call_function(tx, args, kwargs) + elif ( + inner_fn is collections.OrderedDict.__getitem__ + and isinstance(self.objvar, variables.UserDefinedObjectVariable) + and self.objvar.source + and len(args) == 1 + and len(kwargs) == 0 + and args[0].is_python_constant() + ): + from .builder import VariableBuilder + + key = args[0].as_python_constant() + return VariableBuilder(tx, ODictGetItemSource(self.objvar.source, key))( + collections.OrderedDict.__getitem__(self.objvar.value, key) + ) + elif inner_fn in ( + collections.OrderedDict.__setitem__, + object.__setattr__, + ) and isinstance(self.objvar, variables.CustomizedDictVariable): + assert not kwargs and len(args) == 2 + return super(variables.CustomizedDictVariable, self.objvar).call_method( + tx, "__setitem__", args, kwargs + ) + else: + unimplemented(f"non-function or method super: {inner_fn}") + + +class UnknownVariable(VariableTracker): + """ + It could be anything! + """ + + +class DelayGraphBreakVariable(UnknownVariable): + """ + Used to insert a dummy variable in the stack to do the graph break at CALL_FUNCTION. 
+ """ + + +class ComptimeVariable(VariableTracker): + """ + This variable is special, it lets you execute arbitrary code at + Dynamo compile time + """ + + def reconstruct(self, codegen): + raise NotImplementedError("comptime is special form") + + def var_getattr(self, tx, name: str) -> "VariableTracker": + from ..comptime import comptime + + # To support the comptime.print_graph convenience accessors + from .functions import UserFunctionVariable + + return UserFunctionVariable( + getattr(comptime, name), source=AttrSource(self.source, name) + ) + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from ..comptime import ComptimeContext + + # TODO: support an expression form as well + + assert not kwargs + assert len(args) == 1 + fn = args[0] + if isinstance(fn, UserFunctionVariable): + fn.get_function()(ComptimeContext(tx)) + elif isinstance(fn, NestedUserFunctionVariable): + # We have to manually bind the freevars ourselves + code = fn.get_code() + assert not fn.closure, ( + "comptime function must not have free variables, " + f"but these variables were free: {code.co_freevars}" + ) + func = types.FunctionType( + code, + fn.f_globals, + fn.fn_name.as_python_constant(), + tuple(fn.defaults.items) if fn.defaults else None, + # We could automatically promote free variables into + # ComptimeVar but this is confusing if you access + # a free variable that we actually DO have the runtime + # value for + # tuple(make_cell(ComptimeVar(i)) for i in fn.closure.items) + tuple(), + ) + func(ComptimeContext(tx)) + else: + raise RuntimeError(f"unsupported argument to comptime: {type(fn)}") + + return variables.ConstantVariable.create(None) + + +class ClosureVariable(UnknownVariable): + def __init__(self, name, **kwargs): + super().__init__(**kwargs) + self.name = name + + def reconstruct(self, codegen): + codegen.append_output(codegen.create_load_closure(self.name)) + + +# closure variable created by an inlined function +class InlinedClosureVariable(UnknownVariable): + def __init__(self, name, **kwargs): + super().__init__(**kwargs) + self.name = name + + def reconstruct(self, codegen): + codegen.append_output(codegen.create_load_closure(self.name)) + + +class NewCellVariable(VariableTracker): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + +class NewGlobalVariable(VariableTracker): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + +class InspectSignatureVariable(VariableTracker): + """represents inspect.signature(...)""" + + @staticmethod + def create(callable, **kwargs): + if kwargs: + unimplemented(f"inspect.signature with {kwargs}") + return InspectSignatureVariable(callable) + + def __init__(self, inspected: VariableTracker, **kwargs): + super().__init__(**kwargs) + self.inspected = inspected + + def var_getattr(self, tx, name: str) -> "VariableTracker": + if name == "parameters": + return variables.ConstDictVariable( + { + variables.ConstantVariable.create(name): InspectParameterVariable() + for name in self.inspected.inspect_parameter_names() + }, + user_cls=dict, + ) + return super().var_getattr(tx, name) + + +class InspectParameterVariable(VariableTracker): + """This is not implemented, if used will graph break.""" + + pass + + +def produce_trampoline_autograd_apply(fn_cls): + def trampoline_autograd_apply(*args, **kwargs): + return fn_cls.apply(*args, **kwargs) + + trampoline_autograd_apply._origin = produce_trampoline_autograd_apply + return trampoline_autograd_apply + + +class 
AutogradFunctionVariable(VariableTracker): + """represents a torch.autograd.Function subclass""" + + def __init__(self, fn_cls, **kwargs): + super().__init__(**kwargs) + self.fn_cls = fn_cls + + def call_apply(self, tx, args, kwargs): + requires_grad = False + + def visit(node): + nonlocal requires_grad + if isinstance(node, variables.TensorVariable): + if node.requires_grad is not False: + requires_grad = True + if isinstance(node, variables.NNModuleVariable): + if node.is_training(tx): + requires_grad = True + return node + + VariableTracker.apply(visit, (args, kwargs)) + + if ( + requires_grad + and torch.is_grad_enabled() + and config.capture_autograd_function + ): + # Note - this is the same check used in autograd/function.py, except inverted. + # If we want to support functorch transforms here, we will need to enable this. + if ( + self.fn_cls.setup_context + != torch.autograd.function._SingleLevelFunction.setup_context + ): + unimplemented( + "NYI - autograd.Function with custom setup_context method" + ) + + vjp_fn = self.fn_cls.vjp # type: ignore[attr-defined] + if vjp_fn is not torch.autograd.Function.vjp: + unimplemented("NYI - User defind vjp") + + jvp_fn = self.fn_cls.jvp # type: ignore[attr-defined] + if jvp_fn is not torch.autograd.Function.jvp: + unimplemented("NYI - User defind jvp") + + from .higher_order_ops import AutogradFunctionApplyVariable + + source = self.source + if source is None: + source = AttrSource( + tx.import_source(self.fn_cls.__module__), self.fn_cls.__name__ + ) + + return AutogradFunctionApplyVariable( + self.fn_cls.forward, + self.fn_cls.backward, + source, + source=AttrSource(source, member="apply"), + ).call_function(tx, args, kwargs) + + if self.source: + source = AttrSource(self.source, "forward") + else: + source = None + + fn = self.fn_cls.forward + ctx = AutogradFunctionContextVariable.create(tx) + args = [ctx, *args] + if isinstance(fn, types.FunctionType): + return variables.UserFunctionVariable(fn, source=source).call_function( + tx, args, kwargs + ) + elif isinstance(fn, types.MethodType): + return variables.UserMethodVariable( + fn.__func__, + variables.UserDefinedClassVariable(self.fn_cls), + source=source, + ).call_function(tx, args, kwargs) + else: + unimplemented( + f"non-function or method in subclass of torch.autograd.Function: {fn}" + ) + + def call_function(self, tx, args, kwargs): + return AutogradFunctionVariable(self.fn_cls) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ): + from ..trace_rules import is_callable_allowed + from .builder import wrap_fx_proxy + + if name == "apply": + if is_callable_allowed(self.fn_cls): + trampoline_autograd_apply = produce_trampoline_autograd_apply( + self.fn_cls + ) + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + trampoline_autograd_apply, + *proxy_args_kwargs(args, kwargs), + ), + ) + else: + return self.call_apply(tx, args, kwargs) + + else: + unimplemented(f"Unsupported method: {name}") + + +@dataclasses.dataclass +class SavedTensorBox: + tensors: List[VariableTracker] = dataclasses.field(default_factory=list) + + +class AutogradFunctionContextVariable(UserDefinedObjectVariable): + """ + Tracks an autograd.Function() context using mutation tracking in side_effects.py + """ + + _nonvar_fields = { + "proxy", + "inference", + *UserDefinedObjectVariable._nonvar_fields, + } + + def __init__( + self, + value, + value_type=None, + inference=False, + proxy=None, + saved_tensors=None, + 
**kwargs, + ): + super().__init__(value=value, value_type=value_type, **kwargs) + self.inference = inference + self.proxy = proxy + self.saved_tensors = saved_tensors + + @staticmethod + def create(tx): + proxy = tx.output.create_proxy( + "call_function", torch.autograd.function.FunctionCtx, tuple(), {} + ) + out = tx.output.side_effects.track_object_new( + None, + torch.autograd.function.FunctionCtx, + functools.partial( + AutogradFunctionContextVariable, + inference=True, + proxy=proxy, + saved_tensors=SavedTensorBox(), + ), + {}, + ) + proxy.node.meta["example_value"] = out.value + return out + + def as_proxy(self): + if self.proxy is None: + unimplemented("proxy not set") + return self.proxy + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name != "save_for_backward": + unimplemented(f"autograd.Function context method: {name}") + if self.saved_tensors is None: + unimplemented( + "save_for_backward only supported on a newly constructed FunctionCtx" + ) + + if not self.inference: + assert self.source and not kwargs + tx.output.side_effects.track_save_for_backward(self, args) + + # In eager mode, multiple calls to .save_for_backward() will overwrite previous calls. + if len(self.saved_tensors.tensors) > 0: + self.saved_tensors.tensors = [] + for arg in args: + self.saved_tensors.tensors.append(arg) + return variables.ConstantVariable.create(None) + + def var_getattr(self, tx, name): + if name == "save_for_backward": + return LambdaVariable( + lambda *args, **kwargs: self.call_method(tx, name, args, kwargs) + ) + if name == "saved_tensors" and self.saved_tensors is not None: + return variables.TupleVariable(list(self.saved_tensors.tensors)) + return super().var_getattr(tx, name) + + +class LambdaVariable(VariableTracker): + def __init__(self, fn, **kwargs): + super().__init__(**kwargs) + self.fn = fn + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + return self.fn(*args, **kwargs) + + +class GetAttrVariable(VariableTracker): + def __init__(self, obj, name, **kwargs): + super().__init__(**kwargs) + assert isinstance(obj, VariableTracker) + assert isinstance(name, str) + self.obj = obj + self.name = name + + def __str__(self): + return f"{self.__class__.__name__}({self.obj}, {self.name})" + + @staticmethod + def create_getattr_proxy(base_proxy: torch.fx.Proxy, attr): + return getattr(base_proxy, attr) + + def as_proxy(self): + return GetAttrVariable.create_getattr_proxy(self.obj.as_proxy(), self.name) + + def const_getattr(self, tx, name): + if not isinstance(self.obj, variables.NNModuleVariable): + raise NotImplementedError() + step1 = tx.output.get_submodule(self.obj.module_key) + if self.name not in step1.__dict__: + raise NotImplementedError() + step2 = inspect.getattr_static(step1, self.name) + if name not in step2.__dict__: + raise NotImplementedError() + return inspect.getattr_static(step2, name) + + def reconstruct(self, codegen): + codegen(self.obj) + codegen.extend_output(codegen.create_load_attrs(self.name)) + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + return self.obj.call_method(tx, self.name, args, kwargs) + + +class MethodWrapperVariable(VariableTracker): + def __init__(self, method_wrapper, **kwargs): + super().__init__(**kwargs) + self.method_wrapper = method_wrapper + + def call_function( + self, tx, args: 
"List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + if is_tensor_base_attr_getter(self.method_wrapper) and isinstance( + args[0], variables.TensorVariable + ): + assert len(args) == 1 and len(kwargs) == 0 + + return args[0].var_getattr(tx, self.method_wrapper.__self__.__name__) + + super().call_function(tx, args, kwargs) + + def is_python_constant(self): + return True + + def as_python_constant(self): + return self.method_wrapper + + +class GetSetDescriptorVariable(VariableTracker): + def __init__(self, desc, **kwargs): + super().__init__(**kwargs) + self.desc = desc + + def var_getattr(self, tx, name): + if name == "__get__" and self.source: + from .builder import VariableBuilder + + return VariableBuilder(tx, AttrSource(self.source, "__get__"))( + self.desc.__get__ + ) + else: + return super().var_getattr(tx, name) + + def is_python_constant(self): + return True + + def as_python_constant(self): + return self.desc + + +class PythonModuleVariable(VariableTracker): + def __init__(self, value: types.ModuleType, **kwargs): + super().__init__(**kwargs) + self.value = value + self.is_torch = self.value is torch or self.value.__name__.startswith("torch.") + + def python_type(self): + return types.ModuleType + + def as_python_constant(self): + return self.value + + def __repr__(self): + return f"PythonModuleVariable({self.value})" + + def call_hasattr(self, tx, name): + if self.is_torch: + result = hasattr(self.value, name) + return variables.ConstantVariable.create(result) + return super().call_hasattr(tx, name) + + +class TypingVariable(VariableTracker): + def __init__(self, value, **kwargs): + super().__init__(**kwargs) + self.value = value + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name == "__getitem__" and len(args) == 1: + return variables.ConstantVariable.create( + self.value[args[0].as_python_constant()], + ) + unimplemented("typing") + + def python_type(self): + return type(self.value) + + def as_python_constant(self): + return self.value + + +@functools.lru_cache(maxsize=1) +def get_np_to_tnp_map(): + from ..utils import NP_TO_TNP_MODULE + + np_fn_to_tnp_fn = {} + + for np_mod, tnp_mod in NP_TO_TNP_MODULE.items(): + for fn_name, tnp_fn in tnp_mod.__dict__.items(): + if callable(tnp_fn): + # some internal details do leak from tnp + # which are not part of numpy API. + if np_fn := getattr(np_mod, fn_name, None): + np_fn_to_tnp_fn[np_fn] = tnp_fn + + return np_fn_to_tnp_fn + + +class NumpyVariable(VariableTracker): + """ + Wrapper around `numpy.*`. Currently, is able to trace a small subset of numpy functions as well as numpy dtypes. + """ + + constant_fold_functions = (tnp.issubdtype,) + + def __init__(self, value, **kwargs): + super().__init__(**kwargs) + self.value = value + + @classmethod + def can_constant_fold_through(cls, fn): + mod = fn.__module__.split(".") + assert len(mod) >= 2 and mod[:2] == ["torch", "_numpy"] + return fn in cls.constant_fold_functions + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + if not config.trace_numpy: + unimplemented(f"numpy.{self.value}()") + + from ..utils import numpy_to_tensor_wrapper + + from .tensor import NumpyNdarrayVariable + + # lookup method name in tnp. Things like np.dtype(float) are not supported yet. + if self.value.__name__ == "dtype": + unimplemented( + f"numpy dtype function is not supported yet. 
Got type {type(self.value)}." + ) + else: # We are dealing with a callable. + func = get_np_to_tnp_map().get(self.value) + if func is None: + unimplemented( + f"Can't find numpy function {self.value} in torch._numpy. " + " Please file an issue to request support for this function." + ) + + if ( + func.__module__ == "torch._numpy.random" + and config.use_numpy_random_stream + ): + msg = f"delegate '{func.__qualname__}' to NumPy itself via " + msg += f"confg.use_numpy_random_stream={config.use_numpy_random_stream}" + unimplemented(msg) + + args, kwargs = NumpyNdarrayVariable.patch_args(func.__name__, args, kwargs) + + constant_args = check_constant_args(args, kwargs) + unspec_python_args = check_unspec_python_args(args, kwargs) + + if self.can_constant_fold_through(func) and ( + constant_args or unspec_python_args + ): + # constant fold + return variables.ConstantVariable.create( + self.as_python_constant()( + *[x.as_python_constant() for x in args], + **{k: v.as_python_constant() for k, v in kwargs.items()}, + ), + ) + + # TODO Add all the functions that go from constants to constants to can_constant_fold_through + proxy = tx.output.create_proxy( + "call_function", + numpy_to_tensor_wrapper(func), + *proxy_args_kwargs(args, kwargs), + ) + return NumpyNdarrayVariable.create(tx, proxy) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + unimplemented("numpy") + + def python_type(self): + return type(self.value) + + def as_python_constant(self): + return self.value + + def as_proxy(self): + if config.trace_numpy and isinstance(self.value, type): + # This handles numpy dtype attributes such as np.float32 + # We return a string as we don't want to serialize non-PyTorch objects in the output FX graph + # In torch/_numpy we normalize strings to their dtypes when the input is a dtype, as NumPy does + return self.value.__name__ + + return super().as_proxy() + + +# Used to keep track of NULLs pushed on the stack for Python 3.11 function calls +class NullVariable(VariableTracker): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def __str__(self): + return "NullVariable" + + def reconstruct(self, codegen): + if sys.version_info < (3, 11): + unimplemented("cannot reconstruct NullVariable in < Python 3.11") + codegen.append_output(create_instruction("PUSH_NULL")) + + +class DeletedVariable(VariableTracker): + """Marker used to implement delattr()""" + + +class StringFormatVariable(VariableTracker): + """ + Represents a call to str.format(), we delay calling format until after the graph. 
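+
+     For example (informal): "step {}".format(t) with a traced tensor `t`
+     cannot be folded to a constant, so it is kept as
+     StringFormatVariable("step {}", [t_vt], {}) and the actual format call
+     is re-emitted by reconstruct() in the generated bytecode after the graph.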
+ """ + + _nonvar_fields = {"format_string", *VariableTracker._nonvar_fields} + + @classmethod + def create(cls, format_string, sym_args, sym_kwargs): + if all( + x.is_python_constant() + for x in itertools.chain(sym_args, sym_kwargs.values()) + ): + return variables.ConstantVariable.create( + format_string.format( + *[v.as_python_constant() for v in sym_args], + **{k: v.as_python_constant() for k, v in sym_kwargs.items()}, + ) + ) + return cls(format_string, list(sym_args), dict(sym_kwargs)) + + def __init__(self, format_string, sym_args, sym_kwargs, **kwargs): + super().__init__(**kwargs) + assert isinstance(format_string, str) + self.format_string = format_string + self.sym_args = sym_args + self.sym_kwargs = sym_kwargs + + def __repr__(self): + return f"{self.__class__.__name__}({self.format_string!r}, {self.sym_args!r}, {self.sym_kwargs!r})" + + def reconstruct(self, codegen): + if sys.version_info >= (3, 11): + codegen.append_output(create_instruction("PUSH_NULL")) + codegen.append_output(codegen.create_load_const(self.format_string)) + codegen.append_output(codegen.create_load_attr("format")) + codegen(variables.TupleVariable(self.sym_args)) + kwargs = { + variables.ConstantVariable.create(k): v for k, v in self.sym_kwargs.items() + } + codegen(variables.ConstDictVariable(kwargs)) + codegen.append_output(create_instruction("CALL_FUNCTION_EX", arg=1)) + + +class DebuggingVariable(VariableTracker): + """ + Represents a call to a debugging function like print(), or something + registered to config.reorderable_logging_functions. + """ + + def __init__(self, value, **kwargs): + super().__init__(**kwargs) + self.value = value + + @staticmethod + def is_reorderable_logging_function(obj): + return ( + callable(obj) + and isinstance(obj, (types.FunctionType, types.BuiltinFunctionType)) + and obj in torch._dynamo.config.reorderable_logging_functions + ) + + def call_function(self, tx, args, kwargs): + if tx.export: + # For export cases, we can just make debugging functions no-ops + return + + if not self.can_reorder_logs(self.value, args, kwargs): + unimplemented( + f"Reordering debugging function {self.value} " + f"with inputs {args} {kwargs} is not yet implemented." + ) + + tx.debug_locals.append((self, list(args))) + + def reconstruct(self, codegen): + return self.source.reconstruct(codegen) + + @staticmethod + def can_reorder_logs(fn, args, kwargs) -> True: + """ + Run some additional checks for what sort of function calls can we + actually reorder. + """ + + allowed_input_types = ( + variables.TensorVariable, + variables.ConstantVariable, + StringFormatVariable, + ) + + flat_args = pytree.tree_leaves([args, kwargs]) + for arg in flat_args: + if not isinstance(arg, allowed_input_types): + return False + + return True diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/nn_module.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/nn_module.py new file mode 100644 index 0000000000000000000000000000000000000000..0209a7d2e6dd909110166d0bdfe241b17dbc0e05 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/nn_module.py @@ -0,0 +1,813 @@ +# mypy: ignore-errors + +import functools +import inspect +import itertools +import types +from contextlib import contextmanager, nullcontext +from typing import Any, Dict, List + +import torch.nn + +from .. 
import trace_rules, variables +from ..exc import unimplemented, UnspecializeRestartAnalysis, Unsupported +from ..guards import GuardBuilder, install_guard +from ..mutation_guard import GenerationTracker +from ..source import ( + AttrSource, + FSDPNNModuleSource, + GetItemSource, + NNModuleSource, + NotNNModuleSource, +) +from ..utils import ( + get_custom_getattr, + get_fake_value, + is_lazy_module, + is_namedtuple, + is_safe_constant, + istensor, + istype, + nnmodule_has_hooks, + object_has_getattribute, + proxy_args_kwargs, +) +from .base import MutableLocal, typestr, VariableTracker +from .functions import invoke_and_store_as_constant +from .lists import SliceVariable +from .user_defined import UserDefinedObjectVariable + + +def initialize_lazy_module(tx, mod, args, kwargs): + """ + Fairly coupled helper used by NNModuleVariable and UnspecializedNNModuleVariable. + + Used to cause lazy module to be initialized (and delete its init hook) before tracing. Especially + useful now that 'allowed' modules graph-break on hooks, calling this first ensures there is no hook + by the time we trace __call__ and thus no graph-break for lazy allowed modules. + """ + if hasattr(mod, "_initialize_hook"): + + def convert_to_fake(x): + if is_namedtuple(x): + return type(x)(*(convert_to_fake(elem) for elem in x)) + elif isinstance(x, dict): + return {k: convert_to_fake(v) for k, v in x.items()} + elif isinstance(x, (list, tuple, set)): + return type(x)(convert_to_fake(elem) for elem in x) + elif isinstance(x, torch.fx.Proxy): + return get_fake_value(x.node, tx) + else: + return x + + proxy_args, proxy_kwargs = proxy_args_kwargs(args, kwargs) + fake_args = [convert_to_fake(arg) for arg in proxy_args] + fake_kwargs = {k: convert_to_fake(v) for k, v in proxy_kwargs.items()} + mod._infer_parameters(mod, fake_args, fake_kwargs) + + +@contextmanager +def record_nn_module_stack(module_key: str, source, tx, mod: torch.nn.Module): + fully_qualified_name = source.name() + try: + tx.nn_module_stack[module_key] = (fully_qualified_name, type(mod)) + yield + finally: + del tx.nn_module_stack[module_key] + + +class NNModuleVariable(VariableTracker): + _nonvar_fields = {"module_type", "module_key", *VariableTracker._nonvar_fields} + + def __init__( + self, module_type: type, module_key: str, module: torch.nn.Module, **kwargs + ): + super().__init__(**kwargs) + self.module_type = module_type + self.module_key = module_key + self.module = module + assert self.source + + def python_type(self): + return self.module_type + + def _wrap_submodule(self, tx, source, submod, *key_extra, **options): + return + + def unpack_var_sequence(self, tx): + # implement list/iter/tuple/etc calls + base = tx.output.get_submodule(self.module_key) + if isinstance(base, torch.nn.ModuleDict): + result = [] + for name, submod in base.items(): + name_var = variables.ConstantVariable.create(name) + tx.output.register_attr_or_module( + submod, + self.module_key, + name, + source=NNModuleSource(GetItemSource(self.source, name)), + ) + result.append(name_var) + return result + + assert isinstance( + base, (torch.nn.ModuleList, torch.nn.ParameterList, torch.nn.Sequential) + ), typestr(base) + assert self.source + result = [] + for idx, submod in enumerate(base): + result.append( + tx.output.register_attr_or_module( + submod, + self.module_key, + idx, + source=NNModuleSource(GetItemSource(self.source, idx)), + ) + ) + return result + + def call_hasattr(self, tx, name: str) -> "VariableTracker": + mod = tx.output.get_submodule(self.module_key) + result = 
hasattr(mod, name) + install_guard( + NNModuleSource(AttrSource(self.source, name)).make_guard( + GuardBuilder.HASATTR + ) + ) + return variables.ConstantVariable.create(result) + + def is_training(self, tx): + mod = tx.output.get_submodule(self.module_key) + return getattr(mod, "training", False) + + def convert_to_unspecialized(self, tx): + """Restart analysis treating this module as an UnspecializedNNModuleVariable""" + mod = tx.output.get_submodule(self.module_key) + GenerationTracker.tag(mod) + + # Mark the class dynamic unless its module initialization + if tx.f_code.co_name != "__init__": + GenerationTracker.mark_class_dynamic(type(mod)) + raise UnspecializeRestartAnalysis() + + def _custom_getattr_fallback(self, base, tx, name, options): + """Check for a __getattr__ and handle it specially if it is implemented""" + if object_has_getattribute(base): + unimplemented("torch.nn.Module with a custom __getattribute__ defined") + + getattr_fn = get_custom_getattr(base) + if getattr_fn is None: + return None + + if not isinstance(getattr_fn, types.FunctionType): + unimplemented("torch.nn.Module with a non-function custom __getattr__") + + return variables.UserMethodVariable(getattr_fn, self, **options).call_function( + tx, [variables.ConstantVariable.create(name)], {} + ) + + def var_getattr(self, tx, name): + from .builder import VariableBuilder + + if self.source: + source = AttrSource(self.source, name) + else: + source = None + + base = tx.output.get_submodule(self.module_key) + base_dict = object.__getattribute__(base, "__dict__") + object_member = True + all_class_attribute_names = set() + for x in inspect.getmro(base.__class__): + all_class_attribute_names.update(x.__dict__.keys()) + + if not self.source: + unimplemented("GETATTR with no source") + + if name in base_dict: + subobj = base_dict[name] + elif ( + "_modules" in base_dict + and name in base_dict["_modules"] + and name not in all_class_attribute_names + ): + subobj = base_dict["_modules"][name] + elif "_parameters" in base_dict and name in base_dict["_parameters"]: + subobj = base_dict["_parameters"][name] + elif "_buffers" in base_dict and name in base_dict["_buffers"]: + subobj = base_dict["_buffers"][name] + else: + try: + subobj = inspect.getattr_static(base, name) + object_member = False + except AttributeError: + # see if we can fallback to __getattr__, which is not checked by getattr_static + result = self._custom_getattr_fallback( + base=base, tx=tx, name=name, options={"source": source} + ) + if result is not None: + return result + # if we can't find a __getattr__, just raise the AttributeError + raise + + if name == "__class__" and not object_member: + return variables.UserDefinedClassVariable(base.__class__, source=source) + + if object_member: + return VariableBuilder(tx, NNModuleSource(source))(subobj) + else: + if istype(subobj, property): + return variables.UserFunctionVariable( + subobj.fget, + source=source, + ).call_function(tx, [(self)], {}) + elif istype(subobj, classmethod): + return variables.UserMethodVariable( + subobj.__func__, + variables.UserDefinedObjectVariable(type(base)), + source=source, + ) + elif istype(subobj, staticmethod): + return variables.UserFunctionVariable( + subobj.__get__(base), source=source + ) + elif istype(subobj, types.FunctionType): + return variables.UserMethodVariable(subobj, self, source=source) + elif is_safe_constant(subobj) or istensor(subobj): + # Support possibly common cases of class members + return VariableBuilder(tx, NNModuleSource(source))(subobj) + else: + 
unimplemented(f"class property {typestr(base)} {typestr(subobj)}") + + return variables.GetAttrVariable(self, name, source=source) + + def call_function( + self, + tx, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + mod = tx.output.get_submodule(self.module_key) + + with record_nn_module_stack(self.module_key, self.source, tx, mod): + is_lazy = is_lazy_module(mod) + if ( + isinstance(mod, torch.nn.Sequential) + and mod.__class__.forward is torch.nn.Sequential.forward + ): + if nnmodule_has_hooks(mod): + # We do not want to unroll sequential if it has hooks, since evaporating it + # will cause hooks to not fire! + # This terminates and restart the tracing process + self.convert_to_unspecialized(tx) + + # Unroll sequential + assert ( + not is_lazy + ), "Expected lazy sequential isn't a valid combination?" + assert not kwargs + (arg,) = args + # TODO: Use named_children when it supports remove_duplicate=False. + for child_name, submod in mod._modules.items(): + tx.call_function( + tx.output.register_attr_or_module( + submod, + self.module_key, + child_name, + source=NNModuleSource(AttrSource(self.source, child_name)), + ), + [arg], + {}, + ) + arg = tx.pop() + return arg + + if is_lazy: + # The module type will change after it is called + if mod.cls_to_become is not None: + self.module_type = mod.cls_to_become + + # The pre-hook runs to initialize the module shapes, then deletes itself. After this, + # the module is more or less not lazy and can be treated as a normal module regardless of + # is_allowed or other variations. + initialize_lazy_module(tx, mod, args, kwargs) + + # If we are tracing the higher order op, we want Dynamo to step + # inside the module call so that Dynamo can see the underlying + # parameters and buffers and raise them as inputs to the graph. + if tx.output.is_root_tracer() and mod.__module__.startswith( + ("torch.nn.", "torch.ao.") + ): + if nnmodule_has_hooks( + mod, check_forward_hooks=True, check_backward_hooks=True + ): + # End of fn, this bubbles up and restarts tracing. + self.convert_to_unspecialized(tx) + + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_module", + self.module_key, + *proxy_args_kwargs(args, kwargs), + ), + ) + else: + assert self.source, ( + "Must provide a valid source in order to inline, " + "since inlined function may have default args which must be guarded." + ) + if isinstance(mod, torch.fx.GraphModule): + # TODO: do we want to support __call__ for GM's? + # If so at least some changes are needed, we don't allow inlining + # the call_wrapped currently, and maybe other issues too + fn = mod.forward + else: + fn = mod._call_impl + fn_source = AttrSource(self.source, "__call__") + if istype(fn, types.MethodType): + fn = fn.__func__ + fn_source = AttrSource(fn_source, "__func__") + args = [self] + args + else: + assert istype(fn, types.FunctionType) + return tx.inline_user_function_return( + variables.UserFunctionVariable(fn, source=fn_source), + args, + kwargs, + ) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + constant=False, + ) -> "VariableTracker": + from . import ConstantVariable, ListIteratorVariable, TupleVariable + + key = self.module_key + module = tx.output.get_submodule(key) + + def generic_call_method_helper(name): + # Helper function to put a `call_method` node in FX graph, + # with nn.Module as the first arg. 
+ mod_proxy = tx.output.create_proxy( + "get_attr", + self.module_key, + tuple(), + {}, + ) + mod_proxy.node.meta["example_value"] = module + + proxy_args, proxy_kwargs = proxy_args_kwargs(args, kwargs) + + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_method", + name, + args=(mod_proxy, *proxy_args), + kwargs=proxy_kwargs, + ), + ) + + if name in ["_call_impl", "_wrapped_call_impl"]: + # Example: `self.layer.__call__(x)` + # This is used for explicit calling `__call__` in a forward function. + # Dynamo inlines `__call__`, includes hooks. + return self.call_function(tx, args, kwargs) + elif name == "forward": + # Example: `self.layer.forward(x)` + # This is used for explicit calling `forward` in a forward function. + # Dynamo puts `call_method` node in FX, doesn't trigger hooks. + with record_nn_module_stack(self.module_key, self.source, tx, module): + return generic_call_method_helper(name) + + if name == "_check_input_dim" and trace_rules.is_torch_inline_allowed( + inspect.getfile(module.__class__._check_input_dim) + ): + return ConstantVariable.create(True) + + if name == "_get_item_by_idx": + assert args[1].is_python_constant() + assert isinstance(args[0], TupleVariable) + mod_var = args[0].items[args[1].value] + if isinstance(mod_var, UnspecializedNNModuleVariable): + return mod_var + key = mod_var.module_key + submod = tx.output.get_submodule(key) + return tx.output.register_attr_or_module( + submod, + key, + key, + source=NNModuleSource(GetItemSource(self.source, key)), + ) + + if constant: + fn = getattr(module, name) + name = f"{module.__class__.__name__}_{name}_result" + return invoke_and_store_as_constant(tx, fn, name, args, kwargs) + + def assert_all_args_kwargs_const(): + if not all( + x.is_python_constant() for x in itertools.chain(args, kwargs.values()) + ): + raise unimplemented(f"non-const NNModule method {name}") + + def get_kwargs(*names): + assert_all_args_kwargs_const() + fn = getattr(module, name) + bound_args = inspect.signature(fn).bind( + *([x.as_python_constant() for x in args]), + **{k: v.as_python_constant() for k, v in kwargs.items()}, + ) + bound_args.apply_defaults() + bound_args = bound_args.arguments + return {k: bound_args[k] for k in names} + + def wrap_values(items): + result = [] + for name, submod in items: + result.append( + tx.output.register_attr_or_module( + submod, + key, + name, + source=NNModuleSource(gen_source(self.source, name)), + ) + ) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + + def named_embed(name, obj): + return TupleVariable( + [ + ConstantVariable.create(name), + tx.output.register_attr_or_module( + obj, + key, + name, + source=NNModuleSource(gen_source(self.source, name)), + ), + ] + ) + + def gen_source(source, name): + name_split = name.split(".") + if name_split[0] == "": + return source + while len(name_split) > 0: + x = name_split.pop(0) + source = AttrSource(source, x) + return source + + if name == "named_children": + assert not (args or kwargs) + result = [] + for name, submod in module.named_children(): + result.append(named_embed(name, submod)) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + elif name == "named_parameters": + result = [] + for name, param in module.named_parameters( + **get_kwargs("prefix", "recurse") + ): + result.append(named_embed(name, param)) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + elif name == "named_buffers": + result = [] + for name, buffer in 
module.named_buffers( + **get_kwargs("prefix", "recurse", "remove_duplicate") + ): + result.append(named_embed(name, buffer)) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + elif name == "named_modules": + result = [] + for name, submod in module.named_modules( + **get_kwargs("memo", "prefix", "remove_duplicate") + ): + result.append(named_embed(name, submod)) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + elif name == "children": + assert not (args or kwargs) + return wrap_values(module.named_children()) + elif name == "modules": + return wrap_values(module.named_modules()) + elif name == "parameters": + return wrap_values(module.named_parameters(**get_kwargs("recurse"))) + elif name == "buffers": + return wrap_values(module.named_buffers(**get_kwargs("recurse"))) + elif name == "keys": + assert not (args or kwargs) + result = [] + for name in module.keys(): + result.append(ConstantVariable.create(name)) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + elif name == "values": + assert not (args or kwargs) + return wrap_values(module.items()) + elif name == "items": + assert not (args or kwargs) + result = [] + for name, submod in module.items(): + result.append(named_embed(name, submod)) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + elif name == "__len__": + assert not (args or kwargs) + return ConstantVariable.create(len(module)) + elif ( + name == "__contains__" + and isinstance(module, (torch.nn.ModuleDict, torch.nn.ParameterDict)) + and args + and args[0].is_python_constant() + ): + return ConstantVariable.create( + args[0].as_python_constant() in module._modules + ) + elif name == "__getitem__": + assert not kwargs and len(args) == 1 + builtin_supported = ( + torch.nn.ModuleDict.__getitem__, + torch.nn.ModuleList.__getitem__, + torch.nn.ParameterDict.__getitem__, + torch.nn.ParameterList.__getitem__, + torch.nn.Sequential.__getitem__, + ) + + if type(module).__getitem__ not in builtin_supported: + assert isinstance(args[0], variables.ConstantVariable), typestr(args[0]) + key = args[0].as_python_constant() + assert isinstance(key, (str, int)) + fn = getattr(module, name).__func__ + + assert isinstance(fn, types.FunctionType) + + src = AttrSource(AttrSource(self.source, name), "__func__") + return tx.inline_user_function_return( + variables.UserFunctionVariable(fn, source=src), + [self] + list(args), + kwargs, + ) + + assert self.source + + if isinstance(args[0], SliceVariable): + # Build a TupleVariable of NNModules + result = [] + submods = [] + + # Turn the slice into the list of integers + keys = list(range(len(module)))[args[0].as_python_constant()] + for idx, submod in enumerate(module[args[0].as_python_constant()]): + key = keys[idx] + src = NNModuleSource(GetItemSource(self.source, key)) + result.append( + tx.output.register_attr_or_module( + submod, + key, + source=src, + ) + ) + submods.append(submod) + + new_module = torch.nn.Sequential(*submods) + new_module_variable = tx.output.register_attr_or_module( + new_module, + f"{self}.__getitem__(slice)", + source=NNModuleSource( + GetItemSource(self.source, args[0].as_python_constant()) + ), + ) + return new_module_variable + + from .tensor import SymNodeVariable + + if isinstance(args[0], SymNodeVariable): + key = args[0].evaluate_expr(tx.output) + else: + key = args[0].as_python_constant() + + submod = module[key] + return tx.output.register_attr_or_module( + submod, + self.module_key, + key, + 
source=NNModuleSource(GetItemSource(self.source, key)), + ) + elif ( + name == "_get_abs_string_index" + or ( + isinstance(module, torch.nn.modules.conv._ConvNd) + and name == "_conv_forward" + ) + or ( + isinstance(module, torch.nn.modules.conv._ConvTransposeNd) + and name == "_output_padding" + ) + ): + # Inline the function + fn = getattr(module, name).__func__ + fn_source = AttrSource(AttrSource(self.source, name), "__func__") + return tx.inline_user_function_return( + variables.UserFunctionVariable(fn, source=fn_source), + [self] + args, + kwargs, + ) + # A loose heuristic, but seems to be generally good before we drop into the + # manual handling of inputs + elif ( + name in module.__class__.__dict__ + and callable(module.__class__.__dict__[name]) + and all( + isinstance(x, variables.TensorVariable) + for x in itertools.chain(args, kwargs.values()) + ) + ): + return generic_call_method_helper(name) + else: + return super().call_method(tx, name, args, kwargs) + + +class UnspecializedNNModuleVariable(UserDefinedObjectVariable): + _nonvar_fields = {"value_type", *UserDefinedObjectVariable._nonvar_fields} + + """ + The above class will specialize on the id() of a module and place + parameters on the torch.fx.GraphModule. Giving one graph per + module instance. This version treats nn.Modules() like other user + defined objects and will pass parameters into the FX graph as inputs. + Giving one graph per module class. + """ + + def __init__(self, value, **kwargs): + if type(value) is torch.jit._script.RecursiveScriptModule: + raise Unsupported( + "ScriptModules aren't supported in UnspecializedNNModuleVariable" + " becuase their .forward function isn't a static member of their type" + ) + if "value_type" in kwargs: + lazy_value_to_become = getattr(kwargs["value_type"], "cls_to_become", None) + if type(value) is lazy_value_to_become: + # We may have cloned a variabletracker for a LazyModule earlier (e.g. tracking side-effects) + # and then later we called and mutated the LazyModule into a MaterializedModule. + # We do not do the mutation upon first seeing a LazyModule since we preserve eager semantics to only + # mutate upon first call, but this requires we update multiple copies of the VariableTracker post-mutation. 
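+                # Illustrative example: torch.nn.LazyLinear reports
+                # cls_to_become == torch.nn.Linear, so once the module has
+                # materialized we record type(value) (Linear) instead of the
+                # stale lazy class captured earlier.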
+ kwargs["value_type"] = type(value) + + super().__init__(value=value, **kwargs) + + @staticmethod + @functools.lru_cache(None) + def _nn_module_method_ids(): + return { + id(x.__code__) + for x in torch.nn.Module.__dict__.values() + if hasattr(x, "__code__") + } + + def unpack_var_sequence(self, tx): + from .builder import VariableBuilder + + try: + fn = inspect.getattr_static(self.value_type, "__iter__") + except AttributeError as e: + raise NotImplementedError from e + + if fn in ( + torch.nn.ModuleList.__iter__, + torch.nn.ParameterList.__iter__, + torch.nn.Sequential.__iter__, + ): + assert self.source + return [ + VariableBuilder(tx, source=GetItemSource(self.source, idx))(item) + for idx, item in enumerate(self.value) + ] + + return super().unpack_var_sequence(tx) + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + mod = self.value + # see comment on lazy module handling in NNModuleVariable.call_function for context + if is_lazy_module(mod): + if mod.cls_to_become is not None: + self.value_type = mod.cls_to_become + initialize_lazy_module(tx, mod, args, kwargs) + name = "_call_impl" + fn = getattr(self.value_type, name) + if self.source: + source = AttrSource(AttrSource(self.source, "__class__"), name) + else: + source = None + + ctx = ( + record_nn_module_stack(str(id(mod)), self.source, tx, mod) + if self.source + else nullcontext() + ) + with ctx: + return variables.UserFunctionVariable(fn, source=source).call_function( + tx, [self] + list(args), kwargs + ) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from .builder import VariableBuilder + + if name in ["_call_impl", "_wrapped_call_impl"]: + fn = getattr(self.value_type, name) + if self.source: + source = AttrSource(AttrSource(self.source, "__class__"), name) + else: + source = None + + return variables.UserFunctionVariable(fn, source=source).call_function( + tx, [self] + list(args), kwargs + ) + + if name not in getattr(self.value, "__dict__", {}): + try: + method = inspect.getattr_static(type(self.value), name) + except AttributeError: + method = None + + if method is torch.nn.Module.parameters: + assert not args or kwargs + if tx.output.side_effects.has_pending_mutation(self): + unimplemented("Module.parameters() with pending mutation") + install_guard( + self.source.make_guard(GuardBuilder.NN_MODULE_PARAM_NAMES) + ) + items = [] + for name, value in self.value.named_parameters(): + items.append( + VariableBuilder(tx, AttrSource(self.source, name))(value) + ) + return variables.ListIteratorVariable( + items, mutable_local=MutableLocal() + ) + elif isinstance(method, staticmethod): + source = AttrSource( + AttrSource(AttrSource(self.source, "__class__"), name), "__func__" + ) + return tx.inline_user_function_return( + variables.UserFunctionVariable(method.__func__, source=source), + args, + kwargs, + ) + + if id(method.__code__) in self._nn_module_method_ids(): + unimplemented(f"UnspecializedNNModuleVariable missing {name}") + + return super().call_method(tx, name, args, kwargs) + + +class FSDPManagedNNModuleVariable(UnspecializedNNModuleVariable): + """ + Tracing behavior: trace into submodules and treat them as Unspecialized, do not + register parameters to the top-level, treat them as function inputs. 
+ + Guards behavior: if 'skip_fsdp_guards', many guards that would be installed + by a vanilla UnspecializedNNModuleVariable are simply dropped, on the basis + that a user wrapping their model in FSDP(model) is already opting into a + requirement to not modify internal model state, which would already break FSDP without + compilation. + """ + + def __init__(self, value, **kwargs): + source = kwargs.get("source", None) + assert ( + source is not None + ), "FSDPManagedNNModule depends on having an accurate source to control guarding." + + super().__init__(value=value, **kwargs) + self.source = source + + @staticmethod + def _wrap_source(source): + if not isinstance(source, (FSDPNNModuleSource, NotNNModuleSource)): + if torch._dynamo.config.skip_fsdp_guards: + return FSDPNNModuleSource(source) + else: + # this makes us behave like a usual UnspecializedNNModuleVariable for guarding purposes + return NotNNModuleSource(source) + else: + return source + + def __setattr__(self, name: str, value: Any) -> None: + if name == "source": + value = FSDPManagedNNModuleVariable._wrap_source(value) + + return super().__setattr__(name, value) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/optimizer.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..4b3246cb1cd7a564627d5a0b3c2a022fa5f849d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/optimizer.py @@ -0,0 +1,230 @@ +# mypy: ignore-errors + +import weakref +from typing import Dict, List + +import torch + +from ..decorators import mark_static_address + +from ..guards import GuardBuilder, install_guard +from ..source import AttrSource, ConstDictKeySource, GetItemSource, GlobalWeakRefSource +from ..utils import GLOBAL_KEY_PREFIX + +from .base import VariableTracker +from .constant import ConstantVariable +from .dicts import ConstDictVariable +from .lists import ListVariable +from .misc import GetAttrVariable +from .user_defined import UserDefinedObjectVariable + + +class ArgMappingException(Exception): + pass + + +class GuardInstallException(Exception): + pass + + +class OptimizerVariable(UserDefinedObjectVariable): + def __init__( + self, + value, + grad_to_source=None, + static_tensor_names=None, + tensor_to_source=None, + **kwargs, + ): + super().__init__(value, **kwargs) + + for group in self.value.param_groups: + if "capturable" in group: + group["capturable"] = True + + for p in group["params"]: + mark_static_address(p, guard=False) + + self.grad_to_source = grad_to_source or {} + self.tensor_to_source = tensor_to_source or {} + self.static_tensor_names = static_tensor_names or set() + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + """This is an optimization to avoid tracing the very slow initialization of the optimizer""" + if name == "_init_group": + try: + py_args, py_kwargs = self.get_python_args(*args, **kwargs) + ret_val = self.value._init_group(*py_args, **py_kwargs) + self.map_sources_and_install_guards(tx) + self.update_list_args(tx, args, kwargs, py_args, py_kwargs) + # stash a weak_ptr to optimizer to invalidate code + # if the optimizer object dies + mangled_name = f"__optimizer_{id(self.value)}" + tx.store_global_weakref_by_id(mangled_name, self.value) + self.create_finalizer(tx) + + # This is currently safe only because the only actual `ret_val`s returned + # by the `_init_group` of existing optimizers 
are properties that are invariant + # to the input tensors (e.g. dtype, layout). Changing these would trigger a + # recompilation and hence never result in the wrong specialization of `ret_val`. + return ConstantVariable.create(ret_val) + except (ArgMappingException, GuardInstallException) as _: + # trace normally if we can't map args or install guards correctly + pass + + return super().call_method(tx, name, args, kwargs) + + def var_getattr(self, tx, name): + if name == "_init_group": + return GetAttrVariable(self, name) + + return super().var_getattr(tx, name) + + def get_python_args(self, *args, **kwargs): + """Get python values equivalent to the variable tracker args""" + + def map_arg(arg): + if isinstance(arg, ConstantVariable): + return arg.as_python_constant() + elif isinstance(arg, ListVariable) and not arg.items: + return [] + elif ( + isinstance(arg, ConstDictVariable) + and isinstance(arg.source, GetItemSource) + and isinstance(arg.source.base, AttrSource) + and arg.source.base.member == "param_groups" + ): + return self.value.param_groups[arg.source.index] + + raise ArgMappingException() + + new_args = [map_arg(arg) for arg in args] + new_kwargs = {k: map_arg(v) for k, v in kwargs.items()} + + return new_args, new_kwargs + + def map_sources_and_install_guards(self, tx): + self.grad_to_source = {} + self.tensor_to_source = {} + + from .builder import VariableBuilder + + param_groups_vt = VariableBuilder(tx, AttrSource(self.source, "param_groups"))( + self.value.param_groups + ).recursive_realize() + + for g_ind, (group, group_vt) in enumerate( + zip(self.value.param_groups, param_groups_vt.items) + ): + group_source = group_vt.source + params_vt = group_vt.getitem_const(ConstantVariable.create("params")) + for p_ind, (p, p_vt) in enumerate( + zip(group["params"], params_vt.unpack_var_sequence(tx)) + ): + param_source = p_vt.source + self.tensor_to_source[p] = param_source + grad_source = AttrSource( + param_source, + "grad", + ) + if p.grad is not None: + self.grad_to_source[p.grad] = grad_source + else: + install_guard(grad_source.make_guard(GuardBuilder.CONSTANT_MATCH)) + + # state guards take a long time to generate + # so we manually generate them here + state_source = AttrSource(self.source, "state") + install_guard(state_source.make_guard(GuardBuilder.DICT_KEYS)) + for idx, (p, value) in enumerate(self.value.state.items()): + tx.store_global_weakref_by_id(GLOBAL_KEY_PREFIX, p) + p_state_source = GetItemSource( + state_source, ConstDictKeySource(state_source, idx) + ) + install_guard(p_state_source.make_guard(GuardBuilder.DICT_KEYS)) + for k, v in value.items(): + if ( + isinstance(v, torch.Tensor) + and v not in self.grad_to_source + and v not in self.tensor_to_source + ): + self.tensor_to_source[v] = GetItemSource(p_state_source, k) + elif v is None or isinstance(v, (bool, int, float, str)): + install_guard( + GetItemSource(p_state_source, k).make_guard( + GuardBuilder.CONSTANT_MATCH + ) + ) + else: + raise GuardInstallException() + + def wrap_tensor(self, tx, tensor_value): + """Wrap state tensor in a TensorVariable""" + from .builder import VariableBuilder + + # If we have a source for a tensor already use it, + # if we have not seen a tensor before, stash and use a + # global weak ref source, since it must be an optimizer tensor + # that we have missed + + if tensor_value in self.tensor_to_source: + # mark these tensors as static for cudagraphs + mark_static_address(tensor_value, guard=False) + builder = VariableBuilder(tx, self.tensor_to_source[tensor_value]) + 
self.static_tensor_names.add(tx.output.module_key_name(builder.name)) + elif tensor_value in self.grad_to_source: + builder = VariableBuilder(tx, self.grad_to_source[tensor_value]) + else: + # mark these tensors as static for cudagraphs + mark_static_address(tensor_value, guard=False) + + global_name = tx.store_global_weakref_by_id(GLOBAL_KEY_PREFIX, tensor_value) + builder = VariableBuilder(tx, GlobalWeakRefSource(global_name)) + self.static_tensor_names.add(tx.output.module_key_name(builder.name)) + + result = builder(tensor_value) + return result + + def update_list_args(self, tx, args, kwargs, py_args, py_kwargs): + """Update the args and kwargs to the traced optimizer call""" + for arg, py_arg in zip(args, py_args): + if isinstance(arg, ListVariable): + assert isinstance( + py_arg, list + ), "py_arg should be a list in optimizer variable" + for i, val in enumerate(py_arg): + tx.output.side_effects.mutation(arg) + if isinstance(val, torch.Tensor): + arg.items.append(self.wrap_tensor(tx, val)) + else: + from .builder import SourcelessBuilder, VariableBuilder + + if arg.source: + arg.items.append( + VariableBuilder(tx, GetItemSource(arg.source, i))(val) + ) + else: + arg.items.append(SourcelessBuilder()(tx, val)) + + def create_finalizer(self, tx): + names_to_delete = self.static_tensor_names + value = self.value + tc = tx.output.tracing_context + + def init_finalizer(gm): + def clear_static_tensor_refs(): + for name in names_to_delete: + gm._buffers.pop(name, None) + gm._parameters.pop(name, None) + if tc.params_flat: + tc.params_flat.clear() + + weakref.finalize(value, clear_static_tensor_refs) + + tx.output.add_graph_finalizer(init_finalizer) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/sdpa.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/sdpa.py new file mode 100644 index 0000000000000000000000000000000000000000..0a6af76690df6cbbaa7538bb4adfa6acc5b3ce24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/sdpa.py @@ -0,0 +1,84 @@ +# mypy: ignore-errors + +from inspect import getattr_static + +from ..bytecode_transformation import create_call_function +from ..exc import Unsupported +from .base import VariableTracker + + +class SDPAParamsVariable(VariableTracker): + """Represents the c++ params struct for scaled dot product attention. 
+ This is a read-only container.""" + + @staticmethod + def create(tx, value, source): + from torch.backends.cuda import SDPAParams + from ..source import AttrSource + from .builder import VariableBuilder + from .torch import TorchInGraphFunctionVariable + + query_var = VariableBuilder(tx, AttrSource(source, "query"))(value.query) + key_var = VariableBuilder(tx, AttrSource(source, "key"))(value.key) + value_var = VariableBuilder(tx, AttrSource(source, "value"))(value.value) + attn_mask_var = VariableBuilder(tx, AttrSource(source, "attn_mask"))( + value.attn_mask + ) + dropout_var = VariableBuilder(tx, AttrSource(source, "dropout"))(value.dropout) + is_causal_var = VariableBuilder(tx, AttrSource(source, "is_causal"))( + value.is_causal + ) + param_vars = [ + query_var, + key_var, + value_var, + attn_mask_var, + dropout_var, + is_causal_var, + ] + return TorchInGraphFunctionVariable(SDPAParams).call_function( + tx, param_vars, {} + ) + + def __init__(self, proxy, param_vars, **kwargs): + self.proxy = proxy + self.param_vars = param_vars + super().__init__(**kwargs) + + def reconstruct(self, codegen): + assert self.source is None + assert self.param_vars is not None + codegen.load_import_from("torch._C", "_SDPAParams") + codegen.foreach(self.param_vars) + codegen.extend_output(create_call_function(len(self.param_vars), True)) + + def as_proxy(self): + return self.proxy + + def var_getattr(self, tx, name: str) -> VariableTracker: + import torch._C + from ..source import AttrSource + from .builder import wrap_fx_proxy + from .misc import GetAttrVariable + + try: + getattr_static(torch._C._SDPAParams, name) + except AttributeError: + # Using raise from is too verbose here + raise Unsupported( # noqa: TRY200 + f"Unsupported torch._C._SDPAParams attribute {name}" + ) + + proxy = GetAttrVariable.create_getattr_proxy(self.as_proxy(), name) + if self.source is not None: + return wrap_fx_proxy( + tx=tx, proxy=proxy, source=AttrSource(self.source, name) + ) + else: + return wrap_fx_proxy(tx=tx, proxy=proxy) + + @staticmethod + def is_sdpa_params(value): + from torch.backends.cuda import SDPAParams + + return value is SDPAParams diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/tensor.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..940e92adc1a50cfd791a6e689594d627a8f28f3b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/tensor.py @@ -0,0 +1,1189 @@ +# mypy: ignore-errors + +import functools + +import inspect +import operator +import types +from typing import Dict, List + +from torch.utils._python_dispatch import is_traceable_wrapper_subclass + +from ..bytecode_transformation import create_call_method +from ..external_utils import call_hook_from_backward_state + +try: + import numpy as np +except ModuleNotFoundError: + np = None + + +import sympy + +import torch._numpy as tnp + +import torch.fx +import torch.random +from torch._dynamo import compiled_autograd +from torch._subclasses.meta_utils import is_sparse_any + +from torch.fx.experimental.symbolic_shapes import ( + guard_scalar, + GuardOnDataDependentSymNode, + has_free_symbols, + is_symbolic, + SymTypes, +) + +from .. 
import config, variables +from .._trace_wrapped_higher_order_op import trace_wrapped + +from ..exc import unimplemented, UserError, UserErrorType +from ..guards import GuardBuilder, install_guard +from ..source import AttrSource +from ..utils import ( + fqn, + get_custom_getattr, + get_fake_value, + get_real_value, + guard_if_dyn, + object_has_getattribute, + product, + proxy_args_kwargs, + tensortype_to_dtype, +) +from .base import VariableTracker +from .constant import ConstantVariable +from .lists import SizeVariable + +supported_tensor_comparison_ops = { + ">": operator.gt, + "<": operator.lt, + ">=": operator.ge, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, +} +supported_const_comparison_ops = { + "is": operator.is_, + "is not": operator.is_not, + "==": operator.eq, + "!=": operator.ne, +} + + +class TensorVariable(VariableTracker): + """A torch.Tensor input or an intermediate value in the FX graph""" + + _nonvar_fields = { + "proxy", + "dtype", + "device", + "layout", + "ndim", + "size", + "stride", + "requires_grad", + "is_quantized", + "is_contiguous", + "is_sparse", + "class_type", + "specialized_value", + *VariableTracker._nonvar_fields, + } + + def get_real_value(self): + """ + Get the actual value represented by this variable if computation is run + using the user-provided inputs. + NOTE: this runs actual tensor computation and may be + slow and memory-intensive. + """ + return get_real_value(self.proxy.node, self.proxy.tracer) + + def __init__( + self, + proxy: torch.fx.Proxy, + *, + dtype, + device, + layout, + ndim, + requires_grad, + is_quantized, + is_sparse, + class_type, + size=None, + stride=None, + is_contiguous=None, + **kwargs, + ): + super().__init__(**kwargs) + self.proxy = proxy + self.dtype = dtype + self.device = device + self.layout = layout + self.ndim = ndim + self.size = size + self.stride = stride + self.requires_grad = requires_grad + self.is_quantized = is_quantized + self.is_contiguous = is_contiguous + self.is_sparse = is_sparse + self.class_type = class_type + + def as_proxy(self): + return self.proxy + + def python_type(self): + return self.class_type + + @staticmethod + def specialize(value: torch.Tensor): + props = { + "dtype": value.dtype, + "device": value.device, + "layout": value.layout, + "ndim": int(value.ndim), + "requires_grad": value.requires_grad, + "is_quantized": value.is_quantized, + "is_sparse": value.is_sparse, + "class_type": type(value), + } + if is_sparse_any(value) and not has_free_symbols(value): + props["size"] = tuple( + [int(s) if is_symbolic(s) else s for s in value.size()] + ) + elif not has_free_symbols(value): + # this is a fully static shape, and the keys on props here inform specialization. + # We have to cast to int here, because these might get accessed as ConstantVariable, which has + # a strict no-symint policy. If we got here due to not having free symbols, this is a known constant + # already. We could remove the discrepancy here, by having ConstantVariable be more permissive for + # constant backed SymInts, but that assert being strict has led to some good signal in hunting bugs, and + # I'd like to keep it around for now. 
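+            # Illustrative example: a plain torch.randn(2, 3) input records
+            # props["size"] == (2, 3) and props["stride"] == (3, 1) here, while a
+            # tensor traced with dynamic shapes still has free SymInts and skips
+            # this static-shape branch entirely.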
+ props["size"] = tuple( + # the non is_symbolic case applies to the jagged layout + # NestedTensor case as singleton ints are not symbolic + [int(s) if is_symbolic(s) else s for s in value.size()] + ) + props["stride"] = tuple(value.stride()) + if torch._C._functorch.is_batchedtensor(value): + # Batched tensors does not support contiguity patterns, so + # we refrain from computing the `is_contiguous` property + props["is_contiguous"] = None + else: + props["is_contiguous"] = tuple( + [ + x + for x in torch._prims_common._memory_formats + if value.is_contiguous(memory_format=x) + ] + ) + return props + + def dynamic_getattr(self, tx, name): + fake_val = self.proxy.node.meta["example_value"] + # For getattrs on tensors without sources, + # we can do better than the default (creating a GetAttrVariable) + # if: + # (1) the tensor is a traceable tensor subclass + # (2) We are getattr'ing an inner tensor from that subclass + if not self.source and is_traceable_wrapper_subclass(fake_val): + fake_val = self.proxy.node.meta["example_value"] + attrs, ctx = fake_val.__tensor_flatten__() + proxy = getattr(self.as_proxy(), name) + example_value = getattr(fake_val, name) + if name in attrs: + # attrs returned from tensor_flatten are always tensors + assert isinstance(example_value, torch.Tensor) + from .builder import wrap_fx_proxy + + return wrap_fx_proxy(tx=tx, proxy=proxy, example_value=example_value) + # any other attributes on the subclass (that are not methods) + # are assumed to be constant metadata. + elif not callable(example_value): + from .builder import SourcelessBuilder + + return SourcelessBuilder()(tx, example_value) + + if not (self.source and self.source.subguards_allowed()): + raise NotImplementedError() + + # For local source, we associate the real value. We use this real value + # for implementing getattr fallthrough on the variable tracker base class. + + # Note - this scope construction is mirrored in guards + # A subsequent PR will introduce a util. + scope = {"L": tx.output.local_scope, "G": tx.output.global_scope} + try: + # We raise in case we get a typerror bug w/ SuperSource. + # SuperSource has bugs in it atm, and can produce code like + # eval("super(L['mod'].model.model.encoder.embed_positions.forward__class__, + # L['mod'].model.model.encoder.embed_positions)", scope) + # Which is incorrect, and violates the invariant that all sources should be eval()-able against the scope. + _input_associated_real_value = eval(self.source.name(), scope) + except Exception as exc: + raise NotImplementedError() from exc + + if _input_associated_real_value is None: + raise NotImplementedError() + + if object_has_getattribute(_input_associated_real_value): + raise NotImplementedError() + + if get_custom_getattr(_input_associated_real_value): + raise NotImplementedError() + + real_value = getattr(_input_associated_real_value, name) + if callable(real_value): + # Callables have more nuanced handling, and we should let the existing system delegate here. + # Raising was past behavior and so should always be sound to fall back. 
+ # Note - at a certain point we may want to handle + raise NotImplementedError() + + from ..guards import GuardBuilder + from .builder import VariableBuilder + + attr_source = AttrSource(self.source, name) + install_guard(attr_source.make_guard(GuardBuilder.HASATTR)) + return VariableBuilder(tx, attr_source)(real_value) + + def method_attr_ndim(self, tx): + if self.ndim is not None: + return ConstantVariable.create(self.ndim) + else: + return self.call_method(tx, "dim", [], {}) + + def method_attr_dtype(self, tx): + if self.dtype is not None: + return ConstantVariable.create(self.dtype) + + def method_attr_device(self, tx): + if self.device is not None: + return ConstantVariable.create(self.device) + + def method_attr_layout(self, tx): + if self.layout is not None: + return ConstantVariable.create(self.layout) + + def method_attr_is_cuda(self, tx): + if self.device is not None: + return ConstantVariable.create(self.device.type == "cuda") + + def method_attr_shape(self, tx): + if self.size is not None: + sizes = [variables.ConstantVariable.create(x) for x in self.size] + return SizeVariable(sizes) + else: + return self.call_method(tx, "size", [], {}) + + def method_attr_requires_grad(self, tx): + if self.requires_grad is not None: + return ConstantVariable.create(self.requires_grad) + + def method_attr_is_quantized(self, tx): + if self.is_quantized is not None: + return ConstantVariable.create(self.is_quantized) + + def method_attr_is_sparse(self, tx): + if self.is_sparse is not None: + return ConstantVariable.create(self.is_sparse) + + def method_attr_data(self, tx): + return self.call_method(tx, "detach", [], {}) + + def method_attr__version(self, tx): + from ..tensor_version_op import _tensor_version + + return variables.TorchInGraphFunctionVariable(_tensor_version).call_function( + tx, [self], {} + ) + + def var_getattr(self, tx, name): + from . import UserDefinedClassVariable + + if tx.strict_checks_enabled: + if name in self._strict_mode_banned_ops(): + unimplemented(f"Illegal getattr invocation {name} in strict mode") + + if name == "__class__": + return UserDefinedClassVariable(self.python_type()) + + handler = getattr(self, f"method_attr_{name}", None) + result = handler(tx) if handler is not None else None + + # Add a guard for type matching, these guards are checked before tensor guards + # In some cases, a . guard can be evaluated first, and break if + # is later changed to another type + if ( + result is not None + and self.source + and self.source.subguards_allowed() + and not ( + name not in ("grad", "requires_grad") and result.is_python_constant() + ) + ): + install_guard(self.make_guard(GuardBuilder.TYPE_MATCH)) + result.source = AttrSource(self.source, name) + + # It's hard to get inplace view (metadata mutation) on graph input work properly across + # dynamo/aot/inductor, just fall back. + if self.source is not None and hasattr(torch.ops.aten, name): + fn = getattr(torch.ops.aten, name) + if ( + hasattr(fn, "overloads") + and hasattr(fn, fn.overloads()[0]) + and torch.Tag.inplace_view in getattr(fn, fn.overloads()[0]).tags + ): + # Delay the graph break to the actual call of unsqueeze_/resize_/resize_as_ etc. + return variables.misc.DelayGraphBreakVariable( + source=AttrSource(self.source, name) + ) + + # For attributes (not methods) that were not caught in the special handling above, + # (e.g. tensor.real), we handle these generically, assuming that the output type is + # a tensor. 
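+        # Illustrative example: attributes such as tensor.real, tensor.imag and
+        # tensor.T resolve to getset_descriptors on torch.Tensor, so the generic
+        # handler below wraps them as getattr proxies whose outputs are tensors.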
+ if result is None and name != "grad": + + def try_generic_attr_handling(): + from .builder import wrap_fx_proxy + from .misc import GetAttrVariable + + try: + static_attr = inspect.getattr_static(torch.Tensor, name) + except AttributeError: + return None + + # Make sure this is an attribute, not a method. + # type(torch.Tensor.H) should be "getset_descriptor" + # This is a because of CPython implementation, see THPVariableType: + # these attributes are implemented under tp_getset, which appear + # as `getset_descriptor`s, (compared to, say, methods which appear + # as `method_descriptor`s) + if type(static_attr) != types.GetSetDescriptorType: + return None + + proxy = GetAttrVariable.create_getattr_proxy(self.as_proxy(), name) + if self.source is not None: + return wrap_fx_proxy( + tx=tx, proxy=proxy, source=AttrSource(self.source, name) + ) + else: + return wrap_fx_proxy(tx=tx, proxy=proxy) + + result = try_generic_attr_handling() + + if result is None: + result = self.dynamic_getattr(tx, name) + + if result is None: + raise NotImplementedError() + return result + + def has_unpack_var_sequence(self, tx): + return self.ndim > 0 + + def unpack_var_sequence(self, tx, idxes=None): + from .builder import wrap_fx_proxy_cls + + if idxes is None: + if self.size: + length = self.size[0] + else: + dyn_length = self.call_method( + tx, "size", [ConstantVariable.create(0)], {} + ) + # SymNodeVariable for symbolic sizes, ConstantVariable for constants OR values produced through + # symbolic_shapes, but that end up as int/sympy.Integer + assert isinstance(dyn_length, (SymNodeVariable, ConstantVariable)) + if isinstance(dyn_length, SymNodeVariable): + length = dyn_length.evaluate_expr(tx.output) + else: + length = dyn_length.value + idxes = range(length) + return [ + wrap_fx_proxy_cls(target_cls=type(self), tx=tx, proxy=self.as_proxy()[i]) + for i in idxes + ] + + def _strict_mode_banned_ops(self): + return torch._dynamo.config._autograd_backward_strict_mode_banned_ops + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if tx.strict_checks_enabled: + if name in self._strict_mode_banned_ops(): + unimplemented(f"Illegal method invocation {name} in strict mode") + + """ + Dispatch to a method-specific handler defined below. If the + handler returns None (or doesn't exist) we put the method call + in the graph. 
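+        For example (illustrative): x.dim() is answered from recorded metadata by
+        method_dim, while a call with no handler such as x.mul_(y) falls through
+        to wrap_fx_proxy below and becomes a call_method node.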
+ """ + try: + handler_method = getattr(self, f"method_{name}") + except AttributeError: + pass + else: + try: + result = handler_method(*args, **kwargs) + if result: + return result + except TypeError as e: + unimplemented(f"unhandled args for {name}: {e}") + + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_method", + name, + *proxy_args_kwargs([self, *args], kwargs), + ), + ) + + def method_size(self, *args, **kwargs): + return self._method_size_stride("size", *args, **kwargs) + + def method_stride(self, *args, **kwargs): + return self._method_size_stride("stride", *args, **kwargs) + + def _method_size_stride(self, name, dim=None): + dim = guard_if_dyn(dim) + + def make_const_size_variable(x, **options): + return SizeVariable( + [ConstantVariable.create(y, **options) for y in x], **options + ) + + RetVariable = ( + make_const_size_variable if name == "size" else ConstantVariable.create + ) + + # Technically, this should not be necessary, but I'm including it + # for enhanced BC, in case example_value is sometimes not set + # (it really should always be set though!) + if (r := getattr(self, name)) is not None: + if dim is None: + return RetVariable(r) + else: + return ConstantVariable.create(r[dim]) + + # It might still be constant! Consult the fake tensor and see + if (fake := self.proxy.node.meta.get("example_value")) is not None: + if dim is None: + fake_r = getattr(fake, name)() + if not has_free_symbols(fake_r): + # int conversion for safety, in case a SymInt refined + # to constant + return RetVariable(tuple(int(r) for r in fake_r)) + else: + fake_r = getattr(fake, name)(dim) + if not has_free_symbols(fake_r): + return ConstantVariable.create(int(fake_r)) + + def method_numel(self): + if self.size is not None: + return ConstantVariable.create(product(self.size)) + + # It might still be constant! Consult the fake tensor and see + if (fake := self.proxy.node.meta.get("example_value")) is not None: + fake_r = fake.numel() + if not has_free_symbols(fake_r): + return ConstantVariable.create(int(fake_r)) + + method_nelement = method_numel + + def method_dim(self): + if self.ndim is not None: + return ConstantVariable.create(self.ndim) + + method_ndimension = method_dim + + def method_is_floating_point(self): + if self.dtype is not None: + return ConstantVariable.create(self.dtype.is_floating_point) + + def method_is_contiguous(self, memory_format=None): + memory_format = ( + memory_format.as_python_constant() + if memory_format is not None + else torch.contiguous_format + ) + if self.is_contiguous is not None: + return ConstantVariable.create(memory_format in self.is_contiguous) + elif (fake := self.proxy.node.meta.get("example_value")) is not None: + return ConstantVariable.create( + fake.is_contiguous(memory_format=memory_format) + ) + + def method_type(self, dtype=None, non_blocking=False, **kwargs): + if ( + dtype is None + and self.dtype is not None + and isinstance(self.device, torch.device) + ): + tensortype = next( + k for k, v in tensortype_to_dtype.items() if self.dtype in v + ) + if self.device.type == "cuda": + return ConstantVariable.create(f"torch.cuda.{tensortype.__name__}") + else: + return ConstantVariable.create(f"torch.{tensortype.__name__}") + elif ( + dtype is not None + and fqn(type(dtype.as_python_constant())) == "torch.tensortype" + ): + # torch.FloatTensor, etc. are all of type "torch.tensortype". + # torch.fx's tracer fails on these types, because it doesn't support arguments of torch.tensortype type. 
+ # So, we pass it in as a string (which is also supported, see above implementation for .type() with 0 args) + tensor_type = dtype.as_python_constant() + tensor_type_const = ConstantVariable.create(fqn(tensor_type)) + + from ..symbolic_convert import InstructionTranslator + from .builder import wrap_fx_proxy + + tx = InstructionTranslator.current_tx() + + if non_blocking: + kwargs = {"non_blocking": non_blocking, **kwargs} + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_method", + "type", + *proxy_args_kwargs([self, tensor_type_const], kwargs), + ), + ) + + def method_as_subclass(self, cls): + if isinstance(cls, TensorSubclassVariable) and cls.source: + from ..symbolic_convert import InstructionTranslator + from .builder import VariableBuilder + from .torch_function import TensorWithTFOverrideVariable + + tx = InstructionTranslator.current_tx() + + # [Note: __torch_function__] coerce this tensor variable into a TensorWithTFOverrideVariable + # in eager, this is just a type change. This isn't sound if a __torch_function__ tensor subclass + # defines a constructor, but if only a __torch_function__ impl is defined, this is okay to call. + # It is up to the user whether this is correct behavior or not. + py_cls = cls.as_python_constant() + torch_fn = VariableBuilder( + tx, + AttrSource(AttrSource(cls.source, "__torch_function__"), "__func__"), + )(py_cls.__torch_function__.__func__) + + return TensorWithTFOverrideVariable.from_tensor_var( + tx, self, py_cls, torch_fn + ) + + def method_get_device(self): + if isinstance(self.device, torch.device): + index = self.device.index if self.device.type != "cpu" else -1 + return ConstantVariable.create(index) + + def method_element_size(self): + return ConstantVariable.create(self.dtype.itemsize) + + def method_numpy(self, *, force=False): + if not config.trace_numpy: + unimplemented("Tensor.numpy(). config.trace_numpy is False") + if not np: + unimplemented("Tensor.numpy(). NumPy is not available") + if self.layout != torch.strided: + raise TypeError( + f"can't convert {self.layout} layout tensor to numpy. Use Tensor.dense() first" + ) + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + + # We don't check that the tensor is on CPU when force is False, as this + # allows us to execute NumPy code on CUDA. Same for requires_grad=True + if force and force.as_python_constant(): + # If the user set force=True we try to preserve the semantics (no gradients, move to CPU...) 
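+            # Illustrative example: t.numpy(force=True) on a CUDA tensor is traced
+            # as t.detach().cpu() and wrapped as a NumpyNdarrayVariable; without
+            # force, the cheaper view_as(self) aliasing path below is used instead.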
+ t = self.call_method(tx, "detach", [], {}) + proxy = tx.output.create_proxy("call_method", "cpu", (t.as_proxy(),), {}) + else: + # Hacky way to create a view of self that will be marked as NumpyNdarrayVariable + proxy = tx.output.create_proxy( + "call_method", "view_as", *proxy_args_kwargs([self, self], {}) + ) + return NumpyNdarrayVariable.create(tx, proxy) + + def method_tolist(self): + from ..symbolic_convert import InstructionTranslator + from .builder import SourcelessBuilder + + tx = InstructionTranslator.current_tx() + + def tolist(tensor, sub_proxy): + def wrap(i, sub_proxy): + return SymNodeVariable.create( + tx, + sub_proxy.item(), + sym_num=tx.output.shape_env.create_unbacked_symint(), + ) + + if tensor.dtype not in [ + torch.int8, + torch.int16, + torch.int32, + torch.int64, + ]: + unimplemented("Input tensor for tolist must be an integer tensor") + + if tensor.dim() == 0: + return wrap(tensor, sub_proxy) + + if tensor.dim() == 1: + return [wrap(val, sub_proxy[i]) for i, val in enumerate(tensor)] + + return [ + tolist(sub_tensor, sub_proxy=sub_proxy[i]) + for i, sub_tensor in enumerate(tensor) + ] + + tensor = self.as_proxy().node.meta["example_value"] + out = tolist(tensor, self.as_proxy()) + return SourcelessBuilder()(tx, out) + + def method_backward(self, *args, **kwargs): + unimplemented("Tensor.backward") + + def method_data_ptr(self, *args, **kwargs): + unimplemented("Tensor.data_ptr") + + def method_item(self, *args, **kwargs): + if not config.capture_scalar_outputs: + unimplemented("Tensor.item") + + def method___len__(self): + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + return self.call_method(tx, "size", [ConstantVariable.create(0)], {}) + + def method___setitem__(self, key, value): + def has_bool_key(v): + if isinstance(v, TensorVariable): + return v.dtype in (torch.bool, torch.int8) + elif isinstance(v, variables.TupleVariable): + return any(has_bool_key(item) for item in v.items) + else: + return False + + if ( + has_bool_key(key) + and isinstance(value, TensorVariable) + and value.requires_grad + and torch.is_grad_enabled() + ): + unimplemented( + "boolean masking setitem backwards, see https://github.com/pytorch/pytorch/issues/114123" + ) + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + tx.output.create_proxy( + "call_function", + operator.setitem, + *proxy_args_kwargs([self, key, value], {}), + ) + return ConstantVariable.create(None) + + def method_resize_(self, *args, **kwargs): + unimplemented("Tensor.resize_") + + def method_resize_as_(self, *args, **kwargs): + unimplemented("Tensor.resize_as_") + + def method_set_(self, *args, **kwargs): + if len(args) > 1: + # torch.Tensor.set_() has several overloads. + # aten::set_.source_Tensor(Tensor) gets special handling + # in AOTAutograd and functionalization, because it is the most common + # overload and is used by FSDP. + # graph-breaking on aten::set_source_Tensor_storage_offset for now, + # unless we find that we need to make it work. 
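For context on the `method_item` and `method_tolist` handling above, a minimal sketch of the user-facing behavior, assuming `capture_scalar_outputs` keeps its documented meaning (opt-in tracing of `.item()` as an unbacked symbolic scalar instead of a graph break):

import torch
import torch._dynamo as dynamo

def f(x):
    # .item() normally forces a graph break; with capture_scalar_outputs=True
    # it is traced as an unbacked symbolic scalar (see method_item above).
    return x.sum().item() + 1.0

dynamo.reset()
dynamo.config.capture_scalar_outputs = True  # assumption: default is False in this version
print(torch.compile(f)(torch.ones(4)))       # expected: 5.0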
+ unimplemented("Tensor.set_.source_Tensor_storage_offset") + + def method_add_(self, other, *, alpha=None): + if alpha is not None: + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + result = variables.TorchInGraphFunctionVariable(torch.mul).call_function( + tx, [other, alpha], {} + ) + return self.call_method(tx, "add_", [result], {}) + + def method_addcdiv_(self, tensor1, tensor2, *, value=None): + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + if value is not None: + result = variables.TorchInGraphFunctionVariable(torch.div).call_function( + tx, [tensor1, tensor2], {} + ) + result = variables.TorchInGraphFunctionVariable(torch.mul).call_function( + tx, [result, value], {} + ) + return self.call_method(tx, "add_", [result], {}) + + def method___contains__(self, arg): + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + + # Rewrite __contains__ here so that downstream passes can trace through + # without dealing with unbacked symbool. Roughly the code we translate is: + # def __contains__(self, x): + # return (x == self).any().item() + result = variables.TorchInGraphFunctionVariable(torch.eq).call_function( + tx, [self, arg], {} + ) + result = variables.TorchInGraphFunctionVariable(torch.any).call_function( + tx, [result], {} + ) + return result.call_method(tx, "item", [], {}) + + def method_redistribute(self, *args, **kwargs): + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + # rewrite non-primitive args/kwargs to be included in the on-the-fly prim function + # and rewrite args to have only proxyable args, then insert call_function + args_as_value = [x.as_python_constant() for x in args] + kwargs_as_value = {k: v.as_python_constant() for k, v in kwargs.items()} + + def redistribute_fn_with_prim_types(x): + return x.redistribute(*args_as_value, **kwargs_as_value) + + # attach the same function name for better debugging + redistribute_fn_with_prim_types.__name__ = "prim_redistribute" + + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + redistribute_fn_with_prim_types, + *proxy_args_kwargs([self], {}), + ), + ) + + def method_register_hook(self, *args, **kwargs): + return self._method_register_hook("register_hook", *args, **kwargs) + + def method_register_post_accumulate_grad_hook(self, *args, **kwargs): + return self._method_register_hook( + "register_post_accumulate_grad_hook", *args, **kwargs + ) + + def _method_register_hook(self, name: str, hook: VariableTracker): + # Note - do not arbitrarily add hooks here - make sure they match the same contract + # see [On tensor.register_hook] + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + + if not self.source: + if not compiled_autograd.compiled_autograd_enabled: + # TODO(voz): + # We can relax this by speculating the callable and ensuring that it doesn't modify arbitrary + # python state. + # We *Must* be in compiled_autograd here because backward hooks can contain anything, and it is unsafe to run + # them in a compiled bwd without re-entering dynamo as compiled_autograd does. + # + # Discussion point 1 - Should we bypass this if nopython/fullgraph = True? + # No. Because this was going to be a graph break anyway - this check does not + # introduce new graph breaks where there were none. 
+ # + # Discussion point 2 - Should we defer this check to backwards? + # No. Because compiled autograd is not yet ready for prime time. As such, if we defer, a user + # would have no recourse - their forward traces just fine, but will fail at backwards unless + # compiled_autograd is enabled. If compiled_autograd fails (there are a lot of failures today) + # then they have nothing they can do except disable compile. + unimplemented( + "Compilation of intermediate hooks requires compiled autograd" + ) + + hook_name, bw_state_proxy = tx.output.add_backward_state_hook(hook) + + def _register_hook_trampoline(tensor, bw_state): + register_hook = getattr(tensor, name) + register_hook( + functools.partial( + trace_wrapped, + fn=call_hook_from_backward_state, + bw_state=bw_state, + hook_name=hook_name, + ) + ) + # TODO(jansel): returning None here is wrong, it should be + # RemovableHandle, but we need some extra work to support + # this properly. + return None + + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_function", + _register_hook_trampoline, + (self.as_proxy(), bw_state_proxy), + {}, + ), + ) + + handle_variable = variables.RemovableHandleVariable( + mutable_local=variables.base.MutableLocal(), + ) + tx.output.side_effects.register_hook(self, hook, handle_variable, name) + return handle_variable + + def method_requires_grad_(self, requires_grad=True): + if requires_grad is not True: + requires_grad = requires_grad.as_python_constant() + + if self.as_proxy().node.meta["example_value"].requires_grad != requires_grad: + unimplemented("Tensor.requires_grad_") + else: + return self + + def method_new(self, *args, **kwargs): + # Convert x.new(torch.Size) into x.new_empty(torch.Size), + # as Tensor.new acts differently with a Size input versus a tuple input. + if (len(args) == 1 and isinstance(args[0], SizeVariable)) or ( + len(args) >= 1 + and all( + isinstance(a, ConstantVariable) and a.python_type() == int for a in args + ) + ): + from ..symbolic_convert import InstructionTranslator + + return self.call_method( + InstructionTranslator.current_tx(), "new_empty", args, kwargs + ) + + def method_untyped_storage(self): + return UntypedStorageVariable( + self, self.as_proxy().node.meta["example_value"].untyped_storage() + ) + + def rename(self, tx, name): + self.proxy.node._rename(name) + return super().rename(tx, name) + + +class SymNodeVariable(VariableTracker): + """ + Represents a symbolic size, e.g., as returned by tensor.size(0) + """ + + @classmethod + def create(cls, tx, proxy, sym_num, **options): + if "example_value" in proxy.node.meta: + assert proxy.node.meta["example_value"] == sym_num + if sym_num is None: + sym_num = get_fake_value(proxy.node, tx) + proxy.node.meta["example_value"] = sym_num + + if isinstance(sym_num, (sympy.Integer, int, bool)): + sym_num = int(sym_num) if isinstance(sym_num, sympy.Integer) else sym_num + return ConstantVariable.create(sym_num) + + return SymNodeVariable(proxy, sym_num, **options) + + def __init__(self, proxy, sym_num, **kwargs): + super().__init__(**kwargs) + self.proxy = proxy + # TODO: Should we allow non SymTypes here? 
Today it is allowed + self.sym_num = sym_num + + def python_type(self): + if isinstance(self.sym_num, SymTypes): + return self.sym_num.node.pytype + else: + return type(self.sym_num) + + def as_proxy(self): + return self.proxy + + def evaluate_expr(self, output_graph=None): + try: + return guard_scalar(self.sym_num) + except GuardOnDataDependentSymNode as e: + raise UserError( # noqa: TRY200 + UserErrorType.ANTI_PATTERN, + f"Consider annotating your code using torch._constrain_as_*(). {str(e)}", + case_name="constrain_as_size_example", + ) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_method", + name, + *proxy_args_kwargs([self, *args], kwargs), + ), + ) + + +class NumpyNdarrayVariable(TensorVariable): + """ + Represents a np.ndarray, but backed by torch Tensor via torch._numpy.ndarray. + Use this for Tensor.numpy() call. + """ + + @staticmethod + def create(tx, proxy, **options): + from .builder import wrap_fx_proxy_cls + + return wrap_fx_proxy_cls( + target_cls=NumpyNdarrayVariable, + tx=tx, + proxy=proxy, + **options, + ) + + def var_getattr(self, tx, name): + # NB: This INTENTIONALLY does not call super(), because there is + # no intrinsic reason ndarray properties are related to Tensor + # properties. The inheritance here is for implementation sharing. + + from ..utils import numpy_attr_wrapper + from .builder import wrap_fx_proxy + + result = None + + example_value = self.as_proxy().node.meta["example_value"] + example_ndarray = tnp.ndarray(example_value) + + def insert_into_graph(): + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_function", numpy_attr_wrapper, (self.as_proxy(), name), {} + ), + ) + + if name in ["T", "real", "imag"]: + proxy = tx.output.create_proxy( + "call_function", + numpy_attr_wrapper, + (self.as_proxy(), name), + {}, + ) + result = NumpyNdarrayVariable.create(tx, proxy) + + # These are awkward to implement. The standard playbook for torch._numpy + # interop is to trace a call into the torch._numpy wrapper which works for + # Tensor operations. However, we don't want to do this for calls + # that don't return Tensors, because in those cases we may not want + # to trace the attribute access into the graph at all (it is sort + # of harmless to do so, because AOTAutograd will eliminate them, + # but it's best not to trace them in to begin with.) But in any + # case, tracing these into the graph is like trying to fit a square + # peg into a round hole; best not to do it. So instead we + # painstakingly implement these by hand + # + # NB: only ALWAYS specialized attributes can go here; notably, + # size/shape not allowed! 
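As a rough illustration of why the comment above excludes size/shape: under dynamic shapes, rank and element size are fixed by the input's metadata, while an individual dimension may be a SymInt. A minimal sketch (the exact specialization behavior depends on the dynamic-shape configuration):

import torch

@torch.compile(dynamic=True)
def f(x):
    # ndim and element_size() depend only on rank/dtype, so they can be
    # treated as constants; x.shape[0] may be symbolic and stay in the graph.
    return x.shape[0] + x.ndim + x.element_size()

print(f(torch.zeros(5)))   # 5 + 1 + 4 = 10
print(f(torch.zeros(11)))  # 11 + 1 + 4 = 16, ideally without recompiling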
+ elif name in ("ndim", "itemsize"): + return ConstantVariable.create(getattr(example_ndarray, name)) + elif name in ("shape", "stride"): + if not has_free_symbols(r := getattr(example_ndarray, name)): + return ConstantVariable.create(tuple(int(r) for r in r)) + return insert_into_graph() + elif name == "size": + if not has_free_symbols(r := example_ndarray.size): + return ConstantVariable.create(int(r)) + return insert_into_graph() + elif name in ["base", "flags", "dtype"]: + unimplemented(f"TODO: add support for ndarray.{name}") + elif name in ["__version__"]: + unimplemented("delegate np.__version__ to NumPy") + if result is None: + raise NotImplementedError() + return result + + @staticmethod + def patch_args(name, args, kwargs): + if name == "clip": + kwargs_rename = {"a_min": "min", "a_max": "max"} + kwargs = {kwargs_rename.get(k, k): v for k, v in kwargs.items()} + return args, kwargs + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from ..utils import numpy_method_wrapper + + args, kwargs = self.patch_args(name, args, kwargs) + + if name in ["__len__", "size", "tolist"]: + # delegate back to TensorVariable + return super().call_method(tx, name, args, kwargs) + if name == "tobytes": + unimplemented("tobytes is not modelled in torch._numpy") + proxy = tx.output.create_proxy( + "call_function", + numpy_method_wrapper(name), + *proxy_args_kwargs([self] + list(args), kwargs), + ) + return NumpyNdarrayVariable.create(tx, proxy) + + def python_type(self): + return np.ndarray + + +class UnspecializedPythonVariable(TensorVariable): + """ + This is a 1-element tensor represents unspecialized python float/int. + """ + + def __init__( + self, proxy: torch.fx.Proxy, *, raw_value=None, need_unwrap=True, **kwargs + ): + super().__init__(proxy, **kwargs) + self.raw_value = raw_value + self.need_unwrap = need_unwrap + + @classmethod + def from_tensor_variable(cls, tensor_variable, raw_value, need_unwrap=True): + # Convert a `TensorVariable` instance into an `UnspecializedPythonVariable` instance. + return UnspecializedPythonVariable( + **dict(tensor_variable.__dict__), + raw_value=raw_value, + need_unwrap=need_unwrap, + ) + + +class FakeItemVariable(TensorVariable): + """An unspecialized python variable which prevents access to the underlying raw value. 
+ This is needed if item is called on a FakeTensor.""" + + def __init__(self, proxy: torch.fx.Proxy, **kwargs): + need_unwrap = kwargs.pop("need_unwrap", False) + super().__init__(proxy, **kwargs) + self.need_unwrap = need_unwrap + + @classmethod + def from_tensor_variable(cls, tensor_variable): + return FakeItemVariable(**dict(tensor_variable.__dict__)) + + +class TensorSubclassVariable(VariableTracker): + def __init__(self, value, *args, **kwargs): + self.value = value + super().__init__(*args, **kwargs) + + def call_function( + self, tx, args: List[VariableTracker], kwargs: Dict[str, VariableTracker] + ) -> VariableTracker: + if len(args) == 1 and isinstance(args[0], TensorVariable): + from .builder import VariableBuilder + from .torch_function import TensorWithTFOverrideVariable + + torch_fn = VariableBuilder( + tx, AttrSource(self.source, "__torch_function__") + )(self.value.__torch_function__) + + return TensorWithTFOverrideVariable.from_tensor_var( + tx, args[0], self.value, torch_fn + ) + + return super().call_function(tx, args, kwargs) + + def as_python_constant(self): + return self.value + + def python_type(self): + return type(self.value) + + +class UntypedStorageVariable(VariableTracker): + _nonvar_fields = { + "example_value", + *VariableTracker._nonvar_fields, + } + + def __init__( + self, + from_tensor: TensorVariable, + example_value: torch.UntypedStorage, + **kwargs, + ): + super().__init__(**kwargs), + self.from_tensor = from_tensor + # Example_value will always have device="meta" + self.example_value = example_value + + def call_method( + self, + tx, + name, + args: List[VariableTracker], + kwargs: Dict[str, VariableTracker], + ) -> VariableTracker: + if name == "size": + assert not args + assert not kwargs + result = self.example_value.size() + if not has_free_symbols(result): + # avoid creating a node in the graph + return ConstantVariable.create(int(result)) + else: + from ..external_utils import untyped_storage_size + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_function", + untyped_storage_size, + (self.from_tensor.as_proxy(),), + {}, + ), + ) + if name == "resize_" and len(args) == 1: + assert not kwargs + tx.output.create_proxy( + "call_function", + torch.ops.inductor.resize_storage_bytes_, + (self.from_tensor.as_proxy(), args[0].as_proxy()), + {}, + ) + return self + + return super().call_method(tx, name, args, kwargs) + + def reconstruct(self, codegen): + codegen(self.from_tensor) + codegen.append_output(codegen.create_load_method("untyped_storage")) + codegen.extend_output(create_call_method(0)) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/torch.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/torch.py new file mode 100644 index 0000000000000000000000000000000000000000..dc5fc0a2312dd687eaa48c847c7bef4107b49387 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/torch.py @@ -0,0 +1,823 @@ +# mypy: ignore-errors + +import inspect +import logging + +import math +import re +from typing import Dict, List + +import torch._C +import torch._refs +import torch.fx +import torch.nn +import torch.onnx.operators +from torch._logging import warning_once + +from torch._streambase import _StreamBase +from ..._guards import TracingContext +from .. 
import config, polyfill, variables +from ..codegen import PyCodegen +from ..device_interface import get_registered_device_interfaces +from ..exc import unimplemented +from ..guards import GuardBuilder, install_guard +from ..source import SyntheticLocalSource +from ..utils import ( + check_constant_args, + check_unspec_python_args, + guard_if_dyn, + has_torch_function, + hashable, + product, + proxy_args_kwargs, + unwrap_if_wrapper, +) +from .base import VariableTracker +from .ctx_manager import ( + AutocastModeVariable, + NullContextVariable, + TorchFunctionDisableVariable, +) +from .distributed import is_constant_pg_functions, is_from_local, ProcessGroupVariable +from .lists import ListVariable, TupleVariable +from .torch_function import can_dispatch_torch_function, dispatch_torch_function + +try: + import numpy as np +except ModuleNotFoundError: + np = None + +log = logging.getLogger(__name__) + +supported_ctx_manager_classes = { + torch.profiler.profiler.profile, + torch.autograd.profiler.profile, + torch.autograd.profiler.record_function, + torch._C.DisableTorchFunctionSubclass, + torch._functorch.vmap.vmap_increment_nesting, + torch._functorch.eager_transforms.grad_increment_nesting, + torch._functorch.eager_transforms.enable_inplace_requires_grad, + torch.amp.autocast_mode.autocast, + torch.autograd.grad_mode.enable_grad, + torch.autograd.grad_mode.inference_mode, + torch.autograd.grad_mode.no_grad, + torch.autograd.grad_mode.set_grad_enabled, + torch.autograd.graph.disable_saved_tensors_hooks, + torch.cpu.amp.autocast_mode.autocast, + torch.cuda.amp.autocast_mode.autocast, +} + + +REWRITE_OPS_TO_TENSOR_SIZE_METHOD = [ + torch.onnx.operators.shape_as_tensor, + torch._shape_as_tensor, +] + +constant_fold_functions = [ + torch._assert, + torch._utils._get_device_index, + torch._C._get_cublas_allow_tf32, + torch.cuda.get_device_properties, + torch.cuda.is_available, + torch.distributed.is_available, + torch.get_autocast_gpu_dtype, + torch.get_default_dtype, + torch.is_autocast_cache_enabled, + torch.is_autocast_cpu_enabled, + torch.is_autocast_enabled, + torch.is_complex, + torch.is_floating_point, + torch.nn.functional._Reduction.get_enum, + torch.promote_types, + torch._C._get_privateuse1_backend_name, +] + + +if torch.distributed.is_available(): + constant_fold_functions.extend( + [ + torch.distributed.is_initialized, + torch.distributed.get_rank, + torch.distributed.get_world_size, + ] + ) + + +tracing_state_functions = { + torch.jit.is_scripting: False, + torch.jit.is_tracing: False, + torch._C._get_tracing_state: None, + torch.fx._symbolic_trace.is_fx_tracing: False, + torch.onnx.is_in_onnx_export: False, + torch._dynamo.external_utils.is_compiling: True, + torch._utils.is_compiling: True, + torch.compiler.is_compiling: True, + torch.compiler.is_dynamo_compiling: True, +} + + +class BaseTorchVariable(VariableTracker): + """common base for all torch.* functions, classes, modules and other things""" + + @classmethod + def create_with_source(cls, value, source): + install_guard(source.make_guard(GuardBuilder.FUNCTION_MATCH)) + return cls( + value, + source=source, + ) + + def __init__(self, value, **kwargs): + super().__init__(**kwargs) + self.value = value + + def reconstruct(self, codegen): + try: + name = f"{self.value.__module__}.{self.value.__name__}" + except Exception: + name = f"torch_obj_{id(self.value)}" + unique_var_name = "__" + re.sub(r"[^a-zA-Z0-9_]+", "_", name) + codegen.extend_output( + codegen.setup_globally_cached(unique_var_name, self.value, False) + ) + + def 
as_proxy(self): + return self.value + + def python_type(self): + return type(self.value) + + def as_python_constant(self): + return self.value + + def call_hasattr(self, tx, name): + result = hasattr(self.value, name) + return variables.ConstantVariable.create(result) + + def can_constant_fold_through(self): + if self.value in constant_fold_functions: + return True + return getattr(self.value, "__module__", None) == "math" + + +class TorchCtxManagerClassVariable(BaseTorchVariable): + """Points to a context manager class in torch.* that dynamo has implementations""" + + def __repr__(self): + return f"TorchCtxManagerClassVariable({self.value})" + + @staticmethod + def is_matching_cls(value): + # Unwrap if it's a functools.lru_cache wrapper + value = unwrap_if_wrapper(value) + # We can't do isinstance(value, type) check because some ctx managers + # are implemented as a function decorated by contextlib.contextmanager, + # E.g., torch._functorch.vmap.vmap_increment_nesting. + return hashable(value) and value in supported_ctx_manager_classes + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from . import ( + DisabledSavedTensorsHooksVariable, + GradIncrementNestingCtxManagerVariable, + GradInplaceRequiresGradCtxManagerVariable, + GradModeVariable, + InferenceModeVariable, + StreamVariable, + VmapIncrementNestingCtxManagerVariable, + ) + + if self.value is torch.no_grad: + if len(args) == 1 and isinstance( + args[0], variables.functions.BaseUserFunctionVariable + ): + ctx = GradModeVariable.create(tx, False) + return ctx.call_function(tx, args, kwargs) + else: + return GradModeVariable.create(tx, False) + elif self.value is torch.enable_grad: + if len(args) == 1 and isinstance( + args[0], variables.functions.BaseUserFunctionVariable + ): + ctx = GradModeVariable.create(tx, True) + return ctx.call_function(tx, args, kwargs) + return GradModeVariable.create(tx, True) + elif self.value is torch.set_grad_enabled and len(args) == 1: + return GradModeVariable.create( + tx, args[0].as_python_constant(), initialized=True + ) + elif self.value is torch.inference_mode: + assert len(args) <= 1 and len(kwargs) == 0 + inf_mode = args[0].as_python_constant() if len(args) == 1 else True + return InferenceModeVariable.create(tx, inf_mode) + elif inspect.isclass(self.value) and issubclass(self.value, _StreamBase): + from torch._dynamo.variables.builder import wrap_fx_proxy_cls + + return wrap_fx_proxy_cls( + StreamVariable, + tx, + tx.output.create_proxy( + "call_function", + self.value, + (), + {}, + ), + ) + elif self.value in ( + torch.amp.autocast_mode.autocast, + torch.cuda.amp.autocast, + torch.cpu.amp.autocast, + ): + return AutocastModeVariable.create(self.value, args, kwargs) + elif self.value in ( + torch.profiler.profile, + torch.profiler.record_function, + torch.autograd.profiler.profile, + torch.autograd.profiler.record_function, + ): + warning_once(log, "Profiler function %s will be ignored", self.value) + return NullContextVariable() + elif self.value is torch._C.DisableTorchFunctionSubclass: + assert not (args or kwargs) + return TorchFunctionDisableVariable.create(tx) + elif self.value is torch._functorch.vmap.vmap_increment_nesting: + assert len(args) == 2 + return VmapIncrementNestingCtxManagerVariable.create( + tx, + [guard_if_dyn(x) for x in args], + ) + elif self.value is torch._functorch.eager_transforms.grad_increment_nesting: + assert len(args) == 0 + return GradIncrementNestingCtxManagerVariable.create(tx) 
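The branches above rewrite well-known context managers into dynamo's own ctx-manager variables rather than tracing their Python bodies. A minimal sketch of the pattern they support:

import torch

@torch.compile
def f(x):
    with torch.no_grad():   # handled by the GradModeVariable branch above
        y = x * 2
    return y + x            # grad mode is restored for the rest of the graph

print(f(torch.ones(2, requires_grad=True)))  # expected: tensor([3., 3.], grad_fn=...)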
+ elif ( + self.value is torch._functorch.eager_transforms.enable_inplace_requires_grad + ): + assert len(args) == 1 + return GradInplaceRequiresGradCtxManagerVariable.create( + tx, + [guard_if_dyn(x) for x in args], + ) + elif self.value is torch.autograd.graph.disable_saved_tensors_hooks: + assert len(args) == 1 + return DisabledSavedTensorsHooksVariable.create( + tx, args[0].as_python_constant() + ) + + +class TorchInGraphFunctionVariable(BaseTorchVariable): + """Points to a torch function/method that should be put in FX graph""" + + def __repr__(self): + return f"TorchInGraphFunctionVariable({self.value})" + + def get_function(self): + return self.value + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from . import ( + ConstantVariable, + DeterministicAlgorithmsVariable, + GradModeVariable, + SDPAParamsVariable, + StreamContextVariable, + SymNodeVariable, + TensorVariable, + UserDefinedObjectVariable, + ) + + from .builder import wrap_fx_proxy, wrap_fx_proxy_cls + + constant_args = check_constant_args(args, kwargs) + unspec_python_args = check_unspec_python_args(args, kwargs) + + if self.can_constant_fold_through() and (constant_args or unspec_python_args): + # constant fold + return ConstantVariable.create( + self.as_python_constant()( + *[x.as_python_constant() for x in args], + **{k: v.as_python_constant() for k, v in kwargs.items()}, + ), + ) + elif self.value in tracing_state_functions: + assert not args and not kwargs + # See: https://github.com/pytorch/pytorch/issues/110765 + if self.value in ( + torch._utils.is_compiling, + torch._dynamo.external_utils.is_compiling, + torch.compiler.is_compiling, + torch.compiler.is_dynamo_compiling, + ): + tx.mark_inconsistent_side_effects() + return ConstantVariable.create(tracing_state_functions[self.value]) + elif self.value is torch.overrides.get_default_nowrap_functions.__wrapped__: + # [Note: __torch_function__] we return empty here because we restrict + # the set of functions that we trace __torch_function__ on to + # functions outside of the actual set. 
Implementing this properly will require implementing + # some variable types to track and compare tensor getset descriptors + from .builder import SourcelessBuilder + + return SourcelessBuilder()( + tx, torch.overrides.get_default_nowrap_functions() + ) + elif self.value == torch.ops.inductor.accumulate_grad_.default: + from .builder import SourcelessBuilder + + return tx.inline_user_function_return( + SourcelessBuilder()(tx, polyfill.accumulate_grad), args, kwargs + ) + elif self.value == math.radians and not (constant_args or unspec_python_args): + # Use polyfill to convert math.radians(x) into math.pi * x / 180.0 + from .builder import SourcelessBuilder + + return tx.inline_user_function_return( + SourcelessBuilder()(tx, polyfill.radians), args, kwargs + ) + elif self.value in (torch.is_tensor, torch.overrides.is_tensor_like): + assert len(args) == 1 + if isinstance(args[0], TensorVariable) or ( + self.value is torch.overrides.is_tensor_like + and isinstance(args[0], UserDefinedObjectVariable) + and hasattr(args[0].value, "__torch_function__") + ): + return ConstantVariable.create(True) + else: + return ConstantVariable.create(False) + elif self.value in ( + torch.is_floating_point, + torch.is_complex, + ): + input_arg = None + if args: + input_arg = args[0] + else: + assert "input" in kwargs + input_arg = kwargs["input"] + if isinstance(input_arg, TensorVariable) and input_arg.dtype is not None: + if self.value is torch.is_floating_point: + return ConstantVariable.create(input_arg.dtype.is_floating_point) + elif self.value is torch.is_complex: + return ConstantVariable.create(input_arg.dtype.is_complex) + else: + raise AssertionError(f"calling {self.value}") + elif ( + self.value is torch.numel + and isinstance(args[0], TensorVariable) + and args[0].size is not None + ): + return ConstantVariable.create(product(args[0].size)) + elif self.value in REWRITE_OPS_TO_TENSOR_SIZE_METHOD: + assert len(args) == 1 + assert isinstance(args[0], TensorVariable) + return args[0].call_method(tx, "size", [], {}) + elif self.value in ( + torch.nn.modules.utils._single, + torch.nn.modules.utils._pair, + torch.nn.modules.utils._triple, + torch.nn.modules.utils._quadruple, + torch.nn.modules.utils._ntuple, + ): + return self._call_ntuple(tx, args, kwargs) + elif self.value is torch.is_grad_enabled: + assert not (args or kwargs) + install_guard(GradModeVariable._guards_singleton) + return ConstantVariable.create(torch.is_grad_enabled()) + elif self.value is torch.use_deterministic_algorithms and len(args) == 1: + return DeterministicAlgorithmsVariable.create( + tx, args[0].as_python_constant() + ) + elif self.value is torch.are_deterministic_algorithms_enabled: + assert not (args or kwargs) + install_guard(DeterministicAlgorithmsVariable._guards_singleton) + return ConstantVariable.create(torch.are_deterministic_algorithms_enabled()) + elif self.value is torch._C._is_torch_function_enabled: + assert not (args or kwargs) + install_guard(TorchFunctionDisableVariable._guards_singleton) + return ConstantVariable.create(tx.output.torch_function_enabled) + elif self.value in ( + torch.overrides.has_torch_function, + torch.overrides.has_torch_function_variadic, + torch.overrides.has_torch_function_unary, + ): + assert not kwargs + elems = ( + args[0].unpack_var_sequence(tx) + if len(args) == 1 and isinstance(args[0], TupleVariable) + else args + ) + return ConstantVariable.create( + any(has_torch_function(x) for x in elems), + ) + elif any( + self.value is method + for method in [ + device_interface.stream + for 
_, device_interface in get_registered_device_interfaces() + ] + ): + assert len(args) == 1 + return StreamContextVariable.create(tx, args[0]) + elif self.value is torch.from_numpy: + if not config.trace_numpy: + unimplemented("torch.from_numpy. config.trace_numpy is False") + if not np: + unimplemented("torch.from_numpy. NumPy is not available") + return wrap_fx_proxy_cls( + target_cls=TensorVariable, + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + torch.as_tensor, + *proxy_args_kwargs(args, {}), + ), + example_value=None, + ) + elif can_dispatch_torch_function(tx, args, kwargs): + return dispatch_torch_function(tx, self, args, kwargs) + elif self.value is torch.jit.annotate: + assert len(args) == 2 + return args[1] + elif self.value is torch.backends.cudnn.is_acceptable: + # is_acceptable(tensor) returns true if + # (a) tensor dtype/device are supported by cudnn + # (b) cudnn is available + # (c) some initialization has completed + # technically, it depends on some global state from (c) (torch.backends.cudnn.__cudnn_version) + assert ( + len(args) == 1 or "tensor" in kwargs + ), "Expect 1 input to cudnn.is_acceptable" + tensor_variable = args[0] if len(args) > 0 else kwargs["tensor"] + assert isinstance( + tensor_variable, TensorVariable + ), "Expect input to cudnn.is_acceptable to be a tensor" + tensor_inp = torch.tensor( + 0, dtype=tensor_variable.dtype, device=tensor_variable.device + ) + return ConstantVariable.create( + torch.backends.cudnn.is_acceptable(tensor_inp) + ) + elif self.value is torch.utils.hooks.BackwardHook: + return variables.BackwardHookVariable.create(tx, *args, **kwargs) + elif self.value is torch.nn.Parameter: + return self.call_nn_parameter(tx, *args, **kwargs) + elif ( + self.value == torch.numel + and len(args) == 1 + and isinstance(args[0], TensorVariable) + and len(kwargs) == 0 + ): + # TODO(voz): This is rewritten as a call_method because + # torch.numel(x) w/ sym shapes raises a RuntimeError and x.numel() does not + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_method", + "numel", + *proxy_args_kwargs(args, kwargs), + ), + ) + # TODO: These special cases shouldn't be necessary; we should + # generically support torch.ops that return int + elif ( + self.value in (torch.ops.aten.sym_size, torch.ops.aten.sym_size.int) + and len(args) == 2 + and len(kwargs) == 0 + and isinstance(args[0], TensorVariable) + ): + # we see this when retracing already traced code + return args[0].call_method(tx, "size", [args[1]], {}) + elif ( + self.value in (torch.ops.aten.sym_stride, torch.ops.aten.sym_stride.int) + and len(args) == 2 + and len(kwargs) == 0 + and isinstance(args[0], TensorVariable) + ): + return args[0].call_method(tx, "stride", [args[1]], {}) + elif ( + self.value == torch.addcdiv + and len(args) == 3 + and "value" in kwargs + and len(kwargs) == 1 + ): + # decompose addcdiv into constituent ops, prevents a graph break due to converting + # value to a scalar + result = TorchInGraphFunctionVariable(torch.div).call_function( + tx, args[1:], {} + ) + result = TorchInGraphFunctionVariable(torch.mul).call_function( + tx, [result, kwargs["value"]], {} + ) + return TorchInGraphFunctionVariable(torch.add).call_function( + tx, [args[0], result], {} + ) + elif ( + self.value is torch._assert + and len(args) >= 1 + and ( + (args[0].is_python_constant() and args[0].as_python_constant()) + or ( + isinstance(args[0], variables.SymNodeVariable) + and args[0].evaluate_expr() + ) + ) + ): + return ConstantVariable(None) + elif 
SDPAParamsVariable.is_sdpa_params(self.value): + return wrap_fx_proxy( + tx, + proxy=tx.output.create_proxy( + "call_function", + torch._C._SDPAParams, + *proxy_args_kwargs(args, kwargs), + ), + param_vars=args, + ) + elif is_constant_pg_functions(self.value): + # because the input is a "ProcessGroupVariable", we'll be guarding on its + # ID_MATCH based on how it was constructed. + + # We desugar it at trace-time into ranks by directly calling util + # bake the result into the trace + if len(args) == 1: + # group or group name + assert isinstance(args[0], (ProcessGroupVariable, ConstantVariable)) + elif len(args) == 2: + # ranks + tag + assert isinstance(args[0], ListVariable) and isinstance( + args[1], ConstantVariable + ) + else: + raise AssertionError( + f"Invalid group value ({args}) for constant pg " + f"function {self.value}" + ) + args_as_value = [arg.as_python_constant() for arg in args] + invocation_result = self.value(*args_as_value) + + # Note - while we *could* cook up sources around invocations, like a FunctionSource + # the space of invoking functions in the middle of the guard chain is very iffy. As such, + # guard propagation via options is the best we can do. + from .builder import SourcelessBuilder + + return SourcelessBuilder()(tx, invocation_result) + elif is_from_local(self.value): + # rewrite non-primitive args/kwargs to be included in the on-the-fly prim function + # and rewrite args to have only proxyable args, then insert call_function + args_as_value = [x.as_python_constant() for x in args[1:]] + kwargs_as_value = {k: v.as_python_constant() for k, v in kwargs.items()} + + def fn_with_prim_types(x): + return self.value(x, *args_as_value, **kwargs_as_value) + + # attach the same function name for better debugging + fn_with_prim_types.__name__ = "prim " + self.value.__name__ + + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + fn_with_prim_types, + *proxy_args_kwargs([args[0]], {}), + ), + ) + elif ( + self.value is torch.nested.nested_tensor + and kwargs.get("layout", torch.strided) == torch.strided + ): + raise unimplemented("torch.compile does not support strided NestedTensor") + elif self.value is torch.nn.functional.one_hot and ( + len(args) + len(kwargs) == 1 + or ( + len(args) == 2 + and args[1].is_python_constant() + and args[1].as_python_constant() == -1 + ) + ): + raise unimplemented( + "torch.nn.functional.one_hot with data-dependent output shape" + ) + elif ( + self.value is torch.fx.experimental.symbolic_shapes.guard_size_oblivious + and len(args) == 1 + and isinstance(args[0], SymNodeVariable) + ): + # TODO: this probably should be folded somewhere else but I'm not + # sure where + # TODO: some of the other symbolic_shapes special tools can also + # get this treatment too + (cond,) = args + return variables.ConstantVariable.create( + torch.fx.experimental.symbolic_shapes.guard_size_oblivious(cond.sym_num) + ) + elif self.value is torch._C._autograd._unsafe_set_version_counter: + from ..tensor_version_op import _unsafe_set_version_counter + + return TorchInGraphFunctionVariable( + _unsafe_set_version_counter + ).call_function(tx, args, kwargs) + else: + any_symints_or_symfloats = any(isinstance(x, SymNodeVariable) for x in args) + all_ints_or_floats = all( + isinstance(x, (variables.ConstantVariable, variables.SymNodeVariable)) + for x in args + ) + bin_ops = {"add", "sub", "mul", "div", "sqrt"} + if ( + getattr(self.value, "__module__", "") == "torch" + and self.value.__name__ in bin_ops + and any_symints_or_symfloats 
+ and all_ints_or_floats + ): + msg = f"""\ +Calling {str(self.value)} on only torch.SymInt arguments is not yet supported. +To support this behavior, we need to allow const-propping tensors that store symint data. +For now, dynamo will explicitly graph break when it encounters user code with this behavior. +""" + log.warning(msg) + raise unimplemented(msg) + + # TODO(voz): Replace w/ dynamic shape rewrite table. + # Ideally, we would be able to do this at ctor time, but alas we need a combination + # of value + args to determine this. + fn_ = self.value + if any(isinstance(x, SymNodeVariable) for x in args): + torch_sym_op = f"_sym_{self.value.__name__}" + if getattr(self.value, "__module__", None) == "math" and hasattr( + torch, torch_sym_op + ): + fn_ = getattr(torch, torch_sym_op) + + if fn_ is torch.tensor: + + def check_any_unspec(x): + # NB: This includes UnspecializedPythonVariable + if isinstance(x, (TensorVariable, SymNodeVariable)): + return True + elif isinstance(x, (ListVariable, TupleVariable)): + return any(check_any_unspec(y) for y in x.items) + # TODO: there maybe other recursive structures you need to + # check + else: + return False + + data_arg = None + if args: + data_arg = args[0] + elif "data" in kwargs: + data_arg = kwargs["data"] + + # NB: OK to pass torch.tensor(tensor), this will trace fine + if not isinstance(data_arg, TensorVariable) and check_any_unspec( + data_arg + ): + # This is slower and less canonical, so only use it if we + # have to + fn_ = torch._refs.tensor + + tensor_variable = wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + fn_, + *proxy_args_kwargs(args, kwargs), + ), + ) + + if ( + isinstance(tensor_variable, TensorVariable) + and "requires_grad" in kwargs + and kwargs["requires_grad"].as_python_constant() + ): + unimplemented( + """factory functions that return tensors that require grad are not supported. +Either create the tensor outside the compiled region, or do not set the tensor to require_grad""" + ) + + if "out" in kwargs and not ( + isinstance(kwargs["out"], variables.ConstantVariable) + and kwargs["out"].as_python_constant() is None + ): + # out variants of torch operators like torch.sort and + # torch.sigmoid mutate the tensors in the out field. Track such + # tensors and rewrite the symbolic locals. + if isinstance(tensor_variable, TupleVariable): + assert isinstance(kwargs["out"], (TupleVariable, ListVariable)) + output_tensor_names = [ + tx.find_symbolic_locals_name(x) for x in kwargs["out"].items + ] + for idx, name in enumerate(output_tensor_names): + if name in tx.symbolic_locals: + tx.symbolic_locals[name] = tensor_variable.items[idx] + for out_tensor, result_tensor in zip( + kwargs["out"].items, tensor_variable.items + ): + if ( + out_tensor.source + and out_tensor in tx.output.graphargs + and out_tensor.size != result_tensor.size + ): + # It's hard to get out variants with resizing on graph inputs work + # properly across dynamo/aot/inductor, just fall back. 
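For orientation, the case this block supports (before the resizing fallback below) looks roughly like the following; a sketch, assuming the out buffer is a correctly sized, contiguous tensor:

import torch

@torch.compile
def f(x, buf):
    # out= ops mutate `buf`; the handling here rewrites the symbolic local so
    # later reads of `buf` observe the op's result inside the graph.
    torch.sigmoid(x, out=buf)
    return buf + 1

x = torch.zeros(3)
buf = torch.empty(3)
print(f(x, buf))  # expected: tensor([1.5000, 1.5000, 1.5000])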
+ unimplemented("out variants with resizing on graph inputs") + elif isinstance(tensor_variable, TensorVariable): + assert isinstance(kwargs["out"], TensorVariable) + assert "example_value" in kwargs["out"].proxy.node.meta + fake_tensor = tensor_variable.proxy.node.meta["example_value"] + fake_out = kwargs["out"].proxy.node.meta["example_value"] + if ( + kwargs["out"].source + and kwargs["out"] in tx.output.graphargs + and fake_out.shape != fake_tensor.shape + ): + # It's hard to get out variants with resizing on graph inputs work + # properly across dynamo/aot/inductor, just fall back. + unimplemented("out variants with resizing on graph inputs") + if not torch._prims_common.is_contiguous(fake_out): + # It's difficult to handle strides correctly in functionalization + # when calling an out= op with a non-contiguous out argument + unimplemented( + "out= op was called where output tensor was non-contiguous" + ) + name = tx.find_symbolic_locals_name(kwargs["out"]) + if name in tx.symbolic_locals: + tx.symbolic_locals[name] = tensor_variable + else: + unimplemented(f"out variant of {type(kwargs['out'])}") + + return tensor_variable + + def _call_ntuple(self, tx, args, kwargs): + """inline behavior of torch.nn.modules.utils._ntuple""" + if self.value is torch.nn.modules.utils._ntuple: + count = args[0].as_python_constant() + else: + count = self.value.__closure__[0].cell_contents + assert isinstance(count, int) + assert not kwargs + + def handle_ntuple(value): + if value.has_unpack_var_sequence(tx): + return variables.TupleVariable( + list(value.unpack_var_sequence(tx)), + ) + elif value.is_python_constant(): + # constant prop through it + return variables.ConstantVariable.create( + torch.nn.modules.utils._ntuple(count)(value.as_python_constant()), + ) + else: + unimplemented(f"torch.nn.modules.utils._ntuple({value})") + + if self.value is torch.nn.modules.utils._ntuple: + return variables.LambdaVariable(handle_ntuple) + else: + return handle_ntuple(args[0]) + + @classmethod + def call_nn_parameter(cls, tx, data=None, requires_grad=True): + """A call to torch.nn.Parameter() gets lifted to before the graph""" + if isinstance(requires_grad, variables.VariableTracker): + try: + requires_grad = requires_grad.as_python_constant() + except NotImplementedError: + unimplemented("Parameter(requires_grad=...) not constant") + + if not isinstance(data, variables.TensorVariable): + unimplemented(f"Parameter(data={data}) not implemented") + + # this results in cleaner graphs, but only works for inputs + if data.source: + return cls._nn_param_via_prefix_insert(tx, data, requires_grad) + + unimplemented("Parameter() on non-input") + + @staticmethod + def _nn_param_via_prefix_insert(tx, data, requires_grad): + # Alternate version if we have a .source + from .builder import VariableBuilder + + varname = tx.output.new_var() + + # construct the nn.Parmeter before the graph save it to varname + cg = PyCodegen(tx) + cg.load_import_from("torch.nn", "Parameter") + cg(data.source) + cg(variables.ConstantVariable(requires_grad)) + cg.call_function(2, True) + cg.store(varname) + tx.output.pregraph_bytecode.extend(cg.get_instructions()) + + # add the newly constructed nn.Parameter as a graph input + source = SyntheticLocalSource(varname) + example_value = torch.nn.Parameter( + tx.output.example_value_from_input_node(data.as_proxy().node) + ) + result = VariableBuilder(tx, source)(example_value) + # No need to guard on this since we already guarded on `data`. 
+ # These guards would fail since varname doesn't exist until after the function starts + TracingContext.get().guards_context.dynamo_guards.remove_guards_with_source( + source + ) + return result diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/torch_function.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/torch_function.py new file mode 100644 index 0000000000000000000000000000000000000000..e96d7d783897d86f35525f7751fc10b08eb2d072 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/torch_function.py @@ -0,0 +1,270 @@ +# mypy: ignore-errors + +import inspect +from typing import Dict, List + +import torch.utils._pytree as pytree + +from torch.overrides import _get_overloaded_args, get_default_nowrap_functions +from ..exc import unimplemented +from ..guards import GuardBuilder, install_guard +from ..source import AttrSource, GlobalSource +from ..utils import has_torch_function, is_tensor_base_attr_getter +from .base import VariableTracker +from .constant import ConstantVariable +from .lists import TupleVariable +from .tensor import TensorSubclassVariable, TensorVariable +from .user_defined import UserDefinedObjectVariable + + +# [Note: __torch_function__] This feature is a prototype and has some rough edges (contact mlazos with issues): +# At a high level, a torch function tensor subclass is represented as a TensorWithTFOverrideVariable, which dispatches +# __torch_function__ on attribute accesses, method calls, and torch API calls. +# The following is not supported: +# - triggering __torch_function__ on tensor subclass non-tensor custom attributes +# - graph breaking on mutating guardable tensor properties within a __torch_function__ context, this can cause +# excessive recompiles in certain degenerate cases +# - Matching the exact eager behavior of *ignoring* __torch_function__ objects in non-tensor argument positions of Torch API calls + +# The following is supported: +# - static method impls of __torch_function__ on custom objects; this will trigger on torch API calls with the object as +# any argument +# - triggering __torch_function__ on torch API calls with tensor subclass arguments +# - __torch_function__ calls on base tensor attribute access and method calls for tensor subclass instances +# - matches the dispatch ordering behavior of eager __torch_function__ with subclass/object argumnents in any argument position + +# See https://docs.google.com/document/d/1WBxBSvW3NXhRp9ncmtokJloMLCtF4AYNhJaffvHe8Kw/edit#heading=h.vacn73lozd9w +# for more information on the design. 
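A minimal sketch of the supported usage described in this note, assuming the `traceable_tensor_subclasses` registry mentioned in the config note just below is a plain set:

import torch
import torch._dynamo as dynamo

class MyTensor(torch.Tensor):
    # only a __torch_function__ impl, no custom constructor (the supported shape)
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        return super().__torch_function__(func, types, args, kwargs)

dynamo.config.traceable_tensor_subclasses.add(MyTensor)  # assumption: a set

@torch.compile
def f(x):
    return torch.sin(x) + 1

out = f(torch.ones(2).as_subclass(MyTensor))
print(type(out), out)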
+ +# To enable subclass behavior, add your tensor subclass type to traceable_tensor_subclasses in dynamo/config.py + + +banned_attrs = [ + fn.__self__.__name__ + for fn in get_default_nowrap_functions() + if is_tensor_base_attr_getter(fn) +] + + +def _get_subclass_type(var): + assert isinstance(var, (TensorWithTFOverrideVariable, UserDefinedObjectVariable)) + return var.python_type() + + +def _get_subclass_type_var(tx, var): + assert isinstance(var, (TensorWithTFOverrideVariable, UserDefinedObjectVariable)) + if isinstance(var, TensorWithTFOverrideVariable): + return var.class_type_var(tx) + elif isinstance(var, UserDefinedObjectVariable): + from .builder import SourcelessBuilder, VariableBuilder + + if var.source: + return VariableBuilder(tx, var.source)(var.python_type()) + else: + return SourcelessBuilder()(tx, var.python_type()) + + +def _is_attr_overidden(tx, var, name): + import torch + + overridden = False + try: + attr_val = inspect.getattr_static(var.python_type(), name) + overridden |= attr_val != getattr(torch.Tensor, name) + except AttributeError: + pass + + return overridden + + +def call_torch_function( + tx, torch_function_type, torch_function_var, fn, types, args, kwargs +): + from .builder import SourcelessBuilder + + # signature: + # def __torch_function__(cls, func, types, args=(), kwargs=None): + tf_args = ( + torch_function_type, + fn, + types, + SourcelessBuilder()(tx, tuple(args)), + SourcelessBuilder()(tx, kwargs), + ) + return tx.inline_user_function_return(torch_function_var, tf_args, {}) + + +def build_torch_function_fn(tx, value, source): + from .builder import SourcelessBuilder, VariableBuilder + + if source: + return VariableBuilder( + tx, + AttrSource(AttrSource(source, "__torch_function__"), "__func__"), + )(value.__torch_function__.__func__) + else: + return SourcelessBuilder()(tx, value.__torch_function__.__func__) + + +def can_dispatch_torch_function(tx, args, kwargs): + if tx.output.torch_function_enabled: + all_args = pytree.arg_tree_leaves(*args, **kwargs) + return any(has_torch_function(arg) for arg in all_args) + else: + return False + + +def dispatch_torch_function(tx, fn, args, kwargs): + """Gathers all args that are TensorWithTFOverrideVariable and dispatches based on the ordering in _get_overloaded_args""" + + all_args = pytree.arg_tree_leaves(*args, **kwargs) + overloaded_args = _get_overloaded_args( + [arg for arg in all_args if has_torch_function(arg)], + _get_subclass_type, + ) + + for arg in overloaded_args: + res = arg.call_torch_function( + tx, + fn, + TupleVariable([_get_subclass_type_var(tx, arg) for arg in overloaded_args]), + args, + kwargs, + ) + + if not (isinstance(res, ConstantVariable) and res.value is NotImplemented): + return res + + unimplemented( + f"All __torch_function__ overrides for call {fn} with args {args} and kwargs {kwargs} returned NotImplemented" + ) + + +class TensorWithTFOverrideVariable(TensorVariable): + """ + Represents a tensor subclass instance with a __torch_function__ override. 
+ """ + + def __init__(self, *args, **kwargs): + self.torch_function_fn = kwargs.pop("torch_function_fn") + super().__init__(*args, **kwargs) + + @classmethod + def from_tensor_var(cls, tx, tensor_var, class_type, torch_function_fn): + import torch + + kwargs = dict(tensor_var.__dict__) + assert ( + kwargs.pop("class_type") is torch.Tensor + ), "invalid class type in TensorWithTFOverrideVariable.from_tensor_var" + var = cls(torch_function_fn=torch_function_fn, class_type=class_type, **kwargs) + var.install_global(tx) + return var + + def install_global(self, tx): + # stash the subclass type to rewrap an output tensor if needed + # this is needed because the actual type needs to be available + # each time the compiled artifact is run and outputs a wrapped tensor. + if self.global_mangled_class_name(tx) not in tx.output.global_scope: + # Safe because global_mangled_class_name figures it out + tx.output.install_global_unsafe( + self.global_mangled_class_name(tx), self.class_type + ) + + def python_type(self): + return self.class_type + + def class_type_var(self, tx): + return TensorSubclassVariable( + self.class_type, source=GlobalSource(self.global_mangled_class_name(tx)) + ) + + def global_mangled_class_name(self, tx): + # The global_mangled_class_name should be different for different + # invocations of torch.compile. Otherwise, we can run into a situation + # where multiple torch.compile invocations re-use the same global name, + # but the global's lifetime is tied to the first invocation (and + # may be deleted when the first torch.compile invocation is deleted) + # We mangle it based off of the output_graph's id. + compile_id = tx.output.compile_id + return f"__subclass_{self.class_type.__name__}_{id(self.class_type)}_c{id}" + + def var_getattr(self, tx, name): + # [Note: __torch_function__] We currently only support attributes that are defined on + # base tensors, custom attribute accesses will graph break. + import torch + from .builder import SourcelessBuilder + + if name in banned_attrs or not hasattr(torch.Tensor, name): + unimplemented( + f"Accessing {name} on a tensor subclass with a __torch_function__ override is not supported" + ) + + if _is_attr_overidden(tx, self, name): + unimplemented( + f"Accessing overridden method/attribute {name} on a tensor" + " subclass with a __torch_function__ override is not supported" + ) + + if tx.output.torch_function_enabled: + if self.source: + install_guard( + AttrSource(AttrSource(self.source, "__class__"), name).make_guard( + GuardBuilder.FUNCTION_MATCH + ) + ) + get_fn = SourcelessBuilder()(tx, getattr(torch.Tensor, name).__get__) + + return self.call_torch_function( + tx, + get_fn, + TupleVariable([self.class_type_var(tx)]), + [self], + {}, + ) + else: + return super().var_getattr(tx, name) + + def call_torch_function(self, tx, fn, types, args, kwargs): + return call_torch_function( + tx, + self.class_type_var(tx), + self.torch_function_fn, + fn, + types, + args, + kwargs, + ) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + # This code block implements inlining the __torch_function__ override + # of `call_method`. 
+ if tx.output.torch_function_enabled: + import torch + from .builder import SourcelessBuilder, VariableBuilder + + if _is_attr_overidden(tx, self, name): + unimplemented( + f"Calling overridden method {name} on a tensor" + " subclass with a __torch_function__ override is not supported" + ) + + # [Note: __torch_function__] Currently we only support methods that are defined on tensor + # we will graph break in other cases this will need a bigger overhaul of extracting methods/comparing them for equality + # We've established with the above check that the method is not overridden, so we guard that the method is the same + # as the impl defined on tensor and retrieve it + if self.source: + func_var = VariableBuilder( + tx, AttrSource(AttrSource(self.source, "__class__"), name) + )(inspect.getattr_static(self.python_type(), name)) + else: + func_var = SourcelessBuilder()(tx, getattr(torch.Tensor, name)) + return dispatch_torch_function(tx, func_var, [self] + args, kwargs) + else: + return super().call_method(tx, name, args, kwargs) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/variables/user_defined.py b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/user_defined.py new file mode 100644 index 0000000000000000000000000000000000000000..ece1e320f4c1f6dd2fa3acb9572184c8c821ac03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/variables/user_defined.py @@ -0,0 +1,946 @@ +# mypy: ignore-errors + +import collections +import contextlib +import functools +import importlib +import inspect +import itertools +import random +import sys +import threading +import types +from typing import Dict, List + +from ..bytecode_transformation import create_call_function + +try: + import numpy as np +except ModuleNotFoundError: + np = None + +try: + from torch.utils._cxx_pytree import PyTreeSpec +except ImportError: + PyTreeSpec = type(None) + +import torch._dynamo.config + +import torch.nn +from torch._guards import TracingContext + +from .. 
import variables +from ..exc import unimplemented +from ..guards import GuardBuilder, install_guard +from ..source import AttrSource, GetItemSource, ODictGetItemSource, RandomValueSource +from ..utils import ( + all_hook_names, + build_checkpoint_variable, + check_constant_args, + get_custom_getattr, + has_torch_function, + is_namedtuple_cls, + is_utils_checkpoint, + istype, + namedtuple_fields, + object_has_getattribute, + proxy_args_kwargs, + tensortype_to_dtype, +) +from .base import MutableLocal, VariableTracker +from .ctx_manager import GenericContextWrappingVariable, NullContextVariable +from .dicts import DefaultDictVariable + + +class UserDefinedVariable(VariableTracker): + pass + + +class UserDefinedClassVariable(UserDefinedVariable): + def __init__(self, value, **kwargs): + super().__init__(**kwargs) + self.value = value + + def as_python_constant(self): + return self.value + + def python_type(self): + return type(self.value) + + def as_proxy(self): + return self.value + + def __str__(self): + return f"UserDefinedClassVariable({self.value})" + + @staticmethod + @functools.lru_cache(None) + def _constant_fold_classes(): + return { + torch.device, + torch.finfo, + torch.iinfo, + torch.Size, + } + + @staticmethod + @functools.lru_cache(None) + def _in_graph_classes(): + return set(tensortype_to_dtype.keys()) | { + torch.Tensor, + torch.cuda.Stream, + torch.cuda.Event, + } + + def can_constant_fold_through(self): + return self.value in self._constant_fold_classes() + + def var_getattr(self, tx, name: str) -> "VariableTracker": + from .. import trace_rules + from . import ConstantVariable + from .builder import VariableBuilder + + if name == "__name__": + return ConstantVariable.create(self.value.__name__) + + source = AttrSource(self.source, name) if self.source is not None else None + try: + obj = inspect.getattr_static(self.value, name) + except AttributeError: + obj = None + + if isinstance(obj, staticmethod): + func = obj.__get__(self.value) + if source is not None: + return trace_rules.lookup(func).create_with_source(func, source=source) + else: + return trace_rules.lookup(func)(func) + elif isinstance(obj, classmethod): + return variables.UserMethodVariable(obj.__func__, self, source=source) + elif source and inspect.ismemberdescriptor(obj): + return VariableBuilder(tx, source)(obj.__get__(self.value)) + + # Special handling of collections.OrderedDict.fromkeys() + # Wrap it as GetAttrVariable(collections.OrderedDict, "fromkeys") to make it consistent with + # collections.defaultdict, and both will be handled at UserDefinedClassVariable.call_method(). + # Otherwise, it would be wrapped as UserDefinedObjectVariable(collections.OrderedDict.fromkeys), + # and we need duplicate code to handle both cases. 
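A sketch of the user-level pattern this special-casing (and the matching branch just below) is meant to keep on the fast path:

import collections
import torch

@torch.compile
def f(x):
    # classmethod constructors like OrderedDict.fromkeys are special-cased so
    # they do not fall back to a generic UserDefinedObjectVariable.
    d = collections.OrderedDict.fromkeys(("a", "b"), 0)
    d["a"] = x.sum()
    return d["a"] + d["b"]

print(f(torch.ones(3)))  # expected: tensor(3.)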
+ if self.value is collections.OrderedDict and name == "fromkeys": + return super().var_getattr(tx, name) + + if name in getattr(self.value, "__dict__", {}) or ( + self.value.__module__.startswith("torch.") + or self.value.__module__ == "torch" + ): + if source: + return VariableBuilder(tx, source)(obj) + elif ConstantVariable.is_literal(obj): + return ConstantVariable.create(obj) + + return super().var_getattr(tx, name) + + def _call_cross_entropy_loss(self, tx, args, kwargs): + """ + functional: input, target, weight=None, size_average=None, ignore_index=- 100, reduce=None, reduction='mean', + label_smoothing=0.0 + + non functional ctor: weight=None, size_average=None, ignore_index=- 100, reduce=None, reduction='mean', + label_smoothing=0.0 + + non functional loss call: input, target, optional_output + """ + from . import ConstantVariable + + def normalize_args( + weight=ConstantVariable.create(None), + size_average=ConstantVariable.create(None), + ignore_index=ConstantVariable.create(-100), + reduce=ConstantVariable.create(None), + reduction=ConstantVariable.create("mean"), + label_smoothing=ConstantVariable.create(0.0), + ): + return ( + weight, + size_average, + ignore_index, + reduce, + reduction, + label_smoothing, + ) + + ( + weight, + size_average, + ignore_index, + reduce_arg, + reduction, + label_smoothing, + ) = normalize_args(*args, **kwargs) + + def fake_cross_entropy_loss(input, target): + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + torch.nn.functional.cross_entropy, + *proxy_args_kwargs( + [ + input, + target, + weight, + size_average, + ignore_index, + reduce_arg, + reduction, + label_smoothing, + ], + {}, + ), + ), + ) + + return variables.LambdaVariable(fake_cross_entropy_loss) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if ( + name == "__subclasses__" + and len(args) == 0 + and not kwargs + and "__subclasses__" not in self.value.__dict__ + ): + options = {"mutable_local": MutableLocal()} + subs_as_vars: List[VariableTracker] = list() + for sub in self.value.__subclasses__(): + source = AttrSource(tx.import_source(sub.__module__), sub.__name__) + subs_as_vars.append( + variables.UserDefinedClassVariable(sub, source=source) + ) + + return variables.ListVariable(subs_as_vars, **options) + elif ( + self.value in {collections.OrderedDict, collections.defaultdict} + and name == "fromkeys" + ): + from .builtin import BuiltinVariable + + return BuiltinVariable.call_custom_dict_fromkeys( + tx, self.value, *args, **kwargs + ) + + return super().call_method(tx, name, args, kwargs) + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from ..side_effects import SideEffects + from .builder import SourcelessBuilder, wrap_fx_proxy + from .builtin import BuiltinVariable + + constant_args = check_constant_args(args, kwargs) + + if self.can_constant_fold_through() and constant_args: + # constant fold + return variables.ConstantVariable.create( + self.as_python_constant()( + *[x.as_python_constant() for x in args], + **{k: v.as_python_constant() for k, v in kwargs.items()}, + ), + ) + elif self.value is torch.nn.CrossEntropyLoss: + return self._call_cross_entropy_loss(tx, args, kwargs) + elif self.value is contextlib.nullcontext: + return NullContextVariable() + elif self.value is collections.OrderedDict: + return 
BuiltinVariable.call_custom_dict( + tx, collections.OrderedDict, *args, **kwargs + ) + elif ( + self.value is collections.defaultdict + and len(args) <= 1 + and DefaultDictVariable.is_supported_arg(args[0]) + ): + return DefaultDictVariable( + {}, + collections.defaultdict, + args[0], + mutable_local=MutableLocal(), + ) + elif self.value is collections.deque and not kwargs: + if len(args) == 0: + items = [] + elif len(args) == 1 and args[0].has_unpack_var_sequence(tx): + items = args[0].unpack_var_sequence(tx) + else: + unimplemented("deque() with more than 1 arg not supported") + return variables.lists.DequeVariable(items, mutable_local=MutableLocal()) + elif self.value is functools.partial: + if not args: + unimplemented("functools.partial malformed") + # The first arg, a callable (the ctor below will assert on types) + fn = args[0] + rest_args = args[1:] + # guards for the produced FunctoolsPartialVariable are installed in FunctoolsPartialVariable ctor from the + # args and keywords + return variables.functions.FunctoolsPartialVariable( + fn, args=rest_args, keywords=kwargs + ) + elif ( + issubclass(type(self.value), type) + and hasattr( + self.value, "__enter__" + ) # TODO(voz): These can invoke user code! + and hasattr( + self.value, "__exit__" + ) # TODO(voz): These can invoke user code! + and check_constant_args(args, kwargs) + and self.value.__init__ == object.__init__ + and len(kwargs) == 0 # TODO(ybliang): support kwargs + ): + unwrapped_args = [x.as_python_constant() for x in args] + return GenericContextWrappingVariable( + unwrapped_args, + cm_obj=self.value(*unwrapped_args), + ) + + elif is_namedtuple_cls(self.value): + fields = namedtuple_fields(self.value) + # check if this a quasi-namedtuple or a real one + if self.value.__module__ == "torch.return_types": + # create pseudo-defaults from values of the quasi-namedtuple + field_defaults = dict(zip(fields, args[0].items)) + else: + field_defaults = self.value._field_defaults + + items = list(args) + items.extend([None] * (len(fields) - len(items))) + + var_tracker_kwargs = {} + for field_name, var_tracker in zip(fields, items): + if var_tracker is None: + if field_name in kwargs: + field_var = kwargs[field_name] + else: + assert field_name in field_defaults + field_var = SourcelessBuilder()(tx, field_defaults[field_name]) + var_tracker_kwargs[field_name] = field_var + + for name, value in var_tracker_kwargs.items(): + assert name in fields + items[fields.index(name)] = value + + assert all(x is not None for x in items) + return variables.NamedTupleVariable(items, self.value) + elif ( + inspect.getattr_static(self.value, "__new__", None) in (object.__new__,) + and SideEffects.cls_supports_mutation_side_effects(self.value) + and self.source + ): + var = tx.output.side_effects.track_object_new( + self.source, + self.value, + variables.UnspecializedNNModuleVariable + if issubclass(self.value, torch.nn.Module) + else UserDefinedObjectVariable, + {}, + ) + if ( + inspect.getattr_static(self.value, "__init__", None) + is torch.nn.Module.__init__ + ): + tx.output.side_effects.store_attr( + var, + "__call_nn_module_init", + variables.ConstantVariable.create(True), + ) + return var + else: + var.call_method(tx, "__init__", args, kwargs) + return var + elif variables.CustomizedDictVariable.is_matching_cls(self.value): + options = {"mutable_local": MutableLocal()} + return variables.CustomizedDictVariable.create( + self.value, args, kwargs, options + ) + elif variables.DataClassVariable.is_matching_cls(self.value): + options = 
{"mutable_local": MutableLocal()} + return variables.DataClassVariable.create(self.value, args, kwargs, options) + elif ( + variables.RestrictedListSubclassVariable.is_matching_cls(self.value) + and self.source + ): + return variables.RestrictedListSubclassVariable( + variables.BuiltinVariable(list).call_function(tx, args, kwargs).items, + user_cls=self.value, + user_cls_source=self.source, + mutable_local=MutableLocal(), + ) + elif self.value in self._in_graph_classes(): + # torch.LongTensor cannot accept a list of FakeTensors. + # So we stack the list of FakeTensors instead. + if ( + np + and self.value in tensortype_to_dtype + and len(args) == 1 + and isinstance(args[0], variables.ListVariable) + and len(args[0].items) > 1 + and all(isinstance(x, variables.TensorVariable) for x in args[0].items) + ): + # Stack FakeTensor + stacked = wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + torch.stack, + *proxy_args_kwargs(args, kwargs), + ), + ) + args = [stacked] + + tensor_variable = wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + self.value, + *proxy_args_kwargs(args, kwargs), + ), + ) + + return tensor_variable + + return super().call_function(tx, args, kwargs) + + def const_getattr(self, tx, name): + if name == "__name__": + return self.value.__name__ + return super().const_getattr(tx, name) + + +class UserDefinedObjectVariable(UserDefinedVariable): + """ + Mostly objects of defined type. Catch-all for something where we only know the type. + """ + + _nonvar_fields = {"value", "value_type", *UserDefinedVariable._nonvar_fields} + + def __init__(self, value, value_type=None, **kwargs): + super().__init__(**kwargs) + self.value = value + self.value_type = value_type or type(value) + assert type(value) is self.value_type + + def __str__(self): + inner = self.value_type.__name__ + if inner in [ + "builtin_function_or_method", + "getset_descriptor", + "method_descriptor", + "method", + ]: + inner = str(getattr(self.value, "__name__", None)) + return f"{self.__class__.__name__}({inner})" + + def python_type(self): + return self.value_type + + def guard_as_python_constant(self): + if self.source: + install_guard(self.source.make_guard(GuardBuilder.ID_MATCH)) + return self.value + return super().guard_as_python_constant() + + def torch_function_check(self): + assert has_torch_function( + self + ), f"calling torch function on object without __torch_function__ {self}" + + def get_torch_fn(self, tx): + self.torch_function_check() + from .torch_function import build_torch_function_fn + + return build_torch_function_fn(tx, self.value, self.source) + + def call_torch_function(self, tx, fn, types, args, kwargs): + self.torch_function_check() + + from .torch_function import _get_subclass_type_var, call_torch_function + + return call_torch_function( + tx, + _get_subclass_type_var(tx, self), + self.get_torch_fn(tx), + fn, + types, + args, + kwargs, + ) + + @staticmethod + @functools.lru_cache(None) + def _supported_random_functions(): + fns = { + random.random, + random.randint, + random.randrange, + random.uniform, + } + return fns + + def _maybe_get_baseclass_method(self, name): + if name not in getattr(self.value, "__dict__", {}): + try: + return inspect.getattr_static(type(self.value), name) + except AttributeError: + pass + return None + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from . 
import ( + BuiltinVariable, + ConstantVariable, + TupleVariable, + UserMethodVariable, + ) + + method = self._maybe_get_baseclass_method(name) + if method is not None: + if method is object.__init__: + return ConstantVariable.create(None) + + # [NOTE] OrderedDict, dict subtypes must always have source + # We cannot instantiate such subtypes in-graph due to builtin __new__ + if method is collections.OrderedDict.keys: + # subclass of OrderedDict + assert not (args or kwargs) + assert self.source # OrderedDict, dict subtypes must always have source + keys = list(self.value.keys()) + assert all(map(ConstantVariable.is_literal, keys)) + install_guard(self.source.make_guard(GuardBuilder.DICT_CONST_KEYS)) + return TupleVariable([ConstantVariable.create(k) for k in keys]) + + if ( + method in (collections.OrderedDict.__contains__, dict.__contains__) + and len(args) == 1 + and isinstance(args[0], (ConstantVariable, BuiltinVariable)) + and inspect.getattr_static(type(self.value), "keys") + in (collections.OrderedDict.keys, dict.keys) + ): + assert not kwargs + assert self.source # OrderedDict, dict subtypes must always have source + install_guard(self.source.make_guard(GuardBuilder.DICT_CONST_KEYS)) + return ConstantVariable.create( + args[0].as_python_constant() in self.value + ) + + if method is collections.OrderedDict.items and isinstance( + self.value, collections.OrderedDict + ): + assert self.source # OrderedDict, dict subtypes must always have source + assert not (args or kwargs) + items = [] + keys = self.call_method(tx, "keys", [], {}) + for key in keys.unpack_var_sequence(tx): + items.append( + TupleVariable( + [key, self.odict_getitem(tx, key)], + ) + ) + return TupleVariable(items) + + if method is collections.OrderedDict.__getitem__ and len(args) == 1: + assert not kwargs + assert self.source # OrderedDict, dict subtypes must always have source + return self.odict_getitem(tx, args[0]) + + # check for methods implemented in C++ + if isinstance(method, types.FunctionType): + source = ( + None + if self.source is None + else AttrSource(AttrSource(self.source, "__class__"), name) + ) + # TODO(jansel): add a guard to check for monkey patching? + return UserMethodVariable(method, self, source=source).call_function( + tx, args, kwargs + ) + + if method is list.__len__ and self.source and not (args or kwargs): + install_guard(self.source.make_guard(GuardBuilder.SEQUENCE_LENGTH)) + return ConstantVariable(len(self.value)) + + return super().call_method(tx, name, args, kwargs) + + def unpack_var_sequence(self, tx): + if ( + self.source + and self._maybe_get_baseclass_method("__iter__") is list.__iter__ + and self._maybe_get_baseclass_method("__len__") is list.__len__ + and self._maybe_get_baseclass_method("__getitem__") is list.__getitem__ + ): + install_guard(self.source.make_guard(GuardBuilder.SEQUENCE_LENGTH)) + return [ + variables.LazyVariableTracker.create( + self.value[k], + source=GetItemSource(self.source, k), + ) + for k in range(len(self.value)) + ] + return super().unpack_var_sequence(tx) + + def is_supported_random(self): + try: + return self.value in self._supported_random_functions() + except TypeError: + # TypeError: unhashable type + return False + + def call_function( + self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" + ) -> "VariableTracker": + from .. 
import trace_rules + from .builder import VariableBuilder + + if ( + self.is_supported_random() + and all(k.is_python_constant() for k in args) + and all(v.is_python_constant() for v in kwargs.values()) + ): + args = [x.as_python_constant() for x in args] + kwargs = {k: v.as_python_constant() for k, v in kwargs.items()} + random_call_index = len(tx.output.random_calls) + example_value = self.value(*args, **kwargs) + source = RandomValueSource(random_call_index) + tx.output.random_calls.append((self.value, args, kwargs)) + return VariableBuilder(tx, source).wrap_unspecialized_primitive( + example_value + ) + elif istype(self.value, types.MethodType): + func = self.value.__func__ + obj = self.value.__self__ + if ( + func is torch.utils._contextlib._DecoratorContextManager.clone + and variables.TorchCtxManagerClassVariable.is_matching_cls( + obj.__class__ + ) + and not (args or kwargs) + ): + return variables.TorchCtxManagerClassVariable( + obj.__class__ + ).call_function(tx, args, kwargs) + + if ( + func is torch.autograd.grad_mode.inference_mode.clone + and obj.__class__ is torch.autograd.grad_mode.inference_mode + ): + # simulate the inference_mode.clone implementation + var = variables.ConstantVariable(obj.mode) + return variables.TorchCtxManagerClassVariable( + obj.__class__ + ).call_function(tx, [var], kwargs) + elif ( + istype(self.value, functools.partial) + and trace_rules.lookup(self.value.func) + == variables.TorchInGraphFunctionVariable + and all( + variables.ConstantVariable.is_literal(v) + for v in itertools.chain(self.value.args, self.value.keywords.values()) + ) + ): + if self.source: + install_guard( + AttrSource(self.source, "func").make_guard(GuardBuilder.ID_MATCH), + AttrSource(self.source, "args").make_guard( + GuardBuilder.CONSTANT_MATCH + ), + AttrSource(self.source, "keywords").make_guard( + GuardBuilder.CONSTANT_MATCH + ), + ) + + partial_args = [ + variables.ConstantVariable.create(v) for v in self.value.args + ] + partial_args.extend(args) + partial_kwargs = { + k: variables.ConstantVariable.create(v) + for k, v in self.value.keywords.items() + } + partial_kwargs.update(kwargs) + if is_utils_checkpoint(self.value.func): + return build_checkpoint_variable().call_function( + tx, partial_args, partial_kwargs + ) + return variables.TorchInGraphFunctionVariable( + self.value.func + ).call_function(tx, partial_args, partial_kwargs) + elif callable(self.value): + if self.source: + install_guard(self.source.make_guard(GuardBuilder.FUNCTION_MATCH)) + return self.call_method(tx, "__call__", args, kwargs) + + return super().call_function(tx, args, kwargs) + + def _check_for_getattribute(self): + if object_has_getattribute(self.value): + unimplemented("UserDefinedObjectVariable with custom __getattribute__") + + def _check_for_getattr(self): + return get_custom_getattr(self.value) + + def _getattr_static(self, name): + if ( + isinstance(self.value, (torch.nn.Module, PyTreeSpec)) + or "__slots__" in self.value.__class__.__dict__ + or type(self.value) == threading.local + ): + # getattr_static doesn't work on these + subobj = getattr(self.value, name) + else: + subobj = inspect.getattr_static(self.value, name) + return subobj + + def var_getattr(self, tx, name): + from .. import trace_rules + from . 
import ConstantVariable + from .builder import VariableBuilder + + value = self.value + source = AttrSource(self.source, name) if self.source else None + self._check_for_getattribute() + getattr_fn = self._check_for_getattr() + + class NO_SUCH_SUBOBJ: + pass + + try: + subobj = self._getattr_static(name) + except AttributeError: + subobj = NO_SUCH_SUBOBJ + if isinstance(getattr_fn, types.FunctionType): + return variables.UserMethodVariable( + getattr_fn, self, source=source + ).call_function(tx, [ConstantVariable.create(name)], {}) + elif getattr_fn is not None: + unimplemented("UserDefined with non-function __getattr__") + + if isinstance(subobj, property): + # Rewrite the source being explicit about reading it statically. + if self.source: + source = AttrSource(self.source, name, get_static=True) + source = AttrSource(source, "fget") + return variables.UserMethodVariable( + subobj.fget, self, source=source + ).call_function(tx, [], {}) + elif isinstance(subobj, torch.distributions.utils.lazy_property): + subobj_var = UserDefinedObjectVariable(subobj, source=source) + return variables.UserMethodVariable( + subobj.__get__.__func__, subobj_var, source=source + ).call_function(tx, [self], {}) + elif isinstance(subobj, staticmethod): + func = subobj.__get__(self.value) + if source is not None: + return trace_rules.lookup(func).create_with_source(func, source=source) + else: + return trace_rules.lookup(func)(func) + elif isinstance(subobj, classmethod): + return variables.UserMethodVariable( + subobj.__func__, self.var_getattr(tx, "__class__"), source=source + ) + elif isinstance(subobj, types.FunctionType) or ( + isinstance(subobj, types.MethodType) + and isinstance(self.value, torch.nn.Module) + ): + # Since we get subobj via self._getattr_static, which may not trigger dynamic lookup. + # Static lookup can't tell us it's a method or function correctly, + # so we trigger dynamic lookup here to get the correct type. 
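# Editor's illustration (not part of the upstream file): a standalone example of why the
# dynamic getattr() lookup performed right after this comment is needed on top of
# inspect.getattr_static. The Widget class below is hypothetical.
import inspect
import types

class Widget:
    def scale(self, x):
        return 2 * x

w = Widget()
static_obj = inspect.getattr_static(w, "scale")  # the plain function stored on the class
dynamic_obj = getattr(w, "scale")                # the bound method produced by the descriptor
assert isinstance(static_obj, types.FunctionType)
assert inspect.ismethod(dynamic_obj) and dynamic_obj.__self__ is w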
+ dynamic_subobj = getattr(self.value, name) + + while dynamic_subobj is subobj and hasattr(subobj, "_torchdynamo_inline"): + subobj = subobj._torchdynamo_inline + dynamic_subobj = subobj + source = AttrSource(source, "_torchdynamo_inline") if source else None + + if isinstance(subobj, types.MethodType): + if dynamic_subobj.__self__ is not self.value: + unimplemented("__self__ mismatch for bound method") + func = subobj.__func__ + else: + assert isinstance(subobj, types.FunctionType) + func = subobj + + if inspect.ismethod(dynamic_subobj): + return variables.UserMethodVariable(func, self, source=source) + elif inspect.isfunction(dynamic_subobj): + if is_utils_checkpoint(func): + return build_checkpoint_variable(source=source) + elif source is not None: + return trace_rules.lookup(func).create_with_source( + func, source=source + ) + else: + return trace_rules.lookup(func)(func) + + if ( + name in getattr(value, "__dict__", {}) + or ConstantVariable.is_literal(subobj) + or isinstance( + subobj, + ( + torch.Tensor, + torch.nn.Module, + ), + ) + ): + if source: + return VariableBuilder(tx, source)(subobj) + elif ConstantVariable.is_literal(subobj): + return ConstantVariable.create(subobj) + + if ( + name not in getattr(value, "__dict__", {}) + and type(value).__module__.startswith("torch.") + and "torch.optim" not in type(value).__module__ + and not callable(value) + and not isinstance(subobj, types.MethodDescriptorType) + ): + if not source: + assert getattr( + importlib.import_module(type(value).__module__), + type(value).__name__, + ) is type(value) + source = AttrSource( + AttrSource( + tx.import_source(type(value).__module__), type(value).__name__ + ), + name, + ) + + return VariableBuilder(tx, source)(subobj) + options = {"source": source} + if isinstance( + subobj, + ( + torch.distributions.constraints._Interval, + torch.distributions.constraints._Real, + torch.distributions.constraints.Constraint, + ), + ): + return UserDefinedObjectVariable(subobj, **options) + elif isinstance(self.value, torch.nn.Module) and name in all_hook_names: + assert isinstance(subobj, collections.OrderedDict) + if not subobj: + return variables.ConstDictVariable( + subobj, collections.OrderedDict, **options + ) + + if name == "__class__": + return UserDefinedClassVariable(type(self.value), **options) + + return variables.GetAttrVariable(self, name, **options) + + def call_hasattr(self, tx, name: str) -> "VariableTracker": + if tx.output.side_effects.is_attribute_mutation(self): + try: + result = tx.output.side_effects.load_attr(self, name, deleted_ok=True) + return variables.ConstantVariable.create( + not isinstance(result, variables.DeletedVariable) + ) + except KeyError: + pass + if self.source: + install_guard( + AttrSource(self.source, name).make_guard(GuardBuilder.HASATTR) + ) + if self._check_for_getattribute() or self._check_for_getattr(): + unimplemented("hasattr with custom __getattr__") + + try: + self._getattr_static(name) + return variables.ConstantVariable.create(True) + except AttributeError: + return variables.ConstantVariable.create(False) + + def odict_getitem(self, tx, key): + from .builder import VariableBuilder + from .dicts import is_hashable + + # TODO this should probably be merged with the dict handling + + index = ( + key.source + if is_hashable(key) and key.source is not None + else key.as_python_constant() + ) + + return VariableBuilder( + tx, + ODictGetItemSource(self.source, index), + )(collections.OrderedDict.__getitem__(self.value, key.as_python_constant())) + + +class 
KeyedJaggedTensorVariable(UserDefinedObjectVariable): + @staticmethod + def is_matching_object(obj): + mod = sys.modules.get("torchrec.sparse.jagged_tensor") + return mod is not None and type(obj) is mod.KeyedJaggedTensor + + def __init__(self, value, **kwargs): + from torchrec.sparse.jagged_tensor import KeyedJaggedTensor + + assert type(value) is KeyedJaggedTensor + super().__init__(value, **kwargs) + + def var_getattr(self, tx, name): + if ( + torch._dynamo.config.force_unspec_int_unbacked_size_like_on_torchrec_kjt + and self.source is not None + and name in ("_length_per_key", "_offset_per_key") + ): + with TracingContext.patch(force_unspec_int_unbacked_size_like=True): + return super().var_getattr(tx, name) + return super().var_getattr(tx, name) + + +class RemovableHandleVariable(VariableTracker): + REMOVED = -1 + + def __init__( + self, + mutable_local=None, + # index of the registration in the side_effects owned register_hook/handle list, used during removal. + idx=None, + **kwargs, + ): + super().__init__(**kwargs) + self.mutable_local = mutable_local + self.idx = idx + + def call_method(self, tx, method_name, args, kwargs): + if method_name == "remove": + if self.idx != self.REMOVED: + tx.output.side_effects.remove_hook(self.idx) + self.idx = self.REMOVED + return variables.ConstantVariable.create(None) + super().call_method(tx, method_name, args, kwargs) + + def reconstruct(self, codegen): + if self.idx == self.REMOVED: + # Hook has already been removed, return a dummy handle + codegen.load_import_from("torch._dynamo.utils", "invalid_removeable_handle") + codegen.extend_output(create_call_function(0, True)) + return + # unreachable due to codegen.add_cache() when the hook is installed + super().reconstruct(codegen)
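Editor's note: the classes added above are exercised whenever torch.compile traces attribute access and method calls on plain user-defined objects. The self-contained sketch below is illustrative only (the backend choice and the Scaler class are assumptions, not part of the upstream change); it shows the intended path through UserDefinedObjectVariable.var_getattr and UserMethodVariable inlining.

import torch

class Scaler:
    def __init__(self, k):
        self.k = k

    def apply(self, x):
        return self.k * x

s = Scaler(3.0)

@torch.compile(backend="eager")
def f(x):
    # `s` is modeled as a UserDefinedObjectVariable; `s.apply` resolves through
    # var_getattr to a UserMethodVariable, which Dynamo then inlines.
    return s.apply(x) + 1

print(f(torch.ones(2)))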