diff --git a/ckpts/universal/global_step120/zero/16.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/16.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..11c88ed6137311caa08d037d4cec8bb7a0e8d1da --- /dev/null +++ b/ckpts/universal/global_step120/zero/16.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f203f4a2aafa1ea7c74d256be45c559177c572601dc4749c187d297a1ec66f0 +size 16778411 diff --git a/ckpts/universal/global_step120/zero/16.attention.dense.weight/fp32.pt b/ckpts/universal/global_step120/zero/16.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..8fdc7fc50bfda237b8f3f6a00a37f7eaa054cd42 --- /dev/null +++ b/ckpts/universal/global_step120/zero/16.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:301e1add3c4acd1ed87b54a117b1235368f80b5dd37d35c9bf39295de9e13318 +size 16778317 diff --git a/ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..c230e3a7e9588a0143f18945a748e65a48d209d4 --- /dev/null +++ b/ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ca0c5fdd534e75e8973663c4bfdae3b96b2122d129c2b197f95ef0ea9f183da +size 9372 diff --git a/ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..11a2fac6874a2732abd5de4bcd14eff38de6dc43 --- /dev/null +++ b/ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:566fefd513470ca286b9454e42c6c19724e407e6964316c14c091a0c02dac226 +size 9293 diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__init__.py b/venv/lib/python3.10/site-packages/torch/_dynamo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..05d58662da794990d63d39e9ca72afc2c4de0b7b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/__init__.py @@ -0,0 +1,96 @@ +import torch +from . 
import convert_frame, eval_frame, resume_execution +from .backends.registry import list_backends, lookup_backend, register_backend +from .callback import callback_handler, on_compile_end, on_compile_start +from .code_context import code_context +from .convert_frame import replay +from .decorators import ( + allow_in_graph, + assume_constant_result, + disable, + disallow_in_graph, + forbid_in_graph, + graph_break, + mark_dynamic, + mark_static, + mark_static_address, + maybe_mark_dynamic, + run, +) +from .eval_frame import ( + _reset_guarded_backend_cache, + explain, + export, + is_dynamo_supported, + is_inductor_supported, + optimize, + optimize_assert, + OptimizedModule, + reset_code, +) +from .external_utils import is_compiling +from .utils import graph_break_reasons, guard_failures, orig_code_map, reset_frame_count + +__all__ = [ + "allow_in_graph", + "assume_constant_result", + "disallow_in_graph", + "forbid_in_graph", + "graph_break", + "mark_dynamic", + "maybe_mark_dynamic", + "mark_static", + "mark_static_address", + "optimize", + "optimize_assert", + "export", + "explain", + "run", + "replay", + "disable", + "reset", + "OptimizedModule", + "is_compiling", + "register_backend", + "list_backends", + "lookup_backend", +] + +if torch.manual_seed is torch.random.manual_seed: + import torch.jit._builtins + + # Wrap manual_seed with the disable decorator. + # Can't do it at its implementation due to dependency issues. + torch.manual_seed = disable(torch.manual_seed) + # Add the new manual_seed to the builtin registry. + torch.jit._builtins._register_builtin(torch.manual_seed, "aten::manual_seed") + + +def reset() -> None: + """Clear all compile caches and restore initial state""" + with convert_frame.compile_lock: + reset_code_caches() + convert_frame.input_codes.clear() + convert_frame.output_codes.clear() + orig_code_map.clear() + guard_failures.clear() + graph_break_reasons.clear() + resume_execution.ContinueExecutionCache.cache.clear() + _reset_guarded_backend_cache() + reset_frame_count() + torch._C._dynamo.compiled_autograd.clear_cache() + convert_frame.FRAME_COUNTER = 0 + convert_frame.FRAME_COMPILE_COUNTER.clear() + callback_handler.clear() + + +def reset_code_caches() -> None: + """Clear compile caches that are keyed by code objects""" + with convert_frame.compile_lock: + for weak_code in ( + convert_frame.input_codes.seen + convert_frame.output_codes.seen + ): + code = weak_code() + if code: + reset_code(code) + code_context.clear() diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..677e52c1e0f8909bd5bbcbf74fc7023b68c7de95 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/_trace_wrapped_higher_order_op.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/_trace_wrapped_higher_order_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..706e21edd869ee806ab371818b1505649028343f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/_trace_wrapped_higher_order_op.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_analysis.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a664a16a84d1e64ba2223e873b20731104ef9ef2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_analysis.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_transformation.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_transformation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fb787df9381be764907199044d81f4b0343c67e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_transformation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/cache_size.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/cache_size.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce8864da21f047036c3788165e8b583ccad0a2f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/cache_size.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/callback.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/callback.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aec90e0cf6d0e232366f4ba94a9a26e8ca51e3fd Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/callback.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/code_context.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/code_context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9aa5e86b5f950ec749bb4338e7388c7f0f83c005 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/code_context.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/codegen.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/codegen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..864d76200c3d2863fcd89a0705dd199db968e058 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/codegen.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/compiled_autograd.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/compiled_autograd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff6caf0e38829fcfcb27d76b9d93b05e9c21e297 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/compiled_autograd.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0cfeb8e18c6bf9b1152ddca6a52edd65b53354c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/convert_frame.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/convert_frame.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a2506baeb6ea2334a7b1cfda8271fa69f9ca75af Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/convert_frame.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/current_scope_id.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/current_scope_id.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..741b7cbd69e18641fdcf272fb3de9587f153c896 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/current_scope_id.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/debug_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/debug_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43172f8f1e70527e365f47baf44be670c2c4e202 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/debug_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/decorators.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/decorators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90c303269443021512676091deff01e8894dba0b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/decorators.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/hooks.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20085f3e93e65ebe44bfffa7145bc0ccc2e3702d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/hooks.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b8f16d33fbe2cac6071e348bcc2f36ed94b27a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/output_graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/output_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d10e021ae037eb765abef1b6654b64c5aa82abd5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/output_graph.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/replay_record.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/replay_record.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09c9dcf105e40d94a994325b25dac3d16a39c018 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/replay_record.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/resume_execution.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/resume_execution.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bbb97fe3dd2321b9e3e9e376810be6ca42ec6bb Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/resume_execution.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/side_effects.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/side_effects.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a55fc4ea52a8c620bf0ee3b5eab881e2b445b9f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/side_effects.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/source.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/source.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11b632118ae82ec59900eb113aac9a3a53b43840 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/source.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/symbolic_convert.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/symbolic_convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b5bff093d7b7a450bf00489103fe9cc3f56aecf Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/symbolic_convert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_case.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_case.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de4edd6157d5aa5210bb596f493d39b0a8f6e98b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_case.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/testing.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..801932a0181e6fd32cc76a0c8bb72e0bbcd59f41 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/testing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/types.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e99cb43717080a9d355bbd953881957285e20b39 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..008b810925aa4309f3be76a24d9f06720371760d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/_trace_wrapped_higher_order_op.py b/venv/lib/python3.10/site-packages/torch/_dynamo/_trace_wrapped_higher_order_op.py new file mode 100644 index 0000000000000000000000000000000000000000..6e22cafcc6dd5efeac0d05a81c8a9612270bd1d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/_trace_wrapped_higher_order_op.py @@ -0,0 +1,120 @@ +import torch +from torch._C import DispatchKey +from torch._higher_order_ops.utils import autograd_not_implemented + 
+from torch._ops import HigherOrderOperator +from torch._subclasses import FakeTensorMode +from torch.fx.experimental._backward_state import BackwardState + +from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree +from torch.utils._python_dispatch import _get_current_dispatch_mode +from torch.utils._pytree import tree_map_only + + +__all__ = ["trace_wrapped"] + + +# trace_wrapped(*args, fn) is equivalent to fn(*args), but with a twist: +# if you make_fx trace through this call, we will not actually trace into fn; instead, +# we will directly insert it as a call_function to fn in the graph. +# (Unlike make_fx, Dynamo WILL inline into fn.) +# You can think of this as a one-off allow_in_graph equivalent for proxy tensor tracing. +# +# Because proxy tensor tracing does not actually run the function, there are +# requirements on the behavior of fn. We are still figuring it out, but here is the current state: +# +# 1) fn SHOULD only take a single argument, which must be a tensor +# 2) fn MUST return a new tensor with the same metadata as the original tensor +# (e.g., zeros_like(input) is a permissible implementation of fn). +# This is verified via an extra assert that is inserted into the traced graph. +# 3) fn MAY have side effects, but it MAY NOT perform metadata mutation on other tensors +# participating in proxy tensor tracing (it MAY mutate other tensors, it MAY mutate Python state) +# These requirements stem from the fact that we need to continue performing proxy tensor tracing, +# which assumes accurate fake tensor metadata, without actually running fn. +# In the future, we may allow for a "meta" function associated with fn to allow for more interesting input-output patterns. +# +# Note that tensors / Python state are allowed to be mutated. +# This relaxed constraint is not always sound, but it is sound for backward tracing with fake +# tensors as it takes place in AOTAutograd, as the backward pass is guaranteed not to depend on concrete +# tensor values (via fake tensor) or Python state (because the autograd engine doesn't depend on Python). +# +# The intended use case for this function is to allow AOTAutograd to defer complex +# backward hooks to compiled autograd. AOTAutograd performs a make_fx trace which preserves +# the function call as is in the graph, and only when we Dynamo through the backward graph in +# compiled autograd do we inline into the function.
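+# +# A minimal usage sketch under the contract above (illustrative only; `grad` is assumed to be some tensor already in scope): +# +# def fn(grad): +# # returns a new tensor with the same metadata as its input +# return torch.zeros_like(grad) +# +# out = trace_wrapped(grad, fn=fn) +# +# Under make_fx, fn is recorded as an opaque call_function node rather than traced into; Dynamo, by contrast, will inline into fn.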
+ + +def trace_wrapped(*args, **kwargs): + with torch.no_grad(): + return _trace_wrapped_op(*args, **kwargs) + + +# TODO(jansel): need to ensure this does not get DCEed +_trace_wrapped_op = HigherOrderOperator("trace_wrapped") + + +def _assert_meta(grad, size, stride, dtype): + assert grad.size() == size, "size mismatch" + assert grad.stride() == stride, "stride mismatch" + assert grad.dtype == dtype, "dtype mismatch" + return grad + + +@_trace_wrapped_op.py_impl(ProxyTorchDispatchMode) +def inner_trace(mode, *args, bw_state=None, **kwargs): + def self_invoke(*args, **dyn_kwargs): + with torch.no_grad(): + return _trace_wrapped_op(*args, **dyn_kwargs, **kwargs) + + def unwrap_proxies(x): + if isinstance(x, torch.Tensor): + return mode.tracer.unwrap_proxy(x) + if isinstance(x, (list, tuple)): + return type(x)(map(unwrap_proxies, x)) + if x is None: + return None + raise AssertionError(f"unhandled type: {type(x)}") + + proxy_kwargs = {} + if bw_state is not None: + assert isinstance(bw_state, BackwardState) and bw_state.proxy is not None + proxy_kwargs["bw_state"] = bw_state.proxy + out_proxy = mode.tracer.create_proxy( + "call_function", + self_invoke, + unwrap_proxies(args), + proxy_kwargs, + name="trace_wrapped", + ) + + if args[0] is None: + grad = args[1] # module backward hooks + else: + grad = args[0] # other backward hooks + grad = tree_map_only(torch.Tensor, torch.empty_like, grad) + track_tensor_tree(grad, out_proxy, constant=None, tracer=mode.tracer) + return grad + + +@_trace_wrapped_op.py_impl(FakeTensorMode) +def inner_fake(*args, **kwargs): + raise RuntimeError("This op should never be invoked here") + + +@_trace_wrapped_op.py_impl(DispatchKey.CompositeExplicitAutograd) +def _trace_wrapped_op_dense(*args, fn, **kwargs): + mode = _get_current_dispatch_mode() + assert mode is None, "Mode should never be enabled for CPU/CUDA key" + return fn(*args, **kwargs) + + +_trace_wrapped_op.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(_trace_wrapped_op, deferred_error=True) +) + + +@_trace_wrapped_op.py_functionalize_impl +def _trace_wrapped_functionalized(ctx, *args, **kwargs): + unwrapped_args = ctx.unwrap_tensors(args) + with ctx.redispatch_to_next(): + return ctx.wrap_tensors(_trace_wrapped_op(*unwrapped_args, **kwargs)) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/bytecode_analysis.py b/venv/lib/python3.10/site-packages/torch/_dynamo/bytecode_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..c321cce36ce1ceed512ba8295732bcc5d18ae03a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/bytecode_analysis.py @@ -0,0 +1,250 @@ +import bisect +import dataclasses +import dis +import sys +from typing import Any, Set, Union + +TERMINAL_OPCODES = { + dis.opmap["RETURN_VALUE"], + dis.opmap["JUMP_FORWARD"], + dis.opmap["RAISE_VARARGS"], + # TODO(jansel): double check exception handling +} +if sys.version_info >= (3, 9): + TERMINAL_OPCODES.add(dis.opmap["RERAISE"]) +if sys.version_info >= (3, 11): + TERMINAL_OPCODES.add(dis.opmap["JUMP_BACKWARD"]) + TERMINAL_OPCODES.add(dis.opmap["JUMP_FORWARD"]) +else: + TERMINAL_OPCODES.add(dis.opmap["JUMP_ABSOLUTE"]) +JUMP_OPCODES = set(dis.hasjrel + dis.hasjabs) +JUMP_OPNAMES = {dis.opname[opcode] for opcode in JUMP_OPCODES} +HASLOCAL = set(dis.haslocal) +HASFREE = set(dis.hasfree) + +stack_effect = dis.stack_effect + + +def get_indexof(insts): + """ + Get a mapping from instruction memory address to index in instruction list. 
+ Additionally checks that each instruction only appears once in the list. + """ + indexof = {} + for i, inst in enumerate(insts): + assert inst not in indexof + indexof[inst] = i + return indexof + + +def remove_dead_code(instructions): + """Dead code elimination""" + indexof = get_indexof(instructions) + live_code = set() + + def find_live_code(start): + for i in range(start, len(instructions)): + if i in live_code: + return + live_code.add(i) + inst = instructions[i] + if inst.exn_tab_entry: + find_live_code(indexof[inst.exn_tab_entry.target]) + if inst.opcode in JUMP_OPCODES: + find_live_code(indexof[inst.target]) + if inst.opcode in TERMINAL_OPCODES: + return + + find_live_code(0) + + # change exception table entries if start/end instructions are dead + # assumes that exception table entries have been propagated, + # e.g. with bytecode_transformation.propagate_inst_exn_table_entries, + # and that instructions with an exn_tab_entry lies within its start/end. + if sys.version_info >= (3, 11): + live_idx = sorted(live_code) + for i, inst in enumerate(instructions): + if i in live_code and inst.exn_tab_entry: + # find leftmost live instruction >= start + start_idx = bisect.bisect_left( + live_idx, indexof[inst.exn_tab_entry.start] + ) + assert start_idx < len(live_idx) + # find rightmost live instruction <= end + end_idx = ( + bisect.bisect_right(live_idx, indexof[inst.exn_tab_entry.end]) - 1 + ) + assert end_idx >= 0 + assert live_idx[start_idx] <= i <= live_idx[end_idx] + inst.exn_tab_entry.start = instructions[live_idx[start_idx]] + inst.exn_tab_entry.end = instructions[live_idx[end_idx]] + + return [inst for i, inst in enumerate(instructions) if i in live_code] + + +def remove_pointless_jumps(instructions): + """Eliminate jumps to the next instruction""" + pointless_jumps = { + id(a) + for a, b in zip(instructions, instructions[1:]) + if a.opname == "JUMP_ABSOLUTE" and a.target is b + } + return [inst for inst in instructions if id(inst) not in pointless_jumps] + + +def propagate_line_nums(instructions): + """Ensure every instruction has line number set in case some are removed""" + cur_line_no = None + + def populate_line_num(inst): + nonlocal cur_line_no + if inst.starts_line: + cur_line_no = inst.starts_line + + inst.starts_line = cur_line_no + + for inst in instructions: + populate_line_num(inst) + + +def remove_extra_line_nums(instructions): + """Remove extra starts line properties before packing bytecode""" + + cur_line_no = None + + def remove_line_num(inst): + nonlocal cur_line_no + if inst.starts_line is None: + return + elif inst.starts_line == cur_line_no: + inst.starts_line = None + else: + cur_line_no = inst.starts_line + + for inst in instructions: + remove_line_num(inst) + + +@dataclasses.dataclass +class ReadsWrites: + reads: Set[Any] + writes: Set[Any] + visited: Set[Any] + + +def livevars_analysis(instructions, instruction): + indexof = get_indexof(instructions) + must = ReadsWrites(set(), set(), set()) + may = ReadsWrites(set(), set(), set()) + + def walk(state, start): + if start in state.visited: + return + state.visited.add(start) + + for i in range(start, len(instructions)): + inst = instructions[i] + if inst.opcode in HASLOCAL or inst.opcode in HASFREE: + if "LOAD" in inst.opname or "DELETE" in inst.opname: + if inst.argval not in must.writes: + state.reads.add(inst.argval) + elif "STORE" in inst.opname: + state.writes.add(inst.argval) + elif inst.opname == "MAKE_CELL": + pass + else: + raise NotImplementedError(f"unhandled {inst.opname}") + if 
inst.exn_tab_entry: + walk(may, indexof[inst.exn_tab_entry.target]) + if inst.opcode in JUMP_OPCODES: + walk(may, indexof[inst.target]) + state = may + if inst.opcode in TERMINAL_OPCODES: + return + + walk(must, indexof[instruction]) + return must.reads | may.reads + + +@dataclasses.dataclass +class FixedPointBox: + value: bool = True + + +@dataclasses.dataclass +class StackSize: + low: Union[int, float] + high: Union[int, float] + fixed_point: FixedPointBox + + def zero(self): + self.low = 0 + self.high = 0 + self.fixed_point.value = False + + def offset_of(self, other, n): + prior = (self.low, self.high) + self.low = min(self.low, other.low + n) + self.high = max(self.high, other.high + n) + if (self.low, self.high) != prior: + self.fixed_point.value = False + + def exn_tab_jump(self, depth): + prior = (self.low, self.high) + self.low = min(self.low, depth) + self.high = max(self.high, depth) + if (self.low, self.high) != prior: + self.fixed_point.value = False + + +def stacksize_analysis(instructions) -> Union[int, float]: + assert instructions + fixed_point = FixedPointBox() + stack_sizes = { + inst: StackSize(float("inf"), float("-inf"), fixed_point) + for inst in instructions + } + stack_sizes[instructions[0]].zero() + + for _ in range(100): + if fixed_point.value: + break + fixed_point.value = True + + for inst, next_inst in zip(instructions, instructions[1:] + [None]): + stack_size = stack_sizes[inst] + # CALL_FINALLY in Python 3.8 is handled differently when determining stack depth. + # See https://github.com/python/cpython/blob/3.8/Python/compile.c#L5450. + # Essentially, the stack effect of CALL_FINALLY is computed with jump=True, + # but the resulting stack depth is propagated to the next instruction, not the + # jump target. + is_call_finally = ( + sys.version_info < (3, 9) and inst.opcode == dis.opmap["CALL_FINALLY"] + ) + if inst.opcode not in TERMINAL_OPCODES: + assert next_inst is not None, f"missing next inst: {inst}" + stack_sizes[next_inst].offset_of( + stack_size, + stack_effect(inst.opcode, inst.arg, jump=is_call_finally), + ) + if inst.opcode in JUMP_OPCODES and not is_call_finally: + stack_sizes[inst.target].offset_of( + stack_size, stack_effect(inst.opcode, inst.arg, jump=True) + ) + if inst.exn_tab_entry: + # see https://github.com/python/cpython/blob/3.11/Objects/exception_handling_notes.txt + # on why depth is computed this way. 
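+ # In short (paraphrasing those notes): the unwinder pops the stack down to `depth`, pushes the instruction offset if `lasti` is set, and then pushes the exception itself, so the handler target sees depth + lasti + 1 values.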
+ depth = inst.exn_tab_entry.depth + int(inst.exn_tab_entry.lasti) + 1 + stack_sizes[inst.exn_tab_entry.target].exn_tab_jump(depth) + + if False: + for inst in instructions: + stack_size = stack_sizes[inst] + print(stack_size.low, stack_size.high, inst) + + low = min([x.low for x in stack_sizes.values()]) + high = max([x.high for x in stack_sizes.values()]) + + assert fixed_point.value, "failed to reach fixed point" + assert low >= 0 + return high diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/bytecode_transformation.py b/venv/lib/python3.10/site-packages/torch/_dynamo/bytecode_transformation.py new file mode 100644 index 0000000000000000000000000000000000000000..63edf253062fece499cd276570cef07c4c58e5b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/bytecode_transformation.py @@ -0,0 +1,1114 @@ +import copy +import dataclasses +import dis +import itertools +import sys +import types +from typing import Any, Callable, cast, Dict, Iterator, List, Optional, Tuple + +from .bytecode_analysis import ( + get_indexof, + propagate_line_nums, + remove_extra_line_nums, + stacksize_analysis, +) + + +@dataclasses.dataclass +class InstructionExnTabEntry: + start: "Instruction" + end: "Instruction" + target: "Instruction" + depth: int + lasti: bool + + def __repr__(self) -> str: + return ( + f"InstructionExnTabEntry(start={self.start.short_inst_repr()}, " + f"end={self.end.short_inst_repr()}, " + f"target={self.target.short_inst_repr()}, " + f"depth={self.depth}, lasti={self.lasti})" + ) + + def __eq__(self, o) -> bool: + return ( + self.start is o.start + and self.end is o.end + and self.target is o.target + and self.depth == o.depth + and self.lasti == o.lasti + ) + + +@dataclasses.dataclass +class Instruction: + """A mutable version of dis.Instruction""" + + opcode: int + opname: str + arg: Optional[int] + argval: Any + offset: Optional[int] = None + starts_line: Optional[int] = None + is_jump_target: bool = False + positions: Optional["dis.Positions"] = None + # extra fields to make modification easier: + target: Optional["Instruction"] = None + exn_tab_entry: Optional[InstructionExnTabEntry] = None + + def __hash__(self) -> int: + return id(self) + + def __eq__(self, other) -> bool: + return id(self) == id(other) + + def short_inst_repr(self) -> str: + return f"Instruction(opname={self.opname}, offset={self.offset})" + + +def convert_instruction(i: dis.Instruction) -> Instruction: + return Instruction( + i.opcode, + i.opname, + i.arg, + i.argval, + i.offset, + i.starts_line, + i.is_jump_target, + getattr(i, "positions", None), + ) + + +class _NotProvided: + def __repr__(self) -> str: + return "_NotProvided" + + +def create_instruction( + name, *, arg=None, argval=_NotProvided, target=None +) -> Instruction: + """ + At most one of `arg`, `argval`, and `target` can be not None/_NotProvided. + This is to prevent ambiguity, e.g. does + create_instruction("LOAD_CONST", 5) + mean load the constant at co_consts[5], or load the constant 5? + + If `arg` is not provided, it will be computed during assembly from + `argval` or `target`. + + Do not use for LOAD_GLOBAL - use create_load_global instead. 
+ """ + assert name != "LOAD_GLOBAL" + cnt = (arg is not None) + (argval is not _NotProvided) + (target is not None) + if cnt > 1: + raise RuntimeError( + "only one of arg, argval, and target can be not None/_NotProvided" + ) + if arg is not None and not isinstance(arg, int): + raise RuntimeError("instruction arg must be int or None") + return Instruction( + opcode=dis.opmap[name], opname=name, arg=arg, argval=argval, target=target + ) + + +# Python 3.11 remaps +def create_jump_absolute(target) -> Instruction: + inst = "JUMP_FORWARD" if sys.version_info >= (3, 11) else "JUMP_ABSOLUTE" + return create_instruction(inst, target=target) + + +def create_load_global(name, push_null) -> Instruction: + """ + `name` is the name of the global to be loaded. + `push_null` specifies whether or not a NULL should be pushed to the stack + before the global (Python 3.11+ only). + + Python 3.11 changed the LOAD_GLOBAL instruction in that the first bit of + the instruction arg specifies whether a NULL should be pushed to the stack + before the global. The remaining bits of the instruction arg contain the + name index. See `create_call_function` for why this NULL is needed. + + The instruction's `arg` is actually computed when assembling the bytecode. + For Python 3.11, push_null information is propagated through the arg. + + NOTE: we don't use create_instruction since LOAD_GLOBAL is the only instruction + where both arg and argval need to be specified. + """ + return Instruction( + opcode=dis.opmap["LOAD_GLOBAL"], + opname="LOAD_GLOBAL", + arg=push_null, + argval=name, + ) + + +def create_dup_top() -> Instruction: + if sys.version_info >= (3, 11): + return create_instruction("COPY", arg=1) + return create_instruction("DUP_TOP") + + +def create_rot_n(n) -> List[Instruction]: + """ + Returns a "simple" sequence of instructions that rotates TOS to the n-th + position in the stack. For Python < 3.11, returns a single ROT_* + instruction. If no such instruction exists, an error is raised and the + caller is expected to generate an equivalent sequence of instructions. + For Python >= 3.11, any rotation can be expressed as a simple sequence of + swaps. + """ + if n <= 1: + # don't rotate + return [] + + if sys.version_info >= (3, 11): + # rotate can be expressed as a sequence of swap operations + # e.g. rotate 3 is equivalent to swap 3, swap 2 + return [create_instruction("SWAP", arg=i) for i in range(n, 1, -1)] + + # ensure desired rotate function exists + if sys.version_info < (3, 8) and n >= 4: + raise AttributeError(f"rotate {n} not supported for Python < 3.8") + if sys.version_info < (3, 10) and n >= 5: + raise AttributeError(f"rotate {n} not supported for Python < 3.10") + + if n <= 4: + return [create_instruction("ROT_" + ["TWO", "THREE", "FOUR"][n - 2])] + return [create_instruction("ROT_N", arg=n)] + + +def create_call_function(nargs, push_null) -> List[Instruction]: + """ + Creates a sequence of instructions that makes a function call. + + `push_null` is used in Python 3.11+ only. It is used in codegen when + a function call is intended to be made with the NULL + fn convention, + and we know that the NULL has not been pushed yet. We will push a + NULL and rotate it to the correct position immediately before making + the function call. 
+ push_null should default to True unless you know you are calling a function + that you codegen'd with a null already pushed, for example + (assume `math` is available in the global scope), + + create_load_global("math", True) # pushes a null + create_instruction("LOAD_ATTR", argval="sqrt") + create_instruction("LOAD_CONST", argval=25) + create_call_function(1, False) + """ + if sys.version_info >= (3, 11): + output = [] + if push_null: + output.append(create_instruction("PUSH_NULL")) + output.extend(create_rot_n(nargs + 2)) + output.append(create_instruction("PRECALL", arg=nargs)) + output.append(create_instruction("CALL", arg=nargs)) + return output + return [create_instruction("CALL_FUNCTION", arg=nargs)] + + +def create_call_method(nargs) -> List[Instruction]: + if sys.version_info >= (3, 11): + return [ + create_instruction("PRECALL", arg=nargs), + create_instruction("CALL", arg=nargs), + ] + return [create_instruction("CALL_METHOD", arg=nargs)] + + +def lnotab_writer( + lineno: int, byteno: int = 0 +) -> Tuple[List[int], Callable[[int, int], None]]: + """ + Used to create typing.CodeType.co_lnotab + See https://github.com/python/cpython/blob/main/Objects/lnotab_notes.txt + This is the internal format of the line number table if Python < 3.10 + """ + assert sys.version_info < (3, 10) + lnotab: List[int] = [] + + def update(lineno_new, byteno_new): + nonlocal byteno, lineno + while byteno_new != byteno or lineno_new != lineno: + byte_offset = max(0, min(byteno_new - byteno, 255)) + line_offset = max(-128, min(lineno_new - lineno, 127)) + assert byte_offset != 0 or line_offset != 0 + byteno += byte_offset + lineno += line_offset + lnotab.extend((byte_offset, line_offset & 0xFF)) + + return lnotab, update + + +def linetable_310_writer(first_lineno): + """ + Used to create typing.CodeType.co_linetable + See https://github.com/python/cpython/blob/main/Objects/lnotab_notes.txt + This is the internal format of the line number table for Python 3.10 + """ + assert sys.version_info >= (3, 10) and sys.version_info < (3, 11) + linetable: List[int] = [] + lineno = first_lineno + lineno_delta = 0 + byteno = 0 + + def _update(byteno_delta, lineno_delta): + while byteno_delta != 0 or lineno_delta != 0: + byte_offset = max(0, min(byteno_delta, 254)) + line_offset = max(-127, min(lineno_delta, 127)) + assert byte_offset != 0 or line_offset != 0 + byteno_delta -= byte_offset + lineno_delta -= line_offset + linetable.extend((byte_offset, line_offset & 0xFF)) + + def update(lineno_new, byteno_new): + nonlocal lineno, lineno_delta, byteno + byteno_delta = byteno_new - byteno + byteno = byteno_new + _update(byteno_delta, lineno_delta) + lineno_delta = lineno_new - lineno + lineno = lineno_new + + def end(total_bytes): + _update(total_bytes - byteno, lineno_delta) + + return linetable, update, end + + +def encode_varint(n: int) -> List[int]: + """ + 6-bit chunk encoding of an unsigned integer + See https://github.com/python/cpython/blob/3.11/Objects/locations.md + """ + assert n >= 0 + b = [n & 63] + n >>= 6 + while n > 0: + b[-1] |= 64 + b.append(n & 63) + n >>= 6 + return b + + +def linetable_311_writer(first_lineno: int): + """ + Used to create typing.CodeType.co_linetable + See https://github.com/python/cpython/blob/3.11/Objects/locations.md + This is the internal format of the line number table for Python 3.11 + """ + assert sys.version_info >= (3, 11) + linetable = [] + lineno = first_lineno + + def update(positions: "dis.Positions", inst_size): + nonlocal lineno + lineno_new = positions.lineno if 
positions else None + + def _update(delta, size): + assert 0 < size <= 8 + # first byte - use 13 (no column info) is positions is + # malformed, otherwise use 14 (long form) + other_varints: Tuple[int, ...] = () + if ( + positions + and positions.lineno is not None + and positions.end_lineno is not None + and positions.col_offset is not None + and positions.end_col_offset is not None + ): + linetable.append(0b1_1110_000 + size - 1) + # for whatever reason, column offset needs `+ 1` + # https://github.com/python/cpython/blob/1931c2a438c50e6250725c84dff94fc760b9b951/Python/compile.c#L7603 + other_varints = ( + positions.end_lineno - positions.lineno, + positions.col_offset + 1, + positions.end_col_offset + 1, + ) + else: + linetable.append(0b1_1101_000 + size - 1) + # encode signed int + if delta < 0: + delta = ((-delta) << 1) | 1 + else: + delta <<= 1 + # encode unsigned int + linetable.extend(encode_varint(delta)) + for n in other_varints: + linetable.extend(encode_varint(n)) + + if lineno_new is None: + lineno_delta = 0 + else: + lineno_delta = lineno_new - lineno + lineno = lineno_new + while inst_size > 8: + _update(lineno_delta, 8) + inst_size -= 8 + _update(lineno_delta, inst_size) + + return linetable, update + + +@dataclasses.dataclass +class ExceptionTableEntry: + start: int + end: int + target: int + depth: int + lasti: bool + + +def encode_exception_table_varint(n: int) -> List[int]: + """ + Similar to `encode_varint`, but the 6-bit chunks are ordered in reverse. + """ + assert n >= 0 + b = [n & 63] + n >>= 6 + while n > 0: + b.append(n & 63) + n >>= 6 + b.reverse() + for i in range(len(b) - 1): + b[i] |= 64 + return b + + +def decode_exception_table_varint(bytes_iter: Iterator[int]) -> int: + """ + Inverse of `encode_exception_table_varint`. + """ + b = next(bytes_iter) + val = b & 63 + while b & 64: + val <<= 6 + b = next(bytes_iter) + val |= b & 63 + return val + + +def check_exception_table(tab: List[ExceptionTableEntry]) -> None: + """ + Verifies that a list of ExceptionTableEntries will make a well-formed + jump table: entries are non-empty, sorted, and do not overlap. + """ + for i in range(len(tab) - 1): + assert ( + tab[i].start <= tab[i].end + and tab[i].end < tab[i + 1].start + and tab[i + 1].start <= tab[i + 1].end + ) + + +def parse_exception_table(exntab: bytes) -> List[ExceptionTableEntry]: + """ + Parse the exception table according to + https://github.com/python/cpython/blob/3.11/Objects/exception_handling_notes.txt + """ + exntab_iter = iter(exntab) + tab = [] + try: + while True: + start = decode_exception_table_varint(exntab_iter) * 2 + length = decode_exception_table_varint(exntab_iter) * 2 + end = start + length - 2 + target = decode_exception_table_varint(exntab_iter) * 2 + dl = decode_exception_table_varint(exntab_iter) + depth = dl >> 1 + lasti = bool(dl & 1) + tab.append(ExceptionTableEntry(start, end, target, depth, lasti)) + except StopIteration: + check_exception_table(tab) + return tab + + +def assemble_exception_table(tab: List[ExceptionTableEntry]) -> bytes: + """ + Inverse of parse_exception_table - encodes list of exception + table entries into bytes. 
+ """ + b = [] + for entry in tab: + first_entry = encode_exception_table_varint(entry.start // 2) + first_entry[0] |= 1 << 7 + b.extend(first_entry) + length = entry.end - entry.start + 2 + b.extend(encode_exception_table_varint(length // 2)) + b.extend(encode_exception_table_varint(entry.target // 2)) + dl = (entry.depth << 1) + entry.lasti + b.extend(encode_exception_table_varint(dl)) + return bytes(b) + + +def assemble(instructions: List[Instruction], firstlineno: int) -> Tuple[bytes, bytes]: + """Do the opposite of dis.get_instructions()""" + code: List[int] = [] + if sys.version_info >= (3, 11): + lnotab, update_lineno = linetable_311_writer(firstlineno) + num_ext = 0 + for i, inst in enumerate(instructions): + if inst.opname == "EXTENDED_ARG": + inst_size = 1 + num_ext += 1 + # copy positions from the actual instruction + for j in (1, 2, 3): + if instructions[i + j].opname != "EXTENDED_ARG": + inst.positions = instructions[i + j].positions + break + else: + inst_size = instruction_size(inst) // 2 + num_ext + num_ext = 0 + update_lineno(inst.positions, inst_size) + num_ext = 0 + arg = inst.arg or 0 + code.extend((inst.opcode, arg & 0xFF)) + for _ in range(instruction_size(inst) // 2 - 1): + code.extend((0, 0)) + else: + if sys.version_info < (3, 10): + lnotab, update_lineno = lnotab_writer(firstlineno) + else: + lnotab, update_lineno, end = linetable_310_writer(firstlineno) + + for inst in instructions: + if inst.starts_line is not None: + update_lineno(inst.starts_line, len(code)) + arg = inst.arg or 0 + code.extend((inst.opcode, arg & 0xFF)) + + if sys.version_info >= (3, 10): + end(len(code)) + + return bytes(code), bytes(lnotab) + + +def _get_instruction_by_offset(offset_to_inst: Dict[int, Instruction], offset: int): + """ + Get the instruction located at a given offset, accounting for EXTENDED_ARGs + """ + for n in (0, 2, 4, 6): + if offset_to_inst[offset + n].opcode != dis.EXTENDED_ARG: + return offset_to_inst[offset + n] + return None + + +def virtualize_jumps(instructions) -> None: + """Replace jump targets with pointers to make editing easier""" + jump_targets = {inst.offset: inst for inst in instructions} + + for inst in instructions: + if inst.opcode in dis.hasjabs or inst.opcode in dis.hasjrel: + inst.target = _get_instruction_by_offset(jump_targets, inst.argval) + + +_REL_JUMPS = set(dis.hasjrel) + + +def flip_jump_direction(instruction: Instruction) -> None: + if sys.version_info < (3, 11): + raise RuntimeError("Cannot flip jump direction in Python < 3.11") + if "FORWARD" in instruction.opname: + instruction.opname = instruction.opname.replace("FORWARD", "BACKWARD") + elif "BACKWARD" in instruction.opname: + instruction.opname = instruction.opname.replace("BACKWARD", "FORWARD") + else: + raise AttributeError("Instruction is not a forward or backward jump") + instruction.opcode = dis.opmap[instruction.opname] + assert instruction.opcode in _REL_JUMPS + + +def _get_instruction_front(instructions: List[Instruction], idx: int): + """ + i.e. get the first EXTENDED_ARG instruction (if any) when targeting + instructions[idx] with a jump. 
+ """ + target = instructions[idx] + for offset in (1, 2, 3): + if idx >= offset and instructions[idx - offset].opcode == dis.EXTENDED_ARG: + target = instructions[idx - offset] + else: + break + return target + + +def devirtualize_jumps(instructions): + """Fill in args for virtualized jump target after instructions may have moved""" + indexof = get_indexof(instructions) + jumps = set(dis.hasjabs).union(set(dis.hasjrel)) + + for inst in instructions: + if inst.opcode in jumps: + target = _get_instruction_front(instructions, indexof[inst.target]) + if inst.opcode in dis.hasjabs: + if sys.version_info < (3, 10): + inst.arg = target.offset + elif sys.version_info < (3, 11): + # `arg` is expected to be bytecode offset, whereas `offset` is byte offset. + # Divide since bytecode is 2 bytes large. + inst.arg = int(target.offset / 2) + else: + raise RuntimeError("Python 3.11+ should not have absolute jumps") + else: # relative jump + # byte offset between target and next instruction + inst.arg = int(target.offset - inst.offset - instruction_size(inst)) + if inst.arg < 0: + if sys.version_info < (3, 11): + raise RuntimeError("Got negative jump offset for Python < 3.11") + inst.arg = -inst.arg + # forward jumps become backward + if "FORWARD" in inst.opname: + flip_jump_direction(inst) + elif inst.arg > 0: + # backward jumps become forward + if sys.version_info >= (3, 11) and "BACKWARD" in inst.opname: + flip_jump_direction(inst) + if sys.version_info >= (3, 10): + # see bytecode size comment in the absolute jump case above + inst.arg //= 2 + inst.argval = target.offset + inst.argrepr = f"to {target.offset}" + + +def virtualize_exception_table(exn_tab_bytes: bytes, instructions: List[Instruction]): + """Replace exception table entries with pointers to make editing easier""" + exn_tab = parse_exception_table(exn_tab_bytes) + offset_to_inst = {cast(int, inst.offset): inst for inst in instructions} + offsets = sorted(offset_to_inst.keys()) + end_offset_idx = 0 + exn_tab_iter = iter(exn_tab) + try: + + def step(): + nonlocal end_offset_idx + entry = next(exn_tab_iter) + # find rightmost offset <= entry.end, since entry.end may not be + # an actual instruction, e.g. if the end instruction is LOAD_GLOBAL, + # which takes more than 2 bytes, then entry.end points to the end + # of the LOAD_GLOBAL instruction, not the beginning. 
+ while ( + end_offset_idx < len(offsets) and offsets[end_offset_idx] <= entry.end + ): + end_offset_idx += 1 + assert end_offset_idx > 0 + end_offset = offsets[end_offset_idx - 1] + inst_entry = InstructionExnTabEntry( + _get_instruction_by_offset(offset_to_inst, entry.start), + _get_instruction_by_offset(offset_to_inst, end_offset), + _get_instruction_by_offset(offset_to_inst, entry.target), + entry.depth, + entry.lasti, + ) + return entry, inst_entry + + entry, inst_entry = step() + for inst in instructions: + while inst.offset > entry.end: + entry, inst_entry = step() + if inst.offset >= entry.start: + inst.exn_tab_entry = copy.copy(inst_entry) + except StopIteration: + pass + + +def compute_exception_table( + instructions: List[Instruction], +) -> List[ExceptionTableEntry]: + """Compute exception table in list format from instructions with exn_tab_entries""" + exn_dict: Dict[Tuple[int, int], Tuple[int, int, bool]] = {} + indexof = get_indexof(instructions) + + for inst in instructions: + if inst.exn_tab_entry: + # account for prefixed EXTENDED_ARGS + start = _get_instruction_front( + instructions, indexof[inst.exn_tab_entry.start] + ).offset + # point to the last 2 bytes of the end instruction + end = ( + cast(int, inst.exn_tab_entry.end.offset) + + instruction_size(inst.exn_tab_entry.end) + - 2 + ) + target = _get_instruction_front( + instructions, indexof[inst.exn_tab_entry.target] + ).offset + key = (start, end) + val = (target, inst.exn_tab_entry.depth, inst.exn_tab_entry.lasti) + if key in exn_dict: + assert exn_dict[key] == val + exn_dict[key] = val + + # Dynamo may construct nested exception table entries for convenience, + # but Python expects exception table entries to not overlap. + # NOTE: below, "keys" refer to old instruction entries' starts and ends, + # and "entries" refer to the generated exception table entries. + + # Sort keys by increasing start, then decreasing end + keys_sorted = sorted(exn_dict.keys(), key=lambda t: (t[0], -t[1])) + # smallest byte that the next exception table entry can start at + nexti = 0 + # stack of current nested keys + key_stack: List[Tuple[int, int]] = [] + exn_tab: List[ExceptionTableEntry] = [] + + def pop(): + """ + Pop the key_stack and append an exception table entry if possible. + """ + nonlocal nexti + if key_stack: + key = key_stack.pop() + if nexti <= key[1]: + exn_tab.append( + ExceptionTableEntry(max(key[0], nexti), key[1], *exn_dict[key]) + ) + nexti = key[1] + 2 + + for key in keys_sorted: + # pop keys that are no longer nested over the current key + while key_stack and key_stack[-1][1] < key[0]: + pop() + if key_stack: + # create an entry covering to the current key, if possible + assert key_stack[-1][0] <= key[0] <= key[1] <= key_stack[-1][1] + left = max(nexti, key_stack[-1][0]) + if left < key[0]: + exn_tab.append( + ExceptionTableEntry(left, key[0] - 2, *exn_dict[key_stack[-1]]) + ) + nexti = key[0] + key_stack.append(key) + while key_stack: + pop() + check_exception_table(exn_tab) + return exn_tab + + +def check_inst_exn_tab_entries_nested( + tab: List[InstructionExnTabEntry], indexof +) -> None: + """ + Checks `tab` is a properly sorted list of nested InstructionExnTabEntry's, + i.e. no entries partially overlap. + "Properly sorted" means entries are sorted by increasing starts, then + decreasing ends. 
+ """ + entry_stack: List[Tuple[int, int]] = [] + for entry in tab: + key = (indexof[entry.start], indexof[entry.end]) + while entry_stack and entry_stack[-1][1] < key[0]: + entry_stack.pop() + if entry_stack: + assert entry_stack[-1][0] <= key[0] <= key[1] <= entry_stack[-1][1] + entry_stack.append(key) + + +def propagate_inst_exn_table_entries(instructions: List[Instruction]) -> None: + """ + Copies exception table entries to all instructions in an entry's range. + Supports nested exception table entries. + """ + indexof = get_indexof(instructions) + entries: Dict[Tuple[int, int], InstructionExnTabEntry] = {} + for inst in instructions: + if inst.exn_tab_entry: + key = ( + indexof[inst.exn_tab_entry.start], + indexof[inst.exn_tab_entry.end], + ) + if key in entries: + assert inst.exn_tab_entry == entries[key] + entries[key] = inst.exn_tab_entry + sorted_entries = [ + entries[key] for key in sorted(entries.keys(), key=lambda t: (t[0], -t[1])) + ] + check_inst_exn_tab_entries_nested(sorted_entries, indexof) + # Propagation of nested entries works since nested entries come later + # in sorted order. + for entry in sorted_entries: + for i in range(indexof[entry.start], indexof[entry.end] + 1): + instructions[i].exn_tab_entry = copy.copy(entry) + + +def check_inst_exn_tab_entries_valid(instructions: List[Instruction]): + """ + Checks that exn_tab_entries of instructions are valid. + An entry's start, end, and target must be in instructions. + Instructions with an exn_tab_entry are located within + the entry's start and end instructions. + Instructions do not share exn_tab_entries. + + Implicitly checks for no duplicate instructions. + """ + indexof = get_indexof(instructions) + exn_tab_entry_set = set() + for i, inst in enumerate(instructions): + if inst.exn_tab_entry: + assert sys.version_info >= (3, 11) + assert id(inst.exn_tab_entry) not in exn_tab_entry_set + exn_tab_entry_set.add(id(inst.exn_tab_entry)) + entry = inst.exn_tab_entry + assert entry.start in indexof + assert entry.end in indexof + assert entry.target in indexof + assert indexof[entry.start] <= i <= indexof[entry.end] + + +def strip_extended_args(instructions: List[Instruction]) -> None: + instructions[:] = [i for i in instructions if i.opcode != dis.EXTENDED_ARG] + + +def remove_load_call_method(instructions: List[Instruction]) -> List[Instruction]: + """LOAD_METHOD puts a NULL on the stack which causes issues, so remove it""" + rewrites = {"LOAD_METHOD": "LOAD_ATTR", "CALL_METHOD": "CALL_FUNCTION"} + for inst in instructions: + if inst.opname in rewrites: + inst.opname = rewrites[inst.opname] + inst.opcode = dis.opmap[inst.opname] + return instructions + + +def remove_jump_if_none(instructions: List[Instruction]) -> None: + new_insts = [] + for inst in instructions: + new_insts.append(inst) + if "_NONE" in inst.opname: + is_op = create_instruction("IS_OP", arg=int("NOT" in inst.opname)) + is_op.argval = is_op.arg + jump_op = create_instruction( + "POP_JUMP_FORWARD_IF_TRUE" + if "FORWARD" in inst.opname + else "POP_JUMP_BACKWARD_IF_TRUE", + target=inst.target, + ) + # modify inst in-place to preserve jump target + inst.opcode = dis.opmap["LOAD_CONST"] + inst.opname = "LOAD_CONST" + inst.arg = None + inst.argval = None + new_insts.extend([is_op, jump_op]) + instructions[:] = new_insts + + +def explicit_super(code: types.CodeType, instructions: List[Instruction]) -> None: + """convert super() with no args into explicit arg form""" + cell_and_free = (code.co_cellvars or tuple()) + (code.co_freevars or tuple()) + if not 
len(code.co_varnames): + # A function with no argument cannot contain a valid "super()" call + return + output = [] + for idx, inst in enumerate(instructions): + output.append(inst) + if inst.opname == "LOAD_GLOBAL" and inst.argval == "super": + nexti = instructions[idx + 1] + if nexti.opname in ("CALL_FUNCTION", "PRECALL") and nexti.arg == 0: + assert "__class__" in cell_and_free + output.append(create_instruction("LOAD_DEREF", argval="__class__")) + first_var = code.co_varnames[0] + if first_var in cell_and_free: + output.append(create_instruction("LOAD_DEREF", argval=first_var)) + else: + output.append(create_instruction("LOAD_FAST", argval=first_var)) + nexti.arg = 2 + nexti.argval = 2 + if nexti.opname == "PRECALL": + # also update the following CALL instruction + call_inst = instructions[idx + 2] + call_inst.arg = 2 + call_inst.argval = 2 + + instructions[:] = output + + +def fix_extended_args(instructions: List[Instruction]) -> int: + """Fill in correct argvals for EXTENDED_ARG ops""" + output: List[Instruction] = [] + + def maybe_pop_n(n): + for _ in range(n): + if output and output[-1].opcode == dis.EXTENDED_ARG: + output.pop() + + for inst in instructions: + if inst.opcode == dis.EXTENDED_ARG: + # Leave this instruction alone for now so we never shrink code + inst.arg = 0 + elif inst.arg and inst.arg > 0xFFFFFF: + maybe_pop_n(3) + output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 24)) + output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 16)) + output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8)) + elif inst.arg and inst.arg > 0xFFFF: + maybe_pop_n(2) + output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 16)) + output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8)) + elif inst.arg and inst.arg > 0xFF: + maybe_pop_n(1) + output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8)) + output.append(inst) + + added = len(output) - len(instructions) + assert added >= 0 + instructions[:] = output + return added + + +# from https://github.com/python/cpython/blob/v3.11.1/Include/internal/pycore_opcode.h#L41 +# TODO use the actual object instead, can interface from eval_frame.c +_PYOPCODE_CACHES = { + "BINARY_SUBSCR": 4, + "STORE_SUBSCR": 1, + "UNPACK_SEQUENCE": 1, + "STORE_ATTR": 4, + "LOAD_ATTR": 4, + "COMPARE_OP": 2, + "LOAD_GLOBAL": 5, + "BINARY_OP": 1, + "LOAD_METHOD": 10, + "PRECALL": 1, + "CALL": 4, +} + + +def instruction_size(inst) -> int: + if sys.version_info >= (3, 11): + return 2 * (_PYOPCODE_CACHES.get(dis.opname[inst.opcode], 0) + 1) + return 2 + + +def check_offsets(instructions) -> None: + offset = 0 + for inst in instructions: + assert inst.offset == offset + offset += instruction_size(inst) + + +def update_offsets(instructions) -> None: + offset = 0 + for inst in instructions: + inst.offset = offset + offset += instruction_size(inst) + + +def debug_bytes(*args) -> str: + index = range(max(map(len, args))) + result = [] + for arg in ( + [index] + list(args) + [[int(a != b) for a, b in zip(args[-1], args[-2])]] + ): + result.append(" ".join(f"{x:03}" for x in arg)) + + return "bytes mismatch\n" + "\n".join(result) + + +def debug_checks(code): + """Make sure our assembler produces same bytes as we start with""" + dode = transform_code_object(code, lambda x, y: None, safe=True) + assert code.co_code == dode.co_code, debug_bytes(code.co_code, dode.co_code) + assert code.co_lnotab == dode.co_lnotab, debug_bytes(code.co_lnotab, dode.co_lnotab) + + +HAS_LOCAL = set(dis.haslocal) +HAS_NAME = 
set(dis.hasname) +HAS_FREE = set(dis.hasfree) +HAS_CONST = set(dis.hasconst) + + +def get_const_index(code_options, val) -> int: + for i, v in enumerate(code_options["co_consts"]): + # NOTE: stronger comparison is required, since we have + # examples where two values compare equal but have + # different semantic meaning in some cases, e.g. + # 0.0 == -0.0 but have different effects in torch.copysign. + if val is v: + return i + code_options["co_consts"] += (val,) + return len(code_options["co_consts"]) - 1 + + +def fix_vars(instructions: List[Instruction], code_options, varname_from_oparg=None): + # compute instruction arg from argval if arg is not provided + names = {name: idx for idx, name in enumerate(code_options["co_names"])} + if sys.version_info < (3, 11): + assert varname_from_oparg is None + varnames = {name: idx for idx, name in enumerate(code_options["co_varnames"])} + freenames = { + name: idx + for idx, name in enumerate( + code_options["co_cellvars"] + code_options["co_freevars"] + ) + } + else: + assert callable(varname_from_oparg) + allnames = {} + for idx in itertools.count(): + try: + name = varname_from_oparg(idx) + allnames[name] = idx + except IndexError: + break + varnames = {name: allnames[name] for name in code_options["co_varnames"]} + freenames = { + name: allnames[name] + for name in code_options["co_cellvars"] + code_options["co_freevars"] + } + for i in range(len(instructions)): + + def should_compute_arg(): + # argval is prioritized over arg + return instructions[i].argval is not _NotProvided + + if instructions[i].opname == "LOAD_GLOBAL": + # 3.11 LOAD_GLOBAL requires both arg and argval - see create_load_global + assert instructions[i].arg is not None + assert instructions[i].argval is not _NotProvided + if sys.version_info >= (3, 11): + instructions[i].arg = (names[instructions[i].argval] << 1) + ( + cast(int, instructions[i].arg) % 2 + ) + else: + instructions[i].arg = names[instructions[i].argval] + elif instructions[i].opcode in HAS_LOCAL: + if should_compute_arg(): + instructions[i].arg = varnames[instructions[i].argval] + elif instructions[i].opcode in HAS_NAME: + if should_compute_arg(): + instructions[i].arg = names[instructions[i].argval] + elif instructions[i].opcode in HAS_FREE: + if should_compute_arg(): + instructions[i].arg = freenames[instructions[i].argval] + elif instructions[i].opcode in HAS_CONST: + # NOTE: only update argval if arg is not provided. This assumes + # that any additions to co_consts are appended. + if instructions[i].arg is None: + # cannot use a dictionary since consts may not be hashable + idx = get_const_index(code_options, instructions[i].argval) + assert idx >= 0 + instructions[i].arg = idx + + +def get_code_keys() -> List[str]: + # Python 3.11 changes to code keys are not fully documented. + # See https://github.com/python/cpython/blob/3.11/Objects/clinic/codeobject.c.h#L24 + # for new format. 
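+ # Descriptive note (added): the order of the keys assembled below must match the
+ # positional-argument order of types.CodeType for the running Python version,
+ # since callers later rebuild the code object via types.CodeType(*[code_options[k] for k in keys]).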
+ keys = ["co_argcount"] + keys.append("co_posonlyargcount") + keys.extend( + [ + "co_kwonlyargcount", + "co_nlocals", + "co_stacksize", + "co_flags", + "co_code", + "co_consts", + "co_names", + "co_varnames", + "co_filename", + "co_name", + ] + ) + if sys.version_info >= (3, 11): + keys.append("co_qualname") + keys.append("co_firstlineno") + if sys.version_info >= (3, 10): + keys.append("co_linetable") + else: + keys.append("co_lnotab") + if sys.version_info >= (3, 11): + # not documented, but introduced in https://github.com/python/cpython/issues/84403 + keys.append("co_exceptiontable") + keys.extend( + [ + "co_freevars", + "co_cellvars", + ] + ) + return keys + + +def transform_code_object(code, transformations, safe=False) -> types.CodeType: + keys = get_code_keys() + code_options = {k: getattr(code, k) for k in keys} + assert len(code_options["co_varnames"]) == code_options["co_nlocals"] + + instructions = cleaned_instructions(code, safe) + propagate_line_nums(instructions) + + transformations(instructions, code_options) + return clean_and_assemble_instructions(instructions, keys, code_options)[1] + + +def clean_and_assemble_instructions( + instructions: List[Instruction], keys: List[str], code_options: Dict[str, Any] +) -> Tuple[List[Instruction], types.CodeType]: + # also implicitly checks for no duplicate instructions + check_inst_exn_tab_entries_valid(instructions) + + code_options["co_nlocals"] = len(code_options["co_varnames"]) + varname_from_oparg = None + if sys.version_info >= (3, 11): + # temporary code object with updated names + tmp_code = types.CodeType(*[code_options[k] for k in keys]) + varname_from_oparg = tmp_code._varname_from_oparg # type: ignore[attr-defined] + fix_vars(instructions, code_options, varname_from_oparg=varname_from_oparg) + + dirty = True + while dirty: + update_offsets(instructions) + devirtualize_jumps(instructions) + # this pass might change offsets, if so we need to try again + dirty = bool(fix_extended_args(instructions)) + + remove_extra_line_nums(instructions) + bytecode, lnotab = assemble(instructions, code_options["co_firstlineno"]) + if sys.version_info < (3, 10): + code_options["co_lnotab"] = lnotab + else: + code_options["co_linetable"] = lnotab + + code_options["co_code"] = bytecode + code_options["co_stacksize"] = stacksize_analysis(instructions) + assert set(keys) - {"co_posonlyargcount"} == set(code_options.keys()) - { + "co_posonlyargcount" + } + if sys.version_info >= (3, 11): + code_options["co_exceptiontable"] = assemble_exception_table( + compute_exception_table(instructions) + ) + return instructions, types.CodeType(*[code_options[k] for k in keys]) + + +def populate_kw_names_argval(instructions, consts): + for inst in instructions: + if inst.opname == "KW_NAMES": + inst.argval = consts[inst.arg] + + +def cleaned_instructions(code, safe=False) -> List[Instruction]: + instructions = list(map(convert_instruction, dis.get_instructions(code))) + check_offsets(instructions) + if sys.version_info >= (3, 11): + populate_kw_names_argval(instructions, code.co_consts) + virtualize_exception_table(code.co_exceptiontable, instructions) + virtualize_jumps(instructions) + strip_extended_args(instructions) + if not safe: + if sys.version_info < (3, 11): + remove_load_call_method(instructions) + else: + remove_jump_if_none(instructions) + update_offsets(instructions) + devirtualize_jumps(instructions) + explicit_super(code, instructions) + return instructions + + +_unique_id_counter = itertools.count() + + +def unique_id(name) -> str: + return 
f"{name}_{next(_unique_id_counter)}" + + +def is_generator(code: types.CodeType) -> bool: + co_generator = 0x20 + return (code.co_flags & co_generator) > 0 diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/cache_size.py b/venv/lib/python3.10/site-packages/torch/_dynamo/cache_size.py new file mode 100644 index 0000000000000000000000000000000000000000..340f227a9956f7e5621afe1fbe41109da9221bc9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/cache_size.py @@ -0,0 +1,172 @@ +import logging +import types +import weakref +from dataclasses import dataclass +from typing import Tuple + +from . import config + +log = logging.getLogger(__name__) +""" +[Note on cache size limit] + +Background - TorchDynamo cache is a linked list. Each cache entry is a +(check_fn, out_code, next pointer). These are stored on the f_code's co_extra +scratch space. When a frame is invoked, we walk this linked list and run +check_fn in each cache_entry to decide if the frame needs recompilation. If none +of the check_fn's returns True, we recompile and add a new entry. To ensure we +don't end up recompiling infinitely, we put limits on the cache size. + +There are two limits +1) cache_size_limit +2) accumulated_cache_size_limit + + +Earlier we used to have only limit - maximum number of entries in 1 cache line +(which is now represented by (2) above). So, why do we need two limits? Lets try +to understand that. + +In general, we want our cache limit value to be a small number (e.g. 8 or even +lower). This ensures that for frames that cause too many recompilation fall to +eager quickly. However, there is another problem that prevents us from lowering +the value of cache_size_limit. This is due to ID_MATCH'd guards. Today, we put +ID_MATCH guards on nn module if there is a graph break. This means we will have +many recompilations for the same code object because the ID_MATCH guard fails +for different instances of the nn module. This is a common pattern in how models +are authored. Therefore, this requires us to keep the cache_size_limit high. + +We resolve this by introducing these two limits. The first limit (1) limits the +number of cache entries that have an ID_MATCH'd guard for an nn module instance. +And, (2)nd limit becomes a safeguard mechanism to have a maximum compilations +for a code object. One important question is - what is the limit for the code +object that does not have any ID_MATCH guard? For such code objects, we choose +(1) as the cache size limit. + +Lets take an example to understand how these limits help. Suppose, we have 16 +instances of a nn module and we ID_MATCH on the self object. Further, suppose +the inputs to these functions have varying batch size, leading to one +recompilation. In total, there will be 32 recompilations, and therefore 32 cache +entries on the forward code object. In the older case when we had only 1 limit, +our cache size limit must be >= 32 to capture all these recompilations. Now, +suppose there is a separate function in the same program which is very dynamic +and unsuitable for compilation. Such a function will need to undergo 32 +compilations to burst the cache and fallback to eager. These 32 recompilations +are too many and we want to fallback for these compilation-unfriendly functions +sooner. + +In the new scenario, we can have (1) cache_size_limit = 2, (2) +accumulated_cache_size_limit = 32. 
This means that each ID_MATCH'd object can +have maximum of two cache entries, and the maximum number of cache entries +(irrespective of ID_MATCH obj) is 32. This covers the case of forward code +object which has 32 recompilations. For the other function, the one unsuitable +for recompilation, our limit is 2. So, we will burst the cache in just 2 +recompilations. In this manner, these 2 limits help us resolve the tension +mentioned earlier. +""" + + +@dataclass +class CacheSizeRelevantForFrame: + """ + We track the number of cache entries that have same id_match objects as the + given frame. + + TODO(janimesh) - Consider adding a map from tuple_of_match_ids to count - + https://github.com/pytorch/pytorch/pull/107496#discussion_r1304564682 - this + could be useful for debugging as well. + """ + + # Total number of CacheEntry objects in the Dynamo linked list + num_cache_entries: int = 0 + + # Number of CacheEntry objects having same ID_MATCH'd objects as given frame. + num_cache_entries_with_same_id_matched_objs: int = 0 + + def will_compilation_exceed(self, limit: int) -> bool: + # Checks if a compilation will exceed the given limit (thats why >=). + return ( + self.will_compilation_exceed_accumulated_limit() + or self.will_compilation_exceed_specific_limit(limit) + ) + + def will_compilation_exceed_accumulated_limit(self) -> bool: + return self.num_cache_entries >= config.accumulated_cache_size_limit + + def will_compilation_exceed_specific_limit(self, limit: int) -> bool: + return self.num_cache_entries_with_same_id_matched_objs >= limit + + +def _get_weakref_from_f_locals(frame: types.FrameType, local_name: str): + obj = frame.f_locals.get(local_name, None) + weak_id = None + try: + weak_id = weakref.ref(obj) + except TypeError: + pass # cannot weakref bool object + return weak_id + + +def _has_same_id_matched_objs(frame: types.FrameType, cache_entry) -> bool: + """ + Checks if the ID_MATCH'd objects saved on cache_entry are same as the ones + in frame.f_locals. + """ + if not cache_entry: + return False + + for ( + local_name, + weakref_from_cache_entry, + ) in cache_entry.check_fn.id_matched_objs.items(): + if weakref_from_cache_entry() is not None: + weakref_from_frame = _get_weakref_from_f_locals(frame, local_name) + if weakref_from_frame != weakref_from_cache_entry: + return False + + # Also covers the case where no ID_MATCH objects are saved in frame.f_locals + return True + + +def compute_cache_size( + frame: types.FrameType, cache_entry +) -> CacheSizeRelevantForFrame: + # Walk the linked list to calculate the cache size + num_cache_entries = 0 + num_cache_entries_with_same_id_matched_objs = 0 + + while cache_entry: + num_cache_entries += 1 + # Track the number of cache entries having same ID_MATCH'd objects as + # that of frame.f_locals. This will be used later to compare against the + # cache_size_limit. + if _has_same_id_matched_objs(frame, cache_entry): + num_cache_entries_with_same_id_matched_objs += 1 + cache_entry = cache_entry.next + + return CacheSizeRelevantForFrame( + num_cache_entries, num_cache_entries_with_same_id_matched_objs + ) + + +def is_recompilation(cache_size: CacheSizeRelevantForFrame) -> bool: + """ + If the frame (earlier parsed by compute_cache_size) has more than 1 cache + entry with same ID_MATCH'd objects, then its a recompilation. 
+ """ + # Note that you can have multiple entries in the cache but still not a + # recompile, e.g., you can have 64 nn module instances, each one having an + # ID_MATCH guard, and each one having just 1 cache entry in the cache. In + # this case, we can have 64 entries in the cache, but no recompilation + # because there is only one entry for each id_matched_obj. + return cache_size.will_compilation_exceed(1) + + +def exceeds_cache_size_limit(cache_size: CacheSizeRelevantForFrame) -> Tuple[bool, str]: + """ + Checks if we are exceeding the cache size limit. + """ + if cache_size.will_compilation_exceed_accumulated_limit(): + return True, "accumulated_cache_size_limit" + if cache_size.will_compilation_exceed_specific_limit(config.cache_size_limit): + return True, "cache_size_limit" + return False, "" diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/callback.py b/venv/lib/python3.10/site-packages/torch/_dynamo/callback.py new file mode 100644 index 0000000000000000000000000000000000000000..a65e2844f215f4195d20b46499668ec12eb0919f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/callback.py @@ -0,0 +1,82 @@ +class CompilationCallbackHandler: + def __init__(self): + self.start_callbacks = [] + self.end_callbacks = [] + + def register_start_callback(self, callback): + """ + Register a callback function to be called when the compilation starts. + + Args: + - callback (callable): The callback function to register. + """ + self.start_callbacks.append(callback) + return callback + + def register_end_callback(self, callback): + """ + Register a callback function to be called when the compilation ends. + + Args: + - callback (callable): The callback function to register. + """ + self.end_callbacks.append(callback) + return callback + + def remove_start_callback(self, callback): + """ + Remove a registered start callback function. + + Args: + - callback (callable): The callback function to remove. + """ + self.start_callbacks.remove(callback) + + def remove_end_callback(self, callback): + """ + Remove a registered end callback function. + + Args: + - callback (callable): The callback function to remove. + """ + self.end_callbacks.remove(callback) + + def run_start_callbacks(self): + """ + Execute all registered start callbacks. + """ + for callback in self.start_callbacks: + callback() + + def run_end_callbacks(self): + """ + Execute all registered end callbacks. + """ + for callback in self.end_callbacks: + callback() + + def clear(self): + """ + Clear all registered callbacks. + """ + self.start_callbacks.clear() + self.end_callbacks.clear() + + +callback_handler = CompilationCallbackHandler() + + +def on_compile_start(callback): + """ + Decorator to register a callback function for the start of the compilation. + """ + callback_handler.register_start_callback(callback) + return callback + + +def on_compile_end(callback): + """ + Decorator to register a callback function for the end of the compilation. 
+ """ + callback_handler.register_end_callback(callback) + return callback diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/code_context.py b/venv/lib/python3.10/site-packages/torch/_dynamo/code_context.py new file mode 100644 index 0000000000000000000000000000000000000000..0fe19016ca137b462b288279b92774109961fd72 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/code_context.py @@ -0,0 +1,29 @@ +import types + +from .utils import ExactWeakKeyDictionary + + +class CodeContextDict: + def __init__(self): + self.code_context = ExactWeakKeyDictionary() + + def has_context(self, code: types.CodeType): + return code in self.code_context + + def get_context(self, code: types.CodeType): + ctx = self.code_context.get(code) + if ctx is None: + ctx = {} + self.code_context[code] = ctx + return ctx + + def pop_context(self, code: types.CodeType): + ctx = self.get_context(code) + self.code_context._remove_id(id(code)) + return ctx + + def clear(self): + self.code_context.clear() + + +code_context = CodeContextDict() diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/codegen.py b/venv/lib/python3.10/site-packages/torch/_dynamo/codegen.py new file mode 100644 index 0000000000000000000000000000000000000000..7d283789c22c020052b24dfa536b2ffcbdcc2e4e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/codegen.py @@ -0,0 +1,398 @@ +import collections +import dataclasses +import re +import sys +import types +from typing import Counter, Dict, List, Optional + +import torch.nn +from . import utils + +from .bytecode_transformation import ( + create_call_function, + create_dup_top, + create_instruction, + create_load_global, + create_rot_n, + Instruction, +) +from .exc import unimplemented +from .source import AttrSource, Source +from .utils import is_safe_constant, rot_n_helper +from .variables.base import VariableTracker +from .variables.nn_module import NNModuleVariable +from .variables.tensor import ( + NumpyNdarrayVariable, + SymNodeVariable, + TensorVariable, + UnspecializedPythonVariable, +) +from .variables.torch_function import TensorWithTFOverrideVariable + + +@dataclasses.dataclass +class GraphOutputEntry: + index: int + variable: VariableTracker + + +class PyCodegen: + """ + Helper class uses for constructing Python bytecode + """ + + def __init__( + self, + tx=None, + root: Optional[torch.nn.Module] = None, + graph_output_var: Optional[str] = None, + tempvars=None, + ): + self.root = root + self.top_of_stack: Optional[VariableTracker] = None + self.uses: Counter[VariableTracker] = collections.Counter() + self.graph_outputs: Dict[int, GraphOutputEntry] = {} + self._output: List[Instruction] = [] + self.tempvars = tempvars or {} + self.tx = tx + self.graph_output_var = graph_output_var + self.code_options = self.tx.output.code_options + self.cell_and_freevars = self.tx.cell_and_freevars + self.new_var = self.tx.output.new_var + self.mutable_side_effects_from_source = False + self.value_from_source: bool = True + + def restore_stack(self, stack_values, *, value_from_source=True): + prior = self.mutable_side_effects_from_source + self.mutable_side_effects_from_source = True + prev = self.value_from_source + self.value_from_source &= value_from_source + try: + self.foreach(stack_values) + finally: + self.mutable_side_effects_from_source = prior + self.value_from_source = prev + + def graph_output_vars(self): + return [x.variable for x in self.graph_outputs.values()] + + def call_reconstruct(self, value): + res = value.reconstruct(self) + assert res is 
None, f"reconstruct!=None {value}" + + def __call__(self, value, allow_cache=True): + """Generate code such that top-of-stack (TOS) is set to value""" + if isinstance(value, Source): + self.call_reconstruct(value) + self.clear_tos() + return + + assert isinstance(value, VariableTracker) + output = self._output + graph_outputs = self.graph_outputs + + if self.top_of_stack is value and allow_cache: + output.append(create_dup_top()) + return + + if self.mutable_side_effects_from_source: + # this is needed to get aliasing relationships right + # value.mutable_local.source will get mutated to hold `value` + # mutable_side_effects_from_source=False is used to codegen the mutation + # mutable_side_effects_from_source=True is used to codegen a reference + from .side_effects import MutableSideEffects + + if isinstance(value.mutable_local, MutableSideEffects): + self(value.mutable_local.source) + return + + if allow_cache: + if value.mutable_local and value.mutable_local in self.tempvars: + output.append(self.create_load(self.tempvars[value.mutable_local])) + self.top_of_stack = value + return + if self.tempvars.get(value) is not None: + output.append(self.create_load(self.tempvars[value])) + self.top_of_stack = value + return + + if value.source is not None and allow_cache and self.value_from_source: + self.call_reconstruct(value.source) + elif value.is_python_constant() and is_safe_constant( + value.as_python_constant() + ): + output.append(self.create_load_const(value.as_python_constant())) + elif isinstance(value, TensorWithTFOverrideVariable): + graph_outputs_key = self.add_graph_output(value) + + self.load_import_from(utils.__name__, "to_subclass") + self.load_graph_output(graph_outputs[graph_outputs_key].index) + output.append( + self.create_load_global( + value.global_mangled_class_name(self.tx), False, add=True + ) + ) + output.extend(create_call_function(2, True)) + elif isinstance( + value, + ( + TensorVariable, + SymNodeVariable, + UnspecializedPythonVariable, + NumpyNdarrayVariable, + ), + ): + graph_outputs_key = self.add_graph_output(value) + + if isinstance(value, NumpyNdarrayVariable): + self.load_import_from(utils.__name__, "to_numpy_helper") + + self.load_graph_output(graph_outputs[graph_outputs_key].index) + + if isinstance(value, NumpyNdarrayVariable): + output.extend(create_call_function(1, True)) + elif isinstance(value, UnspecializedPythonVariable) and value.need_unwrap: + output.extend( + [self.create_load_attr("item")] + create_call_function(0, True) + ) + elif isinstance(value, NNModuleVariable): + parts = value.module_key.split(".") + if parts[0] in self.code_options["co_varnames"]: + output.append(self.create_load(parts[0])) + parts = parts[1:] + else: + assert self.root is not None + output.append(self.create_load_output(self.root)) + for part in parts: + output.append(self.create_load_attr(part)) + else: + self.uses[value] += 1 + try: + self.call_reconstruct(value) + except NotImplementedError: + unimplemented(f"reconstruct: {value}") + if allow_cache and value in self.tempvars: + self._output.append(create_dup_top()) + self.add_cache(value) + + self.top_of_stack = value + + def add_graph_output(self, value): + graph_outputs_key = id(value.as_proxy()) + if graph_outputs_key not in self.graph_outputs: + self.graph_outputs[graph_outputs_key] = GraphOutputEntry( + len(self.graph_outputs), value + ) + return graph_outputs_key + + def load_graph_output(self, index): + output = self._output + output.append(self.create_load(self.graph_output_var)) + 
output.append(self._create_load_const(index)) + output.append(create_instruction("BINARY_SUBSCR")) + + def add_cache(self, value): + var = self.new_var() + self.tempvars[value] = var + if value.mutable_local: + self.tempvars[value.mutable_local] = var + self._output.append(self.create_store(var)) + + def foreach(self, items): + for i in items: + self(i) + + def setup_globally_cached(self, name, value, push_null): + """Store value in a new global""" + name = re.sub(r"[^a-zA-Z0-9_]+", "_", name) + f_globals = self.tx.f_globals + if name in f_globals: + assert id(f_globals[name]) == id(value) + else: + f_globals[name] = value + return [self.create_load_global(name, push_null, add=True)] + + def clear_tos(self): + self.top_of_stack = None + + def append_output(self, inst): + assert isinstance(inst, Instruction) + self._output.append(inst) + self.clear_tos() + + def extend_output(self, insts): + assert all(isinstance(x, Instruction) for x in insts) + self._output.extend(insts) + self.clear_tos() + + def get_instructions(self) -> List[Instruction]: + return self._output + + def create_load(self, name) -> Instruction: + if name in self.cell_and_freevars(): + return create_instruction("LOAD_DEREF", argval=name) + assert name in self.code_options["co_varnames"], f"{name} missing" + return create_instruction("LOAD_FAST", argval=name) + + def create_load_closure(self, name) -> Instruction: + assert name in self.cell_and_freevars() + return create_instruction("LOAD_CLOSURE", argval=name) + + def create_store(self, name) -> Instruction: + if name in self.cell_and_freevars(): + return create_instruction("STORE_DEREF", argval=name) + assert name in self.code_options["co_varnames"] + return create_instruction("STORE_FAST", argval=name) + + def create_load_global(self, name, push_null, add=False) -> Instruction: + if add: + self.tx.output.update_co_names(name) + assert name in self.code_options["co_names"], f"{name} not in co_names" + return create_load_global(name, push_null) + + def create_load_const(self, value) -> Instruction: + assert is_safe_constant(value), f"unsafe constant {value}" + return self._create_load_const(value) + + def _create_load_const(self, value) -> Instruction: + return create_instruction("LOAD_CONST", argval=value) + + create_load_output = _create_load_const + + def create_load_method(self, name): + self.tx.output.update_co_names(name) + return create_instruction("LOAD_METHOD", argval=name) + + def create_load_attr(self, name) -> Instruction: + if name not in self.code_options["co_names"]: + self.code_options["co_names"] += (name,) + return create_instruction("LOAD_ATTR", argval=name) + + def load_attr(self, name): + self.append_output(self.create_load_attr(name)) + + def create_load_attrs(self, names): + return [self.create_load_attr(name) for name in names.split(".")] + + def create_store_attr(self, name) -> Instruction: + if name not in self.code_options["co_names"]: + self.code_options["co_names"] += (name,) + return create_instruction("STORE_ATTR", argval=name) + + def store_attr(self, name): + self.append_output(self.create_store_attr(name)) + + def load_function_name(self, fn_name, push_null, num_on_stack=0): + """Load the global fn_name on the stack num_on_stack down""" + output = [] + if push_null and sys.version_info >= (3, 11): + output.extend( + [create_instruction("PUSH_NULL"), *self.rot_n(num_on_stack + 1)] + ) + output.extend( + [ + self.create_load_global(fn_name, False, add=True), + *self.rot_n(num_on_stack + 1), + ] + ) + return output + + def rot_n(self, n): + 
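+ # Descriptive note (added): rotate the top n stack values; when no suitable native
+ # rotate instruction exists, emulate it by packing the values into a tuple and
+ # re-unpacking them through a helper call.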
try: + return create_rot_n(n) + except AttributeError: + # desired rotate bytecode doesn't exist, generate equivalent bytecode + return [ + create_instruction("BUILD_TUPLE", arg=n), + self._create_load_const(rot_n_helper(n)), + *create_rot_n(2), + create_instruction("CALL_FUNCTION_EX", arg=0), + create_instruction("UNPACK_SEQUENCE", arg=n), + ] + + def pop_null(self): + # POP_TOP doesn't work for null, so we pop nulls by pushing in a + # nop function, calling it (which consumes the null), and popping the result. + assert sys.version_info >= (3, 11) + return [ + self._create_load_const(lambda: None), + *create_call_function(0, False), + create_instruction("POP_TOP"), + ] + + def call_function(self, nargs: int, push_null: bool): + self.extend_output(create_call_function(nargs, push_null=push_null)) + + def dup_top(self): + self.append_output(create_dup_top()) + + def store(self, varname): + self.append_output(self.create_store(varname)) + + def make_function_with_closure( + self, fn_name: str, code: types.CodeType, push_null: bool, num_on_stack=0 + ): + freevars = code.co_freevars + assert freevars + output = self._output + if sys.version_info >= (3, 11) and push_null: + output.append(create_instruction("PUSH_NULL")) + output.extend(self.rot_n(num_on_stack + 1)) + for var in freevars: + assert var in self.cell_and_freevars() + output.append(create_instruction("LOAD_CLOSURE", argval=var)) + output.append(create_instruction("BUILD_TUPLE", arg=len(freevars))) + output.append(self.create_load_const(code)) + if sys.version_info < (3, 11): + output.append(self.create_load_const(fn_name)) + output.append(create_instruction("MAKE_FUNCTION", arg=0x08)) + output.extend(self.rot_n(num_on_stack + 1)) + self.clear_tos() + + def create_load_python_module(self, mod, push_null) -> Instruction: + """ + Generate a LOAD_GLOBAL instruction to fetch a given python module. 
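+ If the module is already present in the output's global scope under its
+ bare name, reuse that global; otherwise install it under a mangled
+ ___module_* name and load that instead.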
+ """ + output = self.tx.output + global_scope = output.global_scope + name = re.sub(r"^.*[.]", "", mod.__name__) + if global_scope.get(name, None) is mod: + return self.create_load_global(name, push_null, add=True) + prefix = f"___module_{name}" + global_name = self.tx.output.install_global_by_id(prefix, mod) + return self.create_load_global(global_name, push_null, add=True) + + def make_call_generated_code(self, fn_name: str) -> None: + """Call the generated code function stored in fn_name""" + self.extend_output(self.load_function_name(fn_name, True)) + + graphargs = self.tx.output.graphargs + for arg in graphargs: + if arg.is_unspecialized: + self.extend_output( + [ + self.create_load_python_module(torch, True), + self.create_load_attr("as_tensor"), + ] + ) + self.call_reconstruct(arg) + self.extend_output(create_call_function(1, False)) + else: + self.call_reconstruct(arg) + + self.extend_output(create_call_function(len(graphargs), False)) + + def load_import_from(self, module_name, object_name) -> None: + self(AttrSource(self.tx.import_source(module_name), object_name)) + + def create_call_function_kw(self, nargs, kw_names, push_null) -> List[Instruction]: + if sys.version_info >= (3, 11): + output = create_call_function(nargs, push_null) + assert output[-2].opname == "PRECALL" + kw_names_inst = create_instruction("KW_NAMES", argval=kw_names) + output.insert(-2, kw_names_inst) + return output + return [ + self.create_load_const(kw_names), + create_instruction("CALL_FUNCTION_KW", arg=nargs), + ] diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/compiled_autograd.py b/venv/lib/python3.10/site-packages/torch/_dynamo/compiled_autograd.py new file mode 100644 index 0000000000000000000000000000000000000000..0fe43fcaaf37b41dbb97adc2ef196c6614a0a252 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/compiled_autograd.py @@ -0,0 +1,280 @@ +import contextlib +import functools +from typing import List, Optional + +import torch +from torch._dynamo.external_utils import call_backward, call_hook +from torch._dynamo.source import GetItemSource, LocalSource +from torch._dynamo.utils import counters, lazy_format_graph_code +from torch._logging import getArtifactLogger, trace_structured +from torch._prims_common import clone_preserve_strides +from torch._subclasses import FakeTensorMode +from torch.fx import GraphModule +from torch.fx.experimental._backward_state import BackwardState +from torch.fx.experimental.proxy_tensor import ( + decompose, + disable_autocast_cache, + disable_proxy_modes_tracing, + fetch_object_proxy, + ProxyTorchDispatchMode, + PythonKeyTracer, + track_tensor_tree, +) +from torch.fx.experimental.symbolic_shapes import DimDynamic, ShapeEnv +from torch.fx.proxy import Proxy + +compiled_autograd_log = getArtifactLogger(__name__, "compiled_autograd") + + +def maybe_clone(x): + if x is not None: + return clone_preserve_strides(x) + return x + + +class AutogradCompilerInstance: + def __init__(self, compiler_fn) -> None: + self.compiler_fn = compiler_fn + self.stack = contextlib.ExitStack() + self.close = self.stack.close + self.shape_env = ShapeEnv() + self.fake_tensor_mode = FakeTensorMode( + allow_fallback_kernels=True, + allow_non_fake_inputs=True, + shape_env=self.shape_env, + ) + self.fx_tracer = PythonKeyTracer() + self.proxy_mode = ProxyTorchDispatchMode(self.fx_tracer, "symbolic") + self.hooks_proxy: Optional[Proxy] = None + + def wrap_fake(self, x, source): + assert isinstance(x, torch.Tensor) + return self.fake_tensor_mode.from_tensor(x, 
source=source) + + @staticmethod + def source(name, idx) -> GetItemSource: + return GetItemSource(LocalSource(name), idx) + + def begin_capture(self, inputs: List[torch.Tensor], sizes: List[int]): + counters["compiled_autograd"]["captures"] += 1 + self.fx_tracer.root = torch.nn.Module() + self.fx_tracer.graph = torch.fx.Graph(tracer_cls=PythonKeyTracer) + self.fx_tracer.tensor_attrs = {} + args_proxy = self.fx_tracer.create_proxy("placeholder", "inputs", (), {}) + sizes_proxy = self.fx_tracer.create_proxy("placeholder", "sizes", (), {}) + self.hooks_proxy = self.fx_tracer.create_proxy("placeholder", "hooks", (), {}) + + # tensor inputs to fake tensors + inputs = [ + self.wrap_fake(x, self.source("inputs", idx)) + for idx, x in enumerate(inputs) + ] + proxies = [args_proxy[i] for i in range(len(inputs))] + self.bind_tensors_to_proxies(inputs, proxies) + + # size inputs to symints + sizes = [ + self.shape_env.create_unspecified_symint_and_symbol( + val, + self.source("sizes", idx), + DimDynamic.DYNAMIC, + ) + for idx, val in enumerate(sizes) + ] + self.bind_tensors_to_proxies(sizes, sizes_proxy) + + # TODO(jansel): are all these modes needed? + self.stack.enter_context(decompose({})) + self.stack.enter_context(self.fake_tensor_mode) + self.stack.enter_context(self.proxy_mode.sym_mode) + self.stack.enter_context(self.proxy_mode) + self.stack.enter_context(disable_autocast_cache()) + return inputs, sizes + + def proxy_call_backward( + self, + inputs, + output_metadatas, + saved_tensors, + backward_idx: int, + ): + assert self.hooks_proxy is not None + backward_fn = self.hooks_proxy[backward_idx] # type: ignore[index] + proxies = self.fx_tracer.create_proxy( + kind="call_function", + target=call_backward, + args=( + backward_fn, + self.to_proxy(saved_tensors), + *self.to_proxy(inputs), + ), + kwargs={}, + ) + + with disable_proxy_modes_tracing(): + # create fake Tensors + grad_ins: List[Optional[torch.Tensor]] = [] + for output_metadata in output_metadatas: + if output_metadata is None: + grad_ins.append(None) + continue + + layout, device, dtype, size = output_metadata + grad_ins.append( + torch.empty(size=size, dtype=dtype, layout=layout, device=device) + ) + self.bind_tensors_to_proxies(grad_ins, proxies) + return tuple(grad_ins) + + def proxy_call_hook(self, hook, *args): + return self.fx_tracer.create_proxy( + "call_function", + call_hook, + ( + hook, + *[self.to_proxy(x) for x in args], + ), + {}, + ) + + def tensor_pre_hook(self, inputs, hook_id, i: int): + assert self.hooks_proxy is not None + hook = self.hooks_proxy[hook_id] # type: ignore[index] + proxy = self.proxy_call_hook( + hook, + inputs[i], + ) + with disable_proxy_modes_tracing(): + inputs[i] = maybe_clone(inputs[i]) + self.bind_tensors_to_proxies([inputs[i]], [proxy]) + return inputs + + def pre_hook(self, inputs, hook_id): + assert self.hooks_proxy is not None + hook = self.hooks_proxy[hook_id] # type: ignore[index] + proxies = self.proxy_call_hook( + hook, + inputs, + ) + with disable_proxy_modes_tracing(): + inputs = [maybe_clone(x) for x in inputs] + self.bind_tensors_to_proxies(inputs, proxies) + return inputs + + def post_hook(self, outputs, inputs, hook_id): + assert self.hooks_proxy is not None + hook = self.hooks_proxy[hook_id] # type: ignore[index] + proxies = self.proxy_call_hook( + hook, + outputs, + inputs, + ) + with disable_proxy_modes_tracing(): + outputs = [maybe_clone(x) for x in outputs] + self.bind_tensors_to_proxies(outputs, proxies) + return outputs + + def post_acc_grad_hook(self, input, hook_id): + 
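+ # Descriptive note (added): trace a post-accumulate-grad hook by proxying the hook
+ # call, cloning the input tensor, and binding the clone to the proxy's output.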
assert isinstance(input, torch.Tensor) + assert self.hooks_proxy is not None + hook = self.hooks_proxy[hook_id] # type: ignore[index] + proxies = self.proxy_call_hook( + hook, + input, + ) + with disable_proxy_modes_tracing(): + input = [maybe_clone(input)] + self.bind_tensors_to_proxies(input, proxies) + return input + + def end_capture(self, outputs): + self.stack.close() + self.fx_tracer.create_node( + "output", + "output", + (self.fx_tracer.create_arg(self.to_proxy(outputs)),), + {}, + ) + graph = GraphModule( + self.fx_tracer.root, self.fx_tracer.graph, "CompiledAutograd" + ) + compiled_autograd_log.info( + "%s", lazy_format_graph_code("Compiled autograd graph", graph) + ) + trace_structured( + "compiled_autograd_graph", + payload_fn=lambda: graph.print_readable(print_output=False), + ) + return self.compiler_fn(graph) + + def to_proxy(self, t): + if t is None: + return None + if isinstance(t, list): + return [self.to_proxy(x) for x in t] + if isinstance(t, tuple): + return tuple(self.to_proxy(x) for x in t) + assert isinstance(t, (torch.Tensor, torch.SymInt)) + return fetch_object_proxy(self.fx_tracer)(t).proxy + + def bind_tensors_to_proxies(self, tensors, proxies): + if isinstance(proxies, torch.fx.Proxy): + proxies = [proxies[i] for i in range(len(tensors))] + assert len(tensors) == len(proxies) + track_tensor_tree(tensors, proxies, constant=None, tracer=self.fx_tracer) + + def bind_backward_state(self, index: int): + assert self.hooks_proxy is not None + proxy = self.hooks_proxy[index] # type: ignore[index] + bw_state = BackwardState() + track_tensor_tree(bw_state, proxy, constant=None, tracer=self.fx_tracer) + return bw_state + + +compiled_autograd_enabled = False + +# We may have code like: +# with enable(compiler_fn): +# ... +# with disable(): +# ... +# ... +# The disable() call just want to disable compiled autograd temporarily. +# But overall the feature is enabled. +# +# The code covered by the disable context manager has no way to know if +# compiled autograd is overall eanbled. Use another variable +# compiled_autograd_enabled_count to indicate how many times compiled +# autograd has been enabled in the call stack for this purpose. +compiled_autograd_enabled_count = 0 + + +@contextlib.contextmanager +def enable(compiler_fn): + prior = torch._C._dynamo.compiled_autograd.set_autograd_compiler( + functools.partial(AutogradCompilerInstance, compiler_fn) + ) + global compiled_autograd_enabled, compiled_autograd_enabled_count + compiled_autograd_enabled = True + compiled_autograd_enabled_count += 1 + try: + with torch.autograd.set_multithreading_enabled(False): + yield + finally: + compiled_autograd_enabled_count -= 1 + if not prior: + compiled_autograd_enabled = False + torch._C._dynamo.compiled_autograd.set_autograd_compiler(prior) + + +@contextlib.contextmanager +def disable(): + prior = torch._C._dynamo.compiled_autograd.set_autograd_compiler(None) + global compiled_autograd_enabled + compiled_autograd_enabled = False + try: + yield + finally: + if prior: + compiled_autograd_enabled = True + torch._C._dynamo.compiled_autograd.set_autograd_compiler(prior) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/comptime.py b/venv/lib/python3.10/site-packages/torch/_dynamo/comptime.py new file mode 100644 index 0000000000000000000000000000000000000000..7b876258bd485e0f3a2ef4a33ce9ce07d0ec8b3b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/comptime.py @@ -0,0 +1,373 @@ +# This file establishes the public comptime interface to Dynamo. 
+# This allows Dynamo users to execute arbitrary Python code while +# Dynamo is symbolically evaluating their original programs. +# +# The goal of the public API is to give users rope, without actually +# leaking private implementation details of Dynamo. + +import builtins +import dis +import traceback +from typing import Optional, Union + +import torch +from torch.fx.experimental.symbolic_shapes import free_symbols + +from .exc import unimplemented +from .variables.constant import ConstantVariable +from .variables.tensor import SymNodeVariable + + +class ComptimeVar: + """ + A ComptimeVar represents a Python value, at some particular point + in time, in the Python code we are symbolically evaluating with + torchdynamo. This must be distinguished from a runtime value, as + at compile-time there are some properties of the variable we + do not know (for example, if the ComptimeVar represents a Tensor, + we only know metadata about the tensor; we do NOT know what the + actual data in the Tensor is.) + """ + + def __init__(self, v): + self.__variable = v + + def as_proxy(self): + """ + Returns an fx.Proxy (or tuple/list of fx.Proxy) representing + this variable in the FX graph we are assembling to pass + to the user compiler. + + This method only works for variables we actually track in + the FX graph, aka Tensors (and ints, if you are compiling + with dynamic shapes). In particular, if you have a list + or tuple of tensors, you will get a list/tuple of proxies + (not a single proxy representing the entire list/tuple). + """ + return self.__variable.as_proxy() + + def is_proxy(self): + """ + Returns True if as_proxy() would succeed. + """ + return self.__variable.is_proxy() + + def as_fake(self): + """ + Returns a "fake" value (either a FakeTensor or a SymInt) + representing the variable in question. This only works + for variables that denote Tensor or int. You can use + this to query metadata; e.g., v.as_fake().size(0) will + tell you the compile-time known size of the tensor. + + WARNING: Do NOT mutate the returned tensor. + """ + return self.__variable.as_proxy().node.meta["example_value"] + + def size(self, dim: Optional[int] = None) -> Union[int, torch.SymInt]: + """ + Returns the size of the tensor (if dim is None) or the size + at the dimension dim. The returned size may be a SymInt. + """ + return self.as_fake().size(dim) + + def python_type(self): + """ + Returns what type(v) would have returned for the variable + at compile time. + """ + return self.__variable.python_type() + + def as_python_constant(self): + """ + Returns the Python value this variable would have, but only if it is + completely known at compile-time (e.g., it is constant). + + WARNING: Do NOT mutate the returned constant. The returned constant + may or may not correspond to the actual value this variable may take + on at runtime; for example, if the variable in question is a constant + list, we may return a copy of that list. + """ + return self.__variable.as_python_constant() + + def is_python_constant(self): + """ + Returns True if as_python_constant would succeed. 
+ """ + return self.__variable.is_python_constant() + + def is_dynamic(self): + if isinstance(self.__variable, SymNodeVariable): + fs = free_symbols(self.__variable.sym_num) + return bool(fs) + return False + + def force_static(self): + """ + Forces that a value is static, inducing a guard on its specific value + """ + if isinstance(self.__variable, SymNodeVariable): + self.__variable.evaluate_expr() + elif isinstance(self.__variable, ConstantVariable): + # TODO: Maybe complain if this isn't a int/bool/float variable + pass + else: + raise AssertionError( + f"cannot force {self.__variable} ({type(self.__variable)}) static" + ) + + def _i_will_not_complain_if_bc_breaks_VariableTracker(self): + """ + Returns the internal data structure VariableTracker that Dynamo uses + to represent variables at compile time. There are no BC guarantees on + this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if you rely on + it. + """ + return self.__variable + + def __repr__(self): + # TODO: The default repr is pretty bad, do better + return repr(self.__variable) + + # TODO: API for adding a custom guard + + +class ComptimeContext: + """ + This context class provides access to a public API for Dynamo's internals. + If there is something here you would find useful that is missing, please + file a feature request at https://github.com/pytorch/pytorch/ + """ + + def __init__(self, tx): + self.__tx = tx + + def get_local(self, name: str, *, stacklevel=0) -> ComptimeVar: + """ + Retrieve the compile-time known information about a local. + """ + tx = self.__get_tx(stacklevel) + return ComptimeVar(tx.symbolic_locals[name]) + + def graph_break(self, msg="ComptimeContext.graph_break"): + """ + Manually trigger a graph break + """ + unimplemented(msg) + + def graph(self): + """ + Retrieve the partially constructed FX graph that would be + passed to the user compiler after compilation. + """ + return self.__tx.output.graph + + def assert_static(self, val): + """ + Asserts that the int is static (and not dynamic, per dynamic shapes) + """ + assert ( + not val.is_dynamic() + ), "expected static but got dynamic (run with TORCH_LOGS=dynamic for more info)" + + def print_graph(self, *, verbose=True, file=None): + """ + Print the partially constructed FX graph that would be passed + to the user compiler after compilation. + """ + print( + self.__tx.output.graph.python_code("self", verbose=verbose).src, file=file + ) + + def parent(self): + return ComptimeContext(self.__tx.parent) + + def __get_tx(self, stacklevel): + tx = self.__tx + for _ in range(stacklevel): + tx = tx.parent + return tx + + def print_disas(self, *, file=None, stacklevel=0): + """ + Print the current series of opcodes being executed (not including + parent frames), including where you are in the particular opcode + stream. + """ + tx = self.__get_tx(stacklevel) + print( + dis.Bytecode( + tx.f_code, + current_offset=tx.instructions[tx.instruction_pointer].offset, + ).dis(), + file=file, + ) + + def print_value_stack(self, *, file=None, stacklevel=0): + """ + Print the current Python value stack. Note that this is NOT the same + as the traceback; use print_bt() to print that. Note that at + stacklevel=0, this will typically be empty, as comptime cannot + currently be used in an expression context where there would be + intermediates on the stack. 
If you would find this useful, please + file a bug at https://github.com/pytorch/pytorch/ + + NB: Stack grows downwards in our print + """ + # TODO: improve printing + tx = self.__get_tx(stacklevel) + for s in tx.stack: + print(f"- {s}", file=file) + + def print_locals(self, *, file=None, stacklevel=0): + """ + Print all of the locals available in the current context. + By default this view is very limited; you can get more information + about any individual local using get_local(). + """ + # TODO: improve by improving the VariableTracker printing + tx = self.__get_tx(stacklevel) + for k, v in tx.symbolic_locals.items(): + print(f"{k} = {v}", file=file) + + def print_bt(self, *, file=None, stacklevel=0): + """ + Print the user code backtrace, starting at the beginning of the + frame Dynamo started evaluating. Note that this MAY NOT go all + the way to the torch.compile invocation, as we may have done + a graph break and are compiling an intermediate frame as the + starting point. If you think the other behavior would be better, + file a bug at https://github.com/pytorch/pytorch/ + """ + stack = [] + tx = self.__get_tx(stacklevel) + while tx is not None: + stack.append(tx.frame_summary()) + tx = getattr(tx, "parent", None) + print( + "".join(traceback.StackSummary.from_list(reversed(stack)).format()), + file=file, + ) + + def print_guards(self, *, file=None): + """ + Print the currently installed guards for the Dynamo context. + This does NOT include guards associated with variables that + may or may not be installed in the future if those variables + are used. + """ + # TODO: improve print format, current guard format is extremely + # verbose + print( + "\n".join(f"{repr(guard)}" for guard in sorted(self.__tx.output.guards)), + file=file, + ) + + def _i_will_not_complain_if_bc_breaks_InstructionTranslator(self): + """ + Returns the internal data structure InstructionTranslator that Dynamo + uses to track state of symbolic evaluation. There are no BC + guarantees on this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if + you rely on it. 
+ """ + return self.__tx + + +class _Comptime: + @staticmethod + def __call__(fn): + """fn gets called at compile time in TorchDynamo, does nothing otherwise""" + return + + # Convenience wrappers that are more compact to use + + @staticmethod + def graph_break(): + comptime(lambda ctx: ctx.graph_break()) + + @staticmethod + def print_graph(): + comptime(lambda ctx: ctx.print_graph()) + + @staticmethod + def print_disas(*, stacklevel=0): + comptime( + lambda ctx: ctx.print_disas( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + + @staticmethod + def print_value_stack(*, stacklevel=0): + comptime( + lambda ctx: ctx.print_value_stack( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + + # This is a more useful variant of print_value_stack that can be used + # in an expression context; e.g., x + print_value_stack_and_return(y + z), + # you will see x on the stack prior to the addition operation + @staticmethod + def print_value_stack_and_return(e, *, stacklevel=0): + comptime( + lambda ctx: ctx.print_value_stack( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + return e + + @staticmethod + def print_locals(*, stacklevel=0): + comptime( + lambda ctx: ctx.print_locals( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + + @staticmethod + def print_bt(*, stacklevel=0): + comptime( + lambda ctx: ctx.print_bt( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + + @staticmethod + def print_guards(): + comptime(lambda ctx: ctx.print_guards()) + + @staticmethod + def assert_static(val): + comptime(lambda ctx: ctx.assert_static(ctx.get_local("val"))) + + @staticmethod + def force_static(val): + comptime(lambda ctx: ctx.get_local("val").force_static()) + + @staticmethod + def breakpoint(): + """ + Like pdb breakpoint(), but drop into pdb whenever this line + of code is compiled by dynamo. 
Use it by putting + this in your model code:: + + from torch._dynamo.comptime import comptime + comptime.breakpoint() + + And then, inside pdb, you can access 'ctx' to query things + about the compilation context:: + + (Pdb) !ctx.print_bt() + (Pdb) !ctx.print_locals() + (Pdb) p ctx.get_local("attention").as_fake() + """ + + def inner(inner_ctx): + ctx = inner_ctx.parent() + builtins.breakpoint() + + comptime(inner) + + +comptime = _Comptime() diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/config.py b/venv/lib/python3.10/site-packages/torch/_dynamo/config.py new file mode 100644 index 0000000000000000000000000000000000000000..a41094fc36b5447b0c6ac7decb17ca202384f3aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/config.py @@ -0,0 +1,423 @@ +import getpass +import inspect +import os +import re +import sys +import tempfile +from os.path import abspath, dirname +from typing import Any, Callable, Dict, Optional, Set, Type, TYPE_CHECKING, Union + +import torch + +# to configure logging for dynamo, aot, and inductor +# use the following API in the torch._logging module +# torch._logging.set_logs(dynamo=, aot=, inductor) +# or use the environment variable TORCH_LOGS="dynamo,aot,inductor" (use a prefix + to indicate higher verbosity) +# see this design doc for more detailed info +# Design doc: https://docs.google.com/document/d/1ZRfTWKa8eaPq1AxaiHrq4ASTPouzzlPiuquSBEJYwS8/edit# +# the name of a file to write the logs to +# [@compile_ignored: debug] +log_file_name: Optional[str] = None + +# [@compile_ignored: debug] Verbose will print full stack traces on warnings and errors +verbose = os.environ.get("TORCHDYNAMO_VERBOSE", "0") == "1" + +# [@compile_ignored: runtime_behaviour] verify the correctness of optimized backend +verify_correctness = False + +# need this many ops to create an FX graph +minimum_call_count = 1 + +# turn on/off DCE pass +dead_code_elimination = True + +# disable (for a function) when cache reaches this size + +# controls the maximum number of cache entries with a guard on same ID_MATCH'd +# object. It also controls the maximum size of cache entries if they don't have +# any ID_MATCH'd guards. +# [@compile_ignored: runtime_behaviour] +cache_size_limit = 8 + +# [@compile_ignored: runtime_behaviour] controls the maximum number of entries for a code object. +accumulated_cache_size_limit = 64 + +# whether or not to specialize on int inputs. This only has an effect with +# dynamic_shapes; when dynamic_shapes is False, we ALWAYS specialize on int +# inputs. Note that assume_static_by_default will also cause ints to get +# specialized, so this is mostly useful for export, where we want inputs +# to be dynamic, but accesses to ints should NOT get promoted into inputs. +specialize_int = False + +# legacy config, does nothing now! +dynamic_shapes = True + +use_lazy_graph_module = ( + os.environ.get("TORCH_COMPILE_USE_LAZY_GRAPH_MODULE", "1") == "1" +) + +# This is a temporarily flag, which changes the behavior of dynamic_shapes=True. +# When assume_static_by_default is True, we only allocate symbols for shapes marked dynamic via mark_dynamic. +# NOTE - this flag can be removed once we can run dynamic_shapes=False w/ the mark_dynamic API +# see [Note - on the state of mark_dynamic] +assume_static_by_default = True + +# This flag changes how dynamic_shapes=True works, and is meant to be used in conjunction +# with assume_static_by_default=True. 
+# With this flag enabled, we always compile a frame as fully static for the first time, and, if we fail +# any guards due to wobbles in shape, we recompile with *all* the wobbled shapes as being marked dynamic. +automatic_dynamic_shapes = True + +# This flag changes how the shapes of parameters are treated. +# If this flag is set to True, then the shapes of torch.nn.Parameter as well as of torch.Tensor are attempted to be dynamic +# If this flag is set to False, then the shapes of torch.nn.Parameter are assumed to be static, +# while the shapes of torch.Tensor are assumed to be dynamic. +force_parameter_static_shapes = True + +# This flag ensures that the shapes of a nn module are always assumed to be static +# If the flag is set to True, then the shapes of a nn.module are assumed to be static +# If the flag is set to False, then the shapes of a nn.module can be dynamic +force_nn_module_property_static_shapes = True + +# Typically, if you mark_dynamic a dimension, we will error if the dimension +# actually ended up getting specialized. This knob changes the behavior so +# that we don't error at all. This is helpful for our CI where I'm using a +# heuristic to mark batch dimensions as dynamic and the heuristic may get it +# wrong. +allow_ignore_mark_dynamic = False + +# Set this to False to assume nn.Modules() contents are immutable (similar assumption as freezing) +guard_nn_modules = False + +# Uses CPython internal dictionary tags to detect mutation. There is some +# overlap between guard_nn_modules_using_dict_tags and guard_nn_modules flag. +# guard_nn_modules unspecializes the nn module instance and adds guard for each +# relevant member of the nn modules. On the other hand, +# guard_nn_modules_using_dict_tags specializes on each nn module instance but +# uses low overhead dict version matching to detect mutations, obviating the +# need to guard on members of the nn modules. With +# guard_nn_modules_using_dict_tags, the guard_nn_modules is not really required +# but kept around for debugging and discussing unspecializing nn module +# variables. +# TODO(janimesh, voz): Remove both of these flags (or atleast guard_nn_modules) +# once we have reached stability for the guard_nn_modules_using_dict_tags. +guard_nn_modules_using_dict_tags = True + +# This feature doesn't really work. We offer this flag for experimental +# purposes / if you want to help us build out support. +# +# torchdynamo has very limited support for tensor subclasses that implement +# __torch_function__. Our current support is limited to tensor subclasses +# that DO NOT store metadata on the tensor (in general, dynamo does not +# support Python code that stores extra attributes on tensors at present). +# If your tensor subclass purely changes function call behavior via +# __torch_function__, you can allow torchdynamo to trace into it by +# adding it to traceable_tensor_subclasses. We don't do any safety checks, +# so it is up to you to ensure that your subclass is well behaved. See also +# https://github.com/pytorch/torchdynamo/issues/1948 +# +# We do NOT currently support __torch_dispatch__. The implementation is +# currently buggy, the main show stopper for nontrivial use is +# https://github.com/pytorch/torchdynamo/issues/1952 +traceable_tensor_subclasses: Set[Type[Any]] = set() + +# Suppress errors in torch._dynamo.optimize, instead forcing a fallback to eager. +# This is a good way to get your model to work one way or another, but you may +# lose optimization opportunities this way. 
Devs, if your benchmark model is failing +# this way, you should figure out why instead of suppressing it. +suppress_errors = bool(os.environ.get("TORCHDYNAMO_SUPPRESS_ERRORS", False)) + +# Record and write an execution record of the current frame to a file +# if an exception is encountered +# @compile_ignored[debug] +replay_record_enabled = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1" + +# Rewrite assert statement in python with torch._assert +rewrite_assert_with_torch_assert = True + +# Disable dynamo +disable = os.environ.get("TORCH_COMPILE_DISABLE", False) + +# [@compile_ignored: runtime_behaviour] Get a cprofile trace of Dynamo +cprofile = os.environ.get("TORCH_COMPILE_CPROFILE", False) + +# legacy config, does nothing now! +skipfiles_inline_module_allowlist: Dict[Any, Any] = {} + +# If a string representing a PyTorch module is in this ignorelist, +# the `allowed_functions.is_allowed` function will not consider it +# when creating a list of PyTorch functions that will appear in +# FX IR. +allowed_functions_module_string_ignorelist = { + "torch.distributions", + "torch.testing", + "torch._refs", + "torch._prims", + "torch._decomp", +} + +# Debug Flag to try minifier at different stages. Possible values are {None, "aot", "dynamo"} +# None - Minifier is switched off +# dynamo - Runs minifier on the TorchDynamo produced graphs, if compilation fails +# aot - Runs minifier on the Aot Autograd produced graphs, if compilation fails +# [@compile_ignored: debug] +repro_after = os.environ.get("TORCHDYNAMO_REPRO_AFTER", None) + +# Compiler compilation debug info +# 1: Dumps the original graph out to repro.py if compilation fails +# 2: Dumps a minifier_launcher.py if compilation fails. +# 3: Always dumps a minifier_launcher.py. Good for segfaults. +# 4: Dumps a minifier_launcher.py if the accuracy fails. +# [@compile_ignored: debug] +repro_level = int(os.environ.get("TORCHDYNAMO_REPRO_LEVEL", 2)) + +# By default, we try to detect accuracy failure by running both forward +# and backward of a torchdynamo produced graph (if you are using repro_after +# 'dynamo'). This setting forces us to only test the forward graph and +# not the backward graph. This can be helpful if you're trying to debug +# an inference only problem, but the minifier seems to be choking on the +# backwards step +# TODO: Detect this situation automatically so the user doesn't need +# to manually configure this +# [@compile_ignored: debug] +repro_forward_only = os.environ.get("TORCHDYNAMO_REPRO_FORWARD_ONLY") == "1" + +# The tolerance we should use when testing if a compiled graph +# has diverged so that we should treat it as an accuracy failure +# [@compile_ignored: debug] +repro_tolerance = 1e-3 + +# If True, when testing if two models are the same, we will test them against +# a third fp64 reference and only report a problem if the RMSE relative to the +# fp64 is greater. However, this will use more memory; you may disable this +# if memory usage is too high. +# [@compile_ignored: runtime_behaviour] +same_two_models_use_fp64 = True + +# Not all backends support scalars. Some calls on torch.Tensor (like .item()) return a scalar type. +# When this flag is set to False, we introduce a graph break instead of capturing. +# This requires dynamic_shapes to be True. +capture_scalar_outputs = False + +# Not all backends support operators that have dynamic output shape (e.g., +# nonzero, unique). When this flag is set to False, we introduce a graph +# break instead of capturing. This requires dynamic_shapes to be True. 
+# If you set this to True, you probably also want capture_scalar_outputs +# (these are separated for historical reasons). +capture_dynamic_output_shape_ops = False + +# By default, dynamo will treat all ints as backed SymInts, which means (1) it +# will wait to see the int change over multiple runs before generalizing and +# (2) it will still always 0/1 specialize an int. When true, this knob +# forces dynamo to treat _length_per_key and _offset_per_key on +# KeyedJaggedTensor from torchrec as size-like unbacked SymInts, so that +# they (1) generalize immediately and (2) unsoundly never compare equal to +# 0/1. This is not on by default as AOTAutograd/Inductor cannot currently +# compile this code; however, this can be useful for export. +force_unspec_int_unbacked_size_like_on_torchrec_kjt = False + +# Should almost always be true in prod. This relaxes the requirement that cond's true_fn and +# false_fn produce code with identical guards. +enforce_cond_guards_match = True + +# Specify how to optimize a compiled DDP module. The flag accepts a boolean +# value or a string. There are 4 modes. +# 1. "ddp_optimizer" (or True): with "ddp_optimizer", Dynamo will automatically +# split the model graph into pieces to match DDP bucket sizes to allow DDP +# comm/compute overlap. +# 2. "python_reducer" (experimental): this optimization requires the usage +# of compiled_autograd. With "python_reducer", DDP will disable the C++ reducer +# and use the Python reducer to allow compiled_autograd to trace the +# communication and allow comm/compute overlap without graph-breaks. +# 3. "python_reducer_without_compiled_forward" (experimental): this mode is +# similar to "python_reducer". One should only use this optimization mode +# when compiled_autograd is used but the DDP module is not compiled. +# 4. "no_optimization" (or False): Dynamo won't split the model graph, nor +# will the Python reducer be used. With this mode, there will be no graph-breaks +# and the original DDP C++ reducer will be used. There will be no comm/compute +# overlap. This mode CANNOT be used with compiled_autograd. +# Note that to avoid breaking the existing usage, mode 1 and mode 4 can be +# specified with a boolean value. True selects ddp_optimizer and False selects +# no_optimization. +optimize_ddp: Union[bool, str] = True + +_ddp_optimization_mode = [ + "ddp_optimizer", + "python_reducer", # experimental mode + "python_reducer_without_compiled_forward", # experimental mode + "no_optimization", +] + + +def _get_optimize_ddp_mode(): + m = sys.modules[__name__] + if isinstance(m.optimize_ddp, bool): + if m.optimize_ddp: + mode = "ddp_optimizer" + else: + mode = "no_optimization" + elif isinstance(m.optimize_ddp, str): + mode = m.optimize_ddp + else: + raise ValueError(f"Invalid type, {type(optimize_ddp)=}") + + assert mode in m._ddp_optimization_mode, f"Invalid mode {mode=}" + return mode + + +# If True, delays DDPOptimizer submodule compilation until the first run of the model, +# so that real tensor strides are used in all submodules +# (instead of using FakeTensor strides, which can differ from real tensor strides and cause errors in some cases). +# This feature is not hardened yet and it's known to cause issues for some models, so False by default.
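# --- Editorial usage sketch (not part of this file) ---
# The DDP knobs above are plain module-level globals, so a training script would
# typically set them before compiling. The flag names come from this file; the
# surrounding objects (train.py-style script, ddp_wrapped_model) are hypothetical:
#
#     import torch
#     import torch._dynamo.config as dynamo_config
#
#     dynamo_config.optimize_ddp = "python_reducer"      # or True / "ddp_optimizer" / False
#     dynamo_config.optimize_ddp_lazy_compile = True     # opt in to the experimental lazy path below
#     compiled_model = torch.compile(ddp_wrapped_model)
# --------------------------------------------------------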
+optimize_ddp_lazy_compile = False + +# Whether to skip guarding on FSDP-managed modules +skip_fsdp_guards = True + +# Make dynamo skip guarding on hooks on nn modules +# Note: unsafe: if your model actually has hooks and you remove them, or doesn't and you add them, +# dynamo will not notice and will execute whichever version you first compiled. +skip_nnmodule_hook_guards = True + +# If True, raises exception if TorchDynamo is called with a context manager +raise_on_ctx_manager_usage = True + +# If True, raise when aot autograd is unsafe to use +raise_on_unsafe_aot_autograd = False + +# If true, error if you torch.jit.trace over a dynamo-optimized function. +# If false, silently suppress dynamo +error_on_nested_jit_trace = True + +# If true, error with a better message if we symbolically trace over a +# dynamo-optimized function. If false, silently suppress dynamo. +error_on_nested_fx_trace = True + +# Disables graph breaking on rnn. YMMV with backends. +allow_rnn = False + +# If true, error if we try to compile a function that has +# been seen before. +# [@compile_ignored: runtime_behaviour] +error_on_recompile = False + +# [@compile_ignored: debug] Whether to report any guard failures (deprecated: does not do anything) +report_guard_failures = True + +# [@compile_ignored: debug] root folder of the project +base_dir = dirname(dirname(dirname(abspath(__file__)))) + +# Trace through NumPy or graphbreak +trace_numpy = True + +# Trace through torch.distributed code +trace_distributed = False + +# Default NumPy dtypes when tracing with torch.compile +# We default to 64bits. For efficiency, one may want to change these to float32 +numpy_default_float = "float64" +numpy_default_complex = "complex128" +numpy_default_int = "int64" + +# use numpy's PRNG if True, pytorch otherwise +use_numpy_random_stream = False + + +def is_fbcode(): + return not hasattr(torch.version, "git_version") + + +def default_debug_dir_root(): + # [@compile_ignored: debug] + DEBUG_DIR_VAR_NAME = "TORCH_COMPILE_DEBUG_DIR" + if DEBUG_DIR_VAR_NAME in os.environ: + return os.path.join(os.environ[DEBUG_DIR_VAR_NAME], "torch_compile_debug") + elif is_fbcode(): + return os.path.join( + tempfile.gettempdir(), getpass.getuser(), "torch_compile_debug" + ) + else: + return os.path.join(os.getcwd(), "torch_compile_debug") + + +# [@compile_ignored: debug] +debug_dir_root = default_debug_dir_root() + +# [@compile_ignored: debug] +_save_config_ignore = { + "repro_after", + "repro_level", + # workaround: "cannot pickle PyCapsule" + "constant_functions", + # workaround: "cannot pickle module" + "skipfiles_inline_module_allowlist", +} + +# for backend="cudagraphs", mutations on input be sent to the cudagraph backend +# or replayed in aot_autograd epilogue. default is False because mutation on inputs +# can prevent cudagraphing. +cudagraph_backend_keep_input_mutation = False + +# When True, only ops that have the torch.Tag.pt2_compliant tag +# will be allowed into the graph; all other ops will be disallowed +# and will fall back to eager-mode PyTorch. Useful to ensure +# correctness of custom ops. +only_allow_pt2_compliant_ops = False + +capture_autograd_function = True + +# enable/disable dynamo tracing for `torch.func` transforms +capture_func_transforms = False + +# enable/disable user-defined triton kernel optimizations +optimize_user_defined_triton_kernels = True + +# If to log Dynamo compilation metrics into log files (for OSS) and Scuba tables (for fbcode). 
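# --- Editorial aside (not part of this file) ---
# The assignment below enables compilation-metrics logging by default. Separately,
# default_debug_dir_root() above honors the TORCH_COMPILE_DEBUG_DIR environment
# variable; for example, a hypothetical debugging run such as
#
#     TORCH_COMPILE_DEBUG_DIR=/tmp/dynamo_debug python train.py
#
# places repro and minifier artifacts under /tmp/dynamo_debug/torch_compile_debug/
# instead of the current working directory.
# ------------------------------------------------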
+log_compilation_metrics = True + +# A set of logging functions which will be reordered to the end of graph breaks, +# allowing dynamo to construct larget graph. Note that there are some +# limitations to this, such as how it does not correctly print objects that were +# mutated after the print statement. +reorderable_logging_functions: Set[Callable[[Any], None]] = set() + +# simulates what would happen if we didn't have support for BUILD_SET opcode, +# used for testing +inject_BUILD_SET_unimplemented_TESTING_ONLY = False + +_autograd_backward_strict_mode_banned_ops = [ + "stride", + "requires_grad", + "storage_offset", + "layout", + "data", +] + +_autograd_backward_strict_mode_banned_ops.extend( + [name for name, _ in inspect.getmembers(torch.Tensor) if re.match(r"^is_.*", name)] +) + +# Enables caching of dispatches to fake tensors. +fake_tensor_cache_enabled = ( + os.environ.get("TORCH_FAKE_TENSOR_DISPATCH_CACHE", "1") == "1" +) + +# Enables cross checking between the fake tensor cache and dispatch. +fake_tensor_cache_crosscheck_enabled = ( + os.environ.get("TORCH_FAKE_TENSOR_DISPATCH_CACHE_CROSSCHECK", "0") == "1" +) + +# support `context_fn` in torch.utils.checkpoint.checkpoint API under torch.compile(). +# WARNING: this is an experimental flag and is subject to change. +_experimental_support_context_fn_in_torch_utils_checkpoint = False + +if TYPE_CHECKING: + from torch.utils._config_typing import * # noqa: F401, F403 + + def _make_closure_patcher(**changes): + ... + + +from torch.utils._config_module import install_config_module + +install_config_module(sys.modules[__name__]) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py b/venv/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..15456e30858112dcf1e23577c15bf96755628d88 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py @@ -0,0 +1,924 @@ +import collections +import dis +import functools +import itertools +import logging +import os +import random +import sys +import threading +import time +import traceback +import types +import typing +import weakref +from typing import Any, Callable, Dict, List, Optional, Set + +from torch.fx._lazy_graph_module import ( # type: ignore[attr-defined] + _use_lazy_graph_module, +) + +try: + import numpy as np +except ModuleNotFoundError: + np = None # type: ignore[assignment] + +import torch +import torch._logging +from torch._guards import compile_context, CompileContext, CompileId, tracing +from torch._logging import structured +from torch._utils_internal import signpost_event +from torch.fx.experimental.symbolic_shapes import ( + ConstraintViolationError, + GuardOnDataDependentSymNode, +) +from torch.fx.graph_module import _forward_from_src as original_forward_from_src +from torch.nn.parallel.distributed import DistributedDataParallel +from torch.utils._python_dispatch import _disable_current_modes +from torch.utils._traceback import format_traceback_short + +from . 
import config, exc, trace_rules +from .backends.registry import CompilerFn +from .bytecode_analysis import remove_dead_code, remove_pointless_jumps +from .bytecode_transformation import ( + check_inst_exn_tab_entries_valid, + Instruction, + is_generator, + propagate_inst_exn_table_entries, + transform_code_object, +) +from .cache_size import ( + CacheSizeRelevantForFrame, + compute_cache_size, + exceeds_cache_size_limit, + is_recompilation, +) +from .eval_frame import always_optimize_code_objects, skip_code, TorchPatcher +from .exc import ( + augment_exc_message, + BackendCompilerFailed, + format_error_msg, + InternalTorchDynamoError, + TorchRuntimeError, + UncapturedHigherOrderOpError, + unimplemented, + Unsupported, +) +from .guards import ( + CheckFunctionManager, + get_and_maybe_log_recompilation_reason, + GuardedCode, +) +from .hooks import Hooks +from .output_graph import OutputGraph +from .replay_record import ExecutionRecord +from .symbolic_convert import InstructionTranslator, SpeculationLog +from .trace_rules import is_numpy +from .types import BytecodeHook +from .utils import ( + CleanupManager, + CompilationMetrics, + counters, + dynamo_timed, + format_bytecode, + frame_phase_timing, + gen_record_file_name, + increment_frame, + is_namedtuple, + istype, + LazyString, + maybe_cprofile, + orig_code_map, + record_compilation_metrics, + reset_graph_break_dup_checker, + setup_compile_debug, + troubleshooting_url, + write_record_to_file, +) + +log = logging.getLogger(__name__) +bytecode_log = torch._logging.getArtifactLogger(__name__, "bytecode") +GlobalStateGuard = torch._C._dynamo.guards.GlobalStateGuard + +compile_lock = threading.RLock() + + +class Tracker: + def __init__(self): + self.seen = [] + self.seen_ids = set() + + def add(self, strong_obj): + idx = id(strong_obj) + if idx not in self.seen_ids: + obj = weakref.ref(strong_obj, lambda _: self.seen_ids.remove(idx)) + self.seen.append(obj) + self.seen_ids.add(idx) + + def __contains__(self, item): + return id(item) in self.seen_ids + + def clear(self): + self.seen.clear() + self.seen_ids.clear() + + +input_codes = Tracker() +output_codes = Tracker() + +initial_global_state: Optional[GlobalStateGuard] = None + + +@functools.wraps(original_forward_from_src) +def fx_forward_from_src_skip_result(*args, **kwargs): + # we monkey patch FX to prevent infinite loop of trying to convert + # our generated code + result: types.FunctionType = original_forward_from_src(*args, **kwargs) + skip_code(result.__code__) + return result + + +def preserve_global_state(fn): + """ + Context manager to: + 1) Save/restore torch.is_grad_enabled() state + 2) Save/restore python random state + 3) Save/restore torch random state + 4) Monkey patch torch.fx.graph_module._forward_from_src + """ + + @functools.wraps(fn) + def _fn(*args, **kwargs): + guards = GlobalStateGuard() + prior_grad_mode = torch.is_grad_enabled() + prior_inference_mode = torch.is_inference_mode_enabled() + prior_deterministic = torch.are_deterministic_algorithms_enabled() + prior_warn_only = torch.is_deterministic_algorithms_warn_only_enabled() + py_rng_state = random.getstate() + torch_rng_state = torch.random.get_rng_state() + if torch.cuda.is_available(): + cuda_rng_state = torch.cuda.get_rng_state() + prior_fwd_from_src = torch.fx.graph_module._forward_from_src + torch.fx.graph_module._forward_from_src = fx_forward_from_src_skip_result + cleanup = setup_compile_debug() + try: + return fn(*args, **kwargs) + finally: + cleanup.close() + torch._C._set_grad_enabled(prior_grad_mode) + 
torch.torch.autograd.grad_mode._enter_inference_mode(prior_inference_mode) + torch.use_deterministic_algorithms( + prior_deterministic, warn_only=prior_warn_only + ) + random.setstate(py_rng_state) + torch.random.set_rng_state(torch_rng_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state(cuda_rng_state) # type: ignore[possibly-undefined] + torch.fx.graph_module._forward_from_src = prior_fwd_from_src + assert ( + guards.check() + ), "Global state changed while dynamo tracing, please report a bug" + + _fn._torchdynamo_orig_callable = fn # type: ignore[attr-defined] + return _fn + + +@TorchPatcher.suppress_torch_distributed_warnings +def has_tensor_in_frame(frame): + """Check if the frame has torch.* related bits""" + # Check if the function was decorated using torch._dynamo.optimize + if frame.f_code in always_optimize_code_objects: + return True + + # Check if there is global import of torch.* + for co_name in frame.f_code.co_names: + if co_name in frame.f_globals: + obj = frame.f_globals[co_name] + if isinstance(obj, types.ModuleType) and ( + obj.__name__.startswith("torch.") or obj is torch + ): + return True + # ... or a global import of numpy.* + if np and config.trace_numpy and (obj is np or is_numpy(obj)): + return True + + seen_ids: Dict[int, bool] = dict() + + def has_tensor(obj): + """Recursively check if the obj has a tensor""" + obj_id = id(obj) + if obj_id in seen_ids: + return seen_ids[obj_id] + seen_ids[obj_id] = False + + if isinstance(obj, (torch.Tensor, torch.nn.Module)) or ( + istype(obj, type) and issubclass(obj, torch.nn.Module) + ): + seen_ids[obj_id] = True + return seen_ids[obj_id] + elif ( + config.trace_numpy + and np + and (istype(obj, np.ndarray) or isinstance(obj, np.generic)) + ): + seen_ids[obj_id] = True + return seen_ids[obj_id] + elif istype(obj, (list, tuple)): + seen_ids[obj_id] = any(has_tensor(v) for v in obj) + return seen_ids[obj_id] + elif istype(obj, dict): + # Some packages like pytest can be updated during runtime. 
So, make a + # copy of values to avoid issues like "RuntimeError: dictionary + # changed size during iteration" + values = list(obj.values()) + seen_ids[obj_id] = any(has_tensor(v) for v in values) + return seen_ids[obj_id] + elif istype(obj, (str, int, float, type(None), bool)): + seen_ids[obj_id] = False + return seen_ids[obj_id] + elif is_namedtuple(obj) and hasattr(obj, "_fields"): + seen_ids[obj_id] = any(has_tensor(getattr(obj, v)) for v in obj._fields) + return seen_ids[obj_id] + else: + # if config.debug: + # print( + # f"Assuming that object of type {type(obj)} does not have a tensor" + # ) + return False + + # Check if the passed arguments are of type Tensor + for value in frame.f_locals.values(): + if has_tensor(value): + return True + + log.debug( + "skipping because no torch.* %s \ + %s %s", + frame.f_code.co_name, + frame.f_code.co_filename, + frame.f_code.co_firstlineno, + ) + + return False + + +def exception_handler(e, code, frame=None, export=False): + record_filename = None + if hasattr(e, "exec_record"): + record_filename = gen_record_file_name(e, code) + write_record_to_file(record_filename, e.exec_record) + e.record_filename = record_filename + + augment_exc_message(e, export=export) + + +FRAME_COUNTER = 0 +FRAME_COMPILE_COUNTER: typing.Counter[int] = collections.Counter() + + +def convert_frame_assert( + compiler_fn: CompilerFn, + one_graph: bool = True, + export: bool = False, + export_constraints=None, +): + """Fully convert a frame into an FX graph""" + reset_graph_break_dup_checker() + + def _convert_frame_assert( + frame: types.FrameType, cache_entry, hooks: Hooks, frame_state, *, skip: int = 0 + ): + increment_frame() + + code = frame.f_code + + cache_size = compute_cache_size(frame, cache_entry) + recompile_reasons = None + if is_recompilation(cache_size): + recompile_reasons = get_and_maybe_log_recompilation_reason( + cache_entry, frame + ) + + input_codes.add(code) + if code in output_codes: + return None + if ( + os.environ.get("TORCHDYNAMO_DEBUG_FUNCTION") + and os.environ.get("TORCHDYNAMO_DEBUG_FUNCTION") != code.co_name + ): + return None + if code.co_name == "" and code.co_filename.endswith( + ( + "transformers/file_utils.py", + "transformers/utils/generic.py", + "diffusers/utils/outputs.py", + ) + ): + # not needed, but cleans up torchbench error stats + return None + if code.co_name == "__setattr__": + # setattr could be tricky to handle generally, + # but also not likely useful to compile- skip the whole frame + return None + if code.co_name == "__init__" and code.co_filename.startswith( + os.path.dirname(torch.optim.__file__) + ): + # optimizer support is still incomplete see + # test_state_dict in test/dynamo/test_optimizers.py + return None + + # Check if the frame is generated by an exec builtin call + # TODO - Running exec generated frame seems propagates f_globals to the + # next frames. + if code.co_name == "" and code.co_filename == "": + return None + + if ( + code.co_name == "" + and code.co_filename == "" + and not bool(frame.f_builtins) + ): + # namedtuple subclass constructor. Empty builtins cause issue with + # len keyword in LIST_LEN guard. + return None + + if is_generator(code): + unimplemented("generator") + exceeded, limit_type = exceeds_cache_size_limit(cache_size) + if exceeded: + + def format_func_info(code): + return f"'{code.co_name}' ({code.co_filename}:{code.co_firstlineno})" + + def format_guard_failures(): + assert recompile_reasons, "TODO(whc) any other recompile reasons?" 
+ return recompile_reasons[-1] + + log.warning( + "torch._dynamo hit config.%s (%s)\n" + " function: %s\n" + " last reason: %s\n" + 'To log all recompilation reasons, use TORCH_LOGS="recompiles".\n' + "To diagnose recompilation issues, see %s.", + limit_type, + getattr(config, limit_type), + format_func_info(code), + format_guard_failures(), + troubleshooting_url, + ) + unimplemented(f"{limit_type} reached") + + if not has_tensor_in_frame(frame): + return None + + global initial_global_state + initial_global_state = GlobalStateGuard() + + global FRAME_COUNTER + if "_id" not in frame_state: + frame_state["_id"] = FRAME_COUNTER + FRAME_COUNTER += 1 + frame_id = frame_state["_id"] + + frame_compile_id = FRAME_COMPILE_COUNTER[frame_id] + FRAME_COMPILE_COUNTER[frame_id] += 1 + + compile_id = CompileId(frame_id, frame_compile_id) + + signpost_event( + "dynamo", + "_convert_frame_assert._compile", + { + "co_name": code.co_name, + "co_filename": code.co_filename, + "co_firstlineno": code.co_firstlineno, + "cache_size": cache_size.num_cache_entries_with_same_id_matched_objs, + "accumulated_cache_size": cache_size.num_cache_entries, + }, + ) + + return _compile( + frame.f_code, + frame.f_globals, + frame.f_locals, + frame.f_builtins, + compiler_fn, + one_graph, + export, + export_constraints, + hooks, + cache_size, + frame, + frame_state=frame_state, + compile_id=compile_id, + skip=skip + 1, + ) + + _convert_frame_assert._torchdynamo_orig_callable = compiler_fn # type: ignore[attr-defined] + + def _clone_with_backend(backend): + return convert_frame_assert(backend, one_graph, export, export_constraints) + + _convert_frame_assert._clone_with_backend = _clone_with_backend # type: ignore[attr-defined] + return _convert_frame_assert + + +from collections import OrderedDict + +from torch.utils.hooks import RemovableHandle + +# we have to use `OrderedDict` to make `RemovableHandle` work. +_bytecode_hooks: Dict[int, BytecodeHook] = OrderedDict() + + +def register_bytecode_hook(hook: BytecodeHook) -> RemovableHandle: + """Register hooks for bytecode generated by Dynamo. The hook can do some + logging, as well as return a new code object to be used. Please refer + to `BytecodeHook` for the hook signature. 
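    Example (editorial sketch, not part of the original docstring; per the code
    below, the hook receives the original and the transformed code object and may
    return a replacement code object or None):

        import torch._dynamo.convert_frame as convert_frame

        def log_bytecode_hook(code, out_code):
            # Purely observational hook: report the rewrite, keep Dynamo's bytecode.
            print(f"Dynamo rewrote {code.co_name} ({len(out_code.co_code)} bytes of bytecode)")
            return None

        handle = convert_frame.register_bytecode_hook(log_bytecode_hook)
        # ... run torch.compile'd code here ...
        handle.remove()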
+ """ + handle = RemovableHandle(_bytecode_hooks) + _bytecode_hooks[handle.id] = hook + return handle + + +@_use_lazy_graph_module(config.use_lazy_graph_module) +@maybe_cprofile +def _compile( + code: types.CodeType, + globals: Dict[str, object], + locals: Dict[str, object], + builtins: Dict[str, object], + compiler_fn: CompilerFn, + one_graph: bool, + export: bool, + export_constraints, + hooks: Hooks, + cache_size: CacheSizeRelevantForFrame, + frame: Optional[types.FrameType] = None, + frame_state=None, + compile_id=None, + *, + skip: int = 0, +) -> Optional[GuardedCode]: + from torch.fx.experimental.validator import ( + bisect, + BisectValidationException, + translation_validation_enabled, + ValidationException, + ) + + output: Optional[OutputGraph] = None + tracer: Optional[InstructionTranslator] = None + # This is shared across restarts + mutated_closure_cell_contents: Set[str] = set() + speculation_log = SpeculationLog() + torch._dynamo.callback_handler.run_start_callbacks() + + @preserve_global_state + def transform(instructions, code_options): + nonlocal output + nonlocal tracer + speculation_log.restart() + tracer = InstructionTranslator( + instructions, + code, + locals, + globals, + builtins, + code_options, + compiler_fn, + one_graph, + export, + export_constraints, + mutated_closure_cell_contents, + frame_state=frame_state, + speculation_log=speculation_log, + ) + + try: + with tracing(tracer.output.tracing_context), tracer.set_current_tx(): + tracer.run() + except exc.UnspecializeRestartAnalysis: + speculation_log.clear() + raise + except (exc.SpeculationRestartAnalysis, exc.SkipFrame): + raise + except Exception: + if translation_validation_enabled(): + bisect(tracer.output.shape_env) + raise + finally: + tracer.output.call_cleanup_hooks() + + output = tracer.output + assert output is not None + assert output.output_instructions + instructions[:] = output.output_instructions + code_options.update(output.code_options) + + if config.dead_code_elimination: + propagate_inst_exn_table_entries(instructions) + check_inst_exn_tab_entries_valid(instructions) + instructions[:] = remove_pointless_jumps(remove_dead_code(instructions)) + + @dynamo_timed(phase_name="entire_frame_compile") + def compile_inner( + code: types.CodeType, + one_graph: bool, + hooks: Hooks, + transform: Callable[[List[Instruction], Dict[str, Any]], Any], + ) -> Optional[GuardedCode]: + nonlocal output + for attempt in itertools.count(): + CompileContext.get().attempt = attempt + try: + out_code = transform_code_object(code, transform) + break + except exc.RestartAnalysis as e: + log.info( + "Restarting analysis due to %s", + LazyString(format_traceback_short, e.__traceback__), + ) + if attempt > 100: + unimplemented("100+ RestartAnalysis() calls") + except exc.SkipFrame as e: + log.debug( + "Skipping frame %s %s \ + %s %s", + e, + code.co_name, + code.co_filename, + code.co_firstlineno, + ) + if one_graph: + log.debug("No graph captured with one_graph=True") + return None + + def log_bytecode(prefix, name, filename, line_no, code): + if bytecode_log.isEnabledFor(logging.DEBUG): + bytecode_log.debug( + format_bytecode(prefix, name, filename, line_no, code) + ) + + log_bytecode( + "ORIGINAL BYTECODE", + code.co_name, + code.co_filename, + code.co_firstlineno, + code, + ) + log_bytecode( + "MODIFIED BYTECODE", + code.co_name, + code.co_filename, + code.co_firstlineno, + out_code, # type: ignore[possibly-undefined] + ) + + for hook in _bytecode_hooks.values(): + hook_output = hook(code, out_code) + if hook_output is 
not None: + out_code = hook_output + + orig_code_map[out_code] = code + output_codes.add(out_code) + + assert output is not None + + # Tests for new code objects. + # The rationale for these tests can be found in torch/csrc/dynamo/eval_frame.c + # Only test once the code object is created. + # They are not tested during runtime. + + def count_args(code): + import inspect + + return ( + code.co_argcount + + code.co_kwonlyargcount + + bool(code.co_flags & inspect.CO_VARARGS) + + bool(code.co_flags & inspect.CO_VARKEYWORDS) + ) + + total_argcount_old = count_args(code) + total_argcount_new = count_args(out_code) + msg = "arg mismatch: " + msg += f"old code object has args {code.co_varnames[:total_argcount_old]}, " + msg += f"new code object has args {out_code.co_varnames[:total_argcount_new]}" + assert ( + code.co_varnames[:total_argcount_old] + == out_code.co_varnames[:total_argcount_new] + ), msg + + msg = "free var mismatch: " + msg += f"old code object has free var {code.co_freevars}, " + msg += f"new code object has free var {out_code.co_freevars}" + assert code.co_freevars == out_code.co_freevars, msg + + msg = "cell var mismatch: " + msg += f"old code object has cell var {code.co_cellvars}, " + msg += f"new code object has cell var {out_code.co_cellvars}" + assert code.co_cellvars == out_code.co_cellvars, msg + + # Skipping Dynamo on a frame without any extracted graph. + # This does not affect eager functionality. But this is necessary + # for export for cases where Dynamo-reconstructed bytecode can create + # new function frames, confusing export in thinking that there + # are extra graphs now. + + if output.export and output.is_empty_graph(): + return None + + assert output.guards is not None + CleanupManager.instance[out_code] = output.cleanups + check_fn = CheckFunctionManager( + output, + hooks.guard_fail_fn if hooks else None, + ) + + guarded_code = GuardedCode(out_code, check_fn.check_fn) + + if not output.is_empty_graph() and hooks.guard_export_fn is not None: + # We should not run the guard_export_fn when Dynamo does not + # generate any graph. This can happen in export when TorchDynamo + # generated bytecode has some reconstruction logic for mutated + # variables which can trigger TorchDynamo on the children frames but + # they are benign and do not generate any new graphs. 
+ hooks.guard_export_fn(output.guards) + + return guarded_code + + with compile_context(CompileContext(compile_id)): + log.debug( + "torchdynamo start compiling %s %s:%s, stack (elided %s frames):\n%s", + code.co_name, + code.co_filename, + code.co_firstlineno, + skip + 2, + # -2: omit current frame, omit contextlib decorator + "".join(traceback.format_list(traceback.extract_stack()[: -2 - skip])), + ) + # -4: -2 as above, plus trace_structured frames + torch._logging.trace_structured( + "dynamo_start", + lambda: { + "stack": structured.from_traceback( + traceback.extract_stack()[: -4 - skip] + ) + }, + ) + start_time = time.time() + fail_type: Optional[str] = None + fail_reason: Optional[str] = None + fail_user_frame_filename: Optional[str] = None + fail_user_frame_lineno: Optional[int] = None + try: + guarded_code = compile_inner(code, one_graph, hooks, transform) + return guarded_code + except ( + Unsupported, + TorchRuntimeError, + BackendCompilerFailed, + AssertionError, + ConstraintViolationError, + GuardOnDataDependentSymNode, + ValidationException, + UncapturedHigherOrderOpError, + BisectValidationException, + ) as e: + fail_type = str(type(e)) + fail_reason = str(e) + exception_handler(e, code, frame, export=export) + if e.innermost_user_frame_summary is not None: # type: ignore[union-attr] + fail_user_frame_filename = e.innermost_user_frame_summary.filename # type: ignore[union-attr] + fail_user_frame_lineno = e.innermost_user_frame_summary.lineno # type: ignore[union-attr] + raise + except Exception as e: + fail_type = str(type(e)) + fail_reason = str(e) + exception_handler(e, code, frame, export=export) + if e.innermost_user_frame_summary is not None: # type: ignore[attr-defined] + fail_user_frame_filename = e.innermost_user_frame_summary.filename # type: ignore[attr-defined] + fail_user_frame_lineno = e.innermost_user_frame_summary.lineno # type: ignore[attr-defined] + raise InternalTorchDynamoError(str(e)).with_traceback( + e.__traceback__ + ) from None + finally: + if tracer: + tracer.output.local_scope = {} + + from .utils import curr_frame + + frame_key = str(curr_frame) + if ( + fail_reason is None + and output is not None + and frame_key in frame_phase_timing + ): + guard_count = len(output.guards) + shape_env_guard_count = len(output.shape_env.guards) + graph_op_count = output.count_calls() + graph_node_count = len(output.graph.nodes) + graph_input_count = len(output.placeholders) + entire_frame_compile_time = frame_phase_timing[frame_key].get( + "entire_frame_compile", None + ) + backend_compile_time = frame_phase_timing[frame_key].get( + "backend_compile", None + ) + inductor_compile_time = frame_phase_timing[frame_key].get( + "inductor_compile", None + ) + code_gen_time = frame_phase_timing[frame_key].get("code_gen", None) + non_compliant_ops = {op.__qualname__ for op in output.non_compliant_ops} + compliant_custom_ops = { + op.__qualname__ for op in output.compliant_custom_ops + } + else: + guard_count = None + shape_env_guard_count = None + graph_op_count = None + graph_node_count = None + graph_input_count = None + entire_frame_compile_time = None + backend_compile_time = None + inductor_compile_time = None + code_gen_time = None + non_compliant_ops = set({}) + compliant_custom_ops = set({}) + metrics = CompilationMetrics( + frame_key, + code.co_name, + code.co_filename, + code.co_firstlineno, + cache_size.num_cache_entries_with_same_id_matched_objs, + cache_size.num_cache_entries, + guard_count, + shape_env_guard_count, + graph_op_count, + graph_node_count, + 
graph_input_count, + start_time, + entire_frame_compile_time, + backend_compile_time, + inductor_compile_time, + code_gen_time, + fail_type, + fail_reason, + fail_user_frame_filename, + fail_user_frame_lineno, + non_compliant_ops, + compliant_custom_ops, + ) + record_compilation_metrics(metrics) + torch._dynamo.callback_handler.run_end_callbacks() + + +def convert_frame(compiler_fn: CompilerFn, hooks: Hooks): + """Try to convert a frame into an FX graph, if error leave frame unmodified""" + inner_convert = convert_frame_assert(compiler_fn, one_graph=False) + + def _convert_frame( + frame: types.FrameType, cache_entry, hooks: Hooks, frame_state, skip: int = 0 + ): + counters["frames"]["total"] += 1 + try: + result = inner_convert( + frame, cache_entry, hooks, frame_state, skip=skip + 1 + ) + counters["frames"]["ok"] += 1 + return result + except Exception as e: + # These two exception types are "soft" failure, in the sense that + # we know this is due to something we didn't implement all the + # way, scare the user less about it. That being said, if you + # are trying to understand why a graph break happened, it's still + # important to have this information, so offer it. + # + # NB: NotImplementedError used to be on this list, but actually + # it is impossible for it to reach here, as it is converted into + # InternalTorchDynamoError. This behavior seemed reasonable + # to me (ezyang, Aug 2023) so I kept it, but maybe at some point + # someone wanted these to also get suppressed. If so, you'll + # need to make these exceptions not get wrapped + + # We intentionally don't want to suppress error here. + if isinstance(e, UncapturedHigherOrderOpError): + raise + + soft_fail = isinstance(e, Unsupported) + if not config.suppress_errors and not soft_fail: + raise + + # Suppress the error. NB: It's very important to do the + # suppression logging HERE, where the actual suppression + # happens. Previously it was somewhere else and so it was + # possible to accidentally not log at all. 
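                # (Editorial aside, not part of this file: per the check above, hard
                # failures are only suppressed here when torch._dynamo.config.suppress_errors
                # is set, which defaults to the TORCHDYNAMO_SUPPRESS_ERRORS environment
                # variable, e.g. `TORCHDYNAMO_SUPPRESS_ERRORS=1 python train.py` with a
                # hypothetical train.py makes such failures fall back to eager; graph-break
                # style Unsupported errors are suppressed regardless.)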
+ record_filename = getattr(e, "record_filename", None) + code = frame.f_code + error_msg = format_error_msg(e, code, record_filename, frame) + + if soft_fail: + log.info(error_msg, exc_info=True) + else: + log.warning(error_msg, exc_info=True) + return None + + _convert_frame._torchdynamo_orig_callable = compiler_fn # type: ignore[attr-defined] + _convert_frame._clone_with_backend = lambda backend: convert_frame(backend, hooks) # type: ignore[attr-defined] + return _convert_frame + + +# TODO mlazos: add support for same args, or record them +def replay(filename): + from .backends.debugging import eager + + original_replay_val = config.replay_record_enabled + config.replay_record_enabled = False + with open(filename, "rb") as in_file: + record = ExecutionRecord.load(in_file) + record.globals = dict(itertools.chain(record.globals.items(), globals().items())) + + try: + _compile( + record.code, + record.globals, + record.locals, + record.builtins, + compiler_fn=eager, + one_graph=False, + export=False, + export_constraints=None, + hooks=Hooks(), + cache_size=CacheSizeRelevantForFrame(0, 0), + frame=None, + frame_state={}, + ) + finally: + config.replay_record_enabled = original_replay_val + + +def first_real_inst_idx(code): + if sys.version_info < (3, 11): + return 0 + for inst in dis.get_instructions(code): + if inst.opname == "RESUME": + return inst.offset // 2 + raise RuntimeError("RESUME instruction not found in code") + + +def catch_errors_wrapper(callback, hooks: Hooks): + @functools.wraps(callback) + def catch_errors(frame, cache_entry, frame_state): + assert frame_state is not None + + is_skipfile = trace_rules.check(frame.f_code) + if ( + # TODO: the first condition is not covered by any test + frame.f_lasti >= first_real_inst_idx(frame.f_code) + or is_skipfile + or config.disable + ): + if log.isEnabledFor(logging.DEBUG): + skip_reason = ( + "traced frame already" + if frame.f_lasti >= first_real_inst_idx(frame.f_code) + else "in skipfiles" + if trace_rules.check(frame.f_code) + else "dynamo tracing is disabled" + ) + if not is_skipfile or config.verbose: + log.debug( + "skipping: %s (reason: %s, file: %s)", + frame.f_code.co_name, + skip_reason, + frame.f_code.co_filename, + ) + return None + if frame.f_code.co_filename == "" and frame.f_code.co_name == "__new__": + # nametuple constructor + return None + if config._get_optimize_ddp_mode() == "ddp_optimizer": + ddp_module = DistributedDataParallel._get_active_ddp_module() + if ddp_module: + with compile_lock: + from torch._dynamo.backends.distributed import DDPOptimizer + + ddp_optimizer = DDPOptimizer( + bucket_bytes_cap=ddp_module.bucket_bytes_cap, + backend_compile_fn=callback._torchdynamo_orig_callable, + ) + assert hasattr( + callback, "_clone_with_backend" + ), "DDPOptimizer only supports callback fns that know how to clone themselves." 
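                    # (Editorial note: this is the "ddp_optimizer" mode documented in
                    # config.py -- the original compiler callback is cloned with
                    # DDPOptimizer's compile_fn as its backend, so the graph is split at
                    # DDP bucket boundaries to allow communication/compute overlap.)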
+ hijacked_callback = callback._clone_with_backend( + ddp_optimizer.compile_fn, + ) + return hijacked_callback(frame, cache_entry, hooks, frame_state) + + with compile_lock, _disable_current_modes(): + # skip=1: skip this frame + return callback(frame, cache_entry, hooks, frame_state, skip=1) + + catch_errors._torchdynamo_orig_callable = callback # type: ignore[attr-defined] + return catch_errors diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/current_scope_id.py b/venv/lib/python3.10/site-packages/torch/_dynamo/current_scope_id.py new file mode 100644 index 0000000000000000000000000000000000000000..1289bdcdffe47bce04e3aa0a663b5d8d5c443b10 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/current_scope_id.py @@ -0,0 +1,23 @@ +import contextlib +import threading + +# Global variable to identify which SubgraphTracer we are in. +# It is sometimes difficult to find an InstructionTranslator to use. +_current_scope_id = threading.local() + + +def current_scope_id(): + global _current_scope_id + if not hasattr(_current_scope_id, "value"): + _current_scope_id.value = 1 + return _current_scope_id.value + + +@contextlib.contextmanager +def enter_new_scope(): + global _current_scope_id + try: + _current_scope_id.value = current_scope_id() + 1 + yield + finally: + _current_scope_id.value = current_scope_id() - 1 diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/debug_utils.py b/venv/lib/python3.10/site-packages/torch/_dynamo/debug_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..56e099fbff444324b2c87bbcaaf8fd9f4aa8fb8d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/debug_utils.py @@ -0,0 +1,802 @@ +# mypy: disable-error-code="method-assign" + +import copy +import functools +import getpass +import inspect +import itertools +import logging +import os +import re +import subprocess +import tempfile +import textwrap +from collections import Counter +from importlib import import_module +from typing import Any, Callable, Dict, List, Optional, TypeVar + +import torch +import torch._prims_common as utils +import torch._subclasses.meta_utils +from torch import Tensor + +from torch._dynamo.testing import rand_strided +from torch._prims_common import is_float_dtype +from torch.multiprocessing.reductions import StorageWeakRef +from torch.utils._content_store import ContentStoreReader, ContentStoreWriter + +from . 
import config +from .utils import clone_inputs, get_debug_dir + +log = logging.getLogger(__name__) + +T = TypeVar("T") + + +inductor_config = import_module("torch._inductor.config") +use_buck = inductor_config.is_fbcode() + +if use_buck: + import libfb.py.build_info + + +extra_deps = [] +extra_imports = "" +if use_buck: + extra_deps = [ + "//caffe2/torch/fb/sparsenn:sparsenn_operators_gpu", + "//caffe2/torch/fb/sparsenn:sparsenn_operators", + "//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu", + "//deeplearning/fbgemm/fbgemm_gpu:sparse_ops", + ] + cur_target = libfb.py.build_info.BuildInfo.get_build_rule().replace("fbcode:", "//") # type: ignore[possibly-undefined] + extra_imports = "\n".join([f'torch.ops.load_library("{x}")' for x in extra_deps]) + + +BUCK_CMD_PREFIX = ["buck2", "run", "@mode/dev-nosan"] + + +class BuckTargetWriter: + def __init__(self, filename): + self.subdir, self.py_file = os.path.split(os.path.abspath(filename)) + self.target = self.py_file.replace(".py", "") + + # Get main_module path from fbcode + self.path = f'{self.subdir.replace("/", ".")}.{self.target}' + self.path = self.path[self.path.find("fbcode.") :] + self.path = self.path[7:] + + # Get cmd line path + tmp = self.subdir + tmp = tmp[tmp.find("fbcode/") :][7:] + self.cmd_line_path = f"//{tmp}:{self.target}" + + def build(self): + extra_cpp_deps = "\n".join([f' "{x}",' for x in extra_deps]) + return textwrap.dedent( + f""" +load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary") + +python_binary( + name="{self.target}", + srcs = ["{self.py_file}"], + compile = False, + deps = [ + "//caffe2:torch", + "//caffe2/functorch:functorch", + "//triton:triton", + "{cur_target}", + ], + cpp_deps = [ +{extra_cpp_deps} + ], + main_module = "{self.path}", + par_style = "xar", +) +""" + ) + + def write(self, print_msg=True): + target_file = os.path.join(self.subdir, "TARGETS") + with open(target_file, "w") as fd: + fd.write(self.build()) + # log.warning("Wrote isolation TARGETS file at %s", target_file) + cmd_split = BUCK_CMD_PREFIX + [self.cmd_line_path] + if print_msg: + log.warning( + "Found an example that reproduces the error. Run this cmd to repro - %s", + " ".join(cmd_split), + ) + return cmd_split + + +def minifier_dir(): + path = os.path.join(get_debug_dir(), "minifier") + if path is None: + path = f"{tempfile.gettempdir()}/minifier_{getpass.getuser()}" + if not os.path.exists(path): + os.makedirs(path, exist_ok=True) + return path + + +MAX_CONSTANT_NUMEL_INLINE = 4 + + +class NNModuleToString: + safe_reprs = [ + torch.nn.Linear, + torch.nn.Conv1d, + torch.nn.Conv2d, + torch.nn.Conv3d, + torch.nn.BatchNorm1d, + torch.nn.BatchNorm2d, + torch.nn.BatchNorm3d, + torch.nn.LayerNorm, + torch.nn.Dropout, + torch.nn.Softmax, + torch.nn.ReLU, + torch.nn.GELU, + torch.nn.Identity, + torch.nn.MaxPool2d, + torch.nn.Embedding, + torch.nn.Tanh, + torch.nn.ConvTranspose1d, + torch.nn.GLU, + torch.nn.LSTM, + torch.nn.Flatten, + torch.nn.AdaptiveAvgPool2d, + ] + + @staticmethod + def can_convert_to_string(gm): + cant_convert = set() + for _, module in gm.named_children(): + if type(module) not in NNModuleToString.safe_reprs: + cant_convert.add(module) + + if len(cant_convert) > 0: + log.warning("We have not tested reprs of some modules - %s", cant_convert) + # TODO - Assuming that all modules can be safely repr'd. Check if that assumption is correct. 
+ return True + + @staticmethod + def convert(gm): + from torch.nn.modules.module import _addindent + + tab = " " * 4 + + model_str = textwrap.dedent( + """ + from torch.nn import * + class Repro(torch.nn.Module): + def __init__(self): + super().__init__() + """ + ) + + for module_name, module in gm.named_children(): + module_str = f"{module.__repr__()}" + # module should be a core torch.nn.Module, so all parameters + # should be on the same device. + example_param = next(module.parameters(), None) + if example_param is not None and example_param.is_cuda: + module_str = f"{module_str}.cuda()" + model_str += f"{tab*2}self.{module_name} = {module_str}\n" + + for buffer_name, buffer in gm._buffers.items(): + if buffer is None: + continue + # Serialize full data for small buffers + if buffer.numel() <= MAX_CONSTANT_NUMEL_INLINE: + from torch._tensor_str import PRINT_OPTS + + assert PRINT_OPTS.threshold >= MAX_CONSTANT_NUMEL_INLINE + tensor_str = repr(buffer) + elif torch.is_floating_point(buffer): + tensor_str = f"torch.randn({list(buffer.shape)}, dtype={buffer.dtype})" + else: + tensor_str = ( + f"torch.randint(1, size={list(buffer.shape)}, dtype={buffer.dtype})" + ) + if buffer.is_cuda: + tensor_str = f"{tensor_str}.cuda()" + model_str += f"{tab*2}self.register_buffer('{buffer_name}', {tensor_str})\n" + + for param_name, param in gm._parameters.items(): + if param is None: + continue + maybe_device = "" + if param.is_cuda: + maybe_device = ', device="cuda"' + tensor_str = f"torch.nn.Parameter(torch.randn({list(param.shape)}, dtype={param.dtype}{maybe_device}))" + model_str += f"{tab*2}self.{param_name} = {tensor_str}\n" + + # TODO - Keep this code for now. But, I don't think we will need this. + # attrs = dir(gm) + # for attr in attrs: + # if "_tensor_constant" in attr: + # val = getattr(gm, attr) + # model_str += f" {attr} = {val!r}\n" + + model_str += f"{_addindent(gm.code, 4)}\n" + return model_str + + +@functools.lru_cache(None) # subprocess is expensive +def _cuda_system_info_comment(): + if not torch.cuda.is_available(): + return "# torch.cuda.is_available()==False, no GPU info collected\n" + + model_str = "# CUDA Info: \n" + try: + cuda_version_out = subprocess.check_output(["nvcc", "--version"]) + cuda_version_lines = cuda_version_out.decode().split("\n") + comment = "".join([f"# {s} \n" for s in cuda_version_lines if s not in [""]]) + model_str += f"{comment}\n" + except (FileNotFoundError, subprocess.CalledProcessError): + model_str += "# nvcc not found\n" + + gpu_names = Counter( + torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count()) + ) + + model_str += "# GPU Hardware Info: \n" + for name, count in gpu_names.items(): + model_str += f"# {name} : {count} \n" + model_str += "\n" + return model_str + + +def generate_config_string(*, stable_output=False): + import torch._functorch.config + import torch._inductor.config + + if stable_output: + return "# config omitted due to stable_output=True" + + experimental_config = torch.fx.experimental._config.codegen_config() # type: ignore[attr-defined] + return f"""\ +import torch._dynamo.config +import torch._inductor.config +import torch._functorch.config +import torch.fx.experimental._config +{torch._dynamo.config.codegen_config()} +{torch._inductor.config.codegen_config()} +{torch._functorch.config.codegen_config()} +{experimental_config} +""" + + +def get_minifier_repro_path(): + return os.path.join(minifier_dir(), "minifier_launcher.py") + + +def helper_for_dump_minify(contents): + minified_repro_path = 
get_minifier_repro_path() + log.warning("Writing minified repro to:\n%s", minified_repro_path) + + if use_buck: + BuckTargetWriter(minified_repro_path).write() + try: + with open(minified_repro_path, "w") as fd: + fd.write(contents) + + except OSError as e: + log.exception(e) + raise NotImplementedError("Could not write to {minified_repro_path}") from e + + +class AccuracyError(Exception): + pass + + +def clone_inputs_retaining_gradness(example_inputs): + """ + This clone inputs is different from utils clone_input. In case of minifier, + all the tensors are leaf tensors while creating a new graph. So, we set the + requires_grad field w/o checking the leafness of the tensor. + """ + cloned_inputs = clone_inputs(example_inputs) + for idx in range(len(example_inputs)): + if isinstance(cloned_inputs[idx], torch.Tensor): + cloned_inputs[idx].requires_grad_(example_inputs[idx].requires_grad) + return cloned_inputs + + +def run_fwd_maybe_bwd(gm, args, only_fwd=False, disable_clone=False): + """ + Runs a forward and possibly backward iteration for a given mod and args. + + When disable_clone is True, we will use args as-is without cloning. + This is higher fidelity but we may destroy the args in the process. + """ + from torch._functorch.aot_autograd import make_boxed_func + + from .testing import collect_results, reduce_to_scalar_loss, requires_bwd_pass + + gm = copy.deepcopy(gm) + if not disable_clone: + args = clone_inputs_retaining_gradness(args) + + if hasattr(gm, "zero_grad"): + gm.zero_grad(True) + + # TorchInductor returned callable expects lists. So, boxing the call. + orig_named_parameters = getattr(gm, "named_parameters", None) + orig_named_buffers = getattr(gm, "named_buffers", None) + if not hasattr(gm, "_boxed_call") and ( + orig_named_parameters is not None or orig_named_buffers is not None + ): + gm = make_boxed_func(gm) + if orig_named_parameters is not None: + gm.named_parameters = orig_named_parameters + if orig_named_buffers is not None: + gm.named_buffers = orig_named_buffers + + out = gm(args) + if only_fwd: + return out + if requires_bwd_pass(out): + loss = reduce_to_scalar_loss(out) + loss.backward() + return collect_results(gm, out, None, args) + + +def same_two_models( + gm, + opt_gm, + example_inputs, + only_fwd=False, + *, + require_fp64=False, + ignore_non_fp=False, +): + """ + Check two models have same accuracy. + + require_fp64: if True, raise an error if we unable to calculate the fp64 reference + ignore_non_fp: if True, do not compare outputs which are not floating point. 
This + is mostly useful for the minifier (which wants to avoid quantizing floating point + error into integer/boolean error) + """ + from .eval_frame import OptimizedModule + from .testing import ( + named_buffers_for_optimized_module, + named_parameters_for_optimized_module, + ) + from .utils import same + + if isinstance(gm, OptimizedModule): + gm.named_parameters = named_parameters_for_optimized_module(gm) + gm.named_buffers = named_buffers_for_optimized_module(gm) + + if isinstance(opt_gm, OptimizedModule): + opt_gm.named_parameters = named_parameters_for_optimized_module(opt_gm) + opt_gm.named_buffers = named_buffers_for_optimized_module(opt_gm) + + ref = run_fwd_maybe_bwd(gm, example_inputs, only_fwd) + + fp64_ref = None + if config.same_two_models_use_fp64: + try: + fp64_model, fp64_examples = cast_to_fp64( + copy.deepcopy(gm), clone_inputs_retaining_gradness(example_inputs) + ) + fp64_ref = run_fwd_maybe_bwd(fp64_model, fp64_examples, only_fwd) + except Exception: + if require_fp64: + raise RuntimeError("Could not generate fp64 outputs") # noqa: TRY200 + log.warning("Could not generate fp64 outputs") + + try: + res = run_fwd_maybe_bwd(opt_gm, example_inputs, only_fwd) + except Exception as e: + # This means that the minified graph is bad/exposes a different problem. + # As we are checking accuracy here, lets log the exception and return True. + log.exception( + "While minifying the program in accuracy minification mode, " + "ran into a runtime exception which is likely an unrelated issue." + " Skipping this graph." + ) + return True + + passing = same( + ref, + res, + fp64_ref, + tol=config.repro_tolerance, + equal_nan=True, + ignore_non_fp=ignore_non_fp, + ) + return passing + + +def cast_dtype_args_to_fp64(model): + for node in model.graph.nodes: + if ( + node.op == "call_function" + and node.target == torch.ops.prims.convert_element_type.default + ): + assert len(node.args) == 2 + if is_float_dtype(node.args[1]) and node.args[1] != torch.float64: + node.args = (node.args[0], torch.float64) + if node.op == "call_function": + dtype = node.kwargs.get("dtype") + if dtype is not None and is_float_dtype(dtype): + new_kwargs = dict(node.kwargs) + new_kwargs["dtype"] = torch.float64 + node.kwargs = new_kwargs + + model.graph.lint() + model.recompile() + return model + + +def cast_to(dtype, model, inputs): + from torch.utils._pytree import tree_map + + model = model.to(dtype) + if dtype == torch.float64: + # If casting to fp64 for accuracy comparison, we need to + # replace dtype arguments embedded in the graph with fp64 + model = cast_dtype_args_to_fp64(model) + + inputs = tree_map( + lambda x: x.to(dtype) + if isinstance(x, torch.Tensor) and x.is_floating_point() + else x, + inputs, + ) + return model, inputs + + +def cast_to_fp64(model, inputs): + return cast_to(torch.float64, model, inputs) + + +def backend_accuracy_fails( + gm, + example_inputs, + compiler_fn, + only_fwd=False, + *, + require_fp64=False, + ignore_non_fp=False, +): + try: + compiled_gm = compiler_fn( + copy.deepcopy(gm), clone_inputs_retaining_gradness(example_inputs) + ) + return not same_two_models( + gm, + compiled_gm, + example_inputs, + only_fwd, + require_fp64=require_fp64, + ignore_non_fp=ignore_non_fp, + ) + except Exception as e: + # This means that the minified graph is bad/exposes a different problem. + # As we are checking accuracy here, lets log the exception and return False. 
+ log.exception( + "While minifying the program in accuracy minification mode, " + "ran into a runtime exception which is likely an unrelated issue." + " Skipping this graph" + ) + return False + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# REPRO SUPPORT CODE +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +# Helper functions for computing what the default values of tensor +# values should be. These all coincide with factory functions, e.g., torch.empty + + +def _stride_or_default( + stride: Optional["torch._prims_common.StrideType"], + *, + shape: "torch._prims_common.ShapeType", +) -> "torch._prims_common.StrideType": + return stride if stride is not None else utils.make_contiguous_strides_for(shape) + + +def _mk_defaulter(d: T) -> Callable[[Optional[T]], T]: + return lambda x: x if x is not None else d + + +_dtype_or_default = _mk_defaulter(torch.float32) +_device_or_default = _mk_defaulter(torch.device("cpu")) +_storage_offset_or_default = _mk_defaulter(0) +_requires_grad_or_default = _mk_defaulter(False) +_is_leaf_or_default = _mk_defaulter(False) + + +class NopInputReader: + def __init__(self): + self.total = 0 + + def storage(self, storage_hash, nbytes, *, device=None, dtype_hint=None): + self.total += 1 + + def tensor(self, *args, **kwargs): + pass + + def symint(self, *args, **kwargs): + pass + + +# TODO: Support bundling the entire repro into a zip file for ease of +# transferring around +class InputReader: + def __init__(self, save_dir=None, *, pbar=None): + # If None, we will generate random data instead. It's important + # to natively support this use case as it will allow people to + # share repros without including the real data, if the problem + # reproduces even on random data. + if save_dir is None: + log.warning("no save_dir specified, will generate random data") + self.store = ContentStoreReader(save_dir) if save_dir is not None else None + self.args = [] + self.pbar = pbar + + def storage(self, storage_hash, nbytes, *, device=None, dtype_hint=None): + if self.pbar is not None: + self.pbar.update(1) + device = _device_or_default(device) + dtype_hint = _dtype_or_default(dtype_hint) + if self.store is not None and storage_hash is not None: + try: + storage = self.store.read_storage(storage_hash) + except FileNotFoundError: + pass + else: + if device != storage.device: + log.warning("device mismatch: %s != %s", device, storage.device) + # TODO: transfer it to the right device? But failing this + # way would be very mysterious! Would have been better + # not to store device in the serialized format... 
+ return storage + log.warning("could not load %s, generating random data instead", storage_hash) + shape = (nbytes // dtype_hint.itemsize,) + stride = _stride_or_default(None, shape=shape) + return rand_strided(shape, stride, dtype_hint, device).untyped_storage() + + def tensor( + self, + storage, + shape, + stride=None, + *, + storage_offset=None, + dtype=None, + requires_grad=None, + is_leaf=None, + **metadata, + ): + stride = _stride_or_default(stride, shape=shape) + storage_offset = _storage_offset_or_default(storage_offset) + dtype = _dtype_or_default(dtype) + is_leaf = _is_leaf_or_default(is_leaf) + requires_grad = _requires_grad_or_default(requires_grad) + t = torch.tensor( + [], dtype=dtype, device=storage.device, requires_grad=requires_grad + ) + with torch.no_grad(): + t.set_(storage, storage_offset, shape, stride) + if not is_leaf: + # Fake up some autograd history in a very naughty way + with torch.enable_grad(): + t = t.clone(memory_format=torch.preserve_format) + with torch.no_grad(): + t.set_(storage, storage_offset, shape, stride) + assert torch._subclasses.meta_utils.safe_is_leaf(t) == is_leaf + torch._utils.set_tensor_metadata(t, metadata) + self.args.append(t) + return t # for BC + + def symint(self, val): + self.args.append(val) + return val # for BC + + +# Here is our writer strategy: +# 1. We will stream all of the inputs to disk +# 2. You can now deterministically randomize the inputs, or reload +# the inputs from disk +# 3. You can YOLO run the script without the inputs, in which case +# we'll fill the inputs with random data and pray. This is the +# legacy behavior, but it's also useful if you want to find out +# if we're so broken even random inputs trigger it +# 4. We could offer an in process "check if the randomized thing +# works too" but this is delicate so we don't do it + + +class InputWriter: + def __init__(self, save_dir, *, stable_hash=False): + self._lines = [] + # TODO: consider ensuring tensor and storage counters line up? 
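        # (Editorial note: seen_storages, initialized below, maps a StorageWeakRef to
        # the buffer variable name already emitted for that storage, so tensors that
        # alias the same storage reuse a single `bufN = reader.storage(...)` line in
        # the generated load_args script rather than serializing the data twice.)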
+ self.storage_counter = itertools.count() + self.save_dir = save_dir + self.store = ( + ContentStoreWriter(save_dir, stable_hash=stable_hash) + if save_dir is not None + else None + ) + self.seen_storages = {} + + def lines(self): + r = [ + "def load_args(reader):", + ] + r.extend(f" {l}" for l in self._lines) + # In case we need to change the internal format of load_args + # in an FC-breaking way + r.append("load_args._version = 0") + return r + + # Storages are untyped, but we need to initialize them with data if + # we don't have the real data, so we give a hint saying what kind + # of initialization may be appropriate + # + # If we had a FakeTensor, device_hint tells us what device should be + def storage(self, untyped_storage, *, dtype_hint=None, device_hint=None) -> str: + ws = StorageWeakRef(untyped_storage) + v = self.seen_storages.get(ws) + if v is not None: + return v + v = f"buf{next(self.storage_counter)}" + maybe_dtype_hint = "" + if _dtype_or_default(None) != _dtype_or_default(dtype_hint): + maybe_dtype_hint = f", dtype_hint={dtype_hint!r}" + # TODO: being optional on device is kind of pointless as the default + # is CPU but most repros we care about are CUDA + maybe_device = "" + device = untyped_storage.device + if device.type == "meta": + assert device_hint is not None + device = device_hint + if _device_or_default(None) != device: + maybe_device = f", device={device!r}" + nbytes = untyped_storage.nbytes() + storage_hash = None + if self.store is not None and untyped_storage.device.type != "meta": + storage_hash = self.store.write_storage(untyped_storage) + self._lines.append( + f"{v} = reader.storage({storage_hash!r}, {nbytes!r}{maybe_device}{maybe_dtype_hint})" + ) + self.seen_storages[ws] = v + return v + + def tensor(self, name, t) -> None: + storage = self.storage( + t.untyped_storage(), dtype_hint=t.dtype, device_hint=t.device + ) + args = [] + # NB: this is positional, must come first + if _stride_or_default(None, shape=t.shape) != t.stride(): + args.append(str(tuple(t.stride()))) + if _dtype_or_default(None) != t.dtype: + args.append(f"dtype={t.dtype!r}") + if _storage_offset_or_default(None) != t.storage_offset(): + args.append(f"storage_offset={t.storage_offset()!r}") + tensor_metadata = torch._utils.get_tensor_metadata(t) + if tensor_metadata: + args.extend(f"{k}={v!r}" for k, v in tensor_metadata.items()) + if _requires_grad_or_default(None) != t.requires_grad: + args.append(f"requires_grad={t.requires_grad!r}") + is_leaf = torch._subclasses.meta_utils.safe_is_leaf(t) + if _is_leaf_or_default(None) != is_leaf: + args.append(f"is_leaf={is_leaf!r}") + self._lines.append( + "reader.tensor(" + + ", ".join([storage, str(tuple(t.shape)), *args]) + + f") # {name}" + ) + + # TODO: this doesn't actually symint atm + def symint(self, name, val) -> None: + if isinstance(val, torch.SymInt): + val = val.node.hint + self._lines.append(f"reader.symint({val!r}) # {name}") + + +def aot_graph_input_parser( + func: Callable[[List[Tensor]], List[Tensor]], + device: str = "cuda", + sym_shapes: Optional[Dict[str, int]] = None, + default_sym_shape: Optional[int] = None, +) -> Dict[str, Any]: + """ + Takes in a function which has been printed with print_readable() and constructs kwargs to run it. + + Handles Tensor inputs, Symints, and a graph module which might have tensor constants. 
+ + Consider a function `forward` defined as follows: + + def forward(self, primals_1: "f32[1001, 6]", primals_2: "f32[s0]", primals_3: "Sym(s0)",): + _tensor_constant0: "i64[4190]" = self._tensor_constant0 + # Further implementation + + kwargs = aot_graph_input_parser(forward) + forward(**kwargs) + """ + + from torch.fx.graph import dtype_abbrs + + dtype_map = {value: key for key, value in dtype_abbrs.items()} + dtype_pattern = "|".join(dtype_abbrs.values()) + + # Extracting the source code from the function + source = inspect.getsource(func) + + # Regular expressions + tensor_assignment_regex = rf"(_tensor_constant\d+): \"({dtype_pattern})\[\s*(.*?)\s*\]\" = self\.(_tensor_constant\d+)" + tensor_regex = rf"({dtype_pattern})\[\s*(.*?)\s*\]" + sym_shape_regex = r"Sym\((s\d+)\)" + + class TensorContainer: + "Container for tensors as attributes" + pass + + # Dictionary for tensors from annotations + kwargs: Dict[str, Any] = {} + + sym_shapes = sym_shapes or {} + + def get_sym_int(symint): + torch._check( + symint in sym_shapes or default_sym_shape is not None, + lambda: f"{symint} not in symbolic_shapes and default sym shape not passed in", + ) + return sym_shapes.get(symint, default_sym_shape) + + def gen_tensor(shape, dtype) -> Tensor: + # Resolve symbolic shapes to concrete values + resolved_shape = [] + dynamic_dims = [] + for i, dim in enumerate(shape): + dim = dim.strip() + if "s" in dim: + s = get_sym_int(dim) + resolved_shape.append(s) + dynamic_dims.append(i) + else: + resolved_shape.append(int(dim)) + + constructor = torch.randn if dtype.is_floating_point else torch.zeros + out = constructor(resolved_shape, dtype=dtype, device=device) # type: ignore[call-arg] + for d in dynamic_dims: + torch._dynamo.mark_dynamic(out, d) + return out + + # Parse function annotations for tensor generation + annotations = func.__annotations__ + for param, annotation in annotations.items(): + # Skip 'return' annotation + if param == "return": + continue + + match = re.search(tensor_regex, annotation) + if match: + data_type, shape_str = match.groups() + shape = tuple(shape_str.split(",")) + dtype = dtype_map[data_type] + kwargs[param] = gen_tensor(shape, dtype) + + match = re.search(sym_shape_regex, annotation) + if match: + kwargs[param] = get_sym_int(match.group(1)) + + if "self" in inspect.signature(func).parameters: + container = TensorContainer() + kwargs["self"] = container + for match in re.finditer(tensor_assignment_regex, source): + attr_name, data_type, shape_str, _ = match.groups() + shape = tuple(shape_str.split(",")) + dtype = dtype_map[data_type] + setattr(container, attr_name, gen_tensor(shape, dtype)) + + return kwargs diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/decorators.py b/venv/lib/python3.10/site-packages/torch/_dynamo/decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..8c82bf542169673ac1dc8917a8a53c114cf69b19 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/decorators.py @@ -0,0 +1,347 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING + +import torch +from torch.utils._python_dispatch import is_traceable_wrapper_subclass +from . 
import trace_rules, variables +from .comptime import comptime +from .eval_frame import DisableContext, innermost_fn, RunOnlyContext +from .exc import IncorrectUsage +from .external_utils import is_compiling + +if TYPE_CHECKING: + from torch._C._dynamo.eval_frame import ( # noqa: F401 + reset_code, + set_eval_frame, + set_guard_error_hook, + skip_code, + unsupported, + ) +else: + for name in dir(torch._C._dynamo.eval_frame): + if name.startswith("__"): + continue + globals()[name] = getattr(torch._C._dynamo.eval_frame, name) + + +def run(fn=None): + """Don't do any dynamic compiles, just use prior optimizations""" + if fn is not None: + fn = innermost_fn(fn) + assert callable(fn) + return RunOnlyContext()(fn) + return RunOnlyContext() + + +def disable(fn=None, recursive=True): + """ + Decorator and context manager to disable TorchDynamo + + If recursive=True, Dynamo is completely skipped on the decorated function + frame as well as the recursively invoked functions. + + If recursive=False, Dynamo skips frames associated with the function code, + but still process recursively invoked frames. + """ + if recursive: + if fn is not None: + fn = innermost_fn(fn) + assert callable(fn) + return DisableContext()(fn) + return DisableContext() + else: + return skip(fn) + + +def skip(fn=None): + """ + Skip frames associated with the function code, but still process recursively + invoked frames + """ + if fn is None: + return skip + fn = innermost_fn(fn) + assert callable(fn) + skip_code(fn.__code__) + fn._torchdynamo_disable = True + return fn + + +def assume_constant_result(fn): + fn._dynamo_marked_constant = True + return fn + + +def allow_in_graph(fn): + """ + Customize which functions TorchDynamo will include in the generated + graph. Similar to `torch.fx.wrap()`. + :: + + torch._dynamo.allow_in_graph(my_custom_function) + + @torch._dynamo.optimize(...) + def fn(a): + x = torch.add(x, 1) + x = my_custom_function(x) + x = torch.add(x, 1) + return x + + fn(...) + + Will capture a single graph containing `my_custom_function()`. + """ + if isinstance(fn, (list, tuple)): + return [allow_in_graph(x) for x in fn] + assert callable(fn), "allow_in_graph expects a callable" + if trace_rules.lookup_callable(fn) != variables.TorchInGraphFunctionVariable: + trace_rules._disallowed_callable_ids.remove(id(fn)) + trace_rules._allowed_callable_ids.add(id(fn)) + return fn + + +def _disallow_in_graph_helper(throw_if_not_allowed): + def inner(fn): + if isinstance(fn, (list, tuple)): + return [disallow_in_graph(x) for x in fn] + assert callable(fn), "disallow_in_graph expects a callable" + if ( + throw_if_not_allowed + and trace_rules.lookup_callable(fn) + != variables.TorchInGraphFunctionVariable + and trace_rules.lookup(fn) != variables.TorchInGraphFunctionVariable + ): + raise IncorrectUsage( + "disallow_in_graph is expected to be used on an already allowed callable (like torch.* ops). " + "Allowed callables means callables that TorchDynamo puts as-is in the extracted graph." + ) + trace_rules._allowed_callable_ids.remove(id(fn)) + trace_rules._disallowed_callable_ids.add(id(fn)) + return fn + + return inner + + +def disallow_in_graph(fn): + """ + Customize which functions TorchDynamo will exclude in the generated + graph and force a graph break on. + :: + + torch._dynamo.disallow_in_graph(torch.sub) + + @torch._dynamo.optimize(...) + def fn(a): + x = torch.add(x, 1) + x = torch.sub(x, 1) + x = torch.add(x, 1) + return x + + fn(...) 
+ + Will break the graph on `torch.sub`, and give two graphs each with a + single `torch.add()` op. + """ + return _disallow_in_graph_helper(throw_if_not_allowed=True)(fn) + + +@_disallow_in_graph_helper(throw_if_not_allowed=False) +def graph_break(): + """Force a graph break""" + pass + + +def forbid_in_graph(fn): + """ + Customize which functions TorchDynamo will assert are not present while tracing. + + If you want a graph break on this function instead, use disallow_in_graph. + TODO(voz): We now have allow_in_graph, disallow_in_graph, forbid_in_graph - some more robust + documentation would not be amiss. + """ + if isinstance(fn, (list, tuple)): + return [forbid_in_graph(x) for x in fn] + assert callable(fn), "forbid_in_graph applies only to callables" + fn._dynamo_forbidden = True + return fn + + +# Helper function to flatten a tensor subclass and apply a function to +# all inner tensors that match the outer dim. Used to reduce duplication +# across the various marking APIs. +def _apply_func_to_inner_tensors_of_same_dim(func, t, *args, **kwargs): + assert is_traceable_wrapper_subclass(t) + + attrs, ctx = t.__tensor_flatten__() + for attr in attrs: + inner = getattr(t, attr) + if inner.dim() == t.dim(): + func(inner, *args, **kwargs) + + +@dataclass(frozen=True) +class _DimRange: + """ + This represents an dimension of a tensor and the corresponding + min and max values it can take. Don't create this + class directly; instead, use :func:`mark_dynamic`. + """ + + dim: int + min: int + max: int + + +@forbid_in_graph +def mark_dynamic(t, index, *, min=None, max=None): + """ + Mark a tensor as having a dynamic dim and set corresponding min and max range for the dim. + + [Note - on the state of mark_dynamic] + + The behavior of having a dynamic dimension on a tensor is governed by a few factors: + + 1) torch._dynamo.config dynamic_shapes True or False. + a) dynamic_shapes=True - dynamic_shapes must be True for mark_dynamic to work. + a) dynamic_shapes=False - This config will raise an exception when used in conjunction with + mark_dynamic. We will eventually support this. + + 2) If the dimension is fully constrained - as in, it does not allow more than a single value + in both eager (torch.compile, torch._dynamo.optimize) mode and export mode (torch._dynamo.export), + we will raise an error + + 3) If the dimension is partially constrained - allowing at least 2 values but not the full unbounded + range of shapes, in eager we will pass it through, but export will raise an error. + + 4) Attempts to trace this function will explicitly raise. As such, all calls to mark_dynamic must be made + before torch.compile. + + """ + if is_traceable_wrapper_subclass(t): + # default behavior: mirror mark_dynamic() on all inner tensors with same dim as t + # TODO: Make this configurable via a supported public API + _apply_func_to_inner_tensors_of_same_dim( + mark_dynamic, t, index, min=min, max=max + ) + + if isinstance(index, int): + if not hasattr(t, "_dynamo_dynamic_indices"): + t._dynamo_dynamic_indices = set() + t._dynamo_dynamic_range = set() + # TODO(voz): Should we bounds check? + t._dynamo_dynamic_indices.add(index) + t._dynamo_dynamic_range.add(_DimRange(index, min, max)) + return + + assert isinstance(index, (list, tuple)) + for i in index: + mark_dynamic(t, i, min=min, max=max) + + +@forbid_in_graph +def maybe_mark_dynamic(t, index): + """ + Mark a tensor as having a dynamic dim, but don't enforce it (i.e., if this + dimension ends up getting specialized, don't error). 
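A small usage sketch for these marking APIs (the function and shapes are illustrative); as the notes above state, the calls must happen before compilation:

    import torch
    import torch._dynamo

    def f(x):
        return x * 2

    x = torch.randn(8, 16)
    torch._dynamo.mark_dynamic(x, 0, min=2, max=1024)   # dim 0 must stay dynamic
    torch._dynamo.maybe_mark_dynamic(x, 1)              # dim 1 may specialize without error
    y = torch.compile(f)(x)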
+ """ + if is_traceable_wrapper_subclass(t): + # default behavior: mirror maybe_mark_dynamic() on all inner tensors with same dim as t + # TODO: Make this configurable via a supported public API + _apply_func_to_inner_tensors_of_same_dim(maybe_mark_dynamic, t, index) + + if isinstance(index, int): + if not hasattr(t, "_dynamo_weak_dynamic_indices"): + t._dynamo_weak_dynamic_indices = set() + # TODO(voz): Should we bounds check? + t._dynamo_weak_dynamic_indices.add(index) + return + + assert isinstance(index, (list, tuple)) + for i in index: + maybe_mark_dynamic(t, i) + + +def mark_static(t, index=None): + """ + Mark a tensor as having a static dim. + + This will prevent us from attempting to compile it dynamically + when dynamic=True; this can improve trace-time performance. + + This has lower precedence than mark_dynamic. + + Unlike mark_dynamic, this can be done inside a graph, in which case it + induces specialization on the tensor. + """ + if is_compiling(): + if index is None: + for s in t.size(): + comptime.force_static(s) + else: + comptime.force_static(t.size(index)) + return + + if is_traceable_wrapper_subclass(t): + # default behavior: mirror mark_static() on all inner tensors with same dim as t + # TODO: Make this configurable via a supported public API + _apply_func_to_inner_tensors_of_same_dim(mark_static, t, index) + + if isinstance(index, int): + if not hasattr(t, "_dynamo_static_indices"): + t._dynamo_static_indices = set() + # TODO(voz): Should we bounds check? + t._dynamo_static_indices.add(index) + elif index is None: + for i in range(t.dim()): + mark_static(t, i) + else: + assert isinstance(index, (list, tuple)) + for i in index: + mark_static(t, i) + + +@forbid_in_graph +def mark_static_address(t, guard=True): + """ + Marks an input tensor whose data_ptr will not change across multiple calls + to a dynamo-compiled function. This indicates to cudagraphs that an extra allocation + is not needed for this input. The data_ptr will be guarded if guard=True. Note: + Tensors marked in this way will be kept alive until `torch._dynamo.reset()` is called. + """ + if not isinstance(t, torch.Tensor): + raise TypeError(f"mark_static_address expects a tensor but recieved {type(t)}") + + if guard: + t._dynamo_static_input_type = "guarded" # type: ignore[attr-defined] + else: + t._dynamo_static_input_type = "unguarded" # type: ignore[attr-defined] + + +# Note: this carefully avoids eagerly import einops. +# TODO: we should delete this whole _allow_in_graph_einops logic by approximately 2024 Q2 +def _allow_in_graph_einops(): + import einops + + try: + # requires einops > 0.6.1, torch >= 2.0 + from einops._torch_specific import ( # type: ignore[attr-defined] # noqa: F401 + _ops_were_registered_in_torchdynamo, + ) + + # einops > 0.6.1 will call the op registration logic as it is imported. 
+ pass + except ImportError: + # einops <= 0.6.1 + allow_in_graph(einops.rearrange) + allow_in_graph(einops.reduce) + if hasattr(einops, "repeat"): + allow_in_graph(einops.repeat) # available since einops 0.2.0 + if hasattr(einops, "einsum"): + allow_in_graph(einops.einsum) # available since einops 0.5.0 + if hasattr(einops, "pack"): + allow_in_graph(einops.pack) # available since einops 0.6.0 + if hasattr(einops, "unpack"): + allow_in_graph(einops.unpack) # available since einops 0.6.0 + + +trace_rules.add_module_init_func("einops", _allow_in_graph_einops) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/device_interface.py b/venv/lib/python3.10/site-packages/torch/_dynamo/device_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..4857b963881f784eb72115cdbb6560282796c4b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/device_interface.py @@ -0,0 +1,199 @@ +import inspect +from typing import Any, Callable, Dict, Iterable, Optional, Tuple, Type, Union + +import torch +from torch._streambase import _EventBase, _StreamBase + +get_cuda_stream: Optional[Callable[[int], int]] +if torch.cuda._is_compiled(): + from torch._C import _cuda_getCurrentRawStream as get_cuda_stream +else: + get_cuda_stream = None + +_device_t = Union[torch.device, str, int, None] + +# Recording the device properties in the main process but used in worker process. +caching_worker_device_properties: Dict[str, Any] = {} +caching_worker_current_devices: Dict[str, int] = {} + + +class DeviceInterfaceMeta(type): + def __new__(metacls, *args, **kwargs): + class_member = args[2] + if "Event" in class_member: + assert inspect.isclass(class_member["Event"]) and issubclass( + class_member["Event"], _EventBase + ), "DeviceInterface member Event should be inherit from _EventBase" + if "Stream" in class_member: + assert inspect.isclass(class_member["Stream"]) and issubclass( + class_member["Stream"], _StreamBase + ), "DeviceInterface member Stream should be inherit from _StreamBase" + return super().__new__(metacls, *args, **kwargs) + + +class DeviceInterface(metaclass=DeviceInterfaceMeta): + """ + This is a simple device runtime interface for Inductor. It enables custom + backends to be integrated with Inductor in a device-agnostic semantic. + """ + + class device: + def __new__(cls, device: _device_t): + raise NotImplementedError() + + class Worker: + """ + Worker API to query device properties that will work in multi processing + workers that cannot use the GPU APIs (due to processing fork() and + initialization time issues). Properties are recorded in the main process + before we fork the workers. 
+ """ + + @staticmethod + def set_device(device: int): + raise NotImplementedError() + + @staticmethod + def current_device() -> int: + raise NotImplementedError() + + @staticmethod + def get_device_properties(device: _device_t = None): + raise NotImplementedError() + + @staticmethod + def current_device(): + raise NotImplementedError() + + @staticmethod + def set_device(device: _device_t): + raise NotImplementedError() + + @staticmethod + def device_count(): + raise NotImplementedError() + + @staticmethod + def is_available() -> bool: + raise NotImplementedError() + + @staticmethod + def stream(stream: torch.Stream): + raise NotImplementedError() + + @staticmethod + def current_stream(): + raise NotImplementedError() + + @staticmethod + def set_stream(stream: torch.Stream): + raise NotImplementedError() + + @staticmethod + def _set_stream_by_id(stream_id: int, device_index: int, device_type: int): + raise NotImplementedError() + + @staticmethod + def get_raw_stream(): + raise NotImplementedError() + + @staticmethod + def synchronize(device: _device_t = None): + raise NotImplementedError() + + @staticmethod + def get_device_properties(device: _device_t = None): + raise NotImplementedError() + + @staticmethod + def get_compute_capability(device: _device_t = None): + raise NotImplementedError() + + +class CudaInterface(DeviceInterface): + device = torch.cuda.device + + # register Event and Stream class into the backend interface + # make sure Event and Stream are implemented and inherited from the _EventBase and _StreamBase + Event = torch.cuda.Event + Stream = torch.cuda.Stream + + class Worker: + @staticmethod + def set_device(device: int): + caching_worker_current_devices["cuda"] = device + + @staticmethod + def current_device() -> int: + if "cuda" in caching_worker_current_devices: + return caching_worker_current_devices["cuda"] + return torch.cuda.current_device() + + @staticmethod + def get_device_properties(device: _device_t = None): + if device is not None: + if isinstance(device, str): + device = torch.device(device) + assert device.type == "cuda" + if isinstance(device, torch.device): + device = device.index + if device is None: + device = CudaInterface.Worker.current_device() + + if "cuda" not in caching_worker_device_properties: + device_prop = [ + torch.cuda.get_device_properties(i) + for i in range(torch.cuda.device_count()) + ] + caching_worker_device_properties["cuda"] = device_prop + + return caching_worker_device_properties["cuda"][device] + + current_device = staticmethod(torch.cuda.current_device) + set_device = staticmethod(torch.cuda.set_device) + device_count = staticmethod(torch.cuda.device_count) + stream = staticmethod(torch.cuda.stream) # type: ignore[assignment] + current_stream = staticmethod(torch.cuda.current_stream) + set_stream = staticmethod(torch.cuda.set_stream) # type: ignore[assignment] + _set_stream_by_id = staticmethod(torch.cuda._set_stream_by_id) # type: ignore[assignment] + synchronize = staticmethod(torch.cuda.synchronize) + get_device_properties = staticmethod(torch.cuda.get_device_properties) # type: ignore[assignment] + get_raw_stream = staticmethod(get_cuda_stream) # type: ignore[arg-type] + + # Can be mock patched by @patch decorator. 
+ @staticmethod + def is_available() -> bool: + return torch.cuda.is_available() + + @staticmethod + def get_compute_capability(device: _device_t = None): + major, min = torch.cuda.get_device_capability(device) + return major * 10 + min + + +device_interfaces: Dict[str, Type[DeviceInterface]] = {} + + +def register_interface_for_device( + device: Union[str, torch.device], device_interface: Type[DeviceInterface] +): + if isinstance(device, torch.device): + device = str(device) + device_interfaces[device] = device_interface + + +def get_interface_for_device(device: Union[str, torch.device]) -> Type[DeviceInterface]: + if isinstance(device, torch.device): + device = str(device) + if device in device_interfaces: + return device_interfaces[device] + raise NotImplementedError(f"No interface for device {device}") + + +def get_registered_device_interfaces() -> Iterable[Tuple[str, Type[DeviceInterface]]]: + return device_interfaces.items() + + +register_interface_for_device("cuda", CudaInterface) +for i in range(torch.cuda.device_count()): + register_interface_for_device(f"cuda:{i}", CudaInterface) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py b/venv/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..53ab0df3a947c46066e8f0775e7db2082210298e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py @@ -0,0 +1,1561 @@ +# mypy: disable-error-code="method-assign" + +""" +Functions in this file are responsible for modifying the eval frame +handler at RUNTIME. Therefore, all functions in this file are hot. +Functions that only execute at compile time should be placed +in torch._dynamo.convert_frame. +""" + +from __future__ import annotations + +import contextlib +import functools +import inspect +import logging +import os +import sys +import textwrap +import threading +import traceback +import types +import warnings +import weakref +from enum import Enum +from os.path import dirname, join +from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Tuple, Union +from unittest.mock import patch + +import torch +import torch.fx +import torch.utils._pytree as pytree +import torch.utils.checkpoint +from torch import _guards +from torch._subclasses import fake_tensor +from torch._utils_internal import log_export_usage +from torch.export import Constraint +from torch.export.dynamic_shapes import _process_dynamic_shapes +from torch.fx.experimental.proxy_tensor import make_fx, maybe_disable_fake_tensor_mode +from torch.fx.experimental.symbolic_shapes import ( + ConstraintViolationError, + DimDynamic, + StatelessSymbolicContext, +) +from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo + +from ..fx import GraphModule +from .backends.registry import CompilerFn, lookup_backend + +from .hooks import Hooks + +# see discussion at https://github.com/pytorch/pytorch/issues/120699 +reset_code = torch._C._dynamo.eval_frame.reset_code # noqa: F401 +set_eval_frame = torch._C._dynamo.eval_frame.set_eval_frame # noqa: F401 +set_guard_error_hook = torch._C._dynamo.eval_frame.set_guard_error_hook # noqa: F401 +skip_code = torch._C._dynamo.eval_frame.skip_code # noqa: F401 +unsupported = torch._C._dynamo.eval_frame.unsupported # noqa: F401 + +from . 
import config, convert_frame, external_utils, trace_rules, utils +from .code_context import code_context +from .exc import CondOpArgsMismatchError, UserError, UserErrorType +from .mutation_guard import install_generation_tagging_init +from .types import CacheEntry, DynamoCallback +from .utils import common_constant_types, compile_times + +log = logging.getLogger(__name__) + +from torch._dispatch.python import enable_python_dispatcher + +always_optimize_code_objects = utils.ExactWeakKeyDictionary() +null_context = contextlib.nullcontext + + +import sympy + + +# See https://github.com/python/typing/pull/240 +class Unset(Enum): + token = 0 + + +unset = Unset.token + +guarded_backend_cache = threading.local() +cached_backends: Dict[int, CompilerFn] = {} + + +def check_current_backend(backend_obj_id: int): + """ + Called from guards to check if we need to recompile due to a backend change + """ + # TODO(jansel): we should move guarded_backend_cache to C++ + try: + if guarded_backend_cache.skip_backend_check_for_run_only_mode: + return True + except AttributeError: + # Go slightly faster next time + guarded_backend_cache.skip_backend_check_for_run_only_mode = False + try: + current_backend = guarded_backend_cache.current_backend + except AttributeError: + current_backend = None + return ( + # Avoid the dict lookup in case of exact same object + id(current_backend) == backend_obj_id + or current_backend == cached_backends.get(backend_obj_id, None) + ) + + +def _reset_guarded_backend_cache(): + global cached_backends + guarded_backend_cache.skip_backend_check_for_run_only_mode = False + guarded_backend_cache.current_backend = None + for backend in cached_backends.values(): + if hasattr(backend, "reset"): + backend.reset() + cached_backends.clear() + + +def backend_cache_manager(callback: DynamoCallback): + # callback is False for RunOnlyContext. RunOnlyContext is used + # as a way to re-use the previous compiled cache. + # We therefore skip the check and re-use whatever code that's already cached. + # Note: the cache that's actually used depends on the caching policy. + if callback is False: + + def change(): + try: + prev_skip = guarded_backend_cache.skip_backend_check_for_run_only_mode + except AttributeError: + prev_skip = False + guarded_backend_cache.skip_backend_check_for_run_only_mode = True + + def revert(): + guarded_backend_cache.skip_backend_check_for_run_only_mode = prev_skip + + return revert + + else: + backend = innermost_fn(callback) + + def change(): + cached_backends.setdefault(id(backend), backend) + try: + prev_backend = guarded_backend_cache.current_backend + except AttributeError: + prev_backend = None + guarded_backend_cache.current_backend = backend + + def revert(): + guarded_backend_cache.current_backend = prev_backend + + return revert + + return change + + +DONT_WRAP_FILES = { + # For tracing into fx modules + inspect.getsourcefile(GraphModule), + join(dirname(dirname(__file__)), "onnx/_internal/fx/dynamo_graph_extractor.py"), +} + + +def _debug_get_cache_entry_list( + code: Union[types.CodeType, Callable[..., Any]] +) -> List[CacheEntry]: + """ + Given a code object or a callable object, retrieve the cache entries + stored in this code. + """ + if callable(code): + code = code.__code__ + return torch._C._dynamo.eval_frame._debug_get_cache_entry_list(code) + + +class OptimizedModule(torch.nn.Module): + """ + Wraps the original nn.Module object and later patches its + forward method to optimized self.forward method. 
+ """ + + _torchdynamo_orig_callable: Callable[..., Any] + get_compiler_config: Callable[[], Any] + + def __init__(self, mod: torch.nn.Module, dynamo_ctx): + super().__init__() + # Installs the params/buffer + self._orig_mod = mod + self.dynamo_ctx = dynamo_ctx + self._initialize() + + def _initialize(self): + # Do this stuff in constructor to lower overhead slightly + if isinstance(self._orig_mod.forward, types.MethodType) and trace_rules.check( + self._orig_mod.forward + ): + # This may be a torch.nn.* instance in trace_rules.py which + # won't trigger a frame evaluation workaround to add an extra + # frame we can capture + self.forward = self.dynamo_ctx(external_utils.wrap_inline(self._orig_mod)) + else: + # Invoke hooks outside of dynamo then pickup the inner frame + self.forward = self.dynamo_ctx(self._orig_mod.__call__) + + if hasattr(self._orig_mod, "_initialize_hook"): + self._forward = self.forward + self.forward = self._call_lazy_check + + def __getstate__(self): + state = dict(self.__dict__) + state.pop("forward", None) + state.pop("__call__", None) + return state + + def __setstate__(self, state): + self.__dict__ = state + self._initialize() + + def __getattr__(self, name): + if name == "_orig_mod": + return self._modules["_orig_mod"] + return getattr(self._orig_mod, name) + + def _call_lazy_check(self, *args, **kwargs): + if hasattr(self._orig_mod, "_initialize_hook"): + # In the case of a lazy module, we want to run + # the pre-hooks which initialize it. + # Afterwards, lazy module deletes its pre-hooks + # to avoid treating it as lazy on subsequent recompile. + self._orig_mod._infer_parameters(self._orig_mod, args, kwargs) + return self._forward(*args, **kwargs) + + def __dir__(self): + orig_mod_attrs = self._orig_mod.__dir__() + return orig_mod_attrs + [ + attr for attr in super().__dir__() if attr not in orig_mod_attrs + ] + + +def remove_from_cache(f): + """ + Make sure f.__code__ is not cached to force a recompile + """ + if isinstance(f, types.CodeType): + reset_code(f) + elif hasattr(f, "__code__"): + reset_code(f.__code__) + elif hasattr(getattr(f, "forward", None), "__code__"): + reset_code(f.forward.__code__) + else: + from . import reset # type: ignore[attr-defined] + + reset() + log.warning("could not determine __code__ for %s", f) + + +def nothing(): + pass + + +def always_false(): + return False + + +def innermost_fn(fn): + """ + In case of nesting of _TorchDynamoContext calls, find the innermost + function. TorchDynamo caches on fn.__code__ object, so its necessary to find + the innermost function to pass on the optimize, run, disable etc. 
+ """ + unaltered_fn = fn + while hasattr(unaltered_fn, "_torchdynamo_orig_callable"): + unaltered_fn = unaltered_fn._torchdynamo_orig_callable + assert callable(unaltered_fn) + return unaltered_fn + + +def make_set_enable_dynamic(enable: bool): + assert isinstance(enable, bool) + if enable: + # Assume everything is dynamic by default + return config._make_closure_patcher(assume_static_by_default=False) + else: + return config._make_closure_patcher( + automatic_dynamic_shapes=False, assume_static_by_default=True + ) + + +class _TorchDynamoContext: + def __init__( + self, + callback: DynamoCallback, + on_enter=nothing, + backend_ctx_ctor=null_context, + patch_fn=nothing, + first_ctx=False, + *, + export=False, + dynamic=None, + compiler_config=None, + ): + super().__init__() + assert callable(callback) or callback is False or callback is None + self.callback: DynamoCallback = callback + self.prior: Union[Unset, DynamoCallback] = unset + self.first_ctx = first_ctx + self.export = export + self.compiler_config = compiler_config + self.cleanup_fns: List[Callable[[], Any]] = [] + self.enter_exit_hooks = [backend_cache_manager(self.callback)] + patch_fn() + + if dynamic is not None: + self.enter_exit_hooks.append(make_set_enable_dynamic(dynamic)) + + if on_enter is not nothing: + # this case is not common + def call_on_enter(): + on_enter() + return nothing + + self.enter_exit_hooks.append(call_on_enter) + + if backend_ctx_ctor is not contextlib.nullcontext: + # this case is not common + def call_backend_ctx(): + ctx = backend_ctx_ctor() + ctx.__enter__() + return functools.partial(ctx.__exit__, None, None, None) + + self.enter_exit_hooks.append(call_backend_ctx) + + def __enter__(self): + if config.raise_on_ctx_manager_usage: + raise RuntimeError( + "torch._dynamo.optimize(...) is used with a context manager. " + "Please refer to https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html " + "to use torch._dynamo.optimize(...) as an annotation/decorator. " + ) + self.cleanup_fns = [enter() for enter in self.enter_exit_hooks] + self.prior = set_eval_frame(self.callback) + + def __exit__(self, exc_type, exc_val, exc_tb): + assert self.prior is not unset + set_eval_frame(self.prior) + self.prior = unset + for cleanup in self.cleanup_fns: + cleanup() + self.cleanup_fns.clear() + + def __call__(self, fn): + # public api for compiler config/options + def get_compiler_config(): + return self.compiler_config + + fn = innermost_fn(fn) + + # add context containing GraphModule to any GraphModule forward functions + from torch.fx._lazy_graph_module import _LazyGraphModule + + if isinstance(fn, _LazyGraphModule) or ( + isinstance(getattr(fn, "__self__", None), _LazyGraphModule) + and fn.__name__ == "_lazy_forward" + ): + # Since dynamo will run the forward method for the GraphModule shortly + # anyways, it does not hurt to do the real recompilation here if + # this is a _LazyGraphModule. This makes it easier for dynamo to + # optimize a _LazyGraphModule. + + lazy_gm = fn if isinstance(fn, _LazyGraphModule) else fn.__self__ + + _LazyGraphModule.force_recompile(lazy_gm) + + # Assume that the underlying node metadata of `fn`, + # a GraphModule instance, accurately represents + # all instances of type(fn). 
+ code_context.get_context(lazy_gm.forward.__code__)[ + "orig_graphmodule" + ] = weakref.ref(lazy_gm) + + if not isinstance(fn, _LazyGraphModule): + # replace fn with the real forward method + fn = lazy_gm.forward + elif isinstance(fn, GraphModule): + code_context.get_context(fn.forward.__code__)[ + "orig_graphmodule" + ] = weakref.ref(fn) + + # Optimize the forward method of torch.nn.Module object + if isinstance(fn, torch.nn.Module): + mod = fn + new_mod = OptimizedModule(mod, self) + # Save the function pointer to find the original callable while nesting + # of decorators. + new_mod._torchdynamo_orig_callable = mod.forward + + # when compiling torch.nn.Module, + # provide public api OptimizedModule.get_compiler_config() + assert not hasattr(new_mod, "get_compiler_config") + new_mod.get_compiler_config = get_compiler_config + + return new_mod + assert callable(fn) + + try: + filename = inspect.getsourcefile(fn) + except TypeError: + filename = None + if ( + (filename is None or trace_rules.check(fn)) + and ( + getattr(fn, "__name__", "") not in ["_call_impl", "_wrapped_call_impl"] + ) + and filename not in DONT_WRAP_FILES + ): + # call to a builtin without a frame for us to capture + fn = external_utils.wrap_inline(fn) + + callback = self.callback + + if isinstance(self, DisableContext): + is_jit_tracing = always_false + is_fx_tracing = always_false + else: + is_jit_tracing = torch._C._is_tracing + is_fx_tracing = torch.fx._symbolic_trace.is_fx_tracing + + @functools.wraps(fn) + def _fn(*args, **kwargs): + if is_fx_tracing(): + if config.error_on_nested_fx_trace: + raise RuntimeError( + "Detected that you are using FX to symbolically trace " + "a dynamo-optimized function. This is not supported at the moment." + ) + else: + return fn(*args, **kwargs) + + if is_jit_tracing(): + if config.error_on_nested_jit_trace: + raise RuntimeError( + "Detected that you are using FX to torch.jit.trace " + "a dynamo-optimized function. This is not supported at the moment." + ) + else: + return fn(*args, **kwargs) + + cleanups = [enter() for enter in self.enter_exit_hooks] + prior = set_eval_frame(callback) + try: + return fn(*args, **kwargs) + finally: + set_eval_frame(prior) + for cleanup in cleanups: + cleanup() + + # hooks to properly handle inlining + if isinstance(self, DisableContext): + _fn._torchdynamo_disable = True # type: ignore[attr-defined] + else: + _fn._torchdynamo_inline = fn # type: ignore[attr-defined] + + # Save the function pointer to find the original callable while nesting + # of decorators. + _fn._torchdynamo_orig_callable = fn # type: ignore[attr-defined] + + # when compiling user function instead of nn.Module + # provide public api _fn.get_compiler_config() + assert not hasattr(_fn, "get_compiler_config") + _fn.get_compiler_config = get_compiler_config # type: ignore[attr-defined] + + # If the function is called using torch._dynamo.optimize decorator, we + # should prevent any type of skipping. + if callback not in (None, False): + if not hasattr(fn, "__code__"): + raise RuntimeError( + textwrap.dedent( + """ + + torch._dynamo.optimize is called on a non function object. + If this is a callable class, please wrap the relevant code into a function and optimize the + wrapper function. 
+ + >> class CallableClass: + >> def __init__(self): + >> super().__init__() + >> self.relu = torch.nn.ReLU() + >> + >> def __call__(self, x): + >> return self.relu(torch.sin(x)) + >> + >> def print_hello(self): + >> print("Hello world") + >> + >> mod = CallableClass() + + If you want to optimize the __call__ function and other code, wrap that up in a function + + >> def wrapper_fn(x): + >> y = mod(x) + >> return y.sum() + + and then optimize the wrapper_fn + + >> opt_wrapper_fn = torch._dynamo.optimize(wrapper_fn) + """ + ) + ) + always_optimize_code_objects[fn.__code__] = True + + return _fn + + +class OptimizeContext(_TorchDynamoContext): + def __init__( + self, + callback, + backend_ctx_ctor, + first_ctx=False, + *, + export=False, + dynamic=None, + compiler_config=None, + ): + def on_enter(): + install_generation_tagging_init() + + super().__init__( + callback=callback, + on_enter=on_enter, + backend_ctx_ctor=backend_ctx_ctor, + patch_fn=TorchPatcher.patch, + first_ctx=first_ctx, + export=export, + dynamic=dynamic, + compiler_config=compiler_config, + ) + + +class RunOnlyContext(_TorchDynamoContext): + def __init__(self): + # cudagraph trees relies on generation increment + def on_enter(): + torch._dynamo.mutation_guard.GenerationTracker.generation += 1 + + super().__init__(callback=False, on_enter=on_enter) + + +class DisableContext(_TorchDynamoContext): + def __init__(self): + super().__init__(callback=None) + + +def _optimize_catch_errors( + compile_fn, + hooks: Hooks, + backend_ctx_ctor=null_context, + export=False, + dynamic=None, + compiler_config=None, +): + return OptimizeContext( + convert_frame.catch_errors_wrapper(compile_fn, hooks), + backend_ctx_ctor=backend_ctx_ctor, + first_ctx=True, + export=export, + dynamic=dynamic, + compiler_config=compiler_config, + ) + + +def get_compiler_fn(compiler_fn): + from .repro.after_dynamo import wrap_backend_debug + + if hasattr(compiler_fn, "compiler_name"): + compiler_str = compiler_fn.compiler_name + elif isinstance(compiler_fn, str): + compiler_str = compiler_fn + else: + compiler_str = None + compiler_fn = lookup_backend(compiler_fn) + return wrap_backend_debug(compiler_fn, compiler_str) + + +class _NullDecorator(contextlib.nullcontext): # type: ignore[type-arg] + def __call__(self, fn): + assert callable(fn) + return fn + + +def check_if_dynamo_supported(): + if sys.version_info >= (3, 12): + raise RuntimeError("Python 3.12+ not yet supported for torch.compile") + + +def is_dynamo_supported(): + try: + check_if_dynamo_supported() + return True + except Exception: + return False + + +def check_if_inductor_supported(): + check_if_dynamo_supported() + + if sys.platform == "win32": + raise RuntimeError("Windows not yet supported for inductor") + + +def is_inductor_supported(): + try: + check_if_inductor_supported() + return True + except Exception: + return False + + +def optimize( + backend="inductor", + *, + nopython=False, + guard_export_fn=None, + guard_fail_fn=None, + disable=False, + dynamic=None, +): + """ + The main entrypoint of TorchDynamo. Do graph capture and call + backend() to optimize extracted graphs. + + Args: + backend: One of the two things: + - Either, a function/callable taking a torch.fx.GraphModule and + example_inputs and returning a python callable that runs the + graph faster. + One can also provide additional context for the backend, like + torch.jit.fuser("fuser2"), by setting the backend_ctx_ctor attribute. + See AOTAutogradMemoryEfficientFusionWithContext for the usage. 
+ - Or, a string backend name in `torch._dynamo.list_backends()` + nopython: If True, graph breaks will be errors and there will + be a single whole-program graph. + disable: If True, turn this decorator into a no-op + dynamic: If True, upfront compile as dynamic a kernel as possible. If False, + disable all dynamic shapes support (always specialize). If None, automatically + detect when sizes vary and generate dynamic kernels upon recompile. + + Example Usage:: + + @torch._dynamo.optimize() + def toy_example(a, b): + ... + """ + check_if_dynamo_supported() + # Note: The hooks object could be global instead of passed around, *however* that would make + # for a confusing API usage and plumbing story wherein we nest multiple .optimize calls. + # There is some prior art around this, w/r/t nesting backend calls are enforced to be the same + # compiler, however, this feels onerous for callback and hooks, and it feels better to give our users an + # easier to understand UX at the cost of a little more plumbing on our end. + hooks = Hooks(guard_export_fn=guard_export_fn, guard_fail_fn=guard_fail_fn) + torch._C._log_api_usage_once("torch._dynamo.optimize") + if disable or os.environ.get("TORCHDYNAMO_DISABLE", "") == "1": + return _NullDecorator() + + backend = get_compiler_fn(backend) + + # Find if backend has any extra context manager + backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context) + + if nopython: + return optimize_assert( + backend, + dynamic=dynamic, + hooks=hooks, + ) + return _optimize_catch_errors( + convert_frame.convert_frame(backend, hooks=hooks), + hooks, + backend_ctx_ctor, + dynamic=dynamic, + compiler_config=backend.get_compiler_config() + if hasattr(backend, "get_compiler_config") + else None, + ) + + +# TODO(voz): Consider making "explain" output alongside a run / part of a run +@patch("torch._dynamo.symbolic_convert.explain", True) +def explain(f, *extra_args, **extra_kwargs): + def inner(*args, **kwargs): + # TODO(voz): Do we want a decorator for this? + from . import reset # type: ignore[attr-defined] + + reset() + + graphs: List[torch.fx.GraphModule] = [] + break_reasons: List[Any] = [] + op_count: int = 0 + ops_per_graph: List[torch.fx.Node] = [] + out_guards: List[_guards.Guard] = [] + + def dynamo_graph_accumulating_compiler( + gm: torch.fx.GraphModule, example_inputs + ): + from .backends.debugging import _explain_graph_detail + + nonlocal graphs + nonlocal op_count + nonlocal ops_per_graph + nonlocal break_reasons + + gm, graphs, op_count, ops_per_graph, break_reasons = _explain_graph_detail( + gm, graphs, op_count, ops_per_graph, break_reasons + ) + + return gm.forward + + def guard_export_print(guards): + nonlocal out_guards + out_guards.extend(guards) + + opt_f = optimize( + dynamo_graph_accumulating_compiler, + nopython=False, + guard_export_fn=guard_export_print, + )(f) + # TODO(voz): We may have instances of `f` that mutate inputs, we should track sideeffects and reject. + opt_f(*args, **kwargs) + + graph_count = len(graphs) + + # For the explanation summary, dedupe reasons by the innermost stack frame and dedupe by it. + deduped_reasons = {} + for reason in break_reasons: + innermost_frame = reason.user_stack[-1] + # __repr__ uniquely identifies a FrameSummary so we can use it for deduping + deduped_reasons[repr(innermost_frame)] = reason + + formatted_list = "" + for idx, break_reason in enumerate(deduped_reasons.values()): + formatted_stack = "".join(traceback.format_list(break_reason.user_stack)) + msg = f"{idx + 1}. 
Reason: {break_reason.reason}\n User Stack: {formatted_stack}\n" + formatted_list += msg + + graph_break_count = graph_count - 1 + compile_time = compile_times(repr="str") + + # TODO(voz): Do we want a decorator for this? + reset() + from .backends.debugging import ExplainOutput + + return ExplainOutput( + graphs, + graph_count, + graph_break_count, + break_reasons, + op_count, + ops_per_graph, + out_guards, + compile_time, + ) + + if extra_args or extra_kwargs: + warnings.warn( + "explain(f, *args, **kwargs) is deprecated, use explain(f)(*args, **kwargs) instead. " + "If you don't migrate, we may break your explain call in the future if your user defined kwargs " + "conflict with future kwargs added to explain(f)." + ) + return inner(*extra_args, **extra_kwargs) + else: + return inner + + +class FlattenInputOutputSignature(torch.fx.interpreter.Transformer): + def __init__( + self, + m: torch.fx.GraphModule, + flat_args: Tuple[Any], + matched_input_elements_positions: List[int], + flat_results: List[Any], + matched_output_elements_positions: List[int], + example_fake_inputs: List[torch.Tensor], + flat_args_dynamic_dims: List[Set[int]], + fake_mode: Optional[fake_tensor.FakeTensorMode] = None, + ): + super().__init__(m) + + assert len(flat_args_dynamic_dims) == len(flat_args) + matched_input_elements_to_fake = { + val: example_fake_inputs[ix] + for ix, val in enumerate(matched_input_elements_positions) + } + + self.new_args = [] + for i in range(0, len(flat_args)): + arg = super().placeholder(f"arg{i}", (), {}) + if i in matched_input_elements_to_fake: + arg.node.meta["val"] = matched_input_elements_to_fake[i] + else: + # Fill node.mata["val"] with faketensor from the input, + # if it's not found in matched_input_elements_positions + if fake_mode is not None and isinstance(flat_args[i], torch.Tensor): + # TODO(zhxchen17) Also preserve all the user constraints here. 
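A usage sketch for explain() in its non-deprecated form; the toy function and the forced graph break are illustrative:

    import torch
    import torch._dynamo

    def fn(x):
        x = torch.sin(x)
        torch._dynamo.graph_break()
        return torch.cos(x)

    out = torch._dynamo.explain(fn)(torch.randn(4))
    print(out.graph_count, out.graph_break_count)
    print(out.break_reasons)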
+ arg.node.meta["val"] = fake_mode.from_tensor( + flat_args[i], + symbolic_context=StatelessSymbolicContext( + dynamic_sizes=[ + DimDynamic.DYNAMIC + if d in flat_args_dynamic_dims[i] + else DimDynamic.STATIC + for d in range(len(flat_args[i].shape)) + ], + constraint_sizes=[None] * len(flat_args[i].shape), + ), + ) + self.new_args.append(arg) + self.old_args_gen = (self.new_args[i] for i in matched_input_elements_positions) + self.matched_output_elements_positions = matched_output_elements_positions + self.flat_results = flat_results + + def placeholder(self, target, args, kwargs): + arg = next(self.old_args_gen) + if "val" in self.current_node.meta: + arg.node.meta["val"] = self.current_node.meta["val"] + if "tensor_dict" in self.current_node.meta: + arg.node.meta["tensor_dict"] = self.current_node.meta["tensor_dict"] + if "example_value" in self.current_node.meta: + arg.node.meta["example_value"] = self.current_node.meta["example_value"] + return arg + + def output(self, target, args, kwargs): + dynamo_result_flat = args[0] + lookup = [*dynamo_result_flat, *self.new_args] + new_results_flat = [] + for i in range(len(self.flat_results)): + if self.matched_output_elements_positions[i] is not None: + new_results_flat.append( + lookup[self.matched_output_elements_positions[i]] + ) + else: + const_val = self.flat_results[i] + assert isinstance(const_val, tuple(common_constant_types)) + new_results_flat.append(const_val) + return super().output(target, (new_results_flat,), {}) + + def run_node(self, n): + self.current_node = n + result_proxy = super().run_node(n) + if "val" in self.current_node.meta: + result_proxy.node.meta["val"] = self.current_node.meta["val"] + if "example_value" in self.current_node.meta: + result_proxy.node.meta["example_value"] = self.current_node.meta[ + "example_value" + ] + if self.current_node.op != "output": + result_proxy.node._rename( + getattr(self.current_node, "name", result_proxy.node.name) + ) + return result_proxy + + def transform(self): + result_gm = super().transform() + if "dynamo_flat_name_to_original_fqn" in self.module.meta: + result_gm.meta["dynamo_flat_name_to_original_fqn"] = self.module.meta[ + "dynamo_flat_name_to_original_fqn" + ] + return result_gm + + +class ExportResult(NamedTuple): + graph_module: torch.fx.GraphModule + guards: _guards.GuardsSet + # NB: Do not add new fields without overriding __iter__; people are + # destructuring so it is BC-breaking + + +def check_signature_rewritable(graph): + input_errors = [] + for node in graph.graph.nodes: + if node.op == "placeholder": + assert hasattr(node, "_dynamo_source") + source = node._dynamo_source + user_stacks = graph._source_to_user_stacks.get(source) + if user_stacks is None: + continue + assert len(user_stacks) > 0 + # In some cases we may not have a useful stack. Look for a + # useful stack + stack = None + for s in user_stacks: + if len(s) == 0: + continue + stack = s + break + if stack is None: + msg = f"{source.name()}, a closed over free variable" + else: + tb = "".join(traceback.format_list(stack)) + extra = "" + if len(user_stacks) > 1: + extra = f"(elided {len(user_stacks)-1} more accesses)" + msg = f"{source.name()}, accessed at:\n{tb}{extra}" + # TODO: option to print ALL of the stack traces at once + input_errors.append(msg) + + if input_errors: + raise UserError( + UserErrorType.INVALID_INPUT, + "Cannot export model which references tensors that are neither " + "buffers/parameters/constants nor are direct inputs. 
For each tensor, if you'd " + "like this tensor to be an explicit input, add it as a dummy argument " + "to the top-level model definition you are exporting; if you would " + "like its value to be embedded as an exported constant, wrap its access " + "in a function marked with @assume_constant_result.\n\n" + + "\n\n".join(input_errors), + ) + + +def rewrite_signature( + f_sig, + graph, + fake_mode, + flat_args, + in_spec, + example_fake_inputs, + graph_captured_input, + graph_captured_output, + dynamo_traced_result, + flat_args_dynamic_dims, +): + orig_args, orig_kwargs = pytree.tree_unflatten(flat_args, in_spec) + + def check_user_input_output(flat_values, error_type): + supported_types = [ + torch.Tensor, + torch.SymInt, + torch.SymFloat, + torch.SymBool, + torch._C.ScriptObject, + ] + list(common_constant_types) + + def is_supported_type(val): + return isinstance(val, tuple(supported_types)) + + value_type = "input" if error_type == UserErrorType.INVALID_INPUT else "output" + # We only check that the outputs are not None. Inputs can be None. + for v in flat_values: + if not is_supported_type(v): + if error_type == UserErrorType.INVALID_INPUT and v is None: + continue + + raise UserError( + error_type, + f"It looks like one of the {value_type}s with type `{type(v)}` " + "is not supported or pytree-flattenable. \n" + f"Exported graphs {value_type}s can only contain the " + f"following supported types: {supported_types}. \n" + "If you are using a custom class object, " + "please register a pytree_flatten/unflatten function " + "using `torch.utils._pytree.register_pytree_node` or " + "`torch.export.register_dataclass`.", + ) + + check_user_input_output(flat_args, UserErrorType.INVALID_INPUT) + flat_results_traced, out_spec_traced = pytree.tree_flatten(dynamo_traced_result) + check_user_input_output(flat_results_traced, UserErrorType.INVALID_OUTPUT) + + def produce_matching(debug_type, sources, candidates): + matched_elements_positions: List[Optional[int]] = [] + dict_of_source_vals = {} + for i, val in enumerate(sources): + dict_of_source_vals[id(val)] = i + + for i, val in enumerate(candidates): + if isinstance(val, tuple(common_constant_types)): + matched_elements_positions.append(None) + elif id(val) not in dict_of_source_vals: + raise AssertionError( + f"Unexpectedly found a {type(val)} in the {debug_type}.\n" + 'Please file an issue along with a paste of the logs from TORCH_LOGS="+export"' + ) + else: + matched_elements_positions.append(dict_of_source_vals[id(val)]) + + return matched_elements_positions + + matched_input_elements_positions = produce_matching( + "inputs", flat_args, graph_captured_input + ) + + assert graph_captured_output is not None + matched_output_elements_positions = produce_matching( + "outputs", list(graph_captured_output) + flat_args, flat_results_traced + ) + + new_graph = FlattenInputOutputSignature( + graph, + flat_args, + matched_input_elements_positions, + flat_results_traced, + matched_output_elements_positions, + example_fake_inputs, + flat_args_dynamic_dims, + fake_mode, + ).transform() + + # Make dynamo graph to have same input/output spec as user code + def argument_names(f_sig, args, kwargs) -> List[str]: + def signature_to_fullargspec(sig: inspect.Signature): + # Get a list of Parameter objects from the Signature object + params = list(sig.parameters.values()) + # Separate positional arguments, keyword-only arguments and varargs/varkw + args = [ + p.name + for p in params + if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD + ] + kwonlyargs = [ + 
p.name for p in params if p.kind == inspect.Parameter.KEYWORD_ONLY + ] + varargs = next( + (p.name for p in params if p.kind == inspect.Parameter.VAR_POSITIONAL), + None, + ) + varkw = next( + (p.name for p in params if p.kind == inspect.Parameter.VAR_KEYWORD), + None, + ) + # Get default values for positional arguments and keyword-only arguments + defaults = tuple( + p.default + for p in params + if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD + and p.default is not inspect.Parameter.empty + ) + kwonlydefaults = { + p.name: p.default + for p in params + if p.kind == inspect.Parameter.KEYWORD_ONLY + and p.default is not inspect.Parameter.empty + } + # Get annotations for parameters and return value + annotations = {} + if sig.return_annotation: + annotations = {"return": sig.return_annotation} + for parameter in params: + annotations[parameter.name] = parameter.annotation + # Return a FullArgSpec object with the extracted attributes + return inspect.FullArgSpec( + args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations + ) + + fullargspec = signature_to_fullargspec(f_sig) + + # 1. Map `args` 1-to-1 to positional arguments in original signature. + input_strs = fullargspec.args[: len(args)] + + if len(args) > len(fullargspec.args): + # 2. If there are more arguments left in `args`, they map to varargs in original + # signature. Assign names as {varargs}_0, {varargs}_1, ... + assert fullargspec.varargs is not None, "More arguments than expected" + input_strs += [ + f"{fullargspec.varargs}_{i}" + for i in range(0, len(args) - len(input_strs)) + ] + elif len(args) < len(fullargspec.args): + # 3. If there are fewer arguments in `args` than `fullargspec.args`, + # it implies these are arguments either with default values, or provided in + # `kwargs`. The former can be safely ignored. Because Dynamo.export does not + # export them as part of the function signature. The latter will be handled + # in the next step. + for unprovided_arg in fullargspec.args[ + len(args) : -len(fullargspec.defaults or []) + ]: + assert unprovided_arg in kwargs, f"Missing argument {unprovided_arg}" + + # 4. Keyword arguments provided in `kwargs`. + input_strs += list(kwargs.keys()) + + # 5. Keyword-only arguments with default values if not provided are not exported + # as part of the function signature. + for kwonly_arg in fullargspec.kwonlyargs: + kwonlydefaults = fullargspec.kwonlydefaults or {} + assert ( + kwonly_arg in kwargs or kwonly_arg in kwonlydefaults + ), f"Missing keyword only argument {kwonly_arg}" + + return input_strs + + new_graph.graph._codegen = _PyTreeCodeGen( + _PyTreeInfo( + argument_names(f_sig, orig_args, orig_kwargs), + in_spec, + out_spec_traced, + ) + ) + new_graph.recompile() + return new_graph + + +def export( + f: Callable[..., Any], + *extra_args, + aten_graph: bool = False, + pre_dispatch: bool = False, + decomposition_table: Optional[ + Dict[torch._ops.OpOverload, Callable[..., Any]] + ] = None, + tracing_mode: str = "symbolic", + constraints: Optional[List[Constraint]] = None, + dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None, + assume_static_by_default: bool = False, + same_signature: bool = True, + disable_constraint_solver: bool = False, + _log_export_usage: bool = True, + **extra_kwargs, +) -> Callable[..., ExportResult]: + """ + Export an input function f to a format that can be executed outside of PyTorch using the FX graph. + + Args: + f (callable): A PyTorch function to be exported. 
+ + aten_graph (bool): If True, exports a graph with ATen operators. + If False, exports a graph with Python operators. Default is False. + + pre_dispatch (bool): If True, exports a graph with ATen operators, + but before any logic in the PyTorch dispatcher has run. + This can be useful if you want to apply further transformations on a graph before running it + through autograd, autocast, or any other functionalities that are integrated into the dispatcher. + This flag is only valid if aten_graph=True is set. + Default is False. + + decomposition_table (dict): A dictionary that maps operators to their decomposition functions. + Required if aten_graph or tracing_mode is specified. Default is None. + + tracing_mode (str): If "symbolic", turn on dynamic shapes support. Default is "symbolic". + + constraints: [DEPRECATED: use ``dynamic_shapes`` instead, see below] + An optional list of constraints on the dynamic arguments + that specify their possible range of shapes. By default, shapes of + input torch.Tensors are assumed to be static. If an input torch.Tensor + is expected to have dynamic shapes, please use :func:`dynamic_dim` + to define :class:`Constraint` objects that specify the dynamics and the possible + range of shapes. See :func:`dynamic_dim` docstring for examples on + how to use it. + + dynamic_shapes: + An optional argument where the type should either be: + 1) a dict from argument names of ``f`` to their dynamic shape specifications, + 2) a tuple that specifies dynamic shape specifications for each input in original order. + If you are specifying dynamism on keyword args, you will need to pass them in the order that + is defined in the original function signature. + + The dynamic shape of a tensor argument can be specified as either + (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is + not required to include static dimension indices in this dict, but when they are, + they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None, + where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions + are denoted by None. Arguments that are dicts or tuples / lists of tensors are + recursively specified by using mappings or sequences of contained specifications. + + same_signature (bool): If True, rewrite the returned graph's signature to be the same as f. + + disable_constraint_solver (bool): Whether the dim constraint solver must be disabled. + + Returns: + A function that given args and kwargs, returns a tuple of (graph, guards) + Graph: An FX graph representing the execution of the input PyTorch function with the provided arguments and options. + Guards: The guards we accumulated during tracing f above + + Raises: + AssertionError: If decomposition_table is specified without setting aten_graph=True, + or if graph breaks during tracing in export. + + AssertionError: If Dynamo input and output is not consistent with traced input/output. + + Note - this headerdoc was authored by ChatGPT, with slight modifications by the author. + """ + if _log_export_usage: + log_export_usage(event="export.private_api", flags={"_dynamo"}) + + # Deal with "local variable referenced before assignment" + _f = f + _assume_static_by_default = assume_static_by_default + + def inner(*args, **kwargs): + nonlocal constraints + if constraints is not None: + if _log_export_usage: + warnings.warn( + "Using `constraints` to specify dynamic shapes for export is DEPRECATED " + "and will not be supported in the future. 
" + "Please use `dynamic_shapes` instead (see docs on `torch.export.export`).", + DeprecationWarning, + stacklevel=2, + ) + else: + constraints = _process_dynamic_shapes(_f, args, kwargs, dynamic_shapes) + f = _f + assume_static_by_default = _assume_static_by_default + check_if_dynamo_supported() + torch._C._log_api_usage_once("torch._dynamo.export") + if decomposition_table is not None: + assert ( + aten_graph + ), "Specifying a decomposition_table table or tracing mode is illegal without setting aten_graph=True" + if pre_dispatch: + assert aten_graph, "pre_dispatch=True can only be used when aten_graph=True" + f = innermost_fn(f) + call_to_inspect = f.forward if isinstance(f, torch.nn.Module) else f + original_signature = inspect.signature(call_to_inspect) + graph = None + out_guards = None + graph_captured_input = None + graph_captured_result: Optional[Tuple[torch.Tensor, ...]] = None + fake_mode = None + + def guard_export_print(guards: _guards.GuardsSet): + nonlocal out_guards + assert ( + out_guards is None + ), "whole graph export entails exactly one guard export" + out_guards = guards + + example_inputs = [] + + def dynamo_normalization_capturing_compiler( + gm: torch.fx.GraphModule, inner_example_inputs + ): + nonlocal graph + assert ( + graph is None + ), "Tried to emit a second graph during export. Tracing through 'f' must produce a single graph." + graph = gm + + nonlocal fake_mode, example_inputs + # NB: do NOT pass inner_example_inputs here, we are detecting the + # Dynamo allocated fake mode, which should be DISTINCT from a + # potential outer ambient fake mode which the user provided. + # example_inputs is always the user specified inputs, so they + # would have the wrong fake mode attached to them + fake_mode = _guards.detect_fake_mode() + example_inputs = inner_example_inputs + + def result_capturing_wrapper(*graph_inputs): + nonlocal graph_captured_result + nonlocal graph_captured_input + + graph_captured_input = graph_inputs + assert graph is not None + + named_parameters = dict(graph.named_parameters(remove_duplicate=False)) + named_buffers = dict(graph.named_buffers(remove_duplicate=False)) + + ambient_fake_mode = ( + _guards.detect_fake_mode(graph_inputs) + if _guards.detect_fake_mode(graph_inputs) is not None + else fake_mode + ) + + with ambient_fake_mode, enable_python_dispatcher(): + params_and_buffers = { + **named_parameters, + **named_buffers, + } + fake_params_buffers = dict() + + for name, value in params_and_buffers.items(): + fake_params_buffers[name] = ambient_fake_mode.from_tensor( + value, static_shapes=True + ) + + fake_graph_inputs = pytree.tree_map( + ambient_fake_mode.from_tensor, graph_inputs + ) + graph_captured_result = torch.func.functional_call( + graph, fake_params_buffers, fake_graph_inputs + ) + + return graph_captured_result + + return result_capturing_wrapper + + # Note: This is needed by rewrite_signature. We need to put it before + # optimize_assert since user program may mutate the inputs. 
+ flat_args, in_spec = pytree.tree_flatten((args, kwargs)) + + remove_from_cache(f) + constraint_violation_error = None + if tracing_mode != "symbolic": + assume_static_by_default = True + with config.patch( + specialize_int=True, + assume_static_by_default=assume_static_by_default, + automatic_dynamic_shapes=False, + capture_dynamic_output_shape_ops=True, + capture_scalar_outputs=True, + ): + opt_f = optimize_assert( + dynamo_normalization_capturing_compiler, + hooks=Hooks( + guard_export_fn=guard_export_print, + guard_fail_fn=None, + ), + export=True, + export_constraints=constraints, + )(f) + # TODO(voz): We may have instances of `f` that mutate inputs, we should track sideeffects and reject. + try: + result_traced = opt_f(*args, **kwargs) + except ConstraintViolationError as e: + constraint_violation_error = e + remove_from_cache(f) + + if ( + not disable_constraint_solver + and (shape_env := getattr(fake_mode, "shape_env", None)) is not None + and (dim_constraints := shape_env.dim_constraints) is not None + and not isinstance( + call_to_inspect, (torch._ops.OpOverloadPacket, torch._ops.OpOverload) + ) + and not trace_rules.check(call_to_inspect) + ): + dim_constraints.solve() + dim_constraints.remove_redundant_dynamic_results() + forced_specializations = dim_constraints.forced_specializations() + msg = dim_constraints.prettify_results( + original_signature, constraint_violation_error, forced_specializations + ) + if constraint_violation_error: + constraint_violation_error.args = ( + constraint_violation_error.args[0] + msg, + ) + else: + if forced_specializations: + constraint_violation_error = ConstraintViolationError(msg) + else: + log.info( + "Summary of dimension constraints:%s", + msg, + ) + + # Error if we have any constraints on static values + for k in shape_env.var_to_range.keys(): + if isinstance(k, sympy.Integer): + constraint_violation_error = ConstraintViolationError( + f"{''.join(traceback.format_list(shape_env.var_to_stack[k]))}\n" + "It appears that you're trying to set a constraint on a " + f"value which we evaluated to have a static value of {k}. " + 'Set TORCH_LOGS="+export" for more information.' + ) + if constraint_violation_error: + raise constraint_violation_error + + assert ( + graph is not None + ), "Failed to produce a graph during tracing as no tensor operations were found." 
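A hedged sketch of the config.patch pattern used above: outside of export, the same context-manager API can pin Dynamo config flags around a compilation. The toy function and the particular flag values are only for illustration.

import torch
import torch._dynamo.config as dynamo_config

def f(x):
    return x * 2

# config.patch temporarily overrides config flags and restores them on exit,
# mirroring how export() pins specialize_int / assume_static_by_default while tracing.
with dynamo_config.patch(assume_static_by_default=True, capture_scalar_outputs=True):
    compiled = torch.compile(f)
    compiled(torch.randn(3))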
+ assert hasattr(graph, "_source_to_user_stacks") + assert out_guards is not None, "Failed to produce guards during tracing" + assert fake_mode is not None + + log.info( + "Dynamo captured graph:\n\n%s", graph.print_readable(print_output=False) + ) + + # This check need to happened before aten_graph + # because placeholder's _source_node attribute is not preserved by make_fx + if same_signature: + check_signature_rewritable(graph) + + # NB: This is mostly hitting the cache; Dynamo already converted these + example_fake_inputs = [fake_mode.from_tensor(t) for t in example_inputs] + + if aten_graph: + # Running graph with interpreter is needed for propagating the stack_trace + def graph_with_interpreter(*args): + with torch.fx.traceback.preserve_node_meta(): + return torch.fx.Interpreter(graph).run(*args) + + with maybe_disable_fake_tensor_mode(), enable_python_dispatcher(), ( + fake_mode + ): + try: + graph = make_fx( + graph_with_interpreter, + decomposition_table=decomposition_table, + tracing_mode="real", + _allow_non_fake_inputs=True, + pre_dispatch=pre_dispatch, + _allow_fake_constant=False, + )(*example_fake_inputs) + except CondOpArgsMismatchError as e: + # Wrap the internal error to the user-facing error + raise UserError( # noqa: TRY200 + UserErrorType.DYNAMIC_CONTROL_FLOW, + str(e), + case_name="cond_operands", + ) + + assert graph is not None + for node in graph.graph.nodes: + if node.op == "get_attr" and isinstance( + getattr(graph, node.target), torch.Tensor + ): + node.meta["val"] = fake_mode.from_tensor( + getattr(graph, node.target), static_shapes=True + ) + + if same_signature: + flat_args_dynamic_dims = [ + {c.dim for c in (constraints or ()) if c.w_tensor() is x} + for x in flat_args + ] + graph = rewrite_signature( + original_signature, + graph, + fake_mode, + flat_args, + in_spec, + example_fake_inputs, + graph_captured_input, + graph_captured_result, + result_traced, # type: ignore[possibly-undefined] + flat_args_dynamic_dims, + ) + # Store constraints and inputs as metadata for user passes, e.g. turn constraints to runtime check + assert graph is not None + graph.meta["input_shape_constraints"] = ( + [constraint.serializable_spec for constraint in constraints] + if constraints + else [] + ) + + return ExportResult(graph, out_guards) + + if extra_args or extra_kwargs: + warnings.warn( + "export(f, *args, **kwargs) is deprecated, use export(f)(*args, **kwargs) instead. " + "If you don't migrate, we may break your export call in the future if your user defined kwargs " + "conflict with future kwargs added to export(f)." + ) + return inner(*extra_args, **extra_kwargs) + else: + return inner + + +def optimize_assert( + backend, + *, + hooks=Hooks(None, None), + export=False, + export_constraints=None, + dynamic=None, +): + """ + The same as `torch._dynamo.optimize(backend, nopython=True)` + """ + backend = get_compiler_fn(backend) + + # Find if backend has any extra context manager + backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context) + + return _optimize_catch_errors( + convert_frame.convert_frame_assert( + backend, export=export, export_constraints=export_constraints + ), + hooks, + backend_ctx_ctor, + export=export, + dynamic=dynamic, + ) + + +class TorchPatcher: + @staticmethod + @functools.lru_cache(None) + def patch(): + # A better way to disable the following would be decorate the source + # functions with @torch._disable_dynamo. However, this causes issues + # with torch.deploy internally. 
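A small usage sketch for optimize_assert as documented above (the backend string and the function are illustrative): it behaves like torch._dynamo.optimize(backend, nopython=True), so an unsupported construct raises instead of graph-breaking.

import torch
import torch._dynamo as dynamo

def fn(x):
    return torch.sin(x) + 1

# Equivalent to dynamo.optimize("eager", nopython=True)(fn): any graph break
# encountered during tracing raises instead of silently falling back to eager.
opt_fn = dynamo.optimize_assert("eager")(fn)
print(opt_fn(torch.randn(4)))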
+ from .decorators import disable + + torch.jit.trace = disable(torch.jit.trace) + torch.jit.trace_module = disable(torch.jit.trace_module) + torch.jit._get_trace_graph = disable(torch.jit._get_trace_graph) + torch.fx._symbolic_trace.Tracer.trace = disable( + torch.fx._symbolic_trace.Tracer.trace + ) + torch.distributions.Distribution.set_default_validate_args(False) + + from ..optim import ( + adadelta, + adagrad, + adam, + adamax, + adamw, + asgd, + lbfgs, + nadam, + radam, + rmsprop, + rprop, + sgd, + sparse_adam, + ) + + optimizer_modules = { + adadelta, + adagrad, + adam, + adamax, + adamw, + asgd, + lbfgs, + nadam, + radam, + rmsprop, + rprop, + sgd, + sparse_adam, + } + + for opt_mod in optimizer_modules: + opt_name = opt_mod.__name__.split(".")[-1] + fused_fn_name = f"_fused_{opt_name}" + single_tensor_fn_name = f"_single_tensor_{opt_name}" + + if hasattr(opt_mod, fused_fn_name): + setattr( + opt_mod, fused_fn_name, disable(getattr(opt_mod, fused_fn_name)) + ) + + optimizer_classes = [ + opt + for opt in torch.optim.__dict__.values() + if inspect.isclass(opt) and issubclass(opt, torch.optim.Optimizer) + ] + + # Note: we don't support sparsity or tracing through backwards + excluded_optimizer_classes = { + torch.optim.SparseAdam, + torch.optim.LBFGS, + } + + for opt in optimizer_classes: + if opt in excluded_optimizer_classes: + opt.step = disable(opt.step) + + if hasattr(opt, "_init_group"): + opt._init_group = disable(opt._init_group) + + @staticmethod + def suppress_torch_distributed_warnings(fn): + def inner_fn(*args, **kwargs): + warnings.filterwarnings( + "ignore", category=UserWarning, module="torch.distributed" + ) + return fn(*args, **kwargs) + + return inner_fn diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/exc.py b/venv/lib/python3.10/site-packages/torch/_dynamo/exc.py new file mode 100644 index 0000000000000000000000000000000000000000..f3c75ad7e68d510e08da5ef319ba8585fa81b5a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/exc.py @@ -0,0 +1,335 @@ +import os +import textwrap +from enum import auto, Enum +from traceback import extract_stack, format_exc, format_list, StackSummary +from typing import cast, NoReturn, Optional + +import torch._guards + +from . import config + +from .utils import counters + + +def exportdb_error_message(case_name): + return ( + "For more information about this error, see: " + + "https://pytorch.org/docs/main/generated/exportdb/index.html#" + + case_name.replace("_", "-") + ) + + +import logging + +log = logging.getLogger(__name__) +graph_breaks_log = torch._logging.getArtifactLogger(__name__, "graph_breaks") + + +class TorchDynamoException(RuntimeError): + pass + + +class InternalTorchDynamoError(TorchDynamoException): + pass + + +class RestartAnalysis(TorchDynamoException): + pass + + +class SpeculationRestartAnalysis(RestartAnalysis): + pass + + +class UnspecializeRestartAnalysis(RestartAnalysis): + pass + + +class SkipFrame(TorchDynamoException): + pass + + +class TorchRuntimeError(TorchDynamoException): + pass + + +class InvalidBackend(TorchDynamoException): + def __init__(self, name): + super().__init__( + f"Invalid backend: {name!r}, see `torch._dynamo.list_backends()` for available backends." + ) + + +class ResetRequired(TorchDynamoException): + def __init__(self): + super().__init__( + textwrap.dedent( + """ + Must call `torch._dynamo.reset()` before changing backends. Detected two calls to + `torch.compile()` with a different backend compiler arguments. 
+ """ + ) + ) + + +class BackendCompilerFailed(TorchDynamoException): + def __init__(self, backend_fn, inner_exception): + self.backend_name = getattr(backend_fn, "__name__", "?") + self.inner_exception = inner_exception + msg = f"backend={self.backend_name!r} raised:\n{type(inner_exception).__name__}: {inner_exception}" + super().__init__(msg) + + +class Unsupported(TorchDynamoException): + def __init__(self, msg): + super().__init__(msg) + self.real_stack = torch._guards.TracingContext.extract_stack() + self.msg = msg + self.category: Optional[str] = None + self.add_to_stats() + + def remove_from_stats(self): + assert self.category is not None + counters[self.category][self.msg] -= 1 + if counters[self.category][self.msg] <= 0: + del counters[self.category][self.msg] + + def add_to_stats(self, category="unimplemented"): + self.category = category + counters[category][self.msg] += 1 + + +class RecompileError(TorchDynamoException): + pass + + +class ArgsMismatchError(Unsupported): + def __init__(self, msg): + super().__init__(msg) + + +class AttributeMutationError(Unsupported): + def __init__(self, msg): + super().__init__(msg) + + +class CondOpArgsMismatchError(ArgsMismatchError): + """ + Internal error from cond() due to arguments mismatch. + """ + + def __init__(self, msg): + super().__init__(msg) + + +class UserErrorType(Enum): + DYNAMIC_CONTROL_FLOW = auto() + ANTI_PATTERN = auto() + STANDARD_LIBRARY = auto() + CONSTRAINT_VIOLATION = auto() + DYNAMIC_DIM = auto() + INVALID_INPUT = auto() + INVALID_OUTPUT = auto() + + +class UserError(Unsupported): + def __init__(self, error_type: UserErrorType, msg, case_name=None): + """ + Type of errors that would be valid in Eager, but not supported in TorchDynamo. + The error message should tell user about next actions. + + error_type: Type of user error + msg: Actionable error message + case_name: (Optional) Unique name (snake case) for the usage example in exportdb. + """ + if case_name is not None: + assert isinstance(case_name, str) + if msg.endswith("."): + msg += " " + else: + msg += "\n" + msg += exportdb_error_message(case_name) + super().__init__(msg) + self.error_type = error_type + self.message = msg + + +class UncapturedHigherOrderOpError(TorchDynamoException): + pass + + +class IncorrectUsage(Exception): + pass + + +# These exceptions are ok to fallback to eager/graph_break. +exceptions_allowed_to_be_fallback = ( + torch._subclasses.fake_tensor.DataDependentOutputException, + torch._subclasses.fake_tensor.DynamicOutputShapeException, + torch._subclasses.fake_tensor.UnsupportedOperatorException, + torch._subclasses.fake_tensor.UnsupportedFakeTensorException, +) + + +def unimplemented_with_warning(e: Exception, code, msg: str) -> NoReturn: + # This function calls unimplemented internally and eventually graph breaks + # or falls to eager. unimplemented itself does not print any user warnings, + # i.e., its very silent. This helper function is intended when an error is + # encountered in the torch.compile stack which is worth showing as warning + # to the user. For example, if AOT Autograd backend fails with a fake tensor + # exception, its ok to fallback to eager but not silently. Here, we can use + # this function to log the message and the stack trace. 
+ graph_break_msg = format_error_msg_verbose(e, code) + graph_breaks_log.debug("%s", graph_break_msg) + log.warning(msg) + raise unimplemented(msg) from e + + +def unimplemented(msg: str) -> NoReturn: + assert msg != os.environ.get("BREAK", False) + raise Unsupported(msg) + + +def warning(msg: str) -> None: + counters["warnings"][msg] += 1 + assert msg != os.environ.get("BREAK", False) + + +# KeyError has special handling for its args +# see https://github.com/python/cpython/blob/3.11/Objects/exceptions.c#L2534 for details +class KeyErrorMsg: + def __init__(self, value): + self.value = value + + def __str__(self): + return str(self.value) + + def __repr__(self) -> str: + return self.__str__() + + +def augment_exc_message(exc: Exception, msg: str = "\n", export: bool = False) -> None: + import traceback + + exc.innermost_user_frame_summary = None # type: ignore[attr-defined] + + real_stack = get_real_stack(exc) + if real_stack is not None and len(real_stack) > 0: + exc.innermost_user_frame_summary = real_stack[-1] # type: ignore[attr-defined] + msg += f"\nfrom user code:\n {''.join(traceback.format_list(real_stack))}" + + if config.replay_record_enabled and hasattr(exc, "record_filename"): + msg += f"\nLast frame execution written to {exc.record_filename}. To run only this frame while debugging, run\ + torch._dynamo.replay('{exc.record_filename}').\n" + + if not config.verbose and hasattr(exc, "real_stack"): + msg += '\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n' + + if hasattr(exc, "inner_exception") and hasattr( + exc.inner_exception, "minifier_path" + ): + if hasattr(exc.inner_exception, "buck_command"): + msg += ( + f"\nMinifier script written to {exc.inner_exception.minifier_path}. Run " + f"this buck command to find the smallest traced graph " + f"which reproduces this error: {exc.inner_exception.buck_command}\n" + ) + else: + msg += ( + f"\nMinifier script written to {exc.inner_exception.minifier_path}. Run " + "this script to find the smallest traced graph which reproduces this error.\n" + ) + + if not config.suppress_errors and not export: + msg += ( + "\n\n" + "You can suppress this exception and fall back to eager by setting:\n" + " import torch._dynamo\n" + " torch._dynamo.config.suppress_errors = True\n" + ) + + old_msg = "" if len(exc.args) == 0 else str(exc.args[0]) + + if isinstance(exc, KeyError): + exc.args = (KeyErrorMsg(old_msg + msg),) + exc.args[1:] + else: + new_msg = old_msg + msg + exc.args = (new_msg,) + exc.args[1:] + + +def get_real_stack(exc: Exception, frame=None) -> Optional[StackSummary]: + real_stack = getattr(exc, "real_stack", None) + if real_stack is None: + return None + + # NB: it's possible for real_stack to be []; we still attempt to + # report a stack anyway because the stack_above_dynamo may still + # be useful for debugging + + stack_above_dynamo = [] + if frame is not None: + # NB: frame is PyInterpreterFrame on Python 3.11 and later, + # not a TRUE frame object. You can't actually feed it + # to traceback because it doesn't have enough information. + # To solve this problem, we technically should just materialize + # the frame, the same way _PyFrame_GetFrameObject would do + # (but we cannot actually do this, because this populates + # frame_obj field, which default eval frame doesn't like). + # + # Fortunately, in this case, we can hack it: there's no need + # to actually use the truly top frame, we can just extract + # from where we are right now and rely on filter_stack to + # get rid of all the dynamo frames. 
For ease of testing + # we apply this behavior to ALL Python versions + stack_above_dynamo = filter_stack(extract_stack()) + + return cast(StackSummary, stack_above_dynamo + real_stack) + + +# filter out all frames after entering dynamo +def filter_stack(stack): + user_stack = [] + for frame in stack: + if "convert_frame" in frame.filename: + break + if "eval_frame" in frame.filename or "torch._dynamo.optimize(" in frame.line: + continue + user_stack.append(frame) + + return user_stack + + +def format_error_msg_verbose( + exc: Exception, code, record_filename=None, frame=None +) -> str: + msg = ( + f"WON'T CONVERT {code.co_name} {code.co_filename} line {code.co_firstlineno}\n" + ) + msg += "=" * 10 + " TorchDynamo Stack Trace " + "=" * 10 + "\n" + msg += format_exc() + real_stack = get_real_stack(exc, frame) + if real_stack is not None: + msg += ( + "\n" + + "=" * 10 + + " The above exception occurred while processing the following code " + + "=" * 10 + + "\n\n" + ) + msg += "".join(format_list(real_stack)) + msg += "\n" + msg += "=" * 10 + + return msg + + +def format_error_msg(exc: Exception, code, record_filename=None, frame=None) -> str: + msg = os.linesep * 2 + + if config.verbose: + msg = format_error_msg_verbose(exc, code, record_filename, frame) + else: + msg = f"WON'T CONVERT {code.co_name} {code.co_filename}\ + line {code.co_firstlineno} \ndue to: \n{format_exc()}" + + return msg diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/external_utils.py b/venv/lib/python3.10/site-packages/torch/_dynamo/external_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..87e1e44fbe1aac89176e2fbce6982a19f8fe2ddb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/external_utils.py @@ -0,0 +1,103 @@ +# This module contains functions that *will be allowed* by dynamo + +import functools + +import torch +import torch.utils._pytree as pytree + +try: + import numpy as np +except ModuleNotFoundError: + np = None # type: ignore[assignment] + + +def is_compiling() -> bool: + """ + Indicates whether we are tracing/compiling with torch.compile() or torch.export(). + + If need to check specifically that TorchDynamo is used, then use + torch.compiler.is_dynamo_compiling(). + + TODO(khabinov): we should deprecate this function and use one of these two: + * torch.compiler.is_compiling(), + * torch.compiler.is_dynamo_compiling(). + It will depend on the context where to use what. + """ + return torch.compiler.is_compiling() + + +def wrap_inline(fn): + """ + Create an extra frame around fn that is not in skipfiles + """ + + @functools.wraps(fn) + def inner(*args, **kwargs): + return fn(*args, **kwargs) + + return inner + + +def call_hook(hook, *args): + """ + Used by compiled autograd to handle hook returning None + """ + result = hook(*args) + if result is None: + return args[0] + return result + + +def wrap_numpy(f): + r"""Decorator that turns a function from ``np.ndarray``s to ``np.ndarray``s into a function + from ``torch.Tensor``s to ``torch.Tensor``s. 
+ """ + if not np: + return f + + @functools.wraps(f) + def wrap(*args, **kwargs): + args, kwargs = pytree.tree_map_only( + torch.Tensor, lambda x: x.numpy(), (args, kwargs) + ) + out = f(*args, **kwargs) + return pytree.tree_map_only(np.ndarray, lambda x: torch.as_tensor(x), out) + + return wrap + + +class FakeContext: + def __init__(self, saved_tensors): + # this will cache the results of saved_tensors + # and will no longer call into c++ binding + self.saved_tensors = saved_tensors + + +def call_backward(backward_fn, saved_tensors, *args): + grads = backward_fn(FakeContext(saved_tensors), *args) + + # in eager, we wrap in a tuple when there's only one grad output + if type(grads) is not tuple: + grads = (grads,) + + return grads + + +def untyped_storage_size(x: torch.Tensor): + return x.untyped_storage().size() + + +def call_hook_from_backward_state(*args, bw_state, hook_name: str, **kwargs): + return getattr(bw_state, hook_name)(*args, **kwargs) + + +def call_module_hooks_from_backward_state( + _, result, *args, bw_state, hooks_name: str, module_name: str +): + module = getattr(bw_state, module_name) + hooks = getattr(bw_state, hooks_name) + for hook in hooks: + new_result = hook(module, result, *args) + if new_result is not None: + result = new_result + return result diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/funcname_cache.py b/venv/lib/python3.10/site-packages/torch/_dynamo/funcname_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..c43e9a230517677062b26dc7509b57c898f98143 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/funcname_cache.py @@ -0,0 +1,57 @@ +import tokenize + +from typing import Dict, List, Optional + +cache: Dict[str, Dict[int, str]] = {} + + +def clearcache() -> None: + cache.clear() + + +def _add_file(filename: str) -> None: + try: + with open(filename) as f: + tokens = list(tokenize.generate_tokens(f.readline)) + except OSError: + cache[filename] = {} + return + + # NOTE: undefined behavior if file is not valid Python source, + # since tokenize will have undefined behavior. + result: Dict[int, str] = {} + # current full funcname, e.g. xxx.yyy.zzz + cur_name = "" + cur_indent = 0 + significant_indents: List[int] = [] + + for i, token in enumerate(tokens): + if token.type == tokenize.INDENT: + cur_indent += 1 + elif token.type == tokenize.DEDENT: + cur_indent -= 1 + # possible end of function or class + if significant_indents and cur_indent == significant_indents[-1]: + significant_indents.pop() + # pop the last name + cur_name = cur_name.rpartition(".")[0] + elif ( + token.type == tokenize.NAME + and i + 1 < len(tokens) + and tokens[i + 1].type == tokenize.NAME + and (token.string == "class" or token.string == "def") + ): + # name of class/function always follows class/def token + significant_indents.append(cur_indent) + if cur_name: + cur_name += "." 
+ cur_name += tokens[i + 1].string + result[token.start[0]] = cur_name + + cache[filename] = result + + +def get_funcname(filename: str, lineno: int) -> Optional[str]: + if filename not in cache: + _add_file(filename) + return cache[filename].get(lineno, None) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/guards.py b/venv/lib/python3.10/site-packages/torch/_dynamo/guards.py new file mode 100644 index 0000000000000000000000000000000000000000..682610aeb908a47d756da38a18543493e753b970 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/guards.py @@ -0,0 +1,1505 @@ +from __future__ import annotations + +import ast +import builtins +import collections +import dataclasses +import enum +import functools +import importlib +import inspect +import itertools +import logging +import math +import os +import re +import sys +import textwrap +import types +import weakref +from inspect import currentframe, getframeinfo +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union +from weakref import ReferenceType + + +try: + import numpy as np +except ModuleNotFoundError: + np = None # type: ignore[assignment] + +import torch +import torch.utils._device +from torch._dynamo.source import ( + is_from_local_source, + TensorProperty, + TensorPropertySource, +) + +from torch._guards import ( + DuplicateInputs, + Guard, + GuardBuilderBase, + GuardEnvExpr, + GuardSource, + Source, +) + +from torch._logging import structured +from torch.fx.experimental.symbolic_shapes import ( + EqualityConstraint, + is_symbolic, + SYMPY_INTERP, +) +from torch.utils._traceback import format_frame, report_compile_source_on_error +from torch.utils.weak import TensorWeakRef + +from . import config, convert_frame, exc, mutation_guard +from .eval_frame import set_guard_error_hook +from .source import AttrSource, DefaultsSource, LocalSource, TypeSource +from .types import CacheEntry, ExtraState, GuardedCode, GuardFail, GuardFn # noqa: F401 +from .utils import ( + common_constant_types, + dict_keys_repr, + guard_failures, + istype, + key_is_id, + key_to_id, + orig_code_map, + tensor_always_has_static_shape, + tuple_iterator_getitem, + tuple_iterator_len, +) + +log = logging.getLogger(__name__) +guards_log = torch._logging.getArtifactLogger(__name__, "guards") +recompiles_log = torch._logging.getArtifactLogger(__name__, "recompiles") +recompiles_verbose_log = torch._logging.getArtifactLogger( + __name__, "recompiles_verbose" +) +verbose_guards_log = torch._logging.getArtifactLogger(__name__, "verbose_guards") + +TensorGuards = torch._C._dynamo.guards.TensorGuards +check_obj_id = torch._C._dynamo.guards.check_obj_id +check_type_id = torch._C._dynamo.guards.check_type_id +dict_version = torch._C._dynamo.guards.dict_version + + +# For user stack printing +@functools.lru_cache(None) +def uninteresting_files(): + import torch._dynamo.external_utils + + mods = [ + torch._dynamo.external_utils, + ] + return {inspect.getfile(m) for m in mods} + + +CLOSURE_VARS = { + "___check_type_id": check_type_id, + "___check_obj_id": check_obj_id, + "___odict_getitem": collections.OrderedDict.__getitem__, + "___key_to_id": key_to_id, + "___dict_version": dict_version, + "___dict_contains": lambda a, b: a in b, + "___tuple_iterator_len": tuple_iterator_len, + "___tuple_iterator_getitem": tuple_iterator_getitem, + "__math_isnan": math.isnan, + "__numpy_isnan": None if np is None else np.isnan, + "inf": float("inf"), + "__load_module": importlib.import_module, + "utils_device": torch.utils._device, + "device": 
torch.device, + "___from_numpy": + # If not numpy array, piggy back on e.g. tensor guards to check type + (lambda a: torch.as_tensor(a) if isinstance(a, (np.generic, np.ndarray)) else a), + "torch": torch, + "inspect": inspect, +} + +if sys.version_info[:2] <= (3, 8): + # [Note: Python Version <= 3.8] + # This branch should be dropped when we drop support for Python 3.8. + # Reason: 'ast.unparse' function was introduced in Python 3.9. + + try: + import astunparse # type: ignore[import] + + def _ast_unparse(node: ast.AST) -> str: + return astunparse.unparse(node).replace("\n", "") + + HAS_UNPARSE_FUNCTIONS = True + except ImportError: + HAS_UNPARSE_FUNCTIONS = False + pass +else: + HAS_UNPARSE_FUNCTIONS = True + + def _ast_unparse(node: ast.AST) -> str: + return ast.unparse(node).replace("\n", "") + + +def strip_function_call(name): + """ + "___odict_getitem(a, 1)" => "a" + "a.layers[slice(2)][0]._xyz" ==> "a" + "getattr(a.layers[slice(2)][0]._abc, '0')" ==> "a" + "getattr(getattr(a.x[3], '0'), '3')" ==> "a" + "a.layers[slice(None, -1, None)][0]._xyz" ==> "a" + """ + # recursively find valid object name in function + valid_name = re.compile("[A-Za-z_].*") + curr = "" + for char in name: + if char in " (": + curr = "" + elif char in "),[]": + if curr and curr != "None" and valid_name.match(curr): + return strip_function_call(curr) + else: + curr += char + + return strip_getattr_getitem(name) + + +def strip_getattr_getitem(name): + """ + "a[1]" => "a" + "a.foo" => "a" + """ + return re.split(r"[.\[]", name)[0] + + +def get_verbose_code_part(code_part, guard): + extra = "" + if guard.user_stack: + for fs in reversed(guard.user_stack): + if fs.filename not in uninteresting_files(): + extra = f" # {format_frame(fs, line=True)}" + break + elif guard.stack: + extra = f" # {format_frame(guard.stack.summary()[-1])}" + + return f"{code_part:<60}{extra}" + + +def convert_to_concrete_values(size_or_stride): + converted: List[Optional[int]] = [] + for dim in size_or_stride: + if not is_symbolic(dim): + converted.append(dim) + else: + assert isinstance(dim, torch.SymInt) + converted.append(dim.node.maybe_as_int()) + return converted + + +def get_tensor_guard_code_part(value, name, sizes, strides): + pytype = type(value) + dispatch_key = ( + torch._C._dispatch_keys(value) | torch._C._dispatch_tls_local_include_set() + ) - torch._C._dispatch_tls_local_exclude_set() + dtype = value.dtype + device_index = value.device.index + requires_grad = value.requires_grad + guard_str = ( + f"check_tensor({name}, {pytype.__qualname__}, {dispatch_key}, {dtype}, " + f"device={device_index}, requires_grad={requires_grad}, size={sizes}, stride={strides})" + ) + return guard_str + + +# The ready to eval generated code (possibly multiple parts) for a guard, plus +# the original guard object that created it for provenance +@dataclasses.dataclass +class GuardCodeList: + code_list: List[str] + guard: Guard + + +class GuardBuilder(GuardBuilderBase): + def __init__( + self, + id_ref: Callable[[Any], str], + source_ref: Callable[[Source], str], + lookup_weakrefs: Callable[[object], ReferenceType[object]], + local_scope: Dict[str, object], + global_scope: Dict[str, object], + check_fn_manager: CheckFunctionManager, + ): + self.id_ref = id_ref + self.source_ref = source_ref + self.lookup_weakrefs = lookup_weakrefs + self.scope: Dict[str, Dict[str, object]] = {"L": local_scope, "G": global_scope} + self.scope["__builtins__"] = builtins.__dict__.copy() + for ( + name, + package_module, + ) in 
torch.package.package_importer._package_imported_modules.items(): + name = name.replace(">", "_").replace("<", "_").replace(".", "_dot_") + # Write the package module into the scope so that we can import it + self.scope["__builtins__"][name] = package_module + # Write the demangled name to the scope so that we can use it + self.scope[name] = package_module + + self.argnames: List[str] = [] + # Code is python expression strings generated for each guard + self.code: List[GuardCodeList] = [] + # shape_env_code is only used by builder and is used for + # shape env code. This exists only because we need to make sure + # shape env guards get run after tensor match guards (since the + # tensor match guards make sure we actually have tensors) + self.shape_env_code: List[GuardCodeList] = [] + + # [Note - On Eager Tensor Guards] + # Most of the time, we generate Python code in a guard to directly + # check various properties. However, tensors are a bit special; + # it is too slow to check their properties one-by-one in Python. + # Instead, there is a C++ function TensorGuards.check which takes + # all of the tensor arguments and checks them all against compile-time + # examples entirely in C++. Thus, every time we process a + # TENSOR_MATCH guard, we just add another entry to + # tensor_check_names/tensor_check_examples, saying "for this local, + # check it against this example", and it all ends up getting + # swept up into a single call to ___check_tensors. Invariant: + # len(tensor_check_names) == len(tensor_check_examples). + # TODO: something here + self.tensor_check_names: List[str] = [] + self.tensor_check_examples: List[torch.Tensor] = [] + self.tensor_check_guards: List[Guard] = [] + + self.check_fn_manager: CheckFunctionManager = check_fn_manager + # Keep track of weak references of objects with ID_MATCH guard. This + # info is stored alongside optimized_code and check_fn and is used to + # limit the number of cache entries with same ID_MATCH'd object. + self.id_matched_objs: Dict[str, ReferenceType[object]] = {} + + # Warning: use this with care! This lets you access what the current + # value of the value you are guarding on is. You probably don't want + # to actually durably save this value though (because it's specific + # to this frame!) Instead, you should be reading out some property + # (like its type) which is what you permanently install into the + # guard code. + def get(self, name: str) -> Any: + return eval(name, self.scope, CLOSURE_VARS) + + # Registers the usage of the source name referenced by the + # string (or stored in the Guard) as being guarded upon. It's important + # to call this before generating some code that makes use of 'guard', + # because without this call, we won't actually bind the variable + # you reference in the actual guard closure (oops!) 
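As a hedged aside, the guard expressions this builder assembles can be observed without reading the generated closure by enabling the "guards" logging artifact that guards_log above writes to; the compiled function here is illustrative.

import torch

# Enables the "guards" logging artifact, so the code parts produced by
# GuardBuilder are printed for each compiled frame.
torch._logging.set_logs(guards=True)

@torch.compile
def fn(x, n):
    return x * n

fn(torch.randn(3), 2)  # logs e.g. a tensor check for x and an equality guard on n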
+ def arg_ref(self, guard: Union[str, Guard]) -> str: + name: str + if isinstance(guard, str): + name = guard + else: + name = guard.name + base = strip_getattr_getitem(strip_function_call(name)) + if base not in self.argnames: + if re.match(r"[a-zA-Z0-9_]+", base): + if re.match(r"^\d+$", base): + log.warning("invalid var name: %s", guard) + self.argnames.append(base) + + return name + + def _guard_on_attribute(self, guard: Guard, attr_name: str, guard_fn): + attr_source = AttrSource(guard.originating_source, attr_name) + # Copy the stack info + new_guard = Guard( + attr_source, guard_fn, stack=guard.stack, user_stack=guard.user_stack + ) + new_guard.create(self) + + def TYPE_MATCH(self, guard: Guard) -> None: + # ___check_type_id is same as `id(type(x)) == y` + t = type(self.get(guard.name)) + obj_id = self.id_ref(t) + code = f"___check_type_id({self.arg_ref(guard)}, {obj_id})" + self._produce_guard_code(guard, [code]) + + def DICT_VERSION(self, guard: Guard): + # ___check_dict_version is same as `dict_version(x) == y` + ref = self.arg_ref(guard) + version = dict_version(self.get(guard.name)) + code = f"___dict_version({ref}) == {version}" + self._produce_guard_code(guard, [code]) + + def DICT_CONTAINS(self, guard: Guard, key: str, invert: bool): + dict_ref = self.arg_ref(guard) + + maybe_not = "not " if invert else "" + code = f"{maybe_not}___dict_contains({key!r}, {dict_ref})" + return self._produce_guard_code(guard, [code]) + + def BOOL_FALSE(self, guard: Guard): + # Guard on the runtime value being 'False', + # can be faster than seemingly equivalent checks like DICT_KEYS for empty dict + # + # WARNING: this guard is not safe to use generally. It only works if the runtime + # value is of a type that supports bool(), and some types e.g. Tensor do not. + # Only use this guard in cases you can guarantee the runtime type will be friendly. + # (e.g. Specialized NNModule with mutation protection via setattr) + # + # Why not simply check the runtime type inside this guard? It's slow enough to defeat + # the purpose of using this guard, which itself is supposed to be a faster alternative + # to DICT_KEYS. + ref = self.arg_ref(guard) + code = f"not {ref}" + self._produce_guard_code(guard, [code]) + + def ID_MATCH(self, guard: Guard): + # ___check_obj_id is same as `id(x) == y` + if isinstance(guard.originating_source, TypeSource): + # optional optimization to produce cleaner/faster guard code + return self.TYPE_MATCH( + Guard(guard.originating_source.base, GuardBuilder.TYPE_MATCH) # type: ignore[arg-type] + ) + + ref = self.arg_ref(guard) + val = self.get(guard.name) + code = f"___check_obj_id({ref}, {self.id_ref(val)})" + self._produce_guard_code(guard, [code]) + + # Keep track of ID_MATCH'd objects. This will be used to modify the + # cache size logic + if isinstance(guard.originating_source, LocalSource): + # TODO(janimesh) - This is currently restricted to nn.Module objects + # because many other ID_MATCH'd objects fail - like DeviceMesh. + # Increase the scope of ID_MATCH'd objects. 
+ if isinstance(val, torch.nn.Module): + local_name = guard.originating_source.local_name + weak_id = self.lookup_weakrefs(val) + if weak_id is not None: + self.id_matched_objs[local_name] = weak_id + + def NAME_MATCH(self, guard: Guard): + obj = self.get(guard.name) + self._guard_on_attribute(guard, "__name__", GuardBuilder.EQUALS_MATCH) + + def DATA_PTR_MATCH(self, guard: Guard): + obj = self.get(guard.name) + code = f"{self.arg_ref(guard)}.data_ptr() == {obj.data_ptr()}" + self._produce_guard_code(guard, [code]) + + def HASATTR(self, guard: Guard): + assert isinstance( + guard.originating_source, AttrSource + ), f"invalid source {guard.name}" + base_source = guard.originating_source.base + base = base_source.name() + attr = guard.originating_source.member + + ref = self.arg_ref(base) + val = hasattr(self.get(base), attr) + code = None + if val: + code = f"hasattr({ref}, {attr!r})" + else: + code = f"not hasattr({ref}, {attr!r})" + + self._produce_guard_code(guard, [code], provided_guarded_object=self.get(base)) + + def FUNCTORCH_STACK_MATCH(self, guard: Guard): + # Invalidate functorch code if current level is different than + # the one when FX graph was generated + # if torch._C._functorch.peek_interpreter_stack() is not None: + cis = torch._functorch.pyfunctorch.retrieve_all_functorch_interpreters() + states = [ci.get_state() for ci in cis] + code = [f"torch._functorch.pyfunctorch.compare_functorch_state({states})"] + self._produce_guard_code(guard, code) + + def EQUALS_MATCH(self, guard: Guard): + ref = self.arg_ref(guard) + val = self.get(guard.name) + t = type(val) + if np: + np_types: Tuple[Type[Any], ...] = ( + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.float16, + np.float32, + np.float64, + ) + else: + np_types = () + ok_types = tuple( + common_constant_types + | { + type, + list, + tuple, + set, + frozenset, + slice, + range, + torch.Size, + *np_types, + } + ) + if istype(val, dict): + assert all( + istype(x, ok_types) for x in itertools.chain(val.keys(), val.values()) + ) + else: + assert istype( + val, + ok_types, + ), f"Unexpected type {type(val)}, not in {ok_types}" + + # Special case for nan because float("nan") == float("nan") evaluates to False + if istype(val, float) and math.isnan(val): + self.TYPE_MATCH(guard) + code = list() + code.append(f"__math_isnan({ref})") + self._produce_guard_code(guard, code) + return + # Python math library doesn't support complex nan, so we need to use numpy + elif istype(val, complex) and np.isnan(val): + self.TYPE_MATCH(guard) + code = list() + code.append(f"__numpy_isnan({ref})") + self._produce_guard_code(guard, code) + return + + code = list() + + # If matching equality against list/tuple, we must also check that + # the internal types match. (TODO: what about nested lists?) + if istype(val, (list, tuple)): + # NB: SEQUENCE_LENGTH takes care of the outer __check_type_id test + self.SEQUENCE_LENGTH(guard) + + for idx, elem in enumerate(val): + code.append( + f"___check_type_id({ref}[{idx}], {self.id_ref(type(elem))})" + ) + else: + # Add type check to prevent equality check between tensor and non-tensor. 
+ self.TYPE_MATCH(guard) + + if istype(val, torch.Size): + val = tuple(val) + + # Code object can not be compared against their string representation + # I.e `eval(f"{compile('2+2','','exec')!r}")` raises SyntaxError + assert not istype(val, types.CodeType) + + # TODO: It feels like it would be better to just implement our own + # equality test in C that handles all of the necessary type checking + # and NaN tests + code.append(f"{ref} == {val!r}") + self._produce_guard_code(guard, code) + + def CONSTANT_MATCH(self, guard: Guard): + val = self.get(guard.name) + if istype(val, (bool, type(None), types.CodeType)): + self.ID_MATCH(guard) + else: + self.EQUALS_MATCH(guard) + + def NN_MODULE(self, guard: Guard): + self.ID_MATCH(guard) + ref = self.arg_ref(guard) + val = self.get(guard.name) + + def setup_guard(): + assert istype(val.training, bool) + self._guard_on_attribute(guard, "training", GuardBuilder.CONSTANT_MATCH) + + if hasattr(val, "training"): + # There are cases where a monkeypatched object has a guard made between __new__ and __init__ + setup_guard() + else: + exc.unimplemented(f"Guard setup for uninitialized class {type(val)}") + + def FUNCTION_MATCH(self, guard: Guard): + """things like torch.add and user defined functions""" + if guard.is_local(): + return self.ID_MATCH(guard) + + def CLOSURE_MATCH(self, guard: Guard): + """matches a closure by __code__ id.""" + if guard.is_local(): + val = self.get(guard.name) + # Strictly only want user-defined functions + if type(val) == types.FunctionType and hasattr(val, "__code__"): + self._guard_on_attribute(guard, "__code__", GuardBuilder.HASATTR) + self._guard_on_attribute(guard, "__code__", GuardBuilder.FUNCTION_MATCH) + else: + self.FUNCTION_MATCH(guard) + + def BUILTIN_MATCH(self, guard: Guard): + return self.FUNCTION_MATCH(guard) + + def PYMODULE_MATCH(self, guard: Guard): + return self.FUNCTION_MATCH(guard) + + def SEQUENCE_LENGTH(self, guard): + # This guard is used to check lenght of PySequence objects like list, + # tuple, collections.deque etc + ref = self.arg_ref(guard) + value = self.get(guard.name) + t = type(value) + + self.TYPE_MATCH(guard) + code = list() + if len(value) == 0: + code.append(f"not {ref}") + else: + code.append(f"len({ref}) == {len(value)}") + + self._produce_guard_code(guard, code) + + def DICT_LENGTH(self, guard): + self.SEQUENCE_LENGTH(guard) + + def TUPLE_ITERATOR_LEN(self, guard): + ref = self.arg_ref(guard) + value = self.get(guard.name) + t = type(value) + + self.TYPE_MATCH(guard) + code = list() + code.append(f"___tuple_iterator_len({ref}) == {tuple_iterator_len(value)}") + + self._produce_guard_code(guard, code) + + # TODO(voz): Deduplicate w/ AOTAutograd dupe input guards + def DUPLICATE_INPUT(self, guard, source_b): + ref_a = self.arg_ref(guard) + ref_b = self.arg_ref(source_b.name()) + + code = [f"{ref_b} is {ref_a}"] + self._produce_guard_code(guard, code) + + def DICT_KEYS(self, guard): + # Guard on the keys and their order + ref = self.arg_ref(guard) + value = self.get(guard.name) + t = type(value) + + self.TYPE_MATCH(guard) + code = list() + any_key_is_id = any(key_is_id(k) for k in value.keys()) + const_keys_repr = dict_keys_repr( + key_to_id(value), + local=is_from_local_source(guard.originating_source), + ) + if any_key_is_id: + code.append(f"___key_to_id({ref}) == {const_keys_repr}") + else: + code.append(f"list({ref}.keys()) == {const_keys_repr}") + + self._produce_guard_code(guard, code) + + def WEAKREF_ALIVE(self, guard): + self._produce_guard_code(guard, [f"{self.arg_ref(guard)} is 
not None"]) + + def NN_MODULE_PARAM_NAMES(self, guard): + ref = self.arg_ref(guard) + value = self.get(guard.name) + t = type(value) + keys = {k for k, v in value.named_parameters()} + + self.TYPE_MATCH(guard) + code = list() + code.append(f"{{k for k, v in {ref}.named_parameters()}} == {keys!r}") + + self._produce_guard_code(guard, code) + + def DICT_CONST_KEYS(self, guard): + """Constant keys match""" + ref = self.arg_ref(guard) + value = self.get(guard.name) + t = type(value) + + self.TYPE_MATCH(guard) + code = list() + code.append(f"list({ref}.keys()) == {list(value.keys())!r}") + + self._produce_guard_code(guard, code) + + def OBJECT_MUTATION(self, guard: Guard): + mutation_guard.watch(self.get(guard.name), self.check_fn_manager) + + def GRAD_MODE(self, guard: Guard): + pass # we always guard on this via GlobalStateGuard() + + def DETERMINISTIC_ALGORITHMS(self, guard: Guard): + pass # we always guard on this via GlobalStateGuard() + + def TORCH_FUNCTION_STATE(self, guard: Guard): + pass # we always guard on this via GlobalStateGuard() + + def DEFAULT_DEVICE(self, guard: Guard): + """Guard on CURRENT_DEVICE per torch.utils._device""" + assert guard.source is GuardSource.GLOBAL + import torch.utils._device as m + + self._produce_guard_code( + guard, [f"utils_device.CURRENT_DEVICE == {m.CURRENT_DEVICE!r}"] + ) + + def BACKEND_MATCH(self, guard: Guard): + """Guard on backend matching based on id of current_backend""" + assert guard.source is GuardSource.GLOBAL + backend_id = ( + f"{id(torch._dynamo.eval_frame.guarded_backend_cache.current_backend)}" + ) + code = [f"___check_current_backend({backend_id})"] + self._produce_guard_code(guard, code) + + def SHAPE_ENV(self, guard: Guard): + # Let's handle ShapeEnv guards. To do this, we will resolve + # shape variables to sources from tracked_fakes. This must happen after + # tensor checks. + assert guard.name == "" + output_graph = self.check_fn_manager.output_graph + # NB: self.output_graph can be None in the debug_nops tests + fs = output_graph.tracked_fakes + input_contexts = [a.symbolic_context for a in fs] + + def get_sources(t_id, dim): + # Looks up base sources mapped to a tensor id and uses them to create + # sources for the corresponding tensor dimension. + return [ + TensorPropertySource(source, TensorProperty.SIZE, dim) + for source in output_graph.tracked_fakes_id_to_source[t_id] + ] + + if output_graph.export_constraints: + from sympy import Symbol + + source_pairs: List[Tuple[Source, Source]] = [] + derived_equalities: List[ # type: ignore[type-arg] + Tuple[Source, Union[Source, Symbol], Callable] + ] = [] + phantom_symbols: Dict[str, Symbol] = {} + for constraint in output_graph.export_constraints: + if constraint.t_id in output_graph.tracked_fakes_id_to_source: + torch.export.dynamic_shapes._process_equalities( + constraint, + get_sources, + output_graph.shape_env, + source_pairs, + derived_equalities, + phantom_symbols, + ) + else: + log.warning("Untracked tensor used in export constraints") + equalities_inputs = EqualityConstraint( + source_pairs=source_pairs, + derived_equalities=derived_equalities, + phantom_symbols=list(phantom_symbols.values()), + warn_only=False, + ) + else: + equalities_inputs = None + guards = output_graph.shape_env.produce_guards( + [a.fake for a in fs], + [a.source for a in fs], + input_contexts=input_contexts, + equalities_inputs=equalities_inputs, + source_ref=self.source_ref, + # Export keeps static. 
+ ignore_static=(not self.check_fn_manager.output_graph.export), + ) + # When exporting, we may work with the shape constraints some more in + # postprocessing, so don't freeze yet + if not self.check_fn_manager.output_graph.export: + output_graph.shape_env.freeze() + for shape_guard in guards: + self._produce_guard_code(guard, [shape_guard], shape_env=True) + + def TENSOR_MATCH(self, guard: Guard, value=None): + if guard.is_nn_module() or guard.originating_source.is_dict_key(): + self.ID_MATCH(guard) + else: + if isinstance(value, TensorWeakRef): + value = value() + + value = value if value is not None else self.get(guard.name) + assert isinstance(value, torch.Tensor) + + tensor_name = self.arg_ref(guard) + # [Note - On Export Tensor Guards] + # + # In eager mode, tensor guards are evaluated through C++, in guards.cpp + # see [Note - On Eager Tensor Guards] for more info. + # + # In export mode, we instead maintain parallel logic between C++ and python + # here, with an exception of checking the dispatch key - with the idea that a dispatch key + # is an entirely runtime notion that would make no sense to keep in an exported graph. + # + # Now, this idea is okay, but to paraphrase @ezyang, this mental model is sufficient for now, although + # not entirely true. + # For example, suppose one of the input tensors had the negative dispatch key. + # You should end up with a graph that is specialized for tensors that have a negative dispatch key. + # If you allow a Tensor that does NOT have this bit set, you will accidentally run it "as if" it were negated. + # Now, negative key only shows up for complex numbers, and most likely, the exported to target doesn't + # support this feature at all, but the point stands that :some: tensor state only shows up on dispatch key. + # TODO(voz): Either populate a dispatch_key check into the guards, or error on users passing in an unsupported + # subset of keys during export. + # + # The list of tensor fields and calls we care about can be found in `terms` below. + # TODO(voz): We are missing storage offset in all our tensor guards? + code: List[str] = list() + if self.check_fn_manager.output_graph.export: + self.TYPE_MATCH(guard) + terms = [ + "dtype", + "device", + "requires_grad", + "ndimension()", + ] + + for term in terms: + real_value = self.get(tensor_name + "." + term) + if istype(real_value, (torch.device, torch.dtype)): + # copy pasted from EQUALS_MATCH + code.append(f"str({tensor_name}.{term}) == {str(real_value)!r}") + else: + code.append(f"{tensor_name}.{term} == {real_value}") + else: + self.tensor_check_names.append(tensor_name) + self.tensor_check_examples.append(value) + self.tensor_check_guards.append(guard) + + # A frame is valid for reuse with dynamic dimensions if the new + # (user-requested) dynamic dimensions are a subset of the old + # (already compiled) dynamic dimensions. + # + # It's a little non-obvious why you'd want this: in particular, + # if an already compiled frame matches all of the guards, why + # not just use it, why force a recompile? + # + # We force it for two reasons: + # + # - The user *required* us to compile with a new dynamic dimension, + # we should not ignore that and serve up the old, specialized + # frame. Listen to the user! + # + # - In fact, we are obligated to *raise an error* if we fail to + # make the requested dimension dynamic. If we don't + # recompile, we can't tell if that dimension can actually be + # made dynamic. 
+ # + # If the new dynamic dims are a subset of the old, we already know + # we can make them dynamic (since we made them dynamic in old). + # This is slightly unsound, because maybe your input size is + # [s0, s0, s1] and so you can do it dynamic if you say dynamic + # dims {0, 1, 2} but you can't if you only do {0, 2} (because now + # the second s0 is specialized). But we're not entirely sure if + # this is a good idea anyway lol... (if you want to try removing + # this logic, be my guest! -- ezyang 2024) + # + assert guard.source is not None + static, reason = tensor_always_has_static_shape( + value, is_tensor=True, guard_source=guard.source + ) + if not static: + if hasattr(value, "_dynamo_dynamic_indices"): + code.append( + f"(({tensor_name}._dynamo_dynamic_indices.issubset({value._dynamo_dynamic_indices})) if hasattr({tensor_name}, '_dynamo_dynamic_indices') else True)" # noqa: B950 + ) + # In the case of us not having any dynamic dimension indices, we compiled the frame with no chance of + # raising for this specific tensor - and any inputs with more dynamic user directives specified must be recompiled. + else: + code.append( + f"hasattr({tensor_name}, '_dynamo_dynamic_indices') == False" + ) + if len(code) > 0: + self._produce_guard_code(guard, code) + + # A util that appends guarded code, or, in the case of export, adds data onto guards + def _produce_guard_code( + self, guard, code_list, provided_guarded_object=None, shape_env=False + ): + # WARNING: It is important that cur_frame/caller do NOT stay in + # the current frame, because they will keep things live longer + # than they should. See TestMisc.test_release_module_memory + cur_frame = currentframe() + assert cur_frame is not None + caller = cur_frame.f_back + del cur_frame + assert caller is not None + func_name = getframeinfo(caller)[2] + del caller + # We use func_name for export, so might as well get a nice defensive check out of it + assert func_name in dir( + self.__class__ + ), f"_produce_guard_code must be called from inside GuardedCode. Called from {func_name}" + + if shape_env: + self.shape_env_code.append(GuardCodeList(code_list, guard)) + else: + self.code.append(GuardCodeList(code_list, guard)) + + # Not all guards have names, some can be installed globally (see asserts on HAS_GRAD) + if provided_guarded_object is None: + name_valid = guard.name is not None and guard.name != "" + + guarded_object = self.get(guard.name) if name_valid else None + else: + guarded_object = provided_guarded_object + + guarded_object_type = ( + weakref.ref(type(guarded_object)) if guarded_object is not None else None + ) + obj_ref = None + # Not necessary to have weakref for Enum type, but there is a bug that + # makes hasattr(guarded_object.__class__, "__weakref__") return True. + if hasattr(guarded_object.__class__, "__weakref__") and not isinstance( + guarded_object, enum.Enum + ): + obj_ref = weakref.ref(guarded_object) + + guard.set_export_info( + func_name, + guarded_object_type, + code_list, + obj_ref, + ) + + +# Common Sub-Expression Elimination for Python expressions. +# +# There are 2 steps to this pass: +# 1. Count the frequency of each sub-expression (i.e. inner +# node in the AST tree) +# +# 2. Replace those that occur more than once by a fresh variable 'v'. +# 'v' will be defined in the 'preface' list (output argument to +# 'NodeTransformer') +# +# NB: the use of 'ast.unparse' while visiting the nodes makes this pass +# quadratic on the depth of the tree. 
+# +# NB: this pass creates a new variable for each AST node that is repeated +# more than 'USE_THRESHOLD'. e.g. if 'a.b.c.d' is used 10 times, 'a.b.c' +# and 'a.b' are also used 10 times. So, there will be a new variable for +# each of them. +class PyExprCSEPass: + # Maximum number of times a given expression can be used without being + # replaced by a fresh variable. + USE_THRESHOLD = 1 + + # Ad-Hoc: AST nodes this pass focuses on. + ALLOWED_NODE_TYPES = (ast.Attribute, ast.Call, ast.Subscript) + + @dataclasses.dataclass + class Config: + expr_count: Dict[str, int] + expr_to_name: Dict[str, str] + + class ExprCounter(ast.NodeVisitor): + def __init__(self, config: PyExprCSEPass.Config) -> None: + self._config = config + + def visit(self, node: ast.AST) -> Any: + if isinstance(node, PyExprCSEPass.ALLOWED_NODE_TYPES): + self._config.expr_count[_ast_unparse(node)] += 1 + super().visit(node) + + class Replacer(ast.NodeTransformer): + def __init__( + self, + config: PyExprCSEPass.Config, + gen_name: Callable[[], str], + ) -> None: + super().__init__() + self._config = config + self._gen_name = gen_name + self.preface: List[str] = [] + + def visit(self, node: ast.AST) -> Any: + if isinstance(node, PyExprCSEPass.ALLOWED_NODE_TYPES): + expr = _ast_unparse(node) + + # Replacement only occurs if a given expression is used more + # than once. + if self._config.expr_count[expr] > PyExprCSEPass.USE_THRESHOLD: + if expr not in self._config.expr_to_name: + # Parent 'visit' is called so that we CSE the inner expressions first. + # + # The resulting expression is used as right-hand-side of the variable + # assignment. i.e. we are CSE-ing the children before the parents. + # + # Indexing still uses the old 'node', since that's what was counted + # by the 'NodeVisitor'. + node_ = super().visit(node) + expr_ = _ast_unparse(node_) + var_name = self._gen_name() + self.preface.append(f"{var_name} = {expr_}") + self._config.expr_to_name[expr] = var_name + else: + var_name = self._config.expr_to_name[expr] + return ast.Name(var_name, ast.Load()) + + return super().visit(node) + + def __init__(self) -> None: + self._counter = 0 + self._config = self.Config( + expr_count=collections.defaultdict(lambda: 0), expr_to_name={} + ) + + def _new_var(self, prefix: str = "_var") -> str: + name = f"{prefix}{self._counter}" + self._counter += 1 + return name + + def count(self, exprs: List[str]) -> None: + counter = self.ExprCounter(self._config) + for e in exprs: + try: + counter.visit(ast.parse(e)) + except SyntaxError as ex: + log.exception("Failed to visit expr at line %s.\n%s", ex.lineno, e) + raise + + def replace(self, expr: str) -> Tuple[List[str], str]: + replacer = self.Replacer(self._config, self._new_var) + new_node = replacer.visit(ast.parse(expr)) + return replacer.preface, _ast_unparse(new_node) + + +def must_add_nn_module_guards(guard): + # For config.guard_nn_modules=False, we can skip all the guards that + # originate from inside of nn module except for a few categories. + return ( + # Guard for defaults + isinstance(guard.originating_source, DefaultsSource) + # Guard using dict tags if the config flag is set + or ( + config.guard_nn_modules_using_dict_tags + and guard.create_fn is GuardBuilder.NN_MODULE + ) + ) + + +class DeletedGuardFn: + pass + + +# NB: Naively, you'd expect this to only be a function that produces +# the callable that constitutes the guard. 
However, there is some +# delicate handling for invalidating this check function when the +# locals/globals get invalidated, so there's some extra state +# we have to hold in this manager class. +class CheckFunctionManager: + def __init__( + self, + output_graph=None, + guard_fail_fn: Optional[Callable[[GuardFail], None]] = None, + ): + guards = output_graph.guards if output_graph else None + self._weakrefs: Dict[int, ReferenceType[object]] = {} + self.output_graph = output_graph + w_builder = None + + def source_ref(source): + guard_source = source.guard_source() + if guard_source is GuardSource.CONSTANT: + # No need to track constants + return source.name() + assert w_builder + r_builder = w_builder() + assert r_builder is not None + return r_builder.arg_ref(source.name()) + + builder = GuardBuilder( + self.id_ref, + source_ref, + self.lookup_weakrefs, + output_graph.local_scope, + output_graph.global_scope, + self, + ) + + # Break retain cycle. See test_release_scope_memory + def cleanup_builder(weak_b): + b = weak_b() + if b: + b.scope = None + + # Break retain cycle. See test_release_input_memory + w_builder = weakref.ref(builder, cleanup_builder) + + for guard in sorted(guards or [], key=Guard.sort_key): + if ( + not config.guard_nn_modules + and guard.is_nn_module() + # Default func args must be guarded on. + # TODO: we could make use of 'DefaultsSource' and offer a .guard.is_defaults() API + and "__defaults__" not in guard.name + and "__kwdefaults__" not in guard.name + and (config.skip_nnmodule_hook_guards or "hooks" not in guard.name) + ): + continue + + guard.create(builder) + self.check_fn = self.compile_check_fn(builder, guards, guard_fail_fn) + # Keep track of weak references of objects with ID_MATCH guard. This + # info is stored alongside optimized_code and check_fn and is used to + # limit the number of cache entries with same ID_MATCH'd object. + # TODO(janimesh) - Currently this information is stored as an attr on + # the check_fn itself to avoid changing CacehEntry datastructure in + # eval_frame.c. In future, we should probably replace check_fn with a + # queryable data structure such that this information is already present + # in some form. + self.check_fn.id_matched_objs = builder.id_matched_objs + + # NB - We have to very careful of cleaning up here. Because of the + # invalidate function, we can create a weakref finalizer that keeps + # `self` alive for very long. Sometimes by mistake, we can run + # invalidate for a type/object (check id_ref method) that Python can + # leak by design, preventing us from calling the finalizer. In that + # case, the `self` will be alive even though the cache entry will be + # deleted (check invalidate method), which can cause a memory leak, + # e.g., not setting output_graph = None can keep hold of nn_modules. + self._weakrefs.clear() + self.output_graph = None + + def compile_check_fn(self, builder, guards_out, guard_fail_fn): + # see parallel handling of ".0" / "___implicit0" in _eval_frame.c + largs = builder.argnames + largs += ["**___kwargs_ignored"] + + guards_log.debug("GUARDS:") + + # Don't report this guard, it's always the same, useless! 
+ code_parts = ["___check_global_state()"] + verbose_code_parts = code_parts[:] + structured_guard_fns = [] + + def add_code_part(code_part, guard, log_only=False): + verbose_code_part = get_verbose_code_part(code_part, guard) + guards_log.debug("%s", verbose_code_part) + + structured_guard_fns.append( + lambda: { + "code": code_part, + "stack": structured.from_traceback(guard.stack.summary()) + if guard.stack + else None, + "user_stack": structured.from_traceback(guard.user_stack) + if guard.user_stack + else None, + } + ) + + if verbose_guards_log.isEnabledFor(logging.DEBUG): + maybe_stack = "" + maybe_user_stack = "" + if guard is not None: + if guard.stack: + maybe_stack = f"\nStack:\n{''.join(guard.stack.format())}" + if guard.user_stack: + maybe_user_stack = ( + f"\nUser stack:\n{''.join(guard.user_stack.format())}" + ) + verbose_guards_log.debug( + "Guard: %s%s%s", + code_part, + maybe_stack, + maybe_user_stack, + ) + + if not log_only: + code_parts.append(code_part) + verbose_code_parts.append(verbose_code_part) + + seen = set() + for gcl in builder.code: + for code in gcl.code_list: + if code not in seen: + add_code_part(code, gcl.guard) + seen.add(code) + + tensor_check_names = builder.tensor_check_names + check_tensors_fn = None + check_tensors_verbose_fn = None + if tensor_check_names: + assert ( + not self.output_graph.export + ), "Illegal to set tensor_check_names in export." + tensor_check_examples = builder.tensor_check_examples + + dynamic_dims_sizes = [ + convert_to_concrete_values( + self.output_graph.tensor_weakref_to_sizes_strides[t]["size"] + ) + for t in tensor_check_examples + ] + + dynamic_dims_strides = [ + convert_to_concrete_values( + self.output_graph.tensor_weakref_to_sizes_strides[t]["stride"] + ) + for t in tensor_check_examples + ] + + tensor_guards = TensorGuards( + *tensor_check_examples, + dynamic_dims_sizes=dynamic_dims_sizes, + dynamic_dims_strides=dynamic_dims_strides, + ) + check_tensors_fn = tensor_guards.check + check_tensors_verbose_fn = tensor_guards.check_verbose + tensor_check_args = ", ".join( + tensor_check_names + ["tensor_check_names=tensor_check_names"] + ) + # Do this manually, to un-stagger the guards in log message + code_parts.append(f"___check_tensors({tensor_check_args})") + verbose_code_parts.append(f"___check_tensors({tensor_check_args})") + tensor_check_guards = builder.tensor_check_guards + + for i, name in enumerate(tensor_check_names): + # This is a copy of what guards.cpp checks against + # Keep this in sync with TensorCheck constructor + t = tensor_check_examples[i] + sizes = dynamic_dims_sizes[i] + strides = dynamic_dims_strides[i] + code_part = get_tensor_guard_code_part(t, name, sizes, strides) + add_code_part(code_part, tensor_check_guards[i], log_only=True) + + aotautograd_guards: List[GuardEnvExpr] = ( + self.output_graph.tracing_context.guards_context.aotautograd_guards + if self.output_graph + else [] + ) + for guard in aotautograd_guards: + if isinstance(guard, DuplicateInputs): + source_a = guard.input_source_a + source_b = guard.input_source_b + add_code_part(f"{source_a.name()} is {source_b.name()}", None) + else: + raise RuntimeError(f"Unknown GuardEnvExpr: {guard}") + + # TODO: the "guard" here is actually just the top level SHAPE_ENV + # which is useless. Get ShapeEnv to pass in more provenance. 
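To make the accumulation above concrete, the strings collected into code_parts look roughly like the following hand-written illustration; the exact rendering of source names such as L['x'] depends on the guard source, so treat these as examples only:

    code_parts = [
        "___check_global_state()",
        "___check_tensors(L['x'], L['y'], tensor_check_names=tensor_check_names)",
        "L['x'] is L['y']",                      # from a DuplicateInputs aotautograd guard
        "L['x'].size()[0] == L['y'].size()[0]",  # from the shape-env guards appended below
    ]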
+ for gcl in builder.shape_env_code: + for code in gcl.code_list: + add_code_part(code, gcl.guard) + + # OK, all done generating guards + torch._logging.trace_structured( + "dynamo_guards", payload_fn=lambda: [f() for f in structured_guard_fns] + ) + + global_state = convert_frame.initial_global_state + if global_state is None: + # we should only hit this case in NopTests() + global_state = convert_frame.GlobalStateGuard() + closure_vars = { + "___check_tensors": check_tensors_fn, + "___check_tensors_verbose": check_tensors_verbose_fn, + "___check_global_state": global_state.check, + "___check_current_backend": torch._dynamo.eval_frame.check_current_backend, + "tensor_check_names": tensor_check_names, + **SYMPY_INTERP, + **CLOSURE_VARS, + } + + unique_code_parts = list(unique(code_parts)) + make_guard_fn_args = ", ".join(closure_vars.keys()) + guard_body, pycode = build_guard_function(unique_code_parts, make_guard_fn_args) + + if os.environ.get("TORCHDYNAMO_PRINT_GUARDS", None) == "1": + print("GUARDS\n", guard_body) + + out: Dict[str, Any] = dict() + + # We don't put builder.scope as the globals in exec call because + # guard_fn.__globals__ becomes equal to builder.scope. This causes + # guard_fn to hold a referece to f_locals sitting in builder.scope["L"] + globals_for_guard_fn = {"G": builder.scope["G"]} + try: + exec(pycode, globals_for_guard_fn, out) + except SyntaxError as ex: + log.exception("Failed to exec guard at line %s.\n%s", ex.lineno, pycode) + raise + guard_fn = out["___make_guard_fn"](*closure_vars.values()) + guard_fn.closure_vars = closure_vars + # TODO(whc) maybe '.code_parts' was only kept around for the guard callback? so we don't need both + guard_fn.args = largs + guard_fn.code_parts = code_parts + guard_fn.verbose_code_parts = verbose_code_parts + # Grab only G, but preserve "G" because guards access it as "G" + guard_fn.global_scope = globals_for_guard_fn + guard_fn.guard_fail_fn = guard_fail_fn + # will be populated by a non-owning reference to CacheEntry/ExtraState + # when the CacheEntry is constructed + guard_fn.cache_entry = None + guard_fn.extra_state = None + return guard_fn + + def invalidate(self): + # Some tests reveal that CheckFunctionManager has no attribute + # check_fn, but this case should not be of any concern. + # This case doesn't seem easy to repro. + if ( + hasattr(self, "check_fn") + and self.check_fn is not DeletedGuardFn + and (cache_entry := self.check_fn.cache_entry) is not None + and (extra_state := self.check_fn.extra_state) is not None + ): + assert isinstance(cache_entry, CacheEntry) + assert isinstance(extra_state, ExtraState) + extra_state.invalidate(cache_entry) + self.check_fn.cache_entry = None + self.check_fn.extra_state = None + self.check_fn = DeletedGuardFn + + def id_ref(self, obj): + """add a weakref, return the id""" + try: + if id(obj) not in self._weakrefs: + # We will clear the _weakrefs dict at the end of __init__ + # function, which will delete the callbacks as well. Therefore, + # we are using a finalizer which is kept alive. 
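The comment above leans on a property of weakref.finalize that is easy to miss: unlike a callback attached to a plain weakref.ref, a finalizer stays registered even after every other weak reference to the object is dropped. A minimal standalone illustration, not part of this file:

    import weakref

    class Widget:                                   # stand-in for any guarded object
        pass

    obj = Widget()
    refs = {id(obj): weakref.ref(obj)}              # analogous to self._weakrefs
    weakref.finalize(obj, lambda: print("invalidated"))

    refs.clear()   # dropping our weakrefs does not unregister the finalizer
    del obj        # prints "invalidated": the finalizer is kept alive until it fires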
+ self._weakrefs[id(obj)] = weakref.ref(obj) + weakref.finalize(obj, self.invalidate) + except TypeError: + pass # cannot weakref bool object + return id(obj) + + def lookup_weakrefs(self, obj): + """Lookup the _weakrefs created in id_ref function for ID_MATCH'd objects""" + if id(obj) in self._weakrefs: + return self._weakrefs[id(obj)] + return None + + +def build_guard_function(code_parts, closure_args) -> Tuple[str, str]: + from torch._inductor.utils import IndentedBuffer + + if HAS_UNPARSE_FUNCTIONS: + csepass = PyExprCSEPass() + csepass.count(code_parts) + + def replace(expr: str) -> Tuple[List[str], str]: + return csepass.replace(expr) + + else: + + def replace(expr: str) -> Tuple[List[str], str]: + return [], expr + + # Generate the inner body of the guard function. + # i.e. if-chain of the guard expressions. + guard_body = IndentedBuffer() + for expr in code_parts: + preface, expr = replace(expr) + guard_body.writelines(preface) + guard_body.writeline(f"if not ({expr}):") + with guard_body.indent(): + guard_body.writeline("return False") + + # Wrap the inner body into the actual guard function. + guard = IndentedBuffer() + guard.writeline("def guard(L):") + with guard.indent(): + guard.splice(guard_body) + guard.writeline("return True") + + # Wrap the whole guard function into another function + # with the closure variables. + make_guard_fn = IndentedBuffer() + make_guard_fn.writeline(f"def ___make_guard_fn({closure_args}):") + with make_guard_fn.indent(): + make_guard_fn.splice(guard) + make_guard_fn.writeline("return guard") + + return guard_body.getvalue(), make_guard_fn.getvalue() + + +def is_recompiles_enabled(): + return torch._logging._internal.log_state.is_artifact_enabled("recompiles") + + +def is_recompiles_verbose_enabled(): + return torch._logging._internal.log_state.is_artifact_enabled("recompiles_verbose") + + +def get_guard_fail_reason( + guard_fn: GuardFn, + code: types.CodeType, + f_locals: Dict[str, object], +) -> str: + """ + Return the reason why `guard_fn` failed. + Updates `guard_failures` with the generated reason. + Only the first failed check of guard_fn is reported. + """ + scope = {"L": f_locals, "G": guard_fn.global_scope["G"]} + scope.update(guard_fn.closure_vars) + scope["___check_tensors"] = scope["___check_tensors_verbose"] + reasons: List[str] = [] + for part in guard_fn.verbose_code_parts: + global_scope = dict(guard_fn.global_scope) + global_scope["__compile_source__"] = part + with report_compile_source_on_error(): + try: + fail_reason = eval(part, global_scope, scope) + except Exception as e: + if is_recompiles_verbose_enabled(): + continue + else: + raise + # Only ___check_tensors knows how to return a fancy fail reason; + # for everything else we just report the code that failed + + if isinstance(fail_reason, bool) and not fail_reason: + fail_reason = part + if isinstance(fail_reason, str): + reasons.append(fail_reason) + if not is_recompiles_verbose_enabled(): + break + + reason_str = "\n".join(reasons) + guard_failures[orig_code_map[code]].append(reason_str) + + try: + if guard_fn.guard_fail_fn is not None: + guard_fn.guard_fail_fn( + GuardFail(reason_str or "unknown reason", orig_code_map[code]) + ) + except Exception as e: + log.exception( + "Failure in guard_fail_fn callback - raising here will cause a NULL Error on guard eval", + ) + + return reason_str + + +def get_and_maybe_log_recompilation_reason( + cache_entry, frame: types.FrameType +) -> List[str]: + """ + Return the list of guard failure reasons using cache_entry. 
+ Logs the recompilation reason if `recompiles` logging is enabled. + Raises a RecompileError if `config.error_on_recompile` is enabled. + """ + reasons = [] + while cache_entry is not None: + reason = get_guard_fail_reason( + cache_entry.check_fn, cache_entry.code, frame.f_locals + ) + if reason: + reasons.append(reason) + cache_entry = cache_entry.next + + code = frame.f_code + + # at least one of "recompiles" or "recompiles_verbose" is enabled + do_recompiles_log = is_recompiles_enabled() or is_recompiles_verbose_enabled() + + if do_recompiles_log or config.error_on_recompile: + if is_recompiles_verbose_enabled(): + failures = "\n\n".join( + f"guard {i} failures:\n" + textwrap.indent(reason, "- ") + for i, reason in enumerate(reasons) + ) + else: + failures = textwrap.indent("\n".join(reasons), "- ") + guard_failure_details = ( + f"triggered by the following guard failure(s):\n{failures}" + ) + message = ( + f"Recompiling function {code.co_name} in {code.co_filename}:{code.co_firstlineno}\n" + f"{textwrap.indent(guard_failure_details, ' ')}" + ) + if do_recompiles_log: + if is_recompiles_verbose_enabled(): + recompiles_verbose_log.debug(message) + else: + recompiles_log.debug(message) + if config.error_on_recompile: + raise exc.RecompileError(message) + + return reasons + + +def guard_error_hook( + guard_fn: GuardFn, + code: types.CodeType, + f_locals: Dict[str, object], + index: int, + last: bool, +): + print( + f"ERROR RUNNING GUARDS {code.co_name} {code.co_filename}:{code.co_firstlineno}" + ) + print("lambda " + ", ".join(guard_fn.args) + ":") + print(" ", " and\n ".join(guard_fn.code_parts)) + local_scope = {"L": f_locals, **guard_fn.closure_vars} + for guard in guard_fn.code_parts: + try: + eval(guard, guard_fn.global_scope, local_scope) + except: # noqa: B001,E722 + print(f"Malformed guard:\n{guard}") + + +set_guard_error_hook(guard_error_hook) + + +def unique(seq): + seen = set() + for x in seq: + if x not in seen: + yield x + seen.add(x) + + +def make_dupe_guard(obj_source, dupe_source): + # Note - we may end up in a situation where we invoke something like + # def fn(x, y) + # with fn(x, x) + # Prior to the addition of tracking to all relevant objects, we would handle this just fine by + # eagerly re-entering VB and rewrapping inputs, correctly creating graphargs and placeholders. However, + # with tracking on inputs, duplicate inputs or aliased relationships may end up getting erased here - + # In the fn(x, x) example call above look like a graph with a single input. + # In order to ensure that we do not reuse fn(x, x) for fn(x, y), we create a duplicate input guard. + + # Note - we may not have a source, that is fine, it just means we had an object that is safe to have + # leave unsourced - like a local list created and discharged entirely within a local scope. + if dupe_source and dupe_source != obj_source: + ser_source_is_local = is_from_local_source(dupe_source) + source_is_local = is_from_local_source(obj_source) + # Note - both must be local, or global, or we will run afoul of a lack of merging in how we currently + # reconcile guards builder scopes in compile_check_fn. This technically means we miss a guard here, + # so maybe we should do this refactor before we land this... + # TODO(voz): Combine local and global guard builders. + if ser_source_is_local == source_is_local: + # Note - this is a little aggressive - these being duplicate input does not always matter. + # However, this should always be a sound guard to add here. 
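As a concrete illustration of the fn(x, x) versus fn(x, y) scenario described above (a rough sketch; the exact guard text depends on the input sources):

    import torch

    def fn(a, b):
        return a + b

    opt_fn = torch.compile(fn)
    x = torch.randn(4)
    opt_fn(x, x)                 # traced with deduplicated inputs; installs an "L['a'] is L['b']" style guard
    opt_fn(x, torch.randn(4))    # that guard fails, so this call recompiles with two distinct inputs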
+ return functools.partial(GuardBuilder.DUPLICATE_INPUT, source_b=dupe_source) + return None + + +def install_guard(*guards, skip=0): + """ + Add dynamo guards to the current tracing context. + + Args: + guards: guard(s) to add + skip: number of stack frames to ignore for debug stack trace + """ + from torch._guards import TracingContext + + collect_debug_stack = guards_log.isEnabledFor( + logging.DEBUG + ) or verbose_guards_log.isEnabledFor(logging.DEBUG) + add = TracingContext.get().guards_context.dynamo_guards.add + for guard in guards: + assert isinstance(guard, Guard) + add(guard, collect_debug_stack=collect_debug_stack, skip=skip + 1) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/hooks.py b/venv/lib/python3.10/site-packages/torch/_dynamo/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..1cef496992c3301c9e9a90d1c60046a64a717497 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/hooks.py @@ -0,0 +1,12 @@ +import dataclasses + +from typing import Callable, Optional + +from torch._guards import GuardsSet +from .types import GuardFail + + +@dataclasses.dataclass +class Hooks: + guard_export_fn: Optional[Callable[[GuardsSet], None]] = None + guard_fail_fn: Optional[Callable[[GuardFail], None]] = None diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/logging.py b/venv/lib/python3.10/site-packages/torch/_dynamo/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..1e9a820785be20a33a7eabc136eabb38c2894948 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/logging.py @@ -0,0 +1,57 @@ +import itertools +import logging + +from torch.hub import _Faketqdm, tqdm + +# Disable progress bar by default, not in dynamo config because otherwise get a circular import +disable_progress = True + + +# Return all loggers that torchdynamo/torchinductor is responsible for +def get_loggers(): + return [ + logging.getLogger("torch.fx.experimental.symbolic_shapes"), + logging.getLogger("torch._dynamo"), + logging.getLogger("torch._inductor"), + ] + + +# Creates a logging function that logs a message with a step # prepended. +# get_step_logger should be lazily called (i.e. at runtime, not at module-load time) +# so that step numbers are initialized properly. 
e.g.: + +# @functools.lru_cache(None) +# def _step_logger(): +# return get_step_logger(logging.getLogger(...)) + +# def fn(): +# _step_logger()(logging.INFO, "msg") + +_step_counter = itertools.count(1) + +# Update num_steps if more phases are added: Dynamo, AOT, Backend +# This is very inductor centric +# _inductor.utils.has_triton() gives a circular import error here + +if not disable_progress: + try: + import triton # noqa: F401 + + num_steps = 3 + except ImportError: + num_steps = 2 + pbar = tqdm(total=num_steps, desc="torch.compile()", delay=0) + + +def get_step_logger(logger): + if not disable_progress: + pbar.update(1) + if not isinstance(pbar, _Faketqdm): + pbar.set_postfix_str(f"{logger.name}") + + step = next(_step_counter) + + def log(level, msg, **kwargs): + logger.log(level, "Step %s: %s", step, msg, **kwargs) + + return log diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/mutation_guard.py b/venv/lib/python3.10/site-packages/torch/_dynamo/mutation_guard.py new file mode 100644 index 0000000000000000000000000000000000000000..fb514005ac9327d3299d6911b37b4b7173f1fd6a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/mutation_guard.py @@ -0,0 +1,126 @@ +# mypy: disable-error-code="method-assign" + +import functools +import weakref + +import torch.nn +from torch.nn import Module + +from .utils import ExactWeakKeyDictionary, is_lazy_module + + +class MutationTracker: + db = ExactWeakKeyDictionary() + + def __init__(self): + self.mutation_count = 0 + self.watchers = [] + + def on_mutation(self, name): + self.mutation_count += 1 + tmp = self.watchers + self.watchers = [] + for ref in tmp: + guarded = ref() + if guarded is not None: + guarded.invalidate(ref) + + def track(self, guarded_code): + self.watchers.append(weakref.ref(guarded_code)) + + +def watch(obj, guarded_code): + """invalidate guarded_code when obj is mutated""" + ensure_patched(type(obj)) + + if obj not in MutationTracker.db: + MutationTracker.db[obj] = MutationTracker() + tracker = MutationTracker.db[obj] + tracker.track(guarded_code) + + +def ensure_patched(cls): + if getattr(cls, "___needs_mutation_patch", True): + cls.___needs_mutation_patch = False + original_setattr = cls.__setattr__ + + @functools.wraps(original_setattr) + def custom_setattr(self, key, value): + try: + MutationTracker.db[self].on_mutation(key) + except KeyError: + pass + return original_setattr(self, key, value) + + cls.__setattr__ = custom_setattr + + +class GenerationTracker: + generation = 0 + dynamic_classes = ExactWeakKeyDictionary() + generation_values = ExactWeakKeyDictionary() + + @classmethod + def tag(cls, obj): + cls.generation_values[obj] = cls.generation + + @staticmethod + def mark_class_dynamic(cls): + assert issubclass(cls, torch.nn.Module) + GenerationTracker.dynamic_classes[cls] = True + + @classmethod + def get_generation_value(cls, obj): + if obj not in cls.generation_values: + return -1 + return cls.generation_values[obj] + + @classmethod + def check(cls, obj): + return ( + obj in cls.generation_values + and cls.generation_values[obj] == cls.generation + ) + + +def is_dynamic_nn_module(obj): + """Check for nn.Modules() created dynamically or mutated""" + if isinstance(obj, torch.nn.Module) and "forward" in obj.__dict__: + # A monkey patched `.forward` indicates something wacky is going on + return True + if hasattr(obj, "torchdynamo_force_dynamic"): + return obj.torchdynamo_force_dynamic + if is_lazy_module(obj): + return False + dyn = GenerationTracker.dynamic_classes.get(type(obj)) or 
GenerationTracker.check( + obj + ) + return dyn + + +def install_generation_tagging_init(): + """ + Monkey patch torch.nn.Module.__init__ and torch.nn.Module.__setstate__ + so we can detect nn.Module instances created dynamically inside forward methods. + """ + + if getattr(Module, "___needs_generation_tag_patch", True): + init = Module.__init__ + + def patched_init(self, *args, **kwargs): + init(self, *args, **kwargs) + GenerationTracker.tag(self) + + Module.__init__ = patched_init + + setstate = Module.__setstate__ + + def patched_setstate(self, state): + setstate(self, state) + GenerationTracker.tag(self) + + Module.__setstate__ = patched_setstate + + Module.___needs_generation_tag_patch = False # type: ignore[attr-defined] + + GenerationTracker.generation += 1 diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/output_graph.py b/venv/lib/python3.10/site-packages/torch/_dynamo/output_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..1c5beddbd0c910b13b9422186571eda31e03a2ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/output_graph.py @@ -0,0 +1,2063 @@ +import collections +import contextlib +import copy +import functools +import itertools +import logging +import operator +import re +import sys +import traceback +import weakref +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Tuple, Union + +import sympy + +import torch._guards + +import torch._logging + +import torch.nn +import torch.utils._pytree as pytree +from torch import fx +from torch._guards import ( + Checkpointable, + GlobalContextCheckpointState, + GuardsCheckpointState, + Source, + TracingContext, +) +from torch._utils_internal import signpost_event +from torch.fx._lazy_graph_module import _make_graph_module # type: ignore[attr-defined] +from torch.fx.experimental._backward_state import BackwardState +from torch.fx.experimental.sym_node import SymNode +from torch.fx.experimental.symbolic_shapes import free_symbols, is_symbolic, ShapeEnv +from torch.utils._python_dispatch import is_traceable_wrapper_subclass +from torch.utils._sympy.interp import sympy_interp +from torch.utils._sympy.reference import PythonReferenceAnalysis +from torch.utils.weak import WeakTensorKeyDictionary + +from . 
import config, logging as torchdynamo_logging, variables +from .backends.registry import CompiledFn, CompilerFn +from .bytecode_transformation import ( + create_call_function, + create_instruction, + Instruction, + unique_id, +) +from .code_context import code_context +from .codegen import PyCodegen +from .current_scope_id import enter_new_scope +from .exc import ( + BackendCompilerFailed, + exceptions_allowed_to_be_fallback, + SkipFrame, + unimplemented, + unimplemented_with_warning, +) +from .guards import GuardBuilder, install_guard +from .mutation_guard import is_dynamic_nn_module +from .side_effects import SideEffects +from .source import ( + AttrSource, + BackwardStateSource, + ConstantSource, + GlobalStateSource, + is_constant_source, + is_from_local_source, + LocalSource, + ParamBufferSource, + ShapeEnvSource, + TensorProperty, + TensorPropertySource, +) +from .utils import ( + checkpoint_params, + CleanupHook, + clone_inputs, + count_calls, + counters, + dynamo_timed, + get_instruction_source_311, + get_static_address_type, + graph_break_reasons, + increment_op_count, + lazy_format_graph_code, + lazy_format_graph_tabular, + LazyString, + same, +) +from .variables.base import VariableTracker +from .variables.builder import ( + BackwardStateGraphArg, + GraphArg, + TrackedFake, + VariableBuilder, + wrap_fx_proxy, +) +from .variables.nn_module import NNModuleVariable +from .variables.tensor import ( + NumpyNdarrayVariable, + SymNodeVariable, + TensorVariable, + UnspecializedPythonVariable, +) + +from .variables.torch_function import TensorWithTFOverrideVariable + +log = logging.getLogger(__name__) +graph_tabular_log = torch._logging.getArtifactLogger(__name__, "graph") +graph_code_log = torch._logging.getArtifactLogger(__name__, "graph_code") +graph_sizes_log = torch._logging.getArtifactLogger(__name__, "graph_sizes") +trace_call_log = torch._logging.getArtifactLogger(__name__, "trace_call") + + +class OutputGraphState(NamedTuple): + input_source_to_var: Dict[Source, VariableTracker] + tracked_fakes: List[TrackedFake] + guard_state: GuardsCheckpointState + nn_modules: Optional[Dict[str, torch.nn.Module]] + register_finalizer_fns: List[Callable[[fx.GraphModule], None]] + global_state: Optional[Dict[str, bool]] + param_name_to_source: Optional[Dict[str, Source]] + side_effects: SideEffects + timestamp: int + non_compliant_ops: Set[torch._ops.OpOverload] + compliant_custom_ops: Set[torch._ops.OpOverload] + + def diff(self, other: "OutputGraphState", *, prefix: str = "") -> Optional[str]: + for k in self._fields: + if k == "guard_state": + r = self.guard_state.diff(other.guard_state) + if r is not None: + return r + continue + elif k == "side_effects": + r = self.side_effects.diff(other.side_effects) + if r is not None: + return r + continue + + sv = getattr(self, k) + ov = getattr(other, k) + if sv != ov: + return f"{prefix}{k} mismatch: {sv} != {ov}" + return None + + # Back compat .guards api + @property + def guards(self): + return self.guard_state.dynamo_guards + + +@functools.lru_cache(None) +def _step_logger(): + return torchdynamo_logging.get_step_logger(log) + + +@dataclass +class GraphCompileReason: + """Stores why a given output graph was compiled; i.e. what caused the graph break.""" + + reason: str + user_stack: List[traceback.FrameSummary] + + # Indicates if this was a graph compile reason due to graph break. 
+ graph_break: bool = True + + def __post_init__(self): + if self.graph_break: + graph_break_reasons.append(self) + + +def _get_gen_rand_values_fn(random_calls): + def _gen_rand_values(): + return [fn(*args, **kwargs) for fn, args, kwargs in random_calls] + + return _gen_rand_values + + +class FakeRootModule(torch.nn.Module): + """Trick the constructor of fx.GraphModule""" + + def __init__(self, nn_modules: Dict[str, torch.nn.Module]): + super().__init__() + for k, v in nn_modules.items(): + setattr(self, k, v) + + def __repr__(self): + return "FakeRootModule(...)" + + +class WrapperBackend: + def __init__(self, backend: CompilerFn): + self.backend: CompilerFn = backend + + def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): + self.restore = checkpoint_params(gm) + self.gm = gm + copy_gm = copy.deepcopy(self.gm) + self.candidate = self.backend(copy_gm, example_inputs) + + if self.candidate is None or self.candidate is self.gm.forward: + return self.gm.forward + + if not config.verify_correctness: + return self.candidate + + # if verify_correctness=True + try: + correct = self.gm.forward(*clone_inputs(example_inputs)) + result = self.candidate(*clone_inputs(example_inputs)) + + # TODO: replace `same` function with the one in testing + if same(correct, result): + return self.candidate + + raise RuntimeError(f"incorrect results of backend {self}") + return self.gm.forward + + except Exception: + log.exception("error in verify_correctness") + raise + finally: + self.restore() + + +Scope = Dict[str, object] + + +class OutputGraph(Checkpointable[OutputGraphState]): + """ + Wrapper class to hold outputs of InstructionTranslator. Mainly the + generated fx.Graph. + + OutputGraph is 1:1 with a frame being processed. Each frame is associated + with some root InstructionTranslator. When user code calls a function, + we construct a InliningInstructionTranslator that continues to write into + the root InstructionTranslator's OutputGraph. + """ + + def __init__( + self, + code_options: Dict[str, Any], + compiler_fn: Optional[CompilerFn], + root_tx, + export: bool, + export_constraints, + frame_state, + local_scope: Scope, + global_scope: Scope, + f_code, + ): + super().__init__() + self.tracers = [SubgraphTracer(self, export_root=export)] + # Map from graph input's `Source` to its `VariableTracker` to + # de-duplicate graph inputs by source and reuse the tracker + self.input_source_to_var: Dict[Source, VariableTracker] = {} + self.export = export + self.export_constraints = export_constraints + self.frame_state = frame_state + self.tensor_weakref_to_sizes_strides = WeakTensorKeyDictionary() + self.cleanup_hooks: List[Callable[[], Any]] = [] + # compile_id is an id number for the current torch.compile + self.compile_id: int = next(_compile_id_counter) + # Set of globals installed via install_global* APIs + self.installed_globals: Set[str] = set() + + # TODO: maybe should just pass the entire f_code in here? Not + # sure... + self.co_fields = { + "co_name": f_code.co_name, + "co_filename": f_code.co_filename, + "co_firstlineno": f_code.co_firstlineno, + } + + # tracked_fakes says where any tensor that was wrapped to fake came + # from. It is similar to GraphArg, in that all GraphArgs will get + # will get added to TrackedFakes, but TrackedFakes also contains + # GraphArgs that got pruned, and things like Tensor attributes which + # aren't explicit graph inputs. 
Used by shape guard + self.tracked_fakes: List[TrackedFake] = [] + + # List of symbols for which we have exact bindings in the arguments + # already + self.bound_symbols: Set[sympy.Symbol] = set() + + shape_env = ShapeEnv( + # Reference Cycle! + # Share a reference to the list of TrackedFake. + # + # ShapeEnv needs this in order to be able to reproduce the call + # to produce_guards at an arbitrary time point. That is because + # TrackedFake instances may have its metadata changed throughout + # the program execution. + tracked_fakes=self.tracked_fakes, + allow_scalar_outputs=config.capture_scalar_outputs, + allow_dynamic_output_shape_ops=config.capture_dynamic_output_shape_ops, + co_fields=self.co_fields, + ) + + # In export mode, we force the shape_env to strictly disallow any constraining + # of the user marked dynamic dims + fake_mode = torch._subclasses.FakeTensorMode( + shape_env=shape_env, + # TODO (tmanlaibaatar) Remove this once we always lift params and buffers + allow_non_fake_inputs=True if self.export else False, + ) + self.tracing_context: TracingContext = TracingContext(fake_mode) + self.init_ambient_guards() + + # Map each tensor id to a list of sources. This is necessary because + # tensor ids cannot be recovered from tracked fakes (in general). + # We use this map to interpret (i.e., check for violations of) constraints, + # specifically equality constraints, which have shared tensor ids in them. + # This map should also be generally useful, e.g., for (de)serialization. + self.tracked_fakes_id_to_source: Dict[ + int, List[Source] + ] = collections.defaultdict(list) + # Stores the full fqn of a param or buffer to the relevant source. + self.param_name_to_source: Optional[Dict[str, Source]] = dict() + self.side_effects = SideEffects() + self.code_options = dict(code_options) + self.output_instructions: List[Instruction] = [] + # used to track nodes that are added between calls of copy_graphstate + # and restore_graphstate + self.timestamp = 0 + + # A list of register_finalizer_fns to apply to the output graph module + self.register_finalizer_fns: List[Callable[[fx.GraphModule], None]] = [] + + # Not checkpointed + self.compiler_fn: Optional[CompilerFn] = compiler_fn + self.global_scope = global_scope + self.local_scope = local_scope + self.root_tx = root_tx + from torch._dynamo.symbolic_convert import InstructionTranslatorBase + + # Given a source, what are the user stacks of all locations that + # accessed it? + # + # For efficiency, we only populate this: + # - During export, and + # - If the source could potentially lead to a spurious export input + # + # Feel free to populate this more frequently if other use-cases arise, + # but be aware that we have to generate full stacks for each + # recording! + self.source_to_user_stacks: Dict[Source, List[traceback.StackSummary]] = {} + + self._current_tx: List[InstructionTranslatorBase] = [] + self.cleanups: List[CleanupHook] = [] + self.should_exit = False + self.unspec_variable_map: Dict[str, UnspecializedPythonVariable] = {} + self.torch_function_enabled = torch._C._is_torch_function_enabled() + # Tracks if the output graph has a user defined allowed function in the + # graph. This is used later to determine if we should fallback to eager + # for certain exceptions. THe idea is that if the user has applied + # allow_in_graph, they would like to see the error instead of falling + # back for backend errors. 
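For context, this flag is set when tracing code like the following sketch; because the user explicitly opted fancy_op (a hypothetical name) into the graph, a backend compiler error involving it is surfaced instead of silently falling back to eager:

    import torch
    import torch._dynamo as dynamo

    @dynamo.allow_in_graph
    def fancy_op(x):          # hypothetical user function forced into the FX graph
        return x + 1

    @torch.compile
    def fn(x):
        return fancy_op(x) * 2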
+ self.has_user_defined_allowed_in_graph = False + + # Tracks a list of called ops that were not tagged with "pt2_compliant_tag". + # This information is useful for logging. + self.non_compliant_ops: Set[torch._ops.OpOverload] = set({}) + + # Tracks a list of called custom ops that were tagged with "pt2_compliant_tag". + # This information is useful for logging. + self.compliant_custom_ops: Set[torch._ops.OpOverload] = set({}) + + # We save the global torch state here to be restored in case of graph + # breaks. The relevant issue is seen here + # https://github.com/pytorch/pytorch/pull/100570#issuecomment-1543427086 + # where inlining of a function changes the global state (because of the + # presence of torch.no_grad) and there is a graph break. + self.save_global_state() + + # Tracks the original FQNs of the constant tensors from the original graph, + # i.e. buffers and parameters. + self.dynamo_flat_name_to_original_fqn: Dict[str, str] = {} + + # All calls to random() are replaced with a single call to __gen_rand_values + # functions that returns a tuple of random values for each original call. + # random_calls tracks calls to random() and random_values_var stores the name of + # the variable that stores __gen_rand_values results. + self.random_calls: List[ + Tuple[Callable[..., object], Tuple[object, ...], Dict[str, object]] + ] = [] + self.random_values_var = None + + # Bytecode to insert right before we call the graph + self.pregraph_bytecode: List[Instruction] = [] + + # Use to pass values to backward hooks when using compiled autograd + self.backward_state: Dict[str, VariableTracker] = {} + self.backward_state_proxy: Optional[torch.fx.Proxy] = None + self.backward_state_var: Optional[str] = None + + def add_backward_state_hook(self, hook: VariableTracker): + name = f"hook{len(self.backward_state)}" + assert name not in self.backward_state + self.backward_state[name] = hook + return name, self.get_backward_state_proxy() + + def get_backward_state_proxy(self): + if self.backward_state_proxy is None: + if self.export: + unimplemented("backward_state does not support export") + self.backward_state_proxy = self.root_tracer.create_graph_input( + "dynamo_backward_state", BackwardState, source=BackwardStateSource() + ) + self.backward_state_proxy.node.meta["grapharg"] = BackwardStateGraphArg() + self.backward_state_proxy.node.meta["example_value"] = BackwardState() + self.backward_state_var = self.new_var() + return self.backward_state_proxy + + # This gets its own helper function so guards DEBUG logs are more informative + def init_ambient_guards(self): + # Register a SHAPE_ENV guard to make sure we setup shape guards + # that show up in ShapeEnv + self.guards.add(ShapeEnvSource().make_guard(GuardBuilder.SHAPE_ENV)) + + self.guards.add( + GlobalStateSource().make_guard(GuardBuilder.DETERMINISTIC_ALGORITHMS) + ) + + self.guards.add(GlobalStateSource().make_guard(GuardBuilder.GRAD_MODE)) + + self.guards.add(GlobalStateSource().make_guard(GuardBuilder.DEFAULT_DEVICE)) + + self.guards.add( + GlobalStateSource().make_guard(GuardBuilder.TORCH_FUNCTION_STATE) + ) + + self.guards.add(GlobalStateSource().make_guard(GuardBuilder.BACKEND_MATCH)) + + def add_cleanup_hook(self, fn: Callable[[], Any]): + self.cleanup_hooks.append(fn) + + def call_cleanup_hooks(self): + for hook in reversed(self.cleanup_hooks): + hook() + self.cleanup_hooks.clear() + + @property + def root_tracer(self): + return self.tracers[0] + + @property + def current_tracer(self): + return self.tracers[-1] + + def 
is_root_tracer(self): + # Helper to tell if we are inside the higher order operator tracing. + return len(self.tracers) == 1 + + @property + def graph(self): + return self.current_tracer.graph + + # TODO(rzou): can delete after we refactor speculate_subgraph to use nested GraphTracer. + @graph.setter + def graph(self, value): + self.current_tracer.graph = value + + @property + def input_name_to_proxy(self): + return self.current_tracer.input_name_to_proxy + + @property + def real_value_cache(self): + return self.current_tracer.real_value_cache + + # If you are here, and you're looking for create_graph_input, + # to avoid ambiguity, please call one of the following: + # - self.current_tracer.create_graph_input + # - self.root_tracer.create_graph_input + # See NOTE [HigherOrderOperator tracing design] for more context. + + def create_proxy(self, *args, **kwargs): + return self.current_tracer.create_proxy(*args, **kwargs) + + def create_node(self, *args, **kwargs): + return self.current_tracer.create_node(*args, **kwargs) + + def remove_node(self, *args, **kwargs): + return self.current_tracer.remove_node(*args, **kwargs) + + @contextlib.contextmanager + def subtracer(self, source_target, prior_tracer): + new_scope_ctx = enter_new_scope() + try: + if prior_tracer: + # Lineage MUST stay preserved + assert prior_tracer.parent is self.current_tracer + new_scope_ctx.__enter__() + tracer = ( + prior_tracer + if prior_tracer + else SubgraphTracer( + self, parent=self.current_tracer, source_target=source_target + ) + ) + self.tracers.append(tracer) + yield tracer + finally: + new_scope_ctx.__exit__(None, None, None) + self.tracers.pop() + + @property + def output(self): + return self + + @property + def fake_mode(self): + return self.tracing_context.fake_mode + + @property + def shape_env(self): + return self.tracing_context.fake_mode.shape_env + + @property + def guards(self) -> torch._guards.GuardsSet: + return self.tracing_context.guards_context.dynamo_guards + + @property + def nn_modules(self) -> Dict[str, Any]: + return self.tracing_context.module_context.nn_modules + + def save_global_state(self, out=None): + """ + Saves to out if it is provided. Else saves to the tracing context's global_state. + """ + global_state = ( + out if out is not None else self.tracing_context.global_context.global_state + ) + + # TODO - Consider having a torch level API for torch_function_state. As + # of now, we create a ref cycle by passing the + # output.set_torch_function_state to + # output.tracing_context.global_context.global_state. In the interim, + # the problem can be solved by manually set + # output.tracing_context.global_context.global_state to None at cleanup. 
+ global_state["torch_function_enabled"] = ( + self.set_torch_function_state, + self.torch_function_enabled, + ) + global_state["grad_enabled"] = (torch.set_grad_enabled, torch.is_grad_enabled()) + global_state["autocast_enabled"] = ( + torch.set_autocast_enabled, + torch.is_autocast_enabled(), + ) + global_state["autocast_cpu_enabled"] = ( + torch.set_autocast_cpu_enabled, + torch.is_autocast_cpu_enabled(), + ) + global_state["autocast_gpu_dtype"] = ( + torch.set_autocast_gpu_dtype, + torch.get_autocast_gpu_dtype(), + ) + global_state["autocast_cpu_dtype"] = ( + torch.set_autocast_cpu_dtype, + torch.get_autocast_cpu_dtype(), + ) + global_state["autocast_cache_enabled"] = ( + torch.set_autocast_cache_enabled, + torch.is_autocast_cache_enabled(), + ) + + def push_tx(self, tx): + self._current_tx.append(tx) + + def pop_tx(self): + return self._current_tx.pop() + + @property + def current_tx(self): + return self.root_tx if not self._current_tx else self._current_tx[-1] + + def copy_graphstate(self) -> OutputGraphState: + """Create a checkpoint of the current state by copying everything""" + assert self.param_name_to_source is not None + guards_graph_state = self.tracing_context.guards_context.copy_graphstate() + module_state = self.tracing_context.module_context.copy_graphstate() + global_state = self.tracing_context.global_context.copy_graphstate() + state = OutputGraphState( + dict(self.input_source_to_var), + list(self.tracked_fakes), + guards_graph_state, + module_state, + list(self.register_finalizer_fns), + global_state, + dict(self.param_name_to_source), + self.side_effects.clone(), + self.timestamp, + set(self.non_compliant_ops), + set(self.compliant_custom_ops), + ) + self.timestamp += 1 + return state + + def restore_graphstate(self, state: OutputGraphState): + """Restore a checkpoint created by self.copy_graphstate()""" + ( + self.input_source_to_var, + self.tracked_fakes, + guards_state, + module_state, + self.register_finalizer_fns, + global_state, + self.param_name_to_source, + self.side_effects, + self.timestamp, + self.non_compliant_ops, + self.compliant_custom_ops, + ) = state + self.tracing_context.guards_context.restore_graphstate(guards_state) + self.tracing_context.module_context.restore_graphstate(module_state) + self.tracing_context.global_context.restore_graphstate(global_state) + + # FX deepcopy doesn't work for a partially created graph, so just remove new nodes + removed_nodes = 0 + for node in reversed(list(self.graph.nodes)): + if ( + node.meta["creation_timestamp"] > self.timestamp + # placeholders here may have been lazily added by existing objects + and node.op != "placeholder" + ): + # Erasing node alone does not remove the meta information + # So, remove the help tensor explicitly + if "example_value" in node.meta: + del node.meta["example_value"] + self.remove_node(node) + self.real_value_cache.pop(node, None) + removed_nodes += 1 + log.debug("restore_graphstate: removed %s nodes", removed_nodes) + + def add_symbol_bindings(self, arg: GraphArg): + # Insert implicit size vars as necessary. With dynamic shapes, we + # maintain the invariant that every sizevar gets a direct SymInt input + # into the graph. This means downstream graph transforms can assume + # every size variable is explicitly bound and accessible, instead of + # having to pull it out implicitly from tensors. 
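A rough end-to-end illustration of the invariant described above, using the public mark_dynamic API; the symbol name s0 is whatever the ShapeEnv happens to allocate:

    import torch

    x = torch.randn(8, 4)
    torch._dynamo.mark_dynamic(x, 0)   # request a symbolic size for dim 0

    @torch.compile
    def fn(t):
        return t.reshape(t.size(0), -1)

    fn(x)  # besides the tensor itself, the captured graph receives an explicit
           # SymInt placeholder binding the size symbol (e.g. s0) for dim 0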
+ + if self.export: + return + + assert arg.fake_tensor is not None + + def bind_symint(s, prop): + if not (is_symbolic(s) and isinstance(s.node.expr, sympy.Symbol)): + return + s0 = s.node.expr + if s0 in self.bound_symbols: + return + self.bound_symbols.add(s0) + log.debug("bind_symint %s %s", s, prop.name()) + # TODO: don't readd symint if we already have it in graph + # (this is harmless because we do remove the unused ones later) + proxy = self.root_tracer.create_graph_input( + str(s0), + torch.SymInt, + before=True, + source=prop, + ) + proxy.node.meta["example_value"] = s + proxy.node.meta["grapharg"] = GraphArg( + prop, + s, + is_unspecialized=False, + fake_tensor=None, + is_tensor=False, + ) + + def handle_tensor(t, src): + for i, s in enumerate(t.size()): + bind_symint(s, TensorPropertySource(src, TensorProperty.SIZE, i)) + for i, s in enumerate(t.stride()): + bind_symint(s, TensorPropertySource(src, TensorProperty.STRIDE, i)) + bind_symint( + t.storage_offset(), + TensorPropertySource(src, TensorProperty.STORAGE_OFFSET), + ) + if is_traceable_wrapper_subclass(t): + attrs, ctx = t.__tensor_flatten__() + for attr in attrs: + inner_t = getattr(t, attr) + handle_tensor(inner_t, AttrSource(src, attr)) + + handle_tensor(arg.fake_tensor, arg.source) + + def count_calls(self): + return count_calls(self.graph) + + def is_empty_graph(self): + return len(list(self.graph.nodes)) == 0 + + def get_submodule(self, keys): + assert keys + obj: Union[torch.nn.Module, Dict[str, torch.nn.Module]] = self.nn_modules + for k in keys.split("."): + if isinstance(obj, dict): + obj = obj[k] + else: + obj = getattr(obj, k) + return obj + + def new_var(self, name="tmp"): + existing = set(self.code_options["co_varnames"]) + for i in itertools.count(): + var = f"{name}_{i}" + if var not in existing: + self.code_options["co_varnames"] += (var,) + return var + + def update_co_names(self, name): + """Ensure self.code_options.co_names contains name""" + if name not in self.code_options["co_names"]: + self.code_options["co_names"] += (name,) + + @staticmethod + def module_key_name(*names): + # create a new unique name + name = "_".join(map(str, names)) + # Strip the guard lookup L/G access + name = re.sub(r"^[GL]\['?(.*?)'?\]$", r"\1", name) + # e.g. replace abc.xyz[123].qkv with abc.xyz_123.qkv + name = re.sub(r"\[(\d+)\]", r"_\g<1>", name) + # e.g. replace abc.xyz_123.qkv with abc_xyz_123_qkv + name = re.sub(r"[^a-zA-Z0-9]", "_", name) + + if not name or not name[0].isalpha(): + name = "sub" + name + + return name + + def register_attr_or_module( + self, + target: Union[torch.nn.Module, torch.Tensor, Any], + *names, + **options, + ): + if is_dynamic_nn_module(target): + return variables.UnspecializedNNModuleVariable(target, **options) + + options = dict(options) + assert "source" in options + source = options["source"] + assert not isinstance(source, ParamBufferSource) + + if isinstance(target, torch.Tensor): + tracer = self.current_tracer + if not self.is_root_tracer(): + # For higher order ops, we don't want to insert the get_attr in + # innermost graph. Instead, we want to raise the params/buffers + # as inputs to the higher-order graph, and register them as + # get_attrs in the root tracer. + + # Note that Dynamo will still call lift_tracked_freevar_to_input + # when these inputs are encountered for the inner graph. The + # only difference is what happens at the root tracer for + # nn.Parameters vs free inputs. 
The free inputs are registered + # as placeholders in the root graph, whereas the nn.Parameters + # are registered as get_attr nodes in the root graph. + tracer = self.root_tracer + + if not is_constant_source(source): + install_guard(source.make_guard(GuardBuilder.TENSOR_MATCH)) + + if get_static_address_type(target) == "guarded": + install_guard(source.make_guard(GuardBuilder.DATA_PTR_MATCH)) + + def wrap_name(module_key): + assert self.param_name_to_source is not None + self.param_name_to_source[module_key] = source + + return wrap_fx_proxy( + self.root_tx, + tracer.create_proxy("get_attr", module_key, tuple(), {}), + example_value=target, + **options, + ) + + elif isinstance(target, torch.nn.Module): + assert isinstance(target, torch.nn.Module) + + install_guard(source.make_guard(GuardBuilder.NN_MODULE)) + + def wrap_name(module_key): + return NNModuleVariable(type(target), module_key, target, **options) + + elif isinstance(target, (torch.SymInt, torch.SymFloat)): + # HACKY CODE REGION BEGIN + # WE ARE PIGGYBACKING ON EXISTING INFRA TO REGISTER ATTRS + # This ultimately gets written to self.nn_modules, which is unfortunate + # Attrs that are tenors and symints and such need to be migrated to have their + # own storage + # alas, this is like this for now + + def wrap_name(module_key): + return SymNodeVariable.create( + self, + self.create_proxy("get_attr", module_key, tuple(), {}), + sym_num=target, + **options, + ) + + # HACKY CODE REGION END + else: + + def wrap_name(module_key): + self.output.update_co_names(module_key) + self.global_scope[module_key] = target + return VariableBuilder(self, ConstantSource(source_name=module_key))( + target + ) + + for k, v in self.nn_modules.items(): + if v is target: + # it already exists + return wrap_name(k) + + name = OutputGraph.module_key_name(*names) + + base = name + for i in itertools.count(): + if name not in self.nn_modules: + self.nn_modules[name] = target + if isinstance(target, torch.nn.Module): + + def register_leaf_name(leaf_name): + assert self.param_name_to_source is not None + new_source = ParamBufferSource(source, leaf_name) + new_name = f"{name}.{leaf_name}" + self.param_name_to_source[new_name] = new_source + if isinstance(source, LocalSource): + self.dynamo_flat_name_to_original_fqn[ + OutputGraph.module_key_name(new_source.name()) + ] = leaf_name + + # annoying, but there are cases when we do not have parameters + # see test_nn_moduledict_contains + if hasattr(target, "_parameters"): + for leaf_name, _ in target.named_parameters(): + register_leaf_name(leaf_name) + if hasattr(target, "_buffers"): + for leaf_name, _ in target.named_buffers(): + register_leaf_name(leaf_name) + + return wrap_name(name) + name = f"{base}_{i}" + + raise AssertionError("unreachable") + + def compile_subgraph( + self, tx, partial_convert=False, reason: Optional[GraphCompileReason] = None + ): + """ + Generate a subgraph to continue execution on user code. + Automatically restore live variables. 
+ """ + assert reason is not None + + from .decorators import disable + + self.partial_convert = partial_convert + self.compile_subgraph_reason = reason + self.should_exit = True + + log.debug("COMPILING GRAPH due to %s", reason) + + if not all(block.can_restore() for block in tx.block_stack): + unimplemented("compile_subgraph with block_depth != 0") + + prefix_insts: List[Instruction] = [] + if sys.version_info >= (3, 11): + # prefix instructions (Python 3.11+) + for inst in tx.prefix_insts: + if inst.opname == "MAKE_CELL": + prefix_insts.append( + create_instruction("MAKE_CELL", argval=inst.argval) + ) + elif inst.opname == "COPY_FREE_VARS": + prefix_insts.append( + create_instruction( + "COPY_FREE_VARS", arg=len(tx.code_options["co_freevars"]) + ) + ) + else: + prefix_insts.append(copy.copy(inst)) + assert not ( + self.pregraph_bytecode and self.export + ), "export does not support pregraph_bytecode" + prefix_insts.extend(self.pregraph_bytecode) + + def append_prefix_insts(): + self.add_output_instructions(prefix_insts) + prefix_insts.clear() + + for block in reversed(tx.block_stack): + block.exit(tx) + + self.cleanup_graph() + tx.prune_dead_locals() + stack_values = list(tx.stack) + root = FakeRootModule(self.nn_modules) + # Add all the local vars to the "stack" so restore at the end + restore_vars = [] + val_to_names: Dict[VariableTracker, List[str]] = {} + if stack_values: + val_to_names[stack_values[-1]] = list() + # NB: Typically (i.e., for graph compile from RETURN_VALUE), + # symbolic_locals will be empty at this point, as prune_dead_locals + # will clear out all of symbolic_locals because RETURN_VALUE is the + # last instruction and no more locals are used. The fanciness here + # is only needed for partial graphs. + for k, v in tx.symbolic_locals.items(): + # Note! this explicitly uses .local_name for matching + # Failure to do so will cause spurious registrations in val_to_names. + # This will in turn result in spurious variables showing up in the graph. + # This was very tricky to debug. 
For an example, dump the graph at call_user_compiler + # while running test_subgraphs.py + if isinstance(v.source, LocalSource) and v.source.local_name == k: + continue # no need to restore initial state + if v not in val_to_names: + val_to_names[v] = list() + val_to_names[v].append(k) + for v in val_to_names.keys(): + restore_vars.extend(val_to_names[v]) + stack_values.extend([v] * len(val_to_names[v])) + + # to handle random calls + if len(self.random_calls) > 0: + append_prefix_insts() + random_calls_instructions = [] + self.random_values_var = self.new_var("random_values") + rand_fn = disable(_get_gen_rand_values_fn(self.random_calls)) + rand_fn_name = self.install_global("__gen_rand_values", rand_fn) + codegen = PyCodegen(tx, root) + random_calls_instructions.extend( + codegen.load_function_name(rand_fn_name, True) + ) + random_calls_instructions.extend(create_call_function(0, False)) + random_calls_instructions.append( + codegen.create_store(tx.output.random_values_var), + ) + self.add_output_instructions(random_calls_instructions) + + if ( + stack_values + and all( + not isinstance( + v, + ( + UnspecializedPythonVariable, + NumpyNdarrayVariable, + TensorWithTFOverrideVariable, + ), + ) + for v in stack_values + ) + and all(isinstance(x, TensorVariable) for x in stack_values) + and len(set(stack_values)) == len(stack_values) + and self.side_effects.is_empty() + and not len(tx.debug_locals) != 0 + and not self.backward_state + ): + append_prefix_insts() + # optimization to generate better code in a common case + self.add_output_instructions( + self.compile_and_call_fx_graph(tx, list(reversed(stack_values)), root) + + [create_instruction("UNPACK_SEQUENCE", arg=len(stack_values))] + ) + else: + graph_output_var = self.new_var("graph_out") + pass1 = PyCodegen(tx, root, graph_output_var) + self.codegen_suffix(tx, stack_values, pass1) + + # one more time now that we have established tempvars + pass2 = PyCodegen( + tx, + root, + graph_output_var, + tempvars={val: None for val, count in pass1.uses.items() if count > 1}, + ) + self.codegen_suffix(tx, stack_values, pass2) + + output = [] + if count_calls(self.graph) != 0 or len(pass2.graph_outputs) != 0: + output.extend( + self.compile_and_call_fx_graph(tx, pass2.graph_output_vars(), root) + ) + + if len(pass2.graph_outputs) != 0: + output.append(pass2.create_store(graph_output_var)) + else: + output.append(create_instruction("POP_TOP")) + append_prefix_insts() + self.add_output_instructions(output + pass2.get_instructions()) + + # restore all the live local vars + self.add_output_instructions( + [PyCodegen(tx).create_store(var) for var in reversed(restore_vars)] + ) + + def codegen_suffix(self, tx, stack_values, cg): + if self.backward_state: + assert not self.export + for name, val in self.backward_state.items(): + cg(val) + cg.append_output(cg.create_load(self.backward_state_var)) + cg.store_attr(name) + self.side_effects.codegen_hooks(cg) + self.side_effects.codegen_save_tempvars(cg) + + # Return variables used for logging at the end + for debug_var, args in tx.debug_locals: + cg(debug_var) + for arg in args: + cg(arg) + cg.extend_output(create_call_function(len(args), True)) + + cg.restore_stack(stack_values, value_from_source=not tx.export) + self.side_effects.codegen_update_mutated(cg) + + def cleanup_graph(self): + """ + Remove "creation_timestamp" from node meta + + Remove this pattern from the graph: + torch._C._set_grad_enabled(False) + torch._C._set_grad_enabled(True) + """ + assert self.should_exit + nodes = list(self.graph.nodes) 
+ for node in nodes: + node.meta.pop("creation_timestamp", None) + + grad_enabled = torch.is_grad_enabled() + for node1, node2 in zip(nodes, nodes[1:]): + if ( + node1.target is torch._C._set_grad_enabled + and tuple(node1.args) == (not grad_enabled,) + and not node1._erased + ): + grad_enabled = node1.args[0] + if ( + node2.target is torch._C._set_grad_enabled + and tuple(node2.args) == (not grad_enabled,) + and not node2._erased + ): + grad_enabled = node2.args[0] + self.graph.erase_node(node1) + self.graph.erase_node(node2) + + def get_graph_sizes_structured(self): + ret = {} + for node in self.graph.nodes: + example_value = node.meta.get("example_value", None) + if isinstance(example_value, torch._subclasses.FakeTensor): + size = example_value.size() + ret[node.name] = [s if isinstance(s, int) else repr(s) for s in size] + return ret + + def get_graph_sizes(self, name: str): + graph_sizes_str = "TRACED GRAPH TENSOR SIZES\n" + graph_sizes_str += f"===== {name} =====\n" + for node in self.graph.nodes: + example_value = node.meta.get("example_value", None) + if isinstance(example_value, torch._subclasses.FakeTensor): + size = example_value.size() + graph_sizes_str += f"{node.name}: {tuple(size)}\n" + concrete_size = [] + has_symint = False + for sz in size: + if isinstance(sz, int): + concrete_size.append(sz) + elif isinstance(sz, torch.SymInt): + has_symint = True + concrete_size.append(sz.node.hint) + else: + break + else: + if has_symint: + graph_sizes_str += ( + f"{node.name} (concrete): {tuple(concrete_size)}\n" + ) + return graph_sizes_str + + @contextlib.contextmanager + def restore_global_state(self): + """ + Momentarily restores the global state to what it was prior to tracing the current output + """ + prior_global_state = self.tracing_context.global_context.copy_graphstate() + current_global_state: Dict[str, Tuple[Any, bool]] = {} + self.save_global_state(out=current_global_state) + try: + # Set to state prior to tracing the graph + self.tracing_context.global_context.restore_graphstate(prior_global_state) + yield + finally: + # Reset to state at the current time (e.g. before calling the user compiler) + self.tracing_context.global_context.restore_graphstate( + GlobalContextCheckpointState(current_global_state) + ) + + @torch._guards.TracingContext.clear_frame() + def compile_and_call_fx_graph(self, tx, rv, root): + """ + Generate code from self.graph and return the Instruction()s to + call that generated code. 
+ """ + from .decorators import disable + + assert self.should_exit + + name = unique_id("__compiled_fn") + + assert isinstance(rv, list) + assert isinstance(root, FakeRootModule) + self.create_node( + "output", + "output", + (self.current_tracer.create_arg(tuple(x.as_proxy() for x in rv)),), + {}, + ) + self.insert_deferred_runtime_asserts(root, name) + # NB: deferred runtime asserts can keep graphargs live, so make sure + # those are inserted before pruning + self.remove_unused_graphargs() + ncalls = count_calls(self.graph) + counters["stats"]["calls_captured"] += ncalls + + # free a bit of memory + self.real_value_cache.clear() + + gm = _make_graph_module(root, self.graph) + for register_finalizer in self.register_finalizer_fns: + register_finalizer(gm) + + gm.compile_subgraph_reason = self.compile_subgraph_reason + gm.meta[ + "dynamo_flat_name_to_original_fqn" + ] = self.dynamo_flat_name_to_original_fqn.copy() + + graph_code_log.debug("%s", lazy_format_graph_code(name, gm)) + torch._logging.trace_structured( + "dynamo_output_graph", + lambda: {"sizes": self.get_graph_sizes_structured()}, + payload_fn=lambda: gm.print_readable(print_output=False), + ) + graph_tabular_log.debug("%s", lazy_format_graph_tabular(name, gm)) + graph_sizes_log.debug("%s", LazyString(lambda: self.get_graph_sizes(name))) + self.call_cleanup_hooks() + old_fake_mode = self.tracing_context.fake_mode + if not self.export: + # TODO(voz): The way export uses gm, and fake tensors, is not supported with us resetting + backend_fake_mode = torch._subclasses.FakeTensorMode( + shape_env=old_fake_mode.shape_env, + ) + # TODO(voz): Ostensibily, this should be scoped and + # restore back to old_fake_mode, but doing so currently violates + # a lot of fake_tensor ownership assumptions and runs afoul of detect_fake_mode + self.tracing_context.fake_mode = backend_fake_mode + + with self.restore_global_state(): + compiled_fn = self.call_user_compiler(gm) + compiled_fn = disable(compiled_fn) + + counters["stats"]["unique_graphs"] += 1 + # This is safe because we pre-process name to be unique + self.install_global_unsafe(name, compiled_fn) + + cg = PyCodegen(tx) + cg.make_call_generated_code(name) + return cg.get_instructions() + + @property + def placeholders(self) -> List[fx.Node]: + r = [] + for node in self.graph.nodes: + if node.op == "placeholder": + r.append(node) + continue + break + return r + + @property + def graphargs(self) -> List[GraphArg]: + return [node.meta["grapharg"] for node in self.placeholders] + + @dynamo_timed(phase_name="backend_compile") + def call_user_compiler(self, gm: fx.GraphModule) -> CompiledFn: + assert self.compiler_fn is not None + tot = 0 + placeholders = [] + for node in gm.graph.nodes: + if node.op in ("call_function", "call_method", "call_module"): + tot += 1 + if node.op == "placeholder": + placeholders.append(node) + increment_op_count(tot) + for pl in placeholders: + arg = pl.meta["grapharg"] + # TODO: Why isn't this stored in meta :think: + pl._dynamo_source = arg.source + + gm._param_name_to_source = self.param_name_to_source # type: ignore[assignment] + gm._source_to_user_stacks = self.source_to_user_stacks # type: ignore[assignment] + + try: + name = ( + self.compiler_fn.__name__ + if hasattr(self.compiler_fn, "__name__") + else "" + ) + _step_logger()(logging.INFO, f"calling compiler function {name}") + compiler_fn = self.compiler_fn + if config.verify_correctness: + compiler_fn = WrapperBackend(compiler_fn) + compiled_fn = compiler_fn(gm, self.example_inputs()) + 
_step_logger()(logging.INFO, f"done compiler function {name}") + assert callable(compiled_fn), "compiler_fn did not return callable" + except exceptions_allowed_to_be_fallback as e: + if self.has_user_defined_allowed_in_graph: + raise BackendCompilerFailed(self.compiler_fn, e).with_traceback( + e.__traceback__ + ) from None + msg = ( + "Backend compiler failed with a fake tensor exception at \n" + f"{self.root_tx.format_frame_summary()}" + "Adding a graph break." + ) + unimplemented_with_warning(e, self.root_tx.f_code, msg) + except SkipFrame as e: + # The backend compiler has requested that we skip the frame, instead of + # aborting execution. + raise e + except Exception as e: + raise BackendCompilerFailed(self.compiler_fn, e).with_traceback( + e.__traceback__ + ) from None + + signpost_event( + "dynamo", + "OutputGraph.call_user_compiler", + { + **self.co_fields, + "op_count": tot, + "node_count": len(gm.graph.nodes), + "input_count": len(placeholders), + }, + ) + + return compiled_fn + + def example_inputs(self) -> List[torch.Tensor]: + result = [] + for arg in self.graphargs: + result.append(arg.example) + return result + + def remove_unused_graphargs(self) -> None: + assert self.should_exit + # Miniature DCE pass, but only for obviously trivial operations + for node in reversed(list(self.graph.nodes)): + if len(list(node.users)) == 0: + if node.op == "get_attr": + self.remove_node(node) + elif node.op == "call_function" and node.target is operator.getitem: + self.remove_node(node) + + def placeholder_binds_symbol(node): + arg = node.meta["grapharg"] + example = arg.example + if isinstance(example, torch.SymInt) and isinstance( + example.node.expr, sympy.Symbol + ): + return example.node.expr + return None + + def remove_unused(node): + log.debug("REMOVE UNUSED GRAPHARG %s", node.meta["grapharg"].source.name()) + # I'm not really sure why you need to delete these from the + # node since the node is going to get removed + del node.meta["grapharg"] + self.remove_node(node) + self.real_value_cache.pop(node, None) + + used_symbols = set() + recheck_placeholders = [] + for node in self.placeholders: + binds_symbol = placeholder_binds_symbol(node) is not None + # Don't delete symbol bindings yet + if binds_symbol: + if not node.users: + recheck_placeholders.append(node) + else: + if not node.users and not isinstance( + node.meta["grapharg"], BackwardStateGraphArg + ): + remove_unused(node) + else: + # Register the free symbols as uses + arg = node.meta["grapharg"] + if isinstance(arg, BackwardStateGraphArg): + continue + fake = ( + arg.fake_tensor if arg.fake_tensor is not None else arg.example + ) + used_symbols |= free_symbols(fake) + + # After removing unused graphargs, prune unused binds_symbol + for node in recheck_placeholders: + symbol = placeholder_binds_symbol(node) + if symbol is not None: + if symbol not in used_symbols: + remove_unused(node) + else: + # Make sure we delete later occurrences of the same symbol + used_symbols.remove(symbol) + + # TODO: this is a generic pass that should live outside of Dynamo + def insert_deferred_runtime_asserts(self, root, name) -> None: + """ + During tracing, we may have discovered that some data-dependent values + had runtime assert on them; e.g., torch.empty(x.item()) induces a runtime + that x.item() >= 0. This asserts can happen unpredictably during fake + tensor propagation, so we cannot conveniently insert them into the FX graph + when they occur. 
Instead, we accumulate them in the ShapeEnv, and in this + pass insert them into the graph as proper tests. + """ + # TODO: Request simplification on runtime asserts before emitting them + ras_by_symbol = self.shape_env.deferred_runtime_asserts.copy() + + if not any(ras for ras in ras_by_symbol.values()): + return + + gm = fx.GraphModule(root, self.graph) + graph_code_log.debug( + "%s", + lazy_format_graph_code(f"pre insert_deferred_runtime_asserts {name}", gm), + ) + + # We are going to mutate the dict + symbol_to_proxy = {} + placeholders = set() + last_placeholder = None + for node in self.graph.nodes: + if node.op != "placeholder": + last_placeholder = node + break + placeholders.add(node) + assert last_placeholder is not None + + # Identify what symbols we need to reify. This isn't strictly needed + # but helps reduce churn on the graph + needed_symbols: Set[sympy.Symbol] = set() + for ras in ras_by_symbol.values(): + for ra in ras: + needed_symbols.update(free_symbols(ra.expr)) + + log.debug("needed_symbols = %s", needed_symbols) + + for node in self.graph.nodes: + # Placeholders can match symbols, but when we destructure them + # with size we have to make sure we insert the nodes after all + # the placeholders + with self.graph.inserting_before( + node.next if node not in placeholders else last_placeholder.next + ): + if "example_value" not in node.meta: + continue + + defs = [] + + # For every new unbacked symbol, we need an fx.Node representing + # precisely this value. There are a few places where the unbacked + # symbol could have come from, and we will check them to setup + # these nodes. + # + # For a case like item(), this is trivial (no new node is added.) + # + # For nonzero(), we need to add something like i0 = out.size(0) + # + # We could end up with duplicate nodes this way but it is not a + # big deal. + # + # We also do this to setup backed SymInts, but those are all going + # to be matched from placeholders + def match_symbol(symint, cb): + if ( + isinstance(symint, torch.SymInt) + and isinstance(symint.node, SymNode) + and isinstance(s := symint.node.expr, sympy.Symbol) + and s not in symbol_to_proxy + and s in needed_symbols + ): + symbol_to_proxy[s] = fx.Proxy(cb()) + log.debug("symbol_to_proxy[%s] = %s", s, symbol_to_proxy[s]) + defs.append(s) + + match_symbol(node.meta["example_value"], lambda: node) + if isinstance(t := node.meta["example_value"], torch.Tensor): + for i, s in enumerate(t.size()): + match_symbol( + s, lambda: self.graph.call_method("size", (node, i)) + ) + for i, s in enumerate(t.stride()): + match_symbol( + s, lambda: self.graph.call_method("stride", (node, i)) + ) + match_symbol( + t.storage_offset(), + lambda: self.graph.call_method("storage_offset", (node,)), + ) + + for i0 in defs: + ras = ras_by_symbol.pop(i0, []) + # Before we perform any asserts, first apply range + # refinement. This is important, because if we are going + # to retrace the graph (and we typically are if we send + # the graph to AOTAutograd), we need to make sure we apply + # range refinement (ala _check_is_size) first, BEFORE we + # run any of the asserts. Otherwise, we may decide to + # perform substitutions based on the asserts which we then + # can't back out, because value ranges can only be applied + # to asserts.) + # + # A perhaps better long term plan is to avoid this order + # dependence by making it possible to refine ranges on + # arbitrary expressions, not just symbols. 
But it is not + # so easy to make use of this information, see + # https://twitter.com/ezyang/status/1745801370299482492 + # We actually made an attempt at this in + # https://github.com/pytorch/pytorch/pull/119043 + # which didn't work. + # + # Another ideas for how to do this: + # - Have bound_sympy be the source of truth of the ranges of any expression + # - Cache intermediate results for every subexpression of bound_sympy + # - This cache should be possible to edit to refine ranges + # + # One issue with this proposal is that if + # we have a bound on 2x, we are not going to be able to + # apply it for 4x. Similarly, we may have bounds for an + # equivalent expression that we are not applying because + # it's not a perfect match (e.g. x < y vs y > x)". + # + # The first issue we already have it and it's impossible + # to solve in general, so any implementation on a best + # effort basis should do. + # + # The second issue is a preexisting one. It can be mitigated + # with a normalisation algorithm. In general, it may also + # be on a best effort basis, but since our grammar is not + # terribly difficult, chances are we could even fully + # normalise SymPy expressions... who knows. + + if i0 in self.shape_env.size_like: + self.graph.call_function( + torch._check_is_size, (symbol_to_proxy[i0].node,) + ) + + vr = self.shape_env.var_to_range[i0] + if not self.shape_env._default_unspecified_value_range().issubset( + vr + ): + # The runtime range is constrained, so add a runtime + # assert and also explicitly refine the range + # (refinement should not be necessary once runtime + # asserts cause refinement, but that's NYI) + def convert(s): + try: + return int(s) + except TypeError: + return None + + self.graph.call_function( + torch._constrain_as_value, + ( + symbol_to_proxy[i0].node, + convert(vr.lower), + convert(vr.upper), + ), + ) + + for ra in ras: + log.debug("inserting runtime assert %s", ra.expr) + # Need to process ALL free symbols, not just unbacked ones + fvs = free_symbols(ra.expr) + missing = fvs - symbol_to_proxy.keys() + if missing: + i1 = sorted(missing)[0] + # TODO: Remove relaxing assert on unbacked_symint https://github.com/pytorch/pytorch/issues/119689 + # assert self.shape_env.is_unbacked_symint(i1), i1 + ras_by_symbol.setdefault(i1, []).append(ra) + else: + # Convert the sympy expression into a sequence of FX + # nodes + res = sympy_interp( + PythonReferenceAnalysis, symbol_to_proxy, ra.expr + ).node + self.graph.call_function( + torch.ops.aten._assert_scalar.default, + # TODO: use ra.msg here, but it's pretty + # useless right now + ( + res, + f"Deferred runtime assertion failed {ra.expr}", + ), + ) + + def add_output_instructions(self, prefix: List[Instruction]) -> None: + """ + We call this on the creation of a new compiled subgraph that is inserted + before user code. + """ + self.output_instructions.extend(prefix) + self.should_exit = True + + def install_global_unsafe(self, name, value) -> None: + """ + WARNING: prefer the safer `install_global_by_id/install_global`. + torch.compile instances should be independent of each other; + one footgun is to have one instance depend on the existence of + a global installed by another instance. This can happen if we mangle + a global the same way across both instances. 
+ """ + assert name not in self.installed_globals + self.installed_globals.add(name) + self.cleanups.append(CleanupHook.create(self.global_scope, name, value)) + + def install_global_by_id(self, prefix, value) -> str: + """ + Installs a global if it hasn't been installed already. + This is determined by (prefix, id(value)) pair. + + Returns the name of the newly installed global. + """ + # NB: need self.compile_id to distinguish this global + # from another global created in a different torch.compile instance + name = f"{prefix}_{id(value)}_c{self.compile_id}" + if name in self.installed_globals: + return name + self.install_global_unsafe(name, value) + return name + + def install_global(self, prefix, value) -> str: + """ + Installs a global, generating a unique name for it. + + Returns the name of the newly installed global. + """ + # NB: unique_id is unique, even across torch.compile instances + name = unique_id(prefix) + self.install_global_unsafe(name, value) + return name + + def cleanup(self) -> None: + # There is a reference cycle between tracer and OutputGraph, causing + # some of the tensor objects to be held alive for longer than necessary. + self.root_tx = None + self.nn_modules.clear() + self.param_name_to_source = None + + for node in self.graph.nodes: + if "grapharg" in node.meta: + del node.meta["grapharg"] + self.real_value_cache.clear() + self.input_name_to_proxy.clear() + self.side_effects.clear() + self.register_finalizer_fns.clear() + self.dynamo_flat_name_to_original_fqn.clear() + self.tracing_context.clear() + + def set_torch_function_state(self, enabled: bool) -> None: + self.torch_function_enabled = enabled + + def add_graph_finalizer( + self, register_finalizer: Callable[[fx.GraphModule], None] + ) -> None: + self.register_finalizer_fns.append(register_finalizer) + + def example_value_from_input_node(self, node: torch.fx.Node): + """Extract the non-fake example tensor""" + if node.op == "placeholder": + return node.meta["grapharg"].example + assert node.op == "get_attr" + return self.nn_modules[node.target] # type: ignore[index] + + +err_epilogue = ( + "With the current config, we will graph break " + "(and fall back to eager-mode PyTorch) on all ops " + "that have do not have the 'pt2_compliant_tag'. " + "Please see the following doc for how to mark this op as PT2 compliant " + "https://docs.google.com/document/d/1W--T6wz8IY8fOI0Vm8BF44PdBgs283QvpelJZWieQWQ" +) + + +def check_pt2_compliant_op(output_graph, kind, target, args, kwargs): + if kind != "call_function": + return + + def encountered_compliant_op(target): + if target.namespace in {"prim", "prims", "aten"}: + return + output_graph.compliant_custom_ops.add(target) + + def encountered_non_compliant_op(target, msg): + output_graph.non_compliant_ops.add(target) + if config.only_allow_pt2_compliant_ops: + unimplemented(msg + " " + err_epilogue) + + if isinstance(target, torch._ops.OpOverload): + if torch.Tag.pt2_compliant_tag in target.tags: + encountered_compliant_op(target) + return + encountered_non_compliant_op( + target, + f"Encountered the torch.ops.OpOverload {target} " + f"that is not PT2 compliant.", + ) + return + + if isinstance(target, torch._ops.OpOverloadPacket): + overloads = tuple(target.overloads()) + # Optimization: Overload resolution is expensive. + # If there's only one overload, we know what it will resolve to. 
+ if len(overloads) == 1: + op = getattr(target, overloads[0]) + if torch.Tag.pt2_compliant_tag in op.tags: + encountered_compliant_op(op) + return + encountered_non_compliant_op( + op, + f"Encountered the non-overloaded " + f"torch.ops.OpOverloadPacket {target} " + f"that is not PT2 compliant. ", + ) + return + + args, kwargs = torch._dynamo.utils.get_fake_values_from_nodes( + output_graph.current_tx, (args, kwargs), False + ) + try: + overload = torch._C._jit_resolve_packet( + target._qualified_op_name, *args, **kwargs + ) + except RuntimeError as e: + unimplemented(str(e)) + + op = getattr(target, overload) + if torch.Tag.pt2_compliant_tag in op.tags: + encountered_compliant_op(op) + else: + encountered_non_compliant_op( + op, + f"Encountered the torch.ops.OpOverloadPacket {target} " + f"which resolves to the overload ({overload}) that is " + f"not PT2 compliant.", + ) + + +_compile_id_counter = itertools.count() + + +class SubgraphTracer(fx.Tracer): + """ + Holds an FX graph that is being traced. OutputGraph owns a SubgraphTracer + and the separation of responsibilities is that SubgraphTracer is + responsible for building the graph while OutputGraph is responsible for + compiling and executing the graph. + """ + + def __init__( + self, output_graph, parent=None, export_root=False, source_target=None + ): + super().__init__() + self.output_graph = weakref.proxy(output_graph) + self.graph = torch.fx.Graph() + + # The export is only ever set for the ROOT tracer. It controls + # whether or not certain inputs are allowed to be added or not. + # Look at call sites of create_graph_input to see how it is used. + if export_root: + assert parent is None + self.export_root = export_root + # Map from graph input name to its placeholder proxy object, where the + # map's keys give all current placeholder node names and can be used to + # create unique node names + self.input_name_to_proxy: Dict[str, fx.Proxy] = {} + # Node => computed real value (see utils.get_real_value) + self.real_value_cache: Dict[fx.Node, torch.Tensor] = {} + + # SubgraphTracers can be nested. See NOTE [HigherOrderOperator tracing design] + self.parent = parent + # A dict mapping previously free variables (Proxy objects) + # to new Proxy objects that wrap inputs to this subgraph. + # + # This dict serves two purposes: + # - Proxies are associated with VariableTrackers. If we see + # the same VariableTracker twice (and it is a free variable), + # then we want to use the same Proxy in the current subgraph to + # record the tracing. + # - If we are tracing a HigherOrderOperator's body_fn, then we + # need to keep track of what free variables were lifted so we can + # rewrite the HigherOrderOperator call using the traced body_fn. + # Dicts maintain the order of args for the HigherOrderOperator call. + self.lifted_freevars = {} + self.prev_inst = None + + self._cur_code = None + self._orig_gm_meta = None + self._orig_gm_lineno_map = None + self._orig_gm_firstlineno = None + # Each SubgraphTracer is associated with a source target, which indicates + # which operator this subgraph is attached to. We compute a source_fn_stack + # based on the source target. For the root tracer, it's set to []. + # This is useful for debugging and transforming the exported graph. 
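+ # Illustrative example (added commentary, not part of the upstream source):
+ # for a node created while tracing the body function of torch.cond, the
+ # resulting meta["source_fn_stack"] might look roughly like
+ #
+ #     [("cond", torch.ops.higher_order.cond), ("mul", torch.mul)]
+ #
+ # i.e. the chain of enclosing higher-order operators followed by the
+ # (node name, target) pair of the op that produced the node.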
+ if self.parent is None: + self.source_fn_stack = [] + else: + self.source_fn_stack = self.parent.source_fn_stack + [ + (self.graph._target_to_str(source_target), source_target) + ] + + def create_proxy( + self, + kind, + target, + args, + kwargs, + name=None, + type_expr=None, + proxy_factory_fn=None, + ): + # NOTE: [Nested SubgraphTracer and free_variable handling] + # -------------------------------------------------------- + # Read NOTE [HigherOrderOperator tracing design] first. + # + # Let's say we're in the middle of introspecting the body of a possibly + # nested HigherOrderOperator, and we see a free variable. + # + # There are two cases: + # 1. We see a free variable that is already tracked by Dynamo. + # 2. We see a free variable that has not been tracked by Dynamo + # + # In case 1, we call `maybe_lift_tracked_freevar_to_input` (below) + # which will lift the freevar to be an input of this subgraph + # and also recursively lift it to be an input on the parent(s). + # + # In case 2, before the call to `create_proxy`, the InstructionTranslator + # will see the freevar when it gets loaded by Python bytecode. + # E.g. for Python 3.11 the bytecodes that may do this are LOAD_DEREF or + # LOAD_GLOBAL. + # There, the InstructionTranslator asks Dynamo to begin tracking the + # freevar by building a new Variable. + # Building a new Variable automatically lifts the freevar to be an + # input of the root SubgraphTracer. + # + # The implications for the code below are: + # - We will always be in Case 1 when we get to this code. + # - Any "free variable" we encounter here is guaranteed to already be + # bound, that is, it is either a graph input of the root graph, or + # some local variable of the root graph or a subgraph. + # - The additional work we need to do here is *only* that we need to + # lift this free variable into inputs (recursively) of each nested + # higher-order-op subgraph until we hit the subgraph where the free + # variable is bound + if self.parent is not None: + flat_args, tree_spec = pytree.tree_flatten((args, kwargs)) + new_flat_args = [] + for arg in flat_args: + maybe_new_arg = self.maybe_lift_tracked_freevar_to_input(arg) + new_flat_args.append(maybe_new_arg) + + args, kwargs = pytree.tree_unflatten(new_flat_args, tree_spec) + + rv = super().create_proxy( + kind, target, args, kwargs, name, type_expr, proxy_factory_fn + ) + + # append stack trace to fx node + tx = self.output_graph.current_tx + + # log detailed location of line of code in 3.11 + if sys.version_info >= (3, 11) and kind in ( + "call_function", + "call_method", + "call_module", + ): + cur_inst = tx.current_instruction + if ( + cur_inst is not self.prev_inst + and cur_inst.positions is not None + and cur_inst.positions.lineno is not None + ): + tx_code = tx.f_code + header = tx.get_line_of_code_header(lineno=cur_inst.positions.lineno) + + def get_trace_call_log_str(): + line = get_instruction_source_311(tx_code, cur_inst).rstrip() + return f"TRACE FX call {rv.node.name} from {header}\n{line}" + + trace_call_log.debug("%s", LazyString(get_trace_call_log_str)) + self.prev_inst = cur_inst + + # update reference to original meta if we're tracing a new code object + is_retracing = False + if tx.f_code is not self._cur_code: + orig_graphmodule_maybe = code_context.get_context(tx.f_code).get( + "orig_graphmodule", lambda: None + )() + if isinstance(orig_graphmodule_maybe, torch.fx.GraphModule): + is_retracing = True + self._orig_gm_meta = [ + nd.meta for nd in orig_graphmodule_maybe.graph.nodes + ] + 
self._orig_gm_lineno_map = orig_graphmodule_maybe._lineno_map + self._orig_gm_firstlineno = ( + orig_graphmodule_maybe.forward.__code__.co_firstlineno + ) + else: + self._orig_gm_meta = None + self._orig_gm_lineno_map = None + self._orig_gm_firstlineno = None + nn_module_stack = tx.nn_module_stack + if nn_module_stack: + rv.node.meta["nn_module_stack"] = nn_module_stack.copy() + + if kind in {"call_function", "call_method"}: + rv.node.meta["source_fn_stack"] = self.source_fn_stack + [ + (rv.node.name, target) + ] + elif kind == "call_module": + if self.parent is not None: + unimplemented("Invoking an nn.Module inside HigherOrderOperator") + # For modules we store the class + rv.node.meta["source_fn_stack"] = self.source_fn_stack + [ + ( + rv.node.name, + rv.node.meta["nn_module_stack"][target][1], + ) + ] + + # preserve original meta if it is available + if ( + self._orig_gm_meta + and self._orig_gm_lineno_map + and self._orig_gm_firstlineno + ): + lineno = tx.current_instruction.starts_line + node_idx = None + if lineno is not None: + node_idx = self._orig_gm_lineno_map.get( + lineno - self._orig_gm_firstlineno, None + ) + if node_idx is not None: + meta = self._orig_gm_meta[node_idx] + for field in fx.proxy._COPY_META_FIELDS: + if field in meta: + rv.node.meta[field] = meta[field] + if "stack_trace" in meta: + rv.node.meta["stack_trace"] = meta["stack_trace"] + + if not is_retracing: + if "nn_module_stack" not in rv.node.meta: + nn_module_stack = tx.nn_module_stack + if nn_module_stack: + rv.node.meta["nn_module_stack"] = nn_module_stack.copy() + + if "source_fn_stack" not in rv.node.meta: + if kind in {"call_function", "call_method"}: + rv.node.meta["source_fn_stack"] = self.source_fn_stack + [ + (rv.node.name, target) + ] + elif kind == "call_module": + if self.parent is not None: + unimplemented( + "Invoking an nn.Module inside HigherOrderOperator" + ) + # For modules we store the class + rv.node.meta["source_fn_stack"] = self.source_fn_stack + [ + ( + rv.node.name, + rv.node.meta["nn_module_stack"][target][1], + ) + ] + + if "stack_trace" not in rv.node.meta: + frame_summaries: List[traceback.FrameSummary] = [] + while tx: + frame_summaries.append(tx.frame_summary()) + tx = getattr(tx, "parent", None) + # Reverse the frame_summaries, such that the innermost frame is at the last + frame_summaries.reverse() + + # official from_list stub doesn't have new-style type + msgs = traceback.StackSummary.from_list(frame_summaries).format() + rv.node.stack_trace = "".join(msgs) + + return rv + + def create_node( + self, op, target, args=None, kwargs=None, name=None, type_expr=None + ): + check_pt2_compliant_op(self.output_graph, op, target, args, kwargs) + if self.parent is not None: + flat_args = pytree.arg_tree_leaves(*args, **kwargs) + for arg in flat_args: + if not isinstance(arg, torch.fx.Node): + continue + assert ( + arg.graph == self.graph + ), "create_node using arg not from this SubgraphTracer" + + node = super().create_node(op, target, args, kwargs, name, type_expr) + node.meta["creation_timestamp"] = self.output_graph.timestamp + return node + + # Note: we did not override erase_node since + # we call self.graph.erase_node elsewhere + def remove_node(self, node): + if len(node.users) > 0: + user_graph_nodes: List[torch.fx.Node] = [] + for user in node.users.keys(): + # For the case where user.graph == self.graph, that is a real bug and will raise + # properly. + if user.graph != self.graph: + # This is a nested graph, which needs to be deleted. 
+ # If we do not do this, we will raise on attempting to remove this. + # As we only get here during restoration cleanup, this is sound. + user_graph_nodes.extend(reversed(list(user.graph.nodes))) + for other_graph_node in user_graph_nodes: + other_graph_node.graph.erase_node(other_graph_node) + self.graph.erase_node(node) + self.input_name_to_proxy.pop(node.name, None) + + # when before=True, we will insert this input before the most recent + # inserted proxy. This is a hack to get around an ordering problem, + # where we first insert a tensor argument, and then insert bindings + # for SymInts that may occur in the tensor argument. + # Remove this if https://github.com/pytorch/pytorch/issues/99007 gets + # fixed. + def create_graph_input(self, name, type_expr=None, before=False, source=None): + log.debug( + "create_graph_input %s %s", + name, + source.name() if source is not None else "(none)", + ) + if source is None: + assert ( + self.parent is not None + ), "you are required to provide a source for inputs on the root tracer" + + # In eager, we are generally OK with adding graph inputs whenever we + # want, because we take care of writing the bytecode that knows how + # to source all the inputs. + # + # In export, this is bad, because you want a self-contained export + # object which only depends on the inputs you explicitly passed to it. + # So we are a bit more strict about what sources can become inputs + # in export + if self.export_root: + if not is_from_local_source(source, allow_cell_or_freevar=False): + self.output_graph.source_to_user_stacks.setdefault(source, []).append( + TracingContext.extract_stack() + ) + + # unique + if name in self.input_name_to_proxy: + for i in itertools.count(): + candidate_name = f"{name}_{i}" + if candidate_name not in self.input_name_to_proxy: + name = candidate_name + break + + if self.input_name_to_proxy: + prev_name = next(reversed(self.input_name_to_proxy)) + node = self.input_name_to_proxy[prev_name].node + if before: + ctx = self.graph.inserting_before(node) + else: + ctx = self.graph.inserting_after(node) + else: + ctx = self.graph.inserting_before(None) + with ctx: + proxy = self.create_proxy("placeholder", name, (), {}, type_expr=type_expr) + if self.input_name_to_proxy and before: + k, v = self.input_name_to_proxy.popitem() + self.input_name_to_proxy[name] = proxy + self.input_name_to_proxy[k] = v + else: + self.input_name_to_proxy[name] = proxy + return proxy + + # See NOTE: [Nested SubgraphTracer and free_variable handling] for more details + def lift_tracked_freevar_to_input(self, proxy): + # You're doing something wrong if we are the root SubgraphTracer because + # Dynamo adds tensors to graph inputs before creating a proxy for them. + assert ( + self.parent is not None + ), "lift_tracked_freevar_to_input should not be called on root SubgraphTracer" + # Proxys are associated with VariableTracker. + # It is possible that we've already lifted the Proxy to be an input. + # If that is the case, just return the already lifted Proxy. + if proxy in self.lifted_freevars: + return self.lifted_freevars[proxy] + new_proxy = self.create_graph_input(proxy.node.name) + new_proxy.node.meta["example_value"] = proxy.node.meta["example_value"] + self.lifted_freevars[proxy] = new_proxy + if self.parent is not None and proxy.tracer != self.parent: + self.parent.lift_tracked_freevar_to_input(proxy) + return new_proxy + + def maybe_lift_tracked_freevar_to_input(self, arg): + """ + If arg is a free variable, then lift it to be an input. 
+ Returns the new lifted arg (if arg was a freevar), else the + original arg. + """ + if not isinstance(arg, torch.fx.Proxy): + return arg + elif arg.tracer == self: + return arg + return self.lift_tracked_freevar_to_input(arg) + + +# NOTE: [HigherOrderOperator tracing design] +# Ignoring HigherOrderOperators for a moment, +# OutputGraph represents the graph being built by Dynamo that may be compiled +# and executed. It holds a root SubgraphTracer where the FX graph is built. +# +# HigherOrderOperators are operators that take functions as their arguments. +# When Dynamo encounters a HigherOrderOperator, then it attempts to introspect +# the function passed to it (call this the "body function"), capture it into a +# GraphModule, and rewrite the call to the HigherOrderOperator to use the +# GraphModule. +# +# The way we handle the capture of body functions is through having +# (possibly nested) SubgraphTracers, one per body function. +# +# Mechanically, we do the introspection by: +# - Creating a new SubgraphTracer via OutputGraph.subtracer +# - Executing the body function. +# This constructs the graph of the body function in the new SubgraphTracer +# while modifying the state of the OutputGraph. For example: +# - the OutputGraph can receive new GraphArgs (if we discover any new +# untracked Tensors) +# - side effects from the body function get accumulated into +# OutputGraph.side_effects +# - guards produced by the body function get accumulated into OutputGraph.guards +# +# The traced function has some special properties that make it easier for us +# to transform later down the line: +# - we lift all free variables to being inputs. +# +# If the introspection fails (due to the existence of graph breaks), then +# we roll back the current OutputGraph state and graph break on the +# HigherOrderOperator. diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/polyfill.py b/venv/lib/python3.10/site-packages/torch/_dynamo/polyfill.py new file mode 100644 index 0000000000000000000000000000000000000000..fc82d90c4c8a0344e9e027f683fee4c9e6195b44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/polyfill.py @@ -0,0 +1,47 @@ +# mypy: ignore-errors + +""" +Python polyfills for common builtins. 
+""" +import math + +import torch + + +def all(iterator): + for elem in iterator: + if not elem: + return False + return True + + +def any(iterator): + for elem in iterator: + if elem: + return True + return False + + +def index(iterator, item, start=0, end=None): + for i, elem in enumerate(list(iterator))[start:end]: + if item == elem: + return i + # This will not run in dynamo + raise ValueError(f"{item} is not in {type(iterator)}") + + +def repeat(item, count): + for i in range(count): + yield item + + +def radians(x): + return math.pi / 180.0 * x + + +def accumulate_grad(x, new_grad): + new_grad = torch.clone(new_grad) + if x.grad is None: + x.grad = new_grad + else: + x.grad.add_(new_grad) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/profiler.py b/venv/lib/python3.10/site-packages/torch/_dynamo/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..b52551c67137519ce65b49c4416fd08c814545ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/profiler.py @@ -0,0 +1,155 @@ +import dataclasses +import os +from typing import Any, List + +import torch + +from .utils import print_once + + +@dataclasses.dataclass +class ProfileMetrics: + microseconds: float = 0.0 + operators: int = 0 + fusions: int = 0 + graphs: int = 0 + + def __iadd__(self, other: "ProfileMetrics"): + self.microseconds += other.microseconds + self.operators += other.operators + self.fusions += other.fusions + return self + + def __add__(self, other: "ProfileMetrics"): + assert isinstance(other, ProfileMetrics) + return ProfileMetrics( + self.microseconds + other.microseconds, + self.operators + other.operators, + self.fusions + other.fusions, + ) + + def __truediv__(self, other): + if isinstance(other, int): + other = ProfileMetrics(other, other, other) + return ProfileMetrics( + self.microseconds / max(1, other.microseconds), + self.operators / max(1, other.operators), + self.fusions / max(1, other.fusions), + ) + + def __str__(self): + return f"{self.operators:4.0%} ops {self.microseconds:4.0%} time" + + def tocsv(self): + return [self.operators, self.microseconds] + + +class ProfileResult: + def __init__(self, captured, total, unique_graphs): + self.captured: ProfileMetrics = captured or ProfileMetrics() + self.total: ProfileMetrics = total or ProfileMetrics() + self.unique_graphs: int = unique_graphs + + def __iadd__(self, other: "ProfileResult"): + self.captured += other.captured + self.total += other.total + self.unique_graphs += other.unique_graphs + return self + + def percent(self): + return self.captured / self.total + + def __str__(self): + return ( + f"{self.unique_graphs:2} graphs {self.captured.graphs:2} graph calls " + f"{self.captured.operators:4}/{self.total.operators:4} = " + + str(self.percent()) + ) + + def tocsv(self): + return [ + self.unique_graphs, + self.captured.graphs, + self.captured.operators, + self.total.operators, + ] + self.percent().tocsv() + + +def should_print_missing(): + return os.environ.get("TORCHDYNAMO_PRINT_MISSING") == "1" + + +def print_missing(stack): + if any("/torch/autograd/profiler.py" in x for x in stack): + return + stack = [ + x for x in stack if ("> ".join(stack[-3:])) + + +class Profiler: + unique_graphs = 0 + + def __init__(self): + self.prof = torch.profiler.profile( + activities=[torch.profiler.ProfilerActivity.CPU], + with_stack=should_print_missing(), + ) + + def results(self): + captured_regions = 0 + captured_ops = 0 + captured_microseconds = 0 + total_ops = 0 + total_microseconds = 0 + + last_op_end_time = 
-1 + captured_region_end_time = -1 + events = sorted(self.prof.events(), key=lambda x: x.time_range.start) + for e in events: + if e.name == "TORCHDYNAMO": + captured_region_end_time = e.time_range.end + captured_regions += 1 + # ignore `handle = torch.zeros(1)` in record_function.__init__() + total_ops -= 1 + elif e.time_range.start >= last_op_end_time: + last_op_end_time = e.time_range.end + if e.time_range.end <= captured_region_end_time: + captured_ops += 1 + captured_microseconds += e.time_range.elapsed_us() + elif should_print_missing(): + print_missing(e.stack) + total_ops += 1 + total_microseconds += e.time_range.elapsed_us() + else: + pass # ops recursively called from other ops (ignored) + + unique_graphs = Profiler.unique_graphs + Profiler.unique_graphs = 0 + # we counted one extra op that is part of the profiler setup code + total_ops -= 1 + + return ProfileResult( + captured=ProfileMetrics( + microseconds=captured_microseconds, + operators=captured_ops, + fusions=captured_ops - captured_regions, + graphs=captured_regions, + ), + total=ProfileMetrics( + microseconds=total_microseconds, + operators=total_ops, + fusions=total_ops - 1, + ), + unique_graphs=unique_graphs, + ) + + +def fx_insert_profiling(gm: torch.fx.GraphModule, example_inputs: List[Any]): + def _wrapped(*args): + with torch.profiler.record_function("TORCHDYNAMO"): + return gm.forward(*args) + + Profiler.unique_graphs += 1 + return _wrapped diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/replay_record.py b/venv/lib/python3.10/site-packages/torch/_dynamo/replay_record.py new file mode 100644 index 0000000000000000000000000000000000000000..7a312e5d58a9ef8721df767c10f0b11f052555f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/replay_record.py @@ -0,0 +1,110 @@ +import dataclasses +from dataclasses import field +from types import CodeType, ModuleType +from typing import Any, Dict + +from torch.utils._import_utils import import_dill + +dill = import_dill() + + +@dataclasses.dataclass +class ModuleRecord: + module: ModuleType + accessed_attrs: Dict[str, Any] = field(default_factory=dict) + + +@dataclasses.dataclass +class DummyModule: + name: str + is_torch: bool = False + + @property + def __name__(self): + return self.name + + +@dataclasses.dataclass +class ExecutionRecord: + code: CodeType + globals: Dict[str, Any] = field(default_factory=dict) + locals: Dict[str, Any] = field(default_factory=dict) + builtins: Dict[str, Any] = field(default_factory=dict) + code_options: Dict[str, Any] = field(default_factory=dict) + + def dump(self, f): + assert dill is not None, "replay_record requires `pip install dill`" + dill.dump(self, f) + + @classmethod + def load(cls, f): + assert dill is not None, "replay_record requires `pip install dill`" + return dill.load(f) + + +@dataclasses.dataclass +class ExecutionRecorder: + LOCAL_MOD_PREFIX = "___local_mod_" + + code: CodeType + globals: Dict[str, Any] = field(default_factory=dict) + locals: Dict[str, Any] = field(default_factory=dict) + builtins: Dict[str, Any] = field(default_factory=dict) + code_options: Dict[str, Any] = field(default_factory=dict) + name_to_modrec: Dict[str, Any] = field(default_factory=dict) + + def add_local_var(self, name, var): + if isinstance(var, ModuleType): + self.locals[name] = self._add_mod(var) + else: + self.locals[name] = var + + def add_global_var(self, name, var): + if isinstance(var, ModuleType): + self.globals[name] = self._add_mod(var) + else: + self.globals[name] = var + + def add_local_mod(self, name, mod): 
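+ # Descriptive note (added commentary, not part of the upstream source):
+ # modules are never pickled wholesale.  They are wrapped in ModuleRecord via
+ # _add_mod, and only the attributes actually touched during the frame are
+ # captured; when the record is materialized by get_record, _resolve_modules
+ # rebuilds each one as a lightweight DummyModule exposing just those
+ # attributes.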
+ assert isinstance(mod, ModuleType) + + self.add_global_var(name, mod) + + def record_module_access(self, mod, name, val): + if isinstance(val, ModuleType): + self.name_to_modrec[mod.__name__].accessed_attrs[name] = self._add_mod(val) + return + + if mod.__name__ in self.name_to_modrec: + self.name_to_modrec[mod.__name__].accessed_attrs[name] = val + + def get_record(self): + return ExecutionRecord( + self.code, + ExecutionRecorder._resolve_modules(self.globals), + ExecutionRecorder._resolve_modules(self.locals), + self.builtins.copy(), + self.code_options.copy(), + ) + + def _add_mod(self, mod): + if mod.__name__ not in self.name_to_modrec: + self.name_to_modrec[mod.__name__] = ModuleRecord(mod) + + return self.name_to_modrec[mod.__name__] + + # Convert ModuleRecords -> DummyModule tree + @classmethod + def _resolve_modules(cls, vars): + def resolve_module(var): + if not isinstance(var, ModuleRecord): + return var + + dummy_mod = DummyModule(var.module.__name__) + for attr_name, attr_value in var.accessed_attrs.items(): + attr_value = resolve_module(attr_value) + dummy_mod.__setattr__(attr_name, attr_value) + + return dummy_mod + + return {k: resolve_module(v) for k, v in vars.items()} diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/resume_execution.py b/venv/lib/python3.10/site-packages/torch/_dynamo/resume_execution.py new file mode 100644 index 0000000000000000000000000000000000000000..b0b565aa275a2822e373fada9f043994d8c0eebe --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/resume_execution.py @@ -0,0 +1,648 @@ +import copy +import dataclasses +import sys +import types +from typing import Any, cast, Dict, List, Optional, Tuple + +from .bytecode_transformation import ( + create_call_function, + create_call_method, + create_dup_top, + create_instruction, + create_jump_absolute, + Instruction, + InstructionExnTabEntry, + transform_code_object, + unique_id, +) +from .utils import ExactWeakKeyDictionary + +# taken from code.h in cpython +CO_OPTIMIZED = 0x0001 +CO_NEWLOCALS = 0x0002 +CO_VARARGS = 0x0004 +CO_VARKEYWORDS = 0x0008 +CO_NESTED = 0x0010 +CO_GENERATOR = 0x0020 +CO_NOFREE = 0x0040 +CO_COROUTINE = 0x0080 +CO_ITERABLE_COROUTINE = 0x0100 +CO_ASYNC_GENERATOR = 0x0200 + + +@dataclasses.dataclass(frozen=True) +class ReenterWith: + stack_index: int + target_values: Optional[Tuple[Any, ...]] = None + + # If we do not want to destroy the stack, we can do the same thing as a + # `SETUP_WITH` block, only that we store the context manager in a local_symbol + def try_except(self, code_options, cleanup: List[Instruction]): + """ + Codegen based off of: + load args + enter context + try: + (rest) + finally: + exit context + """ + load_args = [] + if self.target_values: + load_args = [ + create_instruction("LOAD_CONST", argval=val) + for val in self.target_values + ] + ctx_name = unique_id(f"___context_manager_{self.stack_index}") + if ctx_name not in code_options["co_varnames"]: + code_options["co_varnames"] += (ctx_name,) + for name in ["__enter__", "__exit__"]: + if name not in code_options["co_names"]: + code_options["co_names"] += (name,) + + except_jump_target = create_instruction( + "NOP" if sys.version_info < (3, 11) else "PUSH_EXC_INFO" + ) + cleanup_complete_jump_target = create_instruction("NOP") + + setup_finally = [ + *load_args, + *create_call_function(len(load_args), True), + create_instruction("STORE_FAST", argval=ctx_name), + create_instruction("LOAD_FAST", argval=ctx_name), + create_instruction("LOAD_METHOD", argval="__enter__"), + 
*create_call_method(0), + create_instruction("POP_TOP"), + ] + + if sys.version_info < (3, 11): + setup_finally.append( + create_instruction("SETUP_FINALLY", target=except_jump_target) + ) + else: + exn_tab_begin = create_instruction("NOP") + exn_tab_end = create_instruction("NOP") + exn_tab_begin.exn_tab_entry = InstructionExnTabEntry( + exn_tab_begin, + exn_tab_end, + except_jump_target, + self.stack_index + 1, + False, + ) + setup_finally.append(exn_tab_begin) + + def create_reset(): + return [ + create_instruction("LOAD_FAST", argval=ctx_name), + create_instruction("LOAD_METHOD", argval="__exit__"), + create_instruction("LOAD_CONST", argval=None), + create_dup_top(), + create_dup_top(), + *create_call_method(3), + create_instruction("POP_TOP"), + ] + + if sys.version_info < (3, 9): + epilogue = [ + create_instruction("POP_BLOCK"), + create_instruction("BEGIN_FINALLY"), + except_jump_target, + *create_reset(), + create_instruction("END_FINALLY"), + ] + elif sys.version_info < (3, 11): + epilogue = [ + create_instruction("POP_BLOCK"), + *create_reset(), + create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target), + except_jump_target, + *create_reset(), + create_instruction("RERAISE"), + cleanup_complete_jump_target, + ] + else: + finally_exn_tab_end = create_instruction("RERAISE", arg=0) + finally_exn_tab_target = create_instruction("COPY", arg=3) + except_jump_target.exn_tab_entry = InstructionExnTabEntry( + except_jump_target, + finally_exn_tab_end, + finally_exn_tab_target, + self.stack_index + 2, + True, + ) + epilogue = [ + exn_tab_end, + *create_reset(), + create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target), + except_jump_target, # PUSH_EXC_INFO + *create_reset(), + finally_exn_tab_end, # RERAISE 0 + finally_exn_tab_target, # COPY 3 + create_instruction("POP_EXCEPT"), + create_instruction("RERAISE", arg=1), + cleanup_complete_jump_target, + ] + + cleanup[:] = epilogue + cleanup + return setup_finally + + def __call__(self, code_options, cleanup): + """ + Codegen based off of: + with ctx(args): + (rest) + """ + load_args = [] + if self.target_values: + load_args = [ + create_instruction("LOAD_CONST", argval=val) + for val in self.target_values + ] + if sys.version_info < (3, 9): + with_cleanup_start = create_instruction("WITH_CLEANUP_START") + begin_finally = create_instruction("BEGIN_FINALLY") + cleanup[:] = [ + create_instruction("POP_BLOCK"), + begin_finally, + with_cleanup_start, + create_instruction("WITH_CLEANUP_FINISH"), + create_instruction("END_FINALLY"), + ] + cleanup + + return [ + *load_args, + create_instruction("CALL_FUNCTION", arg=len(load_args)), + create_instruction("SETUP_WITH", target=with_cleanup_start), + create_instruction("POP_TOP"), + ], None + elif sys.version_info < (3, 11): + with_except_start = create_instruction("WITH_EXCEPT_START") + pop_top_after_with_except_start = create_instruction("POP_TOP") + + cleanup_complete_jump_target = create_instruction("NOP") + + cleanup[:] = [ + create_instruction("POP_BLOCK"), + create_instruction("LOAD_CONST", argval=None), + create_instruction("DUP_TOP"), + create_instruction("DUP_TOP"), + create_instruction("CALL_FUNCTION", arg=3), + create_instruction("POP_TOP"), + create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target), + with_except_start, + create_instruction( + "POP_JUMP_IF_TRUE", target=pop_top_after_with_except_start + ), + create_instruction("RERAISE"), + pop_top_after_with_except_start, + create_instruction("POP_TOP"), + create_instruction("POP_TOP"), + 
create_instruction("POP_EXCEPT"), + create_instruction("POP_TOP"), + cleanup_complete_jump_target, + ] + cleanup + + return [ + *load_args, + create_instruction("CALL_FUNCTION", arg=len(load_args)), + create_instruction("SETUP_WITH", target=with_except_start), + create_instruction("POP_TOP"), + ], None + else: + pop_top_after_with_except_start = create_instruction("POP_TOP") + cleanup_complete_jump_target = create_instruction("NOP") + + def create_load_none(): + return create_instruction("LOAD_CONST", argval=None) + + exn_tab_1_begin = create_instruction("POP_TOP") + exn_tab_1_end = create_instruction("NOP") + exn_tab_1_target = create_instruction("PUSH_EXC_INFO") + exn_tab_2_end = create_instruction("RERAISE", arg=2) + exn_tab_2_target = create_instruction("COPY", arg=3) + + exn_tab_1_begin.exn_tab_entry = InstructionExnTabEntry( + exn_tab_1_begin, + exn_tab_1_end, + exn_tab_1_target, + self.stack_index + 1, + True, + ) + exn_tab_1_target.exn_tab_entry = InstructionExnTabEntry( + exn_tab_1_target, + exn_tab_2_end, + exn_tab_2_target, + self.stack_index + 3, + True, + ) + pop_top_after_with_except_start.exn_tab_entry = InstructionExnTabEntry( + pop_top_after_with_except_start, + pop_top_after_with_except_start, + exn_tab_2_target, + self.stack_index + 3, + True, + ) + + cleanup[:] = [ + exn_tab_1_end, + create_load_none(), + create_load_none(), + create_load_none(), + *create_call_function(2, False), + create_instruction("POP_TOP"), + create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target), + exn_tab_1_target, # PUSH_EXC_INFO + create_instruction("WITH_EXCEPT_START"), + create_instruction( + "POP_JUMP_FORWARD_IF_TRUE", + target=pop_top_after_with_except_start, + ), + exn_tab_2_end, # RERAISE 2 + exn_tab_2_target, # COPY 3 + create_instruction("POP_EXCEPT"), + create_instruction("RERAISE", arg=1), + pop_top_after_with_except_start, + create_instruction("POP_EXCEPT"), + create_instruction("POP_TOP"), + create_instruction("POP_TOP"), + cleanup_complete_jump_target, + ] + cleanup + + return [ + *load_args, + *create_call_function(len(load_args), True), + create_instruction("BEFORE_WITH"), + exn_tab_1_begin, # POP_TOP + ], exn_tab_1_target + + +@dataclasses.dataclass +class ResumeFunctionMetadata: + code: types.CodeType + instructions: List[Instruction] = dataclasses.field(default_factory=list) + # Python 3.11+ fields + # NOTE: Python 3.11 removed blocks, but for our purposes, a "block" consists + # of instructions of all exception table entries that have the same target. + + # map from PUSH_EXC_INFO's in the prefix to original block target offset + prefix_block_target_offset_remap: List[int] = dataclasses.field( + default_factory=list + ) + # map from new block target offsets to original block target offsets + block_target_offset_remap: Optional[Dict[int, int]] = None + + +def _filter_iter(l1, l2, cond): + """ + Two-pointer conditional filter. + e.g. 
_filter_iter(insts, sorted_offsets, lambda i, o: i.offset == o) + returns the instructions with offsets in sorted_offsets + """ + it = iter(l2) + res = [] + try: + cur = next(it) + for val in l1: + if cond(val, cur): + res.append(val) + cur = next(it) + except StopIteration: + pass + return res + + +class ContinueExecutionCache: + cache = ExactWeakKeyDictionary() + generated_code_metadata = ExactWeakKeyDictionary() + + @classmethod + def lookup(cls, code, lineno, *key): + if code not in cls.cache: + cls.cache[code] = dict() + key = tuple(key) + if key not in cls.cache[code]: + cls.cache[code][key] = cls.generate(code, lineno, *key) + return cls.cache[code][key] + + @classmethod + def generate( + cls, + code, + lineno, + offset: int, + setup_fn_target_offsets: Tuple[int], # only used in Python 3.11+ + nstack: int, + argnames: Tuple[str], + setup_fns: Tuple[ReenterWith], + null_idxes: Tuple[int], + ) -> types.CodeType: + assert offset is not None + assert not ( + code.co_flags + & (CO_GENERATOR | CO_COROUTINE | CO_ITERABLE_COROUTINE | CO_ASYNC_GENERATOR) + ) + assert code.co_flags & CO_OPTIMIZED + if code in ContinueExecutionCache.generated_code_metadata: + return cls.generate_based_on_original_code_object( + code, + lineno, + offset, + setup_fn_target_offsets, + nstack, + argnames, + setup_fns, + null_idxes, + ) + + is_py311_plus = sys.version_info >= (3, 11) + meta = ResumeFunctionMetadata(code) + + def update(instructions: List[Instruction], code_options: Dict[str, Any]): + meta.instructions = copy.deepcopy(instructions) + + args = [f"___stack{i}" for i in range(nstack)] + args.extend(v for v in argnames if v not in args) + freevars = tuple(code_options["co_cellvars"] or []) + tuple( + code_options["co_freevars"] or [] + ) + code_options[ + "co_name" + ] = f"torch_dynamo_resume_in_{code_options['co_name']}_at_{lineno}" + if is_py311_plus: + qualified_path = code_options["co_qualname"].rsplit(".", maxsplit=1) + if len(qualified_path) == 1: + code_options["co_qualname"] = code_options["co_name"] + else: + assert len(qualified_path) == 2 + module_name, co_name = qualified_path + code_options[ + "co_qualname" + ] = f"{module_name}.torch_dynamo_resume_in_{co_name}_at_{lineno}" + code_options["co_firstlineno"] = lineno + code_options["co_cellvars"] = tuple() + code_options["co_freevars"] = freevars + code_options["co_argcount"] = len(args) + code_options["co_posonlyargcount"] = 0 + code_options["co_kwonlyargcount"] = 0 + code_options["co_varnames"] = tuple( + args + [v for v in code_options["co_varnames"] if v not in args] + ) + code_options["co_flags"] = code_options["co_flags"] & ~( + CO_VARARGS | CO_VARKEYWORDS + ) + target = next(i for i in instructions if i.offset == offset) + + prefix = [] + if is_py311_plus: + if freevars: + prefix.append( + create_instruction("COPY_FREE_VARS", arg=len(freevars)) + ) + prefix.append(create_instruction("RESUME", arg=0)) + + cleanup: List[Instruction] = [] + hooks = {fn.stack_index: fn for fn in setup_fns} + hook_target_offsets = { + fn.stack_index: setup_fn_target_offsets[i] + for i, fn in enumerate(setup_fns) + } + offset_to_inst = {inst.offset: inst for inst in instructions} + # map old hook targets to new targets generated by the hook + old_hook_target_remap = {} + null_idxes_i = 0 + for i in range(nstack): + while ( + null_idxes_i < len(null_idxes) + and null_idxes[null_idxes_i] == i + null_idxes_i + ): + prefix.append(create_instruction("PUSH_NULL")) + null_idxes_i += 1 + prefix.append(create_instruction("LOAD_FAST", argval=f"___stack{i}")) + if i in 
hooks: + hook = hooks.pop(i) + hook_insts, exn_target = hook(code_options, cleanup) + prefix.extend(hook_insts) + if is_py311_plus: + hook_target_offset = hook_target_offsets.pop(i) + old_hook_target = offset_to_inst[hook_target_offset] + meta.prefix_block_target_offset_remap.append(hook_target_offset) + old_hook_target_remap[old_hook_target] = exn_target + if is_py311_plus: + # reverse the mapping since targets of later/nested contexts are inserted + # into the mapping later, but show up earlier in the prefix. + meta.prefix_block_target_offset_remap = list( + reversed(meta.prefix_block_target_offset_remap) + ) + + assert not hooks + + prefix.append(create_jump_absolute(target)) + + # because the line number table monotonically increases from co_firstlineno + # remove starts_line for any instructions before the graph break instruction + # this will ensure the instructions after the break have the correct line numbers + for inst in instructions: + if inst.offset == target.offset: + break + inst.starts_line = None + if sys.version_info >= (3, 11): + inst.positions = None + + if cleanup: + prefix.extend(cleanup) + prefix.extend(cls.unreachable_codes(code_options)) + + # remap original instructions' exception table entries + if old_hook_target_remap: + assert is_py311_plus + for inst in instructions: + if ( + inst.exn_tab_entry + and inst.exn_tab_entry.target in old_hook_target_remap + ): + inst.exn_tab_entry.target = old_hook_target_remap[ + inst.exn_tab_entry.target + ] + + # TODO(jansel): add dead code elimination here + instructions[:] = prefix + instructions + + new_code = transform_code_object(code, update) + ContinueExecutionCache.generated_code_metadata[new_code] = meta + return new_code + + @staticmethod + def unreachable_codes(code_options) -> List[Instruction]: + """Codegen a `raise None` to make analysis work for unreachable code""" + return [ + create_instruction("LOAD_CONST", argval=None), + create_instruction("RAISE_VARARGS", arg=1), + ] + + @classmethod + def generate_based_on_original_code_object( + cls, code, lineno, offset: int, setup_fn_target_offsets: Tuple[int, ...], *args + ): + """ + This handles the case of generating a resume into code generated + to resume something else. We want to always generate starting + from the original code object so that if control flow paths + converge we only generated 1 resume function (rather than 2^n + resume functions). + """ + + meta: ResumeFunctionMetadata = ContinueExecutionCache.generated_code_metadata[ + code + ] + new_offset = None + + def find_new_offset( + instructions: List[Instruction], code_options: Dict[str, Any] + ): + nonlocal new_offset + (target,) = (i for i in instructions if i.offset == offset) + # match the functions starting at the last instruction as we have added a prefix + (new_target,) = ( + i2 + for i1, i2 in zip(reversed(instructions), reversed(meta.instructions)) + if i1 is target + ) + assert target.opcode == new_target.opcode + new_offset = new_target.offset + + transform_code_object(code, find_new_offset) + + if sys.version_info >= (3, 11): + # setup_fn_target_offsets currently contains the target offset of + # each setup_fn, based on `code`. When we codegen the resume function + # based on the original code object, `meta.code`, the offsets in + # setup_fn_target_offsets must be based on `meta.code` instead. 
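+ # Illustrative example (added commentary, not part of the upstream source)
+ # of the two-pointer `_filter_iter` helper used in the remapping below;
+ # both arguments must be ordered consistently:
+ #
+ #     _filter_iter([1, 2, 3, 4], [2, 4], lambda a, b: a == b)  # -> [2, 4]
+ #
+ # Elements of the first sequence are kept only when they match the next
+ # pending element of the second.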
+ if not meta.block_target_offset_remap: + block_target_offset_remap = meta.block_target_offset_remap = {} + + def remap_block_offsets( + instructions: List[Instruction], code_options: Dict[str, Any] + ): + # NOTE: each prefix block generates exactly one PUSH_EXC_INFO, + # so we can tell which block a prefix PUSH_EXC_INFO belongs to, + # by counting. Then we can use meta.prefix_block-target_offset_remap + # to determine where in the original code the PUSH_EXC_INFO offset + # replaced. + prefix_blocks: List[Instruction] = [] + for inst in instructions: + if len(prefix_blocks) == len( + meta.prefix_block_target_offset_remap + ): + break + if inst.opname == "PUSH_EXC_INFO": + prefix_blocks.append(inst) + + # offsets into prefix + for inst, o in zip( + prefix_blocks, meta.prefix_block_target_offset_remap + ): + block_target_offset_remap[cast(int, inst.offset)] = o + + # old bytecode targets are after the prefix PUSH_EXC_INFO's + old_start_offset = ( + cast(int, prefix_blocks[-1].offset) if prefix_blocks else -1 + ) + # offsets into old bytecode + old_inst_offsets = sorted( + n for n in setup_fn_target_offsets if n > old_start_offset + ) + targets = _filter_iter( + instructions, old_inst_offsets, lambda inst, o: inst.offset == o + ) + new_targets = _filter_iter( + zip(reversed(instructions), reversed(meta.instructions)), + targets, + lambda v1, v2: v1[0] is v2, + ) + for new, old in zip(new_targets, targets): + block_target_offset_remap[old.offset] = new[1].offset + + transform_code_object(code, remap_block_offsets) + + # if offset is not in setup_fn_target_offsets, it is an error + setup_fn_target_offsets = tuple( + meta.block_target_offset_remap[n] for n in setup_fn_target_offsets + ) + return ContinueExecutionCache.lookup( + meta.code, lineno, new_offset, setup_fn_target_offsets, *args + ) + + +""" +# partially finished support for with statements + +def convert_locals_to_cells( + instructions: List[Instruction], + code_options: Dict[str, Any]): + + code_options["co_cellvars"] = tuple( + var + for var in code_options["co_varnames"] + if var not in code_options["co_freevars"] + and not var.startswith("___stack") + ) + cell_and_free = code_options["co_cellvars"] + code_options["co_freevars"] + for inst in instructions: + if str(inst.argval).startswith("___stack"): + continue + elif inst.opname == "LOAD_FAST": + inst.opname = "LOAD_DEREF" + elif inst.opname == "STORE_FAST": + inst.opname = "STORE_DEREF" + elif inst.opname == "DELETE_FAST": + inst.opname = "DELETE_DEREF" + else: + continue + inst.opcode = dis.opmap[inst.opname] + assert inst.argval in cell_and_free, inst.argval + inst.arg = cell_and_free.index(inst.argval) + +def patch_setup_with( + instructions: List[Instruction], + code_options: Dict[str, Any] +): + nonlocal need_skip + need_skip = True + target_index = next( + idx for idx, i in enumerate(instructions) if i.offset == offset + ) + assert instructions[target_index].opname == "SETUP_WITH" + convert_locals_to_cells(instructions, code_options) + + stack_depth_before = nstack + stack_effect(instructions[target_index].opcode, + instructions[target_index].arg) + + inside_with = [] + inside_with_resume_at = None + stack_depth = stack_depth_before + idx = target_index + 1 + for idx in range(idx, len(instructions)): + inst = instructions[idx] + if inst.opname == "BEGIN_FINALLY": + inside_with_resume_at = inst + break + elif inst.target is not None: + unimplemented("jump from with not supported") + elif inst.opname in ("BEGIN_FINALLY", "WITH_CLEANUP_START", "WITH_CLEANUP_FINISH", 
"END_FINALLY", + "POP_FINALLY", "POP_EXCEPT", + "POP_BLOCK", "END_ASYNC_FOR"): + unimplemented("block ops not supported") + inside_with.append(inst) + stack_depth += stack_effect(inst.opcode, inst.arg) + assert inside_with_resume_at + + instructions = [ + create_instruction("LOAD_FAST", f"___stack{i}") for i in range(nstack) + ] + [ + create_instruction("SETUP_WITH", target=instructions[target_index].target) + ... call the function ... + unpack_tuple + ] + [ + create_instruction("JUMP_ABSOLUTE", target=inside_with_resume_at) + ] +""" diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/side_effects.py b/venv/lib/python3.10/site-packages/torch/_dynamo/side_effects.py new file mode 100644 index 0000000000000000000000000000000000000000..80bd9e0b0c1a5f3e4e599b12cfc5a71a93913ec3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/side_effects.py @@ -0,0 +1,542 @@ +import inspect +from typing import Any, Dict, List, Optional, Union + +import torch.nn + +from . import utils, variables +from .bytecode_transformation import ( + create_call_function, + create_call_method, + create_instruction, +) +from .codegen import PyCodegen +from .exc import unimplemented +from .source import LocalSource, Source +from .utils import nn_module_new, object_new +from .variables.base import ( + is_side_effect_safe, + MutableLocalBase, + MutableLocalSource, + VariableTracker, +) + + +class MutableSideEffects(MutableLocalBase): + """ + VariableTracker.mutable_local marker to indicate a list passed as + an input that if we mutate we need to re-apply those mutations after + the graph runs. + """ + + def __init__(self, source: Source, is_modified: bool = False): + super().__init__(MutableLocalSource.Existing) + self.source = source + self.is_modified = is_modified + + +class AttributeMutation(MutableLocalBase): + """ + VariableTracker.mutable_local marker to track changes to attributes + """ + + def __init__(self, typ: MutableLocalSource, source: Optional[Source]): + super().__init__(typ) + self.source = source + + +class AttributeMutationExisting(AttributeMutation): + def __init__(self, source: Source): + super().__init__(MutableLocalSource.Existing, source) + self.source = source + + +class AttributeMutationNew(AttributeMutation): + def __init__(self, source: Optional[Source], cls_source: Optional[Source]): + super().__init__(MutableLocalSource.Local, source) + self.cls_source = cls_source + + +class SideEffects: + """ + Track side effects (list mutation, setattr, etc) that need to be + applied after an FX graph is run. 
+ """ + + id_to_variable: Dict[int, VariableTracker] + store_attr_mutations: Dict[MutableLocalBase, Dict[str, VariableTracker]] + keepalive: List[Any] + + def __init__( + self, + id_to_variable=None, + store_attr_mutations=None, + keepalive=None, + save_for_backward=None, + tensor_hooks=None, + ): + super().__init__() + self.id_to_variable = id_to_variable or {} + self.store_attr_mutations = store_attr_mutations or {} + self.keepalive = keepalive or [] + self.save_for_backward = save_for_backward or [] + self.tensor_hooks = tensor_hooks or {} + + def __eq__(self, other: object) -> bool: + assert isinstance(other, SideEffects) + # NB: do NOT test keepalive + return ( + self.id_to_variable == other.id_to_variable + and self.store_attr_mutations == other.store_attr_mutations + and self.save_for_backward == other.save_for_backward + and self.tensor_hooks == other.tensor_hooks + ) + + def diff(self, other: "SideEffects") -> Optional[str]: + if self.id_to_variable != other.id_to_variable: + sk_itv = self.id_to_variable.keys() + ok_itv = other.id_to_variable.keys() + if sk_itv != ok_itv: + return f"id_to_variable keys: {sk_itv} != {ok_itv}" + # Feel free to augment this with more fancy diffing logic + # if needed for debugging + return "id_to_variable: unknown diff" + elif self.store_attr_mutations != other.store_attr_mutations: + sk_sam = self.store_attr_mutations.keys() + ok_sam = other.store_attr_mutations.keys() + if sk_sam != ok_sam: + return f"store_attr_mutations keys: {sk_sam} != {ok_sam}" + return "store_attr_mutations: unknown diff" + elif self.save_for_backward != other.save_for_backward: + return "save_for_backward" + elif self.tensor_hooks != other.tensor_hooks: + return "tensor_hooks" + else: + return None + + def clone(self): + """Create a shallow copy""" + return self.__class__( + id_to_variable=dict(self.id_to_variable), + store_attr_mutations={ + k: dict(v) for k, v in self.store_attr_mutations.items() + }, + keepalive=list(self.keepalive), + save_for_backward=self.save_for_backward, + tensor_hooks=self.tensor_hooks, + ) + + def apply(self, fn, cache=None, skip_fn=lambda _: False): + if cache is None: + cache = dict() + + self.id_to_variable = { + k: VariableTracker.apply(fn, v, cache, skip_fn) + for k, v in self.id_to_variable.items() + } + self.store_attr_mutations = { + k: VariableTracker.apply(fn, v, cache, skip_fn) + for k, v in self.store_attr_mutations.items() + } + self.save_for_backward = VariableTracker.apply( + fn, self.save_for_backward, cache, skip_fn + ) + self.tensor_hooks = VariableTracker.apply(fn, self.tensor_hooks, cache, skip_fn) + + def __contains__(self, item): + return id(item) in self.id_to_variable + + def __getitem__(self, item): + return self.id_to_variable[id(item)] + + def check_allowed_side_effect(self, item): + from torch._dynamo.variables.misc import AutogradFunctionContextVariable + + # People do things like self.dim = dim inside autograd.Function. + # These are benign. 
+ if isinstance(item, AutogradFunctionContextVariable): + return True + if not is_side_effect_safe(item.mutable_local): + unimplemented( + "HigherOrderOperator: Mutating a variable not in the current scope (SideEffects)" + ) + + def store_attr(self, item: VariableTracker, name: str, value: VariableTracker): + assert self.is_attribute_mutation(item) + self.check_allowed_side_effect(item) + if item.mutable_local not in self.store_attr_mutations: + self.store_attr_mutations[item.mutable_local] = {} + self.store_attr_mutations[item.mutable_local][name] = value + + def load_attr(self, item, name, deleted_ok=False): + assert self.is_attribute_mutation(item) + result = self.store_attr_mutations[item.mutable_local][name] + if not deleted_ok and isinstance(result, variables.DeletedVariable): + unimplemented("read deleted attribute") + return result + + def store_cell(self, cellvar, value): + assert isinstance(cellvar, variables.NewCellVariable) + assert isinstance(value, variables.VariableTracker) + self.store_attr(cellvar, "cell_contents", value) + + def load_cell(self, cellvar): + assert isinstance(cellvar, variables.NewCellVariable) + return self.load_attr(cellvar, "cell_contents") + + def load_global(self, gvar: VariableTracker, name: str): + assert isinstance(gvar, variables.VariableTracker) + return self.load_attr(gvar, name) + + def store_global(self, gvar: VariableTracker, name: str, value: VariableTracker): + assert isinstance(gvar, variables.VariableTracker) + assert isinstance(value, variables.VariableTracker) + self.store_attr(gvar, name, value) + + @staticmethod + def cls_supports_mutation_side_effects(cls): + return inspect.getattr_static(cls, "__setattr__", None) in ( + object.__setattr__, + torch.nn.Module.__setattr__, + ) + + def is_attribute_mutation(self, item): + return isinstance(item.mutable_local, AttributeMutation) + + def has_pending_mutation(self, item): + return self.is_attribute_mutation(item) and bool( + self.store_attr_mutations.get(item.mutable_local) + ) + + def is_modified(self, item): + if isinstance(item.mutable_local, AttributeMutationNew): + return True + if self.is_attribute_mutation(item): + return item.mutable_local in self.store_attr_mutations + return item.mutable_local.is_modified + + def _track_obj( + self, + item: Any, + variable: VariableTracker, + mutable_cls=MutableSideEffects, + ): + """Start tracking a new variable for mutation""" + assert variable.source is not None + variable.mutable_local = mutable_cls(variable.source) + self.id_to_variable[id(item)] = variable + self.keepalive.append(item) + return variable + + track_mutable = _track_obj + + def track_object_existing( + self, + item: Any, + variable: VariableTracker, + ): + return self._track_obj(item, variable, mutable_cls=AttributeMutationExisting) + + def track_object_new( + self, + cls_source: Source, + user_cls: Any, + variable_cls: Any, + options, + ): + if user_cls is torch.autograd.function.FunctionCtx: + obj = torch.autograd.Function() + elif issubclass(user_cls, torch.nn.Module): + obj = nn_module_new(user_cls) + else: + obj = object_new(user_cls) + variable = variable_cls( + obj, + mutable_local=AttributeMutationNew(None, cls_source), + **options, + ) + self.id_to_variable[id(obj)] = variable + self.keepalive.append(obj) + return variable + + def track_cell_new( + self, + ): + obj = object() + variable = variables.NewCellVariable( + mutable_local=AttributeMutationNew(None, None), + ) + self.id_to_variable[id(obj)] = variable + self.keepalive.append(obj) + return variable + + def 
track_cell_existing(self, source: Source, item: Any): + variable = variables.NewCellVariable( + mutable_local=AttributeMutationExisting(source), + ) + self.id_to_variable[id(item)] = variable + self.keepalive.append(item) + return variable + + def track_global_existing(self, source: Source, item: Any): + variable = variables.NewGlobalVariable( + mutable_local=AttributeMutationExisting(source), + ) + self.id_to_variable[id(item)] = variable + self.keepalive.append(item) + return variable + + def track_save_for_backward(self, ctx, args): + assert isinstance(ctx, variables.AutogradFunctionContextVariable) + self.save_for_backward.append((ctx, args)) + + def track_tensor_variables_from_runahead_side_effects(self, other): + # In higher order ops we want to keep track of tensors seen in the + # speculate_subgraph so that we don't lift them again as a new input in + # other speculate_subgraph or in the root tracer. + for other_item in other.keepalive: + other_id = id(other_item) + other_variable = other.id_to_variable[other_id] + if other_id not in self.id_to_variable and isinstance( + other_variable, variables.TensorVariable + ): + self.track_object_existing(other_item, other_variable) + + def prune_dead_object_new(self, tx): + live_new_objects = set() + skip_obj = None + + def visit(var: VariableTracker): + if ( + isinstance(var.mutable_local, AttributeMutationNew) + and var.mutable_local is not skip_obj + ): + live_new_objects.add(var.mutable_local) + return var + + def is_live(var: Union[MutableLocalBase, VariableTracker]): + if isinstance(var, AttributeMutationNew): + return var in live_new_objects + if isinstance(var, VariableTracker): + return is_live(var.mutable_local) + return True + + VariableTracker.apply(visit, (tx.stack, tx.symbolic_locals)) + for var in self.id_to_variable.values(): + if not isinstance(var.mutable_local, AttributeMutationNew): + VariableTracker.apply(visit, var) + + for skip_obj, setattrs in self.store_attr_mutations.items(): + VariableTracker.apply(visit, setattrs) + + self.id_to_variable = { + k: v for k, v in self.id_to_variable.items() if is_live(v) + } + self.store_attr_mutations = { + k: v for k, v in self.store_attr_mutations.items() if is_live(k) + } + + def mutation(self, var): + self.check_allowed_side_effect(var) + if isinstance(var.mutable_local, MutableSideEffects): + var.mutable_local = MutableSideEffects(var.mutable_local.source, True) + + def _get_modified_vars(self): + return [var for var in self.id_to_variable.values() if self.is_modified(var)] + + def codegen_save_tempvars(self, cg: PyCodegen): + for var in self._get_modified_vars(): + if isinstance( + var.mutable_local, (AttributeMutationExisting, AttributeMutationNew) + ) and isinstance(var, variables.NewCellVariable): + cg.load_import_from(utils.__name__, "make_cell") + cg.extend_output(create_call_function(0, True)) + cg.add_cache(var) + if isinstance(var.mutable_local, AttributeMutationNew): + var.mutable_local.source = LocalSource(cg.tempvars[var]) # type: ignore[attr-defined] + elif isinstance(var.mutable_local, AttributeMutationNew): + if isinstance(var, variables.AutogradFunctionContextVariable): + unimplemented("AutogradFunctionContextVariable escaped") + if "__call_nn_module_init" in self.store_attr_mutations.get( + var.mutable_local, {} + ): + assert isinstance(var, variables.UnspecializedNNModuleVariable) + cg.load_import_from(utils.__name__, "nn_module_new") + else: + cg.load_import_from(utils.__name__, "object_new") + cg(var.mutable_local.cls_source) + 
cg.extend_output(create_call_function(1, True)) + cg.add_cache(var) + var.mutable_local.source = LocalSource(cg.tempvars[var]) + elif var in cg.tempvars: + assert cg.tempvars.get(var) is None + # subsequent usage should point to the original variable + cg(var.mutable_local.source) + cg.add_cache(var) + + for ctx, args in self.save_for_backward: + cg(ctx.source) + cg.extend_output( + [create_instruction("LOAD_METHOD", argval="save_for_backward")] + ) + for arg in args: + cg(arg) + cg.extend_output( + [ + *create_call_method(len(args)), + create_instruction("POP_TOP"), + ] + ) + + def register_hook(self, tensor, hook, handle, name): + assert isinstance(tensor, variables.TensorVariable) + assert isinstance(hook, variables.VariableTracker) + assert ( + isinstance(handle, variables.RemovableHandleVariable) + and handle.mutable_local + ) + assert hasattr(torch.Tensor, name) + idx = len(self.tensor_hooks.keys()) + # duplicate index possible because of self.remove_hook() + while idx in self.tensor_hooks: + idx += 1 + self.tensor_hooks[idx] = (tensor, hook, handle, name) + assert not handle.idx + handle.idx = idx + + def remove_hook(self, idx): + del self.tensor_hooks[idx] + + def codegen_hooks(self, cg): + for ( + tensor, + hook, + handle, + name, + ) in self.tensor_hooks.values(): + # Note: [On tensor.register_hook] + # + # register_hook on a tensor, AKA backward hooks, have slightly nuanced differences in how they are implemented + # when it comes to hooks on objects with sources (inputs, params) vs objects without sources (intermediaries). + # + # For tensors with a source, we bypass direct inclusion of register_hook calls in the graph. + # Instead, these are tracked and stashed as a global variable, enabling their association with tensors in + # the residuals. During dynamo's frame creation, these hooks are invoked seamlessly on known reconstructible/fetch-able + # tensors. Because a source indicates knowledge of this object outside the torch compile region, and + # because we are running residuals firmly before .backward() can be run, it is sound to invoke + # `register_hook` on a known tensor. + # + # For tensors without a source, we support a limited subset of hooks. Global functions only, and + # compiled_autograd must be enabled or we will graph break. + # + # Handling the Handle: When a user retains the register_hook result in a handle, we intercept the + # STORE_FAST operation to record the user-designated local variable name. This ensures the reconstructed + # bytecode retains this name. If no handle is defined, we simply pop the generated value to keep the + # stack intact. + # + # Dynamo Tensor Hooks Workflow: + # - Functions passed to register_hook are lifted globally. + # - For tensors with sources: + # - In the "side_effects" phase of codegen, we iterate over tensors with hooks to: + # - Generate the tensor. + # - Issue a register_hook call on the tensor, linking to the globally stored function. + # - Incorporate a handle if one was established in the eager phase. + # - For tensors without sources: + # - We don't generate any instructions for registering a hook. + # - Handles from intermediary hooks are NYI. + # - We produce a call function that utilizes the trace_wrapped higher order op, closing over it. + # - We then manually insert the call function above into the graph. + # - The handle's exact user-specified name, "user_code_variable_name", is discerned and associated during STORE_FAST. 
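+            # A rough user-level sketch of the pattern described above (illustrative
+            # only; `fn`, `x`, and `double_grad` are made-up names for the example):
+            #
+            #     def double_grad(grad):
+            #         return grad * 2
+            #
+            #     def fn(x):
+            #         x.register_hook(double_grad)  # x is a graph input, so it has a source
+            #         return x.sin()
+            #
+            #     x = torch.randn(3, requires_grad=True)
+            #     torch.compile(fn)(x).sum().backward()
+            #
+            # Because `x` has a source, the register_hook call is replayed in the residual
+            # bytecode after the graph runs; a hook on an intermediate (source-less) tensor
+            # would instead rely on compiled_autograd, as noted above.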
+ assert tensor.source, "Hooks on non input tensors NYI - should not get here" + cg(tensor) + cg.extend_output([cg.create_load_attr(name)]) + cg(hook) + cg.extend_output(create_call_function(1, True)) + + # Adding the handle to the cache means RemovableHandleVariable().reconstruct() will + # be associated with the return value of register_hook(). This consumes the top of stack. + cg.add_cache(handle) + + def codegen_update_mutated(self, cg: PyCodegen): + suffixes = [] + for var in self._get_modified_vars(): + if isinstance(var, variables.ListVariable): + # old[:] = new + cg(var, allow_cache=False) + cg(var.mutable_local.source) # type: ignore[attr-defined] + cg.extend_output( + [ + cg.create_load_const(None), + cg.create_load_const(None), + create_instruction("BUILD_SLICE", arg=2), + ] + ) + suffixes.append([create_instruction("STORE_SUBSCR")]) + elif isinstance(var, variables.ConstDictVariable): + cg.tx.output.update_co_names("clear") + cg.tx.output.update_co_names("update") + + cg(var.mutable_local.source) # type: ignore[attr-defined] + cg.extend_output([create_instruction("LOAD_METHOD", argval="update")]) + cg(var, allow_cache=False) + + cg(var.mutable_local.source) # type: ignore[attr-defined] + cg.extend_output([create_instruction("LOAD_METHOD", argval="clear")]) + + suffixes.append( + [ + *create_call_method(0), # clear + create_instruction("POP_TOP"), + *create_call_method(1), # update + create_instruction("POP_TOP"), + ] + ) + elif self.is_attribute_mutation(var): + for name, value in self.store_attr_mutations.get( + var.mutable_local, {} + ).items(): + if isinstance(var, variables.NewGlobalVariable): + cg.tx.output.update_co_names(name) + cg(value) + suffixes.append( + [create_instruction("STORE_GLOBAL", argval=name)] + ) + elif name == "__call_nn_module_init": + pass # handled in codegen_save_tempvars + elif isinstance(value, variables.DeletedVariable): + if isinstance( + var.mutable_local, AttributeMutationExisting + ) and hasattr(getattr(var, "value", None), name): + cg.tx.output.update_co_names(name) + cg(var.mutable_local.source) + suffixes.append( + [create_instruction("DELETE_ATTR", argval=name)] + ) + else: + cg.tx.output.update_co_names(name) + cg(value) + cg(var.mutable_local.source) + suffixes.append([create_instruction("STORE_ATTR", argval=name)]) + elif isinstance(var, variables.TupleIteratorVariable): + for _ in range(var.index): + cg.load_import_from(utils.__name__, "iter_next") + cg(var.mutable_local.source) # type: ignore[attr-defined] + cg.extend_output(create_call_function(1, True)) + cg.append_output(create_instruction("POP_TOP")) + else: + raise AssertionError(type(var)) + + # do all the actual mutations at the very end to handle dependencies + for suffix in reversed(suffixes): + cg.extend_output(suffix) + + def is_empty(self): + return not ( + any(map(self.is_modified, self.id_to_variable.values())) + or self.tensor_hooks + or self.save_for_backward + or self.tensor_hooks + ) + + def clear(self): + self.keepalive.clear() + self.id_to_variable.clear() diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/source.py b/venv/lib/python3.10/site-packages/torch/_dynamo/source.py new file mode 100644 index 0000000000000000000000000000000000000000..915dae91b64f43db0082c50a48f40c3340a60acf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/source.py @@ -0,0 +1,545 @@ +import collections +import dataclasses +import enum +from typing import Any, Optional, Union + +from torch._guards import ChainedSource, GuardSource, Source + +from . 
import utils +from .bytecode_transformation import create_call_function, create_instruction +from .utils import enum_repr + +# It shouldn't be supported to construct an NNModuleVariable inside an FSDP module, +# so those cases are omitted intentionally +_GUARD_SOURCE_NN_MODULE = { + GuardSource.LOCAL: GuardSource.LOCAL_NN_MODULE, + GuardSource.GLOBAL: GuardSource.GLOBAL_NN_MODULE, + GuardSource.LOCAL_NN_MODULE: GuardSource.LOCAL_NN_MODULE, + GuardSource.GLOBAL_NN_MODULE: GuardSource.GLOBAL_NN_MODULE, +} + +_GUARD_SOURCE_FSDP_MODULE = { + GuardSource.LOCAL: GuardSource.LOCAL_FSDP_MODULE, + GuardSource.GLOBAL: GuardSource.GLOBAL_FSDP_MODULE, + GuardSource.LOCAL_NN_MODULE: GuardSource.LOCAL_FSDP_MODULE, + GuardSource.GLOBAL_NN_MODULE: GuardSource.GLOBAL_FSDP_MODULE, + GuardSource.LOCAL_FSDP_MODULE: GuardSource.LOCAL_FSDP_MODULE, + GuardSource.GLOBAL_FSDP_MODULE: GuardSource.GLOBAL_FSDP_MODULE, +} + +_GUARD_SOURCE_NOT_NN_MODULE = { + GuardSource.LOCAL: GuardSource.LOCAL, + GuardSource.GLOBAL: GuardSource.GLOBAL, + GuardSource.LOCAL_NN_MODULE: GuardSource.LOCAL, + GuardSource.GLOBAL_NN_MODULE: GuardSource.GLOBAL, + GuardSource.LOCAL_FSDP_MODULE: GuardSource.LOCAL, + GuardSource.GLOBAL_FSDP_MODULE: GuardSource.GLOBAL, +} + + +def is_constant_source(source): + if isinstance(source, ConstantSource): + return True + try: + if source.guard_source() == GuardSource.CONSTANT: + return True + except NotImplementedError: + pass + + return False + + +def reconstruct_getitem( + source: Union["GetItemSource", "ODictGetItemSource"], codegen, index_is_slice +): + source.base.reconstruct(codegen) + if isinstance(source.index, Source): + source.index.reconstruct(codegen) + else: + if index_is_slice: + assert isinstance(source, GetItemSource) + codegen.append_output(codegen.create_load_const(source.unpack_slice())) + else: + codegen.append_output(codegen.create_load_const(source.index)) + + +@dataclasses.dataclass(frozen=True) +class LocalSource(Source): + local_name: str + cell_or_freevar: bool = False + + def reconstruct(self, codegen): + codegen.append_output(codegen.create_load(self.local_name)) + + def guard_source(self): + return GuardSource.LOCAL + + def name(self): + return f"L[{repr(self.local_name)}]" + + +@dataclasses.dataclass(frozen=True) +class SyntheticLocalSource(Source): + local_name: str + + def reconstruct(self, codegen): + codegen.append_output(codegen.create_load(self.local_name)) + + def guard_source(self): + return GuardSource.SYNTHETIC_LOCAL + + def name(self): + return f"SYNTHETIC_LOCAL[{self.local_name!r}]" + + +@dataclasses.dataclass(frozen=True) +class RandomValueSource(Source): + random_call_index: int + + def guard_source(self): + return GuardSource.RANDOM_VALUE + + def reconstruct(self, codegen): + codegen.append_output(codegen.create_load(codegen.tx.output.random_values_var)) + codegen.append_output(codegen.create_load_const(self.random_call_index)) + codegen.append_output(create_instruction("BINARY_SUBSCR")) + + def name(self): + return f"random_value_{self.random_call_index}" + + +@dataclasses.dataclass(frozen=True) +class GlobalSource(Source): + global_name: str + + def reconstruct(self, codegen): + codegen.append_output( + codegen.create_load_global(self.global_name, False, add=True) + ) + + def guard_source(self): + return GuardSource.GLOBAL + + def name(self): + return f"G[{repr(self.global_name)}]" + + +@dataclasses.dataclass(frozen=True) +class GlobalWeakRefSource(Source): + global_name: str + + def reconstruct(self, codegen): + codegen.append_output( + 
codegen.create_load_global(self.global_name, True, add=True) + ) + codegen.extend_output(create_call_function(0, False)) + + def guard_source(self): + return GuardSource.GLOBAL + + def name(self): + return f"G[{repr(self.global_name)}]()" + + +@dataclasses.dataclass(frozen=True) +class AttrSource(ChainedSource): + member: str + get_static: bool = False + + def __post_init__(self): + assert self.base, "Can't construct an AttrSource without a valid base source" + if "." in self.member: + member_parts = self.member.split(".") + object.__setattr__( + self, "base", AttrSource(self.base, ".".join(member_parts[:-1])) + ) + object.__setattr__(self, "member", member_parts[-1]) + + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + codegen.extend_output(codegen.create_load_attrs(self.member)) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + if self.get_static: + return f"inspect.getattr_static({self.base.name()}, {self.member!r})" + elif not self.member.isidentifier(): + return f"getattr({self.base.name()}, {self.member!r})" + return f"{self.base.name()}.{self.member}" + + +@dataclasses.dataclass(frozen=True) +class ParamBufferSource(AttrSource): + def guard_source(self): + return _GUARD_SOURCE_NN_MODULE[self.base.guard_source()] + + +# This source is intended to be used in places where a source is needed but it is expected +# that the symbol will be simplified out later on. Symbols with ephemeral sources are +# prioritized to be simplified out when e.g. compared against a symbol without an ephemeral +# source. Guarding on this source is an error. +# +# Example: During subclass view fake-ification, any close-over ViewFunc state should be +# symbolicized / fake-ified to avoid invalid specialization during view replay. This source +# is useful for symbols utilized in the middle of the view chain that are not expected to be +# present within the final view shape metadata. 
+@dataclasses.dataclass(frozen=True) +class EphemeralSource(Source): + desc: Optional[str] = None + + def guard_source(self): + return GuardSource.EPHEMERAL + + def name(self): + return f"" + + def make_guard(self): + raise NotImplementedError() + + def is_ephemeral(self): + return True + + +class TensorProperty(enum.Enum): + SIZE = 0 + STRIDE = 1 + STORAGE_OFFSET = 2 + + def method_name(self): + if self is TensorProperty.SIZE: + return "size" + elif self is TensorProperty.STRIDE: + return "stride" + elif self is TensorProperty.STORAGE_OFFSET: + return "storage_offset" + + +@dataclasses.dataclass(frozen=True) +class TensorPropertySource(ChainedSource): + prop: TensorProperty + idx: Optional[int] = None # None for STORAGE_OFFSET + + def __post_init__(self): + assert self.base is not None + if self.prop is TensorProperty.STORAGE_OFFSET: + assert self.idx is None + else: + assert self.idx is not None + + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + codegen.append_output(codegen.create_load_attr(self.prop.method_name())) + if self.idx is not None: + codegen.append_output(codegen.create_load_const(self.idx)) + codegen.extend_output( + create_call_function(1 if self.idx is not None else 0, True) + ) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + if self.prop is TensorProperty.SIZE: + return f"{self.base.name()}.size()[{self.idx}]" + elif self.prop is TensorProperty.STRIDE: + return f"{self.base.name()}.stride()[{self.idx}]" + elif self.prop is TensorProperty.STORAGE_OFFSET: + assert self.idx is None + return f"{self.base.name()}.storage_offset()" + else: + raise AssertionError(f"unhandled {self.prop}") + + +@dataclasses.dataclass(frozen=True) +class NegateSource(ChainedSource): + def __post_init__(self): + assert self.base is not None + + def reconstruct(self, codegen): + raise NotImplementedError() + + def guard_source(self): + return self.base.guard_source() + + def name(self): + # NB: use method call so that function stripping regexes work + return f"{self.base.name()}.__neg__()" + + +@dataclasses.dataclass(frozen=True) +class ConvertIntSource(ChainedSource): + def __post_init__(self): + assert self.base is not None + + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + return f"cast_symbool_to_symint_guardless({self.base.name()})" + + +@dataclasses.dataclass(frozen=True) +class DefaultsSource(ChainedSource): + idx_key: Union[int, str] + is_kw: bool = False + field: str = dataclasses.field(init=False, repr=False, compare=False) + _name: str = dataclasses.field(init=False, repr=False, compare=False) + + def __post_init__(self): + assert ( + self.base + ), "Base must be a valid source in order to properly track and guard this Defaults to its origin." 
+ if self.is_kw: + assert isinstance(self.idx_key, str) + object.__setattr__(self, "field", "__kwdefaults__") + object.__setattr__( + self, "_name", f"{self.base.name()}.{self.field}['{self.idx_key}']" + ) + else: + assert isinstance(self.idx_key, int) + object.__setattr__(self, "field", "__defaults__") + object.__setattr__( + self, "_name", f"{self.base.name()}.{self.field}[{self.idx_key}]" + ) + + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + codegen.extend_output(codegen.create_load_attrs(self.field)) + codegen.append_output(codegen.create_load_const(self.idx_key)) + codegen.append_output(create_instruction("BINARY_SUBSCR")) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + return self._name + + +@dataclasses.dataclass(frozen=True) +class GetItemSource(ChainedSource): + index: Any + index_is_slice: bool = False + + def __post_init__(self): + assert self.base is not None + if isinstance(self.index, slice): + # store the hashable version of the slice so the whole GetItemSource is hashable + super().__setattr__("index", self.index.__reduce__()) + super().__setattr__("index_is_slice", True) + + def reconstruct(self, codegen): + reconstruct_getitem(self, codegen, index_is_slice=self.index_is_slice) + codegen.append_output(create_instruction("BINARY_SUBSCR")) + + def guard_source(self): + return self.base.guard_source() + + def unpack_slice(self): + assert self.index_is_slice + slice_class, slice_args = self.index + return slice_class(*slice_args) + + def name(self): + # Index can be of following types + # 1) ConstDictKeySource + # 2) enum.Enum + # 3) index is a slice - example 1:4 + # 4) index is a constant - example string, integer + if isinstance(self.index, Source): + if not isinstance(self.index, ConstDictKeySource): + raise ValueError( + "GetItemSource index must be a constant, enum or ConstDictKeySource" + ) + return f"{self.base.name()}[{self.index.name()}]" + elif self.index_is_slice: + return f"{self.base.name()}[{self.unpack_slice()!r}]" + elif isinstance(self.index, enum.Enum): + return f"{self.base.name()}[{enum_repr(self.index, self.guard_source().is_local())}]" + else: + return f"{self.base.name()}[{self.index!r}]" + + +@dataclasses.dataclass(frozen=True) +class ConstDictKeySource(GetItemSource): + def is_dict_key(self): + return True + + def reconstruct(self, codegen): + codegen.load_import_from(utils.__name__, "dict_keys_getitem") + self.base.reconstruct(codegen) + codegen.append_output(codegen.create_load_const(self.index)) + codegen.extend_output(create_call_function(2, True)) + + def name(self): + # The list creation will be CSE'd by PyExprCSEPass + return f"list({self.base.name()}.keys())[{self.index!r}]" + + +@dataclasses.dataclass(frozen=True) +class TupleIteratorGetItemSource(GetItemSource): + def reconstruct(self, codegen): + codegen.load_import_from(utils.__name__, "tuple_iterator_getitem") + self.base.reconstruct(codegen) + codegen.append_output(codegen.create_load_const(self.index)) + codegen.extend_output(create_call_function(2, True)) + + def name(self): + return f"___tuple_iterator_getitem({self.base.name()}, {self.index!r})" + + +@dataclasses.dataclass(frozen=True) +class TypeSource(ChainedSource): + def __post_init__(self): + assert self.base is not None + + def reconstruct(self, codegen): + codegen.load_import_from("builtins", "type") + self.base.reconstruct(codegen) + codegen.extend_output(create_call_function(1, True)) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + 
return f"type({self.base.name()})" + + +@dataclasses.dataclass(frozen=True) +class ODictGetItemSource(ChainedSource): + index: Any + + def __post_init__(self): + assert self.base is not None + + def reconstruct(self, codegen): + codegen.append_output( + codegen._create_load_const(collections.OrderedDict.__getitem__) + ) + reconstruct_getitem(self, codegen, index_is_slice=False) + codegen.extend_output(create_call_function(2, True)) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + if isinstance(self.index, type): + rep = f'__load_module("{self.index.__module__}").{self.index.__qualname__}' + return f"___odict_getitem({self.base.name()}, {rep})" + elif isinstance(self.index, Source): + return f"___odict_getitem({self.base.name()}, {self.index.name()})" + else: + return f"___odict_getitem({self.base.name()}, {self.index!r})" + + +@dataclasses.dataclass(frozen=True) +class NNModuleSource(ChainedSource): + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + + def guard_source(self): + return _GUARD_SOURCE_NN_MODULE[self.base.guard_source()] + + def name(self): + return self.base.name() + + +@dataclasses.dataclass(frozen=True) +class NotNNModuleSource(NNModuleSource): + def guard_source(self): + return _GUARD_SOURCE_NOT_NN_MODULE[self.base.guard_source()] + + +@dataclasses.dataclass(frozen=True) +class FSDPNNModuleSource(NNModuleSource): + def guard_source(self): + return _GUARD_SOURCE_FSDP_MODULE[self.base.guard_source()] + + +@dataclasses.dataclass(frozen=True) +class GlobalStateSource(Source): + def name(self): + return "" + + def guard_source(self): + return GuardSource.GLOBAL + + +@dataclasses.dataclass(frozen=True) +class ConstantSource(Source): + source_name: str + + def reconstruct(self, codegen): + codegen.append_output( + codegen.create_load_global(self.source_name, False, add=False) + ) + + def guard_source(self): + return GuardSource.CONSTANT + + def name(self): + return self.source_name + + def make_guard(self, fn): + raise NotImplementedError() + + +@dataclasses.dataclass(frozen=True) +class NumpyTensorSource(ChainedSource): + def name(self) -> str: + return f"___from_numpy({self.base.name()})" + + def guard_source(self): + return self.base.guard_source() + + def reconstruct(self, codegen): + codegen.load_import_from("torch", "as_tensor") + self.base.reconstruct(codegen) + codegen.extend_output(create_call_function(1, True)) + + +# This is a synthetic source that is associated with the singleton +# shape env guard we always register for all frames. 
We get the actual +# guard contents from the ambient ShapeEnv +@dataclasses.dataclass(frozen=True) +class ShapeEnvSource(Source): + def name(self): + return "" + + def guard_source(self): + return GuardSource.SHAPE_ENV + + +@dataclasses.dataclass(frozen=True) +class BackwardStateSource(Source): + def name(self): + return "" + + def guard_source(self): + return GuardSource.BACKWARD_STATE + + +def is_from_local_source(source: Source, *, allow_cell_or_freevar=True): + if isinstance(source, ChainedSource): + return is_from_local_source( + source.base, allow_cell_or_freevar=allow_cell_or_freevar + ) + if not isinstance(source, LocalSource): + return False + if not allow_cell_or_freevar and source.cell_or_freevar: + return False + return True + + +# TODO: can probably write a generic "test this on everything in the chain" +# helper +def is_from_defaults(source: Source): + if isinstance(source, DefaultsSource): + return True + if isinstance(source, ChainedSource): + return is_from_defaults(source.base) + return False diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py b/venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..6701115cae873402cd8fac9aafe888f49a621be0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py @@ -0,0 +1,2603 @@ +import collections +import contextlib +import copy +import dataclasses +import dis +import functools +import importlib +import inspect +import itertools +import linecache +import logging +import operator +import sys +import textwrap +import threading +import traceback +import types +import typing +import weakref +from typing import Any, Dict, List, NamedTuple, Optional, Set, Tuple, Type +from unittest.mock import patch + +import torch +import torch._logging +from torch._guards import Checkpointable, tracing, TracingContext + +from . 
import config, exc, logging as torchdynamo_logging, trace_rules, variables +from .bytecode_analysis import ( + get_indexof, + JUMP_OPNAMES, + livevars_analysis, + propagate_line_nums, +) +from .bytecode_transformation import ( + cleaned_instructions, + create_call_function, + create_instruction, + create_jump_absolute, + Instruction, + is_generator, + unique_id, +) +from .code_context import code_context +from .codegen import PyCodegen +from .current_scope_id import current_scope_id +from .exc import ArgsMismatchError, BackendCompilerFailed, unimplemented, Unsupported +from .funcname_cache import get_funcname +from .guards import GuardBuilder, install_guard +from .output_graph import GraphCompileReason, OutputGraph, OutputGraphState +from .replay_record import DummyModule, ExecutionRecorder +from .resume_execution import ContinueExecutionCache, ReenterWith +from .source import ( + AttrSource, + GetItemSource, + GlobalSource, + GlobalWeakRefSource, + LocalSource, + Source, +) +from .trace_rules import is_builtin_constant, is_forbidden +from .utils import ( + counters, + get_fake_value, + get_instruction_source_311, + graph_break_dup_warning_checker, + istype, + LazyString, + proxy_args_kwargs, +) +from .variables.base import ( + _is_top_level_scope, + is_side_effect_safe, + MutableLocal, + typestr, + VariableTracker, +) +from .variables.builder import VariableBuilder, wrap_fx_proxy +from .variables.builtin import BuiltinVariable +from .variables.constant import ConstantVariable +from .variables.ctx_manager import ( + ContextWrappingVariable, + GenericContextWrappingVariable, + WithExitFunctionVariable, +) +from .variables.dicts import ConstDictVariable, SetVariable +from .variables.functions import ( + BaseUserFunctionVariable, + NestedUserFunctionVariable, + SkipFunctionVariable, + UserFunctionVariable, + UserMethodVariable, +) +from .variables.lists import ( + BaseListVariable, + ListIteratorVariable, + ListVariable, + SliceVariable, + TupleVariable, +) +from .variables.misc import ( + ClosureVariable, + GetAttrVariable, + InlinedClosureVariable, + NullVariable, + PythonModuleVariable, + UnknownVariable, +) +from .variables.nn_module import NNModuleVariable +from .variables.tensor import ( + supported_const_comparison_ops, + supported_tensor_comparison_ops, + SymNodeVariable, + TensorVariable, +) +from .variables.user_defined import ( + RemovableHandleVariable, + UserDefinedClassVariable, + UserDefinedObjectVariable, + UserDefinedVariable, +) + +log = logging.getLogger(__name__) +graph_break_log = torch._logging.getArtifactLogger(__name__, "graph_breaks") +trace_call_log = torch._logging.getArtifactLogger(__name__, "trace_call") +trace_source_log = torch._logging.getArtifactLogger(__name__, "trace_source") +tls = threading.local() + + +@dataclasses.dataclass +class SpeculationEntry: + filename: str + lineno: int + instruction_pointer: int + failed: bool = False + reason: Optional[GraphCompileReason] = None + + def fail_and_restart_analysis(self): + """ + Start tracing of the current frame over again, and don't take this branch. + """ + self.failed = True + raise exc.SpeculationRestartAnalysis() + + +@dataclasses.dataclass +class SpeculationLog: + """ + SpeculationLog replaces the prior copy_graphstate/restore_graphstate + checkpointing. Rather than saving/restoring state, we restart the + dynamo conversion process over from the beginning -- but when we + hit the start of the speculation that failed, we instead generate + a graph break. 
+ """ + + entries: List[SpeculationEntry] = dataclasses.field(default_factory=list) + index: int = 0 + + def restart(self): + self.index = 0 + + def clear(self): + self.entries.clear() + self.index = 0 + + def next(self, filename: str, lineno: int, instruction_pointer) -> SpeculationEntry: + """ + Lookup or create a SpeculationEntry() that is shared across + RestartAnalysis calls. Args are used only for debug checks. + """ + if len(self.entries) == self.index: + self.entries.append(SpeculationEntry(filename, lineno, instruction_pointer)) + entry = self.entries[self.index] + self.index += 1 + assert ( + entry.instruction_pointer == instruction_pointer + and entry.filename == filename + and entry.lineno == lineno + ), textwrap.dedent( + f""" + SpecuationLog diverged at {self.index} of {len(self.entries)}: + - Run1: {entry.filename}:{entry.lineno} (ip={entry.instruction_pointer}) + - Run2: {filename}:{lineno} (ip={instruction_pointer}) + Please submit a bug report. + """ + ) + return entry + + +@functools.lru_cache(None) +def _step_logger(): + return torchdynamo_logging.get_step_logger(log) + + +@dataclasses.dataclass +class BlockStackEntry: + target: Instruction + stack_index: Optional[int] = None + with_context: Optional[ContextWrappingVariable] = None + + def can_restore(self): + return self.with_context is not None + + def resume_fn(self): + assert self.stack_index is not None + if self.with_context and self.with_context.target_values: + return ReenterWith(self.stack_index, tuple(self.with_context.target_values)) + else: + return ReenterWith(self.stack_index) + + def exit(self, tx): + assert self.with_context is not None + return self.with_context.exit(tx) + + +class InstructionTranslatorGraphState(NamedTuple): + output: OutputGraphState + symbolic_locals: Dict[str, VariableTracker] + stack: List[VariableTracker] + block_stack: List[BlockStackEntry] + instruction_pointer: Optional[int] + current_instruction: Instruction + next_instruction: Optional[Instruction] + lineno: int + + def diff(self, other: "InstructionTranslatorGraphState") -> Optional[str]: + for k in self._fields: + if k == "output": + return self.output.diff(other.output, prefix=f"{k}.") + sv = getattr(self, k) + ov = getattr(other, k) + if sv != ov: + return f"{k} mismatch: {sv} != {ov}" + return None + + +def stack_op(fn: typing.Callable[..., object]): + nargs = len(inspect.signature(fn).parameters) + fn_var = BuiltinVariable(fn) + + @functools.wraps(fn) + def impl(self: "InstructionTranslatorBase", inst: Instruction): + self.push(fn_var.call_function(self, self.popn(nargs), {})) + + return impl + + +def _detect_and_normalize_assert_statement( + self: "InstructionTranslatorBase", + truth_fn: typing.Callable[[object], bool], + push: bool, +): + # Detect if this jump instruction is assert and normalize the assert + # by pushing dummy error message when nothing is given. 
+ # + # Python 3.9 assertion is in following format: + # 18 POP_JUMP_IF_TRUE 28 + # 20 LOAD_ASSERTION_ERROR + # 22 LOAD_CONST 3 ('Assert message') -> optional instruction + # 24 CALL_FUNCTION 1 -> optional instruction + # 26 RAISE_VARARGS + # + # Python 3.8 assertion is in following format: + # 18 POP_JUMP_IF_TRUE 28 + # 20 LOAD_GLOBAL 0 (Assertion type) + # 22 LOAD_CONST 3 ('Assert message') -> optional instruction + # 24 CALL_FUNCTION 1 -> optional instruction + # 26 RAISE_VARARGS 1 + + if (truth_fn is not operator.truth) or push: + return False + + assert isinstance(self.instruction_pointer, int) + current_instruction_pointer = self.instruction_pointer + inst = self.instructions[current_instruction_pointer] + # Detect LOAD_ASSERTION_ERROR or LOAD_GLOBAL 0 + if sys.version_info < (3, 9): + if inst.opname != "LOAD_GLOBAL" or inst.argval != "AssertionError": + return False + else: + if inst.opname != "LOAD_ASSERTION_ERROR": + return False + + current_instruction_pointer += 1 + + # Use dummy error message if its hard to extract + error_msg = "assertion error" + + inst = self.instructions[current_instruction_pointer] + # DETECT RAISE_VARARGS or LOAD CONST + if inst.opname == "LOAD_CONST": + if not isinstance(inst.argval, str): + return False + error_msg = inst.argval + + # if it is LOAD_CONSTANT, it must be followed by CALL_FUNCTION + # (PRECALL for Python 3.11+) + current_instruction_pointer += 1 + inst = self.instructions[current_instruction_pointer] + if inst.opname not in ("CALL_FUNCTION", "PRECALL"): + return False + + # for Python 3.11+, PRECALL should be followed by CALL, then RAISE_VARARGS + # for Python < 3.11, CALL_FUNCTION should be followed by RAISE_VARARGS + current_instruction_pointer += 1 + if inst.opname == "PRECALL": + current_instruction_pointer += 1 + inst = self.instructions[current_instruction_pointer] + + if inst.opname != "RAISE_VARARGS": + return False + + self.push(ConstantVariable.create(error_msg)) + + return True + + +def generic_jump(truth_fn: typing.Callable[[object], bool], push: bool): + def inner(self: "InstructionTranslatorBase", inst: Instruction): + value: VariableTracker = self.pop() + if ( + config.rewrite_assert_with_torch_assert + and _detect_and_normalize_assert_statement(self, truth_fn, push) + ): + error_msg: VariableTracker = self.pop() + # Skip over things like `assert True` + if value.is_python_constant() and bool(value.as_python_constant()): + self.jump(inst) + return + + # TODO maybe should respect DtoH sync intention of users later?? + # Manually insert torch._assert_async instead of python assert and jump over + # assert related instructions as we don't need them anymore. + + # if we see Tensor as assert statement, no need to call scalar_tensor + if isinstance(value, TensorVariable): + self.output.create_proxy( + "call_function", + torch._assert_async, + *proxy_args_kwargs((value, error_msg), {}), + ) + self.jump(inst) + return + + if isinstance(value, SymNodeVariable): + # if the assertion is normal shape expression. + # just install guard and bail out. + sym_expr = value.sym_num + if not isinstance(sym_expr, torch.SymBool): + sym_expr = sym_expr != 0 + + result = torch.fx.experimental.symbolic_shapes.expect_true(sym_expr) + if not result: + raise unimplemented( + "Assertion failed on symbolic shapes. Did you make sure eager mode succeeds?" 
+ ) + self.jump(inst) + return + + scalar_to_tensor_proxy = self.output.create_proxy( + "call_function", torch.scalar_tensor, *proxy_args_kwargs((value,), {}) + ) + + scalar_to_tensor = wrap_fx_proxy( + self, + scalar_to_tensor_proxy, + example_value=get_fake_value(scalar_to_tensor_proxy.node, self), + ) + + self.output.create_proxy( + "call_function", + torch._assert_async, + *proxy_args_kwargs((scalar_to_tensor, error_msg), {}), + ) + self.jump(inst) + return + + if value.is_python_constant(): + if truth_fn(value.as_python_constant()): + push and self.push(value) + self.jump(inst) + elif ( + isinstance(value, (TensorVariable)) and self.should_compile_partial_graph() + ): + # compile a partial subgraph prefix then jump into user code + if self.has_backedge(): + msg = ( + "Skipping frame because there is a graph break in a for/while loop\n" + f"{self.frame_summary()}" + ) + log.info(msg) + raise exc.SkipFrame(msg) + + self.push(value) + log.debug("generic_jump triggered compile") + self.output.compile_subgraph( + self, + reason=GraphCompileReason( + f"generic_jump {typestr(value)}", [self.frame_summary()] + ), + ) + self.pop() + + if_next = self.create_call_resume_at(self.next_instruction) + push and self.push(value) + if_jump = self.create_call_resume_at(inst.target) + + self.output.add_output_instructions( + [create_instruction(inst.opname, target=if_jump[0])] + if_next + if_jump + ) + elif isinstance(value, NNModuleVariable): + # Equivalent of "self.nn_module is not None" + mod = self.output.get_submodule(value.module_key) + if truth_fn(mod): + push and self.push(value) + self.jump(inst) + elif isinstance(value, UserDefinedObjectVariable): + x = value.var_getattr(self, "__bool__") + # if __bool__ is missing, trying __len__ to infer a truth value. + if isinstance(x, GetAttrVariable): + x = value.var_getattr(self, "__len__") + + # __bool__ or __len__ is function + if isinstance(x, UserMethodVariable): + result = x.call_function(self, [], {}) + if isinstance(result, ConstantVariable) and isinstance( + result.value, (bool, int) + ): + if truth_fn(result.value): + push and self.push(value) + self.jump(inst) + else: + unimplemented( + "generic_jump on UserDefined with __bool__ returning non-constant" + ) + # __bool__ or __len__ is non-function or not existed in the user defined object + else: + if truth_fn(True): + push and self.push(value) + self.jump(inst) + elif not isinstance(value, TensorVariable) and value.has_unpack_var_sequence( + self + ): + if truth_fn(len(value.unpack_var_sequence(self))): + push and self.push(value) + self.jump(inst) + elif isinstance(value, SymNodeVariable): + eval_result = value.evaluate_expr(self.output) + if truth_fn(eval_result): + push and self.push(value) + self.jump(inst) + elif isinstance(value, variables.BackwardHookVariable): + if truth_fn(True): + push and self.push(value) + self.jump(inst) + else: + from .source import is_constant_source + + if value.source is not None and is_constant_source(value.source): + if truth_fn(value.get_real_value()): # type: ignore[attr-defined] + push and self.push(value) + self.jump(inst) + else: + # TODO link the torch.cond doc later + raise exc.UserError( + exc.UserErrorType.DYNAMIC_CONTROL_FLOW, + "Dynamic control flow is not supported at the moment. 
Please use " + "functorch.experimental.control_flow.cond to explicitly capture the control flow.", + case_name="cond_operands", + ) + + return inner + + +explain = False + + +def break_graph_if_unsupported(*, push): + def decorator(inner_fn): + @functools.wraps(inner_fn) + def wrapper(self: "InstructionTranslatorBase", inst: Instruction): + speculation = self.speculate() + if speculation.failed: + assert speculation.reason is not None + return handle_graph_break(self, inst, speculation.reason) + try: + TracingContext.set_current_loc( + self.f_code.co_filename, self.lineno, self.f_code.co_name + ) + return inner_fn(self, inst) + except Unsupported as excp: + if self.generic_context_manager_depth > 0: + # We don't support graph break under GenericContextWrappingVariable, + # If there is, we roll back to the checkpoint and fall back. + excp.remove_from_stats() + unimplemented("Graph break under GenericContextWrappingVariable") + + if isinstance(excp, exc.UncapturedHigherOrderOpError): + raise + + if not self.should_compile_partial_graph(): + raise + + user_stack = excp.real_stack + # TODO: Also report the traceback from the parent frame + user_stack_formatted = "".join(traceback.format_list(user_stack)) + frame_loc = (user_stack[-1].filename, user_stack[-1].lineno) + # torch._dynamo.explain() formats this a little nicer, and presents a slightly + # more actionable user code pointer + if ( + graph_break_log.isEnabledFor(logging.DEBUG) + and not explain + and graph_break_dup_warning_checker.add(frame_loc) + ): + # This log line is exercised from + # python test/dynamo/test_exc.py -k test_graph_break_log + graph_break_log.debug( + "Graph break: from user code at:\n%s", + user_stack_formatted, + exc_info=True, + ) + else: + # This log line MUST NOT contain the string "Graph break", + # exercised by + # python test/dynamo/test_misc.py -k test_duplicate_graph_break_log + log.debug( + "Unsupported break in user code at %s:%s (details suppressed)", + *frame_loc, + ) + + if self.has_backedge(): + msg = ( + "Skipping frame because there is a graph break in a for/while loop\n" + f"{self.frame_summary()}" + ) + log.info(msg) + raise exc.SkipFrame(msg) from excp + + excp.remove_from_stats() + excp.add_to_stats("graph_break") + speculation.reason = GraphCompileReason(excp.msg, user_stack) + speculation.fail_and_restart_analysis() + + def handle_graph_break( + self: "InstructionTranslatorBase", + inst: Instruction, + reason: GraphCompileReason, + ): + self.output.compile_subgraph(self, reason=reason) + cg = PyCodegen(self) + cleanup: List[Instruction] = [] + # Reconstruct the context variables in the block stack + for b in self.block_stack: + assert b.with_context is not None + cg(b.with_context) + cg.extend_output(b.resume_fn().try_except(cg.code_options, cleanup)) + self.output.add_output_instructions(cg.get_instructions()) + del cg + + if sys.version_info >= (3, 11) and inst.opname == "CALL": + kw_names = ( + self.kw_names.as_python_constant() + if self.kw_names is not None + else () + ) + if len(kw_names) > 0: + self.output.add_output_instructions( + [create_instruction("KW_NAMES", argval=kw_names)] + ) + self.output.add_output_instructions( + create_call_function(inst.arg, False) + ) + else: + # copy instruction, but without exception table data + assert inst.target is None + inst_copy = copy.copy(inst) + inst_copy.exn_tab_entry = None + self.output.add_output_instructions([inst_copy]) + + self.output.add_output_instructions(cleanup) + + if sys.version_info >= (3, 11) and inst.opname == "CALL": + # 
stack effect for PRECALL + CALL is split between the two instructions + stack_effect = dis.stack_effect( + dis.opmap["PRECALL"], inst.arg + ) + dis.stack_effect(dis.opmap["CALL"], inst.arg) + else: + stack_effect = dis.stack_effect(inst.opcode, inst.arg) + self.popn(push - stack_effect) + + for _ in range(push): + self.push(UnknownVariable()) + self.output.add_output_instructions( + self.create_call_resume_at(self.next_instruction) + ) + + return wrapper + + return decorator + + +class InstructionTranslatorBase(Checkpointable[InstructionTranslatorGraphState]): + output: OutputGraph + symbolic_locals: Dict[str, VariableTracker] + symbolic_globals: Dict[str, VariableTracker] + stack: List[VariableTracker] + instruction_pointer: Optional[int] + current_instruction: Instruction + next_instruction: Optional[Instruction] + block_stack: List[BlockStackEntry] + lineno: int + kw_names: Optional[ConstantVariable] + accept_prefix_inst: bool + prefix_insts: List[Instruction] + inline_depth: int + inconsistent_side_effects: bool + current_speculation: Optional[SpeculationEntry] + + def mark_inconsistent_side_effects(self): + """ + InstructionTranslator has encountered instructions which may cause + dynamo to see a different version of history from eager + See: https://github.com/pytorch/pytorch/issues/110765 + """ + self.inconsistent_side_effects = True + + def has_backedge(self): + cur_offset = self.current_instruction.offset + assert self.instruction_pointer is not None + for inst in self.instructions[self.instruction_pointer :]: + if inst.opname in JUMP_OPNAMES: + jump_offset = inst.argval + if jump_offset < cur_offset: + return True + return False + + def cell_and_freevars(self): + if not hasattr(self, "_cell_and_freevars"): + self._cell_and_freevars = tuple( + self.code_options["co_cellvars"] or [] + ) + tuple(self.code_options["co_freevars"] or []) + return self._cell_and_freevars + + def prune_dead_locals(self): + reads = livevars_analysis(self.instructions, self.current_instruction) + # implicit use by super() + # reads = reads | {"__class__"} + # output variables? + reads = reads | set(self.cell_and_freevars()) + self.symbolic_locals = { + k: v for k, v in self.symbolic_locals.items() if k in reads + } + self.output.side_effects.prune_dead_object_new(self) + + def call_function( + self, + fn: VariableTracker, + args: List[VariableTracker], + kwargs: Dict[str, VariableTracker], + ): + assert isinstance(fn, VariableTracker) + assert isinstance(args, list) + assert isinstance(kwargs, dict) + assert all( + isinstance(x, VariableTracker) + for x in itertools.chain(args, kwargs.values()) + ) + inner_fn = None + if hasattr(fn, "value"): + inner_fn = fn.value + if hasattr(fn, "fn"): + inner_fn = fn.fn + if inner_fn and callable(inner_fn) and is_forbidden(inner_fn): + raise AssertionError(f"Attempt to trace forbidden callable {inner_fn}") + self.push(fn.call_function(self, args, kwargs)) + + def inline_user_function_return(self, fn, args, kwargs): + """ + A call to some user defined function by inlining it. 
+ """ + return InliningInstructionTranslator.inline_call(self, fn, args, kwargs) + + def get_line_of_code_header(self, lineno=None): + if lineno is None: + lineno = self.lineno + inline_depth_str = ( + f" (inline depth: {self.inline_depth})" if self.inline_depth > 0 else "" + ) + funcname = get_funcname(self.f_code.co_filename, lineno) + funcname_str = "" if funcname is None else f" ({funcname})" + return f"{self.f_code.co_filename}:{lineno} in {self.f_code.co_name}{funcname_str}{inline_depth_str}" + + def get_log_starts_line_log_str(self): + log_str = f"TRACE starts_line {self.get_line_of_code_header()}\n" + line = linecache.getline(self.f_code.co_filename, self.lineno).rstrip() + log_str += f" {line}" + return log_str + + def log_starts_line(self): + trace_source_log.debug("%s", LazyString(self.get_log_starts_line_log_str)) + + def step(self): + """Process exactly one instruction, return False we should exit""" + assert isinstance(self.instruction_pointer, int) + inst = self.instructions[self.instruction_pointer] + self.current_instruction = inst + self.instruction_pointer += 1 + if self.instruction_pointer < len(self.instructions): + self.next_instruction = self.instructions[self.instruction_pointer] + else: + self.instruction_pointer = None + self.next_instruction = None + if inst.starts_line and self.lineno != inst.starts_line: + self.lineno = inst.starts_line + self.log_starts_line() + + if ( + len(self.stack) == 0 + and self.should_compile_partial_graph() + and self.is_non_empty_graph() + ): + self.current_speculation = self.speculate() + if self.current_speculation.failed: + return self.step_graph_break(inst) + + log.debug("TRACE %s %s %s", inst.opname, inst.argval, self.stack) + + # 3.11 no longer uses a block stack, but we still keep track of one + # so that we know which contexts are currently active. + # For our purposes, all exception table entries with the same target + # are considered to be part of the same "block". + if sys.version_info >= (3, 11): + entry = inst.exn_tab_entry + if not ( + # still in the same block + self.block_stack + and entry + and self.block_stack[-1].target is entry.target + ): + if not entry: + # no longer in any block + # It is possible for NOPs to be between two instructions + # in the same block, but the NOPs are not covered by an + # exception table entry. In this case, assume that we + # are still in the same block. + if self.block_stack and inst.opname != "NOP": + # If we really escape from a block and the current + # instruction is not in another block, then there + # should be no other nested blocks that we are in. + assert len(self.block_stack) == 1 + self.block_stack.pop() + elif ( + # current instruction is in the previous block + len(self.block_stack) > 1 + and self.block_stack[-2].target is entry.target + ): + # exit the current block + self.block_stack.pop() + else: + # current instruction is in a new block + # push block to stack - note, BEFORE_WITH blocks won't + # be pushed here since BEFORE_WITH pushes the block, and + # the current instruction would be counted as being in that block. 
+ self.block_stack.append( + BlockStackEntry(entry.target, len(self.stack)) + ) + + try: + if not hasattr(self, inst.opname): + unimplemented(f"missing: {inst.opname}") + TracingContext.set_current_loc( + self.f_code.co_filename, self.lineno, self.f_code.co_name + ) + getattr(self, inst.opname)(inst) + + return inst.opname != "RETURN_VALUE" + except Unsupported: + if self.current_speculation is None: + log.debug("empty checkpoint") + raise + log.debug("step triggered compile", exc_info=True) + + self.current_speculation.fail_and_restart_analysis() + + def step_graph_break(self, continue_inst): + # generate code from checkpoint + assert not self.output.output_instructions + assert self.current_speculation is not None + self.output.compile_subgraph( + self, + partial_convert=True, + reason=GraphCompileReason("step_unsupported", [self.frame_summary()]), + ) + self.output.add_output_instructions( + [create_jump_absolute(continue_inst)] + self.instructions + ) + + def run_ctx_mgr(self): + # NB: Don't push the top level frame summary; set_current_loc will + # take care of it. However, DO make sure we attach real_stack to + # exceptions + return TracingContext.current_frame(None) + + def run(self): + with self.run_ctx_mgr(): + try: + self.output.push_tx(self) + while ( + self.instruction_pointer is not None + and not self.output.should_exit + and self.step() + ): + pass + except BackendCompilerFailed: + raise + except Exception as e: + if config.replay_record_enabled: + e.exec_record = self.exec_recorder.get_record() # type: ignore[attr-defined] + raise + finally: + self.output.pop_tx() + # Cleanup the outputGraph to delete the held tensors. We perform the + # cleanup only for InstructionTranslator and not + # InliningInstructionTranslator. The InliningInstructionTranslator + # mutates the output object and is restored to original state if + # there was an exception. 
+ if isinstance(self, InstructionTranslator): + self.output.cleanup() + + def push(self, val: Optional[VariableTracker]): + assert val is None or isinstance( + val, VariableTracker + ), f"push expects VariableTracker, got {typestr(val)}" + self.stack.append(val) # type: ignore[arg-type] + + def push_many(self, vals: List[VariableTracker]): + for val in vals: + self.push(val) + + def pop(self) -> VariableTracker: + return self.stack.pop() + + def popn(self, n: int) -> List[VariableTracker]: + assert n >= 0 + return list(reversed([self.pop() for _ in range(n)])) + + def LOAD_FAST(self, inst): + name = inst.argval + if name in self.f_locals and config.replay_record_enabled: + self.exec_recorder.add_local_var(name, self.f_locals[name]) + + if name.startswith(".") and name not in self.symbolic_locals: + # This happens in dict/list comprehensions + name = name.replace(".", "implicit") + assert name not in self.cell_and_freevars() + if name not in self.symbolic_locals: + unimplemented("undefined LOAD_FAST") + self.push(self.symbolic_locals[name]) + if name.startswith("___stack"): + self.symbolic_locals.pop(name) + + def LOAD_DEREF(self, inst): + assert inst.argval in self.cell_and_freevars() + + if inst.argval in self.f_locals and config.replay_record_enabled: + self.exec_recorder.add_local_var(inst.argval, self.f_locals[inst.argval]) + + if inst.argval not in self.symbolic_locals: + unimplemented(f"undefined LOAD_DEREF {inst.argval}") + self.push(self.symbolic_locals[inst.argval]) + + def STORE_FAST(self, inst): + loaded_vt = self.pop() + name = inst.argval + # Only rename at the top-level scope, this is to avoid the confusion between + # mutating a variable vs renaming it (e.g. a = b) during speculating a higher order op, + # where mutation is prohibited and it's difficult to differentiate it with renaming. 
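The distinction drawn in the comment above exists in plain Python as well: rebinding a name leaves other aliases of the old object untouched, while in-place mutation is visible through every alias, and the two look similar in bytecode. A tiny illustration:

# Rebinding vs. mutation, plain Python
b = [1, 2]
a = b              # rebinding: `a` now names the same object as `b`
a = [3]            # rebinding again: `b` is unaffected
assert b == [1, 2]
b.append(4)        # mutation: every alias of the list sees the change
assert b == [1, 2, 4]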
+ if _is_top_level_scope(current_scope_id()): + loaded_vt = loaded_vt.rename(self, name) + self.symbolic_locals[name] = loaded_vt + + def DELETE_FAST(self, inst): + del self.symbolic_locals[inst.argval] + + STORE_DEREF = STORE_FAST + + def LOAD_CLOSURE(self, inst): + self.push(ClosureVariable(name=inst.argval)) + + def LOAD_CONST(self, inst): + # For empty tuples, create empty TupleVariable + if isinstance(inst.argval, tuple) and not inst.argval: + self.push(TupleVariable([])) + else: + self.push(ConstantVariable.create(value=inst.argval)) + + def get_global_source(self, name): + source: Source + if self.output.global_scope is self.f_globals: + source = GlobalSource(name) + else: + if "__name__" in self.f_globals: + source = AttrSource( + self.import_source(self.f_globals["__name__"]), name + ) + else: + mangled_name = self.output.install_global_by_id( + "___unnamed_scope", self.f_globals + ) + source = GetItemSource(GlobalSource(mangled_name), name) + return source + + def LOAD_GLOBAL(self, inst): + if sys.version_info >= (3, 11): + if inst.arg % 2: + self.PUSH_NULL(inst) + + name = inst.argval + + if config.replay_record_enabled: + if name in self.f_globals: + self.exec_recorder.add_global_var(name, self.f_globals[name]) + else: + assert name in self.f_builtins + self.exec_recorder.builtins[name] = self.f_builtins[name] + + if inst.argval == "AssertionError": + unimplemented("assert with non-string message") + + if name in self.symbolic_globals: + variable = self.output.side_effects[self.symbolic_globals[name]] + self.push(self.output.side_effects.load_global(variable, name)) + return + + try: + value = self.f_globals[name] + except KeyError: + return self.load_builtin(inst) + + source = self.get_global_source(name) + self.push(VariableBuilder(self, source)(value)) + + def STORE_GLOBAL(self, inst): + value = self.pop() + name = inst.argval + source = self.get_global_source(name) + if name not in self.symbolic_globals: + self.symbolic_globals[name] = object() # type: ignore[assignment] # sentinel object + variable = self.output.side_effects.track_global_existing( + source, self.symbolic_globals[name] + ) + if isinstance(value, RemovableHandleVariable): + unimplemented("Storing handles in globals - NYI") + self.output.side_effects.store_global(variable, name, value) + + def import_source(self, module_name): + """Create an alias to a module for use in guards""" + if "torch_package" in module_name: + value = torch.package.package_importer._package_imported_modules[ + module_name + ] + alias = ( + module_name.replace(">", "_").replace("<", "_").replace(".", "_dot_") + ) + else: + value = importlib.import_module(module_name) + alias = f"__import_{module_name.replace('.', '_dot_')}" + f_globals = self.output.global_scope + assert alias not in f_globals or f_globals[alias] is value + f_globals[alias] = value + self.output.update_co_names(alias) + return GlobalSource(alias) + + def resolve_name(self, name, package, level): + """ + Copied from the Cpython implementation of __import__ + Resolve a relative module name to an absolute one. 
+ https://github.com/python/cpython/blob/5a094f0255eea1db58fb2cf14c200971e64ec36e/Lib/importlib/_bootstrap.py#L902 + """ + bits = package.rsplit(".", level - 1) + if len(bits) < level: + raise ImportError("attempted relative import beyond top-level package") + base = bits[0] + return f"{base}.{name}" if name else base + + def calc_package(self): + """ + Copied from the Cpython implementation of __import__ + https://github.com/python/cpython/blob/5a094f0255eea1db58fb2cf14c200971e64ec36e/Lib/importlib/_bootstrap.py#L1090 + """ + package = self.f_globals.get("__package__") + spec = self.f_globals.get("__spec__") + if package is not None: + if spec is not None and package != spec.parent: + log.warning( + "__package__ != __spec__.parent (%r != %r)", + package, + spec.parent, + stacklevel=3, + ) + return package + elif spec is not None: + return spec.parent + else: + log.warning( + "can't resolve package from __spec__ or __package__, " + "falling back on __name__ and __path__", + stacklevel=3, + ) + package = self.f_globals["__name__"] + if "__path__" not in self.f_globals: + package = package.rpartition(".")[0] + return package + + def IMPORT_NAME(self, inst): + level, fromlist = self.popn(2) + level = level.as_python_constant() + fromlist = fromlist.as_python_constant() + module_name = inst.argval + + # Are we replaying? if so, load recorded module + recorded_name = ( + f"{ExecutionRecorder.LOCAL_MOD_PREFIX}_{level}_{fromlist}_{module_name}" + ) + if recorded_name in self.f_globals: + value = self.f_globals[recorded_name] + source = GlobalSource(recorded_name) + else: + value = __import__( + module_name, + fromlist=fromlist, + level=level, + globals=self.f_globals, + ) + + if level != 0: + pkg = self.calc_package() + module_name = self.resolve_name(module_name, pkg, level) + + # For __import__, when the name variable is of the form package.module, + # normally, the top-level package (the name up till the first dot) is + # returned, not the module named by module_name. However, when a + # non-empty fromlist argument is given, the module named by name is + # returned. Therefore, we set the source correctly here. 
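The behaviour described in the comment above is easy to confirm directly: __import__ with a dotted name returns the top-level package unless a non-empty fromlist is supplied, in which case it returns the named submodule. A small sanity check using only the standard library:

import os

top = __import__("os.path")                     # no fromlist: top-level package
sub = __import__("os.path", fromlist=["join"])  # non-empty fromlist: the submodule
assert top is os
assert sub is os.path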
+ if not fromlist: + top_level_module_name = module_name.partition(".")[0] + source = self.import_source(top_level_module_name) + else: + source = self.import_source(module_name) + + if config.replay_record_enabled: + self.exec_recorder.add_local_mod(recorded_name, value) + + if istype(value, (types.ModuleType, DummyModule)): + self.push(PythonModuleVariable(value, source=source)) + else: + unimplemented(f"IMPORT_NAME {typestr(value)}") + + def IMPORT_FROM(self, inst): + self.DUP_TOP(inst) + self.LOAD_ATTR(inst) + + def load_builtin(self, inst): + if inst.argval not in self.f_builtins: + raise NameError(f"name '{inst.argval}' is not defined") + val = self.f_builtins[inst.argval] + + if callable(val): + self.push(VariableBuilder(self, GlobalSource(inst.argval))(val)) + else: + assert is_builtin_constant(val) + self.push(ConstantVariable.create(value=val)) + + def jump(self, inst): + self.instruction_pointer = self.indexof[inst.target] + + JUMP_FORWARD = jump + JUMP_ABSOLUTE = jump + + POP_JUMP_IF_FALSE = generic_jump(operator.not_, False) + POP_JUMP_IF_TRUE = generic_jump(operator.truth, False) + JUMP_IF_FALSE_OR_POP = generic_jump(operator.not_, True) + JUMP_IF_TRUE_OR_POP = generic_jump(operator.truth, True) + + def SETUP_LOOP(self, inst): + # only exists in python<=3.7 + self.block_stack.append(BlockStackEntry(inst.target)) + + def SETUP_EXCEPT(self, inst): + # only exists in python<=3.7 + self.block_stack.append(BlockStackEntry(inst.target)) + + def POP_BLOCK(self, inst): + self.block_stack.pop() + + def SETUP_WITH(self, inst): + self.setup_or_before_with(inst) + + def SETUP_FINALLY(self, inst): + self.block_stack.append(BlockStackEntry(inst.target)) + + def BEGIN_FINALLY(self, inst): + self.push(None) + + def WITH_CLEANUP_START(self, inst): + exit, exc = self.popn(2) + assert exc is None + self.push(exc) + self.push(exit.call_function(self, [ConstantVariable.create(None)] * 3, {})) + + def WITH_CLEANUP_FINISH(self, inst): + self.popn(2) + self.push(None) + + def CALL_FINALLY(self, inst): + """ + pushes the address of the next instruction onto the stack and increments + bytecode counter by delta + """ + # Python 3.8 only + assert self.next_instruction is not None + addr = self.indexof[self.next_instruction] + self.push(ConstantVariable.create(addr)) + self.instruction_pointer = self.indexof[inst.target] + + def END_FINALLY(self, inst): + # Python 3.8 only + # https://docs.python.org/3.8/library/dis.html#opcode-END_FINALLY + tos = self.pop() + if isinstance(tos, ConstantVariable): + self.instruction_pointer = tos.as_python_constant() + else: + pass + + def POP_FINALLY(self, inst): + # Python 3.8 only + preserve_tos = inst.argval + if preserve_tos: + tos = self.pop() + _ = self.pop() + if preserve_tos: + self.push(tos) # type: ignore[possibly-undefined] + + def FOR_ITER(self, inst): + it = self.pop().realize() + if isinstance(it, (variables.ListIteratorVariable, variables.IteratorVariable)): + try: + val, next_iter = it.next_variables(self) + self.push(next_iter) + self.push(val) + except StopIteration: + self.jump(inst) + else: + unimplemented(f"FOR_ITER {typestr(it)}") + + def COMPARE_OP(self, inst): + left, right = self.popn(2) + op = inst.argval + supported_any = dict( + itertools.chain( + supported_tensor_comparison_ops.items(), + supported_const_comparison_ops.items(), + ) + ) + if ( + isinstance( + left, + ( + TensorVariable, + SymNodeVariable, + NNModuleVariable, + BaseListVariable, + UserDefinedVariable, + BaseUserFunctionVariable, + ConstDictVariable, + ), + ) + and 
isinstance(right, ConstantVariable) + and right.value is None + and op in supported_const_comparison_ops + ): + # is None + self.push( + ConstantVariable.create( + supported_const_comparison_ops[op](object(), right.value) + ) + ) + + elif ( + left.is_python_constant() + and right.is_python_constant() + and op in supported_any + ): + # constant fold + self.push( + ConstantVariable.create( + supported_any[op]( + left.as_python_constant(), right.as_python_constant() + ), + ) + ) + elif op in ("in", "not in"): + self.push(right.call_method(self, "__contains__", [left], {})) + if op == "not in": + self.UNARY_NOT(inst) + else: + self.push( + BuiltinVariable(supported_any[op]).call_function( + self, [left, right], {} + ) + ) + + def GET_ITER(self, inst): + self.call_function(BuiltinVariable(iter), [self.pop()], {}) + + @break_graph_if_unsupported(push=1) + def CALL_FUNCTION(self, inst): + args = self.popn(inst.argval) + fn = self.pop() + self.call_function(fn, args, {}) + + @break_graph_if_unsupported(push=1) + def CALL_FUNCTION_EX(self, inst): + kwargsvars: VariableTracker + if inst.argval == 0: + kwargsvars = ConstDictVariable({}) + argsvars = self.pop() + elif inst.argval == 1: + kwargsvars = self.pop() + argsvars = self.pop() + else: + unimplemented("CALL_FUNCTION_EX") + fn = self.pop() + if sys.version_info >= (3, 11): + null = self.pop() + assert isinstance(null, NullVariable) + + if ( + isinstance(fn, GetAttrVariable) + and isinstance(fn.obj, TensorVariable) + and fn.name == "view" + and isinstance(argsvars, (ConstantVariable, TensorVariable)) + ): + # Hack to handle special case in some bert models. Converts + # x.view(*shape) into x.view(shape), which is correct for view() + # but not generally. See test_transpose_for_scores(). + argsvars = TupleVariable([argsvars]) + + if not isinstance( + argsvars, BaseListVariable + ) and argsvars.has_unpack_var_sequence(self): + argsvars = TupleVariable(argsvars.unpack_var_sequence(self)) + + if not isinstance(argsvars, BaseListVariable) or not isinstance( + kwargsvars, ConstDictVariable + ): + unimplemented(f"non-static call {typestr(argsvars)} {typestr(kwargsvars)}") + + # Map to a dictionary of str -> VariableTracker + kwargsvars = kwargsvars.keys_as_python_constant() + self.call_function(fn, argsvars.items, kwargsvars) + + @break_graph_if_unsupported(push=1) + def CALL_FUNCTION_KW(self, inst): + argnames = self.pop() + args = self.popn(inst.argval) + fn = self.pop() + assert isinstance(argnames, TupleVariable) and argnames.is_python_constant() + argnames = argnames.as_python_constant() + args, kwargs_list = args[: -len(argnames)], args[-len(argnames) :] + kwargs = dict(zip(argnames, kwargs_list)) + assert len(kwargs) == len(argnames) + self.call_function(fn, args, kwargs) + + def LOAD_METHOD_SUPER(self, inst): + self.CALL_FUNCTION(dataclasses.replace(inst, argval=2)) + arg = inst.argval[0] + argval = self.code_options["co_names"][arg] + if sys.version_info < (3, 11): + self.LOAD_ATTR(dataclasses.replace(inst, argval=argval)) + else: + self.LOAD_METHOD(dataclasses.replace(inst, argval=argval)) + + def LOAD_ATTR_SUPER(self, inst): + self.CALL_FUNCTION(dataclasses.replace(inst, argval=2)) + arg = inst.argval[0] + argval = self.code_options["co_names"][arg] + self.LOAD_ATTR(dataclasses.replace(inst, argval=argval)) + + def LOAD_METHOD(self, inst): + self.LOAD_ATTR(inst) + obj = self.pop() + if sys.version_info >= (3, 11): + # always follow the NULL + fn convention, since if obj + # is actually a method, self is already bound to it, so it + # doesn't 
need to be passed in as an arg. + self.PUSH_NULL(inst) + self.push(obj) + else: + self.push(obj) + self.push(None) + + def CALL_METHOD(self, inst): + args = self.popn(inst.argval) + dummy = self.pop() + assert dummy is None + fn = self.pop() + self.call_function(fn, args, {}) + + def LOAD_ATTR(self, inst): + obj = self.pop() + result = BuiltinVariable(getattr).call_function( + self, [obj, ConstantVariable.create(inst.argval)], {} + ) + self.push(result) + + def STORE_ATTR(self, inst): + speculation = self.speculate() + if speculation.failed: + return self.store_attr_graph_break(inst) + val, obj = self.popn(2) + + if isinstance(obj, NNModuleVariable): + # We don't allow side effects during export + # https://github.com/pytorch/torchdynamo/issues/1475 + assert ( + not self.export + ), f"Mutating module attribute {inst.argval} during export." + + try: + BuiltinVariable(setattr).call_function( + self, [obj, ConstantVariable.create(inst.argval), val], {} + ) + return + except Unsupported as e: + if not self.should_compile_partial_graph(): + raise + log.debug("STORE_ATTR triggered compile", exc_info=True) + e.remove_from_stats() + e.add_to_stats("graph_break") + speculation.fail_and_restart_analysis() + + def store_attr_graph_break(self, inst): + self.output.compile_subgraph( + self, reason=GraphCompileReason("store_attr", [self.frame_summary()]) + ) + self.output.add_output_instructions([copy.copy(inst)]) + self.popn(2) + self.output.add_output_instructions( + self.create_call_resume_at(self.next_instruction) + ) + + def DELETE_ATTR(self, inst): + obj = self.pop() + BuiltinVariable(delattr).call_function( + self, [obj, ConstantVariable.create(inst.argval)], {} + ) + + def create_call_resume_at(self, offset): + raise AssertionError( + f"create_call_resume_at not overridden by subclass {type(self)}" + ) + + def should_compile_partial_graph(self) -> bool: + raise AssertionError( + f"should_compile_partial_graph not overridden by subclass {type(self)}" + ) + + @break_graph_if_unsupported(push=0) + def STORE_SUBSCR(self, inst): + val, obj, key = self.popn(3) + result = obj.call_method(self, "__setitem__", [key, val], {}) + + def BUILD_TUPLE(self, inst): + items = self.popn(inst.argval) + self.push(TupleVariable(items)) + + def BUILD_SLICE(self, inst): + items = self.popn(inst.argval) + self.push(SliceVariable(items)) + + def BUILD_LIST(self, inst): + items = self.popn(inst.argval) + self.push(ListVariable(items, mutable_local=MutableLocal())) + + def BUILD_SET(self, inst): + if config.inject_BUILD_SET_unimplemented_TESTING_ONLY: + unimplemented("missing: BUILD_SET") + items = self.popn(inst.argval) + new_set = SetVariable(items, mutable_local=MutableLocal()) + self.push(new_set) + + def BUILD_LIST_UNPACK(self, inst, cls=ListVariable): + seqs = self.popn(inst.argval) + items = list() + for seq in seqs: + try: + items.extend(seq.unpack_var_sequence(self)) + except NotImplementedError: + unimplemented(f"BUILD_LIST_UNPACK {seq}") + self.push(cls(items, mutable_local=MutableLocal())) + + def BUILD_TUPLE_UNPACK(self, inst): + self.BUILD_LIST_UNPACK(inst, cls=TupleVariable) + + BUILD_TUPLE_UNPACK_WITH_CALL = BUILD_TUPLE_UNPACK + + def BUILD_MAP(self, inst): + items = self.popn(inst.argval * 2) + d = dict(zip(items[::2], items[1::2])) + self.push(ConstDictVariable(d, mutable_local=MutableLocal())) + + def BUILD_MAP_UNPACK(self, inst): + items = self.popn(inst.argval) + # ensure everything is a dict + items = [BuiltinVariable(dict).call_function(self, [x], {}) for x in items] + result = dict() + for x in 
items: + assert isinstance(x, ConstDictVariable) + result.update(x.items) + self.push( + ConstDictVariable( + result, + mutable_local=MutableLocal(), + ) + ) + + BUILD_MAP_UNPACK_WITH_CALL = BUILD_MAP_UNPACK + + def BUILD_CONST_KEY_MAP(self, inst): + keys = self.pop() + values = self.popn(inst.argval) + assert isinstance(keys, TupleVariable) + assert keys.is_python_constant() + + keys = keys.unpack_var_sequence(self) + assert len(keys) == len(values) + + self.push( + ConstDictVariable( + dict(zip(keys, values)), + mutable_local=MutableLocal(), + ) + ) + + def MAP_ADD(self, inst): + k, v = self.popn(2) + assert inst.argval > 0 + obj = self.stack[-inst.arg].realize() + assert isinstance(obj, ConstDictVariable) + obj.call_method(self, "__setitem__", (k, v), {}) # type: ignore[arg-type] + + def SET_ADD(self, inst): + v = self.pop() + assert inst.argval > 0 + obj = self.stack[-inst.arg] + assert isinstance(obj, SetVariable) + assert obj.mutable_local + return obj.call_method(self, "add", [v], {}) + + def LIST_APPEND(self, inst): + v = self.pop() + assert inst.argval > 0 + obj = self.stack[-inst.arg].realize() + assert isinstance(obj, ListVariable) + assert obj.mutable_local + self.output.side_effects.mutation(obj) + obj.items.append(v) + + def MAKE_FUNCTION(self, inst): + flags = inst.arg + old_stack = list(self.stack) + if sys.version_info < (3, 11): + fn_name = self.pop() + code = self.pop() + if sys.version_info >= (3, 11): + # MAKE_FUNCTION behavior actually changed in 3.11, see + # https://github.com/python/cpython/pull/93189/ + assert hasattr(code.value, "co_qualname") # type: ignore[attr-defined] + fn_name = ConstantVariable.create(value=code.value.co_qualname) # type: ignore[attr-defined] + defaults = None + closure = None + annotations = None + kwdefaults = None + + if flags & 0x08: + closure = self.pop() + if flags & 0x04: + annotations = self.pop() + if flags & 0x02: + kwdefaults = self.pop() + if flags & 0x01: + defaults = self.pop() + + self.push( + NestedUserFunctionVariable( + fn_name, + code, + self.f_globals, + defaults, + kwdefaults, + annotations, + closure, + closure_scope=self, + ) + ) + + def UNPACK_SEQUENCE(self, inst): + seq = self.pop() + if isinstance(seq, TensorVariable): + val = seq.unpack_var_sequence(self, idxes=range(inst.argval)) + elif isinstance(seq, GetAttrVariable) and isinstance(seq.obj, TensorVariable): + # x, y = a.shape + proxy = getattr(seq.obj.as_proxy(), seq.name) + val = [wrap_fx_proxy(self, proxy[i]) for i in range(inst.argval)] + elif seq.has_unpack_var_sequence(self): + val = seq.unpack_var_sequence(self) + else: + unimplemented(f"UNPACK_SEQUENCE {seq}") + if len(val) != inst.argval: + unimplemented("UNPACK_SEQUENCE length mismatch") + for i in reversed(val): + self.push(i) + + def UNPACK_EX(self, inst): + assert 0 <= inst.argval <= 0xFFFF + prefix = inst.argval & 0xFF # low byte + suffix = inst.argval >> 8 # high byte + seq = self.pop() + if seq.has_unpack_var_sequence(self): + vals = list(seq.unpack_var_sequence(self)) + assert len(vals) >= prefix + suffix + vals_prefix = vals[:prefix] + vals_list = vals[prefix : len(vals) - suffix] + vals_suffix = vals[len(vals) - suffix :] + for item in reversed(vals_suffix): + self.push(item) + self.push(TupleVariable(vals_list)) + for item in reversed(vals_prefix): + self.push(item) + else: + unimplemented(f"UNPACK_EX {seq}") + + def NOP(self, inst): + pass + + def POP_TOP(self, inst): + self.pop() + + def ROT_TWO(self, inst): + a = self.pop() + b = self.pop() + self.push(a) + self.push(b) + + def 
ROT_THREE(self, inst): + a = self.pop() + b = self.pop() + c = self.pop() + self.push(a) + self.push(c) + self.push(b) + + def ROT_FOUR(self, inst): + a = self.pop() + b = self.pop() + c = self.pop() + d = self.pop() + self.push(a) + self.push(d) + self.push(c) + self.push(b) + + def DUP_TOP(self, inst): + a = self.pop() + self.push(a) + self.push(a) + + def DUP_TOP_TWO(self, inst): + a = self.pop() + b = self.pop() + self.push(b) + self.push(a) + self.push(b) + self.push(a) + + def FORMAT_VALUE(self, inst): + flags = inst.arg + if (flags & 0x04) == 0x04: + fmt_spec = self.pop() + else: + fmt_spec = ConstantVariable.create("") + + value = self.pop() + if isinstance(value, SymNodeVariable): + value = ConstantVariable.create(str(value.sym_num)) + if (flags & 0x03) == 0x01: + value = BuiltinVariable(str).call_function(self, [value], {}) + elif (flags & 0x03) == 0x02: + value = BuiltinVariable(repr).call_function(self, [value], {}) + elif (flags & 0x03) == 0x03: + value = BuiltinVariable(ascii).call_function(self, [value], {}) + + fmt_var = ConstantVariable.create("{:" + fmt_spec.as_python_constant() + "}") + + self.call_function(BuiltinVariable(str.format), [fmt_var, value], {}) + + def BUILD_STRING(self, inst): + format_string_parts: List[str] = [] + args: List[VariableTracker] = [] + kwargs: Dict[str, VariableTracker] = {} + for part in self.popn(inst.arg): + if isinstance(part, ConstantVariable): + format_string_parts.append("{}") + args.append(part) + elif isinstance(part, variables.StringFormatVariable): + format_string_parts.append(part.format_string) + args.extend(part.sym_args) + if set(kwargs.keys()) & set(part.sym_kwargs.keys()): + unimplemented( + f"BUILD_STRING key conflict {kwargs} & {part.sym_kwargs}" + ) + kwargs.update(part.sym_kwargs) + else: + unimplemented(f"BUILD_STRING {part}") + self.push( + variables.StringFormatVariable.create( + "".join(format_string_parts), args, kwargs + ) + ) + + def IS_OP(self, inst): + assert inst.argval == 0 or inst.argval == 1 + if inst.argval == 0: + new_argval = "is" + else: + new_argval = "is not" + new_inst = create_instruction("COMPARE_OP", argval=new_argval) + self.COMPARE_OP(new_inst) + + def CONTAINS_OP(self, inst): + assert inst.argval == 0 or inst.argval == 1 + left, right = self.popn(2) + op = inst.argval + self.push(right.call_method(self, "__contains__", [left], {})) + if op == 1: + self.UNARY_NOT(inst) + + def LIST_EXTEND(self, inst): + v = self.pop() + assert inst.argval > 0 + obj = self.stack[-inst.arg] + assert isinstance(obj, ListVariable) + assert obj.mutable_local + obj.call_method(self, "extend", [v], {}) + + def LIST_TO_TUPLE(self, inst): + self.push(BuiltinVariable(tuple).call_function(self, [self.pop()], {})) + + def DICT_MERGE(self, inst): + v = self.pop() + assert inst.argval > 0 + obj = self.stack[-inst.arg].realize() + assert isinstance(obj, ConstDictVariable) + assert obj.mutable_local + obj.call_method(self, "update", [v], {}) + + DICT_UPDATE = DICT_MERGE + + def GEN_START(self, inst): + self.pop() + + def GET_LEN(self, inst): + tos = self.stack[-1] + if tos.is_python_constant(): + self.push(ConstantVariable.create(len(tos.as_python_constant()))) + else: + self.push(tos.call_method(self, "__len__", [], {})) + + def MATCH_MAPPING(self, inst): + tos = self.stack[-1] + assert isinstance(tos, ConstDictVariable) + if isinstance(tos.items, collections.abc.Mapping): + self.push(ConstantVariable.create(True)) + else: + self.push(ConstantVariable.create(False)) + + def MATCH_SEQUENCE(self, inst): + tos = self.stack[-1] + 
assert tos.is_python_constant() + tos_value = tos.as_python_constant() + if isinstance(tos_value, collections.abc.Sequence) and not isinstance( + tos_value, (str, bytes, bytearray) + ): + self.push(ConstantVariable.create(True)) + else: + self.push(ConstantVariable.create(False)) + + def MATCH_KEYS(self, inst): + tos = self.stack[-1] + tos1 = self.stack[-2] + assert isinstance(tos1, ConstDictVariable) + + if all(k in tos1 for k in tos): # type: ignore[attr-defined] + self.push(TupleVariable([tos1.getitem_const(k) for k in tos])) # type: ignore[attr-defined] + if sys.version_info < (3, 11): + self.push(ConstantVariable.create(True)) + else: + self.push(ConstantVariable.create(None)) + if sys.version_info < (3, 11): + self.push(ConstantVariable.create(False)) + + def LOAD_ASSERTION_ERROR(self, inst): + unimplemented("assert with non-string message") + + UNARY_POSITIVE = stack_op(operator.pos) + UNARY_NEGATIVE = stack_op(operator.neg) + UNARY_NOT = stack_op(operator.not_) + UNARY_INVERT = stack_op(operator.invert) + + BINARY_POWER = stack_op(operator.pow) + BINARY_MULTIPLY = stack_op(operator.mul) + BINARY_MATRIX_MULTIPLY = stack_op(operator.matmul) + BINARY_FLOOR_DIVIDE = stack_op(operator.floordiv) + BINARY_TRUE_DIVIDE = stack_op(operator.truediv) + BINARY_MODULO = stack_op(operator.mod) + BINARY_REMAINDER = stack_op(operator.mod) + BINARY_ADD = stack_op(operator.add) + BINARY_SUBTRACT = stack_op(operator.sub) + BINARY_SUBSCR = break_graph_if_unsupported(push=1)(stack_op(operator.getitem)) + BINARY_LSHIFT = stack_op(operator.lshift) + BINARY_RSHIFT = stack_op(operator.rshift) + BINARY_AND = stack_op(operator.and_) + BINARY_OR = stack_op(operator.or_) + BINARY_XOR = stack_op(operator.xor) + + INPLACE_POWER = stack_op(operator.ipow) + INPLACE_MULTIPLY = stack_op(operator.imul) + INPLACE_MATRIX_MULTIPLY = stack_op(operator.imatmul) + INPLACE_FLOOR_DIVIDE = stack_op(operator.ifloordiv) + INPLACE_TRUE_DIVIDE = stack_op(operator.itruediv) + INPLACE_MODULO = stack_op(operator.imod) + INPLACE_REMAINDER = stack_op(operator.imod) + INPLACE_ADD = stack_op(operator.iadd) + INPLACE_SUBTRACT = stack_op(operator.isub) + INPLACE_LSHIFT = stack_op(operator.ilshift) + INPLACE_RSHIFT = stack_op(operator.irshift) + INPLACE_AND = stack_op(operator.iand) + INPLACE_XOR = stack_op(operator.ixor) + INPLACE_OR = stack_op(operator.ior) + + # 3.11 opcodes + def RESUME(self, inst): + if inst.arg == 0: + self.append_prefix_inst(inst) + self.accept_prefix_inst = False + else: + assert not self.accept_prefix_inst + + def BINARY_OP(self, inst): + if sys.version_info >= (3, 11): + opname = dis._nb_ops[inst.arg][0][3:] # type: ignore[attr-defined] + if opname.startswith("INPLACE"): + return getattr(self, "INPLACE_" + opname[8:])(inst) + return getattr(self, "BINARY_" + opname)(inst) + else: + unimplemented("BINARY_OP requires Python 3.11+") + + def PRECALL(self, inst): + pass + + def KW_NAMES(self, inst): + kw_names = self.code_options["co_consts"][inst.arg] + assert isinstance(kw_names, tuple) + for name in kw_names: + assert isinstance(name, str) + assert self.kw_names is None + self.kw_names = ConstantVariable.create(value=kw_names) # type: ignore[assignment] + + def PUSH_NULL(self, inst): + self.push(NullVariable()) + + @break_graph_if_unsupported(push=1) + def CALL(self, inst): + # see https://docs.python.org/3.11/library/dis.html#opcode-CALL + # for convention + contents = self.popn(inst.arg + 2) + if isinstance(contents[0], NullVariable): + fn = contents[1] + args = [] + else: + fn = contents[0] + args = [contents[1]] 
+ kw_names = self.kw_names.value if self.kw_names else () + if kw_names: + args = args + contents[2 : -len(kw_names)] + kwargs_list = contents[-len(kw_names) :] + kwargs = dict(zip(kw_names, kwargs_list)) + assert len(kwargs) == len(kw_names) + else: + args = args + contents[2:] + kwargs = {} + self.call_function(fn, args, kwargs) + self.kw_names = None + + def COPY(self, inst): + self.push(self.stack[-inst.arg]) + + def SWAP(self, inst): + self.stack[-1], self.stack[-inst.arg] = self.stack[-inst.arg], self.stack[-1] + + JUMP_BACKWARD = jump + JUMP_BACKWARD_NO_INTERRUPT = jump + + POP_JUMP_FORWARD_IF_TRUE = generic_jump(operator.truth, False) + POP_JUMP_BACKWARD_IF_TRUE = generic_jump(operator.truth, False) + POP_JUMP_FORWARD_IF_FALSE = generic_jump(operator.not_, False) + POP_JUMP_BACKWARD_IF_FALSE = generic_jump(operator.not_, False) + + def CACHE(self, inst): + pass + + def BEFORE_WITH(self, inst): + self.setup_or_before_with(inst) + + def setup_or_before_with(self, inst): + ctx = self.pop() + if not isinstance(ctx, ContextWrappingVariable): + unimplemented(f"{inst.opname} {ctx}") + + if isinstance(ctx, GenericContextWrappingVariable): + self.generic_context_manager_depth += 1 + + exit = WithExitFunctionVariable( + ctx, + inst.target, + ) + if sys.version_info >= (3, 11): + # see create_call_resume_at for block stack details + assert self.next_instruction + assert self.next_instruction.exn_tab_entry + target = self.next_instruction.exn_tab_entry.target + else: + target = inst.target + if isinstance(self, InstructionTranslator): + self.block_stack.append(BlockStackEntry(target, len(self.stack), ctx)) + else: + self.block_stack.append(BlockStackEntry(target)) + + self.push(exit) + self.push(ctx.enter(self)) + + def append_prefix_inst(self, inst): + assert self.accept_prefix_inst + self.prefix_insts.append(inst) + + def MAKE_CELL(self, inst): + self.append_prefix_inst(inst) + + def COPY_FREE_VARS(self, inst): + self.append_prefix_inst(inst) + + def RETURN_GENERATOR(self, inst): + self.append_prefix_inst(inst) + + def copy_graphstate(self) -> InstructionTranslatorGraphState: + """Create a checkpoint of the current state by copying everything""" + return InstructionTranslatorGraphState( + self.output.copy_graphstate(), + dict(self.symbolic_locals), + list(self.stack), + list(self.block_stack), + self.instruction_pointer, + self.current_instruction, + self.next_instruction, + self.lineno, + ) + + def restore_graphstate(self, state: InstructionTranslatorGraphState): + """Restore a checkpoint created by self.copy_graphstate()""" + ( + output_state, + self.symbolic_locals, + self.stack, + self.block_stack, + self.instruction_pointer, + self.current_instruction, + self.next_instruction, + self.lineno, + ) = state + self.output.restore_graphstate(output_state) + + def is_non_empty_graph(self): + if self.output.count_calls() > 1: + # perf optimization only + self.is_non_empty_graph = lambda: True # type: ignore[method-assign] + return True + return False + + def format_frame_summary(self, additional_stack_frames=None): + if additional_stack_frames is None: + additional_stack_frames = [] + return "".join( + traceback.format_list( + [self.frame_summary()] + list(reversed(additional_stack_frames)) + ) + ) + + def frame_summary(self): + return traceback.FrameSummary( + getattr(self.f_code, "co_filename", ""), + self.lineno, + getattr(self.f_code, "co_name", ""), + lookup_line=False, + ) + + def store_global_weakref_by_id(self, prefix, value): + global_name = self.output.install_global_by_id(prefix, 
weakref.ref(value)) + install_guard( + GlobalWeakRefSource(global_name).make_guard(GuardBuilder.WEAKREF_ALIVE) + ) + return global_name + + @property + def fake_mode(self): + return self.output.tracing_context.fake_mode + + def find_symbolic_locals_name(self, tensor_variable): + for key, value in self.symbolic_locals.items(): + if value is tensor_variable: + return key + return None + + @contextlib.contextmanager + def strict_translation_mode(self): + self.strict_checks_enabled = True + try: + yield + finally: + self.strict_checks_enabled = False + + def speculate(self) -> SpeculationEntry: + return self.speculation_log.next( + self.f_code.co_filename, self.lineno, self.instruction_pointer + ) + + def __init__( + self, + output: OutputGraph, + instructions: List[Instruction], + f_locals: Dict[str, Any], + f_globals: Dict[str, Any], + f_builtins: Dict[str, Any], + code_options: Dict[str, Any], + symbolic_locals: Dict[str, VariableTracker], + symbolic_globals: Dict[str, VariableTracker], + f_code: types.CodeType, + export: bool, + inline_depth: int, + speculation_log: SpeculationLog, + ): + super().__init__() + self.speculation_log = speculation_log + + # Mutable state checkpointed by copy_graphstate() + self.output = output + self.symbolic_locals = symbolic_locals + self.symbolic_globals = symbolic_globals + self.stack = [] + self.instruction_pointer = 0 + self.current_instruction = create_instruction("NOP") + self.next_instruction = None + self.block_stack = [] + # states before SETUP_WITH for checkpointing and fallback + self.generic_context_manager_depth = 0 + self.lineno = code_options["co_firstlineno"] + self.kw_names = None + self.accept_prefix_inst = True + self.prefix_insts = [] + + # Properties of the input/output code + self.instructions: List[Instruction] = instructions + self.indexof: Dict[Instruction, int] = get_indexof(self.instructions) + self.f_locals: Dict[ + str, Any + ] = f_locals # needed for recording accessed locals for replay + self.f_globals: Dict[str, Any] = f_globals + self.f_builtins: Dict[str, Any] = f_builtins + self.code_options: Dict[str, Any] = code_options + self.f_code: types.CodeType = f_code + + # Execution record for replaying errors + self.exec_recorder = ExecutionRecorder(code=f_code, code_options=code_options) + # Stack of module being parsed, current nn.module is at the end of ordered dict. + # The first field of tuple is the fully qualified name of current module + # in original hierarchy. The second field is the type of current nn.module + self.nn_module_stack: Dict[str, Tuple[str, Type[Any]]] = {} + # Flag to indicate whether tracing is used for export. 
+ self.export = export + + self.current_speculation = None + + self.strict_checks_enabled = False + + if sys.version_info >= (3, 10): + from .resume_execution import ( + CO_ASYNC_GENERATOR, + CO_COROUTINE, + CO_GENERATOR, + CO_ITERABLE_COROUTINE, + ) + + if f_code.co_flags & ( + CO_GENERATOR | CO_COROUTINE | CO_ITERABLE_COROUTINE | CO_ASYNC_GENERATOR + ): + self.push(BuiltinVariable(None)) + + self.inline_depth = inline_depth + self.inconsistent_side_effects = False + linecache.lazycache(f_code.co_filename, f_globals) + self.log_starts_line() + + +class InstructionTranslator(InstructionTranslatorBase): + mutated_closure_cell_contents: Set[str] + + @staticmethod + def current_tx() -> "InstructionTranslator": + return tls.current_tx + + @contextlib.contextmanager + def set_current_tx(self): + prior = getattr(tls, "current_tx", None) + tls.current_tx = self + try: + yield + finally: + tls.current_tx = prior + + def __init__( + self, + instructions: List[Instruction], + f_code, + f_locals, + f_globals, + f_builtins, + code_options, + compiler_fn, + one_graph, + export, + export_constraints, + mutated_closure_cell_contents: Set[str], + frame_state, + speculation_log: SpeculationLog, + ): + _step_logger()( + logging.INFO, + f"torchdynamo start tracing {f_code.co_name} {code_options['co_filename']}:{code_options['co_firstlineno']}", + ) + super().__init__( + output=OutputGraph( + code_options, + compiler_fn, + self, + export, + export_constraints, + frame_state, + local_scope=f_locals, + global_scope=f_globals, + f_code=f_code, + ), + instructions=instructions, + f_locals=f_locals, + f_globals=f_globals, + f_builtins=f_builtins, + code_options=code_options, + symbolic_locals={}, # set below + # A global var is inserted only after a STORE_GLOBAL happens to it + symbolic_globals={}, + f_code=f_code, + export=export, + inline_depth=0, + speculation_log=speculation_log, + ) + + self._throw_if_in_functorch() + + # as soon as we create the tracing context we should keep it active, so any calls + # into dynamo apis can rely on finding it + with tracing(self.output.tracing_context), self.set_current_tx(): + self.one_graph: bool = one_graph + self.export = export + self.mutated_closure_cell_contents = mutated_closure_cell_contents + if self.export: + assert ( + self.one_graph + ), "Export without one graph - something has gone wrong." 
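The export/one-graph invariant asserted above has a user-facing analogue: with torch.compile the default mode falls back across graph breaks, while fullgraph=True (like one_graph here) turns any break into a hard error. A rough sketch, using torch._dynamo.graph_break() purely to force a break:

import torch
import torch._dynamo

def f(x):
    x = x * 2
    torch._dynamo.graph_break()  # deliberately split the graph
    return x + 1

torch.compile(f)(torch.ones(3))  # fine: compiled as two graphs around the break
try:
    torch.compile(f, fullgraph=True)(torch.ones(3))
except Exception as err:  # in fullgraph mode the break surfaces as an error
    print(type(err).__name__)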
+ + vars = list(code_options["co_varnames"]) + cells_and_freevars = [x for x in self.cell_and_freevars() if x not in vars] + vars.extend(cells_and_freevars) + cells_and_freevars_set = set(cells_and_freevars) + + self.symbolic_locals = { + k: variables.LazyVariableTracker.create( + f_locals[k], + source=LocalSource(k, cell_or_freevar=k in cells_and_freevars_set), + ) + for k in vars + if k in f_locals + } + self.debug_locals: List[Tuple[VariableTracker, List[VariableTracker]]] = [] + if export: + # export gets confused if we never realize unused inputs + # in export mode just eagerly realize everything + self.symbolic_locals = VariableTracker.apply( + lambda x: x.realize(), self.symbolic_locals + ) + + self._freevars_ids = dict() + for name in self.code_options["co_freevars"]: + if name in f_locals: + self._freevars_ids[name] = id(f_locals[name]) + + def _throw_if_in_functorch(self): + # Fallback to eager in case of a graph break inside vmap + eager = torch._dynamo.lookup_backend("eager") + compiler_fn = inspect.getattr_static( + self.output.compiler_fn, "compiler_fn", self.output.compiler_fn + ) + ci = torch._C._functorch.peek_interpreter_stack() + forbidden_keys = ( + torch._C._functorch.TransformType.Vmap, + torch._C._functorch.TransformType.Grad, + ) + if ci is not None and ci.key() in forbidden_keys and compiler_fn is not eager: + # if it reaches here, it means Dynamo failed to inline a functorch function + name = ci.key().name.lower() + msg = f"torch.func.{name}(fn) requires the function to be inlined by dynamo" + unimplemented(msg) + + def get_example_value(self, source: Source): + if isinstance(source, LocalSource): + return self.f_locals[source.local_name] + if isinstance(source, GlobalSource): + return self.f_globals[source.global_name] + raise KeyError() + + def run(self): + super().run() + + def match_nested_cell(self, name, cell): + """Match a cell in this method to one in a function we are inlining""" + try: + value = cell.cell_contents + except ValueError: + return None + # TODO(jansel): check the id of the cell rather than the contents + if id(value) != self._freevars_ids.get(name): + return None + return self.symbolic_locals[name] + + def should_compile_partial_graph(self): + return ( + all(b.can_restore() for b in self.block_stack) + and not self.one_graph + and self.generic_context_manager_depth == 0 + ) + + def create_call_resume_at(self, inst): + self.instruction_pointer = None + + if inst.opname == "RETURN_VALUE": + return [create_instruction("RETURN_VALUE")] + + reads = livevars_analysis(self.instructions, inst) + argnames = tuple( + k + for k in self.symbolic_locals.keys() + if k in reads and k not in self.cell_and_freevars() + ) + + cg = PyCodegen(self) + + # Python does not allow null to be an arg to a function, so + # we remove nulls from the stack and restore them in the + # prologue of the resume function + + # sorted list of indices of nulls on the stack + null_idxes: List[int] = [] + if sys.version_info >= (3, 11): + # find indices of NullVariables + for i, var in enumerate(self.stack): + if isinstance(var, NullVariable): + null_idxes.append(i) + # generate bytecode to pop the nulls + null_cnt = 0 + for i, var in enumerate(reversed(self.stack)): + if isinstance(var, NullVariable): + for j in range(2, i + 2 - null_cnt): + cg.append_output(create_instruction("SWAP", arg=j)) + cg.extend_output(cg.pop_null()) + null_cnt += 1 + + # we popped all nulls from the stack at runtime, + # so we should not count NullVariables + stack_len = len(self.stack) - len(null_idxes) 
+ nargs = stack_len + len(argnames) + + name = unique_id(f"__resume_at_{inst.offset}") + + new_code: types.CodeType = ContinueExecutionCache.lookup( + self.f_code, + self.lineno, + inst.offset, + tuple(b.target.offset for b in self.block_stack), + stack_len, + argnames, + tuple(b.resume_fn() for b in self.block_stack), + tuple(null_idxes), + ) + + # Add original GraphModule context to the resume function to handle + # the case of a graph break while tracing a GraphModule + orig_graphmodule_maybe = code_context.get_context(self.f_code).get( + "orig_graphmodule", lambda: None + )() + if orig_graphmodule_maybe is not None: + code_context.get_context(new_code)["orig_graphmodule"] = weakref.ref( + orig_graphmodule_maybe + ) + + if new_code.co_freevars: + cg.make_function_with_closure(name, new_code, True, stack_len) + else: + # This is safe: we pre-generate a unique name + self.output.install_global_unsafe( + name, types.FunctionType(new_code, self.f_globals, name) + ) + cg.extend_output(cg.load_function_name(name, True, stack_len)) + + cg.extend_output([cg.create_load(k) for k in argnames]) + cg.extend_output(create_call_function(nargs, False)) + cg.append_output(create_instruction("RETURN_VALUE")) + return cg.get_instructions() + + def symbolic_locals_contain_module_class(self): + for v in self.symbolic_locals.values(): + if isinstance(v, UserDefinedClassVariable) and issubclass( + v.as_python_constant(), torch.nn.Module + ): + return True + return False + + def RETURN_VALUE(self, inst): + if ( + self.output.count_calls() == 0 + and not self.inconsistent_side_effects + and not self.symbolic_locals_contain_module_class() + and not self.export + ): + raise exc.SkipFrame("because no content in function call") + self.instruction_pointer = None + _step_logger()( + logging.INFO, + f"torchdynamo done tracing {self.f_code.co_name} (RETURN_VALUE)", + ) + log.debug("RETURN_VALUE triggered compile") + self.output.compile_subgraph( + self, + reason=GraphCompileReason( + "return_value", [self.frame_summary()], graph_break=False + ), + ) + self.output.add_output_instructions([create_instruction("RETURN_VALUE")]) + + +class InliningInstructionTranslator(InstructionTranslatorBase): + """Trace and inline a called method""" + + symbolic_result: Optional[TensorVariable] + + @classmethod + def inline_call(cls, parent, func, args, kwargs): + with patch.dict(counters, {"unimplemented": counters["inline_call"]}): + return cls.inline_call_(parent, func, args, kwargs) + + @staticmethod + def check_inlineable(func): + if func.has_self(): + unimplemented("inline with __self__") + + result = trace_rules.check_verbose(func, is_inlined_call=True) + if result.skipped: + from torch._dynamo.variables.misc import produce_trampoline_autograd_apply + + # _origin marks this as coming from an internal dynamo known function that is safe to + # trace through. 
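The check that follows relies on a common Python idiom: tag a function object with a marker attribute, then probe for it later with getattr. A generic sketch of the pattern (the marker value below is purely illustrative):

def _known_helper():
    pass

_known_helper._origin = "dynamo_internal"  # hypothetical marker for illustration

def is_known(fn):
    return getattr(fn, "_origin", None) == "dynamo_internal"

assert is_known(_known_helper)
assert not is_known(print)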
+ if hasattr(getattr(func, "fn", None), "_origin") and func.fn._origin in [ + produce_trampoline_autograd_apply, + ]: + # Known sound + return trace_rules.SkipResult( + False, "allowlist in dynamo known function" + ) + fn_qualname = func.fn.__qualname__ if hasattr(func, "fn") else "" + unimplemented( + f"'inline in skipfiles: {fn_qualname} | {func.get_name()} {func.get_filename()}, {result.reason}'" + ) + + if isinstance(func, UserFunctionVariable) and inspect.getattr_static( + func.get_function(), "_torchdynamo_disable", False + ): + unimplemented( + f"call torch._dynamo.disable() wrapped function {func.get_function()}" + ) + else: + return result + + @staticmethod + def inline_call_( + parent, func: VariableTracker, args: List[VariableTracker], kwargs + ): + if isinstance(func, SkipFunctionVariable): + unimplemented("inline with functions in skip files") + assert isinstance( + func, + (UserFunctionVariable, NestedUserFunctionVariable), + ) + result = InliningInstructionTranslator.check_inlineable(func) + assert result.skipped is False + try: + sub_locals, closure_cells = func.bind_args(parent, args, kwargs) + except TypeError as e: + # Wrap the general TypeError during bind_args() to the internal ArgsMismatchError with detailed info + raise ArgsMismatchError( # noqa: TRY200 + "{reason}.\n func = {func}, args = {args}, kwargs = {kwargs}".format( + reason=str(e), + func=f"'{func.get_name()}' {func.get_filename()}:{func.get_code().co_firstlineno}", + args=[arg.python_type() for arg in args], + kwargs=kwargs, + ), + ) + + for v in itertools.chain(sub_locals.values(), closure_cells.values()): + if not isinstance(v, VariableTracker): + unimplemented(f"unconverted arg {v}") + + code: types.CodeType = func.get_code() + if code.co_name in ("__setitem__", "__setattr__") and not ( + args is not None + and len(args) > 0 + and isinstance(args[0], variables.CustomizedDictVariable) + ): + unimplemented(f"inline {code.co_name}") + + suffix = "" + # TODO: mlazos, add support for enabling multiple artifact logs + # with a single alias + if torch._logging._internal.log_state.is_artifact_enabled("output_code"): + suffix = f"\n{dis.Bytecode(code).dis()}" + if sys.version_info >= (3, 11): + cur_inst = parent.current_instruction + parent_code = parent.f_code + header = parent.get_line_of_code_header(lineno=cur_inst.positions.lineno) + + def get_trace_call_log_str(): + line = get_instruction_source_311(parent_code, cur_inst).rstrip() + return f"TRACE inlined call {code.co_name} from {header}\n{line}" + + trace_call_log.debug("%s", LazyString(get_trace_call_log_str)) + log.debug("INLINING %s%s, %s", code, suffix, result.reason) + + # Detect inline GraphModule calls in order to propagate node metadata, + # by checking if the first argument (self) is a variable tracking a GraphModule. + if args and isinstance(args[0], NNModuleVariable): + module = parent.output.get_submodule(args[0].module_key) + if isinstance(module, torch.fx.GraphModule): + # The inline call might not actually be a call to `forward`, + # but it is enough to add a context for `forward` in case it is called. 
+ code_context.get_context(module.forward.__code__)[ + "orig_graphmodule" + ] = weakref.ref(module) + + tracer: InliningInstructionTranslator + if is_generator(code): + tracer = InliningGeneratorInstructionTranslator( + parent, code, sub_locals, parent.symbolic_globals, closure_cells, func + ) + else: + tracer = InliningInstructionTranslator( + parent, code, sub_locals, parent.symbolic_globals, closure_cells, func + ) + + strict_ctx: Any = contextlib.nullcontext() + if parent.strict_checks_enabled: + strict_ctx = tracer.strict_translation_mode() + try: + with strict_ctx: + tracer.run() + except exc.SkipFrame as e: + msg = f"SKIPPED INLINING {code}: {e}" + log.debug(msg) + raise Unsupported(msg) from e + except Exception as e: + log.debug("FAILED INLINING %s", code) + raise + assert tracer.symbolic_result is not None + func.export_freevars(parent, tracer) + + if tracer.f_globals is parent.f_globals: + # Merge symbolic_globals back if parent and child are in the same namespace + parent.symbolic_globals.update(tracer.symbolic_globals) + + parent.inconsistent_side_effects |= tracer.inconsistent_side_effects + + log.debug("DONE INLINING %s", code) + + if is_generator(code): + assert isinstance(tracer, InliningGeneratorInstructionTranslator) + assert tracer.symbolic_result.as_python_constant() is None + return ListIteratorVariable( + tracer.generated_items, + mutable_local=MutableLocal(), + ) + else: + return tracer.symbolic_result + + def __init__( + self, + parent: InstructionTranslatorBase, + code: types.CodeType, + symbolic_locals: Dict[str, VariableTracker], + symbolic_globals: Dict[str, VariableTracker], + closure_cells: Dict[str, VariableTracker], + funcvar: BaseUserFunctionVariable, + ): + f_globals = funcvar.get_globals() # type: ignore[attr-defined] + f_builtins = f_globals["__builtins__"] + if not isinstance(f_builtins, dict): + f_builtins = f_builtins.__dict__ + instructions = cleaned_instructions(code) + propagate_line_nums(instructions) + super().__init__( + output=parent.output, + f_locals={}, + f_globals=f_globals, + f_builtins=f_builtins, + symbolic_locals=symbolic_locals, + symbolic_globals=symbolic_globals, + instructions=instructions, + code_options={k: getattr(code, k) for k in dir(code)}, + f_code=code, + export=parent.export, + inline_depth=parent.inline_depth + 1, + speculation_log=parent.speculation_log, + ) + self.parent = parent + self.symbolic_result = None + self.closure_cells = closure_cells + self.nn_module_stack = parent.nn_module_stack.copy() + + @property + def fake_mode(self): + return self.parent.fake_mode + + def run_ctx_mgr(self): + return TracingContext.current_frame(self.parent.frame_summary()) + + def STORE_DEREF(self, inst): + if inst.argval in self.closure_cells: + cell = self.closure_cells[inst.argval] + val = self.pop() + if isinstance(cell, ClosureVariable): + if not self.output.is_root_tracer(): + unimplemented( + "HigherOrderOperator: Mutating a variable not in the current scope (ClosureVariable)" + ) + self.output.root_tx.symbolic_locals[cell.name] = val + else: + self.output.side_effects.store_cell(cell, val) + else: + maybe_cell = self.symbolic_locals.get(inst.argval) + if isinstance( + maybe_cell, + variables.NewCellVariable, + ): + self.output.side_effects.store_cell( + self.symbolic_locals[inst.argval], self.pop() + ) + else: + if ( + maybe_cell is not None + and maybe_cell.source.name() + not in self.output.root_tx.mutated_closure_cell_contents + ): + # Why is the source name here unique? 
+ # mutated_closure_cell_contents is a per-frame + # concept, and sources identify, e.g., particular + # locals from the frame. If you had two locals, + # they'll get different source names, and therefore + # differ here. + self.output.root_tx.mutated_closure_cell_contents.add( + maybe_cell.source.name() + ) + raise exc.UnspecializeRestartAnalysis() + unimplemented("write to __closure__ while inlining") + + def LOAD_DEREF(self, inst): + if inst.argval in self.closure_cells: + cell = self.closure_cells[inst.argval] + if isinstance(cell, ClosureVariable): + self.push(self.output.root_tx.symbolic_locals[cell.name]) + else: + self.push(self.output.side_effects.load_cell(cell)) + else: + maybe_sym_local = self.symbolic_locals.get(inst.argval, None) + if isinstance(maybe_sym_local, variables.NewCellVariable): + self.push(self.output.side_effects.load_cell(maybe_sym_local)) + else: + super().LOAD_DEREF(inst) + + def LOAD_CLOSURE(self, inst): + assert inst.argval in self.cell_and_freevars() + if inst.argval in self.closure_cells: + self.push(self.closure_cells[inst.argval]) + else: + self.push(InlinedClosureVariable(name=inst.argval)) + + def check_replace_is_safe(self, oldvar): + if not is_side_effect_safe(oldvar.mutable_local): + unimplemented( + "HigherOrderOperator: Mutating a variable not in the current scope (replace_all)" + ) + + def should_compile_partial_graph(self): + return False # inlining functions is all-or-nothing + + def create_call_resume_at(self, offset): + unimplemented("cant resume while inlining") + + def RETURN_VALUE(self, inst): + self.symbolic_result = self.pop() # type: ignore[assignment] + self.instruction_pointer = None + + +class InliningGeneratorInstructionTranslator(InliningInstructionTranslator): + generated_items: List[VariableTracker] + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.generated_items = [] + + def YIELD_VALUE(self, inst: Instruction): + self.generated_items.append(self.pop()) + # TODO(jansel): figure out why this is needed, it isn't in the docs for YIELD_VALUE + self.push(ConstantVariable.create(None)) + + def GET_YIELD_FROM_ITER(self, inst): + tos = self.stack[-1] + if not isinstance(tos, ListIteratorVariable): + self.pop() + res = BuiltinVariable(iter).call_function(self, [tos], {}) + self.push(res) + return self.YIELD_FROM(inst) + + def YIELD_FROM(self, inst): + while True: + tos = self.stack[-1].realize() + if isinstance(tos, ConstantVariable) and tos.value is None: + self.pop() + return + if isinstance( + tos, (variables.ListIteratorVariable, variables.IteratorVariable) + ): + try: + val, next_iter = tos.next_variables(self) + self.push(val) + # TODO(voz): Unclear if we need the push None in YIELD_VALUE? + self.YIELD_VALUE(inst) + self.pop() + self.push(next_iter) + except StopIteration: + return + else: + unimplemented(f"YIELD_FROM {typestr(tos)}") + + def SEND(self, inst): + assert len(self.stack) >= 2 + val = self.pop() + tos = self.stack[-1] + if isinstance(tos, ListIteratorVariable): + if isinstance(val, ConstantVariable) and val.value is None: + self.push(val) + self.instruction_pointer = self.indexof[inst.target] + else: + # invoke send + # Unreachable code - if you hit this, you are implementing generator support and have + # lifted the `unimplemented("generator")` in frame conversion. 
This codepath handles + # subgenerator and lines up with this line in Python 3.11 + # https://github.com/python/cpython/blob/3.11/Python/ceval.c#L2597 + unimplemented("Unreachable sub-generator code") + else: + unimplemented(f"SEND {typestr(tos)}") diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/tensor_version_op.py b/venv/lib/python3.10/site-packages/torch/_dynamo/tensor_version_op.py new file mode 100644 index 0000000000000000000000000000000000000000..f12ed95b58c0fe809103b35140b16aaf7c168d6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/tensor_version_op.py @@ -0,0 +1,57 @@ +import torch +from torch._prims import _make_prim, RETURN_TYPE +from torch._subclasses import FakeTensorMode +from torch._subclasses.functional_tensor import FunctionalTensorMode + +_tensor_version = _make_prim( + schema="_tensor_version(Tensor self) -> SymInt", + return_type=RETURN_TYPE.NEW, + meta=torch.ops.aten._version.default, + impl_aten=torch.ops.aten._version.default, + doc="Tracable unbacked SymInt version of torch.Tensor._version", +) + + +@_tensor_version.py_impl(FakeTensorMode) +def _tensor_version_fake(self): + """ + The initial dynamo capture of _tensor_version + _unsafe_set_version_counter turns the + `._version` into an unbacked SymInt so that we don't need to specialize on the `._version` + of input tensors to the graph. + """ + return self.fake_mode.shape_env.create_unbacked_symint() + + +_unsafe_set_version_counter = _make_prim( + schema="_unsafe_set_version_counter(Tensor self, SymInt version) -> ()", + return_type=RETURN_TYPE.NEW, + meta=lambda self, version: None, + impl_aten=torch._C._autograd._unsafe_set_version_counter, + doc="Tracable+SymInt version of torch._C._autograd._unsafe_set_version_counter", +) +torch.fx.node.has_side_effect(_unsafe_set_version_counter) + + +""" +When we functionalize _tensor_version + _unsafe_set_version_counter, +the ops disappear from the traced graph. We run them eagerly on the +fake tensors used for tracing, in order to get past asserts that would +fail in autograd. + +Why is this ok? +1) Versions on functional tensors don't make any sense since you can't mutate a functional tensor. +2) The whole point of version munging is to trick autograd into doing what we want, and after + AotAtuograd there is no longer any need for these ops. + +Note this is similar to how no_grad is handled. +""" + + +@_tensor_version.py_impl(FunctionalTensorMode) +def _tensor_version_functional(self): + return self._version + + +@_unsafe_set_version_counter.py_impl(FunctionalTensorMode) +def _unsafe_set_version_counter_functional(self, version): + torch._C._autograd._unsafe_set_version_counter(self, version) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/test_case.py b/venv/lib/python3.10/site-packages/torch/_dynamo/test_case.py new file mode 100644 index 0000000000000000000000000000000000000000..e3cbef09eaae05474fe8b64207824c6bb58439e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/test_case.py @@ -0,0 +1,78 @@ +import contextlib +import importlib +import logging +import sys + +import torch +import torch.testing +from torch.testing._internal.common_utils import ( # type: ignore[attr-defined] + IS_WINDOWS, + TEST_WITH_CROSSREF, + TEST_WITH_TORCHDYNAMO, + TestCase as TorchTestCase, +) + +from . 
import config, reset, utils + +log = logging.getLogger(__name__) + + +def run_tests(needs=()): + from torch.testing._internal.common_utils import run_tests + + if ( + TEST_WITH_TORCHDYNAMO + or IS_WINDOWS + or TEST_WITH_CROSSREF + or sys.version_info >= (3, 12) + ): + return # skip testing + + if isinstance(needs, str): + needs = (needs,) + for need in needs: + if need == "cuda" and not torch.cuda.is_available(): + return + else: + try: + importlib.import_module(need) + except ImportError: + return + run_tests() + + +class TestCase(TorchTestCase): + _exit_stack: contextlib.ExitStack + + @classmethod + def tearDownClass(cls): + cls._exit_stack.close() + super().tearDownClass() + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls._exit_stack = contextlib.ExitStack() # type: ignore[attr-defined] + cls._exit_stack.enter_context( # type: ignore[attr-defined] + config.patch( + raise_on_ctx_manager_usage=True, + suppress_errors=False, + log_compilation_metrics=False, + ), + ) + + def setUp(self): + self._prior_is_grad_enabled = torch.is_grad_enabled() + super().setUp() + reset() + utils.counters.clear() + + def tearDown(self): + for k, v in utils.counters.items(): + print(k, v.most_common()) + reset() + utils.counters.clear() + super().tearDown() + if self._prior_is_grad_enabled is not torch.is_grad_enabled(): + log.warning("Running test changed grad mode") + torch.set_grad_enabled(self._prior_is_grad_enabled) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/test_minifier_common.py b/venv/lib/python3.10/site-packages/torch/_dynamo/test_minifier_common.py new file mode 100644 index 0000000000000000000000000000000000000000..d12e5a92315a498e8f90586a47c5a93de1217df0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/test_minifier_common.py @@ -0,0 +1,244 @@ +import dataclasses +import io +import logging +import os +import re +import shutil +import subprocess +import sys +import tempfile +import traceback +from typing import Optional +from unittest.mock import patch + +import torch +import torch._dynamo +import torch._dynamo.test_case +from torch.utils._traceback import report_compile_source_on_error + + +@dataclasses.dataclass +class MinifierTestResult: + minifier_code: str + repro_code: str + + def _get_module(self, t): + match = re.search(r"class Repro\(torch\.nn\.Module\):\s+([ ].*\n| *\n)+", t) + assert match is not None, "failed to find module" + r = match.group(0) + r = re.sub(r"\s+$", "\n", r, flags=re.MULTILINE) + r = re.sub(r"\n{3,}", "\n\n", r) + return r.strip() + + def minifier_module(self): + return self._get_module(self.minifier_code) + + def repro_module(self): + return self._get_module(self.repro_code) + + +class MinifierTestBase(torch._dynamo.test_case.TestCase): + DEBUG_DIR = tempfile.mkdtemp() + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls._exit_stack.enter_context( # type: ignore[attr-defined] + torch._dynamo.config.patch(debug_dir_root=cls.DEBUG_DIR) + ) + # These configurations make new process startup slower. Disable them + # for the minification tests to speed them up. 
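+        # Rough usage sketch (not part of this file): torch._inductor.config.patch,
+        # like the torch._dynamo.config.patch used in TestCase.setUpClass above,
+        # accepts keyword overrides or a dict of overrides and returns an object
+        # usable as a context manager or decorator, e.g.
+        #
+        #     with torch._inductor.config.patch({"compile_threads": 1}):
+        #         ...  # overrides are active only inside this block
+        #
+        # Here it is pushed onto the class-level ExitStack so the overrides stay
+        # active for the whole test class and are undone in tearDownClass.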
+ cls._exit_stack.enter_context( # type: ignore[attr-defined] + torch._inductor.config.patch( + { + # https://github.com/pytorch/pytorch/issues/100376 + "pattern_matcher": False, + # multiprocess compilation takes a long time to warmup + "compile_threads": 1, + # https://github.com/pytorch/pytorch/issues/100378 + "cpp.vec_isa_ok": False, + } + ) + ) + + @classmethod + def tearDownClass(cls): + if os.getenv("PYTORCH_KEEP_TMPDIR", "0") != "1": + shutil.rmtree(cls.DEBUG_DIR) + else: + print(f"test_minifier_common tmpdir kept at: {cls.DEBUG_DIR}") + cls._exit_stack.close() # type: ignore[attr-defined] + + def _gen_codegen_fn_patch_code(self, device, bug_type): + assert bug_type in ("compile_error", "runtime_error", "accuracy") + return f"""\ +{torch._dynamo.config.codegen_config()} +{torch._inductor.config.codegen_config()} +torch._inductor.config.{"cpp" if device == "cpu" else "triton"}.inject_relu_bug_TESTING_ONLY = {bug_type!r} +""" + + def _maybe_subprocess_run(self, args, *, isolate, cwd=None): + if not isolate: + assert len(args) >= 2, args + assert args[0] == "python3", args + if args[1] == "-c": + assert len(args) == 3, args + code = args[2] + args = ["-c"] + else: + assert len(args) >= 2, args + with open(args[1]) as f: + code = f.read() + args = args[1:] + + # WARNING: This is not a perfect simulation of running + # the program out of tree. We only interpose on things we KNOW we + # need to handle for tests. If you need more stuff, you will + # need to augment this appropriately. + + # NB: Can't use save_config because that will omit some fields, + # but we must save and reset ALL fields + dynamo_config = torch._dynamo.config.shallow_copy_dict() + inductor_config = torch._inductor.config.shallow_copy_dict() + try: + stderr = io.StringIO() + log_handler = logging.StreamHandler(stderr) + log = logging.getLogger("torch._dynamo") + log.addHandler(log_handler) + try: + prev_cwd = os.getcwd() + if cwd is not None: + os.chdir(cwd) + with patch("sys.argv", args), report_compile_source_on_error(): + exec(code, {"__name__": "__main__", "__compile_source__": code}) + rc = 0 + except Exception: + rc = 1 + traceback.print_exc(file=stderr) + finally: + log.removeHandler(log_handler) + if cwd is not None: + os.chdir(prev_cwd) # type: ignore[possibly-undefined] + # Make sure we don't leave buggy compiled frames lying + # around + torch._dynamo.reset() + finally: + torch._dynamo.config.load_config(dynamo_config) + torch._inductor.config.load_config(inductor_config) + + # TODO: return a more appropriate data structure here + return subprocess.CompletedProcess( + args, + rc, + b"", + stderr.getvalue().encode("utf-8"), + ) + else: + return subprocess.run(args, capture_output=True, cwd=cwd, check=False) + + # Run `code` in a separate python process. + # Returns the completed process state and the directory containing the + # minifier launcher script, if `code` outputted it. 
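+    # Minimal call sketch (illustrative only; `test_code` is a placeholder name):
+    #
+    #     proc, repro_dir = self._run_test_code(test_code, isolate=False)
+    #
+    # `proc` is a subprocess.CompletedProcess and `repro_dir` is the directory
+    # parsed from stderr, or None if no minifier_launcher.py path was printed.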
+ def _run_test_code(self, code, *, isolate): + proc = self._maybe_subprocess_run( + ["python3", "-c", code], isolate=isolate, cwd=self.DEBUG_DIR + ) + + print("test stdout:", proc.stdout.decode("utf-8")) + print("test stderr:", proc.stderr.decode("utf-8")) + repro_dir_match = re.search( + r"(\S+)minifier_launcher.py", proc.stderr.decode("utf-8") + ) + if repro_dir_match is not None: + return proc, repro_dir_match.group(1) + return proc, None + + # Runs the minifier launcher script in `repro_dir` + def _run_minifier_launcher(self, repro_dir, isolate, *, minifier_args=()): + self.assertIsNotNone(repro_dir) + launch_file = os.path.join(repro_dir, "minifier_launcher.py") + with open(launch_file) as f: + launch_code = f.read() + self.assertTrue(os.path.exists(launch_file)) + + args = ["python3", launch_file, "minify", *minifier_args] + if not isolate: + args.append("--no-isolate") + launch_proc = self._maybe_subprocess_run(args, isolate=isolate, cwd=repro_dir) + print("minifier stdout:", launch_proc.stdout.decode("utf-8")) + stderr = launch_proc.stderr.decode("utf-8") + print("minifier stderr:", stderr) + self.assertNotIn("Input graph did not fail the tester", stderr) + + return launch_proc, launch_code + + # Runs the repro script in `repro_dir` + def _run_repro(self, repro_dir, *, isolate=True): + self.assertIsNotNone(repro_dir) + repro_file = os.path.join(repro_dir, "repro.py") + with open(repro_file) as f: + repro_code = f.read() + self.assertTrue(os.path.exists(repro_file)) + + repro_proc = self._maybe_subprocess_run( + ["python3", repro_file], isolate=isolate, cwd=repro_dir + ) + print("repro stdout:", repro_proc.stdout.decode("utf-8")) + print("repro stderr:", repro_proc.stderr.decode("utf-8")) + return repro_proc, repro_code + + # Template for testing code. + # `run_code` is the code to run for the test case. + # `patch_code` is the code to be patched in every generated file; usually + # just use this to turn on bugs via the config + def _gen_test_code(self, run_code, repro_after, repro_level): + return f"""\ +import torch +import torch._dynamo +{torch._dynamo.config.codegen_config()} +{torch._inductor.config.codegen_config()} +torch._dynamo.config.repro_after = "{repro_after}" +torch._dynamo.config.repro_level = {repro_level} +torch._dynamo.config.debug_dir_root = "{self.DEBUG_DIR}" +{run_code} +""" + + # Runs a full minifier test. + # Minifier tests generally consist of 3 stages: + # 1. Run the problematic code + # 2. Run the generated minifier launcher script + # 3. 
Run the generated repro script + # + # If possible, you should run the test with isolate=False; use + # isolate=True only if the bug you're testing would otherwise + # crash the process + def _run_full_test( + self, run_code, repro_after, expected_error, *, isolate, minifier_args=() + ) -> Optional[MinifierTestResult]: + if isolate: + repro_level = 3 + elif expected_error is None or expected_error == "AccuracyError": + repro_level = 4 + else: + repro_level = 2 + test_code = self._gen_test_code(run_code, repro_after, repro_level) + print("running test", file=sys.stderr) + test_proc, repro_dir = self._run_test_code(test_code, isolate=isolate) + if expected_error is None: + # Just check that there was no error + self.assertEqual(test_proc.returncode, 0) + self.assertIsNone(repro_dir) + return None + # NB: Intentionally do not test return code; we only care about + # actually generating the repro, we don't have to crash + self.assertIn(expected_error, test_proc.stderr.decode("utf-8")) + self.assertIsNotNone(repro_dir) + print("running minifier", file=sys.stderr) + minifier_proc, minifier_code = self._run_minifier_launcher( + repro_dir, isolate=isolate, minifier_args=minifier_args + ) + print("running repro", file=sys.stderr) + repro_proc, repro_code = self._run_repro(repro_dir, isolate=isolate) + self.assertIn(expected_error, repro_proc.stderr.decode("utf-8")) + self.assertNotEqual(repro_proc.returncode, 0) + return MinifierTestResult(minifier_code=minifier_code, repro_code=repro_code) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/testing.py b/venv/lib/python3.10/site-packages/torch/_dynamo/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..fac20cf55508011f0d06511b30d846c1b26eeb4b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/testing.py @@ -0,0 +1,378 @@ +import contextlib +import dis +import functools +import logging +import os.path +import random +import re +import sys +import types +import unittest +from typing import List, Optional, Sequence, Union +from unittest.mock import patch + +np: Optional[types.ModuleType] = None +try: + import numpy as np +except ModuleNotFoundError: + np = None + +import torch +from torch import fx +from torch._dynamo.output_graph import OutputGraph + +from . import config, eval_frame, optimize_assert, reset +from .bytecode_transformation import ( + create_instruction, + debug_checks, + is_generator, + transform_code_object, +) +from .guards import CheckFunctionManager, GuardedCode +from .utils import same + +unsupported = eval_frame.unsupported +three = 3 + +log = logging.getLogger(__name__) + + +def clone_me(x): + if x is None: + return None + return x.detach().clone().requires_grad_(x.requires_grad) + + +def named_parameters_for_optimized_module(mod): + assert isinstance(mod, eval_frame.OptimizedModule) + return mod._orig_mod.named_parameters + + +def named_buffers_for_optimized_module(mod): + assert isinstance(mod, eval_frame.OptimizedModule) + return mod._orig_mod.named_buffers + + +def remove_optimized_module_prefix(name) -> str: + return re.sub(r"^_orig_mod[.]", "", name) + + +def collect_results(model, prediction, loss, example_inputs): + results = [] + results.append(prediction) + results.append(loss) + # if isinstance(loss, torch.Tensor) and loss.item() > 1: + # log.warning( + # f"High loss value alert - {loss:.2f}. Can result in unstable gradients." 
+ # ) + + grads = dict() + params = dict() + for name, param in model.named_parameters(): + if isinstance(model, eval_frame.OptimizedModule): + name = remove_optimized_module_prefix(name) + param_copy = param + grad = param.grad + # Treat None and zero grad as same + if param.grad is None: + grad = torch.zeros_like(param) + grads[name + ".grad"] = grad + params[name] = param_copy + results.append(grads) + results.append(params) + buffers = dict() + for name, buffer in model.named_buffers(): + if isinstance(model, eval_frame.OptimizedModule): + name = remove_optimized_module_prefix(name) + buffers[name] = buffer + results.append(buffers) + for example in example_inputs: + if isinstance(example, (tuple, list)): + for inp in example: + if isinstance(inp, torch.Tensor): + results.append(inp.grad) + else: + if isinstance(example, torch.Tensor): + results.append(example.grad) + return results + + +def requires_bwd_pass(out): + if isinstance(out, torch.Tensor): + return out.requires_grad + elif isinstance(out, (list, tuple)): + return any(requires_bwd_pass(x) for x in out) + elif out is None: + return False + elif isinstance(out, int): + return False + raise NotImplementedError("Don't know how to reduce", type(out)) + + +def reduce_to_scalar_loss(out): + """Reduce the output of a model to get scalar loss""" + if isinstance(out, torch.Tensor): + # Mean does not work on integer tensors + return out.sum() / out.numel() + elif isinstance(out, (list, tuple)): + return sum([reduce_to_scalar_loss(x) for x in out]) / len(out) + elif type(out).__name__ in ( + "MaskedLMOutput", + "Seq2SeqLMOutput", + "CausalLMOutputWithCrossAttentions", + ): + return reduce_to_scalar_loss(out.logits) + elif type(out).__name__ == "SquashedNormal": + return out.mean.sum() + elif isinstance(out, dict): + return sum([reduce_to_scalar_loss(value) for value in out.values()]) / len( + out.keys() + ) + raise NotImplementedError("Don't know how to reduce", type(out)) + + +def debug_dir() -> str: + path = os.path.join(os.path.dirname(__file__), "../debug") + if not os.path.exists(path): + os.mkdir(path) + return path + + +def debug_dump(name, code: types.CodeType, extra="") -> None: + with open(os.path.join(debug_dir(), name), "w") as fd: + fd.write( + f"{dis.Bytecode(code).info()}\n\n{dis.Bytecode(code).dis()}\n\n{extra}\n" + ) + + +def debug_insert_nops( + frame, cache_size, hooks, _, *, skip: int = 0 +) -> Optional[GuardedCode]: + """used to debug jump updates""" + + def insert_nops(instructions, code_options): + instructions.insert(0, create_instruction("NOP")) + instructions.insert(0, create_instruction("NOP")) + + if is_generator(frame.f_code): + return None + + debug_checks(frame.f_code) + code = transform_code_object(frame.f_code, insert_nops) + graph = OutputGraph( + code_options={}, + compiler_fn=None, + root_tx=None, + export=False, + export_constraints=None, + frame_state={"_id": 0}, + # TODO: shouldn't this be f_locals/f_globals from frame? 
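+        # Note: this debug helper hands OutputGraph its own locals()/globals()
+        # as the scopes rather than the traced frame's f_locals/f_globals,
+        # which is what the TODO above is questioning.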
+ local_scope=locals(), + global_scope=globals(), + f_code=frame.f_code, + ) + + return GuardedCode(code, CheckFunctionManager(graph).check_fn) + + +class CompileCounter: + def __init__(self): + self.frame_count = 0 + self.op_count = 0 + + def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): + self.frame_count += 1 + for node in gm.graph.nodes: + if "call" in node.op: + self.op_count += 1 + return gm.forward + + def clear(self): + self.frame_count = 0 + self.op_count = 0 + + +class CompileCounterWithBackend: + def __init__(self, backend): + self.frame_count = 0 + self.op_count = 0 + self.backend = backend + self.graphs = [] + + def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): + from .backends.registry import lookup_backend + + self.frame_count += 1 + for node in gm.graph.nodes: + if "call" in node.op: + self.op_count += 1 + self.graphs.append(gm) + return lookup_backend(self.backend)(gm, example_inputs) + + +# Equivalent to backend="eager", but also records graphs that +# we can assert on +class EagerAndRecordGraphs: + def __init__(self): + self.graphs = [] + + def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): + self.graphs.append(gm) + return gm + + +def strip_comment(code) -> str: + code = str(code) + return re.sub(r"(?m)^ *#.*\n?", "", code) + + +def remove_trailing_space(code) -> str: + return "\n".join([line.rstrip() for line in code.split("\n")]) + + +def normalize_gm(gm_str) -> str: + # strip comments as comments have path to files which may differ from + # system to system. + return remove_trailing_space(strip_comment(gm_str)) + + +def standard_test( + self, + fn, + nargs, + expected_ops=None, + expected_ops_dynamic=None, + expected_frame_count=1, +): + if not config.assume_static_by_default and expected_ops_dynamic is not None: + expected_ops = expected_ops_dynamic + + actual = CompileCounter() + + args1 = [torch.randn(10, 10) for _ in range(nargs)] + args2 = [torch.randn(10, 10) for _ in range(nargs)] + correct1 = fn(*args1) + correct2 = fn(*args2) + reset() + opt_fn = optimize_assert(actual)(fn) + val1a = opt_fn(*args1) + val2a = opt_fn(*args2) + val1b = opt_fn(*args1) + val2b = opt_fn(*args2) + reset() + self.assertTrue(same(val1a, correct1)) + self.assertTrue(same(val1b, correct1)) + self.assertTrue(same(val2a, correct2)) + self.assertTrue(same(val2b, correct2)) + self.assertEqual(actual.frame_count, expected_frame_count) + if expected_ops is not None: + self.assertEqual(actual.op_count, expected_ops) + + +def dummy_fx_compile(gm: fx.GraphModule, example_inputs): + return gm.forward + + +def format_speedup(speedup, pvalue, is_correct=True, pvalue_threshold=0.1): + if not is_correct: + return "ERROR" + if pvalue > pvalue_threshold: + return f"{speedup:.3f}x SAME" + return f"{speedup:.3f}x p={pvalue:.2f}" + + +def rand_strided( + size: Sequence[int], + stride: Sequence[int], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + extra_size: int = 0, +): + needed_size = ( + sum((shape - 1) * stride for shape, stride in zip(size, stride)) + + 1 + + extra_size + ) + if dtype.is_floating_point: + buffer = torch.randn(needed_size, dtype=dtype, device=device) + else: + buffer = torch.zeros(size=[needed_size], dtype=dtype, device=device) + return torch.as_strided(buffer, size, stride) + + +def _make_fn_with_patches(fn, *patches): + @functools.wraps(fn) + def _fn(*args, **kwargs): + with contextlib.ExitStack() as stack: + for module, attr, val in patches: + 
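+                # Each entry in `patches` is a (module, attr, value) triple that is
+                # applied via unittest.mock.patch.object for the duration of the call.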
stack.enter_context(patch.object(module, attr, val)) + + return fn(*args, **kwargs) + + return _fn + + +def make_test_cls_with_patches(cls, cls_prefix, fn_suffix, *patches, xfail_prop=None): + DummyTestClass = type(f"{cls_prefix}{cls.__name__}", cls.__bases__, {}) + DummyTestClass.__qualname__ = DummyTestClass.__name__ + + for name in dir(cls): + if name.startswith("test_"): + fn = getattr(cls, name) + if not callable(fn): + setattr(DummyTestClass, name, getattr(cls, name)) + continue + new_name = f"{name}{fn_suffix}" + new_fn = _make_fn_with_patches(fn, *patches) + new_fn.__name__ = new_name + if xfail_prop is not None and hasattr(fn, xfail_prop): + new_fn = unittest.expectedFailure(new_fn) + setattr(DummyTestClass, new_name, new_fn) + # NB: Doesn't handle slots correctly, but whatever + elif not hasattr(DummyTestClass, name): + setattr(DummyTestClass, name, getattr(cls, name)) + + return DummyTestClass + + +# test Python 3.11+ specific features +def skipIfNotPy311(fn): + if sys.version_info >= (3, 11): + return fn + return unittest.skip(fn) + + +def xfailIfPy311(fn): + if sys.version_info >= (3, 11): + return unittest.expectedFailure(fn) + return fn + + +# Controls tests generated in test/inductor/test_torchinductor_dynamic_shapes.py +# and test/dynamo/test_dynamic_shapes.py +def expectedFailureDynamic(fn): + fn._expected_failure_dynamic = True + return fn + + +# Controls tests generated in test/inductor/test_torchinductor_codegen_dynamic_shapes.py +def expectedFailureCodegenDynamic(fn): + fn._expected_failure_codegen_dynamic = True + return fn + + +# Controls test generated in test/inductor/test_cpp_wrapper.py +def expectedFailureDynamicWrapper(fn): + fn._expected_failure_dynamic_wrapper = True + return fn + + +def reset_rng_state(use_xla=False): + torch.manual_seed(1337) + random.seed(1337) + if np: + np.random.seed(1337) + if use_xla: + import torch_xla.core.xla_model as xm + + xm.set_rng_state(1337, str(xm.xla_device())) diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/trace_rules.py b/venv/lib/python3.10/site-packages/torch/_dynamo/trace_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..6210160607cd2feb3b0ce0fff9181e37e4a68f08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/trace_rules.py @@ -0,0 +1,3460 @@ +import _collections_abc +import _weakrefset +import abc +import builtins +import collections +import contextlib +import copy +import copyreg +import dataclasses +import enum +import functools +import importlib +import inspect +import itertools +import linecache +import logging +import multiprocessing +import operator +import os +import posixpath +import random +import re +import selectors +import signal +import sys +import tempfile +import threading +import tokenize +import traceback +import types +import typing +import unittest +import weakref +from collections import defaultdict +from typing import Any, Callable, cast, Dict, List, Optional, Set, Union + +np: Optional[types.ModuleType] = None +try: + import numpy as np +except ModuleNotFoundError: + pass + +import torch +import torch._inductor.test_operators +import torch.distributed +import torch.utils._content_store +from ..utils import _config_module +from .utils import getfile, hashable, NP_SUPPORTED_MODULES, unwrap_if_wrapper + +from .variables import ( + BuiltinVariable, + FunctorchHigherOrderVariable, + NestedUserFunctionVariable, + SkipFunctionVariable, + TorchInGraphFunctionVariable, + UserFunctionVariable, + UserMethodVariable, +) + +from .variables.base 
import VariableTracker + + +""" +Map of function objects to their tracing rules (Dynamo variables). +* TorchInGraphFunctionVariable: The functions should be put into the FX graph or can be constant folded. E.g., + - torch.add: should be put into the FX graph. + - torch.is_floating_point: constant folded. +* SkipFunctionVariable: The objects should be skipped from tracing. +* UserFunctionVariable: The functions should be inlined. + +For developers: If you add/remove a torch level API, it may trigger failures from +test/dynamo/test_trace_rules.py:test_torch_name_rule_map_updated. To fix the failures: +If you are adding a new torch level API or Dynamo implementation: +* Add the name with the corresponding tracing rule to this map + if you are adding a new in graph function or Dynamo implementation for an existing function. +* Remove the object name from test/dynamo/test_trace_rules.ignored_c_binding_in_graph_function_names if it's there. + +If you are removing an existing torch level API: +* Remove the entry represented the API from this map or test/dynamo/test_trace_rules.ignored_c_binding_in_graph_function_names + depends on where it is. + + +""" +manual_torch_name_rule_map = { + "torch.onnx.is_in_onnx_export": TorchInGraphFunctionVariable, + "torch.onnx.operators.shape_as_tensor": TorchInGraphFunctionVariable, + "torch.overrides.is_tensor_like": TorchInGraphFunctionVariable, + "torch.jit.is_scripting": TorchInGraphFunctionVariable, + "torch.jit.is_tracing": TorchInGraphFunctionVariable, + "torch.jit.annotate": TorchInGraphFunctionVariable, + "torch.distributed.is_available": TorchInGraphFunctionVariable, + "torch.distributed.is_initialized": TorchInGraphFunctionVariable, + "torch.distributed.get_rank": TorchInGraphFunctionVariable, + "torch.distributed.get_world_size": TorchInGraphFunctionVariable, + "torch.distributed._tensor.api.DTensor#from_local": TorchInGraphFunctionVariable, + "torch.distributed.distributed_c10d._get_group_size_by_name": TorchInGraphFunctionVariable, + "torch.distributed.distributed_c10d._resolve_group_name_by_ranks_and_tag": TorchInGraphFunctionVariable, + "torch.distributed.distributed_c10d._get_group_tag": TorchInGraphFunctionVariable, + "torch.distributed.distributed_c10d.get_process_group_ranks": TorchInGraphFunctionVariable, + "torch._utils.is_compiling": TorchInGraphFunctionVariable, + "torch.overrides.get_default_nowrap_functions": TorchInGraphFunctionVariable, + "torch.fx._symbolic_trace.is_fx_tracing": TorchInGraphFunctionVariable, + "torch._dynamo.external_utils.is_compiling": TorchInGraphFunctionVariable, + "torch.compiler.is_compiling": TorchInGraphFunctionVariable, + "torch.compiler.is_dynamo_compiling": TorchInGraphFunctionVariable, + "torch.autograd._profiler_enabled": SkipFunctionVariable, + # We graph break on RNG state setters or getters like + # `torch.get_rng_state` or `torch.set_rng_state`. These functions + # are not aten operations and therefore they are completely ignored + # by the AOT dispatcher. As a result, the AOT graph does not have + # these setter or getter functions, producing an incorrect graph + # when it comes to rng states. 
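+    # Illustration only (not part of this table): since these RNG entries map to
+    # SkipFunctionVariable, Dynamo graph-breaks when a compiled function touches
+    # the RNG state, e.g.
+    #
+    #     @torch.compile
+    #     def f(x):
+    #         state = torch.get_rng_state()  # graph break here
+    #         return x + 1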
+ "torch.default_generator#get_state": SkipFunctionVariable, + "torch._C.Generator#get_state": SkipFunctionVariable, + "torch.get_rng_state": SkipFunctionVariable, + "torch.cuda.get_rng_state": SkipFunctionVariable, + "torch.default_generator#set_state": SkipFunctionVariable, + "torch._C.Generator#set_state": SkipFunctionVariable, + "torch.set_rng_state": SkipFunctionVariable, + "torch.cuda.set_rng_state": SkipFunctionVariable, + # https://github.com/pytorch/pytorch/issues/107187 + "torch.manual_seed": SkipFunctionVariable, + # https://github.com/pytorch/pytorch/issues/93501 + "torch.nn.utils.rnn.pack_padded_sequence": SkipFunctionVariable, + "torch.nn.Parameter": TorchInGraphFunctionVariable, + "torch._nested_tensor_from_mask": SkipFunctionVariable, + "torch._nested_from_padded": SkipFunctionVariable, + # symbol operators implemented in Python + "torch.sym_not": TorchInGraphFunctionVariable, + "torch.sym_float": TorchInGraphFunctionVariable, + "torch.sym_int": TorchInGraphFunctionVariable, + "torch.sym_max": TorchInGraphFunctionVariable, + "torch.sym_min": TorchInGraphFunctionVariable, + "torch.sym_sqrt": TorchInGraphFunctionVariable, + "torch.sym_ite": TorchInGraphFunctionVariable, + "torch.Tensor#_make_wrapper_subclass": SkipFunctionVariable, + "torch.Tensor#__init__": SkipFunctionVariable, + "torch.cuda.set_device": SkipFunctionVariable, + "torch.cuda.current_device": SkipFunctionVariable, + "torch._C.autocast_decrement_nesting": SkipFunctionVariable, + "torch._C.autocast_increment_nesting": SkipFunctionVariable, + "torch.autograd.grad": SkipFunctionVariable, + "torch._C.clear_autocast_cache": SkipFunctionVariable, + "torch.distributions.constraints.is_dependent": SkipFunctionVariable, + "torch.jit.isinstance": SkipFunctionVariable, + "torch._C.set_anomaly_enabled": SkipFunctionVariable, + "torch._C.set_autocast_cache_enabled": SkipFunctionVariable, + "torch._C.set_autocast_cpu_dtype": SkipFunctionVariable, + "torch._C.set_autocast_cpu_enabled": SkipFunctionVariable, + "torch._C.set_autocast_enabled": SkipFunctionVariable, + "torch._C.set_autocast_gpu_dtype": SkipFunctionVariable, + "torch._C.set_autocast_ipu_dtype": SkipFunctionVariable, + "torch._C.set_autocast_ipu_enabled": SkipFunctionVariable, + "torch._C.set_autocast_xla_dtype": SkipFunctionVariable, + "torch._C.set_autocast_xla_enabled": SkipFunctionVariable, + "torch.resize_as_": SkipFunctionVariable, + "torch.resize_as_sparse_": SkipFunctionVariable, + "torch.get_default_device": TorchInGraphFunctionVariable, + # functorch/vmap + "torch._functorch.vmap._check_int_or_none": UserFunctionVariable, + "torch._functorch.vmap._check_out_dims_is_int_or_int_pytree": UserFunctionVariable, + "torch._functorch.vmap._check_randomness_arg": UserFunctionVariable, + "torch._functorch.vmap._chunked_vmap": UserFunctionVariable, + "torch._functorch.vmap._concat_chunked_outputs": UserFunctionVariable, + "torch._functorch.vmap._create_batched_inputs": UserFunctionVariable, + "torch._functorch.vmap._flat_vmap": UserFunctionVariable, + "torch._functorch.vmap._flatten_chunks_output": UserFunctionVariable, + "torch._functorch.vmap._get_chunked_inputs": UserFunctionVariable, + "torch._functorch.vmap._get_name": UserFunctionVariable, + "torch._functorch.vmap._maybe_remove_batch_dim": UserFunctionVariable, + "torch._functorch.vmap._num_outputs": UserFunctionVariable, + "torch._functorch.vmap._process_batched_inputs": UserFunctionVariable, + "torch._functorch.vmap._unwrap_batched": UserFunctionVariable, + 
"torch._functorch.vmap._validate_and_get_batch_size": UserFunctionVariable, + "torch._functorch.vmap.doesnt_support_saved_tensors_hooks": UserFunctionVariable, + "torch._functorch.vmap.get_chunk_sizes": UserFunctionVariable, + # lazy_load_decompositions uses a lock that is not supported yet in dynamo + # "torch._functorch.vmap.lazy_load_decompositions": UserFunctionVariable, + "torch._functorch.vmap.restore_vmap": UserFunctionVariable, + "torch._functorch.apis.vmap": UserFunctionVariable, + "torch._functorch.vmap.unwrap_batched": UserFunctionVariable, + "torch._functorch.vmap.vmap_impl": FunctorchHigherOrderVariable, + "torch._functorch.vmap.wrap_batched": UserFunctionVariable, + # functorch/grad + "torch._functorch.eager_transforms.grad_impl": FunctorchHigherOrderVariable, + "torch._functorch.apis.grad_and_value": UserFunctionVariable, + "torch._functorch.eager_transforms._as_tuple": UserFunctionVariable, + "torch._functorch.eager_transforms._check_unique_non_empty": UserFunctionVariable, + "torch._functorch.eager_transforms._create_differentiable": UserFunctionVariable, + "torch._functorch.eager_transforms._slice_argnums": UserFunctionVariable, + "torch._functorch.eager_transforms._undo_create_differentiable": UserFunctionVariable, + "torch._functorch.eager_transforms._validate_and_wrap_argnum": UserFunctionVariable, + "torch._functorch.eager_transforms._validate_and_wrap_argnums": UserFunctionVariable, + "torch._functorch.eager_transforms._wrap_all_tensors": UserFunctionVariable, + "torch._functorch.eager_transforms._wrap_tensor_for_grad": UserFunctionVariable, + # functorch/jacrev + "torch._functorch.eager_transforms.jacrev": UserFunctionVariable, + "torch._functorch.eager_transforms.error_if_complex": UserFunctionVariable, + "torch._functorch.eager_transforms._chunked_standard_basis_for_": UserFunctionVariable, + "torch._functorch.eager_transforms._safe_zero_index": UserFunctionVariable, + # functorch/vjp + "torch._functorch.eager_transforms.vjp": UserFunctionVariable, + "torch._functorch.eager_transforms._vjp_with_argnums": UserFunctionVariable, + "torch._functorch.eager_transforms.assert_non_empty_tensor_output": UserFunctionVariable, + "torch._constrain_as_size": UserFunctionVariable, + "torch._constrain_as_value": UserFunctionVariable, + "torch._tensor._convert": UserFunctionVariable, + "torch.jit._unwrap_optional": UserFunctionVariable, + "torch.backends.mha.get_fastpath_enabled": UserFunctionVariable, + "torch._C._functorch._add_batch_dim": TorchInGraphFunctionVariable, + "torch._C._functorch._remove_batch_dim": TorchInGraphFunctionVariable, + "torch._C._functorch._wrap_for_grad": TorchInGraphFunctionVariable, + "torch._C._functorch._unwrap_for_grad": TorchInGraphFunctionVariable, + "torch._C._functorch.is_batchedtensor": TorchInGraphFunctionVariable, + "torch._dynamo.mark_static": UserFunctionVariable, + "torch.fx.experimental.symbolic_shapes.guard_size_oblivious": TorchInGraphFunctionVariable, + "torch.cuda._get_device_properties": TorchInGraphFunctionVariable, + "torch.utils.hooks.BackwardHook": TorchInGraphFunctionVariable, + "torch.sparse_bsc_tensor": SkipFunctionVariable, + "torch.sparse_bsr_tensor": SkipFunctionVariable, + "torch.sparse_csc_tensor": SkipFunctionVariable, + "torch.sparse_csr_tensor": SkipFunctionVariable, + "torch.sparse_compressed_tensor": SkipFunctionVariable, + "torch._C._autograd._unsafe_set_version_counter": TorchInGraphFunctionVariable, +} + + +# In graph functions (including constant folding) that are C bindings +torch_c_binding_in_graph_functions = 
dict.fromkeys( + [ + "math.acos", + "math.acosh", + "math.asin", + "math.asinh", + "math.atan", + "math.atan2", + "math.atanh", + "math.ceil", + "math.comb", + "math.copysign", + "math.cos", + "math.cosh", + "math.degrees", + "math.dist", + "math.erf", + "math.erfc", + "math.exp", + "math.expm1", + "math.fabs", + "math.factorial", + "math.floor", + "math.fmod", + "math.frexp", + "math.fsum", + "math.gamma", + "math.gcd", + "math.hypot", + "math.isclose", + "math.isfinite", + "math.isinf", + "math.isnan", + "math.isqrt", + "math.ldexp", + "math.lgamma", + "math.log", + "math.log10", + "math.log1p", + "math.log2", + "math.modf", + "math.nextafter", + "math.perm", + "math.pow", + "math.prod", + "math.radians", + "math.remainder", + "math.sin", + "math.sinh", + "math.tan", + "math.tanh", + "math.trunc", + "math.ulp", + "torch._adaptive_avg_pool2d", + "torch._adaptive_avg_pool3d", + "torch._add_batch_dim", + "torch._add_relu_", + "torch._add_relu", + "torch._addmm_activation", + "torch._aminmax", + "torch._amp_foreach_non_finite_check_and_unscale_", + "torch._amp_update_scale_", + "torch._assert_async", + "torch._assert_tensor_metadata", + "torch._batch_norm_impl_index", + "torch._C._activate_cuda_trace", + "torch._C._add_cached_tensor", + "torch._C._add_docstr", + "torch._C._are_functorch_transforms_active", + "torch._C._autograd_init", + "torch._C._awaitable_nowait", + "torch._C._awaitable_wait", + "torch._C._awaitable", + "torch._C._backport_for_mobile_from_buffer_to_buffer", + "torch._C._backport_for_mobile_from_buffer", + "torch._C._backport_for_mobile_to_buffer", + "torch._C._backport_for_mobile", + "torch._C._broadcast_coalesced", + "torch._C._broadcast_out", + "torch._C._broadcast", + "torch._C._c10d_init", + "torch._C._calculate_package_version_based_on_upgraders", + "torch._C._can_use_flash_attention", + "torch._C._can_use_mem_efficient_attention", + "torch._C._check_onnx_proto", + "torch._C._check_sparse_tensor_invariants", + "torch._C._collect_all", + "torch._C._commit_update", + "torch._C._compile_graph_to_code_table", + "torch._C._construct_CUDA_Tensor_From_Storage_And_Metadata", + "torch._C._construct_storage_from_data_pointer", + "torch._C._conv_determine_backend_memory_format", + "torch._C._cpu._is_cpu_support_vnni", + "torch._C._crash_if_aten_asan", + "torch._C._crash_if_csrc_asan", + "torch._C._crash_if_csrc_ubsan", + "torch._C._crash_if_debug_asserts_fail", + "torch._C._crash_if_vptr_ubsan", + "torch._C._create_function_from_graph", + "torch._C._create_function_from_trace_with_dict", + "torch._C._create_function_from_trace", + "torch._C._create_graph_by_tracing", + "torch._C._create_module_with_type", + "torch._C._create_object_with_type", + "torch._C._cuda_attach_out_of_memory_observer", + "torch._C._cuda_beginAllocateCurrentStreamToPool", + "torch._C._cuda_canDeviceAccessPeer", + "torch._C._cuda_changeCurrentAllocator", + "torch._C._cuda_checkPoolLiveAllocations", + "torch._C._cuda_clearCublasWorkspaces", + "torch._C._cuda_cudaCachingAllocator_raw_alloc", + "torch._C._cuda_cudaCachingAllocator_raw_delete", + "torch._C._cuda_cudaCachingAllocator_set_allocator_settings", + "torch._C._cuda_cudaHostAllocator", + "torch._C._cuda_customAllocator", + "torch._C._cuda_emptyCache", + "torch._C._cuda_endAllocateCurrentStreamToPool", + "torch._C._cuda_exchangeDevice", + "torch._C._cuda_get_conv_benchmark_empty_cache", + "torch._C._cuda_get_cudnn_benchmark_limit", + "torch._C._cuda_get_sync_debug_mode", + "torch._C._cuda_getAllocator", + "torch._C._cuda_getAllocatorBackend", + 
"torch._C._cuda_getArchFlags", + "torch._C._cuda_getCheckpointState", + "torch._C._cuda_getCompiledVersion", + "torch._C._cuda_getCurrentBlasHandle", + "torch._C._cuda_getCurrentRawStream", + "torch._C._cuda_getCurrentStream", + "torch._C._cuda_getDefaultStream", + "torch._C._cuda_getDevice", + "torch._C._cuda_getDeviceCount", + "torch._C._cuda_hasPrimaryContext", + "torch._C._cuda_init", + "torch._C._cuda_ipc_collect", + "torch._C._cuda_isCurrentStreamCapturing", + "torch._C._cuda_isHistoryEnabled", + "torch._C._cuda_isInBadFork", + "torch._C._cuda_jiterator_compile_and_launch_kernel", + "torch._C._cuda_lock_mutex", + "torch._C._cuda_maybeExchangeDevice", + "torch._C._cuda_memorySnapshot", + "torch._C._cuda_memoryStats", + "torch._C._cuda_record_memory_history_legacy", + "torch._C._cuda_record_memory_history", + "torch._C._cuda_releasePool", + "torch._C._cuda_resetAccumulatedMemoryStats", + "torch._C._cuda_resetPeakMemoryStats", + "torch._C._cuda_set_cudnn_benchmark_limit", + "torch._C._cuda_set_sync_debug_mode", + "torch._C._cuda_setCheckpointPoolState", + "torch._C._cuda_setDevice", + "torch._C._cuda_setMemoryFraction", + "torch._C._cuda_setStream", + "torch._C._cuda_sleep", + "torch._C._cuda_synchronize", + "torch._C._cuda_unlock_mutex", + "torch._C._cudnn_set_conv_benchmark_empty_cache", + "torch._C._cudnn.getCompileVersion", + "torch._C._cudnn.getRuntimeVersion", + "torch._C._cudnn.getVersionInt", + "torch._C._current_autograd_node", + "torch._C._current_graph_task_execution_order", + "torch._C._current_graph_task_id", + "torch._C._cxx_flags", + "torch._C._debug_get_fusion_group_inlining", + "torch._C._debug_only_are_vmap_fallback_warnings_enabled", + "torch._C._debug_only_display_vmap_fallback_warnings", + "torch._C._debug_set_autodiff_subgraph_inlining", + "torch._C._debug_set_fusion_group_inlining", + "torch._C._demangle", + "torch._C._disabled_torch_dispatch_impl", + "torch._C._disabled_torch_function_impl", + "torch._C._dispatch_call_boxed", + "torch._C._dispatch_check_all_invariants", + "torch._C._dispatch_check_invariants", + "torch._C._dispatch_dump_table", + "torch._C._dispatch_dump", + "torch._C._dispatch_find_dangling_impls", + "torch._C._dispatch_find_schema_or_throw", + "torch._C._dispatch_get_all_op_names", + "torch._C._dispatch_get_backend_keyset_from_autograd", + "torch._C._dispatch_get_registrations_for_dispatch_key", + "torch._C._dispatch_has_backend_fallback", + "torch._C._dispatch_has_computed_kernel_for_dispatch_key", + "torch._C._dispatch_has_kernel_for_any_dispatch_key", + "torch._C._dispatch_has_kernel_for_dispatch_key", + "torch._C._dispatch_has_kernel", + "torch._C._dispatch_is_alias_key", + "torch._C._dispatch_is_included_in_alias", + "torch._C._dispatch_is_main_interpreter", + "torch._C._dispatch_isTensorSubclassLike", + "torch._C._dispatch_key_for_device", + "torch._C._dispatch_key_name", + "torch._C._dispatch_key_parse", + "torch._C._dispatch_key_set", + "torch._C._dispatch_keys", + "torch._C._dispatch_keyset_full_after", + "torch._C._dispatch_keyset_full", + "torch._C._dispatch_keyset_to_string", + "torch._C._dispatch_library", + "torch._C._dispatch_num_backends", + "torch._C._dispatch_print_registrations_for_dispatch_key", + "torch._C._dispatch_pystub", + "torch._C._dispatch_set_report_error_callback", + "torch._C._dispatch_tls_is_dispatch_key_excluded", + "torch._C._dispatch_tls_is_dispatch_key_included", + "torch._C._dispatch_tls_local_exclude_set", + "torch._C._dispatch_tls_local_include_set", + "torch._C._dispatch_tls_set_dispatch_key_excluded", + 
"torch._C._dispatch_tls_set_dispatch_key_included", + "torch._C._dist_autograd_init", + "torch._C._dump_local_tls_set", + "torch._C._dump_upgraders_map", + "torch._C._enable_mobile_interface_call_export", + "torch._C._enter_dual_level", + "torch._C._error_if_any_worker_fails", + "torch._C._exit_dual_level", + "torch._C._export_operator_list", + "torch._C._export_opnames", + "torch._C._faulty_agent_init", + "torch._C._fft.fft_fft", + "torch._C._fft.fft_fft2", + "torch._C._fft.fft_fftfreq", + "torch._C._fft.fft_fftn", + "torch._C._fft.fft_fftshift", + "torch._C._fft.fft_hfft", + "torch._C._fft.fft_hfft2", + "torch._C._fft.fft_hfftn", + "torch._C._fft.fft_ifft", + "torch._C._fft.fft_ifft2", + "torch._C._fft.fft_ifftn", + "torch._C._fft.fft_ifftshift", + "torch._C._fft.fft_ihfft", + "torch._C._fft.fft_ihfft2", + "torch._C._fft.fft_ihfftn", + "torch._C._fft.fft_irfft", + "torch._C._fft.fft_irfft2", + "torch._C._fft.fft_irfftn", + "torch._C._fft.fft_rfft", + "torch._C._fft.fft_rfft2", + "torch._C._fft.fft_rfftfreq", + "torch._C._fft.fft_rfftn", + "torch._C._free_And_Remove_DeleterFn", + "torch._C._freeze_module", + "torch._C._from_dlpack", + "torch._C._functionality_to_backend_keys", + "torch._C._functionalization_reapply_views_tls", + "torch._C._fuse_to_static_module", + "torch._C._gather_out", + "torch._C._gather", + "torch._C._generate_upgraders_graph", + "torch._C._get_autograd_fallback_mode", + "torch._C._get_backcompat_broadcast_warn", + "torch._C._get_backcompat_keepdim_warn", + "torch._C._get_caught_jit_exception_class_name", + "torch._C._get_caught_jit_exception_original_msg", + "torch._C._get_constant_bool_symnode", + "torch._C._get_cpp_backtrace", + "torch._C._get_cpu_capability", + "torch._C._get_cublas_allow_bf16_reduced_precision_reduction", + "torch._C._get_cublas_allow_fp16_reduced_precision_reduction", + "torch._C._get_cublas_allow_tf32", + "torch._C._get_cudnn_allow_tf32", + "torch._C._get_cudnn_benchmark", + "torch._C._get_cudnn_deterministic", + "torch._C._get_cudnn_enabled", + "torch._C._get_custom_class_python_wrapper", + "torch._C._get_default_device", + "torch._C._get_deterministic_algorithms_warn_only", + "torch._C._get_deterministic_algorithms", + "torch._C._get_deterministic_fill_uninitialized_memory", + "torch._C._get_dispatch_mode", + "torch._C._get_dispatch_stack_at", + "torch._C._get_file_format", + "torch._C._get_flash_sdp_enabled", + "torch._C._get_float32_matmul_precision", + "torch._C._get_function_stack_at", + "torch._C._get_graph_executor_optimize", + "torch._C._get_linalg_preferred_backend", + "torch._C._get_math_sdp_enabled", + "torch._C._get_max_operator_version", + "torch._C._get_mem_efficient_sdp_enabled", + "torch._C._get_mkldnn_enabled", + "torch._C._get_cudnn_sdp_enabled", + "torch._C._set_sdp_use_cudnn", + "torch._C._get_mobile_model_contained_types_from_buffer", + "torch._C._get_mobile_model_contained_types", + "torch._C._get_model_bytecode_version_from_buffer", + "torch._C._get_model_bytecode_version", + "torch._C._get_model_extra_files_from_buffer", + "torch._C._get_model_extra_files", + "torch._C._get_model_ops_and_info_from_buffer", + "torch._C._get_model_ops_and_info", + "torch._C._get_module_info_from_flatbuffer", + "torch._C._get_nnpack_enabled", + "torch._C._get_obj_in_tls", + "torch._C._get_operation_overload", + "torch._C._get_operator_version_map", + "torch._C._get_privateuse1_backend_name", + "torch._C._get_qengine", + "torch._C._get_schema", + "torch._C._get_nested_int", + "torch._C._get_tensor_metadata", + 
"torch._C._get_tracing_state", + "torch._C._get_upgrader_ranges", + "torch._C._get_upgraders_entry_map", + "torch._C._get_upgraders_map_size", + "torch._C._get_value_trace", + "torch._C._get_version_calculator_flag", + "torch._C._get_warnAlways", + "torch._C._graph_pool_handle", + "torch._C._group_tensors_by_device_and_dtype", + "torch._C._hack_do_not_use_clone_module_with_class", + "torch._C._has_distributed", + "torch._C._has_Standard_Deleter", + "torch._C._has_storage", + "torch._C._has_tensorexpr_cpp_tests", + "torch._C._run_tensorexpr_cpp_tests", + "torch._C._has_torch_function_unary", + "torch._C._has_torch_function_variadic", + "torch._C._has_torch_function", + "torch._C._import_ir_module_from_package", + "torch._C._increment_version", + "torch._C._infer_size", + "torch._C._init_names", + "torch._C._initExtension", + "torch._C._is_alias_of", + "torch._C._is_any_autocast_enabled", + "torch._C._is_cached_tensor", + "torch._C._is_fwd_grad_enabled", + "torch._C._is_key_in_tls", + "torch._C._is_multithreading_enabled", + "torch._C._is_torch_function_enabled", + "torch._C._is_torch_function_mode_enabled", + "torch._C._is_tracing", + "torch._C._is_view_replay_enabled", + "torch._C._is_xnnpack_enabled", + "torch._C._itt.is_available", + "torch._C._itt.mark", + "torch._C._itt.rangePop", + "torch._C._itt.rangePush", + "torch._C._ivalue_debug_python_object", + "torch._C._ivalue_tags_match", + "torch._C._jit_assert_is_instance", + "torch._C._jit_can_fuse_on_cpu_legacy", + "torch._C._jit_can_fuse_on_cpu", + "torch._C._jit_can_fuse_on_gpu", + "torch._C._jit_cat_wo_conditionals", + "torch._C._jit_check_alias_annotation", + "torch._C._jit_clear_class_registry", + "torch._C._jit_debug_fuser_num_cached_kernel_specs", + "torch._C._jit_debug_module_iterators", + "torch._C._jit_decay_packed_param_input_types", + "torch._C._jit_decomposition_graph_for_node", + "torch._C._jit_differentiate", + "torch._C._jit_erase_non_input_shape_information", + "torch._C._jit_flatten", + "torch._C._jit_fuser_get_fused_kernel_code", + "torch._C._jit_get_all_schemas", + "torch._C._jit_get_custom_class_schemas", + "torch._C._jit_get_emit_hooks", + "torch._C._jit_get_inline_everything_mode", + "torch._C._jit_get_logging_option", + "torch._C._jit_get_num_profiled_runs", + "torch._C._jit_get_operation", + "torch._C._jit_get_schemas_for_operator", + "torch._C._jit_get_te_cuda_pointwise_block_count", + "torch._C._jit_get_te_cuda_pointwise_block_size", + "torch._C._jit_get_te_cuda_pointwise_loop_levels", + "torch._C._jit_get_te_generate_block_code", + "torch._C._jit_get_te_must_use_llvm_cpu", + "torch._C._jit_get_tracer_state_warn", + "torch._C._jit_has_cpp_tests", + "torch._C._jit_init", + "torch._C._jit_interpret_graph", + "torch._C._jit_is_onnx_log_enabled", + "torch._C._jit_is_script_object", + "torch._C._jit_llga_enabled", + "torch._C._jit_nvfuser_can_be_enabled", + "torch._C._jit_nvfuser_clear_comparison_callback", + "torch._C._jit_nvfuser_enabled", + "torch._C._jit_nvfuser_horizontal_mode", + "torch._C._jit_nvfuser_set_comparison_callback", + "torch._C._jit_nvfuser_single_node_mode", + "torch._C._jit_object_is_non_holding", + "torch._C._jit_onnx_convert_pattern_from_subblock", + "torch._C._jit_onnx_create_full_scope_name", + "torch._C._jit_onnx_list_model_parameters", + "torch._C._jit_onnx_log", + "torch._C._jit_opt_conditionals", + "torch._C._jit_override_can_fuse_on_cpu_legacy", + "torch._C._jit_override_can_fuse_on_cpu", + "torch._C._jit_override_can_fuse_on_gpu", + "torch._C._jit_pass_autocast", + 
"torch._C._jit_pass_batch_mm", + "torch._C._jit_pass_canonicalize_graph_fuser_ops", + "torch._C._jit_pass_canonicalize", + "torch._C._jit_pass_complete_shape_analysis", + "torch._C._jit_pass_concat_frozen_linear", + "torch._C._jit_pass_constant_loop_unrolling", + "torch._C._jit_pass_constant_pooling", + "torch._C._jit_pass_constant_propagation_immutable_types", + "torch._C._jit_pass_constant_propagation", + "torch._C._jit_pass_convert_frozen_ops_to_mkldnn", + "torch._C._jit_pass_create_autodiff_subgraphs", + "torch._C._jit_pass_create_functional_graphs", + "torch._C._jit_pass_cse", + "torch._C._jit_pass_custom_pattern_based_rewrite_graph", + "torch._C._jit_pass_custom_pattern_based_rewrite", + "torch._C._jit_pass_dbr_quant_remove_redundant_aliases", + "torch._C._jit_pass_dce_allow_deleting_nodes_with_side_effects", + "torch._C._jit_pass_dce", + "torch._C._jit_pass_decompose_ops", + "torch._C._jit_pass_dedup_module_uses", + "torch._C._jit_pass_erase_number_types", + "torch._C._jit_pass_erase_shape_information", + "torch._C._jit_pass_filter_non_tensor_arguments", + "torch._C._jit_pass_fixup_onnx_controlflow_node", + "torch._C._jit_pass_fold_convbn", + "torch._C._jit_pass_fold_frozen_conv_add_or_sub", + "torch._C._jit_pass_fold_frozen_conv_bn", + "torch._C._jit_pass_fold_frozen_conv_mul_or_div", + "torch._C._jit_pass_fold_frozen_linear_bn", + "torch._C._jit_pass_fold_prepacking_ops", + "torch._C._jit_pass_functional_to_inplace_activation", + "torch._C._jit_pass_fuse_add_relu", + "torch._C._jit_pass_fuse_addmm", + "torch._C._jit_pass_fuse_clamp_w_prepacked_linear_conv", + "torch._C._jit_pass_fuse_frozen_conv_add_relu", + "torch._C._jit_pass_fuse_linear", + "torch._C._jit_pass_fuse_quantized_add_relu", + "torch._C._jit_pass_fuse_tensorexprs", + "torch._C._jit_pass_fuse", + "torch._C._jit_pass_inline_fork_wait", + "torch._C._jit_pass_inline_functional_graphs", + "torch._C._jit_pass_inline", + "torch._C._jit_pass_inplace_to_functional_activation", + "torch._C._jit_pass_insert_observer_method_for_ondevice_ptq", + "torch._C._jit_pass_insert_observers", + "torch._C._jit_pass_insert_prepack_unpack", + "torch._C._jit_pass_insert_prepacked_ops", + "torch._C._jit_pass_insert_quant_dequant_for_ondevice_ptq", + "torch._C._jit_pass_insert_quant_dequant", + "torch._C._jit_pass_integer_value_refinement", + "torch._C._jit_pass_lint", + "torch._C._jit_pass_loop_unrolling", + "torch._C._jit_pass_lower_all_tuples", + "torch._C._jit_pass_lower_graph", + "torch._C._jit_pass_metal_fold_prepacking_ops", + "torch._C._jit_pass_metal_fuse_clamp_w_prepacked_conv", + "torch._C._jit_pass_metal_insert_prepacked_ops", + "torch._C._jit_pass_metal_optimize_for_mobile", + "torch._C._jit_pass_onnx_assign_output_shape", + "torch._C._jit_pass_onnx_assign_scoped_names_for_node_and_value", + "torch._C._jit_pass_onnx_autograd_function_process", + "torch._C._jit_pass_onnx_block", + "torch._C._jit_pass_onnx_cast_all_constant_to_floating", + "torch._C._jit_pass_onnx_clear_scope_records", + "torch._C._jit_pass_onnx_constant_fold", + "torch._C._jit_pass_onnx_deduplicate_initializers", + "torch._C._jit_pass_onnx_eliminate_unused_items", + "torch._C._jit_pass_onnx_eval_peephole", + "torch._C._jit_pass_onnx_function_extraction", + "torch._C._jit_pass_onnx_function_substitution", + "torch._C._jit_pass_onnx_graph_shape_type_inference", + "torch._C._jit_pass_onnx_lint", + "torch._C._jit_pass_onnx_node_shape_type_inference", + "torch._C._jit_pass_onnx_peephole", + "torch._C._jit_pass_onnx_preprocess_caffe2", + 
"torch._C._jit_pass_onnx_preprocess", + "torch._C._jit_pass_onnx_quantization_insert_permutes", + "torch._C._jit_pass_onnx_remove_inplace_ops_for_onnx", + "torch._C._jit_pass_onnx_remove_print", + "torch._C._jit_pass_onnx_scalar_type_analysis", + "torch._C._jit_pass_onnx_set_dynamic_input_shape", + "torch._C._jit_pass_onnx_track_scope_attributes", + "torch._C._jit_pass_onnx_unpack_quantized_weights", + "torch._C._jit_pass_onnx", + "torch._C._jit_pass_optimize_for_inference", + "torch._C._jit_pass_optimize_for_mobile", + "torch._C._jit_pass_optimize_frozen_graph", + "torch._C._jit_pass_pattern_based_rewrite", + "torch._C._jit_pass_peephole_list_idioms", + "torch._C._jit_pass_peephole", + "torch._C._jit_pass_prepare_division_for_onnx", + "torch._C._jit_pass_propagate_device", + "torch._C._jit_pass_propagate_dtype", + "torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute", + "torch._C._jit_pass_propagate_shapes_on_graph", + "torch._C._jit_pass_quant_finalize_for_ondevice_ptq", + "torch._C._jit_pass_quant_finalize", + "torch._C._jit_pass_quant_fusion", + "torch._C._jit_pass_refine_integer_values", + "torch._C._jit_pass_refine_tuple_types", + "torch._C._jit_pass_remove_dropout", + "torch._C._jit_pass_remove_expands", + "torch._C._jit_pass_remove_inplace_ops", + "torch._C._jit_pass_remove_mutation", + "torch._C._jit_pass_replace_old_ops_with_upgraders", + "torch._C._jit_pass_replicate_dequantize", + "torch._C._jit_pass_run_decompositions", + "torch._C._jit_pass_specialize_autogradzero", + "torch._C._jit_pass_swap_functional_linear", + "torch._C._jit_pass_transform_conv1d_to_conv2d", + "torch._C._jit_pass_transpose_frozen_linear", + "torch._C._jit_pass_vulkan_fold_prepacking_ops", + "torch._C._jit_pass_vulkan_fuse_clamp_w_prepacked_conv", + "torch._C._jit_pass_vulkan_insert_prepacked_ops", + "torch._C._jit_pass_vulkan_optimize_for_mobile", + "torch._C._jit_register_decomposition_for_schema", + "torch._C._jit_register_shape_compute_graph_for_node", + "torch._C._jit_resolve_packet", + "torch._C._jit_run_cpp_tests", + "torch._C._jit_script_class_compile", + "torch._C._jit_script_compile_overload", + "torch._C._jit_script_compile", + "torch._C._jit_script_interface_compile", + "torch._C._jit_set_autocast_mode", + "torch._C._jit_set_bailout_depth", + "torch._C._jit_set_emit_hooks", + "torch._C._jit_set_fusion_strategy", + "torch._C._jit_set_inline_everything_mode", + "torch._C._jit_set_llga_enabled", + "torch._C._jit_set_logging_option", + "torch._C._jit_set_logging_stream", + "torch._C._jit_set_num_profiled_runs", + "torch._C._jit_set_nvfuser_enabled", + "torch._C._jit_set_nvfuser_guard_mode", + "torch._C._jit_set_nvfuser_horizontal_mode", + "torch._C._jit_set_nvfuser_single_node_mode", + "torch._C._jit_set_nvfuser_skip_node_kind", + "torch._C._jit_set_onnx_log_enabled", + "torch._C._jit_set_onnx_log_output_stream", + "torch._C._jit_set_profiling_executor", + "torch._C._jit_set_profiling_mode", + "torch._C._jit_set_symbolic_shapes_test_mode", + "torch._C._jit_set_te_cuda_pointwise_block_count", + "torch._C._jit_set_te_cuda_pointwise_block_size", + "torch._C._jit_set_te_cuda_pointwise_loop_levels", + "torch._C._jit_set_te_generate_block_code", + "torch._C._jit_set_te_must_use_llvm_cpu", + "torch._C._jit_set_texpr_dynamic_shape_enabled", + "torch._C._jit_set_texpr_fuser_enabled", + "torch._C._jit_set_texpr_reductions_enabled", + "torch._C._jit_set_tracer_state_warn", + "torch._C._jit_set_utf8_decoding_ignore", + "torch._C._jit_shape_compute_graph_for_node", + 
"torch._C._jit_symbolic_shapes_test_mode_enabled", + "torch._C._jit_texpr_dynamic_shape_enabled", + "torch._C._jit_texpr_fallback_allowed", + "torch._C._jit_texpr_fuser_enabled", + "torch._C._jit_texpr_reductions_enabled", + "torch._C._jit_texpr_set_fallback_allowed", + "torch._C._jit_to_backend_selective", + "torch._C._jit_to_backend", + "torch._C._jit_to_static_module", + "torch._C._jit_trace_graph", + "torch._C._jit_trace_module", + "torch._C._jit_tree_views.FalseLiteral", + "torch._C._jit_tree_views.NoneLiteral", + "torch._C._jit_tree_views.TrueLiteral", + "torch._C._jit_try_infer_type", + "torch._C._jit_unflatten", + "torch._C._last_executed_optimized_graph", + "torch._C._len_torch_dispatch_stack", + "torch._C._len_torch_function_stack", + "torch._C._linalg._linalg_eigvals", + "torch._C._linalg.linalg_cholesky_ex", + "torch._C._linalg.linalg_cholesky", + "torch._C._linalg.linalg_cond", + "torch._C._linalg.linalg_cross", + "torch._C._linalg.linalg_det", + "torch._C._linalg.linalg_diagonal", + "torch._C._linalg.linalg_eig", + "torch._C._linalg.linalg_eigh", + "torch._C._linalg.linalg_eigvals", + "torch._C._linalg.linalg_eigvalsh", + "torch._C._linalg.linalg_householder_product", + "torch._C._linalg.linalg_inv_ex", + "torch._C._linalg.linalg_inv", + "torch._C._linalg.linalg_ldl_factor_ex", + "torch._C._linalg.linalg_ldl_factor", + "torch._C._linalg.linalg_ldl_solve", + "torch._C._linalg.linalg_lstsq", + "torch._C._linalg.linalg_lu_factor_ex", + "torch._C._linalg.linalg_lu_factor", + "torch._C._linalg.linalg_lu_solve", + "torch._C._linalg.linalg_lu", + "torch._C._linalg.linalg_matmul", + "torch._C._linalg.linalg_matrix_exp", + "torch._C._linalg.linalg_matrix_norm", + "torch._C._linalg.linalg_matrix_power", + "torch._C._linalg.linalg_matrix_rank", + "torch._C._linalg.linalg_multi_dot", + "torch._C._linalg.linalg_norm", + "torch._C._linalg.linalg_pinv", + "torch._C._linalg.linalg_qr", + "torch._C._linalg.linalg_slogdet", + "torch._C._linalg.linalg_solve_ex", + "torch._C._linalg.linalg_solve_triangular", + "torch._C._linalg.linalg_solve", + "torch._C._linalg.linalg_svd", + "torch._C._linalg.linalg_svdvals", + "torch._C._linalg.linalg_tensorinv", + "torch._C._linalg.linalg_tensorsolve", + "torch._C._linalg.linalg_vander", + "torch._C._linalg.linalg_vecdot", + "torch._C._linalg.linalg_vector_norm", + "torch._C._llvm_enabled", + "torch._C._load_for_lite_interpreter_from_buffer", + "torch._C._load_for_lite_interpreter", + "torch._C._load_jit_module_from_bytes", + "torch._C._load_jit_module_from_file", + "torch._C._load_mobile_module_from_bytes", + "torch._C._load_mobile_module_from_file", + "torch._C._log_api_usage_metadata", + "torch._C._log_api_usage_once", + "torch._C._logging_set_logger", + "torch._C._meta_in_tls_dispatch_include", + "torch._C._mps_acquireEvent", + "torch._C._mps_currentAllocatedMemory", + "torch._C._mps_deviceSynchronize", + "torch._C._mps_driverAllocatedMemory", + "torch._C._mps_elapsedTimeOfEvents", + "torch._C._mps_emptyCache", + "torch._C._mps_get_default_generator", + "torch._C._mps_is_available", + "torch._C._mps_is_in_bad_fork", + "torch._C._mps_is_on_macos_13_or_newer", + "torch._C._mps_profilerStartTrace", + "torch._C._mps_profilerStopTrace", + "torch._C._mps_queryEvent", + "torch._C._mps_recordEvent", + "torch._C._mps_releaseEvent", + "torch._C._mps_setMemoryFraction", + "torch._C._mps_synchronizeEvent", + "torch._C._mps_waitForEvent", + "torch._C._multiprocessing_init", + "torch._C._nccl_all_gather", + "torch._C._nccl_all_reduce", + "torch._C._nccl_broadcast", + 
"torch._C._nccl_init_rank", + "torch._C._nccl_reduce_scatter", + "torch._C._nccl_reduce", + "torch._C._nccl_unique_id", + "torch._C._nccl_version_suffix", + "torch._C._nccl_version", + "torch._C._nested.nested_tensor", + "torch._C._nested.nested_to_padded_tensor", + "torch._C._new_symbolic_shape_symbol", + "torch._C._nn_module_to_mobile", + "torch._C._nn._conv_depthwise2d", + "torch._C._nn._pad_circular", + "torch._C._nn._pad_enum", + "torch._C._nn._parse_to", + "torch._C._nn._test_ambiguous_defaults", + "torch._C._nn._test_optional_filled_intlist", + "torch._C._nn._test_optional_floatlist", + "torch._C._nn._test_optional_intlist", + "torch._C._nn._test_string_default", + "torch._C._nn._test_warn_in_autograd", + "torch._C._nn._upsample_bicubic2d_aa", + "torch._C._nn._upsample_bilinear2d_aa", + "torch._C._nn._upsample_nearest_exact1d", + "torch._C._nn._upsample_nearest_exact2d", + "torch._C._nn._upsample_nearest_exact3d", + "torch._C._nn.adaptive_avg_pool2d", + "torch._C._nn.adaptive_avg_pool3d", + "torch._C._nn.adaptive_max_pool2d", + "torch._C._nn.adaptive_max_pool3d", + "torch._C._nn.avg_pool2d", + "torch._C._nn.avg_pool3d", + "torch._C._nn.binary_cross_entropy", + "torch._C._nn.col2im", + "torch._C._nn.conv_depthwise3d", + "torch._C._nn.cross_entropy_loss", + "torch._C._nn.elu_", + "torch._C._nn.elu", + "torch._C._nn.flatten_dense_tensors", + "torch._C._nn.fractional_max_pool2d", + "torch._C._nn.fractional_max_pool3d", + "torch._C._nn.gelu_", + "torch._C._nn.gelu", + "torch._C._nn.glu", + "torch._C._nn.hardsigmoid_", + "torch._C._nn.hardsigmoid", + "torch._C._nn.hardswish_", + "torch._C._nn.hardswish", + "torch._C._nn.hardtanh_", + "torch._C._nn.hardtanh", + "torch._C._nn.huber_loss", + "torch._C._nn.im2col", + "torch._C._nn.l1_loss", + "torch._C._nn.leaky_relu_", + "torch._C._nn.leaky_relu", + "torch._C._nn.linear", + "torch._C._nn.log_sigmoid", + "torch._C._nn.max_pool2d_with_indices", + "torch._C._nn.max_pool3d_with_indices", + "torch._C._nn.max_unpool2d", + "torch._C._nn.max_unpool3d", + "torch._C._nn.mish_", + "torch._C._nn.mish", + "torch._C._nn.mkldnn_linear", + "torch._C._nn.mkldnn_reorder_conv2d_weight", + "torch._C._nn.mkldnn_reorder_conv3d_weight", + "torch._C._nn.mse_loss", + "torch._C._nn.multi_margin_loss", + "torch._C._nn.multilabel_margin_loss", + "torch._C._nn.nll_loss_nd", + "torch._C._nn.nll_loss", + "torch._C._nn.nll_loss2d", + "torch._C._nn.one_hot", + "torch._C._nn.pad_sequence", + "torch._C._nn.pad", + "torch._C._nn.reflection_pad1d", + "torch._C._nn.reflection_pad2d", + "torch._C._nn.reflection_pad3d", + "torch._C._nn.relu6_", + "torch._C._nn.relu6", + "torch._C._nn.replication_pad1d", + "torch._C._nn.replication_pad2d", + "torch._C._nn.replication_pad3d", + "torch._C._nn.rrelu_with_noise_", + "torch._C._nn.rrelu_with_noise", + "torch._C._nn.scaled_dot_product_attention", + "torch._C._nn.silu_", + "torch._C._nn.silu", + "torch._C._nn.slow_conv_dilated2d", + "torch._C._nn.slow_conv_dilated3d", + "torch._C._nn.slow_conv_transpose2d", + "torch._C._nn.slow_conv_transpose3d", + "torch._C._nn.slow_conv3d", + "torch._C._nn.smooth_l1_loss", + "torch._C._nn.soft_margin_loss", + "torch._C._nn.softplus", + "torch._C._nn.softshrink", + "torch._C._nn.thnn_conv2d", + "torch._C._nn.unflatten_dense_tensors", + "torch._C._nn.upsample_bicubic2d", + "torch._C._nn.upsample_bilinear2d", + "torch._C._nn.upsample_linear1d", + "torch._C._nn.upsample_nearest1d", + "torch._C._nn.upsample_nearest2d", + "torch._C._nn.upsample_nearest3d", + "torch._C._nn.upsample_trilinear3d", + 
"torch._C._non_sym_sizes", + "torch._C._overlaps", + "torch._C._parallel_info", + "torch._C._parse_dispatch_key", + "torch._C._parse_source_def", + "torch._C._pop_torch_dispatch_stack", + "torch._C._pop_torch_function_stack", + "torch._C._propagate_and_assign_input_shapes", + "torch._C._propagate_shapes", + "torch._C._propagate_xla_data", + "torch._C._push_on_torch_dispatch_stack", + "torch._C._push_on_torch_function_stack", + "torch._C._quantize_ondevice_ptq_dynamic", + "torch._C._register_py_class_for_device", + "torch._C._remove_cached_tensor", + "torch._C._remove_worker_pids", + "torch._C._rename_privateuse1_backend", + "torch._C._replace_", + "torch._C._replace_overloaded_method_decl", + "torch._C._resolve_type_from_object", + "torch._C._resolve_type", + "torch._C._rocm_is_backward_pass", + "torch._C._rpc_init", + "torch._C._run_emit_module_hook", + "torch._C._save_jit_module_to_bytes", + "torch._C._save_jit_module", + "torch._C._save_mobile_module_to_bytes", + "torch._C._save_mobile_module", + "torch._C._save_parameters", + "torch._C._scatter_out", + "torch._C._scatter", + "torch._C._select_conv_backend", + "torch._C._set_autograd_fallback_mode", + "torch._C._set_backcompat_broadcast_warn", + "torch._C._set_backcompat_keepdim_warn", + "torch._C._set_cached_tensors_enabled", + "torch._C._set_check_sparse_tensor_invariants", + "torch._C._set_conj", + "torch._C._set_cublas_allow_bf16_reduced_precision_reduction", + "torch._C._set_cublas_allow_fp16_reduced_precision_reduction", + "torch._C._set_cublas_allow_tf32", + "torch._C._set_cudnn_allow_tf32", + "torch._C._set_cudnn_benchmark", + "torch._C._set_cudnn_deterministic", + "torch._C._set_cudnn_enabled", + "torch._C._set_default_dtype", + "torch._C._set_default_mobile_cpu_allocator", + "torch._C._set_default_tensor_type", + "torch._C._set_deterministic_algorithms", + "torch._C._set_deterministic_fill_uninitialized_memory", + "torch._C._set_dispatch_mode", + "torch._C._set_float32_matmul_precision", + "torch._C._set_fwd_grad_enabled", + "torch._C._set_grad_enabled", + "torch._C._set_graph_executor_optimize", + "torch._C._set_linalg_preferred_backend", + "torch._C._set_meta_in_tls_dispatch_include", + "torch._C._set_mkldnn_enabled", + "torch._C._set_multithreading_enabled", + "torch._C._set_neg", + "torch._C._set_nnpack_enabled", + "torch._C._set_print_stack_traces_on_fatal_signal", + "torch._C._set_qengine", + "torch._C._set_sdp_use_flash", + "torch._C._set_sdp_use_math", + "torch._C._set_sdp_use_mem_efficient", + "torch._C._set_should_use_format_with_string_table", + "torch._C._set_storage_access_error_msg", + "torch._C._set_tensor_metadata", + "torch._C._set_tracing_state", + "torch._C._set_value_trace", + "torch._C._set_view_replay_enabled", + "torch._C._set_warnAlways", + "torch._C._set_worker_pids", + "torch._C._set_worker_signal_handlers", + "torch._C._should_allow_numbers_as_tensors", + "torch._C._show_config", + "torch._C._sparse._sparse_addmm", + "torch._C._sparse._sparse_log_softmax", + "torch._C._sparse._sparse_mm_reduce_impl", + "torch._C._sparse._sparse_mm", + "torch._C._sparse._sparse_softmax", + "torch._C._sparse._spdiags", + "torch._C._sparse.sparse_sampled_addmm", + "torch._C._special.special_airy_ai", + "torch._C._special.special_bessel_j0", + "torch._C._special.special_bessel_j1", + "torch._C._special.special_bessel_y0", + "torch._C._special.special_bessel_y1", + "torch._C._special.special_chebyshev_polynomial_t", + "torch._C._special.special_chebyshev_polynomial_u", + "torch._C._special.special_chebyshev_polynomial_v", 
+ "torch._C._special.special_chebyshev_polynomial_w", + "torch._C._special.special_digamma", + "torch._C._special.special_entr", + "torch._C._special.special_erf", + "torch._C._special.special_erfc", + "torch._C._special.special_erfcx", + "torch._C._special.special_erfinv", + "torch._C._special.special_exp2", + "torch._C._special.special_expit", + "torch._C._special.special_expm1", + "torch._C._special.special_gammainc", + "torch._C._special.special_gammaincc", + "torch._C._special.special_gammaln", + "torch._C._special.special_hermite_polynomial_h", + "torch._C._special.special_hermite_polynomial_he", + "torch._C._special.special_i0", + "torch._C._special.special_i0e", + "torch._C._special.special_i1", + "torch._C._special.special_i1e", + "torch._C._special.special_laguerre_polynomial_l", + "torch._C._special.special_legendre_polynomial_p", + "torch._C._special.special_log_ndtr", + "torch._C._special.special_log_softmax", + "torch._C._special.special_log1p", + "torch._C._special.special_logit", + "torch._C._special.special_logsumexp", + "torch._C._special.special_modified_bessel_i0", + "torch._C._special.special_modified_bessel_i1", + "torch._C._special.special_modified_bessel_k0", + "torch._C._special.special_modified_bessel_k1", + "torch._C._special.special_multigammaln", + "torch._C._special.special_ndtr", + "torch._C._special.special_ndtri", + "torch._C._special.special_polygamma", + "torch._C._special.special_psi", + "torch._C._special.special_round", + "torch._C._special.special_scaled_modified_bessel_k0", + "torch._C._special.special_scaled_modified_bessel_k1", + "torch._C._special.special_shifted_chebyshev_polynomial_t", + "torch._C._special.special_shifted_chebyshev_polynomial_u", + "torch._C._special.special_shifted_chebyshev_polynomial_v", + "torch._C._special.special_shifted_chebyshev_polynomial_w", + "torch._C._special.special_sinc", + "torch._C._special.special_softmax", + "torch._C._special.special_spherical_bessel_j0", + "torch._C._special.special_xlog1py", + "torch._C._special.special_xlogy", + "torch._C._special.special_zeta", + "torch._C._stash_obj_in_tls", + "torch._C._storage_id", + "torch._C._storage_Use_Count", + "torch._C._supported_qengines", + "torch._C._te.abs", + "torch._C._te.acos", + "torch._C._te.annotate_input_shapes", + "torch._C._te.asin", + "torch._C._te.atan", + "torch._C._te.atan2", + "torch._C._te.ceil", + "torch._C._te.Compute", + "torch._C._te.Compute2", + "torch._C._te.construct_codegen", + "torch._C._te.cos", + "torch._C._te.cosh", + "torch._C._te.erf", + "torch._C._te.erfc", + "torch._C._te.exp", + "torch._C._te.expm1", + "torch._C._te.fixup_missing_shape_info", + "torch._C._te.floor", + "torch._C._te.fmod", + "torch._C._te.frac", + "torch._C._te.ifThenElse", + "torch._C._te.is_graph_compilable", + "torch._C._te.isnan", + "torch._C._te.lgamma", + "torch._C._te.log", + "torch._C._te.log10", + "torch._C._te.log1p", + "torch._C._te.log2", + "torch._C._te.lower", + "torch._C._te.make_shapes_symbolic", + "torch._C._te.pow", + "torch._C._te.Reduce", + "torch._C._te.remainder", + "torch._C._te.remove_graph_output", + "torch._C._te.remove_unused_self_argument", + "torch._C._te.replace_list_output_with_tuple", + "torch._C._te.round", + "torch._C._te.rsqrt", + "torch._C._te.sigmoid", + "torch._C._te.simplify", + "torch._C._te.sin", + "torch._C._te.sinh", + "torch._C._te.sqrt", + "torch._C._te.tan", + "torch._C._te.tanh", + "torch._C._te.trim_graph", + "torch._C._te.trunc", + "torch._C._tensor_impl_raw_handle", + 
"torch._C._test_only_add_entry_to_op_version_map", + "torch._C._test_only_populate_upgraders", + "torch._C._test_only_remove_entry_to_op_version_map", + "torch._C._test_only_remove_upgraders", + "torch._C._to_dlpack", + "torch._C._to_functionality_key", + "torch._C._tracer_set_force_outplace", + "torch._C._tracer_set_get_unique_name_fn", + "torch._C._tracer_warn_use_python", + "torch._C._unset_default_mobile_cpu_allocator", + "torch._C._unset_dispatch_mode", + "torch._C._valgrind_supported_platform", + "torch._C._valgrind_toggle_and_dump_stats", + "torch._C._valgrind_toggle", + "torch._C._verbose.mkl_set_verbose", + "torch._C._verbose.mkldnn_set_verbose", + "torch._C._vmapmode_decrement_nesting", + "torch._C._vmapmode_increment_nesting", + "torch._C._warn_deprecation", + "torch._C._warn", + "torch._C._will_engine_execute_node", + "torch._C._wrap_tensor_impl", + "torch._C.fork", + "torch._C.get_autocast_cpu_dtype", + "torch._C.get_autocast_gpu_dtype", + "torch._C.get_autocast_ipu_dtype", + "torch._C.get_autocast_xla_dtype", + "torch._C.get_default_dtype", + "torch._C.get_num_interop_threads", + "torch._C.get_num_threads", + "torch._C.import_ir_module_from_buffer", + "torch._C.import_ir_module", + "torch._C.init_num_threads", + "torch._C.is_anomaly_check_nan_enabled", + "torch._C.is_anomaly_enabled", + "torch._C.is_autocast_cache_enabled", + "torch._C.is_autocast_cpu_enabled", + "torch._C.is_autocast_enabled", + "torch._C.is_autocast_ipu_enabled", + "torch._C.is_autocast_xla_enabled", + "torch._C.is_grad_enabled", + "torch._C.is_inference_mode_enabled", + "torch._C.merge_type_from_type_comment", + "torch._C.parse_ir", + "torch._C.parse_schema", + "torch._C.parse_type_comment", + "torch._C.read_vitals", + "torch._C.set_flush_denormal", + "torch._C.set_num_interop_threads", + "torch._C.set_num_threads", + "torch._C.set_vital", + "torch._C.unify_type_list", + "torch._C.vitals_enabled", + "torch._C.wait", + "torch._cast_Byte", + "torch._cast_Char", + "torch._cast_Double", + "torch._cast_Float", + "torch._cast_Half", + "torch._cast_Int", + "torch._cast_Long", + "torch._cast_Short", + "torch._choose_qparams_per_tensor", + "torch._chunk_cat", + "torch._coalesce", + "torch._compute_linear_combination", + "torch._conj_copy", + "torch._conj_physical", + "torch._conj", + "torch._convert_indices_from_coo_to_csr", + "torch._convert_indices_from_csr_to_coo", + "torch._convert_weight_to_int4pack", + "torch._convolution_mode", + "torch._convolution", + "torch._copy_from_and_resize", + "torch._copy_from", + "torch._cslt_compress", + "torch._cslt_sparse_mm", + "torch._ctc_loss", + "torch._cudnn_ctc_loss", + "torch._cudnn_init_dropout_state", + "torch._cudnn_rnn_flatten_weight", + "torch._cudnn_rnn", + "torch._cufft_clear_plan_cache", + "torch._cufft_get_plan_cache_max_size", + "torch._cufft_get_plan_cache_size", + "torch._cufft_set_plan_cache_max_size", + "torch._cummax_helper", + "torch._cummin_helper", + "torch._debug_has_internal_overlap", + "torch._dim_arange", + "torch._dirichlet_grad", + "torch._disable_functionalization", + "torch._efficientzerotensor", + "torch._embedding_bag_forward_only", + "torch._embedding_bag", + "torch._empty_affine_quantized", + "torch._empty_per_channel_affine_quantized", + "torch._enable_functionalization", + "torch._euclidean_dist", + "torch._fake_quantize_learnable_per_channel_affine", + "torch._fake_quantize_learnable_per_tensor_affine", + "torch._fake_quantize_per_tensor_affine_cachemask_tensor_qparams", + "torch._fft_c2c", + "torch._fft_c2r", + "torch._fft_r2c", + 
"torch._fill_mem_eff_dropout_mask_", + "torch._foobar", + "torch._foreach_abs_", + "torch._foreach_abs", + "torch._foreach_acos_", + "torch._foreach_acos", + "torch._foreach_add_", + "torch._foreach_add", + "torch._foreach_addcdiv_", + "torch._foreach_addcdiv", + "torch._foreach_addcmul_", + "torch._foreach_addcmul", + "torch._foreach_asin_", + "torch._foreach_asin", + "torch._foreach_atan_", + "torch._foreach_atan", + "torch._foreach_ceil_", + "torch._foreach_ceil", + "torch._foreach_clamp_max_", + "torch._foreach_clamp_max", + "torch._foreach_clamp_min_", + "torch._foreach_clamp_min", + "torch._foreach_copy_", + "torch._foreach_cos_", + "torch._foreach_cos", + "torch._foreach_cosh_", + "torch._foreach_cosh", + "torch._foreach_div_", + "torch._foreach_div", + "torch._foreach_erf_", + "torch._foreach_erf", + "torch._foreach_erfc_", + "torch._foreach_erfc", + "torch._foreach_exp_", + "torch._foreach_exp", + "torch._foreach_expm1_", + "torch._foreach_expm1", + "torch._foreach_floor_", + "torch._foreach_floor", + "torch._foreach_frac_", + "torch._foreach_frac", + "torch._foreach_lerp_", + "torch._foreach_lerp", + "torch._foreach_lgamma_", + "torch._foreach_lgamma", + "torch._foreach_log_", + "torch._foreach_log", + "torch._foreach_log10_", + "torch._foreach_log10", + "torch._foreach_log1p_", + "torch._foreach_log1p", + "torch._foreach_log2_", + "torch._foreach_log2", + "torch._foreach_maximum_", + "torch._foreach_maximum", + "torch._foreach_minimum_", + "torch._foreach_minimum", + "torch._foreach_mul_", + "torch._foreach_mul", + "torch._foreach_neg_", + "torch._foreach_neg", + "torch._foreach_norm", + "torch._foreach_pow_", + "torch._foreach_pow", + "torch._foreach_reciprocal_", + "torch._foreach_reciprocal", + "torch._foreach_round_", + "torch._foreach_round", + "torch._foreach_sigmoid_", + "torch._foreach_sigmoid", + "torch._foreach_sign_", + "torch._foreach_sign", + "torch._foreach_sin_", + "torch._foreach_sin", + "torch._foreach_sinh_", + "torch._foreach_sinh", + "torch._foreach_sqrt_", + "torch._foreach_sqrt", + "torch._foreach_sub_", + "torch._foreach_sub", + "torch._foreach_tan_", + "torch._foreach_tan", + "torch._foreach_tanh_", + "torch._foreach_tanh", + "torch._foreach_trunc_", + "torch._foreach_trunc", + "torch._foreach_zero_", + "torch._freeze_functional_tensor", + "torch._from_functional_tensor", + "torch._functional_assert_async", + "torch._functional_sym_constrain_range_for_size", + "torch._functional_sym_constrain_range", + "torch._functionalize_are_all_mutations_hidden_from_autograd", + "torch._functionalize_commit_update", + "torch._functionalize_enable_reapply_views", + "torch._functionalize_has_data_mutation", + "torch._functionalize_has_metadata_mutation", + "torch._functionalize_is_multi_output_view", + "torch._functionalize_mark_mutation_hidden_from_autograd", + "torch._functionalize_replace", + "torch._functionalize_sync", + "torch._functionalize_was_storage_changed", + "torch._fused_adam_", + "torch._fused_adamw_", + "torch._fused_dropout", + "torch._fused_moving_avg_obs_fq_helper", + "torch._fused_sdp_choice", + "torch._fw_primal_copy", + "torch._grid_sampler_2d_cpu_fallback", + "torch._has_compatible_shallow_copy_type", + "torch._histogramdd_bin_edges", + "torch._histogramdd_from_bin_cts", + "torch._histogramdd_from_bin_tensors", + "torch._index_put_impl_", + "torch._indices_copy", + "torch._int_mm", + "torch._is_all_true", + "torch._is_any_true", + "torch._is_functional_tensor", + "torch._is_zerotensor", + "torch._linalg_check_errors", + "torch._linalg_det", + 
"torch._linalg_eigh", + "torch._linalg_slogdet", + "torch._linalg_solve_ex", + "torch._linalg_svd", + "torch._log_softmax_backward_data", + "torch._log_softmax", + "torch._logcumsumexp", + "torch._lstm_mps", + "torch._lu_with_info", + "torch._make_dep_token", + "torch._make_dual_copy", + "torch._make_dual", + "torch._make_per_channel_quantized_tensor", + "torch._make_per_tensor_quantized_tensor", + "torch._masked_scale", + "torch._masked_softmax", + "torch._mirror_autograd_meta_to", + "torch._mixed_dtypes_linear", + "torch._mkldnn_reshape", + "torch._mkldnn_transpose_", + "torch._mkldnn_transpose", + "torch._mps_convolution_transpose", + "torch._mps_convolution", + "torch._native_batch_norm_legit_no_training", + "torch._native_batch_norm_legit", + "torch._native_multi_head_attention", + "torch._neg_view_copy", + "torch._neg_view", + "torch._nested_from_padded_and_nested_example", + "torch._nested_tensor_from_mask_left_aligned", + "torch._nested_tensor_from_tensor_list", + "torch._nested_tensor_softmax_with_shape", + "torch._nested_view_from_buffer_copy", + "torch._nested_view_from_buffer", + "torch._nnpack_available", + "torch._nnpack_spatial_convolution", + "torch._pack_padded_sequence", + "torch._pad_packed_sequence", + "torch._pin_memory", + "torch._prelu_kernel", + "torch._propagate_xla_data", + "torch._remove_batch_dim", + "torch._reshape_alias_copy", + "torch._reshape_from_tensor", + "torch._resize_output_", + "torch._rowwise_prune", + "torch._sample_dirichlet", + "torch._saturate_weight_to_fp16", + "torch._scaled_dot_product_attention_math", + "torch._scaled_dot_product_efficient_attention", + "torch._scaled_dot_product_flash_attention", + "torch._scaled_dot_product_flash_attention_for_cpu", + "torch._scaled_dot_product_cudnn_attention", + "torch._scaled_mm", + "torch._shape_as_tensor", + "torch._sobol_engine_draw", + "torch._sobol_engine_ff_", + "torch._sobol_engine_initialize_state_", + "torch._sobol_engine_scramble_", + "torch._softmax_backward_data", + "torch._softmax", + "torch._sparse_broadcast_to_copy", + "torch._sparse_broadcast_to", + "torch._sparse_csr_prod", + "torch._sparse_csr_sum", + "torch._sparse_log_softmax_backward_data", + "torch._sparse_semi_structured_linear", + "torch._sparse_softmax_backward_data", + "torch._sparse_sparse_matmul", + "torch._sparse_sum", + "torch._stack", + "torch._standard_gamma_grad", + "torch._standard_gamma", + "torch._test_autograd_multiple_dispatch_view_copy", + "torch._test_autograd_multiple_dispatch_view", + "torch._test_autograd_multiple_dispatch", + "torch._test_check_tensor", + "torch._test_functorch_fallback", + "torch._test_serialization_subcmul", + "torch._to_cpu", + "torch._to_functional_tensor", + "torch._to_sparse_semi_structured", + "torch._transform_bias_rescale_qkv", + "torch._transformer_encoder_layer_fwd", + "torch._trilinear", + "torch._triton_multi_head_attention", + "torch._triton_scaled_dot_attention", + "torch._unique", + "torch._unique2", + "torch._unpack_dual", + "torch._unsafe_index_put", + "torch._unsafe_index", + "torch._use_cudnn_ctc_loss", + "torch._use_cudnn_rnn_flatten_weight", + "torch._values_copy", + "torch._weight_int4pack_mm", + "torch._weight_int8pack_mm", + "torch._weight_norm_interface", + "torch._weight_norm", + "torch.abs_", + "torch.abs", + "torch.absolute", + "torch.acos_", + "torch.acos", + "torch.acosh_", + "torch.acosh", + "torch.adaptive_avg_pool1d", + "torch.adaptive_max_pool1d", + "torch.add", + "torch.addbmm", + "torch.addcdiv", + "torch.addcmul", + "torch.addmm", + "torch.addmv_", + 
"torch.addmv", + "torch.addr", + "torch.adjoint", + "torch.affine_grid_generator", + "torch.alias_copy", + "torch.all", + "torch.allclose", + "torch.alpha_dropout_", + "torch.alpha_dropout", + "torch.amax", + "torch.amin", + "torch.aminmax", + "torch.angle", + "torch.any", + "torch.arange", + "torch.arccos_", + "torch.arccos", + "torch.arccosh_", + "torch.arccosh", + "torch.arcsin_", + "torch.arcsin", + "torch.arcsinh_", + "torch.arcsinh", + "torch.arctan_", + "torch.arctan", + "torch.arctan2", + "torch.arctanh_", + "torch.arctanh", + "torch.argmax", + "torch.argmin", + "torch.argsort", + "torch.argwhere", + "torch.as_strided_", + "torch.as_strided_copy", + "torch.as_strided_scatter", + "torch.as_strided", + "torch.as_tensor", + "torch.asarray", + "torch.asin_", + "torch.asin", + "torch.asinh_", + "torch.asinh", + "torch.atan_", + "torch.atan", + "torch.atan2", + "torch.atanh_", + "torch.atanh", + "torch.avg_pool1d", + "torch.baddbmm", + "torch.bartlett_window", + "torch.batch_norm_backward_elemt", + "torch.batch_norm_backward_reduce", + "torch.batch_norm_elemt", + "torch.batch_norm_gather_stats_with_counts", + "torch.batch_norm_gather_stats", + "torch.batch_norm_stats", + "torch.batch_norm_update_stats", + "torch.batch_norm", + "torch.bernoulli", + "torch.bilinear", + "torch.binary_cross_entropy_with_logits", + "torch.bincount", + "torch.binomial", + "torch.bitwise_and", + "torch.bitwise_left_shift", + "torch.bitwise_not", + "torch.bitwise_or", + "torch.bitwise_right_shift", + "torch.bitwise_xor", + "torch.blackman_window", + "torch.bmm", + "torch.broadcast_to", + "torch.bucketize", + "torch.can_cast", + "torch.cat", + "torch.ccol_indices_copy", + "torch.ceil_", + "torch.ceil", + "torch.celu_", + "torch.celu", + "torch.channel_shuffle", + "torch.cholesky_inverse", + "torch.cholesky_solve", + "torch.cholesky", + "torch.choose_qparams_optimized", + "torch.chunk", + "torch.clamp_", + "torch.clamp_max_", + "torch.clamp_max", + "torch.clamp_min_", + "torch.clamp_min", + "torch.clamp", + "torch.clip_", + "torch.clip", + "torch.clone", + "torch.col_indices_copy", + "torch.column_stack", + "torch.combinations", + "torch.complex", + "torch.concat", + "torch.concatenate", + "torch.conj_physical_", + "torch.conj_physical", + "torch.conj", + "torch.constant_pad_nd", + "torch.conv_tbc", + "torch.conv_transpose1d", + "torch.conv_transpose2d", + "torch.conv_transpose3d", + "torch.conv1d", + "torch.conv2d", + "torch.conv3d", + "torch.convolution", + "torch.copysign", + "torch.corrcoef", + "torch.cos_", + "torch.cos", + "torch.cosh_", + "torch.cosh", + "torch.cosine_embedding_loss", + "torch.cosine_similarity", + "torch.count_nonzero", + "torch.cov", + "torch.cross", + "torch.crow_indices_copy", + "torch.ctc_loss", + "torch.cudnn_affine_grid_generator", + "torch.cudnn_batch_norm", + "torch.cudnn_convolution_add_relu", + "torch.cudnn_convolution_relu", + "torch.cudnn_convolution_transpose", + "torch.cudnn_convolution", + "torch.cudnn_grid_sampler", + "torch.cudnn_is_acceptable", + "torch.cummax", + "torch.cummin", + "torch.cumprod", + "torch.cumsum", + "torch.cumulative_trapezoid", + "torch.deg2rad_", + "torch.deg2rad", + "torch.dequantize", + "torch.det", + "torch.detach_", + "torch.detach_copy", + "torch.detach", + "torch.diag_embed", + "torch.diag", + "torch.diagflat", + "torch.diagonal_copy", + "torch.diagonal_scatter", + "torch.diagonal", + "torch.diff", + "torch.digamma", + "torch.dist", + "torch.div", + "torch.divide", + "torch.dot", + "torch.dropout_", + "torch.dropout", + "torch.dsmm", + 
"torch.dsplit", + "torch.dstack", + "torch.embedding_bag", + "torch.embedding_renorm_", + "torch.embedding", + "torch.empty_like", + "torch.empty_permuted", + "torch.empty_quantized", + "torch.empty_strided", + "torch.empty", + "torch.eq", + "torch.equal", + "torch.erf_", + "torch.erf", + "torch.erfc_", + "torch.erfc", + "torch.erfinv", + "torch.exp_", + "torch.exp", + "torch.exp2_", + "torch.exp2", + "torch.expand_copy", + "torch.expm1_", + "torch.expm1", + "torch.eye", + "torch.fake_quantize_per_channel_affine", + "torch.fake_quantize_per_tensor_affine", + "torch.fbgemm_linear_fp16_weight_fp32_activation", + "torch.fbgemm_linear_fp16_weight", + "torch.fbgemm_linear_int8_weight_fp32_activation", + "torch.fbgemm_linear_int8_weight", + "torch.fbgemm_linear_quantize_weight", + "torch.fbgemm_pack_gemm_matrix_fp16", + "torch.fbgemm_pack_quantized_matrix", + "torch.feature_alpha_dropout_", + "torch.feature_alpha_dropout", + "torch.feature_dropout_", + "torch.feature_dropout", + "torch.fill_", + "torch.fill", + "torch.fix_", + "torch.fix", + "torch.flatten", + "torch.flip", + "torch.fliplr", + "torch.flipud", + "torch.float_power", + "torch.floor_", + "torch.floor_divide", + "torch.floor", + "torch.fmax", + "torch.fmin", + "torch.fmod", + "torch.frac_", + "torch.frac", + "torch.frexp", + "torch.frobenius_norm", + "torch.from_file", + "torch.from_numpy", + "torch.frombuffer", + "torch.full_like", + "torch.full", + "torch.fused_moving_avg_obs_fake_quant", + "torch.gather", + "torch.gcd_", + "torch.gcd", + "torch.ge", + "torch.geqrf", + "torch.ger", + "torch.get_device", + "torch.gradient", + "torch.greater_equal", + "torch.greater", + "torch.grid_sampler_2d", + "torch.grid_sampler_3d", + "torch.grid_sampler", + "torch.group_norm", + "torch.gru_cell", + "torch.gru", + "torch.gt", + "torch.hamming_window", + "torch.hann_window", + "torch.hardshrink", + "torch.heaviside", + "torch.hinge_embedding_loss", + "torch.histc", + "torch.histogram", + "torch.histogramdd", + "torch.hsmm", + "torch.hsplit", + "torch.hspmm", + "torch.hstack", + "torch.hypot", + "torch.i0_", + "torch.i0", + "torch.igamma", + "torch.igammac", + "torch.imag", + "torch.index_add", + "torch.index_copy", + "torch.index_fill", + "torch.index_put_", + "torch.index_put", + "torch.index_reduce", + "torch.index_select", + "torch.indices_copy", + "torch.inner", + "torch.instance_norm", + "torch.int_repr", + "torch.inverse", + "torch.is_complex", + "torch.is_conj", + "torch.is_distributed", + "torch.is_floating_point", + "torch.is_inference", + "torch.is_neg", + "torch.is_nonzero", + "torch.is_same_size", + "torch.is_signed", + "torch.is_vulkan_available", + "torch.isclose", + "torch.isfinite", + "torch.isin", + "torch.isinf", + "torch.isnan", + "torch.isneginf", + "torch.isposinf", + "torch.isreal", + "torch.istft", + "torch.kaiser_window", + "torch.kl_div", + "torch.kron", + "torch.kthvalue", + "torch.layer_norm", + "torch.lcm_", + "torch.lcm", + "torch.ldexp_", + "torch.ldexp", + "torch.le", + "torch.lerp", + "torch.less_equal", + "torch.less", + "torch.lgamma", + "torch.linspace", + "torch.log_", + "torch.log_softmax", + "torch.log", + "torch.log10_", + "torch.log10", + "torch.log1p_", + "torch.log1p", + "torch.log2_", + "torch.log2", + "torch.logaddexp", + "torch.logaddexp2", + "torch.logcumsumexp", + "torch.logdet", + "torch.logical_and", + "torch.logical_not", + "torch.logical_or", + "torch.logical_xor", + "torch.logit_", + "torch.logit", + "torch.logspace", + "torch.logsumexp", + "torch.lstm_cell", + "torch.lstm", + "torch.lt", + 
"torch.lu_solve", + "torch.lu_unpack", + "torch.margin_ranking_loss", + "torch.masked_fill", + "torch.masked_scatter", + "torch.masked_select", + "torch.matmul", + "torch.matrix_exp", + "torch.matrix_power", + "torch.max_pool1d_with_indices", + "torch.max_pool1d", + "torch.max_pool2d", + "torch.max_pool3d", + "torch.max", + "torch.maximum", + "torch.mean", + "torch.median", + "torch.min", + "torch.minimum", + "torch.miopen_batch_norm", + "torch.miopen_convolution_add_relu", + "torch.miopen_convolution_relu", + "torch.miopen_convolution_transpose", + "torch.miopen_convolution", + "torch.miopen_depthwise_convolution", + "torch.miopen_rnn", + "torch.mkldnn_adaptive_avg_pool2d", + "torch.mkldnn_convolution", + "torch.mkldnn_linear_backward_weights", + "torch.mkldnn_max_pool2d", + "torch.mkldnn_max_pool3d", + "torch.mkldnn_rnn_layer", + "torch.mm", + "torch.mode", + "torch.moveaxis", + "torch.movedim", + "torch.msort", + "torch.mul", + "torch.multinomial", + "torch.multiply", + "torch.mv", + "torch.mvlgamma", + "torch.nan_to_num_", + "torch.nan_to_num", + "torch.nanmean", + "torch.nanmedian", + "torch.nanquantile", + "torch.nansum", + "torch.narrow_copy", + "torch.narrow", + "torch.native_batch_norm", + "torch.native_channel_shuffle", + "torch.native_dropout", + "torch.native_group_norm", + "torch.native_layer_norm", + "torch.native_norm", + "torch.ne", + "torch.neg_", + "torch.neg", + "torch.negative_", + "torch.negative", + "torch.nextafter", + "torch.nonzero_static", + "torch.nonzero", + "torch.norm_except_dim", + "torch.normal", + "torch.not_equal", + "torch.nuclear_norm", + "torch.numel", + "torch.obj", + "torch.ones_like", + "torch.ones", + "torch.orgqr", + "torch.ormqr", + "torch.outer", + "torch.pairwise_distance", + "torch.pdist", + "torch.permute_copy", + "torch.permute", + "torch.pinverse", + "torch.pixel_shuffle", + "torch.pixel_unshuffle", + "torch.poisson_nll_loss", + "torch.poisson", + "torch.polar", + "torch.polygamma", + "torch.positive", + "torch.pow", + "torch.prelu", + "torch._print", + "torch.prod", + "torch.promote_types", + "torch.put", + "torch.q_per_channel_axis", + "torch.q_per_channel_scales", + "torch.q_per_channel_zero_points", + "torch.q_scale", + "torch.q_zero_point", + "torch.qr", + "torch.quantile", + "torch.quantize_per_channel", + "torch.quantize_per_tensor_dynamic", + "torch.quantize_per_tensor", + "torch.quantized_batch_norm", + "torch.quantized_gru_cell", + "torch.quantized_lstm_cell", + "torch.quantized_max_pool1d", + "torch.quantized_max_pool2d", + "torch.quantized_max_pool3d", + "torch.quantized_rnn_relu_cell", + "torch.quantized_rnn_tanh_cell", + "torch.rad2deg_", + "torch.rad2deg", + "torch.rand_like", + "torch.rand", + "torch.randint_like", + "torch.randint", + "torch.randn_like", + "torch.randn", + "torch.randperm", + "torch.range", + "torch.ravel", + "torch.real", + "torch.reciprocal_", + "torch.reciprocal", + "torch.relu_", + "torch.relu", + "torch.remainder", + "torch.renorm", + "torch.repeat_interleave", + "torch.reshape", + "torch.resolve_conj", + "torch.resolve_neg", + "torch.result_type", + "torch.rnn_relu_cell", + "torch.rnn_relu", + "torch.rnn_tanh_cell", + "torch.rnn_tanh", + "torch.roll", + "torch.rot90", + "torch.round_", + "torch.round", + "torch.row_indices_copy", + "torch.row_stack", + "torch.rrelu_", + "torch.rrelu", + "torch.rsqrt_", + "torch.rsqrt", + "torch.rsub", + "torch.saddmm", + "torch.scalar_tensor", + "torch.scatter_add", + "torch.scatter_reduce", + "torch.scatter", + "torch.searchsorted", + "torch.segment_reduce", + 
"torch.select_copy", + "torch.select_scatter", + "torch.select", + "torch.selu_", + "torch.selu", + "torch.sgn", + "torch.sigmoid_", + "torch.sigmoid", + "torch.sign", + "torch.signal.windows.windows.sqrt", + "torch.signbit", + "torch.sin_", + "torch.sin", + "torch.sinc_", + "torch.sinc", + "torch.sinh_", + "torch.sinh", + "torch.slice_copy", + "torch.slice_scatter", + "torch.slogdet", + "torch.smm", + "torch.softmax", + "torch.sort", + "torch.split_copy", + "torch.split_with_sizes_copy", + "torch.split_with_sizes", + "torch.spmm", + "torch.sqrt_", + "torch.sqrt", + "torch.square_", + "torch.square", + "torch.squeeze_copy", + "torch.squeeze", + "torch.sspaddmm", + "torch.stack", + "torch.std_mean", + "torch.std", + "torch.sub", + "torch.subtract", + "torch.sum", + "torch.svd", + "torch.swapaxes", + "torch.swapdims", + "torch.sym_constrain_range_for_size", + "torch.sym_constrain_range", + "torch.t_copy", + "torch.t", + "torch.take_along_dim", + "torch.take", + "torch.tan_", + "torch.tan", + "torch.tanh_", + "torch.tanh", + "torch.tensor_split", + "torch.tensor", + "torch.threshold_", + "torch.threshold", + "torch.tile", + "torch.topk", + "torch.trace", + "torch.transpose_copy", + "torch.transpose", + "torch.trapezoid", + "torch.trapz", + "torch.triangular_solve", + "torch.tril_indices", + "torch.tril", + "torch.triplet_margin_loss", + "torch.triu_indices", + "torch.triu", + "torch.true_divide", + "torch.trunc_", + "torch.trunc", + "torch.unbind_copy", + "torch.unbind", + "torch.unflatten", + "torch.unfold_copy", + "torch.unsafe_chunk", + "torch.unsafe_split_with_sizes", + "torch.unsafe_split", + "torch.unsqueeze_copy", + "torch.unsqueeze", + "torch.values_copy", + "torch.vander", + "torch.var_mean", + "torch.var", + "torch.vdot", + "torch.view_as_complex_copy", + "torch.view_as_complex", + "torch.view_as_real_copy", + "torch.view_as_real", + "torch.view_copy", + "torch.vsplit", + "torch.vstack", + "torch.where", + "torch.xlogy_", + "torch.xlogy", + "torch.zero_", + "torch.zeros", + "torch._fused_sgd_", + "torch.slice_inverse", + "torch._assert_scalar", + "torch._functional_assert_scalar", + ], + TorchInGraphFunctionVariable, +) + + +if sys.version_info >= (3, 9): + torch_c_binding_in_graph_functions["math.lcm"] = TorchInGraphFunctionVariable +if sys.version_info >= (3, 11): + torch_c_binding_in_graph_functions["math.exp2"] = TorchInGraphFunctionVariable + torch_c_binding_in_graph_functions["math.cbrt"] = TorchInGraphFunctionVariable + + +# In graph functions (including constant folding) that are not C bindings +torch_non_c_binding_in_graph_functions = dict.fromkeys( + [ + "torch.__future__.get_overwrite_module_params_on_conversion", + "torch.__future__.set_overwrite_module_params_on_conversion", + "torch.__getattr__", + "torch._assert", + "torch._check_index", + "torch._check_is_size", + "torch._check_not_implemented", + "torch._check_tensor_all_with", + "torch._check_tensor_all", + "torch._check_type", + "torch._check_value", + "torch._check_with", + "torch._check", + "torch._compile._disable_dynamo", + "torch._functorch.apis.chunk_vmap", + "torch._functorch.autograd_function.custom_function_call_functionalize", + "torch._functorch.autograd_function.custom_function_call_grad", + "torch._functorch.autograd_function.custom_function_call_vmap_generate_rule", + "torch._functorch.autograd_function.custom_function_call_vmap", + "torch._functorch.autograd_function.generate_single_level_function", + "torch._functorch.autograd_function.get_tangents_in_dims", + 
"torch._functorch.autograd_function.has_overriden_vmap_rule", + "torch._functorch.autograd_function.reductify_leaf", + "torch._functorch.autograd_function.reductify", + "torch._functorch.autograd_function.validate_vmap_returns_tuple_of_two_elements", + "torch._functorch.autograd_function.vmapify_autograd_function", + "torch._functorch.autograd_function.wrap_outputs_maintaining_identity", + "torch._functorch.batch_norm_replacement.batch_norm_without_running_stats", + "torch._functorch.batch_norm_replacement.replace_all_batch_norm_modules_", + "torch._functorch.deprecated.combine_state_for_ensemble", + "torch._functorch.deprecated.functionalize", + "torch._functorch.deprecated.get_warning", + "torch._functorch.deprecated.grad_and_value", + "torch._functorch.deprecated.hessian", + "torch._functorch.deprecated.jacfwd", + "torch._functorch.deprecated.jacrev", + "torch._functorch.deprecated.jvp", + "torch._functorch.deprecated.make_functional_with_buffers", + "torch._functorch.deprecated.make_functional", + "torch._functorch.deprecated.setup_docs", + "torch._functorch.deprecated.vjp", + "torch._functorch.deprecated.warn_deprecated", + "torch._functorch.eager_transforms._any_differentiable", + "torch._functorch.eager_transforms._autograd_grad", + "torch._functorch.eager_transforms._construct_standard_basis_for", + "torch._functorch.eager_transforms._vjp_treespec_compare", + "torch._functorch.eager_transforms._set_tensor_requires_grad", + "torch._functorch.eager_transforms._is_differentiable", + "torch._functorch.eager_transforms._jvp_with_argnums", + "torch._functorch.eager_transforms._maybe_unwrap_functional_tensor", + "torch._functorch.eager_transforms._maybe_wrap_functional_tensor", + "torch._functorch.eager_transforms._replace_args", + "torch._functorch.eager_transforms._unwrap_all_tensors_from_functional", + "torch._functorch.eager_transforms._wrap_all_tensors_to_functional", + "torch._functorch.eager_transforms.assert_flat_tuple_of_tensors", + "torch._functorch.eager_transforms.assert_non_empty_list_of_tensors", + "torch._functorch.eager_transforms.assert_output_is_tensor_or_tensors", + "torch._functorch.eager_transforms.functionalize", + "torch._functorch.eager_transforms.hessian", + "torch._functorch.eager_transforms.jacfwd", + "torch._functorch.eager_transforms.jvp", + "torch._functorch.eager_transforms.lazy_dynamo_disable", + "torch._functorch.eager_transforms.linearize", + "torch._functorch.eager_transforms.noop", + "torch._functorch.eager_transforms.safe_unflatten", + "torch._functorch.eager_transforms.safe_unpack_dual", + "torch._functorch.functional_call.construct_stacked_leaf", + "torch._functorch.functional_call.functional_call", + "torch._functorch.functional_call.stack_module_state", + "torch._functorch.pyfunctorch.coerce_cinterpreter", + "torch._functorch.pyfunctorch.dispatch_functorch", + "torch._functorch.pyfunctorch.nested", + "torch._functorch.pyfunctorch.retrieve_current_functorch_interpreter", + "torch._functorch.pyfunctorch.temporarily_pop_interpreter_stack", + "torch._functorch.utils.enable_single_level_autograd_function", + "torch._functorch.utils.exposed_in", + "torch._functorch.utils.unwrap_dead_wrappers", + "torch._functorch.vmap.lazy_load_decompositions", + "torch._guards.compile_context", + "torch._guards.detect_fake_mode", + "torch._guards.tracing", + "torch._higher_order_ops.map._has_potential_branch_input_alias", + "torch._higher_order_ops.map._has_potential_branch_input_mutation", + "torch._higher_order_ops.map._stack_pytree", + 
"torch._higher_order_ops.map._unstack_pytree", + "torch._higher_order_ops.map.create_fw_bw_graph", + "torch._higher_order_ops.map.map_autograd", + "torch._higher_order_ops.map.map_dense", + "torch._higher_order_ops.map.map_fake_tensor_mode", + "torch._higher_order_ops.map.map_functionalize", + "torch._higher_order_ops.map.map_proxy_torch_dispatch_mode", + "torch._higher_order_ops.map.map_wrapper", + "torch._higher_order_ops.map.trace_map", + "torch._higher_order_ops.out_dtype.elementwise_dtypes", + "torch._higher_order_ops.out_dtype.is_int_mm", + "torch._higher_order_ops.out_dtype.out_dtype_dense", + "torch._higher_order_ops.out_dtype.out_dtype_fake_tensor_mode", + "torch._higher_order_ops.out_dtype.out_dtype_fallback", + "torch._higher_order_ops.out_dtype.out_dtype_func", + "torch._higher_order_ops.out_dtype.out_dtype_proxy", + "torch._higher_order_ops.out_dtype.trace_out_dtype", + "torch._higher_order_ops.utils.autograd_not_implemented_inner", + "torch._higher_order_ops.utils.autograd_not_implemented", + "torch._linalg_utils._symeig", + "torch._linalg_utils.basis", + "torch._linalg_utils.bform", + "torch._linalg_utils.conjugate", + "torch._linalg_utils.eig", + "torch._linalg_utils.get_floating_dtype", + "torch._linalg_utils.is_sparse", + "torch._linalg_utils.lstsq", + "torch._linalg_utils.matmul", + "torch._linalg_utils.matrix_rank", + "torch._linalg_utils.qform", + "torch._linalg_utils.solve", + "torch._linalg_utils.symeig", + "torch._linalg_utils.transjugate", + "torch._linalg_utils.transpose", + "torch._load_global_deps", + "torch._lowrank._svd_lowrank", + "torch._lowrank.get_approximate_basis", + "torch._lowrank.pca_lowrank", + "torch._lowrank.svd_lowrank", + "torch._ops._compute_keyset", + "torch._ops._get_tensors", + "torch._ops._to_flat_tuple", + "torch._ops.add_cached_op", + "torch._ops.dl_open_guard", + "torch._ops.get_cached_ops", + "torch._ops.key_extractor", + "torch._ops.reset_cached_ops", + "torch._ops.resolve_key", + "torch._preload_cuda_deps", + "torch._register_device_module", + "torch._running_with_deploy", + "torch._utils._dummy_type", + "torch._weights_only_unpickler._get_allowed_globals", + "torch._weights_only_unpickler.load", + "torch.align_tensors", + "torch.amp.autocast_mode._enter_autocast", + "torch.amp.autocast_mode._exit_autocast", + "torch.amp.autocast_mode.autocast_decorator", + "torch.are_deterministic_algorithms_enabled", + "torch.atleast_1d", + "torch.atleast_2d", + "torch.atleast_3d", + "torch.autograd._calculate_shape", + "torch.autograd._is_checkpoint_valid", + "torch.autograd._make_grads", + "torch.autograd._register_py_tensor_class_for_device", + "torch.autograd._tensor_or_tensors_to_tuple", + "torch.autograd.backward", + "torch.autograd.forward_ad.enter_dual_level", + "torch.autograd.forward_ad.exit_dual_level", + "torch.autograd.forward_ad.make_dual", + "torch.autograd.forward_ad.unpack_dual", + "torch.autograd.function._iter_filter", + "torch.autograd.function._iter_jit_values", + "torch.autograd.function._iter_None_tensors", + "torch.autograd.function._iter_tensors_permissive", + "torch.autograd.function._iter_tensors", + "torch.autograd.function._jit_unwrap_structured", + "torch.autograd.function._map_tensor_data", + "torch.autograd.function._nested_map", + "torch.autograd.function._unflatten", + "torch.autograd.function.once_differentiable", + "torch.autograd.function.traceable", + "torch.autograd.functional._as_tuple_nocheck", + "torch.autograd.functional._as_tuple", + "torch.autograd.functional._autograd_grad", + 
"torch.autograd.functional._check_requires_grad", + "torch.autograd.functional._construct_standard_basis_for", + "torch.autograd.functional._fill_in_zeros", + "torch.autograd.functional._grad_postprocess", + "torch.autograd.functional._grad_preprocess", + "torch.autograd.functional._jacfwd", + "torch.autograd.functional._tuple_postprocess", + "torch.autograd.functional._validate_v", + "torch.autograd.functional.hessian", + "torch.autograd.functional.hvp", + "torch.autograd.functional.jacobian", + "torch.autograd.functional.jvp", + "torch.autograd.functional.vhp", + "torch.autograd.functional.vjp", + "torch.autograd.grad_mode._enter_inference_mode", + "torch.autograd.grad_mode._exit_inference_mode", + "torch.autograd.graph._get_sid", + "torch.autograd.graph._get_tid", + "torch.autograd.graph.allow_mutation_on_saved_tensors", + "torch.autograd.graph.get_gradient_edge", + "torch.autograd.graph.increment_version", + "torch.autograd.graph.register_multi_grad_hook", + "torch.autograd.variable", + "torch.backends.__allow_nonbracketed_mutation", + "torch.backends.cpu.get_cpu_capability", + "torch.backends.cuda.can_use_efficient_attention", + "torch.backends.cuda.can_use_flash_attention", + "torch.backends.cuda.enable_flash_sdp", + "torch.backends.cuda.enable_math_sdp", + "torch.backends.cuda.enable_mem_efficient_sdp", + "torch.backends.cuda.flash_sdp_enabled", + "torch.backends.cuda.is_built", + "torch.backends.cuda.math_sdp_enabled", + "torch.backends.cuda.mem_efficient_sdp_enabled", + "torch.backends.cuda.cudnn_sdp_enabled", + "torch.backends.cuda.enable_cudnn_sdp", + "torch.backends.cuda.preferred_linalg_library", + "torch.backends.cuda.sdp_kernel", + "torch.backends.cudnn._init", + "torch.backends.cudnn.flags", + "torch.backends.cudnn.is_acceptable", + "torch.backends.cudnn.is_available", + "torch.backends.cudnn.set_flags", + "torch.backends.cudnn.version", + "torch.backends.disable_global_flags", + "torch.backends.flags_frozen", + "torch.backends.mkl.is_available", + "torch.backends.mkldnn.flags", + "torch.backends.mkldnn.is_available", + "torch.backends.mkldnn.set_flags", + "torch.backends.mps._init", + "torch.backends.mps.is_available", + "torch.backends.mps.is_built", + "torch.backends.mps.is_macos13_or_newer", + "torch.backends.openmp.is_available", + "torch.backends.quantized._get_qengine_id", + "torch.backends.quantized._get_qengine_str", + "torch.block_diag", + "torch.broadcast_tensors", + "torch.cartesian_prod", + "torch.cdist", + "torch.chain_matmul", + "torch.compile", + "torch.compiled_with_cxx11_abi", + "torch.cpu._is_cpu_support_vnni", + "torch.cpu.current_device", + "torch.cpu.current_stream", + "torch.cpu.device_count", + "torch.cpu.is_available", + "torch.cpu.set_device", + "torch.cpu.stream", + "torch.cpu.synchronize", + "torch.cuda._check_capability", + "torch.cuda._check_cubins", + "torch.cuda._device_count_nvml", + "torch.cuda._get_device", + "torch.cuda._get_generator", + "torch.cuda._get_nvml_device_index", + "torch.cuda._get_pynvml_handler", + "torch.cuda._get_rng_state_offset", + "torch.cuda._is_compiled", + "torch.cuda._lazy_call", + "torch.cuda._lazy_init", + "torch.cuda._memory_viz._block_extra_legacy", + "torch.cuda._memory_viz._block_extra", + "torch.cuda._memory_viz._format_size", + "torch.cuda._memory_viz._format_viz", + "torch.cuda._memory_viz._frame_filter", + "torch.cuda._memory_viz._frame_fmt", + "torch.cuda._memory_viz._frames_fmt", + "torch.cuda._memory_viz._profile_to_snapshot", + "torch.cuda._memory_viz._report_free", + 
"torch.cuda._memory_viz._write_blocks", + "torch.cuda._memory_viz.calc_active", + "torch.cuda._memory_viz.compare", + "torch.cuda._memory_viz.format_flamegraph", + "torch.cuda._memory_viz.memory", + "torch.cuda._memory_viz.profile_plot", + "torch.cuda._memory_viz.segment_plot", + "torch.cuda._memory_viz.segments", + "torch.cuda._memory_viz.segsum", + "torch.cuda._memory_viz.trace_plot", + "torch.cuda._memory_viz.trace", + "torch.cuda._nvml_based_avail", + "torch.cuda._parse_visible_devices", + "torch.cuda._raw_device_count_nvml", + "torch.cuda._raw_device_uuid_nvml", + "torch.cuda._register_triton_kernels", + "torch.cuda._set_rng_state_offset", + "torch.cuda._set_stream_by_id", + "torch.cuda._sleep", + "torch.cuda._transform_uuid_to_ordinals", + "torch.cuda._utils._get_device_index", + "torch.cuda.amp.autocast_mode._cast", + "torch.cuda.amp.autocast_mode.custom_bwd", + "torch.cuda.amp.autocast_mode.custom_fwd", + "torch.cuda.amp.common.amp_definitely_not_available", + "torch.amp.grad_scaler._refresh_per_optimizer_state", + "torch.cuda.can_device_access_peer", + "torch.cuda.check_error", + "torch.cuda.clock_rate", + "torch.cuda.cudart", + "torch.cuda.current_blas_handle", + "torch.cuda.current_stream", + "torch.cuda.default_stream", + "torch.cuda.device_count", + "torch.cuda.get_arch_list", + "torch.cuda.get_device_capability", + "torch.cuda.get_device_name", + "torch.cuda.get_device_properties", + "torch.cuda.get_gencode_flags", + "torch.cuda.get_sync_debug_mode", + "torch.cuda.graphs.graph_pool_handle", + "torch.cuda.graphs.is_current_stream_capturing", + "torch.cuda.graphs.make_graphed_callables", + "torch.cuda.init", + "torch.cuda.ipc_collect", + "torch.cuda.is_available", + "torch.cuda.is_bf16_supported", + "torch.cuda.is_initialized", + "torch.cuda.jiterator._create_jit_fn", + "torch.cuda.jiterator._create_multi_output_jit_fn", + "torch.cuda.memory_usage", + "torch.cuda.memory._dump_snapshot", + "torch.cuda.memory._free_mutex", + "torch.cuda.memory._get_current_allocator", + "torch.cuda.memory._host_allocator", + "torch.cuda.memory._record_memory_history_impl", + "torch.cuda.memory._record_memory_history_legacy", + "torch.cuda.memory._record_memory_history", + "torch.cuda.memory._save_memory_usage", + "torch.cuda.memory._save_segment_usage", + "torch.cuda.memory._set_allocator_settings", + "torch.cuda.memory._snapshot", + "torch.cuda.memory.caching_allocator_alloc", + "torch.cuda.memory.caching_allocator_delete", + "torch.cuda.memory.change_current_allocator", + "torch.cuda.memory.empty_cache", + "torch.cuda.memory.get_allocator_backend", + "torch.cuda.memory.list_gpu_processes", + "torch.cuda.memory.max_memory_allocated", + "torch.cuda.memory.max_memory_cached", + "torch.cuda.memory.max_memory_reserved", + "torch.cuda.memory.mem_get_info", + "torch.cuda.memory.memory_allocated", + "torch.cuda.memory.memory_cached", + "torch.cuda.memory.memory_reserved", + "torch.cuda.memory.memory_snapshot", + "torch.cuda.memory.memory_stats_as_nested_dict", + "torch.cuda.memory.memory_stats", + "torch.cuda.memory.memory_summary", + "torch.cuda.memory.reset_accumulated_memory_stats", + "torch.cuda.memory.reset_max_memory_allocated", + "torch.cuda.memory.reset_max_memory_cached", + "torch.cuda.memory.reset_peak_memory_stats", + "torch.cuda.memory.set_per_process_memory_fraction", + "torch.cuda.nccl._check_sequence_type", + "torch.cuda.nccl.all_gather", + "torch.cuda.nccl.all_reduce", + "torch.cuda.nccl.broadcast", + "torch.cuda.nccl.init_rank", + "torch.cuda.nccl.is_available", + 
"torch.cuda.nccl.reduce_scatter", + "torch.cuda.nccl.reduce", + "torch.cuda.nccl.unique_id", + "torch.cuda.nccl.version", + "torch.cuda.nvtx.mark", + "torch.cuda.nvtx.range_end", + "torch.cuda.nvtx.range_pop", + "torch.cuda.nvtx.range_push", + "torch.cuda.nvtx.range_start", + "torch.cuda.nvtx.range", + "torch.cuda.power_draw", + "torch.cuda.profiler.init", + "torch.cuda.profiler.profile", + "torch.cuda.profiler.start", + "torch.cuda.profiler.stop", + "torch.cuda.random.get_rng_state_all", + "torch.cuda.random.initial_seed", + "torch.cuda.random.manual_seed_all", + "torch.cuda.random.manual_seed", + "torch.cuda.random.seed_all", + "torch.cuda.random.seed", + "torch.cuda.random.set_rng_state_all", + "torch.cuda.set_stream", + "torch.cuda.set_sync_debug_mode", + "torch.cuda.stream", + "torch.cuda.synchronize", + "torch.cuda.temperature", + "torch.cuda.utilization", + "torch.einsum", + "torch.functional._check_list_size", + "torch.functional._consecutive_return_counts", + "torch.functional._consecutive_return_inverse_false", + "torch.functional._consecutive_return_inverse_true", + "torch.functional._consecutive_return_inverse", + "torch.functional._consecutive_return_output", + "torch.functional._lu_impl", + "torch.functional._lu_no_infos", + "torch.functional._lu_with_infos", + "torch.functional._meshgrid", + "torch.functional._return_counts", + "torch.functional._return_inverse_false", + "torch.functional._return_inverse_true", + "torch.functional._return_inverse", + "torch.functional._return_output", + "torch.functional._unique_consecutive_impl", + "torch.functional._unique_impl", + "torch.functional._unravel_index", + "torch.functional.broadcast_shapes", + "torch.functional.lu", + "torch.functional.unique", + "torch.functional.unravel_index", + "torch.futures.collect_all", + "torch.futures.wait_all", + "torch.get_deterministic_debug_mode", + "torch.get_float32_matmul_precision", + "torch.is_deterministic_algorithms_warn_only_enabled", + "torch.is_storage", + "torch.is_tensor", + "torch.is_warn_always_enabled", + "torch.masked._ops._any", + "torch.masked._ops._apply_docstring_templates", + "torch.masked._ops._canonical_dim", + "torch.masked._ops._combine_input_and_mask", + "torch.masked._ops._generate_docstring", + "torch.masked._ops._input_mask", + "torch.masked._ops._output_mask", + "torch.masked._ops._reduction_identity", + "torch.masked._ops._sparse_coo_flatten_indices", + "torch.masked._ops._sparse_coo_scatter_reduction_helper", + "torch.masked._ops._sparse_coo_where", + "torch.masked._ops._sparse_csr_segment_reduction_helper", + "torch.masked._ops._sparse_csr_where", + "torch.masked._ops._std_var", + "torch.masked._ops._where", + "torch.masked._ops.amax", + "torch.masked._ops.amin", + "torch.masked._ops.argmax", + "torch.masked._ops.argmin", + "torch.masked._ops.corresponding_real_dtype", + "torch.masked._ops.cumprod", + "torch.masked._ops.cumsum", + "torch.masked._ops.log_softmax", + "torch.masked._ops.logaddexp", + "torch.masked._ops.logsumexp", + "torch.masked._ops.mean", + "torch.masked._ops.median", + "torch.masked._ops.norm", + "torch.masked._ops.normalize", + "torch.masked._ops.prod", + "torch.masked._ops.softmax", + "torch.masked._ops.softmin", + "torch.masked._ops.std", + "torch.masked._ops.sum", + "torch.masked._ops.var", + "torch.meshgrid", + "torch.mps._get_default_mps_generator", + "torch.mps.current_allocated_memory", + "torch.mps.driver_allocated_memory", + "torch.mps.empty_cache", + "torch.mps.get_rng_state", + "torch.mps.manual_seed", + "torch.mps.profiler.profile", + 
"torch.mps.profiler.start", + "torch.mps.profiler.stop", + "torch.mps.seed", + "torch.mps.set_per_process_memory_fraction", + "torch.mps.set_rng_state", + "torch.mps.synchronize", + "torch.nested._internal.nested_tensor.get_tensor_symint", + "torch.nested._internal.nested_tensor.is_expandable_to", + "torch.nested._internal.nested_tensor.jagged_from_list", + "torch.nested._internal.nested_tensor.jagged_from_tensor_and_lengths", + "torch.nested._internal.nested_tensor.nested_view_from_values_offsets", + "torch.nested._internal.nested_tensor.nested_view_from_values_offsets_lengths", + "torch.nested.as_nested_tensor", + "torch.nested.narrow", + "torch.nested.nested_tensor", + "torch.nn._reduction.get_enum", + "torch.nn._reduction.legacy_get_enum", + "torch.nn._reduction.legacy_get_string", + "torch.nn.factory_kwargs", + "torch.nn.functional._adaptive_max_pool1d", + "torch.nn.functional._adaptive_max_pool2d", + "torch.nn.functional._adaptive_max_pool3d", + "torch.nn.functional._canonical_mask", + "torch.nn.functional._fractional_max_pool2d", + "torch.nn.functional._fractional_max_pool3d", + "torch.nn.functional._get_softmax_dim", + "torch.nn.functional._in_projection_packed", + "torch.nn.functional._in_projection", + "torch.nn.functional._is_integer", + "torch.nn.functional._max_pool1d", + "torch.nn.functional._max_pool2d", + "torch.nn.functional._max_pool3d", + "torch.nn.functional._mha_shape_check", + "torch.nn.functional._no_grad_embedding_renorm_", + "torch.nn.functional._none_or_dtype", + "torch.nn.functional._threshold", + "torch.nn.functional._unpool_output_size", + "torch.nn.functional._verify_batch_size", + "torch.nn.functional._verify_spatial_size", + "torch.nn.functional.adaptive_avg_pool2d", + "torch.nn.functional.adaptive_avg_pool3d", + "torch.nn.functional.adaptive_max_pool1d_with_indices", + "torch.nn.functional.adaptive_max_pool1d", + "torch.nn.functional.adaptive_max_pool2d_with_indices", + "torch.nn.functional.adaptive_max_pool2d", + "torch.nn.functional.adaptive_max_pool3d_with_indices", + "torch.nn.functional.adaptive_max_pool3d", + "torch.nn.functional.affine_grid", + "torch.nn.functional.alpha_dropout", + "torch.nn.functional.assert_int_or_pair", + "torch.nn.functional.batch_norm", + "torch.nn.functional.binary_cross_entropy_with_logits", + "torch.nn.functional.binary_cross_entropy", + "torch.nn.functional.celu", + "torch.nn.functional.cosine_embedding_loss", + "torch.nn.functional.cross_entropy", + "torch.nn.functional.ctc_loss", + "torch.nn.functional.dropout", + "torch.nn.functional.dropout1d", + "torch.nn.functional.dropout2d", + "torch.nn.functional.dropout3d", + "torch.nn.functional.elu", + "torch.nn.functional.embedding_bag", + "torch.nn.functional.embedding", + "torch.nn.functional.feature_alpha_dropout", + "torch.nn.functional.fold", + "torch.nn.functional.fractional_max_pool2d_with_indices", + "torch.nn.functional.fractional_max_pool2d", + "torch.nn.functional.fractional_max_pool3d_with_indices", + "torch.nn.functional.fractional_max_pool3d", + "torch.nn.functional.gaussian_nll_loss", + "torch.nn.functional.glu", + "torch.nn.functional.grid_sample", + "torch.nn.functional.group_norm", + "torch.nn.functional.gumbel_softmax", + "torch.nn.functional.hardsigmoid", + "torch.nn.functional.hardswish", + "torch.nn.functional.hardtanh", + "torch.nn.functional.hinge_embedding_loss", + "torch.nn.functional.huber_loss", + "torch.nn.functional.instance_norm", + "torch.nn.functional.interpolate", + "torch.nn.functional.kl_div", + "torch.nn.functional.l1_loss", + 
"torch.nn.functional.layer_norm", + "torch.nn.functional.leaky_relu", + "torch.nn.functional.local_response_norm", + "torch.nn.functional.log_softmax", + "torch.nn.functional.lp_pool1d", + "torch.nn.functional.lp_pool2d", + "torch.nn.functional.margin_ranking_loss", + "torch.nn.functional.max_pool1d_with_indices", + "torch.nn.functional.max_pool1d", + "torch.nn.functional.max_pool2d_with_indices", + "torch.nn.functional.max_pool2d", + "torch.nn.functional.max_pool3d_with_indices", + "torch.nn.functional.max_pool3d", + "torch.nn.functional.max_unpool1d", + "torch.nn.functional.max_unpool2d", + "torch.nn.functional.max_unpool3d", + "torch.nn.functional.mish", + "torch.nn.functional.mse_loss", + "torch.nn.functional.multi_head_attention_forward", + "torch.nn.functional.multi_margin_loss", + "torch.nn.functional.multilabel_margin_loss", + "torch.nn.functional.multilabel_soft_margin_loss", + "torch.nn.functional.nll_loss", + "torch.nn.functional.normalize", + "torch.nn.functional.poisson_nll_loss", + "torch.nn.functional.relu", + "torch.nn.functional.relu6", + "torch.nn.functional.rrelu", + "torch.nn.functional.selu", + "torch.nn.functional.sigmoid", + "torch.nn.functional.silu", + "torch.nn.functional.smooth_l1_loss", + "torch.nn.functional.soft_margin_loss", + "torch.nn.functional.softmax", + "torch.nn.functional.softmin", + "torch.nn.functional.softsign", + "torch.nn.functional.tanh", + "torch.nn.functional.tanhshrink", + "torch.nn.functional.triplet_margin_loss", + "torch.nn.functional.unfold", + "torch.nn.functional.upsample_bilinear", + "torch.nn.functional.upsample_nearest", + "torch.nn.functional.upsample", + "torch.nn.grad._pair", + "torch.nn.grad._single", + "torch.nn.grad._triple", + "torch.nn.grad.conv1d_input", + "torch.nn.grad.conv1d_weight", + "torch.nn.grad.conv2d_input", + "torch.nn.grad.conv2d_weight", + "torch.nn.grad.conv3d_input", + "torch.nn.grad.conv3d_weight", + "torch.nn.modules.activation._arg_requires_grad", + "torch.nn.modules.activation._check_arg_device", + "torch.nn.modules.activation._is_make_fx_tracing", + "torch.nn.modules.container._addindent", + "torch.nn.modules.transformer._detect_is_causal_mask", + "torch.nn.modules.transformer._generate_square_subsequent_mask", + "torch.nn.modules.transformer._get_activation_fn", + "torch.nn.modules.transformer._get_clones", + "torch.nn.modules.transformer._get_seq_len", + "torch.nn.modules.utils._list_with_default", + "torch.nn.modules.utils._ntuple", + "torch.nn.modules.utils._quadruple", + "torch.nn.modules.utils._reverse_repeat_tuple", + "torch.nn.modules.utils.consume_prefix_in_state_dict_if_present", + "torch.nn.parameter.is_lazy", + "torch.norm", + "torch.quantization.default_eval_fn", + "torch.random._seed_custom_device", + "torch.random.fork_rng", + "torch.random.initial_seed", + "torch.random.seed", + "torch.return_types.pytree_register_structseq", + "torch.set_default_device", + "torch.set_default_dtype", + "torch.set_default_tensor_type", + "torch.set_deterministic_debug_mode", + "torch.set_float32_matmul_precision", + "torch.set_warn_always", + "torch.signal.windows.windows._add_docstr", + "torch.signal.windows.windows._window_function_checks", + "torch.signal.windows.windows.bartlett", + "torch.signal.windows.windows.blackman", + "torch.signal.windows.windows.cosine", + "torch.signal.windows.windows.exponential", + "torch.signal.windows.windows.gaussian", + "torch.signal.windows.windows.general_cosine", + "torch.signal.windows.windows.general_hamming", + "torch.signal.windows.windows.hamming", + 
"torch.signal.windows.windows.hann", + "torch.signal.windows.windows.kaiser", + "torch.signal.windows.windows.merge_dicts", + "torch.signal.windows.windows.nuttall", + "torch.signal.windows.windows.parse_kwargs", + "torch.sparse.semi_structured.to_sparse_semi_structured", + "torch.sparse.sum", + "torch.split", + "torch.stft", + "torch.sym_float", + "torch.sym_int", + "torch.sym_ite", + "torch.sym_max", + "torch.sym_min", + "torch.sym_not", + "torch.tensordot", + "torch.typename", + "torch.unique_consecutive", + "torch.use_deterministic_algorithms", + ], + TorchInGraphFunctionVariable, +) + + +torch_name_rule_map = [ + manual_torch_name_rule_map, + torch_c_binding_in_graph_functions, + torch_non_c_binding_in_graph_functions, +] + + +""" +Generate the torch object - Dynamo tracing rule (the wrapping variable) map. +""" + + +@functools.lru_cache(None) +def get_torch_obj_rule_map(): + d: Dict[Any, VariableTracker] = dict() + for m in torch_name_rule_map: + for k, v in m.items(): # type: ignore[attr-defined] + obj = load_object(k) + if obj is not None: + if obj in d and d[obj] != v: + raise AssertionError( + f"Duplicate torch object {obj} with different rules: {v}, {d[obj]}" + ) + else: + d[obj] = v + return d + + +def _load_obj_from_str(fully_qualified_name): + module, obj_name = fully_qualified_name.rsplit(".", maxsplit=1) + return getattr(importlib.import_module(module), obj_name) + + +""" +Load string represented torch objects. +""" + + +def load_object(name): + try: + x = name.split("#") + if len(x) == 2: + obj = _load_obj_from_str(x[0]) + val = getattr(obj, x[1]) + else: + assert len(x) == 1, f"Invalid obj name {name}" + val = _load_obj_from_str(x[0]) + val = unwrap_if_wrapper(val) + except (AttributeError, ImportError): + val = None + return val + + +""" +Get all torch.Tensor methods which are allowed to be in graph functions. +""" + + +@functools.lru_cache(None) +def get_tensor_method(): + s = set() + for name in dir(torch.Tensor): + method = getattr(torch.Tensor, name) + if isinstance( + method, (types.MethodDescriptorType, types.WrapperDescriptorType) + ): + s.add(method) + return frozenset(s) + + +""" +Return if a torch object is ATen op or torch.Tensor method. +""" + + +def is_aten_op_or_tensor_method(obj): + return obj in get_tensor_method() or isinstance( + obj, + (torch._ops.OpOverloadPacket, torch._ops.OpOverload), + ) + + +class FunctionIdSet: + """ + Track a set of `id()`s of objects which are either allowed or not + allowed to go into the generated FX graph. Use to test for torch.*, + numpy.*, builtins.*, etc. + + Support user modification to permit customization of what can be + added to the graph and what will cause a graph break. 
+ """ + + function_ids: Optional[Set[int]] = None + function_names: Optional[Dict[int, str]] = None + + def __init__(self, lazy_initializer: Callable[[], Union[Dict[int, str], Set[int]]]): + self.lazy_initializer = lazy_initializer + + def __call__(self): + if self.function_ids is None: + value = self.lazy_initializer() + if isinstance(value, dict): + self.function_ids = set(value.keys()) + self.function_names = value + else: + assert isinstance(value, set) + self.function_ids = value + return self.function_ids + + def get_name(self, idx: int, default: str): + self() # lazy init + assert self.function_names is not None + return self.function_names.get(idx, default) + + def add(self, idx: int): + function_ids = self() # lazy init + function_ids.add(idx) + + def remove(self, idx: int): + function_ids = self() + if idx in function_ids: + function_ids.remove(idx) + + def __contains__(self, idx: int): + return idx in self() + + +@FunctionIdSet +def _allowed_callable_ids() -> Dict[int, str]: + rv: Dict[int, str] = {} + return rv + + +@FunctionIdSet +def _disallowed_callable_ids() -> Dict[int, str]: + rv: Dict[int, str] = {} + return rv + + +@FunctionIdSet +def _builtin_function_ids() -> Dict[int, str]: + rv = { + id(v): f"builtins.{k}" + for k, v in builtins.__dict__.items() + if not k.startswith("_") and callable(v) + } + rv.update( + { + id(v): f"operator.{k}" + for k, v in operator.__dict__.items() + if not k.startswith("_") and callable(v) + } + ) + rv.update( + {id(v): f"functools.{v.__name__}" for v in (itertools.chain, itertools.islice)} + ) + rv.update( + { + id(cast): "typing.cast", + id(functools.reduce): "functools.reduce", + id(copy.deepcopy): "copy.deepcopy", + } + ) + return rv + + +@FunctionIdSet +def _numpy_function_ids() -> Dict[int, str]: + rv = dict() + for mod in NP_SUPPORTED_MODULES: + rv.update( + { + id(v): f"{mod.__name__}.{k}" + for k, v in mod.__dict__.items() + if callable(v) + and (getattr(v, "__module__", None) or mod.__name__) == mod.__name__ + } + ) + return rv + + +@FunctionIdSet +def _builtin_constant_ids() -> Dict[int, str]: + """ + Collects constant builtins by eliminating callable items. + """ + rv = { + id(v): f"builtins.{k}" + for k, v in builtins.__dict__.items() + if not k.startswith("_") and not callable(v) + } + return rv + + +_lazy_module_init: Dict[str, List[Callable[[], None]]] = defaultdict(list) + + +def add_module_init_func(name: str, init_func: Callable[[], None]) -> None: + """Register a module without eagerly importing it""" + # If the module is already imported, eagerly run init + assert "." 
not in name, f"Expected a root module name, but got {name}" + if name in sys.modules: + init_func() + + # Module is not yet imported, delay processing until needed + assert name not in _lazy_module_init + _lazy_module_init[name].append(init_func) + + +def _maybe_init_lazy_module(obj: object) -> None: + module = getattr(obj, "__module__", None) + if module is None: + return + + base_module = module.split(".")[0] + init_funcs = _lazy_module_init.pop(base_module, None) + if init_funcs is not None: + for fn in init_funcs: + fn() + + +def is_callable_allowed(obj) -> bool: + _maybe_init_lazy_module(obj) + return id(obj) in _allowed_callable_ids + + +def is_callable_disallowed(obj) -> bool: + _maybe_init_lazy_module(obj) + return id(obj) in _disallowed_callable_ids + + +def is_forbidden(obj) -> bool: + _maybe_init_lazy_module(obj) + return getattr(obj, "_dynamo_forbidden", False) + + +def is_builtin_callable(obj) -> bool: + return id(obj) in _builtin_function_ids + + +def is_builtin_constant(obj) -> bool: + return id(obj) in _builtin_constant_ids + + +def is_numpy(obj) -> bool: + if np is None: + return False + return isinstance(obj, (np.ndarray, np.generic)) or id(obj) in _numpy_function_ids + + +""" +A note on skip/inline rules: + +Dynamo consults this file to determine whether function should be inlined or skipped. + +A skip applies at the frame boundary, meaning dynamo either triggers a graph break +at the beginning of the frame or attempts to trace/inline the whole frame. When skipping +a frame, recursively called frames are still traced by dynamo unless also skipped. + +Skipfiles (skipped at the file level instead of function level) still apply on a +frame-by-frame boundary as dynamo traces, but apply to all functions in that file. + +@skip is a helper decorator that can be applied to your function to cause it to be +included here. + +Dynamo skip/inline rules & priorities are defined as follows: +* Inline is the default behavior and will be used unless explicitly skipped. +* Dynamo has two SKIPLIST: BUILTIN_SKIPLIST and THIRDPARTY_SKIPLIST. + * BUILTIN_SKIPLIST contains builtin python modules, such as abc, collections, etc. + * THIRDPARTY_SKIPLIST contains common third party libraries, such as numpy, pandas, etc. +* Functions in these two SKIPLISTs are always skipped, except: + * They have explicitly defined rule in `manual_torch_name_rule_map`; + * The corresponding python module has been put into MOD_INLINELIST. +* PyTorch(torch) is in the BUILTIN_SKIPLIST by default, but there are many cases + where we want inline the functions under torch namespace. + We should specify inline for the functions in `manual_torch_name_rule_map` or + put the corresponding python module into MOD_INLINELIST to make dynamo inline them. +* If you call functions under skipped modules/files, Dynamo will wrap these functions + as SkipFunctionVariable. There are a few functions(e.g, collections.OrderedDict) that + we have special handling at SkipFunctionVariable.call_function. + +Overall: *_INLINELIST has precedence over *_SKIPLIST has precedence over DEFAULT (inline) + +To figure out what the behavior is, check the following list in order: +* `manual_torch_name_rule_map` (Inline if YES) +* MOD_INLINELIST (Inline if YES) +* BUILTIN_SKIPLIST & THIRDPARTY_SKIPLIST (Skip if YES) +* Inline by default + +In general, if you want to force inline a function or module, please consider adding +the function's python module to MOD_INLINELIST first. 
+Use the `manual_torch_name_rule_map` only when there are other functions under the same module that +you don't want to inline them. +""" + + +BUILTIN_SKIPLIST = ( + abc, + collections, + contextlib, + copy, + copyreg, + dataclasses, + enum, + functools, + importlib, + inspect, + linecache, + logging, + multiprocessing, + operator, + os, + posixpath, + random, + re, + selectors, + signal, + tempfile, + threading, + tokenize, + torch, # torch/* is skipped by default unless specified in FUNC_INLINELIST or MOD_INLINELIST + traceback, + types, + typing, + unittest, + weakref, + _collections_abc, + _weakrefset, +) + +# third party libraries skiplist is defined by str, because users may not use these libraries. +# we should use lazy import & skip in the future. +THIRDPARTY_SKIPLIST = ( + "fx2trt_oss", + "hypothesis", + "networkx", + "numpy", + "omegaconf", + "onnx", + "onnxruntime", + "onnx_tf", + "pandas", + "sklearn", + "tabulate", + "tensorflow", + "tensorrt", + "torch2trt", + "tqdm", + "tree", + "tvm", + "xarray", +) + + +def _strip_init_py(s): + # TODO: Once we require py3.9 use removesuffix instead. + suffix = "__init__.py" + if s.endswith(suffix): + return s[: -len(suffix)] + else: + return s + + +def _module_dir(m: types.ModuleType): + # Protect against a module not exporting __file__ - this can happen for + # frozen modules, for example. + file = getattr(m, "__file__", None) + return file and _strip_init_py(file) + + +# These are legacy workarounds, don't add new modules to this list. +# Please use the MOD_INLINELIST instead to force inline functions under particular modules. +LEGACY_MOD_INLINELIST = { + "torch._dynamo.external_utils", + "torch._export.db.examples", + "torch._export.wrappers", + "torch._functorch.apis", + "torch._functorch.deprecated", + "torch._higher_order_ops.cond", + "torch.ao.quantization.pt2e.export_utils", + "torch.ao.quantization.pt2e.qat_utils", + "torch.ao.quantization.pt2e.representation.rewrite", + "torch.ao.quantization.pt2e.utils", + "torch.ao.quantization.quantizer.xnnpack_quantizer", + "torch.optim", +} + +if torch.distributed.is_available(): + LEGACY_MOD_INLINELIST |= { + "torch.distributed._tensor.api", + "torch.distributed._tensor.device_mesh", + "torch.distributed.device_mesh", + "torch.distributed.algorithms._checkpoint.checkpoint_wrapper", + "torch.distributed.tensor.parallel._data_parallel_utils", + "torch.distributed.tensor.parallel._utils", + "torch.distributed.tensor.parallel.style", + # we have to add replicate to LEGACY_MOD_INLINELIST to ensure + # the forward_hook won't be ignored. + "torch.distributed._composable.replicate", + } + + +# Force inline functions under these modules, even they are in *_SKIPLIST. +# We are using python module name instead of file or directory object to avoid circular dependency. +# Please keep this sorted alphabetically. 
+MOD_INLINELIST = { + "torch._refs", + "torch._prims", + "torch._decomp", + "torch._dynamo._trace_wrapped_higher_order_op", + "torch._dynamo.comptime", + "torch._dynamo.polyfill", + "torch._functorch.vmap", + "torch._functorch.eager_transforms", + "torch._inductor.test_operators", + "torch.amp.autocast_mode", + "torch.ao.nn", + "torch.autograd.function", + "torch.backends.cuda", + "torch.cuda.amp.autocast_mode", + "torch.distributions", + "torch.fx._pytree", + "torch.fx.passes.shape_prop", + "torch.nn", + "torch.random", + "torch.sparse", + "torch.testing", + "torch.testing._internal.hypothesis_utils", + "torch.utils._content_store", + "torch.utils._contextlib", + "torch.utils._foreach_utils", + "torch.utils._pytree", + "torch.utils.hooks", + "torch._tensor", + "torch._higher_order_ops.strict_mode", + "torch._higher_order_ops.while_loop", +} + + +if torch.distributed.is_available(): + MOD_INLINELIST.add("torch.distributed") + MOD_INLINELIST.add("torch.distributed._functional_collectives") + MOD_INLINELIST.add("torch.distributed._composable.replicate") + + +@functools.lru_cache(None) +def get_legacy_mod_inlinelist(): + inlinelist = set() + for m in LEGACY_MOD_INLINELIST: + inlinelist.add(_module_dir(torch) + m[len("torch.") :].replace(".", "/")) + return inlinelist + + +@functools.lru_cache(None) +def get_mod_inlinelist(): + inlinelist = set() + for m in MOD_INLINELIST: + inlinelist.add(_module_dir(torch) + m[len("torch.") :].replace(".", "/")) + return inlinelist + + +# skip some standard python builtin libs +SKIP_DIRS = [ + "", + _config_module.__file__, +] +SKIP_DIRS.extend(filter(None, (_module_dir(m) for m in BUILTIN_SKIPLIST))) + +SKIP_DIRS_RE = re.compile(r"match nothing^") + +is_fbcode = importlib.import_module("torch._inductor.config").is_fbcode() +# Skip fbcode paths(including torch.package paths) containing +# one of the following strings. 
+FBCODE_SKIP_DIRS = { + "torchrec/distributed", + "torchrec/fb/distributed", + "caffe2/torch/fb/sparsenn/pooled_embeddings_modules.py", +} +FBCODE_SKIP_DIRS_RE = re.compile(f".*({'|'.join(map(re.escape, FBCODE_SKIP_DIRS))})") + + +def _recompile_re(): + global SKIP_DIRS_RE + SKIP_DIRS_RE = re.compile(f"^({'|'.join(map(re.escape, SKIP_DIRS))})") + + +def add(import_name: str): + if isinstance(import_name, types.ModuleType): + return add(import_name.__name__) + assert isinstance(import_name, str) + from importlib.util import find_spec + + module_spec = find_spec(import_name) + if not module_spec: + return + origin = module_spec.origin + if origin is None: + return + global SKIP_DIRS_RE + SKIP_DIRS.append(_strip_init_py(origin)) + _recompile_re() + + +@dataclasses.dataclass +class SkipResult: + skipped: bool + reason: Optional[str] + + +def check_file(filename, is_inlined_call=False): + """Should skip this file?""" + if filename is None: + return SkipResult(True, "filename is None") + if any(filename.startswith(d) for d in get_legacy_mod_inlinelist()): + return SkipResult( + False, + "inlined according trace_rules.LEGACY_MOD_INLINELIST", + ) + if is_inlined_call and is_torch_inline_allowed(filename): + return SkipResult( + False, + "inlined according trace_rules.MOD_INLINELIST", + ) + if is_fbcode and bool(FBCODE_SKIP_DIRS_RE.match(filename)): + return SkipResult( + True, + "skipped according trace_rules.FBCODE_SKIP_DIRS", + ) + if bool(SKIP_DIRS_RE.match(filename)): + return SkipResult(True, "skipped according trace_rules.SKIP_DIRS") + else: + return SkipResult(False, "inlined by default") + + +@dataclasses.dataclass +class FunctionInfo: + py_obj: Optional[object] + name: Optional[str] + filename: str + code: Optional[types.CodeType] + + +""" +This is the main entry point to determine whether an object (function) should be inlined or skipped. +Let's illustrate the logic with an example: + @torch.compile + def f1(x, y): + ...... + f2(x, y) + ...... + + def f2(x, y): + ...... + f3(x, y) + ...... + + def f3(x, y): + ...... + +There are mainly three call sites of check/check_verbose: +* The compile region entrance (like function f1), the correspoinding code is located at eval_frame.py. +* When tracing the recursively called functions (like function f2 and f3). + * Dynamo decides inline/skip everytime it encounters a new recursively function call, and the call site + is in InliningInstructionTranslator.check_inlineable of symbolic_convert.py. + * If f2 is skipped by Dynamo, when evaluating the frame of f3, Dynamo need the inline/skip check again + and the call site is in catch_errors_wrapper.catch_errors of convert_frame.py. +* For global variables and function arguments, Dynamo needs to decide if they are wrapped as SkipFunctionVariable in builder.py. + +`is_inlined_call` is used to indicate if the current function call is inlined (f2 is inlined call if it passes check) +or not (f3 is not inlined call if f2 is skipped). Inside of the `check_verbose` function, there are more rules +to be checked if this `is_inlined_call`. +The reason to have this flag is that if the upper level function call (e.g, f2) is skipped, +we don't want to inline the lower level function call (e.g, f3) by default. 
+""" + + +def check_verbose(obj, is_inlined_call=False): + if isinstance( + obj, (UserFunctionVariable, UserMethodVariable, NestedUserFunctionVariable) + ): + try: + py_obj = obj.get_function() + except NotImplementedError: + py_obj = None + fi = FunctionInfo(py_obj, obj.get_name(), obj.get_filename(), obj.get_code()) + elif isinstance(obj, types.CodeType): + fi = FunctionInfo(None, obj.co_name, obj.co_filename, obj) + elif isinstance(obj, (types.FunctionType, types.MethodType)): + fi = FunctionInfo( + obj, obj.__name__, getfile(obj), obj.__code__ # type: ignore[union-attr] # FIXME Add MethodType.__code__ to typeshed + ) + else: + fi = FunctionInfo(obj, None, getfile(obj), None) + + # Consulte the central trace rules defined in torch._dynamo.trace_rules. + rule = torch._dynamo.trace_rules.lookup_inner( + fi.py_obj, fi.name, fi.filename, is_inlined_call + ) + if rule in [UserFunctionVariable, FunctorchHigherOrderVariable]: + return SkipResult( + False, + "inlined according trace_rules.lookup", + ) + else: + assert rule == SkipFunctionVariable, rule + return SkipResult( + True, + "skipped according trace_rules.lookup", + ) + + +def check(obj, is_inlined_call=False): + return check_verbose(obj, is_inlined_call).skipped + + +# skip common third party libs +for _name in THIRDPARTY_SKIPLIST: + add(_name) + +_recompile_re() + + +def is_torch_inline_allowed(filename): + return any(filename.startswith(d) for d in get_mod_inlinelist()) + + +@functools.lru_cache(None) +def dynamo_dir(): + import torch._dynamo + + return _module_dir(torch._dynamo) + + +def is_torch(filename): + if filename.startswith(dynamo_dir()): + return False + return filename.startswith(_module_dir(torch)) + + +""" +Main entry point for looking up the trace rule (the Dynamo variable) for a given callable object. +""" + + +def lookup_callable(obj): + if not hashable(obj): + return None + # Custom allow/disallow in graph takes precedence over the general lookup. + if is_callable_disallowed(obj): + return SkipFunctionVariable + if is_callable_allowed(obj): + return TorchInGraphFunctionVariable + if is_builtin_callable(obj): + return BuiltinVariable + + +""" +Main entry point for looking up the trace rule (the Dynamo variable) for a given function object. +E.g, the lookup result of `torch.sin` is `TorchInGraphFunctionVariable`. +""" + + +def lookup(obj): + return lookup_inner(obj) + + +def lookup_inner(obj, name=None, filename=None, is_direct_call=True): + # Step 1: lookup obj's tracing rule in `torch_name_rule_map`. + # The rules defined in `torch_name_rule_map` mainly includes two parts: + # - Manually defined rules for any functions. + # - The list of torch in graph functions. + if not hashable(obj): + return None + if obj is not None: + if is_aten_op_or_tensor_method(obj): + return TorchInGraphFunctionVariable + rule = get_torch_obj_rule_map().get(obj, None) + if rule is not None: + return rule + + # Step 2: lookup obj's tracing rule by function name. + if is_direct_call: + if name == "patched_init": + return SkipFunctionVariable + elif name == "__torch_function__": + return UserFunctionVariable + + # Step 3: lookup obj's tracing rule by filename. 
+ if filename is None: + filename = getfile(obj) + + if check_file(filename, is_direct_call).skipped: + return SkipFunctionVariable + else: + return UserFunctionVariable diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/types.py b/venv/lib/python3.10/site-packages/torch/_dynamo/types.py new file mode 100644 index 0000000000000000000000000000000000000000..7156ec55e70263f6afb3df1be35b30b7a4a917c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/types.py @@ -0,0 +1,99 @@ +import dataclasses +import sys +import types +from typing import Any, Callable, Dict, List, NamedTuple, Optional, Protocol, Union + +from typing_extensions import TypeAlias + + +if sys.version_info >= (3, 11): + from torch._C._dynamo import eval_frame + + DynamoFrameType: TypeAlias = eval_frame._PyInterpreterFrame +else: + DynamoFrameType: TypeAlias = types.FrameType + +import torch + +# This class has a `check_fn` field for the guard, +# and a `code` field for the code object. +CacheEntry = torch._C._dynamo.eval_frame._CacheEntry + +ExtraState = torch._C._dynamo.eval_frame._ExtraState + +# We use a dict to store additional data per frame. +FrameState = Dict[Any, Any] + + +class GuardFail(NamedTuple): + # A string repr of the piece of failed guard code we eval-ed + reason: str + # A code object where we failed a guard + orig_code: types.CodeType + + +class GuardFn(Protocol): + closure_vars: Dict[str, object] + args: List[str] + code_parts: List[str] + verbose_code_parts: List[str] + global_scope: Dict[str, object] + guard_fail_fn: Optional[Callable[[GuardFail], None]] + cache_entry: Optional[CacheEntry] + extra_state: Optional[ExtraState] + + # maps locals of user function to bool + def __call__(self, f_locals: Dict[str, object]) -> bool: + ... + + +@dataclasses.dataclass +class GuardedCode: + code: types.CodeType + check_fn: GuardFn + + +class DynamoCallbackFn(Protocol): + def __call__( + self, + frame: DynamoFrameType, + cache_entry: Optional[CacheEntry], + frame_state: FrameState, + ) -> Optional[GuardedCode]: + ... + + +DynamoCallback = Union[DynamoCallbackFn, None, bool] + + +class DynamoGuardHook(Protocol): + def __call__( + self, + guard_fn: GuardFn, + code: types.CodeType, + f_locals: Dict[str, object], + index: int, + last: bool, + ) -> None: + ... + + +class ProfilerStartHook(Protocol): + def __call__( + self, + name: str, + # TODO(whc) how do I annotate a _RecordFunction here? + ) -> Any: + ... + + +class ProfilerEndHook(Protocol): + def __call__(self, record: Any) -> None: + ... + + +class BytecodeHook(Protocol): + def __call__( + self, code: types.CodeType, new_code: types.CodeType + ) -> Optional[types.CodeType]: + ... 
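The GuardFail / GuardFn protocol in types.py above describes the payload Dynamo hands to user code when a cached guard check fails. As a rough usage sketch (not part of this diff), assuming `torch._dynamo.optimize` forwards a `guard_fail_fn` callback into the `guard_fail_fn` slot declared on GuardFn; only GuardFail/GuardFn themselves come from the patched file, the wiring below is an assumption for illustration:

import torch
import torch._dynamo as dynamo

def on_guard_fail(failure):
    # `failure` follows the GuardFail NamedTuple above: a human-readable reason
    # string plus the original code object whose guard was violated.
    print(f"guard failed in {failure.orig_code.co_name}: {failure.reason}")

@dynamo.optimize("eager", guard_fail_fn=on_guard_fail)  # guard_fail_fn kwarg assumed
def fn(x):
    return x + 1

fn(torch.randn(3))
fn(torch.randn(4, 4))  # a shape change may fail the cached guard and trigger a recompile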
diff --git a/venv/lib/python3.10/site-packages/torch/_dynamo/utils.py b/venv/lib/python3.10/site-packages/torch/_dynamo/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f97acb33086b92c2b38d2b71e083a3ab24669866 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dynamo/utils.py @@ -0,0 +1,2548 @@ +import atexit +import collections +import contextlib +import copy +import cProfile +import dataclasses +import datetime +import dis +import enum +import functools +import gc +import inspect +import itertools +import linecache +import logging +import math +import operator +import os +import pstats +import re +import subprocess +import sys +import textwrap +import threading +import time +import types +import typing +import weakref +from contextlib import contextmanager +from functools import lru_cache, wraps +from pathlib import Path +from types import MethodWrapperType +from typing import ( + Any, + Callable, + cast, + ClassVar, + Counter, + DefaultDict, + Deque, + Dict, + Iterator, + KeysView, + List, + Optional, + Set, + Tuple, + Type, + Union, + ValuesView, +) + +from ..utils.hooks import RemovableHandle + +try: + import numpy as np +except ModuleNotFoundError: + np = None # type: ignore[assignment] + +try: + import torch._logging + import torch._numpy as tnp + from torch._guards import detect_fake_mode # noqa: F401n + from torch._logging import LazyString + from . import config + + # NOTE: Make sure `NP_SUPPORTED_MODULES` and `NP_TO_TNP_MODULE` are in sync. + if np: + NP_SUPPORTED_MODULES: Tuple[types.ModuleType, ...] = ( + np, + np.fft, + np.linalg, + np.random, + ) + + NP_TO_TNP_MODULE = { + np: tnp, + np.fft: tnp.fft, + np.linalg: tnp.linalg, + np.random: tnp.random, + } + else: + NP_SUPPORTED_MODULES = tuple() + + NP_TO_TNP_MODULE = {} + from torch._subclasses.fake_tensor import FakeTensor, is_fake, maybe_get_fake_mode +except ImportError: + pass + +import importlib + +import torch +import torch._functorch.config +import torch.fx.experimental.symbolic_shapes +from torch import fx +from torch._dispatch.python import enable_python_dispatcher +from torch._utils_internal import log_compilation_event + +from torch.nn.modules.lazy import LazyModuleMixin +from torch.utils._pytree import tree_map_only + + +counters: DefaultDict[str, Counter[str]] = collections.defaultdict(collections.Counter) +optimus_scuba_log: Dict[str, Any] = {} +troubleshooting_url = "https://pytorch.org/docs/master/compile/troubleshooting.html" +nnmodule_doc_url = "https://pytorch.org/docs/master/compile/nn-module.html" +nnmodule_doc_url_msg = f"See {nnmodule_doc_url} for more information and limitations." 
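# Illustrative usage sketch (not part of the patched utils.py): the `counters`
# structure defined above is a defaultdict mapping a category string to a Counter
# of messages. Graph-break reasons accumulate under counters["graph_break"], which
# is what CompileProfiler.report() (further below in this file) tabulates.

import torch
from torch._dynamo.utils import counters

@torch.compile
def f(x):
    print("side effect")  # printing is a typical cause of a graph break
    return x.sin()

f(torch.randn(4))
for reason, count in counters["graph_break"].items():
    print(count, reason)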
+log = logging.getLogger(__name__) + +# profiling compilation time by function +compilation_time_metrics: Dict[str, List[float]] = {} + +# profiling compilation time by frame phase +frame_phase_timing: Dict[str, Dict[str, float]] = {} + +timer_counter = itertools.count() + + +def tabulate(rows, headers): + try: + import tabulate + + return tabulate.tabulate(rows, headers=headers) + except ImportError: + return "\n".join( + ", ".join(map(str, row)) for row in itertools.chain([headers], rows) + ) + + +def maybe_cprofile(func): + if config.cprofile: + return cprofile_wrapper(func) + return func + + +def cprofile_wrapper(func): + @wraps(func) + def profile_wrapper(*args, **kwargs): + global timer_counter + profile_cnt = next(timer_counter) + profile_path = Path(func.__name__ + f"{profile_cnt}.profile") + prof = cProfile.Profile() + prof.enable() + start_ts = time.time() + retval = prof.runcall(func, *args, **kwargs) + profile_latency = time.time() - start_ts + prof.disable() + print( + f"### Cprofile for {func.__name__} iter {profile_cnt} took {profile_latency:.3f} seconds ###" + ) + ps = pstats.Stats(prof) + prof.dump_stats(profile_path) + svg_path = profile_path.with_suffix(".svg") + try: + gprof2dot_process = subprocess.Popen( + [ + "gprof2dot", + "-f", + "pstats", + "--node-label=total-time-percentage", + "--node-label=self-time-percentage", + "--node-label=total-time", + str(profile_path), + ], + stdout=subprocess.PIPE, + ) + subprocess.check_call( + ["dot", "-Tsvg", "-o", str(svg_path)], + stdin=gprof2dot_process.stdout, + ) + print(f"Generated SVG from profile at {str(svg_path)}") + except FileNotFoundError: + print( + "Failed to generate SVG from profile -- dumping stats instead." + "Try installing gprof2dot and dot for a better visualization" + ) + ps.sort_stats(pstats.SortKey.TIME).print_stats(20) + ps.sort_stats(pstats.SortKey.CUMULATIVE).print_stats(20) + return retval + + return profile_wrapper + + +curr_frame = 0 + + +# Note: Called for you by dynamo - you almost never ever want to invoke this yourself. +def increment_frame(): + global curr_frame + curr_frame = curr_frame + 1 + + +# Note: Called for you by dynamo - you almost never ever want to invoke this yourself. +def reset_frame_count(): + global curr_frame + frame_phase_timing.clear() + compilation_time_metrics.clear() + curr_frame = 0 + + +op_count = 0 + + +def increment_op_count(cnt): + global op_count + op_count += cnt + + +# Print a report of time spent so far +# Ex: +# TIMING: +# entire_frame_compile:8.574629999999999 +# backend_compile:5.26806 +def print_time_report(): + total = 0.0 + total_by_key = {} + for timings in frame_phase_timing.values(): + for key, timing in timings.items(): + total += timing + if key not in total_by_key: + total_by_key[key] = timing + else: + total_by_key[key] += timing + + out = "TIMING:" + for key, value in total_by_key.items(): + out = f"{out} {key}:{round(value, 5)}" + + print(out) + + +# dynamo_timed API works as a function decorator +# By wrapping a function in dynamo_timed, we can store a record in compilation_time_metrics +# where the key is the functions name. +# For example: +# +# @dynamo_timed +# def _foo(...): +# +# Would show up as an entry in our timing dict: +# OrderedDict([('bar.._foo', [0.083690, 0.23949, 3.1425e-05])]) +# This is extremely useful for granular debugging. +# +# For a higher-level mode, pass a phase_name into dynamo_timed +# phase_names record an extra record into a separate compilation timing structure, +# one keyed on frame+name rather than function. 
+# The frame is incremented outside of this function, in def increment_frame() above. + + +def dynamo_timed(original_function=None, phase_name=None): + def dynamo_timed_inner(func): + if config.cprofile: + return func + + @wraps(func) + def time_wrapper(*args, **kwargs): + key = func.__qualname__ + if key not in compilation_time_metrics: + compilation_time_metrics[key] = [] + with torch.profiler.record_function(f"{key} (dynamo_timed)"): + t0 = time.time() + r = func(*args, **kwargs) + time_spent = time.time() - t0 + compilation_time_metrics[key].append(time_spent) + if phase_name: + frame_key = str(curr_frame) + if frame_key not in frame_phase_timing: + frame_phase_timing[frame_key] = {} + if phase_name not in frame_phase_timing[frame_key]: + frame_phase_timing[frame_key][phase_name] = time_spent + else: + frame_phase_timing[frame_key][phase_name] += time_spent + return r + + return time_wrapper + + if original_function: + return dynamo_timed_inner(original_function) + return dynamo_timed_inner + + +def compile_times(repr="str", aggregate=False): + """ + Get metrics about torchdynamo frontend/backend compilation times. + + Accumulates information from functions tagged with `@dynamo_timed`. + + repr='str' returns a printable string for user interaction, and 'csv' + returns headers, rows which can be logged for output + + aggregate causes values from multiple compilations (e.g. split graphs) + to be accumulated into one value. If false, expect more than one value + per metric. + """ + + def fmt_fn(values, item_fn=lambda x: x): + if aggregate: + return item_fn(sum(values)) + return ", ".join(map(item_fn, values)) + + if repr == "str": + rows = [ + (k, fmt_fn(compilation_time_metrics[k], item_fn=lambda x: f"{x:.4f}")) + for k in compilation_time_metrics + ] + out = "TorchDynamo compilation metrics:\n" + out += tabulate(rows, headers=("Function", "Runtimes (s)")) + return out + elif repr == "csv": + values = [ + fmt_fn(v, item_fn=lambda x: f"{x:.6f}") + for v in compilation_time_metrics.values() + ] + headers = list(compilation_time_metrics.keys()) + return headers, values + + +@atexit.register +def dump_compile_times(): + log.info(compile_times(repr="str", aggregate=True)) + + +tensortype_to_dtype = { + torch.FloatTensor: (torch.float32, torch.float), + torch.DoubleTensor: (torch.float64, torch.double), + torch.HalfTensor: (torch.float16, torch.half), + torch.BFloat16Tensor: (torch.bfloat16,), + torch.ByteTensor: (torch.uint8,), + torch.CharTensor: (torch.int8,), + torch.LongTensor: (torch.int64, torch.long), + torch.IntTensor: (torch.int32, torch.int), + torch.ShortTensor: (torch.int16, torch.short), + torch.BoolTensor: (torch.bool,), +} + + +class DuplicateWarningChecker: + def __init__(self, maxsize=4096): + self.maxsize = maxsize + self.reset() + + def reset(self): + self.set = collections.OrderedDict() + + def add(self, key): + if key in self.set: + self.set.move_to_end(key, last=True) + if not config.verbose: + return False + else: + self.set[key] = None + while len(self.set) > self.maxsize: + self.set.popitem(last=False) + return True + + +graph_break_dup_warning_checker = DuplicateWarningChecker() + + +def setup_compile_debug(): + compile_debug = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1" + + if compile_debug: + torch._logging.set_logs( + dynamo=logging.DEBUG, + aot=logging.DEBUG, + inductor=logging.DEBUG, + output_code=True, # this is off by default + ) + return add_file_handler() + + return contextlib.ExitStack() + + +def reset_graph_break_dup_checker(): + 
graph_break_dup_warning_checker.reset() + + +def add_file_handler(): + log_path = os.path.join(get_debug_dir(), "torchdynamo") + os.makedirs(log_path, exist_ok=True) + + log_file_handler = logging.FileHandler(os.path.join(log_path, "debug.log")) + logger = logging.getLogger("torch._dynamo") + logger.addHandler(log_file_handler) + + exitstack = contextlib.ExitStack() + exitstack.callback(lambda: logger.removeHandler(log_file_handler)) + return exitstack + + +def setup_log_file(): + exitstack = contextlib.ExitStack() + if config.log_file_name is not None: + log_file_handler = logging.FileHandler(config.log_file_name) + for logger in torch._logging._internal.get_loggers(): + logger.addHandler(log_file_handler) + exitstack.callback(lambda: logger.removeHandler(log_file_handler)) + return exitstack + + return exitstack + + +def gen_record_file_name(exc, code): + return f"{get_debug_dir()}/error_recordings/\ +{code.co_name}_{type(exc).__name__}_{code.co_firstlineno}.rec" + + +def write_record_to_file(filename, exec_record): + try: + if os.path.exists(filename): + log.warning( + "Unable to write execution record %s; file already exists.", filename + ) + else: + os.makedirs(os.path.dirname(filename), exist_ok=True) + with open(filename, "wb") as f: + exec_record.dump(f) + except Exception: + log.exception("Unable to write execution record %s", filename) + + +def count_calls(g: fx.Graph): + c = 0 + for n in g.nodes: + if "call" in n.op: + c += 1 + return c + + +def identity(x): + return x + + +def hashable(x): + try: + hash(x) + return True + except TypeError: + return False + # cannot hash writable memoryview object + except ValueError: + return False + + +def nothing(*args, **kwargs): + pass + + +class ExactWeakKeyDictionary: + """Similar to weakref.WeakKeyDictionary, but use `is`/`id` rather than `==` to compare equality""" + + def __init__(self): + self.values = dict() + self.refs = dict() + + def __getitem__(self, key): + return self.values[id(key)] + + def get(self, key, default=None): + return self.values.get(id(key), default) + + def __contains__(self, key): + return id(key) in self.values + + def __setitem__(self, key, value): + idx = id(key) + if idx not in self.refs: + self.refs[idx] = weakref.ref(key, lambda ref: self._remove_id(idx)) + self.values[idx] = value + + def _remove_id(self, idx): + if idx in self.values: + del self.values[idx] + if idx in self.refs: + del self.refs[idx] + + def clear(self): + self.refs.clear() + self.values.clear() + + +def istype(obj, allowed_types): + """isinstance() without subclasses""" + if isinstance(allowed_types, (tuple, list, set)): + return type(obj) in allowed_types + return type(obj) is allowed_types + + +def is_typing(value): + # _Final catches most of typing classes: + # - Any + # - Callable + # - Union + # ... + # + # NB: we intentionally ignore classes that inherit from Generic, since they + # can be used as both TypingVariable as well as UserDefinedClassVariable. 
+ return isinstance(value, typing._Final) or value is typing.Generic # type: ignore[attr-defined] + + +def is_numpy_int_type(value): + if not np: + return False + + return istype( + value, + ( + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + ), + ) + + +def is_numpy_float_type(value): + if not np: + return False + + return istype( + value, + ( + np.float16, + np.float32, + np.float64, + ), + ) + + +def is_function_or_wrapper(value): + return ( + is_function(value) + or isinstance(value, functools._lru_cache_wrapper) + and is_function(inspect.getattr_static(value, "__wrapped__")) + or isinstance(value, (torch._ops.OpOverloadPacket, torch._ops.OpOverload)) + ) + + +def is_function(value): + return isinstance( + value, + ( + types.FunctionType, + types.BuiltinFunctionType, + types.MethodDescriptorType, + types.WrapperDescriptorType, + torch.jit.ScriptFunction, + ), + ) + + +def unwrap_if_wrapper(fn): + return unwrap_with_attr_name_if_wrapper(fn)[0] + + +def unwrap_with_attr_name_if_wrapper(fn): + # unpack @functools.lru_cache wrapped function + if isinstance(fn, functools._lru_cache_wrapper): + fn = inspect.getattr_static(fn, "__wrapped__") + attr_name = "__wrapped__" + # unpack @torch._dynamo.optimize()(fn) wrapped function + elif is_function(fn) and inspect.getattr_static(fn, "_torchdynamo_inline", False): + fn = inspect.getattr_static(fn, "_torchdynamo_inline", fn) + attr_name = "_torchdynamo_inline" + # unpack torch.jit.script_if_tracing + elif is_function(fn) and inspect.getattr_static( + fn, "__script_if_tracing_wrapper", False + ): + fn = inspect.getattr_static(fn, "__original_fn", fn) + attr_name = "__original_fn" + else: + attr_name = None + return fn, attr_name + + +def is_numpy_ndarray(value): + if not np: + return False + + return istype(value, np.ndarray) + + +def istensor(obj): + """Check of obj is a tensor""" + tensor_list = ( + torch.Tensor, + torch.nn.Parameter, + *config.traceable_tensor_subclasses, + ) + tensor_list = tensor_list + (torch._subclasses.FakeTensor,) + return istype(obj, tensor_list) + + +def is_lazy_module(mod): + return isinstance(mod, LazyModuleMixin) + + +@functools.lru_cache(4096) +def print_once(*args): + print(*args) + + +def make_cell(val=None): + """Some black magic to create a cell object that usually only exists in a closure""" + x = val + + def f(): + return x + + assert f.__closure__ is not None and len(f.__closure__) == 1 + return f.__closure__[0] + + +def proxy_args_kwargs(args, kwargs): + try: + proxy_args = tuple(arg.as_proxy() for arg in args) + proxy_kwargs = {key: arg.as_proxy() for key, arg in kwargs.items()} + return proxy_args, proxy_kwargs + except NotImplementedError as e: + from .exc import unimplemented + from .variables.base import typestr + + raise unimplemented( + f"call_function args: {typestr(*args)} {typestr(*list(kwargs.values()))}" + ) from e + + +@dataclasses.dataclass +class CompilationMetrics: + frame_key: str + co_name: str + co_filename: str + co_firstlineno: int + cache_size: int + accumulated_cache_size: int + guard_count: Optional[int] + shape_env_guard_count: Optional[int] + graph_op_count: Optional[int] + graph_node_count: Optional[int] + graph_input_count: Optional[int] + start_time: float + entire_frame_compile_time_s: Optional[float] + backend_compile_time_s: Optional[float] + inductor_compile_time_s: Optional[float] + code_gen_time_s: Optional[float] + fail_type: Optional[str] + fail_reason: Optional[str] + fail_user_frame_filename: Optional[str] + 
fail_user_frame_lineno: Optional[int] + non_compliant_ops: Set[str] + compliant_custom_ops: Set[str] + + +DEFAULT_COMPILATION_METRICS_LIMIT = 64 + + +_compilation_metrics: Deque[CompilationMetrics] = collections.deque( + maxlen=DEFAULT_COMPILATION_METRICS_LIMIT +) + + +def record_compilation_metrics(compilation_metrics: CompilationMetrics): + global _compilation_metrics + _compilation_metrics.append(compilation_metrics) + if config.log_compilation_metrics: + log_compilation_event(compilation_metrics) + + +def set_compilation_metrics_limit(new_size: int) -> None: + global _compilation_metrics + while len(_compilation_metrics) > new_size: + _compilation_metrics.popleft() + new_deque = collections.deque(_compilation_metrics, maxlen=new_size) + _compilation_metrics = new_deque + + +def clear_compilation_metrics() -> None: + global _compilation_metrics + _compilation_metrics.clear() + + +def get_compilation_metrics() -> List[CompilationMetrics]: + return list(_compilation_metrics) + + +@dataclasses.dataclass +class CleanupHook: + """Remove a global variable when hook is called""" + + scope: Dict[str, Any] + name: str + + def __call__(self, *args): + CleanupManager.count -= 1 + del self.scope[self.name] + + @staticmethod + def create(scope, name, val): + assert name not in scope + CleanupManager.count += 1 + scope[name] = val + return CleanupHook(scope, name) + + +class CleanupManager(ExactWeakKeyDictionary): + count = 0 + instance: ClassVar["CleanupManager"] + + def _remove_id(self, idx): + for hook in self.values[idx]: + hook() + super()._remove_id(idx) + + +CleanupManager.instance = CleanupManager() + + +def clone_tensor(x): + """Clone the tensor and its gradient""" + y = x.clone().requires_grad_(x.requires_grad) + if x.is_leaf and x.grad is not None: + y.grad = x.grad.clone() + return y + + +def clone_input(x, *, dtype=None): + """copy while preserving strides""" + # TODO: this is questionable + if is_fake(x): + # this func fails on fake tensors in __torch_dispatch__ + return x + + def torch_clone(x): + y = torch.clone(x) + if x.is_leaf: + y.requires_grad_(x.requires_grad) + if x.is_leaf and x.grad is not None: + y.grad = clone_input(x.grad, dtype=dtype) + if hasattr(x, "_dynamo_dynamic_indices"): + y._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy() # type: ignore[attr-defined] + return y + + with torch.no_grad(): + if x.device.type == "xla": + # Access data_ptr() for a xla tensor will cause crash + return torch_clone(x) + + needed_size = sum( + (shape - 1) * stride for shape, stride in zip(x.size(), x.stride()) + ) + if x.is_quantized: + result = torch.empty_quantized((needed_size + 32,), x) + else: + result = torch.empty( + needed_size + 32, dtype=dtype or x.dtype, device=x.device + ) + cache_line_offset = ( + (x.data_ptr() - result.data_ptr()) % 32 + ) // x.element_size() + result.as_strided_(x.size(), x.stride(), cache_line_offset) + try: + result.copy_(x.clone()) + if x.is_leaf: + result.requires_grad_(x.requires_grad) + if x.is_leaf and x.grad is not None: + result.grad = clone_input(x.grad, dtype=dtype) + except RuntimeError: + # RuntimeError: unsupported operation: more than one element of the written-to + # tensor refers to a single memory location. Please clone() the tensor before + # performing the operation. 
+ return torch_clone(x) + if hasattr(x, "_dynamo_dynamic_indices"): + result._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy() # type: ignore[attr-defined] + return result + + +def clone_inputs(example_inputs): + res: Union[Dict[Any, Any], List[Any]] + if type(example_inputs) is dict: + res = dict(example_inputs) + for key, value in res.items(): + if isinstance(value, tuple): + res[key] = clone_inputs(value) + else: + assert isinstance(value, torch.Tensor), type(value) + res[key] = clone_input(value) + return res + + res = list(example_inputs) + for i in range(len(res)): + if isinstance(res[i], torch.Tensor): + res[i] = clone_input(res[i]) + return res + + +def skip_frame_if_in_functorch_mode(val: torch.Tensor): + try: + val.data_ptr() # will throw for functorch tensors + except RuntimeError as e: + from .exc import SkipFrame + + # This will be GradTrackingTensor/BatchedTensor/etc + functorch_subclass_name = re.sub(r"\(.*", "", repr(val)) + raise SkipFrame( + f"torch.compile cannot be run in context: {functorch_subclass_name}" + ) from e + + +@contextmanager +def preserve_rng_state(): + disable_functorch = torch._C._DisableFuncTorch + disable_current_modes = torch.utils._python_dispatch._disable_current_modes + with disable_current_modes(), disable_functorch(): + rng_state = torch.clone(torch.random.get_rng_state()) + skip_frame_if_in_functorch_mode(rng_state) + if torch.cuda.is_available(): + cuda_rng_state = torch.clone(torch.cuda.get_rng_state()) + try: + yield + finally: + with torch.utils._python_dispatch._disable_current_modes(): + torch.random.set_rng_state(rng_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state(cuda_rng_state) # type: ignore[possibly-undefined] + + +def is_jit_model(model0): + return isinstance( + model0, + ( + torch.jit._trace.TopLevelTracedModule, + torch.jit._script.RecursiveScriptModule, + torch.jit.ScriptFunction, + torch.jit.ScriptModule, + ), + ) + + +def torchscript(model, example_inputs, verbose=False): + if is_jit_model(model): + # already done? + return model + + try: + return torch.jit.trace(model, example_inputs) + except Exception: + try: + return torch.jit.script(model) + except Exception: + if verbose: + log.exception("jit error") + else: + log.error("Both torch.jit.trace and torch.jit.script failed") + return None + + +def getfile(obj): + try: + return inspect.getfile(obj) + except (TypeError, OSError): + return None + + +def is_namedtuple(obj): + """Test if an object is a namedtuple or a torch.return_types.* quasi-namedtuple""" + return is_namedtuple_cls(type(obj)) + + +def is_namedtuple_cls(cls): + """Test if an object is a namedtuple or a torch.return_types.* quasi-namedtuple""" + try: + if issubclass(cls, tuple): + bases = getattr(cls, "__bases__", []) or [None] + module = getattr(cls, "__module__", None) + return module == "torch.return_types" or ( + bases[0] is tuple and hasattr(cls, "_make") and hasattr(cls, "_fields") + ) + except TypeError: + pass + return False + + +@functools.lru_cache(1) +def namedtuple_fields(cls): + """Get the fields of a namedtuple or a torch.return_types.* quasi-namedtuple""" + if cls is slice: + return ["start", "stop", "step"] + + assert issubclass(cls, tuple) + if hasattr(cls, "_fields"): + # normal namedtuples + return cls._fields + + @dataclasses.dataclass + class Marker: + index: int + + # frustrating ones e.g. 
torch.return_types.max + assert cls.__module__ == "torch.return_types" + obj = cls(map(Marker, range(cls.n_fields))) + fields: List[Optional[str]] = [None] * cls.n_fields + for name in dir(obj): + if name[0] != "_" and isinstance(getattr(obj, name), Marker): + fields[getattr(obj, name).index] = name + return fields + + +def checkpoint_params(gm): + with torch.no_grad(): + rng_state = torch.clone(torch.random.get_rng_state()) + if torch.cuda.is_available(): + cuda_rng_state = torch.clone(torch.cuda.get_rng_state()) + saved_state = [] + for param in itertools.chain(gm.parameters(), gm.buffers()): + saved_state.append((param, param._version, torch.clone(param))) + + def restore(): + with torch.no_grad(): + torch.random.set_rng_state(rng_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state(cuda_rng_state) + for param, version, original_value in saved_state: + if param._version != version: + param.copy_(original_value) + + return restore + + +def timed(model, example_inputs, times=1): + if torch.cuda.is_available(): + synchronize = torch.cuda.synchronize + else: + synchronize = nothing + + synchronize() + gc.collect() + torch.manual_seed(1337) + t0 = time.perf_counter() + for _ in range(times): + result = model(*example_inputs) + synchronize() + t1 = time.perf_counter() + return result, t1 - t0 # type: ignore[possibly-undefined] + + +def check_is_cuda(gm, example_inputs): + return all(x.is_cuda for x in itertools.chain(example_inputs, gm.parameters(True))) + + +@lru_cache(32) +def rot_n_helper(n): + assert n > 1 + vars = [f"v{i}" for i in range(n)] + rotated = reversed(vars[-1:] + vars[:-1]) + fn = eval(f"lambda {','.join(vars)}: ({','.join(rotated)})") + fn.__name__ = f"rot_{n}_helper" + return fn + + +common_constant_types = { + int, + float, + complex, + bool, + str, + bytes, + type(None), + Ellipsis.__class__, + types.CodeType, + torch.device, + torch.dtype, + torch.memory_format, + torch.layout, +} + + +def is_safe_constant(v): + if istype(v, (tuple, frozenset)): + return all(map(is_safe_constant, v)) + return isinstance(v, (enum.Enum, type)) or istype( + v, + common_constant_types | {slice}, + ) + + +def specialize_symnode(arg): + from .variables import ConstantVariable, SymNodeVariable + + # Guard and specialize + if isinstance(arg, SymNodeVariable): + return ConstantVariable.create(arg.evaluate_expr()) + + return arg + + +def guard_if_dyn(arg): + from .variables import ConstantVariable + + arg = specialize_symnode(arg) + + if isinstance(arg, ConstantVariable): + return arg.as_python_constant() + + return arg + + +def check_constant_args(args, kwargs): + return all(x.is_python_constant() for x in itertools.chain(args, kwargs.values())) + + +def check_unspec_python_args(args, kwargs): + from .variables.constant import ConstantVariable + from .variables.tensor import UnspecializedPythonVariable + + unspec_count = 0 + for x in itertools.chain(args, kwargs.values()): + if isinstance(x, UnspecializedPythonVariable): + unspec_count += 1 + elif not isinstance(x, (UnspecializedPythonVariable, ConstantVariable)): + return False + else: + pass + + return unspec_count > 0 + + +def check_numpy_ndarray_args(args, kwargs): + from .variables.tensor import NumpyNdarrayVariable + + return any( + isinstance(x, NumpyNdarrayVariable) + for x in itertools.chain(args, kwargs.values()) + ) + + +dict_keys: Type[KeysView[Any]] = type(dict().keys()) +dict_values: Type[ValuesView[Any]] = type(dict().values()) +odict_values: Type[ValuesView[Any]] = type(collections.OrderedDict().values()) 
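# Illustrative sketch (not part of the patched utils.py): `is_safe_constant` above
# decides which Python values Dynamo may treat as safe constants -- immutable leaf
# values from common_constant_types (plus tuples/frozensets of them, enums, types,
# and slices), while mutable containers are rejected.

import torch
from torch._dynamo.utils import is_safe_constant

assert is_safe_constant((1, "a", torch.float32))  # tuple of common constant types
assert not is_safe_constant([1, 2])               # lists are mutable, not safe constants
assert is_safe_constant(slice(0, 3))              # slice is explicitly allowed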
+tuple_iterator: Type[Iterator[Any]] = type(iter(tuple())) +tuple_iterator_len = tuple_iterator.__length_hint__ # type: ignore[attr-defined] +object_new = object.__new__ + + +def nn_module_new(cls): + obj = object_new(cls) + torch.nn.Module.__init__(obj) + return obj + + +def product(it): + return functools.reduce(operator.mul, it, 1) + + +def tuple_iterator_getitem(it, index): + _, (obj,), start = it.__reduce__() + return obj[start + index] + + +iter_next = next + + +def to_subclass(t, cls): + return t.as_subclass(cls) + + +def dict_keys_getitem(d, n): + return next(itertools.islice(iter(d), n, n + 1)) + + +def enum_repr(value, local): + # enum class can override __str__ method. Use __class__ and name attribute + # to extract the class name and key name. + name = value.__class__.__name__ + val = value.name + scope = "L" if local else "G" + local_name = f'{scope}["{name}"].{val}' + return local_name + + +def _get_fake_tensor(vt): + fake_tensor = vt.as_proxy().node.meta.get("example_value") + if not is_fake(fake_tensor): + from .exc import unimplemented + + unimplemented("Cannot check Tensor object identity without its fake value") + return fake_tensor + + +def iter_contains(items, search, tx, check_tensor_identity=False): + from .variables import ( + BuiltinVariable, + ConstantVariable, + TensorVariable, + VariableTracker, + ) + + if search.is_python_constant(): + found_const = any( + x.is_python_constant() + and x.as_python_constant() == search.as_python_constant() + for x in items + ) + return ConstantVariable.create(found_const) + + must_check_tensor_id = False + if check_tensor_identity and isinstance(search, TensorVariable): + must_check_tensor_id = True + # Match of Tensor means match of FakeTensor + search = _get_fake_tensor(search) + + found: Optional[VariableTracker] = None + for x in items: + if must_check_tensor_id: + if isinstance(x, TensorVariable): + if search is _get_fake_tensor(x): # Object equivalence + return ConstantVariable.create(True) + else: + check = BuiltinVariable(operator.eq).call_function(tx, [x, search], {}) + if found is None: + found = check + else: + found = BuiltinVariable(operator.or_).call_function( + tx, [check, found], {} + ) + if found is None: + found = ConstantVariable.create(False) + return found + + +def key_is_id(k): + """Returns whether it indexes dictionaries using its id""" + return isinstance(k, (torch.Tensor, torch.nn.Module, MethodWrapperType)) + + +def key_to_id(value): + return [id(k) if key_is_id(k) else k for k in value.keys()] + + +def const_repr(x, *, local) -> str: + from .trace_rules import is_builtin_callable + + if isinstance(x, (list, tuple)): + elems_repr = ",".join(const_repr(s, local=local) for s in x) + if isinstance(x, list): + return f"[{elems_repr}]" + else: + assert isinstance(x, tuple) + if len(x) == 1: + return f"({elems_repr},)" + else: + return f"({elems_repr})" + elif isinstance(x, enum.Enum): + # To workaround repr(Enum) returning invalid global reference before python 3.11 + # by calling enum_repr and removing quotes to render enum in guard code. + return enum_repr(x, local=local).replace("'", "") + elif is_builtin_callable(x): + return x.__name__ + elif isinstance(x, type): + + def fullname(o): + klass = o.__class__ + module = klass.__module__ + if module == "builtins": + return klass.__qualname__ # avoid outputs like 'builtins.str' + return module + "." 
+ klass.__qualname__ + + return fullname(x) + else: + return f"{x!r}" + + +def dict_keys_repr(const_keys, *, local) -> str: + keys_str = ",".join(const_repr(s, local=local) for s in const_keys) + return "[" + keys_str + "]" + + +GLOBAL_KEY_PREFIX = "__dict_key" + + +from torch._subclasses import UnsupportedFakeTensorException # noqa: F401 + + +def wrap_fake_exception(fn): + try: + return fn() + except UnsupportedFakeTensorException as e: + from .exc import unimplemented + + msg = f"Unsupported: {e.reason} with fake tensor propagation." + log.warning(msg) + raise unimplemented(msg) from e + + +def deepcopy_to_fake_tensor(obj, fake_mode): + with torch._subclasses.fake_tensor.FakeCopyMode(fake_mode): + return wrap_fake_exception(lambda: copy.deepcopy(obj)) + + +def rmse(ref, res): + """ + Calculate root mean squared error + """ + return torch.sqrt(torch.mean(torch.square(ref - res))) + + +def same( + ref, + res, + fp64_ref=None, + cos_similarity=False, + tol=1e-4, + equal_nan=False, + exact_dtype=True, + relax_numpy_equality=False, + ignore_non_fp=False, + log_error=log.error, +): + """Check correctness to see if ref and res match""" + if fp64_ref is None: + fp64_ref = ref + if isinstance(ref, (list, tuple, torch.nn.ParameterList, torch.Size)): + assert isinstance(res, (list, tuple)), f"type mismatch {type(ref)} {type(res)}" + if len(ref) != len(res): + log_error("Length mismatch") + return False + return len(ref) == len(res) and all( + same( + ai, + bi, + fp64_refi, + cos_similarity, + tol, + equal_nan, + exact_dtype, + relax_numpy_equality, + ignore_non_fp, + log_error=log_error, + ) + for ai, bi, fp64_refi in zip(ref, res, fp64_ref) + ) + elif isinstance(ref, dict): + assert isinstance(res, dict) + assert set(ref.keys()) == set( + res.keys() + ), f"keys mismatch {set(ref.keys())} == {set(res.keys())}" + for k in sorted(ref.keys()): + if not ( + same( + ref[k], + res[k], + fp64_ref[k], + cos_similarity=cos_similarity, + tol=tol, + equal_nan=equal_nan, + exact_dtype=exact_dtype, + relax_numpy_equality=relax_numpy_equality, + ignore_non_fp=ignore_non_fp, + log_error=log_error, + ) + ): + log_error("Accuracy failed for key name %s", k) + return False + return True + elif isinstance(ref, (torch.Tensor, float)): + assert not isinstance(ref, torch._subclasses.FakeTensor) + assert not isinstance(res, torch._subclasses.FakeTensor) + + def to_tensor(t): + return t if isinstance(t, torch.Tensor) else torch.tensor(t) + + ref, res, fp64_ref = (to_tensor(val) for val in (ref, res, fp64_ref)) + + if ref.is_sparse: + assert res.is_sparse + ref = ref.to_dense() + res = res.to_dense() + assert isinstance(res, torch.Tensor), f"type mismatch {type(ref)} {type(res)}" + if exact_dtype: + if ref.dtype != res.dtype: + log_error("dtype mismatch %s, %s", ref.dtype, res.dtype) + return False + if ref.dtype == torch.bool: + if ignore_non_fp: + return True + # triton stores bool as int8, so add this for more accurate checking + r = torch.allclose( + ref.to(dtype=torch.uint8), + res.to(dtype=torch.uint8), + atol=tol, + rtol=tol, + equal_nan=equal_nan, + ) + if not r: + log_error("Accuracy failed: uint8 tensor did not match") + return r + + if cos_similarity: + ref = ref.flatten().to(torch.float32) + res = res.flatten().to(torch.float32) + if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=True): + # early exit that handles zero/nan better + # cosine_similarity(zeros(10), zeros(10), dim=0) is 0 + return True + score = torch.nn.functional.cosine_similarity(ref, res, dim=0, eps=1e-6) + if score < 0.99: + 
log.warning("Similarity score=%s", score.cpu().detach().item()) + return score >= 0.99 + else: + if not exact_dtype: + ref = ref.to(res.dtype) + + # First try usual allclose + if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=equal_nan): + return True + + # Check error from fp64 version + if fp64_ref.dtype == torch.float64: + ref_error = rmse(fp64_ref, ref).item() + # ref unable to produce this with stable numerics in this precision, ignore + if math.isnan(ref_error): + log.warning( + "Found nan in reference. Consider running in higher precision." + ) + + res_error = rmse(fp64_ref, res).item() + + # In the case of using AMP (Automatic Mixed Precision), certain models have + # failed the benchmark's correctness check. However, the end-to-end model's + # accuracy when comparing AMP with FP32 is within a difference of less than 0.1%. + # Thus, it's possible that the correctness check failures for these models are + # false alarms. We use multiplier of 3 instead of 2 to avoid these false alarms. + multiplier = 3.0 if res.dtype == torch.bfloat16 else 2.0 + + if ( + fp64_ref.numel() < 1000 + or (ref.ndim == 4 and ref.shape[-1] == ref.shape[-2] == 1) + # large tol means a benchmark has been specified as REQUIRE_HIGHER_TOLERANCE + or tol >= 2 * 1e-2 + ): + # In the presence of noise, noise might dominate our error + # metric for smaller tensors. + # Similary, for 1x1 kernels, there seems to be high noise with amp. + multiplier = 3.0 + + passes_test = res_error <= (multiplier * ref_error + tol / 10.0) + if not passes_test: + log_error( + "RMSE (res-fp64): %.5f, (ref-fp64): %.5f and shape=%s", + res_error, + ref_error, + res.size(), + ) + # import pdb; pdb.set_trace() + return passes_test + + if ignore_non_fp: + return True + + log_error("Accuracy failed: allclose not within tol=%s", tol) + return False + elif isinstance(ref, (str, int, type(None), bool, torch.device)): + if ignore_non_fp: + return True + r = ref == res + if not r: + log_error("Accuracy failed (%s): %s != %s", type(ref), ref, res) + return r + elif is_numpy_int_type(ref) or is_numpy_float_type(ref): + if relax_numpy_equality and not ( + is_numpy_int_type(res) or is_numpy_float_type(res) + ): + ref = ref.item() + r = (type(ref) is type(res)) and (ref == res) + if not r: + log_error("Accuracy failed (numpy): %s != %s", ref, res) + return r + elif is_numpy_ndarray(ref): + return (type(ref) is type(res)) and same( + torch.as_tensor(ref), + torch.as_tensor(res), + fp64_ref, + cos_similarity=cos_similarity, + tol=tol, + equal_nan=equal_nan, + exact_dtype=exact_dtype, + relax_numpy_equality=relax_numpy_equality, + ignore_non_fp=ignore_non_fp, + log_error=log_error, + ) + elif type(ref).__name__ in ( + "MaskedLMOutput", + "Seq2SeqLMOutput", + "CausalLMOutputWithCrossAttentions", + "LongformerMaskedLMOutput", + "Instances", + "SquashedNormal", + "Boxes", + "Normal", + "TanhTransform", + "Foo", + "Variable", + ): + assert type(ref) is type(res) + return all( + same( + getattr(ref, key), + getattr(res, key), + getattr(fp64_ref, key), + cos_similarity=cos_similarity, + tol=tol, + equal_nan=equal_nan, + exact_dtype=exact_dtype, + relax_numpy_equality=relax_numpy_equality, + ignore_non_fp=ignore_non_fp, + log_error=log_error, + ) + for key in ref.__dict__.keys() + ) + else: + raise RuntimeError(f"unsupported type: {type(ref).__name__}") + + +def format_func_info(code): + short_filename = code.co_filename.split("/")[-1] + return f"'{code.co_name}' ({short_filename}:{code.co_firstlineno})" + + +@contextlib.contextmanager +def 
disable_cache_limit(): + prior = config.cache_size_limit + config.cache_size_limit = sys.maxsize + prior_acc_limit = config.accumulated_cache_size_limit + config.accumulated_cache_size_limit = sys.maxsize + + try: + yield + finally: + config.cache_size_limit = prior + config.accumulated_cache_size_limit = prior_acc_limit + + +# map from transformed code back to original user code +orig_code_map = ExactWeakKeyDictionary() + +# keep a record of code_obj -> list of guard failure reasons for logging +guard_failures: DefaultDict[Any, List[Any]] = collections.defaultdict(list) + +# Keep a record of graph break reasons for logging +graph_break_reasons: List["torch._dynamo.output_graph.GraphCompileReason"] = list() + +# keep record of compiled code, if we are in "error if recompile" +# to track code that dynamo has compiled previously +seen_code_map = ExactWeakKeyDictionary() + + +class CompileProfiler: + """Utility for profiling how and what dynamo would compile. + + Can be used for + * diagnosing recompilation issues + * determining an appropriate compile cache limit + * (TODO)confirming which functions got compiled/skipped + """ + + def __init__(self): + self.frame_count = 0 + self.op_count = 0 + self.backend_ctx_ctor = disable_cache_limit + + def __call__(self, gm: torch.fx.GraphModule, example_inputs): + self.frame_count += 1 + for node in gm.graph.nodes: + if "call" in node.op: + self.op_count += 1 + return gm.forward + + # no-op __enter__ and __exit__ to preserve BC + def __enter__(self): + return self + + def __exit__(self, typ, val, traceback): + pass + + def get_metrics(self): + return {"guard_failures": guard_failures} + + def report(self): + metrics = self.get_metrics() + gf = metrics["guard_failures"] + + def num_recompiles(code): + return len(gf[code]) + + def recompile_reasons(code): + return "\n".join([str(x) for x in gf[code]]) + + summarized_gf = [ + [format_func_info(code), num_recompiles(code), recompile_reasons(code)] + for code in gf + ] + + def graph_break_report(): + if "graph_break" in counters: + graph_breaks = counters["graph_break"] + return tabulate( + [[msg, graph_breaks[msg]] for msg in graph_breaks], + headers=["Graph Break Reason", "Count"], + ) + + def recompilation_report(): + if len(gf): + max_recompiles = max([num_recompiles(code) for code in gf]) + recomp_table = tabulate( + summarized_gf, + headers=["Function", "Recompiles", "Recompile Reasons"], + ) + return recomp_table + textwrap.dedent( + f""" + + Set torch._dynamo.config.cache_size_limit to {max_recompiles} to avoid being cache limited. + """ + ) + + report = textwrap.dedent( + """ + Torchdynamo Profiler Report + =========================== + + Graph Breaks + ------------ + Graph breaks happen when torchdynamo encounters code it can't safely trace. + If you want to find out why breaks are happening, check below for each break reason + You may gain additional insight by passing `fullgraph=True` to torch.compile, + to stop at the first break. + + """ + ) + report += graph_break_report() or "No graph breaks detected." + report += textwrap.dedent( + """ + + Recompilation + ------------- + These subgraphs were recompiled more than once due to guard failures + Guard failures indicate some condition assumed to be static by the tracer changed, + making it unsafe to reuse the compiled program. 
+ + """ + ) + report += recompilation_report() or "No recompilation detected.\n" + return report + + +# return same dir unless user changes config between calls +@functools.lru_cache(None) +def _get_debug_dir(root_dir): + dir_name = ( + "run_" + + datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f") + # use pid to avoid conflicts among ranks + + "-pid_" + + str(os.getpid()) + ) + return os.path.join(root_dir, dir_name) + + +def get_debug_dir(): + debug_root = config.debug_dir_root + return _get_debug_dir(debug_root) + + +def extract_fake_example_value(node, required=True): + if "example_value" in node.meta and is_fake(node.meta["example_value"]): + return node.meta["example_value"] + elif required: + from torch._dynamo.exc import unimplemented + + unimplemented("`FakeTensor` example value was required but not available") + else: + return None + + +def ensure_graph_fake(e, tx): + assert maybe_get_fake_mode(e) is tx.fake_mode + return e + + +def get_fake_values_from_nodes(tx, nodes, allow_non_graph_fake): + def visit(n: torch.fx.Node): + if n.op == "call_function" and "example_value" not in n.meta: + # fake tensor validity is checked inside get_fake_value using + # ensure_graph_fake + return get_fake_value(n, tx, allow_non_graph_fake) + + out = n.meta["example_value"] + if not allow_non_graph_fake and isinstance(out, torch.Tensor): + return ensure_graph_fake(out, tx) + return out + + return torch.fx.node.map_arg(nodes, visit) + + +def get_fake_value(node, tx, allow_non_graph_fake=False): + """ + Run the computation represented by `node` using fake tensors and return the result. + + allow_non_graph_fake: whether to allow the return result to be: + 1. non-fake or 2. fake that is not created by this instance of Dynamo. + If `True`, you must be prepared to deal with such return values, ideally + by further wrapping them as this graph's fakes. + """ + from torch.utils._sympy.value_ranges import ValueRangeError + from .exc import ( + TorchRuntimeError, + unimplemented, + Unsupported, + UserError, + UserErrorType, + ) + + op = node.op + + # FX Node should always return the same fake value + if "example_value" in node.meta and is_fake(node.meta["example_value"]): + return node.meta["example_value"] + + args, kwargs = get_fake_values_from_nodes( + tx, (node.args, node.kwargs), allow_non_graph_fake + ) + + nnmodule = None + if op == "call_method" and len(args) > 0 and isinstance(args[0], torch.nn.Module): + # If the first argument is nn.Module, should copy to fake mode. + args = (deepcopy_to_fake_tensor(args[0], tx.fake_mode),) + tuple(args[1:]) + + if op == "call_module": + nnmodule = tx.output.nn_modules[node.target] + + if is_lazy_module(nnmodule) and hasattr(nnmodule, "_initialize_hook"): + # In the case of a lazy module, we want to run + # the pre-hooks which initialize it. + # Afterwards, lazy module deletes its pre-hooks + # to avoid treating it as lazy on subsequent recompile. + nnmodule._infer_parameters(nnmodule, args) + + # no matter it's lazy module or not, we should copy to fake mode. 
+ nnmodule = deepcopy_to_fake_tensor(nnmodule, tx.fake_mode) + + try: + with tx.fake_mode, enable_python_dispatcher(): + ret_val = wrap_fake_exception( + lambda: run_node(tx.output, node, args, kwargs, nnmodule) + ) + except Unsupported: + raise + except RuntimeError as e: + cause: BaseException = e + if e.__cause__ is not None: + cause = e.__cause__ + + if isinstance( + cause, torch._subclasses.fake_tensor.DataDependentOutputException + ): + unimplemented( + f"data dependent operator: {cause.func}; " + "to enable, set torch._dynamo.config.capture_scalar_outputs = True" + ) + elif isinstance( + cause, torch._subclasses.fake_tensor.DynamicOutputShapeException + ): + unimplemented( + f"dynamic shape operator: {cause.func}; " + "to enable, set torch._dynamo.config.capture_dynamic_output_shape_ops = True" + ) + elif isinstance( + cause, torch._subclasses.fake_tensor.UnsupportedOperatorException + ): + op = cause.func + import_suggestion = "" + if isinstance(op, torch._ops.OpOverload): + maybe_pystub = torch._C._dispatch_pystub( + op._schema.name, op._schema.overload_name + ) + if maybe_pystub is not None: + module, ctx = maybe_pystub + import_suggestion = ( + f"It's possible that the support was implemented in " + f"module `{module}` and you may need to `import {module}`" + f"({ctx}), otherwise " + ) + unimplemented( + f"unsupported operator: {cause.func} ({import_suggestion}see " + "https://docs.google.com/document/d/1GgvOe7C8_NVOMLOCwDaYV1mXXyHMXY7ExoewHqooxrs/edit#heading=h.64r4npvq0w0" + " for how to fix)" + ) + elif isinstance( + cause, torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode + ): + raise UserError( # noqa: TRY200 + UserErrorType.CONSTRAINT_VIOLATION, + "Tried to use data-dependent value in the subsequent computation. " + "This can happen when we encounter unbounded dynamic value that is unknown during tracing time. " + "You will need to explicitly give hint to the compiler. Please take a look at " + f"constrain_as_value OR constrain_as_size APIs. {cause}", + case_name="constrain_as_size_example", + ) + elif isinstance(cause, ValueRangeError): + raise UserError(UserErrorType.CONSTRAINT_VIOLATION, e.args[0]) from e + raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None + + if not allow_non_graph_fake: + _ = tree_map_only( + torch.Tensor, functools.partial(ensure_graph_fake, tx=tx), ret_val + ) + return ret_val + + +_current_node = threading.local() + + +def get_current_node(): + return getattr(_current_node, "value", None) + + +@contextmanager +def set_current_node(node): + old = get_current_node() + _current_node.value = node + try: + yield + finally: + _current_node.value = old + + +def run_node(tracer, node, args, kwargs, nnmodule): + """ + Runs a given node, with the given args and kwargs. + + Behavior is dictated by a node's op. + + run_node is useful for extracting real values out of nodes. + See get_real_value for more info on common usage. + + Note: The tracer arg is only used for 'get_attr' ops + Note: The nnmodule arg is only used for 'call_module' ops + + Nodes that are not call_function, call_method, call_module, or get_attr will + raise an AssertionError. 
+ """ + op = node.op + + with set_current_node(node): + + def make_error_message(e): + return f"Failed running {op} {node.target}(*{args}, **{kwargs}):\n" + str(e) + + try: + if op == "call_function": + return node.target(*args, **kwargs) + elif op == "call_method": + return getattr(args[0], node.target)(*args[1:], **kwargs) + elif op == "call_module": + assert nnmodule is not None + return nnmodule(*args, **kwargs) + elif op == "get_attr": + return tracer.get_submodule(node.target) + elif op == "placeholder": + assert "example_value" in node.meta + return node.meta["example_value"] + + except (NotImplementedError, UnsupportedFakeTensorException) as e: + # NB: mimic how wrap_fake_exception does it + from .exc import unimplemented + + raise unimplemented(make_error_message(e)) from e + except Exception as e: + raise RuntimeError(make_error_message(e)).with_traceback( + e.__traceback__ + ) from e + + raise AssertionError(op) + + +def get_real_value(node, tracer): + """ + Run the actual computation represented by `node` and return the result. + This will execute any dependent nodes in the graph as well. + """ + from .exc import TorchRuntimeError + + cache = tracer.real_value_cache + if node in cache: + return cache[node] + + op = node.op + args, kwargs = torch.fx.node.map_arg( + (node.args, node.kwargs), + lambda n: get_real_value(n, tracer), + ) + + if op == "call_module": + nn_module = tracer.output_graph.nn_modules[node.target] + if not is_lazy_module(nn_module): + nn_module = copy.deepcopy(nn_module) + else: + # In the case of a lazy module, we want to run + # the pre-hooks which initialize it + nn_module(*args, **kwargs) + else: + nn_module = None + + try: + real_value = run_node(tracer, node, args, kwargs, nn_module) + cache[node] = real_value + except RuntimeError as e: + raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None + return real_value + + +def assert_no_fake_params_or_buffers(gm): + from torch._subclasses.fake_tensor import FakeTensorConfig + + def stack_or_hint(t): + if FakeTensorConfig.debug: + import traceback + + return f"FAKE TENSOR CREATION TRACEBACK: \n {traceback.format_list(t._debug_trace)}" + else: + return "Enable TORCH_FAKE_TENSOR_DEBUG=1 to get creation stack traces on fake tensors." + + for name, buffer in gm.named_buffers(): + assert not isinstance( + buffer, torch._subclasses.FakeTensor + ), f"Unexpected fake buffer {name} {stack_or_hint(buffer)}" + for name, param in gm.named_parameters(): + assert not isinstance( + param, torch._subclasses.FakeTensor + ), f"Unexpected fake param {name} {stack_or_hint(param)}" + + +def fqn(obj: Any): + """ + Returns the fully qualified name of the object. 
+ """ + return f"{obj.__module__}.{obj.__qualname__}" + + +def ifdynstaticdefault(count1, count2): + if torch._dynamo.config.assume_static_by_default: + return count1 + else: + return count2 + + +def import_submodule(mod: types.ModuleType): + """ + Ensure all the files in a given submodule are imported + """ + for filename in sorted(os.listdir(os.path.dirname(cast(str, mod.__file__)))): + if filename.endswith(".py") and filename[0] != "_": + importlib.import_module(f"{mod.__name__}.{filename[:-3]}") + + +def object_has_getattribute(value: Any): + try: + if isinstance( + inspect.getattr_static(type(value), "__getattribute__"), + types.FunctionType, + ): + return True + except AttributeError: + pass + return False + + +def get_custom_getattr(value: Any): + try: + getattr_fn = inspect.getattr_static(type(value), "__getattr__") + except AttributeError: + getattr_fn = None + if getattr_fn is torch.nn.Module.__getattr__: + # ignore this case of getattr + getattr_fn = None + return getattr_fn + + +class TensorStaticReason(enum.Enum): + PARAMETER = 2 + NOT_TENSOR = 4 + NN_MODULE_PROPERTY = 5 + + +def tensor_static_reason_to_message(reason: TensorStaticReason): + if reason == TensorStaticReason.PARAMETER: + return "mark_dynamic on parameter, parameters are always static today." + if reason == TensorStaticReason.NOT_TENSOR: + return "mark_dynamic on a non tensor, how did this happen?" + if reason == TensorStaticReason.NN_MODULE_PROPERTY: + return "tensor is static because it is nn module associated." + raise AssertionError(f"Illegal reason {reason}") + + +def tensor_always_has_static_shape( + tensor: Union[torch.Tensor, Any], + is_tensor: bool, + guard_source: "torch._guards.GuardSource", +) -> Tuple[bool, Optional[TensorStaticReason]]: + """ + Given a tensor, source, and is_tensor flag, determine if a shape should be static. + + Args: + tensor - the real tensor to evaluate, parameters force a static shape. + is_tensor - internal dynamo check, essentially "is_tensor": target_cls is TensorVariable, + tensors not in a TensorVariable for whatever reason are forced static. + + Returns a tuple, where the first element is the bool of whether or not this tensor should have a static shape. + The second element is a TensorStaticReason, useful for passing to tensor_static_reason_to_message if needed. 
+ """ + if guard_source.is_nn_module() and config.force_nn_module_property_static_shapes: + return True, TensorStaticReason.NN_MODULE_PROPERTY + if type(tensor) is torch.nn.Parameter and config.force_parameter_static_shapes: + return True, TensorStaticReason.PARAMETER + if not is_tensor: + return True, TensorStaticReason.NOT_TENSOR + return False, None + + +def lazy_format_graph_code(name, gm, maybe_id=None): + def format_name(): + if maybe_id is not None: + return f"{name} {maybe_id}" + else: + return name + + return LazyString( + lambda: _format_graph_code( + f"===== {format_name()} =====\n", + gm.forward.__code__.co_filename, + gm.print_readable(print_output=False), + ) + ) + + +def _format_graph_code(name, filename, graph_str): + return f"TRACED GRAPH\n {name} {filename} {graph_str}\n" + + +def lazy_format_graph_tabular(fn_name, gm): + def inner(): + try: + from tabulate import tabulate # TODO: Check that this is installed + except ImportError: + return ( + "Tabulate module missing, please install tabulate to log the graph in tabular format, logging code instead:\n" + + str(lazy_format_graph_code(fn_name, gm)) + ) + + node_specs = [ + [n.op, n.name, n.target, n.args, n.kwargs] for n in gm.graph.nodes + ] + graph_str = tabulate( + node_specs, headers=["opcode", "name", "target", "args", "kwargs"] + ) + return _format_graph_code(fn_name, gm.forward.__code__.co_filename, graph_str) + + return LazyString(inner) + + +def format_bytecode(prefix, name, filename, line_no, code): + return f"{prefix} {name} {filename} line {line_no} \n{dis.Bytecode(code).dis()}\n" + + +forward_hook_names = ["_forward_pre_hooks", "_forward_hooks"] +backward_hook_names = ["_backward_pre_hooks", "_backward_hooks"] +state_dict_hook_names = [ + "_state_dict_pre_hooks", + "_state_dict_hooks", + "_load_state_dict_pre_hooks", + "_load_state_dict_post_hooks", +] +all_hook_names = forward_hook_names + backward_hook_names + state_dict_hook_names + + +def nn_module_get_all_hooks( + mod, + check_forward_hooks=False, + check_backward_hooks=False, + check_state_dict_hooks=False, +): + reset_code = torch._C._dynamo.eval_frame.reset_code + """ + Sometimes its useful to differentiate between types of hooks such as forward/backward/pre + hooks executed during module.__call__, and state_dict hooks which are executed separately. + """ + hook_dicts_to_check = [] + check_all_hooks = ( + not check_forward_hooks + and not check_backward_hooks + and not check_state_dict_hooks + ) + if check_forward_hooks or check_all_hooks: + hook_dicts_to_check.extend(forward_hook_names) + if check_backward_hooks or check_all_hooks: + hook_dicts_to_check.extend(backward_hook_names) + if check_state_dict_hooks: + hook_dicts_to_check.extend(state_dict_hook_names) + + all_hooks = [] + for hook_dict_name in hook_dicts_to_check: + hooks = getattr(mod, hook_dict_name, []) + for hook_name in hooks: + hook = hooks[hook_name] + + all_hooks.append(hook) + return all_hooks + + +def nnmodule_has_hooks( + mod, + check_forward_hooks=False, + check_backward_hooks=False, + check_state_dict_hooks=False, +): + """ + Helper function to check if a module has any hooks attached to it. 
+ """ + hooks = nn_module_get_all_hooks( + mod, + check_forward_hooks=check_forward_hooks, + check_backward_hooks=check_backward_hooks, + check_state_dict_hooks=check_state_dict_hooks, + ) + return bool(hooks) + + +def to_numpy_helper(value): + """Convert tensor and tnp.ndarray to numpy.ndarray.""" + if is_fake(value): + return value + if isinstance(value, tnp.ndarray): + return to_numpy_helper(value.tensor) + elif isinstance(value, torch.Tensor): + return value.numpy(force=True) + elif isinstance(value, (tuple, list)): + return type(value)(to_numpy_helper(obj) for obj in value) + else: + return value + + +def numpy_to_tensor(value): + """Convert tnp.ndarray to tensor, leave other types intact. If a list/tuple, loop through it to convert.""" + assert np is not None + if isinstance(value, np.ndarray): + return torch.as_tensor(value) + if isinstance(value, tnp.ndarray): + return value.tensor + elif isinstance(value, (tuple, list)): + return type(value)(numpy_to_tensor(obj) for obj in value) + else: + return value + + +class numpy_to_tensor_wrapper: + def __init__(self, f): + self.f = f + self.__name__ = "wrapped_" + self.f.__name__ + + def __repr__(self): + return f">" + + def __call__(self, *args, **kwargs): + out = self.f(*args, **kwargs) + return numpy_to_tensor(out) + + +def numpy_attr_wrapper(obj, name): + if isinstance(obj, tnp.ndarray): + out = getattr(obj, name) + return numpy_to_tensor(out) + elif isinstance(obj, torch.Tensor): + out = getattr(tnp.ndarray(obj), name) + return numpy_to_tensor(out) + + +class numpy_method_wrapper: + """Convert obj from torch.Tensor to tnp.ndarray and call method. Then convert result back to torch.Tensor.""" + + def __init__(self, method: str): + self.method = method + self.__name__ = "wrapped_" + self.method + + def __repr__(self): + return f">" + + def __call__(self, *args, **kwargs): + obj = args[0] + if isinstance(obj, torch.Tensor): + obj = tnp.ndarray(obj) + method_callable = getattr(obj, self.method) + out = method_callable(*args[1:], **kwargs) + return numpy_to_tensor(out) + + +class numpy_operator_wrapper: + """Implements dunder methods for tnp.ndarray via functions from the operator library""" + + def __init__(self, op: Callable[..., Any]): + self.op = op + self.__name__ = f"wrapped_{op.__name__}" + + def __repr__(self): + return f">" + + def __call__(self, *args, **kwargs): + assert not kwargs + + args = ( + tnp.ndarray(arg) if isinstance(arg, torch.Tensor) else arg for arg in args + ) + out = self.op(*args) + return numpy_to_tensor(out) + + +def defake(x): + if not isinstance(x, FakeTensor): + return x + size: "torch._prims_common.ShapeType" + stride: "torch._prims_common.StrideType" + if x._has_symbolic_sizes_strides: + size = [] + for s in x.size(): + if isinstance(s, torch.SymInt): + size.append(s.node.shape_env.size_hint(s.node.expr)) + else: + size.append(s) + stride = [] + for s in x.stride(): + if isinstance(s, torch.SymInt): + stride.append(s.node.shape_env.size_hint(s.node.expr)) + else: + stride.append(s) + else: + size = x.size() + stride = x.stride() + y = torch.empty_strided( + size, + stride, + dtype=x.dtype, + device=x.device, + requires_grad=x.requires_grad, + ) + y.zero_() + return y + + +def is_utils_checkpoint(obj): + # Lazy import to avoid circular dependencies + import torch.utils.checkpoint + + return obj is torch.utils.checkpoint.checkpoint + + +def build_checkpoint_variable(**options): + import torch._higher_order_ops.wrap as higher_order_ops + from .variables.higher_order_ops import TorchHigherOrderOperatorVariable 
+ + # TODO - This is a temporary situation where we have two versions of + # checkpointing implementation. We will converge on one and remove the other. + activation_checkpoint_op: "torch._ops.HigherOrderOperator" = ( + higher_order_ops.tag_activation_checkpoint + ) + if torch._functorch.config.functionalize_rng_ops: + activation_checkpoint_op = higher_order_ops.wrap_activation_checkpoint + + return TorchHigherOrderOperatorVariable.make( + activation_checkpoint_op, + **options, + ) + + +def is_compile_supported(device_type): + from .eval_frame import is_dynamo_supported + + compile_supported = is_dynamo_supported() + if device_type == "cpu": + pass + elif device_type == "cuda" and compile_supported: + from torch.utils._triton import has_triton + + compile_supported = has_triton() + else: + compile_supported = False + return compile_supported + + +# The following 3.11 source code functions are adapted from +# https://github.com/python/cpython/blob/v3.11.4/Lib/traceback.py +# in order to output source code corresponding to bytecode in 3.11+. +# We need our own versions since we want to support multiline expressions. +def _fix_offset(str: str, offset: int) -> int: + """ + Convert byte offset `offset` of `str` into character offset. + Byte offset is used for 3.11+ instruction column data. + Takes things like unicode characters into consideration. + + Unchanged from CPython implementation. + """ + as_utf8 = str.encode("utf-8") + return len(as_utf8[:offset].decode("utf-8", errors="replace")) + + +@dataclasses.dataclass +class _Anchors: + # inclusive + left_end_lineno: int + left_end_offset: int + right_start_lineno: int + # exclusive + right_start_offset: int + + +def _extract_anchors_from_expr(segment: str) -> Optional[_Anchors]: + """ + Given source code `segment` corresponding to a bytecode + instruction, determine: + - for binary ops, the location of the binary op + - for indexing, the location of the brackets. + `segment` is expected to be a valid Python expression + """ + assert sys.version_info >= (3, 11) + + import ast + + try: + # Without brackets, `segment` is parsed as a statement. + # We expect an expression, so wrap `segment` in + # brackets to handle multi-line expressions. + tree = ast.parse("(\n" + segment + "\n)") + except SyntaxError: + return None + + if len(tree.body) != 1: + return None + + lines = segment.split("\n") + + # get character index given byte offset + def normalize(lineno, offset): + return _fix_offset(lines[lineno], offset) + + # Gets the next valid character index in `lines`, if + # the current location is not valid. Handles empty lines. + def next_valid_char(lineno, col): + while lineno < len(lines) and col >= len(lines[lineno]): + col = 0 + lineno += 1 + assert lineno < len(lines) and col < len(lines[lineno]) + return lineno, col + + # Get the next valid character index in `lines`. + def increment(lineno, col): + col += 1 + lineno, col = next_valid_char(lineno, col) + assert lineno < len(lines) and col < len(lines[lineno]) + return lineno, col + + # Get the next valid character at least on the next line + def nextline(lineno, col): + col = 0 + lineno += 1 + lineno, col = next_valid_char(lineno, col) + assert lineno < len(lines) and col < len(lines[lineno]) + return lineno, col + + statement = tree.body[0] + if isinstance(statement, ast.Expr): + expr = statement.value + if isinstance(expr, ast.BinOp): + # ast gives locations for BinOp subexpressions, e.g. 
+ # ( left_expr ) + ( right_expr ) + # left^^^^^ right^^^^^ + # -2 since end_lineno is 1-indexed and because we added an extra + # bracket to `segment` when calling ast.parse + cur_lineno = cast(int, expr.left.end_lineno) - 2 + cur_col = normalize(cur_lineno, expr.left.end_col_offset) + cur_lineno, cur_col = next_valid_char(cur_lineno, cur_col) + + # Heuristic to find the operator character. + # The original CPython implementation did not look for ), \, or #, + # leading to incorrect anchor location, e.g. + # (x) + (y) + # ~~^~~~~~~ + while (ch := lines[cur_lineno][cur_col]).isspace() or ch in ")\\#": + if ch in "\\#": + cur_lineno, cur_col = nextline(cur_lineno, cur_col) + else: + cur_lineno, cur_col = increment(cur_lineno, cur_col) + + # binary op is 1 or 2 characters long, on the same line + right_col = cur_col + 1 + if ( + right_col < len(lines[cur_lineno]) + and not (ch := lines[cur_lineno][right_col]).isspace() + and ch not in "\\#" + ): + right_col += 1 + # right_col can be invalid since it is exclusive + + return _Anchors(cur_lineno, cur_col, cur_lineno, right_col) + elif isinstance(expr, ast.Subscript): + # ast gives locations for value and slice subexpressions, e.g. + # ( value_expr ) [ slice_expr ] + # value^^^^^ slice^^^^^ + # subscript^^^^^^^^^^^^^^^^^^^^ + # find left bracket (first '[' after value) + left_lineno = cast(int, expr.value.end_lineno) - 2 + left_col = normalize(left_lineno, expr.value.end_col_offset) + left_lineno, left_col = next_valid_char(left_lineno, left_col) + while lines[left_lineno][left_col] != "[": + left_lineno, left_col = increment(left_lineno, left_col) + # find right bracket (final character of expression) + right_lineno = cast(int, expr.end_lineno) - 2 + right_col = normalize(right_lineno, expr.end_col_offset) + return _Anchors(left_lineno, left_col, right_lineno, right_col) + elif isinstance(expr, ast.Call): + # ( func_expr ) (args, kwargs) + # func^^^^^ + # call^^^^^^^^^^^^^^^^^^^^^^^^ + # find left bracket (first '(' after func) + left_lineno = cast(int, expr.func.end_lineno) - 2 + left_col = normalize(left_lineno, expr.func.end_col_offset) + left_lineno, left_col = next_valid_char(left_lineno, left_col) + while lines[left_lineno][left_col] != "(": + left_lineno, left_col = increment(left_lineno, left_col) + # find right bracket (final character of expression) + right_lineno = cast(int, expr.end_lineno) - 2 + right_col = normalize(right_lineno, expr.end_col_offset) + return _Anchors(left_lineno, left_col, right_lineno, right_col) + + return None + + +def get_instruction_source_311(code: types.CodeType, inst: dis.Instruction) -> str: + """ + Python 3.11+ only. Returns lines of source code (from code object `code`) + corresponding to `inst`'s location data, and underlines relevant code to `inst`. + + Example: CALL on `g`: + f(g( + ^^ + h(x))) + ^^^^^ + + We need our own implementation since `format_frame_summary` in + Python's `traceback` module doesn't handle multi-line expressions + (and their anchor extraction code is not completely correct). + """ + assert inst.positions is not None + if inst.positions.lineno is None: + return "" + # The rstrip + "\n" pattern is used throughout this function to handle + # linecache.getline errors. Error lines are treated as empty strings "", but we want + # to treat them as blank lines "\n". 
+ first_line = linecache.getline(code.co_filename, inst.positions.lineno).rstrip() + if inst.positions.end_lineno is None: + return first_line + if inst.positions.col_offset is None or inst.positions.end_col_offset is None: + return first_line + + # character index of the start of the instruction + start_offset = _fix_offset(first_line, inst.positions.col_offset) + # character index of the end of the instruction + # compute later since end may be a different line + end_offset = None + # expression corresponding to the instruction so we can get anchors + segment = "" + # underline markers to be printed - start with `~` marker and replace with `^` later + markers = [] + + # Compute segment and initial markers + if inst.positions.end_lineno == inst.positions.lineno: + end_offset = _fix_offset(first_line, inst.positions.end_col_offset) + segment = first_line[start_offset:end_offset] + markers.append(" " * start_offset + "~" * (end_offset - start_offset)) + else: + segment = first_line[start_offset:] + "\n" + markers.append(" " * start_offset + "~" * (len(first_line) - start_offset)) + last_line = linecache.getline( + code.co_filename, inst.positions.end_lineno + ).rstrip() + end_offset = _fix_offset(last_line, inst.positions.end_col_offset) + for lineno in range(inst.positions.lineno + 1, inst.positions.end_lineno): + line = linecache.getline(code.co_filename, lineno).rstrip() + segment += line + "\n" + # don't underline leading spaces + num_spaces = len(line) - len(line.lstrip()) + markers.append(" " * num_spaces + "~" * (len(line) - num_spaces)) + segment += last_line[:end_offset] + num_spaces = len(last_line) - len(last_line.lstrip()) + markers.append(" " * num_spaces + "~" * (end_offset - num_spaces)) + + anchors: Optional[_Anchors] = None + try: + anchors = _extract_anchors_from_expr(segment) + except AssertionError: + pass + + # replace `~` markers with `^` where necessary + if anchors is None: + markers = [marker.replace("~", "^") for marker in markers] + else: + # make markers mutable + mutable_markers: List[List[str]] = [list(marker) for marker in markers] + + # anchor positions do not take start_offset into account + if anchors.left_end_lineno == 0: + anchors.left_end_offset += start_offset + if anchors.right_start_lineno == 0: + anchors.right_start_offset += start_offset + + # Turn `~`` markers between anchors to `^` + for lineno in range(len(markers)): + for col in range(len(mutable_markers[lineno])): + if lineno < anchors.left_end_lineno: + continue + if lineno == anchors.left_end_lineno and col < anchors.left_end_offset: + continue + if ( + lineno == anchors.right_start_lineno + and col >= anchors.right_start_offset + ): + continue + if lineno > anchors.right_start_lineno: + continue + if mutable_markers[lineno][col] == "~": + mutable_markers[lineno][col] = "^" + + # make markers into strings again + markers = ["".join(marker) for marker in mutable_markers] + + result = "" + for i in range(len(markers)): + result += ( + linecache.getline(code.co_filename, inst.positions.lineno + i).rstrip() + + "\n" + ) + result += markers[i] + "\n" + return result + + +def get_static_address_type(t): + if isinstance(t, torch.Tensor): + return getattr(t, "_dynamo_static_input_type", None) + + return None + + +def is_rng_state_getter_or_setter(value): + getters = ( + # The following two functions are not identical, so don't remove anyone! 
+ torch._C.Generator.get_state, + torch.default_generator.get_state, + torch.get_rng_state, + torch.cuda.get_rng_state, + ) + setters = ( + torch._C.Generator.set_state, + torch.default_generator.set_state, + torch.set_rng_state, + torch.cuda.set_rng_state, + ) + return value in (*setters, *getters) + + +def is_tensor_base_attr_getter(value): + return ( + isinstance(value, types.MethodWrapperType) + and value.__name__ == "__get__" + and value.__self__.__objclass__ is torch._C._TensorBase # type: ignore[attr-defined] + ) + + +def is_torch_function_object(value): + return hasattr(value, "__torch_function__") + + +def has_torch_function(vt: "torch._dynamo.variables.base.VariableTracker") -> bool: + from torch._dynamo.variables import UserDefinedObjectVariable + from torch._dynamo.variables.torch_function import TensorWithTFOverrideVariable + + return isinstance(vt, TensorWithTFOverrideVariable) or ( + isinstance(vt, UserDefinedObjectVariable) + and hasattr(vt.value, "__torch_function__") + ) + + +# see note [Tensor Fakification and Symbol Caching] +def to_fake_tensor(t, fake_mode): + symbolic_context = None + source = None + if tracing_context := torch._guards.TracingContext.try_get(): + if t in tracing_context.tensor_to_context: + symbolic_context = tracing_context.tensor_to_context[t] + source = symbolic_context.tensor_source + + return fake_mode.from_tensor( + t, static_shapes=False, symbolic_context=symbolic_context, source=source + ) + + +def get_first_attr(obj, *attrs): + """ + Return the first available attribute or throw an exception if none is present. + """ + for attr in attrs: + if hasattr(obj, attr): + return getattr(obj, attr) + + raise AssertionError(f"{obj} does not has any of the attributes: {attrs}") + + +@contextlib.contextmanager +def maybe_enable_compiled_autograd(should_enable): + def compiler_fn(gm): + def inner_compiler(gm_, example_inputs_): + torch._dynamo.utils.counters["compiled_autograd"]["compiles"] += 1 + return torch._inductor.compile(gm_, example_inputs_) + + return torch.compile(gm, backend=inner_compiler, fullgraph=True, dynamic=True) + + if should_enable: + with torch._dynamo.compiled_autograd.enable(compiler_fn) as ctx: + yield ctx + else: + yield + + +def invalid_removeable_handle(): + # need a subclass so weakref works + class Invalid(dict): # type: ignore[type-arg] + pass + + return RemovableHandle(Invalid()) diff --git a/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ffc9c5a08a9e8558ed6767143b9db20a210924f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/_semi_structured_conversions.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/_semi_structured_conversions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..707107a628f599c6cba2809b53b4e5bd36662381 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/_semi_structured_conversions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/_semi_structured_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/_semi_structured_ops.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4672d88d9686a517eb0edd4e2226f6c698816262 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/_semi_structured_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/_triton_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/_triton_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e119f7971e7b77fd997b44c4dfbda205be77fb40 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/_triton_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/_triton_ops_meta.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/_triton_ops_meta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61e8fbc6fcf634c76496551aef3051cbc722043d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/_triton_ops_meta.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/semi_structured.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/semi_structured.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd4f1cae80cb596168d0ff49447de6198f9b2f6f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/sparse/__pycache__/semi_structured.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/sparse/_semi_structured_conversions.py b/venv/lib/python3.10/site-packages/torch/sparse/_semi_structured_conversions.py new file mode 100644 index 0000000000000000000000000000000000000000..c487b151495df9b1a4e6b7a7ca553277a0586bc8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/sparse/_semi_structured_conversions.py @@ -0,0 +1,275 @@ +import torch + + +# This is PyTorch implementation of main part of reorder_meta() +# function, from tools/util/include/cutlass/util/host_reorder.h file +# of CUTLASS source tree. Furthermore, CUTLASS template for sparse +# GEMM decides upon layout of this matrix, and at the moment for the +# sparse GEMM executed on tensor cores, this is layout described by +# ColumnMajorInterleaved<2> data structure, in +# include/cutlass/layout/matrix.h of CUTLASS source tree. The +# reordering of meta matrix into meta_reordered matrix calculated +# according to these segments of CUTLASS code is re-implemented here. +# Note that this calculation produces offsets for scattering metadata +# matrix elements into reordered metadata matrix elements (or, +# equivalently, for gathering reordered metadata matrix element back +# into metadata matrix elements). +def _calculate_meta_reordering_scatter_offsets(m, meta_ncols, meta_dtype, device): + dst_rows = torch.arange(0, m, device=device)[:, None].repeat(1, meta_ncols) + dst_cols = torch.arange(0, meta_ncols, device=device).repeat(m, 1) + + # Reorder the rows, then swizzle the 2x2 blocks. 
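+    # For 16-bit metadata elements (fp16/bf16/fp32 dense inputs) rows are regrouped
+    # in blocks of 32 with an interleave factor of 4; for 32-bit metadata elements
+    # (int8 dense inputs) the block is 16 rows with an interleave factor of 2.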
+ group = 32 if meta_dtype.itemsize == 2 else 16 + interweave = 4 if meta_dtype.itemsize == 2 else 2 + dst_rows = ( + dst_rows // group * group + + (dst_rows % 8) * interweave + + (dst_rows % group) // 8 + ) + + topright = ((dst_rows % 2 == 0) & (dst_cols % 2 == 1)).to(torch.int8) + bottomleft = ((dst_rows % 2 == 1) & (dst_cols % 2 == 0)).to(torch.int8) + dst_rows += topright - bottomleft + dst_cols -= topright - bottomleft + + # Assumed that meta tensor is to be stored in CUTLASS + # InterleavedColumnMajor layout, and reverse engineered + # corresponding code to store values into this tensor. + interleave = 2 + cols_maj = dst_cols // interleave + cols_min = dst_cols % interleave + return (cols_maj * m * interleave + dst_rows * interleave + cols_min).view(-1) + + +# This function converts dense matrix into sparse semi-structured +# representation, producing "compressed" matrix, in the layout used by +# CUTLASS backend, and corresponding metadata matrix. +def sparse_semi_structured_from_dense_cutlass(dense): + if dense.dim() != 2: + raise RuntimeError( + f"Expected 2-dimensional dense tensor, got {dense.dim()}-dimensional tensor" + ) + + m, k = dense.shape + device = dense.device + + meta_dtype = torch.int8 + if dense.dtype == torch.int8: + meta_dtype = torch.int32 + elif dense.dtype in [torch.half, torch.bfloat16, torch.float]: + meta_dtype = torch.int16 + else: + raise RuntimeError(f"Invalid datatype {dense.dtype} of dense matrix") + quadbits_per_meta_elem = meta_dtype.itemsize * 8 // 4 + if quadbits_per_meta_elem not in (4, 8): + raise RuntimeError("Invalid number of elements per meta element calculated") + + if meta_dtype == torch.int32: + if m % 16 != 0: + raise RuntimeError( + f"Number of rows of dense matrix {m} must be divisible by 16" + ) + else: + if m % 32 != 0: + raise RuntimeError( + f"Number of rows of dense matrix {m} must be divisible by 32" + ) + if k % (4 * quadbits_per_meta_elem) != 0: + raise RuntimeError( + f"Number of columns of dense matrix {k} must be divisible by {4 * quadbits_per_meta_elem}" + ) + + if dense.dtype != torch.float: + ksparse = 4 + dense_4 = dense.view(-1, k // ksparse, ksparse) + m0, m1, m2, m3 = (dense_4 != 0).unbind(-1) + else: + ksparse = 2 + dense_2 = dense.view(-1, k // ksparse, ksparse) + m0, m2 = m1, m3 = (dense_2 != 0).unbind(-1) + meta_ncols = k // (ksparse * quadbits_per_meta_elem) + + # Encoding quadruples of True/False values as follows: + # [True, True, False, False] -> 0b0100 + # [True, False, True, False] -> 0b1000 + # [False, True, True, False] -> 0b1001 + # [True, False, False, True ] -> 0b1100 + # [False, True, False, True ] -> 0b1101 + # [False, False, True, True ] -> 0b1110 + # Thus, lower two bits in the encoding are index of the True value + # at the lowest index in the quadruple, and the higher two bits in + # the encoding are index of the other True value in the quadruple. + # In case there are less than two True values, than False value or + # values at some index or indices are considered True for the + # encoding. In case there are more than two True values, then the + # excess True value(s) at some indices are considered False for + # the encoding. 
The exact encodings used for these cases are as + # follows: + # [False, False, False, False] -> 0b1110 + # [False, False, False, True ] -> 0b1110 + # [False, False, True, False] -> 0b1110 + # [False, True, False, False] -> 0b1001 + # [False, True, True, True ] -> 0b1101 + # [True, False, False, False] -> 0b1000 + # [True, False, True, True ] -> 0b1100 + # [True, True, False, True ] -> 0b0100 + # [True, True, True, False] -> 0b0100 + # [True, True, True, True ] -> 0b0100 + # These particular encodings are chosen, with the help of Espresso + # logic minimizer software, for the purpose of minimization of + # corresponding Boolean functions, that translate non-zero flags + # into encoding bits. Note also possible choices for the first + # and last of these encodings were limited only to (0b0100, + # 0b1110), in order to produce valid encodings for 1:2 sparsity + # case. + + expr0 = m0 & m1 + expr1 = ~m0 & m1 + expr2 = ~m0 & ~m1 + bit0 = expr1 + bit1 = expr2 + bit2 = expr0 | expr2 | m3 + bit3 = expr1 | ~m1 + idxs0 = bit0 | (bit1.to(torch.int64) << 1) + idxs1 = bit2 | (bit3.to(torch.int64) << 1) + + if dense.dtype != torch.float: + sparse0 = dense_4.gather(-1, idxs0.unsqueeze(-1)) # type: ignore[possibly-undefined] + sparse1 = dense_4.gather(-1, idxs1.unsqueeze(-1)) + sparse = torch.stack((sparse0, sparse1), dim=-1).view(m, k // 2) + else: + sparse = dense_2.gather(-1, idxs0.unsqueeze(-1) // 2).view(m, k // 2) # type: ignore[possibly-undefined] + + meta_4 = idxs0 | (idxs1 << 2) + meta_n = meta_4.view((-1, meta_ncols, quadbits_per_meta_elem)).to(meta_dtype) + + if quadbits_per_meta_elem == 4: + meta = ( + meta_n[:, :, 0] + | (meta_n[:, :, 1] << 4) + | (meta_n[:, :, 2] << 8) + | (meta_n[:, :, 3] << 12) + ) + elif quadbits_per_meta_elem == 8: + meta = ( + meta_n[:, :, 0] + | (meta_n[:, :, 1] << 4) + | (meta_n[:, :, 2] << 8) + | (meta_n[:, :, 3] << 12) + | (meta_n[:, :, 4] << 16) + | (meta_n[:, :, 5] << 20) + | (meta_n[:, :, 6] << 24) + | (meta_n[:, :, 7] << 28) + ) + + # Reorder meta tensor elements. + meta_reordered = meta.new_empty((m * meta_ncols,)) # type: ignore[possibly-undefined] + meta_offsets = _calculate_meta_reordering_scatter_offsets( + m, meta_ncols, meta_dtype, device + ) + meta_reordered.scatter_(0, meta_offsets, meta.view(-1)) + + return (sparse, meta_reordered.view(m, meta_ncols)) + + +# This function performs reverse of the function above - it +# reconstructs dense matrix from a pair of "compressed" matrix, given +# in the layout used by CUTLASS backend, and accompanying metadata +# matrix. 
+def sparse_semi_structured_to_dense_cutlass(sparse, meta_reordered):
+    if sparse.dim() != 2:
+        raise RuntimeError(
+            f"Expected 2-dimensional sparse tensor, got {sparse.dim()}-dimensional tensor"
+        )
+
+    m, k = sparse.shape
+    device = sparse.device
+
+    if meta_reordered.dim() != 2:
+        raise RuntimeError(
+            f"Expected 2-dimensional meta tensor, got {meta_reordered.dim()}-dimensional tensor"
+        )
+    if meta_reordered.device != device:
+        raise RuntimeError(
+            f"Expected meta matrix to be on {device} device, got matrix on {meta_reordered.device} device"
+        )
+
+    meta_dtype = meta_reordered.dtype
+    if meta_dtype not in (torch.int16, torch.int32):
+        raise RuntimeError(f"Invalid datatype {meta_dtype} of meta matrix")
+    quadbits_per_meta_elem = meta_dtype.itemsize * 8 // 4
+
+    if sparse.dtype != torch.float:
+        ksparse = 4
+    else:
+        ksparse = 2
+
+    meta_nrows, meta_ncols = meta_reordered.shape
+    if meta_nrows != m:
+        raise RuntimeError(
+            f"Number of rows of meta matrix {meta_nrows} must be equal to number of rows of sparse matrix {m}"
+        )
+    if meta_ncols * ksparse * quadbits_per_meta_elem != 2 * k:
+        raise RuntimeError(
+            f"Number of columns of sparse matrix {k} must be equal to {meta_ncols * ksparse * quadbits_per_meta_elem // 2}, "
+            "as implied by the number of columns of the meta matrix"
+        )
+
+    # Undo meta tensor elements reordering.
+    meta_offsets = _calculate_meta_reordering_scatter_offsets(
+        m, meta_ncols, meta_dtype, device
+    )
+    meta = torch.gather(meta_reordered.view(-1), 0, meta_offsets).view(m, meta_ncols)
+
+    # Unpack sparse tensor back to original dense tensor, using
+    # information provided by meta tensor.  Note that torch.float
+    # datatype is handled pretty much the same as
+    # torch.half/torch.bfloat16, as metadata for a pair of torch.float
+    # values is encoded as if the underlying 8 bytes contained four
+    # torch.half/torch.bfloat16 values, where either the first two or
+    # the last two are zeros.
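+    # Each metadata element packs `quadbits_per_meta_elem` 4-bit groups, and every
+    # 4-bit group holds the two 2-bit positions of the kept values within a
+    # quadruple of the original dense row; meta_2 below unpacks those 2-bit fields.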
+ meta_2 = torch.empty( + (m, meta_ncols, 2 * quadbits_per_meta_elem), + dtype=meta_dtype, + device=device, + ) + if quadbits_per_meta_elem == 4: + meta_2[:, :, 0] = meta & 0b11 + meta_2[:, :, 1] = (meta >> 2) & 0b11 + meta_2[:, :, 2] = (meta >> 4) & 0b11 + meta_2[:, :, 3] = (meta >> 6) & 0b11 + meta_2[:, :, 4] = (meta >> 8) & 0b11 + meta_2[:, :, 5] = (meta >> 10) & 0b11 + meta_2[:, :, 6] = (meta >> 12) & 0b11 + meta_2[:, :, 7] = (meta >> 14) & 0b11 + elif quadbits_per_meta_elem == 8: + meta_2[:, :, 0] = meta & 0b11 + meta_2[:, :, 1] = (meta >> 2) & 0b11 + meta_2[:, :, 2] = (meta >> 4) & 0b11 + meta_2[:, :, 3] = (meta >> 6) & 0b11 + meta_2[:, :, 4] = (meta >> 8) & 0b11 + meta_2[:, :, 5] = (meta >> 10) & 0b11 + meta_2[:, :, 6] = (meta >> 12) & 0b11 + meta_2[:, :, 7] = (meta >> 14) & 0b11 + meta_2[:, :, 8] = (meta >> 16) & 0b11 + meta_2[:, :, 9] = (meta >> 18) & 0b11 + meta_2[:, :, 10] = (meta >> 20) & 0b11 + meta_2[:, :, 11] = (meta >> 22) & 0b11 + meta_2[:, :, 12] = (meta >> 24) & 0b11 + meta_2[:, :, 13] = (meta >> 26) & 0b11 + meta_2[:, :, 14] = (meta >> 28) & 0b11 + meta_2[:, :, 15] = (meta >> 30) & 0b11 + + dense_offsets = meta_2.view(-1) + ( + torch.arange(0, 2 * m * k // ksparse, device=device) * 4 + ).view(-1, 1).repeat(1, 2).view(-1) + + dense = torch.zeros((m * 2 * k,), dtype=sparse.dtype, device=device) + if sparse.dtype != torch.float: + dense.scatter_(0, dense_offsets, sparse.view(-1)) + else: + dense.view(torch.half).scatter_( + 0, dense_offsets, sparse.view(torch.half).view(-1) + ) + + return dense.view(m, 2 * k) diff --git a/venv/lib/python3.10/site-packages/torch/sparse/_triton_ops.py b/venv/lib/python3.10/site-packages/torch/sparse/_triton_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..0a1b055352e5fbec809da64fa9e7ede76d114c18 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/sparse/_triton_ops.py @@ -0,0 +1,1928 @@ +import math +import os +import torch +import weakref +from functools import lru_cache +from torch.utils._triton import has_triton +from ._triton_ops_meta import get_meta +from typing import Optional, Tuple + +TORCH_SPARSE_BSR_SCATTER_MM_LRU_CACHE_SIZE = int(os.getenv('TORCH_SPARSE_BSR_SCATTER_MM_LRU_CACHE_SIZE', 2)) + + +def check(cond, msg): + if not cond: + raise ValueError(msg) + + +def check_bsr_layout(f_name, t): + check( + t.layout == torch.sparse_bsr, + f"{f_name}(): only BSR sparse format is supported for the sparse argument.", + ) + + +def check_device(f_name, t, device): + check( + t.device == device and t.device.type == "cuda", + f"{f_name}(): all inputs are expected to be on the same GPU device.", + ) + + +def check_mm_compatible_shapes(f_name, lhs, rhs): + check( + lhs.dim() >= 2 and rhs.dim() >= 2, + f"{f_name}(): all inputs involved in the matrix product are expected to be at least 2D, " + f"but got lhs.dim() == {lhs.dim()} and rhs.dim() == {rhs.dim()}." 
+ ) + + m, kl = lhs.shape[-2:] + kr, n = rhs.shape[-2:] + + check( + kl == kr, + f"{f_name}(): arguments' sizes involved in the matrix product are not compatible for matrix multiplication, " + f"got lhs.shape[-1] == {kl} which is not equal to rhs.shape[-2] == {kr}.", + ) + + +def check_dtype(f_name, t, dtype, *additional_dtypes): + check( + t.dtype == dtype + and t.dtype in ((torch.half, torch.bfloat16, torch.float) + tuple(*additional_dtypes)), + f"{f_name}(): all inputs are expected to be of the same dtype " + f"and one of (half, bfloat16, float32) or {additional_dtypes}, " + f"but got dtype == {t.dtype}.", + ) + + +def check_blocksize(f_name, blocksize): + assert len(blocksize) == 2 + + def is_power_of_two(v): + return not (v & (v - 1)) + + def is_compatible_blocksize(b): + res = True + for blocksize in b: + # Triton loads only blocks which are at least 16 and powers of 2. + res = (blocksize >= 16 and is_power_of_two(blocksize)) and res + return res + + check( + is_compatible_blocksize(blocksize), + f"{f_name}(): sparse inputs' blocksize ({blocksize[0]}, {blocksize[1]}) " + "should be at least 16 and a power of 2 in each dimension.", + ) + + +def make_triton_contiguous(t): + """Return input as a triton-contiguous tensor. + + A triton-contiguous tensor is defined as a tensor that has strides + with minimal value equal to 1. + + While triton kernels support triton-non-contiguous tensors (all + strides being greater than 1 or having 0 strides) arguments, a + considerable slow-down occurs because tensor data is copied + element-wise rather than chunk-wise. + """ + if min(t.stride()) != 1: + # TODO: investigate if contiguity along other axes than the + # last one can be beneficial for performance + return t.contiguous() + else: + return t + + +def broadcast_batch_dims(f_name, *tensors): + try: + return torch.broadcast_shapes(*(t.shape[:-2] for t in tensors)) + except Exception: + check(False, f"{f_name}(): inputs' batch dimensions are not broadcastable!") + + +def slicer(dim, slice_range, *tensors): + for t in tensors: + slices = [slice(None)] * t.dim() + slices[dim] = slice_range + yield t[slices] + + +def multidim_slicer(dims, slices, *tensors): + for t in tensors: + s = [slice(None)] * t.dim() + for d, d_slice in zip(dims, slices): + if d is not None: + s[d] = d_slice + yield t[s] + + +def ptr_stride_extractor(*tensors): + for t in tensors: + yield t + yield from t.stride() + + +def grid_partitioner(full_grid, grid_blocks, tensor_dims_map): + assert 0 <= len(full_grid) <= 3 + assert 0 <= len(grid_blocks) <= 3 + + import itertools + + def generate_grid_points(): + for fg, mg in zip(full_grid, grid_blocks): + yield range(0, fg, mg) + + def generate_sliced_tensors(slices): + for t, t_dims in tensor_dims_map.items(): + yield next(multidim_slicer(t_dims, slices, t)) + + for grid_point in itertools.product(*generate_grid_points()): + grid = [min(fg - gp, mg) for fg, gp, mg in zip(full_grid, grid_point, grid_blocks)] + slices = [slice(gp, gp + g) for gp, g in zip(grid_point, grid)] + # grid_points are iterated in a "contiguous" order, i.e. + # left dimensions traversed slower than right dimensions. + # This order is reversed for CUDA grids. 
+ yield grid[::-1], *generate_sliced_tensors(slices) + + +def launch_kernel(kernel, tensor_dims_map, full_grid, grid_blocks=None): + # cuda_max_grid = (2 ** 31 - 1, 2 ** 16 - 1, 2 ** 16 - 1) + cuda_max_grid = (2147483647, 65535, 65535)[::-1] + if grid_blocks is None: + grid_blocks = cuda_max_grid + else: + + def valid_grid_dim(g, mg): + if g is None: + return mg + else: + # grid must be at least 1 and no greater than mg + return max(1, min(g, mg)) + + grid_blocks = tuple( + valid_grid_dim(g, mg) for g, mg in zip(grid_blocks, cuda_max_grid) + ) # type: ignore[assignment] + + for grid, *sliced_tensors in grid_partitioner(full_grid, grid_blocks, tensor_dims_map): + kernel(grid, *sliced_tensors) + + +def prepare_inputs(bsr, *dense_tensors): + # Introduce fake batch dimension if not present for convenience. + crow_indices = bsr.crow_indices().unsqueeze(0) + col_indices = bsr.col_indices().unsqueeze(0) + values = make_triton_contiguous(bsr.values().unsqueeze(0)) + tensors = [make_triton_contiguous(t.unsqueeze(0)) for t in dense_tensors] + + # Compute broadcasted batch dimension + batch_dims_broadcasted = torch.broadcast_shapes(values.shape[:-3], *(t.shape[:-2] for t in tensors)) + + # Broadcast batch dimensions and squash. + # The result can be either a view or a copy. + def batch_broadcast_and_squash(t, batch_dims, invariant_dims): + return t.broadcast_to(batch_dims + invariant_dims).flatten( + 0, len(batch_dims) - 1 + ) + + crow_indices = batch_broadcast_and_squash( + crow_indices, batch_dims_broadcasted, (-1,) + ) + + col_indices = batch_broadcast_and_squash( + col_indices, batch_dims_broadcasted, (-1,) + ) + values = batch_broadcast_and_squash( + values, batch_dims_broadcasted, values.shape[-3:] + ) + tensors = [ + batch_broadcast_and_squash(t, batch_dims_broadcasted, t.shape[-2:]) for t in tensors + ] + + return crow_indices, col_indices, values, *tensors + + +def broadcast_batch_dims_bsr(f_name, bsr, *tensors): + batch_shape = broadcast_batch_dims(f_name, bsr, *tensors) + + crow_indices = bsr.crow_indices().broadcast_to(batch_shape + (-1,)) + col_indices = bsr.col_indices().broadcast_to(batch_shape + (-1,)) + values = bsr.values().broadcast_to(batch_shape + bsr.values().shape[-3:]) + size = batch_shape + bsr.shape[-2:] + return torch.sparse_compressed_tensor(crow_indices, col_indices, values, size=size, layout=bsr.layout) + + +# NOTE: this function will ALWAYS create a view +def tile_to_blocksize(t, blocksize): + *rest, m, n = t.shape + new_shape = rest + [ + m // blocksize[0], + blocksize[0], + n // blocksize[1], + blocksize[1], + ] + # using .view instead of .reshape to ensure that the result is + # indeed a view: + return t.view(new_shape).transpose(-3, -2) + + +def as1Dbatch(tensor): + """Return tensor as 3D tensor by either prepending new dimensions to + the tensor shape (when ``tensor.ndim < 3``), or by collapsing + starting dimensions into the first dimension (when ``tensor.ndim > + 3``). + """ + while tensor.ndim < 3: + tensor = tensor.unsqueeze(0) + if tensor.ndim > 3: + tensor = tensor.flatten(0, tensor.ndim - 3) + assert tensor.ndim == 3, tensor.shape + return tensor + + +def scatter_mm(blocks, others, indices_data, *, accumulators=None): + """Scattered matrix multiplication of tensors. + + A scattered matrix multiplication is defined as a series of matrix + multiplications applied to input tensors according to the input + and output mappings specified by indices data. 
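+
+    As a minimal, illustrative sketch of the ``"scatter_mm"`` format described
+    below (arbitrary shapes; a CUDA device is assumed since the Triton kernels
+    require one)::
+
+        blocks = torch.randn(2, 16, 16, dtype=torch.float16, device="cuda")
+        others = torch.randn(2, 16, 16, dtype=torch.float16, device="cuda")
+        # both products are scattered into the single accumulator matrix 0
+        indices_data = ("scatter_mm",
+                        torch.tensor([0, 2], device="cuda"),
+                        torch.tensor([[0, 0], [1, 1]], device="cuda"))
+        out = scatter_mm(blocks, others, indices_data)
+        # out[0] is approximately blocks[0] @ others[0] + blocks[1] @ others[1]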
+ + The following indices data formats are supported for defining a + scattered matrix multiplication operation (:attr:`indices_data[0]` + holds the name of the indices data format as specified below): + + - ``"scatter_mm"`` - matrix multiplications scattered in batches + of tensors. + + If :attr:`blocks` is a :math:`(* \times M \times K) tensor, + :attr:`others` is a :math:`(* \times K \times N)` tensor, + :attr:`accumulators` is a :math:`(* \times M \times N)` tensor, + and :attr:`indices = indices_data['indices']` is a :math:`(* + \times 3)` tensor, then the operation is equivalent to the + following code:: + + c_offsets, pq = indices_data[1:] + for r in range(len(c_offsets) - 1): + for g in range(c_offsets[r], c_offsets[r + 1]): + p, q = pq[g] + accumulators[r] += blocks[p] @ others[q] + + - ``"bsr_strided_mm"`` - matrix multiplications scattered in + batches of tensors and a tensor. + + If :attr:`blocks` is a :math:`(Ms \times Ks) tensor, + :attr:`others` is a :math:`(* \times K \times N)` tensor, + :attr:`accumulators` is a :math:`(* \times M \times N)` tensor, then + the operation is equivalent to the following code:: + + c_indices, r_offsets, p_offsets, q_offsets, meta = indices_data[1:] + for b in range(nbatches): + for i, r in enumerate(r_offsets): + r0, r1 = divmod(r, N) + acc = accumulators[b, r0:r0 + Ms, r1:r1 + Ns] + for g in range(c_indices[i], c_indices[i+1]): + p = p_offsets[g] + q0, q1 = divmod(q_offsets[g], N) + acc += blocks[p] @ others[b, q0:q0 + Ks, q1:q1 + Ns] + + where ``Ns = N // meta['SPLIT_N']``, and ``M`` and ``K`` are + integer multiples of ``Ms`` and ``Ks``, respectively. + + - ``"bsr_strided_mm_compressed"`` - matrix multiplications + scattered in batches of tensors and a tensor. A memory and + processor efficient version of ``"bsr_strided_mm"`` format. If + :attr:`blocks` is a :math:`(Ms \times Ks) tensor, :attr:`others` + is a :math:`(* \times K \times N)` tensor, :attr:`accumulators` + is a :math:`(* \times M \times N)` tensor, then the operation is + equivalent to the following code:: + + c_indices, r_offsets, q_offsets, meta = indices_data[1:] + for b in range(nbatches): + for r in r_offsets: + m = (r // N) // Ms + n = (r % N) // Ns + r0, r1 = divmod(r, N) + c0, c1 = c_indices[m], c_indices[m + 1] + acc = accumulators[b, r0:r0 + Ms, r1:r1 + Ns] + for i, p in enumerate(range(c0, c1)): + q = q_offsets[n * c1 + (SPLIT_N - n) * c0 + i] + q0, q1 = divmod(q, N) + acc += blocks[p] @ others[b, q0:q0 + Ks, q1:q1 + Ns] + + where ``Ns = N // meta['SPLIT_N']``, and ``M`` and ``K`` are + integer multiples of ``Ms`` and ``Ks``, respectively. + + Notice that the order of ``r_offsets`` items can be arbitrary; + this property enables defining swizzle operators via + rearrangements of ``r_offsets`` items.. + + Auxilary functions are provided for pre-computing + :attr:`indices_data`. For example, + :func:`bsr_scatter_mm_indices_data` is used to define indices data + for matrix multiplication of BSR and strided tensors. + + Parameters + ---------- + blocks (Tensor): a 3-D tensor of first matrices to be multiplied + + others (Tensor): a tensor of second matrices to be multiplied. If + ``indices_data[0]=="scatter_mm"``, the tensor is a 1-D batch + tensor of second input matrices to be multiplied. Otherwise, the + second input matrices are slices of the :attr:`others` tensor. + indices_data (tuple): a format data that defines the inputs and + outputs of scattered matrix multiplications. 
+ + Keyword arguments + ----------------- + + accumulators (Tensor, optional): a tensor of matrix product + accumulators. If ``indices_data[0]=="scatter_mm"``, the tensor + is a 1-D batch tensor of output matrices. Otherwise, output + matrices are slices of the :attr:`accumulators` tensor. + """ + indices_format = indices_data[0] + + assert blocks.ndim == 3 + P, Ms, Ks = blocks.shape + + if indices_format == 'scatter_mm': + c_offsets, pq = indices_data[1:] + + assert others.ndim == 3 + Q, Ks_, Ns = others.shape + assert Ks == Ks_ + + if accumulators is None: + R = c_offsets.shape[0] - 1 + accumulators = torch.zeros((R, Ms, Ns), dtype=blocks.dtype, device=blocks.device) + else: + R, Ms_, Ns_ = accumulators.shape + assert Ms_ == Ms + assert Ns_ == Ns + + if Ms % 16 or Ks % 16 or Ns % 16 or _scatter_mm2 is None: + for r in range(c_offsets.shape[0] - 1): + g0 = c_offsets[r] + g1 = c_offsets[r + 1] + for g in range(g0, g1): + p, q = pq[g] + accumulators[r] += blocks[p] @ others[q] + else: + _scatter_mm2(blocks, others, c_offsets, pq, accumulators) + return accumulators + + elif indices_format == 'bsr_strided_mm': + others_shape = others.shape + others = as1Dbatch(others) + + B, K, N = others.shape + assert K % Ks == 0 + + c_indices, r_offsets, p_offsets, q_offsets, meta = indices_data[1:] + SPLIT_N = meta['SPLIT_N'] + + if accumulators is None: + M = Ms + (r_offsets.max().item() + 1) // N + accumulators = torch.zeros((*others_shape[:-2], M, N), dtype=blocks.dtype, device=blocks.device) + else: + M, N_ = accumulators.shape[-2:] + assert N_ == N + + accumulators_shape = accumulators.shape + accumulators = as1Dbatch(accumulators) + + Ns = N // SPLIT_N + + if Ms % 16 or Ks % 16 or Ns % 16 or _scatter_mm6 is None: + accumulators.zero_() + for b in range(B): + for r in range(r_offsets.shape[0]): + r_ = r_offsets[r].item() + g0 = c_indices[r].item() + g1 = c_indices[r + 1].item() + r0, r1 = divmod(r_, N) + acc = accumulators[b, r0:r0 + Ms, r1:r1 + Ns] + for g in range(g0, g1): + p, q = p_offsets[g], q_offsets[g] + q0, q1 = divmod(q.item(), N) + acc += blocks[p] @ others[b, q0:q0 + Ks, q1:q1 + Ns] + else: + _scatter_mm6(blocks, others, c_indices, r_offsets, p_offsets, q_offsets, meta, accumulators) + return accumulators.view(accumulators_shape) + + elif indices_format == 'bsr_strided_mm_compressed': + others_shape = others.shape + others = as1Dbatch(others) + + B, K, N = others.shape + assert K % Ks == 0 + + c_indices, r_offsets, q_offsets, meta = indices_data[1:] + SPLIT_N = meta['SPLIT_N'] + + if accumulators is None: + M = Ms + (r_offsets.max().item() + 1) // N + accumulators = torch.zeros((*others_shape[:-2], M, N), dtype=blocks.dtype, device=blocks.device) + else: + M, N_ = accumulators.shape[-2:] + assert N_ == N + + accumulators_shape = accumulators.shape + accumulators = as1Dbatch(accumulators) + + Ns = N // SPLIT_N + + if Ms % 16 or Ks % 16 or Ns % 16 or _scatter_mm6 is None: + for b in range(B): + for j in range(len(r_offsets)): + r0, r1 = divmod(r_offsets[j].item(), N) + m = r0 // Ms + n = r1 // Ns + c0 = c_indices[m].item() + c1 = c_indices[m + 1].item() + acc = accumulators[b, r0:r0 + Ms, r1:r1 + Ns] + for i, p in enumerate(range(c0, c1)): + q = q_offsets[n * c1 + (SPLIT_N - n) * c0 + i].item() + q0, q1 = divmod(q, N) + acc += blocks[p] @ others[b, q0:q0 + Ks, q1:q1 + Ns] + else: + p_offsets = torch.empty((0, ), dtype=q_offsets.dtype, device=q_offsets.device) + _scatter_mm6(blocks, others, c_indices, r_offsets, p_offsets, q_offsets, meta, accumulators) + return 
accumulators.view(accumulators_shape) + + else: + raise NotImplementedError(indices_format) + + +def scatter_mm_meta(M, K, N, Ms, Ks, + GROUP_SIZE=None, TILE_M=None, TILE_N=None, SPLIT_N=None, num_warps=None, num_stages=None, **extra): + if {TILE_M, TILE_N, SPLIT_N, num_warps, num_stages, GROUP_SIZE} == {None}: + device_name = torch.cuda.get_device_name() + meta = get_meta('scatter_mm', (M, K, N, Ms, Ks), device_name, + version=(0, torch.float16, 0.5)) + if meta is not None: + meta.update(**extra) + return meta + # The following parameters are optimized for the performance + # equilibrium points of bsr-dense and dense-dense matrix + # multiplications when using GPU card NVIDIA GeForce RTX 2060 + # SUPER. For points far from the performance equilibrium + # points as well as for other GPU cards, the optimal + # parameters are likely different from what specified below. + if (M, K, N) == (256,) * 3: + if (Ms, Ks) == (16, 16): + SPLIT_N=1;TILE_M=16;TILE_N=16;GROUP_SIZE=4;num_stages=1;num_warps=4 # noqa: E225,E231,E702 + elif (Ms, Ks) == (32, 32): + SPLIT_N=2;TILE_M=32;TILE_N=16;GROUP_SIZE=4;num_stages=1;num_warps=4 # noqa: E225,E231,E702 + elif (Ms, Ks) == (64, 64): + SPLIT_N=1;TILE_M=32;TILE_N=32;GROUP_SIZE=4;num_stages=1;num_warps=4 # noqa: E225,E231,E702 + elif (Ms, Ks) == (128, 128): + SPLIT_N=1;TILE_M=32;TILE_N=32;GROUP_SIZE=2;num_stages=1;num_warps=4 # noqa: E225,E231,E702 + elif (M, K, N) == (512,) * 3: + if (Ms, Ks) == (16, 16): + SPLIT_N=8;TILE_M=16;TILE_N=64;GROUP_SIZE=2;num_stages=1;num_warps=2 # noqa: E225,E231,E702 + elif (Ms, Ks) == (32, 32): + SPLIT_N=8;TILE_M=32;TILE_N=64;GROUP_SIZE=4;num_stages=1;num_warps=2 # noqa: E225,E231,E702 + elif (Ms, Ks) == (64, 64): + SPLIT_N=4;TILE_M=32;TILE_N=128;GROUP_SIZE=4;num_stages=1;num_warps=4 # noqa: E225,E231,E702 + elif (Ms, Ks) == (128, 128): + SPLIT_N=8;TILE_M=64;TILE_N=64;GROUP_SIZE=4;num_stages=1;num_warps=4 # noqa: E225,E231,E702 + elif (M, K, N) == (1024,) * 3: + if (Ms, Ks) == (16, 16): + SPLIT_N=4;TILE_M=16;TILE_N=128;GROUP_SIZE=2;num_stages=1;num_warps=1 # noqa: E225,E231,E702 + elif (Ms, Ks) == (32, 32): + SPLIT_N=8;TILE_M=32;TILE_N=64;GROUP_SIZE=2;num_stages=1;num_warps=1 # noqa: E225,E231,E702 + elif (Ms, Ks) == (64, 64): + SPLIT_N=16;TILE_M=64;TILE_N=64;GROUP_SIZE=4;num_stages=1;num_warps=2 # noqa: E225,E231,E702 + elif (Ms, Ks) == (128, 128): + SPLIT_N=16;TILE_M=64;TILE_N=64;GROUP_SIZE=4;num_stages=1;num_warps=4 # noqa: E225,E231,E702 + elif (Ms, Ks) == (256, 256): + SPLIT_N=16;TILE_M=64;TILE_N=64;GROUP_SIZE=2;num_stages=1;num_warps=4 # noqa: E225,E231,E702 + elif (M, K, N) == (2048,) * 3: + if (Ms, Ks) == (16, 16): + SPLIT_N=4;TILE_M=16;TILE_N=128;GROUP_SIZE=8;num_stages=1;num_warps=1 # noqa: E225,E231,E702 + elif (Ms, Ks) == (32, 32): + SPLIT_N=4;TILE_M=32;TILE_N=64;GROUP_SIZE=4;num_stages=1;num_warps=1 # noqa: E225,E231,E702 + elif (Ms, Ks) == (64, 64): + SPLIT_N=4;TILE_M=64;TILE_N=128;GROUP_SIZE=4;num_stages=1;num_warps=4 # noqa: E225,E231,E702 + elif (Ms, Ks) == (128, 128): + SPLIT_N=8;TILE_M=64;TILE_N=64;GROUP_SIZE=4;num_stages=1;num_warps=4 # noqa: E225,E231,E702 + elif (Ms, Ks) == (256, 256): + SPLIT_N=4;TILE_M=64;TILE_N=64;GROUP_SIZE=2;num_stages=1;num_warps=4 # noqa: E225,E231,E702 + elif (M, K, N) == (4096,) * 3: + if (Ms, Ks) == (16, 16): + SPLIT_N=2;TILE_M=16;TILE_N=256;GROUP_SIZE=2;num_stages=1;num_warps=2 # noqa: E225,E231,E702 + elif (Ms, Ks) == (32, 32): + SPLIT_N=2;TILE_M=32;TILE_N=64;GROUP_SIZE=2;num_stages=1;num_warps=1 # noqa: E225,E231,E702 + elif (Ms, Ks) == (64, 64): + 
SPLIT_N=2;TILE_M=64;TILE_N=128;GROUP_SIZE=2;num_stages=1;num_warps=4 # noqa: E225,E231,E702 + + if SPLIT_N is None: + # Assume NVIDIA GeForce RTX 2060 SUPER: + # With the probality of 92% (99.9% when N > 512), the + # performance will not be worse more than 2% from the + # performance when using an optimal value. Otherwise, when N + # <= 512, using the following heuristics may give upto 15% + # lower performance. + SPLIT_N = {16: 1, 32: 2, 64: 4, 128: 8, 256: 16, 512: 8, 1024: 16, 4096: 32, 8192: 64}.get(N, 16) + if Ms >= 512 and N >= 2048: + SPLIT_N = 1 + Ns = N // SPLIT_N + if TILE_M is None: + TILE_M = min(64 if Ns < 512 else 32, Ms) + if TILE_N is None: + TILE_N = min(64 if Ns < 512 else 32, Ns) + num_stages = num_stages or 1 + if num_warps is None: + if min(M, N) > 1024: + num_warps = {16: 1, 32: 1, 64: 2}.get(Ms, 4) + elif min(M, N) == 1024: + num_warps = {16: 1, 32: 1, 64: 2}.get(Ms, 4) + elif min(M, N) == 256: + num_warps = {16: 1, 32: 4}.get(Ms, 4) + else: + num_warps = {16: 1, 32: 2}.get(Ms, 4) + GROUP_SIZE = GROUP_SIZE or 4 + + assert TILE_M <= Ms, dict(TILE_M=TILE_M, Ms=Ms) + assert TILE_N <= Ns, dict(TILE_N=TILE_N, Ns=Ns) + assert Ms <= M, dict(M=M, Ms=Ms) + assert Ns <= N, dict(N=N, Ns=Ns) + assert Ks <= K, dict(K=K, Ks=Ks) + + return dict(TILE_M=TILE_M, TILE_N=TILE_N, GROUP_SIZE=GROUP_SIZE, + num_stages=num_stages, num_warps=num_warps, SPLIT_N=SPLIT_N, **extra) + + +def bsr_dense_addmm_meta(M, K, N, Ms, Ks, beta, alpha, + SPLIT_N=None, GROUP_SIZE_ROW=None, num_warps=None, num_stages=None, sparsity=None, dtype=None, **extra): + if dtype is None: + dtype = torch.float16 + if sparsity is None: + sparsity = 0.5 + if {SPLIT_N, num_warps, num_stages, GROUP_SIZE_ROW} == {None}: + device_name = torch.cuda.get_device_name() + key = (M, K, N, Ms, Ks, beta == 0, beta == 1, alpha == 1) + meta = get_meta('bsr_dense_addmm', key, + device_name, version=(0, dtype, sparsity)) + if meta is None and sparsity != 0.5: + meta = get_meta('bsr_dense_addmm', key, + device_name, version=(0, dtype, 0.5)) + if meta is not None: + meta.update(**extra) + return meta + SPLIT_N = SPLIT_N or max(N // Ms, 1) + GROUP_SIZE_ROW = GROUP_SIZE_ROW or 4 + num_stages = num_stages or 1 + num_warps = num_warps or 4 + return dict(SPLIT_N=SPLIT_N, GROUP_SIZE_ROW=GROUP_SIZE_ROW, num_stages=num_stages, num_warps=num_warps, **extra) + + +class TensorAsKey: + """A light-weight wrapper of a tensor that enables storing tensors as + keys with efficient memory reference based comparision as an + approximation to data equality based keys. + + Motivation: the hash value of a torch tensor is tensor instance + based that does not use data equality and makes the usage of + tensors as keys less useful. For instance, the result of + ``len({a.crow_indices(), a.crow_indices()})`` is `2`, although, + the tensor results from `crow_indices` method call are equal, in + fact, these share the same data storage. + On the other hand, for efficient caching of tensors we want to + avoid calling torch.equal that compares tensors item-wise. + + TensorAsKey offers a compromise in that it guarantees key equality + of tensors that references data in the same storage in the same + manner and without accessing underlying data. However, this + approach does not always guarantee correctness. For instance, for + a complex tensor ``x``, we have ``TensorAsKey(x) == + TensorAsKey(x.conj())`` while ``torch.equal(x, x.conj())`` would + return False. 
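+
+    A minimal usage sketch (illustrative only)::
+
+        t = torch.arange(6)
+        u, v, w = t[:3], t[:3], t[3:]
+        # u and v are distinct tensor objects viewing the same storage
+        # in the same way, so their keys compare (and hash) equal:
+        assert TensorAsKey(u) == TensorAsKey(v)
+        # w views the same storage at a different offset:
+        assert TensorAsKey(u) != TensorAsKey(w)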
+ """ + + def __init__(self, obj): + + def get_tensor_key(obj): + # Warning: TensorAsKey does not track negative nor + # conjugate bits of its input object because in the use + # case of wrapping compressed/plain indices of compressed + # sparse tensors (that are always integer tensors with + # non-negative items) these bits are never set. However, + # when extending the use of TensorAsKey to float or + # complex tensors, the values of these bits (see is_neg + # and is_conj methods) must be included in the key as + # well. + assert not (obj.dtype.is_floating_point or obj.dtype.is_complex), obj.dtype + return (obj.data_ptr(), obj.storage_offset(), obj.shape, obj.stride(), obj.dtype) + + self._obj_ref = weakref.ref(obj) + if obj.layout is torch.strided: + self.key = get_tensor_key(obj) + elif obj.layout in {torch.sparse_csr, torch.sparse_bsr}: + self.key = (get_tensor_key(obj.crow_indices()), get_tensor_key(obj.col_indices())) + elif obj.layout in {torch.sparse_csc, torch.sparse_bsc}: + self.key = (get_tensor_key(obj.ccol_indices()), get_tensor_key(obj.row_indices())) + else: + raise NotImplementedError(obj.layout) + self._hash = hash(self.key) + + def __hash__(self): + return self._hash + + def __eq__(self, other): + if not isinstance(other, TensorAsKey): + return False + if self.obj is None or other.obj is None: + # dead objects always compare unequal unless these are + # same objects + return self is other + return self.key == other.key + + @property + def obj(self): + """Return object if alive, otherwise None.""" + return self._obj_ref() + + +@lru_cache(maxsize=TORCH_SPARSE_BSR_SCATTER_MM_LRU_CACHE_SIZE) +def _bsr_scatter_mm_indices_data(indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, compressed_sparse_tensor_as_key): + bsr = compressed_sparse_tensor_as_key.obj + assert bsr is not None + crow_indices, col_indices = bsr.crow_indices(), bsr.col_indices() + device = crow_indices.device + indices_dtype = torch.int32 + + if indices_format == 'bsr_strided_mm_compressed': + Ns = N // SPLIT_N + q_offsets_lst = [] + b = torch.arange(SPLIT_N, dtype=indices_dtype, device=device) * Ns + for m in range(M // Ms): + r0 = crow_indices[m].item() + r1 = crow_indices[m + 1].item() + if r1 == r0: + continue + q_offsets_lst.append((col_indices[r0:r1] * (Ks * N)).repeat(SPLIT_N) + b.repeat_interleave(r1 - r0)) + q_offsets = torch.cat(q_offsets_lst) + crow_indices_diff = crow_indices.diff() + non_zero_row_indices = crow_indices_diff.nonzero() + a = non_zero_row_indices * (Ms * N) + r_offsets = (a + b).view(-1) + c_indices = crow_indices + # swizzle operation: mm elements with longer sums are computed first: + nnz_per_row = crow_indices_diff[non_zero_row_indices].repeat_interleave(SPLIT_N) + nnz_per_row, indices = nnz_per_row.sort(descending=True, stable=True) + r_offsets = r_offsets[indices] + return (indices_format, c_indices, r_offsets, q_offsets) + + elif indices_format == 'bsr_strided_mm': + Ns = N // SPLIT_N + p_offsets_lst = [] + q_offsets_lst = [] + b = torch.arange(SPLIT_N, dtype=indices_dtype, device=device) * Ns + for m in range(M // Ms): + r0 = crow_indices[m].item() + r1 = crow_indices[m + 1].item() + if r1 == r0: + continue + p_offsets_lst.append(torch.arange(r0, r1, dtype=indices_dtype, device=device).repeat(SPLIT_N)) + q_offsets_lst.append((col_indices[r0:r1] * (Ks * N)).repeat(SPLIT_N) + b.repeat_interleave(r1 - r0)) + q_offsets = torch.cat(q_offsets_lst) + crow_indices_diff = crow_indices.diff() + non_zero_row_indices = crow_indices_diff.nonzero() + a = non_zero_row_indices * (Ms * N) + 
r_offsets = (a + b).view(-1) + c_indices = torch.cat((crow_indices[:1], + torch.cumsum(crow_indices_diff[non_zero_row_indices].repeat_interleave(SPLIT_N), 0))) + p_offsets = torch.cat(p_offsets_lst) + return (indices_format, c_indices, r_offsets, p_offsets, q_offsets) + + elif indices_format == 'scatter_mm': + Ns = Ms + c_indices = [0] + pq_offsets = [] + # todo: eliminate inner for-loops for efficiency + for b in range(nbatches): + for m in range(M // Ms): + r0 = crow_indices[m].item() + r1 = crow_indices[m + 1].item() + for n in range(N // Ns): + c_indices.append(c_indices[-1] + r1 - r0) + for t in range(r1 - r0): + p = r0 + t + q = (col_indices[p].item() + b * (K // Ks)) * (N // Ns) + n + pq_offsets.append([p, q]) + + return (indices_format, + torch.tensor(c_indices, dtype=indices_dtype, device=device), + torch.tensor(pq_offsets, dtype=indices_dtype, device=device)) + + else: + raise ValueError(f'Invalid {indices_format=}. Expected bsr_strided_mm_compressed|bsr_strided_mm|scatter_mm') + + +def bsr_scatter_mm_indices_data(bsr, other, indices_format='bsr_strided_mm_compressed', **meta_input): + """Computes indices data for :func:`scatter_mm` used in BSR and + strided tensor matrix multiplication. + """ + assert bsr.dense_dim() == 0 + assert bsr.ndim == 2 # no batch dims + crow_indices = bsr.crow_indices() + col_indices = bsr.col_indices() + blocksize = bsr.values().shape[-2:] + M, K = bsr.shape + Ms, Ks = blocksize + K_, N = other.shape[-2:] + assert K_ == K + nbatches = other.shape[:-2].numel() + + meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input) + if 'allow_tf32' not in meta_input: + meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16}) + SPLIT_N = meta['SPLIT_N'] + indices_data = _bsr_scatter_mm_indices_data( + indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)) + + if indices_format == 'bsr_strided_mm_compressed': + meta.update(is_compressed=True) + return indices_data + (meta,) + elif indices_format == 'bsr_strided_mm': + meta.update(is_compressed=False) + return indices_data + (meta,) + else: + return indices_data + + +def bsr_scatter_mm(bsr, other, indices_data=None, out=None): + """BSR @ strided -> strided + """ + + assert bsr.ndim == 2 + assert other.ndim >= 2 + + Ms, Ks, Ns = bsr.shape[-2], bsr.shape[-1], other.shape[-1] + blocksize = bsr.values().shape[-2:] + + if indices_data is None: + indices_data = bsr_scatter_mm_indices_data(bsr, other, indices_format='bsr_strided_mm_compressed') + + indices_format = indices_data[0] + + if out is None: + out = torch.empty((*other.shape[:-2], Ms, Ns), dtype=bsr.dtype, device=bsr.device) + out_shape = out.shape + out = as1Dbatch(out) + + if bsr._nnz() == 0: + out.zero_() + elif indices_format in {'bsr_strided_mm_compressed', 'bsr_strided_mm'}: + out.zero_() + scatter_mm(bsr.values(), other, indices_data, accumulators=out) + elif indices_format == 'scatter_mm': + nbatches = other.shape[:-2].numel() + accumulators = torch.zeros((nbatches * Ms // blocksize[0] * Ns // blocksize[0], blocksize[0], blocksize[0]), + dtype=bsr.dtype, device=bsr.device) + others = (as1Dbatch(other) + .transpose(-2, -1) + .view(nbatches, Ns // blocksize[0], blocksize[0], Ks // blocksize[1], blocksize[1]) + .movedim((3, 1, 4, 2), (1, 2, 3, 4)) # equivalent to .transpose(-3, -2).transpose(-2, -1).transpose(-4, -3) + .flatten(0, 2) + ) + scatter_mm(bsr.values(), others, indices_data, accumulators=accumulators) + out.copy_(accumulators + .unflatten(0, (nbatches, Ms // blocksize[0], Ns // blocksize[0])) + .movedim((1, 2, 3, 4), (3, 1, 
4, 2)) # equivalent to .transpose(-4, -3).transpose(-2, -1).transpose(-3, -2) + .reshape(nbatches, Ns, Ms) + .transpose(-2, -1)) + else: + raise NotImplementedError(indices_format) + + return out.view(out_shape) + + +def bsr_dense_addmm( + input: torch.Tensor, + bsr: torch.Tensor, + dense: torch.Tensor, + *, + beta=1, + alpha=1, + out: Optional[torch.Tensor] = None, + skip_checks: bool = False, + max_grid: Optional[Tuple[Optional[int], Optional[int], Optional[int]]] = None, + meta: Optional[dict] = None): + f_name = 'bsr_dense_addmm' + values = bsr.values() + crow_indices = bsr.crow_indices() + col_indices = bsr.col_indices() + batch_ndim = crow_indices.dim() - 1 + M, K = bsr.shape[batch_ndim:batch_ndim + 2] + blocksize = values.shape[batch_ndim + 1:batch_ndim + 3] + N = dense.shape[-1] + + # todo: implement checks + + if out is None: + original_batch_dims_broadcasted = broadcast_batch_dims(f_name, bsr, dense) + out = dense.new_empty(original_batch_dims_broadcasted + (M, N)) + + if bsr._nnz() == 0 or alpha == 0 or N == 0 or M == 0 or K == 0: + if beta == 0: + out.zero_() + else: + out.copy_(input) + if beta != 1: + out.mul_(beta) + return out + + if meta is None: + sparsity = round(1 - bsr._nnz() * blocksize[0] * blocksize[1] / (M * K), 2) + meta = bsr_dense_addmm_meta(M, K, N, blocksize[0], blocksize[1], beta, alpha, sparsity=sparsity, dtype=out.dtype) + out_backup = out + + crow_indices, col_indices, values, input, dense, out = prepare_inputs(bsr, input, dense, out) + + BM, BK = blocksize + SPLIT_N = meta.get('SPLIT_N', N // BM) + BN = N // SPLIT_N + + out_untiled = out + out = tile_to_blocksize(out, (BM, BN)) + dense = tile_to_blocksize(dense, (BK, BN)) + input = tile_to_blocksize(input, (BM, BN)) + + dot_out_dtype = {torch.float16: tl.float32, + torch.bfloat16: tl.float32, + torch.float32: tl.float64, + torch.float64: tl.float64}[out.dtype] + + n_batches = dense.size(0) + n_block_rows = crow_indices.size(-1) - 1 + n_block_cols = dense.size(-3) + + full_grid = (n_batches, n_block_cols, n_block_rows) + if max_grid is not None: + grid_blocks = tuple(max_grid[:3][::-1]) + (None,) * (3 - len(max_grid[:3])) + else: + grid_blocks = None + + tensor_dims_map = { + values: (0, None, None), + crow_indices: (0, None, -1), + col_indices: (0, None, None), + input: (0, -3, -4), + dense: (0, -3, None), + out: (0, -3, -4), + } + + assert alpha != 0 + + def kernel(grid, *sliced_tensors): + _bsr_strided_addmm_kernel[grid]( + *ptr_stride_extractor(*sliced_tensors), + beta, alpha, + beta_is_one=beta == 1, + beta_is_nonzero=beta != 0, + alpha_is_one=alpha == 1, + BLOCKSIZE_ROW=BM, + BLOCKSIZE_INNER=BK, + BLOCKSIZE_COL=BN, + allow_tf32=dot_out_dtype == tl.float32, + acc_dtype=dot_out_dtype, + **meta) + + launch_kernel(kernel, tensor_dims_map, full_grid, grid_blocks) + + if out.data_ptr() != out_backup.data_ptr(): + # prepare_inputs has made a copy of out, copy its content back + # to out_backup: + out_backup.copy_(out_untiled.view(out_backup.shape)) + + return out_backup + + +if has_triton(): + import triton + import triton.language as tl + + @triton.jit + def _sampled_addmm_kernel( + alpha, + beta, + IS_BETA_ZERO: tl.constexpr, + BLOCKSIZE_ROW: tl.constexpr, + BLOCKSIZE_COL: tl.constexpr, + k, + TILE_K: tl.constexpr, + values_ptr, + values_batch_stride, + values_nnz_stride, + values_row_block_stride, + values_col_block_stride, + crow_indices_ptr, + crow_indices_batch_stride, + crow_indices_stride, + col_indices_ptr, + col_indices_batch_stride, + col_indices_stride, + mat1_ptr, + mat1_batch_stride, + 
mat1_tiled_row_stride, + mat1_tiled_col_stride, + mat1_row_block_stride, + mat1_col_block_stride, + mat2_ptr, + mat2_batch_stride, + mat2_tiled_row_stride, + mat2_tiled_col_stride, + mat2_row_block_stride, + mat2_col_block_stride, + acc_dtype: tl.constexpr, + allow_tf32: tl.constexpr, + ): + batch_pid = tl.program_id(axis=1) + row_block_pid = tl.program_id(axis=0) + + crow_indices_offset_ptr = ( + crow_indices_ptr + + crow_indices_batch_stride * batch_pid + + crow_indices_stride * row_block_pid + ) + nnz_offset = tl.load(crow_indices_offset_ptr) + nnz_offset_next = tl.load(crow_indices_offset_ptr + crow_indices_stride) + + # Compute nnz for the row with number row_block_pid. + # If it is zero, skip the row. + row_nnz = nnz_offset_next - nnz_offset + if row_nnz == 0: + return + + row_block_arange = tl.arange(0, BLOCKSIZE_ROW) + col_block_arange = tl.arange(0, BLOCKSIZE_COL) + + # Pointers are set to the first block of the current row. + values_block_ptrs = ( + values_ptr + + values_batch_stride * batch_pid + + values_nnz_stride * nnz_offset + + values_row_block_stride * row_block_arange[:, None] + + values_col_block_stride * col_block_arange[None, :] + ) + + col_index_nnz_ptr = ( + col_indices_ptr + + col_indices_batch_stride * batch_pid + + col_indices_stride * nnz_offset + ) + + # Advance mat1 to the current tiled row, ignore columns. + mat1_block_ptrs = ( + mat1_ptr + + mat1_batch_stride * batch_pid + + mat1_tiled_row_stride * row_block_pid + + mat1_row_block_stride * row_block_arange[:, None] + ) + + # Advance mat2 in batch and block col dimension. + mat2_block_ptrs = ( + mat2_ptr + + mat2_batch_stride * batch_pid + + mat2_col_block_stride * col_block_arange[None, :] + ) + + k_tile_arange = tl.arange(0, TILE_K) + for _ in range(row_nnz): + acc_block = tl.zeros((BLOCKSIZE_ROW, BLOCKSIZE_COL), dtype=acc_dtype) + + # find column block index + col_block = tl.load(col_index_nnz_ptr) + + for k_tile in range(0, k, TILE_K): + k_offsets = k_tile + k_tile_arange + mask_k = k_offsets < k + + mat1_block = tl.load( + mat1_block_ptrs + + mat1_col_block_stride * k_offsets[None, :], + mask=mask_k[None, :], other=0.0 + ) + + mat2_block = tl.load( + mat2_block_ptrs + + mat2_tiled_col_stride * col_block + + mat2_row_block_stride * k_offsets[:, None], + mask=mask_k[:, None], other=0.0 + ) + + acc_block += tl.dot(mat1_block, mat2_block, allow_tf32=allow_tf32, out_dtype=acc_dtype) + + if IS_BETA_ZERO: + acc_block *= alpha + else: + acc_block = alpha * acc_block + beta * tl.load(values_block_ptrs) + + # write result + tl.store(values_block_ptrs, acc_block.to(values_ptr.dtype.element_ty)) + + # advance val/col_index ptrs to the next block in the row. 
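+            # (each stored block occupies one step of values_nnz_stride along
+            # the nnz dimension of `values`, and col_indices holds exactly one
+            # block-column index per stored block)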
+ values_block_ptrs += values_nnz_stride + col_index_nnz_ptr += col_indices_stride + + @triton.jit + def _bsr_strided_dense_rowspace_kernel( + # values prologue + values_ptr, + values_batch_stride, + values_nnz_stride, + values_row_block_stride, + values_col_block_stride, + # values epilogue + # crow_indices prologue + crow_indices_ptr, + crow_indices_batch_stride, + crow_indices_stride, + # crow_indices epilogue + # col_indices prologue + col_indices_ptr, + col_indices_batch_stride, + col_indices_stride, + # col_indices epilogue + # dense prologue + dense_ptr, + dense_batch_stride, + dense_tiled_row_stride, + dense_tiled_col_stride, + dense_row_block_stride, + dense_col_block_stride, + # dense epilogue + # output prologue + output_ptr, + output_batch_stride, + output_tiled_row_stride, + output_tiled_col_stride, + output_row_block_stride, + output_col_block_stride, + # output epilogue + # + # gh-113754: Always keep all constexpr arguments at the end of + # triton kernel arguments list because with triton 2.1 or + # earlier non-contiguous outputs will corrupt CUDA state due + # to a triton bug (fixed in openai/triton#2262). + BLOCKSIZE_ROW: tl.constexpr, + BLOCKSIZE_COL: tl.constexpr, + acc_dtype: tl.constexpr, + allow_tf32: tl.constexpr, + GROUP_SIZE_ROW: tl.constexpr, + ): + batch_pid = tl.program_id(axis=2) + row_block_pid = tl.program_id(axis=0) + col_block_pid = tl.program_id(axis=1) + n_block_rows = tl.num_programs(axis=0) + n_block_cols = tl.num_programs(axis=1) + + row_block_pid, col_block_pid = tl.swizzle2d( + row_block_pid, col_block_pid, n_block_rows, n_block_cols, GROUP_SIZE_ROW + ) + + crow_indices_offset_ptr = ( + crow_indices_ptr + + crow_indices_batch_stride * batch_pid + + crow_indices_stride * row_block_pid + ) + nnz_offset = tl.load(crow_indices_offset_ptr) + nnz_offset_next = tl.load(crow_indices_offset_ptr + crow_indices_stride) + + # Compute nnz for the row with number row_block_pid. + # If it is zero, skip the row. + row_nnz = nnz_offset_next - nnz_offset + if row_nnz == 0: + return + + row_block_arange = tl.arange(0, BLOCKSIZE_ROW) + col_block_arange = tl.arange(0, BLOCKSIZE_COL) + + # Pointers are set to the first block of the current row. + values_block_ptrs = ( + values_ptr + + values_batch_stride * batch_pid + + values_nnz_stride * nnz_offset + + values_row_block_stride * row_block_arange[:, None] + + values_col_block_stride * col_block_arange[None, :] + ) + + # NOTE: dense is advanced into all dimensions but the tiled row one. + # That will be advanced in the loop according to values in col_indices. + dense_block_ptrs = ( + dense_ptr + + dense_batch_stride * batch_pid + + dense_tiled_col_stride * col_block_pid + + dense_row_block_stride * col_block_arange[:, None] + + dense_col_block_stride * row_block_arange[None, :] + ) + + # Pointers are set to exact write-to locations + output_ptrs = ( + output_ptr + + output_batch_stride * batch_pid + + output_tiled_row_stride * row_block_pid + + output_tiled_col_stride * col_block_pid + + output_row_block_stride * row_block_arange[:, None] + + output_col_block_stride * row_block_arange[None, :] + ) + + # Set pointer to the first nonzero element in the current row + col_index_nnz_ptr = ( + col_indices_ptr + + col_indices_batch_stride * batch_pid + + col_indices_stride * nnz_offset + ) + + output_acc_block = tl.zeros((BLOCKSIZE_ROW, BLOCKSIZE_COL), dtype=acc_dtype) + for _ in range(row_nnz): + values_block = tl.load(values_block_ptrs) + + # find which row of dense needs to get loaded + # for multiplication with values_block. 
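+            # (col_indices stores the block-column of each stored BSR block;
+            # that block-column selects the tiled row of `dense` that
+            # participates in this block product)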
+ dense_row_idx = tl.load(col_index_nnz_ptr) + dense_block = tl.load(dense_block_ptrs + dense_tiled_row_stride * dense_row_idx) + + # do block mm + output_acc_block += tl.dot(values_block, dense_block, allow_tf32=allow_tf32, out_dtype=acc_dtype) + + # move val/col_index ptrs to the next block in the row + values_block_ptrs += values_nnz_stride + col_index_nnz_ptr += col_indices_stride + + # write back the result + tl.store(output_ptrs, output_acc_block.to(output_ptr.dtype.element_ty)) + + + def _run_sampled_addmm_kernel( + alpha, beta, is_beta_zero, + blocksize, k, tile_k, + values, crow_indices, col_indices, + mat1, mat2, + max_grid + ): + n_batches = values.size(0) + n_block_rows = crow_indices.size(-1) - 1 + + full_grid = (n_batches, n_block_rows) + if max_grid is not None: + grid_blocks = tuple(max_grid[:2][::-1]) + (None,) * (2 - len(max_grid[:2])) + else: + grid_blocks = None + tensor_dims_map = { + values: (0, None), + crow_indices: (0, -1), + col_indices: (0, None), + mat1: (0, -4), + mat2: (0, None), + } + if values.dtype in (torch.half, torch.bfloat16): + acc_dtype = tl.float32 + allow_tf32 = True + else: + acc_dtype = tl.float64 + allow_tf32 = False + + def kernel(grid, *sliced_tensors): + _sampled_addmm_kernel[grid]( + alpha, beta, is_beta_zero, + *blocksize, k, tile_k, + *ptr_stride_extractor(*sliced_tensors), + acc_dtype=acc_dtype, + allow_tf32=allow_tf32, + num_stages=1, + num_warps=4 + ) + + launch_kernel(kernel, tensor_dims_map, full_grid, grid_blocks) + + + def sampled_addmm( + input: torch.Tensor, + mat1: torch.Tensor, + mat2: torch.Tensor, + *, + beta=1.0, + alpha=1.0, + out: Optional[torch.Tensor] = None, + skip_checks: bool = False, + max_grid: Optional[Tuple[Optional[int], Optional[int], Optional[int]]] = None, + ): + f_name = "sampled_addmm" + + check_bsr_layout(f_name, input) + input_broadcasted = broadcast_batch_dims_bsr(f_name, input, mat1, mat2) + + if not skip_checks: + check_device(f_name, mat1, input.device) + check_device(f_name, mat2, input.device) + if beta != 0.0 and input.dtype is torch.bool: + check( + False, + f"{f_name}(): having beta == {beta} not equal to 0.0 with boolean mask is not allowed." 
+ ) + if input.dtype is not torch.bool: + check_dtype(f_name, mat1, input.dtype) + check_dtype(f_name, mat2, input.dtype) + else: + check_dtype(f_name, mat1, mat2.dtype) + check_mm_compatible_shapes(f_name, mat1, mat2) + if out is not None: + check_bsr_layout(f_name, out) + check_device(f_name, out, mat1.device) + check_dtype(f_name, out, input.dtype) + check( + out.shape == input_broadcasted.shape + and out._nnz() == input._nnz(), + f"{f_name}(): Expects `out` to be of shape {input_broadcasted.shape} " + f"and with nnz equal to {input_broadcasted._nnz()} " + f"but got out.shape = {out.shape} and out.nnz = {out._nnz()}" + ) + + if out is None: + out = input_broadcasted.to(mat1.dtype, copy=True) + else: + out.copy_(input_broadcasted) + + if out.numel() == 0 or out._nnz() == 0: + return out + + blocksize = out.values().shape[-2:] + m = mat1.size(-2) + n = mat2.size(-1) + k = mat1.size(-1) + + # NOTE: (m, 0) @ (0, n) == zeros(m, n) + if alpha == 0.0 or k == 0: + out.values().mul_(beta) + return out + + # prepare inputs by reshaping them to be kernel-compatible + out_backup = out + crow_indices, col_indices, values, mat1, mat2 = prepare_inputs(out, mat1, mat2) + + mat1 = tile_to_blocksize(mat1, (blocksize[0], k)) + mat2 = tile_to_blocksize(mat2, (k, blocksize[1])) + tile_k = max(*blocksize) + + _run_sampled_addmm_kernel( + alpha, beta, beta == 0.0, + blocksize, k, tile_k, + values, crow_indices, col_indices, + mat1, mat2, + max_grid + ) + + # If nnz x block strides are not the same in out_backup.values and values, + # it means that out_backup.values and values are not the views of each other, + # so we have to copy. + if out_backup.values().stride()[-3:] != values.stride()[-3:]: + out_backup.values().copy_(values.reshape(out_backup.values().shape)) + return out_backup + + + def bsr_dense_mm( + bsr: torch.Tensor, + dense: torch.Tensor, + *, + out: Optional[torch.Tensor] = None, + skip_checks: bool = False, + max_grid: Optional[Tuple[Optional[int], Optional[int], Optional[int]]] = None, + meta: Optional[dict] = None + ): + f_name = "bsr_dense_mm" + m, kl = bsr.shape[-2:] + if not skip_checks: + check_bsr_layout(f_name, bsr) + check_device(f_name, bsr, dense.device) + check_dtype(f_name, bsr, dense.dtype) + check_mm_compatible_shapes(f_name, bsr, dense) + + n = dense.size(-1) + row_block, col_block = bsr.values().shape[-2:] + check_blocksize(f_name, (row_block, col_block)) + check( + not n % 16, + f"{f_name}(): dense.size(-1) == {n} should be divisible by 16" + ) + else: + kr, n = dense.shape[-2:] + + original_batch_dims_broadcasted = broadcast_batch_dims(f_name, bsr, dense) + + if out is not None and not skip_checks: + expected_out_shape = original_batch_dims_broadcasted + (m, n) + check( + out.shape == expected_out_shape, + "bsr_dense_mm(): `out` argument has wrong shape, " + f"expected {expected_out_shape}, but got {out.shape}.", + ) + check( + out.is_contiguous() or out.transpose(-2, -1).is_contiguous(), + "bsr_dense_mm(): only row-major/col-major `out` arguments are supported, " + "i.e. 
(out.is_contiguous() or out.transpose(-2, -1).is_contiguous()) " + "should be True.", + ) + + # Allocate out + if out is None: + out = dense.new_empty(original_batch_dims_broadcasted + (m, n)) + + # Short circuit if lhs is zero + if bsr._nnz() == 0: + return out.zero_() + + # with beta==0, addmm ignores input content, so we can use out + # as a placeholder for input because their shapes match: + return bsr_dense_addmm(out, bsr, dense, alpha=1, beta=0, out=out) + + + @triton.jit + def _bsr_softmax_kernel( + crow_indices_ptr, + crow_indices_batch_stride, + crow_indices_stride, + values_ptr, + values_batch_stride, + values_row_block_stride, + values_nnz_col_block_stride, + row_block, col_block, + MAX_ROW_NNZ: tl.constexpr, + TILE: tl.constexpr + ): + batch_pid = tl.program_id(axis=2) + row_block_offset_pid = tl.program_id(axis=1) + row_block_pid = tl.program_id(axis=0) + + crow_indices_offset_ptr = ( + crow_indices_ptr + + crow_indices_batch_stride * batch_pid + + crow_indices_stride * row_block_pid + ) + nnz_offset = tl.load(crow_indices_offset_ptr) + nnz_offset_next = tl.load(crow_indices_offset_ptr + crow_indices_stride) + + # Compute nnz for the row with number row_block_pid. + # If it is zero, skip the row. + row_nnz = nnz_offset_next - nnz_offset + if row_nnz == 0: + return + + row_arange = tl.arange(0, TILE) + mask = row_arange < row_nnz * col_block + + curr_row_values_ptrs = ( + values_ptr + + values_batch_stride * batch_pid + + values_row_block_stride * row_block_offset_pid + + nnz_offset * col_block + ) + + # find max in the row + row_tile = tl.load(curr_row_values_ptrs + row_arange, mask=mask, other=-float('inf')).to(tl.float32) + max_row_value = tl.max(row_tile, axis=0) + for _ in range(TILE, MAX_ROW_NNZ, TILE): + row_arange += TILE + mask = row_arange < row_nnz * col_block + row_tile = tl.load(curr_row_values_ptrs + row_arange, mask=mask, other=-float('inf')).to(tl.float32) + curr_max_row_value = tl.max(row_tile, axis=0) + max_row_value = tl.where(max_row_value > curr_max_row_value, max_row_value, curr_max_row_value) + + # find denominator for stable softmax + num = tl.exp(row_tile - max_row_value) + denom = tl.sum(num, axis=0) + for _ in range(TILE, MAX_ROW_NNZ, TILE): + row_arange -= TILE + mask = row_arange < row_nnz * col_block + row_tile = tl.load(curr_row_values_ptrs + row_arange, mask=mask, other=-float('inf')).to(tl.float32) + num = tl.exp(row_tile - max_row_value) + denom += tl.sum(num, axis=0) + + # populate output + tl.store(curr_row_values_ptrs + row_arange, (num / denom).to(values_ptr.dtype.element_ty), mask=mask) + for _ in range(TILE, MAX_ROW_NNZ, TILE): + row_arange += TILE + mask = row_arange < row_nnz * col_block + row_tile = tl.load(curr_row_values_ptrs + row_arange, mask=mask, other=-float('inf')).to(tl.float32) + num = tl.exp(row_tile - max_row_value) + tl.store(curr_row_values_ptrs + row_arange, (num / denom).to(values_ptr.dtype.element_ty), mask=mask) + + + def bsr_softmax(input, max_row_nnz=None): + f_name = "bsr_softmax" + + check_bsr_layout(f_name, input) + check_dtype(f_name, input, input.dtype) + + if input._nnz() == 0 or input.numel() == 0: + return input.clone() + + m, n = input.shape[-2:] + nnz = input._nnz() + row_block, col_block = input.values().shape[-2:] + + if max_row_nnz is None: + max_row_nnz = triton.next_power_of_2(n) + else: + max_row_nnz = triton.next_power_of_2(max_row_nnz) + + crow_indices = input.crow_indices().unsqueeze(0).flatten(0, -2) + # reshape values from + # (b1, ..., bn, nnz, row_block, col_block) to + # (b1 * ... 
* bn, row_block, nnz * col_block). + # This simplifies batch dim manipulation and unlocks + # the possibility to access all nnzs in any given row. + if input.values().transpose(-3, -2).is_contiguous(): + # Need to clone to avoid `contiguous` returning a view. + values = input.values().clone() + else: + values = input.values() + values = values.transpose(-3, -2).contiguous().unsqueeze(0).flatten(0, -4).reshape(-1, row_block, nnz * col_block) + full_grid = (values.shape[0], row_block, m // row_block) + grid_blocks = None + tensor_dims_map = { + # We span nnz number of blocks, not nnz + 1, + # hence crow_indices[..., :-1] + crow_indices[..., :-1]: (0, None, -1), + values: (0, None, None), + } + + def kernel(grid, *sliced_tensors): + _bsr_softmax_kernel[grid]( + *ptr_stride_extractor(*sliced_tensors), + row_block, col_block, + max_row_nnz, + # Triton's max numel is bounded by 2 ** 17. + min(2 ** 17, max_row_nnz) + ) + + launch_kernel(kernel, tensor_dims_map, full_grid, grid_blocks) + + values = values.reshape(-1, row_block, nnz, col_block).transpose(-3, -2).reshape(*input.values().shape) + + return torch.sparse_compressed_tensor( + input.crow_indices().clone(), + input.col_indices().clone(), + values, + size=input.shape, + layout=input.layout + ) + + def _scaled_dot_product_attention( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attn_mask: Optional[torch.Tensor], + dropout_p: float = 0.0, + is_causal: bool = False, + scale: Optional[float] = None + ): + f_name = "_scaled_dot_product_attention" + check( + not is_causal, + f"{f_name}(): is_causal == True is not supported." + ) + check( + attn_mask is not None, + f"{f_name}(): attn_mask == None is not supported." + ) + assert attn_mask is not None + + check( + attn_mask.layout == torch.sparse_bsr, + f"{f_name}(): " + f"attn_mask.layout must be {torch.sparse_bsr}, but got " + f"attn_mask.layout == {attn_mask.layout}." + ) + + check_device(f_name, key, query.device) + check_device(f_name, value, query.device) + check_device(f_name, attn_mask, query.device) + + check_dtype(f_name, key, query.dtype) + check_dtype(f_name, value, query.dtype) + if attn_mask.dtype is not torch.bool: + check_dtype(f_name, attn_mask, query.dtype) + + sdpa = sampled_addmm(attn_mask, query, key.transpose(-2, -1), beta=0.0, skip_checks=False) + if scale is None and query.size(-1) == 0 or scale == 0.0: + check( + False, + f"{f_name}(): current value of scale == {scale} " + "results in division by zero." 
+ ) + scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale + sdpa.values().mul_(scale_factor) + sdpa = bsr_softmax(sdpa) + torch.nn.functional.dropout(sdpa.values(), p=dropout_p, inplace=True) + sdpa = bsr_dense_mm(sdpa, value) + return sdpa + + @triton.jit + def _scatter_mm2_kernel( + M: tl.constexpr, K: tl.constexpr, N: tl.constexpr, + blocks_ptr, blocks_stride_P, blocks_stride_M, blocks_stride_K, + others_ptr, others_stride_Q, others_stride_K, others_stride_N, + accumulators_ptr, accumulators_stride_R, accumulators_stride_M, accumulators_stride_N, + pq_offsets_ptr, pq_offsets_stride, + pq_ptr, pq_stride_T, pq_stride_1, + dot_out_dtype: tl.constexpr, + TILE_M: tl.constexpr, + TILE_N: tl.constexpr, + allow_tf32: tl.constexpr): + + Ms = M // TILE_M + Ns = N // TILE_N + + pid_t = tl.program_id(axis=0) + + pid = tl.program_id(axis=1) + pid_m = pid // Ms + pid_n = pid % Ms + + rm = (pid_m * TILE_M + tl.arange(0, TILE_M)) + rn = (pid_n * TILE_N + tl.arange(0, TILE_N)) + rk = tl.arange(0, K) + + A_ptr = blocks_ptr + (rm[:, None] * blocks_stride_M + rk[None, :] * blocks_stride_K) + B_ptr = others_ptr + (rk[:, None] * others_stride_K + rn[None, :] * others_stride_N) + + g0 = tl.load(pq_offsets_ptr + pid_t * pq_offsets_stride) + g1 = tl.load(pq_offsets_ptr + (pid_t + 1) * pq_offsets_stride) + + if g0 == g1: + return + + acc_block = tl.zeros((TILE_M, TILE_N), dtype=dot_out_dtype) + + for i in range(g0, g1): + p = tl.load(pq_ptr + i * pq_stride_T) + q = tl.load(pq_ptr + i * pq_stride_T + pq_stride_1) + A = tl.load(A_ptr + p * blocks_stride_P) + B = tl.load(B_ptr + q * others_stride_Q) + acc_block += tl.dot(A, B, out_dtype=dot_out_dtype, allow_tf32=allow_tf32) + + C_ptr = accumulators_ptr + pid_t * accumulators_stride_R + ( + rm[:, None] * accumulators_stride_M + rn[None, :] * accumulators_stride_N) + tl.store(C_ptr, acc_block.to(accumulators_ptr.dtype.element_ty)) + + def _scatter_mm2( + blocks: torch.Tensor, + others: torch.Tensor, + pq_offsets: torch.Tensor, + pq_indices: torch.Tensor, + accumulators: torch.Tensor + ): + P, M, K = blocks.shape + Q, _, N = others.shape + R, _, _ = accumulators.shape + + meta = dict(TILE_M=max(16, M // 4), TILE_N=max(16, N // 4), num_stages=1, num_warps=2) + + def grid(META): + return (pq_offsets.shape[0] - 1, triton.cdiv(M, META['TILE_M']) * triton.cdiv(N, META['TILE_N']), 1) + + dot_out_dtype = {torch.float16: tl.float32, + torch.bfloat16: tl.float32, + torch.float32: tl.float64, + torch.float64: tl.float64}[accumulators.dtype] + if 'allow_tf32' not in meta: + meta.update(allow_tf32=dot_out_dtype == tl.float32) + _scatter_mm2_kernel[grid]( + M, K, N, + blocks, blocks.stride(0), blocks.stride(1), blocks.stride(2), + others, others.stride(0), others.stride(1), others.stride(2), + accumulators, accumulators.stride(0), accumulators.stride(1), accumulators.stride(2), + pq_offsets, pq_offsets.stride(0), + pq_indices, pq_indices.stride(0), pq_indices.stride(1), + dot_out_dtype=dot_out_dtype, + **meta + ) + + @triton.jit + def _scatter_mm6_kernel( + nbatches, Ms, Ks: tl.constexpr, N, + blocks_ptr, blocks_stride_P, blocks_stride_M, blocks_stride_K, + others_ptr, others_stride_B, others_stride_K, others_stride_N, + accumulators_ptr, accumulators_stride_B, accumulators_stride_M, accumulators_stride_N, + c_indices_ptr, r_offsets_ptr, + p_offsets_ptr, q_offsets_ptr, + is_compressed: tl.constexpr, + dot_out_dtype: tl.constexpr, + SPLIT_N: tl.constexpr, + TILE_M: tl.constexpr, + TILE_N: tl.constexpr, + GROUP_SIZE: tl.constexpr, + allow_tf32: tl.constexpr): + 
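+        # Grid layout (set up by _scatter_mm6 below): axis 0 enumerates
+        # (batch, r_offsets entry) pairs and axis 1 enumerates the
+        # (TILE_M, TILE_N) tiles of a single (Ms, Ns) output block,
+        # regrouped by GROUP_SIZE (the usual swizzle for improving data
+        # reuse between neighbouring programs).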
Ns = N // SPLIT_N + BLOCKS_M = Ms // TILE_M + BLOCKS_N = Ns // TILE_N + + pid_t_ = tl.program_id(axis=0) + pid = tl.program_id(axis=1) + pid_b = pid_t_ % nbatches + pid_t = pid_t_ // nbatches + + num_pid_in_group = GROUP_SIZE * BLOCKS_N + group_id = pid // num_pid_in_group + first_pid_m = group_id * GROUP_SIZE + group_size_m = min(BLOCKS_M - first_pid_m, GROUP_SIZE) + pid_m = first_pid_m + (pid % group_size_m) + pid_n = (pid % num_pid_in_group) // group_size_m + + rm = (pid_m * TILE_M + tl.arange(0, TILE_M)) + rn = (pid_n * TILE_N + tl.arange(0, TILE_N)) + rk = tl.arange(0, Ks) + A_ptr = blocks_ptr + (rm[:, None] * blocks_stride_M + rk[None, :] * blocks_stride_K) + B_ptr = others_ptr + pid_b * others_stride_B + (rk[:, None] * others_stride_K + rn[None, :] * others_stride_N) + + # When is_compressed is True, r is the only variable that + # depends on pid_t. This property allows sorting r values + # before calling the kernel. The sorting of r is equivalent to + # defining swizzle operator outside of the kernel. + r = tl.load(r_offsets_ptr + pid_t) + + if is_compressed: + m = (r // N) // Ms + n = (r % N) // Ns + r0 = tl.load(c_indices_ptr + m) + r1 = tl.load(c_indices_ptr + m + 1) + g0 = n * r1 + (SPLIT_N - n) * r0 + nnz = r1 - r0 + else: + g0 = tl.load(c_indices_ptr + pid_t) + g1 = tl.load(c_indices_ptr + pid_t + 1) + nnz = g1 - g0 + + q_ptr = q_offsets_ptr + g0 + acc_block = tl.zeros((TILE_M, TILE_N), dtype=dot_out_dtype) + + if is_compressed: + A_ptr += r0 * blocks_stride_P # type: ignore[possibly-undefined] + for _ in range(nnz): + q = tl.load(q_ptr) + B = tl.load(B_ptr + q) + A = tl.load(A_ptr) + acc_block += tl.dot(A, B, out_dtype=dot_out_dtype, allow_tf32=allow_tf32) + A_ptr += blocks_stride_P + q_ptr += 1 + else: + p_ptr = p_offsets_ptr + g0 + for _ in range(nnz): + q = tl.load(q_ptr) + B = tl.load(B_ptr + q) + p = tl.load(p_ptr) + A = tl.load(A_ptr + p * blocks_stride_P) + p_ptr += 1 + q_ptr += 1 + acc_block += tl.dot(A, B, out_dtype=dot_out_dtype, allow_tf32=allow_tf32) + + C_ptr = accumulators_ptr + r + pid_b * accumulators_stride_B + ( + rm[:, None] * accumulators_stride_M + rn[None, :] * accumulators_stride_N) + tl.store(C_ptr, acc_block.to(accumulators_ptr.dtype.element_ty)) + + def _scatter_mm6( + blocks: torch.Tensor, + others: torch.Tensor, + c_indices: torch.Tensor, + r_offsets: torch.Tensor, + p_offsets: torch.Tensor, + q_offsets: torch.Tensor, + meta: dict, + accumulators: torch.Tensor, + force_contiguous: bool = True, + ): + SPLIT_N = meta['SPLIT_N'] + P, Ms, Ks = blocks.shape + B, K_, N = others.shape + B_, M, N_ = accumulators.shape + assert N_ == N + Ns = N // SPLIT_N + assert B_ == B + + def grid(META): + return (r_offsets.shape[0] * B, triton.cdiv(Ms, META['TILE_M']) * triton.cdiv(Ns, META['TILE_N'])) + + dot_out_dtype = {torch.float16: tl.float32, + torch.bfloat16: tl.float32, + torch.float32: tl.float64, + torch.float64: tl.float64}[accumulators.dtype] + if 'allow_tf32' not in meta: + meta.update(allow_tf32=dot_out_dtype == tl.float32) + + assert c_indices.stride(0) == 1 + assert r_offsets.stride(0) == 1 + assert p_offsets.stride(0) == 1 + assert q_offsets.stride(0) == 1 + + # Re non-contiguous tensor arguments. Sometimes triton kernel + # launches may fail with + # + # RuntimeError: Triton Error [CUDA]: an illegal memory access was encountered + # + # that appears to be case when the size of a non-contiguous + # tensor argument is larger than a certain threshold. Could + # this be related to shared memory or L1 cache size of a GPU + # card? 
In anycase, ensuring that tensor arguments are + # contiguous seems to avoid the above exception. So, in the + # following we'll always convert tensor arguments to + # C-contiguous tensors. + + if force_contiguous: + blocks = blocks.contiguous() + others = others.contiguous() + if not accumulators.is_contiguous(): + accumulators_ = accumulators.contiguous() + else: + accumulators_ = accumulators + else: + accumulators_ = accumulators + + _scatter_mm6_kernel[grid]( + B, Ms, Ks, N, + blocks, blocks.stride(0), blocks.stride(1), blocks.stride(2), + others, others.stride(0), others.stride(1), others.stride(2), + accumulators_, accumulators_.stride(0), accumulators_.stride(1), accumulators_.stride(2), + c_indices, + r_offsets, + p_offsets, + q_offsets, + dot_out_dtype=dot_out_dtype, + **meta + ) + + if force_contiguous and not accumulators.is_contiguous(): + accumulators.copy_(accumulators_) + + @triton.jit + def _bsr_strided_addmm_kernel( + # values prologue + values_ptr, + values_batch_stride, + values_nnz_stride, + values_row_block_stride, + values_col_block_stride, + # values epilogue + # crow_indices prologue + crow_indices_ptr, + crow_indices_batch_stride, + crow_indices_stride, + # crow_indices epilogue + # col_indices prologue + col_indices_ptr, + col_indices_batch_stride, + col_indices_stride, + # col_indices epilogue + # input prologue + input_ptr, + input_batch_stride, + input_tiled_row_stride, + input_tiled_col_stride, + input_row_block_stride, + input_col_block_stride, + # input epilogue + # dense prologue + dense_ptr, + dense_batch_stride, + dense_tiled_row_stride, + dense_tiled_col_stride, + dense_row_block_stride, + dense_col_block_stride, + # dense epilogue + # output prologue + output_ptr, + output_batch_stride, + output_tiled_row_stride, + output_tiled_col_stride, + output_row_block_stride, + output_col_block_stride, + # output epilogue + beta, + alpha, + beta_is_one: tl.constexpr, + beta_is_nonzero: tl.constexpr, + alpha_is_one: tl.constexpr, + BLOCKSIZE_ROW: tl.constexpr, + BLOCKSIZE_COL: tl.constexpr, + BLOCKSIZE_INNER: tl.constexpr, + acc_dtype: tl.constexpr, + allow_tf32: tl.constexpr, + GROUP_SIZE_ROW: tl.constexpr, + SPLIT_N: tl.constexpr + ): + + batch_pid = tl.program_id(axis=2) + row_block_pid = tl.program_id(axis=0) + col_block_pid = tl.program_id(axis=1) + n_block_rows = tl.num_programs(axis=0) + n_block_cols = tl.num_programs(axis=1) + + row_block_pid, col_block_pid = tl.swizzle2d( + row_block_pid, col_block_pid, n_block_rows, n_block_cols, GROUP_SIZE_ROW + ) + + crow_indices_offset_ptr = ( + crow_indices_ptr + + crow_indices_batch_stride * batch_pid + + crow_indices_stride * row_block_pid + ) + nnz_offset = tl.load(crow_indices_offset_ptr) + nnz_offset_next = tl.load(crow_indices_offset_ptr + crow_indices_stride) + + # Compute nnz for the row with number row_block_pid. + row_nnz = nnz_offset_next - nnz_offset + + row_block_arange = tl.arange(0, BLOCKSIZE_ROW) + inner_block_arange = tl.arange(0, BLOCKSIZE_INNER) + col_block_arange = tl.arange(0, BLOCKSIZE_COL) + + if beta_is_nonzero: + # Pointers are set to exact write-to locations + input_ptrs = ( + input_ptr + + input_batch_stride * batch_pid + + input_tiled_row_stride * row_block_pid + + input_tiled_col_stride * col_block_pid + + input_row_block_stride * row_block_arange[:, None] + + input_col_block_stride * col_block_arange[None, :] + ) + + # Pointers are set to the first block of the current row. 
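+        # (a BLOCKSIZE_ROW x BLOCKSIZE_INNER grid of element pointers into
+        # `values`; the loop below advances it by values_nnz_stride once per
+        # stored block of the current row)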
+ values_block_ptrs = ( + values_ptr + + values_batch_stride * batch_pid + + values_nnz_stride * nnz_offset + + values_row_block_stride * row_block_arange[:, None] + + values_col_block_stride * inner_block_arange[None, :] + ) + + # NOTE: dense is advanced into all dimensions but the tiled row one. + # That will be advanced in the loop according to values in col_indices. + dense_block_ptrs = ( + dense_ptr + + dense_batch_stride * batch_pid + + dense_tiled_col_stride * col_block_pid + + dense_row_block_stride * inner_block_arange[:, None] + + dense_col_block_stride * col_block_arange[None, :] + ) + + # Pointers are set to exact write-to locations + output_ptrs = ( + output_ptr + + output_batch_stride * batch_pid + + output_tiled_row_stride * row_block_pid + + output_tiled_col_stride * col_block_pid + + output_row_block_stride * row_block_arange[:, None] + + output_col_block_stride * col_block_arange[None, :] + ) + + # Set pointer to the first nonzero element in the current row + col_index_nnz_ptr = ( + col_indices_ptr + + col_indices_batch_stride * batch_pid + + col_indices_stride * nnz_offset + ) + + # alpha is never 0 + if beta_is_nonzero: + output_acc_block = tl.load(input_ptrs).to(acc_dtype) # type: ignore[possibly-undefined] + if not (beta_is_one and alpha_is_one): + beta_alpha = beta / alpha + output_acc_block *= beta_alpha + else: + output_acc_block = tl.zeros((BLOCKSIZE_ROW, BLOCKSIZE_COL), dtype=acc_dtype) + + for _ in range(row_nnz): + values_block = tl.load(values_block_ptrs) + + # find which row of dense needs to get loaded + # for multiplication with values_block. + dense_row_idx = tl.load(col_index_nnz_ptr) + dense_block = tl.load(dense_block_ptrs + dense_tiled_row_stride * dense_row_idx) + + # do block mm + output_acc_block += tl.dot(values_block, dense_block, allow_tf32=allow_tf32, out_dtype=acc_dtype) + + # move val/col_index ptrs to the next block in the row + values_block_ptrs += values_nnz_stride + col_index_nnz_ptr += col_indices_stride + + if not alpha_is_one: + output_acc_block *= alpha + + # write back the result + tl.store(output_ptrs, output_acc_block.to(output_ptr.dtype.element_ty)) + + +else: + bsr_softmax = None # type: ignore[assignment] + bsr_dense_mm = None # type: ignore[assignment] + sampled_addmm = None # type: ignore[assignment] + _scaled_dot_product_attention = None # type: ignore[assignment] + _scatter_mm2 = None # type: ignore[assignment] + _scatter_mm6 = None # type: ignore[assignment] + _bsr_strided_addmm_kernel = None # type: ignore[assignment] diff --git a/venv/lib/python3.10/site-packages/torch/sparse/_triton_ops_meta.py b/venv/lib/python3.10/site-packages/torch/sparse/_triton_ops_meta.py new file mode 100644 index 0000000000000000000000000000000000000000..15e00d11e7926fa290877ea84b1c93885b66ecfa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/sparse/_triton_ops_meta.py @@ -0,0 +1,4029 @@ +"""Provides optimal triton kernel parameters. + +Aim +--- + +The usage of optimal triton kernel parameters may increase the +performance of operations several times. For example, for large tensor +shapes, the usage of a bsr tensor as mat1 argument in addmm-based +operations typically outperforms the corresponding operation with +strided-only inputs when the blocked representation of a tensor +provides a better alignement with memory access than what the strided +representation would provide. 
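+
+For example (an illustrative sketch only, not a measured result), a
+stored meta parameter set for the bsr_dense_addmm kernel is a small
+dictionary such as::
+
+  {"GROUP_SIZE_ROW": 4, "SPLIT_N": 8, "num_stages": 1, "num_warps": 4}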
+
+Pre-computed kernel parameters
+------------------------------
+
+This script finds and stores the optimal triton kernel parameters for
+a specific set of shape configurations. For instance, the set of shape
+configurations of the bsr_dense_addmm kernel is defined as
+
+  input, out: M x N strided tensor
+  mat1: M x K bsr tensor with blocksize (BM, BK) and given sparsity
+  mat2: K x N strided tensor
+  dtype = float16, bfloat16, float32
+  sparsity = 0.5
+  M = 256, 512, ..., 16384
+  K = M
+  N = 256, 512, ..., 131072
+  BM = 16, 32, ..., 128
+  BK = BM
+  alpha = 1
+  beta = 0, 1
+  GPUs: NVIDIA A100-SXM4-80GB
+
+Approximations
+--------------
+
+It is practically infeasible to pre-compute optimal kernel parameters
+for all possible shape configurations as well as for all existing
+GPUs. Therefore, we'll assume that the pre-computed optimal parameters
+are good-enough approximations when
+1) the used GPU is any of the NVIDIA A100 Tensor Core GPUs,
+2) the actual sparsity of mat1 differs from the sparsity value 0.5.
+
+If a particular shape configuration does not fall in the set of
+pre-computed kernel parameters, does not match the approximations
+listed above, or the used GPU device is not an NVIDIA A100 GPU, then a
+reference set of triton kernel parameters will be used when executing
+operations. The reference kernel parameters are defined in
+torch/sparse/_triton_ops.py; see, for instance, the bsr_dense_addmm_meta
+function.
+
+Computing optimal kernel parameters
+-----------------------------------
+
+If the approximations listed above are unacceptable, e.g. when one
+seeks the maximal possible performance, the optimal kernel parameters
+for a particular GPU can be computed by simply running this script in
+the pytorch development tree::
+
+  cd /path/to/pytorch
+  python setup.py develop
+  python torch/sparse/_triton_ops_meta.py
+
+This will compute the optimal kernel parameters for the GPU device
+available in the host system for all shape configurations listed in
+"Pre-computed kernel parameters" above. The results will be stored in
+the database of kernel parameters. Currently, this database is defined
+as this module (see the "BEGIN GENERATED DATA" comment below), which is
+modified when the script is run. Create a pytorch PR with the
+corresponding modifications in this file to make the computed optimal
+kernel parameters available for other users as pre-computed kernel
+parameters.
+
+Moreover, one can compute the optimal kernel parameters for a specific
+set of shape configurations and specific sparsity patterns. For that,
+use the tuning functions provided by this module:
+
+  tune_bsr_dense_addmm(input, mat1, mat2, beta=1, alpha=1, out=None, verbose=False, store=False) -> meta
+
+The tuning functions return a dictionary of optimal kernel parameters
+that can be passed to the corresponding operation, e.g.
+
+  bsr_dense_addmm(..., meta=meta)
+
+Or, when store==True, the optimal kernel parameters will be stored in
+the database of pre-computed kernel parameters at runtime so that all
+addmm-based operations such as torch.addmm, torch.mm, and
+torch.nn.functional.linear will benefit from using the computed
+optimal set of kernel parameters.
+
+Note that running tune_bsr_dense_addmm can take several minutes. So
+use it wisely, e.g. by implementing persistent storage of optimized
+kernel parameters. See the source code of get_meta and
+tune_bsr_dense_addmm to learn how to register a custom set of optimal
+kernel parameters for addmm-based operations.
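+
+A minimal sketch of a direct lookup on a CUDA machine (the key values
+below are illustrative; get_meta returns None when no matching entry is
+stored for the current device)::
+
+  import torch
+  from torch.sparse._triton_ops_meta import get_meta
+
+  meta = get_meta(
+      "bsr_dense_addmm",
+      # key layout: (M, K, N, BM, BK, beta == 0, beta == 1, alpha == 1)
+      (4096, 4096, 4096, 64, 64, False, True, True),
+      version=(0, torch.float16, 0.5),
+  )
+  if meta is not None:
+      print(meta)  # e.g. {"GROUP_SIZE_ROW": ..., "SPLIT_N": ..., ...}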
+ +""" +__all__ = ["get_meta", "tune_bsr_dense_addmm"] + +import inspect +import itertools +import re +import warnings +from typing import Any, Dict + +import torch +from torch.hub import tqdm +from torch.testing import make_tensor + + +def get_meta(op, key, device_name=None, version=(0, torch.float16, 0.5), exact=False): + """Return triton kernel meta parameters of the specified op and its inputs key. + + Parameters + ---------- + op (str): The name of an operation that implementation uses meta parameters. + key (tuple): A tuple of op input parameters, e.g. shapes, etc. + device_name (optional, str): The name of a device for which op + parameters are provided. + version (optional, hashable): Specifies the version of parameters. + exact (optional, bool): When True, the returned data (if + available) corresponds exactly to the specified device_name and + version information. Otherwise, if the corresponding data is not + available but there exists a data set that is computed for a + similar GPU device, then this data set will be returned. + + Returns + ------- + result (dict): The requested mapping of parameter names and + values, or None when no data is available. + """ + if device_name is None: + device_name = torch.cuda.get_device_name() + op_data = _operation_device_version_data.get((op, device_name, version)) + if op_data is None and not exact: + # A lack of op data could be due to using a (slightly) + # different GPU model compared to a model for which optimal + # meta parameters have been computed. In the following we'll + # assume that there is a set of GPU models that all have + # a similar set of optimal meta parameters. + if re.match(r"NVIDIA A100[^\d]", device_name) is not None: + device_name = "NVIDIA A100-SXM4-80GB" + else: + return + op_data = _operation_device_version_data.get((op, device_name, version)) + if op_data is None: + return + values = op_data.get(key) + if values is not None: + if op == "scatter_mm": + names = ( + "GROUP_SIZE", + "SPLIT_N", + "TILE_M", + "TILE_N", + "num_stages", + "num_warps", + ) + return dict(zip(names, values)) + elif op == "bsr_dense_addmm": + return dict( + zip(("GROUP_SIZE_ROW", "SPLIT_N", "num_stages", "num_warps"), values) + ) + raise NotImplementedError(f"names for {op=}") + + +def update(op, device_name, version, key, value): + """Update the db of op parameters.""" + if (op, device_name, version) in _operation_device_version_data: + if _operation_device_version_data[op, device_name, version].get(key) == value: + return + _operation_device_version_data[op, device_name, version][key] = value + else: + _operation_device_version_data[op, device_name, version] = {key: value} + + +def dump(): + """Store the current runtime db state to the module file.""" + current_file = inspect.getfile(dump) + f = open(current_file) + current_content = f.read() + f.close() + begin_data_str = "# BEGIN GENERATED DATA\n" + begin_data_index = current_content.find(begin_data_str) + end_data_index = current_content.find(" # END GENERATED DATA\n") + if begin_data_index == -1 or end_data_index == -1: + warnings.warn( + f"{current_file} cannot be updated:" + " BEGIN/END GENERATED DATA comment blocks appear to be corrupted" + ) + return + + def sort_key(key): + op, device_name, version = key + version = tuple( + (str(item) if isinstance(item, torch.dtype) else item) for item in version + ) + return (op, device_name, version) + + part1 = current_content[: begin_data_index + len(begin_data_str)] + part2 = current_content[end_data_index:] + data_part = [] + for op_key in 
sorted(_operation_device_version_data, key=sort_key): + data_part.append(" " + repr(op_key).replace("'", '"') + ": {") + op_data = _operation_device_version_data[op_key] + for key in sorted(op_data): + data_part.append(f" {key}: {op_data[key]},") + data_part.append(" },") + new_content = part1 + "\n".join(data_part) + "\n" + part2 + if current_content != new_content: + f = open(current_file, "w") + f.write(new_content) + f.close() + + +def minimize( + target_func, + initial_parameters, + reference_parameters, + step_func, + max_step=2, + verbose=False, + all_values=None, +): + """Find a dict of parameters that minimizes the target function using + the initial dict of parameters and a step function that progresses + a specified parameter in a dict of parameters. + + Parameters + ---------- + target_func (callable): a functional with the signature + ``target_func(parameters: dict) -> float`` + initial_parameters (dict): a set of parameters used as an initial + value to the minimization process. + reference_parameters (dict): a set of parameters used as an + reference value with respect to which the speed up is computed. + step_func (callable): a functional with the signature + ``step_func(parameter_name:str, parameter_value:int, direction:int, parameters:dict) -> int`` + that increments or decrements (when ``direction`` is positive or + negative, respectively) the parameter with given name and value. + When return value is equal to ``parameter_value``, it means that + no step along the given direction can be made. + + Returns + ------- + parameters (dict): a set of parameters that minimizes the target + function. + speedup_incr (float): a speedup change given in percentage. + timing (float): the value of the target function at the parameters. + sensitivity_message (str): a message containing sensitivity. + information of parameters around the target function minimizer. + """ + + def to_key(parameters): + return tuple(parameters[k] for k in sorted(parameters)) + + def from_key(key, parameters): + return dict(zip(sorted(parameters), key)) + + if all_values is None: + all_values = dict() + + directions = list(range(-max_step, max_step + 1)) + names = sorted(initial_parameters) + all_directions = [] + for d_tuple in itertools.product(*((directions,) * len(names))): + dist = sum(map(abs, d_tuple)) + if dist > 0 and dist <= max_step: + all_directions.append((dist, d_tuple)) + all_directions.sort() + + try: + reference_target = target_func(reference_parameters) + except Exception as msg: + if verbose and "out of resource" not in str(msg): + print(f"{reference_parameters=} lead to failure: {msg}.") + reference_target = None + if reference_target is not None: + all_values[to_key(reference_parameters)] = reference_target + + parameters = initial_parameters + try: + initial_target = target_func(parameters) + except Exception as msg: + if reference_target is None: + if verbose: + print( + f"{initial_parameters=} lead to failure: {msg}. Optimization failed!" + ) + return {}, -1, -1, f"{msg}" + if verbose and "out of resource" not in str(msg): + print( + f"{initial_parameters=} lead to failure: {msg}. Using reference parameters instead of initial parameters." 
+ ) + parameters = reference_parameters + initial_target = reference_target + + if reference_target is None: + if verbose: + print("Using initial parameters instead of reference parameters.") + reference_target = initial_target + + initial_key = to_key(parameters) + minimal_target = all_values[initial_key] = initial_target + pbar = tqdm( + total=len(all_directions), + desc="Tuning...", + disable=not verbose, + ncols=75, + ) + while True: + for i, (_, d_tuple) in enumerate(all_directions): + pbar.update(1) + next_parameters = parameters.copy() + for name, direction in zip(names, d_tuple): + value = next_parameters[name] + if direction == 0: + continue + next_value = step_func(name, value, direction, parameters) + if next_value == value: + break + next_parameters[name] = next_value + else: + next_key = to_key(next_parameters) + if next_key in all_values: + continue + try: + next_target = target_func(next_parameters) + except Exception as msg: + all_values[next_key] = str(msg) + if verbose and "out of resource" not in str(msg): + print(f"{next_parameters=} lead to failure: {msg}. Skipping.") + continue + all_values[next_key] = next_target + + if next_target < minimal_target: + minimal_target = next_target + parameters = next_parameters + pbar.total += i + 1 + break + else: + # ensure stable minimizer: + minimizer_keys = { + k + for k, v in all_values.items() + if isinstance(v, float) and abs(1 - v / minimal_target) < 0.001 + } + minimizer_key = ( + initial_key if initial_key in minimizer_keys else min(minimizer_keys) + ) + minimizer_target = all_values[minimizer_key] + parameters = from_key(minimizer_key, parameters) + speedup_incr = (1 - minimal_target / reference_target) * 100 + if speedup_incr < 0: + if verbose: + print( + f"{speedup_incr=} is negative. Rerunning minimize with reference parameters as initial parameters." 
+ ) + return minimize( + target_func, + reference_parameters, + reference_parameters, + step_func, + max_step=max_step, + verbose=verbose, + all_values=all_values, + ) + sensitivity = [] + for name in parameters: + value = parameters[name] + rel_diffs = [] + for direction in range(-max_step, max_step + 1): + if direction == 0: + continue + next_value = step_func(name, value, direction, parameters) + if next_value == value: + rel_diffs.append(0) + continue + next_parameters = parameters.copy() + next_parameters[name] = next_value + next_key = to_key(next_parameters) + next_target = all_values.get(next_key) + if next_target is None or isinstance(next_target, str): + rel_diffs.append(0) + continue + rel_diff = (next_target / minimal_target - 1) * 100 + rel_diffs.append(rel_diff) + sensitivity.append((max(rel_diffs), rel_diffs, name)) + + sensitivity_message = [f"timing0={initial_target:.3f}"] + for _, rel_diffs, name in sorted(sensitivity, reverse=True): + left_diffs = "|".join( + [f"{rel_diff:.1f}" for rel_diff in rel_diffs[:max_step]] + ) + right_diffs = "|".join( + [f"{rel_diff:.1f}" for rel_diff in rel_diffs[max_step:]] + ) + sensitivity_message.append( + f"{name}={parameters[name]} ({left_diffs}...{right_diffs} %)" + ) + sensitivity_message = ", ".join(sensitivity_message) + return parameters, speedup_incr, minimal_target, sensitivity_message + + +def create_blocked_tensor(B, M, N, blocksize, sparsity, dtype, device): + assert ( + sparsity <= 1.0 and sparsity >= 0.0 + ), "sparsity should be a value between 0 and 1" + assert M % blocksize[0] == 0 + assert N % blocksize[1] == 0 + shape = (B, M // blocksize[0], N // blocksize[1])[int(B == 0) :] + A = torch.bernoulli(torch.full(shape, 1 - sparsity, dtype=dtype, device=device)) + expected_nnz = int((1 - sparsity) * M * N / (blocksize[0] * blocksize[1])) + nonzero_indices = A.flatten().nonzero() + actual_nnz = nonzero_indices.shape[0] + if actual_nnz > expected_nnz: + selected_nonzeros = torch.randperm(actual_nnz)[: actual_nnz - expected_nnz] + A.flatten()[nonzero_indices[selected_nonzeros]] = 0 + elif actual_nnz < expected_nnz: + zero_indices = (A == 0).flatten().nonzero() + selected_zeros = torch.randperm(zero_indices.shape[0])[ + : expected_nnz - actual_nnz + ] + A.flatten()[zero_indices[selected_zeros]] = 1 + A = torch.repeat_interleave(A, blocksize[0], dim=-2) + A = torch.repeat_interleave(A, blocksize[1], dim=-1) + return A + + +def optimize_scatter_mm( + m, k, n, bm, bk, dtype=torch.float16, device="cuda", sparsity=0.5, force=False +): + import triton + + from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data + + key = (m, k, n, bm, bk) + + version = (0, dtype, sparsity) + device_name = torch.cuda.get_device_name() + + reference_meta = dict( + GROUP_SIZE=1, + TILE_M=16, + TILE_N=16, + SPLIT_N=n // 16, + num_stages=1, + num_warps=1, + ) + + initial_meta = get_meta( + "scatter_mm", key, device_name=device_name, version=version, exact=True + ) + + if initial_meta is None: + initial_meta = get_meta( + "bsr_dense_addmm", + key, + device_name=device_name, + version=(0, dtype, 0.5), + exact=True, + ) + if initial_meta is None: + initial_meta = reference_meta + elif not force: + return + + print(f"{m, k, n, bm, bk, initial_meta, reference_meta=}") + torch.manual_seed(0) + bsr = create_blocked_tensor( + 0, m, k, (bm, bk), sparsity, dtype, device + ).to_sparse_bsr((bm, bk)) + dense = make_tensor(k, n, dtype=dtype, device=device) + + def bench(meta, bsr=bsr, dense=dense): + indices_data = bsr_scatter_mm_indices_data( + 
bsr, dense, indices_format="bsr_strided_mm_compressed", **meta + ) + + def test_func(): + return bsr_scatter_mm(bsr, dense, indices_data=indices_data) + + ms_min = triton.testing.do_bench( + test_func, warmup=500, rep=100, fast_flush=False + ) + + return ms_min + + def step_meta_parameter(name, value, direction, meta, m=m, n=n, k=k, bm=bm, bk=bk): + # return next value in positive or negative direction, or + # input value if the step will result an invalid + # value. The input value is assumed to be valid. + + is_log = name in {"SPLIT_N", "TILE_M", "TILE_N", "num_warps"} + min_value = dict( + SPLIT_N=1, TILE_M=16, TILE_N=16, num_warps=1, num_stages=1, GROUP_SIZE=1 + )[name] + max_value = dict( + SPLIT_N=n // meta["TILE_N"], TILE_M=bm, TILE_N=n // meta["SPLIT_N"] + ).get(name) + value_step = dict( + SPLIT_N=2, TILE_M=2, TILE_N=2, num_warps=2, num_stages=1, GROUP_SIZE=1 + )[name] + if is_log: + next_value = ( + value * value_step**direction + if direction > 0 + else value // (value_step ** abs(direction)) + ) + else: + next_value = value + value_step * direction + if min_value is not None: + next_value = max(next_value, min_value) + if max_value is not None: + next_value = min(next_value, max_value) + if name == "SPLIT_N" and n % next_value != 0: + return value + # Hard-skip parameter combinations that break CUDA state for pytorch: + if (dtype, name, next_value, m, n, k, bm, bk) in { + (torch.float32, "num_warps", 32, 256, 256, 256, 16, 16), + (torch.float32, "num_warps", 16, 256, 256, 256, 32, 32), + (torch.float32, "num_warps", 16, 256, 256, 256, 64, 64), + (torch.float32, "num_warps", 16, 256, 256, 256, 128, 128), + (torch.float32, "num_warps", 16, 512, 512, 256, 128, 128), + } and re.match(r"NVIDIA A100[^\d]", device_name) is not None: + return value + return next_value + + meta, speedup, timing, sensitivity_message = minimize( + bench, initial_meta, reference_meta, step_meta_parameter + ) + if initial_meta is not reference_meta and initial_meta == meta and not force: + return + print(f"{meta=} {speedup=:.1f} % {timing=:.3f} ms") + if speedup < 0: + return + device_name = torch.cuda.get_device_name() + + update( + "scatter_mm", device_name, version, key, tuple(meta[k] for k in sorted(meta)) + ) + + +def tune_bsr_dense_addmm( + input, + bsr, + dense, + *, + beta=1, + alpha=1, + out=None, + store=False, + verbose=False, + force=False, +): + """Tune bsr_dense_addmm kernel parameters against the given inputs. + + When store is True, the tuning results will be stored in the + database of kernel parameters. + """ + import triton + + from torch.sparse._triton_ops import bsr_dense_addmm + + N = dense.shape[-1] + values = bsr.values() + crow_indices = bsr.crow_indices() + batch_ndim = crow_indices.dim() - 1 + M, K = bsr.shape[batch_ndim : batch_ndim + 2] + BM, BK = values.shape[batch_ndim + 1 : batch_ndim + 3] + + # Reference parameters is a set of parameters that leads to a + # successful kernel call and the corresponding timing is used as a + # reference for computing speedups. Avoid changing the reference + # parameters when possible. + reference_meta = dict( + GROUP_SIZE_ROW=1, num_stages=1, num_warps=4, SPLIT_N=max(N // BM, 1) + ) + + # Compute the key of parameters: + sparsity = round(1 - bsr._nnz() * BM * BK / (M * K), 2) + dtype = bsr.dtype + version = (0, dtype, sparsity) + key = (M, K, N, BM, BK, beta == 0, beta == 1, alpha == 1) + + # For tuning, for an initial state, use parameters from the + # database if available, otherwise, use the reference parameters. 
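+    # A hedged, illustrative sketch of the lookup below (comments only, not
+    # executed): for a hypothetical float16 problem with M = K = 1024,
+    # N = 4096, 64x64 blocks, a measured sparsity of about 0.7, and the
+    # default beta = alpha = 1, the key and version computed above would be
+    #
+    #     key = (1024, 1024, 4096, 64, 64, False, True, True)
+    #     version = (0, torch.float16, 0.7)
+    #
+    # and the initial parameters are resolved in this order: an exact
+    # (key, version) entry in the database, then the default 0.5-sparsity
+    # entry for the same dtype, and finally the reference_meta defined above.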
+ initial_meta = get_meta("bsr_dense_addmm", key, version=version, exact=True) + if initial_meta is None: + may_skip_update = False + initial_meta = get_meta( + "bsr_dense_addmm", key, version=(0, dtype, 0.5), exact=True + ) + if initial_meta is None: + initial_meta = reference_meta + elif not force: + return initial_meta + else: + may_skip_update = True + + # The target function that is minimized in the tuning process: + def bench(meta, input=input, bsr=bsr, dense=dense, alpha=alpha, out=out): + def test_func(): + return bsr_dense_addmm( + input, bsr, dense, beta=beta, alpha=alpha, meta=meta, out=out + ) + + return triton.testing.do_bench(test_func, warmup=500, rep=100, fast_flush=False) + + # The step function that increments a specified meta parameter: + def step_meta_parameter(name, value, direction, meta, M=M, N=N, K=K, BM=BM, BK=BK): + # return next value in positive or negative direction, or + # input value if the step will result an invalid + # value. The input value is assumed to be valid. + is_log = name in {"SPLIT_N", "num_warps"} + min_value = dict(SPLIT_N=1, num_warps=1, num_stages=1, GROUP_SIZE_ROW=1)[name] + max_value = dict(SPLIT_N=max(N // BM, 1)).get(name) + value_step = dict(SPLIT_N=2, num_warps=2, num_stages=1, GROUP_SIZE_ROW=1)[name] + if is_log: + next_value = ( + value * value_step**direction + if direction > 0 + else value // (value_step ** abs(direction)) + ) + else: + next_value = value + value_step * direction + if min_value is not None: + next_value = max(next_value, min_value) + if max_value is not None: + next_value = min(next_value, max_value) + if name == "SPLIT_N" and N % next_value != 0: + return value + return next_value + + # Tune: + meta, speedup, timing, sensitivity_message = minimize( + bench, + initial_meta, + reference_meta, + step_meta_parameter, + max_step=2, + verbose=verbose, + ) + if verbose: + print(f"-> {sensitivity_message}, {speedup=:.1f} %, {timing=:.3f} ms") + + if store and not ( + may_skip_update and meta == initial_meta and initial_meta is not reference_meta + ): + device_name = torch.cuda.get_device_name() + update( + "bsr_dense_addmm", + device_name, + version, + key, + tuple(meta[k] for k in sorted(meta)), + ) + + return meta + + +def optimize_bsr_dense_addmm( + m, + k, + n, + bm, + bk, + beta=1, + alpha=1, + dtype=torch.float16, + device="cuda", + sparsity=0.5, + force=False, + verbose=False, +): + torch.manual_seed(0) + bsr = create_blocked_tensor( + 0, m, k, (bm, bk), sparsity, dtype, device + ).to_sparse_bsr((bm, bk)) + dense = make_tensor(k, n, dtype=dtype, device=device) + input = make_tensor(m, n, dtype=dtype, device=device) + tune_bsr_dense_addmm( + input, + bsr, + dense, + beta=beta, + alpha=alpha, + store=True, + force=force, + verbose=verbose, + ) + + +def main(op="scatter_mm", force=False, dtype=torch.float16, verbose=True): + import itertools + + sizes_lst = [256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072] + shapes_lst = [(sz, sz) for sz in sizes_lst[:-3]] + blocksize_lst = [(16, 16), (32, 32), (64, 64), (128, 128)] + sparsity_lst = [0.5, 0.7, 0.3][:1] + for sparsity in sparsity_lst: + print(f"{op, dtype, sparsity=}") + try: + for (M, K), N, (BM, BK) in itertools.product( + shapes_lst, sizes_lst, blocksize_lst + ): + if op == "scatter_mm": + optimize_scatter_mm( + M, K, N, BM, BK, force=force, sparsity=sparsity, dtype=dtype + ) + elif op == "bsr_dense_addmm": + for alpha, beta in [(1, 1), (1, 0)]: + optimize_bsr_dense_addmm( + M, + K, + N, + BM, + BK, + beta=beta, + alpha=alpha, + force=force, + 
sparsity=sparsity, + dtype=dtype, + verbose=verbose, + ) + else: + raise NotImplementedError(op) + except KeyboardInterrupt: + break + except Exception as msg: + dump() + raise + dump() + + if 0: + # Check performance dependence on sparsity and apply + # adjustments when differences are noticable (more than 10%). + # + # When using NVIDIA A100 GPU, the performance dependence on + # sparsity is insignificant (0 % ... 10 %) for majority of + # shapes/blocksizes combinations. However, for a very few + # specific size combinations, the effect of sparsity on + # performance can be up to 20 %. + for (M, K), N, (BM, BK) in itertools.product( + shapes_lst, sizes_lst, blocksize_lst + ): + meta_lst: list = [] + key = (M, K, N, BM, BK) + for sparsity1 in sparsity_lst: + torch.manual_seed(0) + bsr = create_blocked_tensor( + 0, M, K, (BM, BK), sparsity1, dtype, device="cuda" + ).to_sparse_bsr((BM, BK)) + dense = make_tensor(K, N, dtype=dtype, device="cuda") + meta_lst = [] + for sparsity in sparsity_lst: + meta = get_meta(op, key, version=(0, dtype, sparsity), exact=True) + if meta is None: + continue + + def bench(meta, bsr=bsr, dense=dense): + import triton + + if op == "scatter_mm": + from torch.sparse._triton_ops import ( + bsr_scatter_mm, + bsr_scatter_mm_indices_data, + ) + + indices_data = bsr_scatter_mm_indices_data( + bsr, + dense, + indices_format="bsr_strided_mm_compressed", + **meta, + ) + + def test_func(): + return bsr_scatter_mm( + bsr, dense, indices_data=indices_data + ) + + else: + raise NotImplementedError(op) + + ms_min = triton.testing.do_bench( + test_func, warmup=500, rep=100, fast_flush=False + ) + + return ms_min + + meta_lst.append( + (bench(meta), sparsity, tuple(meta[k] for k in sorted(meta))) + ) + if not meta_lst: + continue + meta_lst = sorted(meta_lst) + index = next( + i for i, item in enumerate(meta_lst) if item[1] == sparsity1 + ) + if meta_lst[0][2] == meta_lst[index][2]: + continue + speeddiff = (1 - meta_lst[index][0] / meta_lst[0][0]) * 100 + if abs(speeddiff) < 10: + continue + + print(sparsity1, index, key, meta_lst, speeddiff) + + if index > 0: + device_name = torch.cuda.get_device_name() + meta = get_meta( + op, key, version=(0, dtype, meta_lst[0][1]), exact=True + ) + update( + op, + device_name, + (0, dtype, sparsity1), + key, + tuple(meta[k] for k in sorted(meta)), + ) + print("update") + dump() + + +_operation_device_version_data: Dict[Any, Dict] = { + # Warning: the data in between the BEGIN/END DATA comment lines + # below is generated. It can be updated either manually or via + # calling dump function defined above. 
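+    # As a concrete, illustrative reading of the legend and generated data
+    # below (a hedged example, not an additional entry): the bsr_dense_addmm
+    # record
+    #
+    #     (256, 256, 256, 16, 16, False, True, True): (4, 8, 5, 1)
+    #
+    # stored under ("bsr_dense_addmm", "NVIDIA A100-SXM4-80GB",
+    # (0, torch.bfloat16, 0.5)) is what
+    #
+    #     get_meta("bsr_dense_addmm",
+    #              (256, 256, 256, 16, 16, False, True, True),
+    #              device_name="NVIDIA A100-SXM4-80GB",
+    #              version=(0, torch.bfloat16, 0.5))
+    #
+    # returns as the dict
+    #     {"GROUP_SIZE_ROW": 4, "SPLIT_N": 8, "num_stages": 5, "num_warps": 1}.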
+ # + # Legend [op: key -> data]: + # scatter_mm : M, K, N, Ms, Ks -> GROUP_SIZE, SPLIT_N, TILE_M, TILE_N, num_stages, num_warps + # bsr_dense_addmm : M, K, N, Ms, Ks, beta==0, beta==1, alpha==1 -> GROUP_SIZE_ROW, SPLIT_N, num_stages, num_warps + # + # BEGIN GENERATED DATA + ("bsr_dense_addmm", "NVIDIA A100-SXM4-80GB", (0, torch.bfloat16, 0.5)): { + (16, 16, 16, 16, 16, False, False, False): (2, 1, 1, 2), + (16, 16, 16, 16, 16, False, False, True): (1, 1, 1, 4), + (16, 16, 16, 16, 16, False, True, False): (1, 1, 3, 16), + (16, 16, 16, 16, 16, False, True, True): (1, 1, 1, 8), + (16, 16, 16, 16, 16, True, False, False): (2, 1, 1, 8), + (16, 16, 16, 16, 16, True, False, True): (1, 1, 1, 8), + (16, 16, 32, 16, 16, False, False, False): (1, 2, 1, 8), + (16, 16, 32, 16, 16, False, False, True): (1, 2, 2, 4), + (16, 16, 32, 16, 16, False, True, False): (1, 1, 2, 4), + (16, 16, 32, 16, 16, False, True, True): (1, 1, 2, 4), + (16, 16, 32, 16, 16, True, False, False): (1, 1, 2, 4), + (16, 16, 32, 16, 16, True, False, True): (2, 2, 1, 2), + (16, 16, 64, 16, 16, False, False, False): (1, 4, 2, 4), + (16, 16, 64, 16, 16, False, False, True): (1, 2, 1, 2), + (16, 16, 64, 16, 16, False, True, False): (2, 1, 1, 2), + (16, 16, 64, 16, 16, False, True, True): (1, 4, 1, 8), + (16, 16, 64, 16, 16, True, False, False): (1, 4, 1, 1), + (16, 16, 64, 16, 16, True, False, True): (1, 4, 2, 4), + (16, 32, 16, 16, 16, False, False, False): (1, 1, 2, 2), + (16, 32, 16, 16, 16, False, False, True): (1, 1, 1, 4), + (16, 32, 16, 16, 16, False, True, False): (1, 1, 1, 2), + (16, 32, 16, 16, 16, False, True, True): (1, 1, 1, 1), + (16, 32, 16, 16, 16, True, False, False): (1, 1, 1, 2), + (16, 32, 16, 16, 16, True, False, True): (2, 1, 1, 2), + (16, 32, 16, 16, 32, False, False, False): (1, 1, 1, 4), + (16, 32, 16, 16, 32, False, False, True): (1, 1, 1, 8), + (16, 32, 16, 16, 32, False, True, False): (1, 1, 1, 8), + (16, 32, 16, 16, 32, False, True, True): (1, 1, 2, 4), + (16, 32, 16, 16, 32, True, False, False): (1, 1, 1, 2), + (16, 32, 16, 16, 32, True, False, True): (1, 1, 1, 1), + (16, 32, 32, 16, 16, False, False, False): (2, 2, 1, 4), + (16, 32, 32, 16, 16, False, False, True): (2, 2, 1, 2), + (16, 32, 32, 16, 16, False, True, False): (1, 1, 2, 8), + (16, 32, 32, 16, 16, False, True, True): (1, 2, 1, 1), + (16, 32, 32, 16, 16, True, False, False): (1, 1, 1, 8), + (16, 32, 32, 16, 16, True, False, True): (1, 2, 1, 4), + (16, 32, 32, 16, 32, False, False, False): (1, 1, 2, 8), + (16, 32, 32, 16, 32, False, False, True): (2, 1, 1, 8), + (16, 32, 32, 16, 32, False, True, False): (1, 1, 1, 4), + (16, 32, 32, 16, 32, False, True, True): (1, 1, 1, 4), + (16, 32, 32, 16, 32, True, False, False): (1, 2, 1, 8), + (16, 32, 32, 16, 32, True, False, True): (1, 1, 1, 4), + (16, 32, 64, 16, 16, False, False, False): (1, 4, 3, 8), + (16, 32, 64, 16, 16, False, False, True): (1, 4, 1, 4), + (16, 32, 64, 16, 16, False, True, False): (1, 4, 1, 4), + (16, 32, 64, 16, 16, False, True, True): (2, 4, 1, 4), + (16, 32, 64, 16, 16, True, False, False): (1, 2, 1, 4), + (16, 32, 64, 16, 16, True, False, True): (1, 2, 1, 4), + (16, 32, 64, 16, 32, False, False, False): (1, 4, 1, 8), + (16, 32, 64, 16, 32, False, False, True): (1, 4, 1, 4), + (16, 32, 64, 16, 32, False, True, False): (1, 4, 1, 2), + (16, 32, 64, 16, 32, False, True, True): (1, 2, 1, 4), + (16, 32, 64, 16, 32, True, False, False): (1, 2, 1, 4), + (16, 32, 64, 16, 32, True, False, True): (1, 2, 1, 2), + (16, 64, 16, 16, 32, False, False, False): (1, 1, 1, 2), + (16, 64, 16, 16, 
32, False, False, True): (1, 1, 2, 2), + (16, 64, 16, 16, 32, False, True, False): (1, 1, 2, 8), + (16, 64, 16, 16, 32, False, True, True): (1, 1, 1, 4), + (16, 64, 16, 16, 32, True, False, False): (1, 1, 1, 8), + (16, 64, 16, 16, 32, True, False, True): (1, 1, 1, 4), + (16, 64, 32, 16, 32, False, False, False): (1, 2, 1, 2), + (16, 64, 32, 16, 32, False, False, True): (1, 2, 1, 4), + (16, 64, 32, 16, 32, False, True, False): (1, 2, 1, 4), + (16, 64, 32, 16, 32, False, True, True): (2, 2, 1, 4), + (16, 64, 32, 16, 32, True, False, False): (1, 2, 1, 4), + (16, 64, 32, 16, 32, True, False, True): (1, 2, 1, 8), + (16, 64, 64, 16, 32, False, False, False): (1, 2, 1, 4), + (16, 64, 64, 16, 32, False, False, True): (1, 4, 2, 2), + (16, 64, 64, 16, 32, False, True, False): (1, 1, 1, 4), + (16, 64, 64, 16, 32, False, True, True): (1, 4, 1, 2), + (16, 64, 64, 16, 32, True, False, False): (1, 2, 1, 4), + (16, 64, 64, 16, 32, True, False, True): (1, 4, 1, 4), + (32, 16, 16, 16, 16, False, False, False): (1, 1, 1, 8), + (32, 16, 16, 16, 16, False, False, True): (1, 1, 2, 4), + (32, 16, 16, 16, 16, False, True, False): (1, 1, 1, 4), + (32, 16, 16, 16, 16, False, True, True): (1, 1, 2, 4), + (32, 16, 16, 16, 16, True, False, False): (1, 1, 1, 2), + (32, 16, 16, 16, 16, True, False, True): (1, 1, 1, 4), + (32, 16, 32, 16, 16, False, False, False): (1, 1, 1, 4), + (32, 16, 32, 16, 16, False, False, True): (2, 2, 1, 4), + (32, 16, 32, 16, 16, False, True, False): (1, 2, 2, 2), + (32, 16, 32, 16, 16, False, True, True): (2, 2, 1, 4), + (32, 16, 32, 16, 16, True, False, False): (1, 2, 2, 8), + (32, 16, 32, 16, 16, True, False, True): (1, 2, 1, 2), + (32, 16, 64, 16, 16, False, False, False): (1, 4, 1, 4), + (32, 16, 64, 16, 16, False, False, True): (1, 4, 2, 4), + (32, 16, 64, 16, 16, False, True, False): (1, 2, 2, 2), + (32, 16, 64, 16, 16, False, True, True): (3, 4, 1, 4), + (32, 16, 64, 16, 16, True, False, False): (1, 2, 1, 2), + (32, 16, 64, 16, 16, True, False, True): (1, 2, 1, 4), + (32, 32, 16, 16, 16, False, False, False): (1, 1, 3, 4), + (32, 32, 16, 16, 16, False, False, True): (1, 1, 1, 4), + (32, 32, 16, 16, 16, False, True, False): (1, 1, 1, 2), + (32, 32, 16, 16, 16, False, True, True): (1, 1, 1, 4), + (32, 32, 16, 16, 16, True, False, False): (1, 1, 1, 4), + (32, 32, 16, 16, 16, True, False, True): (1, 1, 2, 2), + (32, 32, 16, 16, 32, False, False, False): (2, 1, 1, 4), + (32, 32, 16, 16, 32, False, False, True): (1, 1, 1, 4), + (32, 32, 16, 16, 32, False, True, False): (1, 1, 1, 4), + (32, 32, 16, 16, 32, False, True, True): (3, 1, 2, 4), + (32, 32, 16, 16, 32, True, False, False): (1, 1, 1, 4), + (32, 32, 16, 16, 32, True, False, True): (1, 1, 1, 4), + (32, 32, 16, 32, 32, False, False, False): (1, 1, 1, 8), + (32, 32, 16, 32, 32, False, False, True): (1, 1, 1, 4), + (32, 32, 16, 32, 32, False, True, False): (1, 1, 2, 1), + (32, 32, 16, 32, 32, False, True, True): (2, 1, 2, 2), + (32, 32, 16, 32, 32, True, False, False): (1, 1, 1, 8), + (32, 32, 16, 32, 32, True, False, True): (2, 1, 3, 4), + (32, 32, 32, 16, 16, False, False, False): (1, 2, 1, 4), + (32, 32, 32, 16, 16, False, False, True): (2, 2, 1, 4), + (32, 32, 32, 16, 16, False, True, False): (1, 1, 1, 8), + (32, 32, 32, 16, 16, False, True, True): (2, 2, 1, 4), + (32, 32, 32, 16, 16, True, False, False): (1, 1, 1, 4), + (32, 32, 32, 16, 16, True, False, True): (2, 2, 2, 4), + (32, 32, 32, 16, 32, False, False, False): (2, 2, 1, 8), + (32, 32, 32, 16, 32, False, False, True): (1, 2, 1, 2), + (32, 32, 32, 16, 32, False, True, False): 
(1, 2, 1, 4), + (32, 32, 32, 16, 32, False, True, True): (1, 2, 1, 4), + (32, 32, 32, 16, 32, True, False, False): (1, 2, 1, 4), + (32, 32, 32, 16, 32, True, False, True): (1, 2, 1, 2), + (32, 32, 32, 32, 32, False, False, False): (1, 1, 3, 8), + (32, 32, 32, 32, 32, False, False, True): (1, 1, 1, 8), + (32, 32, 32, 32, 32, False, True, False): (2, 1, 3, 4), + (32, 32, 32, 32, 32, False, True, True): (2, 1, 1, 2), + (32, 32, 32, 32, 32, True, False, False): (1, 1, 1, 2), + (32, 32, 32, 32, 32, True, False, True): (4, 1, 1, 1), + (32, 32, 64, 16, 16, False, False, False): (1, 4, 1, 4), + (32, 32, 64, 16, 16, False, False, True): (1, 4, 1, 4), + (32, 32, 64, 16, 16, False, True, False): (1, 2, 1, 8), + (32, 32, 64, 16, 16, False, True, True): (1, 4, 1, 2), + (32, 32, 64, 16, 16, True, False, False): (2, 4, 1, 2), + (32, 32, 64, 16, 16, True, False, True): (1, 4, 1, 2), + (32, 32, 64, 16, 32, False, False, False): (1, 2, 1, 8), + (32, 32, 64, 16, 32, False, False, True): (1, 4, 2, 2), + (32, 32, 64, 16, 32, False, True, False): (1, 2, 1, 4), + (32, 32, 64, 16, 32, False, True, True): (1, 4, 1, 4), + (32, 32, 64, 16, 32, True, False, False): (1, 4, 2, 2), + (32, 32, 64, 16, 32, True, False, True): (3, 4, 2, 2), + (32, 32, 64, 32, 32, False, False, False): (2, 2, 1, 4), + (32, 32, 64, 32, 32, False, False, True): (1, 2, 1, 4), + (32, 32, 64, 32, 32, False, True, False): (1, 1, 1, 8), + (32, 32, 64, 32, 32, False, True, True): (1, 1, 1, 4), + (32, 32, 64, 32, 32, True, False, False): (1, 2, 1, 2), + (32, 32, 64, 32, 32, True, False, True): (3, 2, 1, 8), + (32, 64, 16, 16, 32, False, False, False): (1, 1, 2, 2), + (32, 64, 16, 16, 32, False, False, True): (1, 1, 1, 4), + (32, 64, 16, 16, 32, False, True, False): (1, 1, 2, 4), + (32, 64, 16, 16, 32, False, True, True): (1, 1, 1, 4), + (32, 64, 16, 16, 32, True, False, False): (1, 1, 1, 2), + (32, 64, 16, 16, 32, True, False, True): (2, 1, 2, 2), + (32, 64, 16, 32, 32, False, False, False): (1, 1, 1, 1), + (32, 64, 16, 32, 32, False, False, True): (2, 1, 1, 4), + (32, 64, 16, 32, 32, False, True, False): (1, 1, 1, 1), + (32, 64, 16, 32, 32, False, True, True): (1, 1, 2, 2), + (32, 64, 16, 32, 32, True, False, False): (1, 1, 2, 4), + (32, 64, 16, 32, 32, True, False, True): (1, 1, 1, 4), + (32, 64, 32, 16, 32, False, False, False): (2, 2, 1, 4), + (32, 64, 32, 16, 32, False, False, True): (1, 2, 1, 4), + (32, 64, 32, 16, 32, False, True, False): (1, 1, 1, 4), + (32, 64, 32, 16, 32, False, True, True): (2, 2, 3, 4), + (32, 64, 32, 16, 32, True, False, False): (1, 1, 1, 2), + (32, 64, 32, 16, 32, True, False, True): (1, 2, 1, 2), + (32, 64, 32, 32, 32, False, False, False): (1, 1, 1, 2), + (32, 64, 32, 32, 32, False, False, True): (2, 1, 1, 4), + (32, 64, 32, 32, 32, False, True, False): (1, 1, 1, 8), + (32, 64, 32, 32, 32, False, True, True): (1, 1, 2, 4), + (32, 64, 32, 32, 32, True, False, False): (2, 1, 1, 4), + (32, 64, 32, 32, 32, True, False, True): (1, 1, 2, 4), + (32, 64, 64, 16, 32, False, False, False): (1, 4, 1, 4), + (32, 64, 64, 16, 32, False, False, True): (1, 4, 2, 4), + (32, 64, 64, 16, 32, False, True, False): (1, 4, 2, 2), + (32, 64, 64, 16, 32, False, True, True): (1, 4, 1, 4), + (32, 64, 64, 16, 32, True, False, False): (1, 4, 1, 8), + (32, 64, 64, 16, 32, True, False, True): (1, 4, 2, 1), + (32, 64, 64, 32, 32, False, False, False): (1, 1, 1, 4), + (32, 64, 64, 32, 32, False, False, True): (2, 2, 1, 4), + (32, 64, 64, 32, 32, False, True, False): (1, 1, 1, 4), + (32, 64, 64, 32, 32, False, True, True): (2, 2, 1, 4), + (32, 64, 
64, 32, 32, True, False, False): (1, 2, 2, 4), + (32, 64, 64, 32, 32, True, False, True): (2, 2, 3, 4), + (64, 32, 16, 32, 32, False, False, False): (1, 1, 1, 4), + (64, 32, 16, 32, 32, False, False, True): (1, 1, 1, 4), + (64, 32, 16, 32, 32, False, True, False): (1, 1, 1, 8), + (64, 32, 16, 32, 32, False, True, True): (1, 1, 1, 4), + (64, 32, 16, 32, 32, True, False, False): (1, 1, 1, 16), + (64, 32, 16, 32, 32, True, False, True): (2, 1, 1, 4), + (64, 32, 32, 32, 32, False, False, False): (1, 1, 3, 4), + (64, 32, 32, 32, 32, False, False, True): (2, 1, 1, 4), + (64, 32, 32, 32, 32, False, True, False): (1, 1, 2, 4), + (64, 32, 32, 32, 32, False, True, True): (2, 1, 1, 4), + (64, 32, 32, 32, 32, True, False, False): (2, 1, 1, 16), + (64, 32, 32, 32, 32, True, False, True): (2, 1, 1, 4), + (64, 32, 64, 32, 32, False, False, False): (1, 2, 1, 4), + (64, 32, 64, 32, 32, False, False, True): (2, 2, 1, 4), + (64, 32, 64, 32, 32, False, True, False): (1, 1, 1, 4), + (64, 32, 64, 32, 32, False, True, True): (2, 2, 1, 4), + (64, 32, 64, 32, 32, True, False, False): (1, 2, 1, 8), + (64, 32, 64, 32, 32, True, False, True): (2, 2, 3, 4), + (64, 64, 16, 32, 32, False, False, False): (1, 1, 2, 16), + (64, 64, 16, 32, 32, False, False, True): (1, 1, 3, 4), + (64, 64, 16, 32, 32, False, True, False): (1, 1, 1, 2), + (64, 64, 16, 32, 32, False, True, True): (2, 1, 1, 4), + (64, 64, 16, 32, 32, True, False, False): (2, 1, 3, 2), + (64, 64, 16, 32, 32, True, False, True): (1, 1, 2, 4), + (64, 64, 32, 32, 32, False, False, False): (1, 1, 1, 8), + (64, 64, 32, 32, 32, False, False, True): (2, 1, 2, 4), + (64, 64, 32, 32, 32, False, True, False): (2, 1, 1, 4), + (64, 64, 32, 32, 32, False, True, True): (1, 1, 2, 4), + (64, 64, 32, 32, 32, True, False, False): (2, 1, 1, 4), + (64, 64, 32, 32, 32, True, False, True): (1, 1, 2, 4), + (64, 64, 64, 32, 32, False, False, False): (1, 2, 2, 4), + (64, 64, 64, 32, 32, False, False, True): (1, 2, 2, 2), + (64, 64, 64, 32, 32, False, True, False): (1, 2, 1, 2), + (64, 64, 64, 32, 32, False, True, True): (1, 2, 1, 4), + (64, 64, 64, 32, 32, True, False, False): (1, 2, 1, 4), + (64, 64, 64, 32, 32, True, False, True): (1, 2, 1, 4), + (256, 256, 256, 16, 16, False, True, True): (4, 8, 5, 1), + (256, 256, 256, 16, 16, True, False, True): (2, 8, 4, 2), + (256, 256, 256, 32, 32, False, True, True): (2, 8, 5, 2), + (256, 256, 256, 32, 32, True, False, True): (1, 8, 5, 4), + (256, 256, 256, 64, 64, False, True, True): (2, 4, 4, 4), + (256, 256, 256, 64, 64, True, False, True): (1, 4, 3, 4), + (256, 256, 256, 128, 128, False, True, True): (4, 2, 2, 8), + (256, 256, 256, 128, 128, True, False, True): (1, 2, 2, 8), + (256, 256, 512, 16, 16, False, True, True): (1, 16, 5, 1), + (256, 256, 512, 16, 16, True, False, True): (3, 16, 3, 2), + (256, 256, 512, 32, 32, False, True, True): (2, 8, 5, 2), + (256, 256, 512, 32, 32, True, False, True): (1, 16, 4, 4), + (256, 256, 512, 64, 64, False, True, True): (1, 8, 4, 4), + (256, 256, 512, 64, 64, True, False, True): (3, 8, 3, 4), + (256, 256, 512, 128, 128, False, True, True): (1, 4, 2, 8), + (256, 256, 512, 128, 128, True, False, True): (1, 4, 2, 8), + (256, 256, 1024, 16, 16, False, True, True): (1, 16, 5, 4), + (256, 256, 1024, 16, 16, True, False, True): (5, 16, 4, 2), + (256, 256, 1024, 32, 32, False, True, True): (1, 32, 5, 2), + (256, 256, 1024, 32, 32, True, False, True): (2, 16, 5, 2), + (256, 256, 1024, 64, 64, False, True, True): (1, 16, 4, 4), + (256, 256, 1024, 64, 64, True, False, True): (1, 16, 4, 4), + (256, 256, 1024, 
128, 128, False, True, True): (1, 8, 2, 8), + (256, 256, 1024, 128, 128, True, False, True): (1, 8, 2, 8), + (256, 256, 2048, 16, 16, False, True, True): (1, 16, 4, 4), + (256, 256, 2048, 16, 16, True, False, True): (2, 32, 5, 1), + (256, 256, 2048, 32, 32, False, True, True): (1, 64, 4, 1), + (256, 256, 2048, 32, 32, True, False, True): (2, 32, 4, 2), + (256, 256, 2048, 64, 64, False, True, True): (8, 16, 5, 4), + (256, 256, 2048, 64, 64, True, False, True): (1, 16, 4, 4), + (256, 256, 2048, 128, 128, False, True, True): (2, 16, 2, 8), + (256, 256, 2048, 128, 128, True, False, True): (1, 16, 2, 8), + (256, 256, 4096, 16, 16, False, True, True): (1, 64, 1, 4), + (256, 256, 4096, 16, 16, True, False, True): (1, 16, 3, 2), + (256, 256, 4096, 32, 32, False, True, True): (6, 32, 3, 2), + (256, 256, 4096, 32, 32, True, False, True): (4, 32, 4, 2), + (256, 256, 4096, 64, 64, False, True, True): (6, 64, 3, 4), + (256, 256, 4096, 64, 64, True, False, True): (2, 64, 3, 4), + (256, 256, 4096, 128, 128, False, True, True): (1, 32, 2, 8), + (256, 256, 4096, 128, 128, True, False, True): (1, 32, 2, 8), + (256, 256, 8192, 16, 16, False, True, True): (2, 32, 3, 4), + (256, 256, 8192, 16, 16, True, False, True): (4, 64, 3, 2), + (256, 256, 8192, 32, 32, False, True, True): (1, 64, 3, 4), + (256, 256, 8192, 32, 32, True, False, True): (3, 128, 1, 2), + (256, 256, 8192, 64, 64, False, True, True): (9, 128, 1, 4), + (256, 256, 8192, 64, 64, True, False, True): (8, 128, 1, 4), + (256, 256, 8192, 128, 128, False, True, True): (7, 64, 1, 4), + (256, 256, 8192, 128, 128, True, False, True): (1, 32, 1, 16), + (256, 256, 16384, 16, 16, False, True, True): (3, 128, 3, 2), + (256, 256, 16384, 16, 16, True, False, True): (5, 64, 3, 2), + (256, 256, 16384, 32, 32, False, True, True): (3, 128, 3, 2), + (256, 256, 16384, 32, 32, True, False, True): (1, 128, 3, 2), + (256, 256, 16384, 64, 64, False, True, True): (3, 128, 1, 4), + (256, 256, 16384, 64, 64, True, False, True): (2, 128, 1, 4), + (256, 256, 16384, 128, 128, False, True, True): (7, 128, 1, 4), + (256, 256, 16384, 128, 128, True, False, True): (1, 128, 2, 8), + (256, 256, 32768, 16, 16, False, True, True): (2, 128, 3, 2), + (256, 256, 32768, 16, 16, True, False, True): (1, 128, 3, 2), + (256, 256, 32768, 32, 32, False, True, True): (1, 256, 3, 4), + (256, 256, 32768, 32, 32, True, False, True): (3, 256, 3, 2), + (256, 256, 32768, 64, 64, False, True, True): (1, 256, 1, 4), + (256, 256, 32768, 64, 64, True, False, True): (3, 256, 1, 4), + (256, 256, 32768, 128, 128, False, True, True): (9, 256, 1, 4), + (256, 256, 32768, 128, 128, True, False, True): (2, 256, 1, 4), + (256, 256, 65536, 16, 16, False, True, True): (1, 256, 3, 2), + (256, 256, 65536, 16, 16, True, False, True): (1, 256, 3, 2), + (256, 256, 65536, 32, 32, False, True, True): (2, 512, 3, 2), + (256, 256, 65536, 32, 32, True, False, True): (2, 512, 3, 2), + (256, 256, 65536, 64, 64, False, True, True): (2, 512, 1, 4), + (256, 256, 65536, 64, 64, True, False, True): (1, 512, 1, 4), + (256, 256, 65536, 128, 128, False, True, True): (7, 512, 1, 4), + (256, 256, 65536, 128, 128, True, False, True): (2, 512, 1, 4), + (256, 256, 131072, 16, 16, False, True, True): (1, 512, 3, 2), + (256, 256, 131072, 16, 16, True, False, True): (1, 512, 3, 2), + (256, 256, 131072, 32, 32, False, True, True): (1, 1024, 3, 2), + (256, 256, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), + (256, 256, 131072, 64, 64, False, True, True): (1, 1024, 1, 4), + (256, 256, 131072, 64, 64, True, False, True): (1, 1024, 1, 4), + 
(256, 256, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), + (256, 256, 131072, 128, 128, True, False, True): (1, 1024, 1, 4), + (512, 512, 256, 16, 16, False, True, True): (2, 4, 5, 4), + (512, 512, 256, 16, 16, True, False, True): (3, 4, 5, 4), + (512, 512, 256, 32, 32, False, True, True): (1, 4, 5, 2), + (512, 512, 256, 32, 32, True, False, True): (4, 8, 5, 1), + (512, 512, 256, 64, 64, False, True, True): (4, 4, 5, 4), + (512, 512, 256, 64, 64, True, False, True): (5, 4, 5, 4), + (512, 512, 256, 128, 128, False, True, True): (3, 2, 2, 8), + (512, 512, 256, 128, 128, True, False, True): (2, 2, 2, 8), + (512, 512, 512, 16, 16, False, True, True): (1, 8, 5, 4), + (512, 512, 512, 16, 16, True, False, True): (4, 8, 5, 2), + (512, 512, 512, 32, 32, False, True, True): (1, 16, 4, 1), + (512, 512, 512, 32, 32, True, False, True): (1, 8, 5, 2), + (512, 512, 512, 64, 64, False, True, True): (4, 8, 5, 4), + (512, 512, 512, 64, 64, True, False, True): (2, 8, 5, 4), + (512, 512, 512, 128, 128, False, True, True): (2, 4, 2, 8), + (512, 512, 512, 128, 128, True, False, True): (1, 4, 2, 8), + (512, 512, 1024, 16, 16, False, True, True): (2, 8, 4, 4), + (512, 512, 1024, 16, 16, True, False, True): (1, 8, 4, 4), + (512, 512, 1024, 32, 32, False, True, True): (3, 16, 4, 2), + (512, 512, 1024, 32, 32, True, False, True): (1, 16, 5, 2), + (512, 512, 1024, 64, 64, False, True, True): (2, 8, 3, 4), + (512, 512, 1024, 64, 64, True, False, True): (2, 16, 3, 4), + (512, 512, 1024, 128, 128, False, True, True): (2, 8, 2, 8), + (512, 512, 1024, 128, 128, True, False, True): (3, 8, 2, 8), + (512, 512, 2048, 16, 16, False, True, True): (4, 16, 3, 2), + (512, 512, 2048, 16, 16, True, False, True): (1, 16, 4, 2), + (512, 512, 2048, 32, 32, False, True, True): (3, 32, 3, 2), + (512, 512, 2048, 32, 32, True, False, True): (2, 32, 3, 2), + (512, 512, 2048, 64, 64, False, True, True): (6, 32, 3, 2), + (512, 512, 2048, 64, 64, True, False, True): (1, 32, 3, 2), + (512, 512, 2048, 128, 128, False, True, True): (4, 16, 2, 8), + (512, 512, 2048, 128, 128, True, False, True): (1, 16, 2, 8), + (512, 512, 4096, 16, 16, False, True, True): (1, 16, 3, 2), + (512, 512, 4096, 16, 16, True, False, True): (4, 32, 3, 2), + (512, 512, 4096, 32, 32, False, True, True): (3, 32, 3, 2), + (512, 512, 4096, 32, 32, True, False, True): (2, 32, 3, 2), + (512, 512, 4096, 64, 64, False, True, True): (1, 32, 3, 4), + (512, 512, 4096, 64, 64, True, False, True): (1, 64, 3, 4), + (512, 512, 4096, 128, 128, False, True, True): (4, 32, 1, 4), + (512, 512, 4096, 128, 128, True, False, True): (4, 32, 2, 8), + (512, 512, 8192, 16, 16, False, True, True): (8, 64, 3, 2), + (512, 512, 8192, 16, 16, True, False, True): (4, 64, 3, 2), + (512, 512, 8192, 32, 32, False, True, True): (3, 64, 3, 2), + (512, 512, 8192, 32, 32, True, False, True): (3, 64, 3, 2), + (512, 512, 8192, 64, 64, False, True, True): (1, 64, 3, 4), + (512, 512, 8192, 64, 64, True, False, True): (7, 64, 3, 4), + (512, 512, 8192, 128, 128, False, True, True): (1, 64, 1, 4), + (512, 512, 8192, 128, 128, True, False, True): (4, 64, 2, 8), + (512, 512, 16384, 16, 16, False, True, True): (1, 64, 3, 2), + (512, 512, 16384, 16, 16, True, False, True): (1, 128, 3, 2), + (512, 512, 16384, 32, 32, False, True, True): (3, 128, 3, 2), + (512, 512, 16384, 32, 32, True, False, True): (1, 128, 3, 2), + (512, 512, 16384, 64, 64, False, True, True): (4, 64, 2, 4), + (512, 512, 16384, 64, 64, True, False, True): (2, 64, 2, 4), + (512, 512, 16384, 128, 128, False, True, True): (4, 128, 1, 4), + (512, 
512, 16384, 128, 128, True, False, True): (2, 128, 1, 4), + (512, 512, 32768, 16, 16, False, True, True): (1, 128, 3, 2), + (512, 512, 32768, 16, 16, True, False, True): (1, 128, 3, 2), + (512, 512, 32768, 32, 32, False, True, True): (1, 256, 3, 2), + (512, 512, 32768, 32, 32, True, False, True): (1, 256, 3, 2), + (512, 512, 32768, 64, 64, False, True, True): (1, 256, 3, 4), + (512, 512, 32768, 64, 64, True, False, True): (2, 256, 3, 4), + (512, 512, 32768, 128, 128, False, True, True): (5, 256, 1, 4), + (512, 512, 32768, 128, 128, True, False, True): (4, 256, 1, 4), + (512, 512, 65536, 16, 16, False, True, True): (1, 256, 3, 2), + (512, 512, 65536, 16, 16, True, False, True): (1, 256, 3, 1), + (512, 512, 65536, 32, 32, False, True, True): (1, 512, 3, 2), + (512, 512, 65536, 32, 32, True, False, True): (1, 512, 3, 2), + (512, 512, 65536, 64, 64, False, True, True): (4, 256, 2, 4), + (512, 512, 65536, 64, 64, True, False, True): (2, 512, 3, 4), + (512, 512, 65536, 128, 128, False, True, True): (6, 512, 1, 4), + (512, 512, 65536, 128, 128, True, False, True): (4, 512, 1, 4), + (512, 512, 131072, 16, 16, False, True, True): (1, 512, 3, 2), + (512, 512, 131072, 16, 16, True, False, True): (1, 512, 3, 1), + (512, 512, 131072, 32, 32, False, True, True): (1, 1024, 3, 2), + (512, 512, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), + (512, 512, 131072, 64, 64, False, True, True): (4, 512, 2, 4), + (512, 512, 131072, 64, 64, True, False, True): (4, 1024, 3, 4), + (512, 512, 131072, 128, 128, False, True, True): (6, 1024, 1, 4), + (512, 512, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), + (1024, 1024, 256, 16, 16, False, True, True): (1, 4, 5, 4), + (1024, 1024, 256, 16, 16, True, False, True): (3, 4, 4, 4), + (1024, 1024, 256, 32, 32, False, True, True): (4, 4, 5, 2), + (1024, 1024, 256, 32, 32, True, False, True): (3, 4, 5, 2), + (1024, 1024, 256, 64, 64, False, True, True): (1, 4, 5, 4), + (1024, 1024, 256, 64, 64, True, False, True): (1, 4, 5, 4), + (1024, 1024, 256, 128, 128, False, True, True): (1, 2, 2, 8), + (1024, 1024, 256, 128, 128, True, False, True): (2, 2, 2, 8), + (1024, 1024, 512, 16, 16, False, True, True): (3, 4, 4, 4), + (1024, 1024, 512, 16, 16, True, False, True): (4, 8, 5, 2), + (1024, 1024, 512, 32, 32, False, True, True): (1, 8, 4, 2), + (1024, 1024, 512, 32, 32, True, False, True): (1, 8, 4, 2), + (1024, 1024, 512, 64, 64, False, True, True): (4, 8, 4, 4), + (1024, 1024, 512, 64, 64, True, False, True): (2, 8, 3, 4), + (1024, 1024, 512, 128, 128, False, True, True): (2, 4, 2, 8), + (1024, 1024, 512, 128, 128, True, False, True): (1, 4, 2, 8), + (1024, 1024, 1024, 16, 16, False, True, True): (3, 8, 4, 4), + (1024, 1024, 1024, 16, 16, True, False, True): (4, 8, 4, 2), + (1024, 1024, 1024, 32, 32, False, True, True): (1, 16, 3, 2), + (1024, 1024, 1024, 32, 32, True, False, True): (1, 16, 3, 2), + (1024, 1024, 1024, 64, 64, False, True, True): (1, 16, 3, 4), + (1024, 1024, 1024, 64, 64, True, False, True): (3, 16, 3, 2), + (1024, 1024, 1024, 128, 128, False, True, True): (1, 8, 2, 8), + (1024, 1024, 1024, 128, 128, True, False, True): (2, 8, 2, 8), + (1024, 1024, 2048, 16, 16, False, True, True): (3, 8, 3, 4), + (1024, 1024, 2048, 16, 16, True, False, True): (3, 8, 3, 2), + (1024, 1024, 2048, 32, 32, False, True, True): (5, 16, 3, 4), + (1024, 1024, 2048, 32, 32, True, False, True): (1, 16, 3, 2), + (1024, 1024, 2048, 64, 64, False, True, True): (6, 16, 4, 4), + (1024, 1024, 2048, 64, 64, True, False, True): (5, 16, 3, 4), + (1024, 1024, 2048, 128, 128, False, 
True, True): (4, 16, 2, 8), + (1024, 1024, 2048, 128, 128, True, False, True): (4, 16, 2, 8), + (1024, 1024, 4096, 16, 16, False, True, True): (8, 32, 3, 2), + (1024, 1024, 4096, 16, 16, True, False, True): (4, 32, 3, 2), + (1024, 1024, 4096, 32, 32, False, True, True): (2, 32, 3, 4), + (1024, 1024, 4096, 32, 32, True, False, True): (3, 32, 3, 2), + (1024, 1024, 4096, 64, 64, False, True, True): (3, 32, 3, 4), + (1024, 1024, 4096, 64, 64, True, False, True): (1, 32, 3, 4), + (1024, 1024, 4096, 128, 128, False, True, True): (4, 32, 2, 8), + (1024, 1024, 4096, 128, 128, True, False, True): (1, 32, 2, 8), + (1024, 1024, 8192, 16, 16, False, True, True): (4, 64, 3, 2), + (1024, 1024, 8192, 16, 16, True, False, True): (4, 64, 3, 2), + (1024, 1024, 8192, 32, 32, False, True, True): (8, 64, 3, 4), + (1024, 1024, 8192, 32, 32, True, False, True): (4, 32, 3, 4), + (1024, 1024, 8192, 64, 64, False, True, True): (4, 64, 3, 4), + (1024, 1024, 8192, 64, 64, True, False, True): (2, 64, 3, 4), + (1024, 1024, 8192, 128, 128, False, True, True): (4, 64, 2, 8), + (1024, 1024, 8192, 128, 128, True, False, True): (4, 64, 1, 4), + (1024, 1024, 16384, 16, 16, False, True, True): (1, 64, 3, 2), + (1024, 1024, 16384, 16, 16, True, False, True): (1, 64, 3, 2), + (1024, 1024, 16384, 32, 32, False, True, True): (1, 128, 3, 2), + (1024, 1024, 16384, 32, 32, True, False, True): (1, 64, 3, 4), + (1024, 1024, 16384, 64, 64, False, True, True): (1, 128, 3, 4), + (1024, 1024, 16384, 64, 64, True, False, True): (1, 128, 3, 4), + (1024, 1024, 16384, 128, 128, False, True, True): (2, 128, 1, 4), + (1024, 1024, 16384, 128, 128, True, False, True): (4, 128, 1, 4), + (1024, 1024, 32768, 16, 16, False, True, True): (1, 128, 3, 2), + (1024, 1024, 32768, 16, 16, True, False, True): (1, 128, 3, 2), + (1024, 1024, 32768, 32, 32, False, True, True): (1, 256, 3, 2), + (1024, 1024, 32768, 32, 32, True, False, True): (1, 128, 3, 4), + (1024, 1024, 32768, 64, 64, False, True, True): (2, 128, 2, 4), + (1024, 1024, 32768, 64, 64, True, False, True): (1, 256, 3, 4), + (1024, 1024, 32768, 128, 128, False, True, True): (2, 256, 1, 4), + (1024, 1024, 32768, 128, 128, True, False, True): (4, 256, 1, 4), + (1024, 1024, 65536, 16, 16, False, True, True): (1, 256, 3, 4), + (1024, 1024, 65536, 16, 16, True, False, True): (1, 256, 3, 4), + (1024, 1024, 65536, 32, 32, False, True, True): (9, 256, 3, 4), + (1024, 1024, 65536, 32, 32, True, False, True): (7, 256, 3, 4), + (1024, 1024, 65536, 64, 64, False, True, True): (2, 256, 2, 4), + (1024, 1024, 65536, 64, 64, True, False, True): (2, 512, 3, 4), + (1024, 1024, 65536, 128, 128, False, True, True): (2, 512, 1, 4), + (1024, 1024, 65536, 128, 128, True, False, True): (4, 512, 1, 4), + (1024, 1024, 131072, 16, 16, False, True, True): (11, 512, 3, 2), + (1024, 1024, 131072, 16, 16, True, False, True): (11, 512, 3, 2), + (1024, 1024, 131072, 32, 32, False, True, True): (4, 512, 3, 4), + (1024, 1024, 131072, 32, 32, True, False, True): (6, 512, 3, 4), + (1024, 1024, 131072, 64, 64, False, True, True): (2, 512, 2, 4), + (1024, 1024, 131072, 64, 64, True, False, True): (2, 1024, 3, 4), + (1024, 1024, 131072, 128, 128, False, True, True): (4, 1024, 1, 4), + (1024, 1024, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), + (2048, 2048, 256, 16, 16, False, True, True): (1, 4, 5, 2), + (2048, 2048, 256, 16, 16, True, False, True): (4, 4, 5, 2), + (2048, 2048, 256, 32, 32, False, True, True): (3, 4, 6, 2), + (2048, 2048, 256, 32, 32, True, False, True): (2, 4, 5, 2), + (2048, 2048, 256, 64, 64, False, True, 
True): (2, 4, 4, 4), + (2048, 2048, 256, 64, 64, True, False, True): (2, 4, 3, 4), + (2048, 2048, 256, 128, 128, False, True, True): (3, 2, 2, 8), + (2048, 2048, 256, 128, 128, True, False, True): (3, 2, 2, 8), + (2048, 2048, 512, 16, 16, False, True, True): (3, 4, 4, 4), + (2048, 2048, 512, 16, 16, True, False, True): (1, 4, 4, 4), + (2048, 2048, 512, 32, 32, False, True, True): (1, 4, 3, 4), + (2048, 2048, 512, 32, 32, True, False, True): (1, 4, 4, 2), + (2048, 2048, 512, 64, 64, False, True, True): (1, 8, 3, 4), + (2048, 2048, 512, 64, 64, True, False, True): (1, 8, 3, 4), + (2048, 2048, 512, 128, 128, False, True, True): (3, 4, 2, 8), + (2048, 2048, 512, 128, 128, True, False, True): (2, 4, 2, 8), + (2048, 2048, 1024, 16, 16, False, True, True): (3, 4, 3, 4), + (2048, 2048, 1024, 16, 16, True, False, True): (4, 8, 3, 2), + (2048, 2048, 1024, 32, 32, False, True, True): (3, 8, 3, 4), + (2048, 2048, 1024, 32, 32, True, False, True): (1, 8, 3, 2), + (2048, 2048, 1024, 64, 64, False, True, True): (1, 8, 3, 4), + (2048, 2048, 1024, 64, 64, True, False, True): (1, 8, 3, 4), + (2048, 2048, 1024, 128, 128, False, True, True): (4, 8, 1, 4), + (2048, 2048, 1024, 128, 128, True, False, True): (2, 8, 1, 4), + (2048, 2048, 2048, 16, 16, False, True, True): (4, 16, 3, 2), + (2048, 2048, 2048, 16, 16, True, False, True): (4, 16, 3, 2), + (2048, 2048, 2048, 32, 32, False, True, True): (1, 16, 3, 2), + (2048, 2048, 2048, 32, 32, True, False, True): (1, 16, 3, 2), + (2048, 2048, 2048, 64, 64, False, True, True): (4, 16, 3, 4), + (2048, 2048, 2048, 64, 64, True, False, True): (4, 16, 3, 4), + (2048, 2048, 2048, 128, 128, False, True, True): (6, 16, 2, 8), + (2048, 2048, 2048, 128, 128, True, False, True): (3, 16, 1, 4), + (2048, 2048, 4096, 16, 16, False, True, True): (4, 32, 4, 2), + (2048, 2048, 4096, 16, 16, True, False, True): (4, 32, 3, 2), + (2048, 2048, 4096, 32, 32, False, True, True): (4, 16, 3, 8), + (2048, 2048, 4096, 32, 32, True, False, True): (4, 16, 3, 8), + (2048, 2048, 4096, 64, 64, False, True, True): (1, 32, 3, 4), + (2048, 2048, 4096, 64, 64, True, False, True): (3, 32, 3, 4), + (2048, 2048, 4096, 128, 128, False, True, True): (2, 32, 1, 4), + (2048, 2048, 4096, 128, 128, True, False, True): (2, 32, 1, 4), + (2048, 2048, 8192, 16, 16, False, True, True): (4, 64, 4, 2), + (2048, 2048, 8192, 16, 16, True, False, True): (4, 64, 4, 2), + (2048, 2048, 8192, 32, 32, False, True, True): (4, 32, 4, 8), + (2048, 2048, 8192, 32, 32, True, False, True): (4, 32, 3, 8), + (2048, 2048, 8192, 64, 64, False, True, True): (4, 64, 3, 4), + (2048, 2048, 8192, 64, 64, True, False, True): (4, 64, 3, 4), + (2048, 2048, 8192, 128, 128, False, True, True): (2, 64, 1, 4), + (2048, 2048, 8192, 128, 128, True, False, True): (2, 64, 1, 4), + (2048, 2048, 16384, 16, 16, False, True, True): (4, 64, 3, 2), + (2048, 2048, 16384, 16, 16, True, False, True): (1, 64, 3, 2), + (2048, 2048, 16384, 32, 32, False, True, True): (4, 64, 3, 4), + (2048, 2048, 16384, 32, 32, True, False, True): (4, 64, 3, 4), + (2048, 2048, 16384, 64, 64, False, True, True): (4, 128, 3, 4), + (2048, 2048, 16384, 64, 64, True, False, True): (4, 128, 3, 4), + (2048, 2048, 16384, 128, 128, False, True, True): (2, 128, 1, 4), + (2048, 2048, 16384, 128, 128, True, False, True): (2, 128, 1, 4), + (2048, 2048, 32768, 16, 16, False, True, True): (8, 128, 3, 2), + (2048, 2048, 32768, 16, 16, True, False, True): (8, 128, 3, 4), + (2048, 2048, 32768, 32, 32, False, True, True): (8, 128, 3, 4), + (2048, 2048, 32768, 32, 32, True, False, True): (8, 128, 3, 
4), + (2048, 2048, 32768, 64, 64, False, True, True): (1, 128, 2, 4), + (2048, 2048, 32768, 64, 64, True, False, True): (8, 256, 3, 4), + (2048, 2048, 32768, 128, 128, False, True, True): (2, 256, 1, 4), + (2048, 2048, 32768, 128, 128, True, False, True): (2, 256, 1, 4), + (2048, 2048, 65536, 16, 16, False, True, True): (9, 256, 4, 4), + (2048, 2048, 65536, 16, 16, True, False, True): (7, 256, 4, 4), + (2048, 2048, 65536, 32, 32, False, True, True): (7, 256, 3, 4), + (2048, 2048, 65536, 32, 32, True, False, True): (3, 256, 3, 4), + (2048, 2048, 65536, 64, 64, False, True, True): (2, 256, 2, 4), + (2048, 2048, 65536, 64, 64, True, False, True): (6, 512, 3, 4), + (2048, 2048, 65536, 128, 128, False, True, True): (2, 512, 1, 4), + (2048, 2048, 65536, 128, 128, True, False, True): (2, 512, 1, 4), + (2048, 2048, 131072, 16, 16, False, True, True): (9, 512, 4, 4), + (2048, 2048, 131072, 16, 16, True, False, True): (9, 512, 4, 4), + (2048, 2048, 131072, 32, 32, False, True, True): (7, 512, 4, 4), + (2048, 2048, 131072, 32, 32, True, False, True): (3, 512, 3, 4), + (2048, 2048, 131072, 64, 64, False, True, True): (2, 512, 2, 4), + (2048, 2048, 131072, 64, 64, True, False, True): (4, 1024, 3, 4), + (2048, 2048, 131072, 128, 128, False, True, True): (1, 1024, 1, 4), + (2048, 2048, 131072, 128, 128, True, False, True): (2, 1024, 1, 4), + (4096, 4096, 256, 16, 16, False, True, True): (2, 2, 5, 4), + (4096, 4096, 256, 16, 16, True, False, True): (2, 2, 4, 2), + (4096, 4096, 256, 32, 32, False, True, True): (1, 2, 4, 4), + (4096, 4096, 256, 32, 32, True, False, True): (3, 2, 4, 2), + (4096, 4096, 256, 64, 64, False, True, True): (3, 4, 3, 4), + (4096, 4096, 256, 64, 64, True, False, True): (1, 4, 3, 2), + (4096, 4096, 256, 128, 128, False, True, True): (1, 2, 2, 8), + (4096, 4096, 256, 128, 128, True, False, True): (1, 2, 2, 8), + (4096, 4096, 512, 16, 16, False, True, True): (4, 2, 3, 4), + (4096, 4096, 512, 16, 16, True, False, True): (1, 2, 3, 4), + (4096, 4096, 512, 32, 32, False, True, True): (1, 4, 3, 4), + (4096, 4096, 512, 32, 32, True, False, True): (3, 4, 3, 2), + (4096, 4096, 512, 64, 64, False, True, True): (4, 4, 4, 4), + (4096, 4096, 512, 64, 64, True, False, True): (3, 4, 3, 4), + (4096, 4096, 512, 128, 128, False, True, True): (2, 4, 2, 8), + (4096, 4096, 512, 128, 128, True, False, True): (2, 4, 1, 4), + (4096, 4096, 1024, 16, 16, False, True, True): (2, 8, 3, 2), + (4096, 4096, 1024, 16, 16, True, False, True): (2, 8, 3, 2), + (4096, 4096, 1024, 32, 32, False, True, True): (1, 8, 3, 4), + (4096, 4096, 1024, 32, 32, True, False, True): (1, 8, 3, 2), + (4096, 4096, 1024, 64, 64, False, True, True): (1, 8, 3, 4), + (4096, 4096, 1024, 64, 64, True, False, True): (1, 8, 3, 4), + (4096, 4096, 1024, 128, 128, False, True, True): (4, 8, 1, 4), + (4096, 4096, 1024, 128, 128, True, False, True): (2, 8, 2, 8), + (4096, 4096, 2048, 16, 16, False, True, True): (2, 8, 4, 4), + (4096, 4096, 2048, 16, 16, True, False, True): (2, 8, 4, 4), + (4096, 4096, 2048, 32, 32, False, True, True): (4, 8, 3, 8), + (4096, 4096, 2048, 32, 32, True, False, True): (4, 8, 4, 8), + (4096, 4096, 2048, 64, 64, False, True, True): (4, 16, 3, 4), + (4096, 4096, 2048, 64, 64, True, False, True): (4, 16, 3, 4), + (4096, 4096, 2048, 128, 128, False, True, True): (1, 16, 1, 4), + (4096, 4096, 2048, 128, 128, True, False, True): (4, 16, 1, 4), + (4096, 4096, 4096, 16, 16, False, True, True): (4, 32, 4, 4), + (4096, 4096, 4096, 16, 16, True, False, True): (2, 32, 4, 4), + (4096, 4096, 4096, 32, 32, False, True, True): (4, 16, 4, 
8), + (4096, 4096, 4096, 32, 32, True, False, True): (4, 16, 4, 8), + (4096, 4096, 4096, 64, 64, False, True, True): (4, 32, 3, 4), + (4096, 4096, 4096, 64, 64, True, False, True): (2, 32, 3, 4), + (4096, 4096, 4096, 128, 128, False, True, True): (2, 32, 1, 4), + (4096, 4096, 4096, 128, 128, True, False, True): (2, 32, 1, 4), + (4096, 4096, 8192, 16, 16, False, True, True): (4, 64, 4, 2), + (4096, 4096, 8192, 16, 16, True, False, True): (4, 64, 4, 2), + (4096, 4096, 8192, 32, 32, False, True, True): (4, 32, 4, 8), + (4096, 4096, 8192, 32, 32, True, False, True): (4, 32, 4, 8), + (4096, 4096, 8192, 64, 64, False, True, True): (4, 64, 3, 4), + (4096, 4096, 8192, 64, 64, True, False, True): (4, 64, 3, 4), + (4096, 4096, 8192, 128, 128, False, True, True): (1, 64, 1, 4), + (4096, 4096, 8192, 128, 128, True, False, True): (1, 64, 1, 4), + (4096, 4096, 16384, 16, 16, False, True, True): (4, 64, 4, 4), + (4096, 4096, 16384, 16, 16, True, False, True): (4, 64, 4, 4), + (4096, 4096, 16384, 32, 32, False, True, True): (4, 64, 4, 8), + (4096, 4096, 16384, 32, 32, True, False, True): (4, 64, 4, 8), + (4096, 4096, 16384, 64, 64, False, True, True): (4, 128, 3, 4), + (4096, 4096, 16384, 64, 64, True, False, True): (4, 128, 3, 4), + (4096, 4096, 16384, 128, 128, False, True, True): (1, 128, 1, 4), + (4096, 4096, 16384, 128, 128, True, False, True): (1, 128, 1, 4), + (4096, 4096, 32768, 16, 16, False, True, True): (8, 128, 4, 4), + (4096, 4096, 32768, 16, 16, True, False, True): (5, 128, 4, 4), + (4096, 4096, 32768, 32, 32, False, True, True): (5, 128, 4, 4), + (4096, 4096, 32768, 32, 32, True, False, True): (3, 128, 4, 8), + (4096, 4096, 32768, 64, 64, False, True, True): (3, 256, 3, 4), + (4096, 4096, 32768, 64, 64, True, False, True): (2, 256, 3, 4), + (4096, 4096, 32768, 128, 128, False, True, True): (1, 256, 1, 4), + (4096, 4096, 32768, 128, 128, True, False, True): (1, 256, 1, 4), + (4096, 4096, 65536, 16, 16, False, True, True): (5, 256, 4, 4), + (4096, 4096, 65536, 16, 16, True, False, True): (5, 256, 4, 4), + (4096, 4096, 65536, 32, 32, False, True, True): (4, 256, 4, 8), + (4096, 4096, 65536, 32, 32, True, False, True): (4, 256, 4, 8), + (4096, 4096, 65536, 64, 64, False, True, True): (1, 512, 3, 4), + (4096, 4096, 65536, 64, 64, True, False, True): (3, 512, 3, 4), + (4096, 4096, 65536, 128, 128, False, True, True): (1, 512, 1, 4), + (4096, 4096, 65536, 128, 128, True, False, True): (1, 512, 1, 4), + (4096, 4096, 131072, 16, 16, False, True, True): (5, 512, 4, 4), + (4096, 4096, 131072, 16, 16, True, False, True): (5, 512, 4, 4), + (4096, 4096, 131072, 32, 32, False, True, True): (4, 512, 4, 4), + (4096, 4096, 131072, 32, 32, True, False, True): (2, 512, 3, 4), + (4096, 4096, 131072, 64, 64, False, True, True): (1, 1024, 3, 4), + (4096, 4096, 131072, 64, 64, True, False, True): (3, 1024, 3, 4), + (4096, 4096, 131072, 128, 128, False, True, True): (1, 1024, 1, 4), + (4096, 4096, 131072, 128, 128, True, False, True): (1, 1024, 1, 4), + (8192, 8192, 256, 16, 16, False, True, True): (1, 1, 3, 4), + (8192, 8192, 256, 16, 16, True, False, True): (4, 1, 3, 4), + (8192, 8192, 256, 32, 32, False, True, True): (1, 2, 3, 4), + (8192, 8192, 256, 32, 32, True, False, True): (1, 2, 3, 4), + (8192, 8192, 256, 64, 64, False, True, True): (6, 2, 3, 8), + (8192, 8192, 256, 64, 64, True, False, True): (4, 2, 3, 8), + (8192, 8192, 256, 128, 128, False, True, True): (1, 2, 1, 4), + (8192, 8192, 256, 128, 128, True, False, True): (1, 2, 1, 4), + (8192, 8192, 512, 16, 16, False, True, True): (4, 4, 3, 2), + (8192, 
8192, 512, 16, 16, True, False, True): (4, 4, 3, 4), + (8192, 8192, 512, 32, 32, False, True, True): (1, 4, 3, 4), + (8192, 8192, 512, 32, 32, True, False, True): (3, 4, 3, 2), + (8192, 8192, 512, 64, 64, False, True, True): (1, 4, 3, 4), + (8192, 8192, 512, 64, 64, True, False, True): (1, 4, 3, 4), + (8192, 8192, 512, 128, 128, False, True, True): (4, 4, 2, 8), + (8192, 8192, 512, 128, 128, True, False, True): (4, 4, 2, 8), + (8192, 8192, 1024, 16, 16, False, True, True): (4, 8, 4, 4), + (8192, 8192, 1024, 16, 16, True, False, True): (2, 8, 4, 4), + (8192, 8192, 1024, 32, 32, False, True, True): (2, 4, 4, 8), + (8192, 8192, 1024, 32, 32, True, False, True): (1, 4, 3, 4), + (8192, 8192, 1024, 64, 64, False, True, True): (4, 8, 3, 4), + (8192, 8192, 1024, 64, 64, True, False, True): (2, 8, 3, 4), + (8192, 8192, 1024, 128, 128, False, True, True): (4, 8, 1, 4), + (8192, 8192, 1024, 128, 128, True, False, True): (4, 8, 1, 4), + (8192, 8192, 2048, 16, 16, False, True, True): (2, 8, 4, 4), + (8192, 8192, 2048, 16, 16, True, False, True): (2, 8, 4, 4), + (8192, 8192, 2048, 32, 32, False, True, True): (2, 8, 4, 8), + (8192, 8192, 2048, 32, 32, True, False, True): (2, 8, 4, 8), + (8192, 8192, 2048, 64, 64, False, True, True): (4, 8, 2, 4), + (8192, 8192, 2048, 64, 64, True, False, True): (4, 16, 3, 4), + (8192, 8192, 2048, 128, 128, False, True, True): (4, 16, 1, 4), + (8192, 8192, 2048, 128, 128, True, False, True): (4, 16, 1, 4), + (8192, 8192, 4096, 16, 16, False, True, True): (4, 16, 4, 4), + (8192, 8192, 4096, 16, 16, True, False, True): (4, 32, 4, 2), + (8192, 8192, 4096, 32, 32, False, True, True): (2, 16, 4, 8), + (8192, 8192, 4096, 32, 32, True, False, True): (2, 16, 4, 8), + (8192, 8192, 4096, 64, 64, False, True, True): (4, 32, 3, 4), + (8192, 8192, 4096, 64, 64, True, False, True): (4, 16, 2, 4), + (8192, 8192, 4096, 128, 128, False, True, True): (4, 32, 1, 4), + (8192, 8192, 4096, 128, 128, True, False, True): (4, 32, 1, 4), + (8192, 8192, 8192, 16, 16, False, True, True): (4, 64, 4, 2), + (8192, 8192, 8192, 16, 16, True, False, True): (4, 64, 4, 2), + (8192, 8192, 8192, 32, 32, False, True, True): (2, 32, 4, 8), + (8192, 8192, 8192, 32, 32, True, False, True): (2, 32, 4, 8), + (8192, 8192, 8192, 64, 64, False, True, True): (4, 32, 3, 8), + (8192, 8192, 8192, 64, 64, True, False, True): (4, 32, 2, 4), + (8192, 8192, 8192, 128, 128, False, True, True): (4, 64, 1, 4), + (8192, 8192, 8192, 128, 128, True, False, True): (4, 64, 1, 4), + (8192, 8192, 16384, 16, 16, False, True, True): (4, 64, 4, 4), + (8192, 8192, 16384, 16, 16, True, False, True): (4, 64, 4, 4), + (8192, 8192, 16384, 32, 32, False, True, True): (4, 64, 3, 4), + (8192, 8192, 16384, 32, 32, True, False, True): (4, 64, 4, 8), + (8192, 8192, 16384, 64, 64, False, True, True): (4, 64, 2, 4), + (8192, 8192, 16384, 64, 64, True, False, True): (4, 64, 2, 4), + (8192, 8192, 16384, 128, 128, False, True, True): (4, 128, 1, 4), + (8192, 8192, 16384, 128, 128, True, False, True): (4, 128, 1, 4), + (8192, 8192, 32768, 16, 16, False, True, True): (3, 128, 4, 4), + (8192, 8192, 32768, 16, 16, True, False, True): (3, 128, 4, 4), + (8192, 8192, 32768, 32, 32, False, True, True): (2, 128, 4, 8), + (8192, 8192, 32768, 32, 32, True, False, True): (2, 128, 4, 8), + (8192, 8192, 32768, 64, 64, False, True, True): (2, 128, 2, 4), + (8192, 8192, 32768, 64, 64, True, False, True): (2, 128, 2, 4), + (8192, 8192, 32768, 128, 128, False, True, True): (4, 256, 1, 4), + (8192, 8192, 32768, 128, 128, True, False, True): (4, 256, 1, 4), + (8192, 8192, 
65536, 16, 16, False, True, True): (3, 256, 4, 4), + (8192, 8192, 65536, 16, 16, True, False, True): (3, 256, 4, 4), + (8192, 8192, 65536, 32, 32, False, True, True): (2, 256, 3, 4), + (8192, 8192, 65536, 32, 32, True, False, True): (2, 256, 3, 4), + (8192, 8192, 65536, 64, 64, False, True, True): (2, 256, 2, 4), + (8192, 8192, 65536, 64, 64, True, False, True): (2, 256, 3, 8), + (8192, 8192, 65536, 128, 128, False, True, True): (4, 512, 1, 4), + (8192, 8192, 65536, 128, 128, True, False, True): (4, 512, 1, 4), + (8192, 8192, 131072, 16, 16, False, True, True): (3, 512, 4, 4), + (8192, 8192, 131072, 16, 16, True, False, True): (3, 512, 4, 4), + (8192, 8192, 131072, 32, 32, False, True, True): (2, 512, 4, 4), + (8192, 8192, 131072, 32, 32, True, False, True): (2, 512, 3, 4), + (8192, 8192, 131072, 64, 64, False, True, True): (4, 512, 2, 4), + (8192, 8192, 131072, 64, 64, True, False, True): (2, 512, 2, 4), + (8192, 8192, 131072, 128, 128, False, True, True): (4, 1024, 1, 4), + (8192, 8192, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), + (16384, 16384, 256, 16, 16, False, True, True): (2, 2, 6, 4), + (16384, 16384, 256, 16, 16, True, False, True): (2, 2, 6, 4), + (16384, 16384, 256, 32, 32, False, True, True): (4, 2, 3, 2), + (16384, 16384, 256, 32, 32, True, False, True): (4, 2, 3, 2), + (16384, 16384, 256, 64, 64, False, True, True): (2, 2, 4, 4), + (16384, 16384, 256, 64, 64, True, False, True): (4, 2, 3, 8), + (16384, 16384, 256, 128, 128, False, True, True): (4, 2, 2, 8), + (16384, 16384, 256, 128, 128, True, False, True): (4, 2, 2, 8), + (16384, 16384, 512, 16, 16, False, True, True): (1, 2, 4, 4), + (16384, 16384, 512, 16, 16, True, False, True): (1, 2, 4, 4), + (16384, 16384, 512, 32, 32, False, True, True): (2, 2, 4, 8), + (16384, 16384, 512, 32, 32, True, False, True): (2, 2, 4, 8), + (16384, 16384, 512, 64, 64, False, True, True): (4, 4, 3, 4), + (16384, 16384, 512, 64, 64, True, False, True): (4, 4, 3, 4), + (16384, 16384, 512, 128, 128, False, True, True): (4, 4, 2, 8), + (16384, 16384, 512, 128, 128, True, False, True): (4, 4, 2, 8), + (16384, 16384, 1024, 16, 16, False, True, True): (3, 4, 4, 4), + (16384, 16384, 1024, 16, 16, True, False, True): (2, 8, 4, 4), + (16384, 16384, 1024, 32, 32, False, True, True): (2, 4, 4, 8), + (16384, 16384, 1024, 32, 32, True, False, True): (1, 4, 4, 8), + (16384, 16384, 1024, 64, 64, False, True, True): (2, 8, 3, 4), + (16384, 16384, 1024, 64, 64, True, False, True): (2, 8, 3, 4), + (16384, 16384, 1024, 128, 128, False, True, True): (4, 8, 1, 4), + (16384, 16384, 1024, 128, 128, True, False, True): (4, 8, 1, 4), + (16384, 16384, 2048, 16, 16, False, True, True): (2, 8, 4, 4), + (16384, 16384, 2048, 16, 16, True, False, True): (2, 8, 4, 4), + (16384, 16384, 2048, 32, 32, False, True, True): (1, 8, 4, 8), + (16384, 16384, 2048, 32, 32, True, False, True): (2, 8, 4, 8), + (16384, 16384, 2048, 64, 64, False, True, True): (2, 8, 2, 4), + (16384, 16384, 2048, 64, 64, True, False, True): (2, 8, 2, 4), + (16384, 16384, 2048, 128, 128, False, True, True): (4, 16, 1, 4), + (16384, 16384, 2048, 128, 128, True, False, True): (4, 16, 1, 4), + (16384, 16384, 4096, 16, 16, False, True, True): (2, 16, 4, 4), + (16384, 16384, 4096, 16, 16, True, False, True): (2, 16, 4, 4), + (16384, 16384, 4096, 32, 32, False, True, True): (1, 8, 3, 8), + (16384, 16384, 4096, 32, 32, True, False, True): (2, 16, 3, 4), + (16384, 16384, 4096, 64, 64, False, True, True): (2, 16, 2, 4), + (16384, 16384, 4096, 64, 64, True, False, True): (2, 16, 2, 4), + (16384, 16384, 
4096, 128, 128, False, True, True): (4, 32, 1, 4), + (16384, 16384, 4096, 128, 128, True, False, True): (4, 32, 1, 4), + (16384, 16384, 8192, 16, 16, False, True, True): (4, 64, 4, 2), + (16384, 16384, 8192, 16, 16, True, False, True): (4, 64, 4, 2), + (16384, 16384, 8192, 32, 32, False, True, True): (2, 32, 4, 8), + (16384, 16384, 8192, 32, 32, True, False, True): (2, 32, 3, 4), + (16384, 16384, 8192, 64, 64, False, True, True): (2, 32, 4, 8), + (16384, 16384, 8192, 64, 64, True, False, True): (2, 32, 3, 8), + (16384, 16384, 8192, 128, 128, False, True, True): (4, 64, 1, 4), + (16384, 16384, 8192, 128, 128, True, False, True): (4, 64, 1, 4), + (16384, 16384, 16384, 16, 16, False, True, True): (1, 64, 4, 4), + (16384, 16384, 16384, 16, 16, True, False, True): (1, 64, 4, 4), + (16384, 16384, 16384, 32, 32, False, True, True): (1, 64, 3, 8), + (16384, 16384, 16384, 32, 32, True, False, True): (1, 64, 3, 4), + (16384, 16384, 16384, 64, 64, False, True, True): (1, 64, 2, 4), + (16384, 16384, 16384, 64, 64, True, False, True): (1, 64, 4, 8), + (16384, 16384, 16384, 128, 128, False, True, True): (4, 128, 1, 4), + (16384, 16384, 16384, 128, 128, True, False, True): (4, 128, 1, 4), + (16384, 16384, 32768, 16, 16, False, True, True): (1, 128, 4, 4), + (16384, 16384, 32768, 16, 16, True, False, True): (1, 128, 4, 4), + (16384, 16384, 32768, 32, 32, False, True, True): (1, 128, 4, 2), + (16384, 16384, 32768, 32, 32, True, False, True): (1, 128, 3, 8), + (16384, 16384, 32768, 64, 64, False, True, True): (2, 128, 2, 4), + (16384, 16384, 32768, 64, 64, True, False, True): (1, 128, 3, 8), + (16384, 16384, 32768, 128, 128, False, True, True): (4, 256, 1, 4), + (16384, 16384, 32768, 128, 128, True, False, True): (4, 256, 1, 4), + (16384, 16384, 65536, 16, 16, False, True, True): (1, 256, 4, 4), + (16384, 16384, 65536, 16, 16, True, False, True): (1, 256, 4, 4), + (16384, 16384, 65536, 32, 32, False, True, True): (1, 256, 3, 4), + (16384, 16384, 65536, 32, 32, True, False, True): (1, 256, 3, 4), + (16384, 16384, 65536, 64, 64, False, True, True): (1, 256, 2, 4), + (16384, 16384, 65536, 64, 64, True, False, True): (2, 256, 2, 4), + (16384, 16384, 65536, 128, 128, False, True, True): (4, 512, 1, 4), + (16384, 16384, 65536, 128, 128, True, False, True): (4, 512, 1, 4), + (16384, 16384, 131072, 16, 16, False, True, True): (2, 512, 4, 4), + (16384, 16384, 131072, 16, 16, True, False, True): (1, 512, 4, 4), + (16384, 16384, 131072, 32, 32, False, True, True): (1, 512, 4, 8), + (16384, 16384, 131072, 32, 32, True, False, True): (1, 512, 3, 4), + (16384, 16384, 131072, 64, 64, False, True, True): (2, 512, 2, 4), + (16384, 16384, 131072, 64, 64, True, False, True): (1, 512, 2, 4), + (16384, 16384, 131072, 128, 128, False, True, True): (4, 1024, 1, 4), + (16384, 16384, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), + }, + ("bsr_dense_addmm", "NVIDIA A100-SXM4-80GB", (0, torch.float16, 0.5)): { + (16, 16, 16, 16, 16, False, False, False): (1, 1, 1, 1), + (16, 16, 16, 16, 16, False, False, True): (1, 1, 2, 2), + (16, 16, 16, 16, 16, False, True, False): (1, 1, 1, 1), + (16, 16, 16, 16, 16, False, True, True): (1, 1, 1, 8), + (16, 16, 16, 16, 16, True, False, False): (3, 1, 3, 4), + (16, 16, 16, 16, 16, True, False, True): (1, 1, 2, 1), + (16, 16, 32, 16, 16, False, False, False): (1, 2, 1, 8), + (16, 16, 32, 16, 16, False, False, True): (1, 2, 1, 2), + (16, 16, 32, 16, 16, False, True, False): (2, 1, 1, 4), + (16, 16, 32, 16, 16, False, True, True): (1, 2, 1, 4), + (16, 16, 32, 16, 16, True, False, False): (1, 
1, 1, 4), + (16, 16, 32, 16, 16, True, False, True): (1, 2, 1, 2), + (16, 16, 64, 16, 16, False, False, False): (1, 4, 1, 1), + (16, 16, 64, 16, 16, False, False, True): (1, 2, 2, 4), + (16, 16, 64, 16, 16, False, True, False): (1, 4, 1, 4), + (16, 16, 64, 16, 16, False, True, True): (1, 2, 1, 4), + (16, 16, 64, 16, 16, True, False, False): (1, 4, 1, 2), + (16, 16, 64, 16, 16, True, False, True): (1, 1, 1, 2), + (16, 32, 16, 16, 16, False, False, False): (1, 1, 2, 4), + (16, 32, 16, 16, 16, False, False, True): (1, 1, 1, 4), + (16, 32, 16, 16, 16, False, True, False): (1, 1, 1, 2), + (16, 32, 16, 16, 16, False, True, True): (1, 1, 1, 2), + (16, 32, 16, 16, 16, True, False, False): (1, 1, 2, 16), + (16, 32, 16, 16, 16, True, False, True): (1, 1, 1, 4), + (16, 32, 16, 16, 32, False, False, False): (2, 1, 1, 8), + (16, 32, 16, 16, 32, False, False, True): (2, 1, 1, 8), + (16, 32, 16, 16, 32, False, True, False): (1, 1, 2, 1), + (16, 32, 16, 16, 32, False, True, True): (1, 1, 1, 4), + (16, 32, 16, 16, 32, True, False, False): (2, 1, 1, 8), + (16, 32, 16, 16, 32, True, False, True): (1, 1, 2, 4), + (16, 32, 32, 16, 16, False, False, False): (1, 1, 1, 16), + (16, 32, 32, 16, 16, False, False, True): (1, 2, 1, 2), + (16, 32, 32, 16, 16, False, True, False): (1, 2, 1, 8), + (16, 32, 32, 16, 16, False, True, True): (3, 2, 1, 4), + (16, 32, 32, 16, 16, True, False, False): (1, 2, 1, 4), + (16, 32, 32, 16, 16, True, False, True): (1, 2, 1, 2), + (16, 32, 32, 16, 32, False, False, False): (1, 2, 1, 2), + (16, 32, 32, 16, 32, False, False, True): (1, 1, 1, 4), + (16, 32, 32, 16, 32, False, True, False): (1, 1, 2, 4), + (16, 32, 32, 16, 32, False, True, True): (1, 2, 1, 2), + (16, 32, 32, 16, 32, True, False, False): (1, 2, 1, 2), + (16, 32, 32, 16, 32, True, False, True): (1, 2, 1, 16), + (16, 32, 64, 16, 16, False, False, False): (1, 4, 1, 4), + (16, 32, 64, 16, 16, False, False, True): (2, 4, 1, 4), + (16, 32, 64, 16, 16, False, True, False): (1, 4, 1, 4), + (16, 32, 64, 16, 16, False, True, True): (1, 4, 1, 4), + (16, 32, 64, 16, 16, True, False, False): (3, 4, 1, 2), + (16, 32, 64, 16, 16, True, False, True): (1, 4, 1, 1), + (16, 32, 64, 16, 32, False, False, False): (1, 4, 1, 16), + (16, 32, 64, 16, 32, False, False, True): (1, 2, 1, 2), + (16, 32, 64, 16, 32, False, True, False): (1, 4, 2, 2), + (16, 32, 64, 16, 32, False, True, True): (1, 4, 1, 8), + (16, 32, 64, 16, 32, True, False, False): (1, 4, 1, 8), + (16, 32, 64, 16, 32, True, False, True): (1, 2, 1, 4), + (16, 64, 16, 16, 32, False, False, False): (1, 1, 1, 2), + (16, 64, 16, 16, 32, False, False, True): (1, 1, 1, 4), + (16, 64, 16, 16, 32, False, True, False): (2, 1, 2, 4), + (16, 64, 16, 16, 32, False, True, True): (1, 1, 1, 4), + (16, 64, 16, 16, 32, True, False, False): (1, 1, 1, 4), + (16, 64, 16, 16, 32, True, False, True): (1, 1, 1, 4), + (16, 64, 32, 16, 32, False, False, False): (1, 2, 1, 2), + (16, 64, 32, 16, 32, False, False, True): (1, 1, 1, 4), + (16, 64, 32, 16, 32, False, True, False): (1, 1, 1, 4), + (16, 64, 32, 16, 32, False, True, True): (1, 2, 3, 2), + (16, 64, 32, 16, 32, True, False, False): (1, 1, 1, 4), + (16, 64, 32, 16, 32, True, False, True): (1, 1, 2, 4), + (16, 64, 64, 16, 32, False, False, False): (1, 4, 1, 8), + (16, 64, 64, 16, 32, False, False, True): (1, 4, 1, 4), + (16, 64, 64, 16, 32, False, True, False): (1, 4, 1, 1), + (16, 64, 64, 16, 32, False, True, True): (2, 4, 1, 4), + (16, 64, 64, 16, 32, True, False, False): (1, 4, 1, 4), + (16, 64, 64, 16, 32, True, False, True): (1, 4, 1, 4), + (32, 16, 
16, 16, 16, False, False, False): (2, 1, 2, 4), + (32, 16, 16, 16, 16, False, False, True): (2, 1, 1, 2), + (32, 16, 16, 16, 16, False, True, False): (1, 1, 2, 4), + (32, 16, 16, 16, 16, False, True, True): (1, 1, 1, 2), + (32, 16, 16, 16, 16, True, False, False): (1, 1, 1, 4), + (32, 16, 16, 16, 16, True, False, True): (2, 1, 1, 2), + (32, 16, 32, 16, 16, False, False, False): (1, 1, 1, 4), + (32, 16, 32, 16, 16, False, False, True): (1, 1, 1, 4), + (32, 16, 32, 16, 16, False, True, False): (1, 2, 1, 4), + (32, 16, 32, 16, 16, False, True, True): (2, 2, 1, 4), + (32, 16, 32, 16, 16, True, False, False): (2, 1, 1, 4), + (32, 16, 32, 16, 16, True, False, True): (2, 2, 1, 2), + (32, 16, 64, 16, 16, False, False, False): (1, 4, 1, 2), + (32, 16, 64, 16, 16, False, False, True): (1, 4, 1, 4), + (32, 16, 64, 16, 16, False, True, False): (1, 2, 1, 4), + (32, 16, 64, 16, 16, False, True, True): (1, 4, 1, 2), + (32, 16, 64, 16, 16, True, False, False): (1, 4, 2, 8), + (32, 16, 64, 16, 16, True, False, True): (1, 4, 1, 1), + (32, 32, 16, 16, 16, False, False, False): (1, 1, 1, 4), + (32, 32, 16, 16, 16, False, False, True): (2, 1, 1, 4), + (32, 32, 16, 16, 16, False, True, False): (1, 1, 2, 4), + (32, 32, 16, 16, 16, False, True, True): (1, 1, 2, 2), + (32, 32, 16, 16, 16, True, False, False): (1, 1, 1, 8), + (32, 32, 16, 16, 16, True, False, True): (1, 1, 1, 4), + (32, 32, 16, 16, 32, False, False, False): (1, 1, 3, 2), + (32, 32, 16, 16, 32, False, False, True): (2, 1, 1, 4), + (32, 32, 16, 16, 32, False, True, False): (3, 1, 1, 4), + (32, 32, 16, 16, 32, False, True, True): (1, 1, 1, 4), + (32, 32, 16, 16, 32, True, False, False): (2, 1, 1, 8), + (32, 32, 16, 16, 32, True, False, True): (1, 1, 3, 2), + (32, 32, 16, 32, 32, False, False, False): (1, 1, 1, 2), + (32, 32, 16, 32, 32, False, False, True): (2, 1, 1, 8), + (32, 32, 16, 32, 32, False, True, False): (1, 1, 1, 2), + (32, 32, 16, 32, 32, False, True, True): (1, 1, 1, 8), + (32, 32, 16, 32, 32, True, False, False): (1, 1, 2, 4), + (32, 32, 16, 32, 32, True, False, True): (1, 1, 1, 2), + (32, 32, 32, 16, 16, False, False, False): (1, 1, 1, 4), + (32, 32, 32, 16, 16, False, False, True): (1, 2, 1, 4), + (32, 32, 32, 16, 16, False, True, False): (1, 2, 1, 4), + (32, 32, 32, 16, 16, False, True, True): (1, 2, 1, 2), + (32, 32, 32, 16, 16, True, False, False): (1, 2, 1, 4), + (32, 32, 32, 16, 16, True, False, True): (1, 2, 1, 4), + (32, 32, 32, 16, 32, False, False, False): (1, 2, 1, 4), + (32, 32, 32, 16, 32, False, False, True): (1, 2, 1, 2), + (32, 32, 32, 16, 32, False, True, False): (1, 2, 1, 4), + (32, 32, 32, 16, 32, False, True, True): (1, 2, 1, 2), + (32, 32, 32, 16, 32, True, False, False): (1, 2, 1, 1), + (32, 32, 32, 16, 32, True, False, True): (1, 2, 1, 2), + (32, 32, 32, 32, 32, False, False, False): (1, 1, 1, 4), + (32, 32, 32, 32, 32, False, False, True): (2, 1, 1, 4), + (32, 32, 32, 32, 32, False, True, False): (1, 1, 1, 8), + (32, 32, 32, 32, 32, False, True, True): (1, 1, 1, 8), + (32, 32, 32, 32, 32, True, False, False): (1, 1, 3, 4), + (32, 32, 32, 32, 32, True, False, True): (1, 1, 1, 8), + (32, 32, 64, 16, 16, False, False, False): (1, 4, 1, 4), + (32, 32, 64, 16, 16, False, False, True): (1, 4, 1, 2), + (32, 32, 64, 16, 16, False, True, False): (1, 1, 1, 4), + (32, 32, 64, 16, 16, False, True, True): (1, 4, 1, 4), + (32, 32, 64, 16, 16, True, False, False): (1, 4, 1, 8), + (32, 32, 64, 16, 16, True, False, True): (1, 4, 1, 2), + (32, 32, 64, 16, 32, False, False, False): (1, 1, 1, 4), + (32, 32, 64, 16, 32, False, False, 
True): (1, 4, 1, 4), + (32, 32, 64, 16, 32, False, True, False): (1, 1, 1, 4), + (32, 32, 64, 16, 32, False, True, True): (1, 4, 1, 4), + (32, 32, 64, 16, 32, True, False, False): (2, 2, 1, 8), + (32, 32, 64, 16, 32, True, False, True): (1, 2, 1, 2), + (32, 32, 64, 32, 32, False, False, False): (1, 2, 1, 4), + (32, 32, 64, 32, 32, False, False, True): (1, 2, 1, 1), + (32, 32, 64, 32, 32, False, True, False): (1, 2, 2, 8), + (32, 32, 64, 32, 32, False, True, True): (1, 1, 1, 4), + (32, 32, 64, 32, 32, True, False, False): (1, 2, 1, 4), + (32, 32, 64, 32, 32, True, False, True): (2, 2, 1, 4), + (32, 64, 16, 16, 32, False, False, False): (1, 1, 1, 8), + (32, 64, 16, 16, 32, False, False, True): (1, 1, 1, 4), + (32, 64, 16, 16, 32, False, True, False): (2, 1, 1, 4), + (32, 64, 16, 16, 32, False, True, True): (1, 1, 1, 4), + (32, 64, 16, 16, 32, True, False, False): (1, 1, 2, 4), + (32, 64, 16, 16, 32, True, False, True): (1, 1, 2, 2), + (32, 64, 16, 32, 32, False, False, False): (1, 1, 1, 8), + (32, 64, 16, 32, 32, False, False, True): (2, 1, 1, 4), + (32, 64, 16, 32, 32, False, True, False): (1, 1, 1, 4), + (32, 64, 16, 32, 32, False, True, True): (1, 1, 2, 2), + (32, 64, 16, 32, 32, True, False, False): (1, 1, 1, 2), + (32, 64, 16, 32, 32, True, False, True): (2, 1, 2, 4), + (32, 64, 32, 16, 32, False, False, False): (1, 1, 1, 4), + (32, 64, 32, 16, 32, False, False, True): (1, 2, 1, 2), + (32, 64, 32, 16, 32, False, True, False): (1, 2, 3, 4), + (32, 64, 32, 16, 32, False, True, True): (2, 2, 1, 4), + (32, 64, 32, 16, 32, True, False, False): (1, 1, 1, 4), + (32, 64, 32, 16, 32, True, False, True): (1, 2, 2, 1), + (32, 64, 32, 32, 32, False, False, False): (1, 1, 1, 8), + (32, 64, 32, 32, 32, False, False, True): (1, 1, 1, 4), + (32, 64, 32, 32, 32, False, True, False): (1, 1, 2, 4), + (32, 64, 32, 32, 32, False, True, True): (1, 1, 1, 4), + (32, 64, 32, 32, 32, True, False, False): (2, 1, 1, 2), + (32, 64, 32, 32, 32, True, False, True): (1, 1, 1, 4), + (32, 64, 64, 16, 32, False, False, False): (1, 4, 2, 1), + (32, 64, 64, 16, 32, False, False, True): (3, 4, 1, 4), + (32, 64, 64, 16, 32, False, True, False): (1, 1, 1, 8), + (32, 64, 64, 16, 32, False, True, True): (1, 4, 1, 4), + (32, 64, 64, 16, 32, True, False, False): (1, 4, 1, 4), + (32, 64, 64, 16, 32, True, False, True): (2, 2, 3, 4), + (32, 64, 64, 32, 32, False, False, False): (1, 2, 1, 4), + (32, 64, 64, 32, 32, False, False, True): (1, 2, 1, 4), + (32, 64, 64, 32, 32, False, True, False): (1, 2, 2, 8), + (32, 64, 64, 32, 32, False, True, True): (1, 2, 1, 4), + (32, 64, 64, 32, 32, True, False, False): (1, 2, 2, 4), + (32, 64, 64, 32, 32, True, False, True): (1, 2, 1, 4), + (64, 32, 16, 32, 32, False, False, False): (1, 1, 1, 1), + (64, 32, 16, 32, 32, False, False, True): (1, 1, 2, 4), + (64, 32, 16, 32, 32, False, True, False): (2, 1, 1, 8), + (64, 32, 16, 32, 32, False, True, True): (1, 1, 1, 4), + (64, 32, 16, 32, 32, True, False, False): (2, 1, 1, 2), + (64, 32, 16, 32, 32, True, False, True): (1, 1, 1, 4), + (64, 32, 32, 32, 32, False, False, False): (3, 1, 1, 4), + (64, 32, 32, 32, 32, False, False, True): (1, 1, 1, 4), + (64, 32, 32, 32, 32, False, True, False): (1, 1, 1, 8), + (64, 32, 32, 32, 32, False, True, True): (1, 1, 1, 2), + (64, 32, 32, 32, 32, True, False, False): (1, 1, 1, 2), + (64, 32, 32, 32, 32, True, False, True): (1, 1, 1, 4), + (64, 32, 64, 32, 32, False, False, False): (1, 2, 1, 2), + (64, 32, 64, 32, 32, False, False, True): (3, 2, 1, 4), + (64, 32, 64, 32, 32, False, True, False): (1, 1, 1, 1), + 
(64, 32, 64, 32, 32, False, True, True): (1, 2, 1, 4), + (64, 32, 64, 32, 32, True, False, False): (1, 1, 3, 4), + (64, 32, 64, 32, 32, True, False, True): (1, 2, 2, 4), + (64, 64, 16, 32, 32, False, False, False): (1, 1, 2, 2), + (64, 64, 16, 32, 32, False, False, True): (1, 1, 3, 2), + (64, 64, 16, 32, 32, False, True, False): (1, 1, 1, 8), + (64, 64, 16, 32, 32, False, True, True): (1, 1, 2, 4), + (64, 64, 16, 32, 32, True, False, False): (1, 1, 2, 4), + (64, 64, 16, 32, 32, True, False, True): (2, 1, 2, 4), + (64, 64, 32, 32, 32, False, False, False): (1, 1, 2, 8), + (64, 64, 32, 32, 32, False, False, True): (1, 1, 2, 4), + (64, 64, 32, 32, 32, False, True, False): (1, 1, 1, 4), + (64, 64, 32, 32, 32, False, True, True): (1, 1, 1, 4), + (64, 64, 32, 32, 32, True, False, False): (1, 1, 1, 4), + (64, 64, 32, 32, 32, True, False, True): (2, 1, 2, 4), + (64, 64, 64, 32, 32, False, False, False): (1, 2, 1, 4), + (64, 64, 64, 32, 32, False, False, True): (1, 2, 1, 4), + (64, 64, 64, 32, 32, False, True, False): (1, 2, 1, 4), + (64, 64, 64, 32, 32, False, True, True): (3, 2, 1, 4), + (64, 64, 64, 32, 32, True, False, False): (1, 2, 1, 8), + (64, 64, 64, 32, 32, True, False, True): (1, 2, 3, 4), + (256, 256, 256, 16, 16, False, True, True): (4, 8, 6, 2), + (256, 256, 256, 16, 16, True, False, True): (5, 16, 5, 1), + (256, 256, 256, 32, 32, False, True, True): (1, 8, 7, 4), + (256, 256, 256, 32, 32, True, False, True): (1, 8, 5, 4), + (256, 256, 256, 64, 64, False, True, True): (1, 4, 5, 4), + (256, 256, 256, 64, 64, True, False, True): (2, 4, 3, 4), + (256, 256, 256, 128, 128, False, True, True): (1, 2, 2, 8), + (256, 256, 256, 128, 128, True, False, True): (1, 2, 2, 8), + (256, 256, 512, 16, 16, False, True, True): (4, 8, 4, 4), + (256, 256, 512, 16, 16, True, False, True): (4, 8, 6, 2), + (256, 256, 512, 32, 32, False, True, True): (3, 8, 5, 4), + (256, 256, 512, 32, 32, True, False, True): (2, 8, 5, 4), + (256, 256, 512, 64, 64, False, True, True): (2, 8, 4, 4), + (256, 256, 512, 64, 64, True, False, True): (1, 8, 7, 4), + (256, 256, 512, 128, 128, False, True, True): (2, 4, 2, 8), + (256, 256, 512, 128, 128, True, False, True): (5, 4, 2, 8), + (256, 256, 1024, 16, 16, False, True, True): (1, 8, 4, 4), + (256, 256, 1024, 16, 16, True, False, True): (1, 16, 4, 2), + (256, 256, 1024, 32, 32, False, True, True): (5, 32, 5, 1), + (256, 256, 1024, 32, 32, True, False, True): (1, 16, 4, 2), + (256, 256, 1024, 64, 64, False, True, True): (1, 16, 4, 4), + (256, 256, 1024, 64, 64, True, False, True): (2, 16, 3, 4), + (256, 256, 1024, 128, 128, False, True, True): (9, 8, 2, 8), + (256, 256, 1024, 128, 128, True, False, True): (1, 8, 2, 8), + (256, 256, 2048, 16, 16, False, True, True): (6, 32, 5, 2), + (256, 256, 2048, 16, 16, True, False, True): (2, 32, 4, 2), + (256, 256, 2048, 32, 32, False, True, True): (1, 32, 3, 2), + (256, 256, 2048, 32, 32, True, False, True): (1, 32, 3, 2), + (256, 256, 2048, 64, 64, False, True, True): (2, 32, 4, 4), + (256, 256, 2048, 64, 64, True, False, True): (2, 16, 4, 4), + (256, 256, 2048, 128, 128, False, True, True): (3, 16, 2, 8), + (256, 256, 2048, 128, 128, True, False, True): (4, 16, 2, 8), + (256, 256, 4096, 16, 16, False, True, True): (1, 32, 3, 4), + (256, 256, 4096, 16, 16, True, False, True): (3, 16, 3, 2), + (256, 256, 4096, 32, 32, False, True, True): (3, 32, 3, 2), + (256, 256, 4096, 32, 32, True, False, True): (1, 32, 3, 2), + (256, 256, 4096, 64, 64, False, True, True): (2, 32, 3, 4), + (256, 256, 4096, 64, 64, True, False, True): (2, 32, 3, 4), + 
(256, 256, 4096, 128, 128, False, True, True): (5, 32, 2, 8), + (256, 256, 4096, 128, 128, True, False, True): (1, 32, 2, 8), + (256, 256, 8192, 16, 16, False, True, True): (8, 32, 3, 4), + (256, 256, 8192, 16, 16, True, False, True): (1, 32, 3, 2), + (256, 256, 8192, 32, 32, False, True, True): (3, 64, 3, 4), + (256, 256, 8192, 32, 32, True, False, True): (2, 128, 1, 2), + (256, 256, 8192, 64, 64, False, True, True): (7, 128, 1, 4), + (256, 256, 8192, 64, 64, True, False, True): (4, 128, 1, 4), + (256, 256, 8192, 128, 128, False, True, True): (2, 64, 1, 4), + (256, 256, 8192, 128, 128, True, False, True): (4, 64, 1, 4), + (256, 256, 16384, 16, 16, False, True, True): (4, 128, 3, 2), + (256, 256, 16384, 16, 16, True, False, True): (5, 64, 3, 2), + (256, 256, 16384, 32, 32, False, True, True): (5, 128, 3, 2), + (256, 256, 16384, 32, 32, True, False, True): (5, 128, 3, 2), + (256, 256, 16384, 64, 64, False, True, True): (1, 256, 1, 4), + (256, 256, 16384, 64, 64, True, False, True): (5, 128, 3, 4), + (256, 256, 16384, 128, 128, False, True, True): (11, 128, 2, 8), + (256, 256, 16384, 128, 128, True, False, True): (3, 128, 1, 4), + (256, 256, 32768, 16, 16, False, True, True): (1, 128, 3, 4), + (256, 256, 32768, 16, 16, True, False, True): (2, 128, 3, 2), + (256, 256, 32768, 32, 32, False, True, True): (4, 256, 3, 2), + (256, 256, 32768, 32, 32, True, False, True): (1, 256, 3, 2), + (256, 256, 32768, 64, 64, False, True, True): (2, 256, 1, 4), + (256, 256, 32768, 64, 64, True, False, True): (2, 256, 1, 4), + (256, 256, 32768, 128, 128, False, True, True): (3, 256, 1, 4), + (256, 256, 32768, 128, 128, True, False, True): (2, 256, 1, 4), + (256, 256, 65536, 16, 16, False, True, True): (1, 256, 3, 2), + (256, 256, 65536, 16, 16, True, False, True): (1, 256, 3, 2), + (256, 256, 65536, 32, 32, False, True, True): (1, 512, 3, 2), + (256, 256, 65536, 32, 32, True, False, True): (4, 512, 3, 2), + (256, 256, 65536, 64, 64, False, True, True): (2, 512, 1, 4), + (256, 256, 65536, 64, 64, True, False, True): (5, 512, 1, 4), + (256, 256, 65536, 128, 128, False, True, True): (3, 512, 1, 4), + (256, 256, 65536, 128, 128, True, False, True): (1, 512, 1, 4), + (256, 256, 131072, 16, 16, False, True, True): (1, 512, 3, 1), + (256, 256, 131072, 16, 16, True, False, True): (1, 512, 3, 2), + (256, 256, 131072, 32, 32, False, True, True): (2, 1024, 3, 2), + (256, 256, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), + (256, 256, 131072, 64, 64, False, True, True): (1, 1024, 1, 4), + (256, 256, 131072, 64, 64, True, False, True): (1, 1024, 1, 4), + (256, 256, 131072, 128, 128, False, True, True): (7, 1024, 1, 4), + (256, 256, 131072, 128, 128, True, False, True): (1, 1024, 1, 4), + (512, 512, 256, 16, 16, False, True, True): (1, 8, 5, 1), + (512, 512, 256, 16, 16, True, False, True): (2, 16, 5, 1), + (512, 512, 256, 32, 32, False, True, True): (2, 8, 5, 2), + (512, 512, 256, 32, 32, True, False, True): (4, 4, 5, 2), + (512, 512, 256, 64, 64, False, True, True): (1, 4, 5, 4), + (512, 512, 256, 64, 64, True, False, True): (3, 4, 5, 4), + (512, 512, 256, 128, 128, False, True, True): (1, 2, 2, 8), + (512, 512, 256, 128, 128, True, False, True): (1, 2, 2, 8), + (512, 512, 512, 16, 16, False, True, True): (1, 8, 4, 4), + (512, 512, 512, 16, 16, True, False, True): (4, 16, 5, 1), + (512, 512, 512, 32, 32, False, True, True): (4, 8, 5, 2), + (512, 512, 512, 32, 32, True, False, True): (7, 16, 4, 1), + (512, 512, 512, 64, 64, False, True, True): (3, 8, 5, 4), + (512, 512, 512, 64, 64, True, False, True): (1, 8, 4, 4), 
+ (512, 512, 512, 128, 128, False, True, True): (4, 4, 2, 8), + (512, 512, 512, 128, 128, True, False, True): (4, 4, 2, 8), + (512, 512, 1024, 16, 16, False, True, True): (2, 8, 4, 4), + (512, 512, 1024, 16, 16, True, False, True): (2, 16, 4, 2), + (512, 512, 1024, 32, 32, False, True, True): (3, 16, 4, 2), + (512, 512, 1024, 32, 32, True, False, True): (3, 16, 3, 2), + (512, 512, 1024, 64, 64, False, True, True): (5, 8, 5, 4), + (512, 512, 1024, 64, 64, True, False, True): (4, 16, 3, 4), + (512, 512, 1024, 128, 128, False, True, True): (6, 8, 2, 8), + (512, 512, 1024, 128, 128, True, False, True): (4, 8, 2, 8), + (512, 512, 2048, 16, 16, False, True, True): (2, 16, 3, 4), + (512, 512, 2048, 16, 16, True, False, True): (1, 16, 4, 2), + (512, 512, 2048, 32, 32, False, True, True): (2, 32, 3, 2), + (512, 512, 2048, 32, 32, True, False, True): (2, 32, 3, 2), + (512, 512, 2048, 64, 64, False, True, True): (1, 32, 3, 4), + (512, 512, 2048, 64, 64, True, False, True): (1, 32, 3, 2), + (512, 512, 2048, 128, 128, False, True, True): (3, 16, 2, 8), + (512, 512, 2048, 128, 128, True, False, True): (1, 16, 2, 8), + (512, 512, 4096, 16, 16, False, True, True): (4, 32, 3, 2), + (512, 512, 4096, 16, 16, True, False, True): (1, 32, 3, 2), + (512, 512, 4096, 32, 32, False, True, True): (3, 32, 3, 2), + (512, 512, 4096, 32, 32, True, False, True): (3, 32, 3, 2), + (512, 512, 4096, 64, 64, False, True, True): (1, 32, 3, 4), + (512, 512, 4096, 64, 64, True, False, True): (1, 64, 1, 4), + (512, 512, 4096, 128, 128, False, True, True): (7, 32, 2, 8), + (512, 512, 4096, 128, 128, True, False, True): (1, 32, 2, 8), + (512, 512, 8192, 16, 16, False, True, True): (4, 64, 3, 2), + (512, 512, 8192, 16, 16, True, False, True): (1, 64, 3, 2), + (512, 512, 8192, 32, 32, False, True, True): (3, 64, 3, 2), + (512, 512, 8192, 32, 32, True, False, True): (1, 64, 3, 2), + (512, 512, 8192, 64, 64, False, True, True): (1, 64, 3, 4), + (512, 512, 8192, 64, 64, True, False, True): (1, 64, 3, 4), + (512, 512, 8192, 128, 128, False, True, True): (7, 64, 2, 8), + (512, 512, 8192, 128, 128, True, False, True): (1, 64, 1, 4), + (512, 512, 16384, 16, 16, False, True, True): (1, 128, 3, 2), + (512, 512, 16384, 16, 16, True, False, True): (1, 64, 3, 2), + (512, 512, 16384, 32, 32, False, True, True): (1, 128, 3, 2), + (512, 512, 16384, 32, 32, True, False, True): (1, 128, 3, 2), + (512, 512, 16384, 64, 64, False, True, True): (1, 128, 3, 4), + (512, 512, 16384, 64, 64, True, False, True): (4, 128, 3, 4), + (512, 512, 16384, 128, 128, False, True, True): (5, 128, 2, 8), + (512, 512, 16384, 128, 128, True, False, True): (2, 128, 1, 4), + (512, 512, 32768, 16, 16, False, True, True): (1, 128, 3, 4), + (512, 512, 32768, 16, 16, True, False, True): (1, 128, 3, 2), + (512, 512, 32768, 32, 32, False, True, True): (1, 256, 3, 2), + (512, 512, 32768, 32, 32, True, False, True): (1, 256, 3, 2), + (512, 512, 32768, 64, 64, False, True, True): (1, 256, 3, 4), + (512, 512, 32768, 64, 64, True, False, True): (1, 256, 3, 4), + (512, 512, 32768, 128, 128, False, True, True): (5, 256, 1, 4), + (512, 512, 32768, 128, 128, True, False, True): (1, 256, 1, 4), + (512, 512, 65536, 16, 16, False, True, True): (1, 256, 3, 2), + (512, 512, 65536, 16, 16, True, False, True): (1, 256, 3, 1), + (512, 512, 65536, 32, 32, False, True, True): (1, 512, 3, 2), + (512, 512, 65536, 32, 32, True, False, True): (1, 512, 3, 2), + (512, 512, 65536, 64, 64, False, True, True): (2, 256, 2, 4), + (512, 512, 65536, 64, 64, True, False, True): (1, 512, 3, 4), + (512, 512, 65536, 
128, 128, False, True, True): (7, 512, 1, 4), + (512, 512, 65536, 128, 128, True, False, True): (5, 512, 1, 4), + (512, 512, 131072, 16, 16, False, True, True): (1, 512, 3, 1), + (512, 512, 131072, 16, 16, True, False, True): (1, 512, 3, 1), + (512, 512, 131072, 32, 32, False, True, True): (1, 1024, 3, 2), + (512, 512, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), + (512, 512, 131072, 64, 64, False, True, True): (4, 512, 2, 4), + (512, 512, 131072, 64, 64, True, False, True): (2, 512, 2, 4), + (512, 512, 131072, 128, 128, False, True, True): (5, 1024, 1, 4), + (512, 512, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), + (1024, 1024, 256, 16, 16, False, True, True): (3, 4, 5, 4), + (1024, 1024, 256, 16, 16, True, False, True): (3, 4, 5, 4), + (1024, 1024, 256, 32, 32, False, True, True): (2, 4, 6, 2), + (1024, 1024, 256, 32, 32, True, False, True): (2, 4, 6, 2), + (1024, 1024, 256, 64, 64, False, True, True): (1, 4, 4, 4), + (1024, 1024, 256, 64, 64, True, False, True): (2, 4, 6, 4), + (1024, 1024, 256, 128, 128, False, True, True): (1, 2, 2, 8), + (1024, 1024, 256, 128, 128, True, False, True): (1, 2, 2, 8), + (1024, 1024, 512, 16, 16, False, True, True): (3, 4, 5, 4), + (1024, 1024, 512, 16, 16, True, False, True): (3, 8, 4, 2), + (1024, 1024, 512, 32, 32, False, True, True): (1, 8, 4, 2), + (1024, 1024, 512, 32, 32, True, False, True): (1, 8, 4, 2), + (1024, 1024, 512, 64, 64, False, True, True): (2, 8, 3, 4), + (1024, 1024, 512, 64, 64, True, False, True): (1, 4, 4, 4), + (1024, 1024, 512, 128, 128, False, True, True): (7, 4, 2, 8), + (1024, 1024, 512, 128, 128, True, False, True): (1, 4, 2, 8), + (1024, 1024, 1024, 16, 16, False, True, True): (4, 8, 4, 2), + (1024, 1024, 1024, 16, 16, True, False, True): (3, 8, 5, 2), + (1024, 1024, 1024, 32, 32, False, True, True): (1, 8, 4, 4), + (1024, 1024, 1024, 32, 32, True, False, True): (1, 8, 4, 2), + (1024, 1024, 1024, 64, 64, False, True, True): (1, 16, 3, 4), + (1024, 1024, 1024, 64, 64, True, False, True): (3, 16, 3, 4), + (1024, 1024, 1024, 128, 128, False, True, True): (6, 8, 2, 8), + (1024, 1024, 1024, 128, 128, True, False, True): (4, 8, 2, 8), + (1024, 1024, 2048, 16, 16, False, True, True): (3, 8, 3, 4), + (1024, 1024, 2048, 16, 16, True, False, True): (3, 8, 3, 4), + (1024, 1024, 2048, 32, 32, False, True, True): (1, 16, 3, 4), + (1024, 1024, 2048, 32, 32, True, False, True): (1, 16, 3, 2), + (1024, 1024, 2048, 64, 64, False, True, True): (5, 16, 3, 4), + (1024, 1024, 2048, 64, 64, True, False, True): (5, 16, 3, 4), + (1024, 1024, 2048, 128, 128, False, True, True): (3, 16, 2, 8), + (1024, 1024, 2048, 128, 128, True, False, True): (4, 16, 2, 16), + (1024, 1024, 4096, 16, 16, False, True, True): (4, 32, 3, 2), + (1024, 1024, 4096, 16, 16, True, False, True): (8, 32, 3, 2), + (1024, 1024, 4096, 32, 32, False, True, True): (9, 32, 3, 2), + (1024, 1024, 4096, 32, 32, True, False, True): (1, 32, 3, 2), + (1024, 1024, 4096, 64, 64, False, True, True): (6, 32, 3, 4), + (1024, 1024, 4096, 64, 64, True, False, True): (1, 32, 3, 4), + (1024, 1024, 4096, 128, 128, False, True, True): (4, 32, 2, 8), + (1024, 1024, 4096, 128, 128, True, False, True): (4, 32, 1, 4), + (1024, 1024, 8192, 16, 16, False, True, True): (4, 64, 3, 2), + (1024, 1024, 8192, 16, 16, True, False, True): (4, 64, 3, 2), + (1024, 1024, 8192, 32, 32, False, True, True): (8, 64, 3, 2), + (1024, 1024, 8192, 32, 32, True, False, True): (6, 64, 3, 2), + (1024, 1024, 8192, 64, 64, False, True, True): (2, 64, 3, 4), + (1024, 1024, 8192, 64, 64, True, False, True): (2, 
64, 3, 4), + (1024, 1024, 8192, 128, 128, False, True, True): (3, 64, 1, 4), + (1024, 1024, 8192, 128, 128, True, False, True): (2, 64, 1, 4), + (1024, 1024, 16384, 16, 16, False, True, True): (1, 64, 3, 4), + (1024, 1024, 16384, 16, 16, True, False, True): (1, 64, 3, 2), + (1024, 1024, 16384, 32, 32, False, True, True): (1, 128, 3, 4), + (1024, 1024, 16384, 32, 32, True, False, True): (1, 64, 3, 4), + (1024, 1024, 16384, 64, 64, False, True, True): (1, 128, 3, 4), + (1024, 1024, 16384, 64, 64, True, False, True): (1, 128, 3, 4), + (1024, 1024, 16384, 128, 128, False, True, True): (11, 128, 1, 4), + (1024, 1024, 16384, 128, 128, True, False, True): (4, 128, 1, 4), + (1024, 1024, 32768, 16, 16, False, True, True): (1, 128, 3, 4), + (1024, 1024, 32768, 16, 16, True, False, True): (1, 128, 3, 1), + (1024, 1024, 32768, 32, 32, False, True, True): (1, 256, 3, 2), + (1024, 1024, 32768, 32, 32, True, False, True): (1, 128, 3, 4), + (1024, 1024, 32768, 64, 64, False, True, True): (2, 128, 2, 4), + (1024, 1024, 32768, 64, 64, True, False, True): (1, 256, 3, 4), + (1024, 1024, 32768, 128, 128, False, True, True): (7, 256, 1, 4), + (1024, 1024, 32768, 128, 128, True, False, True): (4, 256, 1, 4), + (1024, 1024, 65536, 16, 16, False, True, True): (1, 256, 3, 4), + (1024, 1024, 65536, 16, 16, True, False, True): (1, 256, 3, 1), + (1024, 1024, 65536, 32, 32, False, True, True): (1, 512, 3, 2), + (1024, 1024, 65536, 32, 32, True, False, True): (1, 256, 3, 4), + (1024, 1024, 65536, 64, 64, False, True, True): (2, 256, 2, 4), + (1024, 1024, 65536, 64, 64, True, False, True): (1, 512, 3, 4), + (1024, 1024, 65536, 128, 128, False, True, True): (10, 512, 1, 4), + (1024, 1024, 65536, 128, 128, True, False, True): (4, 512, 1, 4), + (1024, 1024, 131072, 16, 16, False, True, True): (11, 512, 3, 2), + (1024, 1024, 131072, 16, 16, True, False, True): (11, 512, 3, 2), + (1024, 1024, 131072, 32, 32, False, True, True): (7, 1024, 3, 2), + (1024, 1024, 131072, 32, 32, True, False, True): (6, 512, 3, 4), + (1024, 1024, 131072, 64, 64, False, True, True): (1, 512, 2, 4), + (1024, 1024, 131072, 64, 64, True, False, True): (4, 1024, 3, 4), + (1024, 1024, 131072, 128, 128, False, True, True): (12, 1024, 1, 4), + (1024, 1024, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), + (2048, 2048, 256, 16, 16, False, True, True): (4, 4, 6, 2), + (2048, 2048, 256, 16, 16, True, False, True): (2, 8, 4, 1), + (2048, 2048, 256, 32, 32, False, True, True): (3, 4, 4, 2), + (2048, 2048, 256, 32, 32, True, False, True): (1, 4, 5, 2), + (2048, 2048, 256, 64, 64, False, True, True): (2, 4, 4, 4), + (2048, 2048, 256, 64, 64, True, False, True): (2, 4, 4, 4), + (2048, 2048, 256, 128, 128, False, True, True): (3, 2, 2, 8), + (2048, 2048, 256, 128, 128, True, False, True): (5, 2, 2, 8), + (2048, 2048, 512, 16, 16, False, True, True): (5, 4, 4, 4), + (2048, 2048, 512, 16, 16, True, False, True): (2, 4, 4, 2), + (2048, 2048, 512, 32, 32, False, True, True): (1, 4, 3, 4), + (2048, 2048, 512, 32, 32, True, False, True): (3, 4, 4, 2), + (2048, 2048, 512, 64, 64, False, True, True): (1, 8, 3, 4), + (2048, 2048, 512, 64, 64, True, False, True): (1, 8, 3, 2), + (2048, 2048, 512, 128, 128, False, True, True): (3, 4, 2, 8), + (2048, 2048, 512, 128, 128, True, False, True): (2, 4, 2, 8), + (2048, 2048, 1024, 16, 16, False, True, True): (3, 4, 3, 4), + (2048, 2048, 1024, 16, 16, True, False, True): (2, 8, 3, 2), + (2048, 2048, 1024, 32, 32, False, True, True): (3, 8, 3, 4), + (2048, 2048, 1024, 32, 32, True, False, True): (1, 8, 3, 2), + (2048, 2048, 
1024, 64, 64, False, True, True): (1, 8, 3, 4), + (2048, 2048, 1024, 64, 64, True, False, True): (1, 8, 3, 4), + (2048, 2048, 1024, 128, 128, False, True, True): (4, 8, 2, 8), + (2048, 2048, 1024, 128, 128, True, False, True): (4, 8, 1, 4), + (2048, 2048, 2048, 16, 16, False, True, True): (4, 16, 3, 2), + (2048, 2048, 2048, 16, 16, True, False, True): (2, 16, 3, 2), + (2048, 2048, 2048, 32, 32, False, True, True): (1, 16, 3, 4), + (2048, 2048, 2048, 32, 32, True, False, True): (1, 16, 3, 2), + (2048, 2048, 2048, 64, 64, False, True, True): (1, 16, 3, 4), + (2048, 2048, 2048, 64, 64, True, False, True): (1, 16, 3, 4), + (2048, 2048, 2048, 128, 128, False, True, True): (6, 16, 2, 8), + (2048, 2048, 2048, 128, 128, True, False, True): (5, 16, 1, 4), + (2048, 2048, 4096, 16, 16, False, True, True): (4, 32, 4, 2), + (2048, 2048, 4096, 16, 16, True, False, True): (4, 32, 3, 2), + (2048, 2048, 4096, 32, 32, False, True, True): (4, 16, 3, 8), + (2048, 2048, 4096, 32, 32, True, False, True): (4, 16, 3, 4), + (2048, 2048, 4096, 64, 64, False, True, True): (4, 32, 3, 4), + (2048, 2048, 4096, 64, 64, True, False, True): (4, 32, 3, 4), + (2048, 2048, 4096, 128, 128, False, True, True): (4, 32, 2, 8), + (2048, 2048, 4096, 128, 128, True, False, True): (2, 32, 1, 4), + (2048, 2048, 8192, 16, 16, False, True, True): (4, 64, 4, 2), + (2048, 2048, 8192, 16, 16, True, False, True): (4, 64, 4, 2), + (2048, 2048, 8192, 32, 32, False, True, True): (4, 32, 3, 8), + (2048, 2048, 8192, 32, 32, True, False, True): (4, 32, 4, 8), + (2048, 2048, 8192, 64, 64, False, True, True): (2, 64, 3, 4), + (2048, 2048, 8192, 64, 64, True, False, True): (4, 64, 3, 4), + (2048, 2048, 8192, 128, 128, False, True, True): (3, 64, 1, 4), + (2048, 2048, 8192, 128, 128, True, False, True): (2, 64, 1, 4), + (2048, 2048, 16384, 16, 16, False, True, True): (4, 64, 3, 4), + (2048, 2048, 16384, 16, 16, True, False, True): (1, 64, 3, 4), + (2048, 2048, 16384, 32, 32, False, True, True): (4, 64, 3, 4), + (2048, 2048, 16384, 32, 32, True, False, True): (4, 64, 3, 4), + (2048, 2048, 16384, 64, 64, False, True, True): (4, 128, 3, 4), + (2048, 2048, 16384, 64, 64, True, False, True): (4, 128, 3, 4), + (2048, 2048, 16384, 128, 128, False, True, True): (3, 128, 1, 4), + (2048, 2048, 16384, 128, 128, True, False, True): (2, 128, 1, 4), + (2048, 2048, 32768, 16, 16, False, True, True): (8, 128, 3, 2), + (2048, 2048, 32768, 16, 16, True, False, True): (8, 128, 3, 4), + (2048, 2048, 32768, 32, 32, False, True, True): (8, 128, 3, 4), + (2048, 2048, 32768, 32, 32, True, False, True): (8, 128, 3, 4), + (2048, 2048, 32768, 64, 64, False, True, True): (8, 256, 3, 4), + (2048, 2048, 32768, 64, 64, True, False, True): (8, 256, 3, 4), + (2048, 2048, 32768, 128, 128, False, True, True): (3, 256, 1, 4), + (2048, 2048, 32768, 128, 128, True, False, True): (1, 256, 1, 4), + (2048, 2048, 65536, 16, 16, False, True, True): (9, 256, 3, 2), + (2048, 2048, 65536, 16, 16, True, False, True): (9, 256, 4, 4), + (2048, 2048, 65536, 32, 32, False, True, True): (7, 256, 3, 4), + (2048, 2048, 65536, 32, 32, True, False, True): (7, 256, 3, 4), + (2048, 2048, 65536, 64, 64, False, True, True): (2, 256, 2, 4), + (2048, 2048, 65536, 64, 64, True, False, True): (9, 512, 3, 4), + (2048, 2048, 65536, 128, 128, False, True, True): (5, 512, 1, 4), + (2048, 2048, 65536, 128, 128, True, False, True): (1, 512, 1, 4), + (2048, 2048, 131072, 16, 16, False, True, True): (9, 512, 3, 2), + (2048, 2048, 131072, 16, 16, True, False, True): (9, 512, 4, 4), + (2048, 2048, 131072, 32, 32, False, 
True, True): (7, 512, 3, 4), + (2048, 2048, 131072, 32, 32, True, False, True): (3, 512, 3, 4), + (2048, 2048, 131072, 64, 64, False, True, True): (1, 512, 2, 4), + (2048, 2048, 131072, 64, 64, True, False, True): (2, 1024, 3, 4), + (2048, 2048, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), + (2048, 2048, 131072, 128, 128, True, False, True): (1, 1024, 1, 4), + (4096, 4096, 256, 16, 16, False, True, True): (2, 2, 6, 4), + (4096, 4096, 256, 16, 16, True, False, True): (2, 2, 5, 4), + (4096, 4096, 256, 32, 32, False, True, True): (7, 2, 4, 4), + (4096, 4096, 256, 32, 32, True, False, True): (1, 2, 4, 4), + (4096, 4096, 256, 64, 64, False, True, True): (3, 4, 3, 4), + (4096, 4096, 256, 64, 64, True, False, True): (3, 4, 3, 4), + (4096, 4096, 256, 128, 128, False, True, True): (1, 2, 2, 8), + (4096, 4096, 256, 128, 128, True, False, True): (1, 2, 2, 8), + (4096, 4096, 512, 16, 16, False, True, True): (4, 2, 3, 4), + (4096, 4096, 512, 16, 16, True, False, True): (2, 4, 3, 2), + (4096, 4096, 512, 32, 32, False, True, True): (3, 4, 3, 4), + (4096, 4096, 512, 32, 32, True, False, True): (3, 4, 3, 2), + (4096, 4096, 512, 64, 64, False, True, True): (3, 4, 3, 4), + (4096, 4096, 512, 64, 64, True, False, True): (3, 4, 3, 4), + (4096, 4096, 512, 128, 128, False, True, True): (2, 4, 2, 8), + (4096, 4096, 512, 128, 128, True, False, True): (2, 4, 1, 4), + (4096, 4096, 1024, 16, 16, False, True, True): (2, 8, 3, 2), + (4096, 4096, 1024, 16, 16, True, False, True): (2, 8, 3, 2), + (4096, 4096, 1024, 32, 32, False, True, True): (3, 8, 3, 4), + (4096, 4096, 1024, 32, 32, True, False, True): (1, 8, 3, 2), + (4096, 4096, 1024, 64, 64, False, True, True): (1, 8, 3, 4), + (4096, 4096, 1024, 64, 64, True, False, True): (1, 8, 3, 4), + (4096, 4096, 1024, 128, 128, False, True, True): (2, 8, 2, 8), + (4096, 4096, 1024, 128, 128, True, False, True): (2, 8, 2, 8), + (4096, 4096, 2048, 16, 16, False, True, True): (2, 8, 4, 4), + (4096, 4096, 2048, 16, 16, True, False, True): (2, 8, 4, 4), + (4096, 4096, 2048, 32, 32, False, True, True): (4, 8, 4, 8), + (4096, 4096, 2048, 32, 32, True, False, True): (4, 8, 4, 8), + (4096, 4096, 2048, 64, 64, False, True, True): (1, 16, 3, 4), + (4096, 4096, 2048, 64, 64, True, False, True): (4, 16, 3, 4), + (4096, 4096, 2048, 128, 128, False, True, True): (2, 16, 2, 8), + (4096, 4096, 2048, 128, 128, True, False, True): (4, 16, 1, 4), + (4096, 4096, 4096, 16, 16, False, True, True): (4, 32, 4, 4), + (4096, 4096, 4096, 16, 16, True, False, True): (4, 32, 4, 2), + (4096, 4096, 4096, 32, 32, False, True, True): (4, 16, 4, 8), + (4096, 4096, 4096, 32, 32, True, False, True): (4, 16, 3, 8), + (4096, 4096, 4096, 64, 64, False, True, True): (1, 32, 3, 4), + (4096, 4096, 4096, 64, 64, True, False, True): (1, 32, 3, 4), + (4096, 4096, 4096, 128, 128, False, True, True): (3, 32, 1, 4), + (4096, 4096, 4096, 128, 128, True, False, True): (2, 32, 1, 4), + (4096, 4096, 8192, 16, 16, False, True, True): (4, 64, 4, 2), + (4096, 4096, 8192, 16, 16, True, False, True): (4, 64, 4, 2), + (4096, 4096, 8192, 32, 32, False, True, True): (4, 32, 4, 8), + (4096, 4096, 8192, 32, 32, True, False, True): (4, 32, 4, 8), + (4096, 4096, 8192, 64, 64, False, True, True): (2, 64, 3, 4), + (4096, 4096, 8192, 64, 64, True, False, True): (2, 64, 3, 4), + (4096, 4096, 8192, 128, 128, False, True, True): (3, 64, 1, 4), + (4096, 4096, 8192, 128, 128, True, False, True): (1, 64, 1, 4), + (4096, 4096, 16384, 16, 16, False, True, True): (4, 64, 3, 4), + (4096, 4096, 16384, 16, 16, True, False, True): (4, 64, 4, 4), + 
(4096, 4096, 16384, 32, 32, False, True, True): (4, 64, 4, 8), + (4096, 4096, 16384, 32, 32, True, False, True): (4, 64, 4, 8), + (4096, 4096, 16384, 64, 64, False, True, True): (1, 64, 2, 4), + (4096, 4096, 16384, 64, 64, True, False, True): (1, 64, 3, 8), + (4096, 4096, 16384, 128, 128, False, True, True): (3, 128, 1, 4), + (4096, 4096, 16384, 128, 128, True, False, True): (1, 128, 1, 4), + (4096, 4096, 32768, 16, 16, False, True, True): (8, 128, 3, 2), + (4096, 4096, 32768, 16, 16, True, False, True): (5, 128, 4, 4), + (4096, 4096, 32768, 32, 32, False, True, True): (3, 128, 4, 4), + (4096, 4096, 32768, 32, 32, True, False, True): (3, 128, 4, 8), + (4096, 4096, 32768, 64, 64, False, True, True): (1, 128, 2, 4), + (4096, 4096, 32768, 64, 64, True, False, True): (3, 256, 3, 4), + (4096, 4096, 32768, 128, 128, False, True, True): (3, 256, 1, 4), + (4096, 4096, 32768, 128, 128, True, False, True): (1, 256, 1, 4), + (4096, 4096, 65536, 16, 16, False, True, True): (5, 256, 4, 4), + (4096, 4096, 65536, 16, 16, True, False, True): (5, 256, 4, 4), + (4096, 4096, 65536, 32, 32, False, True, True): (4, 256, 4, 8), + (4096, 4096, 65536, 32, 32, True, False, True): (4, 256, 3, 8), + (4096, 4096, 65536, 64, 64, False, True, True): (1, 256, 2, 4), + (4096, 4096, 65536, 64, 64, True, False, True): (1, 512, 3, 4), + (4096, 4096, 65536, 128, 128, False, True, True): (3, 512, 1, 4), + (4096, 4096, 65536, 128, 128, True, False, True): (1, 512, 1, 4), + (4096, 4096, 131072, 16, 16, False, True, True): (4, 512, 3, 4), + (4096, 4096, 131072, 16, 16, True, False, True): (5, 512, 4, 4), + (4096, 4096, 131072, 32, 32, False, True, True): (1, 512, 4, 8), + (4096, 4096, 131072, 32, 32, True, False, True): (4, 512, 4, 8), + (4096, 4096, 131072, 64, 64, False, True, True): (1, 512, 2, 4), + (4096, 4096, 131072, 64, 64, True, False, True): (1, 512, 2, 4), + (4096, 4096, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), + (4096, 4096, 131072, 128, 128, True, False, True): (1, 1024, 1, 4), + (8192, 8192, 256, 16, 16, False, True, True): (2, 2, 6, 4), + (8192, 8192, 256, 16, 16, True, False, True): (2, 4, 2, 2), + (8192, 8192, 256, 32, 32, False, True, True): (4, 2, 3, 4), + (8192, 8192, 256, 32, 32, True, False, True): (4, 2, 3, 4), + (8192, 8192, 256, 64, 64, False, True, True): (2, 2, 3, 8), + (8192, 8192, 256, 64, 64, True, False, True): (6, 2, 3, 8), + (8192, 8192, 256, 128, 128, False, True, True): (3, 2, 1, 4), + (8192, 8192, 256, 128, 128, True, False, True): (1, 2, 1, 4), + (8192, 8192, 512, 16, 16, False, True, True): (4, 4, 3, 2), + (8192, 8192, 512, 16, 16, True, False, True): (4, 4, 3, 4), + (8192, 8192, 512, 32, 32, False, True, True): (1, 4, 3, 4), + (8192, 8192, 512, 32, 32, True, False, True): (5, 4, 3, 2), + (8192, 8192, 512, 64, 64, False, True, True): (1, 4, 3, 4), + (8192, 8192, 512, 64, 64, True, False, True): (2, 2, 3, 8), + (8192, 8192, 512, 128, 128, False, True, True): (4, 4, 2, 8), + (8192, 8192, 512, 128, 128, True, False, True): (4, 4, 2, 8), + (8192, 8192, 1024, 16, 16, False, True, True): (4, 8, 4, 4), + (8192, 8192, 1024, 16, 16, True, False, True): (4, 8, 4, 4), + (8192, 8192, 1024, 32, 32, False, True, True): (2, 4, 4, 8), + (8192, 8192, 1024, 32, 32, True, False, True): (1, 4, 3, 4), + (8192, 8192, 1024, 64, 64, False, True, True): (4, 8, 3, 4), + (8192, 8192, 1024, 64, 64, True, False, True): (2, 8, 3, 4), + (8192, 8192, 1024, 128, 128, False, True, True): (4, 8, 2, 8), + (8192, 8192, 1024, 128, 128, True, False, True): (4, 8, 1, 4), + (8192, 8192, 2048, 16, 16, False, True, 
True): (2, 8, 4, 4), + (8192, 8192, 2048, 16, 16, True, False, True): (2, 8, 4, 4), + (8192, 8192, 2048, 32, 32, False, True, True): (2, 8, 4, 8), + (8192, 8192, 2048, 32, 32, True, False, True): (2, 8, 4, 8), + (8192, 8192, 2048, 64, 64, False, True, True): (4, 8, 2, 4), + (8192, 8192, 2048, 64, 64, True, False, True): (4, 16, 3, 4), + (8192, 8192, 2048, 128, 128, False, True, True): (6, 16, 1, 4), + (8192, 8192, 2048, 128, 128, True, False, True): (4, 16, 1, 4), + (8192, 8192, 4096, 16, 16, False, True, True): (4, 32, 4, 2), + (8192, 8192, 4096, 16, 16, True, False, True): (4, 32, 4, 2), + (8192, 8192, 4096, 32, 32, False, True, True): (2, 16, 4, 8), + (8192, 8192, 4096, 32, 32, True, False, True): (4, 16, 4, 8), + (8192, 8192, 4096, 64, 64, False, True, True): (4, 16, 2, 4), + (8192, 8192, 4096, 64, 64, True, False, True): (4, 16, 2, 4), + (8192, 8192, 4096, 128, 128, False, True, True): (6, 32, 1, 4), + (8192, 8192, 4096, 128, 128, True, False, True): (4, 32, 1, 4), + (8192, 8192, 8192, 16, 16, False, True, True): (4, 64, 4, 2), + (8192, 8192, 8192, 16, 16, True, False, True): (4, 64, 4, 2), + (8192, 8192, 8192, 32, 32, False, True, True): (2, 32, 4, 8), + (8192, 8192, 8192, 32, 32, True, False, True): (2, 32, 4, 8), + (8192, 8192, 8192, 64, 64, False, True, True): (2, 32, 2, 4), + (8192, 8192, 8192, 64, 64, True, False, True): (4, 32, 2, 4), + (8192, 8192, 8192, 128, 128, False, True, True): (6, 64, 1, 4), + (8192, 8192, 8192, 128, 128, True, False, True): (4, 64, 1, 4), + (8192, 8192, 16384, 16, 16, False, True, True): (4, 64, 3, 4), + (8192, 8192, 16384, 16, 16, True, False, True): (4, 64, 4, 4), + (8192, 8192, 16384, 32, 32, False, True, True): (4, 64, 4, 8), + (8192, 8192, 16384, 32, 32, True, False, True): (4, 64, 4, 8), + (8192, 8192, 16384, 64, 64, False, True, True): (4, 64, 2, 4), + (8192, 8192, 16384, 64, 64, True, False, True): (4, 64, 3, 8), + (8192, 8192, 16384, 128, 128, False, True, True): (6, 128, 1, 4), + (8192, 8192, 16384, 128, 128, True, False, True): (4, 128, 1, 4), + (8192, 8192, 32768, 16, 16, False, True, True): (3, 128, 4, 4), + (8192, 8192, 32768, 16, 16, True, False, True): (3, 128, 4, 4), + (8192, 8192, 32768, 32, 32, False, True, True): (2, 128, 4, 8), + (8192, 8192, 32768, 32, 32, True, False, True): (2, 128, 4, 8), + (8192, 8192, 32768, 64, 64, False, True, True): (2, 128, 2, 4), + (8192, 8192, 32768, 64, 64, True, False, True): (2, 128, 3, 8), + (8192, 8192, 32768, 128, 128, False, True, True): (6, 256, 1, 4), + (8192, 8192, 32768, 128, 128, True, False, True): (4, 256, 1, 4), + (8192, 8192, 65536, 16, 16, False, True, True): (3, 256, 4, 4), + (8192, 8192, 65536, 16, 16, True, False, True): (4, 256, 4, 4), + (8192, 8192, 65536, 32, 32, False, True, True): (2, 256, 4, 8), + (8192, 8192, 65536, 32, 32, True, False, True): (2, 256, 3, 8), + (8192, 8192, 65536, 64, 64, False, True, True): (2, 256, 2, 4), + (8192, 8192, 65536, 64, 64, True, False, True): (4, 256, 3, 8), + (8192, 8192, 65536, 128, 128, False, True, True): (6, 512, 1, 4), + (8192, 8192, 65536, 128, 128, True, False, True): (4, 512, 1, 4), + (8192, 8192, 131072, 16, 16, False, True, True): (4, 512, 4, 4), + (8192, 8192, 131072, 16, 16, True, False, True): (3, 512, 4, 4), + (8192, 8192, 131072, 32, 32, False, True, True): (2, 512, 4, 8), + (8192, 8192, 131072, 32, 32, True, False, True): (2, 512, 4, 8), + (8192, 8192, 131072, 64, 64, False, True, True): (2, 512, 2, 4), + (8192, 8192, 131072, 64, 64, True, False, True): (2, 512, 2, 4), + (8192, 8192, 131072, 128, 128, False, True, True): (4, 
1024, 1, 4), + (8192, 8192, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), + (16384, 16384, 256, 16, 16, False, True, True): (2, 2, 3, 2), + (16384, 16384, 256, 16, 16, True, False, True): (2, 2, 6, 4), + (16384, 16384, 256, 32, 32, False, True, True): (4, 2, 3, 4), + (16384, 16384, 256, 32, 32, True, False, True): (4, 2, 3, 2), + (16384, 16384, 256, 64, 64, False, True, True): (2, 2, 5, 4), + (16384, 16384, 256, 64, 64, True, False, True): (2, 2, 3, 8), + (16384, 16384, 256, 128, 128, False, True, True): (4, 2, 2, 8), + (16384, 16384, 256, 128, 128, True, False, True): (2, 2, 1, 4), + (16384, 16384, 512, 16, 16, False, True, True): (1, 2, 4, 4), + (16384, 16384, 512, 16, 16, True, False, True): (1, 2, 4, 4), + (16384, 16384, 512, 32, 32, False, True, True): (2, 2, 3, 8), + (16384, 16384, 512, 32, 32, True, False, True): (2, 2, 4, 8), + (16384, 16384, 512, 64, 64, False, True, True): (4, 4, 3, 4), + (16384, 16384, 512, 64, 64, True, False, True): (2, 4, 3, 4), + (16384, 16384, 512, 128, 128, False, True, True): (4, 4, 2, 8), + (16384, 16384, 512, 128, 128, True, False, True): (4, 4, 2, 8), + (16384, 16384, 1024, 16, 16, False, True, True): (4, 8, 4, 4), + (16384, 16384, 1024, 16, 16, True, False, True): (2, 4, 4, 4), + (16384, 16384, 1024, 32, 32, False, True, True): (2, 4, 4, 8), + (16384, 16384, 1024, 32, 32, True, False, True): (2, 4, 4, 8), + (16384, 16384, 1024, 64, 64, False, True, True): (4, 4, 2, 4), + (16384, 16384, 1024, 64, 64, True, False, True): (2, 4, 2, 4), + (16384, 16384, 1024, 128, 128, False, True, True): (6, 8, 1, 4), + (16384, 16384, 1024, 128, 128, True, False, True): (4, 8, 1, 4), + (16384, 16384, 2048, 16, 16, False, True, True): (2, 8, 4, 4), + (16384, 16384, 2048, 16, 16, True, False, True): (2, 8, 4, 4), + (16384, 16384, 2048, 32, 32, False, True, True): (2, 8, 4, 8), + (16384, 16384, 2048, 32, 32, True, False, True): (2, 8, 4, 8), + (16384, 16384, 2048, 64, 64, False, True, True): (2, 8, 2, 4), + (16384, 16384, 2048, 64, 64, True, False, True): (2, 8, 2, 4), + (16384, 16384, 2048, 128, 128, False, True, True): (4, 16, 2, 8), + (16384, 16384, 2048, 128, 128, True, False, True): (4, 16, 1, 4), + (16384, 16384, 4096, 16, 16, False, True, True): (2, 16, 4, 4), + (16384, 16384, 4096, 16, 16, True, False, True): (2, 16, 4, 4), + (16384, 16384, 4096, 32, 32, False, True, True): (1, 16, 4, 8), + (16384, 16384, 4096, 32, 32, True, False, True): (2, 16, 3, 4), + (16384, 16384, 4096, 64, 64, False, True, True): (1, 16, 2, 4), + (16384, 16384, 4096, 64, 64, True, False, True): (2, 16, 2, 4), + (16384, 16384, 4096, 128, 128, False, True, True): (4, 32, 2, 8), + (16384, 16384, 4096, 128, 128, True, False, True): (4, 32, 1, 4), + (16384, 16384, 8192, 16, 16, False, True, True): (2, 64, 4, 2), + (16384, 16384, 8192, 16, 16, True, False, True): (2, 64, 4, 2), + (16384, 16384, 8192, 32, 32, False, True, True): (2, 32, 4, 8), + (16384, 16384, 8192, 32, 32, True, False, True): (2, 32, 4, 8), + (16384, 16384, 8192, 64, 64, False, True, True): (2, 32, 2, 4), + (16384, 16384, 8192, 64, 64, True, False, True): (2, 32, 4, 8), + (16384, 16384, 8192, 128, 128, False, True, True): (4, 64, 2, 8), + (16384, 16384, 8192, 128, 128, True, False, True): (4, 64, 1, 4), + (16384, 16384, 16384, 16, 16, False, True, True): (1, 64, 4, 4), + (16384, 16384, 16384, 16, 16, True, False, True): (1, 64, 4, 4), + (16384, 16384, 16384, 32, 32, False, True, True): (1, 64, 4, 8), + (16384, 16384, 16384, 32, 32, True, False, True): (1, 64, 4, 8), + (16384, 16384, 16384, 64, 64, False, True, True): (1, 
64, 2, 4), + (16384, 16384, 16384, 64, 64, True, False, True): (1, 64, 3, 8), + (16384, 16384, 16384, 128, 128, False, True, True): (4, 128, 1, 4), + (16384, 16384, 16384, 128, 128, True, False, True): (4, 128, 1, 4), + (16384, 16384, 32768, 16, 16, False, True, True): (1, 128, 4, 4), + (16384, 16384, 32768, 16, 16, True, False, True): (1, 128, 4, 4), + (16384, 16384, 32768, 32, 32, False, True, True): (1, 128, 3, 4), + (16384, 16384, 32768, 32, 32, True, False, True): (1, 128, 3, 8), + (16384, 16384, 32768, 64, 64, False, True, True): (2, 128, 2, 4), + (16384, 16384, 32768, 64, 64, True, False, True): (1, 128, 4, 8), + (16384, 16384, 32768, 128, 128, False, True, True): (4, 256, 2, 8), + (16384, 16384, 32768, 128, 128, True, False, True): (4, 256, 1, 4), + (16384, 16384, 65536, 16, 16, False, True, True): (1, 256, 3, 4), + (16384, 16384, 65536, 16, 16, True, False, True): (1, 256, 4, 4), + (16384, 16384, 65536, 32, 32, False, True, True): (1, 256, 4, 8), + (16384, 16384, 65536, 32, 32, True, False, True): (1, 256, 3, 4), + (16384, 16384, 65536, 64, 64, False, True, True): (2, 256, 2, 4), + (16384, 16384, 65536, 64, 64, True, False, True): (1, 256, 3, 8), + (16384, 16384, 65536, 128, 128, False, True, True): (4, 512, 2, 8), + (16384, 16384, 65536, 128, 128, True, False, True): (4, 512, 1, 4), + (16384, 16384, 131072, 16, 16, False, True, True): (1, 512, 4, 4), + (16384, 16384, 131072, 16, 16, True, False, True): (1, 512, 3, 2), + (16384, 16384, 131072, 32, 32, False, True, True): (1, 512, 4, 8), + (16384, 16384, 131072, 32, 32, True, False, True): (1, 512, 3, 2), + (16384, 16384, 131072, 64, 64, False, True, True): (1, 512, 2, 4), + (16384, 16384, 131072, 64, 64, True, False, True): (1, 512, 2, 4), + (16384, 16384, 131072, 128, 128, False, True, True): (4, 1024, 1, 4), + (16384, 16384, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), + }, + ("bsr_dense_addmm", "NVIDIA A100-SXM4-80GB", (0, torch.float32, 0.5)): { + (16, 16, 16, 16, 16, False, False, False): (2, 1, 1, 16), + (16, 16, 16, 16, 16, False, False, True): (1, 1, 2, 4), + (16, 16, 16, 16, 16, False, True, False): (1, 1, 2, 16), + (16, 16, 16, 16, 16, False, True, True): (2, 1, 2, 8), + (16, 16, 16, 16, 16, True, False, False): (1, 1, 1, 2), + (16, 16, 16, 16, 16, True, False, True): (2, 1, 1, 4), + (16, 16, 32, 16, 16, False, False, False): (1, 1, 1, 2), + (16, 16, 32, 16, 16, False, False, True): (1, 1, 2, 8), + (16, 16, 32, 16, 16, False, True, False): (1, 2, 1, 4), + (16, 16, 32, 16, 16, False, True, True): (1, 2, 2, 4), + (16, 16, 32, 16, 16, True, False, False): (1, 1, 2, 4), + (16, 16, 32, 16, 16, True, False, True): (1, 2, 2, 4), + (16, 16, 64, 16, 16, False, False, False): (1, 4, 1, 4), + (16, 16, 64, 16, 16, False, False, True): (2, 2, 1, 4), + (16, 16, 64, 16, 16, False, True, False): (1, 4, 1, 4), + (16, 16, 64, 16, 16, False, True, True): (1, 4, 1, 8), + (16, 16, 64, 16, 16, True, False, False): (1, 2, 1, 4), + (16, 16, 64, 16, 16, True, False, True): (1, 4, 2, 8), + (16, 32, 16, 16, 16, False, False, False): (1, 1, 2, 8), + (16, 32, 16, 16, 16, False, False, True): (2, 1, 1, 4), + (16, 32, 16, 16, 16, False, True, False): (1, 1, 1, 4), + (16, 32, 16, 16, 16, False, True, True): (1, 1, 1, 4), + (16, 32, 16, 16, 16, True, False, False): (1, 1, 1, 4), + (16, 32, 16, 16, 16, True, False, True): (1, 1, 2, 8), + (16, 32, 16, 16, 32, False, False, False): (1, 1, 2, 4), + (16, 32, 16, 16, 32, False, False, True): (2, 1, 2, 2), + (16, 32, 16, 16, 32, False, True, False): (1, 1, 1, 8), + (16, 32, 16, 16, 32, False, True, 
True): (1, 1, 1, 2), + (16, 32, 16, 16, 32, True, False, False): (3, 1, 1, 4), + (16, 32, 16, 16, 32, True, False, True): (1, 1, 1, 4), + (16, 32, 32, 16, 16, False, False, False): (1, 2, 1, 4), + (16, 32, 32, 16, 16, False, False, True): (2, 2, 1, 4), + (16, 32, 32, 16, 16, False, True, False): (1, 2, 1, 2), + (16, 32, 32, 16, 16, False, True, True): (1, 2, 1, 4), + (16, 32, 32, 16, 16, True, False, False): (1, 2, 1, 4), + (16, 32, 32, 16, 16, True, False, True): (1, 2, 1, 4), + (16, 32, 32, 16, 32, False, False, False): (1, 1, 2, 4), + (16, 32, 32, 16, 32, False, False, True): (1, 2, 1, 4), + (16, 32, 32, 16, 32, False, True, False): (1, 2, 2, 8), + (16, 32, 32, 16, 32, False, True, True): (1, 2, 1, 1), + (16, 32, 32, 16, 32, True, False, False): (1, 2, 1, 2), + (16, 32, 32, 16, 32, True, False, True): (1, 2, 1, 4), + (16, 32, 64, 16, 16, False, False, False): (1, 2, 1, 4), + (16, 32, 64, 16, 16, False, False, True): (2, 4, 1, 4), + (16, 32, 64, 16, 16, False, True, False): (1, 4, 2, 4), + (16, 32, 64, 16, 16, False, True, True): (1, 4, 1, 4), + (16, 32, 64, 16, 16, True, False, False): (1, 2, 2, 8), + (16, 32, 64, 16, 16, True, False, True): (1, 4, 1, 2), + (16, 32, 64, 16, 32, False, False, False): (1, 4, 1, 4), + (16, 32, 64, 16, 32, False, False, True): (1, 4, 3, 4), + (16, 32, 64, 16, 32, False, True, False): (1, 2, 1, 4), + (16, 32, 64, 16, 32, False, True, True): (1, 4, 1, 4), + (16, 32, 64, 16, 32, True, False, False): (1, 2, 1, 8), + (16, 32, 64, 16, 32, True, False, True): (1, 2, 1, 4), + (16, 64, 16, 16, 32, False, False, False): (1, 1, 1, 2), + (16, 64, 16, 16, 32, False, False, True): (1, 1, 1, 8), + (16, 64, 16, 16, 32, False, True, False): (1, 1, 1, 8), + (16, 64, 16, 16, 32, False, True, True): (1, 1, 1, 4), + (16, 64, 16, 16, 32, True, False, False): (1, 1, 1, 8), + (16, 64, 16, 16, 32, True, False, True): (1, 1, 1, 4), + (16, 64, 32, 16, 32, False, False, False): (1, 2, 1, 4), + (16, 64, 32, 16, 32, False, False, True): (1, 1, 1, 4), + (16, 64, 32, 16, 32, False, True, False): (1, 2, 1, 1), + (16, 64, 32, 16, 32, False, True, True): (1, 2, 1, 8), + (16, 64, 32, 16, 32, True, False, False): (2, 2, 1, 4), + (16, 64, 32, 16, 32, True, False, True): (2, 2, 1, 4), + (16, 64, 64, 16, 32, False, False, False): (1, 2, 1, 4), + (16, 64, 64, 16, 32, False, False, True): (1, 4, 1, 4), + (16, 64, 64, 16, 32, False, True, False): (1, 4, 1, 4), + (16, 64, 64, 16, 32, False, True, True): (1, 4, 1, 4), + (16, 64, 64, 16, 32, True, False, False): (1, 4, 1, 2), + (16, 64, 64, 16, 32, True, False, True): (3, 4, 1, 4), + (32, 16, 16, 16, 16, False, False, False): (1, 1, 2, 4), + (32, 16, 16, 16, 16, False, False, True): (1, 1, 1, 2), + (32, 16, 16, 16, 16, False, True, False): (1, 1, 2, 4), + (32, 16, 16, 16, 16, False, True, True): (1, 1, 2, 4), + (32, 16, 16, 16, 16, True, False, False): (1, 1, 3, 8), + (32, 16, 16, 16, 16, True, False, True): (1, 1, 2, 4), + (32, 16, 32, 16, 16, False, False, False): (1, 2, 1, 4), + (32, 16, 32, 16, 16, False, False, True): (1, 2, 3, 4), + (32, 16, 32, 16, 16, False, True, False): (1, 1, 1, 8), + (32, 16, 32, 16, 16, False, True, True): (1, 2, 1, 4), + (32, 16, 32, 16, 16, True, False, False): (1, 1, 1, 2), + (32, 16, 32, 16, 16, True, False, True): (1, 1, 1, 4), + (32, 16, 64, 16, 16, False, False, False): (1, 4, 1, 4), + (32, 16, 64, 16, 16, False, False, True): (3, 4, 1, 4), + (32, 16, 64, 16, 16, False, True, False): (1, 4, 1, 1), + (32, 16, 64, 16, 16, False, True, True): (1, 4, 1, 4), + (32, 16, 64, 16, 16, True, False, False): (1, 4, 1, 4), + 
(32, 16, 64, 16, 16, True, False, True): (1, 4, 1, 4), + (32, 32, 16, 16, 16, False, False, False): (1, 1, 1, 2), + (32, 32, 16, 16, 16, False, False, True): (2, 1, 1, 4), + (32, 32, 16, 16, 16, False, True, False): (1, 1, 1, 2), + (32, 32, 16, 16, 16, False, True, True): (2, 1, 1, 4), + (32, 32, 16, 16, 16, True, False, False): (3, 1, 2, 4), + (32, 32, 16, 16, 16, True, False, True): (1, 1, 2, 4), + (32, 32, 16, 16, 32, False, False, False): (2, 1, 1, 2), + (32, 32, 16, 16, 32, False, False, True): (1, 1, 1, 4), + (32, 32, 16, 16, 32, False, True, False): (1, 1, 1, 4), + (32, 32, 16, 16, 32, False, True, True): (1, 1, 1, 8), + (32, 32, 16, 16, 32, True, False, False): (1, 1, 1, 8), + (32, 32, 16, 16, 32, True, False, True): (1, 1, 1, 4), + (32, 32, 16, 32, 32, False, False, False): (2, 1, 1, 4), + (32, 32, 16, 32, 32, False, False, True): (1, 1, 2, 4), + (32, 32, 16, 32, 32, False, True, False): (2, 1, 1, 1), + (32, 32, 16, 32, 32, False, True, True): (2, 1, 2, 4), + (32, 32, 16, 32, 32, True, False, False): (1, 1, 1, 8), + (32, 32, 16, 32, 32, True, False, True): (1, 1, 1, 4), + (32, 32, 32, 16, 16, False, False, False): (1, 1, 1, 4), + (32, 32, 32, 16, 16, False, False, True): (1, 2, 1, 2), + (32, 32, 32, 16, 16, False, True, False): (2, 2, 1, 4), + (32, 32, 32, 16, 16, False, True, True): (1, 2, 2, 4), + (32, 32, 32, 16, 16, True, False, False): (1, 2, 1, 4), + (32, 32, 32, 16, 16, True, False, True): (2, 2, 1, 4), + (32, 32, 32, 16, 32, False, False, False): (1, 2, 1, 4), + (32, 32, 32, 16, 32, False, False, True): (1, 2, 1, 4), + (32, 32, 32, 16, 32, False, True, False): (1, 2, 1, 4), + (32, 32, 32, 16, 32, False, True, True): (1, 2, 1, 4), + (32, 32, 32, 16, 32, True, False, False): (2, 1, 1, 2), + (32, 32, 32, 16, 32, True, False, True): (2, 2, 2, 4), + (32, 32, 32, 32, 32, False, False, False): (1, 1, 1, 4), + (32, 32, 32, 32, 32, False, False, True): (1, 1, 1, 2), + (32, 32, 32, 32, 32, False, True, False): (1, 1, 1, 4), + (32, 32, 32, 32, 32, False, True, True): (1, 1, 2, 2), + (32, 32, 32, 32, 32, True, False, False): (1, 1, 1, 2), + (32, 32, 32, 32, 32, True, False, True): (1, 1, 2, 1), + (32, 32, 64, 16, 16, False, False, False): (2, 4, 1, 4), + (32, 32, 64, 16, 16, False, False, True): (1, 4, 2, 4), + (32, 32, 64, 16, 16, False, True, False): (1, 4, 1, 4), + (32, 32, 64, 16, 16, False, True, True): (1, 4, 1, 4), + (32, 32, 64, 16, 16, True, False, False): (1, 2, 1, 4), + (32, 32, 64, 16, 16, True, False, True): (2, 4, 1, 4), + (32, 32, 64, 16, 32, False, False, False): (1, 4, 1, 8), + (32, 32, 64, 16, 32, False, False, True): (1, 4, 1, 4), + (32, 32, 64, 16, 32, False, True, False): (1, 4, 1, 4), + (32, 32, 64, 16, 32, False, True, True): (2, 4, 1, 4), + (32, 32, 64, 16, 32, True, False, False): (1, 2, 2, 4), + (32, 32, 64, 16, 32, True, False, True): (2, 4, 1, 4), + (32, 32, 64, 32, 32, False, False, False): (2, 2, 1, 4), + (32, 32, 64, 32, 32, False, False, True): (1, 1, 1, 4), + (32, 32, 64, 32, 32, False, True, False): (1, 1, 1, 8), + (32, 32, 64, 32, 32, False, True, True): (2, 1, 1, 4), + (32, 32, 64, 32, 32, True, False, False): (1, 1, 1, 4), + (32, 32, 64, 32, 32, True, False, True): (1, 2, 1, 1), + (32, 64, 16, 16, 32, False, False, False): (1, 1, 2, 2), + (32, 64, 16, 16, 32, False, False, True): (2, 1, 1, 4), + (32, 64, 16, 16, 32, False, True, False): (1, 1, 1, 8), + (32, 64, 16, 16, 32, False, True, True): (1, 1, 3, 4), + (32, 64, 16, 16, 32, True, False, False): (1, 1, 1, 2), + (32, 64, 16, 16, 32, True, False, True): (1, 1, 2, 4), + (32, 64, 16, 32, 32, False, 
False, False): (1, 1, 1, 2), + (32, 64, 16, 32, 32, False, False, True): (1, 1, 3, 4), + (32, 64, 16, 32, 32, False, True, False): (1, 1, 2, 4), + (32, 64, 16, 32, 32, False, True, True): (1, 1, 1, 8), + (32, 64, 16, 32, 32, True, False, False): (1, 1, 2, 4), + (32, 64, 16, 32, 32, True, False, True): (1, 1, 1, 8), + (32, 64, 32, 16, 32, False, False, False): (1, 2, 1, 4), + (32, 64, 32, 16, 32, False, False, True): (1, 2, 3, 4), + (32, 64, 32, 16, 32, False, True, False): (1, 2, 1, 8), + (32, 64, 32, 16, 32, False, True, True): (3, 2, 1, 4), + (32, 64, 32, 16, 32, True, False, False): (1, 1, 1, 8), + (32, 64, 32, 16, 32, True, False, True): (1, 2, 1, 4), + (32, 64, 32, 32, 32, False, False, False): (1, 1, 1, 1), + (32, 64, 32, 32, 32, False, False, True): (1, 1, 1, 4), + (32, 64, 32, 32, 32, False, True, False): (1, 1, 1, 4), + (32, 64, 32, 32, 32, False, True, True): (1, 1, 1, 4), + (32, 64, 32, 32, 32, True, False, False): (1, 1, 1, 4), + (32, 64, 32, 32, 32, True, False, True): (1, 1, 2, 8), + (32, 64, 64, 16, 32, False, False, False): (2, 4, 1, 4), + (32, 64, 64, 16, 32, False, False, True): (1, 4, 1, 4), + (32, 64, 64, 16, 32, False, True, False): (1, 4, 1, 4), + (32, 64, 64, 16, 32, False, True, True): (2, 4, 1, 4), + (32, 64, 64, 16, 32, True, False, False): (1, 4, 1, 4), + (32, 64, 64, 16, 32, True, False, True): (1, 4, 1, 4), + (32, 64, 64, 32, 32, False, False, False): (2, 2, 1, 4), + (32, 64, 64, 32, 32, False, False, True): (1, 2, 1, 8), + (32, 64, 64, 32, 32, False, True, False): (1, 2, 1, 4), + (32, 64, 64, 32, 32, False, True, True): (1, 2, 1, 4), + (32, 64, 64, 32, 32, True, False, False): (2, 2, 1, 4), + (32, 64, 64, 32, 32, True, False, True): (1, 2, 3, 8), + (64, 32, 16, 32, 32, False, False, False): (1, 1, 1, 4), + (64, 32, 16, 32, 32, False, False, True): (3, 1, 2, 4), + (64, 32, 16, 32, 32, False, True, False): (2, 1, 1, 2), + (64, 32, 16, 32, 32, False, True, True): (1, 1, 1, 8), + (64, 32, 16, 32, 32, True, False, False): (1, 1, 1, 2), + (64, 32, 16, 32, 32, True, False, True): (1, 1, 1, 4), + (64, 32, 32, 32, 32, False, False, False): (1, 1, 1, 4), + (64, 32, 32, 32, 32, False, False, True): (1, 1, 2, 8), + (64, 32, 32, 32, 32, False, True, False): (1, 1, 1, 8), + (64, 32, 32, 32, 32, False, True, True): (1, 1, 1, 4), + (64, 32, 32, 32, 32, True, False, False): (1, 1, 2, 4), + (64, 32, 32, 32, 32, True, False, True): (1, 1, 3, 8), + (64, 32, 64, 32, 32, False, False, False): (1, 2, 1, 4), + (64, 32, 64, 32, 32, False, False, True): (2, 2, 1, 4), + (64, 32, 64, 32, 32, False, True, False): (1, 1, 1, 4), + (64, 32, 64, 32, 32, False, True, True): (1, 2, 1, 8), + (64, 32, 64, 32, 32, True, False, False): (2, 2, 1, 4), + (64, 32, 64, 32, 32, True, False, True): (1, 2, 1, 8), + (64, 64, 16, 32, 32, False, False, False): (1, 1, 2, 8), + (64, 64, 16, 32, 32, False, False, True): (2, 1, 2, 4), + (64, 64, 16, 32, 32, False, True, False): (1, 1, 1, 2), + (64, 64, 16, 32, 32, False, True, True): (1, 1, 2, 4), + (64, 64, 16, 32, 32, True, False, False): (1, 1, 1, 2), + (64, 64, 16, 32, 32, True, False, True): (1, 1, 2, 4), + (64, 64, 32, 32, 32, False, False, False): (1, 1, 1, 4), + (64, 64, 32, 32, 32, False, False, True): (2, 1, 1, 4), + (64, 64, 32, 32, 32, False, True, False): (1, 1, 1, 8), + (64, 64, 32, 32, 32, False, True, True): (2, 1, 1, 4), + (64, 64, 32, 32, 32, True, False, False): (1, 1, 1, 4), + (64, 64, 32, 32, 32, True, False, True): (1, 1, 1, 8), + (64, 64, 64, 32, 32, False, False, False): (2, 2, 1, 4), + (64, 64, 64, 32, 32, False, False, True): (1, 2, 1, 
4), + (64, 64, 64, 32, 32, False, True, False): (1, 2, 1, 4), + (64, 64, 64, 32, 32, False, True, True): (2, 2, 1, 4), + (64, 64, 64, 32, 32, True, False, False): (1, 1, 1, 8), + (64, 64, 64, 32, 32, True, False, True): (1, 2, 2, 4), + (256, 256, 256, 16, 16, False, True, True): (1, 16, 3, 4), + (256, 256, 256, 16, 16, True, False, True): (2, 16, 1, 4), + (256, 256, 256, 32, 32, False, True, True): (1, 8, 4, 8), + (256, 256, 256, 32, 32, True, False, True): (4, 8, 4, 4), + (256, 256, 256, 64, 64, False, True, True): (1, 4, 4, 8), + (256, 256, 256, 64, 64, True, False, True): (1, 4, 3, 8), + (256, 256, 256, 128, 128, False, True, True): (7, 2, 1, 32), + (256, 256, 256, 128, 128, True, False, True): (3, 2, 1, 32), + (256, 256, 512, 16, 16, False, True, True): (1, 16, 5, 4), + (256, 256, 512, 16, 16, True, False, True): (1, 16, 3, 2), + (256, 256, 512, 32, 32, False, True, True): (4, 16, 4, 4), + (256, 256, 512, 32, 32, True, False, True): (4, 16, 3, 4), + (256, 256, 512, 64, 64, False, True, True): (1, 8, 3, 8), + (256, 256, 512, 64, 64, True, False, True): (1, 8, 3, 8), + (256, 256, 512, 128, 128, False, True, True): (1, 4, 1, 32), + (256, 256, 512, 128, 128, True, False, True): (3, 4, 1, 32), + (256, 256, 1024, 16, 16, False, True, True): (3, 32, 5, 2), + (256, 256, 1024, 16, 16, True, False, True): (2, 32, 5, 2), + (256, 256, 1024, 32, 32, False, True, True): (1, 32, 4, 4), + (256, 256, 1024, 32, 32, True, False, True): (1, 32, 5, 4), + (256, 256, 1024, 64, 64, False, True, True): (4, 16, 3, 8), + (256, 256, 1024, 64, 64, True, False, True): (1, 16, 3, 8), + (256, 256, 1024, 128, 128, False, True, True): (1, 8, 1, 32), + (256, 256, 1024, 128, 128, True, False, True): (3, 8, 1, 32), + (256, 256, 2048, 16, 16, False, True, True): (3, 32, 3, 4), + (256, 256, 2048, 16, 16, True, False, True): (1, 64, 3, 2), + (256, 256, 2048, 32, 32, False, True, True): (1, 64, 3, 4), + (256, 256, 2048, 32, 32, True, False, True): (1, 64, 3, 4), + (256, 256, 2048, 64, 64, False, True, True): (2, 32, 1, 8), + (256, 256, 2048, 64, 64, True, False, True): (2, 32, 1, 8), + (256, 256, 2048, 128, 128, False, True, True): (4, 16, 1, 32), + (256, 256, 2048, 128, 128, True, False, True): (4, 16, 1, 32), + (256, 256, 4096, 16, 16, False, True, True): (1, 32, 2, 4), + (256, 256, 4096, 16, 16, True, False, True): (1, 32, 3, 4), + (256, 256, 4096, 32, 32, False, True, True): (1, 128, 2, 4), + (256, 256, 4096, 32, 32, True, False, True): (1, 128, 2, 4), + (256, 256, 4096, 64, 64, False, True, True): (2, 64, 4, 8), + (256, 256, 4096, 64, 64, True, False, True): (3, 64, 2, 8), + (256, 256, 4096, 128, 128, False, True, True): (3, 32, 1, 32), + (256, 256, 4096, 128, 128, True, False, True): (2, 32, 1, 32), + (256, 256, 8192, 16, 16, False, True, True): (1, 64, 3, 4), + (256, 256, 8192, 16, 16, True, False, True): (2, 128, 3, 2), + (256, 256, 8192, 32, 32, False, True, True): (3, 128, 3, 4), + (256, 256, 8192, 32, 32, True, False, True): (1, 128, 3, 4), + (256, 256, 8192, 64, 64, False, True, True): (3, 128, 1, 4), + (256, 256, 8192, 64, 64, True, False, True): (4, 128, 2, 8), + (256, 256, 8192, 128, 128, False, True, True): (6, 64, 1, 32), + (256, 256, 8192, 128, 128, True, False, True): (2, 64, 1, 32), + (256, 256, 16384, 16, 16, False, True, True): (4, 128, 3, 4), + (256, 256, 16384, 16, 16, True, False, True): (3, 128, 3, 4), + (256, 256, 16384, 32, 32, False, True, True): (4, 256, 3, 4), + (256, 256, 16384, 32, 32, True, False, True): (2, 256, 3, 4), + (256, 256, 16384, 64, 64, False, True, True): (3, 256, 1, 4), + (256, 
256, 16384, 64, 64, True, False, True): (2, 256, 2, 4), + (256, 256, 16384, 128, 128, False, True, True): (1, 128, 1, 32), + (256, 256, 16384, 128, 128, True, False, True): (3, 128, 1, 32), + (256, 256, 32768, 16, 16, False, True, True): (1, 256, 3, 4), + (256, 256, 32768, 16, 16, True, False, True): (2, 128, 3, 4), + (256, 256, 32768, 32, 32, False, True, True): (2, 512, 3, 4), + (256, 256, 32768, 32, 32, True, False, True): (4, 512, 3, 4), + (256, 256, 32768, 64, 64, False, True, True): (1, 512, 1, 8), + (256, 256, 32768, 64, 64, True, False, True): (1, 512, 2, 4), + (256, 256, 32768, 128, 128, False, True, True): (1, 256, 1, 32), + (256, 256, 32768, 128, 128, True, False, True): (1, 256, 1, 32), + (256, 256, 65536, 16, 16, False, True, True): (2, 512, 3, 4), + (256, 256, 65536, 16, 16, True, False, True): (1, 256, 3, 4), + (256, 256, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), + (256, 256, 65536, 32, 32, True, False, True): (2, 1024, 3, 4), + (256, 256, 65536, 64, 64, False, True, True): (1, 1024, 2, 4), + (256, 256, 65536, 64, 64, True, False, True): (1, 1024, 2, 4), + (256, 256, 65536, 128, 128, False, True, True): (1, 512, 1, 32), + (256, 256, 65536, 128, 128, True, False, True): (2, 512, 1, 32), + (256, 256, 131072, 16, 16, False, True, True): (1, 1024, 3, 4), + (256, 256, 131072, 16, 16, True, False, True): (1, 512, 3, 4), + (256, 256, 131072, 32, 32, False, True, True): (1, 2048, 3, 4), + (256, 256, 131072, 32, 32, True, False, True): (1, 2048, 3, 4), + (256, 256, 131072, 64, 64, False, True, True): (1, 2048, 1, 8), + (256, 256, 131072, 64, 64, True, False, True): (1, 2048, 2, 4), + (256, 256, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), + (256, 256, 131072, 128, 128, True, False, True): (4, 1024, 1, 32), + (512, 512, 256, 16, 16, False, True, True): (1, 8, 4, 4), + (512, 512, 256, 16, 16, True, False, True): (1, 8, 3, 2), + (512, 512, 256, 32, 32, False, True, True): (4, 8, 3, 4), + (512, 512, 256, 32, 32, True, False, True): (4, 8, 3, 4), + (512, 512, 256, 64, 64, False, True, True): (3, 4, 3, 8), + (512, 512, 256, 64, 64, True, False, True): (5, 4, 3, 8), + (512, 512, 256, 128, 128, False, True, True): (1, 2, 1, 32), + (512, 512, 256, 128, 128, True, False, True): (3, 2, 1, 32), + (512, 512, 512, 16, 16, False, True, True): (2, 16, 3, 2), + (512, 512, 512, 16, 16, True, False, True): (1, 8, 4, 4), + (512, 512, 512, 32, 32, False, True, True): (3, 16, 3, 4), + (512, 512, 512, 32, 32, True, False, True): (5, 16, 2, 4), + (512, 512, 512, 64, 64, False, True, True): (1, 8, 3, 8), + (512, 512, 512, 64, 64, True, False, True): (3, 8, 3, 8), + (512, 512, 512, 128, 128, False, True, True): (1, 4, 1, 32), + (512, 512, 512, 128, 128, True, False, True): (3, 4, 1, 16), + (512, 512, 1024, 16, 16, False, True, True): (1, 16, 3, 4), + (512, 512, 1024, 16, 16, True, False, True): (3, 16, 3, 4), + (512, 512, 1024, 32, 32, False, True, True): (3, 32, 3, 4), + (512, 512, 1024, 32, 32, True, False, True): (3, 32, 2, 4), + (512, 512, 1024, 64, 64, False, True, True): (1, 16, 3, 8), + (512, 512, 1024, 64, 64, True, False, True): (4, 16, 3, 8), + (512, 512, 1024, 128, 128, False, True, True): (4, 8, 1, 32), + (512, 512, 1024, 128, 128, True, False, True): (4, 8, 1, 32), + (512, 512, 2048, 16, 16, False, True, True): (5, 16, 3, 4), + (512, 512, 2048, 16, 16, True, False, True): (5, 16, 3, 4), + (512, 512, 2048, 32, 32, False, True, True): (1, 32, 3, 4), + (512, 512, 2048, 32, 32, True, False, True): (1, 32, 4, 4), + (512, 512, 2048, 64, 64, False, True, True): (4, 32, 3, 8), + 
(512, 512, 2048, 64, 64, True, False, True): (4, 32, 3, 8), + (512, 512, 2048, 128, 128, False, True, True): (3, 16, 1, 32), + (512, 512, 2048, 128, 128, True, False, True): (3, 16, 1, 32), + (512, 512, 4096, 16, 16, False, True, True): (4, 32, 3, 4), + (512, 512, 4096, 16, 16, True, False, True): (4, 64, 3, 2), + (512, 512, 4096, 32, 32, False, True, True): (3, 64, 3, 4), + (512, 512, 4096, 32, 32, True, False, True): (3, 64, 3, 4), + (512, 512, 4096, 64, 64, False, True, True): (4, 64, 2, 4), + (512, 512, 4096, 64, 64, True, False, True): (1, 64, 2, 4), + (512, 512, 4096, 128, 128, False, True, True): (1, 32, 1, 32), + (512, 512, 4096, 128, 128, True, False, True): (1, 32, 1, 32), + (512, 512, 8192, 16, 16, False, True, True): (1, 64, 3, 4), + (512, 512, 8192, 16, 16, True, False, True): (4, 64, 3, 4), + (512, 512, 8192, 32, 32, False, True, True): (2, 128, 3, 4), + (512, 512, 8192, 32, 32, True, False, True): (3, 128, 3, 4), + (512, 512, 8192, 64, 64, False, True, True): (1, 128, 2, 4), + (512, 512, 8192, 64, 64, True, False, True): (1, 128, 2, 4), + (512, 512, 8192, 128, 128, False, True, True): (6, 64, 1, 32), + (512, 512, 8192, 128, 128, True, False, True): (4, 64, 1, 32), + (512, 512, 16384, 16, 16, False, True, True): (1, 128, 3, 4), + (512, 512, 16384, 16, 16, True, False, True): (1, 64, 3, 4), + (512, 512, 16384, 32, 32, False, True, True): (1, 256, 3, 4), + (512, 512, 16384, 32, 32, True, False, True): (4, 256, 3, 4), + (512, 512, 16384, 64, 64, False, True, True): (1, 256, 2, 4), + (512, 512, 16384, 64, 64, True, False, True): (1, 256, 2, 4), + (512, 512, 16384, 128, 128, False, True, True): (1, 128, 1, 32), + (512, 512, 16384, 128, 128, True, False, True): (2, 128, 1, 32), + (512, 512, 32768, 16, 16, False, True, True): (1, 256, 3, 4), + (512, 512, 32768, 16, 16, True, False, True): (1, 128, 3, 4), + (512, 512, 32768, 32, 32, False, True, True): (1, 512, 3, 4), + (512, 512, 32768, 32, 32, True, False, True): (1, 512, 3, 4), + (512, 512, 32768, 64, 64, False, True, True): (1, 512, 2, 4), + (512, 512, 32768, 64, 64, True, False, True): (2, 512, 2, 4), + (512, 512, 32768, 128, 128, False, True, True): (1, 256, 1, 32), + (512, 512, 32768, 128, 128, True, False, True): (2, 256, 1, 32), + (512, 512, 65536, 16, 16, False, True, True): (1, 512, 3, 4), + (512, 512, 65536, 16, 16, True, False, True): (1, 256, 3, 4), + (512, 512, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), + (512, 512, 65536, 32, 32, True, False, True): (1, 1024, 3, 4), + (512, 512, 65536, 64, 64, False, True, True): (1, 1024, 2, 4), + (512, 512, 65536, 64, 64, True, False, True): (1, 1024, 2, 4), + (512, 512, 65536, 128, 128, False, True, True): (1, 512, 1, 32), + (512, 512, 65536, 128, 128, True, False, True): (4, 512, 1, 32), + (512, 512, 131072, 16, 16, False, True, True): (1, 512, 3, 4), + (512, 512, 131072, 16, 16, True, False, True): (1, 512, 3, 4), + (512, 512, 131072, 32, 32, False, True, True): (1, 2048, 3, 4), + (512, 512, 131072, 32, 32, True, False, True): (1, 2048, 3, 4), + (512, 512, 131072, 64, 64, False, True, True): (1, 2048, 2, 4), + (512, 512, 131072, 64, 64, True, False, True): (1, 2048, 2, 4), + (512, 512, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), + (512, 512, 131072, 128, 128, True, False, True): (2, 1024, 1, 32), + (1024, 1024, 256, 16, 16, False, True, True): (4, 8, 3, 2), + (1024, 1024, 256, 16, 16, True, False, True): (2, 8, 3, 2), + (1024, 1024, 256, 32, 32, False, True, True): (1, 8, 3, 4), + (1024, 1024, 256, 32, 32, True, False, True): (1, 8, 3, 4), + (1024, 1024, 256, 
64, 64, False, True, True): (1, 4, 3, 8), + (1024, 1024, 256, 64, 64, True, False, True): (2, 4, 3, 8), + (1024, 1024, 256, 128, 128, False, True, True): (3, 2, 1, 32), + (1024, 1024, 256, 128, 128, True, False, True): (5, 2, 1, 32), + (1024, 1024, 512, 16, 16, False, True, True): (3, 8, 3, 4), + (1024, 1024, 512, 16, 16, True, False, True): (3, 8, 3, 4), + (1024, 1024, 512, 32, 32, False, True, True): (1, 16, 3, 4), + (1024, 1024, 512, 32, 32, True, False, True): (3, 16, 3, 4), + (1024, 1024, 512, 64, 64, False, True, True): (6, 8, 3, 8), + (1024, 1024, 512, 64, 64, True, False, True): (8, 8, 3, 8), + (1024, 1024, 512, 128, 128, False, True, True): (1, 4, 1, 32), + (1024, 1024, 512, 128, 128, True, False, True): (1, 4, 1, 32), + (1024, 1024, 1024, 16, 16, False, True, True): (4, 8, 3, 4), + (1024, 1024, 1024, 16, 16, True, False, True): (1, 8, 3, 4), + (1024, 1024, 1024, 32, 32, False, True, True): (4, 16, 4, 4), + (1024, 1024, 1024, 32, 32, True, False, True): (5, 16, 3, 4), + (1024, 1024, 1024, 64, 64, False, True, True): (6, 16, 3, 8), + (1024, 1024, 1024, 64, 64, True, False, True): (3, 16, 2, 4), + (1024, 1024, 1024, 128, 128, False, True, True): (1, 8, 1, 32), + (1024, 1024, 1024, 128, 128, True, False, True): (2, 8, 1, 32), + (1024, 1024, 2048, 16, 16, False, True, True): (4, 16, 3, 4), + (1024, 1024, 2048, 16, 16, True, False, True): (1, 16, 3, 4), + (1024, 1024, 2048, 32, 32, False, True, True): (1, 32, 3, 4), + (1024, 1024, 2048, 32, 32, True, False, True): (2, 32, 3, 4), + (1024, 1024, 2048, 64, 64, False, True, True): (4, 32, 2, 4), + (1024, 1024, 2048, 64, 64, True, False, True): (8, 32, 2, 4), + (1024, 1024, 2048, 128, 128, False, True, True): (1, 16, 1, 32), + (1024, 1024, 2048, 128, 128, True, False, True): (1, 16, 1, 32), + (1024, 1024, 4096, 16, 16, False, True, True): (4, 32, 3, 4), + (1024, 1024, 4096, 16, 16, True, False, True): (1, 64, 3, 2), + (1024, 1024, 4096, 32, 32, False, True, True): (1, 64, 3, 4), + (1024, 1024, 4096, 32, 32, True, False, True): (1, 64, 3, 4), + (1024, 1024, 4096, 64, 64, False, True, True): (2, 64, 2, 4), + (1024, 1024, 4096, 64, 64, True, False, True): (2, 64, 2, 4), + (1024, 1024, 4096, 128, 128, False, True, True): (1, 32, 1, 32), + (1024, 1024, 4096, 128, 128, True, False, True): (4, 32, 1, 32), + (1024, 1024, 8192, 16, 16, False, True, True): (1, 128, 3, 1), + (1024, 1024, 8192, 16, 16, True, False, True): (1, 128, 3, 1), + (1024, 1024, 8192, 32, 32, False, True, True): (1, 128, 3, 4), + (1024, 1024, 8192, 32, 32, True, False, True): (1, 128, 3, 4), + (1024, 1024, 8192, 64, 64, False, True, True): (2, 128, 2, 4), + (1024, 1024, 8192, 64, 64, True, False, True): (2, 128, 2, 4), + (1024, 1024, 8192, 128, 128, False, True, True): (1, 64, 1, 32), + (1024, 1024, 8192, 128, 128, True, False, True): (4, 64, 1, 32), + (1024, 1024, 16384, 16, 16, False, True, True): (1, 128, 2, 4), + (1024, 1024, 16384, 16, 16, True, False, True): (4, 256, 3, 1), + (1024, 1024, 16384, 32, 32, False, True, True): (1, 256, 3, 4), + (1024, 1024, 16384, 32, 32, True, False, True): (1, 256, 3, 4), + (1024, 1024, 16384, 64, 64, False, True, True): (1, 256, 2, 4), + (1024, 1024, 16384, 64, 64, True, False, True): (1, 256, 2, 4), + (1024, 1024, 16384, 128, 128, False, True, True): (1, 128, 1, 32), + (1024, 1024, 16384, 128, 128, True, False, True): (4, 128, 1, 32), + (1024, 1024, 32768, 16, 16, False, True, True): (1, 256, 2, 4), + (1024, 1024, 32768, 16, 16, True, False, True): (4, 512, 3, 1), + (1024, 1024, 32768, 32, 32, False, True, True): (1, 512, 3, 4), + (1024, 
1024, 32768, 32, 32, True, False, True): (1, 512, 3, 4), + (1024, 1024, 32768, 64, 64, False, True, True): (1, 512, 2, 4), + (1024, 1024, 32768, 64, 64, True, False, True): (1, 512, 2, 4), + (1024, 1024, 32768, 128, 128, False, True, True): (1, 256, 1, 32), + (1024, 1024, 32768, 128, 128, True, False, True): (1, 256, 1, 32), + (1024, 1024, 65536, 16, 16, False, True, True): (1, 512, 2, 4), + (1024, 1024, 65536, 16, 16, True, False, True): (1, 1024, 3, 1), + (1024, 1024, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), + (1024, 1024, 65536, 32, 32, True, False, True): (1, 512, 3, 4), + (1024, 1024, 65536, 64, 64, False, True, True): (1, 1024, 2, 4), + (1024, 1024, 65536, 64, 64, True, False, True): (1, 1024, 2, 4), + (1024, 1024, 65536, 128, 128, False, True, True): (1, 512, 1, 32), + (1024, 1024, 65536, 128, 128, True, False, True): (1, 512, 1, 32), + (1024, 1024, 131072, 16, 16, False, True, True): (4, 2048, 3, 1), + (1024, 1024, 131072, 16, 16, True, False, True): (4, 2048, 3, 1), + (1024, 1024, 131072, 32, 32, False, True, True): (1, 2048, 3, 4), + (1024, 1024, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), + (1024, 1024, 131072, 64, 64, False, True, True): (1, 2048, 2, 4), + (1024, 1024, 131072, 64, 64, True, False, True): (1, 2048, 2, 4), + (1024, 1024, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), + (1024, 1024, 131072, 128, 128, True, False, True): (1, 1024, 1, 32), + (2048, 2048, 256, 16, 16, False, True, True): (1, 4, 3, 4), + (2048, 2048, 256, 16, 16, True, False, True): (1, 4, 3, 4), + (2048, 2048, 256, 32, 32, False, True, True): (3, 8, 3, 4), + (2048, 2048, 256, 32, 32, True, False, True): (3, 8, 3, 4), + (2048, 2048, 256, 64, 64, False, True, True): (4, 4, 4, 8), + (2048, 2048, 256, 64, 64, True, False, True): (8, 4, 4, 8), + (2048, 2048, 256, 128, 128, False, True, True): (3, 2, 1, 32), + (2048, 2048, 256, 128, 128, True, False, True): (3, 2, 1, 32), + (2048, 2048, 512, 16, 16, False, True, True): (4, 8, 3, 2), + (2048, 2048, 512, 16, 16, True, False, True): (4, 8, 3, 2), + (2048, 2048, 512, 32, 32, False, True, True): (3, 8, 3, 4), + (2048, 2048, 512, 32, 32, True, False, True): (1, 16, 2, 4), + (2048, 2048, 512, 64, 64, False, True, True): (4, 8, 2, 4), + (2048, 2048, 512, 64, 64, True, False, True): (4, 8, 2, 4), + (2048, 2048, 512, 128, 128, False, True, True): (1, 4, 1, 32), + (2048, 2048, 512, 128, 128, True, False, True): (4, 4, 1, 32), + (2048, 2048, 1024, 16, 16, False, True, True): (4, 8, 3, 4), + (2048, 2048, 1024, 16, 16, True, False, True): (4, 8, 3, 4), + (2048, 2048, 1024, 32, 32, False, True, True): (4, 16, 3, 4), + (2048, 2048, 1024, 32, 32, True, False, True): (1, 16, 3, 4), + (2048, 2048, 1024, 64, 64, False, True, True): (2, 16, 2, 4), + (2048, 2048, 1024, 64, 64, True, False, True): (2, 16, 2, 4), + (2048, 2048, 1024, 128, 128, False, True, True): (8, 8, 1, 32), + (2048, 2048, 1024, 128, 128, True, False, True): (4, 8, 1, 32), + (2048, 2048, 2048, 16, 16, False, True, True): (4, 32, 3, 1), + (2048, 2048, 2048, 16, 16, True, False, True): (3, 32, 3, 2), + (2048, 2048, 2048, 32, 32, False, True, True): (1, 32, 3, 4), + (2048, 2048, 2048, 32, 32, True, False, True): (1, 32, 3, 4), + (2048, 2048, 2048, 64, 64, False, True, True): (2, 32, 2, 4), + (2048, 2048, 2048, 64, 64, True, False, True): (2, 32, 2, 4), + (2048, 2048, 2048, 128, 128, False, True, True): (6, 16, 1, 32), + (2048, 2048, 2048, 128, 128, True, False, True): (4, 16, 1, 32), + (2048, 2048, 4096, 16, 16, False, True, True): (4, 64, 3, 1), + (2048, 2048, 4096, 16, 16, True, 
False, True): (1, 64, 3, 1), + (2048, 2048, 4096, 32, 32, False, True, True): (1, 64, 3, 4), + (2048, 2048, 4096, 32, 32, True, False, True): (4, 64, 3, 4), + (2048, 2048, 4096, 64, 64, False, True, True): (2, 64, 2, 4), + (2048, 2048, 4096, 64, 64, True, False, True): (2, 64, 2, 4), + (2048, 2048, 4096, 128, 128, False, True, True): (4, 32, 1, 32), + (2048, 2048, 4096, 128, 128, True, False, True): (4, 32, 1, 32), + (2048, 2048, 8192, 16, 16, False, True, True): (4, 128, 3, 1), + (2048, 2048, 8192, 16, 16, True, False, True): (1, 128, 3, 1), + (2048, 2048, 8192, 32, 32, False, True, True): (4, 128, 3, 4), + (2048, 2048, 8192, 32, 32, True, False, True): (4, 64, 3, 4), + (2048, 2048, 8192, 64, 64, False, True, True): (1, 128, 2, 4), + (2048, 2048, 8192, 64, 64, True, False, True): (2, 128, 2, 4), + (2048, 2048, 8192, 128, 128, False, True, True): (1, 64, 1, 32), + (2048, 2048, 8192, 128, 128, True, False, True): (4, 64, 1, 32), + (2048, 2048, 16384, 16, 16, False, True, True): (4, 256, 3, 1), + (2048, 2048, 16384, 16, 16, True, False, True): (1, 256, 3, 1), + (2048, 2048, 16384, 32, 32, False, True, True): (1, 256, 3, 4), + (2048, 2048, 16384, 32, 32, True, False, True): (1, 128, 3, 4), + (2048, 2048, 16384, 64, 64, False, True, True): (1, 256, 2, 4), + (2048, 2048, 16384, 64, 64, True, False, True): (1, 256, 2, 4), + (2048, 2048, 16384, 128, 128, False, True, True): (1, 128, 1, 32), + (2048, 2048, 16384, 128, 128, True, False, True): (4, 128, 1, 32), + (2048, 2048, 32768, 16, 16, False, True, True): (8, 512, 3, 1), + (2048, 2048, 32768, 16, 16, True, False, True): (1, 512, 3, 1), + (2048, 2048, 32768, 32, 32, False, True, True): (1, 512, 3, 4), + (2048, 2048, 32768, 32, 32, True, False, True): (1, 256, 3, 4), + (2048, 2048, 32768, 64, 64, False, True, True): (1, 512, 2, 4), + (2048, 2048, 32768, 64, 64, True, False, True): (1, 512, 2, 4), + (2048, 2048, 32768, 128, 128, False, True, True): (1, 256, 1, 32), + (2048, 2048, 32768, 128, 128, True, False, True): (4, 256, 1, 32), + (2048, 2048, 65536, 16, 16, False, True, True): (4, 1024, 3, 1), + (2048, 2048, 65536, 16, 16, True, False, True): (1, 1024, 3, 1), + (2048, 2048, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), + (2048, 2048, 65536, 32, 32, True, False, True): (1, 512, 3, 4), + (2048, 2048, 65536, 64, 64, False, True, True): (1, 1024, 2, 4), + (2048, 2048, 65536, 64, 64, True, False, True): (1, 1024, 2, 4), + (2048, 2048, 65536, 128, 128, False, True, True): (1, 512, 1, 32), + (2048, 2048, 65536, 128, 128, True, False, True): (4, 512, 1, 32), + (2048, 2048, 131072, 16, 16, False, True, True): (4, 2048, 3, 1), + (2048, 2048, 131072, 16, 16, True, False, True): (1, 2048, 3, 1), + (2048, 2048, 131072, 32, 32, False, True, True): (1, 2048, 3, 4), + (2048, 2048, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), + (2048, 2048, 131072, 64, 64, False, True, True): (1, 2048, 2, 4), + (2048, 2048, 131072, 64, 64, True, False, True): (1, 2048, 2, 4), + (2048, 2048, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), + (2048, 2048, 131072, 128, 128, True, False, True): (4, 1024, 1, 32), + (4096, 4096, 256, 16, 16, False, True, True): (1, 4, 3, 2), + (4096, 4096, 256, 16, 16, True, False, True): (1, 2, 3, 4), + (4096, 4096, 256, 32, 32, False, True, True): (4, 4, 4, 4), + (4096, 4096, 256, 32, 32, True, False, True): (4, 4, 4, 4), + (4096, 4096, 256, 64, 64, False, True, True): (1, 4, 3, 8), + (4096, 4096, 256, 64, 64, True, False, True): (4, 4, 2, 4), + (4096, 4096, 256, 128, 128, False, True, True): (1, 2, 1, 32), + (4096, 4096, 
256, 128, 128, True, False, True): (3, 2, 1, 32), + (4096, 4096, 512, 16, 16, False, True, True): (1, 4, 3, 4), + (4096, 4096, 512, 16, 16, True, False, True): (5, 8, 3, 2), + (4096, 4096, 512, 32, 32, False, True, True): (4, 8, 3, 4), + (4096, 4096, 512, 32, 32, True, False, True): (4, 8, 3, 4), + (4096, 4096, 512, 64, 64, False, True, True): (1, 8, 2, 4), + (4096, 4096, 512, 64, 64, True, False, True): (1, 8, 2, 4), + (4096, 4096, 512, 128, 128, False, True, True): (4, 4, 1, 32), + (4096, 4096, 512, 128, 128, True, False, True): (4, 4, 1, 32), + (4096, 4096, 1024, 16, 16, False, True, True): (1, 8, 3, 4), + (4096, 4096, 1024, 16, 16, True, False, True): (1, 8, 3, 4), + (4096, 4096, 1024, 32, 32, False, True, True): (1, 16, 3, 4), + (4096, 4096, 1024, 32, 32, True, False, True): (1, 16, 3, 4), + (4096, 4096, 1024, 64, 64, False, True, True): (4, 16, 2, 4), + (4096, 4096, 1024, 64, 64, True, False, True): (4, 16, 2, 4), + (4096, 4096, 1024, 128, 128, False, True, True): (4, 8, 1, 32), + (4096, 4096, 1024, 128, 128, True, False, True): (4, 8, 1, 32), + (4096, 4096, 2048, 16, 16, False, True, True): (1, 32, 3, 1), + (4096, 4096, 2048, 16, 16, True, False, True): (6, 8, 3, 4), + (4096, 4096, 2048, 32, 32, False, True, True): (1, 32, 3, 4), + (4096, 4096, 2048, 32, 32, True, False, True): (1, 32, 3, 4), + (4096, 4096, 2048, 64, 64, False, True, True): (4, 32, 2, 4), + (4096, 4096, 2048, 64, 64, True, False, True): (4, 32, 2, 4), + (4096, 4096, 2048, 128, 128, False, True, True): (4, 16, 1, 32), + (4096, 4096, 2048, 128, 128, True, False, True): (4, 16, 1, 32), + (4096, 4096, 4096, 16, 16, False, True, True): (1, 16, 3, 4), + (4096, 4096, 4096, 16, 16, True, False, True): (1, 64, 3, 1), + (4096, 4096, 4096, 32, 32, False, True, True): (1, 64, 3, 4), + (4096, 4096, 4096, 32, 32, True, False, True): (1, 32, 3, 4), + (4096, 4096, 4096, 64, 64, False, True, True): (4, 64, 2, 4), + (4096, 4096, 4096, 64, 64, True, False, True): (4, 64, 2, 4), + (4096, 4096, 4096, 128, 128, False, True, True): (4, 32, 1, 32), + (4096, 4096, 4096, 128, 128, True, False, True): (4, 32, 1, 32), + (4096, 4096, 8192, 16, 16, False, True, True): (4, 128, 3, 1), + (4096, 4096, 8192, 16, 16, True, False, True): (1, 128, 3, 1), + (4096, 4096, 8192, 32, 32, False, True, True): (1, 128, 3, 4), + (4096, 4096, 8192, 32, 32, True, False, True): (1, 64, 3, 4), + (4096, 4096, 8192, 64, 64, False, True, True): (4, 128, 2, 4), + (4096, 4096, 8192, 64, 64, True, False, True): (4, 128, 2, 4), + (4096, 4096, 8192, 128, 128, False, True, True): (4, 64, 1, 32), + (4096, 4096, 8192, 128, 128, True, False, True): (4, 64, 1, 32), + (4096, 4096, 16384, 16, 16, False, True, True): (1, 64, 3, 4), + (4096, 4096, 16384, 16, 16, True, False, True): (1, 256, 3, 1), + (4096, 4096, 16384, 32, 32, False, True, True): (1, 256, 3, 4), + (4096, 4096, 16384, 32, 32, True, False, True): (1, 128, 3, 4), + (4096, 4096, 16384, 64, 64, False, True, True): (4, 256, 2, 4), + (4096, 4096, 16384, 64, 64, True, False, True): (4, 256, 2, 4), + (4096, 4096, 16384, 128, 128, False, True, True): (4, 128, 1, 32), + (4096, 4096, 16384, 128, 128, True, False, True): (4, 128, 1, 32), + (4096, 4096, 32768, 16, 16, False, True, True): (1, 128, 3, 4), + (4096, 4096, 32768, 16, 16, True, False, True): (1, 512, 3, 1), + (4096, 4096, 32768, 32, 32, False, True, True): (1, 512, 3, 4), + (4096, 4096, 32768, 32, 32, True, False, True): (1, 256, 3, 4), + (4096, 4096, 32768, 64, 64, False, True, True): (4, 512, 2, 4), + (4096, 4096, 32768, 64, 64, True, False, True): (4, 512, 2, 4), + 
(4096, 4096, 32768, 128, 128, False, True, True): (4, 256, 1, 32), + (4096, 4096, 32768, 128, 128, True, False, True): (4, 256, 1, 32), + (4096, 4096, 65536, 16, 16, False, True, True): (1, 256, 3, 4), + (4096, 4096, 65536, 16, 16, True, False, True): (1, 1024, 3, 1), + (4096, 4096, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), + (4096, 4096, 65536, 32, 32, True, False, True): (1, 512, 3, 4), + (4096, 4096, 65536, 64, 64, False, True, True): (4, 1024, 2, 4), + (4096, 4096, 65536, 64, 64, True, False, True): (2, 1024, 2, 4), + (4096, 4096, 65536, 128, 128, False, True, True): (4, 512, 1, 32), + (4096, 4096, 65536, 128, 128, True, False, True): (4, 512, 1, 32), + (4096, 4096, 131072, 16, 16, False, True, True): (2, 2048, 3, 1), + (4096, 4096, 131072, 16, 16, True, False, True): (1, 2048, 3, 1), + (4096, 4096, 131072, 32, 32, False, True, True): (2, 2048, 3, 4), + (4096, 4096, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), + (4096, 4096, 131072, 64, 64, False, True, True): (2, 2048, 2, 4), + (4096, 4096, 131072, 64, 64, True, False, True): (2, 2048, 2, 4), + (4096, 4096, 131072, 128, 128, False, True, True): (4, 1024, 1, 32), + (4096, 4096, 131072, 128, 128, True, False, True): (4, 1024, 1, 32), + (8192, 8192, 256, 16, 16, False, True, True): (2, 2, 4, 4), + (8192, 8192, 256, 16, 16, True, False, True): (1, 1, 3, 4), + (8192, 8192, 256, 32, 32, False, True, True): (2, 4, 3, 4), + (8192, 8192, 256, 32, 32, True, False, True): (2, 4, 3, 4), + (8192, 8192, 256, 64, 64, False, True, True): (4, 4, 2, 4), + (8192, 8192, 256, 64, 64, True, False, True): (4, 4, 2, 4), + (8192, 8192, 256, 128, 128, False, True, True): (1, 2, 1, 32), + (8192, 8192, 256, 128, 128, True, False, True): (4, 2, 1, 32), + (8192, 8192, 512, 16, 16, False, True, True): (1, 4, 3, 4), + (8192, 8192, 512, 16, 16, True, False, True): (3, 4, 3, 4), + (8192, 8192, 512, 32, 32, False, True, True): (1, 8, 3, 4), + (8192, 8192, 512, 32, 32, True, False, True): (6, 8, 3, 4), + (8192, 8192, 512, 64, 64, False, True, True): (4, 8, 2, 4), + (8192, 8192, 512, 64, 64, True, False, True): (4, 8, 2, 4), + (8192, 8192, 512, 128, 128, False, True, True): (4, 4, 1, 32), + (8192, 8192, 512, 128, 128, True, False, True): (4, 4, 1, 32), + (8192, 8192, 1024, 16, 16, False, True, True): (1, 4, 3, 4), + (8192, 8192, 1024, 16, 16, True, False, True): (1, 32, 3, 1), + (8192, 8192, 1024, 32, 32, False, True, True): (1, 16, 3, 4), + (8192, 8192, 1024, 32, 32, True, False, True): (1, 16, 3, 4), + (8192, 8192, 1024, 64, 64, False, True, True): (4, 16, 2, 4), + (8192, 8192, 1024, 64, 64, True, False, True): (4, 16, 2, 4), + (8192, 8192, 1024, 128, 128, False, True, True): (4, 8, 1, 32), + (8192, 8192, 1024, 128, 128, True, False, True): (4, 8, 1, 32), + (8192, 8192, 2048, 16, 16, False, True, True): (4, 8, 3, 4), + (8192, 8192, 2048, 16, 16, True, False, True): (1, 32, 3, 1), + (8192, 8192, 2048, 32, 32, False, True, True): (1, 32, 3, 4), + (8192, 8192, 2048, 32, 32, True, False, True): (1, 16, 4, 4), + (8192, 8192, 2048, 64, 64, False, True, True): (4, 32, 2, 4), + (8192, 8192, 2048, 64, 64, True, False, True): (4, 32, 2, 4), + (8192, 8192, 2048, 128, 128, False, True, True): (4, 16, 1, 32), + (8192, 8192, 2048, 128, 128, True, False, True): (4, 16, 1, 32), + (8192, 8192, 4096, 16, 16, False, True, True): (3, 16, 3, 4), + (8192, 8192, 4096, 16, 16, True, False, True): (2, 64, 3, 1), + (8192, 8192, 4096, 32, 32, False, True, True): (1, 64, 3, 4), + (8192, 8192, 4096, 32, 32, True, False, True): (1, 32, 3, 4), + (8192, 8192, 4096, 64, 64, False, 
True, True): (4, 64, 2, 4), + (8192, 8192, 4096, 64, 64, True, False, True): (2, 64, 2, 4), + (8192, 8192, 4096, 128, 128, False, True, True): (4, 32, 1, 32), + (8192, 8192, 4096, 128, 128, True, False, True): (4, 32, 1, 32), + (8192, 8192, 8192, 16, 16, False, True, True): (2, 128, 3, 1), + (8192, 8192, 8192, 16, 16, True, False, True): (2, 128, 3, 1), + (8192, 8192, 8192, 32, 32, False, True, True): (1, 128, 3, 4), + (8192, 8192, 8192, 32, 32, True, False, True): (1, 64, 3, 4), + (8192, 8192, 8192, 64, 64, False, True, True): (4, 128, 2, 4), + (8192, 8192, 8192, 64, 64, True, False, True): (2, 128, 2, 4), + (8192, 8192, 8192, 128, 128, False, True, True): (4, 64, 1, 32), + (8192, 8192, 8192, 128, 128, True, False, True): (4, 64, 1, 32), + (8192, 8192, 16384, 16, 16, False, True, True): (1, 64, 3, 4), + (8192, 8192, 16384, 16, 16, True, False, True): (1, 256, 3, 1), + (8192, 8192, 16384, 32, 32, False, True, True): (1, 256, 3, 4), + (8192, 8192, 16384, 32, 32, True, False, True): (1, 128, 3, 4), + (8192, 8192, 16384, 64, 64, False, True, True): (2, 256, 2, 4), + (8192, 8192, 16384, 64, 64, True, False, True): (2, 256, 2, 4), + (8192, 8192, 16384, 128, 128, False, True, True): (4, 128, 1, 32), + (8192, 8192, 16384, 128, 128, True, False, True): (4, 128, 1, 32), + (8192, 8192, 32768, 16, 16, False, True, True): (1, 512, 3, 1), + (8192, 8192, 32768, 16, 16, True, False, True): (1, 512, 3, 1), + (8192, 8192, 32768, 32, 32, False, True, True): (1, 512, 3, 4), + (8192, 8192, 32768, 32, 32, True, False, True): (1, 256, 3, 4), + (8192, 8192, 32768, 64, 64, False, True, True): (2, 512, 2, 4), + (8192, 8192, 32768, 64, 64, True, False, True): (2, 512, 2, 4), + (8192, 8192, 32768, 128, 128, False, True, True): (4, 256, 1, 32), + (8192, 8192, 32768, 128, 128, True, False, True): (4, 256, 1, 32), + (8192, 8192, 65536, 16, 16, False, True, True): (1, 256, 3, 4), + (8192, 8192, 65536, 16, 16, True, False, True): (1, 1024, 3, 1), + (8192, 8192, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), + (8192, 8192, 65536, 32, 32, True, False, True): (1, 512, 3, 4), + (8192, 8192, 65536, 64, 64, False, True, True): (4, 1024, 2, 4), + (8192, 8192, 65536, 64, 64, True, False, True): (2, 1024, 2, 4), + (8192, 8192, 65536, 128, 128, False, True, True): (4, 512, 1, 32), + (8192, 8192, 65536, 128, 128, True, False, True): (4, 512, 1, 32), + (8192, 8192, 131072, 16, 16, False, True, True): (1, 2048, 3, 1), + (8192, 8192, 131072, 16, 16, True, False, True): (2, 2048, 3, 1), + (8192, 8192, 131072, 32, 32, False, True, True): (4, 2048, 3, 4), + (8192, 8192, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), + (8192, 8192, 131072, 64, 64, False, True, True): (2, 2048, 2, 4), + (8192, 8192, 131072, 64, 64, True, False, True): (2, 2048, 2, 4), + (8192, 8192, 131072, 128, 128, False, True, True): (4, 1024, 1, 32), + (8192, 8192, 131072, 128, 128, True, False, True): (4, 1024, 1, 32), + (16384, 16384, 256, 16, 16, False, True, True): (1, 2, 3, 4), + (16384, 16384, 256, 16, 16, True, False, True): (1, 2, 3, 4), + (16384, 16384, 256, 32, 32, False, True, True): (1, 4, 3, 4), + (16384, 16384, 256, 32, 32, True, False, True): (1, 4, 3, 4), + (16384, 16384, 256, 64, 64, False, True, True): (2, 4, 2, 4), + (16384, 16384, 256, 64, 64, True, False, True): (2, 4, 2, 4), + (16384, 16384, 256, 128, 128, False, True, True): (2, 2, 1, 32), + (16384, 16384, 256, 128, 128, True, False, True): (2, 2, 1, 32), + (16384, 16384, 512, 16, 16, False, True, True): (1, 2, 3, 4), + (16384, 16384, 512, 16, 16, True, False, True): (5, 2, 3, 4), + 
(16384, 16384, 512, 32, 32, False, True, True): (1, 8, 3, 4), + (16384, 16384, 512, 32, 32, True, False, True): (1, 4, 3, 4), + (16384, 16384, 512, 64, 64, False, True, True): (4, 8, 2, 4), + (16384, 16384, 512, 64, 64, True, False, True): (4, 8, 2, 4), + (16384, 16384, 512, 128, 128, False, True, True): (4, 4, 1, 32), + (16384, 16384, 512, 128, 128, True, False, True): (4, 4, 1, 32), + (16384, 16384, 1024, 16, 16, False, True, True): (1, 4, 3, 4), + (16384, 16384, 1024, 16, 16, True, False, True): (2, 16, 3, 1), + (16384, 16384, 1024, 32, 32, False, True, True): (1, 16, 3, 4), + (16384, 16384, 1024, 32, 32, True, False, True): (1, 8, 3, 4), + (16384, 16384, 1024, 64, 64, False, True, True): (4, 16, 2, 4), + (16384, 16384, 1024, 64, 64, True, False, True): (4, 16, 2, 4), + (16384, 16384, 1024, 128, 128, False, True, True): (4, 8, 1, 32), + (16384, 16384, 1024, 128, 128, True, False, True): (4, 8, 1, 32), + (16384, 16384, 2048, 16, 16, False, True, True): (1, 8, 3, 4), + (16384, 16384, 2048, 16, 16, True, False, True): (2, 32, 3, 1), + (16384, 16384, 2048, 32, 32, False, True, True): (1, 32, 3, 4), + (16384, 16384, 2048, 32, 32, True, False, True): (1, 16, 3, 4), + (16384, 16384, 2048, 64, 64, False, True, True): (4, 32, 2, 4), + (16384, 16384, 2048, 64, 64, True, False, True): (2, 32, 2, 4), + (16384, 16384, 2048, 128, 128, False, True, True): (4, 16, 1, 32), + (16384, 16384, 2048, 128, 128, True, False, True): (4, 16, 1, 32), + (16384, 16384, 4096, 16, 16, False, True, True): (1, 16, 3, 4), + (16384, 16384, 4096, 16, 16, True, False, True): (2, 64, 3, 1), + (16384, 16384, 4096, 32, 32, False, True, True): (1, 64, 3, 4), + (16384, 16384, 4096, 32, 32, True, False, True): (1, 32, 3, 4), + (16384, 16384, 4096, 64, 64, False, True, True): (4, 64, 2, 4), + (16384, 16384, 4096, 64, 64, True, False, True): (2, 64, 2, 4), + (16384, 16384, 4096, 128, 128, False, True, True): (4, 32, 1, 32), + (16384, 16384, 4096, 128, 128, True, False, True): (4, 32, 1, 32), + (16384, 16384, 8192, 16, 16, False, True, True): (1, 128, 3, 1), + (16384, 16384, 8192, 16, 16, True, False, True): (2, 128, 3, 1), + (16384, 16384, 8192, 32, 32, False, True, True): (1, 128, 3, 4), + (16384, 16384, 8192, 32, 32, True, False, True): (1, 64, 3, 4), + (16384, 16384, 8192, 64, 64, False, True, True): (2, 128, 2, 4), + (16384, 16384, 8192, 64, 64, True, False, True): (2, 128, 2, 4), + (16384, 16384, 8192, 128, 128, False, True, True): (4, 64, 1, 32), + (16384, 16384, 8192, 128, 128, True, False, True): (4, 64, 1, 32), + (16384, 16384, 16384, 16, 16, False, True, True): (1, 64, 3, 4), + (16384, 16384, 16384, 16, 16, True, False, True): (2, 256, 3, 1), + (16384, 16384, 16384, 32, 32, False, True, True): (1, 256, 3, 4), + (16384, 16384, 16384, 32, 32, True, False, True): (1, 128, 3, 4), + (16384, 16384, 16384, 64, 64, False, True, True): (2, 256, 2, 4), + (16384, 16384, 16384, 64, 64, True, False, True): (2, 256, 2, 4), + (16384, 16384, 16384, 128, 128, False, True, True): (4, 128, 1, 32), + (16384, 16384, 16384, 128, 128, True, False, True): (4, 128, 1, 32), + (16384, 16384, 32768, 16, 16, False, True, True): (1, 512, 3, 1), + (16384, 16384, 32768, 16, 16, True, False, True): (1, 128, 3, 4), + (16384, 16384, 32768, 32, 32, False, True, True): (2, 512, 3, 4), + (16384, 16384, 32768, 32, 32, True, False, True): (1, 256, 4, 4), + (16384, 16384, 32768, 64, 64, False, True, True): (2, 512, 2, 4), + (16384, 16384, 32768, 64, 64, True, False, True): (2, 512, 2, 4), + (16384, 16384, 32768, 128, 128, False, True, True): (4, 256, 1, 32), + 
(16384, 16384, 32768, 128, 128, True, False, True): (4, 256, 1, 32), + (16384, 16384, 65536, 16, 16, False, True, True): (1, 256, 3, 4), + (16384, 16384, 65536, 16, 16, True, False, True): (1, 1024, 3, 1), + (16384, 16384, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), + (16384, 16384, 65536, 32, 32, True, False, True): (1, 512, 4, 4), + (16384, 16384, 65536, 64, 64, False, True, True): (2, 1024, 2, 4), + (16384, 16384, 65536, 64, 64, True, False, True): (2, 1024, 2, 4), + (16384, 16384, 65536, 128, 128, False, True, True): (4, 512, 1, 32), + (16384, 16384, 65536, 128, 128, True, False, True): (4, 512, 1, 32), + (16384, 16384, 131072, 16, 16, False, True, True): (1, 1024, 4, 4), + (16384, 16384, 131072, 16, 16, True, False, True): (2, 2048, 3, 1), + (16384, 16384, 131072, 32, 32, False, True, True): (1, 1024, 2, 4), + (16384, 16384, 131072, 32, 32, True, False, True): (1, 1024, 2, 4), + (16384, 16384, 131072, 64, 64, False, True, True): (4, 2048, 2, 4), + (16384, 16384, 131072, 64, 64, True, False, True): (2, 2048, 2, 4), + (16384, 16384, 131072, 128, 128, False, True, True): (4, 1024, 1, 32), + (16384, 16384, 131072, 128, 128, True, False, True): (4, 1024, 1, 32), + }, + ("scatter_mm", "NVIDIA A100-SXM4-80GB", (0, torch.bfloat16, 0.5)): { + (256, 256, 256, 16, 16): (1, 1, 16, 16, 1, 2), + (256, 256, 256, 32, 32): (1, 1, 16, 16, 1, 4), + (256, 256, 256, 64, 64): (1, 1, 16, 16, 1, 1), + (256, 256, 256, 128, 128): (2, 4, 16, 64, 1, 4), + (256, 256, 512, 16, 16): (1, 1, 16, 16, 1, 4), + (256, 256, 512, 32, 32): (1, 1, 16, 32, 1, 4), + (256, 256, 512, 64, 64): (1, 1, 16, 32, 1, 1), + (256, 256, 512, 128, 128): (1, 1, 32, 32, 1, 4), + (256, 256, 1024, 16, 16): (1, 1, 16, 16, 1, 4), + (256, 256, 1024, 32, 32): (1, 2, 16, 32, 1, 1), + (256, 256, 1024, 64, 64): (1, 1, 32, 32, 1, 2), + (256, 256, 1024, 128, 128): (1, 1, 32, 64, 1, 4), + (256, 256, 2048, 16, 16): (1, 1, 16, 64, 1, 8), + (256, 256, 2048, 32, 32): (2, 1, 32, 64, 1, 2), + (256, 256, 2048, 64, 64): (1, 1, 32, 32, 1, 1), + (256, 256, 2048, 128, 128): (1, 1, 64, 64, 1, 4), + (256, 256, 4096, 16, 16): (1, 1, 16, 64, 1, 1), + (256, 256, 4096, 32, 32): (2, 2, 32, 64, 1, 2), + (256, 256, 4096, 64, 64): (1, 1, 32, 128, 1, 4), + (256, 256, 4096, 128, 128): (1, 1, 64, 64, 1, 4), + (256, 256, 8192, 16, 16): (1, 2, 16, 64, 1, 2), + (256, 256, 8192, 32, 32): (1, 1, 32, 64, 1, 2), + (256, 256, 8192, 64, 64): (1, 1, 32, 64, 1, 2), + (256, 256, 8192, 128, 128): (1, 1, 64, 64, 1, 4), + (256, 256, 16384, 16, 16): (1, 1, 16, 64, 1, 2), + (256, 256, 16384, 32, 32): (1, 1, 32, 64, 1, 2), + (256, 256, 16384, 64, 64): (1, 1, 64, 64, 1, 2), + (256, 256, 16384, 128, 128): (2, 16, 64, 64, 1, 4), + (256, 256, 32768, 16, 16): (1, 1, 16, 128, 1, 2), + (256, 256, 32768, 32, 32): (1, 1, 32, 64, 1, 2), + (256, 256, 32768, 64, 64): (1, 1, 64, 64, 1, 2), + (256, 256, 32768, 128, 128): (2, 32, 64, 64, 1, 4), + (256, 256, 65536, 16, 16): (1, 1, 16, 64, 1, 1), + (256, 256, 65536, 32, 32): (1, 1, 32, 64, 1, 2), + (256, 256, 65536, 64, 64): (1, 1, 64, 32, 1, 1), + (256, 256, 65536, 128, 128): (2, 32, 64, 64, 1, 4), + (256, 256, 131072, 16, 16): (1, 1, 16, 64, 1, 1), + (256, 256, 131072, 32, 32): (1, 1, 32, 64, 1, 2), + (256, 256, 131072, 64, 64): (4, 1, 64, 32, 1, 1), + (256, 256, 131072, 128, 128): (2, 64, 64, 64, 1, 4), + (512, 512, 256, 16, 16): (1, 1, 16, 16, 1, 2), + (512, 512, 256, 32, 32): (1, 1, 16, 32, 1, 1), + (512, 512, 256, 64, 64): (1, 2, 16, 32, 1, 1), + (512, 512, 256, 128, 128): (2, 16, 64, 16, 2, 4), + (512, 512, 512, 16, 16): (1, 1, 16, 16, 1, 4), + 
(512, 512, 512, 32, 32): (1, 1, 16, 32, 1, 1), + (512, 512, 512, 64, 64): (1, 1, 32, 32, 1, 2), + (512, 512, 512, 128, 128): (2, 8, 32, 64, 1, 4), + (512, 512, 1024, 16, 16): (1, 1, 16, 64, 1, 8), + (512, 512, 1024, 32, 32): (1, 1, 32, 32, 3, 1), + (512, 512, 1024, 64, 64): (1, 4, 32, 64, 1, 2), + (512, 512, 1024, 128, 128): (1, 4, 64, 64, 1, 4), + (512, 512, 2048, 16, 16): (1, 1, 16, 64, 1, 2), + (512, 512, 2048, 32, 32): (1, 1, 32, 64, 1, 2), + (512, 512, 2048, 64, 64): (1, 1, 64, 64, 3, 4), + (512, 512, 2048, 128, 128): (1, 1, 64, 64, 1, 4), + (512, 512, 4096, 16, 16): (1, 1, 16, 64, 1, 2), + (512, 512, 4096, 32, 32): (2, 64, 32, 64, 1, 2), + (512, 512, 4096, 64, 64): (1, 1, 64, 64, 3, 4), + (512, 512, 4096, 128, 128): (1, 1, 64, 64, 1, 4), + (512, 512, 8192, 16, 16): (1, 2, 16, 128, 1, 2), + (512, 512, 8192, 32, 32): (1, 1, 32, 64, 1, 2), + (512, 512, 8192, 64, 64): (1, 1, 64, 64, 1, 2), + (512, 512, 8192, 128, 128): (1, 1, 64, 64, 1, 4), + (512, 512, 16384, 16, 16): (1, 2, 16, 128, 1, 2), + (512, 512, 16384, 32, 32): (1, 1, 32, 64, 1, 2), + (512, 512, 16384, 64, 64): (1, 1, 64, 64, 3, 2), + (512, 512, 16384, 128, 128): (2, 1, 64, 64, 1, 4), + (512, 512, 32768, 16, 16): (1, 2, 16, 128, 1, 2), + (512, 512, 32768, 32, 32): (1, 1, 32, 64, 1, 2), + (512, 512, 32768, 64, 64): (1, 1, 64, 64, 3, 4), + (512, 512, 32768, 128, 128): (2, 1, 64, 64, 1, 4), + (512, 512, 65536, 16, 16): (1, 2, 16, 128, 1, 2), + (512, 512, 65536, 32, 32): (1, 1, 32, 64, 1, 2), + (512, 512, 65536, 64, 64): (1, 1, 64, 64, 3, 4), + (512, 512, 65536, 128, 128): (2, 1, 64, 64, 1, 4), + (512, 512, 131072, 16, 16): (1, 1, 16, 64, 1, 1), + (512, 512, 131072, 32, 32): (1, 1, 32, 64, 1, 2), + (512, 512, 131072, 64, 64): (1, 1, 64, 64, 3, 4), + (512, 512, 131072, 128, 128): (2, 4, 64, 64, 1, 4), + (1024, 1024, 256, 16, 16): (1, 1, 16, 16, 1, 4), + (1024, 1024, 256, 32, 32): (2, 16, 32, 16, 3, 4), + (1024, 1024, 256, 64, 64): (1, 4, 32, 32, 1, 2), + (1024, 1024, 256, 128, 128): (1, 4, 128, 16, 3, 16), + (1024, 1024, 512, 16, 16): (1, 1, 16, 64, 1, 2), + (1024, 1024, 512, 32, 32): (2, 2, 32, 64, 1, 2), + (1024, 1024, 512, 64, 64): (2, 8, 64, 64, 3, 4), + (1024, 1024, 512, 128, 128): (1, 4, 64, 64, 1, 8), + (1024, 1024, 1024, 16, 16): (1, 1, 16, 64, 1, 2), + (1024, 1024, 1024, 32, 32): (1, 1, 32, 64, 1, 2), + (1024, 1024, 1024, 64, 64): (1, 8, 64, 64, 3, 4), + (1024, 1024, 1024, 128, 128): (1, 8, 64, 64, 1, 4), + (1024, 1024, 2048, 16, 16): (1, 2, 16, 64, 1, 2), + (1024, 1024, 2048, 32, 32): (1, 1, 32, 64, 1, 2), + (1024, 1024, 2048, 64, 64): (2, 16, 64, 64, 2, 2), + (1024, 1024, 2048, 128, 128): (2, 32, 64, 64, 1, 4), + (1024, 1024, 4096, 16, 16): (2, 16, 16, 128, 1, 2), + (1024, 1024, 4096, 32, 32): (1, 16, 32, 64, 3, 2), + (1024, 1024, 4096, 64, 64): (1, 1, 64, 64, 3, 4), + (1024, 1024, 4096, 128, 128): (2, 64, 128, 64, 1, 4), + (1024, 1024, 8192, 16, 16): (2, 16, 16, 128, 1, 2), + (1024, 1024, 8192, 32, 32): (1, 16, 32, 64, 3, 2), + (1024, 1024, 8192, 64, 64): (1, 1, 64, 64, 3, 4), + (1024, 1024, 8192, 128, 128): (2, 1, 64, 64, 1, 4), + (1024, 1024, 16384, 16, 16): (1, 2, 16, 128, 1, 2), + (1024, 1024, 16384, 32, 32): (1, 16, 32, 64, 3, 2), + (1024, 1024, 16384, 64, 64): (1, 1, 64, 64, 3, 4), + (1024, 1024, 16384, 128, 128): (2, 16, 128, 64, 1, 4), + (1024, 1024, 32768, 16, 16): (1, 1, 16, 128, 1, 2), + (1024, 1024, 32768, 32, 32): (1, 1, 32, 128, 1, 2), + (1024, 1024, 32768, 64, 64): (1, 32, 64, 32, 2, 1), + (1024, 1024, 32768, 128, 128): (2, 8, 128, 64, 1, 4), + (1024, 1024, 65536, 16, 16): (3, 2, 16, 128, 1, 2), + (1024, 
1024, 65536, 32, 32): (1, 1, 32, 128, 1, 2), + (1024, 1024, 65536, 64, 64): (2, 4, 64, 32, 2, 1), + (1024, 1024, 65536, 128, 128): (2, 8, 128, 64, 1, 4), + (1024, 1024, 131072, 16, 16): (2, 1, 16, 128, 1, 2), + (1024, 1024, 131072, 32, 32): (1, 1, 32, 128, 1, 2), + (1024, 1024, 131072, 64, 64): (1, 4, 64, 32, 2, 1), + (1024, 1024, 131072, 128, 128): (4, 1, 128, 64, 1, 4), + (2048, 2048, 256, 16, 16): (1, 1, 16, 64, 1, 8), + (2048, 2048, 256, 32, 32): (1, 1, 32, 32, 3, 1), + (2048, 2048, 256, 64, 64): (1, 1, 32, 32, 2, 1), + (2048, 2048, 256, 128, 128): (1, 4, 64, 64, 1, 8), + (2048, 2048, 512, 16, 16): (1, 2, 16, 64, 1, 2), + (2048, 2048, 512, 32, 32): (1, 2, 32, 64, 1, 4), + (2048, 2048, 512, 64, 64): (1, 4, 64, 64, 1, 8), + (2048, 2048, 512, 128, 128): (1, 4, 64, 64, 1, 4), + (2048, 2048, 1024, 16, 16): (1, 2, 16, 128, 1, 2), + (2048, 2048, 1024, 32, 32): (1, 1, 32, 64, 1, 2), + (2048, 2048, 1024, 64, 64): (1, 8, 64, 64, 1, 4), + (2048, 2048, 1024, 128, 128): (1, 8, 128, 64, 1, 4), + (2048, 2048, 2048, 16, 16): (3, 4, 16, 128, 1, 2), + (2048, 2048, 2048, 32, 32): (1, 16, 32, 64, 5, 2), + (2048, 2048, 2048, 64, 64): (1, 1, 64, 64, 3, 4), + (2048, 2048, 2048, 128, 128): (1, 8, 128, 64, 1, 4), + (2048, 2048, 4096, 16, 16): (1, 2, 16, 128, 1, 2), + (2048, 2048, 4096, 32, 32): (1, 8, 32, 64, 3, 2), + (2048, 2048, 4096, 64, 64): (1, 1, 64, 64, 3, 4), + (2048, 2048, 4096, 128, 128): (1, 8, 128, 64, 1, 4), + (2048, 2048, 8192, 16, 16): (2, 4, 16, 128, 1, 2), + (2048, 2048, 8192, 32, 32): (1, 4, 32, 128, 3, 2), + (2048, 2048, 8192, 64, 64): (1, 8, 64, 64, 3, 2), + (2048, 2048, 8192, 128, 128): (1, 8, 128, 64, 1, 4), + (2048, 2048, 16384, 16, 16): (1, 2, 16, 128, 1, 2), + (2048, 2048, 16384, 32, 32): (1, 4, 32, 128, 3, 2), + (2048, 2048, 16384, 64, 64): (1, 8, 64, 64, 3, 2), + (2048, 2048, 16384, 128, 128): (1, 4, 128, 64, 1, 4), + (2048, 2048, 32768, 16, 16): (3, 2, 16, 128, 1, 2), + (2048, 2048, 32768, 32, 32): (1, 1, 32, 128, 3, 2), + (2048, 2048, 32768, 64, 64): (1, 1, 64, 64, 3, 2), + (2048, 2048, 32768, 128, 128): (1, 4, 128, 64, 1, 4), + (2048, 2048, 65536, 16, 16): (1, 2, 16, 128, 1, 2), + (2048, 2048, 65536, 32, 32): (1, 4, 32, 128, 1, 2), + (2048, 2048, 65536, 64, 64): (1, 1, 64, 64, 3, 2), + (2048, 2048, 65536, 128, 128): (1, 2, 128, 64, 1, 4), + (2048, 2048, 131072, 16, 16): (4, 2, 16, 128, 1, 2), + (2048, 2048, 131072, 32, 32): (1, 1, 32, 128, 3, 2), + (2048, 2048, 131072, 64, 64): (1, 1, 64, 64, 3, 2), + (2048, 2048, 131072, 128, 128): (1, 2, 128, 64, 1, 4), + (4096, 4096, 256, 16, 16): (1, 1, 16, 64, 1, 2), + (4096, 4096, 256, 32, 32): (1, 1, 32, 64, 3, 4), + (4096, 4096, 256, 64, 64): (1, 1, 64, 64, 3, 4), + (4096, 4096, 256, 128, 128): (3, 4, 128, 32, 1, 4), + (4096, 4096, 512, 16, 16): (1, 2, 16, 128, 1, 2), + (4096, 4096, 512, 32, 32): (1, 2, 32, 64, 3, 2), + (4096, 4096, 512, 64, 64): (1, 4, 64, 64, 1, 4), + (4096, 4096, 512, 128, 128): (1, 4, 128, 64, 1, 4), + (4096, 4096, 1024, 16, 16): (1, 2, 16, 128, 1, 2), + (4096, 4096, 1024, 32, 32): (1, 8, 32, 64, 3, 2), + (4096, 4096, 1024, 64, 64): (1, 4, 64, 64, 1, 4), + (4096, 4096, 1024, 128, 128): (2, 4, 128, 64, 1, 4), + (4096, 4096, 2048, 16, 16): (1, 1, 16, 128, 1, 2), + (4096, 4096, 2048, 32, 32): (1, 4, 32, 128, 1, 4), + (4096, 4096, 2048, 64, 64): (1, 1, 64, 64, 3, 4), + (4096, 4096, 2048, 128, 128): (1, 16, 128, 64, 1, 4), + (4096, 4096, 4096, 16, 16): (1, 1, 16, 64, 3, 1), + (4096, 4096, 4096, 32, 32): (1, 4, 32, 64, 3, 2), + (4096, 4096, 4096, 64, 64): (1, 1, 64, 64, 3, 4), + (4096, 4096, 4096, 128, 128): (5, 1, 128, 64, 
1, 4), + (4096, 4096, 8192, 16, 16): (1, 1, 16, 128, 1, 2), + (4096, 4096, 8192, 32, 32): (1, 1, 32, 128, 3, 2), + (4096, 4096, 8192, 64, 64): (1, 1, 64, 64, 3, 4), + (4096, 4096, 8192, 128, 128): (2, 1, 128, 64, 1, 4), + (4096, 4096, 16384, 16, 16): (1, 1, 16, 128, 1, 2), + (4096, 4096, 16384, 32, 32): (1, 1, 32, 128, 3, 2), + (4096, 4096, 16384, 64, 64): (1, 1, 64, 64, 4, 4), + (4096, 4096, 16384, 128, 128): (2, 1, 128, 64, 1, 4), + (4096, 4096, 32768, 16, 16): (3, 1, 16, 128, 1, 2), + (4096, 4096, 32768, 32, 32): (1, 1, 32, 128, 3, 2), + (4096, 4096, 32768, 64, 64): (1, 1, 64, 64, 3, 4), + (4096, 4096, 32768, 128, 128): (2, 1, 128, 64, 1, 4), + (4096, 4096, 65536, 16, 16): (2, 2, 16, 128, 1, 2), + (4096, 4096, 65536, 32, 32): (1, 1, 32, 128, 4, 2), + (4096, 4096, 65536, 64, 64): (1, 1, 64, 64, 4, 4), + (4096, 4096, 65536, 128, 128): (2, 1, 128, 64, 1, 4), + (4096, 4096, 131072, 16, 16): (2, 1, 16, 128, 1, 2), + (4096, 4096, 131072, 32, 32): (1, 1, 32, 128, 3, 2), + (4096, 4096, 131072, 64, 64): (1, 1, 64, 64, 3, 4), + (4096, 4096, 131072, 128, 128): (2, 1, 128, 64, 1, 4), + (8192, 8192, 256, 16, 16): (1, 2, 16, 64, 1, 2), + (8192, 8192, 256, 32, 32): (1, 1, 32, 64, 1, 2), + (8192, 8192, 256, 64, 64): (1, 2, 64, 64, 1, 4), + (8192, 8192, 256, 128, 128): (3, 16, 128, 16, 1, 2), + (8192, 8192, 512, 16, 16): (1, 2, 16, 128, 1, 2), + (8192, 8192, 512, 32, 32): (1, 4, 32, 64, 3, 2), + (8192, 8192, 512, 64, 64): (2, 8, 64, 64, 4, 4), + (8192, 8192, 512, 128, 128): (1, 8, 128, 64, 1, 4), + (8192, 8192, 1024, 16, 16): (4, 2, 16, 128, 1, 2), + (8192, 8192, 1024, 32, 32): (1, 8, 32, 128, 1, 2), + (8192, 8192, 1024, 64, 64): (1, 16, 64, 64, 3, 2), + (8192, 8192, 1024, 128, 128): (2, 16, 128, 64, 2, 4), + (8192, 8192, 2048, 16, 16): (2, 1, 16, 64, 4, 1), + (8192, 8192, 2048, 32, 32): (1, 16, 32, 64, 5, 2), + (8192, 8192, 2048, 64, 64): (1, 16, 64, 64, 3, 2), + (8192, 8192, 2048, 128, 128): (2, 16, 128, 64, 2, 4), + (8192, 8192, 4096, 16, 16): (1, 1, 16, 64, 4, 1), + (8192, 8192, 4096, 32, 32): (1, 16, 32, 64, 5, 2), + (8192, 8192, 4096, 64, 64): (1, 16, 64, 64, 3, 2), + (8192, 8192, 4096, 128, 128): (2, 64, 128, 64, 2, 4), + (8192, 8192, 8192, 16, 16): (1, 1, 16, 64, 4, 1), + (8192, 8192, 8192, 32, 32): (1, 8, 32, 128, 5, 4), + (8192, 8192, 8192, 64, 64): (1, 8, 64, 64, 3, 2), + (8192, 8192, 8192, 128, 128): (2, 8, 128, 64, 1, 4), + (8192, 8192, 16384, 16, 16): (1, 1, 16, 64, 4, 1), + (8192, 8192, 16384, 32, 32): (1, 8, 32, 64, 5, 2), + (8192, 8192, 16384, 64, 64): (1, 8, 64, 64, 3, 2), + (8192, 8192, 16384, 128, 128): (1, 8, 128, 64, 1, 4), + (8192, 8192, 32768, 16, 16): (1, 1, 16, 64, 4, 1), + (8192, 8192, 32768, 32, 32): (1, 8, 32, 64, 5, 2), + (8192, 8192, 32768, 64, 64): (3, 8, 64, 64, 3, 2), + (8192, 8192, 32768, 128, 128): (2, 8, 128, 64, 1, 4), + (8192, 8192, 65536, 16, 16): (1, 1, 16, 64, 4, 1), + (8192, 8192, 65536, 32, 32): (5, 4, 32, 64, 3, 2), + (8192, 8192, 65536, 64, 64): (1, 8, 64, 64, 3, 2), + (8192, 8192, 65536, 128, 128): (2, 8, 128, 64, 1, 4), + (8192, 8192, 131072, 16, 16): (2, 1, 16, 64, 4, 1), + (8192, 8192, 131072, 32, 32): (1, 4, 32, 64, 5, 2), + (8192, 8192, 131072, 64, 64): (1, 4, 64, 128, 3, 4), + (8192, 8192, 131072, 128, 128): (2, 8, 128, 64, 1, 4), + (16384, 16384, 256, 16, 16): (1, 2, 16, 128, 1, 2), + (16384, 16384, 256, 32, 32): (1, 4, 32, 64, 3, 2), + (16384, 16384, 256, 64, 64): (2, 4, 64, 64, 4, 4), + (16384, 16384, 256, 128, 128): (1, 4, 128, 64, 1, 16), + (16384, 16384, 512, 16, 16): (1, 2, 16, 128, 3, 2), + (16384, 16384, 512, 32, 32): (1, 4, 32, 128, 5, 4), + 
(16384, 16384, 512, 64, 64): (1, 8, 64, 64, 3, 2), + (16384, 16384, 512, 128, 128): (2, 8, 128, 64, 1, 4), + (16384, 16384, 1024, 16, 16): (1, 2, 16, 128, 1, 2), + (16384, 16384, 1024, 32, 32): (1, 8, 32, 64, 5, 2), + (16384, 16384, 1024, 64, 64): (1, 16, 64, 64, 3, 2), + (16384, 16384, 1024, 128, 128): (5, 16, 128, 64, 2, 4), + (16384, 16384, 2048, 16, 16): (1, 2, 16, 128, 1, 2), + (16384, 16384, 2048, 32, 32): (1, 8, 32, 64, 5, 2), + (16384, 16384, 2048, 64, 64): (1, 16, 64, 64, 3, 2), + (16384, 16384, 2048, 128, 128): (4, 32, 128, 64, 2, 4), + (16384, 16384, 4096, 16, 16): (3, 2, 16, 128, 1, 2), + (16384, 16384, 4096, 32, 32): (1, 4, 32, 64, 5, 2), + (16384, 16384, 4096, 64, 64): (2, 16, 64, 64, 3, 2), + (16384, 16384, 4096, 128, 128): (3, 32, 128, 64, 2, 4), + (16384, 16384, 8192, 16, 16): (1, 2, 16, 128, 1, 2), + (16384, 16384, 8192, 32, 32): (1, 4, 32, 64, 5, 2), + (16384, 16384, 8192, 64, 64): (4, 8, 64, 64, 3, 2), + (16384, 16384, 8192, 128, 128): (5, 8, 128, 64, 1, 4), + (16384, 16384, 16384, 16, 16): (1, 2, 16, 128, 1, 2), + (16384, 16384, 16384, 32, 32): (1, 4, 32, 64, 5, 2), + (16384, 16384, 16384, 64, 64): (2, 4, 64, 128, 3, 4), + (16384, 16384, 16384, 128, 128): (4, 8, 128, 64, 1, 4), + (16384, 16384, 32768, 16, 16): (4, 2, 16, 128, 1, 2), + (16384, 16384, 32768, 32, 32): (1, 4, 32, 64, 5, 2), + (16384, 16384, 32768, 64, 64): (1, 8, 64, 64, 3, 2), + (16384, 16384, 32768, 128, 128): (2, 512, 128, 64, 2, 4), + (16384, 16384, 65536, 16, 16): (3, 2, 16, 128, 1, 2), + (16384, 16384, 65536, 32, 32): (1, 4, 32, 64, 5, 2), + (16384, 16384, 65536, 64, 64): (1, 4, 64, 128, 3, 4), + (16384, 16384, 65536, 128, 128): (2, 1024, 128, 64, 2, 4), + (16384, 16384, 131072, 16, 16): (1, 2, 16, 128, 1, 2), + (16384, 16384, 131072, 32, 32): (1, 4, 32, 64, 5, 2), + (16384, 16384, 131072, 64, 64): (3, 4, 64, 128, 3, 4), + (16384, 16384, 131072, 128, 128): (4, 2048, 128, 64, 2, 4), + }, + ("scatter_mm", "NVIDIA A100-SXM4-80GB", (0, torch.float16, 0.5)): { + (256, 256, 256, 16, 16): (5, 4, 16, 16, 1, 4), + (256, 256, 256, 32, 32): (5, 2, 32, 16, 1, 4), + (256, 256, 256, 64, 64): (4, 1, 32, 32, 1, 8), + (256, 256, 256, 128, 128): (2, 1, 32, 32, 1, 4), + (256, 256, 512, 16, 16): (2, 2, 16, 32, 1, 4), + (256, 256, 512, 32, 32): (4, 8, 32, 32, 1, 8), + (256, 256, 512, 64, 64): (4, 8, 32, 64, 1, 4), + (256, 256, 512, 128, 128): (4, 8, 32, 64, 1, 4), + (256, 256, 1024, 16, 16): (4, 2, 16, 64, 1, 2), + (256, 256, 1024, 32, 32): (4, 16, 32, 64, 1, 2), + (256, 256, 1024, 64, 64): (4, 16, 32, 64, 1, 4), + (256, 256, 1024, 128, 128): (4, 16, 64, 64, 1, 8), + (256, 256, 2048, 16, 16): (2, 16, 16, 64, 1, 8), + (256, 256, 2048, 32, 32): (4, 16, 32, 64, 1, 2), + (256, 256, 2048, 64, 64): (4, 16, 32, 64, 1, 4), + (256, 256, 2048, 128, 128): (4, 16, 64, 64, 1, 4), + (256, 256, 4096, 16, 16): (4, 32, 16, 64, 1, 1), + (256, 256, 4096, 32, 32): (2, 64, 32, 64, 1, 2), + (256, 256, 4096, 64, 64): (4, 64, 64, 64, 1, 4), + (256, 256, 4096, 128, 128): (4, 32, 64, 64, 1, 4), + (256, 256, 8192, 16, 16): (4, 64, 16, 64, 1, 1), + (256, 256, 8192, 32, 32): (4, 128, 32, 64, 1, 2), + (256, 256, 8192, 64, 64): (4, 64, 64, 64, 1, 4), + (256, 256, 8192, 128, 128): (4, 64, 64, 64, 1, 4), + (256, 256, 16384, 16, 16): (4, 128, 16, 64, 1, 1), + (256, 256, 16384, 32, 32): (2, 128, 32, 64, 1, 2), + (256, 256, 16384, 64, 64): (4, 32, 32, 128, 1, 4), + (256, 256, 16384, 128, 128): (4, 16, 64, 64, 1, 4), + (256, 256, 32768, 16, 16): (4, 64, 16, 64, 1, 1), + (256, 256, 32768, 32, 32): (2, 256, 32, 64, 1, 2), + (256, 256, 32768, 64, 64): (4, 32, 
32, 128, 1, 4), + (256, 256, 32768, 128, 128): (4, 32, 64, 64, 1, 4), + (256, 256, 65536, 16, 16): (4, 128, 16, 64, 1, 1), + (256, 256, 65536, 32, 32): (4, 1, 32, 64, 1, 2), + (256, 256, 65536, 64, 64): (2, 1, 64, 64, 1, 2), + (256, 256, 65536, 128, 128): (4, 32, 64, 64, 1, 4), + (256, 256, 131072, 16, 16): (4, 64, 16, 64, 1, 1), + (256, 256, 131072, 32, 32): (2, 1, 32, 64, 1, 2), + (256, 256, 131072, 64, 64): (4, 32, 32, 128, 1, 4), + (256, 256, 131072, 128, 128): (4, 32, 64, 64, 1, 4), + (512, 512, 256, 16, 16): (4, 16, 16, 16, 1, 4), + (512, 512, 256, 32, 32): (2, 4, 32, 16, 1, 4), + (512, 512, 256, 64, 64): (2, 16, 64, 16, 3, 8), + (512, 512, 256, 128, 128): (4, 16, 64, 16, 1, 4), + (512, 512, 512, 16, 16): (1, 1, 16, 64, 1, 8), + (512, 512, 512, 32, 32): (2, 4, 16, 32, 1, 1), + (512, 512, 512, 64, 64): (2, 1, 32, 32, 1, 2), + (512, 512, 512, 128, 128): (4, 8, 32, 64, 1, 4), + (512, 512, 1024, 16, 16): (2, 8, 16, 64, 1, 8), + (512, 512, 1024, 32, 32): (4, 16, 32, 64, 1, 2), + (512, 512, 1024, 64, 64): (4, 16, 64, 64, 1, 4), + (512, 512, 1024, 128, 128): (2, 8, 64, 64, 1, 4), + (512, 512, 2048, 16, 16): (4, 16, 16, 64, 1, 4), + (512, 512, 2048, 32, 32): (4, 16, 32, 64, 1, 2), + (512, 512, 2048, 64, 64): (4, 16, 64, 64, 1, 8), + (512, 512, 2048, 128, 128): (4, 16, 64, 64, 1, 4), + (512, 512, 4096, 16, 16): (4, 32, 16, 128, 1, 2), + (512, 512, 4096, 32, 32): (4, 32, 32, 64, 1, 2), + (512, 512, 4096, 64, 64): (4, 32, 64, 64, 1, 4), + (512, 512, 4096, 128, 128): (4, 32, 64, 64, 1, 4), + (512, 512, 8192, 16, 16): (2, 32, 16, 128, 1, 2), + (512, 512, 8192, 32, 32): (4, 64, 32, 64, 1, 2), + (512, 512, 8192, 64, 64): (4, 128, 64, 64, 1, 2), + (512, 512, 8192, 128, 128): (4, 64, 64, 64, 1, 4), + (512, 512, 16384, 16, 16): (4, 32, 16, 64, 1, 1), + (512, 512, 16384, 32, 32): (4, 64, 32, 64, 1, 2), + (512, 512, 16384, 64, 64): (4, 16, 64, 64, 1, 4), + (512, 512, 16384, 128, 128): (4, 32, 64, 64, 1, 4), + (512, 512, 32768, 16, 16): (7, 16, 16, 128, 1, 2), + (512, 512, 32768, 32, 32): (4, 64, 32, 64, 1, 2), + (512, 512, 32768, 64, 64): (2, 32, 64, 64, 3, 2), + (512, 512, 32768, 128, 128): (2, 32, 64, 64, 1, 4), + (512, 512, 65536, 16, 16): (2, 32, 16, 64, 1, 1), + (512, 512, 65536, 32, 32): (4, 64, 32, 64, 1, 2), + (512, 512, 65536, 64, 64): (3, 32, 64, 64, 3, 2), + (512, 512, 65536, 128, 128): (4, 16, 64, 64, 1, 4), + (512, 512, 131072, 16, 16): (3, 32, 16, 128, 1, 2), + (512, 512, 131072, 32, 32): (4, 64, 32, 64, 1, 2), + (512, 512, 131072, 64, 64): (2, 32, 64, 64, 3, 2), + (512, 512, 131072, 128, 128): (3, 1, 64, 64, 1, 4), + (1024, 1024, 256, 16, 16): (4, 16, 16, 16, 1, 4), + (1024, 1024, 256, 32, 32): (4, 16, 32, 16, 1, 4), + (1024, 1024, 256, 64, 64): (4, 4, 64, 32, 1, 16), + (1024, 1024, 256, 128, 128): (4, 16, 64, 16, 1, 8), + (1024, 1024, 512, 16, 16): (2, 8, 16, 64, 1, 8), + (1024, 1024, 512, 32, 32): (3, 2, 32, 64, 1, 2), + (1024, 1024, 512, 64, 64): (4, 8, 32, 64, 1, 8), + (1024, 1024, 512, 128, 128): (4, 8, 64, 64, 1, 8), + (1024, 1024, 1024, 16, 16): (2, 2, 16, 64, 1, 2), + (1024, 1024, 1024, 32, 32): (2, 8, 32, 64, 1, 2), + (1024, 1024, 1024, 64, 64): (2, 8, 32, 128, 1, 4), + (1024, 1024, 1024, 128, 128): (2, 8, 64, 64, 1, 4), + (1024, 1024, 2048, 16, 16): (2, 16, 16, 128, 3, 2), + (1024, 1024, 2048, 32, 32): (4, 32, 32, 64, 1, 2), + (1024, 1024, 2048, 64, 64): (4, 16, 64, 64, 1, 4), + (1024, 1024, 2048, 128, 128): (4, 32, 64, 64, 1, 4), + (1024, 1024, 4096, 16, 16): (4, 16, 16, 128, 1, 2), + (1024, 1024, 4096, 32, 32): (3, 32, 32, 64, 1, 2), + (1024, 1024, 4096, 64, 64): (4, 32, 64, 
64, 1, 4), + (1024, 1024, 4096, 128, 128): (4, 32, 64, 64, 1, 4), + (1024, 1024, 8192, 16, 16): (5, 16, 16, 128, 1, 2), + (1024, 1024, 8192, 32, 32): (2, 32, 32, 64, 3, 2), + (1024, 1024, 8192, 64, 64): (1, 16, 64, 64, 3, 2), + (1024, 1024, 8192, 128, 128): (4, 32, 64, 64, 1, 4), + (1024, 1024, 16384, 16, 16): (4, 16, 16, 128, 1, 2), + (1024, 1024, 16384, 32, 32): (1, 32, 32, 64, 3, 2), + (1024, 1024, 16384, 64, 64): (4, 16, 64, 64, 3, 2), + (1024, 1024, 16384, 128, 128): (4, 32, 128, 64, 1, 4), + (1024, 1024, 32768, 16, 16): (3, 16, 16, 128, 1, 2), + (1024, 1024, 32768, 32, 32): (1, 8, 32, 64, 3, 2), + (1024, 1024, 32768, 64, 64): (4, 16, 64, 64, 3, 2), + (1024, 1024, 32768, 128, 128): (4, 8, 128, 64, 2, 4), + (1024, 1024, 65536, 16, 16): (1, 2, 16, 128, 1, 2), + (1024, 1024, 65536, 32, 32): (2, 4, 32, 64, 3, 2), + (1024, 1024, 65536, 64, 64): (5, 16, 64, 64, 3, 2), + (1024, 1024, 65536, 128, 128): (5, 8, 128, 64, 2, 4), + (1024, 1024, 131072, 16, 16): (5, 2, 16, 128, 1, 2), + (1024, 1024, 131072, 32, 32): (1, 2, 32, 64, 3, 2), + (1024, 1024, 131072, 64, 64): (5, 16, 64, 64, 3, 2), + (1024, 1024, 131072, 128, 128): (2, 1, 128, 64, 2, 4), + (2048, 2048, 256, 16, 16): (4, 4, 16, 64, 1, 8), + (2048, 2048, 256, 32, 32): (4, 8, 32, 32, 1, 8), + (2048, 2048, 256, 64, 64): (4, 16, 64, 16, 1, 8), + (2048, 2048, 256, 128, 128): (4, 4, 128, 32, 3, 8), + (2048, 2048, 512, 16, 16): (2, 2, 16, 64, 1, 2), + (2048, 2048, 512, 32, 32): (2, 4, 32, 64, 3, 2), + (2048, 2048, 512, 64, 64): (4, 4, 64, 64, 1, 8), + (2048, 2048, 512, 128, 128): (4, 8, 64, 64, 1, 4), + (2048, 2048, 1024, 16, 16): (1, 8, 16, 64, 1, 2), + (2048, 2048, 1024, 32, 32): (2, 16, 32, 64, 3, 2), + (2048, 2048, 1024, 64, 64): (4, 8, 64, 64, 1, 4), + (2048, 2048, 1024, 128, 128): (4, 8, 128, 64, 1, 4), + (2048, 2048, 2048, 16, 16): (5, 4, 16, 128, 1, 2), + (2048, 2048, 2048, 32, 32): (1, 16, 32, 64, 3, 2), + (2048, 2048, 2048, 64, 64): (2, 8, 64, 64, 1, 4), + (2048, 2048, 2048, 128, 128): (2, 8, 128, 64, 1, 4), + (2048, 2048, 4096, 16, 16): (4, 2, 16, 128, 1, 2), + (2048, 2048, 4096, 32, 32): (2, 16, 32, 64, 3, 2), + (2048, 2048, 4096, 64, 64): (2, 8, 64, 64, 3, 2), + (2048, 2048, 4096, 128, 128): (4, 8, 128, 64, 1, 4), + (2048, 2048, 8192, 16, 16): (5, 4, 16, 128, 1, 2), + (2048, 2048, 8192, 32, 32): (2, 8, 32, 64, 3, 2), + (2048, 2048, 8192, 64, 64): (4, 8, 64, 64, 3, 2), + (2048, 2048, 8192, 128, 128): (4, 8, 128, 64, 1, 4), + (2048, 2048, 16384, 16, 16): (3, 2, 16, 128, 1, 2), + (2048, 2048, 16384, 32, 32): (2, 4, 32, 128, 3, 2), + (2048, 2048, 16384, 64, 64): (4, 8, 64, 64, 3, 2), + (2048, 2048, 16384, 128, 128): (4, 4, 128, 64, 1, 4), + (2048, 2048, 32768, 16, 16): (3, 2, 16, 128, 1, 2), + (2048, 2048, 32768, 32, 32): (3, 4, 32, 128, 3, 2), + (2048, 2048, 32768, 64, 64): (6, 4, 64, 64, 3, 2), + (2048, 2048, 32768, 128, 128): (3, 4, 128, 64, 1, 4), + (2048, 2048, 65536, 16, 16): (6, 2, 16, 128, 1, 2), + (2048, 2048, 65536, 32, 32): (1, 2, 32, 128, 1, 2), + (2048, 2048, 65536, 64, 64): (5, 4, 64, 64, 3, 2), + (2048, 2048, 65536, 128, 128): (5, 1, 128, 64, 2, 4), + (2048, 2048, 131072, 16, 16): (3, 2, 16, 128, 1, 2), + (2048, 2048, 131072, 32, 32): (2, 1, 32, 128, 3, 2), + (2048, 2048, 131072, 64, 64): (4, 1, 64, 64, 3, 2), + (2048, 2048, 131072, 128, 128): (3, 1, 128, 64, 2, 4), + (4096, 4096, 256, 16, 16): (5, 8, 16, 32, 1, 4), + (4096, 4096, 256, 32, 32): (4, 16, 32, 16, 2, 4), + (4096, 4096, 256, 64, 64): (2, 1, 64, 64, 3, 4), + (4096, 4096, 256, 128, 128): (4, 4, 128, 32, 1, 4), + (4096, 4096, 512, 16, 16): (4, 2, 16, 128, 1, 2), + 
(4096, 4096, 512, 32, 32): (4, 8, 32, 64, 1, 2), + (4096, 4096, 512, 64, 64): (4, 4, 64, 64, 1, 4), + (4096, 4096, 512, 128, 128): (4, 8, 128, 64, 2, 4), + (4096, 4096, 1024, 16, 16): (1, 2, 16, 128, 1, 2), + (4096, 4096, 1024, 32, 32): (6, 8, 32, 64, 3, 2), + (4096, 4096, 1024, 64, 64): (2, 16, 64, 64, 4, 4), + (4096, 4096, 1024, 128, 128): (2, 4, 128, 64, 2, 4), + (4096, 4096, 2048, 16, 16): (3, 1, 16, 128, 1, 2), + (4096, 4096, 2048, 32, 32): (1, 4, 32, 64, 5, 2), + (4096, 4096, 2048, 64, 64): (3, 16, 64, 64, 3, 2), + (4096, 4096, 2048, 128, 128): (4, 32, 128, 64, 2, 4), + (4096, 4096, 4096, 16, 16): (1, 2, 16, 128, 1, 2), + (4096, 4096, 4096, 32, 32): (1, 4, 32, 64, 3, 2), + (4096, 4096, 4096, 64, 64): (1, 1, 64, 64, 4, 4), + (4096, 4096, 4096, 128, 128): (2, 1, 128, 128, 1, 8), + (4096, 4096, 8192, 16, 16): (3, 1, 16, 128, 1, 2), + (4096, 4096, 8192, 32, 32): (2, 2, 32, 64, 5, 2), + (4096, 4096, 8192, 64, 64): (4, 16, 64, 64, 3, 2), + (4096, 4096, 8192, 128, 128): (4, 16, 128, 64, 2, 4), + (4096, 4096, 16384, 16, 16): (1, 2, 16, 128, 1, 2), + (4096, 4096, 16384, 32, 32): (4, 2, 32, 64, 5, 2), + (4096, 4096, 16384, 64, 64): (4, 16, 64, 64, 3, 2), + (4096, 4096, 16384, 128, 128): (4, 16, 128, 64, 2, 4), + (4096, 4096, 32768, 16, 16): (3, 1, 16, 128, 1, 2), + (4096, 4096, 32768, 32, 32): (3, 1, 32, 128, 1, 4), + (4096, 4096, 32768, 64, 64): (3, 1, 64, 64, 3, 4), + (4096, 4096, 32768, 128, 128): (5, 16, 128, 64, 2, 4), + (4096, 4096, 65536, 16, 16): (5, 1, 16, 128, 1, 2), + (4096, 4096, 65536, 32, 32): (5, 1, 32, 128, 1, 4), + (4096, 4096, 65536, 64, 64): (1, 1, 64, 64, 3, 4), + (4096, 4096, 65536, 128, 128): (3, 16, 128, 64, 2, 4), + (4096, 4096, 131072, 16, 16): (3, 1, 16, 128, 1, 2), + (4096, 4096, 131072, 32, 32): (3, 1, 32, 128, 3, 2), + (4096, 4096, 131072, 64, 64): (2, 1, 64, 64, 3, 4), + (4096, 4096, 131072, 128, 128): (1, 1, 128, 64, 1, 4), + (8192, 8192, 256, 16, 16): (4, 16, 16, 16, 1, 4), + (8192, 8192, 256, 32, 32): (1, 16, 32, 16, 4, 4), + (8192, 8192, 256, 64, 64): (4, 16, 64, 16, 3, 8), + (8192, 8192, 256, 128, 128): (4, 16, 128, 16, 1, 2), + (8192, 8192, 512, 16, 16): (2, 8, 16, 64, 1, 4), + (8192, 8192, 512, 32, 32): (4, 8, 32, 64, 3, 2), + (8192, 8192, 512, 64, 64): (2, 8, 64, 64, 4, 4), + (8192, 8192, 512, 128, 128): (4, 8, 128, 64, 2, 4), + (8192, 8192, 1024, 16, 16): (4, 16, 16, 64, 1, 8), + (8192, 8192, 1024, 32, 32): (2, 8, 32, 64, 5, 2), + (8192, 8192, 1024, 64, 64): (1, 16, 64, 64, 3, 2), + (8192, 8192, 1024, 128, 128): (5, 16, 128, 64, 2, 4), + (8192, 8192, 2048, 16, 16): (7, 2, 16, 128, 1, 2), + (8192, 8192, 2048, 32, 32): (1, 16, 32, 64, 5, 2), + (8192, 8192, 2048, 64, 64): (4, 16, 64, 64, 3, 2), + (8192, 8192, 2048, 128, 128): (6, 16, 128, 64, 2, 4), + (8192, 8192, 4096, 16, 16): (4, 2, 16, 128, 1, 2), + (8192, 8192, 4096, 32, 32): (2, 8, 32, 64, 5, 2), + (8192, 8192, 4096, 64, 64): (3, 16, 64, 64, 3, 2), + (8192, 8192, 4096, 128, 128): (3, 64, 128, 64, 2, 4), + (8192, 8192, 8192, 16, 16): (4, 2, 16, 128, 1, 2), + (8192, 8192, 8192, 32, 32): (1, 4, 32, 128, 5, 4), + (8192, 8192, 8192, 64, 64): (4, 4, 64, 64, 1, 4), + (8192, 8192, 8192, 128, 128): (2, 2, 128, 128, 3, 8), + (8192, 8192, 16384, 16, 16): (1, 2, 16, 128, 1, 2), + (8192, 8192, 16384, 32, 32): (4, 8, 32, 64, 5, 2), + (8192, 8192, 16384, 64, 64): (5, 8, 64, 64, 3, 2), + (8192, 8192, 16384, 128, 128): (3, 16, 128, 64, 2, 4), + (8192, 8192, 32768, 16, 16): (7, 2, 16, 128, 1, 2), + (8192, 8192, 32768, 32, 32): (3, 4, 32, 64, 3, 2), + (8192, 8192, 32768, 64, 64): (2, 8, 64, 64, 3, 2), + (8192, 8192, 
32768, 128, 128): (6, 16, 128, 64, 2, 4), + (8192, 8192, 65536, 16, 16): (9, 2, 16, 128, 1, 2), + (8192, 8192, 65536, 32, 32): (7, 4, 32, 64, 5, 2), + (8192, 8192, 65536, 64, 64): (4, 8, 64, 64, 3, 2), + (8192, 8192, 65536, 128, 128): (3, 16, 128, 64, 2, 4), + (8192, 8192, 131072, 16, 16): (9, 2, 16, 128, 1, 2), + (8192, 8192, 131072, 32, 32): (1, 8, 32, 64, 5, 2), + (8192, 8192, 131072, 64, 64): (1, 8, 64, 64, 3, 2), + (8192, 8192, 131072, 128, 128): (4, 16, 128, 64, 2, 4), + (16384, 16384, 256, 16, 16): (5, 16, 16, 16, 1, 4), + (16384, 16384, 256, 32, 32): (4, 16, 32, 16, 4, 4), + (16384, 16384, 256, 64, 64): (4, 16, 64, 16, 3, 8), + (16384, 16384, 256, 128, 128): (4, 16, 128, 16, 1, 2), + (16384, 16384, 512, 16, 16): (2, 8, 16, 64, 1, 4), + (16384, 16384, 512, 32, 32): (1, 4, 32, 64, 5, 2), + (16384, 16384, 512, 64, 64): (4, 8, 64, 64, 1, 4), + (16384, 16384, 512, 128, 128): (3, 8, 128, 64, 2, 4), + (16384, 16384, 1024, 16, 16): (4, 2, 16, 128, 1, 2), + (16384, 16384, 1024, 32, 32): (4, 8, 32, 64, 5, 2), + (16384, 16384, 1024, 64, 64): (6, 16, 64, 64, 3, 2), + (16384, 16384, 1024, 128, 128): (3, 16, 128, 64, 2, 4), + (16384, 16384, 2048, 16, 16): (3, 2, 16, 128, 1, 2), + (16384, 16384, 2048, 32, 32): (1, 8, 32, 64, 5, 2), + (16384, 16384, 2048, 64, 64): (5, 16, 64, 64, 3, 2), + (16384, 16384, 2048, 128, 128): (2, 32, 128, 64, 2, 4), + (16384, 16384, 4096, 16, 16): (2, 2, 16, 128, 1, 2), + (16384, 16384, 4096, 32, 32): (1, 4, 32, 64, 3, 2), + (16384, 16384, 4096, 64, 64): (2, 8, 64, 64, 3, 2), + (16384, 16384, 4096, 128, 128): (3, 16, 128, 64, 2, 4), + (16384, 16384, 8192, 16, 16): (3, 2, 16, 128, 1, 2), + (16384, 16384, 8192, 32, 32): (2, 4, 32, 64, 5, 2), + (16384, 16384, 8192, 64, 64): (4, 8, 64, 64, 3, 2), + (16384, 16384, 8192, 128, 128): (8, 32, 128, 64, 2, 4), + (16384, 16384, 16384, 16, 16): (1, 2, 16, 256, 1, 4), + (16384, 16384, 16384, 32, 32): (1, 4, 32, 128, 3, 4), + (16384, 16384, 16384, 64, 64): (5, 4, 64, 64, 1, 4), + (16384, 16384, 16384, 128, 128): (4, 8, 128, 64, 2, 4), + (16384, 16384, 32768, 16, 16): (2, 2, 16, 128, 1, 2), + (16384, 16384, 32768, 32, 32): (1, 4, 32, 64, 3, 2), + (16384, 16384, 32768, 64, 64): (5, 4, 64, 64, 1, 4), + (16384, 16384, 32768, 128, 128): (5, 8, 128, 64, 2, 4), + (16384, 16384, 65536, 16, 16): (8, 2, 16, 128, 1, 2), + (16384, 16384, 65536, 32, 32): (6, 4, 32, 64, 5, 2), + (16384, 16384, 65536, 64, 64): (2, 4, 64, 64, 1, 4), + (16384, 16384, 65536, 128, 128): (4, 8, 128, 64, 2, 4), + (16384, 16384, 131072, 16, 16): (3, 1, 16, 128, 1, 2), + (16384, 16384, 131072, 32, 32): (1, 4, 32, 64, 3, 2), + (16384, 16384, 131072, 64, 64): (4, 4, 64, 64, 1, 4), + (16384, 16384, 131072, 128, 128): (1, 8, 128, 64, 2, 4), + (32768, 32768, 256, 16, 16): (4, 16, 16, 16, 1, 4), + (32768, 32768, 512, 16, 16): (4, 2, 16, 128, 1, 2), + (32768, 32768, 1024, 16, 16): (3, 2, 16, 128, 1, 2), + (32768, 32768, 2048, 16, 16): (4, 2, 16, 128, 1, 2), + (32768, 32768, 4096, 16, 16): (5, 4, 16, 64, 1, 1), + (32768, 32768, 8192, 16, 16): (4, 4, 16, 64, 1, 1), + (32768, 32768, 16384, 16, 16): (4, 4, 16, 64, 1, 1), + (32768, 32768, 32768, 16, 16): (5, 4, 16, 64, 1, 1), + }, + ("scatter_mm", "NVIDIA A100-SXM4-80GB", (0, torch.float32, 0.5)): { + (256, 256, 256, 16, 16): (1, 1, 16, 16, 1, 8), + (256, 256, 256, 32, 32): (1, 1, 16, 16, 1, 4), + (256, 256, 256, 64, 64): (1, 1, 16, 16, 1, 4), + (256, 256, 256, 128, 128): (1, 1, 16, 16, 1, 1), + (256, 256, 512, 16, 16): (1, 1, 16, 16, 1, 4), + (256, 256, 512, 32, 32): (1, 16, 16, 16, 1, 1), + (256, 256, 512, 64, 64): (1, 1, 16, 16, 1, 
1), + (256, 256, 512, 128, 128): (1, 1, 32, 32, 1, 4), + (256, 256, 1024, 16, 16): (1, 1, 16, 32, 1, 2), + (256, 256, 1024, 32, 32): (1, 4, 16, 16, 1, 1), + (256, 256, 1024, 64, 64): (1, 1, 32, 32, 1, 4), + (256, 256, 1024, 128, 128): (1, 1, 32, 32, 1, 4), + (256, 256, 2048, 16, 16): (1, 2, 16, 32, 1, 2), + (256, 256, 2048, 32, 32): (1, 1, 16, 32, 1, 2), + (256, 256, 2048, 64, 64): (2, 1, 16, 32, 1, 2), + (256, 256, 2048, 128, 128): (1, 1, 16, 16, 1, 1), + (256, 256, 4096, 16, 16): (1, 1, 16, 32, 1, 2), + (256, 256, 4096, 32, 32): (1, 1, 16, 32, 1, 2), + (256, 256, 4096, 64, 64): (1, 1, 32, 32, 1, 4), + (256, 256, 4096, 128, 128): (3, 1, 32, 64, 1, 4), + (256, 256, 8192, 16, 16): (1, 32, 16, 64, 1, 2), + (256, 256, 8192, 32, 32): (1, 1, 32, 64, 1, 4), + (256, 256, 8192, 64, 64): (1, 1, 32, 64, 1, 4), + (256, 256, 8192, 128, 128): (2, 1, 64, 32, 1, 4), + (256, 256, 16384, 16, 16): (1, 1, 16, 64, 1, 2), + (256, 256, 16384, 32, 32): (1, 1, 32, 64, 1, 4), + (256, 256, 16384, 64, 64): (1, 128, 64, 64, 1, 4), + (256, 256, 16384, 128, 128): (2, 1, 64, 32, 1, 4), + (256, 256, 32768, 16, 16): (2, 128, 16, 64, 1, 1), + (256, 256, 32768, 32, 32): (1, 1, 32, 64, 1, 4), + (256, 256, 32768, 64, 64): (1, 128, 64, 64, 1, 4), + (256, 256, 32768, 128, 128): (2, 1, 64, 64, 1, 4), + (256, 256, 65536, 16, 16): (1, 1, 16, 64, 1, 2), + (256, 256, 65536, 32, 32): (1, 1, 32, 64, 1, 4), + (256, 256, 65536, 64, 64): (2, 1, 64, 64, 1, 4), + (256, 256, 65536, 128, 128): (1, 1, 128, 32, 1, 4), + (256, 256, 131072, 16, 16): (3, 128, 16, 64, 1, 1), + (256, 256, 131072, 32, 32): (1, 1, 32, 64, 1, 4), + (256, 256, 131072, 64, 64): (2, 1, 64, 64, 1, 4), + (256, 256, 131072, 128, 128): (1, 8192, 64, 16, 1, 4), + (512, 512, 256, 16, 16): (1, 2, 16, 16, 1, 1), + (512, 512, 256, 32, 32): (1, 4, 16, 16, 1, 1), + (512, 512, 256, 64, 64): (1, 16, 16, 16, 1, 1), + (512, 512, 256, 128, 128): (1, 1, 16, 32, 1, 4), + (512, 512, 512, 16, 16): (1, 8, 16, 32, 1, 2), + (512, 512, 512, 32, 32): (1, 8, 16, 32, 1, 2), + (512, 512, 512, 64, 64): (1, 2, 16, 32, 1, 2), + (512, 512, 512, 128, 128): (1, 1, 32, 32, 1, 4), + (512, 512, 1024, 16, 16): (1, 1, 16, 32, 1, 2), + (512, 512, 1024, 32, 32): (1, 1, 16, 32, 1, 2), + (512, 512, 1024, 64, 64): (1, 1, 16, 32, 1, 2), + (512, 512, 1024, 128, 128): (1, 1, 64, 32, 1, 4), + (512, 512, 2048, 16, 16): (1, 16, 16, 64, 1, 2), + (512, 512, 2048, 32, 32): (1, 1, 32, 32, 1, 4), + (512, 512, 2048, 64, 64): (1, 1, 32, 32, 1, 4), + (512, 512, 2048, 128, 128): (2, 1, 32, 32, 1, 4), + (512, 512, 4096, 16, 16): (2, 64, 16, 64, 1, 1), + (512, 512, 4096, 32, 32): (1, 64, 32, 64, 1, 4), + (512, 512, 4096, 64, 64): (1, 1, 32, 32, 1, 4), + (512, 512, 4096, 128, 128): (1, 1, 64, 32, 1, 4), + (512, 512, 8192, 16, 16): (2, 64, 16, 64, 1, 1), + (512, 512, 8192, 32, 32): (1, 256, 32, 32, 1, 1), + (512, 512, 8192, 64, 64): (1, 64, 64, 64, 1, 4), + (512, 512, 8192, 128, 128): (2, 1, 64, 32, 1, 8), + (512, 512, 16384, 16, 16): (2, 64, 16, 64, 1, 1), + (512, 512, 16384, 32, 32): (1, 128, 32, 32, 1, 1), + (512, 512, 16384, 64, 64): (1, 64, 64, 64, 1, 4), + (512, 512, 16384, 128, 128): (3, 1, 64, 32, 1, 8), + (512, 512, 32768, 16, 16): (2, 64, 16, 64, 1, 1), + (512, 512, 32768, 32, 32): (1, 128, 32, 32, 1, 1), + (512, 512, 32768, 64, 64): (1, 64, 64, 64, 1, 4), + (512, 512, 32768, 128, 128): (2, 1, 64, 32, 1, 8), + (512, 512, 65536, 16, 16): (2, 32, 16, 64, 1, 1), + (512, 512, 65536, 32, 32): (1, 128, 32, 32, 1, 1), + (512, 512, 65536, 64, 64): (1, 64, 64, 64, 1, 4), + (512, 512, 65536, 128, 128): (2, 1, 64, 32, 1, 8), + (512, 
512, 131072, 16, 16): (2, 32, 16, 64, 1, 1), + (512, 512, 131072, 32, 32): (1, 128, 32, 32, 1, 1), + (512, 512, 131072, 64, 64): (3, 64, 64, 64, 1, 4), + (512, 512, 131072, 128, 128): (1, 8192, 64, 16, 1, 4), + (1024, 1024, 256, 16, 16): (1, 4, 16, 32, 1, 2), + (1024, 1024, 256, 32, 32): (1, 4, 16, 32, 1, 2), + (1024, 1024, 256, 64, 64): (1, 1, 16, 32, 1, 2), + (1024, 1024, 256, 128, 128): (1, 1, 16, 16, 1, 1), + (1024, 1024, 512, 16, 16): (1, 8, 16, 32, 1, 2), + (1024, 1024, 512, 32, 32): (1, 8, 16, 32, 1, 1), + (1024, 1024, 512, 64, 64): (1, 8, 32, 32, 1, 4), + (1024, 1024, 512, 128, 128): (2, 1, 32, 32, 1, 4), + (1024, 1024, 1024, 16, 16): (1, 16, 16, 32, 1, 2), + (1024, 1024, 1024, 32, 32): (1, 16, 32, 64, 1, 4), + (1024, 1024, 1024, 64, 64): (1, 16, 32, 64, 1, 4), + (1024, 1024, 1024, 128, 128): (1, 1, 32, 32, 1, 4), + (1024, 1024, 2048, 16, 16): (2, 32, 16, 64, 1, 1), + (1024, 1024, 2048, 32, 32): (1, 32, 32, 64, 1, 4), + (1024, 1024, 2048, 64, 64): (1, 32, 64, 64, 1, 4), + (1024, 1024, 2048, 128, 128): (1, 1, 32, 64, 1, 4), + (1024, 1024, 4096, 16, 16): (2, 16, 16, 64, 1, 1), + (1024, 1024, 4096, 32, 32): (1, 64, 32, 32, 1, 1), + (1024, 1024, 4096, 64, 64): (1, 64, 64, 64, 1, 4), + (1024, 1024, 4096, 128, 128): (2, 64, 64, 32, 1, 8), + (1024, 1024, 8192, 16, 16): (2, 16, 16, 64, 1, 1), + (1024, 1024, 8192, 32, 32): (1, 64, 32, 32, 1, 1), + (1024, 1024, 8192, 64, 64): (1, 64, 64, 64, 1, 4), + (1024, 1024, 8192, 128, 128): (4, 1, 32, 64, 1, 4), + (1024, 1024, 16384, 16, 16): (2, 16, 16, 64, 1, 1), + (1024, 1024, 16384, 32, 32): (1, 64, 32, 32, 1, 1), + (1024, 1024, 16384, 64, 64): (1, 32, 64, 64, 1, 4), + (1024, 1024, 16384, 128, 128): (2, 64, 64, 32, 1, 4), + (1024, 1024, 32768, 16, 16): (2, 16, 16, 64, 1, 1), + (1024, 1024, 32768, 32, 32): (1, 64, 32, 32, 1, 1), + (1024, 1024, 32768, 64, 64): (1, 32, 64, 64, 1, 4), + (1024, 1024, 32768, 128, 128): (4, 1, 32, 64, 1, 4), + (1024, 1024, 65536, 16, 16): (2, 16, 16, 64, 1, 1), + (1024, 1024, 65536, 32, 32): (1, 32, 32, 32, 1, 1), + (1024, 1024, 65536, 64, 64): (2, 32, 64, 64, 1, 4), + (1024, 1024, 65536, 128, 128): (4, 1, 64, 32, 1, 4), + (1024, 1024, 131072, 16, 16): (2, 16, 16, 64, 1, 1), + (1024, 1024, 131072, 32, 32): (1, 32, 32, 32, 1, 1), + (1024, 1024, 131072, 64, 64): (1, 16, 64, 64, 1, 4), + (1024, 1024, 131072, 128, 128): (1, 8192, 64, 16, 1, 4), + (2048, 2048, 256, 16, 16): (1, 4, 16, 32, 1, 2), + (2048, 2048, 256, 32, 32): (1, 8, 16, 32, 1, 1), + (2048, 2048, 256, 64, 64): (1, 8, 32, 32, 1, 4), + (2048, 2048, 256, 128, 128): (1, 4, 64, 64, 1, 8), + (2048, 2048, 512, 16, 16): (2, 8, 16, 32, 1, 2), + (2048, 2048, 512, 32, 32): (2, 8, 32, 64, 1, 4), + (2048, 2048, 512, 64, 64): (2, 4, 64, 64, 1, 4), + (2048, 2048, 512, 128, 128): (1, 8, 32, 64, 1, 4), + (2048, 2048, 1024, 16, 16): (2, 16, 16, 64, 3, 1), + (2048, 2048, 1024, 32, 32): (1, 32, 32, 32, 1, 1), + (2048, 2048, 1024, 64, 64): (1, 16, 64, 64, 1, 4), + (2048, 2048, 1024, 128, 128): (2, 4, 64, 64, 1, 8), + (2048, 2048, 2048, 16, 16): (2, 16, 16, 64, 1, 1), + (2048, 2048, 2048, 32, 32): (1, 32, 32, 32, 1, 1), + (2048, 2048, 2048, 64, 64): (1, 16, 64, 64, 1, 4), + (2048, 2048, 2048, 128, 128): (2, 32, 32, 64, 1, 4), + (2048, 2048, 4096, 16, 16): (3, 2, 16, 64, 1, 1), + (2048, 2048, 4096, 32, 32): (3, 4, 32, 32, 1, 1), + (2048, 2048, 4096, 64, 64): (1, 16, 64, 64, 1, 4), + (2048, 2048, 4096, 128, 128): (2, 32, 64, 32, 1, 4), + (2048, 2048, 8192, 16, 16): (3, 4, 16, 64, 1, 1), + (2048, 2048, 8192, 32, 32): (2, 4, 32, 32, 1, 1), + (2048, 2048, 8192, 64, 64): (2, 32, 64, 32, 1, 
2), + (2048, 2048, 8192, 128, 128): (4, 1, 32, 64, 1, 4), + (2048, 2048, 16384, 16, 16): (3, 4, 16, 64, 1, 1), + (2048, 2048, 16384, 32, 32): (1, 4, 32, 32, 1, 1), + (2048, 2048, 16384, 64, 64): (2, 8, 64, 32, 1, 2), + (2048, 2048, 16384, 128, 128): (2, 8, 64, 32, 1, 4), + (2048, 2048, 32768, 16, 16): (2, 4, 16, 64, 1, 1), + (2048, 2048, 32768, 32, 32): (2, 8, 32, 32, 1, 1), + (2048, 2048, 32768, 64, 64): (1, 16, 64, 32, 1, 2), + (2048, 2048, 32768, 128, 128): (4, 1, 32, 64, 1, 4), + (2048, 2048, 65536, 16, 16): (3, 4, 16, 64, 1, 1), + (2048, 2048, 65536, 32, 32): (1, 8, 32, 32, 1, 1), + (2048, 2048, 65536, 64, 64): (1, 8, 64, 32, 1, 2), + (2048, 2048, 65536, 128, 128): (4, 1, 64, 32, 1, 4), + (2048, 2048, 131072, 16, 16): (2, 4, 16, 64, 1, 1), + (2048, 2048, 131072, 32, 32): (1, 8, 32, 32, 1, 1), + (2048, 2048, 131072, 64, 64): (3, 1, 64, 32, 1, 2), + (2048, 2048, 131072, 128, 128): (1, 8192, 128, 16, 1, 8), + (4096, 4096, 256, 16, 16): (2, 4, 16, 32, 1, 2), + (4096, 4096, 256, 32, 32): (1, 4, 32, 64, 1, 4), + (4096, 4096, 256, 64, 64): (1, 4, 64, 64, 1, 4), + (4096, 4096, 256, 128, 128): (1, 4, 32, 64, 1, 4), + (4096, 4096, 512, 16, 16): (2, 8, 16, 64, 3, 1), + (4096, 4096, 512, 32, 32): (2, 16, 32, 32, 1, 1), + (4096, 4096, 512, 64, 64): (1, 8, 64, 64, 1, 4), + (4096, 4096, 512, 128, 128): (1, 8, 32, 64, 1, 4), + (4096, 4096, 1024, 16, 16): (1, 8, 16, 64, 3, 1), + (4096, 4096, 1024, 32, 32): (1, 16, 32, 32, 1, 1), + (4096, 4096, 1024, 64, 64): (1, 16, 64, 32, 1, 2), + (4096, 4096, 1024, 128, 128): (1, 16, 32, 64, 1, 4), + (4096, 4096, 2048, 16, 16): (1, 16, 16, 64, 3, 1), + (4096, 4096, 2048, 32, 32): (1, 16, 32, 32, 1, 1), + (4096, 4096, 2048, 64, 64): (3, 16, 64, 32, 1, 2), + (4096, 4096, 2048, 128, 128): (4, 8, 32, 64, 1, 4), + (4096, 4096, 4096, 16, 16): (1, 8, 16, 64, 3, 1), + (4096, 4096, 4096, 32, 32): (1, 1, 32, 32, 1, 1), + (4096, 4096, 4096, 64, 64): (2, 16, 64, 32, 1, 2), + (4096, 4096, 4096, 128, 128): (4, 8, 32, 64, 1, 4), + (4096, 4096, 8192, 16, 16): (1, 8, 16, 64, 3, 1), + (4096, 4096, 8192, 32, 32): (2, 1, 32, 32, 1, 1), + (4096, 4096, 8192, 64, 64): (1, 16, 64, 32, 1, 2), + (4096, 4096, 8192, 128, 128): (2, 1, 32, 64, 1, 4), + (4096, 4096, 16384, 16, 16): (1, 8, 16, 64, 3, 1), + (4096, 4096, 16384, 32, 32): (1, 1, 32, 32, 1, 1), + (4096, 4096, 16384, 64, 64): (2, 8, 64, 32, 1, 2), + (4096, 4096, 16384, 128, 128): (2, 1, 32, 64, 1, 4), + (4096, 4096, 32768, 16, 16): (1, 8, 16, 64, 3, 1), + (4096, 4096, 32768, 32, 32): (1, 1, 32, 32, 1, 1), + (4096, 4096, 32768, 64, 64): (1, 8, 64, 32, 1, 2), + (4096, 4096, 32768, 128, 128): (2, 1, 32, 64, 1, 4), + (4096, 4096, 65536, 16, 16): (1, 8, 16, 64, 3, 1), + (4096, 4096, 65536, 32, 32): (3, 1, 32, 32, 1, 1), + (4096, 4096, 65536, 64, 64): (3, 4, 64, 32, 1, 2), + (4096, 4096, 65536, 128, 128): (2, 1, 32, 64, 1, 4), + (4096, 4096, 131072, 16, 16): (1, 8, 16, 64, 3, 1), + (4096, 4096, 131072, 32, 32): (1, 1, 32, 32, 1, 1), + (4096, 4096, 131072, 64, 64): (2, 8, 64, 32, 1, 2), + (4096, 4096, 131072, 128, 128): (1, 8192, 128, 16, 1, 8), + (8192, 8192, 256, 16, 16): (2, 4, 16, 64, 3, 1), + (8192, 8192, 256, 32, 32): (1, 8, 32, 32, 1, 1), + (8192, 8192, 256, 64, 64): (1, 4, 64, 64, 1, 4), + (8192, 8192, 256, 128, 128): (1, 4, 32, 64, 1, 4), + (8192, 8192, 512, 16, 16): (1, 4, 16, 64, 3, 1), + (8192, 8192, 512, 32, 32): (1, 16, 32, 32, 1, 1), + (8192, 8192, 512, 64, 64): (2, 4, 64, 64, 1, 4), + (8192, 8192, 512, 128, 128): (2, 1, 32, 64, 1, 4), + (8192, 8192, 1024, 16, 16): (3, 8, 16, 64, 3, 1), + (8192, 8192, 1024, 32, 32): (1, 16, 32, 
32, 1, 1), + (8192, 8192, 1024, 64, 64): (1, 8, 64, 32, 1, 2), + (8192, 8192, 1024, 128, 128): (2, 4, 32, 64, 1, 4), + (8192, 8192, 2048, 16, 16): (1, 8, 16, 64, 3, 1), + (8192, 8192, 2048, 32, 32): (1, 16, 32, 32, 1, 1), + (8192, 8192, 2048, 64, 64): (2, 8, 64, 32, 1, 2), + (8192, 8192, 2048, 128, 128): (4, 1, 32, 64, 1, 4), + (8192, 8192, 4096, 16, 16): (1, 8, 16, 64, 3, 1), + (8192, 8192, 4096, 32, 32): (1, 16, 32, 32, 1, 1), + (8192, 8192, 4096, 64, 64): (1, 4, 64, 32, 1, 2), + (8192, 8192, 4096, 128, 128): (3, 1, 32, 64, 1, 4), + (8192, 8192, 8192, 16, 16): (1, 8, 16, 64, 3, 1), + (8192, 8192, 8192, 32, 32): (1, 8, 32, 32, 1, 1), + (8192, 8192, 8192, 64, 64): (1, 8, 64, 32, 1, 2), + (8192, 8192, 8192, 128, 128): (4, 1, 32, 64, 1, 4), + (8192, 8192, 16384, 16, 16): (3, 4, 16, 64, 3, 1), + (8192, 8192, 16384, 32, 32): (1, 8, 32, 32, 1, 1), + (8192, 8192, 16384, 64, 64): (2, 2, 64, 32, 1, 2), + (8192, 8192, 16384, 128, 128): (7, 1, 32, 64, 1, 4), + (8192, 8192, 32768, 16, 16): (1, 4, 16, 64, 3, 1), + (8192, 8192, 32768, 32, 32): (1, 8, 32, 32, 1, 1), + (8192, 8192, 32768, 64, 64): (3, 2, 64, 32, 1, 2), + (8192, 8192, 32768, 128, 128): (6, 1, 32, 64, 1, 4), + (8192, 8192, 65536, 16, 16): (1, 4, 16, 64, 3, 1), + (8192, 8192, 65536, 32, 32): (4, 8, 32, 32, 1, 1), + (8192, 8192, 65536, 64, 64): (1, 2, 64, 32, 1, 2), + (8192, 8192, 65536, 128, 128): (4, 1, 32, 64, 1, 4), + (8192, 8192, 131072, 16, 16): (1, 4, 16, 64, 3, 1), + (8192, 8192, 131072, 32, 32): (1, 8, 32, 32, 1, 1), + (8192, 8192, 131072, 64, 64): (5, 4, 64, 32, 1, 2), + (8192, 8192, 131072, 128, 128): (1, 4096, 128, 16, 1, 8), + (16384, 16384, 256, 16, 16): (1, 4, 16, 64, 3, 1), + (16384, 16384, 256, 32, 32): (1, 8, 32, 32, 1, 1), + (16384, 16384, 256, 64, 64): (1, 4, 64, 32, 1, 2), + (16384, 16384, 256, 128, 128): (1, 4, 32, 64, 1, 4), + (16384, 16384, 512, 16, 16): (1, 8, 16, 64, 3, 1), + (16384, 16384, 512, 32, 32): (1, 16, 32, 32, 1, 1), + (16384, 16384, 512, 64, 64): (1, 4, 64, 32, 1, 2), + (16384, 16384, 512, 128, 128): (3, 1, 32, 64, 1, 4), + (16384, 16384, 1024, 16, 16): (1, 8, 16, 64, 3, 1), + (16384, 16384, 1024, 32, 32): (1, 16, 32, 32, 1, 1), + (16384, 16384, 1024, 64, 64): (2, 4, 64, 32, 1, 2), + (16384, 16384, 1024, 128, 128): (1, 2, 32, 64, 1, 4), + (16384, 16384, 2048, 16, 16): (1, 4, 16, 64, 3, 1), + (16384, 16384, 2048, 32, 32): (1, 16, 32, 32, 1, 1), + (16384, 16384, 2048, 64, 64): (3, 4, 64, 32, 1, 2), + (16384, 16384, 2048, 128, 128): (2, 1, 32, 64, 1, 4), + (16384, 16384, 4096, 16, 16): (4, 8, 16, 64, 3, 1), + (16384, 16384, 4096, 32, 32): (5, 16, 32, 32, 1, 1), + (16384, 16384, 4096, 64, 64): (3, 2, 64, 32, 1, 2), + (16384, 16384, 4096, 128, 128): (2, 1, 32, 64, 1, 4), + (16384, 16384, 8192, 16, 16): (1, 4, 16, 64, 3, 1), + (16384, 16384, 8192, 32, 32): (1, 4, 32, 32, 1, 1), + (16384, 16384, 8192, 64, 64): (1, 2, 64, 32, 1, 2), + (16384, 16384, 8192, 128, 128): (2, 1, 32, 64, 1, 4), + (16384, 16384, 16384, 16, 16): (1, 8, 16, 64, 3, 1), + (16384, 16384, 16384, 32, 32): (1, 4, 32, 32, 1, 1), + (16384, 16384, 16384, 64, 64): (1, 2, 64, 32, 1, 2), + (16384, 16384, 16384, 128, 128): (3, 1, 32, 64, 1, 4), + (16384, 16384, 32768, 16, 16): (1, 4, 16, 64, 3, 1), + (16384, 16384, 32768, 32, 32): (1, 2, 32, 32, 1, 1), + (16384, 16384, 32768, 64, 64): (3, 2, 64, 32, 1, 2), + (16384, 16384, 32768, 128, 128): (3, 1, 32, 64, 1, 4), + (16384, 16384, 65536, 16, 16): (1, 8, 16, 64, 3, 1), + (16384, 16384, 65536, 32, 32): (1, 4, 32, 32, 1, 1), + (16384, 16384, 65536, 64, 64): (4, 4, 64, 32, 1, 2), + (16384, 16384, 65536, 128, 
128): (5, 1, 32, 64, 1, 4), + (16384, 16384, 131072, 16, 16): (1, 2, 16, 64, 3, 1), + (16384, 16384, 131072, 32, 32): (1, 4, 32, 32, 1, 1), + (16384, 16384, 131072, 64, 64): (1, 2, 64, 32, 1, 2), + (16384, 16384, 131072, 128, 128): (1, 4096, 128, 16, 1, 8), + }, + # END GENERATED DATA +} + +if __name__ == "__main__": + for dtype in [torch.float16, torch.bfloat16, torch.float32]: + for op in ["bsr_dense_addmm"]: + main(op=op, force=False, dtype=dtype) diff --git a/venv/lib/python3.10/site-packages/torch/sparse/semi_structured.py b/venv/lib/python3.10/site-packages/torch/sparse/semi_structured.py new file mode 100644 index 0000000000000000000000000000000000000000..03c15c0eee518ddaccbe4fb99302aa72c829756c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/sparse/semi_structured.py @@ -0,0 +1,518 @@ +import warnings +from collections import namedtuple +from typing import Any, Optional, Tuple, List, Callable, Dict + +import torch +from torch.sparse._semi_structured_conversions import ( + sparse_semi_structured_from_dense_cutlass, + sparse_semi_structured_to_dense_cutlass, +) +from torch.sparse._semi_structured_ops import ( + fallback_dispatcher, + semi_sparse_values, + semi_sparse_indices, + semi_sparse_detach, + semi_sparse_t, + semi_sparse_view, + semi_sparse_mm, + semi_sparse_addmm, + semi_sparse_linear, +) + +__all__ = [ + "SparseSemiStructuredTensor", + "SparseSemiStructuredTensorCUTLASS", + "SparseSemiStructuredTensorCUSPARSELT", + "to_sparse_semi_structured", +] + +_SEMI_STRUCTURED_SPARSE_CONFIG = namedtuple( + "_SEMI_STRUCTURED_SPARSE_CONFIG", + "sparse_min_rows sparse_min_cols dense_min_rows dense_min_cols", +) + + +class SparseSemiStructuredTensor(torch.Tensor): + """ + This class implements semi-structured sparsity as a Tensor subclass. + + Semi-structured sparsity describes a sparsity pattern where n out of every 2n elements are zero, + depending on the datatype. It is also referred to as 2:4 sparsity or fine-grained + structured sparsity. + + There are two backends available for semi-structured sparsity, either cuSPARSELt or CUTLASS. + This class is meant to serve as a base class for both implementations. SparseSemiStructuredTensorCUTLASS + and SparseSemiStructuredTensorCUSPARSELT both inherit from this class and define three backend-specific items. + Note that as such, this class cannot be instantiated directly.
+ + - `_DTYPE_SHAPE_CONSTRAINTS` - A dictionary holding backend-specific dense/sparse min shape constraints + - `def from_dense()` - backend-specific compression routines + - `def _mm()` - backend-specific mm op (either torch._cslt_sparse_mm or torch._sparse_semi_structured_linear) + """ + + _DEFAULT_ALG_ID: int = 0 + _DTYPE_SHAPE_CONSTRAINTS: Dict[torch.dtype, _SEMI_STRUCTURED_SPARSE_CONFIG] + _FORCE_CUTLASS: bool = True + _FUSE_TRANSPOSE: bool = False + _PROTOTYPE_WARNING_SHOWN: bool = False + + SPARSE_DISPATCH: Dict[Callable, Callable] + + packed: Optional[torch.Tensor] + meta: Optional[torch.Tensor] + packed_t: Optional[torch.Tensor] + meta_t: Optional[torch.Tensor] + threads_masks: Optional[torch.Tensor] + fuse_transpose_cusparselt: bool + alg_id_cusparselt: int + + __slots__ = ["packed", "meta", "packed_t", "meta_t", "threads_masks"] + + @staticmethod + def __new__( # noqa: PYI034 + cls, + shape: torch.Size, + packed: Optional[torch.Tensor], + meta: Optional[torch.Tensor], + packed_t: Optional[torch.Tensor], + meta_t: Optional[torch.Tensor], + threads_masks: Optional[torch.Tensor], + fuse_transpose_cusparselt: bool = False, + alg_id_cusparselt: int = 0, + requires_grad: bool = False, + ): + """ + Create a new instance of the tensor subclass from the compressed sparse representation. + + We have the option to create the subclass with the compressed representations of both X and X', for training. + For inference, we only need a single representation (either X or X'), while the corresponding other set will be None. + + Depending on the backend selected, certain fields will be set to None. (CUSPARSELT vs CUTLASS) + + Args: + shape: The shape of the original dense tensor + packed: The compressed representation of the original dense tensor + meta: The metadata of the original dense tensor, if it is stored separately + packed_t: The compressed representation of the transposed original dense tensor + meta_t: The metadata of the transposed original dense tensor, if it is stored separately + threads_masks: The masks used by the CUTLASS backend to determine which threads should participate in the computation. + Used for pointwise ops. + fuse_transpose_cusparselt: When running with cuSPARSELt, we have the option to fuse a transposition + with a matmul, which is useful in the case of 2:4 sparse training. + alg_id_cusparselt: The algorithm id to use when using cuSPARSELt, which will have an effect on performance + + Returns: + torch.Tensor: A torch.Tensor wrapper subclass. + + Raises: + ValueError: If all of the tensor arguments are None. + """ + if not cls._PROTOTYPE_WARNING_SHOWN: + warnings.warn( + ( + "The PyTorch API of SparseSemiStructuredTensor is in prototype stage " + "and will change in the near future. Please open a GitHub issue " + "for feature requests and see our documentation on the torch.sparse " + "module for further information about the project." + ), + UserWarning, + ) + cls._PROTOTYPE_WARNING_SHOWN = True + + # Because this only runs once, we also load the dispatch table here. + # We can't define the dispatch table explicitly because of torch.ops import errors, so we do this instead. + # But this is useful since it allows users to overload the dispatch table for debugging / testing.
+ cls._load_dispatch_table() + + if packed is not None: + previous_tensor = packed + elif packed_t is not None: + previous_tensor = packed_t + else: + raise ValueError("At least one of packed or packed_t must be provided") + + kwargs = { + "device": previous_tensor.device, + "dtype": previous_tensor.dtype, + "layout": previous_tensor.layout, + "requires_grad": requires_grad, + } + tensor = torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) # type: ignore[attr-defined] + + tensor.packed = packed + tensor.meta = meta + tensor.packed_t = packed_t + tensor.meta_t = meta_t + tensor.threads_masks = threads_masks + tensor.fuse_transpose_cusparselt = fuse_transpose_cusparselt + tensor.alg_id_cusparselt = alg_id_cusparselt + return tensor + + def __repr__(self) -> str: # type: ignore[override] + assert hasattr(self, "shape") + return f"{self.__class__.__name__}(shape={self.shape})" + + def __tensor_flatten__( + self, + ) -> Tuple[List[str], Tuple[torch.Size, bool, int, bool]]: + inner_tensors = list( + filter(lambda x: getattr(self, x) is not None, self.__slots__) + ) + tensor_meta = ( + self.shape, + self.fuse_transpose_cusparselt, + self.alg_id_cusparselt, + self.requires_grad, + ) + return inner_tensors, tensor_meta + + @classmethod + def __tensor_unflatten__( + cls, + inner_tensors, + tensor_meta : Tuple[torch.Size, bool, int, bool], + outer_size, + outer_stride, + ) -> torch.Tensor: + shape, fuse_transpose_cusparselt, alg_id_cusparselt, requires_grad = tensor_meta + return cls( + shape=shape, + packed=inner_tensors.get("packed", None), + meta=inner_tensors.get("meta", None), + packed_t=inner_tensors.get("packed_t", None), + meta_t=inner_tensors.get("meta_t", None), + threads_masks=inner_tensors.get("threads_masks", None), + fuse_transpose_cusparselt=fuse_transpose_cusparselt, + alg_id_cusparselt=alg_id_cusparselt, + requires_grad=requires_grad, + ) + + __torch_function__ = torch._C._disabled_torch_function_impl + + @classmethod + def __torch_dispatch__(cls, func, types, args, kwargs) -> Any: + if func._overloadpacket not in cls.SPARSE_DISPATCH: + raise NotImplementedError( + f"{cls.__name__} only supports a specific set of operations, " + f"can't perform requested op ({func.__name__})" + ) + return cls.SPARSE_DISPATCH[func._overloadpacket](func, types, args, kwargs) + + @classmethod + def _load_dispatch_table(cls, custom_dispatch_table=None) -> None: + """ + Loads the op overload sparse dispatch table for the current class. + """ + if getattr(cls, "SPARSE_DISPATCH", None) is None: + cls.SPARSE_DISPATCH = { + torch.ops.aten.values: semi_sparse_values, + torch.ops.aten.indices: semi_sparse_indices, + torch.ops.aten.is_same_size: fallback_dispatcher, + torch.ops.aten.detach_: fallback_dispatcher, + torch.ops.aten.detach: semi_sparse_detach, + torch.ops.aten.t: semi_sparse_t, + torch.ops.aten.view: semi_sparse_view, + torch.ops.aten.mm: semi_sparse_mm, + torch.ops.aten.matmul: semi_sparse_mm, + torch.ops.aten.addmm: semi_sparse_addmm, + torch.ops.aten.linear: semi_sparse_linear, + } + if custom_dispatch_table is not None: + cls.SPARSE_DISPATCH.update(custom_dispatch_table) + + @classmethod + def _validate_device_dim_dtype_shape(cls, original_tensor : torch.Tensor) -> None: + """ + Assert that the given tensor is valid for semi-structured sparse compression. + """ + # check device + if not original_tensor.is_cuda: + raise RuntimeError( + f"Error original_tensor.device= {original_tensor.device} is not supported! " + "Only CUDA tensors are currently supported." 
+ ) + + # check dim + if original_tensor.dim() != 2: + raise RuntimeError( + f"Error original_tensor.dim = {original_tensor.dim()} is not supported! " + "Only 2d tensors are currently supported." + ) + + # check contiguous + if not original_tensor.is_contiguous(): + raise RuntimeError( + "Error original_tensor is not contiguous! " + "Only contiguous tensors are currently supported." + ) + + # check dtype + if original_tensor.dtype not in cls._DTYPE_SHAPE_CONSTRAINTS: + raise RuntimeError( + f"Error original_tensor.dtype {original_tensor.dtype} is not a supported dtype! " + f"dtype must be one of: {cls._DTYPE_SHAPE_CONSTRAINTS}" + ) + + # check shape + m, n = original_tensor.shape + min_rows = cls._DTYPE_SHAPE_CONSTRAINTS[original_tensor.dtype].sparse_min_rows + min_cols = cls._DTYPE_SHAPE_CONSTRAINTS[original_tensor.dtype].sparse_min_cols + if m < min_rows or m % min_rows or n < min_cols or n % min_cols: + # TODO in the future we can add in padding to support sparse dimensions that aren't perfect multiples + raise RuntimeError( + f"Error original_tensor.shape {original_tensor.shape} is not supported! " + f"Both dimensions must be greater than or equal to, and a multiple of, ({min_rows}, {min_cols})" + ) + + @classmethod + def _pad_dense_input(cls, dense_input: torch.Tensor) -> torch.Tensor: + """ + Calculates padding for the dense tensor and pads it if necessary. + If padding is not required, this function returns the original tensor. + """ + # only 2d matmul + assert dense_input.dim() == 2 + + # check shape + m, n = dense_input.shape + min_rows = cls._DTYPE_SHAPE_CONSTRAINTS[dense_input.dtype].dense_min_rows + min_cols = cls._DTYPE_SHAPE_CONSTRAINTS[dense_input.dtype].dense_min_cols + + # calculate padding: round each dimension up to the next multiple of the per-dtype minimum + to_pad_m = -m % min_rows if m < min_rows or m % min_rows else 0 + to_pad_n = -n % min_cols if n < min_cols or n % min_cols else 0 + if to_pad_m or to_pad_n: + return torch.nn.functional.pad(dense_input, (0, to_pad_n, 0, to_pad_m)) + else: + return dense_input + + def to_dense(self): + col = self.shape[-1] + return torch.mm(self, torch.eye(col, dtype=self.dtype, device=self.device)) + + @classmethod + def from_dense(cls, original_tensor: torch.Tensor) -> "SparseSemiStructuredTensor": + raise NotImplementedError + + def _mm( + self, + B: torch.Tensor, + *, + bias: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + raise NotImplementedError + + +def to_sparse_semi_structured( + original_tensor: torch.Tensor, + transposed: bool = False, +) -> SparseSemiStructuredTensor: + """ + This function converts a dense tensor into a sparse semi-structured tensor. + It will return a SparseSemiStructuredTensor, a subclass of torch.Tensor. + + This function will check to ensure the dense tensor has the right dtype, size, dims, and device. + We currently only support semi-structured sparse tensors for 2d CUDA tensors. + Additionally, your tensor must be a positive multiple of the minimum sparse block size, given in + `_DTYPE_SHAPE_CONSTRAINTS` for each dtype (float32, float16, bfloat16, int8). + + Args: + original_tensor (Tensor): the dense tensor to convert + transposed (bool, optional): deprecated arg to be removed in a future release. Do not use.
+ Returns: + SparseSemiStructuredTensor: A sparse semi-structured tensor created from the given original_tensor + Raises: + None + Example: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> A = torch.Tensor([0, 0, 1, 1]).tile((128, 32)).half().cuda() + tensor([[0., 0., 1., ..., 0., 1., 1.], + [0., 0., 1., ..., 0., 1., 1.], + [0., 0., 1., ..., 0., 1., 1.], + ..., + [0., 0., 1., ..., 0., 1., 1.], + [0., 0., 1., ..., 0., 1., 1.], + [0., 0., 1., ..., 0., 1., 1.]], device='cuda:0', dtype=torch.float16) + >>> A_sparse = to_sparse_semi_structured(A) + SparseSemiStructuredTensor(shape=torch.Size([128, 128])) + >>> A_sparse.values() + tensor([[1., 1., 1., ..., 1., 1., 1.], + [1., 1., 1., ..., 1., 1., 1.], + [1., 1., 1., ..., 1., 1., 1.], + ..., + [1., 1., 1., ..., 1., 1., 1.], + [1., 1., 1., ..., 1., 1., 1.], + [1., 1., 1., ..., 1., 1., 1.]], device='cuda:0', dtype=torch.float16), + >>> A_sparse.indices() + tensor([[-4370, -4370, -4370, ..., -4370, -4370, -4370], + [-4370, -4370, -4370, ..., -4370, -4370, -4370], + [-4370, -4370, -4370, ..., -4370, -4370, -4370], + ..., + [-4370, -4370, -4370, ..., -4370, -4370, -4370], + [-4370, -4370, -4370, ..., -4370, -4370, -4370], + [-4370, -4370, -4370, ..., -4370, -4370, -4370]], device='cuda:0', dtype=torch.int16)) + """ + if transposed: + raise DeprecationWarning( + "Setting transpose from to_sparse_semi_structured is deprecated and will be removed in a future release. " + "SparseSemiStructuredTensor only supports contiguous input tensors." + ) + + sparse_subclass = ( + torch.sparse.SparseSemiStructuredTensorCUTLASS + if SparseSemiStructuredTensor._FORCE_CUTLASS + else torch.sparse.SparseSemiStructuredTensorCUSPARSELT + ) + return sparse_subclass.from_dense(original_tensor) + + +class SparseSemiStructuredTensorCUTLASS(SparseSemiStructuredTensor): + """ + This class implements semi-structured sparsity for the CUTLASS backend. + + In this implementation, the specified elements and metadata are stored separately, + in packed and meta respectively. + + When _FORCE_CUTLASS is set, or when cuSPARSELt is not available, this subclass calls into _sparse_semi_structured_linear + and sparse_semi_structured_from_dense for conversion to the compressed format.
+ """ + + _DTYPE_SHAPE_CONSTRAINTS = { + torch.int8: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 128, 16, 16), + torch.float16: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 64, 8, 8), + torch.bfloat16: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 64, 8, 8), + torch.float32: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 32, 4, 4), + } + + @classmethod + def from_dense( + cls, original_tensor: torch.Tensor + ) -> "SparseSemiStructuredTensorCUTLASS": + cls._validate_device_dim_dtype_shape(original_tensor) + ( + sparse_tensor_cutlass, + meta_tensor_cutlass, + ) = sparse_semi_structured_from_dense_cutlass(original_tensor) + return cls( + original_tensor.shape, + packed=sparse_tensor_cutlass, + meta=meta_tensor_cutlass, + packed_t=None, + meta_t=None, + threads_masks=None, + requires_grad=original_tensor.requires_grad, + ) + + def to_dense(self): + assert self.meta is not None and self.packed is not None + return ( + sparse_semi_structured_to_dense_cutlass( + self.packed, + self.meta, + ) + if self.meta.ndim == 2 + else super().to_dense() + ) + + def _mm( + self, + B: torch.Tensor, + *, + bias: Optional[torch.Tensor] = None, + **kwargs + ) -> torch.Tensor: + if isinstance(B, SparseSemiStructuredTensor): + raise ValueError( + "`SparseSemiStructuredTensor @ SparseSemiStructuredTensor` is not supported by the hardware" + ) + cls_name = self.__class__.__name__ + if self.ndim != 2 or B.ndim != 2: + raise NotImplementedError( + f"`{cls_name}` matmul: Broadcasting is not implemented" + ) + if self.packed is None or self.meta is None: + raise NotImplementedError( + f"`{cls_name}` matmul: operation is not supported" + ) + else: + res = torch._sparse_semi_structured_linear( + B.t(), self.packed, self.meta, bias=bias + ).t() + return res[: self.shape[0]] + + +class SparseSemiStructuredTensorCUSPARSELT(SparseSemiStructuredTensor): + """ + The cuSPARSELt backend expects the specified elements and the metadata to be stored in a single tensor: + packed = [ specified elements of original tensor | metadata ] + For an original tensor of size (m, k) we expect the first m * k // 2 elements to be the kept elements + The rest of the tensor is metadata. Since there is only one tensor, we only use the packed and packed_t + attributes respectively. + + cuSPARSELt also supports transposition fusion, which is necessary for performant 2:4 sparse training, as well + as specifying alg_id, a config that affects the performance of the matmul depending on matmul sizes. 
+ """ + + _DTYPE_SHAPE_CONSTRAINTS = { + torch.int8: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 32, 16, 16), + torch.float16: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 16, 8, 8), + torch.bfloat16: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 16, 8, 8), + torch.float32: _SEMI_STRUCTURED_SPARSE_CONFIG(8, 8, 4, 4), + } + + @classmethod + def from_dense(cls, original_tensor : torch.Tensor) -> "SparseSemiStructuredTensorCUSPARSELT": + cls._validate_device_dim_dtype_shape(original_tensor) + return cls( + shape=original_tensor.shape, + packed=torch._cslt_compress(original_tensor), + meta=None, + packed_t=None, + meta_t=None, + threads_masks=None, + fuse_transpose_cusparselt=SparseSemiStructuredTensor._FUSE_TRANSPOSE, + alg_id_cusparselt=SparseSemiStructuredTensor._DEFAULT_ALG_ID, + requires_grad=original_tensor.requires_grad, + ) + + def _mm( + self, + B: torch.Tensor, + *, + bias: Optional[torch.Tensor] = None, + **kwargs + ) -> torch.Tensor: + if isinstance(B, SparseSemiStructuredTensor): + raise ValueError( + "`SparseSemiStructuredTensor @ SparseSemiStructuredTensor` is not supported by the hardware" + ) + if self.ndim != 2 or B.ndim != 2: + raise NotImplementedError( + f"`{self.__class__.__name__}` matmul: Broadcasting is not implemented" + ) + if B.dtype != self.dtype: + raise NotImplementedError( + f"`{self.__class__.__name__}` matmul: trying to do `A={tuple(self.shape)} @ B={tuple(B.shape)}`, " + f"with A.dtype={self.dtype} and B.dtype={B.dtype}. " + "This operation is only supported when A and B have the same data type." + ) + if bias is not None and bias.dtype != self.dtype: + raise NotImplementedError( + f"`{self.__class__.__name__}` matmul: trying to do `A={tuple(self.shape)} @ B={tuple(B.shape)} + C`, " + "with A.dtype=B.dtype={self.dtype} and C.dtype={B.dtype}. " + "This operation is only supported when A, B and C have the same data type." + ) + if self.packed is None: + raise NotImplementedError( + f"`{self.__class__.__name__}` matmul: operation is not supported" + ) + else: + res = torch._cslt_sparse_mm( + self.packed, + B, + bias=bias, + transpose_result=self.fuse_transpose_cusparselt, + alg_id=self.alg_id_cusparselt, + ) + return res.t() if self.fuse_transpose_cusparselt else res