applied-ai-018 committed on
Commit a618689 · verified · 1 Parent(s): 590f3ca

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. ckpts/universal/global_step120/zero/16.attention.dense.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step120/zero/16.attention.dense.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/fp32.pt +3 -0
  5. venv/lib/python3.10/site-packages/torch/_dynamo/__init__.py +96 -0
  6. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/__init__.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/_trace_wrapped_higher_order_op.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_analysis.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_transformation.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/cache_size.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/callback.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/code_context.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/codegen.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/compiled_autograd.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/config.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/convert_frame.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/current_scope_id.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/debug_utils.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/decorators.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/hooks.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/output_graph.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/replay_record.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/resume_execution.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/side_effects.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/source.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/symbolic_convert.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_case.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/testing.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/types.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/utils.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/torch/_dynamo/_trace_wrapped_higher_order_op.py +120 -0
  33. venv/lib/python3.10/site-packages/torch/_dynamo/bytecode_analysis.py +250 -0
  34. venv/lib/python3.10/site-packages/torch/_dynamo/bytecode_transformation.py +1114 -0
  35. venv/lib/python3.10/site-packages/torch/_dynamo/cache_size.py +172 -0
  36. venv/lib/python3.10/site-packages/torch/_dynamo/callback.py +82 -0
  37. venv/lib/python3.10/site-packages/torch/_dynamo/code_context.py +29 -0
  38. venv/lib/python3.10/site-packages/torch/_dynamo/codegen.py +398 -0
  39. venv/lib/python3.10/site-packages/torch/_dynamo/compiled_autograd.py +280 -0
  40. venv/lib/python3.10/site-packages/torch/_dynamo/comptime.py +373 -0
  41. venv/lib/python3.10/site-packages/torch/_dynamo/config.py +423 -0
  42. venv/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py +924 -0
  43. venv/lib/python3.10/site-packages/torch/_dynamo/current_scope_id.py +23 -0
  44. venv/lib/python3.10/site-packages/torch/_dynamo/debug_utils.py +802 -0
  45. venv/lib/python3.10/site-packages/torch/_dynamo/decorators.py +347 -0
  46. venv/lib/python3.10/site-packages/torch/_dynamo/device_interface.py +199 -0
  47. venv/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py +1561 -0
  48. venv/lib/python3.10/site-packages/torch/_dynamo/exc.py +335 -0
  49. venv/lib/python3.10/site-packages/torch/_dynamo/external_utils.py +103 -0
  50. venv/lib/python3.10/site-packages/torch/_dynamo/funcname_cache.py +57 -0
ckpts/universal/global_step120/zero/16.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f203f4a2aafa1ea7c74d256be45c559177c572601dc4749c187d297a1ec66f0
+ size 16778411
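The three lines above are a Git LFS pointer (version / oid / size), not the tensor itself; the checkpoint data is resolved by LFS at checkout. As a hedged illustration only, a pointer file like this can be parsed with a few lines of Python; the helper name and the local-path assumption are ours, not part of the commit:

    from pathlib import Path

    def read_lfs_pointer(path):
        # Parse a git-lfs spec/v1 pointer into its key/value fields.
        fields = {}
        for line in Path(path).read_text().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        return fields

    # e.g. fields["oid"] -> "sha256:5f20...", fields["size"] -> "16778411"
    fields = read_lfs_pointer(
        "ckpts/universal/global_step120/zero/16.attention.dense.weight/exp_avg_sq.pt"
    )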
ckpts/universal/global_step120/zero/16.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:301e1add3c4acd1ed87b54a117b1235368f80b5dd37d35c9bf39295de9e13318
+ size 16778317
ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ca0c5fdd534e75e8973663c4bfdae3b96b2122d129c2b197f95ef0ea9f183da
+ size 9372
ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:566fefd513470ca286b9454e42c6c19724e407e6964316c14c091a0c02dac226
+ size 9293
venv/lib/python3.10/site-packages/torch/_dynamo/__init__.py ADDED
@@ -0,0 +1,96 @@
+ import torch
+ from . import convert_frame, eval_frame, resume_execution
+ from .backends.registry import list_backends, lookup_backend, register_backend
+ from .callback import callback_handler, on_compile_end, on_compile_start
+ from .code_context import code_context
+ from .convert_frame import replay
+ from .decorators import (
+     allow_in_graph,
+     assume_constant_result,
+     disable,
+     disallow_in_graph,
+     forbid_in_graph,
+     graph_break,
+     mark_dynamic,
+     mark_static,
+     mark_static_address,
+     maybe_mark_dynamic,
+     run,
+ )
+ from .eval_frame import (
+     _reset_guarded_backend_cache,
+     explain,
+     export,
+     is_dynamo_supported,
+     is_inductor_supported,
+     optimize,
+     optimize_assert,
+     OptimizedModule,
+     reset_code,
+ )
+ from .external_utils import is_compiling
+ from .utils import graph_break_reasons, guard_failures, orig_code_map, reset_frame_count
+
+ __all__ = [
+     "allow_in_graph",
+     "assume_constant_result",
+     "disallow_in_graph",
+     "forbid_in_graph",
+     "graph_break",
+     "mark_dynamic",
+     "maybe_mark_dynamic",
+     "mark_static",
+     "mark_static_address",
+     "optimize",
+     "optimize_assert",
+     "export",
+     "explain",
+     "run",
+     "replay",
+     "disable",
+     "reset",
+     "OptimizedModule",
+     "is_compiling",
+     "register_backend",
+     "list_backends",
+     "lookup_backend",
+ ]
+
+ if torch.manual_seed is torch.random.manual_seed:
+     import torch.jit._builtins
+
+     # Wrap manual_seed with the disable decorator.
+     # Can't do it at its implementation due to dependency issues.
+     torch.manual_seed = disable(torch.manual_seed)
+     # Add the new manual_seed to the builtin registry.
+     torch.jit._builtins._register_builtin(torch.manual_seed, "aten::manual_seed")
+
+
+ def reset() -> None:
+     """Clear all compile caches and restore initial state"""
+     with convert_frame.compile_lock:
+         reset_code_caches()
+         convert_frame.input_codes.clear()
+         convert_frame.output_codes.clear()
+         orig_code_map.clear()
+         guard_failures.clear()
+         graph_break_reasons.clear()
+         resume_execution.ContinueExecutionCache.cache.clear()
+         _reset_guarded_backend_cache()
+         reset_frame_count()
+         torch._C._dynamo.compiled_autograd.clear_cache()
+         convert_frame.FRAME_COUNTER = 0
+         convert_frame.FRAME_COMPILE_COUNTER.clear()
+         callback_handler.clear()
+
+
+ def reset_code_caches() -> None:
+     """Clear compile caches that are keyed by code objects"""
+     with convert_frame.compile_lock:
+         for weak_code in (
+             convert_frame.input_codes.seen + convert_frame.output_codes.seen
+         ):
+             code = weak_code()
+             if code:
+                 reset_code(code)
+         code_context.clear()
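The __init__.py above re-exports the public torch._dynamo surface (optimize, reset, the graph-break decorators, and the backend registry helpers). A minimal sketch of how that API is typically exercised on a PyTorch 2.x install; the toy function below is illustrative, not part of the module:

    import torch
    import torch._dynamo as dynamo

    def toy_fn(x):  # illustrative function
        return torch.relu(x) + 1

    # "eager" is the debug backend: it simply runs the captured FX graph eagerly
    compiled = dynamo.optimize("eager")(toy_fn)
    print(compiled(torch.randn(4)))

    # drop the caches enumerated in reset() above before recompiling
    dynamo.reset()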
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.45 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/_trace_wrapped_higher_order_op.cpython-310.pyc ADDED
Binary file (3.13 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_analysis.cpython-310.pyc ADDED
Binary file (7.19 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_transformation.cpython-310.pyc ADDED
Binary file (31.2 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/cache_size.cpython-310.pyc ADDED
Binary file (3.29 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/callback.cpython-310.pyc ADDED
Binary file (2.95 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/code_context.cpython-310.pyc ADDED
Binary file (1.29 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/codegen.cpython-310.pyc ADDED
Binary file (13 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/compiled_autograd.cpython-310.pyc ADDED
Binary file (9.2 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/config.cpython-310.pyc ADDED
Binary file (4.95 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/convert_frame.cpython-310.pyc ADDED
Binary file (22.3 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/current_scope_id.cpython-310.pyc ADDED
Binary file (638 Bytes).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/debug_utils.cpython-310.pyc ADDED
Binary file (22.3 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/decorators.cpython-310.pyc ADDED
Binary file (10.2 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/hooks.cpython-310.pyc ADDED
Binary file (658 Bytes).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc ADDED
Binary file (4.11 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/output_graph.cpython-310.pyc ADDED
Binary file (48.1 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/replay_record.cpython-310.pyc ADDED
Binary file (4.21 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/resume_execution.cpython-310.pyc ADDED
Binary file (13.5 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/side_effects.cpython-310.pyc ADDED
Binary file (16.1 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/source.cpython-310.pyc ADDED
Binary file (19.3 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/symbolic_convert.cpython-310.pyc ADDED
Binary file (73.2 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_case.cpython-310.pyc ADDED
Binary file (2.38 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/testing.cpython-310.pyc ADDED
Binary file (11.7 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/types.cpython-310.pyc ADDED
Binary file (3.59 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/utils.cpython-310.pyc ADDED
Binary file (71 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/_trace_wrapped_higher_order_op.py ADDED
@@ -0,0 +1,120 @@
+ import torch
+ from torch._C import DispatchKey
+ from torch._higher_order_ops.utils import autograd_not_implemented
+
+ from torch._ops import HigherOrderOperator
+ from torch._subclasses import FakeTensorMode
+ from torch.fx.experimental._backward_state import BackwardState
+
+ from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree
+ from torch.utils._python_dispatch import _get_current_dispatch_mode
+ from torch.utils._pytree import tree_map_only
+
+
+ __all__ = ["trace_wrapped"]
+
+
+ # trace_wrapped(*args, fn) is equivalent to fn(*args), but with a twist:
+ # if you make_fx trace through this call, we will not actually trace into fn; instead,
+ # we will directly insert it as a call_function to fn in the graph.
+ # (Unlike make_fx, Dynamo WILL inline into fn.)
+ # You can think of this as a one off allow_in_graph equivalent for proxy tensor tracing.
+ #
+ # Because proxy tensor tracing does not actually run the function, there are
+ # requirements on the behavior of fn. We are still figuring it out, but here is the current state:
+ #
+ # 1) fn SHOULD only take a single argument, which must be a tensor
+ # 2) fn MUST return a new tensor with the same metadata as the original tensor
+ #    (e.g., zeros_like(input) is a permissible implementation of fn).
+ #    This is verified via an extra assert that is inserted into the traced graph.
+ # 3) fn MAY have side effects, but it MAY NOT perform metadata mutation on other tensors
+ #    participating in proxy tensor tracing (it MAY mutate other tensors, it MAY mutate Python state)
+ # These requirements stem from the requirement that we need to continue performing proxy tensor tracing,
+ # which assumes accurate fake tensor metadata, without actually running fn.
+ # In the future, we may allow for a "meta" function associated with fn to allow for more interesting input-output patterns.
+ #
+ # Note that tensors / Python state are allowed to be mutated.
+ # This is relaxed constraint is not always sound, but it is sound for backward tracing with fake
+ # tensors as it takes place in AOTAutograd, as the backward pass is guaranteed not to depend on concrete
+ # tensor values (via fake tensor) or Python state (because the autograd engine doesn't depend on Python).
+ #
+ # The intended use case for this function is to allow AOTAutograd to defer complex
+ # backward hooks to compiled autograd. AOTAutograd performs a make_fx trace which preserves
+ # the function call as is in the graph, and only when we Dynamo through the backward graph in
+ # compiled autograd do we inline into the function.
+
+
+ def trace_wrapped(*args, **kwargs):
+     with torch.no_grad():
+         return _trace_wrapped_op(*args, **kwargs)
+
+
+ # TODO(jansel): need to ensure this does not get DCEed
+ _trace_wrapped_op = HigherOrderOperator("trace_wrapped")
+
+
+ def _assert_meta(grad, size, stride, dtype):
+     assert grad.size() == size, "size mismatch"
+     assert grad.stride() == stride, "stride mismatch"
+     assert grad.dtype == dtype, "dtype mismatch"
+     return grad
+
+
+ @_trace_wrapped_op.py_impl(ProxyTorchDispatchMode)
+ def inner_trace(mode, *args, bw_state=None, **kwargs):
+     def self_invoke(*args, **dyn_kwargs):
+         with torch.no_grad():
+             return _trace_wrapped_op(*args, **dyn_kwargs, **kwargs)
+
+     def unwrap_proxies(x):
+         if isinstance(x, torch.Tensor):
+             return mode.tracer.unwrap_proxy(x)
+         if isinstance(x, (list, tuple)):
+             return type(x)(map(unwrap_proxies, x))
+         if x is None:
+             return None
+         raise AssertionError(f"unhandled type: {type(x)}")
+
+     proxy_kwargs = {}
+     if bw_state is not None:
+         assert isinstance(bw_state, BackwardState) and bw_state.proxy is not None
+         proxy_kwargs["bw_state"] = bw_state.proxy
+     out_proxy = mode.tracer.create_proxy(
+         "call_function",
+         self_invoke,
+         unwrap_proxies(args),
+         proxy_kwargs,
+         name="trace_wrapped",
+     )
+
+     if args[0] is None:
+         grad = args[1]  # module backward hooks
+     else:
+         grad = args[0]  # other backward hooks
+     grad = tree_map_only(torch.Tensor, torch.empty_like, grad)
+     track_tensor_tree(grad, out_proxy, constant=None, tracer=mode.tracer)
+     return grad
+
+
+ @_trace_wrapped_op.py_impl(FakeTensorMode)
+ def inner_fake(*args, **kwargs):
+     raise RuntimeError("This op should never be invoked here")
+
+
+ @_trace_wrapped_op.py_impl(DispatchKey.CompositeExplicitAutograd)
+ def _trace_wrapped_op_dense(*args, fn, **kwargs):
+     mode = _get_current_dispatch_mode()
+     assert mode is None, "Mode should never be enabled for CPU/CUDA key"
+     return fn(*args, **kwargs)
+
+
+ _trace_wrapped_op.py_impl(DispatchKey.Autograd)(
+     autograd_not_implemented(_trace_wrapped_op, deferred_error=True)
+ )
+
+
+ @_trace_wrapped_op.py_functionalize_impl
+ def _trace_wrapped_functionalized(ctx, *args, **kwargs):
+     unwrapped_args = ctx.unwrap_tensors(args)
+     with ctx.redispatch_to_next():
+         return ctx.wrap_tensors(_trace_wrapped_op(*unwrapped_args, **kwargs))
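The comment block in this file spells out the contract a wrapped fn must satisfy: one tensor argument, a new tensor with identical metadata, and no metadata mutation of other traced tensors. A hedged sketch of a hook that obeys that contract; the names are illustrative and nothing here calls the private op directly:

    import torch

    def conforming_hook(grad: torch.Tensor) -> torch.Tensor:
        # must return a *new* tensor with the same size/stride/dtype as grad;
        # zeros_like is cited above as a permissible implementation
        out = torch.zeros_like(grad)
        # Python-state side effects are allowed, e.g. logging
        print("saw grad of shape", tuple(grad.shape))
        return out

    g = torch.randn(3, 4)
    h = conforming_hook(g)
    # the same properties _assert_meta checks in the traced graph
    assert h.size() == g.size() and h.stride() == g.stride() and h.dtype == g.dtype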
venv/lib/python3.10/site-packages/torch/_dynamo/bytecode_analysis.py ADDED
@@ -0,0 +1,250 @@
+ import bisect
+ import dataclasses
+ import dis
+ import sys
+ from typing import Any, Set, Union
+
+ TERMINAL_OPCODES = {
+     dis.opmap["RETURN_VALUE"],
+     dis.opmap["JUMP_FORWARD"],
+     dis.opmap["RAISE_VARARGS"],
+     # TODO(jansel): double check exception handling
+ }
+ if sys.version_info >= (3, 9):
+     TERMINAL_OPCODES.add(dis.opmap["RERAISE"])
+ if sys.version_info >= (3, 11):
+     TERMINAL_OPCODES.add(dis.opmap["JUMP_BACKWARD"])
+     TERMINAL_OPCODES.add(dis.opmap["JUMP_FORWARD"])
+ else:
+     TERMINAL_OPCODES.add(dis.opmap["JUMP_ABSOLUTE"])
+ JUMP_OPCODES = set(dis.hasjrel + dis.hasjabs)
+ JUMP_OPNAMES = {dis.opname[opcode] for opcode in JUMP_OPCODES}
+ HASLOCAL = set(dis.haslocal)
+ HASFREE = set(dis.hasfree)
+
+ stack_effect = dis.stack_effect
+
+
+ def get_indexof(insts):
+     """
+     Get a mapping from instruction memory address to index in instruction list.
+     Additionally checks that each instruction only appears once in the list.
+     """
+     indexof = {}
+     for i, inst in enumerate(insts):
+         assert inst not in indexof
+         indexof[inst] = i
+     return indexof
+
+
+ def remove_dead_code(instructions):
+     """Dead code elimination"""
+     indexof = get_indexof(instructions)
+     live_code = set()
+
+     def find_live_code(start):
+         for i in range(start, len(instructions)):
+             if i in live_code:
+                 return
+             live_code.add(i)
+             inst = instructions[i]
+             if inst.exn_tab_entry:
+                 find_live_code(indexof[inst.exn_tab_entry.target])
+             if inst.opcode in JUMP_OPCODES:
+                 find_live_code(indexof[inst.target])
+             if inst.opcode in TERMINAL_OPCODES:
+                 return
+
+     find_live_code(0)
+
+     # change exception table entries if start/end instructions are dead
+     # assumes that exception table entries have been propagated,
+     # e.g. with bytecode_transformation.propagate_inst_exn_table_entries,
+     # and that instructions with an exn_tab_entry lies within its start/end.
+     if sys.version_info >= (3, 11):
+         live_idx = sorted(live_code)
+         for i, inst in enumerate(instructions):
+             if i in live_code and inst.exn_tab_entry:
+                 # find leftmost live instruction >= start
+                 start_idx = bisect.bisect_left(
+                     live_idx, indexof[inst.exn_tab_entry.start]
+                 )
+                 assert start_idx < len(live_idx)
+                 # find rightmost live instruction <= end
+                 end_idx = (
+                     bisect.bisect_right(live_idx, indexof[inst.exn_tab_entry.end]) - 1
+                 )
+                 assert end_idx >= 0
+                 assert live_idx[start_idx] <= i <= live_idx[end_idx]
+                 inst.exn_tab_entry.start = instructions[live_idx[start_idx]]
+                 inst.exn_tab_entry.end = instructions[live_idx[end_idx]]
+
+     return [inst for i, inst in enumerate(instructions) if i in live_code]
+
+
+ def remove_pointless_jumps(instructions):
+     """Eliminate jumps to the next instruction"""
+     pointless_jumps = {
+         id(a)
+         for a, b in zip(instructions, instructions[1:])
+         if a.opname == "JUMP_ABSOLUTE" and a.target is b
+     }
+     return [inst for inst in instructions if id(inst) not in pointless_jumps]
+
+
+ def propagate_line_nums(instructions):
+     """Ensure every instruction has line number set in case some are removed"""
+     cur_line_no = None
+
+     def populate_line_num(inst):
+         nonlocal cur_line_no
+         if inst.starts_line:
+             cur_line_no = inst.starts_line
+
+         inst.starts_line = cur_line_no
+
+     for inst in instructions:
+         populate_line_num(inst)
+
+
+ def remove_extra_line_nums(instructions):
+     """Remove extra starts line properties before packing bytecode"""
+
+     cur_line_no = None
+
+     def remove_line_num(inst):
+         nonlocal cur_line_no
+         if inst.starts_line is None:
+             return
+         elif inst.starts_line == cur_line_no:
+             inst.starts_line = None
+         else:
+             cur_line_no = inst.starts_line
+
+     for inst in instructions:
+         remove_line_num(inst)
+
+
+ @dataclasses.dataclass
+ class ReadsWrites:
+     reads: Set[Any]
+     writes: Set[Any]
+     visited: Set[Any]
+
+
+ def livevars_analysis(instructions, instruction):
+     indexof = get_indexof(instructions)
+     must = ReadsWrites(set(), set(), set())
+     may = ReadsWrites(set(), set(), set())
+
+     def walk(state, start):
+         if start in state.visited:
+             return
+         state.visited.add(start)
+
+         for i in range(start, len(instructions)):
+             inst = instructions[i]
+             if inst.opcode in HASLOCAL or inst.opcode in HASFREE:
+                 if "LOAD" in inst.opname or "DELETE" in inst.opname:
+                     if inst.argval not in must.writes:
+                         state.reads.add(inst.argval)
+                 elif "STORE" in inst.opname:
+                     state.writes.add(inst.argval)
+                 elif inst.opname == "MAKE_CELL":
+                     pass
+                 else:
+                     raise NotImplementedError(f"unhandled {inst.opname}")
+             if inst.exn_tab_entry:
+                 walk(may, indexof[inst.exn_tab_entry.target])
+             if inst.opcode in JUMP_OPCODES:
+                 walk(may, indexof[inst.target])
+                 state = may
+             if inst.opcode in TERMINAL_OPCODES:
+                 return
+
+     walk(must, indexof[instruction])
+     return must.reads | may.reads
+
+
+ @dataclasses.dataclass
+ class FixedPointBox:
+     value: bool = True
+
+
+ @dataclasses.dataclass
+ class StackSize:
+     low: Union[int, float]
+     high: Union[int, float]
+     fixed_point: FixedPointBox
+
+     def zero(self):
+         self.low = 0
+         self.high = 0
+         self.fixed_point.value = False
+
+     def offset_of(self, other, n):
+         prior = (self.low, self.high)
+         self.low = min(self.low, other.low + n)
+         self.high = max(self.high, other.high + n)
+         if (self.low, self.high) != prior:
+             self.fixed_point.value = False
+
+     def exn_tab_jump(self, depth):
+         prior = (self.low, self.high)
+         self.low = min(self.low, depth)
+         self.high = max(self.high, depth)
+         if (self.low, self.high) != prior:
+             self.fixed_point.value = False
+
+
+ def stacksize_analysis(instructions) -> Union[int, float]:
+     assert instructions
+     fixed_point = FixedPointBox()
+     stack_sizes = {
+         inst: StackSize(float("inf"), float("-inf"), fixed_point)
+         for inst in instructions
+     }
+     stack_sizes[instructions[0]].zero()
+
+     for _ in range(100):
+         if fixed_point.value:
+             break
+         fixed_point.value = True
+
+         for inst, next_inst in zip(instructions, instructions[1:] + [None]):
+             stack_size = stack_sizes[inst]
+             # CALL_FINALLY in Python 3.8 is handled differently when determining stack depth.
+             # See https://github.com/python/cpython/blob/3.8/Python/compile.c#L5450.
+             # Essentially, the stack effect of CALL_FINALLY is computed with jump=True,
+             # but the resulting stack depth is propagated to the next instruction, not the
+             # jump target.
+             is_call_finally = (
+                 sys.version_info < (3, 9) and inst.opcode == dis.opmap["CALL_FINALLY"]
+             )
+             if inst.opcode not in TERMINAL_OPCODES:
+                 assert next_inst is not None, f"missing next inst: {inst}"
+                 stack_sizes[next_inst].offset_of(
+                     stack_size,
+                     stack_effect(inst.opcode, inst.arg, jump=is_call_finally),
+                 )
+             if inst.opcode in JUMP_OPCODES and not is_call_finally:
+                 stack_sizes[inst.target].offset_of(
+                     stack_size, stack_effect(inst.opcode, inst.arg, jump=True)
+                 )
+             if inst.exn_tab_entry:
+                 # see https://github.com/python/cpython/blob/3.11/Objects/exception_handling_notes.txt
+                 # on why depth is computed this way.
+                 depth = inst.exn_tab_entry.depth + int(inst.exn_tab_entry.lasti) + 1
+                 stack_sizes[inst.exn_tab_entry.target].exn_tab_jump(depth)
+
+     if False:
+         for inst in instructions:
+             stack_size = stack_sizes[inst]
+             print(stack_size.low, stack_size.high, inst)
+
+     low = min([x.low for x in stack_sizes.values()])
+     high = max([x.high for x in stack_sizes.values()])
+
+     assert fixed_point.value, "failed to reach fixed point"
+     assert low >= 0
+     return high
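stacksize_analysis above propagates dis.stack_effect along control-flow edges until the per-instruction (low, high) bounds stop changing. For straight-line code the idea collapses to a running sum, which this hedged sketch reproduces with the standard dis module; the sample function is illustrative:

    import dis

    def straight_line(a, b):  # no jumps, so no fixed-point iteration needed
        t = a + b
        return t * 2

    depth = 0
    max_depth = 0
    for inst in dis.get_instructions(straight_line):
        # accumulate the same per-opcode effects stacksize_analysis uses
        depth += dis.stack_effect(inst.opcode, inst.arg)
        max_depth = max(max_depth, depth)

    # for this simple function the result matches CPython's own co_stacksize
    print(max_depth, straight_line.__code__.co_stacksize)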
venv/lib/python3.10/site-packages/torch/_dynamo/bytecode_transformation.py ADDED
@@ -0,0 +1,1114 @@
1
+ import copy
2
+ import dataclasses
3
+ import dis
4
+ import itertools
5
+ import sys
6
+ import types
7
+ from typing import Any, Callable, cast, Dict, Iterator, List, Optional, Tuple
8
+
9
+ from .bytecode_analysis import (
10
+ get_indexof,
11
+ propagate_line_nums,
12
+ remove_extra_line_nums,
13
+ stacksize_analysis,
14
+ )
15
+
16
+
17
+ @dataclasses.dataclass
18
+ class InstructionExnTabEntry:
19
+ start: "Instruction"
20
+ end: "Instruction"
21
+ target: "Instruction"
22
+ depth: int
23
+ lasti: bool
24
+
25
+ def __repr__(self) -> str:
26
+ return (
27
+ f"InstructionExnTabEntry(start={self.start.short_inst_repr()}, "
28
+ f"end={self.end.short_inst_repr()}, "
29
+ f"target={self.target.short_inst_repr()}, "
30
+ f"depth={self.depth}, lasti={self.lasti})"
31
+ )
32
+
33
+ def __eq__(self, o) -> bool:
34
+ return (
35
+ self.start is o.start
36
+ and self.end is o.end
37
+ and self.target is o.target
38
+ and self.depth == o.depth
39
+ and self.lasti == o.lasti
40
+ )
41
+
42
+
43
+ @dataclasses.dataclass
44
+ class Instruction:
45
+ """A mutable version of dis.Instruction"""
46
+
47
+ opcode: int
48
+ opname: str
49
+ arg: Optional[int]
50
+ argval: Any
51
+ offset: Optional[int] = None
52
+ starts_line: Optional[int] = None
53
+ is_jump_target: bool = False
54
+ positions: Optional["dis.Positions"] = None
55
+ # extra fields to make modification easier:
56
+ target: Optional["Instruction"] = None
57
+ exn_tab_entry: Optional[InstructionExnTabEntry] = None
58
+
59
+ def __hash__(self) -> int:
60
+ return id(self)
61
+
62
+ def __eq__(self, other) -> bool:
63
+ return id(self) == id(other)
64
+
65
+ def short_inst_repr(self) -> str:
66
+ return f"Instruction(opname={self.opname}, offset={self.offset})"
67
+
68
+
69
+ def convert_instruction(i: dis.Instruction) -> Instruction:
70
+ return Instruction(
71
+ i.opcode,
72
+ i.opname,
73
+ i.arg,
74
+ i.argval,
75
+ i.offset,
76
+ i.starts_line,
77
+ i.is_jump_target,
78
+ getattr(i, "positions", None),
79
+ )
80
+
81
+
82
+ class _NotProvided:
83
+ def __repr__(self) -> str:
84
+ return "_NotProvided"
85
+
86
+
87
+ def create_instruction(
88
+ name, *, arg=None, argval=_NotProvided, target=None
89
+ ) -> Instruction:
90
+ """
91
+ At most one of `arg`, `argval`, and `target` can be not None/_NotProvided.
92
+ This is to prevent ambiguity, e.g. does
93
+ create_instruction("LOAD_CONST", 5)
94
+ mean load the constant at co_consts[5], or load the constant 5?
95
+
96
+ If `arg` is not provided, it will be computed during assembly from
97
+ `argval` or `target`.
98
+
99
+ Do not use for LOAD_GLOBAL - use create_load_global instead.
100
+ """
101
+ assert name != "LOAD_GLOBAL"
102
+ cnt = (arg is not None) + (argval is not _NotProvided) + (target is not None)
103
+ if cnt > 1:
104
+ raise RuntimeError(
105
+ "only one of arg, argval, and target can be not None/_NotProvided"
106
+ )
107
+ if arg is not None and not isinstance(arg, int):
108
+ raise RuntimeError("instruction arg must be int or None")
109
+ return Instruction(
110
+ opcode=dis.opmap[name], opname=name, arg=arg, argval=argval, target=target
111
+ )
112
+
113
+
114
+ # Python 3.11 remaps
115
+ def create_jump_absolute(target) -> Instruction:
116
+ inst = "JUMP_FORWARD" if sys.version_info >= (3, 11) else "JUMP_ABSOLUTE"
117
+ return create_instruction(inst, target=target)
118
+
119
+
120
+ def create_load_global(name, push_null) -> Instruction:
121
+ """
122
+ `name` is the name of the global to be loaded.
123
+ `push_null` specifies whether or not a NULL should be pushed to the stack
124
+ before the global (Python 3.11+ only).
125
+
126
+ Python 3.11 changed the LOAD_GLOBAL instruction in that the first bit of
127
+ the instruction arg specifies whether a NULL should be pushed to the stack
128
+ before the global. The remaining bits of the instruction arg contain the
129
+ name index. See `create_call_function` for why this NULL is needed.
130
+
131
+ The instruction's `arg` is actually computed when assembling the bytecode.
132
+ For Python 3.11, push_null information is propagated through the arg.
133
+
134
+ NOTE: we don't use create_instruction since LOAD_GLOBAL is the only instruction
135
+ where both arg and argval need to be specified.
136
+ """
137
+ return Instruction(
138
+ opcode=dis.opmap["LOAD_GLOBAL"],
139
+ opname="LOAD_GLOBAL",
140
+ arg=push_null,
141
+ argval=name,
142
+ )
143
+
144
+
145
+ def create_dup_top() -> Instruction:
146
+ if sys.version_info >= (3, 11):
147
+ return create_instruction("COPY", arg=1)
148
+ return create_instruction("DUP_TOP")
149
+
150
+
151
+ def create_rot_n(n) -> List[Instruction]:
152
+ """
153
+ Returns a "simple" sequence of instructions that rotates TOS to the n-th
154
+ position in the stack. For Python < 3.11, returns a single ROT_*
155
+ instruction. If no such instruction exists, an error is raised and the
156
+ caller is expected to generate an equivalent sequence of instructions.
157
+ For Python >= 3.11, any rotation can be expressed as a simple sequence of
158
+ swaps.
159
+ """
160
+ if n <= 1:
161
+ # don't rotate
162
+ return []
163
+
164
+ if sys.version_info >= (3, 11):
165
+ # rotate can be expressed as a sequence of swap operations
166
+ # e.g. rotate 3 is equivalent to swap 3, swap 2
167
+ return [create_instruction("SWAP", arg=i) for i in range(n, 1, -1)]
168
+
169
+ # ensure desired rotate function exists
170
+ if sys.version_info < (3, 8) and n >= 4:
171
+ raise AttributeError(f"rotate {n} not supported for Python < 3.8")
172
+ if sys.version_info < (3, 10) and n >= 5:
173
+ raise AttributeError(f"rotate {n} not supported for Python < 3.10")
174
+
175
+ if n <= 4:
176
+ return [create_instruction("ROT_" + ["TWO", "THREE", "FOUR"][n - 2])]
177
+ return [create_instruction("ROT_N", arg=n)]
178
+
179
+
180
+ def create_call_function(nargs, push_null) -> List[Instruction]:
181
+ """
182
+ Creates a sequence of instructions that makes a function call.
183
+
184
+ `push_null` is used in Python 3.11+ only. It is used in codegen when
185
+ a function call is intended to be made with the NULL + fn convention,
186
+ and we know that the NULL has not been pushed yet. We will push a
187
+ NULL and rotate it to the correct position immediately before making
188
+ the function call.
189
+ push_null should default to True unless you know you are calling a function
190
+ that you codegen'd with a null already pushed, for example
191
+ (assume `math` is available in the global scope),
192
+
193
+ create_load_global("math", True) # pushes a null
194
+ create_instruction("LOAD_ATTR", argval="sqrt")
195
+ create_instruction("LOAD_CONST", argval=25)
196
+ create_call_function(1, False)
197
+ """
198
+ if sys.version_info >= (3, 11):
199
+ output = []
200
+ if push_null:
201
+ output.append(create_instruction("PUSH_NULL"))
202
+ output.extend(create_rot_n(nargs + 2))
203
+ output.append(create_instruction("PRECALL", arg=nargs))
204
+ output.append(create_instruction("CALL", arg=nargs))
205
+ return output
206
+ return [create_instruction("CALL_FUNCTION", arg=nargs)]
207
+
208
+
209
+ def create_call_method(nargs) -> List[Instruction]:
210
+ if sys.version_info >= (3, 11):
211
+ return [
212
+ create_instruction("PRECALL", arg=nargs),
213
+ create_instruction("CALL", arg=nargs),
214
+ ]
215
+ return [create_instruction("CALL_METHOD", arg=nargs)]
216
+
217
+
218
+ def lnotab_writer(
219
+ lineno: int, byteno: int = 0
220
+ ) -> Tuple[List[int], Callable[[int, int], None]]:
221
+ """
222
+ Used to create typing.CodeType.co_lnotab
223
+ See https://github.com/python/cpython/blob/main/Objects/lnotab_notes.txt
224
+ This is the internal format of the line number table if Python < 3.10
225
+ """
226
+ assert sys.version_info < (3, 10)
227
+ lnotab: List[int] = []
228
+
229
+ def update(lineno_new, byteno_new):
230
+ nonlocal byteno, lineno
231
+ while byteno_new != byteno or lineno_new != lineno:
232
+ byte_offset = max(0, min(byteno_new - byteno, 255))
233
+ line_offset = max(-128, min(lineno_new - lineno, 127))
234
+ assert byte_offset != 0 or line_offset != 0
235
+ byteno += byte_offset
236
+ lineno += line_offset
237
+ lnotab.extend((byte_offset, line_offset & 0xFF))
238
+
239
+ return lnotab, update
240
+
241
+
242
+ def linetable_310_writer(first_lineno):
243
+ """
244
+ Used to create typing.CodeType.co_linetable
245
+ See https://github.com/python/cpython/blob/main/Objects/lnotab_notes.txt
246
+ This is the internal format of the line number table for Python 3.10
247
+ """
248
+ assert sys.version_info >= (3, 10) and sys.version_info < (3, 11)
249
+ linetable: List[int] = []
250
+ lineno = first_lineno
251
+ lineno_delta = 0
252
+ byteno = 0
253
+
254
+ def _update(byteno_delta, lineno_delta):
255
+ while byteno_delta != 0 or lineno_delta != 0:
256
+ byte_offset = max(0, min(byteno_delta, 254))
257
+ line_offset = max(-127, min(lineno_delta, 127))
258
+ assert byte_offset != 0 or line_offset != 0
259
+ byteno_delta -= byte_offset
260
+ lineno_delta -= line_offset
261
+ linetable.extend((byte_offset, line_offset & 0xFF))
262
+
263
+ def update(lineno_new, byteno_new):
264
+ nonlocal lineno, lineno_delta, byteno
265
+ byteno_delta = byteno_new - byteno
266
+ byteno = byteno_new
267
+ _update(byteno_delta, lineno_delta)
268
+ lineno_delta = lineno_new - lineno
269
+ lineno = lineno_new
270
+
271
+ def end(total_bytes):
272
+ _update(total_bytes - byteno, lineno_delta)
273
+
274
+ return linetable, update, end
275
+
276
+
277
+ def encode_varint(n: int) -> List[int]:
278
+ """
279
+ 6-bit chunk encoding of an unsigned integer
280
+ See https://github.com/python/cpython/blob/3.11/Objects/locations.md
281
+ """
282
+ assert n >= 0
283
+ b = [n & 63]
284
+ n >>= 6
285
+ while n > 0:
286
+ b[-1] |= 64
287
+ b.append(n & 63)
288
+ n >>= 6
289
+ return b
290
+
291
+
292
+ def linetable_311_writer(first_lineno: int):
293
+ """
294
+ Used to create typing.CodeType.co_linetable
295
+ See https://github.com/python/cpython/blob/3.11/Objects/locations.md
296
+ This is the internal format of the line number table for Python 3.11
297
+ """
298
+ assert sys.version_info >= (3, 11)
299
+ linetable = []
300
+ lineno = first_lineno
301
+
302
+ def update(positions: "dis.Positions", inst_size):
303
+ nonlocal lineno
304
+ lineno_new = positions.lineno if positions else None
305
+
306
+ def _update(delta, size):
307
+ assert 0 < size <= 8
308
+ # first byte - use 13 (no column info) is positions is
309
+ # malformed, otherwise use 14 (long form)
310
+ other_varints: Tuple[int, ...] = ()
311
+ if (
312
+ positions
313
+ and positions.lineno is not None
314
+ and positions.end_lineno is not None
315
+ and positions.col_offset is not None
316
+ and positions.end_col_offset is not None
317
+ ):
318
+ linetable.append(0b1_1110_000 + size - 1)
319
+ # for whatever reason, column offset needs `+ 1`
320
+ # https://github.com/python/cpython/blob/1931c2a438c50e6250725c84dff94fc760b9b951/Python/compile.c#L7603
321
+ other_varints = (
322
+ positions.end_lineno - positions.lineno,
323
+ positions.col_offset + 1,
324
+ positions.end_col_offset + 1,
325
+ )
326
+ else:
327
+ linetable.append(0b1_1101_000 + size - 1)
328
+ # encode signed int
329
+ if delta < 0:
330
+ delta = ((-delta) << 1) | 1
331
+ else:
332
+ delta <<= 1
333
+ # encode unsigned int
334
+ linetable.extend(encode_varint(delta))
335
+ for n in other_varints:
336
+ linetable.extend(encode_varint(n))
337
+
338
+ if lineno_new is None:
339
+ lineno_delta = 0
340
+ else:
341
+ lineno_delta = lineno_new - lineno
342
+ lineno = lineno_new
343
+ while inst_size > 8:
344
+ _update(lineno_delta, 8)
345
+ inst_size -= 8
346
+ _update(lineno_delta, inst_size)
347
+
348
+ return linetable, update
349
+
350
+
351
+ @dataclasses.dataclass
352
+ class ExceptionTableEntry:
353
+ start: int
354
+ end: int
355
+ target: int
356
+ depth: int
357
+ lasti: bool
358
+
359
+
360
+ def encode_exception_table_varint(n: int) -> List[int]:
361
+ """
362
+ Similar to `encode_varint`, but the 6-bit chunks are ordered in reverse.
363
+ """
364
+ assert n >= 0
365
+ b = [n & 63]
366
+ n >>= 6
367
+ while n > 0:
368
+ b.append(n & 63)
369
+ n >>= 6
370
+ b.reverse()
371
+ for i in range(len(b) - 1):
372
+ b[i] |= 64
373
+ return b
374
+
375
+
376
+ def decode_exception_table_varint(bytes_iter: Iterator[int]) -> int:
377
+ """
378
+ Inverse of `encode_exception_table_varint`.
379
+ """
380
+ b = next(bytes_iter)
381
+ val = b & 63
382
+ while b & 64:
383
+ val <<= 6
384
+ b = next(bytes_iter)
385
+ val |= b & 63
386
+ return val
387
+
388
+
389
+ def check_exception_table(tab: List[ExceptionTableEntry]) -> None:
390
+ """
391
+ Verifies that a list of ExceptionTableEntries will make a well-formed
392
+ jump table: entries are non-empty, sorted, and do not overlap.
393
+ """
394
+ for i in range(len(tab) - 1):
395
+ assert (
396
+ tab[i].start <= tab[i].end
397
+ and tab[i].end < tab[i + 1].start
398
+ and tab[i + 1].start <= tab[i + 1].end
399
+ )
400
+
401
+
402
+ def parse_exception_table(exntab: bytes) -> List[ExceptionTableEntry]:
403
+ """
404
+ Parse the exception table according to
405
+ https://github.com/python/cpython/blob/3.11/Objects/exception_handling_notes.txt
406
+ """
407
+ exntab_iter = iter(exntab)
408
+ tab = []
409
+ try:
410
+ while True:
411
+ start = decode_exception_table_varint(exntab_iter) * 2
412
+ length = decode_exception_table_varint(exntab_iter) * 2
413
+ end = start + length - 2
414
+ target = decode_exception_table_varint(exntab_iter) * 2
415
+ dl = decode_exception_table_varint(exntab_iter)
416
+ depth = dl >> 1
417
+ lasti = bool(dl & 1)
418
+ tab.append(ExceptionTableEntry(start, end, target, depth, lasti))
419
+ except StopIteration:
420
+ check_exception_table(tab)
421
+ return tab
422
+
423
+
424
+ def assemble_exception_table(tab: List[ExceptionTableEntry]) -> bytes:
425
+ """
426
+ Inverse of parse_exception_table - encodes list of exception
427
+ table entries into bytes.
428
+ """
429
+ b = []
430
+ for entry in tab:
431
+ first_entry = encode_exception_table_varint(entry.start // 2)
432
+ first_entry[0] |= 1 << 7
433
+ b.extend(first_entry)
434
+ length = entry.end - entry.start + 2
435
+ b.extend(encode_exception_table_varint(length // 2))
436
+ b.extend(encode_exception_table_varint(entry.target // 2))
437
+ dl = (entry.depth << 1) + entry.lasti
438
+ b.extend(encode_exception_table_varint(dl))
439
+ return bytes(b)
440
+
441
+
442
+ def assemble(instructions: List[Instruction], firstlineno: int) -> Tuple[bytes, bytes]:
443
+ """Do the opposite of dis.get_instructions()"""
444
+ code: List[int] = []
445
+ if sys.version_info >= (3, 11):
446
+ lnotab, update_lineno = linetable_311_writer(firstlineno)
447
+ num_ext = 0
448
+ for i, inst in enumerate(instructions):
449
+ if inst.opname == "EXTENDED_ARG":
450
+ inst_size = 1
451
+ num_ext += 1
452
+ # copy positions from the actual instruction
453
+ for j in (1, 2, 3):
454
+ if instructions[i + j].opname != "EXTENDED_ARG":
455
+ inst.positions = instructions[i + j].positions
456
+ break
457
+ else:
458
+ inst_size = instruction_size(inst) // 2 + num_ext
459
+ num_ext = 0
460
+ update_lineno(inst.positions, inst_size)
461
+ num_ext = 0
462
+ arg = inst.arg or 0
463
+ code.extend((inst.opcode, arg & 0xFF))
464
+ for _ in range(instruction_size(inst) // 2 - 1):
465
+ code.extend((0, 0))
466
+ else:
467
+ if sys.version_info < (3, 10):
468
+ lnotab, update_lineno = lnotab_writer(firstlineno)
469
+ else:
470
+ lnotab, update_lineno, end = linetable_310_writer(firstlineno)
471
+
472
+ for inst in instructions:
473
+ if inst.starts_line is not None:
474
+ update_lineno(inst.starts_line, len(code))
475
+ arg = inst.arg or 0
476
+ code.extend((inst.opcode, arg & 0xFF))
477
+
478
+ if sys.version_info >= (3, 10):
479
+ end(len(code))
480
+
481
+ return bytes(code), bytes(lnotab)
482
+
483
+
484
+ def _get_instruction_by_offset(offset_to_inst: Dict[int, Instruction], offset: int):
485
+ """
486
+ Get the instruction located at a given offset, accounting for EXTENDED_ARGs
487
+ """
488
+ for n in (0, 2, 4, 6):
489
+ if offset_to_inst[offset + n].opcode != dis.EXTENDED_ARG:
490
+ return offset_to_inst[offset + n]
491
+ return None
492
+
493
+
494
+ def virtualize_jumps(instructions) -> None:
495
+ """Replace jump targets with pointers to make editing easier"""
496
+ jump_targets = {inst.offset: inst for inst in instructions}
497
+
498
+ for inst in instructions:
499
+ if inst.opcode in dis.hasjabs or inst.opcode in dis.hasjrel:
500
+ inst.target = _get_instruction_by_offset(jump_targets, inst.argval)
501
+
502
+
503
+ _REL_JUMPS = set(dis.hasjrel)
504
+
505
+
506
+ def flip_jump_direction(instruction: Instruction) -> None:
507
+ if sys.version_info < (3, 11):
508
+ raise RuntimeError("Cannot flip jump direction in Python < 3.11")
509
+ if "FORWARD" in instruction.opname:
510
+ instruction.opname = instruction.opname.replace("FORWARD", "BACKWARD")
511
+ elif "BACKWARD" in instruction.opname:
512
+ instruction.opname = instruction.opname.replace("BACKWARD", "FORWARD")
513
+ else:
514
+ raise AttributeError("Instruction is not a forward or backward jump")
515
+ instruction.opcode = dis.opmap[instruction.opname]
516
+ assert instruction.opcode in _REL_JUMPS
517
+
518
+
519
+ def _get_instruction_front(instructions: List[Instruction], idx: int):
520
+ """
521
+ i.e. get the first EXTENDED_ARG instruction (if any) when targeting
522
+ instructions[idx] with a jump.
523
+ """
524
+ target = instructions[idx]
525
+ for offset in (1, 2, 3):
526
+ if idx >= offset and instructions[idx - offset].opcode == dis.EXTENDED_ARG:
527
+ target = instructions[idx - offset]
528
+ else:
529
+ break
530
+ return target
531
+
532
+
533
+ def devirtualize_jumps(instructions):
534
+ """Fill in args for virtualized jump target after instructions may have moved"""
535
+ indexof = get_indexof(instructions)
536
+ jumps = set(dis.hasjabs).union(set(dis.hasjrel))
537
+
538
+ for inst in instructions:
539
+ if inst.opcode in jumps:
540
+ target = _get_instruction_front(instructions, indexof[inst.target])
541
+ if inst.opcode in dis.hasjabs:
542
+ if sys.version_info < (3, 10):
543
+ inst.arg = target.offset
544
+ elif sys.version_info < (3, 11):
545
+ # `arg` is expected to be bytecode offset, whereas `offset` is byte offset.
546
+ # Divide since bytecode is 2 bytes large.
547
+ inst.arg = int(target.offset / 2)
548
+ else:
549
+ raise RuntimeError("Python 3.11+ should not have absolute jumps")
550
+ else: # relative jump
551
+ # byte offset between target and next instruction
552
+ inst.arg = int(target.offset - inst.offset - instruction_size(inst))
553
+ if inst.arg < 0:
554
+ if sys.version_info < (3, 11):
555
+ raise RuntimeError("Got negative jump offset for Python < 3.11")
556
+ inst.arg = -inst.arg
557
+ # forward jumps become backward
558
+ if "FORWARD" in inst.opname:
559
+ flip_jump_direction(inst)
560
+ elif inst.arg > 0:
561
+ # backward jumps become forward
562
+ if sys.version_info >= (3, 11) and "BACKWARD" in inst.opname:
563
+ flip_jump_direction(inst)
564
+ if sys.version_info >= (3, 10):
565
+ # see bytecode size comment in the absolute jump case above
566
+ inst.arg //= 2
567
+ inst.argval = target.offset
568
+ inst.argrepr = f"to {target.offset}"
569
+
570
+
571
+ def virtualize_exception_table(exn_tab_bytes: bytes, instructions: List[Instruction]):
572
+ """Replace exception table entries with pointers to make editing easier"""
573
+ exn_tab = parse_exception_table(exn_tab_bytes)
574
+ offset_to_inst = {cast(int, inst.offset): inst for inst in instructions}
575
+ offsets = sorted(offset_to_inst.keys())
576
+ end_offset_idx = 0
577
+ exn_tab_iter = iter(exn_tab)
578
+ try:
579
+
580
+ def step():
581
+ nonlocal end_offset_idx
582
+ entry = next(exn_tab_iter)
583
+ # find rightmost offset <= entry.end, since entry.end may not be
584
+ # an actual instruction, e.g. if the end instruction is LOAD_GLOBAL,
585
+ # which takes more than 2 bytes, then entry.end points to the end
586
+ # of the LOAD_GLOBAL instruction, not the beginning.
587
+ while (
588
+ end_offset_idx < len(offsets) and offsets[end_offset_idx] <= entry.end
589
+ ):
590
+ end_offset_idx += 1
591
+ assert end_offset_idx > 0
592
+ end_offset = offsets[end_offset_idx - 1]
593
+ inst_entry = InstructionExnTabEntry(
594
+ _get_instruction_by_offset(offset_to_inst, entry.start),
595
+ _get_instruction_by_offset(offset_to_inst, end_offset),
596
+ _get_instruction_by_offset(offset_to_inst, entry.target),
597
+ entry.depth,
598
+ entry.lasti,
599
+ )
600
+ return entry, inst_entry
601
+
602
+ entry, inst_entry = step()
603
+ for inst in instructions:
604
+ while inst.offset > entry.end:
605
+ entry, inst_entry = step()
606
+ if inst.offset >= entry.start:
607
+ inst.exn_tab_entry = copy.copy(inst_entry)
608
+ except StopIteration:
609
+ pass
610
+
611
+
612
+ def compute_exception_table(
613
+ instructions: List[Instruction],
614
+ ) -> List[ExceptionTableEntry]:
615
+ """Compute exception table in list format from instructions with exn_tab_entries"""
616
+ exn_dict: Dict[Tuple[int, int], Tuple[int, int, bool]] = {}
617
+ indexof = get_indexof(instructions)
618
+
619
+ for inst in instructions:
620
+ if inst.exn_tab_entry:
621
+ # account for prefixed EXTENDED_ARGS
622
+ start = _get_instruction_front(
623
+ instructions, indexof[inst.exn_tab_entry.start]
624
+ ).offset
625
+ # point to the last 2 bytes of the end instruction
626
+ end = (
627
+ cast(int, inst.exn_tab_entry.end.offset)
628
+ + instruction_size(inst.exn_tab_entry.end)
629
+ - 2
630
+ )
631
+ target = _get_instruction_front(
632
+ instructions, indexof[inst.exn_tab_entry.target]
633
+ ).offset
634
+ key = (start, end)
635
+ val = (target, inst.exn_tab_entry.depth, inst.exn_tab_entry.lasti)
636
+ if key in exn_dict:
637
+ assert exn_dict[key] == val
638
+ exn_dict[key] = val
639
+
640
+ # Dynamo may construct nested exception table entries for convenience,
641
+ # but Python expects exception table entries to not overlap.
642
+ # NOTE: below, "keys" refer to old instruction entries' starts and ends,
643
+ # and "entries" refer to the generated exception table entries.
644
+
645
+ # Sort keys by increasing start, then decreasing end
646
+ keys_sorted = sorted(exn_dict.keys(), key=lambda t: (t[0], -t[1]))
647
+ # smallest byte that the next exception table entry can start at
648
+ nexti = 0
649
+ # stack of current nested keys
650
+ key_stack: List[Tuple[int, int]] = []
651
+ exn_tab: List[ExceptionTableEntry] = []
652
+
653
+ def pop():
654
+ """
655
+ Pop the key_stack and append an exception table entry if possible.
656
+ """
657
+ nonlocal nexti
658
+ if key_stack:
659
+ key = key_stack.pop()
660
+ if nexti <= key[1]:
661
+ exn_tab.append(
662
+ ExceptionTableEntry(max(key[0], nexti), key[1], *exn_dict[key])
663
+ )
664
+ nexti = key[1] + 2
665
+
666
+ for key in keys_sorted:
667
+ # pop keys that are no longer nested over the current key
668
+ while key_stack and key_stack[-1][1] < key[0]:
669
+ pop()
670
+ if key_stack:
671
+ # create an entry covering to the current key, if possible
672
+ assert key_stack[-1][0] <= key[0] <= key[1] <= key_stack[-1][1]
673
+ left = max(nexti, key_stack[-1][0])
674
+ if left < key[0]:
675
+ exn_tab.append(
676
+ ExceptionTableEntry(left, key[0] - 2, *exn_dict[key_stack[-1]])
677
+ )
678
+ nexti = key[0]
679
+ key_stack.append(key)
680
+ while key_stack:
681
+ pop()
682
+ check_exception_table(exn_tab)
683
+ return exn_tab
684
+
685
+
686
+ def check_inst_exn_tab_entries_nested(
687
+ tab: List[InstructionExnTabEntry], indexof
688
+ ) -> None:
689
+ """
690
+ Checks `tab` is a properly sorted list of nested InstructionExnTabEntry's,
691
+ i.e. no entries partially overlap.
692
+ "Properly sorted" means entries are sorted by increasing starts, then
693
+ decreasing ends.
694
+ """
695
+ entry_stack: List[Tuple[int, int]] = []
696
+ for entry in tab:
697
+ key = (indexof[entry.start], indexof[entry.end])
698
+ while entry_stack and entry_stack[-1][1] < key[0]:
699
+ entry_stack.pop()
700
+ if entry_stack:
701
+ assert entry_stack[-1][0] <= key[0] <= key[1] <= entry_stack[-1][1]
702
+ entry_stack.append(key)
703
+
704
+
705
+ def propagate_inst_exn_table_entries(instructions: List[Instruction]) -> None:
706
+ """
707
+ Copies exception table entries to all instructions in an entry's range.
708
+ Supports nested exception table entries.
709
+ """
710
+ indexof = get_indexof(instructions)
711
+ entries: Dict[Tuple[int, int], InstructionExnTabEntry] = {}
712
+ for inst in instructions:
713
+ if inst.exn_tab_entry:
714
+ key = (
715
+ indexof[inst.exn_tab_entry.start],
716
+ indexof[inst.exn_tab_entry.end],
717
+ )
718
+ if key in entries:
719
+ assert inst.exn_tab_entry == entries[key]
720
+ entries[key] = inst.exn_tab_entry
721
+ sorted_entries = [
722
+ entries[key] for key in sorted(entries.keys(), key=lambda t: (t[0], -t[1]))
723
+ ]
724
+ check_inst_exn_tab_entries_nested(sorted_entries, indexof)
725
+ # Propagation of nested entries works since nested entries come later
726
+ # in sorted order.
727
+ for entry in sorted_entries:
728
+ for i in range(indexof[entry.start], indexof[entry.end] + 1):
729
+ instructions[i].exn_tab_entry = copy.copy(entry)
730
+
731
+
732
+ def check_inst_exn_tab_entries_valid(instructions: List[Instruction]):
733
+ """
734
+ Checks that exn_tab_entries of instructions are valid.
735
+ An entry's start, end, and target must be in instructions.
736
+ Instructions with an exn_tab_entry are located within
737
+ the entry's start and end instructions.
738
+ Instructions do not share exn_tab_entries.
739
+
740
+ Implicitly checks for no duplicate instructions.
741
+ """
742
+ indexof = get_indexof(instructions)
743
+ exn_tab_entry_set = set()
744
+ for i, inst in enumerate(instructions):
745
+ if inst.exn_tab_entry:
746
+ assert sys.version_info >= (3, 11)
747
+ assert id(inst.exn_tab_entry) not in exn_tab_entry_set
748
+ exn_tab_entry_set.add(id(inst.exn_tab_entry))
749
+ entry = inst.exn_tab_entry
750
+ assert entry.start in indexof
751
+ assert entry.end in indexof
752
+ assert entry.target in indexof
753
+ assert indexof[entry.start] <= i <= indexof[entry.end]
754
+
755
+
756
+ def strip_extended_args(instructions: List[Instruction]) -> None:
757
+ instructions[:] = [i for i in instructions if i.opcode != dis.EXTENDED_ARG]
758
+
759
+
760
+ def remove_load_call_method(instructions: List[Instruction]) -> List[Instruction]:
761
+ """LOAD_METHOD puts a NULL on the stack which causes issues, so remove it"""
762
+ rewrites = {"LOAD_METHOD": "LOAD_ATTR", "CALL_METHOD": "CALL_FUNCTION"}
763
+ for inst in instructions:
764
+ if inst.opname in rewrites:
765
+ inst.opname = rewrites[inst.opname]
766
+ inst.opcode = dis.opmap[inst.opname]
767
+ return instructions
768
+
769
+
770
+ def remove_jump_if_none(instructions: List[Instruction]) -> None:
771
+ new_insts = []
772
+ for inst in instructions:
773
+ new_insts.append(inst)
774
+ if "_NONE" in inst.opname:
775
+ is_op = create_instruction("IS_OP", arg=int("NOT" in inst.opname))
776
+ is_op.argval = is_op.arg
777
+ jump_op = create_instruction(
778
+ "POP_JUMP_FORWARD_IF_TRUE"
779
+ if "FORWARD" in inst.opname
780
+ else "POP_JUMP_BACKWARD_IF_TRUE",
781
+ target=inst.target,
782
+ )
783
+ # modify inst in-place to preserve jump target
784
+ inst.opcode = dis.opmap["LOAD_CONST"]
785
+ inst.opname = "LOAD_CONST"
786
+ inst.arg = None
787
+ inst.argval = None
788
+ new_insts.extend([is_op, jump_op])
789
+ instructions[:] = new_insts
790
+
791
+
792
+ def explicit_super(code: types.CodeType, instructions: List[Instruction]) -> None:
793
+ """convert super() with no args into explicit arg form"""
794
+ cell_and_free = (code.co_cellvars or tuple()) + (code.co_freevars or tuple())
795
+ if not len(code.co_varnames):
796
+ # A function with no argument cannot contain a valid "super()" call
797
+ return
798
+ output = []
799
+ for idx, inst in enumerate(instructions):
800
+ output.append(inst)
801
+ if inst.opname == "LOAD_GLOBAL" and inst.argval == "super":
802
+ nexti = instructions[idx + 1]
803
+ if nexti.opname in ("CALL_FUNCTION", "PRECALL") and nexti.arg == 0:
804
+ assert "__class__" in cell_and_free
805
+ output.append(create_instruction("LOAD_DEREF", argval="__class__"))
806
+ first_var = code.co_varnames[0]
807
+ if first_var in cell_and_free:
808
+ output.append(create_instruction("LOAD_DEREF", argval=first_var))
809
+ else:
810
+ output.append(create_instruction("LOAD_FAST", argval=first_var))
811
+ nexti.arg = 2
812
+ nexti.argval = 2
813
+ if nexti.opname == "PRECALL":
814
+ # also update the following CALL instruction
815
+ call_inst = instructions[idx + 2]
816
+ call_inst.arg = 2
817
+ call_inst.argval = 2
818
+
819
+ instructions[:] = output
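# For illustration: in a method `def forward(self): return super().forward()`
# (a hypothetical example), the zero-arg `super()` call is rewritten above so the
# bytecode effectively performs `super(__class__, self).forward()`, loading
# `__class__` from the closure and the first argument (typically `self`) explicitly.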
820
+
821
+
822
+ def fix_extended_args(instructions: List[Instruction]) -> int:
823
+ """Fill in correct argvals for EXTENDED_ARG ops"""
824
+ output: List[Instruction] = []
825
+
826
+ def maybe_pop_n(n):
827
+ for _ in range(n):
828
+ if output and output[-1].opcode == dis.EXTENDED_ARG:
829
+ output.pop()
830
+
831
+ for inst in instructions:
832
+ if inst.opcode == dis.EXTENDED_ARG:
833
+ # Leave this instruction alone for now so we never shrink code
834
+ inst.arg = 0
835
+ elif inst.arg and inst.arg > 0xFFFFFF:
836
+ maybe_pop_n(3)
837
+ output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 24))
838
+ output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 16))
839
+ output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8))
840
+ elif inst.arg and inst.arg > 0xFFFF:
841
+ maybe_pop_n(2)
842
+ output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 16))
843
+ output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8))
844
+ elif inst.arg and inst.arg > 0xFF:
845
+ maybe_pop_n(1)
846
+ output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8))
847
+ output.append(inst)
848
+
849
+ added = len(output) - len(instructions)
850
+ assert added >= 0
851
+ instructions[:] = output
852
+ return added
853
+
854
+
855
+ # from https://github.com/python/cpython/blob/v3.11.1/Include/internal/pycore_opcode.h#L41
856
+ # TODO use the actual object instead, can interface from eval_frame.c
857
+ _PYOPCODE_CACHES = {
858
+ "BINARY_SUBSCR": 4,
859
+ "STORE_SUBSCR": 1,
860
+ "UNPACK_SEQUENCE": 1,
861
+ "STORE_ATTR": 4,
862
+ "LOAD_ATTR": 4,
863
+ "COMPARE_OP": 2,
864
+ "LOAD_GLOBAL": 5,
865
+ "BINARY_OP": 1,
866
+ "LOAD_METHOD": 10,
867
+ "PRECALL": 1,
868
+ "CALL": 4,
869
+ }
870
+
871
+
872
+ def instruction_size(inst) -> int:
873
+ if sys.version_info >= (3, 11):
874
+ return 2 * (_PYOPCODE_CACHES.get(dis.opname[inst.opcode], 0) + 1)
875
+ return 2
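# Worked example: on 3.11, LOAD_GLOBAL has 5 inline cache entries (see
# _PYOPCODE_CACHES above), so instruction_size(LOAD_GLOBAL) == 2 * (5 + 1) == 12
# bytes; on 3.10 and earlier every instruction is 2 bytes.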
876
+
877
+
878
+ def check_offsets(instructions) -> None:
879
+ offset = 0
880
+ for inst in instructions:
881
+ assert inst.offset == offset
882
+ offset += instruction_size(inst)
883
+
884
+
885
+ def update_offsets(instructions) -> None:
886
+ offset = 0
887
+ for inst in instructions:
888
+ inst.offset = offset
889
+ offset += instruction_size(inst)
890
+
891
+
892
+ def debug_bytes(*args) -> str:
893
+ index = range(max(map(len, args)))
894
+ result = []
895
+ for arg in (
896
+ [index] + list(args) + [[int(a != b) for a, b in zip(args[-1], args[-2])]]
897
+ ):
898
+ result.append(" ".join(f"{x:03}" for x in arg))
899
+
900
+ return "bytes mismatch\n" + "\n".join(result)
901
+
902
+
903
+ def debug_checks(code):
904
+ """Make sure our assembler produces same bytes as we start with"""
905
+ dode = transform_code_object(code, lambda x, y: None, safe=True)
906
+ assert code.co_code == dode.co_code, debug_bytes(code.co_code, dode.co_code)
907
+ assert code.co_lnotab == dode.co_lnotab, debug_bytes(code.co_lnotab, dode.co_lnotab)
908
+
909
+
910
+ HAS_LOCAL = set(dis.haslocal)
911
+ HAS_NAME = set(dis.hasname)
912
+ HAS_FREE = set(dis.hasfree)
913
+ HAS_CONST = set(dis.hasconst)
914
+
915
+
916
+ def get_const_index(code_options, val) -> int:
917
+ for i, v in enumerate(code_options["co_consts"]):
918
+ # NOTE: stronger comparison is required, since we have
919
+ # examples where two values compare equal but have
920
+ # different semantic meaning in some cases, e.g.
921
+ # 0.0 == -0.0 but have different effects in torch.copysign.
922
+ if val is v:
923
+ return i
924
+ code_options["co_consts"] += (val,)
925
+ return len(code_options["co_consts"]) - 1
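# Illustrative example of why identity (`is`) rather than `==` is used above:
#     0.0 == -0.0                                             # True
#     torch.copysign(torch.tensor(1.0), torch.tensor(0.0))    # tensor(1.)
#     torch.copysign(torch.tensor(1.0), torch.tensor(-0.0))   # tensor(-1.)
# so two constants that compare equal can still behave differently at runtime.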
926
+
927
+
928
+ def fix_vars(instructions: List[Instruction], code_options, varname_from_oparg=None):
929
+ # compute instruction arg from argval if arg is not provided
930
+ names = {name: idx for idx, name in enumerate(code_options["co_names"])}
931
+ if sys.version_info < (3, 11):
932
+ assert varname_from_oparg is None
933
+ varnames = {name: idx for idx, name in enumerate(code_options["co_varnames"])}
934
+ freenames = {
935
+ name: idx
936
+ for idx, name in enumerate(
937
+ code_options["co_cellvars"] + code_options["co_freevars"]
938
+ )
939
+ }
940
+ else:
941
+ assert callable(varname_from_oparg)
942
+ allnames = {}
943
+ for idx in itertools.count():
944
+ try:
945
+ name = varname_from_oparg(idx)
946
+ allnames[name] = idx
947
+ except IndexError:
948
+ break
949
+ varnames = {name: allnames[name] for name in code_options["co_varnames"]}
950
+ freenames = {
951
+ name: allnames[name]
952
+ for name in code_options["co_cellvars"] + code_options["co_freevars"]
953
+ }
954
+ for i in range(len(instructions)):
955
+
956
+ def should_compute_arg():
957
+ # argval is prioritized over arg
958
+ return instructions[i].argval is not _NotProvided
959
+
960
+ if instructions[i].opname == "LOAD_GLOBAL":
961
+ # 3.11 LOAD_GLOBAL requires both arg and argval - see create_load_global
962
+ assert instructions[i].arg is not None
963
+ assert instructions[i].argval is not _NotProvided
964
+ if sys.version_info >= (3, 11):
965
+ instructions[i].arg = (names[instructions[i].argval] << 1) + (
966
+ cast(int, instructions[i].arg) % 2
967
+ )
968
+ else:
969
+ instructions[i].arg = names[instructions[i].argval]
970
+ elif instructions[i].opcode in HAS_LOCAL:
971
+ if should_compute_arg():
972
+ instructions[i].arg = varnames[instructions[i].argval]
973
+ elif instructions[i].opcode in HAS_NAME:
974
+ if should_compute_arg():
975
+ instructions[i].arg = names[instructions[i].argval]
976
+ elif instructions[i].opcode in HAS_FREE:
977
+ if should_compute_arg():
978
+ instructions[i].arg = freenames[instructions[i].argval]
979
+ elif instructions[i].opcode in HAS_CONST:
980
+ # NOTE: only update argval if arg is not provided. This assumes
981
+ # that any additions to co_consts are appended.
982
+ if instructions[i].arg is None:
983
+ # cannot use a dictionary since consts may not be hashable
984
+ idx = get_const_index(code_options, instructions[i].argval)
985
+ assert idx >= 0
986
+ instructions[i].arg = idx
987
+
988
+
989
+ def get_code_keys() -> List[str]:
990
+ # Python 3.11 changes to code keys are not fully documented.
991
+ # See https://github.com/python/cpython/blob/3.11/Objects/clinic/codeobject.c.h#L24
992
+ # for new format.
993
+ keys = ["co_argcount"]
994
+ keys.append("co_posonlyargcount")
995
+ keys.extend(
996
+ [
997
+ "co_kwonlyargcount",
998
+ "co_nlocals",
999
+ "co_stacksize",
1000
+ "co_flags",
1001
+ "co_code",
1002
+ "co_consts",
1003
+ "co_names",
1004
+ "co_varnames",
1005
+ "co_filename",
1006
+ "co_name",
1007
+ ]
1008
+ )
1009
+ if sys.version_info >= (3, 11):
1010
+ keys.append("co_qualname")
1011
+ keys.append("co_firstlineno")
1012
+ if sys.version_info >= (3, 10):
1013
+ keys.append("co_linetable")
1014
+ else:
1015
+ keys.append("co_lnotab")
1016
+ if sys.version_info >= (3, 11):
1017
+ # not documented, but introduced in https://github.com/python/cpython/issues/84403
1018
+ keys.append("co_exceptiontable")
1019
+ keys.extend(
1020
+ [
1021
+ "co_freevars",
1022
+ "co_cellvars",
1023
+ ]
1024
+ )
1025
+ return keys
1026
+
1027
+
1028
+ def transform_code_object(code, transformations, safe=False) -> types.CodeType:
1029
+ keys = get_code_keys()
1030
+ code_options = {k: getattr(code, k) for k in keys}
1031
+ assert len(code_options["co_varnames"]) == code_options["co_nlocals"]
1032
+
1033
+ instructions = cleaned_instructions(code, safe)
1034
+ propagate_line_nums(instructions)
1035
+
1036
+ transformations(instructions, code_options)
1037
+ return clean_and_assemble_instructions(instructions, keys, code_options)[1]
1038
+
1039
+
1040
+ def clean_and_assemble_instructions(
1041
+ instructions: List[Instruction], keys: List[str], code_options: Dict[str, Any]
1042
+ ) -> Tuple[List[Instruction], types.CodeType]:
1043
+ # also implicitly checks for no duplicate instructions
1044
+ check_inst_exn_tab_entries_valid(instructions)
1045
+
1046
+ code_options["co_nlocals"] = len(code_options["co_varnames"])
1047
+ varname_from_oparg = None
1048
+ if sys.version_info >= (3, 11):
1049
+ # temporary code object with updated names
1050
+ tmp_code = types.CodeType(*[code_options[k] for k in keys])
1051
+ varname_from_oparg = tmp_code._varname_from_oparg # type: ignore[attr-defined]
1052
+ fix_vars(instructions, code_options, varname_from_oparg=varname_from_oparg)
1053
+
1054
+ dirty = True
1055
+ while dirty:
1056
+ update_offsets(instructions)
1057
+ devirtualize_jumps(instructions)
1058
+ # this pass might change offsets, if so we need to try again
1059
+ dirty = bool(fix_extended_args(instructions))
1060
+
1061
+ remove_extra_line_nums(instructions)
1062
+ bytecode, lnotab = assemble(instructions, code_options["co_firstlineno"])
1063
+ if sys.version_info < (3, 10):
1064
+ code_options["co_lnotab"] = lnotab
1065
+ else:
1066
+ code_options["co_linetable"] = lnotab
1067
+
1068
+ code_options["co_code"] = bytecode
1069
+ code_options["co_stacksize"] = stacksize_analysis(instructions)
1070
+ assert set(keys) - {"co_posonlyargcount"} == set(code_options.keys()) - {
1071
+ "co_posonlyargcount"
1072
+ }
1073
+ if sys.version_info >= (3, 11):
1074
+ code_options["co_exceptiontable"] = assemble_exception_table(
1075
+ compute_exception_table(instructions)
1076
+ )
1077
+ return instructions, types.CodeType(*[code_options[k] for k in keys])
1078
+
1079
+
1080
+ def populate_kw_names_argval(instructions, consts):
1081
+ for inst in instructions:
1082
+ if inst.opname == "KW_NAMES":
1083
+ inst.argval = consts[inst.arg]
1084
+
1085
+
1086
+ def cleaned_instructions(code, safe=False) -> List[Instruction]:
1087
+ instructions = list(map(convert_instruction, dis.get_instructions(code)))
1088
+ check_offsets(instructions)
1089
+ if sys.version_info >= (3, 11):
1090
+ populate_kw_names_argval(instructions, code.co_consts)
1091
+ virtualize_exception_table(code.co_exceptiontable, instructions)
1092
+ virtualize_jumps(instructions)
1093
+ strip_extended_args(instructions)
1094
+ if not safe:
1095
+ if sys.version_info < (3, 11):
1096
+ remove_load_call_method(instructions)
1097
+ else:
1098
+ remove_jump_if_none(instructions)
1099
+ update_offsets(instructions)
1100
+ devirtualize_jumps(instructions)
1101
+ explicit_super(code, instructions)
1102
+ return instructions
1103
+
1104
+
1105
+ _unique_id_counter = itertools.count()
1106
+
1107
+
1108
+ def unique_id(name) -> str:
1109
+ return f"{name}_{next(_unique_id_counter)}"
1110
+
1111
+
1112
+ def is_generator(code: types.CodeType) -> bool:
1113
+ co_generator = 0x20
1114
+ return (code.co_flags & co_generator) > 0
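# Illustrative example (the generator function is hypothetical):
#     def _gen():
#         yield 1
#     is_generator(_gen.__code__)  # True, since the CO_GENERATOR flag (0x20) is set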
venv/lib/python3.10/site-packages/torch/_dynamo/cache_size.py ADDED
@@ -0,0 +1,172 @@
1
+ import logging
2
+ import types
3
+ import weakref
4
+ from dataclasses import dataclass
5
+ from typing import Tuple
6
+
7
+ from . import config
8
+
9
+ log = logging.getLogger(__name__)
10
+ """
11
+ [Note on cache size limit]
12
+
13
+ Background - TorchDynamo cache is a linked list. Each cache entry is a
14
+ (check_fn, out_code, next pointer). These are stored on the f_code's co_extra
15
+ scratch space. When a frame is invoked, we walk this linked list and run
16
+ check_fn in each cache_entry to decide if the frame needs recompilation. If none
17
+ of the check_fn's returns True, we recompile and add a new entry. To ensure we
18
+ don't end up recompiling infinitely, we put limits on the cache size.
19
+
20
+ There are two limits
21
+ 1) cache_size_limit
22
+ 2) accumulated_cache_size_limit
23
+
24
+
25
+ Earlier we had only one limit - the maximum number of entries in one cache line
26
+ (which is now represented by (2) above). So, why do we need two limits? Let's try
27
+ to understand that.
28
+
29
+ In general, we want our cache limit value to be a small number (e.g. 8 or even
30
+ lower). This ensures that frames that cause too many recompilations fall back to
31
+ eager quickly. However, there is another problem that prevents us from lowering
32
+ the value of cache_size_limit. This is due to ID_MATCH'd guards. Today, we put
33
+ ID_MATCH guards on nn module if there is a graph break. This means we will have
34
+ many recompilations for the same code object because the ID_MATCH guard fails
35
+ for different instances of the nn module. This is a common pattern in how models
36
+ are authored. Therefore, this requires us to keep the cache_size_limit high.
37
+
38
+ We resolve this by introducing these two limits. The first limit (1) limits the
39
+ number of cache entries that have an ID_MATCH'd guard for an nn module instance.
40
+ And, (2)nd limit becomes a safeguard mechanism to have a maximum compilations
41
+ for a code object. One important question is - what is the limit for the code
42
+ object that does not have any ID_MATCH guard? For such code objects, we choose
43
+ (1) as the cache size limit.
44
+
45
+ Let's take an example to understand how these limits help. Suppose we have 16
46
+ instances of a nn module and we ID_MATCH on the self object. Further, suppose
47
+ the inputs to these functions have varying batch size, leading to one
48
+ recompilation. In total, there will be 32 recompilations, and therefore 32 cache
49
+ entries on the forward code object. In the older case when we had only 1 limit,
50
+ our cache size limit must be >= 32 to capture all these recompilations. Now,
51
+ suppose there is a separate function in the same program which is very dynamic
52
+ and unsuitable for compilation. Such a function will need to undergo 32
53
+ compilations to burst the cache and fall back to eager. These 32 recompilations
54
+ are too many, and we want to fall back for these compilation-unfriendly functions
55
+ sooner.
56
+
57
+ In the new scenario, we can have (1) cache_size_limit = 2, (2)
58
+ accumulated_cache_size_limit = 32. This means that each ID_MATCH'd object can
59
+ have maximum of two cache entries, and the maximum number of cache entries
60
+ (irrespective of ID_MATCH obj) is 32. This covers the case of forward code
61
+ object which has 32 recompilations. For the other function, the one unsuitable
62
+ for recompilation, our limit is 2. So, we will burst the cache in just 2
63
+ recompilations. In this manner, these 2 limits help us resolve the tension
64
+ mentioned earlier.
65
+ """
66
+
67
+
68
+ @dataclass
69
+ class CacheSizeRelevantForFrame:
70
+ """
71
+ We track the number of cache entries that have the same id_match objects as the
72
+ given frame.
73
+
74
+ TODO(janimesh) - Consider adding a map from tuple_of_match_ids to count -
75
+ https://github.com/pytorch/pytorch/pull/107496#discussion_r1304564682 - this
76
+ could be useful for debugging as well.
77
+ """
78
+
79
+ # Total number of CacheEntry objects in the Dynamo linked list
80
+ num_cache_entries: int = 0
81
+
82
+ # Number of CacheEntry objects having same ID_MATCH'd objects as given frame.
83
+ num_cache_entries_with_same_id_matched_objs: int = 0
84
+
85
+ def will_compilation_exceed(self, limit: int) -> bool:
86
+ # Checks if a compilation will exceed the given limit (that's why >=).
87
+ return (
88
+ self.will_compilation_exceed_accumulated_limit()
89
+ or self.will_compilation_exceed_specific_limit(limit)
90
+ )
91
+
92
+ def will_compilation_exceed_accumulated_limit(self) -> bool:
93
+ return self.num_cache_entries >= config.accumulated_cache_size_limit
94
+
95
+ def will_compilation_exceed_specific_limit(self, limit: int) -> bool:
96
+ return self.num_cache_entries_with_same_id_matched_objs >= limit
97
+
98
+
99
+ def _get_weakref_from_f_locals(frame: types.FrameType, local_name: str):
100
+ obj = frame.f_locals.get(local_name, None)
101
+ weak_id = None
102
+ try:
103
+ weak_id = weakref.ref(obj)
104
+ except TypeError:
105
+ pass # cannot weakref bool object
106
+ return weak_id
107
+
108
+
109
+ def _has_same_id_matched_objs(frame: types.FrameType, cache_entry) -> bool:
110
+ """
111
+ Checks if the ID_MATCH'd objects saved on cache_entry are same as the ones
112
+ in frame.f_locals.
113
+ """
114
+ if not cache_entry:
115
+ return False
116
+
117
+ for (
118
+ local_name,
119
+ weakref_from_cache_entry,
120
+ ) in cache_entry.check_fn.id_matched_objs.items():
121
+ if weakref_from_cache_entry() is not None:
122
+ weakref_from_frame = _get_weakref_from_f_locals(frame, local_name)
123
+ if weakref_from_frame != weakref_from_cache_entry:
124
+ return False
125
+
126
+ # Also covers the case where no ID_MATCH objects are saved in frame.f_locals
127
+ return True
128
+
129
+
130
+ def compute_cache_size(
131
+ frame: types.FrameType, cache_entry
132
+ ) -> CacheSizeRelevantForFrame:
133
+ # Walk the linked list to calculate the cache size
134
+ num_cache_entries = 0
135
+ num_cache_entries_with_same_id_matched_objs = 0
136
+
137
+ while cache_entry:
138
+ num_cache_entries += 1
139
+ # Track the number of cache entries having same ID_MATCH'd objects as
140
+ # that of frame.f_locals. This will be used later to compare against the
141
+ # cache_size_limit.
142
+ if _has_same_id_matched_objs(frame, cache_entry):
143
+ num_cache_entries_with_same_id_matched_objs += 1
144
+ cache_entry = cache_entry.next
145
+
146
+ return CacheSizeRelevantForFrame(
147
+ num_cache_entries, num_cache_entries_with_same_id_matched_objs
148
+ )
149
+
150
+
151
+ def is_recompilation(cache_size: CacheSizeRelevantForFrame) -> bool:
152
+ """
153
+ If the frame (earlier parsed by compute_cache_size) has more than 1 cache
154
+ entry with the same ID_MATCH'd objects, then it's a recompilation.
155
+ """
156
+ # Note that you can have multiple entries in the cache but still not a
157
+ # recompile, e.g., you can have 64 nn module instances, each one having an
158
+ # ID_MATCH guard, and each one having just 1 cache entry in the cache. In
159
+ # this case, we can have 64 entries in the cache, but no recompilation
160
+ # because there is only one entry for each id_matched_obj.
161
+ return cache_size.will_compilation_exceed(1)
162
+
163
+
164
+ def exceeds_cache_size_limit(cache_size: CacheSizeRelevantForFrame) -> Tuple[bool, str]:
165
+ """
166
+ Checks if we are exceeding the cache size limit.
167
+ """
168
+ if cache_size.will_compilation_exceed_accumulated_limit():
169
+ return True, "accumulated_cache_size_limit"
170
+ if cache_size.will_compilation_exceed_specific_limit(config.cache_size_limit):
171
+ return True, "cache_size_limit"
172
+ return False, ""
venv/lib/python3.10/site-packages/torch/_dynamo/callback.py ADDED
@@ -0,0 +1,82 @@
1
+ class CompilationCallbackHandler:
2
+ def __init__(self):
3
+ self.start_callbacks = []
4
+ self.end_callbacks = []
5
+
6
+ def register_start_callback(self, callback):
7
+ """
8
+ Register a callback function to be called when the compilation starts.
9
+
10
+ Args:
11
+ - callback (callable): The callback function to register.
12
+ """
13
+ self.start_callbacks.append(callback)
14
+ return callback
15
+
16
+ def register_end_callback(self, callback):
17
+ """
18
+ Register a callback function to be called when the compilation ends.
19
+
20
+ Args:
21
+ - callback (callable): The callback function to register.
22
+ """
23
+ self.end_callbacks.append(callback)
24
+ return callback
25
+
26
+ def remove_start_callback(self, callback):
27
+ """
28
+ Remove a registered start callback function.
29
+
30
+ Args:
31
+ - callback (callable): The callback function to remove.
32
+ """
33
+ self.start_callbacks.remove(callback)
34
+
35
+ def remove_end_callback(self, callback):
36
+ """
37
+ Remove a registered end callback function.
38
+
39
+ Args:
40
+ - callback (callable): The callback function to remove.
41
+ """
42
+ self.end_callbacks.remove(callback)
43
+
44
+ def run_start_callbacks(self):
45
+ """
46
+ Execute all registered start callbacks.
47
+ """
48
+ for callback in self.start_callbacks:
49
+ callback()
50
+
51
+ def run_end_callbacks(self):
52
+ """
53
+ Execute all registered end callbacks.
54
+ """
55
+ for callback in self.end_callbacks:
56
+ callback()
57
+
58
+ def clear(self):
59
+ """
60
+ Clear all registered callbacks.
61
+ """
62
+ self.start_callbacks.clear()
63
+ self.end_callbacks.clear()
64
+
65
+
66
+ callback_handler = CompilationCallbackHandler()
67
+
68
+
69
+ def on_compile_start(callback):
70
+ """
71
+ Decorator to register a callback function for the start of the compilation.
72
+ """
73
+ callback_handler.register_start_callback(callback)
74
+ return callback
75
+
76
+
77
+ def on_compile_end(callback):
78
+ """
79
+ Decorator to register a callback function for the end of the compilation.
80
+ """
81
+ callback_handler.register_end_callback(callback)
82
+ return callback
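# A minimal usage sketch (the callback bodies are hypothetical):
#     @on_compile_start
#     def _announce_start():
#         print("dynamo: compilation started")
#
#     @on_compile_end
#     def _announce_end():
#         print("dynamo: compilation finished")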
venv/lib/python3.10/site-packages/torch/_dynamo/code_context.py ADDED
@@ -0,0 +1,29 @@
1
+ import types
2
+
3
+ from .utils import ExactWeakKeyDictionary
4
+
5
+
6
+ class CodeContextDict:
7
+ def __init__(self):
8
+ self.code_context = ExactWeakKeyDictionary()
9
+
10
+ def has_context(self, code: types.CodeType):
11
+ return code in self.code_context
12
+
13
+ def get_context(self, code: types.CodeType):
14
+ ctx = self.code_context.get(code)
15
+ if ctx is None:
16
+ ctx = {}
17
+ self.code_context[code] = ctx
18
+ return ctx
19
+
20
+ def pop_context(self, code: types.CodeType):
21
+ ctx = self.get_context(code)
22
+ self.code_context._remove_id(id(code))
23
+ return ctx
24
+
25
+ def clear(self):
26
+ self.code_context.clear()
27
+
28
+
29
+ code_context = CodeContextDict()
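# A minimal usage sketch (the function and the stored key are hypothetical):
#     def f():
#         pass
#     code_context.get_context(f.__code__)["note"] = "anything"
#     code_context.has_context(f.__code__)   # True
#     code_context.pop_context(f.__code__)   # returns the dict and removes it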
venv/lib/python3.10/site-packages/torch/_dynamo/codegen.py ADDED
@@ -0,0 +1,398 @@
1
+ import collections
2
+ import dataclasses
3
+ import re
4
+ import sys
5
+ import types
6
+ from typing import Counter, Dict, List, Optional
7
+
8
+ import torch.nn
9
+ from . import utils
10
+
11
+ from .bytecode_transformation import (
12
+ create_call_function,
13
+ create_dup_top,
14
+ create_instruction,
15
+ create_load_global,
16
+ create_rot_n,
17
+ Instruction,
18
+ )
19
+ from .exc import unimplemented
20
+ from .source import AttrSource, Source
21
+ from .utils import is_safe_constant, rot_n_helper
22
+ from .variables.base import VariableTracker
23
+ from .variables.nn_module import NNModuleVariable
24
+ from .variables.tensor import (
25
+ NumpyNdarrayVariable,
26
+ SymNodeVariable,
27
+ TensorVariable,
28
+ UnspecializedPythonVariable,
29
+ )
30
+ from .variables.torch_function import TensorWithTFOverrideVariable
31
+
32
+
33
+ @dataclasses.dataclass
34
+ class GraphOutputEntry:
35
+ index: int
36
+ variable: VariableTracker
37
+
38
+
39
+ class PyCodegen:
40
+ """
41
+ Helper class used for constructing Python bytecode
42
+ """
43
+
44
+ def __init__(
45
+ self,
46
+ tx=None,
47
+ root: Optional[torch.nn.Module] = None,
48
+ graph_output_var: Optional[str] = None,
49
+ tempvars=None,
50
+ ):
51
+ self.root = root
52
+ self.top_of_stack: Optional[VariableTracker] = None
53
+ self.uses: Counter[VariableTracker] = collections.Counter()
54
+ self.graph_outputs: Dict[int, GraphOutputEntry] = {}
55
+ self._output: List[Instruction] = []
56
+ self.tempvars = tempvars or {}
57
+ self.tx = tx
58
+ self.graph_output_var = graph_output_var
59
+ self.code_options = self.tx.output.code_options
60
+ self.cell_and_freevars = self.tx.cell_and_freevars
61
+ self.new_var = self.tx.output.new_var
62
+ self.mutable_side_effects_from_source = False
63
+ self.value_from_source: bool = True
64
+
65
+ def restore_stack(self, stack_values, *, value_from_source=True):
66
+ prior = self.mutable_side_effects_from_source
67
+ self.mutable_side_effects_from_source = True
68
+ prev = self.value_from_source
69
+ self.value_from_source &= value_from_source
70
+ try:
71
+ self.foreach(stack_values)
72
+ finally:
73
+ self.mutable_side_effects_from_source = prior
74
+ self.value_from_source = prev
75
+
76
+ def graph_output_vars(self):
77
+ return [x.variable for x in self.graph_outputs.values()]
78
+
79
+ def call_reconstruct(self, value):
80
+ res = value.reconstruct(self)
81
+ assert res is None, f"reconstruct!=None {value}"
82
+
83
+ def __call__(self, value, allow_cache=True):
84
+ """Generate code such that top-of-stack (TOS) is set to value"""
85
+ if isinstance(value, Source):
86
+ self.call_reconstruct(value)
87
+ self.clear_tos()
88
+ return
89
+
90
+ assert isinstance(value, VariableTracker)
91
+ output = self._output
92
+ graph_outputs = self.graph_outputs
93
+
94
+ if self.top_of_stack is value and allow_cache:
95
+ output.append(create_dup_top())
96
+ return
97
+
98
+ if self.mutable_side_effects_from_source:
99
+ # this is needed to get aliasing relationships right
100
+ # value.mutable_local.source will get mutated to hold `value`
101
+ # mutable_side_effects_from_source=False is used to codegen the mutation
102
+ # mutable_side_effects_from_source=True is used to codegen a reference
103
+ from .side_effects import MutableSideEffects
104
+
105
+ if isinstance(value.mutable_local, MutableSideEffects):
106
+ self(value.mutable_local.source)
107
+ return
108
+
109
+ if allow_cache:
110
+ if value.mutable_local and value.mutable_local in self.tempvars:
111
+ output.append(self.create_load(self.tempvars[value.mutable_local]))
112
+ self.top_of_stack = value
113
+ return
114
+ if self.tempvars.get(value) is not None:
115
+ output.append(self.create_load(self.tempvars[value]))
116
+ self.top_of_stack = value
117
+ return
118
+
119
+ if value.source is not None and allow_cache and self.value_from_source:
120
+ self.call_reconstruct(value.source)
121
+ elif value.is_python_constant() and is_safe_constant(
122
+ value.as_python_constant()
123
+ ):
124
+ output.append(self.create_load_const(value.as_python_constant()))
125
+ elif isinstance(value, TensorWithTFOverrideVariable):
126
+ graph_outputs_key = self.add_graph_output(value)
127
+
128
+ self.load_import_from(utils.__name__, "to_subclass")
129
+ self.load_graph_output(graph_outputs[graph_outputs_key].index)
130
+ output.append(
131
+ self.create_load_global(
132
+ value.global_mangled_class_name(self.tx), False, add=True
133
+ )
134
+ )
135
+ output.extend(create_call_function(2, True))
136
+ elif isinstance(
137
+ value,
138
+ (
139
+ TensorVariable,
140
+ SymNodeVariable,
141
+ UnspecializedPythonVariable,
142
+ NumpyNdarrayVariable,
143
+ ),
144
+ ):
145
+ graph_outputs_key = self.add_graph_output(value)
146
+
147
+ if isinstance(value, NumpyNdarrayVariable):
148
+ self.load_import_from(utils.__name__, "to_numpy_helper")
149
+
150
+ self.load_graph_output(graph_outputs[graph_outputs_key].index)
151
+
152
+ if isinstance(value, NumpyNdarrayVariable):
153
+ output.extend(create_call_function(1, True))
154
+ elif isinstance(value, UnspecializedPythonVariable) and value.need_unwrap:
155
+ output.extend(
156
+ [self.create_load_attr("item")] + create_call_function(0, True)
157
+ )
158
+ elif isinstance(value, NNModuleVariable):
159
+ parts = value.module_key.split(".")
160
+ if parts[0] in self.code_options["co_varnames"]:
161
+ output.append(self.create_load(parts[0]))
162
+ parts = parts[1:]
163
+ else:
164
+ assert self.root is not None
165
+ output.append(self.create_load_output(self.root))
166
+ for part in parts:
167
+ output.append(self.create_load_attr(part))
168
+ else:
169
+ self.uses[value] += 1
170
+ try:
171
+ self.call_reconstruct(value)
172
+ except NotImplementedError:
173
+ unimplemented(f"reconstruct: {value}")
174
+ if allow_cache and value in self.tempvars:
175
+ self._output.append(create_dup_top())
176
+ self.add_cache(value)
177
+
178
+ self.top_of_stack = value
179
+
180
+ def add_graph_output(self, value):
181
+ graph_outputs_key = id(value.as_proxy())
182
+ if graph_outputs_key not in self.graph_outputs:
183
+ self.graph_outputs[graph_outputs_key] = GraphOutputEntry(
184
+ len(self.graph_outputs), value
185
+ )
186
+ return graph_outputs_key
187
+
188
+ def load_graph_output(self, index):
189
+ output = self._output
190
+ output.append(self.create_load(self.graph_output_var))
191
+ output.append(self._create_load_const(index))
192
+ output.append(create_instruction("BINARY_SUBSCR"))
193
+
194
+ def add_cache(self, value):
195
+ var = self.new_var()
196
+ self.tempvars[value] = var
197
+ if value.mutable_local:
198
+ self.tempvars[value.mutable_local] = var
199
+ self._output.append(self.create_store(var))
200
+
201
+ def foreach(self, items):
202
+ for i in items:
203
+ self(i)
204
+
205
+ def setup_globally_cached(self, name, value, push_null):
206
+ """Store value in a new global"""
207
+ name = re.sub(r"[^a-zA-Z0-9_]+", "_", name)
208
+ f_globals = self.tx.f_globals
209
+ if name in f_globals:
210
+ assert id(f_globals[name]) == id(value)
211
+ else:
212
+ f_globals[name] = value
213
+ return [self.create_load_global(name, push_null, add=True)]
214
+
215
+ def clear_tos(self):
216
+ self.top_of_stack = None
217
+
218
+ def append_output(self, inst):
219
+ assert isinstance(inst, Instruction)
220
+ self._output.append(inst)
221
+ self.clear_tos()
222
+
223
+ def extend_output(self, insts):
224
+ assert all(isinstance(x, Instruction) for x in insts)
225
+ self._output.extend(insts)
226
+ self.clear_tos()
227
+
228
+ def get_instructions(self) -> List[Instruction]:
229
+ return self._output
230
+
231
+ def create_load(self, name) -> Instruction:
232
+ if name in self.cell_and_freevars():
233
+ return create_instruction("LOAD_DEREF", argval=name)
234
+ assert name in self.code_options["co_varnames"], f"{name} missing"
235
+ return create_instruction("LOAD_FAST", argval=name)
236
+
237
+ def create_load_closure(self, name) -> Instruction:
238
+ assert name in self.cell_and_freevars()
239
+ return create_instruction("LOAD_CLOSURE", argval=name)
240
+
241
+ def create_store(self, name) -> Instruction:
242
+ if name in self.cell_and_freevars():
243
+ return create_instruction("STORE_DEREF", argval=name)
244
+ assert name in self.code_options["co_varnames"]
245
+ return create_instruction("STORE_FAST", argval=name)
246
+
247
+ def create_load_global(self, name, push_null, add=False) -> Instruction:
248
+ if add:
249
+ self.tx.output.update_co_names(name)
250
+ assert name in self.code_options["co_names"], f"{name} not in co_names"
251
+ return create_load_global(name, push_null)
252
+
253
+ def create_load_const(self, value) -> Instruction:
254
+ assert is_safe_constant(value), f"unsafe constant {value}"
255
+ return self._create_load_const(value)
256
+
257
+ def _create_load_const(self, value) -> Instruction:
258
+ return create_instruction("LOAD_CONST", argval=value)
259
+
260
+ create_load_output = _create_load_const
261
+
262
+ def create_load_method(self, name):
263
+ self.tx.output.update_co_names(name)
264
+ return create_instruction("LOAD_METHOD", argval=name)
265
+
266
+ def create_load_attr(self, name) -> Instruction:
267
+ if name not in self.code_options["co_names"]:
268
+ self.code_options["co_names"] += (name,)
269
+ return create_instruction("LOAD_ATTR", argval=name)
270
+
271
+ def load_attr(self, name):
272
+ self.append_output(self.create_load_attr(name))
273
+
274
+ def create_load_attrs(self, names):
275
+ return [self.create_load_attr(name) for name in names.split(".")]
276
+
277
+ def create_store_attr(self, name) -> Instruction:
278
+ if name not in self.code_options["co_names"]:
279
+ self.code_options["co_names"] += (name,)
280
+ return create_instruction("STORE_ATTR", argval=name)
281
+
282
+ def store_attr(self, name):
283
+ self.append_output(self.create_store_attr(name))
284
+
285
+ def load_function_name(self, fn_name, push_null, num_on_stack=0):
286
+ """Load the global fn_name on the stack num_on_stack down"""
287
+ output = []
288
+ if push_null and sys.version_info >= (3, 11):
289
+ output.extend(
290
+ [create_instruction("PUSH_NULL"), *self.rot_n(num_on_stack + 1)]
291
+ )
292
+ output.extend(
293
+ [
294
+ self.create_load_global(fn_name, False, add=True),
295
+ *self.rot_n(num_on_stack + 1),
296
+ ]
297
+ )
298
+ return output
299
+
300
+ def rot_n(self, n):
301
+ try:
302
+ return create_rot_n(n)
303
+ except AttributeError:
304
+ # desired rotate bytecode doesn't exist, generate equivalent bytecode
305
+ return [
306
+ create_instruction("BUILD_TUPLE", arg=n),
307
+ self._create_load_const(rot_n_helper(n)),
308
+ *create_rot_n(2),
309
+ create_instruction("CALL_FUNCTION_EX", arg=0),
310
+ create_instruction("UNPACK_SEQUENCE", arg=n),
311
+ ]
312
+
313
+ def pop_null(self):
314
+ # POP_TOP doesn't work for null, so we pop nulls by pushing in a
315
+ # nop function, calling it (which consumes the null), and popping the result.
316
+ assert sys.version_info >= (3, 11)
317
+ return [
318
+ self._create_load_const(lambda: None),
319
+ *create_call_function(0, False),
320
+ create_instruction("POP_TOP"),
321
+ ]
322
+
323
+ def call_function(self, nargs: int, push_null: bool):
324
+ self.extend_output(create_call_function(nargs, push_null=push_null))
325
+
326
+ def dup_top(self):
327
+ self.append_output(create_dup_top())
328
+
329
+ def store(self, varname):
330
+ self.append_output(self.create_store(varname))
331
+
332
+ def make_function_with_closure(
333
+ self, fn_name: str, code: types.CodeType, push_null: bool, num_on_stack=0
334
+ ):
335
+ freevars = code.co_freevars
336
+ assert freevars
337
+ output = self._output
338
+ if sys.version_info >= (3, 11) and push_null:
339
+ output.append(create_instruction("PUSH_NULL"))
340
+ output.extend(self.rot_n(num_on_stack + 1))
341
+ for var in freevars:
342
+ assert var in self.cell_and_freevars()
343
+ output.append(create_instruction("LOAD_CLOSURE", argval=var))
344
+ output.append(create_instruction("BUILD_TUPLE", arg=len(freevars)))
345
+ output.append(self.create_load_const(code))
346
+ if sys.version_info < (3, 11):
347
+ output.append(self.create_load_const(fn_name))
348
+ output.append(create_instruction("MAKE_FUNCTION", arg=0x08))
349
+ output.extend(self.rot_n(num_on_stack + 1))
350
+ self.clear_tos()
351
+
352
+ def create_load_python_module(self, mod, push_null) -> Instruction:
353
+ """
354
+ Generate a LOAD_GLOBAL instruction to fetch a given python module.
355
+ """
356
+ output = self.tx.output
357
+ global_scope = output.global_scope
358
+ name = re.sub(r"^.*[.]", "", mod.__name__)
359
+ if global_scope.get(name, None) is mod:
360
+ return self.create_load_global(name, push_null, add=True)
361
+ prefix = f"___module_{name}"
362
+ global_name = self.tx.output.install_global_by_id(prefix, mod)
363
+ return self.create_load_global(global_name, push_null, add=True)
364
+
365
+ def make_call_generated_code(self, fn_name: str) -> None:
366
+ """Call the generated code function stored in fn_name"""
367
+ self.extend_output(self.load_function_name(fn_name, True))
368
+
369
+ graphargs = self.tx.output.graphargs
370
+ for arg in graphargs:
371
+ if arg.is_unspecialized:
372
+ self.extend_output(
373
+ [
374
+ self.create_load_python_module(torch, True),
375
+ self.create_load_attr("as_tensor"),
376
+ ]
377
+ )
378
+ self.call_reconstruct(arg)
379
+ self.extend_output(create_call_function(1, False))
380
+ else:
381
+ self.call_reconstruct(arg)
382
+
383
+ self.extend_output(create_call_function(len(graphargs), False))
384
+
385
+ def load_import_from(self, module_name, object_name) -> None:
386
+ self(AttrSource(self.tx.import_source(module_name), object_name))
387
+
388
+ def create_call_function_kw(self, nargs, kw_names, push_null) -> List[Instruction]:
389
+ if sys.version_info >= (3, 11):
390
+ output = create_call_function(nargs, push_null)
391
+ assert output[-2].opname == "PRECALL"
392
+ kw_names_inst = create_instruction("KW_NAMES", argval=kw_names)
393
+ output.insert(-2, kw_names_inst)
394
+ return output
395
+ return [
396
+ self.create_load_const(kw_names),
397
+ create_instruction("CALL_FUNCTION_KW", arg=nargs),
398
+ ]
venv/lib/python3.10/site-packages/torch/_dynamo/compiled_autograd.py ADDED
1
+ import contextlib
2
+ import functools
3
+ from typing import List, Optional
4
+
5
+ import torch
6
+ from torch._dynamo.external_utils import call_backward, call_hook
7
+ from torch._dynamo.source import GetItemSource, LocalSource
8
+ from torch._dynamo.utils import counters, lazy_format_graph_code
9
+ from torch._logging import getArtifactLogger, trace_structured
10
+ from torch._prims_common import clone_preserve_strides
11
+ from torch._subclasses import FakeTensorMode
12
+ from torch.fx import GraphModule
13
+ from torch.fx.experimental._backward_state import BackwardState
14
+ from torch.fx.experimental.proxy_tensor import (
15
+ decompose,
16
+ disable_autocast_cache,
17
+ disable_proxy_modes_tracing,
18
+ fetch_object_proxy,
19
+ ProxyTorchDispatchMode,
20
+ PythonKeyTracer,
21
+ track_tensor_tree,
22
+ )
23
+ from torch.fx.experimental.symbolic_shapes import DimDynamic, ShapeEnv
24
+ from torch.fx.proxy import Proxy
25
+
26
+ compiled_autograd_log = getArtifactLogger(__name__, "compiled_autograd")
27
+
28
+
29
+ def maybe_clone(x):
30
+ if x is not None:
31
+ return clone_preserve_strides(x)
32
+ return x
33
+
34
+
35
+ class AutogradCompilerInstance:
36
+ def __init__(self, compiler_fn) -> None:
37
+ self.compiler_fn = compiler_fn
38
+ self.stack = contextlib.ExitStack()
39
+ self.close = self.stack.close
40
+ self.shape_env = ShapeEnv()
41
+ self.fake_tensor_mode = FakeTensorMode(
42
+ allow_fallback_kernels=True,
43
+ allow_non_fake_inputs=True,
44
+ shape_env=self.shape_env,
45
+ )
46
+ self.fx_tracer = PythonKeyTracer()
47
+ self.proxy_mode = ProxyTorchDispatchMode(self.fx_tracer, "symbolic")
48
+ self.hooks_proxy: Optional[Proxy] = None
49
+
50
+ def wrap_fake(self, x, source):
51
+ assert isinstance(x, torch.Tensor)
52
+ return self.fake_tensor_mode.from_tensor(x, source=source)
53
+
54
+ @staticmethod
55
+ def source(name, idx) -> GetItemSource:
56
+ return GetItemSource(LocalSource(name), idx)
57
+
58
+ def begin_capture(self, inputs: List[torch.Tensor], sizes: List[int]):
59
+ counters["compiled_autograd"]["captures"] += 1
60
+ self.fx_tracer.root = torch.nn.Module()
61
+ self.fx_tracer.graph = torch.fx.Graph(tracer_cls=PythonKeyTracer)
62
+ self.fx_tracer.tensor_attrs = {}
63
+ args_proxy = self.fx_tracer.create_proxy("placeholder", "inputs", (), {})
64
+ sizes_proxy = self.fx_tracer.create_proxy("placeholder", "sizes", (), {})
65
+ self.hooks_proxy = self.fx_tracer.create_proxy("placeholder", "hooks", (), {})
66
+
67
+ # tensor inputs to fake tensors
68
+ inputs = [
69
+ self.wrap_fake(x, self.source("inputs", idx))
70
+ for idx, x in enumerate(inputs)
71
+ ]
72
+ proxies = [args_proxy[i] for i in range(len(inputs))]
73
+ self.bind_tensors_to_proxies(inputs, proxies)
74
+
75
+ # size inputs to symints
76
+ sizes = [
77
+ self.shape_env.create_unspecified_symint_and_symbol(
78
+ val,
79
+ self.source("sizes", idx),
80
+ DimDynamic.DYNAMIC,
81
+ )
82
+ for idx, val in enumerate(sizes)
83
+ ]
84
+ self.bind_tensors_to_proxies(sizes, sizes_proxy)
85
+
86
+ # TODO(jansel): are all these modes needed?
87
+ self.stack.enter_context(decompose({}))
88
+ self.stack.enter_context(self.fake_tensor_mode)
89
+ self.stack.enter_context(self.proxy_mode.sym_mode)
90
+ self.stack.enter_context(self.proxy_mode)
91
+ self.stack.enter_context(disable_autocast_cache())
92
+ return inputs, sizes
93
+
94
+ def proxy_call_backward(
95
+ self,
96
+ inputs,
97
+ output_metadatas,
98
+ saved_tensors,
99
+ backward_idx: int,
100
+ ):
101
+ assert self.hooks_proxy is not None
102
+ backward_fn = self.hooks_proxy[backward_idx] # type: ignore[index]
103
+ proxies = self.fx_tracer.create_proxy(
104
+ kind="call_function",
105
+ target=call_backward,
106
+ args=(
107
+ backward_fn,
108
+ self.to_proxy(saved_tensors),
109
+ *self.to_proxy(inputs),
110
+ ),
111
+ kwargs={},
112
+ )
113
+
114
+ with disable_proxy_modes_tracing():
115
+ # create fake Tensors
116
+ grad_ins: List[Optional[torch.Tensor]] = []
117
+ for output_metadata in output_metadatas:
118
+ if output_metadata is None:
119
+ grad_ins.append(None)
120
+ continue
121
+
122
+ layout, device, dtype, size = output_metadata
123
+ grad_ins.append(
124
+ torch.empty(size=size, dtype=dtype, layout=layout, device=device)
125
+ )
126
+ self.bind_tensors_to_proxies(grad_ins, proxies)
127
+ return tuple(grad_ins)
128
+
129
+ def proxy_call_hook(self, hook, *args):
130
+ return self.fx_tracer.create_proxy(
131
+ "call_function",
132
+ call_hook,
133
+ (
134
+ hook,
135
+ *[self.to_proxy(x) for x in args],
136
+ ),
137
+ {},
138
+ )
139
+
140
+ def tensor_pre_hook(self, inputs, hook_id, i: int):
141
+ assert self.hooks_proxy is not None
142
+ hook = self.hooks_proxy[hook_id] # type: ignore[index]
143
+ proxy = self.proxy_call_hook(
144
+ hook,
145
+ inputs[i],
146
+ )
147
+ with disable_proxy_modes_tracing():
148
+ inputs[i] = maybe_clone(inputs[i])
149
+ self.bind_tensors_to_proxies([inputs[i]], [proxy])
150
+ return inputs
151
+
152
+ def pre_hook(self, inputs, hook_id):
153
+ assert self.hooks_proxy is not None
154
+ hook = self.hooks_proxy[hook_id] # type: ignore[index]
155
+ proxies = self.proxy_call_hook(
156
+ hook,
157
+ inputs,
158
+ )
159
+ with disable_proxy_modes_tracing():
160
+ inputs = [maybe_clone(x) for x in inputs]
161
+ self.bind_tensors_to_proxies(inputs, proxies)
162
+ return inputs
163
+
164
+ def post_hook(self, outputs, inputs, hook_id):
165
+ assert self.hooks_proxy is not None
166
+ hook = self.hooks_proxy[hook_id] # type: ignore[index]
167
+ proxies = self.proxy_call_hook(
168
+ hook,
169
+ outputs,
170
+ inputs,
171
+ )
172
+ with disable_proxy_modes_tracing():
173
+ outputs = [maybe_clone(x) for x in outputs]
174
+ self.bind_tensors_to_proxies(outputs, proxies)
175
+ return outputs
176
+
177
+ def post_acc_grad_hook(self, input, hook_id):
178
+ assert isinstance(input, torch.Tensor)
179
+ assert self.hooks_proxy is not None
180
+ hook = self.hooks_proxy[hook_id] # type: ignore[index]
181
+ proxies = self.proxy_call_hook(
182
+ hook,
183
+ input,
184
+ )
185
+ with disable_proxy_modes_tracing():
186
+ input = [maybe_clone(input)]
187
+ self.bind_tensors_to_proxies(input, proxies)
188
+ return input
189
+
190
+ def end_capture(self, outputs):
191
+ self.stack.close()
192
+ self.fx_tracer.create_node(
193
+ "output",
194
+ "output",
195
+ (self.fx_tracer.create_arg(self.to_proxy(outputs)),),
196
+ {},
197
+ )
198
+ graph = GraphModule(
199
+ self.fx_tracer.root, self.fx_tracer.graph, "CompiledAutograd"
200
+ )
201
+ compiled_autograd_log.info(
202
+ "%s", lazy_format_graph_code("Compiled autograd graph", graph)
203
+ )
204
+ trace_structured(
205
+ "compiled_autograd_graph",
206
+ payload_fn=lambda: graph.print_readable(print_output=False),
207
+ )
208
+ return self.compiler_fn(graph)
209
+
210
+ def to_proxy(self, t):
211
+ if t is None:
212
+ return None
213
+ if isinstance(t, list):
214
+ return [self.to_proxy(x) for x in t]
215
+ if isinstance(t, tuple):
216
+ return tuple(self.to_proxy(x) for x in t)
217
+ assert isinstance(t, (torch.Tensor, torch.SymInt))
218
+ return fetch_object_proxy(self.fx_tracer)(t).proxy
219
+
220
+ def bind_tensors_to_proxies(self, tensors, proxies):
221
+ if isinstance(proxies, torch.fx.Proxy):
222
+ proxies = [proxies[i] for i in range(len(tensors))]
223
+ assert len(tensors) == len(proxies)
224
+ track_tensor_tree(tensors, proxies, constant=None, tracer=self.fx_tracer)
225
+
226
+ def bind_backward_state(self, index: int):
227
+ assert self.hooks_proxy is not None
228
+ proxy = self.hooks_proxy[index] # type: ignore[index]
229
+ bw_state = BackwardState()
230
+ track_tensor_tree(bw_state, proxy, constant=None, tracer=self.fx_tracer)
231
+ return bw_state
232
+
233
+
234
+ compiled_autograd_enabled = False
235
+
236
+ # We may have code like:
237
+ # with enable(compiler_fn):
238
+ # ...
239
+ # with disable():
240
+ # ...
241
+ # ...
242
+ # The disable() call just wants to disable compiled autograd temporarily.
243
+ # But overall the feature is enabled.
244
+ #
245
+ # The code covered by the disable context manager has no way to know if
246
+ # compiled autograd is overall enabled. Use another variable
247
+ # compiled_autograd_enabled_count to indicate how many times compiled
248
+ # autograd has been enabled in the call stack for this purpose.
249
+ compiled_autograd_enabled_count = 0
250
+
251
+
252
+ @contextlib.contextmanager
253
+ def enable(compiler_fn):
254
+ prior = torch._C._dynamo.compiled_autograd.set_autograd_compiler(
255
+ functools.partial(AutogradCompilerInstance, compiler_fn)
256
+ )
257
+ global compiled_autograd_enabled, compiled_autograd_enabled_count
258
+ compiled_autograd_enabled = True
259
+ compiled_autograd_enabled_count += 1
260
+ try:
261
+ with torch.autograd.set_multithreading_enabled(False):
262
+ yield
263
+ finally:
264
+ compiled_autograd_enabled_count -= 1
265
+ if not prior:
266
+ compiled_autograd_enabled = False
267
+ torch._C._dynamo.compiled_autograd.set_autograd_compiler(prior)
268
+
269
+
270
+ @contextlib.contextmanager
271
+ def disable():
272
+ prior = torch._C._dynamo.compiled_autograd.set_autograd_compiler(None)
273
+ global compiled_autograd_enabled
274
+ compiled_autograd_enabled = False
275
+ try:
276
+ yield
277
+ finally:
278
+ if prior:
279
+ compiled_autograd_enabled = True
280
+ torch._C._dynamo.compiled_autograd.set_autograd_compiler(prior)
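# A minimal usage sketch (the model/input wiring is hypothetical; passing
# torch.compile as the compiler_fn is one possible choice, not the only one):
#     from torch._dynamo import compiled_autograd
#     with compiled_autograd.enable(torch.compile):
#         loss = model(inputs).sum()
#         loss.backward()   # the backward graph is captured and compiled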
venv/lib/python3.10/site-packages/torch/_dynamo/comptime.py ADDED
@@ -0,0 +1,373 @@
1
+ # This file establishes the public comptime interface to Dynamo.
2
+ # This allows Dynamo users to execute arbitrary Python code while
3
+ # Dynamo is symbolically evaluating their original programs.
4
+ #
5
+ # The goal of the public API is to give users rope, without actually
6
+ # leaking private implementation details of Dynamo.
7
+
8
+ import builtins
9
+ import dis
10
+ import traceback
11
+ from typing import Optional, Union
12
+
13
+ import torch
14
+ from torch.fx.experimental.symbolic_shapes import free_symbols
15
+
16
+ from .exc import unimplemented
17
+ from .variables.constant import ConstantVariable
18
+ from .variables.tensor import SymNodeVariable
19
+
20
+
21
+ class ComptimeVar:
22
+ """
23
+ A ComptimeVar represents a Python value, at some particular point
24
+ in time, in the Python code we are symbolically evaluating with
25
+ torchdynamo. This must be distinguished from a runtime value, as
26
+ at compile-time there are some properties of the variable we
27
+ do not know (for example, if the ComptimeVar represents a Tensor,
28
+ we only know metadata about the tensor; we do NOT know what the
29
+ actual data in the Tensor is.)
30
+ """
31
+
32
+ def __init__(self, v):
33
+ self.__variable = v
34
+
35
+ def as_proxy(self):
36
+ """
37
+ Returns an fx.Proxy (or tuple/list of fx.Proxy) representing
38
+ this variable in the FX graph we are assembling to pass
39
+ to the user compiler.
40
+
41
+ This method only works for variables we actually track in
42
+ the FX graph, aka Tensors (and ints, if you are compiling
43
+ with dynamic shapes). In particular, if you have a list
44
+ or tuple of tensors, you will get a list/tuple of proxies
45
+ (not a single proxy representing the entire list/tuple).
46
+ """
47
+ return self.__variable.as_proxy()
48
+
49
+ def is_proxy(self):
50
+ """
51
+ Returns True if as_proxy() would succeed.
52
+ """
53
+ return self.__variable.is_proxy()
54
+
55
+ def as_fake(self):
56
+ """
57
+ Returns a "fake" value (either a FakeTensor or a SymInt)
58
+ representing the variable in question. This only works
59
+ for variables that denote Tensor or int. You can use
60
+ this to query metadata; e.g., v.as_fake().size(0) will
61
+ tell you the compile-time known size of the tensor.
62
+
63
+ WARNING: Do NOT mutate the returned tensor.
64
+ """
65
+ return self.__variable.as_proxy().node.meta["example_value"]
66
+
67
+ def size(self, dim: Optional[int] = None) -> Union[int, torch.SymInt]:
68
+ """
69
+ Returns the size of the tensor (if dim is None) or the size
70
+ at the dimension dim. The returned size may be a SymInt.
71
+ """
72
+ return self.as_fake().size(dim)
73
+
74
+ def python_type(self):
75
+ """
76
+ Returns what type(v) would have returned for the variable
77
+ at compile time.
78
+ """
79
+ return self.__variable.python_type()
80
+
81
+ def as_python_constant(self):
82
+ """
83
+ Returns the Python value this variable would have, but only if it is
84
+ completely known at compile-time (e.g., it is constant).
85
+
86
+ WARNING: Do NOT mutate the returned constant. The returned constant
87
+ may or may not correspond to the actual value this variable may take
88
+ on at runtime; for example, if the variable in question is a constant
89
+ list, we may return a copy of that list.
90
+ """
91
+ return self.__variable.as_python_constant()
92
+
93
+ def is_python_constant(self):
94
+ """
95
+ Returns True if as_python_constant would succeed.
96
+ """
97
+ return self.__variable.is_python_constant()
98
+
99
+ def is_dynamic(self):
100
+ if isinstance(self.__variable, SymNodeVariable):
101
+ fs = free_symbols(self.__variable.sym_num)
102
+ return bool(fs)
103
+ return False
104
+
105
+ def force_static(self):
106
+ """
107
+ Forces that a value is static, inducing a guard on its specific value
108
+ """
109
+ if isinstance(self.__variable, SymNodeVariable):
110
+ self.__variable.evaluate_expr()
111
+ elif isinstance(self.__variable, ConstantVariable):
112
+ # TODO: Maybe complain if this isn't a int/bool/float variable
113
+ pass
114
+ else:
115
+ raise AssertionError(
116
+ f"cannot force {self.__variable} ({type(self.__variable)}) static"
117
+ )
118
+
119
+ def _i_will_not_complain_if_bc_breaks_VariableTracker(self):
120
+ """
121
+ Returns the internal data structure VariableTracker that Dynamo uses
122
+ to represent variables at compile time. There are no BC guarantees on
123
+ this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if you rely on
124
+ it.
125
+ """
126
+ return self.__variable
127
+
128
+ def __repr__(self):
129
+ # TODO: The default repr is pretty bad, do better
130
+ return repr(self.__variable)
131
+
132
+ # TODO: API for adding a custom guard
133
+
134
+
135
+ class ComptimeContext:
136
+ """
137
+ This context class provides access to a public API for Dynamo's internals.
138
+ If there is something here you would find useful that is missing, please
139
+ file a feature request at https://github.com/pytorch/pytorch/
140
+ """
141
+
142
+ def __init__(self, tx):
143
+ self.__tx = tx
144
+
145
+ def get_local(self, name: str, *, stacklevel=0) -> ComptimeVar:
146
+ """
147
+ Retrieve the compile-time known information about a local.
148
+ """
149
+ tx = self.__get_tx(stacklevel)
150
+ return ComptimeVar(tx.symbolic_locals[name])
151
+
152
+ def graph_break(self, msg="ComptimeContext.graph_break"):
153
+ """
154
+ Manually trigger a graph break
155
+ """
156
+ unimplemented(msg)
157
+
158
+ def graph(self):
159
+ """
160
+ Retrieve the partially constructed FX graph that would be
161
+ passed to the user compiler after compilation.
162
+ """
163
+ return self.__tx.output.graph
164
+
165
+ def assert_static(self, val):
166
+ """
167
+ Asserts that the int is static (and not dynamic, per dynamic shapes)
168
+ """
169
+ assert (
170
+ not val.is_dynamic()
171
+ ), "expected static but got dynamic (run with TORCH_LOGS=dynamic for more info)"
172
+
173
+ def print_graph(self, *, verbose=True, file=None):
174
+ """
175
+ Print the partially constructed FX graph that would be passed
176
+ to the user compiler after compilation.
177
+ """
178
+ print(
179
+ self.__tx.output.graph.python_code("self", verbose=verbose).src, file=file
180
+ )
181
+
182
+ def parent(self):
183
+ return ComptimeContext(self.__tx.parent)
184
+
185
+ def __get_tx(self, stacklevel):
186
+ tx = self.__tx
187
+ for _ in range(stacklevel):
188
+ tx = tx.parent
189
+ return tx
190
+
191
+ def print_disas(self, *, file=None, stacklevel=0):
192
+ """
193
+ Print the current series of opcodes being executed (not including
194
+ parent frames), including where you are in the particular opcode
195
+ stream.
196
+ """
197
+ tx = self.__get_tx(stacklevel)
198
+ print(
199
+ dis.Bytecode(
200
+ tx.f_code,
201
+ current_offset=tx.instructions[tx.instruction_pointer].offset,
202
+ ).dis(),
203
+ file=file,
204
+ )
205
+
206
+ def print_value_stack(self, *, file=None, stacklevel=0):
207
+ """
208
+ Print the current Python value stack. Note that this is NOT the same
209
+ as the traceback; use print_bt() to print that. Note that at
210
+ stacklevel=0, this will typically be empty, as comptime cannot
211
+ currently be used in an expression context where there would be
212
+ intermediates on the stack. If you would find this useful, please
213
+ file a bug at https://github.com/pytorch/pytorch/
214
+
215
+ NB: Stack grows downwards in our print
216
+ """
217
+ # TODO: improve printing
218
+ tx = self.__get_tx(stacklevel)
219
+ for s in tx.stack:
220
+ print(f"- {s}", file=file)
221
+
222
+ def print_locals(self, *, file=None, stacklevel=0):
223
+ """
224
+ Print all of the locals available in the current context.
225
+ By default this view is very limited; you can get more information
226
+ about any individual local using get_local().
227
+ """
228
+ # TODO: improve by improving the VariableTracker printing
229
+ tx = self.__get_tx(stacklevel)
230
+ for k, v in tx.symbolic_locals.items():
231
+ print(f"{k} = {v}", file=file)
232
+
233
+ def print_bt(self, *, file=None, stacklevel=0):
234
+ """
235
+ Print the user code backtrace, starting at the beginning of the
236
+ frame Dynamo started evaluating. Note that this MAY NOT go all
237
+ the way to the torch.compile invocation, as we may have done
238
+ a graph break and are compiling an intermediate frame as the
239
+ starting point. If you think the other behavior would be better,
240
+ file a bug at https://github.com/pytorch/pytorch/
241
+ """
242
+ stack = []
243
+ tx = self.__get_tx(stacklevel)
244
+ while tx is not None:
245
+ stack.append(tx.frame_summary())
246
+ tx = getattr(tx, "parent", None)
247
+ print(
248
+ "".join(traceback.StackSummary.from_list(reversed(stack)).format()),
249
+ file=file,
250
+ )
251
+
252
+ def print_guards(self, *, file=None):
253
+ """
254
+ Print the currently installed guards for the Dynamo context.
255
+ This does NOT include guards associated with variables that
256
+ may or may not be installed in the future if those variables
257
+ are used.
258
+ """
259
+ # TODO: improve print format, current guard format is extremely
260
+ # verbose
261
+ print(
262
+ "\n".join(f"{repr(guard)}" for guard in sorted(self.__tx.output.guards)),
263
+ file=file,
264
+ )
265
+
266
+ def _i_will_not_complain_if_bc_breaks_InstructionTranslator(self):
267
+ """
268
+ Returns the internal data structure InstructionTranslator that Dynamo
269
+ uses to track state of symbolic evaluation. There are no BC
270
+ guarantees on this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if
271
+ you rely on it.
272
+ """
273
+ return self.__tx
274
+
275
+
276
+ class _Comptime:
277
+ @staticmethod
278
+ def __call__(fn):
279
+ """fn gets called at compile time in TorchDynamo, does nothing otherwise"""
280
+ return
281
+
282
+ # Convenience wrappers that are more compact to use
283
+
284
+ @staticmethod
285
+ def graph_break():
286
+ comptime(lambda ctx: ctx.graph_break())
287
+
288
+ @staticmethod
289
+ def print_graph():
290
+ comptime(lambda ctx: ctx.print_graph())
291
+
292
+ @staticmethod
293
+ def print_disas(*, stacklevel=0):
294
+ comptime(
295
+ lambda ctx: ctx.print_disas(
296
+ stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
297
+ )
298
+ )
299
+
300
+ @staticmethod
301
+ def print_value_stack(*, stacklevel=0):
302
+ comptime(
303
+ lambda ctx: ctx.print_value_stack(
304
+ stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
305
+ )
306
+ )
307
+
308
+ # This is a more useful variant of print_value_stack that can be used
309
+ # in an expression context; e.g., x + print_value_stack_and_return(y + z),
310
+ # you will see x on the stack prior to the addition operation
311
+ @staticmethod
312
+ def print_value_stack_and_return(e, *, stacklevel=0):
313
+ comptime(
314
+ lambda ctx: ctx.print_value_stack(
315
+ stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
316
+ )
317
+ )
318
+ return e
319
+
320
+ @staticmethod
321
+ def print_locals(*, stacklevel=0):
322
+ comptime(
323
+ lambda ctx: ctx.print_locals(
324
+ stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
325
+ )
326
+ )
327
+
328
+ @staticmethod
329
+ def print_bt(*, stacklevel=0):
330
+ comptime(
331
+ lambda ctx: ctx.print_bt(
332
+ stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
333
+ )
334
+ )
335
+
336
+ @staticmethod
337
+ def print_guards():
338
+ comptime(lambda ctx: ctx.print_guards())
339
+
340
+ @staticmethod
341
+ def assert_static(val):
342
+ comptime(lambda ctx: ctx.assert_static(ctx.get_local("val")))
343
+
344
+ @staticmethod
345
+ def force_static(val):
346
+ comptime(lambda ctx: ctx.get_local("val").force_static())
347
+
348
+ @staticmethod
349
+ def breakpoint():
350
+ """
351
+ Like pdb breakpoint(), but drop into pdb whenever this line
352
+ of code is compiled by dynamo. Use it by putting
353
+ this in your model code::
354
+
355
+ from torch._dynamo.comptime import comptime
356
+ comptime.breakpoint()
357
+
358
+ And then, inside pdb, you can access 'ctx' to query things
359
+ about the compilation context::
360
+
361
+ (Pdb) !ctx.print_bt()
362
+ (Pdb) !ctx.print_locals()
363
+ (Pdb) p ctx.get_local("attention").as_fake()
364
+ """
365
+
366
+ def inner(inner_ctx):
367
+ ctx = inner_ctx.parent()
368
+ builtins.breakpoint()
369
+
370
+ comptime(inner)
371
+
372
+
373
+ comptime = _Comptime()
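For reference, a minimal usage sketch of the comptime API defined above. The function name, the input tensor, and the "eager" backend are illustrative assumptions, not part of this file:

import torch
from torch._dynamo.comptime import comptime

@torch.compile(backend="eager")
def fn(x):  # hypothetical user function
    # These calls run only while Dynamo compiles this frame; they are no-ops in plain eager execution.
    comptime.print_graph()
    comptime.print_locals()
    return x.sin()

fn(torch.randn(4))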
venv/lib/python3.10/site-packages/torch/_dynamo/config.py ADDED
@@ -0,0 +1,423 @@
1
+ import getpass
2
+ import inspect
3
+ import os
4
+ import re
5
+ import sys
6
+ import tempfile
7
+ from os.path import abspath, dirname
8
+ from typing import Any, Callable, Dict, Optional, Set, Type, TYPE_CHECKING, Union
9
+
10
+ import torch
11
+
12
+ # to configure logging for dynamo, aot, and inductor
13
+ # use the following API in the torch._logging module
14
+ # torch._logging.set_logs(dynamo=<level>, aot=<level>, inductor=<level>)
15
+ # or use the environment variable TORCH_LOGS="dynamo,aot,inductor" (use a prefix + to indicate higher verbosity)
16
+ # see this design doc for more detailed info
17
+ # Design doc: https://docs.google.com/document/d/1ZRfTWKa8eaPq1AxaiHrq4ASTPouzzlPiuquSBEJYwS8/edit#
18
+ # the name of a file to write the logs to
19
+ # [@compile_ignored: debug]
20
+ log_file_name: Optional[str] = None
21
+
22
+ # [@compile_ignored: debug] Verbose will print full stack traces on warnings and errors
23
+ verbose = os.environ.get("TORCHDYNAMO_VERBOSE", "0") == "1"
24
+
25
+ # [@compile_ignored: runtime_behaviour] verify the correctness of optimized backend
26
+ verify_correctness = False
27
+
28
+ # need this many ops to create an FX graph
29
+ minimum_call_count = 1
30
+
31
+ # turn on/off DCE pass
32
+ dead_code_elimination = True
33
+
34
+ # disable (for a function) when cache reaches this size
35
+
36
+ # controls the maximum number of cache entries with a guard on same ID_MATCH'd
37
+ # object. It also controls the maximum size of cache entries if they don't have
38
+ # any ID_MATCH'd guards.
39
+ # [@compile_ignored: runtime_behaviour]
40
+ cache_size_limit = 8
41
+
42
+ # [@compile_ignored: runtime_behaviour] controls the maximum number of entries for a code object.
43
+ accumulated_cache_size_limit = 64
44
+
45
+ # whether or not to specialize on int inputs. This only has an effect with
46
+ # dynamic_shapes; when dynamic_shapes is False, we ALWAYS specialize on int
47
+ # inputs. Note that assume_static_by_default will also cause ints to get
48
+ # specialized, so this is mostly useful for export, where we want inputs
49
+ # to be dynamic, but accesses to ints should NOT get promoted into inputs.
50
+ specialize_int = False
51
+
52
+ # legacy config, does nothing now!
53
+ dynamic_shapes = True
54
+
55
+ use_lazy_graph_module = (
56
+ os.environ.get("TORCH_COMPILE_USE_LAZY_GRAPH_MODULE", "1") == "1"
57
+ )
58
+
59
+ # This is a temporarily flag, which changes the behavior of dynamic_shapes=True.
60
+ # When assume_static_by_default is True, we only allocate symbols for shapes marked dynamic via mark_dynamic.
61
+ # NOTE - this flag can be removed once we can run dynamic_shapes=False w/ the mark_dynamic API
62
+ # see [Note - on the state of mark_dynamic]
63
+ assume_static_by_default = True
64
+
65
+ # This flag changes how dynamic_shapes=True works, and is meant to be used in conjunction
66
+ # with assume_static_by_default=True.
67
+ # With this flag enabled, we always compile a frame as fully static for the first time, and, if we fail
68
+ # any guards due to wobbles in shape, we recompile with *all* the wobbled shapes as being marked dynamic.
69
+ automatic_dynamic_shapes = True
70
+
71
+ # This flag changes how the shapes of parameters are treated.
72
+ # If this flag is set to True, then Dynamo attempts to treat the shapes of torch.nn.Parameter as well as torch.Tensor as dynamic
73
+ # If this flag is set to False, then the shapes of torch.nn.Parameter are assumed to be static,
74
+ # while the shapes of torch.Tensor are assumed to be dynamic.
75
+ force_parameter_static_shapes = True
76
+
77
+ # This flag ensures that the shapes of a nn module are always assumed to be static
78
+ # If the flag is set to True, then the shapes of a nn.module are assumed to be static
79
+ # If the flag is set to False, then the shapes of a nn.module can be dynamic
80
+ force_nn_module_property_static_shapes = True
81
+
82
+ # Typically, if you mark_dynamic a dimension, we will error if the dimension
83
+ # actually ended up getting specialized. This knob changes the behavior so
84
+ # that we don't error at all. This is helpful for our CI where I'm using a
85
+ # heuristic to mark batch dimensions as dynamic and the heuristic may get it
86
+ # wrong.
87
+ allow_ignore_mark_dynamic = False
88
+
89
+ # Set this to False to assume nn.Modules() contents are immutable (similar assumption as freezing)
90
+ guard_nn_modules = False
91
+
92
+ # Uses CPython internal dictionary tags to detect mutation. There is some
93
+ # overlap between guard_nn_modules_using_dict_tags and guard_nn_modules flag.
94
+ # guard_nn_modules unspecializes the nn module instance and adds guard for each
95
+ # relevant member of the nn modules. On the other hand,
96
+ # guard_nn_modules_using_dict_tags specializes on each nn module instance but
97
+ # uses low overhead dict version matching to detect mutations, obviating the
98
+ # need to guard on members of the nn modules. With
99
+ # guard_nn_modules_using_dict_tags, the guard_nn_modules is not really required
100
+ # but kept around for debugging and discussing unspecializing nn module
101
+ # variables.
102
+ # TODO(janimesh, voz): Remove both of these flags (or at least guard_nn_modules)
103
+ # once we have reached stability for the guard_nn_modules_using_dict_tags.
104
+ guard_nn_modules_using_dict_tags = True
105
+
106
+ # This feature doesn't really work. We offer this flag for experimental
107
+ # purposes / if you want to help us build out support.
108
+ #
109
+ # torchdynamo has very limited support for tensor subclasses that implement
110
+ # __torch_function__. Our current support is limited to tensor subclasses
111
+ # that DO NOT store metadata on the tensor (in general, dynamo does not
112
+ # support Python code that stores extra attributes on tensors at present).
113
+ # If your tensor subclass purely changes function call behavior via
114
+ # __torch_function__, you can allow torchdynamo to trace into it by
115
+ # adding it to traceable_tensor_subclasses. We don't do any safety checks,
116
+ # so it is up to you to ensure that your subclass is well behaved. See also
117
+ # https://github.com/pytorch/torchdynamo/issues/1948
118
+ #
119
+ # We do NOT currently support __torch_dispatch__. The implementation is
120
+ # currently buggy, the main show stopper for nontrivial use is
121
+ # https://github.com/pytorch/torchdynamo/issues/1952
122
+ traceable_tensor_subclasses: Set[Type[Any]] = set()
123
+
124
+ # Suppress errors in torch._dynamo.optimize, instead forcing a fallback to eager.
125
+ # This is a good way to get your model to work one way or another, but you may
126
+ # lose optimization opportunities this way. Devs, if your benchmark model is failing
127
+ # this way, you should figure out why instead of suppressing it.
128
+ suppress_errors = bool(os.environ.get("TORCHDYNAMO_SUPPRESS_ERRORS", False))
129
+
130
+ # Record and write an execution record of the current frame to a file
131
+ # if an exception is encountered
132
+ # @compile_ignored[debug]
133
+ replay_record_enabled = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"
134
+
135
+ # Rewrite assert statement in python with torch._assert
136
+ rewrite_assert_with_torch_assert = True
137
+
138
+ # Disable dynamo
139
+ disable = os.environ.get("TORCH_COMPILE_DISABLE", False)
140
+
141
+ # [@compile_ignored: runtime_behaviour] Get a cprofile trace of Dynamo
142
+ cprofile = os.environ.get("TORCH_COMPILE_CPROFILE", False)
143
+
144
+ # legacy config, does nothing now!
145
+ skipfiles_inline_module_allowlist: Dict[Any, Any] = {}
146
+
147
+ # If a string representing a PyTorch module is in this ignorelist,
148
+ # the `allowed_functions.is_allowed` function will not consider it
149
+ # when creating a list of PyTorch functions that will appear in
150
+ # FX IR.
151
+ allowed_functions_module_string_ignorelist = {
152
+ "torch.distributions",
153
+ "torch.testing",
154
+ "torch._refs",
155
+ "torch._prims",
156
+ "torch._decomp",
157
+ }
158
+
159
+ # Debug Flag to try minifier at different stages. Possible values are {None, "aot", "dynamo"}
160
+ # None - Minifier is switched off
161
+ # dynamo - Runs minifier on the TorchDynamo produced graphs, if compilation fails
162
+ # aot - Runs minifier on the Aot Autograd produced graphs, if compilation fails
163
+ # [@compile_ignored: debug]
164
+ repro_after = os.environ.get("TORCHDYNAMO_REPRO_AFTER", None)
165
+
166
+ # Compiler compilation debug info
167
+ # 1: Dumps the original graph out to repro.py if compilation fails
168
+ # 2: Dumps a minifier_launcher.py if compilation fails.
169
+ # 3: Always dumps a minifier_launcher.py. Good for segfaults.
170
+ # 4: Dumps a minifier_launcher.py if the accuracy fails.
171
+ # [@compile_ignored: debug]
172
+ repro_level = int(os.environ.get("TORCHDYNAMO_REPRO_LEVEL", 2))
173
+
174
+ # By default, we try to detect accuracy failure by running both forward
175
+ # and backward of a torchdynamo produced graph (if you are using repro_after
176
+ # 'dynamo'). This setting forces us to only test the forward graph and
177
+ # not the backward graph. This can be helpful if you're trying to debug
178
+ # an inference only problem, but the minifier seems to be choking on the
179
+ # backwards step
180
+ # TODO: Detect this situation automatically so the user doesn't need
181
+ # to manually configure this
182
+ # [@compile_ignored: debug]
183
+ repro_forward_only = os.environ.get("TORCHDYNAMO_REPRO_FORWARD_ONLY") == "1"
184
+
185
+ # The tolerance we should use when testing if a compiled graph
186
+ # has diverged so that we should treat it as an accuracy failure
187
+ # [@compile_ignored: debug]
188
+ repro_tolerance = 1e-3
189
+
190
+ # If True, when testing if two models are the same, we will test them against
191
+ # a third fp64 reference and only report a problem if the RMSE relative to the
192
+ # fp64 is greater. However, this will use more memory; you may disable this
193
+ # if memory usage is too high.
194
+ # [@compile_ignored: runtime_behaviour]
195
+ same_two_models_use_fp64 = True
196
+
197
+ # Not all backends support scalars. Some calls on torch.Tensor (like .item()) return a scalar type.
198
+ # When this flag is set to False, we introduce a graph break instead of capturing.
199
+ # This requires dynamic_shapes to be True.
200
+ capture_scalar_outputs = False
201
+
202
+ # Not all backends support operators that have dynamic output shape (e.g.,
203
+ # nonzero, unique). When this flag is set to False, we introduce a graph
204
+ # break instead of capturing. This requires dynamic_shapes to be True.
205
+ # If you set this to True, you probably also want capture_scalar_outputs
206
+ # (these are separated for historical reasons).
207
+ capture_dynamic_output_shape_ops = False
208
+
209
+ # By default, dynamo will treat all ints as backed SymInts, which means (1) it
210
+ # will wait to see the int change over multiple runs before generalizing and
211
+ # (2) it will still always 0/1 specialize an int. When true, this knob
212
+ # forces dynamo to treat _length_per_key and _offset_per_key on
213
+ # KeyedJaggedTensor from torchrec as size-like unbacked SymInts, so that
214
+ # they (1) generalize immediately and (2) unsoundly never compare equal to
215
+ # 0/1. This is not on by default as AOTAutograd/Inductor cannot currently
216
+ # compile this code; however, this can be useful for export.
217
+ force_unspec_int_unbacked_size_like_on_torchrec_kjt = False
218
+
219
+ # Should almost always be true in prod. This relaxes the requirement that cond's true_fn and
220
+ # false_fn produces code with identical guards.
221
+ enforce_cond_guards_match = True
222
+
223
+ # Specify how to optimize a compiled DDP module. The flag accepts a boolean
224
+ # value or a string. There are 4 modes.
225
+ # 1. "ddp_optimizer" (or True): with "ddp_optimizer", Dynamo will automatically
226
+ # split model graph into pieces to match DDP bucket sizes to allow DDP
227
+ # comm/compute overlap.
228
+ # 2. "python_reducer" (experimental): this optimization requires the usage
229
+ # of compiled_autograd. With "python_reducer", DDP will disable the C++ reducer
230
+ # and use the Python reducer to allow compiled_autograd to trace the
231
+ # communication and allow comm/compute overlap without graph-breaks.
232
+ # 3. "python_reducer_without_compiled_forward" (experimental): this mode is
233
+ # similar to "python_reducer". One should only use this optimization mode
234
+ # when compiled_autograd is used but the DDP module is not compiled.
235
+ # 4. "no_optimization" (or False): Dynamo won't split the model graph, nor
236
+ # will Python reducer be used. With this mode, there will be no graph-breaks
237
+ # and the original DDP C++ reducer will be used. There will no comm/compute
238
+ # overlap. This mode CANNOT be used with compiled_autograd.
239
+ # Note that to avoid breaking the existing usage, mode 1 and mode 4 can be
240
+ # specified with a boolean value. True is using ddp_optimizer and False is
241
+ # no optimization.
242
+ optimize_ddp: Union[bool, str] = True
243
+
244
+ _ddp_optimization_mode = [
245
+ "ddp_optimizer",
246
+ "python_reducer", # experimental mode
247
+ "python_reducer_without_compiled_forward", # experimental mode
248
+ "no_optimization",
249
+ ]
250
+
251
+
252
+ def _get_optimize_ddp_mode():
253
+ m = sys.modules[__name__]
254
+ if isinstance(m.optimize_ddp, bool):
255
+ if m.optimize_ddp:
256
+ mode = "ddp_optimizer"
257
+ else:
258
+ mode = "no_optimization"
259
+ elif isinstance(m.optimize_ddp, str):
260
+ mode = m.optimize_ddp
261
+ else:
262
+ raise ValueError(f"Invalid type, {type(optimize_ddp)=}")
263
+
264
+ assert mode in m._ddp_optimization_mode, f"Invalid mode {mode=}"
265
+ return mode
266
+
267
+
268
+ # If True, delays DDPOptimizer submodule compilation to 1st run of the model,
269
+ # so that real tensor strides are used in all submodules
270
+ # (instead of using FakeTensor strides which can differ from real tensor strides and causes error in some cases).
271
+ # This feature is not hardened yet and it's known to cause issues to some models, so False by default.
272
+ optimize_ddp_lazy_compile = False
273
+
274
+ # Whether to skip guarding on FSDP-managed modules
275
+ skip_fsdp_guards = True
276
+
277
+ # Make dynamo skip guarding on hooks on nn modules
278
+ # Note: unsafe: if your model actually has hooks and you remove them, or doesn't and you add them,
279
+ # dynamo will not notice and will execute whichever version you first compiled.
280
+ skip_nnmodule_hook_guards = True
281
+
282
+ # If True, raises exception if TorchDynamo is called with a context manager
283
+ raise_on_ctx_manager_usage = True
284
+
285
+ # If True, raise when aot autograd is unsafe to use
286
+ raise_on_unsafe_aot_autograd = False
287
+
288
+ # If true, error if you torch.jit.trace over a dynamo-optimized function.
289
+ # If false, silently suppress dynamo
290
+ error_on_nested_jit_trace = True
291
+
292
+ # If true, error with a better message if we symbolically trace over a
293
+ # dynamo-optimized function. If false, silently suppress dynamo.
294
+ error_on_nested_fx_trace = True
295
+
296
+ # Disables graph breaking on rnn. YMMV with backends.
297
+ allow_rnn = False
298
+
299
+ # If true, error if we try to compile a function that has
300
+ # been seen before.
301
+ # [@compile_ignored: runtime_behaviour]
302
+ error_on_recompile = False
303
+
304
+ # [@compile_ignored: debug] Whether to report any guard failures (deprecated: does not do anything)
305
+ report_guard_failures = True
306
+
307
+ # [@compile_ignored: debug] root folder of the project
308
+ base_dir = dirname(dirname(dirname(abspath(__file__))))
309
+
310
+ # Trace through NumPy or graphbreak
311
+ trace_numpy = True
312
+
313
+ # Trace through torch.distributed code
314
+ trace_distributed = False
315
+
316
+ # Default NumPy dtypes when tracing with torch.compile
317
+ # We default to 64bits. For efficiency, one may want to change these to float32
318
+ numpy_default_float = "float64"
319
+ numpy_default_complex = "complex128"
320
+ numpy_default_int = "int64"
321
+
322
+ # use numpy's PRNG if True, pytorch otherwise
323
+ use_numpy_random_stream = False
324
+
325
+
326
+ def is_fbcode():
327
+ return not hasattr(torch.version, "git_version")
328
+
329
+
330
+ def default_debug_dir_root():
331
+ # [@compile_ignored: debug]
332
+ DEBUG_DIR_VAR_NAME = "TORCH_COMPILE_DEBUG_DIR"
333
+ if DEBUG_DIR_VAR_NAME in os.environ:
334
+ return os.path.join(os.environ[DEBUG_DIR_VAR_NAME], "torch_compile_debug")
335
+ elif is_fbcode():
336
+ return os.path.join(
337
+ tempfile.gettempdir(), getpass.getuser(), "torch_compile_debug"
338
+ )
339
+ else:
340
+ return os.path.join(os.getcwd(), "torch_compile_debug")
341
+
342
+
343
+ # [@compile_ignored: debug]
344
+ debug_dir_root = default_debug_dir_root()
345
+
346
+ # [@compile_ignored: debug]
347
+ _save_config_ignore = {
348
+ "repro_after",
349
+ "repro_level",
350
+ # workaround: "cannot pickle PyCapsule"
351
+ "constant_functions",
352
+ # workaround: "cannot pickle module"
353
+ "skipfiles_inline_module_allowlist",
354
+ }
355
+
356
+ # for backend="cudagraphs", mutations on input be sent to the cudagraph backend
357
+ # or replayed in aot_autograd epilogue. default is False because mutation on inputs
358
+ # can prevent cudagraphing.
359
+ cudagraph_backend_keep_input_mutation = False
360
+
361
+ # When True, only ops that have the torch.Tag.pt2_compliant tag
362
+ # will be allowed into the graph; all other ops will be disallowed
363
+ # and will fall back to eager-mode PyTorch. Useful to ensure
364
+ # correctness of custom ops.
365
+ only_allow_pt2_compliant_ops = False
366
+
367
+ capture_autograd_function = True
368
+
369
+ # enable/disable dynamo tracing for `torch.func` transforms
370
+ capture_func_transforms = False
371
+
372
+ # enable/disable user-defined triton kernel optimizations
373
+ optimize_user_defined_triton_kernels = True
374
+
375
+ # Whether to log Dynamo compilation metrics into log files (for OSS) and Scuba tables (for fbcode).
376
+ log_compilation_metrics = True
377
+
378
+ # A set of logging functions which will be reordered to the end of graph breaks,
379
+ # allowing dynamo to construct larger graphs. Note that there are some
380
+ # limitations to this, such as how it does not correctly print objects that were
381
+ # mutated after the print statement.
382
+ reorderable_logging_functions: Set[Callable[[Any], None]] = set()
383
+
384
+ # simulates what would happen if we didn't have support for BUILD_SET opcode,
385
+ # used for testing
386
+ inject_BUILD_SET_unimplemented_TESTING_ONLY = False
387
+
388
+ _autograd_backward_strict_mode_banned_ops = [
389
+ "stride",
390
+ "requires_grad",
391
+ "storage_offset",
392
+ "layout",
393
+ "data",
394
+ ]
395
+
396
+ _autograd_backward_strict_mode_banned_ops.extend(
397
+ [name for name, _ in inspect.getmembers(torch.Tensor) if re.match(r"^is_.*", name)]
398
+ )
399
+
400
+ # Enables caching of dispatches to fake tensors.
401
+ fake_tensor_cache_enabled = (
402
+ os.environ.get("TORCH_FAKE_TENSOR_DISPATCH_CACHE", "1") == "1"
403
+ )
404
+
405
+ # Enables cross checking between the fake tensor cache and dispatch.
406
+ fake_tensor_cache_crosscheck_enabled = (
407
+ os.environ.get("TORCH_FAKE_TENSOR_DISPATCH_CACHE_CROSSCHECK", "0") == "1"
408
+ )
409
+
410
+ # support `context_fn` in torch.utils.checkpoint.checkpoint API under torch.compile().
411
+ # WARNING: this is an experimental flag and is subject to change.
412
+ _experimental_support_context_fn_in_torch_utils_checkpoint = False
413
+
414
+ if TYPE_CHECKING:
415
+ from torch.utils._config_typing import * # noqa: F401, F403
416
+
417
+ def _make_closure_patcher(**changes):
418
+ ...
419
+
420
+
421
+ from torch.utils._config_module import install_config_module
422
+
423
+ install_config_module(sys.modules[__name__])
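For reference, a minimal sketch of how these config knobs are typically adjusted from user code; the specific values below are illustrative assumptions, not recommendations:

import torch

# Illustrative values only.
torch._dynamo.config.cache_size_limit = 16
torch._dynamo.config.capture_scalar_outputs = True

# install_config_module() also exposes a patch() helper for temporary overrides.
with torch._dynamo.config.patch(suppress_errors=True):
    compiled = torch.compile(lambda x: x + 1)
    compiled(torch.ones(2))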
venv/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py ADDED
@@ -0,0 +1,924 @@
1
+ import collections
2
+ import dis
3
+ import functools
4
+ import itertools
5
+ import logging
6
+ import os
7
+ import random
8
+ import sys
9
+ import threading
10
+ import time
11
+ import traceback
12
+ import types
13
+ import typing
14
+ import weakref
15
+ from typing import Any, Callable, Dict, List, Optional, Set
16
+
17
+ from torch.fx._lazy_graph_module import ( # type: ignore[attr-defined]
18
+ _use_lazy_graph_module,
19
+ )
20
+
21
+ try:
22
+ import numpy as np
23
+ except ModuleNotFoundError:
24
+ np = None # type: ignore[assignment]
25
+
26
+ import torch
27
+ import torch._logging
28
+ from torch._guards import compile_context, CompileContext, CompileId, tracing
29
+ from torch._logging import structured
30
+ from torch._utils_internal import signpost_event
31
+ from torch.fx.experimental.symbolic_shapes import (
32
+ ConstraintViolationError,
33
+ GuardOnDataDependentSymNode,
34
+ )
35
+ from torch.fx.graph_module import _forward_from_src as original_forward_from_src
36
+ from torch.nn.parallel.distributed import DistributedDataParallel
37
+ from torch.utils._python_dispatch import _disable_current_modes
38
+ from torch.utils._traceback import format_traceback_short
39
+
40
+ from . import config, exc, trace_rules
41
+ from .backends.registry import CompilerFn
42
+ from .bytecode_analysis import remove_dead_code, remove_pointless_jumps
43
+ from .bytecode_transformation import (
44
+ check_inst_exn_tab_entries_valid,
45
+ Instruction,
46
+ is_generator,
47
+ propagate_inst_exn_table_entries,
48
+ transform_code_object,
49
+ )
50
+ from .cache_size import (
51
+ CacheSizeRelevantForFrame,
52
+ compute_cache_size,
53
+ exceeds_cache_size_limit,
54
+ is_recompilation,
55
+ )
56
+ from .eval_frame import always_optimize_code_objects, skip_code, TorchPatcher
57
+ from .exc import (
58
+ augment_exc_message,
59
+ BackendCompilerFailed,
60
+ format_error_msg,
61
+ InternalTorchDynamoError,
62
+ TorchRuntimeError,
63
+ UncapturedHigherOrderOpError,
64
+ unimplemented,
65
+ Unsupported,
66
+ )
67
+ from .guards import (
68
+ CheckFunctionManager,
69
+ get_and_maybe_log_recompilation_reason,
70
+ GuardedCode,
71
+ )
72
+ from .hooks import Hooks
73
+ from .output_graph import OutputGraph
74
+ from .replay_record import ExecutionRecord
75
+ from .symbolic_convert import InstructionTranslator, SpeculationLog
76
+ from .trace_rules import is_numpy
77
+ from .types import BytecodeHook
78
+ from .utils import (
79
+ CleanupManager,
80
+ CompilationMetrics,
81
+ counters,
82
+ dynamo_timed,
83
+ format_bytecode,
84
+ frame_phase_timing,
85
+ gen_record_file_name,
86
+ increment_frame,
87
+ is_namedtuple,
88
+ istype,
89
+ LazyString,
90
+ maybe_cprofile,
91
+ orig_code_map,
92
+ record_compilation_metrics,
93
+ reset_graph_break_dup_checker,
94
+ setup_compile_debug,
95
+ troubleshooting_url,
96
+ write_record_to_file,
97
+ )
98
+
99
+ log = logging.getLogger(__name__)
100
+ bytecode_log = torch._logging.getArtifactLogger(__name__, "bytecode")
101
+ GlobalStateGuard = torch._C._dynamo.guards.GlobalStateGuard
102
+
103
+ compile_lock = threading.RLock()
104
+
105
+
106
+ class Tracker:
107
+ def __init__(self):
108
+ self.seen = []
109
+ self.seen_ids = set()
110
+
111
+ def add(self, strong_obj):
112
+ idx = id(strong_obj)
113
+ if idx not in self.seen_ids:
114
+ obj = weakref.ref(strong_obj, lambda _: self.seen_ids.remove(idx))
115
+ self.seen.append(obj)
116
+ self.seen_ids.add(idx)
117
+
118
+ def __contains__(self, item):
119
+ return id(item) in self.seen_ids
120
+
121
+ def clear(self):
122
+ self.seen.clear()
123
+ self.seen_ids.clear()
124
+
125
+
126
+ input_codes = Tracker()
127
+ output_codes = Tracker()
128
+
129
+ initial_global_state: Optional[GlobalStateGuard] = None
130
+
131
+
132
+ @functools.wraps(original_forward_from_src)
133
+ def fx_forward_from_src_skip_result(*args, **kwargs):
134
+ # we monkey patch FX to prevent infinite loop of trying to convert
135
+ # our generated code
136
+ result: types.FunctionType = original_forward_from_src(*args, **kwargs)
137
+ skip_code(result.__code__)
138
+ return result
139
+
140
+
141
+ def preserve_global_state(fn):
142
+ """
143
+ Context manager to:
144
+ 1) Save/restore torch.is_grad_enabled() state
145
+ 2) Save/restore python random state
146
+ 3) Save/restore torch random state
147
+ 4) Monkey patch torch.fx.graph_module._forward_from_src
148
+ """
149
+
150
+ @functools.wraps(fn)
151
+ def _fn(*args, **kwargs):
152
+ guards = GlobalStateGuard()
153
+ prior_grad_mode = torch.is_grad_enabled()
154
+ prior_inference_mode = torch.is_inference_mode_enabled()
155
+ prior_deterministic = torch.are_deterministic_algorithms_enabled()
156
+ prior_warn_only = torch.is_deterministic_algorithms_warn_only_enabled()
157
+ py_rng_state = random.getstate()
158
+ torch_rng_state = torch.random.get_rng_state()
159
+ if torch.cuda.is_available():
160
+ cuda_rng_state = torch.cuda.get_rng_state()
161
+ prior_fwd_from_src = torch.fx.graph_module._forward_from_src
162
+ torch.fx.graph_module._forward_from_src = fx_forward_from_src_skip_result
163
+ cleanup = setup_compile_debug()
164
+ try:
165
+ return fn(*args, **kwargs)
166
+ finally:
167
+ cleanup.close()
168
+ torch._C._set_grad_enabled(prior_grad_mode)
169
+ torch.torch.autograd.grad_mode._enter_inference_mode(prior_inference_mode)
170
+ torch.use_deterministic_algorithms(
171
+ prior_deterministic, warn_only=prior_warn_only
172
+ )
173
+ random.setstate(py_rng_state)
174
+ torch.random.set_rng_state(torch_rng_state)
175
+ if torch.cuda.is_available():
176
+ torch.cuda.set_rng_state(cuda_rng_state) # type: ignore[possibly-undefined]
177
+ torch.fx.graph_module._forward_from_src = prior_fwd_from_src
178
+ assert (
179
+ guards.check()
180
+ ), "Global state changed while dynamo tracing, please report a bug"
181
+
182
+ _fn._torchdynamo_orig_callable = fn # type: ignore[attr-defined]
183
+ return _fn
184
+
185
+
186
+ @TorchPatcher.suppress_torch_distributed_warnings
187
+ def has_tensor_in_frame(frame):
188
+ """Check if the frame has torch.* related bits"""
189
+ # Check if the function was decorated using torch._dynamo.optimize
190
+ if frame.f_code in always_optimize_code_objects:
191
+ return True
192
+
193
+ # Check if there is global import of torch.*
194
+ for co_name in frame.f_code.co_names:
195
+ if co_name in frame.f_globals:
196
+ obj = frame.f_globals[co_name]
197
+ if isinstance(obj, types.ModuleType) and (
198
+ obj.__name__.startswith("torch.") or obj is torch
199
+ ):
200
+ return True
201
+ # ... or a global import of numpy.*
202
+ if np and config.trace_numpy and (obj is np or is_numpy(obj)):
203
+ return True
204
+
205
+ seen_ids: Dict[int, bool] = dict()
206
+
207
+ def has_tensor(obj):
208
+ """Recursively check if the obj has a tensor"""
209
+ obj_id = id(obj)
210
+ if obj_id in seen_ids:
211
+ return seen_ids[obj_id]
212
+ seen_ids[obj_id] = False
213
+
214
+ if isinstance(obj, (torch.Tensor, torch.nn.Module)) or (
215
+ istype(obj, type) and issubclass(obj, torch.nn.Module)
216
+ ):
217
+ seen_ids[obj_id] = True
218
+ return seen_ids[obj_id]
219
+ elif (
220
+ config.trace_numpy
221
+ and np
222
+ and (istype(obj, np.ndarray) or isinstance(obj, np.generic))
223
+ ):
224
+ seen_ids[obj_id] = True
225
+ return seen_ids[obj_id]
226
+ elif istype(obj, (list, tuple)):
227
+ seen_ids[obj_id] = any(has_tensor(v) for v in obj)
228
+ return seen_ids[obj_id]
229
+ elif istype(obj, dict):
230
+ # Some packages like pytest can be updated during runtime. So, make a
231
+ # copy of values to avoid issues like "RuntimeError: dictionary
232
+ # changed size during iteration"
233
+ values = list(obj.values())
234
+ seen_ids[obj_id] = any(has_tensor(v) for v in values)
235
+ return seen_ids[obj_id]
236
+ elif istype(obj, (str, int, float, type(None), bool)):
237
+ seen_ids[obj_id] = False
238
+ return seen_ids[obj_id]
239
+ elif is_namedtuple(obj) and hasattr(obj, "_fields"):
240
+ seen_ids[obj_id] = any(has_tensor(getattr(obj, v)) for v in obj._fields)
241
+ return seen_ids[obj_id]
242
+ else:
243
+ # if config.debug:
244
+ # print(
245
+ # f"Assuming that object of type {type(obj)} does not have a tensor"
246
+ # )
247
+ return False
248
+
249
+ # Check if the passed arguments are of type Tensor
250
+ for value in frame.f_locals.values():
251
+ if has_tensor(value):
252
+ return True
253
+
254
+ log.debug(
255
+ "skipping because no torch.* %s \
256
+ %s %s",
257
+ frame.f_code.co_name,
258
+ frame.f_code.co_filename,
259
+ frame.f_code.co_firstlineno,
260
+ )
261
+
262
+ return False
263
+
264
+
265
+ def exception_handler(e, code, frame=None, export=False):
266
+ record_filename = None
267
+ if hasattr(e, "exec_record"):
268
+ record_filename = gen_record_file_name(e, code)
269
+ write_record_to_file(record_filename, e.exec_record)
270
+ e.record_filename = record_filename
271
+
272
+ augment_exc_message(e, export=export)
273
+
274
+
275
+ FRAME_COUNTER = 0
276
+ FRAME_COMPILE_COUNTER: typing.Counter[int] = collections.Counter()
277
+
278
+
279
+ def convert_frame_assert(
280
+ compiler_fn: CompilerFn,
281
+ one_graph: bool = True,
282
+ export: bool = False,
283
+ export_constraints=None,
284
+ ):
285
+ """Fully convert a frame into an FX graph"""
286
+ reset_graph_break_dup_checker()
287
+
288
+ def _convert_frame_assert(
289
+ frame: types.FrameType, cache_entry, hooks: Hooks, frame_state, *, skip: int = 0
290
+ ):
291
+ increment_frame()
292
+
293
+ code = frame.f_code
294
+
295
+ cache_size = compute_cache_size(frame, cache_entry)
296
+ recompile_reasons = None
297
+ if is_recompilation(cache_size):
298
+ recompile_reasons = get_and_maybe_log_recompilation_reason(
299
+ cache_entry, frame
300
+ )
301
+
302
+ input_codes.add(code)
303
+ if code in output_codes:
304
+ return None
305
+ if (
306
+ os.environ.get("TORCHDYNAMO_DEBUG_FUNCTION")
307
+ and os.environ.get("TORCHDYNAMO_DEBUG_FUNCTION") != code.co_name
308
+ ):
309
+ return None
310
+ if code.co_name == "<genexpr>" and code.co_filename.endswith(
311
+ (
312
+ "transformers/file_utils.py",
313
+ "transformers/utils/generic.py",
314
+ "diffusers/utils/outputs.py",
315
+ )
316
+ ):
317
+ # not needed, but cleans up torchbench error stats
318
+ return None
319
+ if code.co_name == "__setattr__":
320
+ # setattr could be tricky to handle generally,
321
+ # but also not likely useful to compile; skip the whole frame
322
+ return None
323
+ if code.co_name == "__init__" and code.co_filename.startswith(
324
+ os.path.dirname(torch.optim.__file__)
325
+ ):
326
+ # optimizer support is still incomplete see
327
+ # test_state_dict in test/dynamo/test_optimizers.py
328
+ return None
329
+
330
+ # Check if the frame is generated by an exec builtin call
331
+ # TODO - Running an exec-generated frame seems to propagate f_globals to the
332
+ # next frames.
333
+ if code.co_name == "<module>" and code.co_filename == "<string>":
334
+ return None
335
+
336
+ if (
337
+ code.co_name == "<lambda>"
338
+ and code.co_filename == "<string>"
339
+ and not bool(frame.f_builtins)
340
+ ):
341
+ # namedtuple subclass constructor. Empty builtins cause issue with
342
+ # len keyword in LIST_LEN guard.
343
+ return None
344
+
345
+ if is_generator(code):
346
+ unimplemented("generator")
347
+ exceeded, limit_type = exceeds_cache_size_limit(cache_size)
348
+ if exceeded:
349
+
350
+ def format_func_info(code):
351
+ return f"'{code.co_name}' ({code.co_filename}:{code.co_firstlineno})"
352
+
353
+ def format_guard_failures():
354
+ assert recompile_reasons, "TODO(whc) any other recompile reasons?"
355
+ return recompile_reasons[-1]
356
+
357
+ log.warning(
358
+ "torch._dynamo hit config.%s (%s)\n"
359
+ " function: %s\n"
360
+ " last reason: %s\n"
361
+ 'To log all recompilation reasons, use TORCH_LOGS="recompiles".\n'
362
+ "To diagnose recompilation issues, see %s.",
363
+ limit_type,
364
+ getattr(config, limit_type),
365
+ format_func_info(code),
366
+ format_guard_failures(),
367
+ troubleshooting_url,
368
+ )
369
+ unimplemented(f"{limit_type} reached")
370
+
371
+ if not has_tensor_in_frame(frame):
372
+ return None
373
+
374
+ global initial_global_state
375
+ initial_global_state = GlobalStateGuard()
376
+
377
+ global FRAME_COUNTER
378
+ if "_id" not in frame_state:
379
+ frame_state["_id"] = FRAME_COUNTER
380
+ FRAME_COUNTER += 1
381
+ frame_id = frame_state["_id"]
382
+
383
+ frame_compile_id = FRAME_COMPILE_COUNTER[frame_id]
384
+ FRAME_COMPILE_COUNTER[frame_id] += 1
385
+
386
+ compile_id = CompileId(frame_id, frame_compile_id)
387
+
388
+ signpost_event(
389
+ "dynamo",
390
+ "_convert_frame_assert._compile",
391
+ {
392
+ "co_name": code.co_name,
393
+ "co_filename": code.co_filename,
394
+ "co_firstlineno": code.co_firstlineno,
395
+ "cache_size": cache_size.num_cache_entries_with_same_id_matched_objs,
396
+ "accumulated_cache_size": cache_size.num_cache_entries,
397
+ },
398
+ )
399
+
400
+ return _compile(
401
+ frame.f_code,
402
+ frame.f_globals,
403
+ frame.f_locals,
404
+ frame.f_builtins,
405
+ compiler_fn,
406
+ one_graph,
407
+ export,
408
+ export_constraints,
409
+ hooks,
410
+ cache_size,
411
+ frame,
412
+ frame_state=frame_state,
413
+ compile_id=compile_id,
414
+ skip=skip + 1,
415
+ )
416
+
417
+ _convert_frame_assert._torchdynamo_orig_callable = compiler_fn # type: ignore[attr-defined]
418
+
419
+ def _clone_with_backend(backend):
420
+ return convert_frame_assert(backend, one_graph, export, export_constraints)
421
+
422
+ _convert_frame_assert._clone_with_backend = _clone_with_backend # type: ignore[attr-defined]
423
+ return _convert_frame_assert
424
+
425
+
426
+ from collections import OrderedDict
427
+
428
+ from torch.utils.hooks import RemovableHandle
429
+
430
+ # we have to use `OrderedDict` to make `RemovableHandle` work.
431
+ _bytecode_hooks: Dict[int, BytecodeHook] = OrderedDict()
432
+
433
+
434
+ def register_bytecode_hook(hook: BytecodeHook) -> RemovableHandle:
435
+ """Register hooks for bytecode generated by Dynamo. The hook can do some
436
+ logging, as well as return a new code object to be used. Please refer
437
+ to `BytecodeHook` for the hook signature.
438
+ """
439
+ handle = RemovableHandle(_bytecode_hooks)
440
+ _bytecode_hooks[handle.id] = hook
441
+ return handle
442
+
443
+
444
+ @_use_lazy_graph_module(config.use_lazy_graph_module)
445
+ @maybe_cprofile
446
+ def _compile(
447
+ code: types.CodeType,
448
+ globals: Dict[str, object],
449
+ locals: Dict[str, object],
450
+ builtins: Dict[str, object],
451
+ compiler_fn: CompilerFn,
452
+ one_graph: bool,
453
+ export: bool,
454
+ export_constraints,
455
+ hooks: Hooks,
456
+ cache_size: CacheSizeRelevantForFrame,
457
+ frame: Optional[types.FrameType] = None,
458
+ frame_state=None,
459
+ compile_id=None,
460
+ *,
461
+ skip: int = 0,
462
+ ) -> Optional[GuardedCode]:
463
+ from torch.fx.experimental.validator import (
464
+ bisect,
465
+ BisectValidationException,
466
+ translation_validation_enabled,
467
+ ValidationException,
468
+ )
469
+
470
+ output: Optional[OutputGraph] = None
471
+ tracer: Optional[InstructionTranslator] = None
472
+ # This is shared across restarts
473
+ mutated_closure_cell_contents: Set[str] = set()
474
+ speculation_log = SpeculationLog()
475
+ torch._dynamo.callback_handler.run_start_callbacks()
476
+
477
+ @preserve_global_state
478
+ def transform(instructions, code_options):
479
+ nonlocal output
480
+ nonlocal tracer
481
+ speculation_log.restart()
482
+ tracer = InstructionTranslator(
483
+ instructions,
484
+ code,
485
+ locals,
486
+ globals,
487
+ builtins,
488
+ code_options,
489
+ compiler_fn,
490
+ one_graph,
491
+ export,
492
+ export_constraints,
493
+ mutated_closure_cell_contents,
494
+ frame_state=frame_state,
495
+ speculation_log=speculation_log,
496
+ )
497
+
498
+ try:
499
+ with tracing(tracer.output.tracing_context), tracer.set_current_tx():
500
+ tracer.run()
501
+ except exc.UnspecializeRestartAnalysis:
502
+ speculation_log.clear()
503
+ raise
504
+ except (exc.SpeculationRestartAnalysis, exc.SkipFrame):
505
+ raise
506
+ except Exception:
507
+ if translation_validation_enabled():
508
+ bisect(tracer.output.shape_env)
509
+ raise
510
+ finally:
511
+ tracer.output.call_cleanup_hooks()
512
+
513
+ output = tracer.output
514
+ assert output is not None
515
+ assert output.output_instructions
516
+ instructions[:] = output.output_instructions
517
+ code_options.update(output.code_options)
518
+
519
+ if config.dead_code_elimination:
520
+ propagate_inst_exn_table_entries(instructions)
521
+ check_inst_exn_tab_entries_valid(instructions)
522
+ instructions[:] = remove_pointless_jumps(remove_dead_code(instructions))
523
+
524
+ @dynamo_timed(phase_name="entire_frame_compile")
525
+ def compile_inner(
526
+ code: types.CodeType,
527
+ one_graph: bool,
528
+ hooks: Hooks,
529
+ transform: Callable[[List[Instruction], Dict[str, Any]], Any],
530
+ ) -> Optional[GuardedCode]:
531
+ nonlocal output
532
+ for attempt in itertools.count():
533
+ CompileContext.get().attempt = attempt
534
+ try:
535
+ out_code = transform_code_object(code, transform)
536
+ break
537
+ except exc.RestartAnalysis as e:
538
+ log.info(
539
+ "Restarting analysis due to %s",
540
+ LazyString(format_traceback_short, e.__traceback__),
541
+ )
542
+ if attempt > 100:
543
+ unimplemented("100+ RestartAnalysis() calls")
544
+ except exc.SkipFrame as e:
545
+ log.debug(
546
+ "Skipping frame %s %s \
547
+ %s %s",
548
+ e,
549
+ code.co_name,
550
+ code.co_filename,
551
+ code.co_firstlineno,
552
+ )
553
+ if one_graph:
554
+ log.debug("No graph captured with one_graph=True")
555
+ return None
556
+
557
+ def log_bytecode(prefix, name, filename, line_no, code):
558
+ if bytecode_log.isEnabledFor(logging.DEBUG):
559
+ bytecode_log.debug(
560
+ format_bytecode(prefix, name, filename, line_no, code)
561
+ )
562
+
563
+ log_bytecode(
564
+ "ORIGINAL BYTECODE",
565
+ code.co_name,
566
+ code.co_filename,
567
+ code.co_firstlineno,
568
+ code,
569
+ )
570
+ log_bytecode(
571
+ "MODIFIED BYTECODE",
572
+ code.co_name,
573
+ code.co_filename,
574
+ code.co_firstlineno,
575
+ out_code, # type: ignore[possibly-undefined]
576
+ )
577
+
578
+ for hook in _bytecode_hooks.values():
579
+ hook_output = hook(code, out_code)
580
+ if hook_output is not None:
581
+ out_code = hook_output
582
+
583
+ orig_code_map[out_code] = code
584
+ output_codes.add(out_code)
585
+
586
+ assert output is not None
587
+
588
+ # Tests for new code objects.
589
+ # The rationale for these tests can be found in torch/csrc/dynamo/eval_frame.c
590
+ # Only test once the code object is created.
591
+ # They are not tested during runtime.
592
+
593
+ def count_args(code):
594
+ import inspect
595
+
596
+ return (
597
+ code.co_argcount
598
+ + code.co_kwonlyargcount
599
+ + bool(code.co_flags & inspect.CO_VARARGS)
600
+ + bool(code.co_flags & inspect.CO_VARKEYWORDS)
601
+ )
602
+
603
+ total_argcount_old = count_args(code)
604
+ total_argcount_new = count_args(out_code)
605
+ msg = "arg mismatch: "
606
+ msg += f"old code object has args {code.co_varnames[:total_argcount_old]}, "
607
+ msg += f"new code object has args {out_code.co_varnames[:total_argcount_new]}"
608
+ assert (
609
+ code.co_varnames[:total_argcount_old]
610
+ == out_code.co_varnames[:total_argcount_new]
611
+ ), msg
612
+
613
+ msg = "free var mismatch: "
614
+ msg += f"old code object has free var {code.co_freevars}, "
615
+ msg += f"new code object has free var {out_code.co_freevars}"
616
+ assert code.co_freevars == out_code.co_freevars, msg
617
+
618
+ msg = "cell var mismatch: "
619
+ msg += f"old code object has cell var {code.co_cellvars}, "
620
+ msg += f"new code object has cell var {out_code.co_cellvars}"
621
+ assert code.co_cellvars == out_code.co_cellvars, msg
622
+
623
+ # Skipping Dynamo on a frame without any extracted graph.
624
+ # This does not affect eager functionality. But this is necessary
625
+ # for export for cases where Dynamo-reconstructed bytecode can create
626
+ # new function frames, confusing export in thinking that there
627
+ # are extra graphs now.
628
+
629
+ if output.export and output.is_empty_graph():
630
+ return None
631
+
632
+ assert output.guards is not None
633
+ CleanupManager.instance[out_code] = output.cleanups
634
+ check_fn = CheckFunctionManager(
635
+ output,
636
+ hooks.guard_fail_fn if hooks else None,
637
+ )
638
+
639
+ guarded_code = GuardedCode(out_code, check_fn.check_fn)
640
+
641
+ if not output.is_empty_graph() and hooks.guard_export_fn is not None:
642
+ # We should not run the guard_export_fn when Dynamo does not
643
+ # generate any graph. This can happen in export when TorchDynamo
644
+ # generated bytecode has some reconstruction logic for mutated
645
+ # variables which can trigger TorchDynamo on the children frames but
646
+ # they are benign and do not generate any new graphs.
647
+ hooks.guard_export_fn(output.guards)
648
+
649
+ return guarded_code
650
+
651
+ with compile_context(CompileContext(compile_id)):
652
+ log.debug(
653
+ "torchdynamo start compiling %s %s:%s, stack (elided %s frames):\n%s",
654
+ code.co_name,
655
+ code.co_filename,
656
+ code.co_firstlineno,
657
+ skip + 2,
658
+ # -2: omit current frame, omit contextlib decorator
659
+ "".join(traceback.format_list(traceback.extract_stack()[: -2 - skip])),
660
+ )
661
+ # -4: -2 as above, plus trace_structured frames
662
+ torch._logging.trace_structured(
663
+ "dynamo_start",
664
+ lambda: {
665
+ "stack": structured.from_traceback(
666
+ traceback.extract_stack()[: -4 - skip]
667
+ )
668
+ },
669
+ )
670
+ start_time = time.time()
671
+ fail_type: Optional[str] = None
672
+ fail_reason: Optional[str] = None
673
+ fail_user_frame_filename: Optional[str] = None
674
+ fail_user_frame_lineno: Optional[int] = None
675
+ try:
676
+ guarded_code = compile_inner(code, one_graph, hooks, transform)
677
+ return guarded_code
678
+ except (
679
+ Unsupported,
680
+ TorchRuntimeError,
681
+ BackendCompilerFailed,
682
+ AssertionError,
683
+ ConstraintViolationError,
684
+ GuardOnDataDependentSymNode,
685
+ ValidationException,
686
+ UncapturedHigherOrderOpError,
687
+ BisectValidationException,
688
+ ) as e:
689
+ fail_type = str(type(e))
690
+ fail_reason = str(e)
691
+ exception_handler(e, code, frame, export=export)
692
+ if e.innermost_user_frame_summary is not None: # type: ignore[union-attr]
693
+ fail_user_frame_filename = e.innermost_user_frame_summary.filename # type: ignore[union-attr]
694
+ fail_user_frame_lineno = e.innermost_user_frame_summary.lineno # type: ignore[union-attr]
695
+ raise
696
+ except Exception as e:
697
+ fail_type = str(type(e))
698
+ fail_reason = str(e)
699
+ exception_handler(e, code, frame, export=export)
700
+ if e.innermost_user_frame_summary is not None: # type: ignore[attr-defined]
701
+ fail_user_frame_filename = e.innermost_user_frame_summary.filename # type: ignore[attr-defined]
702
+ fail_user_frame_lineno = e.innermost_user_frame_summary.lineno # type: ignore[attr-defined]
703
+ raise InternalTorchDynamoError(str(e)).with_traceback(
704
+ e.__traceback__
705
+ ) from None
706
+ finally:
707
+ if tracer:
708
+ tracer.output.local_scope = {}
709
+
710
+ from .utils import curr_frame
711
+
712
+ frame_key = str(curr_frame)
713
+ if (
714
+ fail_reason is None
715
+ and output is not None
716
+ and frame_key in frame_phase_timing
717
+ ):
718
+ guard_count = len(output.guards)
719
+ shape_env_guard_count = len(output.shape_env.guards)
720
+ graph_op_count = output.count_calls()
721
+ graph_node_count = len(output.graph.nodes)
722
+ graph_input_count = len(output.placeholders)
723
+ entire_frame_compile_time = frame_phase_timing[frame_key].get(
724
+ "entire_frame_compile", None
725
+ )
726
+ backend_compile_time = frame_phase_timing[frame_key].get(
727
+ "backend_compile", None
728
+ )
729
+ inductor_compile_time = frame_phase_timing[frame_key].get(
730
+ "inductor_compile", None
731
+ )
732
+ code_gen_time = frame_phase_timing[frame_key].get("code_gen", None)
733
+ non_compliant_ops = {op.__qualname__ for op in output.non_compliant_ops}
734
+ compliant_custom_ops = {
735
+ op.__qualname__ for op in output.compliant_custom_ops
736
+ }
737
+ else:
738
+ guard_count = None
739
+ shape_env_guard_count = None
740
+ graph_op_count = None
741
+ graph_node_count = None
742
+ graph_input_count = None
743
+ entire_frame_compile_time = None
744
+ backend_compile_time = None
745
+ inductor_compile_time = None
746
+ code_gen_time = None
747
+ non_compliant_ops = set({})
748
+ compliant_custom_ops = set({})
749
+ metrics = CompilationMetrics(
750
+ frame_key,
751
+ code.co_name,
752
+ code.co_filename,
753
+ code.co_firstlineno,
754
+ cache_size.num_cache_entries_with_same_id_matched_objs,
755
+ cache_size.num_cache_entries,
756
+ guard_count,
757
+ shape_env_guard_count,
758
+ graph_op_count,
759
+ graph_node_count,
760
+ graph_input_count,
761
+ start_time,
762
+ entire_frame_compile_time,
763
+ backend_compile_time,
764
+ inductor_compile_time,
765
+ code_gen_time,
766
+ fail_type,
767
+ fail_reason,
768
+ fail_user_frame_filename,
769
+ fail_user_frame_lineno,
770
+ non_compliant_ops,
771
+ compliant_custom_ops,
772
+ )
773
+ record_compilation_metrics(metrics)
774
+ torch._dynamo.callback_handler.run_end_callbacks()
775
+
776
+
777
+ def convert_frame(compiler_fn: CompilerFn, hooks: Hooks):
778
+ """Try to convert a frame into an FX graph, if error leave frame unmodified"""
779
+ inner_convert = convert_frame_assert(compiler_fn, one_graph=False)
780
+
781
+ def _convert_frame(
782
+ frame: types.FrameType, cache_entry, hooks: Hooks, frame_state, skip: int = 0
783
+ ):
784
+ counters["frames"]["total"] += 1
785
+ try:
786
+ result = inner_convert(
787
+ frame, cache_entry, hooks, frame_state, skip=skip + 1
788
+ )
789
+ counters["frames"]["ok"] += 1
790
+ return result
791
+ except Exception as e:
792
+ # These two exception types are "soft" failure, in the sense that
793
+ # we know this is due to something we didn't implement all the
794
+ # way, scare the user less about it. That being said, if you
795
+ # are trying to understand why a graph break happened, it's still
796
+ # important to have this information, so offer it.
797
+ #
798
+ # NB: NotImplementedError used to be on this list, but actually
799
+ # it is impossible for it to reach here, as it is converted into
800
+ # InternalTorchDynamoError. This behavior seemed reasonable
801
+ # to me (ezyang, Aug 2023) so I kept it, but maybe at some point
802
+ # someone wanted these to also get suppressed. If so, you'll
803
+ # need to make these exceptions not get wrapped
804
+
805
+ # We intentionally don't want to suppress error here.
806
+ if isinstance(e, UncapturedHigherOrderOpError):
807
+ raise
808
+
809
+ soft_fail = isinstance(e, Unsupported)
810
+ if not config.suppress_errors and not soft_fail:
811
+ raise
812
+
813
+ # Suppress the error. NB: It's very important to do the
814
+ # suppression logging HERE, where the actual suppression
815
+ # happens. Previously it was somewhere else and so it was
816
+ # possible to accidentally not log at all.
817
+ record_filename = getattr(e, "record_filename", None)
818
+ code = frame.f_code
819
+ error_msg = format_error_msg(e, code, record_filename, frame)
820
+
821
+ if soft_fail:
822
+ log.info(error_msg, exc_info=True)
823
+ else:
824
+ log.warning(error_msg, exc_info=True)
825
+ return None
826
+
827
+ _convert_frame._torchdynamo_orig_callable = compiler_fn # type: ignore[attr-defined]
828
+ _convert_frame._clone_with_backend = lambda backend: convert_frame(backend, hooks) # type: ignore[attr-defined]
829
+ return _convert_frame
830
+
831
+
832
+ # TODO mlazos: add support for same args, or record them
833
+ def replay(filename):
834
+ from .backends.debugging import eager
835
+
836
+ original_replay_val = config.replay_record_enabled
837
+ config.replay_record_enabled = False
838
+ with open(filename, "rb") as in_file:
839
+ record = ExecutionRecord.load(in_file)
840
+ record.globals = dict(itertools.chain(record.globals.items(), globals().items()))
841
+
842
+ try:
843
+ _compile(
844
+ record.code,
845
+ record.globals,
846
+ record.locals,
847
+ record.builtins,
848
+ compiler_fn=eager,
849
+ one_graph=False,
850
+ export=False,
851
+ export_constraints=None,
852
+ hooks=Hooks(),
853
+ cache_size=CacheSizeRelevantForFrame(0, 0),
854
+ frame=None,
855
+ frame_state={},
856
+ )
857
+ finally:
858
+ config.replay_record_enabled = original_replay_val
859
+
860
+
861
+ def first_real_inst_idx(code):
862
+ if sys.version_info < (3, 11):
863
+ return 0
864
+ for inst in dis.get_instructions(code):
865
+ if inst.opname == "RESUME":
866
+ return inst.offset // 2
867
+ raise RuntimeError("RESUME instruction not found in code")
868
+
869
+
870
+ def catch_errors_wrapper(callback, hooks: Hooks):
871
+ @functools.wraps(callback)
872
+ def catch_errors(frame, cache_entry, frame_state):
873
+ assert frame_state is not None
874
+
875
+ is_skipfile = trace_rules.check(frame.f_code)
876
+ if (
877
+ # TODO: the first condition is not covered by any test
878
+ frame.f_lasti >= first_real_inst_idx(frame.f_code)
879
+ or is_skipfile
880
+ or config.disable
881
+ ):
882
+ if log.isEnabledFor(logging.DEBUG):
883
+ skip_reason = (
884
+ "traced frame already"
885
+ if frame.f_lasti >= first_real_inst_idx(frame.f_code)
886
+ else "in skipfiles"
887
+ if trace_rules.check(frame.f_code)
888
+ else "dynamo tracing is disabled"
889
+ )
890
+ if not is_skipfile or config.verbose:
891
+ log.debug(
892
+ "skipping: %s (reason: %s, file: %s)",
893
+ frame.f_code.co_name,
894
+ skip_reason,
895
+ frame.f_code.co_filename,
896
+ )
897
+ return None
898
+ if frame.f_code.co_filename == "<string>" and frame.f_code.co_name == "__new__":
899
+ # namedtuple constructor
900
+ return None
901
+ if config._get_optimize_ddp_mode() == "ddp_optimizer":
902
+ ddp_module = DistributedDataParallel._get_active_ddp_module()
903
+ if ddp_module:
904
+ with compile_lock:
905
+ from torch._dynamo.backends.distributed import DDPOptimizer
906
+
907
+ ddp_optimizer = DDPOptimizer(
908
+ bucket_bytes_cap=ddp_module.bucket_bytes_cap,
909
+ backend_compile_fn=callback._torchdynamo_orig_callable,
910
+ )
911
+ assert hasattr(
912
+ callback, "_clone_with_backend"
913
+ ), "DDPOptimizer only supports callback fns that know how to clone themselves."
914
+ hijacked_callback = callback._clone_with_backend(
915
+ ddp_optimizer.compile_fn,
916
+ )
917
+ return hijacked_callback(frame, cache_entry, hooks, frame_state)
918
+
919
+ with compile_lock, _disable_current_modes():
920
+ # skip=1: skip this frame
921
+ return callback(frame, cache_entry, hooks, frame_state, skip=1)
922
+
923
+ catch_errors._torchdynamo_orig_callable = callback # type: ignore[attr-defined]
924
+ return catch_errors
venv/lib/python3.10/site-packages/torch/_dynamo/current_scope_id.py ADDED
@@ -0,0 +1,23 @@
1
+ import contextlib
2
+ import threading
3
+
4
+ # Global variable to identify which SubgraphTracer we are in.
5
+ # It is sometimes difficult to find an InstructionTranslator to use.
6
+ _current_scope_id = threading.local()
7
+
8
+
9
+ def current_scope_id():
10
+ global _current_scope_id
11
+ if not hasattr(_current_scope_id, "value"):
12
+ _current_scope_id.value = 1
13
+ return _current_scope_id.value
14
+
15
+
16
+ @contextlib.contextmanager
17
+ def enter_new_scope():
18
+ global _current_scope_id
19
+ try:
20
+ _current_scope_id.value = current_scope_id() + 1
21
+ yield
22
+ finally:
23
+ _current_scope_id.value = current_scope_id() - 1
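+
+
+ # A minimal usage sketch (illustrative): nesting enter_new_scope() bumps the
+ # scope id for the duration of the block and restores it on exit.
+ #
+ #     assert current_scope_id() == 1
+ #     with enter_new_scope():
+ #         assert current_scope_id() == 2
+ #         with enter_new_scope():
+ #             assert current_scope_id() == 3
+ #     assert current_scope_id() == 1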
venv/lib/python3.10/site-packages/torch/_dynamo/debug_utils.py ADDED
@@ -0,0 +1,802 @@
1
+ # mypy: disable-error-code="method-assign"
2
+
3
+ import copy
4
+ import functools
5
+ import getpass
6
+ import inspect
7
+ import itertools
8
+ import logging
9
+ import os
10
+ import re
11
+ import subprocess
12
+ import tempfile
13
+ import textwrap
14
+ from collections import Counter
15
+ from importlib import import_module
16
+ from typing import Any, Callable, Dict, List, Optional, TypeVar
17
+
18
+ import torch
19
+ import torch._prims_common as utils
20
+ import torch._subclasses.meta_utils
21
+ from torch import Tensor
22
+
23
+ from torch._dynamo.testing import rand_strided
24
+ from torch._prims_common import is_float_dtype
25
+ from torch.multiprocessing.reductions import StorageWeakRef
26
+ from torch.utils._content_store import ContentStoreReader, ContentStoreWriter
27
+
28
+ from . import config
29
+ from .utils import clone_inputs, get_debug_dir
30
+
31
+ log = logging.getLogger(__name__)
32
+
33
+ T = TypeVar("T")
34
+
35
+
36
+ inductor_config = import_module("torch._inductor.config")
37
+ use_buck = inductor_config.is_fbcode()
38
+
39
+ if use_buck:
40
+ import libfb.py.build_info
41
+
42
+
43
+ extra_deps = []
44
+ extra_imports = ""
45
+ if use_buck:
46
+ extra_deps = [
47
+ "//caffe2/torch/fb/sparsenn:sparsenn_operators_gpu",
48
+ "//caffe2/torch/fb/sparsenn:sparsenn_operators",
49
+ "//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu",
50
+ "//deeplearning/fbgemm/fbgemm_gpu:sparse_ops",
51
+ ]
52
+ cur_target = libfb.py.build_info.BuildInfo.get_build_rule().replace("fbcode:", "//") # type: ignore[possibly-undefined]
53
+ extra_imports = "\n".join([f'torch.ops.load_library("{x}")' for x in extra_deps])
54
+
55
+
56
+ BUCK_CMD_PREFIX = ["buck2", "run", "@mode/dev-nosan"]
57
+
58
+
59
+ class BuckTargetWriter:
60
+ def __init__(self, filename):
61
+ self.subdir, self.py_file = os.path.split(os.path.abspath(filename))
62
+ self.target = self.py_file.replace(".py", "")
63
+
64
+ # Get main_module path from fbcode
65
+ self.path = f'{self.subdir.replace("/", ".")}.{self.target}'
66
+ self.path = self.path[self.path.find("fbcode.") :]
67
+ self.path = self.path[7:]
68
+
69
+ # Get cmd line path
70
+ tmp = self.subdir
71
+ tmp = tmp[tmp.find("fbcode/") :][7:]
72
+ self.cmd_line_path = f"//{tmp}:{self.target}"
73
+
74
+ def build(self):
75
+ extra_cpp_deps = "\n".join([f' "{x}",' for x in extra_deps])
76
+ return textwrap.dedent(
77
+ f"""
78
+ load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary")
79
+
80
+ python_binary(
81
+ name="{self.target}",
82
+ srcs = ["{self.py_file}"],
83
+ compile = False,
84
+ deps = [
85
+ "//caffe2:torch",
86
+ "//caffe2/functorch:functorch",
87
+ "//triton:triton",
88
+ "{cur_target}",
89
+ ],
90
+ cpp_deps = [
91
+ {extra_cpp_deps}
92
+ ],
93
+ main_module = "{self.path}",
94
+ par_style = "xar",
95
+ )
96
+ """
97
+ )
98
+
99
+ def write(self, print_msg=True):
100
+ target_file = os.path.join(self.subdir, "TARGETS")
101
+ with open(target_file, "w") as fd:
102
+ fd.write(self.build())
103
+ # log.warning("Wrote isolation TARGETS file at %s", target_file)
104
+ cmd_split = BUCK_CMD_PREFIX + [self.cmd_line_path]
105
+ if print_msg:
106
+ log.warning(
107
+ "Found an example that reproduces the error. Run this cmd to repro - %s",
108
+ " ".join(cmd_split),
109
+ )
110
+ return cmd_split
111
+
112
+
113
+ def minifier_dir():
114
+ path = os.path.join(get_debug_dir(), "minifier")
115
+ if path is None:
116
+ path = f"{tempfile.gettempdir()}/minifier_{getpass.getuser()}"
117
+ if not os.path.exists(path):
118
+ os.makedirs(path, exist_ok=True)
119
+ return path
120
+
121
+
122
+ MAX_CONSTANT_NUMEL_INLINE = 4
123
+
124
+
125
+ class NNModuleToString:
126
+ safe_reprs = [
127
+ torch.nn.Linear,
128
+ torch.nn.Conv1d,
129
+ torch.nn.Conv2d,
130
+ torch.nn.Conv3d,
131
+ torch.nn.BatchNorm1d,
132
+ torch.nn.BatchNorm2d,
133
+ torch.nn.BatchNorm3d,
134
+ torch.nn.LayerNorm,
135
+ torch.nn.Dropout,
136
+ torch.nn.Softmax,
137
+ torch.nn.ReLU,
138
+ torch.nn.GELU,
139
+ torch.nn.Identity,
140
+ torch.nn.MaxPool2d,
141
+ torch.nn.Embedding,
142
+ torch.nn.Tanh,
143
+ torch.nn.ConvTranspose1d,
144
+ torch.nn.GLU,
145
+ torch.nn.LSTM,
146
+ torch.nn.Flatten,
147
+ torch.nn.AdaptiveAvgPool2d,
148
+ ]
149
+
150
+ @staticmethod
151
+ def can_convert_to_string(gm):
152
+ cant_convert = set()
153
+ for _, module in gm.named_children():
154
+ if type(module) not in NNModuleToString.safe_reprs:
155
+ cant_convert.add(module)
156
+
157
+ if len(cant_convert) > 0:
158
+ log.warning("We have not tested reprs of some modules - %s", cant_convert)
159
+ # TODO - Assuming that all modules can be safely repr'd. Check if that assumption is correct.
160
+ return True
161
+
162
+ @staticmethod
163
+ def convert(gm):
164
+ from torch.nn.modules.module import _addindent
165
+
166
+ tab = " " * 4
167
+
168
+ model_str = textwrap.dedent(
169
+ """
170
+ from torch.nn import *
171
+ class Repro(torch.nn.Module):
172
+ def __init__(self):
173
+ super().__init__()
174
+ """
175
+ )
176
+
177
+ for module_name, module in gm.named_children():
178
+ module_str = f"{module.__repr__()}"
179
+ # module should be a core torch.nn.Module, so all parameters
180
+ # should be on the same device.
181
+ example_param = next(module.parameters(), None)
182
+ if example_param is not None and example_param.is_cuda:
183
+ module_str = f"{module_str}.cuda()"
184
+ model_str += f"{tab*2}self.{module_name} = {module_str}\n"
185
+
186
+ for buffer_name, buffer in gm._buffers.items():
187
+ if buffer is None:
188
+ continue
189
+ # Serialize full data for small buffers
190
+ if buffer.numel() <= MAX_CONSTANT_NUMEL_INLINE:
191
+ from torch._tensor_str import PRINT_OPTS
192
+
193
+ assert PRINT_OPTS.threshold >= MAX_CONSTANT_NUMEL_INLINE
194
+ tensor_str = repr(buffer)
195
+ elif torch.is_floating_point(buffer):
196
+ tensor_str = f"torch.randn({list(buffer.shape)}, dtype={buffer.dtype})"
197
+ else:
198
+ tensor_str = (
199
+ f"torch.randint(1, size={list(buffer.shape)}, dtype={buffer.dtype})"
200
+ )
201
+ if buffer.is_cuda:
202
+ tensor_str = f"{tensor_str}.cuda()"
203
+ model_str += f"{tab*2}self.register_buffer('{buffer_name}', {tensor_str})\n"
204
+
205
+ for param_name, param in gm._parameters.items():
206
+ if param is None:
207
+ continue
208
+ maybe_device = ""
209
+ if param.is_cuda:
210
+ maybe_device = ', device="cuda"'
211
+ tensor_str = f"torch.nn.Parameter(torch.randn({list(param.shape)}, dtype={param.dtype}{maybe_device}))"
212
+ model_str += f"{tab*2}self.{param_name} = {tensor_str}\n"
213
+
214
+ # TODO - Keep this code for now. But, I don't think we will need this.
215
+ # attrs = dir(gm)
216
+ # for attr in attrs:
217
+ # if "_tensor_constant" in attr:
218
+ # val = getattr(gm, attr)
219
+ # model_str += f" {attr} = {val!r}\n"
220
+
221
+ model_str += f"{_addindent(gm.code, 4)}\n"
222
+ return model_str
223
+
224
+
225
+ @functools.lru_cache(None) # subprocess is expensive
226
+ def _cuda_system_info_comment():
227
+ if not torch.cuda.is_available():
228
+ return "# torch.cuda.is_available()==False, no GPU info collected\n"
229
+
230
+ model_str = "# CUDA Info: \n"
231
+ try:
232
+ cuda_version_out = subprocess.check_output(["nvcc", "--version"])
233
+ cuda_version_lines = cuda_version_out.decode().split("\n")
234
+ comment = "".join([f"# {s} \n" for s in cuda_version_lines if s not in [""]])
235
+ model_str += f"{comment}\n"
236
+ except (FileNotFoundError, subprocess.CalledProcessError):
237
+ model_str += "# nvcc not found\n"
238
+
239
+ gpu_names = Counter(
240
+ torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())
241
+ )
242
+
243
+ model_str += "# GPU Hardware Info: \n"
244
+ for name, count in gpu_names.items():
245
+ model_str += f"# {name} : {count} \n"
246
+ model_str += "\n"
247
+ return model_str
248
+
249
+
250
+ def generate_config_string(*, stable_output=False):
251
+ import torch._functorch.config
252
+ import torch._inductor.config
253
+
254
+ if stable_output:
255
+ return "# config omitted due to stable_output=True"
256
+
257
+ experimental_config = torch.fx.experimental._config.codegen_config() # type: ignore[attr-defined]
258
+ return f"""\
259
+ import torch._dynamo.config
260
+ import torch._inductor.config
261
+ import torch._functorch.config
262
+ import torch.fx.experimental._config
263
+ {torch._dynamo.config.codegen_config()}
264
+ {torch._inductor.config.codegen_config()}
265
+ {torch._functorch.config.codegen_config()}
266
+ {experimental_config}
267
+ """
268
+
269
+
270
+ def get_minifier_repro_path():
271
+ return os.path.join(minifier_dir(), "minifier_launcher.py")
272
+
273
+
274
+ def helper_for_dump_minify(contents):
275
+ minified_repro_path = get_minifier_repro_path()
276
+ log.warning("Writing minified repro to:\n%s", minified_repro_path)
277
+
278
+ if use_buck:
279
+ BuckTargetWriter(minified_repro_path).write()
280
+ try:
281
+ with open(minified_repro_path, "w") as fd:
282
+ fd.write(contents)
283
+
284
+ except OSError as e:
285
+ log.exception(e)
286
+ raise NotImplementedError("Could not write to {minified_repro_path}") from e
287
+
288
+
289
+ class AccuracyError(Exception):
290
+ pass
291
+
292
+
293
+ def clone_inputs_retaining_gradness(example_inputs):
294
+ """
295
+ This clone inputs is different from utils clone_input. In case of minifier,
296
+ all the tensors are leaf tensors while creating a new graph. So, we set the
297
+ requires_grad field w/o checking the leafness of the tensor.
298
+ """
299
+ cloned_inputs = clone_inputs(example_inputs)
300
+ for idx in range(len(example_inputs)):
301
+ if isinstance(cloned_inputs[idx], torch.Tensor):
302
+ cloned_inputs[idx].requires_grad_(example_inputs[idx].requires_grad)
303
+ return cloned_inputs
304
+
305
+
306
+ def run_fwd_maybe_bwd(gm, args, only_fwd=False, disable_clone=False):
307
+ """
308
+ Runs a forward and possibly backward iteration for a given mod and args.
309
+
310
+ When disable_clone is True, we will use args as-is without cloning.
311
+ This is higher fidelity but we may destroy the args in the process.
312
+ """
313
+ from torch._functorch.aot_autograd import make_boxed_func
314
+
315
+ from .testing import collect_results, reduce_to_scalar_loss, requires_bwd_pass
316
+
317
+ gm = copy.deepcopy(gm)
318
+ if not disable_clone:
319
+ args = clone_inputs_retaining_gradness(args)
320
+
321
+ if hasattr(gm, "zero_grad"):
322
+ gm.zero_grad(True)
323
+
324
+ # TorchInductor returned callable expects lists. So, boxing the call.
325
+ orig_named_parameters = getattr(gm, "named_parameters", None)
326
+ orig_named_buffers = getattr(gm, "named_buffers", None)
327
+ if not hasattr(gm, "_boxed_call") and (
328
+ orig_named_parameters is not None or orig_named_buffers is not None
329
+ ):
330
+ gm = make_boxed_func(gm)
331
+ if orig_named_parameters is not None:
332
+ gm.named_parameters = orig_named_parameters
333
+ if orig_named_buffers is not None:
334
+ gm.named_buffers = orig_named_buffers
335
+
336
+ out = gm(args)
337
+ if only_fwd:
338
+ return out
339
+ if requires_bwd_pass(out):
340
+ loss = reduce_to_scalar_loss(out)
341
+ loss.backward()
342
+ return collect_results(gm, out, None, args)
343
+
344
+
345
+ def same_two_models(
346
+ gm,
347
+ opt_gm,
348
+ example_inputs,
349
+ only_fwd=False,
350
+ *,
351
+ require_fp64=False,
352
+ ignore_non_fp=False,
353
+ ):
354
+ """
355
+ Check two models have same accuracy.
356
+
357
+ require_fp64: if True, raise an error if we unable to calculate the fp64 reference
358
+ ignore_non_fp: if True, do not compare outputs which are not floating point. This
359
+ is mostly useful for the minifier (which wants to avoid quantizing floating point
360
+ error into integer/boolean error)
361
+ """
362
+ from .eval_frame import OptimizedModule
363
+ from .testing import (
364
+ named_buffers_for_optimized_module,
365
+ named_parameters_for_optimized_module,
366
+ )
367
+ from .utils import same
368
+
369
+ if isinstance(gm, OptimizedModule):
370
+ gm.named_parameters = named_parameters_for_optimized_module(gm)
371
+ gm.named_buffers = named_buffers_for_optimized_module(gm)
372
+
373
+ if isinstance(opt_gm, OptimizedModule):
374
+ opt_gm.named_parameters = named_parameters_for_optimized_module(opt_gm)
375
+ opt_gm.named_buffers = named_buffers_for_optimized_module(opt_gm)
376
+
377
+ ref = run_fwd_maybe_bwd(gm, example_inputs, only_fwd)
378
+
379
+ fp64_ref = None
380
+ if config.same_two_models_use_fp64:
381
+ try:
382
+ fp64_model, fp64_examples = cast_to_fp64(
383
+ copy.deepcopy(gm), clone_inputs_retaining_gradness(example_inputs)
384
+ )
385
+ fp64_ref = run_fwd_maybe_bwd(fp64_model, fp64_examples, only_fwd)
386
+ except Exception:
387
+ if require_fp64:
388
+ raise RuntimeError("Could not generate fp64 outputs") # noqa: TRY200
389
+ log.warning("Could not generate fp64 outputs")
390
+
391
+ try:
392
+ res = run_fwd_maybe_bwd(opt_gm, example_inputs, only_fwd)
393
+ except Exception as e:
394
+ # This means that the minified graph is bad/exposes a different problem.
395
+ # As we are checking accuracy here, lets log the exception and return True.
396
+ log.exception(
397
+ "While minifying the program in accuracy minification mode, "
398
+ "ran into a runtime exception which is likely an unrelated issue."
399
+ " Skipping this graph."
400
+ )
401
+ return True
402
+
403
+ passing = same(
404
+ ref,
405
+ res,
406
+ fp64_ref,
407
+ tol=config.repro_tolerance,
408
+ equal_nan=True,
409
+ ignore_non_fp=ignore_non_fp,
410
+ )
411
+ return passing
412
+
413
+
414
+ def cast_dtype_args_to_fp64(model):
415
+ for node in model.graph.nodes:
416
+ if (
417
+ node.op == "call_function"
418
+ and node.target == torch.ops.prims.convert_element_type.default
419
+ ):
420
+ assert len(node.args) == 2
421
+ if is_float_dtype(node.args[1]) and node.args[1] != torch.float64:
422
+ node.args = (node.args[0], torch.float64)
423
+ if node.op == "call_function":
424
+ dtype = node.kwargs.get("dtype")
425
+ if dtype is not None and is_float_dtype(dtype):
426
+ new_kwargs = dict(node.kwargs)
427
+ new_kwargs["dtype"] = torch.float64
428
+ node.kwargs = new_kwargs
429
+
430
+ model.graph.lint()
431
+ model.recompile()
432
+ return model
433
+
434
+
435
+ def cast_to(dtype, model, inputs):
436
+ from torch.utils._pytree import tree_map
437
+
438
+ model = model.to(dtype)
439
+ if dtype == torch.float64:
440
+ # If casting to fp64 for accuracy comparison, we need to
441
+ # replace dtype arguments embedded in the graph with fp64
442
+ model = cast_dtype_args_to_fp64(model)
443
+
444
+ inputs = tree_map(
445
+ lambda x: x.to(dtype)
446
+ if isinstance(x, torch.Tensor) and x.is_floating_point()
447
+ else x,
448
+ inputs,
449
+ )
450
+ return model, inputs
451
+
452
+
453
+ def cast_to_fp64(model, inputs):
454
+ return cast_to(torch.float64, model, inputs)
455
+
456
+
457
+ def backend_accuracy_fails(
458
+ gm,
459
+ example_inputs,
460
+ compiler_fn,
461
+ only_fwd=False,
462
+ *,
463
+ require_fp64=False,
464
+ ignore_non_fp=False,
465
+ ):
466
+ try:
467
+ compiled_gm = compiler_fn(
468
+ copy.deepcopy(gm), clone_inputs_retaining_gradness(example_inputs)
469
+ )
470
+ return not same_two_models(
471
+ gm,
472
+ compiled_gm,
473
+ example_inputs,
474
+ only_fwd,
475
+ require_fp64=require_fp64,
476
+ ignore_non_fp=ignore_non_fp,
477
+ )
478
+ except Exception as e:
479
+ # This means that the minified graph is bad/exposes a different problem.
480
+ # As we are checking accuracy here, lets log the exception and return False.
481
+ log.exception(
482
+ "While minifying the program in accuracy minification mode, "
483
+ "ran into a runtime exception which is likely an unrelated issue."
484
+ " Skipping this graph"
485
+ )
486
+ return False
487
+
488
+
489
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
490
+ # REPRO SUPPORT CODE
491
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
492
+
493
+
494
+ # Helper functions for computing what the default values of tensor
495
+ # values should be. These all coincide with factory functions, e.g., torch.empty
496
+
497
+
498
+ def _stride_or_default(
499
+ stride: Optional["torch._prims_common.StrideType"],
500
+ *,
501
+ shape: "torch._prims_common.ShapeType",
502
+ ) -> "torch._prims_common.StrideType":
503
+ return stride if stride is not None else utils.make_contiguous_strides_for(shape)
504
+
505
+
506
+ def _mk_defaulter(d: T) -> Callable[[Optional[T]], T]:
507
+ return lambda x: x if x is not None else d
508
+
509
+
510
+ _dtype_or_default = _mk_defaulter(torch.float32)
511
+ _device_or_default = _mk_defaulter(torch.device("cpu"))
512
+ _storage_offset_or_default = _mk_defaulter(0)
513
+ _requires_grad_or_default = _mk_defaulter(False)
514
+ _is_leaf_or_default = _mk_defaulter(False)
515
+
516
+
517
+ class NopInputReader:
518
+ def __init__(self):
519
+ self.total = 0
520
+
521
+ def storage(self, storage_hash, nbytes, *, device=None, dtype_hint=None):
522
+ self.total += 1
523
+
524
+ def tensor(self, *args, **kwargs):
525
+ pass
526
+
527
+ def symint(self, *args, **kwargs):
528
+ pass
529
+
530
+
531
+ # TODO: Support bundling the entire repro into a zip file for ease of
532
+ # transferring around
533
+ class InputReader:
534
+ def __init__(self, save_dir=None, *, pbar=None):
535
+ # If None, we will generate random data instead. It's important
536
+ # to natively support this use case as it will allow people to
537
+ # share repros without including the real data, if the problem
538
+ # reproduces even on random data.
539
+ if save_dir is None:
540
+ log.warning("no save_dir specified, will generate random data")
541
+ self.store = ContentStoreReader(save_dir) if save_dir is not None else None
542
+ self.args = []
543
+ self.pbar = pbar
544
+
545
+ def storage(self, storage_hash, nbytes, *, device=None, dtype_hint=None):
546
+ if self.pbar is not None:
547
+ self.pbar.update(1)
548
+ device = _device_or_default(device)
549
+ dtype_hint = _dtype_or_default(dtype_hint)
550
+ if self.store is not None and storage_hash is not None:
551
+ try:
552
+ storage = self.store.read_storage(storage_hash)
553
+ except FileNotFoundError:
554
+ pass
555
+ else:
556
+ if device != storage.device:
557
+ log.warning("device mismatch: %s != %s", device, storage.device)
558
+ # TODO: transfer it to the right device? But failing this
559
+ # way would be very mysterious! Would have been better
560
+ # not to store device in the serialized format...
561
+ return storage
562
+ log.warning("could not load %s, generating random data instead", storage_hash)
563
+ shape = (nbytes // dtype_hint.itemsize,)
564
+ stride = _stride_or_default(None, shape=shape)
565
+ return rand_strided(shape, stride, dtype_hint, device).untyped_storage()
566
+
567
+ def tensor(
568
+ self,
569
+ storage,
570
+ shape,
571
+ stride=None,
572
+ *,
573
+ storage_offset=None,
574
+ dtype=None,
575
+ requires_grad=None,
576
+ is_leaf=None,
577
+ **metadata,
578
+ ):
579
+ stride = _stride_or_default(stride, shape=shape)
580
+ storage_offset = _storage_offset_or_default(storage_offset)
581
+ dtype = _dtype_or_default(dtype)
582
+ is_leaf = _is_leaf_or_default(is_leaf)
583
+ requires_grad = _requires_grad_or_default(requires_grad)
584
+ t = torch.tensor(
585
+ [], dtype=dtype, device=storage.device, requires_grad=requires_grad
586
+ )
587
+ with torch.no_grad():
588
+ t.set_(storage, storage_offset, shape, stride)
589
+ if not is_leaf:
590
+ # Fake up some autograd history in a very naughty way
591
+ with torch.enable_grad():
592
+ t = t.clone(memory_format=torch.preserve_format)
593
+ with torch.no_grad():
594
+ t.set_(storage, storage_offset, shape, stride)
595
+ assert torch._subclasses.meta_utils.safe_is_leaf(t) == is_leaf
596
+ torch._utils.set_tensor_metadata(t, metadata)
597
+ self.args.append(t)
598
+ return t # for BC
599
+
600
+ def symint(self, val):
601
+ self.args.append(val)
602
+ return val # for BC
603
+
604
+
605
+ # Here is our writer strategy:
606
+ # 1. We will stream all of the inputs to disk
607
+ # 2. You can now deterministically randomize the inputs, or reload
608
+ # the inputs from disk
609
+ # 3. You can YOLO run the script without the inputs, in which case
610
+ # we'll fill the inputs with random data and pray. This is the
611
+ # legacy behavior, but it's also useful if you want to find out
612
+ # if we're so broken even random inputs trigger it
613
+ # 4. We could offer an in process "check if the randomized thing
614
+ # works too" but this is delicate so we don't do it
615
+
616
+
617
+ class InputWriter:
618
+ def __init__(self, save_dir, *, stable_hash=False):
619
+ self._lines = []
620
+ # TODO: consider ensuring tensor and storage counters line up?
621
+ self.storage_counter = itertools.count()
622
+ self.save_dir = save_dir
623
+ self.store = (
624
+ ContentStoreWriter(save_dir, stable_hash=stable_hash)
625
+ if save_dir is not None
626
+ else None
627
+ )
628
+ self.seen_storages = {}
629
+
630
+ def lines(self):
631
+ r = [
632
+ "def load_args(reader):",
633
+ ]
634
+ r.extend(f" {l}" for l in self._lines)
635
+ # In case we need to change the internal format of load_args
636
+ # in an FC-breaking way
637
+ r.append("load_args._version = 0")
638
+ return r
639
+
640
+ # Storages are untyped, but we need to initialize them with data if
641
+ # we don't have the real data, so we give a hint saying what kind
642
+ # of initialization may be appropriate
643
+ #
644
+ # If we had a FakeTensor, device_hint tells us what device should be
645
+ def storage(self, untyped_storage, *, dtype_hint=None, device_hint=None) -> str:
646
+ ws = StorageWeakRef(untyped_storage)
647
+ v = self.seen_storages.get(ws)
648
+ if v is not None:
649
+ return v
650
+ v = f"buf{next(self.storage_counter)}"
651
+ maybe_dtype_hint = ""
652
+ if _dtype_or_default(None) != _dtype_or_default(dtype_hint):
653
+ maybe_dtype_hint = f", dtype_hint={dtype_hint!r}"
654
+ # TODO: being optional on device is kind of pointless as the default
655
+ # is CPU but most repros we care about are CUDA
656
+ maybe_device = ""
657
+ device = untyped_storage.device
658
+ if device.type == "meta":
659
+ assert device_hint is not None
660
+ device = device_hint
661
+ if _device_or_default(None) != device:
662
+ maybe_device = f", device={device!r}"
663
+ nbytes = untyped_storage.nbytes()
664
+ storage_hash = None
665
+ if self.store is not None and untyped_storage.device.type != "meta":
666
+ storage_hash = self.store.write_storage(untyped_storage)
667
+ self._lines.append(
668
+ f"{v} = reader.storage({storage_hash!r}, {nbytes!r}{maybe_device}{maybe_dtype_hint})"
669
+ )
670
+ self.seen_storages[ws] = v
671
+ return v
672
+
673
+ def tensor(self, name, t) -> None:
674
+ storage = self.storage(
675
+ t.untyped_storage(), dtype_hint=t.dtype, device_hint=t.device
676
+ )
677
+ args = []
678
+ # NB: this is positional, must come first
679
+ if _stride_or_default(None, shape=t.shape) != t.stride():
680
+ args.append(str(tuple(t.stride())))
681
+ if _dtype_or_default(None) != t.dtype:
682
+ args.append(f"dtype={t.dtype!r}")
683
+ if _storage_offset_or_default(None) != t.storage_offset():
684
+ args.append(f"storage_offset={t.storage_offset()!r}")
685
+ tensor_metadata = torch._utils.get_tensor_metadata(t)
686
+ if tensor_metadata:
687
+ args.extend(f"{k}={v!r}" for k, v in tensor_metadata.items())
688
+ if _requires_grad_or_default(None) != t.requires_grad:
689
+ args.append(f"requires_grad={t.requires_grad!r}")
690
+ is_leaf = torch._subclasses.meta_utils.safe_is_leaf(t)
691
+ if _is_leaf_or_default(None) != is_leaf:
692
+ args.append(f"is_leaf={is_leaf!r}")
693
+ self._lines.append(
694
+ "reader.tensor("
695
+ + ", ".join([storage, str(tuple(t.shape)), *args])
696
+ + f") # {name}"
697
+ )
698
+
699
+ # TODO: this doesn't actually symint atm
700
+ def symint(self, name, val) -> None:
701
+ if isinstance(val, torch.SymInt):
702
+ val = val.node.hint
703
+ self._lines.append(f"reader.symint({val!r}) # {name}")
704
+
705
+
706
+ def aot_graph_input_parser(
707
+ func: Callable[[List[Tensor]], List[Tensor]],
708
+ device: str = "cuda",
709
+ sym_shapes: Optional[Dict[str, int]] = None,
710
+ default_sym_shape: Optional[int] = None,
711
+ ) -> Dict[str, Any]:
712
+ """
713
+ Takes in a function which has been printed with print_readable() and constructs kwargs to run it.
714
+
715
+ Handles Tensor inputs, Symints, and a graph module which might have tensor constants.
716
+
717
+ Consider a function `forward` defined as follows:
718
+
719
+ def forward(self, primals_1: "f32[1001, 6]", primals_2: "f32[s0]", primals_3: "Sym(s0)",):
720
+ _tensor_constant0: "i64[4190]" = self._tensor_constant0
721
+ # Further implementation
722
+
723
+ kwargs = aot_graph_input_parser(forward)
724
+ forward(**kwargs)
725
+ """
726
+
727
+ from torch.fx.graph import dtype_abbrs
728
+
729
+ dtype_map = {value: key for key, value in dtype_abbrs.items()}
730
+ dtype_pattern = "|".join(dtype_abbrs.values())
731
+
732
+ # Extracting the source code from the function
733
+ source = inspect.getsource(func)
734
+
735
+ # Regular expressions
736
+ tensor_assignment_regex = rf"(_tensor_constant\d+): \"({dtype_pattern})\[\s*(.*?)\s*\]\" = self\.(_tensor_constant\d+)"
737
+ tensor_regex = rf"({dtype_pattern})\[\s*(.*?)\s*\]"
738
+ sym_shape_regex = r"Sym\((s\d+)\)"
739
+
740
+ class TensorContainer:
741
+ "Container for tensors as attributes"
742
+ pass
743
+
744
+ # Dictionary for tensors from annotations
745
+ kwargs: Dict[str, Any] = {}
746
+
747
+ sym_shapes = sym_shapes or {}
748
+
749
+ def get_sym_int(symint):
750
+ torch._check(
751
+ symint in sym_shapes or default_sym_shape is not None,
752
+ lambda: f"{symint} not in symbolic_shapes and default sym shape not passed in",
753
+ )
754
+ return sym_shapes.get(symint, default_sym_shape)
755
+
756
+ def gen_tensor(shape, dtype) -> Tensor:
757
+ # Resolve symbolic shapes to concrete values
758
+ resolved_shape = []
759
+ dynamic_dims = []
760
+ for i, dim in enumerate(shape):
761
+ dim = dim.strip()
762
+ if "s" in dim:
763
+ s = get_sym_int(dim)
764
+ resolved_shape.append(s)
765
+ dynamic_dims.append(i)
766
+ else:
767
+ resolved_shape.append(int(dim))
768
+
769
+ constructor = torch.randn if dtype.is_floating_point else torch.zeros
770
+ out = constructor(resolved_shape, dtype=dtype, device=device) # type: ignore[call-arg]
771
+ for d in dynamic_dims:
772
+ torch._dynamo.mark_dynamic(out, d)
773
+ return out
774
+
775
+ # Parse function annotations for tensor generation
776
+ annotations = func.__annotations__
777
+ for param, annotation in annotations.items():
778
+ # Skip 'return' annotation
779
+ if param == "return":
780
+ continue
781
+
782
+ match = re.search(tensor_regex, annotation)
783
+ if match:
784
+ data_type, shape_str = match.groups()
785
+ shape = tuple(shape_str.split(","))
786
+ dtype = dtype_map[data_type]
787
+ kwargs[param] = gen_tensor(shape, dtype)
788
+
789
+ match = re.search(sym_shape_regex, annotation)
790
+ if match:
791
+ kwargs[param] = get_sym_int(match.group(1))
792
+
793
+ if "self" in inspect.signature(func).parameters:
794
+ container = TensorContainer()
795
+ kwargs["self"] = container
796
+ for match in re.finditer(tensor_assignment_regex, source):
797
+ attr_name, data_type, shape_str, _ = match.groups()
798
+ shape = tuple(shape_str.split(","))
799
+ dtype = dtype_map[data_type]
800
+ setattr(container, attr_name, gen_tensor(shape, dtype))
801
+
802
+ return kwargs
venv/lib/python3.10/site-packages/torch/_dynamo/decorators.py ADDED
@@ -0,0 +1,347 @@
1
+ from dataclasses import dataclass
2
+ from typing import TYPE_CHECKING
3
+
4
+ import torch
5
+ from torch.utils._python_dispatch import is_traceable_wrapper_subclass
6
+ from . import trace_rules, variables
7
+ from .comptime import comptime
8
+ from .eval_frame import DisableContext, innermost_fn, RunOnlyContext
9
+ from .exc import IncorrectUsage
10
+ from .external_utils import is_compiling
11
+
12
+ if TYPE_CHECKING:
13
+ from torch._C._dynamo.eval_frame import ( # noqa: F401
14
+ reset_code,
15
+ set_eval_frame,
16
+ set_guard_error_hook,
17
+ skip_code,
18
+ unsupported,
19
+ )
20
+ else:
21
+ for name in dir(torch._C._dynamo.eval_frame):
22
+ if name.startswith("__"):
23
+ continue
24
+ globals()[name] = getattr(torch._C._dynamo.eval_frame, name)
25
+
26
+
27
+ def run(fn=None):
28
+ """Don't do any dynamic compiles, just use prior optimizations"""
29
+ if fn is not None:
30
+ fn = innermost_fn(fn)
31
+ assert callable(fn)
32
+ return RunOnlyContext()(fn)
33
+ return RunOnlyContext()
34
+
35
+
36
+ def disable(fn=None, recursive=True):
37
+ """
38
+ Decorator and context manager to disable TorchDynamo
39
+
40
+ If recursive=True, Dynamo is completely skipped on the decorated function
41
+ frame as well as the recursively invoked functions.
42
+
43
+ If recursive=False, Dynamo skips frames associated with the function code,
44
+ but still process recursively invoked frames.
45
+ """
46
+ if recursive:
47
+ if fn is not None:
48
+ fn = innermost_fn(fn)
49
+ assert callable(fn)
50
+ return DisableContext()(fn)
51
+ return DisableContext()
52
+ else:
53
+ return skip(fn)
54
+
55
+
56
+ def skip(fn=None):
57
+ """
58
+ Skip frames associated with the function code, but still process recursively
59
+ invoked frames
60
+ """
61
+ if fn is None:
62
+ return skip
63
+ fn = innermost_fn(fn)
64
+ assert callable(fn)
65
+ skip_code(fn.__code__)
66
+ fn._torchdynamo_disable = True
67
+ return fn
68
+
69
+
70
+ def assume_constant_result(fn):
71
+ fn._dynamo_marked_constant = True
72
+ return fn
73
+
74
+
75
+ def allow_in_graph(fn):
76
+ """
77
+ Customize which functions TorchDynamo will include in the generated
78
+ graph. Similar to `torch.fx.wrap()`.
79
+ ::
80
+
81
+ torch._dynamo.allow_in_graph(my_custom_function)
82
+
83
+ @torch._dynamo.optimize(...)
84
+ def fn(a):
85
+ x = torch.add(x, 1)
86
+ x = my_custom_function(x)
87
+ x = torch.add(x, 1)
88
+ return x
89
+
90
+ fn(...)
91
+
92
+ Will capture a single graph containing `my_custom_function()`.
93
+ """
94
+ if isinstance(fn, (list, tuple)):
95
+ return [allow_in_graph(x) for x in fn]
96
+ assert callable(fn), "allow_in_graph expects a callable"
97
+ if trace_rules.lookup_callable(fn) != variables.TorchInGraphFunctionVariable:
98
+ trace_rules._disallowed_callable_ids.remove(id(fn))
99
+ trace_rules._allowed_callable_ids.add(id(fn))
100
+ return fn
101
+
102
+
103
+ def _disallow_in_graph_helper(throw_if_not_allowed):
104
+ def inner(fn):
105
+ if isinstance(fn, (list, tuple)):
106
+ return [disallow_in_graph(x) for x in fn]
107
+ assert callable(fn), "disallow_in_graph expects a callable"
108
+ if (
109
+ throw_if_not_allowed
110
+ and trace_rules.lookup_callable(fn)
111
+ != variables.TorchInGraphFunctionVariable
112
+ and trace_rules.lookup(fn) != variables.TorchInGraphFunctionVariable
113
+ ):
114
+ raise IncorrectUsage(
115
+ "disallow_in_graph is expected to be used on an already allowed callable (like torch.* ops). "
116
+ "Allowed callables means callables that TorchDynamo puts as-is in the extracted graph."
117
+ )
118
+ trace_rules._allowed_callable_ids.remove(id(fn))
119
+ trace_rules._disallowed_callable_ids.add(id(fn))
120
+ return fn
121
+
122
+ return inner
123
+
124
+
125
+ def disallow_in_graph(fn):
126
+ """
127
+ Customize which functions TorchDynamo will exclude in the generated
128
+ graph and force a graph break on.
129
+ ::
130
+
131
+ torch._dynamo.disallow_in_graph(torch.sub)
132
+
133
+ @torch._dynamo.optimize(...)
134
+ def fn(a):
135
+ x = torch.add(x, 1)
136
+ x = torch.sub(x, 1)
137
+ x = torch.add(x, 1)
138
+ return x
139
+
140
+ fn(...)
141
+
142
+ Will break the graph on `torch.sub`, and give two graphs each with a
143
+ single `torch.add()` op.
144
+ """
145
+ return _disallow_in_graph_helper(throw_if_not_allowed=True)(fn)
146
+
147
+
148
+ @_disallow_in_graph_helper(throw_if_not_allowed=False)
149
+ def graph_break():
150
+ """Force a graph break"""
151
+ pass
152
+
153
+
154
+ def forbid_in_graph(fn):
155
+ """
156
+ Customize which functions TorchDynamo will assert are not present while tracing.
157
+
158
+ If you want a graph break on this function instead, use disallow_in_graph.
159
+ TODO(voz): We now have allow_in_graph, disallow_in_graph, forbid_in_graph - some more robust
160
+ documentation would not be amiss.
161
+ """
162
+ if isinstance(fn, (list, tuple)):
163
+ return [forbid_in_graph(x) for x in fn]
164
+ assert callable(fn), "forbid_in_graph applies only to callables"
165
+ fn._dynamo_forbidden = True
166
+ return fn
167
+
168
+
169
+ # Helper function to flatten a tensor subclass and apply a function to
170
+ # all inner tensors that match the outer dim. Used to reduce duplication
171
+ # across the various marking APIs.
172
+ def _apply_func_to_inner_tensors_of_same_dim(func, t, *args, **kwargs):
173
+ assert is_traceable_wrapper_subclass(t)
174
+
175
+ attrs, ctx = t.__tensor_flatten__()
176
+ for attr in attrs:
177
+ inner = getattr(t, attr)
178
+ if inner.dim() == t.dim():
179
+ func(inner, *args, **kwargs)
180
+
181
+
182
+ @dataclass(frozen=True)
183
+ class _DimRange:
184
+ """
185
+ This represents a dimension of a tensor and the corresponding
186
+ min and max values it can take. Don't create this
187
+ class directly; instead, use :func:`mark_dynamic`.
188
+ """
189
+
190
+ dim: int
191
+ min: int
192
+ max: int
193
+
194
+
195
+ @forbid_in_graph
196
+ def mark_dynamic(t, index, *, min=None, max=None):
197
+ """
198
+ Mark a tensor as having a dynamic dim and set corresponding min and max range for the dim.
199
+
200
+ [Note - on the state of mark_dynamic]
201
+
202
+ The behavior of having a dynamic dimension on a tensor is governed by a few factors:
203
+
204
+ 1) torch._dynamo.config dynamic_shapes True or False.
205
+ a) dynamic_shapes=True - dynamic_shapes must be True for mark_dynamic to work.
206
+ b) dynamic_shapes=False - This config will raise an exception when used in conjunction with
207
+ mark_dynamic. We will eventually support this.
208
+
209
+ 2) If the dimension is fully constrained - as in, it does not allow more than a single value
210
+ in both eager (torch.compile, torch._dynamo.optimize) mode and export mode (torch._dynamo.export),
211
+ we will raise an error
212
+
213
+ 3) If the dimension is partially constrained - allowing at least 2 values but not the full unbounded
214
+ range of shapes, in eager we will pass it through, but export will raise an error.
215
+
216
+ 4) Attempts to trace this function will explicitly raise. As such, all calls to mark_dynamic must be made
217
+ before torch.compile.
218
+
219
+ """
220
+ if is_traceable_wrapper_subclass(t):
221
+ # default behavior: mirror mark_dynamic() on all inner tensors with same dim as t
222
+ # TODO: Make this configurable via a supported public API
223
+ _apply_func_to_inner_tensors_of_same_dim(
224
+ mark_dynamic, t, index, min=min, max=max
225
+ )
226
+
227
+ if isinstance(index, int):
228
+ if not hasattr(t, "_dynamo_dynamic_indices"):
229
+ t._dynamo_dynamic_indices = set()
230
+ t._dynamo_dynamic_range = set()
231
+ # TODO(voz): Should we bounds check?
232
+ t._dynamo_dynamic_indices.add(index)
233
+ t._dynamo_dynamic_range.add(_DimRange(index, min, max))
234
+ return
235
+
236
+ assert isinstance(index, (list, tuple))
237
+ for i in index:
238
+ mark_dynamic(t, i, min=min, max=max)
239
+
240
+
241
+ @forbid_in_graph
242
+ def maybe_mark_dynamic(t, index):
243
+ """
244
+ Mark a tensor as having a dynamic dim, but don't enforce it (i.e., if this
245
+ dimension ends up getting specialized, don't error).
246
+ """
247
+ if is_traceable_wrapper_subclass(t):
248
+ # default behavior: mirror maybe_mark_dynamic() on all inner tensors with same dim as t
249
+ # TODO: Make this configurable via a supported public API
250
+ _apply_func_to_inner_tensors_of_same_dim(maybe_mark_dynamic, t, index)
251
+
252
+ if isinstance(index, int):
253
+ if not hasattr(t, "_dynamo_weak_dynamic_indices"):
254
+ t._dynamo_weak_dynamic_indices = set()
255
+ # TODO(voz): Should we bounds check?
256
+ t._dynamo_weak_dynamic_indices.add(index)
257
+ return
258
+
259
+ assert isinstance(index, (list, tuple))
260
+ for i in index:
261
+ maybe_mark_dynamic(t, i)
262
+
263
+
264
+ def mark_static(t, index=None):
265
+ """
266
+ Mark a tensor as having a static dim.
267
+
268
+ This will prevent us from attempting to compile it dynamically
269
+ when dynamic=True; this can improve trace-time performance.
270
+
271
+ This has lower precedence than mark_dynamic.
272
+
273
+ Unlike mark_dynamic, this can be done inside a graph, in which case it
274
+ induces specialization on the tensor.
275
+ """
276
+ if is_compiling():
277
+ if index is None:
278
+ for s in t.size():
279
+ comptime.force_static(s)
280
+ else:
281
+ comptime.force_static(t.size(index))
282
+ return
283
+
284
+ if is_traceable_wrapper_subclass(t):
285
+ # default behavior: mirror mark_static() on all inner tensors with same dim as t
286
+ # TODO: Make this configurable via a supported public API
287
+ _apply_func_to_inner_tensors_of_same_dim(mark_static, t, index)
288
+
289
+ if isinstance(index, int):
290
+ if not hasattr(t, "_dynamo_static_indices"):
291
+ t._dynamo_static_indices = set()
292
+ # TODO(voz): Should we bounds check?
293
+ t._dynamo_static_indices.add(index)
294
+ elif index is None:
295
+ for i in range(t.dim()):
296
+ mark_static(t, i)
297
+ else:
298
+ assert isinstance(index, (list, tuple))
299
+ for i in index:
300
+ mark_static(t, i)
301
+
302
+
303
+ @forbid_in_graph
304
+ def mark_static_address(t, guard=True):
305
+ """
306
+ Marks an input tensor whose data_ptr will not change across multiple calls
307
+ to a dynamo-compiled function. This indicates to cudagraphs that an extra allocation
308
+ is not needed for this input. The data_ptr will be guarded if guard=True. Note:
309
+ Tensors marked in this way will be kept alive until `torch._dynamo.reset()` is called.
310
+ """
311
+ if not isinstance(t, torch.Tensor):
312
+ raise TypeError(f"mark_static_address expects a tensor but recieved {type(t)}")
313
+
314
+ if guard:
315
+ t._dynamo_static_input_type = "guarded" # type: ignore[attr-defined]
316
+ else:
317
+ t._dynamo_static_input_type = "unguarded" # type: ignore[attr-defined]
318
+
319
+
320
+ # Note: this carefully avoids eagerly import einops.
321
+ # TODO: we should delete this whole _allow_in_graph_einops logic by approximately 2024 Q2
322
+ def _allow_in_graph_einops():
323
+ import einops
324
+
325
+ try:
326
+ # requires einops > 0.6.1, torch >= 2.0
327
+ from einops._torch_specific import ( # type: ignore[attr-defined] # noqa: F401
328
+ _ops_were_registered_in_torchdynamo,
329
+ )
330
+
331
+ # einops > 0.6.1 will call the op registration logic as it is imported.
332
+ pass
333
+ except ImportError:
334
+ # einops <= 0.6.1
335
+ allow_in_graph(einops.rearrange)
336
+ allow_in_graph(einops.reduce)
337
+ if hasattr(einops, "repeat"):
338
+ allow_in_graph(einops.repeat) # available since einops 0.2.0
339
+ if hasattr(einops, "einsum"):
340
+ allow_in_graph(einops.einsum) # available since einops 0.5.0
341
+ if hasattr(einops, "pack"):
342
+ allow_in_graph(einops.pack) # available since einops 0.6.0
343
+ if hasattr(einops, "unpack"):
344
+ allow_in_graph(einops.unpack) # available since einops 0.6.0
345
+
346
+
347
+ trace_rules.add_module_init_func("einops", _allow_in_graph_einops)
venv/lib/python3.10/site-packages/torch/_dynamo/device_interface.py ADDED
@@ -0,0 +1,199 @@
1
+ import inspect
2
+ from typing import Any, Callable, Dict, Iterable, Optional, Tuple, Type, Union
3
+
4
+ import torch
5
+ from torch._streambase import _EventBase, _StreamBase
6
+
7
+ get_cuda_stream: Optional[Callable[[int], int]]
8
+ if torch.cuda._is_compiled():
9
+ from torch._C import _cuda_getCurrentRawStream as get_cuda_stream
10
+ else:
11
+ get_cuda_stream = None
12
+
13
+ _device_t = Union[torch.device, str, int, None]
14
+
15
+ # Recording the device properties in the main process but used in worker process.
16
+ caching_worker_device_properties: Dict[str, Any] = {}
17
+ caching_worker_current_devices: Dict[str, int] = {}
18
+
19
+
20
+ class DeviceInterfaceMeta(type):
21
+ def __new__(metacls, *args, **kwargs):
22
+ class_member = args[2]
23
+ if "Event" in class_member:
24
+ assert inspect.isclass(class_member["Event"]) and issubclass(
25
+ class_member["Event"], _EventBase
26
+ ), "DeviceInterface member Event should be inherit from _EventBase"
27
+ if "Stream" in class_member:
28
+ assert inspect.isclass(class_member["Stream"]) and issubclass(
29
+ class_member["Stream"], _StreamBase
30
+ ), "DeviceInterface member Stream should be inherit from _StreamBase"
31
+ return super().__new__(metacls, *args, **kwargs)
32
+
33
+
34
+ class DeviceInterface(metaclass=DeviceInterfaceMeta):
35
+ """
36
+ This is a simple device runtime interface for Inductor. It enables custom
37
+ backends to be integrated with Inductor in a device-agnostic way.
38
+ """
39
+
40
+ class device:
41
+ def __new__(cls, device: _device_t):
42
+ raise NotImplementedError()
43
+
44
+ class Worker:
45
+ """
46
+ Worker API to query device properties that will work in multiprocessing
47
+ workers that cannot use the GPU APIs (due to process fork() and
48
+ initialization time issues). Properties are recorded in the main process
49
+ before we fork the workers.
50
+ """
51
+
52
+ @staticmethod
53
+ def set_device(device: int):
54
+ raise NotImplementedError()
55
+
56
+ @staticmethod
57
+ def current_device() -> int:
58
+ raise NotImplementedError()
59
+
60
+ @staticmethod
61
+ def get_device_properties(device: _device_t = None):
62
+ raise NotImplementedError()
63
+
64
+ @staticmethod
65
+ def current_device():
66
+ raise NotImplementedError()
67
+
68
+ @staticmethod
69
+ def set_device(device: _device_t):
70
+ raise NotImplementedError()
71
+
72
+ @staticmethod
73
+ def device_count():
74
+ raise NotImplementedError()
75
+
76
+ @staticmethod
77
+ def is_available() -> bool:
78
+ raise NotImplementedError()
79
+
80
+ @staticmethod
81
+ def stream(stream: torch.Stream):
82
+ raise NotImplementedError()
83
+
84
+ @staticmethod
85
+ def current_stream():
86
+ raise NotImplementedError()
87
+
88
+ @staticmethod
89
+ def set_stream(stream: torch.Stream):
90
+ raise NotImplementedError()
91
+
92
+ @staticmethod
93
+ def _set_stream_by_id(stream_id: int, device_index: int, device_type: int):
94
+ raise NotImplementedError()
95
+
96
+ @staticmethod
97
+ def get_raw_stream():
98
+ raise NotImplementedError()
99
+
100
+ @staticmethod
101
+ def synchronize(device: _device_t = None):
102
+ raise NotImplementedError()
103
+
104
+ @staticmethod
105
+ def get_device_properties(device: _device_t = None):
106
+ raise NotImplementedError()
107
+
108
+ @staticmethod
109
+ def get_compute_capability(device: _device_t = None):
110
+ raise NotImplementedError()
111
+
112
+
113
+ class CudaInterface(DeviceInterface):
114
+ device = torch.cuda.device
115
+
116
+ # register Event and Stream class into the backend interface
117
+ # make sure Event and Stream are implemented and inherited from the _EventBase and _StreamBase
118
+ Event = torch.cuda.Event
119
+ Stream = torch.cuda.Stream
120
+
121
+ class Worker:
122
+ @staticmethod
123
+ def set_device(device: int):
124
+ caching_worker_current_devices["cuda"] = device
125
+
126
+ @staticmethod
127
+ def current_device() -> int:
128
+ if "cuda" in caching_worker_current_devices:
129
+ return caching_worker_current_devices["cuda"]
130
+ return torch.cuda.current_device()
131
+
132
+ @staticmethod
133
+ def get_device_properties(device: _device_t = None):
134
+ if device is not None:
135
+ if isinstance(device, str):
136
+ device = torch.device(device)
137
+ assert device.type == "cuda"
138
+ if isinstance(device, torch.device):
139
+ device = device.index
140
+ if device is None:
141
+ device = CudaInterface.Worker.current_device()
142
+
143
+ if "cuda" not in caching_worker_device_properties:
144
+ device_prop = [
145
+ torch.cuda.get_device_properties(i)
146
+ for i in range(torch.cuda.device_count())
147
+ ]
148
+ caching_worker_device_properties["cuda"] = device_prop
149
+
150
+ return caching_worker_device_properties["cuda"][device]
151
+
152
+ current_device = staticmethod(torch.cuda.current_device)
153
+ set_device = staticmethod(torch.cuda.set_device)
154
+ device_count = staticmethod(torch.cuda.device_count)
155
+ stream = staticmethod(torch.cuda.stream) # type: ignore[assignment]
156
+ current_stream = staticmethod(torch.cuda.current_stream)
157
+ set_stream = staticmethod(torch.cuda.set_stream) # type: ignore[assignment]
158
+ _set_stream_by_id = staticmethod(torch.cuda._set_stream_by_id) # type: ignore[assignment]
159
+ synchronize = staticmethod(torch.cuda.synchronize)
160
+ get_device_properties = staticmethod(torch.cuda.get_device_properties) # type: ignore[assignment]
161
+ get_raw_stream = staticmethod(get_cuda_stream) # type: ignore[arg-type]
162
+
163
+ # Can be mock patched by @patch decorator.
164
+ @staticmethod
165
+ def is_available() -> bool:
166
+ return torch.cuda.is_available()
167
+
168
+ @staticmethod
169
+ def get_compute_capability(device: _device_t = None):
170
+ major, min = torch.cuda.get_device_capability(device)
171
+ return major * 10 + min
172
+
173
+
174
+ device_interfaces: Dict[str, Type[DeviceInterface]] = {}
175
+
176
+
177
+ def register_interface_for_device(
178
+ device: Union[str, torch.device], device_interface: Type[DeviceInterface]
179
+ ):
180
+ if isinstance(device, torch.device):
181
+ device = str(device)
182
+ device_interfaces[device] = device_interface
183
+
184
+
185
+ def get_interface_for_device(device: Union[str, torch.device]) -> Type[DeviceInterface]:
186
+ if isinstance(device, torch.device):
187
+ device = str(device)
188
+ if device in device_interfaces:
189
+ return device_interfaces[device]
190
+ raise NotImplementedError(f"No interface for device {device}")
191
+
192
+
193
+ def get_registered_device_interfaces() -> Iterable[Tuple[str, Type[DeviceInterface]]]:
194
+ return device_interfaces.items()
195
+
196
+
197
+ register_interface_for_device("cuda", CudaInterface)
198
+ for i in range(torch.cuda.device_count()):
199
+ register_interface_for_device(f"cuda:{i}", CudaInterface)
venv/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py ADDED
@@ -0,0 +1,1561 @@
1
+ # mypy: disable-error-code="method-assign"
2
+
3
+ """
4
+ Functions in this file are responsible for modifying the eval frame
5
+ handler at RUNTIME. Therefore, all functions in this file are hot.
6
+ Functions that only execute at compile time should be placed
7
+ in torch._dynamo.convert_frame.
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import contextlib
13
+ import functools
14
+ import inspect
15
+ import logging
16
+ import os
17
+ import sys
18
+ import textwrap
19
+ import threading
20
+ import traceback
21
+ import types
22
+ import warnings
23
+ import weakref
24
+ from enum import Enum
25
+ from os.path import dirname, join
26
+ from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Tuple, Union
27
+ from unittest.mock import patch
28
+
29
+ import torch
30
+ import torch.fx
31
+ import torch.utils._pytree as pytree
32
+ import torch.utils.checkpoint
33
+ from torch import _guards
34
+ from torch._subclasses import fake_tensor
35
+ from torch._utils_internal import log_export_usage
36
+ from torch.export import Constraint
37
+ from torch.export.dynamic_shapes import _process_dynamic_shapes
38
+ from torch.fx.experimental.proxy_tensor import make_fx, maybe_disable_fake_tensor_mode
39
+ from torch.fx.experimental.symbolic_shapes import (
40
+ ConstraintViolationError,
41
+ DimDynamic,
42
+ StatelessSymbolicContext,
43
+ )
44
+ from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo
45
+
46
+ from ..fx import GraphModule
47
+ from .backends.registry import CompilerFn, lookup_backend
48
+
49
+ from .hooks import Hooks
50
+
51
+ # see discussion at https://github.com/pytorch/pytorch/issues/120699
52
+ reset_code = torch._C._dynamo.eval_frame.reset_code # noqa: F401
53
+ set_eval_frame = torch._C._dynamo.eval_frame.set_eval_frame # noqa: F401
54
+ set_guard_error_hook = torch._C._dynamo.eval_frame.set_guard_error_hook # noqa: F401
55
+ skip_code = torch._C._dynamo.eval_frame.skip_code # noqa: F401
56
+ unsupported = torch._C._dynamo.eval_frame.unsupported # noqa: F401
57
+
58
+ from . import config, convert_frame, external_utils, trace_rules, utils
59
+ from .code_context import code_context
60
+ from .exc import CondOpArgsMismatchError, UserError, UserErrorType
61
+ from .mutation_guard import install_generation_tagging_init
62
+ from .types import CacheEntry, DynamoCallback
63
+ from .utils import common_constant_types, compile_times
64
+
65
+ log = logging.getLogger(__name__)
66
+
67
+ from torch._dispatch.python import enable_python_dispatcher
68
+
69
+ always_optimize_code_objects = utils.ExactWeakKeyDictionary()
70
+ null_context = contextlib.nullcontext
71
+
72
+
73
+ import sympy
74
+
75
+
76
+ # See https://github.com/python/typing/pull/240
77
+ class Unset(Enum):
78
+ token = 0
79
+
80
+
81
+ unset = Unset.token
82
+
83
+ guarded_backend_cache = threading.local()
84
+ cached_backends: Dict[int, CompilerFn] = {}
85
+
86
+
87
+ def check_current_backend(backend_obj_id: int):
88
+ """
89
+ Called from guards to check if we need to recompile due to a backend change
90
+ """
91
+ # TODO(jansel): we should move guarded_backend_cache to C++
92
+ try:
93
+ if guarded_backend_cache.skip_backend_check_for_run_only_mode:
94
+ return True
95
+ except AttributeError:
96
+ # Go slightly faster next time
97
+ guarded_backend_cache.skip_backend_check_for_run_only_mode = False
98
+ try:
99
+ current_backend = guarded_backend_cache.current_backend
100
+ except AttributeError:
101
+ current_backend = None
102
+ return (
103
+ # Avoid the dict lookup in case of exact same object
104
+ id(current_backend) == backend_obj_id
105
+ or current_backend == cached_backends.get(backend_obj_id, None)
106
+ )
107
+
108
+
109
+ def _reset_guarded_backend_cache():
110
+ global cached_backends
111
+ guarded_backend_cache.skip_backend_check_for_run_only_mode = False
112
+ guarded_backend_cache.current_backend = None
113
+ for backend in cached_backends.values():
114
+ if hasattr(backend, "reset"):
115
+ backend.reset()
116
+ cached_backends.clear()
117
+
118
+
119
+ def backend_cache_manager(callback: DynamoCallback):
120
+ # callback is False for RunOnlyContext. RunOnlyContext is used
121
+ # as a way to re-use the previous compiled cache.
122
+ # We therefore skip the check and re-use whatever code is already cached.
123
+ # Note: the cache that's actually used depends on the caching policy.
124
+ if callback is False:
125
+
126
+ def change():
127
+ try:
128
+ prev_skip = guarded_backend_cache.skip_backend_check_for_run_only_mode
129
+ except AttributeError:
130
+ prev_skip = False
131
+ guarded_backend_cache.skip_backend_check_for_run_only_mode = True
132
+
133
+ def revert():
134
+ guarded_backend_cache.skip_backend_check_for_run_only_mode = prev_skip
135
+
136
+ return revert
137
+
138
+ else:
139
+ backend = innermost_fn(callback)
140
+
141
+ def change():
142
+ cached_backends.setdefault(id(backend), backend)
143
+ try:
144
+ prev_backend = guarded_backend_cache.current_backend
145
+ except AttributeError:
146
+ prev_backend = None
147
+ guarded_backend_cache.current_backend = backend
148
+
149
+ def revert():
150
+ guarded_backend_cache.current_backend = prev_backend
151
+
152
+ return revert
153
+
154
+ return change
155
+
156
+
157
+ DONT_WRAP_FILES = {
158
+ # For tracing into fx modules
159
+ inspect.getsourcefile(GraphModule),
160
+ join(dirname(dirname(__file__)), "onnx/_internal/fx/dynamo_graph_extractor.py"),
161
+ }
162
+
163
+
164
+ def _debug_get_cache_entry_list(
165
+ code: Union[types.CodeType, Callable[..., Any]]
166
+ ) -> List[CacheEntry]:
167
+ """
168
+ Given a code object or a callable object, retrieve the cache entries
169
+ stored in this code.
170
+ """
171
+ if callable(code):
172
+ code = code.__code__
173
+ return torch._C._dynamo.eval_frame._debug_get_cache_entry_list(code)
174
+
175
+
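A minimal usage sketch added here for illustration only (not part of the upstream file); the function name `toy` is hypothetical, and the entry count is what one would typically expect after a single compilation:

import torch
import torch._dynamo

def toy(x):
    return x.sin() + 1

compiled = torch.compile(toy, backend="eager")
compiled(torch.randn(4))  # trigger one Dynamo compilation of toy's frame

# Inspect the cache entries Dynamo attached to toy's code object
# (typically one entry after a single compilation).
entries = torch._dynamo.eval_frame._debug_get_cache_entry_list(toy)
print(len(entries))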
176
+ class OptimizedModule(torch.nn.Module):
177
+ """
178
+ Wraps the original nn.Module object and later patches its
179
+ forward method with the optimized self.forward method.
180
+ """
181
+
182
+ _torchdynamo_orig_callable: Callable[..., Any]
183
+ get_compiler_config: Callable[[], Any]
184
+
185
+ def __init__(self, mod: torch.nn.Module, dynamo_ctx):
186
+ super().__init__()
187
+ # Installs the params/buffer
188
+ self._orig_mod = mod
189
+ self.dynamo_ctx = dynamo_ctx
190
+ self._initialize()
191
+
192
+ def _initialize(self):
193
+ # Do this stuff in constructor to lower overhead slightly
194
+ if isinstance(self._orig_mod.forward, types.MethodType) and trace_rules.check(
195
+ self._orig_mod.forward
196
+ ):
197
+ # This may be a torch.nn.* instance listed in trace_rules.py, which
198
+ # won't trigger a frame evaluation on its own; as a workaround, wrap it
199
+ # so there is an extra frame we can capture
200
+ self.forward = self.dynamo_ctx(external_utils.wrap_inline(self._orig_mod))
201
+ else:
202
+ # Invoke hooks outside of dynamo, then pick up the inner frame
203
+ self.forward = self.dynamo_ctx(self._orig_mod.__call__)
204
+
205
+ if hasattr(self._orig_mod, "_initialize_hook"):
206
+ self._forward = self.forward
207
+ self.forward = self._call_lazy_check
208
+
209
+ def __getstate__(self):
210
+ state = dict(self.__dict__)
211
+ state.pop("forward", None)
212
+ state.pop("__call__", None)
213
+ return state
214
+
215
+ def __setstate__(self, state):
216
+ self.__dict__ = state
217
+ self._initialize()
218
+
219
+ def __getattr__(self, name):
220
+ if name == "_orig_mod":
221
+ return self._modules["_orig_mod"]
222
+ return getattr(self._orig_mod, name)
223
+
224
+ def _call_lazy_check(self, *args, **kwargs):
225
+ if hasattr(self._orig_mod, "_initialize_hook"):
226
+ # In the case of a lazy module, we want to run
227
+ # the pre-hooks which initialize it.
228
+ # Afterwards, lazy module deletes its pre-hooks
229
+ # to avoid treating it as lazy on subsequent recompile.
230
+ self._orig_mod._infer_parameters(self._orig_mod, args, kwargs)
231
+ return self._forward(*args, **kwargs)
232
+
233
+ def __dir__(self):
234
+ orig_mod_attrs = self._orig_mod.__dir__()
235
+ return orig_mod_attrs + [
236
+ attr for attr in super().__dir__() if attr not in orig_mod_attrs
237
+ ]
238
+
239
+
240
+ def remove_from_cache(f):
241
+ """
242
+ Make sure f.__code__ is not cached to force a recompile
243
+ """
244
+ if isinstance(f, types.CodeType):
245
+ reset_code(f)
246
+ elif hasattr(f, "__code__"):
247
+ reset_code(f.__code__)
248
+ elif hasattr(getattr(f, "forward", None), "__code__"):
249
+ reset_code(f.forward.__code__)
250
+ else:
251
+ from . import reset # type: ignore[attr-defined]
252
+
253
+ reset()
254
+ log.warning("could not determine __code__ for %s", f)
255
+
256
+
257
+ def nothing():
258
+ pass
259
+
260
+
261
+ def always_false():
262
+ return False
263
+
264
+
265
+ def innermost_fn(fn):
266
+ """
267
+ In case of nested _TorchDynamoContext calls, find the innermost
268
+ function. TorchDynamo caches on the fn.__code__ object, so it's necessary to find
269
+ the innermost function to pass on the optimize, run, disable etc.
270
+ """
271
+ unaltered_fn = fn
272
+ while hasattr(unaltered_fn, "_torchdynamo_orig_callable"):
273
+ unaltered_fn = unaltered_fn._torchdynamo_orig_callable
274
+ assert callable(unaltered_fn)
275
+ return unaltered_fn
276
+
277
+
278
+ def make_set_enable_dynamic(enable: bool):
279
+ assert isinstance(enable, bool)
280
+ if enable:
281
+ # Assume everything is dynamic by default
282
+ return config._make_closure_patcher(assume_static_by_default=False)
283
+ else:
284
+ return config._make_closure_patcher(
285
+ automatic_dynamic_shapes=False, assume_static_by_default=True
286
+ )
287
+
288
+
289
+ class _TorchDynamoContext:
290
+ def __init__(
291
+ self,
292
+ callback: DynamoCallback,
293
+ on_enter=nothing,
294
+ backend_ctx_ctor=null_context,
295
+ patch_fn=nothing,
296
+ first_ctx=False,
297
+ *,
298
+ export=False,
299
+ dynamic=None,
300
+ compiler_config=None,
301
+ ):
302
+ super().__init__()
303
+ assert callable(callback) or callback is False or callback is None
304
+ self.callback: DynamoCallback = callback
305
+ self.prior: Union[Unset, DynamoCallback] = unset
306
+ self.first_ctx = first_ctx
307
+ self.export = export
308
+ self.compiler_config = compiler_config
309
+ self.cleanup_fns: List[Callable[[], Any]] = []
310
+ self.enter_exit_hooks = [backend_cache_manager(self.callback)]
311
+ patch_fn()
312
+
313
+ if dynamic is not None:
314
+ self.enter_exit_hooks.append(make_set_enable_dynamic(dynamic))
315
+
316
+ if on_enter is not nothing:
317
+ # this case is not common
318
+ def call_on_enter():
319
+ on_enter()
320
+ return nothing
321
+
322
+ self.enter_exit_hooks.append(call_on_enter)
323
+
324
+ if backend_ctx_ctor is not contextlib.nullcontext:
325
+ # this case is not common
326
+ def call_backend_ctx():
327
+ ctx = backend_ctx_ctor()
328
+ ctx.__enter__()
329
+ return functools.partial(ctx.__exit__, None, None, None)
330
+
331
+ self.enter_exit_hooks.append(call_backend_ctx)
332
+
333
+ def __enter__(self):
334
+ if config.raise_on_ctx_manager_usage:
335
+ raise RuntimeError(
336
+ "torch._dynamo.optimize(...) is used with a context manager. "
337
+ "Please refer to https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html "
338
+ "to use torch._dynamo.optimize(...) as an annotation/decorator. "
339
+ )
340
+ self.cleanup_fns = [enter() for enter in self.enter_exit_hooks]
341
+ self.prior = set_eval_frame(self.callback)
342
+
343
+ def __exit__(self, exc_type, exc_val, exc_tb):
344
+ assert self.prior is not unset
345
+ set_eval_frame(self.prior)
346
+ self.prior = unset
347
+ for cleanup in self.cleanup_fns:
348
+ cleanup()
349
+ self.cleanup_fns.clear()
350
+
351
+ def __call__(self, fn):
352
+ # public api for compiler config/options
353
+ def get_compiler_config():
354
+ return self.compiler_config
355
+
356
+ fn = innermost_fn(fn)
357
+
358
+ # add context containing GraphModule to any GraphModule forward functions
359
+ from torch.fx._lazy_graph_module import _LazyGraphModule
360
+
361
+ if isinstance(fn, _LazyGraphModule) or (
362
+ isinstance(getattr(fn, "__self__", None), _LazyGraphModule)
363
+ and fn.__name__ == "_lazy_forward"
364
+ ):
365
+ # Since dynamo will run the forward method for the GraphModule shortly
366
+ # anyways, it does not hurt to do the real recompilation here if
367
+ # this is a _LazyGraphModule. This makes it easier for dynamo to
368
+ # optimize a _LazyGraphModule.
369
+
370
+ lazy_gm = fn if isinstance(fn, _LazyGraphModule) else fn.__self__
371
+
372
+ _LazyGraphModule.force_recompile(lazy_gm)
373
+
374
+ # Assume that the underlying node metadata of `fn`,
375
+ # a GraphModule instance, accurately represents
376
+ # all instances of type(fn).
377
+ code_context.get_context(lazy_gm.forward.__code__)[
378
+ "orig_graphmodule"
379
+ ] = weakref.ref(lazy_gm)
380
+
381
+ if not isinstance(fn, _LazyGraphModule):
382
+ # replace fn with the real forward method
383
+ fn = lazy_gm.forward
384
+ elif isinstance(fn, GraphModule):
385
+ code_context.get_context(fn.forward.__code__)[
386
+ "orig_graphmodule"
387
+ ] = weakref.ref(fn)
388
+
389
+ # Optimize the forward method of torch.nn.Module object
390
+ if isinstance(fn, torch.nn.Module):
391
+ mod = fn
392
+ new_mod = OptimizedModule(mod, self)
393
+ # Save the function pointer to find the original callable when
394
+ # nesting decorators.
395
+ new_mod._torchdynamo_orig_callable = mod.forward
396
+
397
+ # when compiling torch.nn.Module,
398
+ # provide public api OptimizedModule.get_compiler_config()
399
+ assert not hasattr(new_mod, "get_compiler_config")
400
+ new_mod.get_compiler_config = get_compiler_config
401
+
402
+ return new_mod
403
+ assert callable(fn)
404
+
405
+ try:
406
+ filename = inspect.getsourcefile(fn)
407
+ except TypeError:
408
+ filename = None
409
+ if (
410
+ (filename is None or trace_rules.check(fn))
411
+ and (
412
+ getattr(fn, "__name__", "") not in ["_call_impl", "_wrapped_call_impl"]
413
+ )
414
+ and filename not in DONT_WRAP_FILES
415
+ ):
416
+ # call to a builtin without a frame for us to capture
417
+ fn = external_utils.wrap_inline(fn)
418
+
419
+ callback = self.callback
420
+
421
+ if isinstance(self, DisableContext):
422
+ is_jit_tracing = always_false
423
+ is_fx_tracing = always_false
424
+ else:
425
+ is_jit_tracing = torch._C._is_tracing
426
+ is_fx_tracing = torch.fx._symbolic_trace.is_fx_tracing
427
+
428
+ @functools.wraps(fn)
429
+ def _fn(*args, **kwargs):
430
+ if is_fx_tracing():
431
+ if config.error_on_nested_fx_trace:
432
+ raise RuntimeError(
433
+ "Detected that you are using FX to symbolically trace "
434
+ "a dynamo-optimized function. This is not supported at the moment."
435
+ )
436
+ else:
437
+ return fn(*args, **kwargs)
438
+
439
+ if is_jit_tracing():
440
+ if config.error_on_nested_jit_trace:
441
+ raise RuntimeError(
442
+ "Detected that you are using FX to torch.jit.trace "
443
+ "a dynamo-optimized function. This is not supported at the moment."
444
+ )
445
+ else:
446
+ return fn(*args, **kwargs)
447
+
448
+ cleanups = [enter() for enter in self.enter_exit_hooks]
449
+ prior = set_eval_frame(callback)
450
+ try:
451
+ return fn(*args, **kwargs)
452
+ finally:
453
+ set_eval_frame(prior)
454
+ for cleanup in cleanups:
455
+ cleanup()
456
+
457
+ # hooks to properly handle inlining
458
+ if isinstance(self, DisableContext):
459
+ _fn._torchdynamo_disable = True # type: ignore[attr-defined]
460
+ else:
461
+ _fn._torchdynamo_inline = fn # type: ignore[attr-defined]
462
+
463
+ # Save the function pointer to find the original callable when
464
+ # nesting decorators.
465
+ _fn._torchdynamo_orig_callable = fn # type: ignore[attr-defined]
466
+
467
+ # when compiling user function instead of nn.Module
468
+ # provide public api _fn.get_compiler_config()
469
+ assert not hasattr(_fn, "get_compiler_config")
470
+ _fn.get_compiler_config = get_compiler_config # type: ignore[attr-defined]
471
+
472
+ # If the function is called using torch._dynamo.optimize decorator, we
473
+ # should prevent any type of skipping.
474
+ if callback not in (None, False):
475
+ if not hasattr(fn, "__code__"):
476
+ raise RuntimeError(
477
+ textwrap.dedent(
478
+ """
479
+
480
+ torch._dynamo.optimize is called on a non function object.
481
+ If this is a callable class, please wrap the relevant code into a function and optimize the
482
+ wrapper function.
483
+
484
+ >> class CallableClass:
485
+ >> def __init__(self):
486
+ >> super().__init__()
487
+ >> self.relu = torch.nn.ReLU()
488
+ >>
489
+ >> def __call__(self, x):
490
+ >> return self.relu(torch.sin(x))
491
+ >>
492
+ >> def print_hello(self):
493
+ >> print("Hello world")
494
+ >>
495
+ >> mod = CallableClass()
496
+
497
+ If you want to optimize the __call__ function and other code, wrap that up in a function
498
+
499
+ >> def wrapper_fn(x):
500
+ >> y = mod(x)
501
+ >> return y.sum()
502
+
503
+ and then optimize the wrapper_fn
504
+
505
+ >> opt_wrapper_fn = torch._dynamo.optimize(wrapper_fn)
506
+ """
507
+ )
508
+ )
509
+ always_optimize_code_objects[fn.__code__] = True
510
+
511
+ return _fn
512
+
513
+
514
+ class OptimizeContext(_TorchDynamoContext):
515
+ def __init__(
516
+ self,
517
+ callback,
518
+ backend_ctx_ctor,
519
+ first_ctx=False,
520
+ *,
521
+ export=False,
522
+ dynamic=None,
523
+ compiler_config=None,
524
+ ):
525
+ def on_enter():
526
+ install_generation_tagging_init()
527
+
528
+ super().__init__(
529
+ callback=callback,
530
+ on_enter=on_enter,
531
+ backend_ctx_ctor=backend_ctx_ctor,
532
+ patch_fn=TorchPatcher.patch,
533
+ first_ctx=first_ctx,
534
+ export=export,
535
+ dynamic=dynamic,
536
+ compiler_config=compiler_config,
537
+ )
538
+
539
+
540
+ class RunOnlyContext(_TorchDynamoContext):
541
+ def __init__(self):
542
+ # cudagraph trees rely on the generation increment
543
+ def on_enter():
544
+ torch._dynamo.mutation_guard.GenerationTracker.generation += 1
545
+
546
+ super().__init__(callback=False, on_enter=on_enter)
547
+
548
+
549
+ class DisableContext(_TorchDynamoContext):
550
+ def __init__(self):
551
+ super().__init__(callback=None)
552
+
553
+
554
+ def _optimize_catch_errors(
555
+ compile_fn,
556
+ hooks: Hooks,
557
+ backend_ctx_ctor=null_context,
558
+ export=False,
559
+ dynamic=None,
560
+ compiler_config=None,
561
+ ):
562
+ return OptimizeContext(
563
+ convert_frame.catch_errors_wrapper(compile_fn, hooks),
564
+ backend_ctx_ctor=backend_ctx_ctor,
565
+ first_ctx=True,
566
+ export=export,
567
+ dynamic=dynamic,
568
+ compiler_config=compiler_config,
569
+ )
570
+
571
+
572
+ def get_compiler_fn(compiler_fn):
573
+ from .repro.after_dynamo import wrap_backend_debug
574
+
575
+ if hasattr(compiler_fn, "compiler_name"):
576
+ compiler_str = compiler_fn.compiler_name
577
+ elif isinstance(compiler_fn, str):
578
+ compiler_str = compiler_fn
579
+ else:
580
+ compiler_str = None
581
+ compiler_fn = lookup_backend(compiler_fn)
582
+ return wrap_backend_debug(compiler_fn, compiler_str)
583
+
584
+
585
+ class _NullDecorator(contextlib.nullcontext): # type: ignore[type-arg]
586
+ def __call__(self, fn):
587
+ assert callable(fn)
588
+ return fn
589
+
590
+
591
+ def check_if_dynamo_supported():
592
+ if sys.version_info >= (3, 12):
593
+ raise RuntimeError("Python 3.12+ not yet supported for torch.compile")
594
+
595
+
596
+ def is_dynamo_supported():
597
+ try:
598
+ check_if_dynamo_supported()
599
+ return True
600
+ except Exception:
601
+ return False
602
+
603
+
604
+ def check_if_inductor_supported():
605
+ check_if_dynamo_supported()
606
+
607
+ if sys.platform == "win32":
608
+ raise RuntimeError("Windows not yet supported for inductor")
609
+
610
+
611
+ def is_inductor_supported():
612
+ try:
613
+ check_if_inductor_supported()
614
+ return True
615
+ except Exception:
616
+ return False
617
+
618
+
619
+ def optimize(
620
+ backend="inductor",
621
+ *,
622
+ nopython=False,
623
+ guard_export_fn=None,
624
+ guard_fail_fn=None,
625
+ disable=False,
626
+ dynamic=None,
627
+ ):
628
+ """
629
+ The main entrypoint of TorchDynamo. Do graph capture and call
630
+ backend() to optimize extracted graphs.
631
+
632
+ Args:
633
+ backend: One of two things:
634
+ - Either, a function/callable taking a torch.fx.GraphModule and
635
+ example_inputs and returning a python callable that runs the
636
+ graph faster.
637
+ One can also provide additional context for the backend, like
638
+ torch.jit.fuser("fuser2"), by setting the backend_ctx_ctor attribute.
639
+ See AOTAutogradMemoryEfficientFusionWithContext for the usage.
640
+ - Or, a string backend name in `torch._dynamo.list_backends()`
641
+ nopython: If True, graph breaks will be errors and there will
642
+ be a single whole-program graph.
643
+ disable: If True, turn this decorator into a no-op
644
+ dynamic: If True, upfront compile as dynamic a kernel as possible. If False,
645
+ disable all dynamic shapes support (always specialize). If None, automatically
646
+ detect when sizes vary and generate dynamic kernels upon recompile.
647
+
648
+ Example Usage::
649
+
650
+ @torch._dynamo.optimize()
651
+ def toy_example(a, b):
652
+ ...
653
+ """
654
+ check_if_dynamo_supported()
655
+ # Note: The hooks object could be global instead of passed around, *however* that would make
656
+ # for a confusing API usage and plumbing story wherein we nest multiple .optimize calls.
657
+ # There is some prior art around this: with nesting, backend calls are enforced to use the same
658
+ # compiler. However, this feels onerous for callbacks and hooks, and it feels better to give our users an
659
+ # easier to understand UX at the cost of a little more plumbing on our end.
660
+ hooks = Hooks(guard_export_fn=guard_export_fn, guard_fail_fn=guard_fail_fn)
661
+ torch._C._log_api_usage_once("torch._dynamo.optimize")
662
+ if disable or os.environ.get("TORCHDYNAMO_DISABLE", "") == "1":
663
+ return _NullDecorator()
664
+
665
+ backend = get_compiler_fn(backend)
666
+
667
+ # Find if backend has any extra context manager
668
+ backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context)
669
+
670
+ if nopython:
671
+ return optimize_assert(
672
+ backend,
673
+ dynamic=dynamic,
674
+ hooks=hooks,
675
+ )
676
+ return _optimize_catch_errors(
677
+ convert_frame.convert_frame(backend, hooks=hooks),
678
+ hooks,
679
+ backend_ctx_ctor,
680
+ dynamic=dynamic,
681
+ compiler_config=backend.get_compiler_config()
682
+ if hasattr(backend, "get_compiler_config")
683
+ else None,
684
+ )
685
+
686
+
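An illustrative sketch (editor's addition, not upstream code) of the backend contract described in the docstring above: the backend receives a torch.fx.GraphModule plus example inputs and returns a callable. The names `my_backend` and `fn` are hypothetical:

import torch
import torch._dynamo as dynamo

def my_backend(gm: torch.fx.GraphModule, example_inputs):
    # Called once per captured graph with example inputs.
    print(gm.graph)
    return gm.forward  # run the captured graph unchanged

@dynamo.optimize(my_backend)
def fn(x, y):
    return (x + y).relu()

fn(torch.randn(4), torch.randn(4))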
687
+ # TODO(voz): Consider making "explain" output alongside a run / part of a run
688
+ @patch("torch._dynamo.symbolic_convert.explain", True)
689
+ def explain(f, *extra_args, **extra_kwargs):
690
+ def inner(*args, **kwargs):
691
+ # TODO(voz): Do we want a decorator for this?
692
+ from . import reset # type: ignore[attr-defined]
693
+
694
+ reset()
695
+
696
+ graphs: List[torch.fx.GraphModule] = []
697
+ break_reasons: List[Any] = []
698
+ op_count: int = 0
699
+ ops_per_graph: List[torch.fx.Node] = []
700
+ out_guards: List[_guards.Guard] = []
701
+
702
+ def dynamo_graph_accumulating_compiler(
703
+ gm: torch.fx.GraphModule, example_inputs
704
+ ):
705
+ from .backends.debugging import _explain_graph_detail
706
+
707
+ nonlocal graphs
708
+ nonlocal op_count
709
+ nonlocal ops_per_graph
710
+ nonlocal break_reasons
711
+
712
+ gm, graphs, op_count, ops_per_graph, break_reasons = _explain_graph_detail(
713
+ gm, graphs, op_count, ops_per_graph, break_reasons
714
+ )
715
+
716
+ return gm.forward
717
+
718
+ def guard_export_print(guards):
719
+ nonlocal out_guards
720
+ out_guards.extend(guards)
721
+
722
+ opt_f = optimize(
723
+ dynamo_graph_accumulating_compiler,
724
+ nopython=False,
725
+ guard_export_fn=guard_export_print,
726
+ )(f)
727
+ # TODO(voz): We may have instances of `f` that mutate inputs, we should track sideeffects and reject.
728
+ opt_f(*args, **kwargs)
729
+
730
+ graph_count = len(graphs)
731
+
732
+ # For the explanation summary, dedupe reasons by their innermost stack frame.
733
+ deduped_reasons = {}
734
+ for reason in break_reasons:
735
+ innermost_frame = reason.user_stack[-1]
736
+ # __repr__ uniquely identifies a FrameSummary so we can use it for deduping
737
+ deduped_reasons[repr(innermost_frame)] = reason
738
+
739
+ formatted_list = ""
740
+ for idx, break_reason in enumerate(deduped_reasons.values()):
741
+ formatted_stack = "".join(traceback.format_list(break_reason.user_stack))
742
+ msg = f"{idx + 1}. Reason: {break_reason.reason}\n User Stack: {formatted_stack}\n"
743
+ formatted_list += msg
744
+
745
+ graph_break_count = graph_count - 1
746
+ compile_time = compile_times(repr="str")
747
+
748
+ # TODO(voz): Do we want a decorator for this?
749
+ reset()
750
+ from .backends.debugging import ExplainOutput
751
+
752
+ return ExplainOutput(
753
+ graphs,
754
+ graph_count,
755
+ graph_break_count,
756
+ break_reasons,
757
+ op_count,
758
+ ops_per_graph,
759
+ out_guards,
760
+ compile_time,
761
+ )
762
+
763
+ if extra_args or extra_kwargs:
764
+ warnings.warn(
765
+ "explain(f, *args, **kwargs) is deprecated, use explain(f)(*args, **kwargs) instead. "
766
+ "If you don't migrate, we may break your explain call in the future if your user defined kwargs "
767
+ "conflict with future kwargs added to explain(f)."
768
+ )
769
+ return inner(*extra_args, **extra_kwargs)
770
+ else:
771
+ return inner
772
+
773
+
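A brief sketch (editor's addition, not upstream code) of calling explain; `fn` is a hypothetical function whose print() call forces a graph break:

import torch
import torch._dynamo as dynamo

def fn(x):
    x = x.sin()
    print("side effect")  # not traceable, forces a graph break
    return x.cos()

explanation = dynamo.explain(fn)(torch.randn(8))
print(explanation.graph_count, explanation.graph_break_count)
for reason in explanation.break_reasons:
    print(reason.reason)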
774
+ class FlattenInputOutputSignature(torch.fx.interpreter.Transformer):
775
+ def __init__(
776
+ self,
777
+ m: torch.fx.GraphModule,
778
+ flat_args: Tuple[Any],
779
+ matched_input_elements_positions: List[int],
780
+ flat_results: List[Any],
781
+ matched_output_elements_positions: List[int],
782
+ example_fake_inputs: List[torch.Tensor],
783
+ flat_args_dynamic_dims: List[Set[int]],
784
+ fake_mode: Optional[fake_tensor.FakeTensorMode] = None,
785
+ ):
786
+ super().__init__(m)
787
+
788
+ assert len(flat_args_dynamic_dims) == len(flat_args)
789
+ matched_input_elements_to_fake = {
790
+ val: example_fake_inputs[ix]
791
+ for ix, val in enumerate(matched_input_elements_positions)
792
+ }
793
+
794
+ self.new_args = []
795
+ for i in range(0, len(flat_args)):
796
+ arg = super().placeholder(f"arg{i}", (), {})
797
+ if i in matched_input_elements_to_fake:
798
+ arg.node.meta["val"] = matched_input_elements_to_fake[i]
799
+ else:
800
+ # Fill node.meta["val"] with a fake tensor from the input,
801
+ # if it's not found in matched_input_elements_positions
802
+ if fake_mode is not None and isinstance(flat_args[i], torch.Tensor):
803
+ # TODO(zhxchen17) Also preserve all the user constraints here.
804
+ arg.node.meta["val"] = fake_mode.from_tensor(
805
+ flat_args[i],
806
+ symbolic_context=StatelessSymbolicContext(
807
+ dynamic_sizes=[
808
+ DimDynamic.DYNAMIC
809
+ if d in flat_args_dynamic_dims[i]
810
+ else DimDynamic.STATIC
811
+ for d in range(len(flat_args[i].shape))
812
+ ],
813
+ constraint_sizes=[None] * len(flat_args[i].shape),
814
+ ),
815
+ )
816
+ self.new_args.append(arg)
817
+ self.old_args_gen = (self.new_args[i] for i in matched_input_elements_positions)
818
+ self.matched_output_elements_positions = matched_output_elements_positions
819
+ self.flat_results = flat_results
820
+
821
+ def placeholder(self, target, args, kwargs):
822
+ arg = next(self.old_args_gen)
823
+ if "val" in self.current_node.meta:
824
+ arg.node.meta["val"] = self.current_node.meta["val"]
825
+ if "tensor_dict" in self.current_node.meta:
826
+ arg.node.meta["tensor_dict"] = self.current_node.meta["tensor_dict"]
827
+ if "example_value" in self.current_node.meta:
828
+ arg.node.meta["example_value"] = self.current_node.meta["example_value"]
829
+ return arg
830
+
831
+ def output(self, target, args, kwargs):
832
+ dynamo_result_flat = args[0]
833
+ lookup = [*dynamo_result_flat, *self.new_args]
834
+ new_results_flat = []
835
+ for i in range(len(self.flat_results)):
836
+ if self.matched_output_elements_positions[i] is not None:
837
+ new_results_flat.append(
838
+ lookup[self.matched_output_elements_positions[i]]
839
+ )
840
+ else:
841
+ const_val = self.flat_results[i]
842
+ assert isinstance(const_val, tuple(common_constant_types))
843
+ new_results_flat.append(const_val)
844
+ return super().output(target, (new_results_flat,), {})
845
+
846
+ def run_node(self, n):
847
+ self.current_node = n
848
+ result_proxy = super().run_node(n)
849
+ if "val" in self.current_node.meta:
850
+ result_proxy.node.meta["val"] = self.current_node.meta["val"]
851
+ if "example_value" in self.current_node.meta:
852
+ result_proxy.node.meta["example_value"] = self.current_node.meta[
853
+ "example_value"
854
+ ]
855
+ if self.current_node.op != "output":
856
+ result_proxy.node._rename(
857
+ getattr(self.current_node, "name", result_proxy.node.name)
858
+ )
859
+ return result_proxy
860
+
861
+ def transform(self):
862
+ result_gm = super().transform()
863
+ if "dynamo_flat_name_to_original_fqn" in self.module.meta:
864
+ result_gm.meta["dynamo_flat_name_to_original_fqn"] = self.module.meta[
865
+ "dynamo_flat_name_to_original_fqn"
866
+ ]
867
+ return result_gm
868
+
869
+
870
+ class ExportResult(NamedTuple):
871
+ graph_module: torch.fx.GraphModule
872
+ guards: _guards.GuardsSet
873
+ # NB: Do not add new fields without overriding __iter__; people are
874
+ # destructuring so it is BC-breaking
875
+
876
+
877
+ def check_signature_rewritable(graph):
878
+ input_errors = []
879
+ for node in graph.graph.nodes:
880
+ if node.op == "placeholder":
881
+ assert hasattr(node, "_dynamo_source")
882
+ source = node._dynamo_source
883
+ user_stacks = graph._source_to_user_stacks.get(source)
884
+ if user_stacks is None:
885
+ continue
886
+ assert len(user_stacks) > 0
887
+ # In some cases we may not have a useful stack. Look for a
888
+ # useful stack
889
+ stack = None
890
+ for s in user_stacks:
891
+ if len(s) == 0:
892
+ continue
893
+ stack = s
894
+ break
895
+ if stack is None:
896
+ msg = f"{source.name()}, a closed over free variable"
897
+ else:
898
+ tb = "".join(traceback.format_list(stack))
899
+ extra = ""
900
+ if len(user_stacks) > 1:
901
+ extra = f"(elided {len(user_stacks)-1} more accesses)"
902
+ msg = f"{source.name()}, accessed at:\n{tb}{extra}"
903
+ # TODO: option to print ALL of the stack traces at once
904
+ input_errors.append(msg)
905
+
906
+ if input_errors:
907
+ raise UserError(
908
+ UserErrorType.INVALID_INPUT,
909
+ "Cannot export model which references tensors that are neither "
910
+ "buffers/parameters/constants nor are direct inputs. For each tensor, if you'd "
911
+ "like this tensor to be an explicit input, add it as a dummy argument "
912
+ "to the top-level model definition you are exporting; if you would "
913
+ "like its value to be embedded as an exported constant, wrap its access "
914
+ "in a function marked with @assume_constant_result.\n\n"
915
+ + "\n\n".join(input_errors),
916
+ )
917
+
918
+
919
+ def rewrite_signature(
920
+ f_sig,
921
+ graph,
922
+ fake_mode,
923
+ flat_args,
924
+ in_spec,
925
+ example_fake_inputs,
926
+ graph_captured_input,
927
+ graph_captured_output,
928
+ dynamo_traced_result,
929
+ flat_args_dynamic_dims,
930
+ ):
931
+ orig_args, orig_kwargs = pytree.tree_unflatten(flat_args, in_spec)
932
+
933
+ def check_user_input_output(flat_values, error_type):
934
+ supported_types = [
935
+ torch.Tensor,
936
+ torch.SymInt,
937
+ torch.SymFloat,
938
+ torch.SymBool,
939
+ torch._C.ScriptObject,
940
+ ] + list(common_constant_types)
941
+
942
+ def is_supported_type(val):
943
+ return isinstance(val, tuple(supported_types))
944
+
945
+ value_type = "input" if error_type == UserErrorType.INVALID_INPUT else "output"
946
+ # We only check that the outputs are not None. Inputs can be None.
947
+ for v in flat_values:
948
+ if not is_supported_type(v):
949
+ if error_type == UserErrorType.INVALID_INPUT and v is None:
950
+ continue
951
+
952
+ raise UserError(
953
+ error_type,
954
+ f"It looks like one of the {value_type}s with type `{type(v)}` "
955
+ "is not supported or pytree-flattenable. \n"
956
+ f"Exported graphs {value_type}s can only contain the "
957
+ f"following supported types: {supported_types}. \n"
958
+ "If you are using a custom class object, "
959
+ "please register a pytree_flatten/unflatten function "
960
+ "using `torch.utils._pytree.register_pytree_node` or "
961
+ "`torch.export.register_dataclass`.",
962
+ )
963
+
964
+ check_user_input_output(flat_args, UserErrorType.INVALID_INPUT)
965
+ flat_results_traced, out_spec_traced = pytree.tree_flatten(dynamo_traced_result)
966
+ check_user_input_output(flat_results_traced, UserErrorType.INVALID_OUTPUT)
967
+
968
+ def produce_matching(debug_type, sources, candidates):
969
+ matched_elements_positions: List[Optional[int]] = []
970
+ dict_of_source_vals = {}
971
+ for i, val in enumerate(sources):
972
+ dict_of_source_vals[id(val)] = i
973
+
974
+ for i, val in enumerate(candidates):
975
+ if isinstance(val, tuple(common_constant_types)):
976
+ matched_elements_positions.append(None)
977
+ elif id(val) not in dict_of_source_vals:
978
+ raise AssertionError(
979
+ f"Unexpectedly found a {type(val)} in the {debug_type}.\n"
980
+ 'Please file an issue along with a paste of the logs from TORCH_LOGS="+export"'
981
+ )
982
+ else:
983
+ matched_elements_positions.append(dict_of_source_vals[id(val)])
984
+
985
+ return matched_elements_positions
986
+
987
+ matched_input_elements_positions = produce_matching(
988
+ "inputs", flat_args, graph_captured_input
989
+ )
990
+
991
+ assert graph_captured_output is not None
992
+ matched_output_elements_positions = produce_matching(
993
+ "outputs", list(graph_captured_output) + flat_args, flat_results_traced
994
+ )
995
+
996
+ new_graph = FlattenInputOutputSignature(
997
+ graph,
998
+ flat_args,
999
+ matched_input_elements_positions,
1000
+ flat_results_traced,
1001
+ matched_output_elements_positions,
1002
+ example_fake_inputs,
1003
+ flat_args_dynamic_dims,
1004
+ fake_mode,
1005
+ ).transform()
1006
+
1007
+ # Make the dynamo graph have the same input/output spec as the user code
1008
+ def argument_names(f_sig, args, kwargs) -> List[str]:
1009
+ def signature_to_fullargspec(sig: inspect.Signature):
1010
+ # Get a list of Parameter objects from the Signature object
1011
+ params = list(sig.parameters.values())
1012
+ # Separate positional arguments, keyword-only arguments and varargs/varkw
1013
+ args = [
1014
+ p.name
1015
+ for p in params
1016
+ if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
1017
+ ]
1018
+ kwonlyargs = [
1019
+ p.name for p in params if p.kind == inspect.Parameter.KEYWORD_ONLY
1020
+ ]
1021
+ varargs = next(
1022
+ (p.name for p in params if p.kind == inspect.Parameter.VAR_POSITIONAL),
1023
+ None,
1024
+ )
1025
+ varkw = next(
1026
+ (p.name for p in params if p.kind == inspect.Parameter.VAR_KEYWORD),
1027
+ None,
1028
+ )
1029
+ # Get default values for positional arguments and keyword-only arguments
1030
+ defaults = tuple(
1031
+ p.default
1032
+ for p in params
1033
+ if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
1034
+ and p.default is not inspect.Parameter.empty
1035
+ )
1036
+ kwonlydefaults = {
1037
+ p.name: p.default
1038
+ for p in params
1039
+ if p.kind == inspect.Parameter.KEYWORD_ONLY
1040
+ and p.default is not inspect.Parameter.empty
1041
+ }
1042
+ # Get annotations for parameters and return value
1043
+ annotations = {}
1044
+ if sig.return_annotation:
1045
+ annotations = {"return": sig.return_annotation}
1046
+ for parameter in params:
1047
+ annotations[parameter.name] = parameter.annotation
1048
+ # Return a FullArgSpec object with the extracted attributes
1049
+ return inspect.FullArgSpec(
1050
+ args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations
1051
+ )
1052
+
1053
+ fullargspec = signature_to_fullargspec(f_sig)
1054
+
1055
+ # 1. Map `args` 1-to-1 to positional arguments in original signature.
1056
+ input_strs = fullargspec.args[: len(args)]
1057
+
1058
+ if len(args) > len(fullargspec.args):
1059
+ # 2. If there are more arguments left in `args`, they map to varargs in original
1060
+ # signature. Assign names as {varargs}_0, {varargs}_1, ...
1061
+ assert fullargspec.varargs is not None, "More arguments than expected"
1062
+ input_strs += [
1063
+ f"{fullargspec.varargs}_{i}"
1064
+ for i in range(0, len(args) - len(input_strs))
1065
+ ]
1066
+ elif len(args) < len(fullargspec.args):
1067
+ # 3. If there are fewer arguments in `args` than `fullargspec.args`,
1068
+ # it implies these are arguments either with default values, or provided in
1069
+ # `kwargs`. The former can be safely ignored. Because Dynamo.export does not
1070
+ # export them as part of the function signature. The latter will be handled
1071
+ # in the next step.
1072
+ for unprovided_arg in fullargspec.args[
1073
+ len(args) : -len(fullargspec.defaults or [])
1074
+ ]:
1075
+ assert unprovided_arg in kwargs, f"Missing argument {unprovided_arg}"
1076
+
1077
+ # 4. Keyword arguments provided in `kwargs`.
1078
+ input_strs += list(kwargs.keys())
1079
+
1080
+ # 5. Keyword-only arguments with default values if not provided are not exported
1081
+ # as part of the function signature.
1082
+ for kwonly_arg in fullargspec.kwonlyargs:
1083
+ kwonlydefaults = fullargspec.kwonlydefaults or {}
1084
+ assert (
1085
+ kwonly_arg in kwargs or kwonly_arg in kwonlydefaults
1086
+ ), f"Missing keyword only argument {kwonly_arg}"
1087
+
1088
+ return input_strs
1089
+
1090
+ new_graph.graph._codegen = _PyTreeCodeGen(
1091
+ _PyTreeInfo(
1092
+ argument_names(f_sig, orig_args, orig_kwargs),
1093
+ in_spec,
1094
+ out_spec_traced,
1095
+ )
1096
+ )
1097
+ new_graph.recompile()
1098
+ return new_graph
1099
+
1100
+
1101
+ def export(
1102
+ f: Callable[..., Any],
1103
+ *extra_args,
1104
+ aten_graph: bool = False,
1105
+ pre_dispatch: bool = False,
1106
+ decomposition_table: Optional[
1107
+ Dict[torch._ops.OpOverload, Callable[..., Any]]
1108
+ ] = None,
1109
+ tracing_mode: str = "symbolic",
1110
+ constraints: Optional[List[Constraint]] = None,
1111
+ dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None,
1112
+ assume_static_by_default: bool = False,
1113
+ same_signature: bool = True,
1114
+ disable_constraint_solver: bool = False,
1115
+ _log_export_usage: bool = True,
1116
+ **extra_kwargs,
1117
+ ) -> Callable[..., ExportResult]:
1118
+ """
1119
+ Export an input function f to a format that can be executed outside of PyTorch using the FX graph.
1120
+
1121
+ Args:
1122
+ f (callable): A PyTorch function to be exported.
1123
+
1124
+ aten_graph (bool): If True, exports a graph with ATen operators.
1125
+ If False, exports a graph with Python operators. Default is False.
1126
+
1127
+ pre_dispatch (bool): If True, exports a graph with ATen operators,
1128
+ but before any logic in the PyTorch dispatcher has run.
1129
+ This can be useful if you want to apply further transformations on a graph before running it
1130
+ through autograd, autocast, or any other functionalities that are integrated into the dispatcher.
1131
+ This flag is only valid if aten_graph=True is set.
1132
+ Default is False.
1133
+
1134
+ decomposition_table (dict): A dictionary that maps operators to their decomposition functions.
1135
+ Required if aten_graph or tracing_mode is specified. Default is None.
1136
+
1137
+ tracing_mode (str): If "symbolic", turn on dynamic shapes support. Default is "symbolic".
1138
+
1139
+ constraints: [DEPRECATED: use ``dynamic_shapes`` instead, see below]
1140
+ An optional list of constraints on the dynamic arguments
1141
+ that specify their possible range of shapes. By default, shapes of
1142
+ input torch.Tensors are assumed to be static. If an input torch.Tensor
1143
+ is expected to have dynamic shapes, please use :func:`dynamic_dim`
1144
+ to define :class:`Constraint` objects that specify the dynamics and the possible
1145
+ range of shapes. See :func:`dynamic_dim` docstring for examples on
1146
+ how to use it.
1147
+
1148
+ dynamic_shapes:
1149
+ An optional argument where the type should either be:
1150
+ 1) a dict from argument names of ``f`` to their dynamic shape specifications,
1151
+ 2) a tuple that specifies dynamic shape specifications for each input in original order.
1152
+ If you are specifying dynamism on keyword args, you will need to pass them in the order that
1153
+ is defined in the original function signature.
1154
+
1155
+ The dynamic shape of a tensor argument can be specified as either
1156
+ (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
1157
+ not required to include static dimension indices in this dict, but when they are,
1158
+ they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
1159
+ where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
1160
+ are denoted by None. Arguments that are dicts or tuples / lists of tensors are
1161
+ recursively specified by using mappings or sequences of contained specifications.
1162
+
1163
+ same_signature (bool): If True, rewrite the returned graph's signature to be the same as f.
1164
+
1165
+ disable_constraint_solver (bool): Whether the dim constraint solver must be disabled.
1166
+
1167
+ Returns:
1168
+ A function that given args and kwargs, returns a tuple of (graph, guards)
1169
+ Graph: An FX graph representing the execution of the input PyTorch function with the provided arguments and options.
1170
+ Guards: The guards we accumulated during tracing f above
1171
+
1172
+ Raises:
1173
+ AssertionError: If decomposition_table is specified without setting aten_graph=True,
1174
+ or if graph breaks during tracing in export.
1175
+
1176
+ AssertionError: If Dynamo input and output is not consistent with traced input/output.
1177
+
1178
+ Note - this headerdoc was authored by ChatGPT, with slight modifications by the author.
1179
+ """
1180
+ if _log_export_usage:
1181
+ log_export_usage(event="export.private_api", flags={"_dynamo"})
1182
+
1183
+ # Deal with "local variable referenced before assignment"
1184
+ _f = f
1185
+ _assume_static_by_default = assume_static_by_default
1186
+
1187
+ def inner(*args, **kwargs):
1188
+ nonlocal constraints
1189
+ if constraints is not None:
1190
+ if _log_export_usage:
1191
+ warnings.warn(
1192
+ "Using `constraints` to specify dynamic shapes for export is DEPRECATED "
1193
+ "and will not be supported in the future. "
1194
+ "Please use `dynamic_shapes` instead (see docs on `torch.export.export`).",
1195
+ DeprecationWarning,
1196
+ stacklevel=2,
1197
+ )
1198
+ else:
1199
+ constraints = _process_dynamic_shapes(_f, args, kwargs, dynamic_shapes)
1200
+ f = _f
1201
+ assume_static_by_default = _assume_static_by_default
1202
+ check_if_dynamo_supported()
1203
+ torch._C._log_api_usage_once("torch._dynamo.export")
1204
+ if decomposition_table is not None:
1205
+ assert (
1206
+ aten_graph
1207
+ ), "Specifying a decomposition_table or tracing mode is illegal without setting aten_graph=True"
1208
+ if pre_dispatch:
1209
+ assert aten_graph, "pre_dispatch=True can only be used when aten_graph=True"
1210
+ f = innermost_fn(f)
1211
+ call_to_inspect = f.forward if isinstance(f, torch.nn.Module) else f
1212
+ original_signature = inspect.signature(call_to_inspect)
1213
+ graph = None
1214
+ out_guards = None
1215
+ graph_captured_input = None
1216
+ graph_captured_result: Optional[Tuple[torch.Tensor, ...]] = None
1217
+ fake_mode = None
1218
+
1219
+ def guard_export_print(guards: _guards.GuardsSet):
1220
+ nonlocal out_guards
1221
+ assert (
1222
+ out_guards is None
1223
+ ), "whole graph export entails exactly one guard export"
1224
+ out_guards = guards
1225
+
1226
+ example_inputs = []
1227
+
1228
+ def dynamo_normalization_capturing_compiler(
1229
+ gm: torch.fx.GraphModule, inner_example_inputs
1230
+ ):
1231
+ nonlocal graph
1232
+ assert (
1233
+ graph is None
1234
+ ), "Tried to emit a second graph during export. Tracing through 'f' must produce a single graph."
1235
+ graph = gm
1236
+
1237
+ nonlocal fake_mode, example_inputs
1238
+ # NB: do NOT pass inner_example_inputs here, we are detecting the
1239
+ # Dynamo allocated fake mode, which should be DISTINCT from a
1240
+ # potential outer ambient fake mode which the user provided.
1241
+ # example_inputs is always the user specified inputs, so they
1242
+ # would have the wrong fake mode attached to them
1243
+ fake_mode = _guards.detect_fake_mode()
1244
+ example_inputs = inner_example_inputs
1245
+
1246
+ def result_capturing_wrapper(*graph_inputs):
1247
+ nonlocal graph_captured_result
1248
+ nonlocal graph_captured_input
1249
+
1250
+ graph_captured_input = graph_inputs
1251
+ assert graph is not None
1252
+
1253
+ named_parameters = dict(graph.named_parameters(remove_duplicate=False))
1254
+ named_buffers = dict(graph.named_buffers(remove_duplicate=False))
1255
+
1256
+ ambient_fake_mode = (
1257
+ _guards.detect_fake_mode(graph_inputs)
1258
+ if _guards.detect_fake_mode(graph_inputs) is not None
1259
+ else fake_mode
1260
+ )
1261
+
1262
+ with ambient_fake_mode, enable_python_dispatcher():
1263
+ params_and_buffers = {
1264
+ **named_parameters,
1265
+ **named_buffers,
1266
+ }
1267
+ fake_params_buffers = dict()
1268
+
1269
+ for name, value in params_and_buffers.items():
1270
+ fake_params_buffers[name] = ambient_fake_mode.from_tensor(
1271
+ value, static_shapes=True
1272
+ )
1273
+
1274
+ fake_graph_inputs = pytree.tree_map(
1275
+ ambient_fake_mode.from_tensor, graph_inputs
1276
+ )
1277
+ graph_captured_result = torch.func.functional_call(
1278
+ graph, fake_params_buffers, fake_graph_inputs
1279
+ )
1280
+
1281
+ return graph_captured_result
1282
+
1283
+ return result_capturing_wrapper
1284
+
1285
+ # Note: This is needed by rewrite_signature. We need to put it before
1286
+ # optimize_assert since user program may mutate the inputs.
1287
+ flat_args, in_spec = pytree.tree_flatten((args, kwargs))
1288
+
1289
+ remove_from_cache(f)
1290
+ constraint_violation_error = None
1291
+ if tracing_mode != "symbolic":
1292
+ assume_static_by_default = True
1293
+ with config.patch(
1294
+ specialize_int=True,
1295
+ assume_static_by_default=assume_static_by_default,
1296
+ automatic_dynamic_shapes=False,
1297
+ capture_dynamic_output_shape_ops=True,
1298
+ capture_scalar_outputs=True,
1299
+ ):
1300
+ opt_f = optimize_assert(
1301
+ dynamo_normalization_capturing_compiler,
1302
+ hooks=Hooks(
1303
+ guard_export_fn=guard_export_print,
1304
+ guard_fail_fn=None,
1305
+ ),
1306
+ export=True,
1307
+ export_constraints=constraints,
1308
+ )(f)
1309
+ # TODO(voz): We may have instances of `f` that mutate inputs, we should track sideeffects and reject.
1310
+ try:
1311
+ result_traced = opt_f(*args, **kwargs)
1312
+ except ConstraintViolationError as e:
1313
+ constraint_violation_error = e
1314
+ remove_from_cache(f)
1315
+
1316
+ if (
1317
+ not disable_constraint_solver
1318
+ and (shape_env := getattr(fake_mode, "shape_env", None)) is not None
1319
+ and (dim_constraints := shape_env.dim_constraints) is not None
1320
+ and not isinstance(
1321
+ call_to_inspect, (torch._ops.OpOverloadPacket, torch._ops.OpOverload)
1322
+ )
1323
+ and not trace_rules.check(call_to_inspect)
1324
+ ):
1325
+ dim_constraints.solve()
1326
+ dim_constraints.remove_redundant_dynamic_results()
1327
+ forced_specializations = dim_constraints.forced_specializations()
1328
+ msg = dim_constraints.prettify_results(
1329
+ original_signature, constraint_violation_error, forced_specializations
1330
+ )
1331
+ if constraint_violation_error:
1332
+ constraint_violation_error.args = (
1333
+ constraint_violation_error.args[0] + msg,
1334
+ )
1335
+ else:
1336
+ if forced_specializations:
1337
+ constraint_violation_error = ConstraintViolationError(msg)
1338
+ else:
1339
+ log.info(
1340
+ "Summary of dimension constraints:%s",
1341
+ msg,
1342
+ )
1343
+
1344
+ # Error if we have any constraints on static values
1345
+ for k in shape_env.var_to_range.keys():
1346
+ if isinstance(k, sympy.Integer):
1347
+ constraint_violation_error = ConstraintViolationError(
1348
+ f"{''.join(traceback.format_list(shape_env.var_to_stack[k]))}\n"
1349
+ "It appears that you're trying to set a constraint on a "
1350
+ f"value which we evaluated to have a static value of {k}. "
1351
+ 'Set TORCH_LOGS="+export" for more information.'
1352
+ )
1353
+ if constraint_violation_error:
1354
+ raise constraint_violation_error
1355
+
1356
+ assert (
1357
+ graph is not None
1358
+ ), "Failed to produce a graph during tracing as no tensor operations were found."
1359
+ assert hasattr(graph, "_source_to_user_stacks")
1360
+ assert out_guards is not None, "Failed to produce guards during tracing"
1361
+ assert fake_mode is not None
1362
+
1363
+ log.info(
1364
+ "Dynamo captured graph:\n\n%s", graph.print_readable(print_output=False)
1365
+ )
1366
+
1367
+ # This check needs to happen before aten_graph
1368
+ # because placeholder's _source_node attribute is not preserved by make_fx
1369
+ if same_signature:
1370
+ check_signature_rewritable(graph)
1371
+
1372
+ # NB: This is mostly hitting the cache; Dynamo already converted these
1373
+ example_fake_inputs = [fake_mode.from_tensor(t) for t in example_inputs]
1374
+
1375
+ if aten_graph:
1376
+ # Running the graph with the interpreter is needed to propagate the stack_trace
1377
+ def graph_with_interpreter(*args):
1378
+ with torch.fx.traceback.preserve_node_meta():
1379
+ return torch.fx.Interpreter(graph).run(*args)
1380
+
1381
+ with maybe_disable_fake_tensor_mode(), enable_python_dispatcher(), (
1382
+ fake_mode
1383
+ ):
1384
+ try:
1385
+ graph = make_fx(
1386
+ graph_with_interpreter,
1387
+ decomposition_table=decomposition_table,
1388
+ tracing_mode="real",
1389
+ _allow_non_fake_inputs=True,
1390
+ pre_dispatch=pre_dispatch,
1391
+ _allow_fake_constant=False,
1392
+ )(*example_fake_inputs)
1393
+ except CondOpArgsMismatchError as e:
1394
+ # Wrap the internal error to the user-facing error
1395
+ raise UserError( # noqa: TRY200
1396
+ UserErrorType.DYNAMIC_CONTROL_FLOW,
1397
+ str(e),
1398
+ case_name="cond_operands",
1399
+ )
1400
+
1401
+ assert graph is not None
1402
+ for node in graph.graph.nodes:
1403
+ if node.op == "get_attr" and isinstance(
1404
+ getattr(graph, node.target), torch.Tensor
1405
+ ):
1406
+ node.meta["val"] = fake_mode.from_tensor(
1407
+ getattr(graph, node.target), static_shapes=True
1408
+ )
1409
+
1410
+ if same_signature:
1411
+ flat_args_dynamic_dims = [
1412
+ {c.dim for c in (constraints or ()) if c.w_tensor() is x}
1413
+ for x in flat_args
1414
+ ]
1415
+ graph = rewrite_signature(
1416
+ original_signature,
1417
+ graph,
1418
+ fake_mode,
1419
+ flat_args,
1420
+ in_spec,
1421
+ example_fake_inputs,
1422
+ graph_captured_input,
1423
+ graph_captured_result,
1424
+ result_traced, # type: ignore[possibly-undefined]
1425
+ flat_args_dynamic_dims,
1426
+ )
1427
+ # Store constraints and inputs as metadata for user passes, e.g. turn constraints to runtime check
1428
+ assert graph is not None
1429
+ graph.meta["input_shape_constraints"] = (
1430
+ [constraint.serializable_spec for constraint in constraints]
1431
+ if constraints
1432
+ else []
1433
+ )
1434
+
1435
+ return ExportResult(graph, out_guards)
1436
+
1437
+ if extra_args or extra_kwargs:
1438
+ warnings.warn(
1439
+ "export(f, *args, **kwargs) is deprecated, use export(f)(*args, **kwargs) instead. "
1440
+ "If you don't migrate, we may break your export call in the future if your user defined kwargs "
1441
+ "conflict with future kwargs added to export(f)."
1442
+ )
1443
+ return inner(*extra_args, **extra_kwargs)
1444
+ else:
1445
+ return inner
1446
+
1447
+
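A minimal sketch (editor's addition, not upstream code) showing the two-step call pattern and the destructuring of ExportResult into a graph module and its guards; `fn` is a hypothetical function:

import torch
import torch._dynamo as dynamo

def fn(x):
    return x.sin() + x.cos()

graph_module, guards = dynamo.export(fn)(torch.randn(3))
graph_module.print_readable()   # the captured FX graph
print(type(guards).__name__)    # the accumulated guards for the trace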
1448
+ def optimize_assert(
1449
+ backend,
1450
+ *,
1451
+ hooks=Hooks(None, None),
1452
+ export=False,
1453
+ export_constraints=None,
1454
+ dynamic=None,
1455
+ ):
1456
+ """
1457
+ The same as `torch._dynamo.optimize(backend, nopython=True)`
1458
+ """
1459
+ backend = get_compiler_fn(backend)
1460
+
1461
+ # Find if backend has any extra context manager
1462
+ backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context)
1463
+
1464
+ return _optimize_catch_errors(
1465
+ convert_frame.convert_frame_assert(
1466
+ backend, export=export, export_constraints=export_constraints
1467
+ ),
1468
+ hooks,
1469
+ backend_ctx_ctor,
1470
+ export=export,
1471
+ dynamic=dynamic,
1472
+ )
1473
+
1474
+
1475
+ class TorchPatcher:
1476
+ @staticmethod
1477
+ @functools.lru_cache(None)
1478
+ def patch():
1479
+ # A better way to disable the following would be to decorate the source
1480
+ # functions with @torch._disable_dynamo. However, this causes issues
1481
+ # with torch.deploy internally.
1482
+ from .decorators import disable
1483
+
1484
+ torch.jit.trace = disable(torch.jit.trace)
1485
+ torch.jit.trace_module = disable(torch.jit.trace_module)
1486
+ torch.jit._get_trace_graph = disable(torch.jit._get_trace_graph)
1487
+ torch.fx._symbolic_trace.Tracer.trace = disable(
1488
+ torch.fx._symbolic_trace.Tracer.trace
1489
+ )
1490
+ torch.distributions.Distribution.set_default_validate_args(False)
1491
+
1492
+ from ..optim import (
1493
+ adadelta,
1494
+ adagrad,
1495
+ adam,
1496
+ adamax,
1497
+ adamw,
1498
+ asgd,
1499
+ lbfgs,
1500
+ nadam,
1501
+ radam,
1502
+ rmsprop,
1503
+ rprop,
1504
+ sgd,
1505
+ sparse_adam,
1506
+ )
1507
+
1508
+ optimizer_modules = {
1509
+ adadelta,
1510
+ adagrad,
1511
+ adam,
1512
+ adamax,
1513
+ adamw,
1514
+ asgd,
1515
+ lbfgs,
1516
+ nadam,
1517
+ radam,
1518
+ rmsprop,
1519
+ rprop,
1520
+ sgd,
1521
+ sparse_adam,
1522
+ }
1523
+
1524
+ for opt_mod in optimizer_modules:
1525
+ opt_name = opt_mod.__name__.split(".")[-1]
1526
+ fused_fn_name = f"_fused_{opt_name}"
1527
+ single_tensor_fn_name = f"_single_tensor_{opt_name}"
1528
+
1529
+ if hasattr(opt_mod, fused_fn_name):
1530
+ setattr(
1531
+ opt_mod, fused_fn_name, disable(getattr(opt_mod, fused_fn_name))
1532
+ )
1533
+
1534
+ optimizer_classes = [
1535
+ opt
1536
+ for opt in torch.optim.__dict__.values()
1537
+ if inspect.isclass(opt) and issubclass(opt, torch.optim.Optimizer)
1538
+ ]
1539
+
1540
+ # Note: we don't support sparsity or tracing through backwards
1541
+ excluded_optimizer_classes = {
1542
+ torch.optim.SparseAdam,
1543
+ torch.optim.LBFGS,
1544
+ }
1545
+
1546
+ for opt in optimizer_classes:
1547
+ if opt in excluded_optimizer_classes:
1548
+ opt.step = disable(opt.step)
1549
+
1550
+ if hasattr(opt, "_init_group"):
1551
+ opt._init_group = disable(opt._init_group)
1552
+
1553
+ @staticmethod
1554
+ def suppress_torch_distributed_warnings(fn):
1555
+ def inner_fn(*args, **kwargs):
1556
+ warnings.filterwarnings(
1557
+ "ignore", category=UserWarning, module="torch.distributed"
1558
+ )
1559
+ return fn(*args, **kwargs)
1560
+
1561
+ return inner_fn
venv/lib/python3.10/site-packages/torch/_dynamo/exc.py ADDED
@@ -0,0 +1,335 @@
1
+ import os
2
+ import textwrap
3
+ from enum import auto, Enum
4
+ from traceback import extract_stack, format_exc, format_list, StackSummary
5
+ from typing import cast, NoReturn, Optional
6
+
7
+ import torch._guards
8
+
9
+ from . import config
10
+
11
+ from .utils import counters
12
+
13
+
14
+ def exportdb_error_message(case_name):
15
+ return (
16
+ "For more information about this error, see: "
17
+ + "https://pytorch.org/docs/main/generated/exportdb/index.html#"
18
+ + case_name.replace("_", "-")
19
+ )
20
+
21
+
22
+ import logging
23
+
24
+ log = logging.getLogger(__name__)
25
+ graph_breaks_log = torch._logging.getArtifactLogger(__name__, "graph_breaks")
26
+
27
+
28
+ class TorchDynamoException(RuntimeError):
29
+ pass
30
+
31
+
32
+ class InternalTorchDynamoError(TorchDynamoException):
33
+ pass
34
+
35
+
36
+ class RestartAnalysis(TorchDynamoException):
37
+ pass
38
+
39
+
40
+ class SpeculationRestartAnalysis(RestartAnalysis):
41
+ pass
42
+
43
+
44
+ class UnspecializeRestartAnalysis(RestartAnalysis):
45
+ pass
46
+
47
+
48
+ class SkipFrame(TorchDynamoException):
49
+ pass
50
+
51
+
52
+ class TorchRuntimeError(TorchDynamoException):
53
+ pass
54
+
55
+
56
+ class InvalidBackend(TorchDynamoException):
57
+ def __init__(self, name):
58
+ super().__init__(
59
+ f"Invalid backend: {name!r}, see `torch._dynamo.list_backends()` for available backends."
60
+ )
61
+
62
+
63
+ class ResetRequired(TorchDynamoException):
64
+ def __init__(self):
65
+ super().__init__(
66
+ textwrap.dedent(
67
+ """
68
+ Must call `torch._dynamo.reset()` before changing backends. Detected two calls to
69
+ `torch.compile()` with different backend compiler arguments.
70
+ """
71
+ )
72
+ )
73
+
74
+
75
+ class BackendCompilerFailed(TorchDynamoException):
76
+ def __init__(self, backend_fn, inner_exception):
77
+ self.backend_name = getattr(backend_fn, "__name__", "?")
78
+ self.inner_exception = inner_exception
79
+ msg = f"backend={self.backend_name!r} raised:\n{type(inner_exception).__name__}: {inner_exception}"
80
+ super().__init__(msg)
81
+
82
+
83
+ class Unsupported(TorchDynamoException):
84
+ def __init__(self, msg):
85
+ super().__init__(msg)
86
+ self.real_stack = torch._guards.TracingContext.extract_stack()
87
+ self.msg = msg
88
+ self.category: Optional[str] = None
89
+ self.add_to_stats()
90
+
91
+ def remove_from_stats(self):
92
+ assert self.category is not None
93
+ counters[self.category][self.msg] -= 1
94
+ if counters[self.category][self.msg] <= 0:
95
+ del counters[self.category][self.msg]
96
+
97
+ def add_to_stats(self, category="unimplemented"):
98
+ self.category = category
99
+ counters[category][self.msg] += 1
100
+
101
+
102
+ class RecompileError(TorchDynamoException):
103
+ pass
104
+
105
+
106
+ class ArgsMismatchError(Unsupported):
107
+ def __init__(self, msg):
108
+ super().__init__(msg)
109
+
110
+
111
+ class AttributeMutationError(Unsupported):
112
+ def __init__(self, msg):
113
+ super().__init__(msg)
114
+
115
+
116
+ class CondOpArgsMismatchError(ArgsMismatchError):
117
+ """
118
+ Internal error from cond() due to arguments mismatch.
119
+ """
120
+
121
+ def __init__(self, msg):
122
+ super().__init__(msg)
123
+
124
+
125
+ class UserErrorType(Enum):
126
+ DYNAMIC_CONTROL_FLOW = auto()
127
+ ANTI_PATTERN = auto()
128
+ STANDARD_LIBRARY = auto()
129
+ CONSTRAINT_VIOLATION = auto()
130
+ DYNAMIC_DIM = auto()
131
+ INVALID_INPUT = auto()
132
+ INVALID_OUTPUT = auto()
133
+
134
+
135
+ class UserError(Unsupported):
136
+ def __init__(self, error_type: UserErrorType, msg, case_name=None):
137
+ """
138
+ Errors that would be valid in eager mode but are not supported in TorchDynamo.
139
+ The error message should tell the user about next actions.
140
+
141
+ error_type: Type of user error
142
+ msg: Actionable error message
143
+ case_name: (Optional) Unique name (snake case) for the usage example in exportdb.
144
+ """
145
+ if case_name is not None:
146
+ assert isinstance(case_name, str)
147
+ if msg.endswith("."):
148
+ msg += " "
149
+ else:
150
+ msg += "\n"
151
+ msg += exportdb_error_message(case_name)
152
+ super().__init__(msg)
153
+ self.error_type = error_type
154
+ self.message = msg
155
+
156
+
157
+ class UncapturedHigherOrderOpError(TorchDynamoException):
158
+ pass
159
+
160
+
161
+ class IncorrectUsage(Exception):
162
+ pass
163
+
164
+
165
+ # These exceptions are ok to fallback to eager/graph_break.
166
+ exceptions_allowed_to_be_fallback = (
167
+ torch._subclasses.fake_tensor.DataDependentOutputException,
168
+ torch._subclasses.fake_tensor.DynamicOutputShapeException,
169
+ torch._subclasses.fake_tensor.UnsupportedOperatorException,
170
+ torch._subclasses.fake_tensor.UnsupportedFakeTensorException,
171
+ )
172
+
173
+
174
+ def unimplemented_with_warning(e: Exception, code, msg: str) -> NoReturn:
175
+ # This function calls unimplemented internally and eventually graph breaks
176
+ # or falls to eager. unimplemented itself does not print any user warnings,
177
+ # i.e., its very silent. This helper function is intended when an error is
178
+ # encountered in the torch.compile stack which is worth showing as warning
179
+ # to the user. For example, if AOT Autograd backend fails with a fake tensor
180
+ # exception, its ok to fallback to eager but not silently. Here, we can use
181
+ # this function to log the message and the stack trace.
182
+ graph_break_msg = format_error_msg_verbose(e, code)
183
+ graph_breaks_log.debug("%s", graph_break_msg)
184
+ log.warning(msg)
185
+ raise unimplemented(msg) from e
186
+
187
+
188
+ def unimplemented(msg: str) -> NoReturn:
189
+ assert msg != os.environ.get("BREAK", False)
190
+ raise Unsupported(msg)
191
+
192
+
193
+ def warning(msg: str) -> None:
194
+ counters["warnings"][msg] += 1
195
+ assert msg != os.environ.get("BREAK", False)
196
+
197
+
198
+ # KeyError has special handling for its args
199
+ # see https://github.com/python/cpython/blob/3.11/Objects/exceptions.c#L2534 for details
200
+ class KeyErrorMsg:
201
+ def __init__(self, value):
202
+ self.value = value
203
+
204
+ def __str__(self):
205
+ return str(self.value)
206
+
207
+ def __repr__(self) -> str:
208
+ return self.__str__()
209
+
210
+
211
+ def augment_exc_message(exc: Exception, msg: str = "\n", export: bool = False) -> None:
212
+ import traceback
213
+
214
+ exc.innermost_user_frame_summary = None # type: ignore[attr-defined]
215
+
216
+ real_stack = get_real_stack(exc)
217
+ if real_stack is not None and len(real_stack) > 0:
218
+ exc.innermost_user_frame_summary = real_stack[-1] # type: ignore[attr-defined]
219
+ msg += f"\nfrom user code:\n {''.join(traceback.format_list(real_stack))}"
220
+
221
+ if config.replay_record_enabled and hasattr(exc, "record_filename"):
222
+ msg += f"\nLast frame execution written to {exc.record_filename}. To run only this frame while debugging, run\
223
+ torch._dynamo.replay('{exc.record_filename}').\n"
224
+
225
+ if not config.verbose and hasattr(exc, "real_stack"):
226
+ msg += '\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n'
227
+
228
+ if hasattr(exc, "inner_exception") and hasattr(
229
+ exc.inner_exception, "minifier_path"
230
+ ):
231
+ if hasattr(exc.inner_exception, "buck_command"):
232
+ msg += (
233
+ f"\nMinifier script written to {exc.inner_exception.minifier_path}. Run "
234
+ f"this buck command to find the smallest traced graph "
235
+ f"which reproduces this error: {exc.inner_exception.buck_command}\n"
236
+ )
237
+ else:
238
+ msg += (
239
+ f"\nMinifier script written to {exc.inner_exception.minifier_path}. Run "
240
+ "this script to find the smallest traced graph which reproduces this error.\n"
241
+ )
242
+
243
+ if not config.suppress_errors and not export:
244
+ msg += (
245
+ "\n\n"
246
+ "You can suppress this exception and fall back to eager by setting:\n"
247
+ " import torch._dynamo\n"
248
+ " torch._dynamo.config.suppress_errors = True\n"
249
+ )
250
+
251
+ old_msg = "" if len(exc.args) == 0 else str(exc.args[0])
252
+
253
+ if isinstance(exc, KeyError):
254
+ exc.args = (KeyErrorMsg(old_msg + msg),) + exc.args[1:]
255
+ else:
256
+ new_msg = old_msg + msg
257
+ exc.args = (new_msg,) + exc.args[1:]
258
+
259
+
260
+ def get_real_stack(exc: Exception, frame=None) -> Optional[StackSummary]:
261
+ real_stack = getattr(exc, "real_stack", None)
262
+ if real_stack is None:
263
+ return None
264
+
265
+ # NB: it's possible for real_stack to be []; we still attempt to
266
+ # report a stack anyway because the stack_above_dynamo may still
267
+ # be useful for debugging
268
+
269
+ stack_above_dynamo = []
270
+ if frame is not None:
271
+ # NB: frame is PyInterpreterFrame on Python 3.11 and later,
272
+ # not a TRUE frame object. You can't actually feed it
273
+ # to traceback because it doesn't have enough information.
274
+ # To solve this problem, we technically should just materialize
275
+ # the frame, the same way _PyFrame_GetFrameObject would do
276
+ # (but we cannot actually do this, because this populates
277
+ # frame_obj field, which default eval frame doesn't like).
278
+ #
279
+ # Fortunately, in this case, we can hack it: there's no need
280
+ # to actually use the truly top frame, we can just extract
281
+ # from where we are right now and rely on filter_stack to
282
+ # get rid of all the dynamo frames. For ease of testing
283
+ # we apply this behavior to ALL Python versions
284
+ stack_above_dynamo = filter_stack(extract_stack())
285
+
286
+ return cast(StackSummary, stack_above_dynamo + real_stack)
287
+
288
+
289
+ # filter out all frames after entering dynamo
290
+ def filter_stack(stack):
291
+ user_stack = []
292
+ for frame in stack:
293
+ if "convert_frame" in frame.filename:
294
+ break
295
+ if "eval_frame" in frame.filename or "torch._dynamo.optimize(" in frame.line:
296
+ continue
297
+ user_stack.append(frame)
298
+
299
+ return user_stack
300
+
301
+
302
+ def format_error_msg_verbose(
303
+ exc: Exception, code, record_filename=None, frame=None
304
+ ) -> str:
305
+ msg = (
306
+ f"WON'T CONVERT {code.co_name} {code.co_filename} line {code.co_firstlineno}\n"
307
+ )
308
+ msg += "=" * 10 + " TorchDynamo Stack Trace " + "=" * 10 + "\n"
309
+ msg += format_exc()
310
+ real_stack = get_real_stack(exc, frame)
311
+ if real_stack is not None:
312
+ msg += (
313
+ "\n"
314
+ + "=" * 10
315
+ + " The above exception occurred while processing the following code "
316
+ + "=" * 10
317
+ + "\n\n"
318
+ )
319
+ msg += "".join(format_list(real_stack))
320
+ msg += "\n"
321
+ msg += "=" * 10
322
+
323
+ return msg
324
+
325
+
326
+ def format_error_msg(exc: Exception, code, record_filename=None, frame=None) -> str:
327
+ msg = os.linesep * 2
328
+
329
+ if config.verbose:
330
+ msg = format_error_msg_verbose(exc, code, record_filename, frame)
331
+ else:
332
+ msg = f"WON'T CONVERT {code.co_name} {code.co_filename}\
333
+ line {code.co_firstlineno} \ndue to: \n{format_exc()}"
334
+
335
+ return msg
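
A minimal sketch of how these exceptions surface in practice: compiling with `fullgraph=True` turns a graph break (raised internally via `unimplemented()` as `Unsupported`) into a user-visible error whose text is typically run through `augment_exc_message`. The example function `fn` below is hypothetical, and the exact exception type and message depend on the installed PyTorch version.

    import torch
    import torch._dynamo

    def fn(x):
        x = x + 1
        torch._dynamo.graph_break()  # forces a graph break while tracing
        return x * 2

    compiled = torch.compile(fn, fullgraph=True)

    try:
        compiled(torch.ones(3))
    except Exception as e:  # typically torch._dynamo.exc.Unsupported
        # The message usually carries the "from user code" frame summary and
        # a verbosity hint appended by augment_exc_message().
        print(type(e).__name__)
        print(e)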
venv/lib/python3.10/site-packages/torch/_dynamo/external_utils.py ADDED
@@ -0,0 +1,103 @@
+ # This module contains functions that *will be allowed* by dynamo
+
+ import functools
+
+ import torch
+ import torch.utils._pytree as pytree
+
+ try:
+     import numpy as np
+ except ModuleNotFoundError:
+     np = None  # type: ignore[assignment]
+
+
+ def is_compiling() -> bool:
+     """
+     Indicates whether we are tracing/compiling with torch.compile() or torch.export().
+
+     If need to check specifically that TorchDynamo is used, then use
+     torch.compiler.is_dynamo_compiling().
+
+     TODO(khabinov): we should deprecate this function and use one of these two:
+     * torch.compiler.is_compiling(),
+     * torch.compiler.is_dynamo_compiling().
+     It will depend on the context where to use what.
+     """
+     return torch.compiler.is_compiling()
+
+
+ def wrap_inline(fn):
+     """
+     Create an extra frame around fn that is not in skipfiles
+     """
+
+     @functools.wraps(fn)
+     def inner(*args, **kwargs):
+         return fn(*args, **kwargs)
+
+     return inner
+
+
+ def call_hook(hook, *args):
+     """
+     Used by compiled autograd to handle hook returning None
+     """
+     result = hook(*args)
+     if result is None:
+         return args[0]
+     return result
+
+
+ def wrap_numpy(f):
+     r"""Decorator that turns a function from ``np.ndarray``s to ``np.ndarray``s into a function
+     from ``torch.Tensor``s to ``torch.Tensor``s.
+     """
+     if not np:
+         return f
+
+     @functools.wraps(f)
+     def wrap(*args, **kwargs):
+         args, kwargs = pytree.tree_map_only(
+             torch.Tensor, lambda x: x.numpy(), (args, kwargs)
+         )
+         out = f(*args, **kwargs)
+         return pytree.tree_map_only(np.ndarray, lambda x: torch.as_tensor(x), out)
+
+     return wrap
+
+
+ class FakeContext:
+     def __init__(self, saved_tensors):
+         # this will cache the results of saved_tensors
+         # and will no longer call into c++ binding
+         self.saved_tensors = saved_tensors
+
+
+ def call_backward(backward_fn, saved_tensors, *args):
+     grads = backward_fn(FakeContext(saved_tensors), *args)
+
+     # in eager, we wrap in a tuple when there's only one grad output
+     if type(grads) is not tuple:
+         grads = (grads,)
+
+     return grads
+
+
+ def untyped_storage_size(x: torch.Tensor):
+     return x.untyped_storage().size()
+
+
+ def call_hook_from_backward_state(*args, bw_state, hook_name: str, **kwargs):
+     return getattr(bw_state, hook_name)(*args, **kwargs)
+
+
+ def call_module_hooks_from_backward_state(
+     _, result, *args, bw_state, hooks_name: str, module_name: str
+ ):
+     module = getattr(bw_state, module_name)
+     hooks = getattr(bw_state, hooks_name)
+     for hook in hooks:
+         new_result = hook(module, result, *args)
+         if new_result is not None:
+             result = new_result
+     return result
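
As a quick illustration of `wrap_numpy` above (assuming NumPy is installed): the decorator converts incoming `torch.Tensor` arguments to `np.ndarray`s, runs the NumPy-written body, and converts `np.ndarray` outputs back to tensors. The `numpy_mean` function below is a hypothetical example, not part of the module.

    import numpy as np
    import torch
    from torch._dynamo.external_utils import wrap_numpy

    @wrap_numpy
    def numpy_mean(a: np.ndarray) -> np.ndarray:
        # plain NumPy body; tensors were converted to ndarrays on the way in
        return a.mean(axis=0)

    out = numpy_mean(torch.arange(6, dtype=torch.float32).reshape(2, 3))
    print(type(out), out)  # <class 'torch.Tensor'> tensor([1.5000, 2.5000, 3.5000])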
venv/lib/python3.10/site-packages/torch/_dynamo/funcname_cache.py ADDED
@@ -0,0 +1,57 @@
+ import tokenize
+
+ from typing import Dict, List, Optional
+
+ cache: Dict[str, Dict[int, str]] = {}
+
+
+ def clearcache() -> None:
+     cache.clear()
+
+
+ def _add_file(filename: str) -> None:
+     try:
+         with open(filename) as f:
+             tokens = list(tokenize.generate_tokens(f.readline))
+     except OSError:
+         cache[filename] = {}
+         return
+
+     # NOTE: undefined behavior if file is not valid Python source,
+     # since tokenize will have undefined behavior.
+     result: Dict[int, str] = {}
+     # current full funcname, e.g. xxx.yyy.zzz
+     cur_name = ""
+     cur_indent = 0
+     significant_indents: List[int] = []
+
+     for i, token in enumerate(tokens):
+         if token.type == tokenize.INDENT:
+             cur_indent += 1
+         elif token.type == tokenize.DEDENT:
+             cur_indent -= 1
+             # possible end of function or class
+             if significant_indents and cur_indent == significant_indents[-1]:
+                 significant_indents.pop()
+                 # pop the last name
+                 cur_name = cur_name.rpartition(".")[0]
+         elif (
+             token.type == tokenize.NAME
+             and i + 1 < len(tokens)
+             and tokens[i + 1].type == tokenize.NAME
+             and (token.string == "class" or token.string == "def")
+         ):
+             # name of class/function always follows class/def token
+             significant_indents.append(cur_indent)
+             if cur_name:
+                 cur_name += "."
+             cur_name += tokens[i + 1].string
+             result[token.start[0]] = cur_name
+
+     cache[filename] = result
+
+
+ def get_funcname(filename: str, lineno: int) -> Optional[str]:
+     if filename not in cache:
+         _add_file(filename)
+     return cache[filename].get(lineno, None)
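
For illustration, `get_funcname` resolves a (filename, lineno) pair to the dotted class/function name, but only for lines on which a `class` or `def` keyword actually appears, since `_add_file` records `token.start[0]` of those tokens only. A small hypothetical sketch (the temporary file and its contents are made up for the example):

    import tempfile
    import textwrap

    from torch._dynamo.funcname_cache import get_funcname

    src = textwrap.dedent(
        """\
        class Outer:
            def method(self):
                return 1

        def standalone():
            return 2
        """
    )

    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write(src)
        path = f.name

    print(get_funcname(path, 1))  # "Outer"
    print(get_funcname(path, 2))  # "Outer.method"
    print(get_funcname(path, 3))  # None -- only def/class lines are indexed
    print(get_funcname(path, 5))  # "standalone"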