applied-ai-018 commited on
Commit
eb19839
·
verified ·
1 Parent(s): a618689

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step120/zero/1.word_embeddings.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step120/zero/10.input_layernorm.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step120/zero/10.input_layernorm.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  7. ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  8. ckpts/universal/global_step120/zero/4.post_attention_layernorm.weight/exp_avg.pt +3 -0
  9. ckpts/universal/global_step120/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  10. ckpts/universal/global_step120/zero/7.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  11. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/comptime.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/device_interface.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/eval_frame.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/exc.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/external_utils.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/funcname_cache.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/logging.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/polyfill.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/profiler.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/tensor_version_op.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_minifier_common.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/trace_rules.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py +0 -0
  25. venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/onnxrt.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/torchxla.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/_dynamo/backends/common.py +112 -0
  29. venv/lib/python3.10/site-packages/torch/_dynamo/backends/cudagraphs.py +239 -0
  30. venv/lib/python3.10/site-packages/torch/_dynamo/backends/debugging.py +289 -0
  31. venv/lib/python3.10/site-packages/torch/_dynamo/backends/distributed.py +612 -0
  32. venv/lib/python3.10/site-packages/torch/_dynamo/backends/inductor.py +16 -0
  33. venv/lib/python3.10/site-packages/torch/_dynamo/backends/onnxrt.py +37 -0
  34. venv/lib/python3.10/site-packages/torch/_dynamo/backends/registry.py +115 -0
  35. venv/lib/python3.10/site-packages/torch/_dynamo/backends/tensorrt.py +14 -0
  36. venv/lib/python3.10/site-packages/torch/_dynamo/backends/torchxla.py +75 -0
  37. venv/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py +172 -0
  38. venv/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py +0 -0
  39. venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_dynamo.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py +932 -0
  43. venv/lib/python3.10/site-packages/torch/_dynamo/repro/after_dynamo.py +566 -0
  44. venv/lib/python3.10/site-packages/torch/_dynamo/variables/__init__.py +151 -0
  45. venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/__init__.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builtin.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/1.word_embeddings.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69bc159dc60ca0fa15d189e9ea5a5afe2fdb1fd1843f43d423706e3ef48e0582
3
+ size 415237419
ckpts/universal/global_step120/zero/10.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17eaa0cccd5fef6a55674225f15d60f3ae6ea4c4a93b3558422135b52d01ee46
3
+ size 9372
ckpts/universal/global_step120/zero/10.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1dc0c4eb90385736831009786614e41bbc14440018e0ca45547e827d894162a
3
+ size 9387
ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc37920f0413b3804caac3df9bab38fd1ba77bb527e0a092ca06fa4c268c1dd9
3
+ size 33555612
ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bed44ee6d530f1d12e0af63406c2de39ddcd545f5234db79633ad832537d5fae
3
+ size 33555627
ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a2890cb784a3591974935bb944c6b8f4ad401da0bc8fce465a4714a8c04f911
3
+ size 33555533
ckpts/universal/global_step120/zero/19.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7532df6a0350673df2366907c0091363072a8a4b9f12bbf9a9793c7c8e5f64e
3
+ size 9387
ckpts/universal/global_step120/zero/4.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cbc8c554279d3831a4aaba7844cb92c0de84e792dc6406c762163ca7974c1a59
3
+ size 9372
ckpts/universal/global_step120/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d48d36ff4c24763c96242ad3ff413431a60d7ccb26095c441b892dfffdcc692
3
+ size 33555612
ckpts/universal/global_step120/zero/7.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45ee432c24153973ef44a9001e0b6398de0380a298b6676ed86d7897b1f2c23d
3
+ size 33555533
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/comptime.cpython-310.pyc ADDED
Binary file (15 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/device_interface.cpython-310.pyc ADDED
Binary file (7.55 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/eval_frame.cpython-310.pyc ADDED
Binary file (43.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/exc.cpython-310.pyc ADDED
Binary file (10.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/external_utils.cpython-310.pyc ADDED
Binary file (3.44 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/funcname_cache.cpython-310.pyc ADDED
Binary file (1.44 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc ADDED
Binary file (37.1 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/logging.cpython-310.pyc ADDED
Binary file (1.16 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/polyfill.cpython-310.pyc ADDED
Binary file (1.25 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/profiler.cpython-310.pyc ADDED
Binary file (5.18 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/tensor_version_op.cpython-310.pyc ADDED
Binary file (1.77 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_minifier_common.cpython-310.pyc ADDED
Binary file (7.17 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/__pycache__/trace_rules.cpython-310.pyc ADDED
Binary file (97.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (190 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/onnxrt.cpython-310.pyc ADDED
Binary file (1.29 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/torchxla.cpython-310.pyc ADDED
Binary file (1.93 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/backends/common.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import contextlib
4
+ import functools
5
+ import logging
6
+ from unittest.mock import patch
7
+
8
+ import torch
9
+ from torch._dynamo import disable
10
+ from torch._dynamo.utils import counters, defake
11
+ from torch._functorch.aot_autograd import aot_module_simplified
12
+ from torch.utils._python_dispatch import _disable_current_modes
13
+
14
+ log = logging.getLogger(__name__)
15
+
16
+
17
+ def aot_autograd(**kwargs):
18
+ def compiler_fn(gm: torch.fx.GraphModule, example_inputs):
19
+ # Hack to get around circular import problems with aot_eager_decomp_partition
20
+ if callable(kwargs.get("decompositions")):
21
+ kwargs["decompositions"] = kwargs["decompositions"]()
22
+
23
+ # NB: dont delete counter increment
24
+ counters["aot_autograd"]["total"] += 1
25
+ use_fallback = False
26
+
27
+ if use_fallback:
28
+ log.debug("Unable to use AOT Autograd because graph has mutation")
29
+ counters["aot_autograd"]["not_ok"] += 1
30
+ return gm
31
+
32
+ # OK attempt to compile
33
+
34
+ def _wrapped_bw_compiler(*args, **kwargs):
35
+ # stop TorchDynamo from trying to compile our generated backwards pass
36
+ return disable(disable(bw_compiler)(*args, **kwargs))
37
+
38
+ bw_compiler = kwargs.get("bw_compiler") or kwargs["fw_compiler"]
39
+ kwargs["bw_compiler"] = _wrapped_bw_compiler
40
+ kwargs["inference_compiler"] = (
41
+ kwargs.get("inference_compiler") or kwargs["fw_compiler"]
42
+ )
43
+
44
+ from functorch.compile import nop
45
+
46
+ from torch._inductor.debug import enable_aot_logging
47
+
48
+ # debug asserts slow down compile time noticeably,
49
+ # So only default them on when the aot_eager backend is used.
50
+ if kwargs.get("fw_compiler", None) == nop:
51
+ patch_config = patch("functorch.compile.config.debug_assert", True)
52
+ else:
53
+ patch_config = contextlib.nullcontext()
54
+
55
+ try:
56
+ # NB: NOT cloned!
57
+ with enable_aot_logging(), patch_config:
58
+ cg = aot_module_simplified(gm, example_inputs, **kwargs)
59
+ counters["aot_autograd"]["ok"] += 1
60
+ return disable(cg)
61
+ except Exception:
62
+ counters["aot_autograd"]["not_ok"] += 1
63
+ raise
64
+
65
+ return compiler_fn
66
+
67
+
68
+ def mem_efficient_fusion_kwargs(use_decomps):
69
+ from functorch.compile import (
70
+ default_decompositions,
71
+ min_cut_rematerialization_partition,
72
+ ts_compile,
73
+ )
74
+
75
+ kwargs = {
76
+ # these are taken from memory_efficient_fusion()
77
+ "fw_compiler": ts_compile,
78
+ "bw_compiler": ts_compile,
79
+ "partition_fn": min_cut_rematerialization_partition,
80
+ }
81
+
82
+ if use_decomps:
83
+ kwargs["decompositions"] = default_decompositions
84
+
85
+ return kwargs
86
+
87
+
88
+ def fake_tensor_unsupported(fn):
89
+ """
90
+ Decorator for backends that need real inputs. We swap out fake
91
+ tensors for zero tensors.
92
+ """
93
+
94
+ @functools.wraps(fn)
95
+ def wrapper(model, inputs, **kwargs):
96
+ with _disable_current_modes():
97
+ inputs = list(map(defake, inputs))
98
+ return fn(model, inputs, **kwargs)
99
+
100
+ return wrapper
101
+
102
+
103
+ def device_from_inputs(example_inputs) -> torch.device:
104
+ for x in example_inputs:
105
+ if hasattr(x, "device"):
106
+ return x.device
107
+
108
+
109
+ def dtype_from_inputs(example_inputs) -> torch.dtype:
110
+ for x in example_inputs:
111
+ if hasattr(x, "dtype"):
112
+ return x.dtype
venv/lib/python3.10/site-packages/torch/_dynamo/backends/cudagraphs.py ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import functools
4
+ import operator
5
+ from collections import defaultdict
6
+ from typing import Dict, List, Optional
7
+
8
+ import torch
9
+ from torch._dynamo.backends.debugging import boxed_nop
10
+ from torch._inductor.cudagraph_trees import cudagraphify_impl
11
+ from torch._inductor.cudagraph_utils import (
12
+ BoxedDeviceIndex,
13
+ check_multiple_devices_or_any_cpu_nodes,
14
+ get_mutation_stack_trace,
15
+ )
16
+ from torch._inductor.utils import (
17
+ BoxedBool,
18
+ count_tangents,
19
+ has_incompatible_cudagraph_ops,
20
+ num_fw_fixed_arguments,
21
+ output_node,
22
+ )
23
+ from torch.multiprocessing.reductions import StorageWeakRef
24
+ from .common import aot_autograd
25
+ from .registry import register_backend
26
+
27
+ perf_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
28
+
29
+
30
+ def find_input_mutations(g):
31
+ def meta_fk(meta):
32
+ return meta["val"] if "val" in meta else meta["fake_result"]
33
+
34
+ inputs = defaultdict(set)
35
+ input_idx = 0
36
+ mutated_inputs = set()
37
+ for n in g.nodes:
38
+ if n.op == "placeholder":
39
+ if isinstance(meta_fk(n.meta), torch.Tensor):
40
+ inputs[StorageWeakRef(meta_fk(n.meta)._typed_storage())].add(input_idx)
41
+ input_idx += 1
42
+ elif n.op == "call_function":
43
+ if n.target is operator.getitem:
44
+ continue
45
+ schema = n.target._schema
46
+ for i, arg in enumerate(schema.arguments):
47
+ if i < len(n.args):
48
+ argument = n.args[i]
49
+ else:
50
+ if arg.name not in n.kwargs:
51
+ continue
52
+ argument = n.kwargs[arg.name]
53
+ mut_arg = False
54
+ if arg.alias_info:
55
+ if arg.alias_info.is_write:
56
+ mut_arg = True
57
+ if mut_arg:
58
+ # TODO: not correct for args that contain tensors in a struct
59
+ # like list
60
+ mutated_inputs |= inputs[
61
+ StorageWeakRef(meta_fk(argument.meta)._typed_storage())
62
+ ]
63
+
64
+ # TODO: error on unrecognized nodes
65
+ return mutated_inputs
66
+
67
+
68
+ def get_device_node_mapping(gm: torch.fx.GraphModule):
69
+ device_node_mapping: Dict[torch.device, torch.fx.Node] = {}
70
+ for n in gm.graph.nodes:
71
+ t = n.meta.get("val", None)
72
+ if isinstance(t, torch.Tensor) and t.device not in device_node_mapping:
73
+ device_node_mapping[t.device] = n
74
+ return device_node_mapping
75
+
76
+
77
+ def check_for_mutation(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]:
78
+ mutation_indices = find_input_mutations(aot_model.graph) - set(range(num_fixed))
79
+ if not mutation_indices:
80
+ return None
81
+
82
+ return get_mutation_stack_trace(aot_model, mutation_indices)
83
+
84
+
85
+ def check_for_skip(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]:
86
+ if mut_skip := check_for_mutation(aot_model, num_fixed):
87
+ return mut_skip
88
+
89
+ if skip := check_multiple_devices_or_any_cpu_nodes(
90
+ get_device_node_mapping(aot_model)
91
+ ):
92
+ return skip
93
+
94
+ if has_incompatible_cudagraph_ops(aot_model):
95
+ return "skipping cudagraphs due to incompatible op"
96
+
97
+ return None
98
+
99
+
100
+ def get_device_index(gm) -> int:
101
+ device = next(iter(get_device_node_mapping(gm)))
102
+ assert device.type == "cuda"
103
+ return device.index
104
+
105
+
106
+ def get_stack_traces(gm) -> List[Optional[str]]:
107
+ output = output_node(gm)
108
+ assert len(output.args) == 1
109
+ return [
110
+ (arg.stack_trace if isinstance(arg, torch.fx.node.Node) else None)
111
+ for arg in output.args[0]
112
+ ]
113
+
114
+
115
+ def cudagraphs(dynamo_model, dynamo_inputs):
116
+ do_cudagraphs = BoxedBool(True)
117
+ boxed_device_index = BoxedDeviceIndex(None)
118
+
119
+ def forward_cudagraphs(aot_model, aot_inputs, is_inference=False):
120
+ interp = boxed_nop(aot_model, aot_inputs)
121
+ fixed = num_fw_fixed_arguments(len(dynamo_inputs), len(aot_inputs))
122
+ if skip_msg := check_for_skip(aot_model, fixed):
123
+ BoxedBool.disable(do_cudagraphs)
124
+ perf_log.warning("skipping cudagraphs due to %s", skip_msg)
125
+ return interp
126
+
127
+ boxed_device_index.set(get_device_index(aot_model))
128
+
129
+ out = cudagraphify_impl(
130
+ interp,
131
+ aot_inputs,
132
+ range(fixed),
133
+ device_index=boxed_device_index.value,
134
+ is_backward=False,
135
+ is_inference=False,
136
+ stack_traces=get_stack_traces(aot_model),
137
+ )
138
+ out._boxed_call = True
139
+ return out
140
+
141
+ def backward_cudagraphs(aot_model, aot_inputs):
142
+ interp = boxed_nop(aot_model, aot_inputs)
143
+ if not do_cudagraphs:
144
+ return aot_model
145
+
146
+ fixed = count_tangents(aot_model)
147
+ if skip_msg := check_for_skip(aot_model, fixed):
148
+ perf_log.warning("skipping cudagraphs due to %s", skip_msg)
149
+
150
+ # See [Backward Generation Handling]
151
+ manager = torch._inductor.cudagraph_trees.get_manager(
152
+ boxed_device_index.value, create_if_none_exists=False
153
+ )
154
+ assert manager is not None
155
+
156
+ def fn(inputs):
157
+ manager.set_to_running_backward()
158
+ return aot_model(inputs)
159
+
160
+ fn._boxed_call = True
161
+ return fn
162
+
163
+ out = cudagraphify_impl(
164
+ interp,
165
+ aot_inputs,
166
+ range(fixed),
167
+ device_index=get_device_index(aot_model),
168
+ is_backward=True,
169
+ is_inference=False,
170
+ stack_traces=get_stack_traces(aot_model),
171
+ )
172
+ out._boxed_call = True
173
+ return out
174
+
175
+ aot_cudagraphs = aot_autograd(
176
+ fw_compiler=forward_cudagraphs,
177
+ bw_compiler=backward_cudagraphs,
178
+ inference_compiler=functools.partial(forward_cudagraphs, is_inference=True),
179
+ keep_inference_input_mutations=torch._dynamo.config.cudagraph_backend_keep_input_mutation,
180
+ )
181
+ return aot_cudagraphs(dynamo_model, dynamo_inputs)
182
+
183
+
184
+ class CudagraphsBackend:
185
+ compiler_name = "cudagraphs"
186
+
187
+ @staticmethod
188
+ def reset():
189
+ from torch._inductor.cudagraph_trees import reset_cudagraph_trees
190
+
191
+ reset_cudagraph_trees()
192
+
193
+ @staticmethod
194
+ def __call__(model, inputs):
195
+ return cudagraphs(model, inputs)
196
+
197
+
198
+ # aot_cudagraphs only applies CUDA graphs to the graph. It is also helpful
199
+ # for debugging and can serve as a perf baseline.
200
+ register_backend(name="cudagraphs", compiler_fn=CudagraphsBackend())
201
+
202
+
203
+ def cudagraphs_inner(model, inputs, copy_outputs=True, copy_inputs=True):
204
+ """This isn't registered as a backend, but is used in some benchmarks"""
205
+ assert isinstance(inputs, (list, tuple))
206
+ if copy_inputs:
207
+ static_inputs = [torch.zeros_like(x) for x in inputs]
208
+ else:
209
+ static_inputs = list(inputs)
210
+
211
+ # warmup
212
+ torch.cuda.synchronize()
213
+ stream = torch.cuda.Stream()
214
+ stream.wait_stream(torch.cuda.current_stream())
215
+ with torch.cuda.stream(stream):
216
+ model(*inputs)
217
+ stream.synchronize()
218
+ torch.cuda.current_stream().wait_stream(stream)
219
+ torch.cuda.synchronize()
220
+
221
+ # record
222
+ graph = torch.cuda.CUDAGraph()
223
+ with torch.cuda.graph(graph, stream=stream):
224
+ static_outputs = model(*static_inputs)
225
+ if not isinstance(static_outputs, (list, tuple)):
226
+ static_outputs = (static_outputs,)
227
+
228
+ def run(*new_inputs):
229
+ assert len(static_inputs) == len(new_inputs)
230
+ if copy_inputs:
231
+ for dst, src in zip(static_inputs, new_inputs):
232
+ dst.copy_(src)
233
+ graph.replay()
234
+ if copy_outputs:
235
+ return [x.clone() for x in static_outputs]
236
+ else:
237
+ return static_outputs
238
+
239
+ return run
venv/lib/python3.10/site-packages/torch/_dynamo/backends/debugging.py ADDED
@@ -0,0 +1,289 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import dataclasses
4
+ import functools
5
+ from importlib import import_module
6
+ from typing import Any, List, Optional
7
+
8
+ from functorch.compile import min_cut_rematerialization_partition
9
+
10
+ import torch
11
+ from torch import _guards
12
+ from torch._functorch.compilers import ts_compile
13
+ from .common import aot_autograd
14
+ from .registry import register_debug_backend as register_backend
15
+
16
+ """
17
+ This file contains TorchDynamo backends intended for debugging uses.
18
+ """
19
+
20
+
21
+ @register_backend
22
+ def eager(gm, fake_tensor_inputs):
23
+ return gm
24
+
25
+
26
+ @register_backend
27
+ def pre_dispatch_eager(gm, fake_tensor_inputs):
28
+ from torch.fx.experimental.proxy_tensor import make_fx
29
+
30
+ def runnable_gm(*args):
31
+ return torch.fx.Interpreter(gm).run(*args)
32
+
33
+ pre_dispatch_gm = make_fx(runnable_gm, pre_dispatch=True)(*fake_tensor_inputs)
34
+ pre_dispatch_gm.print_readable()
35
+
36
+ return pre_dispatch_gm
37
+
38
+
39
+ @register_backend
40
+ def eager_debug(gm, fake_tensor_inputs):
41
+ from torch._subclasses.schema_check_mode import SchemaCheckMode
42
+
43
+ # We could add more debugging bits here.
44
+ # Right now, this backend can be used to check for and error on
45
+ # custom dispatcher ops that have incorrect schemas.
46
+ def inner(*args):
47
+ with SchemaCheckMode():
48
+ return torch.fx.Interpreter(gm).run(*args)
49
+
50
+ return inner
51
+
52
+
53
+ @register_backend(name="ts")
54
+ def torchscript(gm, fake_tensor_inputs):
55
+ return torch.jit.script(gm)
56
+
57
+
58
+ # used boxed call to discard inputs when they are no longer needed
59
+ def boxed_nop(fx_g, example_inputs):
60
+ def run(args):
61
+ return torch.fx.Interpreter(fx_g).boxed_run(args)
62
+
63
+ run._boxed_call = True
64
+ return run
65
+
66
+
67
+ # Useful for debugging purpose
68
+ # aot_eager uses AOT Autograd backend with nop compiler. It is helpful in debugging.
69
+ aot_eager = aot_autograd(
70
+ fw_compiler=boxed_nop, partition_fn=min_cut_rematerialization_partition
71
+ )
72
+ register_backend(name="aot_eager", compiler_fn=aot_eager)
73
+
74
+ aot_eager_default_partitioner = aot_autograd(fw_compiler=boxed_nop)
75
+ register_backend(
76
+ name="aot_eager_default_partitioner", compiler_fn=aot_eager_default_partitioner
77
+ )
78
+
79
+ # Uses TorchInductor AOT Autograd decomps and partitioner to isolate aot vs
80
+ # inductor problems.
81
+ # aot_eager_decomp_partition just replaces the inductor compiler with nop to help
82
+ # isolate inductor vs aot_eager errors
83
+ aot_eager_decomp_partition = aot_autograd(
84
+ # these are taken from memory_efficient_fusion()
85
+ fw_compiler=boxed_nop,
86
+ bw_compiler=boxed_nop,
87
+ # NB: lambda here is to delay import of inductor
88
+ decompositions=lambda: import_module(
89
+ "torch._inductor.compile_fx"
90
+ ).select_decomp_table(),
91
+ partition_fn=functools.partial(
92
+ min_cut_rematerialization_partition, compiler="inductor"
93
+ ),
94
+ )
95
+ register_backend(
96
+ name="aot_eager_decomp_partition", compiler_fn=aot_eager_decomp_partition
97
+ )
98
+
99
+ # AOT Autograd with torchscript backend. Default partitioner.
100
+ # aot_ts uses torchscript backend. We can use this with both nnc and nvfuser
101
+ # by using the relevant fuser with torch.jit.fuser(...)
102
+ aot_ts = aot_autograd(fw_compiler=ts_compile)
103
+ register_backend(name="aot_ts", compiler_fn=aot_ts)
104
+
105
+ # These buggy backends are used for inducing bugs so that we can test
106
+ # our repro extraction / minifier scripts
107
+
108
+
109
+ class ReluCompileError(Exception):
110
+ pass
111
+
112
+
113
+ class TestingOnlyCompileError(Exception):
114
+ pass
115
+
116
+
117
+ @register_backend
118
+ def relu_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
119
+ for node in gm.graph.nodes:
120
+ if node.target == torch.relu:
121
+ raise ReluCompileError()
122
+ return gm
123
+
124
+
125
+ @register_backend
126
+ def relu_runtime_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
127
+ for node in gm.graph.nodes:
128
+ if node.target == torch.relu:
129
+ node.target = torch._assert
130
+ node.args = (False, "ReluRuntimeError")
131
+ gm.recompile()
132
+ return gm
133
+
134
+
135
+ @register_backend
136
+ def relu_accuracy_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
137
+ for node in gm.graph.nodes:
138
+ if node.target == torch.relu:
139
+ node.target = torch.add
140
+ node.args = (node.args[0], 1)
141
+ gm.recompile()
142
+
143
+ return gm
144
+
145
+
146
+ @register_backend
147
+ def non_leaf_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
148
+ # Require at least one non-trivial thing in the graph,
149
+ # see https://github.com/pytorch/pytorch/issues/102898
150
+ for node in gm.graph.nodes:
151
+ if node.op == "call_function":
152
+ break
153
+ else:
154
+ return gm
155
+ for t in example_inputs:
156
+ if not t.is_leaf:
157
+ raise TestingOnlyCompileError()
158
+ return gm
159
+
160
+
161
+ @dataclasses.dataclass
162
+ class ExplainOutput:
163
+ """
164
+ This is the output of :func:`torch._dynamo.explain()`
165
+ There is no reason to create this class directly.
166
+ """
167
+
168
+ graphs: List[torch.fx.GraphModule]
169
+ graph_count: int
170
+ graph_break_count: int
171
+ break_reasons: List[
172
+ Any
173
+ ] # Type is GraphCompileReason but doesn't matter for this purpose
174
+ op_count: int
175
+ ops_per_graph: Optional[List[torch.fx.Node]] = None
176
+ out_guards: Optional[List[_guards.Guard]] = None
177
+ compile_times: Optional[str] = None
178
+
179
+ def __str__(self):
180
+ output = f"Graph Count: {self.graph_count}\n"
181
+ output += f"Graph Break Count: {self.graph_break_count}\n"
182
+ output += f"Op Count: {self.op_count}\n"
183
+
184
+ output += "Break Reasons:\n"
185
+ for idx, break_reason in enumerate(self.break_reasons):
186
+ output += f" Break Reason {idx+1}:\n"
187
+ output += f" Reason: {break_reason.reason}\n"
188
+ output += " User Stack:\n"
189
+ for frame_summary in break_reason.user_stack:
190
+ output += f" {frame_summary}\n"
191
+
192
+ if self.ops_per_graph is not None:
193
+ output += "Ops per Graph:\n"
194
+ for idx, ops in enumerate(self.ops_per_graph):
195
+ output += f" Ops {idx+1}:\n"
196
+ for op in ops:
197
+ output += f" {op}\n"
198
+
199
+ if self.out_guards is not None:
200
+ output += "Out Guards:\n"
201
+ for i, guard in enumerate(self.out_guards):
202
+ output += f" Guard {i+1}:\n"
203
+ output += f" {str(guard)}"
204
+
205
+ if self.compile_times is not None:
206
+ output += f"Compile Times: {self.compile_times}\n"
207
+ return output
208
+
209
+
210
+ def _explain_graph_detail(
211
+ gm: torch.fx.GraphModule, graphs, op_count, ops_per_graph, break_reasons
212
+ ):
213
+ """
214
+ This function is a utility which processes a torch.fx.GraphModule and
215
+ accumulates information about its ops, graph breaks, and other details. It
216
+ is intended to be used by the ExplainWithBackend class and
217
+ `torch._dynamo.explain()` to provide details from Dynamo's graph capture.
218
+
219
+ Parameters:
220
+ gm (torch.fx.GraphModule): The GraphModule to be processed.
221
+ graphs (list): A list that accumulates all the GraphModules processed.
222
+ op_count (int): The total count of operations in all GraphModules processed so far.
223
+ ops_per_graph (list): A list that accumulates the operations of each GraphModule.
224
+ break_reasons (list): A list that accumulates the reasons for breaks in each GraphModule.
225
+
226
+ Returns:
227
+ tuple: A tuple containing the processed GraphModule, the updated lists of graphs,
228
+ operations per graph, and break reasons, and the updated operation count.
229
+ """
230
+ graphs.append(gm)
231
+ ops = [node.target for node in gm.graph.nodes if node.op == "call_function"]
232
+ op_count += len(ops)
233
+ ops_per_graph.append(ops)
234
+ if gm.compile_subgraph_reason.graph_break:
235
+ break_reasons.append(gm.compile_subgraph_reason)
236
+
237
+ return gm, graphs, op_count, ops_per_graph, break_reasons
238
+
239
+
240
+ class ExplainWithBackend:
241
+ """
242
+ This class is intended to be used as a backend for `torch.compile`. It is
243
+ composable with other backends. When used in this way, it accumulates
244
+ information about graph breaks, ops, and other info and provides a string
245
+ representation summarizing this information.
246
+
247
+ Attributes:
248
+ backend (str): The name of the backend to use for optimization.
249
+ graphs (list): A list of the graphs captured by TorchDynamo.
250
+ op_count (int): The total number of operations in all optimized graphs.
251
+ break_reasons (list): A list of graph break reasons with stack traces.
252
+
253
+ Example Usage:
254
+ def fn(x):
255
+ x = torch.sigmoid(x)
256
+ return x
257
+
258
+ torch._dynamo.reset()
259
+ eb = ExplainWithBackend("inductor")
260
+ optimized_fn = torch.compile(fn, backend=eb)
261
+ result = optimized_fn(torch.randn(5))
262
+ print(eb.output())
263
+ """
264
+
265
+ def __init__(self, backend):
266
+ from .registry import lookup_backend
267
+
268
+ self.backend = lookup_backend(backend)
269
+ self.graphs = []
270
+ self.op_count = 0
271
+ self.break_reasons = []
272
+
273
+ def __call__(self, gm: torch.fx.GraphModule, example_inputs):
274
+ gm, self.graphs, self.op_count, _, self.break_reasons = _explain_graph_detail(
275
+ gm, self.graphs, self.op_count, [], self.break_reasons
276
+ )
277
+ return self.backend(gm, example_inputs)
278
+
279
+ def output(self) -> ExplainOutput:
280
+ graph_count = len(self.graphs)
281
+ output = ExplainOutput(
282
+ self.graphs,
283
+ graph_count,
284
+ graph_count - 1,
285
+ self.break_reasons,
286
+ self.op_count,
287
+ )
288
+
289
+ return output
venv/lib/python3.10/site-packages/torch/_dynamo/backends/distributed.py ADDED
@@ -0,0 +1,612 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import logging
4
+ import traceback
5
+ from dataclasses import dataclass, field
6
+ from typing import Any, List, Optional
7
+ from unittest import mock
8
+
9
+ import torch
10
+ from torch import fx
11
+ from torch._dynamo.output_graph import GraphCompileReason
12
+ from torch._dynamo.utils import deepcopy_to_fake_tensor, detect_fake_mode
13
+ from torch._logging import trace_structured
14
+ from torch.fx.node import Node
15
+
16
+ # Regular log messages should go through 'log'.
17
+ # ddp_graph_log is a separate artifact logger reserved for dumping graphs.
18
+ # See docs/source/logging.rst for more info.
19
+ log = logging.getLogger(__name__)
20
+ ddp_graph_log = torch._logging.getArtifactLogger(__name__, "ddp_graphs")
21
+
22
+
23
+ def args_str(args):
24
+ # a debug helper
25
+ if torch.is_tensor(args):
26
+ return f"T[{args.shape}]"
27
+ elif isinstance(args, tuple):
28
+ return f"tuple({', '.join([args_str(x) for x in args])})"
29
+ elif isinstance(args, list):
30
+ return f"list({', '.join([args_str(x) for x in args])})"
31
+ else:
32
+ return str(args)
33
+
34
+
35
+ @dataclass
36
+ class Bucket:
37
+ size: int = 0
38
+ params: List[str] = field(default_factory=list)
39
+ nodes: List[fx.Node] = field(default_factory=list)
40
+
41
+ # param_ids is just used for unit testing
42
+ param_ids: List = field(default_factory=list)
43
+
44
+ # keep track of any buckets that were extended for logging purposes
45
+ opcount_increased_to_capture_external_output: int = 0
46
+ paramsize_before_opcount_increase: int = 0
47
+
48
+
49
+ def bucket_has_external_output(bucket: Bucket) -> bool:
50
+ nodes_in_bucket = set()
51
+ # we want to iterate in reverse order, but clumsi-luckily the bucket.nodes list was already created backwards
52
+ # so we don't reverse it here
53
+ for node in bucket.nodes:
54
+ # assume node.op != output, since those are filtered in the original iteration
55
+ nodes_in_bucket.add(node)
56
+ for user in node.users:
57
+ if user not in nodes_in_bucket:
58
+ return True
59
+ return False
60
+
61
+
62
+ def pretty_print_buckets(buckets: List[Bucket], bucket_bytes_cap: int):
63
+ headers = ("Index", "Size (b)", "Param Names")
64
+ rows = []
65
+ extended_buckets = []
66
+ for idx, bucket in enumerate(reversed(buckets)):
67
+ if len(bucket.params) > 0:
68
+ rows.append((idx, bucket.size, bucket.params[0]))
69
+ for param in bucket.params[1:]:
70
+ rows.append((None, None, param))
71
+ if bucket.opcount_increased_to_capture_external_output > 0:
72
+ extended_buckets.append(
73
+ (
74
+ idx,
75
+ bucket.opcount_increased_to_capture_external_output,
76
+ bucket.size - bucket.paramsize_before_opcount_increase,
77
+ )
78
+ )
79
+
80
+ if len(rows):
81
+ log.info(
82
+ "\nDDPOptimizer used bucket cap %s and created %d buckets. Enable debug logs for detailed bucket info.",
83
+ bucket_bytes_cap,
84
+ len(buckets),
85
+ )
86
+
87
+ if len(extended_buckets):
88
+ log.warning(
89
+ "Some buckets were extended beyond their requested parameter capacities"
90
+ " in order to ensure each subgraph has an output node, required for fx graph partitioning."
91
+ " This can be the case when a subgraph would have only contained nodes performing inplace mutation,"
92
+ " and returning no logical outputs. This should not be a problem, unless it results in too few graph"
93
+ " partitions for optimal DDP performance."
94
+ )
95
+
96
+ try:
97
+ from tabulate import tabulate
98
+
99
+ log.debug(
100
+ "\nDDPOptimizer produced the following bucket assignments:\n%s",
101
+ tabulate(rows, headers=headers, tablefmt="simple_grid"),
102
+ )
103
+
104
+ if len(extended_buckets):
105
+ log.warning(
106
+ "DDPOptimizer extended these buckets to ensure per-subgraph output nodes:\n%s",
107
+ tabulate(
108
+ extended_buckets,
109
+ headers=("Index", "Extra Ops", "Extra Param Size (b)"),
110
+ tablefmt="simple_grid",
111
+ ),
112
+ )
113
+ except ImportError:
114
+ log.debug(
115
+ "Please `pip install tabulate` in order to display ddp bucket sizes and diagnostic information."
116
+ )
117
+ else:
118
+ log.debug("DDPOptimizer captured no parameters and did not split this graph.")
119
+
120
+
121
+ def has_higher_order_op(gm):
122
+ # Check if there is a higher order op in the graph
123
+ for node in gm.graph.nodes:
124
+ if node.op == "get_attr":
125
+ maybe_param = getattr(gm, node.target)
126
+ if isinstance(maybe_param, torch.fx.GraphModule):
127
+ return True
128
+ return False
129
+
130
+
131
+ # 3 (lazy compile): Replace submodules with lazily compiling submodule
132
+ class SubmoduleReplacer(torch.fx.interpreter.Interpreter):
133
+ def __init__(self, module, compiler):
134
+ super().__init__(module)
135
+ self.compiler = compiler
136
+
137
+ def lazily_compiled_submod(self, input_mod):
138
+ """
139
+ Create a wrapper around submodules which:
140
+ - lazily compiles each of the partitioned submodules using the user-provided compiler
141
+ - unpacks singleton tuples/lists into flat arg
142
+ """
143
+
144
+ class LazilyCompiledModule(torch.nn.Module):
145
+ def __init__(self, submod, compiler, unwrap_singleton_tuple):
146
+ super().__init__()
147
+ self.submod = submod
148
+ self.compiler = compiler
149
+ self.compiled = False
150
+ self.unwrap_singleton_tuple = unwrap_singleton_tuple
151
+
152
+ def forward(self, *args):
153
+ if not self.compiled:
154
+ # First compile with args as example_inputs
155
+ # These args will be fakeified if using Inductor/AOTAutograd
156
+ new_submod = self.compiler(self.submod, args)
157
+ del self.submod
158
+ self.submod = new_submod
159
+ self.compiled = True
160
+ self.compiler = None
161
+
162
+ x = self.submod(*args)
163
+ # we must let 'input_mod' return a tuple, to make AOT happy.
164
+ # (aot_autograd compile_fn literally requires that the output of a graph it compiles is a tuple).
165
+ # however, we don't acutally want this tuple to be returned, since the fx logic that calls the submod
166
+ # will again wrap outputs from the submod in a tuple. So we unwrap it, and count on it being re-wrapped
167
+ if self.unwrap_singleton_tuple and isinstance(x, (tuple, list)):
168
+ return x[0]
169
+ return x
170
+
171
+ unwrap_singleton_tuple = False
172
+ for sn in input_mod.graph.nodes:
173
+ if sn.op == "output":
174
+ if not isinstance(sn.args[0], tuple):
175
+ unwrap_singleton_tuple = True
176
+ sn.args = (sn.args,)
177
+
178
+ input_mod.recompile()
179
+ input_mod.compile_subgraph_reason = GraphCompileReason(
180
+ "DDPOptimizer intentional graph-break (See Note [DDPOptimizer])."
181
+ " Set `torch._dynamo.config.optimize_ddp = False` to disable.",
182
+ [
183
+ # it's close to useless to get a real stacktrace here, and quite verbose.
184
+ traceback.FrameSummary(__file__, 0, DDPOptimizer),
185
+ ],
186
+ )
187
+ wrapper = LazilyCompiledModule(
188
+ input_mod,
189
+ self.compiler,
190
+ unwrap_singleton_tuple,
191
+ )
192
+ return wrapper
193
+
194
+ # We replace the submodules with lazy submodules which compile
195
+ # the corresponding submodules when they are run with real values
196
+ # Always returns `None` - we do not need to propagate values in order
197
+ # to replace submodules.
198
+ def run_node(self, n: Node) -> Any:
199
+ if n.op == "call_module":
200
+ real_mod = self.fetch_attr(n.target)
201
+
202
+ ddp_graph_log.debug("\n---%s graph---\n%s", n.target, real_mod.graph)
203
+
204
+ assert len(n.kwargs) == 0, "We assume only args for these modules"
205
+ lazily_compiled_submod = self.lazily_compiled_submod(real_mod)
206
+
207
+ # We update the original (outer) graph with a call into the compiled module
208
+ # instead of the uncompiled one.
209
+ self.module.delete_submodule(n.target)
210
+ n.target = "compiled_" + n.target
211
+ self.module.add_submodule(n.target, lazily_compiled_submod)
212
+
213
+
214
+ # 3 (no lazy compile): compile each of the partitioned submodules using the user-provided compiler
215
+ class SubmodCompiler(torch.fx.interpreter.Interpreter):
216
+ def __init__(self, module, compiler, fake_mode):
217
+ super().__init__(module)
218
+ self.compiler = compiler
219
+ self.fake_mode = fake_mode
220
+
221
+ def compile_submod(self, input_mod, args, kwargs):
222
+ """
223
+ Compile the submodule,
224
+ using a wrapper to make sure its output is always a tuple,
225
+ which is required by AotAutograd based compilers
226
+ """
227
+ assert len(kwargs) == 0, "We assume only args for these modules"
228
+
229
+ class WrapperModule(torch.nn.Module):
230
+ def __init__(self, submod, unwrap_singleton_tuple):
231
+ super().__init__()
232
+ self.submod = submod
233
+ self.unwrap_singleton_tuple = unwrap_singleton_tuple
234
+
235
+ def forward(self, *args):
236
+ x = self.submod(*args)
237
+ # TODO(whc)
238
+ # for some reason the isinstance check is necessary if I split one node per submod
239
+ # - even though I supposedly wrapped the output in a tuple in those cases, the real
240
+ # compiled module was still returning a tensor
241
+ if self.unwrap_singleton_tuple and isinstance(x, (tuple, list)):
242
+ return x[0]
243
+ return x
244
+
245
+ unwrap_singleton_tuple = False
246
+ for sn in input_mod.graph.nodes:
247
+ if sn.op == "output":
248
+ if not isinstance(sn.args[0], tuple):
249
+ unwrap_singleton_tuple = True
250
+ sn.args = (sn.args,)
251
+
252
+ input_mod.recompile()
253
+ input_mod.compile_subgraph_reason = GraphCompileReason(
254
+ "DDPOptimizer intentional graph-break (See Note [DDPOptimizer])."
255
+ " Set `torch._dynamo.config.optimize_ddp = False` to disable.",
256
+ [
257
+ # it's close to useless to get a real stacktrace here, and quite verbose.
258
+ traceback.FrameSummary(__file__, 0, DDPOptimizer),
259
+ ],
260
+ )
261
+
262
+ wrapper = WrapperModule(
263
+ self.compiler(input_mod, args),
264
+ unwrap_singleton_tuple,
265
+ )
266
+ return wrapper
267
+
268
+ # Note:
269
+ #
270
+ # The way distributed works today around fake tensors can be somewhat confusing.
271
+ # Some of these codepaths are shared in both runtime, and compile time. The presence
272
+ # of a fake_mode, read off of fake tensor inputs, dictates how we will operate.
273
+ #
274
+ # A few things to keep in mind:
275
+ #
276
+ # 1) We invoke `compile_submod` with a real module. The output of that gets stored
277
+ # on the graph via `self.module.add_submodule(n.target, compiled_submod_real)`.
278
+ #
279
+ # 2) When running a call_module targeted node, if we have a fake_mode, we fakify the
280
+ # module we got from self.fetch_attr(n.target). Regardless of fake_mode, we then execute it.
281
+ #
282
+ # 3) Fake tensors should always be around during compile time.
283
+ #
284
+ # 4) Fake tensors should never be around at runtime.
285
+ #
286
+ # 5) We end up with a compilation mode that takes a real submodule and fake tensors,
287
+ # to match what aot_autograd expects. See Note: [Fake Modules and AOTAutograd]
288
+ def run_node(self, n: Node) -> Any:
289
+ args, kwargs = self.fetch_args_kwargs_from_env(n)
290
+ new_args = []
291
+ assert self.fake_mode
292
+ for arg in args:
293
+ if isinstance(arg, torch.Tensor) and not isinstance(
294
+ arg, torch._subclasses.FakeTensor
295
+ ):
296
+ new_args.append(torch._dynamo.utils.to_fake_tensor(arg, self.fake_mode))
297
+ else:
298
+ new_args.append(arg)
299
+
300
+ log.debug("run_node %s, %s got args %s", n.op, n.target, args_str(args))
301
+ assert isinstance(args, tuple)
302
+ assert isinstance(kwargs, dict)
303
+
304
+ if n.op == "call_module":
305
+ real_mod = self.fetch_attr(n.target)
306
+ if self.fake_mode:
307
+ curr_submod = deepcopy_to_fake_tensor(real_mod, self.fake_mode)
308
+ else:
309
+ curr_submod = real_mod
310
+
311
+ ddp_graph_log.debug("\n---%s graph---\n%s", n.target, curr_submod.graph)
312
+
313
+ # When calling the compiler on the submod, inputs (new_args) are expected to
314
+ # be FakeTensors already since Dynamo would have made them FakeTensors in the
315
+ # non-DDP flow. However, the parameters are _not_ expected to be FakeTensors,
316
+ # since this wrapping happens during compilation
317
+
318
+ # Note: Returning Fake Tensors on First AOT Autograd Call
319
+ #
320
+ # Inductor will optimize strides of outputs when it deems it profitable.
321
+ # For instance, converting to channels last. When we split the graph here
322
+ # into multiple inductor compilations, we need to make sure that the
323
+ # output strides of one compilation is appropriately passed to the subsequent
324
+ # compilations. However, the mapping from inductor output to dynamo output
325
+ # is non-trivial due to aot_autograd's deduping, de-aliasing, mutation, re-writing,
326
+ # subclass handling, etc. In order to replay all this logic we set a flag such that
327
+ # the first invocation of inductor in aot_autograd will return Fake Tensors with
328
+ # appropriate strides. Then, all of aot autograd's runtime logic is replayed.
329
+ # This gives us the appropriately strided outputs here which will reflect runtime strides.
330
+
331
+ class FakeifyFirstAOTInvocationGuard:
332
+ def __init__(self):
333
+ self.tc = torch._guards.TracingContext.try_get()
334
+ assert self.tc
335
+ torch._guards.TracingContext.try_get().fakify_first_call = True
336
+
337
+ def __del__(self):
338
+ self.tc.fakify_first_call = False
339
+
340
+ # For aot_eager and other backends, tracing context is not set
341
+ has_tracing_context = torch._guards.TracingContext.try_get() is not None
342
+ if has_tracing_context:
343
+ g = FakeifyFirstAOTInvocationGuard()
344
+
345
+ from torch._dynamo.utils import counters
346
+
347
+ init = counters["aot_autograd"]["total"]
348
+ compiled_submod_real = self.compile_submod(real_mod, new_args, kwargs)
349
+
350
+ # TODO - better way of doing this?
351
+ # Only aot autograd handles fakifying first call
352
+ invoked_aot_autograd = init != counters["aot_autograd"]["total"]
353
+
354
+ # We update the original (outer) graph with a call into the compiled module
355
+ # instead of the uncompiled one.
356
+ self.module.delete_submodule(n.target)
357
+ n.target = "compiled_" + n.target
358
+ self.module.add_submodule(n.target, compiled_submod_real)
359
+
360
+ # Finally, we have to produce inputs for use compiling the next submodule,
361
+ # and these need to be FakeTensors, so we execute the module under fake_mode
362
+ # Because parameters are not fake we patch fake tensor mode to allow non fake inputs
363
+ with self.fake_mode, mock.patch.object(
364
+ self.fake_mode, "allow_non_fake_inputs", True
365
+ ):
366
+ if has_tracing_context and invoked_aot_autograd:
367
+ out = compiled_submod_real(*new_args, **kwargs)
368
+ # output should be fake or subclass
369
+ assert all(
370
+ (not isinstance(t, torch.Tensor) or type(t) is not torch.Tensor)
371
+ for t in (out if isinstance(out, (list, tuple)) else [out])
372
+ )
373
+ return out
374
+ else:
375
+ return curr_submod(*new_args, **kwargs)
376
+ else:
377
+ # placeholder or output nodes don't need to get compiled, just executed
378
+ return getattr(self, n.op)(n.target, new_args, kwargs)
379
+
380
+
381
+ class DDPOptimizer:
382
+
383
+ """Note [DDPOptimizer]
384
+ DDPOptimizer applies when dynamo compiles models wrapped in DistributedDataParallel (DDP),
385
+ breaking the dynamo graph into chunks to compile separately, with the breaks aligning to
386
+ the boundaries of gradient-allreduce buckets chosen by DDP.
387
+
388
+ Background/Motivation
389
+ - DDP uses allreduce collectives to synchronize partial gradients computed on different workers
390
+ - DDP groups gradient allreduces into 'buckets' to optimize communication efficiency of all-reduce
391
+ - Parameters grouped into buckets are assumed to be adjacent in time, so they become ready
392
+ at around the same time during backward and thus can share the same allreduce efficiently
393
+ - Allreduces must overlap with backward compute for optimal training performance
394
+ - DDP schedules allreduces using 'hooks' fired from the c++ autograd engine in pytorch, which
395
+ operates when individual grads become 'ready'
396
+ - Dynamo+AOTAutograd produces a single fused graph that runs 'atomically' from the perspective of the
397
+ autograd engine, such that all gradients become 'ready' at the same time. Hooks fire after the whole
398
+ fused backward function executes, preventing any overlap of compute and communication
399
+
400
+ Algorithm
401
+ - DDPOptimizer starts off with an FX graph traced by dynamo which represents forward. It can traverse
402
+ this graph in reverse order to determine the true order that gradients will become ready during backward.
403
+ - Parameter sizes are counted in reverse order, up to a bucket size limit, at which point a new bucket is started
404
+ and a graph break introduced
405
+ - Each of the subgraphs is compiled by the compiler provided to dynamo by the user, and then fused back together
406
+ into an outer module that is returned to the user
407
+
408
+ Notes
409
+ - It would be better to enforce (by adding an API to DDP) that the bucket splits chosen here are used by DDP,
410
+ and that DDP does not need to detect or optimize bucket order by observing execution at runtime, as it does
411
+ in eager.
412
+ - If Dynamo can't capture a whole graph for the portion of the model wrapped by DDP, this algorithm will currently
413
+ produce splits that do not necessarily align with the buckets used by DDP. This should result in performance
414
+ degradation approaching the baseline case where graph-splits are not used, but not worse.
415
+ - If the backend compiler fails to compile a single subgraph, it will execute eagerly despite the rest of the
416
+ subgraphs being compiled
417
+ - DDP has a 'parameters_and_buffers_to_ignore' field, which DDPOptimizer attempts to honor by reading markers
418
+ left by DDP on individual parameters. In cases where other transformations, such as reparameterization, are
419
+ also used, the ignore markers could be lost. If DDPOptimizer fails to ignore a parameter ignored by DDP,
420
+ it is not catastrophic but could impact performance by choosing sub-optimal bucket splits.
421
+ - DDPOptimizer always ignores all buffers, regardless of their ignore flag, since buffers do not require gradients,
422
+ and therefore aren't allreduced by DDP. (They are broadcast during forward, but this is not covered by
423
+ DDPOptimizer)
424
+
425
+ Debugging
426
+ - Generally, it is easiest to debug DDPOptimizer in a single process program, using pdb.
427
+ - In many cases, the log messages are helpful (they show bucket size assignments)-
428
+ just set TORCH_LOGS env to include any of 'dynamo', 'distributed', or 'dist_ddp'.
429
+ - See `benchmarks/dynamo/distributed.py` for a simple harness that will run a toy model or a torchbench model
430
+ in a single process (or with torchrun, in multiple processes)
431
+
432
+ Args:
433
+ bucket_bytes_cap (int): Controls the size of buckets, in bytes, used to determine graphbreaks. Should be
434
+ set to match the equivalent parameter on the original DDP module.
435
+
436
+ backend_compile_fn (callable): A dynamo compiler function, to be invoked to compile each subgraph.
437
+
438
+ first_bucket_cap (int): Controls the size of the first bucket. Should match DDP's first bucket cap. DDP
439
+ special-cases the first bucket size since it is sometimes optimal to start a small allreduce early.
440
+
441
+ """
442
+
443
+ def __init__(
444
+ self,
445
+ bucket_bytes_cap: int,
446
+ backend_compile_fn,
447
+ first_bucket_cap: Optional[int] = None,
448
+ ):
449
+ if first_bucket_cap is not None:
450
+ self.first_bucket_cap = first_bucket_cap
451
+ elif torch.distributed.is_available():
452
+ # this constant comes from C10D lib which is not always built
453
+ self.first_bucket_cap = torch.distributed._DEFAULT_FIRST_BUCKET_BYTES
454
+ else:
455
+ self.first_bucket_cap = bucket_bytes_cap
456
+
457
+ self.bucket_bytes_cap = bucket_bytes_cap
458
+ assert (
459
+ self.first_bucket_cap <= self.bucket_bytes_cap
460
+ ), "First bucket should be smaller/equal to other buckets to get comms warmed up ASAP"
461
+
462
+ self.backend_compile_fn = backend_compile_fn
463
+
464
+ def _ignore_parameter(self, parameter):
465
+ return hasattr(parameter, "_ddp_ignored") and parameter._ddp_ignored
466
+
467
+ def compile_fn(self, gm: fx.GraphModule, example_inputs: List[torch.Tensor]):
468
+ """
469
+ Implements graph splitting, first determining a set of of buckets by counting
470
+ parameter sizes in reverse graph order, then invoking the user/backend compiler
471
+ to compile each subgraph. Finally, stiches compiled graphs into one graphmodule
472
+ and returns its callable.
473
+ """
474
+ if has_higher_order_op(gm):
475
+ # This indicates presence of a higher order op. For now, we
476
+ # have no way to break the higher order op into two buckets.
477
+ # Allowing higher order ops in the graph also requires
478
+ # changes in the split_module, becuase graph splitter
479
+ # currently assumes that all the args of all ops are
480
+ # tensors, but in the case of higher order ops, it could be
481
+ # a graph module. As a workaround, we are shortcircuiting
482
+ raise NotImplementedError(
483
+ "DDPOptimizer backend: Found a higher order op in the graph. "
484
+ "This is not supported. Please turn off DDP optimizer using "
485
+ "torch._dynamo.config.optimize_ddp=False. Note that this can "
486
+ "cause performance degradation because there will be one bucket "
487
+ "for the entire Dynamo graph. Please refer to this issue - "
488
+ "https://github.com/pytorch/pytorch/issues/104674."
489
+ )
490
+
491
+ # 1: compute the partition map according to DDP bucket logic
492
+ buckets = [Bucket()] # (size, param_names)
493
+ for node in reversed(gm.graph.nodes):
494
+ if node.op in ("output", "placeholder"):
495
+ continue
496
+
497
+ if (
498
+ buckets[0].size >= self.bucket_bytes_cap
499
+ or len(buckets) == 1
500
+ and buckets[0].size >= self.first_bucket_cap
501
+ ):
502
+ if bucket_has_external_output(buckets[0]):
503
+ buckets.insert(0, Bucket())
504
+ else:
505
+ # continue building this bucket past the point of filling its parameter capacity,
506
+ # to increase chances it contains at least one node that is either a global output or
507
+ # passed as input to a subsequent graph
508
+
509
+ if buckets[0].opcount_increased_to_capture_external_output == 0:
510
+ buckets[0].paramsize_before_opcount_increase = buckets[0].size
511
+ buckets[0].opcount_increased_to_capture_external_output += 1
512
+
513
+ if node.op == "call_module":
514
+ target = gm.get_submodule(node.target)
515
+ for name, param in target.named_parameters():
516
+ if param.requires_grad and not self._ignore_parameter(param):
517
+ buckets[0].size += param.untyped_storage().nbytes()
518
+ buckets[0].params.append(f"{node.target}_{name}")
519
+ buckets[0].param_ids.append(id(param))
520
+ elif node.op == "get_attr":
521
+ maybe_param = getattr(gm, node.target)
522
+ if maybe_param.requires_grad and not self._ignore_parameter(
523
+ maybe_param
524
+ ):
525
+ buckets[0].size += maybe_param.untyped_storage().nbytes()
526
+ buckets[0].params.append(node.target)
527
+ buckets[0].param_ids.append(id(maybe_param))
528
+
529
+ # All nodes have to be mapped to a bucket, even if they don't have their own params
530
+ # Ignored params still end up in buckets, we just don't count them towards the capacity
531
+ buckets[0].nodes.append(node)
532
+
533
+ if len(buckets) > 1 and buckets[0].size == 0:
534
+ # we collected a small preamble graph with ops that don't include parameters, fuse it back
535
+ buckets[1].nodes.extend(buckets[0].nodes)
536
+ assert len(buckets[0].params) == 0, "Params should be empty if size is 0"
537
+ del buckets[0]
538
+
539
+ # stash buckets for testing/debugging purposes
540
+ self.buckets = buckets
541
+ pretty_print_buckets(buckets, self.bucket_bytes_cap)
542
+
543
+ if len(buckets) == 1:
544
+ # bypass split/fuse logic if there is only one bucket
545
+ return self.backend_compile_fn(gm, example_inputs)
546
+
547
+ # 2: partition the graphmodule according to bucket capacity
548
+ partition_map = {}
549
+ for idx, b in enumerate(buckets):
550
+ for node in b.nodes:
551
+ partition_map[node] = idx
552
+
553
+ split_gm = fx.passes.split_module.split_module(
554
+ gm, None, lambda node: partition_map[node]
555
+ )
556
+
557
+ debug_str = (
558
+ f"\n---orig graph---\n{gm.graph}\n"
559
+ + f"\n---split graph---\n{split_gm.graph}\n"
560
+ )
561
+ for name, module in split_gm.named_modules():
562
+ if "." not in name and len(name):
563
+ # only print the submod graphs, not their children
564
+ debug_str += f"\n---{name} graph---\n{module.graph}\n"
565
+ debug_str += "\n---------------\n"
566
+ ddp_graph_log.debug(debug_str)
567
+
568
+ trace_structured(
569
+ "optimize_ddp_split_graph",
570
+ payload_fn=lambda: split_gm.print_readable(print_output=False),
571
+ )
572
+ for name, module in split_gm.named_modules():
573
+ if "." not in name and len(name):
574
+ trace_structured(
575
+ "optimize_ddp_split_child",
576
+ lambda: {"name": name},
577
+ payload_fn=lambda: module.print_readable(print_output=False),
578
+ )
579
+
580
+ # NOTE, we want to enable `optimize_ddp_lazy_compile` by default as soon as possible,
581
+ # because it will fix stride mismatch errors (see motivation: https://github.com/pytorch/pytorch/pull/114154).
582
+ # However, lazy compile currently causes shape mismatch in other cases (`test_graph_split_inductor_transpose`)
583
+ # and we need to fix them before we can enable it by default.
584
+ if not torch._dynamo.config.optimize_ddp_lazy_compile:
585
+ # Today, optimize_ddp=True and keep_output_stride=False can lead to silent
586
+ # correctness issues. The problem is that ddp_optimizer works by partitioning
587
+ # the dynamo graph, sending each subgraph through aot autograd to inductor,
588
+ # and creates example inputs by eagerly interpreting each subgraph to get
589
+ # an output that with the same metadata that we'd get from eager mode.
590
+ # This is a problem though, for torch._inductor.config.keep_output_stride.
591
+ # The above config can cause the outputs of the first graph to have
592
+ # **different** strides from eager, causing the inputs that we pass
593
+ # to the second graph to be wrong.
594
+ # To really fix this, we would need to faithfully ask inductor
595
+ # what the outputs to each graph it expects are.
596
+ fake_mode = detect_fake_mode(example_inputs)
597
+ if fake_mode is None:
598
+ fake_mode = torch._subclasses.fake_tensor.FakeTensorMode()
599
+
600
+ if torch._dynamo.config.optimize_ddp_lazy_compile:
601
+ submod_compiler = SubmoduleReplacer(split_gm, self.backend_compile_fn)
602
+ else:
603
+ submod_compiler = SubmodCompiler(
604
+ split_gm, self.backend_compile_fn, fake_mode
605
+ )
606
+ submod_compiler.run(*example_inputs)
607
+ split_gm.recompile()
608
+
609
+ ddp_graph_log.debug(
610
+ "\n---final graph---\n%s\n---------------\n", split_gm.graph
611
+ )
612
+ return split_gm
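
The bucketing pass above walks the Dynamo graph in reverse, accumulating parameter bytes until bucket_bytes_cap (or first_bucket_cap for the first bucket) is reached, then opens a new bucket and finally splits the module along those boundaries. The snippet below is a minimal, self-contained sketch of that partitioning idea only; the function name and toy sizes are illustrative, and it omits everything the real DDPOptimizer also handles (external outputs, ignored parameters, the zero-size preamble merge).

# Illustrative sketch of the reverse-order bucketing described in compile_fn's
# docstring. Each entry is (node_name, parameter_bytes); sizes are made up.
def sketch_partition(nodes_with_param_bytes, bucket_bytes_cap):
    buckets = [[]]
    size = 0
    for node, nbytes in reversed(nodes_with_param_bytes):
        if size >= bucket_bytes_cap and buckets[-1]:
            buckets.append([])  # current bucket reached its cap, open a new one
            size = 0
        buckets[-1].append(node)
        size += nbytes
    return list(reversed(buckets))  # restore original graph order

print(sketch_partition([("a", 4), ("b", 8), ("c", 16), ("d", 4)], bucket_bytes_cap=12))
# [['b', 'a'], ['d', 'c']]
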
venv/lib/python3.10/site-packages/torch/_dynamo/backends/inductor.py ADDED
@@ -0,0 +1,16 @@
1
+ # mypy: ignore-errors
2
+
3
+ import sys
4
+
5
+ from torch._dynamo import register_backend
6
+
7
+
8
+ @register_backend
9
+ def inductor(*args, **kwargs):
10
+ if sys.platform == "win32":
11
+ raise RuntimeError("Windows not yet supported for inductor")
12
+
13
+ # do import here to avoid loading inductor into memory when it is not used
14
+ from torch._inductor.compile_fx import compile_fx
15
+
16
+ return compile_fx(*args, **kwargs)
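
For context, this backend is normally reached through torch.compile; "inductor" is also the default backend, so naming it explicitly mainly matters when comparing against other registered backends. A small sketch (note the guard above: this raises on Windows):

import torch

@torch.compile(backend="inductor")  # explicit here; "inductor" is also the default
def f(x):
    return torch.sin(x) + torch.cos(x)

print(f(torch.randn(8)))
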
venv/lib/python3.10/site-packages/torch/_dynamo/backends/onnxrt.py ADDED
@@ -0,0 +1,37 @@
1
+ # mypy: ignore-errors
2
+
3
+ # This backend is maintained by ONNX team. To direct issues
4
+ # to the right people, please tag related GitHub issues with `module: onnx`.
5
+ #
6
+ # Maintainers' Github IDs: wschin, thiagocrepaldi, BowenBao, abock
7
+ from torch.onnx._internal.onnxruntime import (
8
+ is_onnxrt_backend_supported,
9
+ torch_compile_backend,
10
+ )
11
+ from .registry import register_backend
12
+
13
+
14
+ def has_onnxruntime():
15
+ # FIXME(abock): update test/dynamo/test_backends.py to call is_onnxrt_backend_supported()
16
+ return is_onnxrt_backend_supported()
17
+
18
+
19
+ if is_onnxrt_backend_supported():
20
+ register_backend(name="onnxrt", compiler_fn=torch_compile_backend)
21
+ else:
22
+
23
+ def information_displaying_backend(*args, **kwargs):
24
+ raise ImportError(
25
+ "onnxrt is not registered as a backend. "
26
+ "Please make sure all dependencies such as "
27
+ "numpy, onnx, onnxscript, and onnxruntime-training are installed. "
28
+ "Suggested procedure to fix dependency problem:\n"
29
+ " (1) pip or conda install numpy onnx onnxscript onnxruntime-training.\n"
30
+ " (2) Open a new python terminal.\n"
31
+ " (3) Call the API `torch.onnx.is_onnxrt_backend_supported()`:\n"
32
+ " (4) If it returns `True`, then you can use `onnxrt` backend.\n"
33
+ " (5) If it returns `False`, please execute the package importing section in "
34
+ "torch/onnx/_internal/onnxruntime.py under pdb line-by-line to see which import fails."
35
+ )
36
+
37
+ register_backend(name="onnxrt", compiler_fn=information_displaying_backend)
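
Following the guidance in the error message above, a caller can probe for support before requesting this backend. A minimal sketch; the Linear module and random input are placeholders:

import torch

if torch.onnx.is_onnxrt_backend_supported():
    compiled = torch.compile(torch.nn.Linear(4, 4), backend="onnxrt")
    compiled(torch.randn(2, 4))
else:
    print("onnxrt backend unavailable; see the dependency list above")
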
venv/lib/python3.10/site-packages/torch/_dynamo/backends/registry.py ADDED
@@ -0,0 +1,115 @@
1
+ # mypy: ignore-errors
2
+
3
+ import functools
4
+ import sys
5
+ from typing import Callable, Dict, List, Optional, Protocol, Sequence, Tuple
6
+
7
+ import torch
8
+ from torch import fx
9
+
10
+
11
+ class CompiledFn(Protocol):
12
+ def __call__(self, *args: torch.Tensor) -> Tuple[torch.Tensor, ...]:
13
+ ...
14
+
15
+
16
+ CompilerFn = Callable[[fx.GraphModule, List[torch.Tensor]], CompiledFn]
17
+
18
+ _BACKENDS: Dict[str, CompilerFn] = dict()
19
+
20
+
21
+ def register_backend(
22
+ compiler_fn: Optional[CompilerFn] = None,
23
+ name: Optional[str] = None,
24
+ tags: Sequence[str] = (),
25
+ ):
26
+ """
27
+ Decorator to add a given compiler to the registry to allow calling
28
+ `torch.compile` with string shorthand. Note: for projects not
29
+ imported by default, it might be easier to pass a function directly
30
+ as a backend and not use a string.
31
+
32
+ Args:
33
+ compiler_fn: Callable taking a FX graph and fake tensor inputs
34
+ name: Optional name, defaults to `compiler_fn.__name__`
35
+ tags: Optional set of string tags to categorize backend with
36
+ """
37
+ if compiler_fn is None:
38
+ # @register_backend(name="") syntax
39
+ return functools.partial(register_backend, name=name, tags=tags)
40
+ assert callable(compiler_fn)
41
+ name = name or compiler_fn.__name__
42
+ assert name not in _BACKENDS, f"duplicate name: {name}"
43
+ _BACKENDS[name] = compiler_fn
44
+ compiler_fn._tags = tuple(tags)
45
+ return compiler_fn
46
+
47
+
48
+ register_debug_backend = functools.partial(register_backend, tags=("debug",))
49
+ register_experimental_backend = functools.partial(
50
+ register_backend, tags=("experimental",)
51
+ )
52
+
53
+
54
+ def lookup_backend(compiler_fn):
55
+ """Expand backend strings to functions"""
56
+ if isinstance(compiler_fn, str):
57
+ if compiler_fn not in _BACKENDS:
58
+ _lazy_import()
59
+ if compiler_fn not in _BACKENDS:
60
+ _lazy_import_entry_point(compiler_fn)
61
+ if compiler_fn not in _BACKENDS:
62
+ from ..exc import InvalidBackend
63
+
64
+ raise InvalidBackend(name=compiler_fn)
65
+ compiler_fn = _BACKENDS[compiler_fn]
66
+ return compiler_fn
67
+
68
+
69
+ def list_backends(exclude_tags=("debug", "experimental")) -> List[str]:
70
+ """
71
+ Return valid strings that can be passed to:
72
+
73
+ torch.compile(..., backend="name")
74
+ """
75
+ _lazy_import()
76
+ exclude_tags = set(exclude_tags or ())
77
+ return sorted(
78
+ [
79
+ name
80
+ for name, backend in _BACKENDS.items()
81
+ if not exclude_tags.intersection(backend._tags)
82
+ ]
83
+ )
84
+
85
+
86
+ @functools.lru_cache(None)
87
+ def _lazy_import():
88
+ from .. import backends
89
+ from ..utils import import_submodule
90
+
91
+ import_submodule(backends)
92
+
93
+ from ..repro.after_dynamo import dynamo_minifier_backend
94
+
95
+ assert dynamo_minifier_backend is not None
96
+
97
+
98
+ @functools.lru_cache(None)
99
+ def _lazy_import_entry_point(backend_name: str):
100
+ from importlib.metadata import entry_points
101
+
102
+ compiler_fn = None
103
+ group_name = "torch_dynamo_backends"
104
+ if sys.version_info < (3, 10):
105
+ backend_eps = entry_points()
106
+ eps = [ep for ep in backend_eps.get(group_name, ()) if ep.name == backend_name]
107
+ if len(eps) > 0:
108
+ compiler_fn = eps[0].load()
109
+ else:
110
+ backend_eps = entry_points(group=group_name)
111
+ if backend_name in backend_eps.names:
112
+ compiler_fn = backend_eps[backend_name].load()
113
+
114
+ if compiler_fn is not None and backend_name not in list_backends(tuple()):
115
+ register_backend(compiler_fn=compiler_fn, name=backend_name)
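
A sketch of how this registry is used from user code: decorate a compiler function (it receives the captured fx.GraphModule plus example inputs and returns a callable), then refer to it by name in torch.compile. Third-party packages can instead expose a backend through the torch_dynamo_backends entry-point group that _lazy_import_entry_point resolves. The backend body below is purely illustrative.

import torch
from torch._dynamo import register_backend

@register_backend
def my_debug_backend(gm, example_inputs):
    print(gm.graph)    # illustrative: just inspect the captured FX graph
    return gm.forward  # fall back to eager execution of the captured graph

fn = torch.compile(lambda x: x * 2 + 1, backend="my_debug_backend")
fn(torch.randn(4))
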
venv/lib/python3.10/site-packages/torch/_dynamo/backends/tensorrt.py ADDED
@@ -0,0 +1,14 @@
1
+ # mypy: ignore-errors
2
+
3
+ # import torch # type: ignore[import]
4
+ # from .common import device_from_inputs, fake_tensor_unsupported # type: ignore[import]
5
+ # from .registry import register_backend # type: ignore[import]
6
+
7
+ """
8
+ Placeholder for TensorRT backend for dynamo via torch-tensorrt
9
+ """
10
+
11
+ # @register_backend
12
+ # def tensorrt(gm, example_inputs):
13
+ # import torch_tensorrt # type: ignore[import]
14
+ # pass
venv/lib/python3.10/site-packages/torch/_dynamo/backends/torchxla.py ADDED
@@ -0,0 +1,75 @@
1
+ # mypy: ignore-errors
2
+
3
+ import logging
4
+ import warnings
5
+
6
+ from functorch.compile import make_boxed_func
7
+
8
+ from ..backends.common import aot_autograd
9
+ from .registry import register_backend, register_experimental_backend
10
+
11
+ log = logging.getLogger(__name__)
12
+
13
+
14
+ @register_experimental_backend
15
+ def torchxla_trivial(gm, fake_tensor_inputs):
16
+ return gm
17
+
18
+
19
+ @register_experimental_backend
20
+ def torchxla_trace_once(model, fake_tensor_inputs):
21
+ warnings.warn(
22
+ "This backend will be deprecated in 2.2, please use `openxla` backend instead"
23
+ )
24
+
25
+ return xla_backend_helper(model, fake_tensor_inputs)
26
+
27
+
28
+ @register_backend
29
+ def openxla_eval(model, fake_tensor_inputs):
30
+ return xla_backend_helper(model, fake_tensor_inputs, boxed=False)
31
+
32
+
33
+ def openxla_eval_boxed(model, fake_tensor_inputs):
34
+ return xla_backend_helper(model, fake_tensor_inputs, boxed=True)
35
+
36
+
37
+ def xla_backend_helper(model, fake_tensor_inputs, boxed=False):
38
+ try:
39
+ import torch_xla.core.dynamo_bridge as bridge
40
+ except ImportError as e:
41
+ raise ImportError(
42
+ "Please follow the instruction in https://github.com/pytorch/xla#pytorchxla to install torch_xla"
43
+ ) from e
44
+
45
+ compiled_graph = None
46
+
47
+ def fwd(*args):
48
+ nonlocal model
49
+ nonlocal compiled_graph
50
+ if compiled_graph is None:
51
+ compiled_graph = bridge.extract_compiled_graph(model, args)
52
+ del model
53
+ return compiled_graph(*args)
54
+
55
+ return make_boxed_func(fwd) if boxed else fwd
56
+
57
+
58
+ aot_torchxla_trivial = aot_autograd(
59
+ fw_compiler=torchxla_trivial,
60
+ )
61
+ register_experimental_backend(
62
+ name="aot_torchxla_trivial", compiler_fn=aot_torchxla_trivial
63
+ )
64
+
65
+ aot_torchxla_trace_once = aot_autograd(
66
+ fw_compiler=torchxla_trace_once,
67
+ )
68
+ register_experimental_backend(
69
+ name="aot_torchxla_trace_once", compiler_fn=aot_torchxla_trace_once
70
+ )
71
+
72
+ openxla = aot_autograd(
73
+ fw_compiler=openxla_eval_boxed,
74
+ )
75
+ register_backend(name="openxla", compiler_fn=openxla)
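
Usage sketch for the openxla backend registered above. It assumes the torch_xla package is installed and that the model and inputs already live on an XLA device; the model and tensor shapes are placeholders:

import torch
import torch_xla.core.xla_model as xm  # requires the torch_xla package

device = xm.xla_device()
model = torch.nn.Linear(8, 8).to(device)
compiled = torch.compile(model, backend="openxla")
out = compiled(torch.randn(2, 8, device=device))
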
venv/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py ADDED
@@ -0,0 +1,172 @@
1
+ # mypy: ignore-errors
2
+
3
+ import functools
4
+ import importlib
5
+ import logging
6
+ import os
7
+ import tempfile
8
+
9
+ import torch
10
+ from .common import device_from_inputs, fake_tensor_unsupported
11
+
12
+ from .registry import register_backend
13
+
14
+ log = logging.getLogger(__name__)
15
+
16
+
17
+ @register_backend
18
+ @fake_tensor_unsupported
19
+ def tvm(gm, example_inputs, *, scheduler=None, trials=20000):
20
+ import tvm # type: ignore[import]
21
+ from tvm import relay # type: ignore[import]
22
+ from tvm.contrib import graph_executor # type: ignore[import]
23
+
24
+ jit_mod = torch.jit.trace(gm, example_inputs)
25
+ device = device_from_inputs(example_inputs)
26
+ shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)]
27
+ example_outputs = gm(*example_inputs)
28
+ if len(example_outputs) == 0:
29
+ log.warning("Explicitly fall back to eager due to zero output")
30
+ return gm.forward
31
+ mod, params = relay.frontend.from_pytorch(jit_mod, shape_list)
32
+ if device.type == "cuda":
33
+ dev = tvm.cuda(device.index)
34
+ target = tvm.target.cuda()
35
+ else:
36
+ dev = tvm.cpu(0)
37
+ target = tvm.target.Target(llvm_target())
38
+
39
+ if scheduler is None:
40
+ scheduler = os.environ.get("TVM_SCHEDULER", None)
41
+
42
+ if scheduler == "auto_scheduler":
43
+ from tvm import auto_scheduler
44
+
45
+ log_file = tempfile.NamedTemporaryFile()
46
+
47
+ if not os.path.exists(log_file):
48
+ tasks, task_weights = auto_scheduler.extract_tasks(
49
+ mod["main"], params, target
50
+ )
51
+ for task in tasks:
52
+ print(task.compute_dag)
53
+ else:
54
+ print("No tasks")
55
+ if len(tasks) != 0:
56
+ tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
57
+ if not os.path.exists(log_file):
58
+ assert trials > 0
59
+ tune_option = auto_scheduler.TuningOptions(
60
+ num_measure_trials=trials,
61
+ measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
62
+ early_stopping=2000,
63
+ )
64
+ try:
65
+ tuner.tune(tune_option)
66
+ except Exception:
67
+ if os.path.exists(log_file):
68
+ os.unlink(log_file)
69
+ raise
70
+
71
+ with auto_scheduler.ApplyHistoryBest(log_file):
72
+ with tvm.transform.PassContext(
73
+ opt_level=3, config={"relay.backend.use_auto_scheduler": True}
74
+ ):
75
+ lib = relay.build(mod, target=target, params=params)
76
+ elif scheduler == "meta_schedule":
77
+ from tvm import meta_schedule as ms
78
+
79
+ with tempfile.TemporaryDirectory() as work_dir:
80
+ if device.type != "cuda":
81
+ # meta_schedule needs num-cores to be specified
82
+ # here we use the maximum core count
83
+ target = tvm.target.Target(
84
+ f"{llvm_target()} --num-cores {ms.utils.cpu_count(logical=False)}"
85
+ )
86
+ # TODO(shingjan): This could be replaced by tvm.contrib.torch.optimize_torch
87
+ # once USE_PT_TVMDSOOP is updated and turned on by default in TVM.
88
+ database = ms.relay_integration.tune_relay(
89
+ mod=mod,
90
+ target=target,
91
+ work_dir=work_dir,
92
+ max_trials_global=20000,
93
+ num_trials_per_iter=64,
94
+ params=params,
95
+ strategy="evolutionary",
96
+ )
97
+ lib = ms.relay_integration.compile_relay(
98
+ database=database,
99
+ mod=mod,
100
+ target=target,
101
+ params=params,
102
+ )
103
+ elif scheduler == "default" or not scheduler:
104
+ # no autotuning
105
+ with tvm.transform.PassContext(opt_level=10):
106
+ lib = relay.build(mod, target=target, params=params)
107
+ else:
108
+ raise NotImplementedError(
109
+ "This tuning option is invalid/not implemented for torchdynamo's TVM-related backend. "
110
+ "There are three available options: default, auto_scheduler and meta_schedule."
111
+ )
112
+ m = graph_executor.GraphModule(lib["default"](dev))
113
+
114
+ def to_torch_tensor(nd_tensor):
115
+ """A helper function to transfer a NDArray to torch.tensor."""
116
+ if nd_tensor.dtype == "bool":
117
+ # DLPack does not support boolean so it can't be handled by
118
+ # torch.utils.dlpack.from_pack. Workaround by going through
119
+ # numpy, although this brings additional data copy overhead.
120
+ return torch.from_numpy(nd_tensor.numpy())
121
+ return torch.utils.dlpack.from_dlpack(nd_tensor.to_dlpack())
122
+
123
+ def to_tvm_tensor(torch_tensor):
124
+ """A helper function to transfer a torch.tensor to NDArray."""
125
+ if torch_tensor.dtype == torch.bool:
126
+ # same reason as above, fallback to numpy conversion which
127
+ # could introduce data copy overhead
128
+ return tvm.nd.array(torch_tensor.cpu().numpy())
129
+ return tvm.nd.from_dlpack(torch_tensor)
130
+
131
+ def exec_tvm(*i_args):
132
+ args = [a.contiguous() for a in i_args]
133
+ shape_info, _ = m.get_input_info()
134
+ active_inputs = {name for name, _ in shape_info.items()}
135
+ for idx, arg in enumerate(args, 0):
136
+ if arg.dim() != 0:
137
+ if arg.requires_grad:
138
+ arg = arg.detach()
139
+ inp_name = f"inp_{idx}"
140
+ if inp_name not in active_inputs:
141
+ log.warning(
142
+ "input %s skipped as not found in tvm's runtime library",
143
+ inp_name,
144
+ )
145
+ continue
146
+ m.set_input(
147
+ inp_name,
148
+ to_tvm_tensor(arg),
149
+ )
150
+ m.run()
151
+ return [to_torch_tensor(m.get_output(i)) for i in range(m.get_num_outputs())]
152
+
153
+ return exec_tvm
154
+
155
+
156
+ tvm_meta_schedule = functools.partial(tvm, scheduler="meta_schedule")
157
+ tvm_auto_scheduler = functools.partial(tvm, scheduler="auto_scheduler")
158
+
159
+
160
+ def has_tvm():
161
+ try:
162
+ importlib.import_module("tvm")
163
+ return True
164
+ except ImportError:
165
+ return False
166
+
167
+
168
+ @functools.lru_cache(None)
169
+ def llvm_target():
170
+ if "avx512" in open("/proc/cpuinfo").read():
171
+ return "llvm -mcpu=skylake-avx512"
172
+ return "llvm -mcpu=core-avx2"
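
Usage sketch for the TVM backend above, assuming the tvm package is installed. The scheduler can be chosen through the TVM_SCHEDULER environment variable that the code reads, or by passing a pre-configured callable as the backend (torch.compile accepts any callable), the same way the module builds tvm_meta_schedule with functools.partial:

import functools
import torch
from torch._dynamo.backends.tvm import tvm as tvm_backend

# "default" skips autotuning; "auto_scheduler" / "meta_schedule" enable tuning runs.
backend = functools.partial(tvm_backend, scheduler="default")

compiled = torch.compile(torch.nn.Linear(4, 4), backend=backend)
compiled(torch.randn(2, 4))
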
venv/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc ADDED
Binary file (24.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_dynamo.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py ADDED
@@ -0,0 +1,932 @@
1
+ import argparse
2
+ import copy
3
+ import functools
4
+ import io
5
+ import logging
6
+ import os
7
+ import shutil
8
+ import subprocess
9
+ import sys
10
+ import textwrap
11
+ import uuid
12
+ from importlib import import_module
13
+ from tempfile import TemporaryFile
14
+ from typing import Any, Callable, Dict, Union
15
+
16
+ import torch
17
+ import torch.fx as fx
18
+ import torch.nn as nn
19
+ from torch._dynamo.debug_utils import (
20
+ _cuda_system_info_comment,
21
+ AccuracyError,
22
+ backend_accuracy_fails,
23
+ BuckTargetWriter,
24
+ cast_to_fp64,
25
+ extra_imports,
26
+ generate_config_string,
27
+ helper_for_dump_minify,
28
+ InputReader,
29
+ InputWriter,
30
+ MAX_CONSTANT_NUMEL_INLINE,
31
+ minifier_dir,
32
+ NNModuleToString,
33
+ NopInputReader,
34
+ same_two_models,
35
+ )
36
+ from torch._dynamo.utils import clone_inputs, counters, same
37
+ from torch.fx.experimental.proxy_tensor import make_fx
38
+ from torch.fx.experimental.symbolic_shapes import (
39
+ fx_placeholder_targets,
40
+ has_free_symbols,
41
+ )
42
+ from torch.hub import tqdm
43
+
44
+ from .. import config
45
+
46
+ log = logging.getLogger(__name__)
47
+
48
+
49
+ inductor_config = import_module("torch._inductor.config")
50
+ use_buck = inductor_config.is_fbcode()
51
+
52
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
53
+ # MAIN ENTRY POINT
54
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
55
+
56
+
57
+ def wrap_compiler_debug(unconfigured_compiler_fn, compiler_name: str):
58
+ """
59
+ Minifier for Fx Graph modules after Aot Autograd has finished. We wrap both
60
+ forward and backward call separately with the backend compiler_fn - like
61
+ forward and backward calls separately with the backend compiler_fn - like
62
+ inductor or nvfuser. Intercepting after Aot Autograd presents a neat
63
+ to save the graph as a string.
64
+ """
65
+
66
+ @functools.wraps(unconfigured_compiler_fn)
67
+ def debug_wrapper(gm, example_inputs, **kwargs):
68
+ from torch._subclasses import FakeTensorMode
69
+
70
+ compiler_fn = functools.partial(unconfigured_compiler_fn, **kwargs)
71
+
72
+ from torch._functorch.aot_autograd import get_aot_graph_name
73
+
74
+ graph_name = get_aot_graph_name()
75
+
76
+ # TODO: why do we need to deepcopy the original graph?
77
+ orig_graph = copy.deepcopy(gm.graph)
78
+ assert config.repro_after in ("dynamo", "aot", None)
79
+
80
+ try:
81
+ # Call the compiler_fn - which is either aot_autograd or inductor
82
+ # with fake inputs
83
+ inner_compiled_fn = compiler_fn(gm, example_inputs)
84
+ except Exception as e:
85
+ # TODO: Failures here are troublesome because no real inputs,
86
+ # need a different serialization strategy
87
+ if config.repro_after == "aot":
88
+ if config.repro_level == 1:
89
+ dump_compiler_graph_state(
90
+ fx.GraphModule(gm, orig_graph),
91
+ example_inputs,
92
+ compiler_name,
93
+ )
94
+ elif config.repro_level == 2:
95
+ dump_to_minify(
96
+ fx.GraphModule(gm, orig_graph),
97
+ example_inputs,
98
+ compiler_name,
99
+ )
100
+ log.error("CompilerError")
101
+ raise
102
+
103
+ # We may run regular PyTorch compute that may trigger Dynamo, do NOT
104
+ # recursively attempt to accuracy minify in that case!
105
+ def deferred_for_real_inputs(real_inputs):
106
+ # This is a bit obscure: if we recursively try to accuracy minify
107
+ # the SAME function, this would trigger. But most of the time
108
+ # we should never hit this branch
109
+ if config.repro_after != "aot":
110
+ return inner_compiled_fn(real_inputs)
111
+ with config.patch(repro_after=None):
112
+ return inner_debug_fn(real_inputs)
113
+
114
+ def inner_debug_fn(real_inputs):
115
+ """
116
+ Aot Autograd fw_compiler and bw_compiler can have fake tensors. So,
117
+ example_inputs can be fake tensors. We can call compiler_fn (which is
118
+ inductor or nvfuser) with fake tensors but the actual compiled_fn
119
+ should be called with real tensors. Therefore, the actual invocation
120
+ is deferred.
121
+ """
122
+ # Copy the tensor attrs like shape, stride etc by converting to Fake Tensor
123
+ # because inductor clears the tensor list in its codegen. And example_inputs
124
+ # are available only for the first invocation.
125
+ fake_mode = FakeTensorMode()
126
+ copy_tensor_attrs = [
127
+ fake_mode.from_tensor(x) if isinstance(x, torch.Tensor) else x
128
+ for x in real_inputs
129
+ ]
130
+ if config.repro_level == 3:
131
+ # Always dump the original module in case we have segfaults
132
+ dump_to_minify(
133
+ fx.GraphModule(gm, orig_graph), real_inputs, compiler_name
134
+ )
135
+
136
+ if config.repro_level == 4:
137
+ if compiler_name != "inductor":
138
+ raise NotImplementedError(
139
+ "Accuracy minification is supported for inductor only"
140
+ )
141
+ if backend_aot_accuracy_fails(gm, real_inputs, compiler_fn):
142
+ log.warning(
143
+ "Accuracy failed for the AOT Autograd graph %s", graph_name
144
+ )
145
+ dump_compiler_graph_state(
146
+ fx.GraphModule(gm, orig_graph),
147
+ real_inputs,
148
+ f"{compiler_name}_accuracy",
149
+ )
150
+ dump_to_minify(
151
+ fx.GraphModule(gm, orig_graph),
152
+ real_inputs,
153
+ f"{compiler_name}_accuracy",
154
+ )
155
+ raise AccuracyError("Bad accuracy detected")
156
+ else:
157
+ # Call the compiled function with real inputs
158
+ return inner_compiled_fn(real_inputs)
159
+ else:
160
+ try:
161
+ # Call the compiled function with real inputs
162
+ out = inner_compiled_fn(real_inputs)
163
+ # sync cuda kernels to ensure IMA detection
164
+ for arg in example_inputs:
165
+ if isinstance(arg, torch.Tensor) and arg.is_cuda:
166
+ torch.cuda.synchronize()
167
+ break
168
+ return out
169
+ except Exception as e:
170
+ if config.repro_level == 1:
171
+ dump_compiler_graph_state(
172
+ fx.GraphModule(gm, orig_graph),
173
+ copy_tensor_attrs,
174
+ compiler_name,
175
+ )
176
+ elif config.repro_level == 2:
177
+ dump_to_minify(
178
+ fx.GraphModule(gm, orig_graph),
179
+ copy_tensor_attrs,
180
+ compiler_name,
181
+ )
182
+ raise
183
+
184
+ if config.repro_after == "aot":
185
+ compiled_fn = deferred_for_real_inputs
186
+ compiled_fn._boxed_call = True # type: ignore[attr-defined]
187
+ return compiled_fn
188
+ else:
189
+ return inner_compiled_fn
190
+
191
+ return debug_wrapper
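
A sketch of how this after-AOT hook is typically switched on from user code. repro_after and repro_level are the same torch._dynamo.config knobs checked above; the model, backend, and inputs are placeholders:

import torch
import torch._dynamo

torch._dynamo.config.repro_after = "aot"  # intercept failures after AOT Autograd
torch._dynamo.config.repro_level = 2      # 1: dump graph state, 2: dump a minifier script,
                                          # 3: always dump, 4: also minify accuracy failures

compiled = torch.compile(torch.nn.Linear(8, 8), backend="inductor")
compiled(torch.randn(4, 8))  # on failure, repro/minifier scripts land under the minifier dir
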
192
+
193
+
194
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
195
+ # DUMP REPROS
196
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
197
+
198
+
199
+ def generate_compiler_repro_string(gm, args, *, stable_output=False, save_dir=None):
200
+ model_str = textwrap.dedent(
201
+ f"""
202
+ import torch
203
+ from torch import tensor, device
204
+ import torch.fx as fx
205
+ from torch._dynamo.testing import rand_strided
206
+ from math import inf
207
+ import torch._inductor.inductor_prims
208
+
209
+ {generate_config_string(stable_output=stable_output)}
210
+
211
+ isolate_fails_code_str = None
212
+
213
+ {extra_imports}
214
+
215
+ """
216
+ )
217
+ if not stable_output:
218
+ model_str += f"# torch version: {torch.version.__version__}\n"
219
+ if hasattr(torch.version, "cuda"):
220
+ model_str += f"# torch cuda version: {torch.version.cuda}\n"
221
+ if hasattr(torch.version, "git_version"):
222
+ model_str += f"# torch git version: {torch.version.git_version}\n\n\n"
223
+ model_str += _cuda_system_info_comment()
224
+
225
+ model_str += NNModuleToString.convert(gm)
226
+
227
+ # get hint shape/stride when dynamic shape enabled
228
+ def hint_if_symint(x):
229
+ return tuple(i.node.hint if isinstance(i, torch.SymInt) else i for i in x)
230
+
231
+ writer = InputWriter(save_dir)
232
+ for placeholder, arg in zip(fx_placeholder_targets(gm), args):
233
+ if isinstance(arg, (int, torch.SymInt)):
234
+ writer.symint(placeholder, arg)
235
+ elif isinstance(arg, torch.Tensor):
236
+ # TODO: improve these names with FQN
237
+ writer.tensor(placeholder, arg)
238
+ else:
239
+ raise TypeError(f"arg is neither SymInt/int nor torch.Tensor, {arg}")
240
+
241
+ model_str += "\n".join(writer.lines()) + "\n"
242
+
243
+ model_str += "mod = Repro()\n"
244
+ return model_str
245
+
246
+
247
+ def save_graph_repro(
248
+ fd,
249
+ gm,
250
+ args,
251
+ compiler_name,
252
+ *,
253
+ stable_output=False,
254
+ save_dir=None,
255
+ command="run",
256
+ accuracy=None,
257
+ tracing_mode=None,
258
+ check_str=None,
259
+ ):
260
+ fd.write(
261
+ generate_compiler_repro_string(
262
+ gm,
263
+ args,
264
+ stable_output=stable_output,
265
+ save_dir=save_dir,
266
+ )
267
+ )
268
+ if accuracy is None:
269
+ accuracy = "_accuracy" in compiler_name
270
+ if tracing_mode is None:
271
+ tracing_mode = "real"
272
+ if any(has_free_symbols(a) for a in args):
273
+ tracing_mode = "symbolic"
274
+ fd.write("if __name__ == '__main__':\n")
275
+ fd.write(" from torch._dynamo.repro.after_aot import run_repro\n")
276
+ fd.write(
277
+ f" with torch.no_grad():\n"
278
+ f" run_repro(mod, load_args, accuracy={accuracy!r}, command={command!r}, "
279
+ f"save_dir={save_dir!r}, tracing_mode={tracing_mode!r}, check_str={check_str!r}"
280
+ ")\n"
281
+ )
282
+
283
+
284
+ def dump_compiler_graph_state(gm, args, compiler_name, *, accuracy=None):
285
+ subdir = os.path.join(minifier_dir(), "checkpoints")
286
+ if not os.path.exists(subdir):
287
+ os.makedirs(subdir, exist_ok=True)
288
+ file_name = os.path.join(subdir, f"{len(gm.graph.nodes)}.py")
289
+ log.warning(
290
+ "Writing checkpoint with %s nodes to %s", len(gm.graph.nodes), file_name
291
+ )
292
+ with open(file_name, "w") as fd:
293
+ save_graph_repro(
294
+ fd, gm, args, compiler_name, save_dir=subdir, accuracy=accuracy
295
+ )
296
+ curdir = os.getcwd()
297
+ repro_path = os.path.join(curdir, "repro.py")
298
+ try:
299
+ shutil.copyfile(file_name, repro_path)
300
+ log.warning("Copying repro file for convenience to %s", repro_path)
301
+ if use_buck:
302
+ BuckTargetWriter(file_name).write()
303
+ except OSError:
304
+ log.warning("No write permissions for %s", repro_path)
305
+ pass
306
+
307
+
308
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
309
+ # DUMP MINIFIER
310
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
311
+
312
+
313
+ def dump_to_minify(gm, args, compiler_name: str):
314
+ out = io.StringIO()
315
+ # TODO: factor this out
316
+ subdir = os.path.join(minifier_dir(), "checkpoints")
317
+ if not os.path.exists(subdir):
318
+ os.makedirs(subdir, exist_ok=True)
319
+ save_graph_repro(out, gm, args, compiler_name, save_dir=subdir, command="minify")
320
+ return helper_for_dump_minify(out.getvalue())
321
+
322
+
323
+ def isolate_fails(
324
+ fx_g,
325
+ args,
326
+ compiler_name: str,
327
+ env=None,
328
+ save_dir=None,
329
+ accuracy=None,
330
+ tracing_mode=None,
331
+ check_str=None,
332
+ ):
333
+ if env is None:
334
+ env = {}
335
+ subdir = os.path.join(os.getcwd(), "isolate")
336
+ if not os.path.exists(subdir):
337
+ os.makedirs(subdir, exist_ok=True)
338
+ file_name = os.path.join(subdir, f"{str(uuid.uuid4())[:5]}.py")
339
+ with open(file_name, "w") as fd:
340
+ save_graph_repro(
341
+ fd,
342
+ fx_g,
343
+ args,
344
+ compiler_name,
345
+ save_dir=save_dir,
346
+ command="minifier-query",
347
+ accuracy=accuracy,
348
+ tracing_mode=tracing_mode,
349
+ check_str=check_str,
350
+ )
351
+ # with open(file_name, "r") as fd:
352
+ # print(fd.read())
353
+ new_env = os.environ.copy()
354
+ new_env = {**new_env, **env}
355
+ stdout, stderr = TemporaryFile(), TemporaryFile()
356
+
357
+ if use_buck:
358
+ cmd = BuckTargetWriter(file_name).write(print_msg=False)
359
+ else:
360
+ cmd = ["python", file_name]
361
+
362
+ p = subprocess.Popen(
363
+ cmd,
364
+ cwd=subdir,
365
+ stdout=stdout,
366
+ stderr=stderr,
367
+ env=new_env,
368
+ )
369
+ p.wait()
370
+
371
+ stdout.seek(0)
372
+ stderr.seek(0)
373
+ print(
374
+ textwrap.indent(stdout.read().decode("utf-8"), prefix=">> "), file=sys.stdout
375
+ )
376
+ print(
377
+ textwrap.indent(stderr.read().decode("utf-8"), prefix=">> "), file=sys.stderr
378
+ )
379
+ # print(f"Isolated test failed - {file_name}")
380
+ return p.returncode != 0
381
+
382
+
383
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
384
+ # MINIFIER TOOLS
385
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
386
+
387
+
388
+ def inductor_fails(fx_g, args, check_str=None):
389
+ has_cuda = False
390
+ for arg in args:
391
+ if isinstance(arg, torch.Tensor) and arg.is_cuda:
392
+ has_cuda = True
393
+ break
394
+
395
+ def sync():
396
+ if has_cuda:
397
+ # Ensures that segfaults are surfaced
398
+ torch.cuda.synchronize()
399
+
400
+ from torch._inductor.compile_fx import compile_fx_inner
401
+
402
+ try:
403
+ result = fx_g(*args)
404
+ assert isinstance(result, (tuple, list))
405
+ assert not any(isinstance(x, (tuple, list)) for x in result)
406
+ except Exception:
407
+ return False
408
+
409
+ sync()
410
+
411
+ try:
412
+ compile_mod = compile_fx_inner(fx_g, args)
413
+ compile_mod(args)
414
+ sync()
415
+ except Exception as e:
416
+ if check_str is not None and check_str not in repr(e):
417
+ return False
418
+ print(repr(e))
419
+ return True
420
+ return False
421
+
422
+
423
+ def inductor_accuracy_fails(
424
+ fx_g, args, check_str=None, *, require_fp64=False, ignore_non_fp=False
425
+ ):
426
+ from torch._inductor.compile_fx import compile_fx_inner
427
+
428
+ return backend_aot_accuracy_fails(
429
+ fx_g,
430
+ args,
431
+ compile_fx_inner,
432
+ require_fp64=require_fp64,
433
+ ignore_non_fp=ignore_non_fp,
434
+ )
435
+
436
+
437
+ backend_aot_accuracy_fails = functools.partial(backend_accuracy_fails, only_fwd=True)
438
+
439
+
440
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
441
+ # REPRO MAIN
442
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
443
+
444
+
445
+ def repro_common(options, mod, load_args):
446
+ # Invariant for graphs we generate with the repro script
447
+ assert not any(mod.named_parameters())
448
+ for n, b in mod.named_buffers():
449
+ if b.numel() > MAX_CONSTANT_NUMEL_INLINE:
450
+ log.warning(
451
+ "Constant %s was not serialized, generated random data instead. "
452
+ "If you think this is affecting you, please comment on "
453
+ "https://github.com/pytorch/pytorch/issues/100468",
454
+ n,
455
+ )
456
+
457
+ if not hasattr(load_args, "_version"):
458
+ log.warning(
459
+ "load_args does not have a _version attribute, please file a bug to PyTorch "
460
+ "and describe how you generate this repro script"
461
+ )
462
+ else:
463
+ if load_args._version > 0:
464
+ log.warning(
465
+ "load_args is version %s, but this version of PyTorch only supports "
466
+ "version 0. We will try to run it anyway but there may be an incompatibility; "
467
+ "if so, try upgrading your version of PyTorch.",
468
+ load_args._version,
469
+ )
470
+
471
+ nop_reader = NopInputReader()
472
+ load_args(nop_reader)
473
+
474
+ with tqdm(desc="Loading inputs", total=nop_reader.total) as pbar:
475
+ input_reader = InputReader(save_dir=options.save_dir, pbar=pbar)
476
+ load_args(input_reader)
477
+ args = input_reader.args
478
+
479
+ # Turn mod into a GraphModule the slow way
480
+ # TODO: speed this up
481
+ mod = make_fx(mod, tracing_mode=options.tracing_mode)(*args)
482
+
483
+ torch._inductor.config.generate_intermediate_hooks = True
484
+
485
+ return mod, args
486
+
487
+
488
+ ACCURACY_FAILS: Dict[str, Callable[[nn.Module, Any], bool]] = {
489
+ "": inductor_fails,
490
+ # This might look inverted but it's not. strict_accuracy means "we will
491
+ # minify any time we see anything that diverges", whereas accuracy is more
492
+ # conservative, and will only minify if there is a meaningful fp64
493
+ # divergence
494
+ "accuracy": functools.partial(
495
+ inductor_accuracy_fails, require_fp64=True, ignore_non_fp=True
496
+ ),
497
+ "strict_accuracy": inductor_accuracy_fails,
498
+ }
499
+
500
+
501
+ def repro_minifier_query(options, mod, load_args):
502
+ mod, args = repro_common(options, mod, load_args)
503
+ fail_fn = functools.partial(
504
+ ACCURACY_FAILS[options.accuracy], check_str=options.check_str
505
+ )
506
+ if fail_fn(mod, args):
507
+ sys.exit(1)
508
+ else:
509
+ sys.exit(0)
510
+
511
+
512
+ def repro_minify(options, mod, load_args):
513
+ from functorch.compile import minifier
514
+
515
+ mod, args = repro_common(options, mod, load_args)
516
+ compiler_name = "inductor_accuracy" if options.accuracy != "" else "inductor"
517
+
518
+ favored_device = 1 if torch.cuda.device_count() >= 2 else 0
519
+ env_variables = {"CUDA_VISIBLE_DEVICES": str(favored_device)}
520
+
521
+ module_fails: Any
522
+ if options.isolate:
523
+ module_fails = functools.partial(
524
+ isolate_fails,
525
+ env=env_variables,
526
+ compiler_name=compiler_name,
527
+ save_dir=options.save_dir,
528
+ accuracy=options.accuracy,
529
+ tracing_mode=options.tracing_mode,
530
+ )
531
+ else:
532
+ module_fails = ACCURACY_FAILS[options.accuracy]
533
+
534
+ minifier(
535
+ mod,
536
+ args,
537
+ module_fails=functools.partial(module_fails, check_str=options.check_str),
538
+ dump_state=functools.partial(
539
+ dump_compiler_graph_state, compiler_name=compiler_name
540
+ ),
541
+ save_dir=options.save_dir,
542
+ offload_to_disk=options.offload_to_disk,
543
+ skip_offload=options.skip_saving_eager_intermediates,
544
+ skip_sanity=options.skip_sanity,
545
+ max_granularity=options.max_granularity,
546
+ )
547
+
548
+
549
+ def repro_analyze(options, mod, load_args):
550
+ from torch._inductor.compile_fx import compile_fx_inner
551
+ from torch._inductor.hooks import intermediate_hook
552
+
553
+ mod, args = repro_common(options, mod, load_args)
554
+
555
+ # TODO: The logic for cloning inputs/models here is intentionally
556
+ # modeled off of run_fwd_maybe_bwd, but arguably it is better not to
557
+ # clone inputs (as you are doubling your effective GPU memory usage).
558
+ # It is certainly faster though! It probably makes sense to let the
559
+ # user specify the offload strategy.
560
+
561
+ with tqdm(desc="Compiling"):
562
+ compiled = compile_fx_inner(mod, args)
563
+ total = counters["inductor"]["intermediate_hooks"]
564
+
565
+ known_names = set()
566
+
567
+ def save_hook(name, val):
568
+ known_names.add(name)
569
+ if not options.skip_saving_inductor_intermediates:
570
+ writer.write_tensor(os.path.join("inductor", name), val)
571
+ pbar.update(1) # type: ignore[has-type]
572
+
573
+ writer = torch.utils._content_store.ContentStoreWriter(
574
+ options.save_dir, stable_hash=options.stable_hash
575
+ )
576
+ reader = torch.utils._content_store.ContentStoreReader(options.save_dir)
577
+
578
+ new_args = clone_inputs(args)
579
+ with intermediate_hook(save_hook), tqdm(
580
+ desc="Saving inductor intermediates", total=total
581
+ ) as pbar:
582
+ compiled(new_args)
583
+ assert not new_args
584
+
585
+ def compare_tuples(tuple1, tuple2):
586
+ diff_indices = [i for i in range(len(tuple1)) if tuple1[i] != tuple2[i]]
587
+ diff_values = [(tuple1[i], tuple2[i]) for i in diff_indices]
588
+
589
+ if not diff_values:
590
+ return None
591
+ else:
592
+ return " and ".join(f"{a} != {b}" for a, b in diff_values)
593
+
594
+ def check_hook(name, val):
595
+ meta = writer.compute_tensor_metadata(val)
596
+ meta2 = reader.read_tensor_metadata(os.path.join("inductor", name))
597
+ reason = compare_tuples(meta, meta2)
598
+ if reason is not None:
599
+ pbar.write(f"NONDETERMINISTIC INDUCTOR at {name} ({reason})")
600
+ pbar.update(1)
601
+
602
+ if not options.skip_check_deterministic:
603
+ new_args = clone_inputs(args)
604
+ with intermediate_hook(check_hook), tqdm(
605
+ desc="Checking inductor determinism", total=total
606
+ ) as pbar:
607
+ compiled(new_args)
608
+ assert not new_args
609
+
610
+ class WriterInterp(fx.Interpreter):
611
+ def __init__(self, mod, subdir):
612
+ super().__init__(mod)
613
+ self.subdir = subdir
614
+
615
+ def run_node(self, n):
616
+ r = super().run_node(n)
617
+ name = n.name
618
+ if name in known_names:
619
+ pbar.update(1)
620
+ writer.write_tensor(os.path.join(self.subdir, name), r)
621
+ return r
622
+
623
+ # NB: the module cast doesn't actually do anything, since there are no
624
+ # parameters/buffers on the module
625
+ if not options.skip_saving_float64_intermediates:
626
+ new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args))
627
+ with tqdm(desc="Saving float64 intermediates", total=total) as pbar:
628
+ WriterInterp(new_mod, "float64").boxed_run(new_args)
629
+ assert not new_args
630
+
631
+ class ExactReaderInterp(fx.Interpreter):
632
+ def run_node(self, n):
633
+ r = super().run_node(n)
634
+ name = n.name
635
+ if name in known_names:
636
+ meta = writer.compute_tensor_metadata(r)
637
+ meta2 = reader.read_tensor_metadata(os.path.join("float64", name))
638
+ reason = compare_tuples(meta, meta2)
639
+ if reason is not None:
640
+ pbar.write(f"NONDETERMINISTIC FLOAT64 at {name} ({reason})")
641
+ pbar.update(1)
642
+ return r
643
+
644
+ # TODO: check eager determinism
645
+
646
+ if not options.skip_check_deterministic:
647
+ new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args))
648
+ with tqdm(desc="Checking float64 determinism", total=total) as pbar:
649
+ ExactReaderInterp(new_mod).boxed_run(new_args)
650
+ assert not new_args
651
+
652
+ # Now that we've saved everything, interp through the eager graph
653
+ # and do comparisons
654
+ class ReaderInterp(fx.Interpreter):
655
+ def run_node(self, n):
656
+ r = super().run_node(n)
657
+ name = n.name
658
+ if name in known_names:
659
+ inductor = reader.read_tensor(os.path.join("inductor", name))
660
+ float64 = reader.read_tensor(os.path.join("float64", name))
661
+ logged = False
662
+
663
+ def log_error(msg, *args):
664
+ nonlocal logged
665
+ logged = True
666
+ pbar.write(f"DIVERGED at {name}: {msg % args}")
667
+
668
+ if not same(
669
+ r,
670
+ inductor,
671
+ float64,
672
+ tol=torch._dynamo.config.repro_tolerance,
673
+ equal_nan=True,
674
+ log_error=log_error,
675
+ ):
676
+ assert logged
677
+ pbar.update(1)
678
+ return r
679
+
680
+ with tqdm(desc="Checking divergence", total=total) as pbar:
681
+ ReaderInterp(mod).boxed_run(args)
682
+ assert not args
683
+
684
+
685
+ def repro_run(options, mod, load_args):
686
+ from torch._inductor.compile_fx import compile_fx_inner
687
+
688
+ mod, args = repro_common(options, mod, load_args)
689
+
690
+ from torch.cuda import synchronize
691
+
692
+ compiled = compile_fx_inner(mod, args)
693
+
694
+ if options.accuracy != "":
695
+ # We don't really respect --accuracy vs --strict-accuracy here, it
696
+ # seems counterintuitive
697
+ if not same_two_models(mod, compiled, args, only_fwd=True):
698
+ raise AccuracyError("Bad accuracy detected")
699
+ else:
700
+ need_sync = False
701
+ for arg in args:
702
+ if isinstance(arg, torch.Tensor) and arg.is_cuda:
703
+ need_sync = True
704
+ break
705
+ ref = compiled(list(args))
706
+ if need_sync:
707
+ synchronize() # ensure segfaults are surfaced
708
+ return lambda: compiled(list(args))
709
+
710
+
711
+ # TODO: lazily load the inputs or something, rather than cloning them
712
+ def run_repro(
713
+ mod,
714
+ load_args,
715
+ *,
716
+ command="run",
717
+ accuracy: Union[bool, str] = "",
718
+ save_dir=None,
719
+ tracing_mode=None,
720
+ patch_code=None,
721
+ check_str=None,
722
+ **kwargs,
723
+ ):
724
+ for k in kwargs:
725
+ log.warning(
726
+ "Unrecognized kwarg %s; perhaps this repro was made on a newer version of PyTorch",
727
+ k,
728
+ )
729
+
730
+ if accuracy is True:
731
+ accuracy = "accuracy"
732
+ elif accuracy is False:
733
+ accuracy = ""
734
+
735
+ if patch_code is not None:
736
+ log.warning(
737
+ "patch_code no longer works on this version of PyTorch, silently ignoring"
738
+ )
739
+
740
+ parser = argparse.ArgumentParser(
741
+ description=f"""\
742
+ An after_aot repro script, typically triggering a bug in PyTorch Inductor.
743
+ When run with no arguments, this script defaults to running '{command}'.
744
+ Extra flags may be available; to find out more, try '{command} --help'.
745
+ There are also alternate subcommands available, see below.
746
+
747
+ default settings on this script:
748
+ {accuracy=}
749
+ {tracing_mode=}
750
+ {save_dir=}
751
+ {check_str=}
752
+ """,
753
+ formatter_class=argparse.RawTextHelpFormatter,
754
+ )
755
+
756
+ def common_flags(parser):
757
+ accuracy_group = parser.add_mutually_exclusive_group()
758
+ accuracy_group.add_argument(
759
+ "--no-accuracy",
760
+ dest="accuracy",
761
+ action="store_const",
762
+ const="",
763
+ default=accuracy,
764
+ help="do not test accuracy, just run the module and see if it errors",
765
+ )
766
+ accuracy_group.add_argument(
767
+ "--accuracy",
768
+ action="store_const",
769
+ const="accuracy",
770
+ default=accuracy,
771
+ help="""\
772
+ test if the RMSE between the compiled module and the fp64 reference is greater
773
+ than eager and the fp64 reference. This is usually more reliable than the
774
+ standard allclose test, as we expect numeric differences from compiling, often
775
+ improving accuracy over eager. RMSE test allows for compiled module to
776
+ diverge greatly from eager, as long as this divergence moves it closer to the
777
+ 'true' mathematical value of the network. Caveats: (1) double precision can
778
+ still suffer from rounding error, so it is not a perfect reference (see for
779
+ example 'Herbie: Automatically Improving Floating Point Accuracy') for
780
+ approaches that detect the necessary working precision and compute it in
781
+ arbitrary precision floating point; unfortunately, this is not practical for
782
+ tensor computation; (2) if there are not enough samples in the output being
783
+ compared, we may get unlucky and observe a greater RMSE than eager; this
784
+ could be overcome by applying a more rigorous statistical test at some
785
+ p-value, which we leave for future work.
786
+ """,
787
+ )
788
+ accuracy_group.add_argument(
789
+ "--strict-accuracy",
790
+ dest="accuracy",
791
+ action="store_const",
792
+ const="strict_accuracy",
793
+ default=accuracy,
794
+ help="""\
795
+ by default, when doing accuracy minification we will reject reductions which
796
+ change the divergence from a floating point divergence to an integral/boolean
797
+ divergence. This is because some operations like ReLU involve temporarily
798
+ sharp boundaries that smooth out again afterwards; without requiring
799
+ divergence on floating point, the minifier will often fixate on a divergent
800
+ boolean tensor even though this is not the true source of the divergence.
801
+ However, rejecting these reductions makes it more difficult for the minifier
802
+ to make progress. Using this option will let the minifier progress for ALL
803
+ divergences--you just might not end up with a useful repro in the end.""",
804
+ )
805
+
806
+ parser.add_argument(
807
+ "--save-dir",
808
+ type=str,
809
+ default=save_dir,
810
+ metavar="DIR",
811
+ help="directory where saved inputs live",
812
+ )
813
+ parser.add_argument(
814
+ "--no-save-dir",
815
+ dest="save_dir",
816
+ action="store_const",
817
+ const=None,
818
+ help="don't use any directory for saved inputs",
819
+ )
820
+ parser.add_argument(
821
+ "--tracing-mode",
822
+ type=str,
823
+ metavar="{real,fake,symbolic}",
824
+ default=tracing_mode,
825
+ help="how to trace the repro module into a GraphModule with metadata",
826
+ )
827
+
828
+ subparsers = parser.add_subparsers(
829
+ dest="command", metavar="{run,minify,analyze}", required=True
830
+ )
831
+
832
+ parser_run = subparsers.add_parser(
833
+ "run",
834
+ help="just run the repro",
835
+ )
836
+ common_flags(parser_run)
837
+
838
+ parser_minify = subparsers.add_parser(
839
+ "minify", help="run the minifier on the repro"
840
+ )
841
+ common_flags(parser_minify)
842
+ parser_minify_isolate = parser_minify.add_mutually_exclusive_group()
843
+ parser_minify_isolate.add_argument(
844
+ "--isolate",
845
+ action="store_true",
846
+ default=True,
847
+ help="run in separate processes to avoid interference (default)",
848
+ )
849
+ parser_minify_isolate.add_argument(
850
+ "--no-isolate",
851
+ dest="isolate",
852
+ action="store_false",
853
+ help="speed up by running all compilation in same process",
854
+ )
855
+ parser_minify.add_argument(
856
+ "--skip-saving-eager-intermediates",
857
+ action="store_true",
858
+ help="skip saving eager intermediates on --minify",
859
+ )
860
+ # TODO: make this an option for --analyze too
861
+ parser_minify.add_argument(
862
+ "--offload-to-disk",
863
+ action="store_true",
864
+ help="during minification, offload delta debugging intermediates to disk. Use if you're OOMing",
865
+ )
866
+ parser_minify.add_argument(
867
+ "--skip-sanity",
868
+ action="store_true",
869
+ help="skip sanity check at beginning of minification on original graph",
870
+ )
871
+ parser_minify.add_argument(
872
+ "--max-granularity",
873
+ type=int,
874
+ default=None,
875
+ help="start at this granularity and work down; must be power of 2",
876
+ )
877
+ parser_minify.add_argument(
878
+ "--check-str",
879
+ type=str,
880
+ default=check_str,
881
+ help="require minified program to fail with error containing this string",
882
+ )
883
+
884
+ parser_analyze = subparsers.add_parser(
885
+ "analyze", help="run the accuracy analyzer on the repro"
886
+ )
887
+ common_flags(parser_analyze)
888
+ parser_analyze.add_argument(
889
+ "--skip-saving-inductor-intermediates",
890
+ action="store_true",
891
+ help="skip saving inductor intermediates on --analyze",
892
+ )
893
+ parser_analyze.add_argument(
894
+ "--skip-saving-float64-intermediates",
895
+ action="store_true",
896
+ help="skip saving float64 intermediates",
897
+ )
898
+ parser_analyze.add_argument(
899
+ "--skip-check-deterministic",
900
+ action="store_true",
901
+ help="skip checking that the network is deterministic",
902
+ )
903
+ parser_analyze.add_argument(
904
+ "--stable-hash",
905
+ action="store_true",
906
+ help="use SHA-1 checksum instead of fast (but possibly unsound) hash",
907
+ )
908
+
909
+ # Run the repro in the context of minification, inverting exit code meaning
910
+ parser_minifier_query = subparsers.add_parser(
911
+ "minifier-query",
912
+ )
913
+ common_flags(parser_minifier_query)
914
+ parser_minifier_query.add_argument(
915
+ "--check-str",
916
+ type=str,
917
+ default=check_str,
918
+ help="require minified program to fail with error containing this string",
919
+ )
920
+
921
+ args = None
922
+ if len(sys.argv) <= 1:
923
+ args = [command, *sys.argv[1:]]
924
+
925
+ options = parser.parse_args(args)
926
+ COMMAND_FNS = {
927
+ "minify": repro_minify,
928
+ "analyze": repro_analyze,
929
+ "minifier-query": repro_minifier_query,
930
+ "run": repro_run,
931
+ }
932
+ return COMMAND_FNS[options.command](options, mod, load_args)
venv/lib/python3.10/site-packages/torch/_dynamo/repro/after_dynamo.py ADDED
@@ -0,0 +1,566 @@
1
+ import argparse
2
+ import copy
3
+ import functools
4
+ import logging
5
+ import os
6
+ import shutil
7
+ import sys
8
+ import textwrap
9
+ from importlib import import_module
10
+ from typing import Union
11
+
12
+ import torch
13
+ import torch.fx as fx
14
+
15
+ from torch._dynamo.debug_utils import (
16
+ AccuracyError,
17
+ backend_accuracy_fails,
18
+ BUCK_CMD_PREFIX,
19
+ BuckTargetWriter,
20
+ extra_imports,
21
+ generate_config_string,
22
+ helper_for_dump_minify,
23
+ InputReader,
24
+ InputWriter,
25
+ minifier_dir,
26
+ NNModuleToString,
27
+ NopInputReader,
28
+ run_fwd_maybe_bwd,
29
+ same_two_models,
30
+ )
31
+ from torch.fx.experimental.symbolic_shapes import fx_placeholder_targets
32
+ from torch.hub import tqdm
33
+
34
+ from .. import config
35
+ from ..backends.registry import lookup_backend, register_debug_backend
36
+ from ..debug_utils import clone_inputs_retaining_gradness
37
+
38
+ log = logging.getLogger(__name__)
39
+
40
+
41
+ inductor_config = import_module("torch._inductor.config")
42
+ use_buck = inductor_config.is_fbcode()
43
+
44
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
45
+ # MAIN ENTRY POINT
46
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
47
+
48
+
49
+ def wrap_backend_debug(unconfigured_compiler_fn, compiler_name: str):
50
+ """
51
+ A minifier decorator that wraps the TorchDynamo produced Fx graph modules.
52
+ As opposed to wrap_compiler_debug, this wrapper intercepts at the
53
+ TorchDynamo produced Fx Graph Module. This makes it backend-agnostic to some
54
+ extent, e.g., it is useful for minifying issues related to Aot Autograd
55
+ tracing. If an error is found, we minify and save the minified repro in
56
+ repro.tar.gz.
57
+ """
58
+
59
+ @functools.wraps(unconfigured_compiler_fn)
60
+ def debug_wrapper(gm, example_inputs, **kwargs):
61
+ compiler_fn = functools.partial(unconfigured_compiler_fn, **kwargs)
62
+ assert config.repro_after in ("dynamo", "aot", None)
63
+
64
+ if config.repro_after == "dynamo":
65
+
66
+ def add_paths(exc):
67
+ exc.minifier_path = os.path.join(minifier_dir(), "minifier_launcher.py")
68
+ if use_buck:
69
+ exc.buck_command = " ".join(
70
+ BUCK_CMD_PREFIX
71
+ + [BuckTargetWriter(exc.minifier_path).cmd_line_path]
72
+ )
73
+
74
+ if config.repro_level == 3:
75
+ dump_to_minify_after_dynamo(gm, example_inputs, compiler_name)
76
+
77
+ # Check for either accuracy (level 4) or other type of failures.
78
+ if config.repro_level == 4:
79
+ # Check Accuracy
80
+ compiled_gm = compiler_fn(copy.deepcopy(gm), example_inputs)
81
+ if backend_accuracy_fails(gm, example_inputs, compiler_fn):
82
+ log.warning(
83
+ "Accuracy failed for the TorchDynamo produced graph. Creating script to minify the error."
84
+ )
85
+ dump_to_minify_after_dynamo(
86
+ fx.GraphModule(gm, copy.deepcopy(gm.graph)),
87
+ example_inputs,
88
+ compiler_name,
89
+ )
90
+ exc = AccuracyError("Bad accuracy detected.")
91
+ add_paths(exc)
92
+ raise exc
93
+ else:
94
+ try:
95
+ compiled_gm = compiler_fn(copy.deepcopy(gm), example_inputs)
96
+ run_fwd_maybe_bwd(compiled_gm, example_inputs)
97
+ except Exception as exc:
98
+ log.warning(
99
+ "Compiled Fx GraphModule failed. Creating script to minify the error."
100
+ )
101
+ if config.repro_level == 1:
102
+ dump_state_fn = functools.partial(
103
+ dump_backend_state, compiler_name=compiler_name
104
+ )
105
+ dump_state_fn(
106
+ fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs
107
+ )
108
+ elif config.repro_level == 2:
109
+ dump_to_minify_after_dynamo(
110
+ fx.GraphModule(gm, copy.deepcopy(gm.graph)),
111
+ example_inputs,
112
+ compiler_name,
113
+ )
114
+ add_paths(exc)
115
+ raise
116
+ else:
117
+ compiled_gm = compiler_fn(gm, example_inputs)
118
+
119
+ return compiled_gm
120
+
121
+ debug_wrapper._torchdynamo_orig_callable = unconfigured_compiler_fn # type: ignore[attr-defined]
122
+ if hasattr(unconfigured_compiler_fn, "compiler_name"):
123
+ debug_wrapper.__name__ = unconfigured_compiler_fn.compiler_name
124
+ if hasattr(unconfigured_compiler_fn, "get_compiler_config"):
125
+ debug_wrapper.get_compiler_config = unconfigured_compiler_fn.get_compiler_config # type: ignore[attr-defined]
126
+ return debug_wrapper
127
+
128
+
129
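The wrapper above is a no-op unless Dynamo's repro flags are set, so it helps to see how it is typically switched on. A minimal sketch, assuming a toy function compiled with the Inductor backend; the flag values mirror the `config.repro_after` / `config.repro_level` branches in `debug_wrapper` above, and nothing else here comes from this file:

    import torch
    import torch._dynamo

    # "dynamo" routes failures through wrap_backend_debug; levels 1/2 dump a repro
    # or minifier script when the compiled graph raises, level 4 dumps when the
    # compiled graph disagrees numerically with eager.
    torch._dynamo.config.repro_after = "dynamo"
    torch._dynamo.config.repro_level = 2

    @torch.compile(backend="inductor")
    def fn(x):
        return torch.relu(x) * 2

    fn(torch.randn(8))  # a failure in the compiled graph here would trigger the dump
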
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+ #                           REPRO DUMPERS
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+
+
+ def generate_dynamo_fx_repro_string(
+     gm,
+     args,
+     compiler_name,
+     check_accuracy=False,
+     *,
+     stable_output=False,
+     save_dir=None,
+     command="run",
+ ):
+     """
+     Generate a repro string for backend-agnostic minified version.
+     """
+
+     model_str = NNModuleToString.convert(gm)
+
+     # TODO: Figure out why torch.compile'd hash isn't working on this codepath
+     writer = InputWriter(save_dir, stable_hash=True)
+     for placeholder, arg in zip(fx_placeholder_targets(gm), args):
+         if isinstance(arg, (int, torch.SymInt)):
+             writer.symint(placeholder, arg)
+         elif isinstance(arg, torch.Tensor):
+             # TODO: improve these names with FQN
+             writer.tensor(placeholder, arg)
+         else:
+             raise TypeError(f"arg is neither SymInt/int nor torch.Tensor, {arg}")
+     load_args = "\n".join(writer.lines())
+
+     return textwrap.dedent(
+         f"""
+ from math import inf
+ import torch
+ from torch import tensor, device
+ import torch.fx as fx
+ import torch._dynamo
+ from torch._dynamo.testing import rand_strided
+ from torch._dynamo.debug_utils import run_fwd_maybe_bwd
+
+ {generate_config_string(stable_output=stable_output)}
+
+ {extra_imports}
+
+ {model_str}
+ mod = Repro()
+
+ {load_args}
+
+ if __name__ == '__main__':
+     from torch._dynamo.repro.after_dynamo import run_repro
+     run_repro(mod, load_args, accuracy={check_accuracy!r}, command={command!r},
+         save_dir={save_dir!r}, autocast={torch.is_autocast_enabled()!r}, backend={compiler_name!r})
+ """
+     )
+
+
+ def dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy=False):
+     """
+     Saves the repro to a repro.py file
+     """
+     curdir = os.getcwd()
+     subdir = os.path.join(os.getcwd(), "checkpoints")
+     if not os.path.exists(subdir):
+         os.makedirs(subdir, exist_ok=True)
+     file_name = os.path.join(subdir, f"minified_{len(gm.graph.nodes)}_nodes.py")
+     log.warning(
+         "Writing checkpoint with %s nodes to %s", len(gm.graph.nodes), file_name
+     )
+
+     with open(file_name, "w") as fd:
+         fd.write(
+             generate_dynamo_fx_repro_string(
+                 gm, args, compiler_name, check_accuracy, save_dir=subdir
+             )
+         )
+     latest_repro = os.path.join(curdir, "repro.py")
+     log.warning("Copying %s to %s for convenience", file_name, latest_repro)
+
+     if use_buck:
+         BuckTargetWriter(latest_repro).write()
+
+     shutil.copyfile(file_name, latest_repro)
+
+
+ def dump_backend_state(gm, args, compiler_name, check_accuracy=False):
+     """
+     Dumps the dynamo graph to repro the issue.
+     1) It tries to convert Fx GraphModule to a string. If we can, it writes to a
+     repro.py file.
+     2) If we can't convert Fx GraphModule to a string, we use to_folder to save
+     the module and save a tar file.
+     """
+     assert NNModuleToString.can_convert_to_string(gm)
+     return dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy)
+     # return dump_backend_repro_as_tarfile(gm, args, compiler_name)
+
+
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+ #                           MINIFIER DUMPER
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+
+
+ def dump_to_minify_after_dynamo(gm, args, compiler_name):
+     # TODO: factor this out
+     subdir = os.path.join(minifier_dir(), "checkpoints")
+     if not os.path.exists(subdir):
+         os.makedirs(subdir, exist_ok=True)
+     helper_for_dump_minify(
+         generate_dynamo_fx_repro_string(
+             gm,
+             args,
+             compiler_name,
+             check_accuracy=config.repro_level == 4,
+             save_dir=subdir,
+             command="minify",
+         )
+     )
+
+
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+ #                           MINIFIER BACKENDS
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+
+
+ @register_debug_backend
+ def dynamo_minifier_backend(gm, example_inputs, compiler_name):
+     from functorch.compile import minifier
+
+     compiler_fn = lookup_backend(compiler_name)
+
+     # TODO: It's inconsistent to pass SymInt inputs but REAL tensors.
+     # We should pass ints and look at the GraphModule placeholders
+     # to resolve them to SymInt (if necessary)
+     example_inputs = [
+         i.node.hint if isinstance(i, torch.SymInt) else i for i in example_inputs
+     ]
+
+     try:
+         compiled_gm = compiler_fn(gm, example_inputs)
+         run_fwd_maybe_bwd(compiled_gm, example_inputs)
+         raise ValueError("No issue was detected")
+     except Exception as exc:
+         orig_failure = str(exc)
+         log.warning(
+             "Compiled Fx GraphModule failed. Creating script to minify the error."
+         )
+         dump_state_fn = functools.partial(
+             dump_backend_state, compiler_name=compiler_name
+         )
+         dump_state_fn(fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs)
+         fails_fn = functools.partial(
+             backend_fails,
+             compiler_fn=compiler_fn,
+             orig_failure=orig_failure,
+         )
+         minifier(
+             gm,
+             example_inputs,
+             module_fails=fails_fn,
+             dump_state=dump_state_fn,
+         )
+     return gm
+
+
+ @register_debug_backend
+ def dynamo_accuracy_minifier_backend(gm, example_inputs, compiler_name):
+     from functorch.compile import minifier
+
+     compiler_fn = lookup_backend(compiler_name)
+
+     # Set the eval mode to remove randomness.
+     gm.eval()
+
+     # Check Accuracy
+     if backend_accuracy_fails(
+         gm, example_inputs, compiler_fn, only_fwd=config.repro_forward_only
+     ):
+         log.warning("Accuracy failed for the TorchDynamo produced graph")
+         dump_state_fn = functools.partial(
+             dump_backend_state, compiler_name=compiler_name, check_accuracy=True
+         )
+         fails_fn = functools.partial(
+             backend_accuracy_fails,
+             compiler_fn=compiler_fn,
+             only_fwd=config.repro_forward_only,
+         )
+         dump_state_fn(fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs)
+         minifier(
+             gm,
+             example_inputs,
+             module_fails=fails_fn,
+             dump_state=dump_state_fn,
+         )
+     else:
+         log.error("Input graph does not fail accuracy testing")
+     return gm
+
+
+ def backend_fails(gm, example_inputs, compiler_fn, orig_failure):
+     """
+     Minifier uses this function to identify if the minified graph module fails
+     with the same error.
+
+     One caveat is that minifier can potentially go into a wrong direction when
+     the resulting graph module fails for a different reason. To avoid this, we
+     save the string for the original exception and check similarity between new
+     and old exception. They can be somewhat different in some cases, when the
+     exception string depends on the failing node information. So, we have a
+     loose similarity metric to guide the minifier path.
+     """
+     from difflib import SequenceMatcher
+
+     try:
+         # Run the original gm to check eager validity
+         run_fwd_maybe_bwd(gm, clone_inputs_retaining_gradness(example_inputs))
+         compiled_gm = compiler_fn(gm, example_inputs)
+         run_fwd_maybe_bwd(compiled_gm, clone_inputs_retaining_gradness(example_inputs))
+         return False
+     except Exception as e:
+         new_failure = str(e)
+         if SequenceMatcher(None, orig_failure, new_failure).ratio() > 0.5:
+             return True
+         return False
+
+
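The `SequenceMatcher` cutoff at the end of `backend_fails` is deliberately loose, since exception text often embeds node names that change as the graph shrinks. A small illustration with made-up error strings (not taken from any real run):

    from difflib import SequenceMatcher

    orig_failure = "RuntimeError: shape mismatch in node add_3"
    new_failure = "RuntimeError: shape mismatch in node mul_7"

    # Only the node name differs, so the ratio lands well above the 0.5 threshold
    # and the minifier treats the two failures as the same bug.
    print(SequenceMatcher(None, orig_failure, new_failure).ratio())
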
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+ #                           REPRO MAIN
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+
+
+ def run_load_args(options, mod, load_args):
+     if not hasattr(load_args, "_version"):
+         log.warning(
+             "load_args does not have a _version attribute, please file a bug to PyTorch "
+             "and describe how you generate this repro script"
+         )
+     else:
+         if load_args._version > 0:
+             log.warning(
+                 "load_args is version %s, but this version of PyTorch only supports "
+                 "version 0. We will try to run it anyway but there may be an incompatibility; "
+                 "if so, try upgrading your version of PyTorch.",
+                 load_args._version,
+             )
+
+     nop_reader = NopInputReader()
+     load_args(nop_reader)
+
+     with tqdm(desc="Loading inputs", total=nop_reader.total) as pbar:
+         input_reader = InputReader(save_dir=options.save_dir, pbar=pbar)
+         load_args(input_reader)
+         args = input_reader.args
+
+     return args
+
+
+ def repro_minify(options, mod, load_args):
+     args = run_load_args(options, mod, load_args)
+
+     # Setup debug minifier compiler
+     if not options.accuracy:
+         compiler_fn = lookup_backend("dynamo_minifier_backend")
+     else:
+         compiler_fn = lookup_backend("dynamo_accuracy_minifier_backend")
+
+     if options.backend is None:
+         raise RuntimeError(
+             "Compiler name is None - this likely means that a custom compiler "
+             "was called by torchdynamo. Please remove this error, import your "
+             "custom compiler function, and replace the backend=None "
+             "line in run_repro to backend=<my_imported_custom_function>"
+         )
+
+     dynamo_minifier_backend = functools.partial(
+         compiler_fn,
+         compiler_name=options.backend,
+     )
+     opt_mod = torch._dynamo.optimize(dynamo_minifier_backend)(mod)
+
+     with torch.cuda.amp.autocast(enabled=options.autocast):
+         opt_mod(*args)
+
+
+ def repro_run(options, mod, load_args):
+     opt_mod = torch._dynamo.optimize(options.backend)(mod)
+
+     if options.accuracy != "":
+         mod.eval()
+         opt_mod.eval()
+
+         with torch.cuda.amp.autocast(enabled=options.autocast):
+             # TODO: disable clone
+             args = run_load_args(options, mod, load_args)
+             assert same_two_models(mod, mod, args), "Eager itself failed"
+             if not same_two_models(mod, opt_mod, args):
+                 raise AccuracyError("Dynamo failed")
+     else:
+         with torch.cuda.amp.autocast(enabled=options.autocast):
+             args = run_load_args(options, mod, load_args)
+             ref = run_fwd_maybe_bwd(
+                 mod, args, only_fwd=options.only_fwd, disable_clone=True
+             )
+             del args
+
+             args = run_load_args(options, mod, load_args)
+             res = run_fwd_maybe_bwd(
+                 opt_mod, args, only_fwd=options.only_fwd, disable_clone=True
+             )
+
+
+ def run_repro(
+     mod,
+     load_args,
+     *,
+     command="run",
+     accuracy: Union[bool, str] = "",
+     save_dir=None,
+     autocast=False,
+     backend="inductor",
+     **kwargs,
+ ):
+     for k in kwargs:
+         log.warning(
+             "Unrecognized kwarg %s; perhaps this repro was made on a newer version of PyTorch",
+             k,
+         )
+
+     if accuracy is True:
+         accuracy = "accuracy"
+     elif accuracy is False:
+         accuracy = ""
+
+     parser = argparse.ArgumentParser(
+         description=f"""\
+ An after_dynamo repro script, typically triggering a bug in Dynamo or
+ AOTAutograd. When run with no arguments, this script defaults to running
+ '{command}'. Extra flags may be available; to find out more, try '{command}
+ --help'. There are also alternate subcommands available, see below.
+
+ default settings on this script:
+   {accuracy=}
+   {save_dir=}
+ """,
+         formatter_class=argparse.RawTextHelpFormatter,
+     )
+
+     def common_flags(parser):
+         accuracy_group = parser.add_mutually_exclusive_group()
+         accuracy_group.add_argument(
+             "--no-accuracy",
+             dest="accuracy",
+             action="store_const",
+             const="",
+             default=accuracy,
+             help="do not test accuracy, just run the module and see if it errors",
+         )
+         accuracy_group.add_argument(
+             "--accuracy",
+             action="store_const",
+             const="accuracy",
+             default=accuracy,
+             help="test accuracy",
+         )
+         parser.add_argument(
+             "--save-dir",
+             type=str,
+             default=save_dir,
+             metavar="DIR",
+             help="directory where saved inputs live",
+         )
+         parser.add_argument(
+             "--no-save-dir",
+             dest="save_dir",
+             action="store_const",
+             const=None,
+             help="don't use any directory for saved inputs",
+         )
+         parser.add_argument(
+             "--no-isolate",
+             dest="isolate",
+             action="store_false",
+             default=False,
+             help="no isolate (doesn't do anything for after_dynamo)",
+         )
+         parser.add_argument(
+             "--autocast",
+             default=autocast,
+             action="store_true",
+             help="use torch.cuda.amp.autocast",
+         )
+         parser.add_argument(
+             "--no-autocast",
+             dest="autocast",
+             action="store_false",
+             help="don't use torch.cuda.amp.autocast",
+         )
+         parser.add_argument(
+             "--backend",
+             type=str,
+             default=backend,
+             metavar="BACKEND",
+             help="torch.compile backend to use",
+         )
+
+     subparsers = parser.add_subparsers(
+         dest="command", metavar="{run,minify}", required=True
+     )
+
+     parser_run = subparsers.add_parser(
+         "run",
+         help="just run the repro",
+     )
+     common_flags(parser_run)
+     parser_run.add_argument(
+         "--only-fwd",
+         action="store_true",
+         help="don't run backwards compilation for testing",
+     )
+
+     parser_minify = subparsers.add_parser(
+         "minify", help="run the minifier on the repro"
+     )
+     common_flags(parser_minify)
+
+     args = None
+     if len(sys.argv) <= 1:
+         args = [command, *sys.argv[1:]]
+
+     options = parser.parse_args(args)
+     COMMAND_FNS = {
+         "minify": repro_minify,
+         "run": repro_run,
+     }
+     COMMAND_FNS[options.command](options, mod, load_args)
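The argument parser above is what a generated repro script ultimately drives: `generate_dynamo_fx_repro_string` emits a `__main__` block that calls `run_repro`, so a dumped `repro.py` can be used from Python or from the shell. A sketch of the call a generated script makes; the `Repro` module and `load_args` closure are emitted by the generator, and the values shown are illustrative, not required:

    # Inside a generated repro.py, after `mod = Repro()` and `load_args` are defined:
    from torch._dynamo.repro.after_dynamo import run_repro

    run_repro(
        mod,
        load_args,
        accuracy=False,      # or True / "accuracy" to compare against eager
        command="run",       # subcommand used when the script is run with no CLI args
        save_dir="./checkpoints",
        autocast=False,
        backend="inductor",
    )

From the shell, the same script accepts the subcommands and flags wired up above, for example `python repro.py run --accuracy --only-fwd` or `python repro.py minify`.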
venv/lib/python3.10/site-packages/torch/_dynamo/variables/__init__.py ADDED
@@ -0,0 +1,151 @@
+ # mypy: ignore-errors
+
+ from .base import VariableTracker
+ from .builtin import BuiltinVariable
+ from .constant import ConstantVariable, EnumVariable
+ from .ctx_manager import (
+     ContextWrappingVariable,
+     DeterministicAlgorithmsVariable,
+     DisabledSavedTensorsHooksVariable,
+     GradIncrementNestingCtxManagerVariable,
+     GradInplaceRequiresGradCtxManagerVariable,
+     GradModeVariable,
+     InferenceModeVariable,
+     StreamContextVariable,
+     StreamVariable,
+     VmapIncrementNestingCtxManagerVariable,
+     WithExitFunctionVariable,
+ )
+ from .dicts import (
+     ConstDictVariable,
+     CustomizedDictVariable,
+     DataClassVariable,
+     DefaultDictVariable,
+     SetVariable,
+ )
+ from .distributed import BackwardHookVariable
+ from .functions import (
+     FunctoolsPartialVariable,
+     NestedUserFunctionVariable,
+     SkipFunctionVariable,
+     UserFunctionVariable,
+     UserMethodVariable,
+ )
+ from .higher_order_ops import (
+     FunctorchHigherOrderVariable,
+     TorchHigherOrderOperatorVariable,
+ )
+ from .iter import (
+     CountIteratorVariable,
+     CycleIteratorVariable,
+     IteratorVariable,
+     ItertoolsVariable,
+     RepeatIteratorVariable,
+ )
+ from .lazy import LazyVariableTracker
+ from .lists import (
+     BaseListVariable,
+     ListIteratorVariable,
+     ListVariable,
+     NamedTupleVariable,
+     RangeVariable,
+     RestrictedListSubclassVariable,
+     SliceVariable,
+     TupleIteratorVariable,
+     TupleVariable,
+ )
+ from .misc import (
+     AutogradFunctionContextVariable,
+     AutogradFunctionVariable,
+     ClosureVariable,
+     DeletedVariable,
+     GetAttrVariable,
+     InspectSignatureVariable,
+     LambdaVariable,
+     MethodWrapperVariable,
+     NewCellVariable,
+     NewGlobalVariable,
+     NumpyVariable,
+     PythonModuleVariable,
+     StringFormatVariable,
+     SuperVariable,
+     TypingVariable,
+     UnknownVariable,
+ )
+ from .nn_module import NNModuleVariable, UnspecializedNNModuleVariable
+ from .sdpa import SDPAParamsVariable
+ from .tensor import (
+     FakeItemVariable,
+     NumpyNdarrayVariable,
+     SymNodeVariable,
+     TensorVariable,
+     UnspecializedPythonVariable,
+     UntypedStorageVariable,
+ )
+ from .torch import TorchCtxManagerClassVariable, TorchInGraphFunctionVariable
+ from .user_defined import (
+     RemovableHandleVariable,
+     UserDefinedClassVariable,
+     UserDefinedObjectVariable,
+ )
+
+ __all__ = [
+     "AutogradFunctionContextVariable",
+     "AutogradFunctionVariable",
+     "BackwardHookVariable",
+     "BaseListVariable",
+     "BuiltinVariable",
+     "ClosureVariable",
+     "ConstantVariable",
+     "ConstDictVariable",
+     "ContextWrappingVariable",
+     "CountIteratorVariable",
+     "CustomizedDictVariable",
+     "CycleIteratorVariable",
+     "DataClassVariable",
+     "DefaultDictVariable",
+     "DeletedVariable",
+     "DeterministicAlgorithmsVariable",
+     "EnumVariable",
+     "FakeItemVariable",
+     "GetAttrVariable",
+     "GradModeVariable",
+     "InspectSignatureVariable",
+     "IteratorVariable",
+     "ItertoolsVariable",
+     "LambdaVariable",
+     "LazyVariableTracker",
+     "ListIteratorVariable",
+     "ListVariable",
+     "NamedTupleVariable",
+     "NestedUserFunctionVariable",
+     "NewCellVariable",
+     "NewGlobalVariable",
+     "NNModuleVariable",
+     "NumpyNdarrayVariable",
+     "NumpyVariable",
+     "PythonModuleVariable",
+     "RangeVariable",
+     "RemovableHandleVariable",
+     "RepeatIteratorVariable",
+     "RestrictedListSubclassVariable",
+     "SDPAParamsVariable",
+     "SkipFunctionVariable",
+     "SliceVariable",
+     "StringFormatVariable",
+     "SuperVariable",
+     "TensorVariable",
+     "TorchCtxManagerClassVariable",
+     "TorchInGraphFunctionVariable",
+     "TupleVariable",
+     "UnknownVariable",
+     "UnspecializedNNModuleVariable",
+     "UnspecializedPythonVariable",
+     "UntypedStorageVariable",
+     "UserDefinedClassVariable",
+     "UserDefinedObjectVariable",
+     "UserFunctionVariable",
+     "UserMethodVariable",
+     "VariableTracker",
+     "WithExitFunctionVariable",
+ ]
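This `__init__` simply re-exports the `VariableTracker` subclasses that Dynamo's symbolic evaluator uses to model Python values during tracing, with `__all__` pinning the public surface. A trivial, illustrative sanity check of that surface, assuming only what the file above declares:

    import torch._dynamo.variables as variables

    # Every name advertised in __all__ should resolve to an attribute of the package.
    assert all(hasattr(variables, name) for name in variables.__all__)
    print(len(variables.__all__), "variable types exported")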
venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.16 kB)
venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc ADDED
Binary file (14.2 kB)
venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc ADDED
Binary file (41.4 kB)
venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builtin.cpython-310.pyc ADDED
Binary file (43.6 kB)
venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc ADDED
Binary file (7.87 kB)
venv/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc ADDED
Binary file (26.9 kB)