applied-ai-018 committed on
Commit b0f08e2 · verified · 1 parent: 6e3288a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/__init__.py +96 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py +0 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/common.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/cudagraphs.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/debugging.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/distributed.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/inductor.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/onnxrt.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/registry.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tensorrt.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/torchxla.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tvm.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/common.py +112 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/cudagraphs.py +239 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/debugging.py +289 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/distributed.py +612 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/inductor.py +16 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/onnxrt.py +37 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/registry.py +115 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/tensorrt.py +14 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/torchxla.py +75 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py +172 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/bytecode_analysis.py +250 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/callback.py +82 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/debug_utils.py +802 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/decorators.py +347 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py +1561 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/hooks.py +12 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/mutation_guard.py +126 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/replay_record.py +110 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py +0 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_dynamo.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py +932 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/repro/after_dynamo.py +566 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/source.py +545 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/test_case.py +78 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/types.py +99 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__init__.py +151 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/__init__.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builtin.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/dicts.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/distributed.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/functions.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/__init__.py ADDED
@@ -0,0 +1,96 @@
1
+ import torch
2
+ from . import convert_frame, eval_frame, resume_execution
3
+ from .backends.registry import list_backends, lookup_backend, register_backend
4
+ from .callback import callback_handler, on_compile_end, on_compile_start
5
+ from .code_context import code_context
6
+ from .convert_frame import replay
7
+ from .decorators import (
8
+ allow_in_graph,
9
+ assume_constant_result,
10
+ disable,
11
+ disallow_in_graph,
12
+ forbid_in_graph,
13
+ graph_break,
14
+ mark_dynamic,
15
+ mark_static,
16
+ mark_static_address,
17
+ maybe_mark_dynamic,
18
+ run,
19
+ )
20
+ from .eval_frame import (
21
+ _reset_guarded_backend_cache,
22
+ explain,
23
+ export,
24
+ is_dynamo_supported,
25
+ is_inductor_supported,
26
+ optimize,
27
+ optimize_assert,
28
+ OptimizedModule,
29
+ reset_code,
30
+ )
31
+ from .external_utils import is_compiling
32
+ from .utils import graph_break_reasons, guard_failures, orig_code_map, reset_frame_count
33
+
34
+ __all__ = [
35
+ "allow_in_graph",
36
+ "assume_constant_result",
37
+ "disallow_in_graph",
38
+ "forbid_in_graph",
39
+ "graph_break",
40
+ "mark_dynamic",
41
+ "maybe_mark_dynamic",
42
+ "mark_static",
43
+ "mark_static_address",
44
+ "optimize",
45
+ "optimize_assert",
46
+ "export",
47
+ "explain",
48
+ "run",
49
+ "replay",
50
+ "disable",
51
+ "reset",
52
+ "OptimizedModule",
53
+ "is_compiling",
54
+ "register_backend",
55
+ "list_backends",
56
+ "lookup_backend",
57
+ ]
58
+
59
+ if torch.manual_seed is torch.random.manual_seed:
60
+ import torch.jit._builtins
61
+
62
+ # Wrap manual_seed with the disable decorator.
63
+ # Can't do it at its implementation due to dependency issues.
64
+ torch.manual_seed = disable(torch.manual_seed)
65
+ # Add the new manual_seed to the builtin registry.
66
+ torch.jit._builtins._register_builtin(torch.manual_seed, "aten::manual_seed")
67
+
68
+
69
+ def reset() -> None:
70
+ """Clear all compile caches and restore initial state"""
71
+ with convert_frame.compile_lock:
72
+ reset_code_caches()
73
+ convert_frame.input_codes.clear()
74
+ convert_frame.output_codes.clear()
75
+ orig_code_map.clear()
76
+ guard_failures.clear()
77
+ graph_break_reasons.clear()
78
+ resume_execution.ContinueExecutionCache.cache.clear()
79
+ _reset_guarded_backend_cache()
80
+ reset_frame_count()
81
+ torch._C._dynamo.compiled_autograd.clear_cache()
82
+ convert_frame.FRAME_COUNTER = 0
83
+ convert_frame.FRAME_COMPILE_COUNTER.clear()
84
+ callback_handler.clear()
85
+
86
+
87
+ def reset_code_caches() -> None:
88
+ """Clear compile caches that are keyed by code objects"""
89
+ with convert_frame.compile_lock:
90
+ for weak_code in (
91
+ convert_frame.input_codes.seen + convert_frame.output_codes.seen
92
+ ):
93
+ code = weak_code()
94
+ if code:
95
+ reset_code(code)
96
+ code_context.clear()
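As a point of reference (not part of the uploaded diff), a minimal sketch of how the reset() entry point exposed by this __init__.py is typically used; fn and the "eager" backend choice are illustrative:

import torch
import torch._dynamo

def fn(x):
    return torch.relu(x) + 1

compiled = torch.compile(fn, backend="eager")
compiled(torch.randn(4))   # first call triggers a Dynamo compilation

# Clear every compile cache (code caches, guard failures, frame counters)
# so the next call starts from a clean slate, e.g. between benchmark runs.
torch._dynamo.reset()
compiled(torch.randn(4))   # recompiles because the caches were cleared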
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/common.cpython-310.pyc ADDED
Binary file (3.12 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/cudagraphs.cpython-310.pyc ADDED
Binary file (6.91 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/debugging.cpython-310.pyc ADDED
Binary file (8.67 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/distributed.cpython-310.pyc ADDED
Binary file (18.4 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/inductor.cpython-310.pyc ADDED
Binary file (560 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/onnxrt.cpython-310.pyc ADDED
Binary file (1.3 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/registry.cpython-310.pyc ADDED
Binary file (3.73 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tensorrt.cpython-310.pyc ADDED
Binary file (271 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/torchxla.cpython-310.pyc ADDED
Binary file (1.93 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tvm.cpython-310.pyc ADDED
Binary file (5.21 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/common.py ADDED
@@ -0,0 +1,112 @@
1
+ # mypy: ignore-errors
2
+
3
+ import contextlib
4
+ import functools
5
+ import logging
6
+ from unittest.mock import patch
7
+
8
+ import torch
9
+ from torch._dynamo import disable
10
+ from torch._dynamo.utils import counters, defake
11
+ from torch._functorch.aot_autograd import aot_module_simplified
12
+ from torch.utils._python_dispatch import _disable_current_modes
13
+
14
+ log = logging.getLogger(__name__)
15
+
16
+
17
+ def aot_autograd(**kwargs):
18
+ def compiler_fn(gm: torch.fx.GraphModule, example_inputs):
19
+ # Hack to get around circular import problems with aot_eager_decomp_partition
20
+ if callable(kwargs.get("decompositions")):
21
+ kwargs["decompositions"] = kwargs["decompositions"]()
22
+
23
+ # NB: don't delete the counter increment
24
+ counters["aot_autograd"]["total"] += 1
25
+ use_fallback = False
26
+
27
+ if use_fallback:
28
+ log.debug("Unable to use AOT Autograd because graph has mutation")
29
+ counters["aot_autograd"]["not_ok"] += 1
30
+ return gm
31
+
32
+ # OK attempt to compile
33
+
34
+ def _wrapped_bw_compiler(*args, **kwargs):
35
+ # stop TorchDynamo from trying to compile our generated backwards pass
36
+ return disable(disable(bw_compiler)(*args, **kwargs))
37
+
38
+ bw_compiler = kwargs.get("bw_compiler") or kwargs["fw_compiler"]
39
+ kwargs["bw_compiler"] = _wrapped_bw_compiler
40
+ kwargs["inference_compiler"] = (
41
+ kwargs.get("inference_compiler") or kwargs["fw_compiler"]
42
+ )
43
+
44
+ from functorch.compile import nop
45
+
46
+ from torch._inductor.debug import enable_aot_logging
47
+
48
+ # debug asserts slow down compile time noticeably,
49
+ # so only default them on when the aot_eager backend is used.
50
+ if kwargs.get("fw_compiler", None) == nop:
51
+ patch_config = patch("functorch.compile.config.debug_assert", True)
52
+ else:
53
+ patch_config = contextlib.nullcontext()
54
+
55
+ try:
56
+ # NB: NOT cloned!
57
+ with enable_aot_logging(), patch_config:
58
+ cg = aot_module_simplified(gm, example_inputs, **kwargs)
59
+ counters["aot_autograd"]["ok"] += 1
60
+ return disable(cg)
61
+ except Exception:
62
+ counters["aot_autograd"]["not_ok"] += 1
63
+ raise
64
+
65
+ return compiler_fn
66
+
67
+
68
+ def mem_efficient_fusion_kwargs(use_decomps):
69
+ from functorch.compile import (
70
+ default_decompositions,
71
+ min_cut_rematerialization_partition,
72
+ ts_compile,
73
+ )
74
+
75
+ kwargs = {
76
+ # these are taken from memory_efficient_fusion()
77
+ "fw_compiler": ts_compile,
78
+ "bw_compiler": ts_compile,
79
+ "partition_fn": min_cut_rematerialization_partition,
80
+ }
81
+
82
+ if use_decomps:
83
+ kwargs["decompositions"] = default_decompositions
84
+
85
+ return kwargs
86
+
87
+
88
+ def fake_tensor_unsupported(fn):
89
+ """
90
+ Decorator for backends that need real inputs. We swap out fake
91
+ tensors for zero tensors.
92
+ """
93
+
94
+ @functools.wraps(fn)
95
+ def wrapper(model, inputs, **kwargs):
96
+ with _disable_current_modes():
97
+ inputs = list(map(defake, inputs))
98
+ return fn(model, inputs, **kwargs)
99
+
100
+ return wrapper
101
+
102
+
103
+ def device_from_inputs(example_inputs) -> torch.device:
104
+ for x in example_inputs:
105
+ if hasattr(x, "device"):
106
+ return x.device
107
+
108
+
109
+ def dtype_from_inputs(example_inputs) -> torch.dtype:
110
+ for x in example_inputs:
111
+ if hasattr(x, "dtype"):
112
+ return x.dtype
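For orientation (illustrative, not part of the upload): the aot_autograd factory above is the building block the other backends in this folder use, paired with register_backend from registry.py. A hedged sketch of a custom backend; my_fw_compiler and the name "my_aot_backend" are made-up placeholders:

from torch._dynamo.backends.common import aot_autograd
from torch._dynamo.backends.registry import register_backend

def my_fw_compiler(gm, example_inputs):
    # Inspect or rewrite the forward graph produced by AOTAutograd,
    # then return a callable that executes it.
    gm.graph.print_tabular()
    return gm.forward

# aot_autograd() wraps the supplied compilers around aot_module_simplified and
# returns a compiler_fn that can be registered as a TorchDynamo backend.
my_backend = aot_autograd(fw_compiler=my_fw_compiler)
register_backend(name="my_aot_backend", compiler_fn=my_backend)

Passing backend="my_aot_backend" to torch.compile would then route captured graphs through it.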
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/cudagraphs.py ADDED
@@ -0,0 +1,239 @@
1
+ # mypy: ignore-errors
2
+
3
+ import functools
4
+ import operator
5
+ from collections import defaultdict
6
+ from typing import Dict, List, Optional
7
+
8
+ import torch
9
+ from torch._dynamo.backends.debugging import boxed_nop
10
+ from torch._inductor.cudagraph_trees import cudagraphify_impl
11
+ from torch._inductor.cudagraph_utils import (
12
+ BoxedDeviceIndex,
13
+ check_multiple_devices_or_any_cpu_nodes,
14
+ get_mutation_stack_trace,
15
+ )
16
+ from torch._inductor.utils import (
17
+ BoxedBool,
18
+ count_tangents,
19
+ has_incompatible_cudagraph_ops,
20
+ num_fw_fixed_arguments,
21
+ output_node,
22
+ )
23
+ from torch.multiprocessing.reductions import StorageWeakRef
24
+ from .common import aot_autograd
25
+ from .registry import register_backend
26
+
27
+ perf_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
28
+
29
+
30
+ def find_input_mutations(g):
31
+ def meta_fk(meta):
32
+ return meta["val"] if "val" in meta else meta["fake_result"]
33
+
34
+ inputs = defaultdict(set)
35
+ input_idx = 0
36
+ mutated_inputs = set()
37
+ for n in g.nodes:
38
+ if n.op == "placeholder":
39
+ if isinstance(meta_fk(n.meta), torch.Tensor):
40
+ inputs[StorageWeakRef(meta_fk(n.meta)._typed_storage())].add(input_idx)
41
+ input_idx += 1
42
+ elif n.op == "call_function":
43
+ if n.target is operator.getitem:
44
+ continue
45
+ schema = n.target._schema
46
+ for i, arg in enumerate(schema.arguments):
47
+ if i < len(n.args):
48
+ argument = n.args[i]
49
+ else:
50
+ if arg.name not in n.kwargs:
51
+ continue
52
+ argument = n.kwargs[arg.name]
53
+ mut_arg = False
54
+ if arg.alias_info:
55
+ if arg.alias_info.is_write:
56
+ mut_arg = True
57
+ if mut_arg:
58
+ # TODO: not correct for args that contain tensors in a struct
59
+ # like list
60
+ mutated_inputs |= inputs[
61
+ StorageWeakRef(meta_fk(argument.meta)._typed_storage())
62
+ ]
63
+
64
+ # TODO: error on unrecognized nodes
65
+ return mutated_inputs
66
+
67
+
68
+ def get_device_node_mapping(gm: torch.fx.GraphModule):
69
+ device_node_mapping: Dict[torch.device, torch.fx.Node] = {}
70
+ for n in gm.graph.nodes:
71
+ t = n.meta.get("val", None)
72
+ if isinstance(t, torch.Tensor) and t.device not in device_node_mapping:
73
+ device_node_mapping[t.device] = n
74
+ return device_node_mapping
75
+
76
+
77
+ def check_for_mutation(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]:
78
+ mutation_indices = find_input_mutations(aot_model.graph) - set(range(num_fixed))
79
+ if not mutation_indices:
80
+ return None
81
+
82
+ return get_mutation_stack_trace(aot_model, mutation_indices)
83
+
84
+
85
+ def check_for_skip(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]:
86
+ if mut_skip := check_for_mutation(aot_model, num_fixed):
87
+ return mut_skip
88
+
89
+ if skip := check_multiple_devices_or_any_cpu_nodes(
90
+ get_device_node_mapping(aot_model)
91
+ ):
92
+ return skip
93
+
94
+ if has_incompatible_cudagraph_ops(aot_model):
95
+ return "skipping cudagraphs due to incompatible op"
96
+
97
+ return None
98
+
99
+
100
+ def get_device_index(gm) -> int:
101
+ device = next(iter(get_device_node_mapping(gm)))
102
+ assert device.type == "cuda"
103
+ return device.index
104
+
105
+
106
+ def get_stack_traces(gm) -> List[Optional[str]]:
107
+ output = output_node(gm)
108
+ assert len(output.args) == 1
109
+ return [
110
+ (arg.stack_trace if isinstance(arg, torch.fx.node.Node) else None)
111
+ for arg in output.args[0]
112
+ ]
113
+
114
+
115
+ def cudagraphs(dynamo_model, dynamo_inputs):
116
+ do_cudagraphs = BoxedBool(True)
117
+ boxed_device_index = BoxedDeviceIndex(None)
118
+
119
+ def forward_cudagraphs(aot_model, aot_inputs, is_inference=False):
120
+ interp = boxed_nop(aot_model, aot_inputs)
121
+ fixed = num_fw_fixed_arguments(len(dynamo_inputs), len(aot_inputs))
122
+ if skip_msg := check_for_skip(aot_model, fixed):
123
+ BoxedBool.disable(do_cudagraphs)
124
+ perf_log.warning("skipping cudagraphs due to %s", skip_msg)
125
+ return interp
126
+
127
+ boxed_device_index.set(get_device_index(aot_model))
128
+
129
+ out = cudagraphify_impl(
130
+ interp,
131
+ aot_inputs,
132
+ range(fixed),
133
+ device_index=boxed_device_index.value,
134
+ is_backward=False,
135
+ is_inference=False,
136
+ stack_traces=get_stack_traces(aot_model),
137
+ )
138
+ out._boxed_call = True
139
+ return out
140
+
141
+ def backward_cudagraphs(aot_model, aot_inputs):
142
+ interp = boxed_nop(aot_model, aot_inputs)
143
+ if not do_cudagraphs:
144
+ return aot_model
145
+
146
+ fixed = count_tangents(aot_model)
147
+ if skip_msg := check_for_skip(aot_model, fixed):
148
+ perf_log.warning("skipping cudagraphs due to %s", skip_msg)
149
+
150
+ # See [Backward Generation Handling]
151
+ manager = torch._inductor.cudagraph_trees.get_manager(
152
+ boxed_device_index.value, create_if_none_exists=False
153
+ )
154
+ assert manager is not None
155
+
156
+ def fn(inputs):
157
+ manager.set_to_running_backward()
158
+ return aot_model(inputs)
159
+
160
+ fn._boxed_call = True
161
+ return fn
162
+
163
+ out = cudagraphify_impl(
164
+ interp,
165
+ aot_inputs,
166
+ range(fixed),
167
+ device_index=get_device_index(aot_model),
168
+ is_backward=True,
169
+ is_inference=False,
170
+ stack_traces=get_stack_traces(aot_model),
171
+ )
172
+ out._boxed_call = True
173
+ return out
174
+
175
+ aot_cudagraphs = aot_autograd(
176
+ fw_compiler=forward_cudagraphs,
177
+ bw_compiler=backward_cudagraphs,
178
+ inference_compiler=functools.partial(forward_cudagraphs, is_inference=True),
179
+ keep_inference_input_mutations=torch._dynamo.config.cudagraph_backend_keep_input_mutation,
180
+ )
181
+ return aot_cudagraphs(dynamo_model, dynamo_inputs)
182
+
183
+
184
+ class CudagraphsBackend:
185
+ compiler_name = "cudagraphs"
186
+
187
+ @staticmethod
188
+ def reset():
189
+ from torch._inductor.cudagraph_trees import reset_cudagraph_trees
190
+
191
+ reset_cudagraph_trees()
192
+
193
+ @staticmethod
194
+ def __call__(model, inputs):
195
+ return cudagraphs(model, inputs)
196
+
197
+
198
+ # aot_cudagraphs only applies CUDA graphs to the graph. It is also helpful
199
+ # for debugging and can serve as a perf baseline.
200
+ register_backend(name="cudagraphs", compiler_fn=CudagraphsBackend())
201
+
202
+
203
+ def cudagraphs_inner(model, inputs, copy_outputs=True, copy_inputs=True):
204
+ """This isn't registered as a backend, but is used in some benchmarks"""
205
+ assert isinstance(inputs, (list, tuple))
206
+ if copy_inputs:
207
+ static_inputs = [torch.zeros_like(x) for x in inputs]
208
+ else:
209
+ static_inputs = list(inputs)
210
+
211
+ # warmup
212
+ torch.cuda.synchronize()
213
+ stream = torch.cuda.Stream()
214
+ stream.wait_stream(torch.cuda.current_stream())
215
+ with torch.cuda.stream(stream):
216
+ model(*inputs)
217
+ stream.synchronize()
218
+ torch.cuda.current_stream().wait_stream(stream)
219
+ torch.cuda.synchronize()
220
+
221
+ # record
222
+ graph = torch.cuda.CUDAGraph()
223
+ with torch.cuda.graph(graph, stream=stream):
224
+ static_outputs = model(*static_inputs)
225
+ if not isinstance(static_outputs, (list, tuple)):
226
+ static_outputs = (static_outputs,)
227
+
228
+ def run(*new_inputs):
229
+ assert len(static_inputs) == len(new_inputs)
230
+ if copy_inputs:
231
+ for dst, src in zip(static_inputs, new_inputs):
232
+ dst.copy_(src)
233
+ graph.replay()
234
+ if copy_outputs:
235
+ return [x.clone() for x in static_outputs]
236
+ else:
237
+ return static_outputs
238
+
239
+ return run
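As a usage note for cudagraphs_inner above (a sketch only; it assumes a CUDA device, and the toy model and shapes are made up for illustration):

import torch
from torch._dynamo.backends.cudagraphs import cudagraphs_inner

model = torch.nn.Linear(16, 16).cuda()
inputs = [torch.randn(8, 16, device="cuda")]

# Warm up and record one CUDA graph for the model, then replay it; with
# copy_inputs=True each call copies new inputs into the static buffers.
run = cudagraphs_inner(model, inputs, copy_outputs=True, copy_inputs=True)
out = run(torch.randn(8, 16, device="cuda"))

The registered backend path is simply torch.compile(model, backend="cudagraphs"), which goes through the CudagraphsBackend / aot_autograd wiring above instead.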
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/debugging.py ADDED
@@ -0,0 +1,289 @@
1
+ # mypy: ignore-errors
2
+
3
+ import dataclasses
4
+ import functools
5
+ from importlib import import_module
6
+ from typing import Any, List, Optional
7
+
8
+ from functorch.compile import min_cut_rematerialization_partition
9
+
10
+ import torch
11
+ from torch import _guards
12
+ from torch._functorch.compilers import ts_compile
13
+ from .common import aot_autograd
14
+ from .registry import register_debug_backend as register_backend
15
+
16
+ """
17
+ This file contains TorchDynamo backends intended for debugging uses.
18
+ """
19
+
20
+
21
+ @register_backend
22
+ def eager(gm, fake_tensor_inputs):
23
+ return gm
24
+
25
+
26
+ @register_backend
27
+ def pre_dispatch_eager(gm, fake_tensor_inputs):
28
+ from torch.fx.experimental.proxy_tensor import make_fx
29
+
30
+ def runnable_gm(*args):
31
+ return torch.fx.Interpreter(gm).run(*args)
32
+
33
+ pre_dispatch_gm = make_fx(runnable_gm, pre_dispatch=True)(*fake_tensor_inputs)
34
+ pre_dispatch_gm.print_readable()
35
+
36
+ return pre_dispatch_gm
37
+
38
+
39
+ @register_backend
40
+ def eager_debug(gm, fake_tensor_inputs):
41
+ from torch._subclasses.schema_check_mode import SchemaCheckMode
42
+
43
+ # We could add more debugging bits here.
44
+ # Right now, this backend can be used to check for and error on
45
+ # custom dispatcher ops that have incorrect schemas.
46
+ def inner(*args):
47
+ with SchemaCheckMode():
48
+ return torch.fx.Interpreter(gm).run(*args)
49
+
50
+ return inner
51
+
52
+
53
+ @register_backend(name="ts")
54
+ def torchscript(gm, fake_tensor_inputs):
55
+ return torch.jit.script(gm)
56
+
57
+
58
+ # uses a boxed call so that inputs can be discarded when they are no longer needed
59
+ def boxed_nop(fx_g, example_inputs):
60
+ def run(args):
61
+ return torch.fx.Interpreter(fx_g).boxed_run(args)
62
+
63
+ run._boxed_call = True
64
+ return run
65
+
66
+
67
+ # Useful for debugging purposes
68
+ # aot_eager uses AOT Autograd backend with nop compiler. It is helpful in debugging.
69
+ aot_eager = aot_autograd(
70
+ fw_compiler=boxed_nop, partition_fn=min_cut_rematerialization_partition
71
+ )
72
+ register_backend(name="aot_eager", compiler_fn=aot_eager)
73
+
74
+ aot_eager_default_partitioner = aot_autograd(fw_compiler=boxed_nop)
75
+ register_backend(
76
+ name="aot_eager_default_partitioner", compiler_fn=aot_eager_default_partitioner
77
+ )
78
+
79
+ # Uses TorchInductor AOT Autograd decomps and partitioner to isolate aot vs
80
+ # inductor problems.
81
+ # aot_eager_decomp_partition just replaces the inductor compiler with nop to help
82
+ # isolate inductor vs aot_eager errors
83
+ aot_eager_decomp_partition = aot_autograd(
84
+ # these are taken from memory_efficient_fusion()
85
+ fw_compiler=boxed_nop,
86
+ bw_compiler=boxed_nop,
87
+ # NB: lambda here is to delay import of inductor
88
+ decompositions=lambda: import_module(
89
+ "torch._inductor.compile_fx"
90
+ ).select_decomp_table(),
91
+ partition_fn=functools.partial(
92
+ min_cut_rematerialization_partition, compiler="inductor"
93
+ ),
94
+ )
95
+ register_backend(
96
+ name="aot_eager_decomp_partition", compiler_fn=aot_eager_decomp_partition
97
+ )
98
+
99
+ # AOT Autograd with torchscript backend. Default partitioner.
100
+ # aot_ts uses torchscript backend. We can use this with both nnc and nvfuser
101
+ # by using the relevant fuser with torch.jit.fuser(...)
102
+ aot_ts = aot_autograd(fw_compiler=ts_compile)
103
+ register_backend(name="aot_ts", compiler_fn=aot_ts)
104
+
105
+ # These buggy backends are used for inducing bugs so that we can test
106
+ # our repro extraction / minifier scripts
107
+
108
+
109
+ class ReluCompileError(Exception):
110
+ pass
111
+
112
+
113
+ class TestingOnlyCompileError(Exception):
114
+ pass
115
+
116
+
117
+ @register_backend
118
+ def relu_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
119
+ for node in gm.graph.nodes:
120
+ if node.target == torch.relu:
121
+ raise ReluCompileError()
122
+ return gm
123
+
124
+
125
+ @register_backend
126
+ def relu_runtime_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
127
+ for node in gm.graph.nodes:
128
+ if node.target == torch.relu:
129
+ node.target = torch._assert
130
+ node.args = (False, "ReluRuntimeError")
131
+ gm.recompile()
132
+ return gm
133
+
134
+
135
+ @register_backend
136
+ def relu_accuracy_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
137
+ for node in gm.graph.nodes:
138
+ if node.target == torch.relu:
139
+ node.target = torch.add
140
+ node.args = (node.args[0], 1)
141
+ gm.recompile()
142
+
143
+ return gm
144
+
145
+
146
+ @register_backend
147
+ def non_leaf_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
148
+ # Require at least one non-trivial thing in the graph,
149
+ # see https://github.com/pytorch/pytorch/issues/102898
150
+ for node in gm.graph.nodes:
151
+ if node.op == "call_function":
152
+ break
153
+ else:
154
+ return gm
155
+ for t in example_inputs:
156
+ if not t.is_leaf:
157
+ raise TestingOnlyCompileError()
158
+ return gm
159
+
160
+
161
+ @dataclasses.dataclass
162
+ class ExplainOutput:
163
+ """
164
+ This is the output of :func:`torch._dynamo.explain()`
165
+ There is no reason to create this class directly.
166
+ """
167
+
168
+ graphs: List[torch.fx.GraphModule]
169
+ graph_count: int
170
+ graph_break_count: int
171
+ break_reasons: List[
172
+ Any
173
+ ] # Type is GraphCompileReason but doesn't matter for this purpose
174
+ op_count: int
175
+ ops_per_graph: Optional[List[torch.fx.Node]] = None
176
+ out_guards: Optional[List[_guards.Guard]] = None
177
+ compile_times: Optional[str] = None
178
+
179
+ def __str__(self):
180
+ output = f"Graph Count: {self.graph_count}\n"
181
+ output += f"Graph Break Count: {self.graph_break_count}\n"
182
+ output += f"Op Count: {self.op_count}\n"
183
+
184
+ output += "Break Reasons:\n"
185
+ for idx, break_reason in enumerate(self.break_reasons):
186
+ output += f" Break Reason {idx+1}:\n"
187
+ output += f" Reason: {break_reason.reason}\n"
188
+ output += " User Stack:\n"
189
+ for frame_summary in break_reason.user_stack:
190
+ output += f" {frame_summary}\n"
191
+
192
+ if self.ops_per_graph is not None:
193
+ output += "Ops per Graph:\n"
194
+ for idx, ops in enumerate(self.ops_per_graph):
195
+ output += f" Ops {idx+1}:\n"
196
+ for op in ops:
197
+ output += f" {op}\n"
198
+
199
+ if self.out_guards is not None:
200
+ output += "Out Guards:\n"
201
+ for i, guard in enumerate(self.out_guards):
202
+ output += f" Guard {i+1}:\n"
203
+ output += f" {str(guard)}"
204
+
205
+ if self.compile_times is not None:
206
+ output += f"Compile Times: {self.compile_times}\n"
207
+ return output
208
+
209
+
210
+ def _explain_graph_detail(
211
+ gm: torch.fx.GraphModule, graphs, op_count, ops_per_graph, break_reasons
212
+ ):
213
+ """
214
+ This function is a utility which processes a torch.fx.GraphModule and
215
+ accumulates information about its ops, graph breaks, and other details. It
216
+ is intended to be used by the ExplainWithBackend class and
217
+ `torch._dynamo.explain()` to provide details from Dynamo's graph capture.
218
+
219
+ Parameters:
220
+ gm (torch.fx.GraphModule): The GraphModule to be processed.
221
+ graphs (list): A list that accumulates all the GraphModules processed.
222
+ op_count (int): The total count of operations in all GraphModules processed so far.
223
+ ops_per_graph (list): A list that accumulates the operations of each GraphModule.
224
+ break_reasons (list): A list that accumulates the reasons for breaks in each GraphModule.
225
+
226
+ Returns:
227
+ tuple: A tuple containing the processed GraphModule, the updated lists of graphs,
228
+ operations per graph, and break reasons, and the updated operation count.
229
+ """
230
+ graphs.append(gm)
231
+ ops = [node.target for node in gm.graph.nodes if node.op == "call_function"]
232
+ op_count += len(ops)
233
+ ops_per_graph.append(ops)
234
+ if gm.compile_subgraph_reason.graph_break:
235
+ break_reasons.append(gm.compile_subgraph_reason)
236
+
237
+ return gm, graphs, op_count, ops_per_graph, break_reasons
238
+
239
+
240
+ class ExplainWithBackend:
241
+ """
242
+ This class is intended to be used as a backend for `torch.compile`. It is
243
+ composable with other backends. When used in this way, it accumulates
244
+ information about graph breaks, ops, and other info and provides a string
245
+ representation summarizing this information.
246
+
247
+ Attributes:
248
+ backend (str): The name of the backend to use for optimization.
249
+ graphs (list): A list of the graphs captured by TorchDynamo.
250
+ op_count (int): The total number of operations in all optimized graphs.
251
+ break_reasons (list): A list of graph break reasons with stack traces.
252
+
253
+ Example Usage:
254
+ def fn(x):
255
+ x = torch.sigmoid(x)
256
+ return x
257
+
258
+ torch._dynamo.reset()
259
+ eb = ExplainWithBackend("inductor")
260
+ optimized_fn = torch.compile(fn, backend=eb)
261
+ result = optimized_fn(torch.randn(5))
262
+ print(eb.output())
263
+ """
264
+
265
+ def __init__(self, backend):
266
+ from .registry import lookup_backend
267
+
268
+ self.backend = lookup_backend(backend)
269
+ self.graphs = []
270
+ self.op_count = 0
271
+ self.break_reasons = []
272
+
273
+ def __call__(self, gm: torch.fx.GraphModule, example_inputs):
274
+ gm, self.graphs, self.op_count, _, self.break_reasons = _explain_graph_detail(
275
+ gm, self.graphs, self.op_count, [], self.break_reasons
276
+ )
277
+ return self.backend(gm, example_inputs)
278
+
279
+ def output(self) -> ExplainOutput:
280
+ graph_count = len(self.graphs)
281
+ output = ExplainOutput(
282
+ self.graphs,
283
+ graph_count,
284
+ graph_count - 1,
285
+ self.break_reasons,
286
+ self.op_count,
287
+ )
288
+
289
+ return output
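The debugging backends registered above are mostly used to bisect where a torch.compile failure comes from. A hedged sketch (fn is a stand-in for whatever function misbehaves):

import torch

def fn(x):
    return torch.sin(x) + torch.cos(x)

x = torch.randn(8)

torch.compile(fn, backend="eager")(x)                       # Dynamo capture only
torch.compile(fn, backend="aot_eager")(x)                   # plus AOTAutograd with nop compilers
torch.compile(fn, backend="aot_eager_decomp_partition")(x)  # plus Inductor decomps/partitioner
# If all three pass but backend="inductor" fails, the problem is likely in Inductor codegen.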
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/distributed.py ADDED
@@ -0,0 +1,612 @@
1
+ # mypy: ignore-errors
2
+
3
+ import logging
4
+ import traceback
5
+ from dataclasses import dataclass, field
6
+ from typing import Any, List, Optional
7
+ from unittest import mock
8
+
9
+ import torch
10
+ from torch import fx
11
+ from torch._dynamo.output_graph import GraphCompileReason
12
+ from torch._dynamo.utils import deepcopy_to_fake_tensor, detect_fake_mode
13
+ from torch._logging import trace_structured
14
+ from torch.fx.node import Node
15
+
16
+ # Regular log messages should go through 'log'.
17
+ # ddp_graph_log is a separate artifact logger reserved for dumping graphs.
18
+ # See docs/source/logging.rst for more info.
19
+ log = logging.getLogger(__name__)
20
+ ddp_graph_log = torch._logging.getArtifactLogger(__name__, "ddp_graphs")
21
+
22
+
23
+ def args_str(args):
24
+ # a debug helper
25
+ if torch.is_tensor(args):
26
+ return f"T[{args.shape}]"
27
+ elif isinstance(args, tuple):
28
+ return f"tuple({', '.join([args_str(x) for x in args])})"
29
+ elif isinstance(args, list):
30
+ return f"list({', '.join([args_str(x) for x in args])})"
31
+ else:
32
+ return str(args)
33
+
34
+
35
+ @dataclass
36
+ class Bucket:
37
+ size: int = 0
38
+ params: List[str] = field(default_factory=list)
39
+ nodes: List[fx.Node] = field(default_factory=list)
40
+
41
+ # param_ids is just used for unit testing
42
+ param_ids: List = field(default_factory=list)
43
+
44
+ # keep track of any buckets that were extended for logging purposes
45
+ opcount_increased_to_capture_external_output: int = 0
46
+ paramsize_before_opcount_increase: int = 0
47
+
48
+
49
+ def bucket_has_external_output(bucket: Bucket) -> bool:
50
+ nodes_in_bucket = set()
51
+ # we want to iterate in reverse order, but clumsi-luckily the bucket.nodes list was already created backwards
52
+ # so we don't reverse it here
53
+ for node in bucket.nodes:
54
+ # assume node.op != output, since those are filtered in the original iteration
55
+ nodes_in_bucket.add(node)
56
+ for user in node.users:
57
+ if user not in nodes_in_bucket:
58
+ return True
59
+ return False
60
+
61
+
62
+ def pretty_print_buckets(buckets: List[Bucket], bucket_bytes_cap: int):
63
+ headers = ("Index", "Size (b)", "Param Names")
64
+ rows = []
65
+ extended_buckets = []
66
+ for idx, bucket in enumerate(reversed(buckets)):
67
+ if len(bucket.params) > 0:
68
+ rows.append((idx, bucket.size, bucket.params[0]))
69
+ for param in bucket.params[1:]:
70
+ rows.append((None, None, param))
71
+ if bucket.opcount_increased_to_capture_external_output > 0:
72
+ extended_buckets.append(
73
+ (
74
+ idx,
75
+ bucket.opcount_increased_to_capture_external_output,
76
+ bucket.size - bucket.paramsize_before_opcount_increase,
77
+ )
78
+ )
79
+
80
+ if len(rows):
81
+ log.info(
82
+ "\nDDPOptimizer used bucket cap %s and created %d buckets. Enable debug logs for detailed bucket info.",
83
+ bucket_bytes_cap,
84
+ len(buckets),
85
+ )
86
+
87
+ if len(extended_buckets):
88
+ log.warning(
89
+ "Some buckets were extended beyond their requested parameter capacities"
90
+ " in order to ensure each subgraph has an output node, required for fx graph partitioning."
91
+ " This can be the case when a subgraph would have only contained nodes performing inplace mutation,"
92
+ " and returning no logical outputs. This should not be a problem, unless it results in too few graph"
93
+ " partitions for optimal DDP performance."
94
+ )
95
+
96
+ try:
97
+ from tabulate import tabulate
98
+
99
+ log.debug(
100
+ "\nDDPOptimizer produced the following bucket assignments:\n%s",
101
+ tabulate(rows, headers=headers, tablefmt="simple_grid"),
102
+ )
103
+
104
+ if len(extended_buckets):
105
+ log.warning(
106
+ "DDPOptimizer extended these buckets to ensure per-subgraph output nodes:\n%s",
107
+ tabulate(
108
+ extended_buckets,
109
+ headers=("Index", "Extra Ops", "Extra Param Size (b)"),
110
+ tablefmt="simple_grid",
111
+ ),
112
+ )
113
+ except ImportError:
114
+ log.debug(
115
+ "Please `pip install tabulate` in order to display ddp bucket sizes and diagnostic information."
116
+ )
117
+ else:
118
+ log.debug("DDPOptimizer captured no parameters and did not split this graph.")
119
+
120
+
121
+ def has_higher_order_op(gm):
122
+ # Check if there is a higher order op in the graph
123
+ for node in gm.graph.nodes:
124
+ if node.op == "get_attr":
125
+ maybe_param = getattr(gm, node.target)
126
+ if isinstance(maybe_param, torch.fx.GraphModule):
127
+ return True
128
+ return False
129
+
130
+
131
+ # 3 (lazy compile): Replace submodules with lazily compiling submodule
132
+ class SubmoduleReplacer(torch.fx.interpreter.Interpreter):
133
+ def __init__(self, module, compiler):
134
+ super().__init__(module)
135
+ self.compiler = compiler
136
+
137
+ def lazily_compiled_submod(self, input_mod):
138
+ """
139
+ Create a wrapper around submodules which:
140
+ - lazily compiles each of the partitioned submodules using the user-provided compiler
141
+ - unpacks singleton tuples/lists into flat arg
142
+ """
143
+
144
+ class LazilyCompiledModule(torch.nn.Module):
145
+ def __init__(self, submod, compiler, unwrap_singleton_tuple):
146
+ super().__init__()
147
+ self.submod = submod
148
+ self.compiler = compiler
149
+ self.compiled = False
150
+ self.unwrap_singleton_tuple = unwrap_singleton_tuple
151
+
152
+ def forward(self, *args):
153
+ if not self.compiled:
154
+ # First compile with args as example_inputs
155
+ # These args will be fakeified if using Inductor/AOTAutograd
156
+ new_submod = self.compiler(self.submod, args)
157
+ del self.submod
158
+ self.submod = new_submod
159
+ self.compiled = True
160
+ self.compiler = None
161
+
162
+ x = self.submod(*args)
163
+ # we must let 'input_mod' return a tuple, to make AOT happy.
164
+ # (aot_autograd compile_fn literally requires that the output of a graph it compiles is a tuple).
165
+ # however, we don't acutally want this tuple to be returned, since the fx logic that calls the submod
166
+ # will again wrap outputs from the submod in a tuple. So we unwrap it, and count on it being re-wrapped
167
+ if self.unwrap_singleton_tuple and isinstance(x, (tuple, list)):
168
+ return x[0]
169
+ return x
170
+
171
+ unwrap_singleton_tuple = False
172
+ for sn in input_mod.graph.nodes:
173
+ if sn.op == "output":
174
+ if not isinstance(sn.args[0], tuple):
175
+ unwrap_singleton_tuple = True
176
+ sn.args = (sn.args,)
177
+
178
+ input_mod.recompile()
179
+ input_mod.compile_subgraph_reason = GraphCompileReason(
180
+ "DDPOptimizer intentional graph-break (See Note [DDPOptimizer])."
181
+ " Set `torch._dynamo.config.optimize_ddp = False` to disable.",
182
+ [
183
+ # it's close to useless to get a real stacktrace here, and quite verbose.
184
+ traceback.FrameSummary(__file__, 0, DDPOptimizer),
185
+ ],
186
+ )
187
+ wrapper = LazilyCompiledModule(
188
+ input_mod,
189
+ self.compiler,
190
+ unwrap_singleton_tuple,
191
+ )
192
+ return wrapper
193
+
194
+ # We replace the submodules with lazy submodules which compile
195
+ # the corresponding submodules when they are run with real values
196
+ # Always returns `None` - we do not need to propagate values in order
197
+ # to replace submodules.
198
+ def run_node(self, n: Node) -> Any:
199
+ if n.op == "call_module":
200
+ real_mod = self.fetch_attr(n.target)
201
+
202
+ ddp_graph_log.debug("\n---%s graph---\n%s", n.target, real_mod.graph)
203
+
204
+ assert len(n.kwargs) == 0, "We assume only args for these modules"
205
+ lazily_compiled_submod = self.lazily_compiled_submod(real_mod)
206
+
207
+ # We update the original (outer) graph with a call into the compiled module
208
+ # instead of the uncompiled one.
209
+ self.module.delete_submodule(n.target)
210
+ n.target = "compiled_" + n.target
211
+ self.module.add_submodule(n.target, lazily_compiled_submod)
212
+
213
+
214
+ # 3 (no lazy compile): compile each of the partitioned submodules using the user-provided compiler
215
+ class SubmodCompiler(torch.fx.interpreter.Interpreter):
216
+ def __init__(self, module, compiler, fake_mode):
217
+ super().__init__(module)
218
+ self.compiler = compiler
219
+ self.fake_mode = fake_mode
220
+
221
+ def compile_submod(self, input_mod, args, kwargs):
222
+ """
223
+ Compile the submodule,
224
+ using a wrapper to make sure its output is always a tuple,
225
+ which is required by AotAutograd based compilers
226
+ """
227
+ assert len(kwargs) == 0, "We assume only args for these modules"
228
+
229
+ class WrapperModule(torch.nn.Module):
230
+ def __init__(self, submod, unwrap_singleton_tuple):
231
+ super().__init__()
232
+ self.submod = submod
233
+ self.unwrap_singleton_tuple = unwrap_singleton_tuple
234
+
235
+ def forward(self, *args):
236
+ x = self.submod(*args)
237
+ # TODO(whc)
238
+ # for some reason the isinstance check is necessary if I split one node per submod
239
+ # - even though I supposedly wrapped the output in a tuple in those cases, the real
240
+ # compiled module was still returning a tensor
241
+ if self.unwrap_singleton_tuple and isinstance(x, (tuple, list)):
242
+ return x[0]
243
+ return x
244
+
245
+ unwrap_singleton_tuple = False
246
+ for sn in input_mod.graph.nodes:
247
+ if sn.op == "output":
248
+ if not isinstance(sn.args[0], tuple):
249
+ unwrap_singleton_tuple = True
250
+ sn.args = (sn.args,)
251
+
252
+ input_mod.recompile()
253
+ input_mod.compile_subgraph_reason = GraphCompileReason(
254
+ "DDPOptimizer intentional graph-break (See Note [DDPOptimizer])."
255
+ " Set `torch._dynamo.config.optimize_ddp = False` to disable.",
256
+ [
257
+ # it's close to useless to get a real stacktrace here, and quite verbose.
258
+ traceback.FrameSummary(__file__, 0, DDPOptimizer),
259
+ ],
260
+ )
261
+
262
+ wrapper = WrapperModule(
263
+ self.compiler(input_mod, args),
264
+ unwrap_singleton_tuple,
265
+ )
266
+ return wrapper
267
+
268
+ # Note:
269
+ #
270
+ # The way distributed works today around fake tensors can be somewhat confusing.
271
+ # Some of these codepaths are shared in both runtime, and compile time. The presence
272
+ # of a fake_mode, read off of fake tensor inputs, dictates how we will operate.
273
+ #
274
+ # A few things to keep in mind:
275
+ #
276
+ # 1) We invoke `compile_submod` with a real module. The output of that gets stored
277
+ # on the graph via `self.module.add_submodule(n.target, compiled_submod_real)`.
278
+ #
279
+ # 2) When running a call_module targeted node, if we have a fake_mode, we fakify the
280
+ # module we got from self.fetch_attr(n.target). Regardless of fake_mode, we then execute it.
281
+ #
282
+ # 3) Fake tensors should always be around during compile time.
283
+ #
284
+ # 4) Fake tensors should never be around at runtime.
285
+ #
286
+ # 5) We end up with a compilation mode that takes a real submodule and fake tensors,
287
+ # to match what aot_autograd expects. See Note: [Fake Modules and AOTAutograd]
288
+ def run_node(self, n: Node) -> Any:
289
+ args, kwargs = self.fetch_args_kwargs_from_env(n)
290
+ new_args = []
291
+ assert self.fake_mode
292
+ for arg in args:
293
+ if isinstance(arg, torch.Tensor) and not isinstance(
294
+ arg, torch._subclasses.FakeTensor
295
+ ):
296
+ new_args.append(torch._dynamo.utils.to_fake_tensor(arg, self.fake_mode))
297
+ else:
298
+ new_args.append(arg)
299
+
300
+ log.debug("run_node %s, %s got args %s", n.op, n.target, args_str(args))
301
+ assert isinstance(args, tuple)
302
+ assert isinstance(kwargs, dict)
303
+
304
+ if n.op == "call_module":
305
+ real_mod = self.fetch_attr(n.target)
306
+ if self.fake_mode:
307
+ curr_submod = deepcopy_to_fake_tensor(real_mod, self.fake_mode)
308
+ else:
309
+ curr_submod = real_mod
310
+
311
+ ddp_graph_log.debug("\n---%s graph---\n%s", n.target, curr_submod.graph)
312
+
313
+ # When calling the compiler on the submod, inputs (new_args) are expected to
314
+ # be FakeTensors already since Dynamo would have made them FakeTensors in the
315
+ # non-DDP flow. However, the parameters are _not_ expected to be FakeTensors,
316
+ # since this wrapping happens during compilation
317
+
318
+ # Note: Returning Fake Tensors on First AOT Autograd Call
319
+ #
320
+ # Inductor will optimize strides of outputs when it deems it profitable.
321
+ # For instance, converting to channels last. When we split the graph here
322
+ # into multiple inductor compilations, we need to make sure that the
323
+ # output strides of one compilation is appropriately passed to the subsequent
324
+ # compilations. However, the mapping from inductor output to dynamo output
325
+ # is non-trivial due to aot_autograd's deduping, de-aliasing, mutation, re-writing,
326
+ # subclass handling, etc. In order to replay all this logic we set a flag such that
327
+ # the first invocation of inductor in aot_autograd will return Fake Tensors with
328
+ # appropriate strides. Then, all of aot autograd's runtime logic is replayed.
329
+ # This gives us the appropriately strided outputs here which will reflect runtime strides.
330
+
331
+ class FakeifyFirstAOTInvocationGuard:
332
+ def __init__(self):
333
+ self.tc = torch._guards.TracingContext.try_get()
334
+ assert self.tc
335
+ torch._guards.TracingContext.try_get().fakify_first_call = True
336
+
337
+ def __del__(self):
338
+ self.tc.fakify_first_call = False
339
+
340
+ # For aot_eager and other backends, tracing context is not set
341
+ has_tracing_context = torch._guards.TracingContext.try_get() is not None
342
+ if has_tracing_context:
343
+ g = FakeifyFirstAOTInvocationGuard()
344
+
345
+ from torch._dynamo.utils import counters
346
+
347
+ init = counters["aot_autograd"]["total"]
348
+ compiled_submod_real = self.compile_submod(real_mod, new_args, kwargs)
349
+
350
+ # TODO - better way of doing this?
351
+ # Only aot autograd handles fakifying first call
352
+ invoked_aot_autograd = init != counters["aot_autograd"]["total"]
353
+
354
+ # We update the original (outer) graph with a call into the compiled module
355
+ # instead of the uncompiled one.
356
+ self.module.delete_submodule(n.target)
357
+ n.target = "compiled_" + n.target
358
+ self.module.add_submodule(n.target, compiled_submod_real)
359
+
360
+ # Finally, we have to produce inputs for use compiling the next submodule,
361
+ # and these need to be FakeTensors, so we execute the module under fake_mode
362
+ # Because parameters are not fake we patch fake tensor mode to allow non fake inputs
363
+ with self.fake_mode, mock.patch.object(
364
+ self.fake_mode, "allow_non_fake_inputs", True
365
+ ):
366
+ if has_tracing_context and invoked_aot_autograd:
367
+ out = compiled_submod_real(*new_args, **kwargs)
368
+ # output should be fake or subclass
369
+ assert all(
370
+ (not isinstance(t, torch.Tensor) or type(t) is not torch.Tensor)
371
+ for t in (out if isinstance(out, (list, tuple)) else [out])
372
+ )
373
+ return out
374
+ else:
375
+ return curr_submod(*new_args, **kwargs)
376
+ else:
377
+ # placeholder or output nodes don't need to get compiled, just executed
378
+ return getattr(self, n.op)(n.target, new_args, kwargs)
379
+
380
+
381
+ class DDPOptimizer:
382
+
383
+ """Note [DDPOptimizer]
384
+ DDPOptimizer applies when dynamo compiles models wrapped in DistributedDataParallel (DDP),
385
+ breaking the dynamo graph into chunks to compile separately, with the breaks aligning to
386
+ the boundaries of gradient-allreduce buckets chosen by DDP.
387
+
388
+ Background/Motivation
389
+ - DDP uses allreduce collectives to synchronize partial gradients computed on different workers
390
+ - DDP groups gradient allreduces into 'buckets' to optimize communication efficiency of all-reduce
391
+ - Parameters grouped into buckets are assumed to be adjacent in time, so they become ready
392
+ at around the same time during backward and thus can share the same allreduce efficiently
393
+ - Allreduces must overlap with backward compute for optimal training performance
394
+ - DDP schedules allreduces using 'hooks' fired from the c++ autograd engine in pytorch, which
395
+ operates when individual grads become 'ready'
396
+ - Dynamo+AOTAutograd produces a single fused graph that runs 'atomically' from the perspective of the
397
+ autograd engine, such that all gradients become 'ready' at the same time. Hooks fire after the whole
398
+ fused backward function executes, preventing any overlap of compute and communication
399
+
400
+ Algorithm
401
+ - DDPOptimizer starts off with an FX graph traced by dynamo which represents forward. It can traverse
402
+ this graph in reverse order to determine the true order that gradients will become ready during backward.
403
+ - Parameter sizes are counted in reverse order, up to a bucket size limit, at which point a new bucket is started
404
+ and a graph break introduced
405
+ - Each of the subgraphs is compiled by the compiler provided to dynamo by the user, and then fused back together
406
+ into an outer module that is returned to the user
407
+
408
+ Notes
409
+ - It would be better to enforce (by adding an API to DDP) that the bucket splits chosen here are used by DDP,
410
+ and that DDP does not need to detect or optimize bucket order by observing execution at runtime, as it does
411
+ in eager.
412
+ - If Dynamo can't capture a whole graph for the portion of the model wrapped by DDP, this algorithm will currently
413
+ produce splits that do not necessarily align with the buckets used by DDP. This should result in performance
414
+ degradation approaching the baseline case where graph-splits are not used, but not worse.
415
+ - If the backend compiler fails to compile a single subgraph, it will execute eagerly despite the rest of the
416
+ subgraphs being compiled
417
+ - DDP has a 'parameters_and_buffers_to_ignore' field, which DDPOptimizer attempts to honor by reading markers
418
+ left by DDP on individual parameters. In cases where other transformations, such as reparameterization, are
419
+ also used, the ignore markers could be lost. If DDPOptimizer fails to ignore a parameter ignored by DDP,
420
+ it is not catastrophic but could impact performance by choosing sub-optimal bucket splits.
421
+ - DDPOptimizer always ignores all buffers, regardless of their ignore flag, since buffers do not require gradients,
422
+ and therefore aren't allreduced by DDP. (They are broadcast during forward, but this is not covered by
423
+ DDPOptimizer)
424
+
425
+ Debugging
426
+ - Generally, it is easiest to debug DDPOptimizer in a single process program, using pdb.
427
+ - In many cases, the log messages are helpful (they show bucket size assignments)-
428
+ just set TORCH_LOGS env to include any of 'dynamo', 'distributed', or 'dist_ddp'.
429
+ - See `benchmarks/dynamo/distributed.py` for a simple harness that will run a toy model or a torchbench model
430
+ in a single process (or with torchrun, in multiple processes)
431
+
432
+ Args:
433
+ bucket_bytes_cap (int): Controls the size of buckets, in bytes, used to determine graphbreaks. Should be
434
+ set to match the equivalent parameter on the original DDP module.
435
+
436
+ backend_compile_fn (callable): A dynamo compiler function, to be invoked to compile each subgraph.
437
+
438
+ first_bucket_cap (int): Controls the size of the first bucket. Should match DDP's first bucket cap. DDP
439
+ special-cases the first bucket size since it is sometimes optimal to start a small allreduce early.
440
+
441
+ """
442
+
443
+ def __init__(
444
+ self,
445
+ bucket_bytes_cap: int,
446
+ backend_compile_fn,
447
+ first_bucket_cap: Optional[int] = None,
448
+ ):
449
+ if first_bucket_cap is not None:
450
+ self.first_bucket_cap = first_bucket_cap
451
+ elif torch.distributed.is_available():
452
+ # this constant comes from C10D lib which is not always built
453
+ self.first_bucket_cap = torch.distributed._DEFAULT_FIRST_BUCKET_BYTES
454
+ else:
455
+ self.first_bucket_cap = bucket_bytes_cap
456
+
457
+ self.bucket_bytes_cap = bucket_bytes_cap
458
+ assert (
459
+ self.first_bucket_cap <= self.bucket_bytes_cap
460
+ ), "First bucket should be smaller/equal to other buckets to get comms warmed up ASAP"
461
+
462
+ self.backend_compile_fn = backend_compile_fn
463
+
464
+ def _ignore_parameter(self, parameter):
465
+ return hasattr(parameter, "_ddp_ignored") and parameter._ddp_ignored
466
+
467
+ def compile_fn(self, gm: fx.GraphModule, example_inputs: List[torch.Tensor]):
468
+ """
469
+ Implements graph splitting, first determining a set of of buckets by counting
470
+ parameter sizes in reverse graph order, then invoking the user/backend compiler
471
+ to compile each subgraph. Finally, stiches compiled graphs into one graphmodule
472
+ and returns its callable.
473
+ """
474
+ if has_higher_order_op(gm):
475
+ # This indicates presence of a higher order op. For now, we
476
+ # have no way to break the higher order op into two buckets.
477
+ # Allowing higher order ops in the graph also requires
478
+ # changes in the split_module, becuase graph splitter
479
+ # currently assumes that all the args of all ops are
480
+ # tensors, but in the case of higher order ops, it could be
481
+ # a graph module. As a workaround, we are shortcircuiting
482
+ raise NotImplementedError(
483
+ "DDPOptimizer backend: Found a higher order op in the graph. "
484
+ "This is not supported. Please turn off DDP optimizer using "
485
+ "torch._dynamo.config.optimize_ddp=False. Note that this can "
486
+ "cause performance degradation because there will be one bucket "
487
+ "for the entire Dynamo graph. Please refer to this issue - "
488
+ "https://github.com/pytorch/pytorch/issues/104674."
489
+ )
490
+
491
+ # 1: compute the partition map according to DDP bucket logic
492
+ buckets = [Bucket()] # (size, param_names)
493
+ for node in reversed(gm.graph.nodes):
494
+ if node.op in ("output", "placeholder"):
495
+ continue
496
+
497
+ if (
498
+ buckets[0].size >= self.bucket_bytes_cap
499
+ or len(buckets) == 1
500
+ and buckets[0].size >= self.first_bucket_cap
501
+ ):
502
+ if bucket_has_external_output(buckets[0]):
503
+ buckets.insert(0, Bucket())
504
+ else:
505
+ # continue building this bucket past the point of filling its parameter capacity,
506
+ # to increase chances it contains at least one node that is either a global output or
507
+ # passed as input to a subsequent graph
508
+
509
+ if buckets[0].opcount_increased_to_capture_external_output == 0:
510
+ buckets[0].paramsize_before_opcount_increase = buckets[0].size
511
+ buckets[0].opcount_increased_to_capture_external_output += 1
512
+
513
+ if node.op == "call_module":
514
+ target = gm.get_submodule(node.target)
515
+ for name, param in target.named_parameters():
516
+ if param.requires_grad and not self._ignore_parameter(param):
517
+ buckets[0].size += param.untyped_storage().nbytes()
518
+ buckets[0].params.append(f"{node.target}_{name}")
519
+ buckets[0].param_ids.append(id(param))
520
+ elif node.op == "get_attr":
521
+ maybe_param = getattr(gm, node.target)
522
+ if maybe_param.requires_grad and not self._ignore_parameter(
523
+ maybe_param
524
+ ):
525
+ buckets[0].size += maybe_param.untyped_storage().nbytes()
526
+ buckets[0].params.append(node.target)
527
+ buckets[0].param_ids.append(id(maybe_param))
528
+
529
+ # All nodes have to be mapped to a bucket, even if they don't have their own params
530
+ # Ignored params still end up in buckets, we just don't count them towards the capacity
531
+ buckets[0].nodes.append(node)
532
+
533
+ if len(buckets) > 1 and buckets[0].size == 0:
534
+ # we collected a small preamble graph with ops that don't include parameters, fuse it back
535
+ buckets[1].nodes.extend(buckets[0].nodes)
536
+ assert len(buckets[0].params) == 0, "Params should be empty if size is 0"
537
+ del buckets[0]
538
+
539
+ # stash buckets for testing/debugging purposes
540
+ self.buckets = buckets
541
+ pretty_print_buckets(buckets, self.bucket_bytes_cap)
542
+
543
+ if len(buckets) == 1:
544
+ # bypass split/fuse logic if there is only one bucket
545
+ return self.backend_compile_fn(gm, example_inputs)
546
+
547
+ # 2: partition the graphmodule according to bucket capacity
548
+ partition_map = {}
549
+ for idx, b in enumerate(buckets):
550
+ for node in b.nodes:
551
+ partition_map[node] = idx
552
+
553
+ split_gm = fx.passes.split_module.split_module(
554
+ gm, None, lambda node: partition_map[node]
555
+ )
556
+
557
+ debug_str = (
558
+ f"\n---orig graph---\n{gm.graph}\n"
559
+ + f"\n---split graph---\n{split_gm.graph}\n"
560
+ )
561
+ for name, module in split_gm.named_modules():
562
+ if "." not in name and len(name):
563
+ # only print the submod graphs, not their children
564
+ debug_str += f"\n---{name} graph---\n{module.graph}\n"
565
+ debug_str += "\n---------------\n"
566
+ ddp_graph_log.debug(debug_str)
567
+
568
+ trace_structured(
569
+ "optimize_ddp_split_graph",
570
+ payload_fn=lambda: split_gm.print_readable(print_output=False),
571
+ )
572
+ for name, module in split_gm.named_modules():
573
+ if "." not in name and len(name):
574
+ trace_structured(
575
+ "optimize_ddp_split_child",
576
+ lambda: {"name": name},
577
+ payload_fn=lambda: module.print_readable(print_output=False),
578
+ )
579
+
580
+ # NOTE, we want to enable `optimize_ddp_lazy_compile` by default as soon as possible,
581
+ # because it will fix stride mismatch errors (see motivation: https://github.com/pytorch/pytorch/pull/114154).
582
+ # However, lazy compile currently causes shape mismatch in other cases (`test_graph_split_inductor_transpose`)
583
+ # and we need to fix them before we can enable it by default.
584
+ if not torch._dynamo.config.optimize_ddp_lazy_compile:
585
+ # Today, optimize_ddp=True and keep_output_stride=False can lead to silent
586
+ # correctness issues. The problem is that ddp_optimizer works by partitioning
587
+ # the dynamo graph, sending each subgraph through aot autograd to inductor,
588
+ # and creates example inputs by eagerly interpreting each subgraph to get
589
+ # an output with the same metadata that we'd get from eager mode.
590
+ # This is a problem though, for torch._inductor.config.keep_output_stride.
591
+ # The above config can cause the outputs of the first graph to have
592
+ # **different** strides from eager, causing the inputs that we pass
593
+ # to the second graph to be wrong.
594
+ # To really fix this, we would need to faithfully ask inductor
595
+ # what the outputs to each graph it expects are.
596
+ fake_mode = detect_fake_mode(example_inputs)
597
+ if fake_mode is None:
598
+ fake_mode = torch._subclasses.fake_tensor.FakeTensorMode()
599
+
600
+ if torch._dynamo.config.optimize_ddp_lazy_compile:
601
+ submod_compiler = SubmoduleReplacer(split_gm, self.backend_compile_fn)
602
+ else:
603
+ submod_compiler = SubmodCompiler(
604
+ split_gm, self.backend_compile_fn, fake_mode
605
+ )
606
+ submod_compiler.run(*example_inputs)
607
+ split_gm.recompile()
608
+
609
+ ddp_graph_log.debug(
610
+ "\n---final graph---\n%s\n---------------\n", split_gm.graph
611
+ )
612
+ return split_gm
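Illustrative usage sketch (not part of this commit): the compile_fn above is normally reached indirectly, by compiling a DistributedDataParallel-wrapped module while torch._dynamo.config.optimize_ddp is enabled; the process-group setup and toy model below are assumptions made up for the example.

import torch
import torch.nn as nn

def compile_under_ddp(model: nn.Module, x: torch.Tensor):
    # DDPOptimizer splits the Dynamo graph so each subgraph's parameter bytes roughly
    # match DDP's bucket cap, letting allreduce overlap with backward compute.
    torch._dynamo.config.optimize_ddp = True  # the default; shown here for emphasis
    ddp_model = nn.parallel.DistributedDataParallel(model)  # assumes init_process_group() already ran
    compiled = torch.compile(ddp_model, backend="inductor")
    return compiled(x)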
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/inductor.py ADDED
@@ -0,0 +1,16 @@
1
+ # mypy: ignore-errors
2
+
3
+ import sys
4
+
5
+ from torch._dynamo import register_backend
6
+
7
+
8
+ @register_backend
9
+ def inductor(*args, **kwargs):
10
+ if sys.platform == "win32":
11
+ raise RuntimeError("Windows not yet supported for inductor")
12
+
13
+ # do import here to avoid loading inductor into memory when it is not used
14
+ from torch._inductor.compile_fx import compile_fx
15
+
16
+ return compile_fx(*args, **kwargs)
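Usage sketch (illustrative, not part of this commit): because of the @register_backend decorator above, the string "inductor" resolves to this function through the backend registry; the toy function below is made up.

import torch

@torch.compile(backend="inductor")  # resolves to the registered inductor() above
def scale_and_sum(x):
    return (x * 2).sum()

# scale_and_sum(torch.randn(4)) triggers compile_fx on the captured graph (not on Windows).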
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/onnxrt.py ADDED
@@ -0,0 +1,37 @@
1
+ # mypy: ignore-errors
2
+
3
+ # This backend is maintained by ONNX team. To direct issues
4
+ # to the right people, please tag related GitHub issues with `module: onnx`.
5
+ #
6
+ # Maintainers' Github IDs: wschin, thiagocrepaldi, BowenBao, abock
7
+ from torch.onnx._internal.onnxruntime import (
8
+ is_onnxrt_backend_supported,
9
+ torch_compile_backend,
10
+ )
11
+ from .registry import register_backend
12
+
13
+
14
+ def has_onnxruntime():
15
+ # FIXME(abock): update test/dynamo/test_backends.py to call is_onnxrt_backend_supported()
16
+ return is_onnxrt_backend_supported()
17
+
18
+
19
+ if is_onnxrt_backend_supported():
20
+ register_backend(name="onnxrt", compiler_fn=torch_compile_backend)
21
+ else:
22
+
23
+ def information_displaying_backend(*args, **kwargs):
24
+ raise ImportError(
25
+ "onnxrt is not registered as a backend. "
26
+ "Please make sure all dependencies such as "
27
+ "numpy, onnx, onnxscript, and onnxruntime-training are installed. "
28
+ "Suggested procedure to fix dependency problem:\n"
29
+ " (1) pip or conda install numpy onnx onnxscript onnxruntime-training.\n"
30
+ " (2) Open a new python terminal.\n"
31
+ " (3) Call the API `torch.onnx.is_onnxrt_backend_supported()`:\n"
32
+ " (4) If it returns `True`, then you can use `onnxrt` backend.\n"
33
+ " (5) If it returns `False`, please execute the package importing section in "
34
+ "torch/onnx/_internal/onnxruntime.py under pdb line-by-line to see which import fails."
35
+ )
36
+
37
+ register_backend(name="onnxrt", compiler_fn=information_displaying_backend)
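Illustrative sketch (not part of this commit): guarding selection of the "onnxrt" backend on the same support check used above; the model here is a placeholder.

import torch

if torch.onnx.is_onnxrt_backend_supported():
    compiled = torch.compile(torch.nn.Linear(4, 4), backend="onnxrt")
else:
    # Without the ONNX Runtime dependencies, choosing "onnxrt" would hit the
    # information_displaying_backend registered above and raise ImportError.
    print("onnxrt backend unavailable")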
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/registry.py ADDED
@@ -0,0 +1,115 @@
1
+ # mypy: ignore-errors
2
+
3
+ import functools
4
+ import sys
5
+ from typing import Callable, Dict, List, Optional, Protocol, Sequence, Tuple
6
+
7
+ import torch
8
+ from torch import fx
9
+
10
+
11
+ class CompiledFn(Protocol):
12
+ def __call__(self, *args: torch.Tensor) -> Tuple[torch.Tensor, ...]:
13
+ ...
14
+
15
+
16
+ CompilerFn = Callable[[fx.GraphModule, List[torch.Tensor]], CompiledFn]
17
+
18
+ _BACKENDS: Dict[str, CompilerFn] = dict()
19
+
20
+
21
+ def register_backend(
22
+ compiler_fn: Optional[CompilerFn] = None,
23
+ name: Optional[str] = None,
24
+ tags: Sequence[str] = (),
25
+ ):
26
+ """
27
+ Decorator to add a given compiler to the registry to allow calling
28
+ `torch.compile` with string shorthand. Note: for projects not
29
+ imported by default, it might be easier to pass a function directly
30
+ as a backend and not use a string.
31
+
32
+ Args:
33
+ compiler_fn: Callable taking a FX graph and fake tensor inputs
34
+ name: Optional name, defaults to `compiler_fn.__name__`
35
+ tags: Optional set of string tags to categorize backend with
36
+ """
37
+ if compiler_fn is None:
38
+ # @register_backend(name="") syntax
39
+ return functools.partial(register_backend, name=name, tags=tags)
40
+ assert callable(compiler_fn)
41
+ name = name or compiler_fn.__name__
42
+ assert name not in _BACKENDS, f"duplicate name: {name}"
43
+ _BACKENDS[name] = compiler_fn
44
+ compiler_fn._tags = tuple(tags)
45
+ return compiler_fn
46
+
47
+
48
+ register_debug_backend = functools.partial(register_backend, tags=("debug",))
49
+ register_experimental_backend = functools.partial(
50
+ register_backend, tags=("experimental",)
51
+ )
52
+
53
+
54
+ def lookup_backend(compiler_fn):
55
+ """Expand backend strings to functions"""
56
+ if isinstance(compiler_fn, str):
57
+ if compiler_fn not in _BACKENDS:
58
+ _lazy_import()
59
+ if compiler_fn not in _BACKENDS:
60
+ _lazy_import_entry_point(compiler_fn)
61
+ if compiler_fn not in _BACKENDS:
62
+ from ..exc import InvalidBackend
63
+
64
+ raise InvalidBackend(name=compiler_fn)
65
+ compiler_fn = _BACKENDS[compiler_fn]
66
+ return compiler_fn
67
+
68
+
69
+ def list_backends(exclude_tags=("debug", "experimental")) -> List[str]:
70
+ """
71
+ Return valid strings that can be passed to:
72
+
73
+ torch.compile(..., backend="name")
74
+ """
75
+ _lazy_import()
76
+ exclude_tags = set(exclude_tags or ())
77
+ return sorted(
78
+ [
79
+ name
80
+ for name, backend in _BACKENDS.items()
81
+ if not exclude_tags.intersection(backend._tags)
82
+ ]
83
+ )
84
+
85
+
86
+ @functools.lru_cache(None)
87
+ def _lazy_import():
88
+ from .. import backends
89
+ from ..utils import import_submodule
90
+
91
+ import_submodule(backends)
92
+
93
+ from ..repro.after_dynamo import dynamo_minifier_backend
94
+
95
+ assert dynamo_minifier_backend is not None
96
+
97
+
98
+ @functools.lru_cache(None)
99
+ def _lazy_import_entry_point(backend_name: str):
100
+ from importlib.metadata import entry_points
101
+
102
+ compiler_fn = None
103
+ group_name = "torch_dynamo_backends"
104
+ if sys.version_info < (3, 10):
105
+ backend_eps = entry_points()
106
+ eps = [ep for ep in backend_eps.get(group_name, ()) if ep.name == backend_name]
107
+ if len(eps) > 0:
108
+ compiler_fn = eps[0].load()
109
+ else:
110
+ backend_eps = entry_points(group=group_name)
111
+ if backend_name in backend_eps.names:
112
+ compiler_fn = backend_eps[backend_name].load()
113
+
114
+ if compiler_fn is not None and backend_name not in list_backends(tuple()):
115
+ register_backend(compiler_fn=compiler_fn, name=backend_name)
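Illustrative sketch (not part of this commit): registering a trivial custom backend through the decorator above and then resolving it by name. The backend name "my_eager" is made up for this example.

import torch
from torch._dynamo.backends.registry import register_backend, lookup_backend, list_backends

@register_backend(name="my_eager", tags=("debug",))
def my_eager(gm, example_inputs):
    # Returning gm.forward runs the captured graph eagerly -- the simplest valid backend.
    return gm.forward

assert lookup_backend("my_eager") is my_eager
# Tagged "debug", so it is hidden from the default listing but visible when no tags are excluded.
assert "my_eager" not in list_backends()
assert "my_eager" in list_backends(exclude_tags=())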
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/tensorrt.py ADDED
@@ -0,0 +1,14 @@
1
+ # mypy: ignore-errors
2
+
3
+ # import torch # type: ignore[import]
4
+ # from .common import device_from_inputs, fake_tensor_unsupported # type: ignore[import]
5
+ # from .registry import register_backend # type: ignore[import]
6
+
7
+ """
8
+ Placeholder for TensorRT backend for dynamo via torch-tensorrt
9
+ """
10
+
11
+ # @register_backend
12
+ # def tensorrt(gm, example_inputs):
13
+ # import torch_tensorrt # type: ignore[import]
14
+ # pass
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/torchxla.py ADDED
@@ -0,0 +1,75 @@
1
+ # mypy: ignore-errors
2
+
3
+ import logging
4
+ import warnings
5
+
6
+ from functorch.compile import make_boxed_func
7
+
8
+ from ..backends.common import aot_autograd
9
+ from .registry import register_backend, register_experimental_backend
10
+
11
+ log = logging.getLogger(__name__)
12
+
13
+
14
+ @register_experimental_backend
15
+ def torchxla_trivial(gm, fake_tensor_inputs):
16
+ return gm
17
+
18
+
19
+ @register_experimental_backend
20
+ def torchxla_trace_once(model, fake_tensor_inputs):
21
+ warnings.warn(
22
+ "This backend will be deprecated in 2.2, please use `openxla` backend instead"
23
+ )
24
+
25
+ return xla_backend_helper(model, fake_tensor_inputs)
26
+
27
+
28
+ @register_backend
29
+ def openxla_eval(model, fake_tensor_inputs):
30
+ return xla_backend_helper(model, fake_tensor_inputs, boxed=False)
31
+
32
+
33
+ def openxla_eval_boxed(model, fake_tensor_inputs):
34
+ return xla_backend_helper(model, fake_tensor_inputs, boxed=True)
35
+
36
+
37
+ def xla_backend_helper(model, fake_tensor_inputs, boxed=False):
38
+ try:
39
+ import torch_xla.core.dynamo_bridge as bridge
40
+ except ImportError as e:
41
+ raise ImportError(
42
+ "Please follow the instruction in https://github.com/pytorch/xla#pytorchxla to install torch_xla"
43
+ ) from e
44
+
45
+ compiled_graph = None
46
+
47
+ def fwd(*args):
48
+ nonlocal model
49
+ nonlocal compiled_graph
50
+ if compiled_graph is None:
51
+ compiled_graph = bridge.extract_compiled_graph(model, args)
52
+ del model
53
+ return compiled_graph(*args)
54
+
55
+ return make_boxed_func(fwd) if boxed else fwd
56
+
57
+
58
+ aot_torchxla_trivial = aot_autograd(
59
+ fw_compiler=torchxla_trivial,
60
+ )
61
+ register_experimental_backend(
62
+ name="aot_torchxla_trivial", compiler_fn=aot_torchxla_trivial
63
+ )
64
+
65
+ aot_torchxla_trace_once = aot_autograd(
66
+ fw_compiler=torchxla_trace_once,
67
+ )
68
+ register_experimental_backend(
69
+ name="aot_torchxla_trace_once", compiler_fn=aot_torchxla_trace_once
70
+ )
71
+
72
+ openxla = aot_autograd(
73
+ fw_compiler=openxla_eval_boxed,
74
+ )
75
+ register_backend(name="openxla", compiler_fn=openxla)
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py ADDED
@@ -0,0 +1,172 @@
1
+ # mypy: ignore-errors
2
+
3
+ import functools
4
+ import importlib
5
+ import logging
6
+ import os
7
+ import tempfile
8
+
9
+ import torch
10
+ from .common import device_from_inputs, fake_tensor_unsupported
11
+
12
+ from .registry import register_backend
13
+
14
+ log = logging.getLogger(__name__)
15
+
16
+
17
+ @register_backend
18
+ @fake_tensor_unsupported
19
+ def tvm(gm, example_inputs, *, scheduler=None, trials=20000):
20
+ import tvm # type: ignore[import]
21
+ from tvm import relay # type: ignore[import]
22
+ from tvm.contrib import graph_executor # type: ignore[import]
23
+
24
+ jit_mod = torch.jit.trace(gm, example_inputs)
25
+ device = device_from_inputs(example_inputs)
26
+ shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)]
27
+ example_outputs = gm(*example_inputs)
28
+ if len(example_outputs) == 0:
29
+ log.warning("Explicitly fall back to eager due to zero output")
30
+ return gm.forward
31
+ mod, params = relay.frontend.from_pytorch(jit_mod, shape_list)
32
+ if device.type == "cuda":
33
+ dev = tvm.cuda(device.index)
34
+ target = tvm.target.cuda()
35
+ else:
36
+ dev = tvm.cpu(0)
37
+ target = tvm.target.Target(llvm_target())
38
+
39
+ if scheduler is None:
40
+ scheduler = os.environ.get("TVM_SCHEDULER", None)
41
+
42
+ if scheduler == "auto_scheduler":
43
+ from tvm import auto_scheduler
44
+
45
+ log_file = tempfile.NamedTemporaryFile()
46
+
47
+ if not os.path.exists(log_file):
48
+ tasks, task_weights = auto_scheduler.extract_tasks(
49
+ mod["main"], params, target
50
+ )
51
+ for task in tasks:
52
+ print(task.compute_dag)
53
+ else:
54
+ print("No tasks")
55
+ if len(tasks) != 0:
56
+ tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
57
+ if not os.path.exists(log_file):
58
+ assert trials > 0
59
+ tune_option = auto_scheduler.TuningOptions(
60
+ num_measure_trials=trials,
61
+ measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
62
+ early_stopping=2000,
63
+ )
64
+ try:
65
+ tuner.tune(tune_option)
66
+ except Exception:
67
+ if os.path.exists(log_file):
68
+ os.unlink(log_file)
69
+ raise
70
+
71
+ with auto_scheduler.ApplyHistoryBest(log_file):
72
+ with tvm.transform.PassContext(
73
+ opt_level=3, config={"relay.backend.use_auto_scheduler": True}
74
+ ):
75
+ lib = relay.build(mod, target=target, params=params)
76
+ elif scheduler == "meta_schedule":
77
+ from tvm import meta_schedule as ms
78
+
79
+ with tempfile.TemporaryDirectory() as work_dir:
80
+ if device.type != "cuda":
81
+ # meta_schedule needs num-cores to be specified
82
+ # here we use the maximum core count
83
+ target = tvm.target.Target(
84
+ f"{llvm_target()} --num-cores {ms.utils.cpu_count(logical=False)}"
85
+ )
86
+ # TODO(shingjan): This could be replaced by tvm.contrib.torch.optimize_torch
87
+ # once USE_PT_TVMDSOOP is updated and turned on by default in TVM.
88
+ database = ms.relay_integration.tune_relay(
89
+ mod=mod,
90
+ target=target,
91
+ work_dir=work_dir,
92
+ max_trials_global=20000,
93
+ num_trials_per_iter=64,
94
+ params=params,
95
+ strategy="evolutionary",
96
+ )
97
+ lib = ms.relay_integration.compile_relay(
98
+ database=database,
99
+ mod=mod,
100
+ target=target,
101
+ params=params,
102
+ )
103
+ elif scheduler == "default" or not scheduler:
104
+ # no autotuning
105
+ with tvm.transform.PassContext(opt_level=10):
106
+ lib = relay.build(mod, target=target, params=params)
107
+ else:
108
+ raise NotImplementedError(
109
+ "This tuning option is invalid/not implemented for torchdynamo's TVM-related backend. "
110
+ "There are three available options: default, auto_scheduler and meta_schedule."
111
+ )
112
+ m = graph_executor.GraphModule(lib["default"](dev))
113
+
114
+ def to_torch_tensor(nd_tensor):
115
+ """A helper function to transfer a NDArray to torch.tensor."""
116
+ if nd_tensor.dtype == "bool":
117
+ # DLPack does not support boolean so it can't be handled by
118
+ # torch.utils.dlpack.from_pack. Workaround by going through
119
+ # numpy, although this brings additional data copy overhead.
120
+ return torch.from_numpy(nd_tensor.numpy())
121
+ return torch.utils.dlpack.from_dlpack(nd_tensor.to_dlpack())
122
+
123
+ def to_tvm_tensor(torch_tensor):
124
+ """A helper function to transfer a torch.tensor to NDArray."""
125
+ if torch_tensor.dtype == torch.bool:
126
+ # same reason as above, fallback to numpy conversion which
127
+ # could introduce data copy overhead
128
+ return tvm.nd.array(torch_tensor.cpu().numpy())
129
+ return tvm.nd.from_dlpack(torch_tensor)
130
+
131
+ def exec_tvm(*i_args):
132
+ args = [a.contiguous() for a in i_args]
133
+ shape_info, _ = m.get_input_info()
134
+ active_inputs = {name for name, _ in shape_info.items()}
135
+ for idx, arg in enumerate(args, 0):
136
+ if arg.dim() != 0:
137
+ if arg.requires_grad:
138
+ arg = arg.detach()
139
+ inp_name = f"inp_{idx}"
140
+ if inp_name not in active_inputs:
141
+ log.warning(
142
+ "input %s skipped as not found in tvm's runtime library",
143
+ inp_name,
144
+ )
145
+ continue
146
+ m.set_input(
147
+ inp_name,
148
+ to_tvm_tensor(arg),
149
+ )
150
+ m.run()
151
+ return [to_torch_tensor(m.get_output(i)) for i in range(m.get_num_outputs())]
152
+
153
+ return exec_tvm
154
+
155
+
156
+ tvm_meta_schedule = functools.partial(tvm, scheduler="meta_schedule")
157
+ tvm_auto_scheduler = functools.partial(tvm, scheduler="auto_scheduler")
158
+
159
+
160
+ def has_tvm():
161
+ try:
162
+ importlib.import_module("tvm")
163
+ return True
164
+ except ImportError:
165
+ return False
166
+
167
+
168
+ @functools.lru_cache(None)
169
+ def llvm_target():
170
+ if "avx512" in open("/proc/cpuinfo").read():
171
+ return "llvm -mcpu=skylake-avx512"
172
+ return "llvm -mcpu=core-avx2"
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/bytecode_analysis.py ADDED
@@ -0,0 +1,250 @@
1
+ import bisect
2
+ import dataclasses
3
+ import dis
4
+ import sys
5
+ from typing import Any, Set, Union
6
+
7
+ TERMINAL_OPCODES = {
8
+ dis.opmap["RETURN_VALUE"],
9
+ dis.opmap["JUMP_FORWARD"],
10
+ dis.opmap["RAISE_VARARGS"],
11
+ # TODO(jansel): double check exception handling
12
+ }
13
+ if sys.version_info >= (3, 9):
14
+ TERMINAL_OPCODES.add(dis.opmap["RERAISE"])
15
+ if sys.version_info >= (3, 11):
16
+ TERMINAL_OPCODES.add(dis.opmap["JUMP_BACKWARD"])
17
+ TERMINAL_OPCODES.add(dis.opmap["JUMP_FORWARD"])
18
+ else:
19
+ TERMINAL_OPCODES.add(dis.opmap["JUMP_ABSOLUTE"])
20
+ JUMP_OPCODES = set(dis.hasjrel + dis.hasjabs)
21
+ JUMP_OPNAMES = {dis.opname[opcode] for opcode in JUMP_OPCODES}
22
+ HASLOCAL = set(dis.haslocal)
23
+ HASFREE = set(dis.hasfree)
24
+
25
+ stack_effect = dis.stack_effect
26
+
27
+
28
+ def get_indexof(insts):
29
+ """
30
+ Get a mapping from instruction memory address to index in instruction list.
31
+ Additionally checks that each instruction only appears once in the list.
32
+ """
33
+ indexof = {}
34
+ for i, inst in enumerate(insts):
35
+ assert inst not in indexof
36
+ indexof[inst] = i
37
+ return indexof
38
+
39
+
40
+ def remove_dead_code(instructions):
41
+ """Dead code elimination"""
42
+ indexof = get_indexof(instructions)
43
+ live_code = set()
44
+
45
+ def find_live_code(start):
46
+ for i in range(start, len(instructions)):
47
+ if i in live_code:
48
+ return
49
+ live_code.add(i)
50
+ inst = instructions[i]
51
+ if inst.exn_tab_entry:
52
+ find_live_code(indexof[inst.exn_tab_entry.target])
53
+ if inst.opcode in JUMP_OPCODES:
54
+ find_live_code(indexof[inst.target])
55
+ if inst.opcode in TERMINAL_OPCODES:
56
+ return
57
+
58
+ find_live_code(0)
59
+
60
+ # change exception table entries if start/end instructions are dead
61
+ # assumes that exception table entries have been propagated,
62
+ # e.g. with bytecode_transformation.propagate_inst_exn_table_entries,
63
+ # and that instructions with an exn_tab_entry lies within its start/end.
64
+ if sys.version_info >= (3, 11):
65
+ live_idx = sorted(live_code)
66
+ for i, inst in enumerate(instructions):
67
+ if i in live_code and inst.exn_tab_entry:
68
+ # find leftmost live instruction >= start
69
+ start_idx = bisect.bisect_left(
70
+ live_idx, indexof[inst.exn_tab_entry.start]
71
+ )
72
+ assert start_idx < len(live_idx)
73
+ # find rightmost live instruction <= end
74
+ end_idx = (
75
+ bisect.bisect_right(live_idx, indexof[inst.exn_tab_entry.end]) - 1
76
+ )
77
+ assert end_idx >= 0
78
+ assert live_idx[start_idx] <= i <= live_idx[end_idx]
79
+ inst.exn_tab_entry.start = instructions[live_idx[start_idx]]
80
+ inst.exn_tab_entry.end = instructions[live_idx[end_idx]]
81
+
82
+ return [inst for i, inst in enumerate(instructions) if i in live_code]
83
+
84
+
85
+ def remove_pointless_jumps(instructions):
86
+ """Eliminate jumps to the next instruction"""
87
+ pointless_jumps = {
88
+ id(a)
89
+ for a, b in zip(instructions, instructions[1:])
90
+ if a.opname == "JUMP_ABSOLUTE" and a.target is b
91
+ }
92
+ return [inst for inst in instructions if id(inst) not in pointless_jumps]
93
+
94
+
95
+ def propagate_line_nums(instructions):
96
+ """Ensure every instruction has line number set in case some are removed"""
97
+ cur_line_no = None
98
+
99
+ def populate_line_num(inst):
100
+ nonlocal cur_line_no
101
+ if inst.starts_line:
102
+ cur_line_no = inst.starts_line
103
+
104
+ inst.starts_line = cur_line_no
105
+
106
+ for inst in instructions:
107
+ populate_line_num(inst)
108
+
109
+
110
+ def remove_extra_line_nums(instructions):
111
+ """Remove extra starts line properties before packing bytecode"""
112
+
113
+ cur_line_no = None
114
+
115
+ def remove_line_num(inst):
116
+ nonlocal cur_line_no
117
+ if inst.starts_line is None:
118
+ return
119
+ elif inst.starts_line == cur_line_no:
120
+ inst.starts_line = None
121
+ else:
122
+ cur_line_no = inst.starts_line
123
+
124
+ for inst in instructions:
125
+ remove_line_num(inst)
126
+
127
+
128
+ @dataclasses.dataclass
129
+ class ReadsWrites:
130
+ reads: Set[Any]
131
+ writes: Set[Any]
132
+ visited: Set[Any]
133
+
134
+
135
+ def livevars_analysis(instructions, instruction):
136
+ indexof = get_indexof(instructions)
137
+ must = ReadsWrites(set(), set(), set())
138
+ may = ReadsWrites(set(), set(), set())
139
+
140
+ def walk(state, start):
141
+ if start in state.visited:
142
+ return
143
+ state.visited.add(start)
144
+
145
+ for i in range(start, len(instructions)):
146
+ inst = instructions[i]
147
+ if inst.opcode in HASLOCAL or inst.opcode in HASFREE:
148
+ if "LOAD" in inst.opname or "DELETE" in inst.opname:
149
+ if inst.argval not in must.writes:
150
+ state.reads.add(inst.argval)
151
+ elif "STORE" in inst.opname:
152
+ state.writes.add(inst.argval)
153
+ elif inst.opname == "MAKE_CELL":
154
+ pass
155
+ else:
156
+ raise NotImplementedError(f"unhandled {inst.opname}")
157
+ if inst.exn_tab_entry:
158
+ walk(may, indexof[inst.exn_tab_entry.target])
159
+ if inst.opcode in JUMP_OPCODES:
160
+ walk(may, indexof[inst.target])
161
+ state = may
162
+ if inst.opcode in TERMINAL_OPCODES:
163
+ return
164
+
165
+ walk(must, indexof[instruction])
166
+ return must.reads | may.reads
167
+
168
+
169
+ @dataclasses.dataclass
170
+ class FixedPointBox:
171
+ value: bool = True
172
+
173
+
174
+ @dataclasses.dataclass
175
+ class StackSize:
176
+ low: Union[int, float]
177
+ high: Union[int, float]
178
+ fixed_point: FixedPointBox
179
+
180
+ def zero(self):
181
+ self.low = 0
182
+ self.high = 0
183
+ self.fixed_point.value = False
184
+
185
+ def offset_of(self, other, n):
186
+ prior = (self.low, self.high)
187
+ self.low = min(self.low, other.low + n)
188
+ self.high = max(self.high, other.high + n)
189
+ if (self.low, self.high) != prior:
190
+ self.fixed_point.value = False
191
+
192
+ def exn_tab_jump(self, depth):
193
+ prior = (self.low, self.high)
194
+ self.low = min(self.low, depth)
195
+ self.high = max(self.high, depth)
196
+ if (self.low, self.high) != prior:
197
+ self.fixed_point.value = False
198
+
199
+
200
+ def stacksize_analysis(instructions) -> Union[int, float]:
201
+ assert instructions
202
+ fixed_point = FixedPointBox()
203
+ stack_sizes = {
204
+ inst: StackSize(float("inf"), float("-inf"), fixed_point)
205
+ for inst in instructions
206
+ }
207
+ stack_sizes[instructions[0]].zero()
208
+
209
+ for _ in range(100):
210
+ if fixed_point.value:
211
+ break
212
+ fixed_point.value = True
213
+
214
+ for inst, next_inst in zip(instructions, instructions[1:] + [None]):
215
+ stack_size = stack_sizes[inst]
216
+ # CALL_FINALLY in Python 3.8 is handled differently when determining stack depth.
217
+ # See https://github.com/python/cpython/blob/3.8/Python/compile.c#L5450.
218
+ # Essentially, the stack effect of CALL_FINALLY is computed with jump=True,
219
+ # but the resulting stack depth is propagated to the next instruction, not the
220
+ # jump target.
221
+ is_call_finally = (
222
+ sys.version_info < (3, 9) and inst.opcode == dis.opmap["CALL_FINALLY"]
223
+ )
224
+ if inst.opcode not in TERMINAL_OPCODES:
225
+ assert next_inst is not None, f"missing next inst: {inst}"
226
+ stack_sizes[next_inst].offset_of(
227
+ stack_size,
228
+ stack_effect(inst.opcode, inst.arg, jump=is_call_finally),
229
+ )
230
+ if inst.opcode in JUMP_OPCODES and not is_call_finally:
231
+ stack_sizes[inst.target].offset_of(
232
+ stack_size, stack_effect(inst.opcode, inst.arg, jump=True)
233
+ )
234
+ if inst.exn_tab_entry:
235
+ # see https://github.com/python/cpython/blob/3.11/Objects/exception_handling_notes.txt
236
+ # on why depth is computed this way.
237
+ depth = inst.exn_tab_entry.depth + int(inst.exn_tab_entry.lasti) + 1
238
+ stack_sizes[inst.exn_tab_entry.target].exn_tab_jump(depth)
239
+
240
+ if False:
241
+ for inst in instructions:
242
+ stack_size = stack_sizes[inst]
243
+ print(stack_size.low, stack_size.high, inst)
244
+
245
+ low = min([x.low for x in stack_sizes.values()])
246
+ high = max([x.high for x in stack_sizes.values()])
247
+
248
+ assert fixed_point.value, "failed to reach fixed point"
249
+ assert low >= 0
250
+ return high
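Illustrative sketch (not part of this commit): stacksize_analysis above folds dis.stack_effect over every control-flow path. The standalone helper below only demonstrates that primitive on straight-line bytecode; the real analysis consumes Dynamo's own Instruction objects produced by bytecode_transformation.

import dis

def straight_line_stack_high(code) -> int:
    # Max stack depth of jump-free bytecode, using the same dis.stack_effect primitive.
    depth = high = 0
    for inst in dis.get_instructions(code):
        depth += dis.stack_effect(inst.opcode, inst.arg)
        high = max(high, depth)
    return high

print(straight_line_stack_high(compile("a = b + c * d", "<example>", "exec")))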
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/callback.py ADDED
@@ -0,0 +1,82 @@
1
+ class CompilationCallbackHandler:
2
+ def __init__(self):
3
+ self.start_callbacks = []
4
+ self.end_callbacks = []
5
+
6
+ def register_start_callback(self, callback):
7
+ """
8
+ Register a callback function to be called when the compilation starts.
9
+
10
+ Args:
11
+ - callback (callable): The callback function to register.
12
+ """
13
+ self.start_callbacks.append(callback)
14
+ return callback
15
+
16
+ def register_end_callback(self, callback):
17
+ """
18
+ Register a callback function to be called when the compilation ends.
19
+
20
+ Args:
21
+ - callback (callable): The callback function to register.
22
+ """
23
+ self.end_callbacks.append(callback)
24
+ return callback
25
+
26
+ def remove_start_callback(self, callback):
27
+ """
28
+ Remove a registered start callback function.
29
+
30
+ Args:
31
+ - callback (callable): The callback function to remove.
32
+ """
33
+ self.start_callbacks.remove(callback)
34
+
35
+ def remove_end_callback(self, callback):
36
+ """
37
+ Remove a registered end callback function.
38
+
39
+ Args:
40
+ - callback (callable): The callback function to remove.
41
+ """
42
+ self.end_callbacks.remove(callback)
43
+
44
+ def run_start_callbacks(self):
45
+ """
46
+ Execute all registered start callbacks.
47
+ """
48
+ for callback in self.start_callbacks:
49
+ callback()
50
+
51
+ def run_end_callbacks(self):
52
+ """
53
+ Execute all registered end callbacks.
54
+ """
55
+ for callback in self.end_callbacks:
56
+ callback()
57
+
58
+ def clear(self):
59
+ """
60
+ Clear all registered callbacks.
61
+ """
62
+ self.start_callbacks.clear()
63
+ self.end_callbacks.clear()
64
+
65
+
66
+ callback_handler = CompilationCallbackHandler()
67
+
68
+
69
+ def on_compile_start(callback):
70
+ """
71
+ Decorator to register a callback function for the start of the compilation.
72
+ """
73
+ callback_handler.register_start_callback(callback)
74
+ return callback
75
+
76
+
77
+ def on_compile_end(callback):
78
+ """
79
+ Decorator to register a callback function for the end of the compilation.
80
+ """
81
+ callback_handler.register_end_callback(callback)
82
+ return callback
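Illustrative sketch (not part of this commit): the two decorators above can be paired to time each compilation, assuming Dynamo invokes callback_handler.run_start_callbacks/run_end_callbacks around a compile; the timing code itself is made up.

import time
from torch._dynamo.callback import on_compile_start, on_compile_end

_t0 = 0.0

@on_compile_start
def _note_start():
    global _t0
    _t0 = time.perf_counter()

@on_compile_end
def _report_elapsed():
    print(f"dynamo compile took {time.perf_counter() - _t0:.3f}s")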
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/debug_utils.py ADDED
@@ -0,0 +1,802 @@
1
+ # mypy: disable-error-code="method-assign"
2
+
3
+ import copy
4
+ import functools
5
+ import getpass
6
+ import inspect
7
+ import itertools
8
+ import logging
9
+ import os
10
+ import re
11
+ import subprocess
12
+ import tempfile
13
+ import textwrap
14
+ from collections import Counter
15
+ from importlib import import_module
16
+ from typing import Any, Callable, Dict, List, Optional, TypeVar
17
+
18
+ import torch
19
+ import torch._prims_common as utils
20
+ import torch._subclasses.meta_utils
21
+ from torch import Tensor
22
+
23
+ from torch._dynamo.testing import rand_strided
24
+ from torch._prims_common import is_float_dtype
25
+ from torch.multiprocessing.reductions import StorageWeakRef
26
+ from torch.utils._content_store import ContentStoreReader, ContentStoreWriter
27
+
28
+ from . import config
29
+ from .utils import clone_inputs, get_debug_dir
30
+
31
+ log = logging.getLogger(__name__)
32
+
33
+ T = TypeVar("T")
34
+
35
+
36
+ inductor_config = import_module("torch._inductor.config")
37
+ use_buck = inductor_config.is_fbcode()
38
+
39
+ if use_buck:
40
+ import libfb.py.build_info
41
+
42
+
43
+ extra_deps = []
44
+ extra_imports = ""
45
+ if use_buck:
46
+ extra_deps = [
47
+ "//caffe2/torch/fb/sparsenn:sparsenn_operators_gpu",
48
+ "//caffe2/torch/fb/sparsenn:sparsenn_operators",
49
+ "//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu",
50
+ "//deeplearning/fbgemm/fbgemm_gpu:sparse_ops",
51
+ ]
52
+ cur_target = libfb.py.build_info.BuildInfo.get_build_rule().replace("fbcode:", "//") # type: ignore[possibly-undefined]
53
+ extra_imports = "\n".join([f'torch.ops.load_library("{x}")' for x in extra_deps])
54
+
55
+
56
+ BUCK_CMD_PREFIX = ["buck2", "run", "@mode/dev-nosan"]
57
+
58
+
59
+ class BuckTargetWriter:
60
+ def __init__(self, filename):
61
+ self.subdir, self.py_file = os.path.split(os.path.abspath(filename))
62
+ self.target = self.py_file.replace(".py", "")
63
+
64
+ # Get main_module path from fbcode
65
+ self.path = f'{self.subdir.replace("/", ".")}.{self.target}'
66
+ self.path = self.path[self.path.find("fbcode.") :]
67
+ self.path = self.path[7:]
68
+
69
+ # Get cmd line path
70
+ tmp = self.subdir
71
+ tmp = tmp[tmp.find("fbcode/") :][7:]
72
+ self.cmd_line_path = f"//{tmp}:{self.target}"
73
+
74
+ def build(self):
75
+ extra_cpp_deps = "\n".join([f' "{x}",' for x in extra_deps])
76
+ return textwrap.dedent(
77
+ f"""
78
+ load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary")
79
+
80
+ python_binary(
81
+ name="{self.target}",
82
+ srcs = ["{self.py_file}"],
83
+ compile = False,
84
+ deps = [
85
+ "//caffe2:torch",
86
+ "//caffe2/functorch:functorch",
87
+ "//triton:triton",
88
+ "{cur_target}",
89
+ ],
90
+ cpp_deps = [
91
+ {extra_cpp_deps}
92
+ ],
93
+ main_module = "{self.path}",
94
+ par_style = "xar",
95
+ )
96
+ """
97
+ )
98
+
99
+ def write(self, print_msg=True):
100
+ target_file = os.path.join(self.subdir, "TARGETS")
101
+ with open(target_file, "w") as fd:
102
+ fd.write(self.build())
103
+ # log.warning("Wrote isolation TARGETS file at %s", target_file)
104
+ cmd_split = BUCK_CMD_PREFIX + [self.cmd_line_path]
105
+ if print_msg:
106
+ log.warning(
107
+ "Found an example that reproduces the error. Run this cmd to repro - %s",
108
+ " ".join(cmd_split),
109
+ )
110
+ return cmd_split
111
+
112
+
113
+ def minifier_dir():
114
+ path = os.path.join(get_debug_dir(), "minifier")
115
+ if path is None:
116
+ path = f"{tempfile.gettempdir()}/minifier_{getpass.getuser()}"
117
+ if not os.path.exists(path):
118
+ os.makedirs(path, exist_ok=True)
119
+ return path
120
+
121
+
122
+ MAX_CONSTANT_NUMEL_INLINE = 4
123
+
124
+
125
+ class NNModuleToString:
126
+ safe_reprs = [
127
+ torch.nn.Linear,
128
+ torch.nn.Conv1d,
129
+ torch.nn.Conv2d,
130
+ torch.nn.Conv3d,
131
+ torch.nn.BatchNorm1d,
132
+ torch.nn.BatchNorm2d,
133
+ torch.nn.BatchNorm3d,
134
+ torch.nn.LayerNorm,
135
+ torch.nn.Dropout,
136
+ torch.nn.Softmax,
137
+ torch.nn.ReLU,
138
+ torch.nn.GELU,
139
+ torch.nn.Identity,
140
+ torch.nn.MaxPool2d,
141
+ torch.nn.Embedding,
142
+ torch.nn.Tanh,
143
+ torch.nn.ConvTranspose1d,
144
+ torch.nn.GLU,
145
+ torch.nn.LSTM,
146
+ torch.nn.Flatten,
147
+ torch.nn.AdaptiveAvgPool2d,
148
+ ]
149
+
150
+ @staticmethod
151
+ def can_convert_to_string(gm):
152
+ cant_convert = set()
153
+ for _, module in gm.named_children():
154
+ if type(module) not in NNModuleToString.safe_reprs:
155
+ cant_convert.add(module)
156
+
157
+ if len(cant_convert) > 0:
158
+ log.warning("We have not tested reprs of some modules - %s", cant_convert)
159
+ # TODO - Assuming that all modules can be safely repr'd. Check if that assumption is correct.
160
+ return True
161
+
162
+ @staticmethod
163
+ def convert(gm):
164
+ from torch.nn.modules.module import _addindent
165
+
166
+ tab = " " * 4
167
+
168
+ model_str = textwrap.dedent(
169
+ """
170
+ from torch.nn import *
171
+ class Repro(torch.nn.Module):
172
+ def __init__(self):
173
+ super().__init__()
174
+ """
175
+ )
176
+
177
+ for module_name, module in gm.named_children():
178
+ module_str = f"{module.__repr__()}"
179
+ # module should be a core torch.nn.Module, so all parameters
180
+ # should be on the same device.
181
+ example_param = next(module.parameters(), None)
182
+ if example_param is not None and example_param.is_cuda:
183
+ module_str = f"{module_str}.cuda()"
184
+ model_str += f"{tab*2}self.{module_name} = {module_str}\n"
185
+
186
+ for buffer_name, buffer in gm._buffers.items():
187
+ if buffer is None:
188
+ continue
189
+ # Serialize full data for small buffers
190
+ if buffer.numel() <= MAX_CONSTANT_NUMEL_INLINE:
191
+ from torch._tensor_str import PRINT_OPTS
192
+
193
+ assert PRINT_OPTS.threshold >= MAX_CONSTANT_NUMEL_INLINE
194
+ tensor_str = repr(buffer)
195
+ elif torch.is_floating_point(buffer):
196
+ tensor_str = f"torch.randn({list(buffer.shape)}, dtype={buffer.dtype})"
197
+ else:
198
+ tensor_str = (
199
+ f"torch.randint(1, size={list(buffer.shape)}, dtype={buffer.dtype})"
200
+ )
201
+ if buffer.is_cuda:
202
+ tensor_str = f"{tensor_str}.cuda()"
203
+ model_str += f"{tab*2}self.register_buffer('{buffer_name}', {tensor_str})\n"
204
+
205
+ for param_name, param in gm._parameters.items():
206
+ if param is None:
207
+ continue
208
+ maybe_device = ""
209
+ if param.is_cuda:
210
+ maybe_device = ', device="cuda"'
211
+ tensor_str = f"torch.nn.Parameter(torch.randn({list(param.shape)}, dtype={param.dtype}{maybe_device}))"
212
+ model_str += f"{tab*2}self.{param_name} = {tensor_str}\n"
213
+
214
+ # TODO - Keep this code for now. But, I don't think we will need this.
215
+ # attrs = dir(gm)
216
+ # for attr in attrs:
217
+ # if "_tensor_constant" in attr:
218
+ # val = getattr(gm, attr)
219
+ # model_str += f" {attr} = {val!r}\n"
220
+
221
+ model_str += f"{_addindent(gm.code, 4)}\n"
222
+ return model_str
223
+
224
+
225
+ @functools.lru_cache(None) # subprocess is expensive
226
+ def _cuda_system_info_comment():
227
+ if not torch.cuda.is_available():
228
+ return "# torch.cuda.is_available()==False, no GPU info collected\n"
229
+
230
+ model_str = "# CUDA Info: \n"
231
+ try:
232
+ cuda_version_out = subprocess.check_output(["nvcc", "--version"])
233
+ cuda_version_lines = cuda_version_out.decode().split("\n")
234
+ comment = "".join([f"# {s} \n" for s in cuda_version_lines if s not in [""]])
235
+ model_str += f"{comment}\n"
236
+ except (FileNotFoundError, subprocess.CalledProcessError):
237
+ model_str += "# nvcc not found\n"
238
+
239
+ gpu_names = Counter(
240
+ torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())
241
+ )
242
+
243
+ model_str += "# GPU Hardware Info: \n"
244
+ for name, count in gpu_names.items():
245
+ model_str += f"# {name} : {count} \n"
246
+ model_str += "\n"
247
+ return model_str
248
+
249
+
250
+ def generate_config_string(*, stable_output=False):
251
+ import torch._functorch.config
252
+ import torch._inductor.config
253
+
254
+ if stable_output:
255
+ return "# config omitted due to stable_output=True"
256
+
257
+ experimental_config = torch.fx.experimental._config.codegen_config() # type: ignore[attr-defined]
258
+ return f"""\
259
+ import torch._dynamo.config
260
+ import torch._inductor.config
261
+ import torch._functorch.config
262
+ import torch.fx.experimental._config
263
+ {torch._dynamo.config.codegen_config()}
264
+ {torch._inductor.config.codegen_config()}
265
+ {torch._functorch.config.codegen_config()}
266
+ {experimental_config}
267
+ """
268
+
269
+
270
+ def get_minifier_repro_path():
271
+ return os.path.join(minifier_dir(), "minifier_launcher.py")
272
+
273
+
274
+ def helper_for_dump_minify(contents):
275
+ minified_repro_path = get_minifier_repro_path()
276
+ log.warning("Writing minified repro to:\n%s", minified_repro_path)
277
+
278
+ if use_buck:
279
+ BuckTargetWriter(minified_repro_path).write()
280
+ try:
281
+ with open(minified_repro_path, "w") as fd:
282
+ fd.write(contents)
283
+
284
+ except OSError as e:
285
+ log.exception(e)
286
+ raise NotImplementedError(f"Could not write to {minified_repro_path}") from e
287
+
288
+
289
+ class AccuracyError(Exception):
290
+ pass
291
+
292
+
293
+ def clone_inputs_retaining_gradness(example_inputs):
294
+ """
295
+ This clone_inputs is different from utils.clone_input. In the case of the minifier,
296
+ all the tensors are leaf tensors while creating a new graph. So, we set the
297
+ requires_grad field w/o checking the leafness of the tensor.
298
+ """
299
+ cloned_inputs = clone_inputs(example_inputs)
300
+ for idx in range(len(example_inputs)):
301
+ if isinstance(cloned_inputs[idx], torch.Tensor):
302
+ cloned_inputs[idx].requires_grad_(example_inputs[idx].requires_grad)
303
+ return cloned_inputs
304
+
305
+
306
+ def run_fwd_maybe_bwd(gm, args, only_fwd=False, disable_clone=False):
307
+ """
308
+ Runs a forward and possibly backward iteration for a given mod and args.
309
+
310
+ When disable_clone is True, we will use args as-is without cloning.
311
+ This is higher fidelity but we may destroy the args in the process.
312
+ """
313
+ from torch._functorch.aot_autograd import make_boxed_func
314
+
315
+ from .testing import collect_results, reduce_to_scalar_loss, requires_bwd_pass
316
+
317
+ gm = copy.deepcopy(gm)
318
+ if not disable_clone:
319
+ args = clone_inputs_retaining_gradness(args)
320
+
321
+ if hasattr(gm, "zero_grad"):
322
+ gm.zero_grad(True)
323
+
324
+ # TorchInductor returned callable expects lists. So, boxing the call.
325
+ orig_named_parameters = getattr(gm, "named_parameters", None)
326
+ orig_named_buffers = getattr(gm, "named_buffers", None)
327
+ if not hasattr(gm, "_boxed_call") and (
328
+ orig_named_parameters is not None or orig_named_buffers is not None
329
+ ):
330
+ gm = make_boxed_func(gm)
331
+ if orig_named_parameters is not None:
332
+ gm.named_parameters = orig_named_parameters
333
+ if orig_named_buffers is not None:
334
+ gm.named_buffers = orig_named_buffers
335
+
336
+ out = gm(args)
337
+ if only_fwd:
338
+ return out
339
+ if requires_bwd_pass(out):
340
+ loss = reduce_to_scalar_loss(out)
341
+ loss.backward()
342
+ return collect_results(gm, out, None, args)
343
+
344
+
345
+ def same_two_models(
346
+ gm,
347
+ opt_gm,
348
+ example_inputs,
349
+ only_fwd=False,
350
+ *,
351
+ require_fp64=False,
352
+ ignore_non_fp=False,
353
+ ):
354
+ """
355
+ Check two models have same accuracy.
356
+
357
+ require_fp64: if True, raise an error if we are unable to calculate the fp64 reference
358
+ ignore_non_fp: if True, do not compare outputs which are not floating point. This
359
+ is mostly useful for the minifier (which wants to avoid quantizing floating point
360
+ error into integer/boolean error)
361
+ """
362
+ from .eval_frame import OptimizedModule
363
+ from .testing import (
364
+ named_buffers_for_optimized_module,
365
+ named_parameters_for_optimized_module,
366
+ )
367
+ from .utils import same
368
+
369
+ if isinstance(gm, OptimizedModule):
370
+ gm.named_parameters = named_parameters_for_optimized_module(gm)
371
+ gm.named_buffers = named_buffers_for_optimized_module(gm)
372
+
373
+ if isinstance(opt_gm, OptimizedModule):
374
+ opt_gm.named_parameters = named_parameters_for_optimized_module(opt_gm)
375
+ opt_gm.named_buffers = named_buffers_for_optimized_module(opt_gm)
376
+
377
+ ref = run_fwd_maybe_bwd(gm, example_inputs, only_fwd)
378
+
379
+ fp64_ref = None
380
+ if config.same_two_models_use_fp64:
381
+ try:
382
+ fp64_model, fp64_examples = cast_to_fp64(
383
+ copy.deepcopy(gm), clone_inputs_retaining_gradness(example_inputs)
384
+ )
385
+ fp64_ref = run_fwd_maybe_bwd(fp64_model, fp64_examples, only_fwd)
386
+ except Exception:
387
+ if require_fp64:
388
+ raise RuntimeError("Could not generate fp64 outputs") # noqa: TRY200
389
+ log.warning("Could not generate fp64 outputs")
390
+
391
+ try:
392
+ res = run_fwd_maybe_bwd(opt_gm, example_inputs, only_fwd)
393
+ except Exception as e:
394
+ # This means that the minified graph is bad/exposes a different problem.
395
+ # As we are checking accuracy here, let's log the exception and return True.
396
+ log.exception(
397
+ "While minifying the program in accuracy minification mode, "
398
+ "ran into a runtime exception which is likely an unrelated issue."
399
+ " Skipping this graph."
400
+ )
401
+ return True
402
+
403
+ passing = same(
404
+ ref,
405
+ res,
406
+ fp64_ref,
407
+ tol=config.repro_tolerance,
408
+ equal_nan=True,
409
+ ignore_non_fp=ignore_non_fp,
410
+ )
411
+ return passing
412
+
413
+
414
+ def cast_dtype_args_to_fp64(model):
415
+ for node in model.graph.nodes:
416
+ if (
417
+ node.op == "call_function"
418
+ and node.target == torch.ops.prims.convert_element_type.default
419
+ ):
420
+ assert len(node.args) == 2
421
+ if is_float_dtype(node.args[1]) and node.args[1] != torch.float64:
422
+ node.args = (node.args[0], torch.float64)
423
+ if node.op == "call_function":
424
+ dtype = node.kwargs.get("dtype")
425
+ if dtype is not None and is_float_dtype(dtype):
426
+ new_kwargs = dict(node.kwargs)
427
+ new_kwargs["dtype"] = torch.float64
428
+ node.kwargs = new_kwargs
429
+
430
+ model.graph.lint()
431
+ model.recompile()
432
+ return model
433
+
434
+
435
+ def cast_to(dtype, model, inputs):
436
+ from torch.utils._pytree import tree_map
437
+
438
+ model = model.to(dtype)
439
+ if dtype == torch.float64:
440
+ # If casting to fp64 for accuracy comparison, we need to
441
+ # replace dtype arguments embedded in the graph with fp64
442
+ model = cast_dtype_args_to_fp64(model)
443
+
444
+ inputs = tree_map(
445
+ lambda x: x.to(dtype)
446
+ if isinstance(x, torch.Tensor) and x.is_floating_point()
447
+ else x,
448
+ inputs,
449
+ )
450
+ return model, inputs
451
+
452
+
453
+ def cast_to_fp64(model, inputs):
454
+ return cast_to(torch.float64, model, inputs)
455
+
456
+
457
+ def backend_accuracy_fails(
458
+ gm,
459
+ example_inputs,
460
+ compiler_fn,
461
+ only_fwd=False,
462
+ *,
463
+ require_fp64=False,
464
+ ignore_non_fp=False,
465
+ ):
466
+ try:
467
+ compiled_gm = compiler_fn(
468
+ copy.deepcopy(gm), clone_inputs_retaining_gradness(example_inputs)
469
+ )
470
+ return not same_two_models(
471
+ gm,
472
+ compiled_gm,
473
+ example_inputs,
474
+ only_fwd,
475
+ require_fp64=require_fp64,
476
+ ignore_non_fp=ignore_non_fp,
477
+ )
478
+ except Exception as e:
479
+ # This means that the minified graph is bad/exposes a different problem.
480
+ # As we are checking accuracy here, let's log the exception and return False.
481
+ log.exception(
482
+ "While minifying the program in accuracy minification mode, "
483
+ "ran into a runtime exception which is likely an unrelated issue."
484
+ " Skipping this graph"
485
+ )
486
+ return False
487
+
488
+
489
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
490
+ # REPRO SUPPORT CODE
491
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
492
+
493
+
494
+ # Helper functions for computing what the default values of tensor
495
+ # values should be. These all coincide with factory functions, e.g., torch.empty
496
+
497
+
498
+ def _stride_or_default(
499
+ stride: Optional["torch._prims_common.StrideType"],
500
+ *,
501
+ shape: "torch._prims_common.ShapeType",
502
+ ) -> "torch._prims_common.StrideType":
503
+ return stride if stride is not None else utils.make_contiguous_strides_for(shape)
504
+
505
+
506
+ def _mk_defaulter(d: T) -> Callable[[Optional[T]], T]:
507
+ return lambda x: x if x is not None else d
508
+
509
+
510
+ _dtype_or_default = _mk_defaulter(torch.float32)
511
+ _device_or_default = _mk_defaulter(torch.device("cpu"))
512
+ _storage_offset_or_default = _mk_defaulter(0)
513
+ _requires_grad_or_default = _mk_defaulter(False)
514
+ _is_leaf_or_default = _mk_defaulter(False)
515
+
516
+
517
+ class NopInputReader:
518
+ def __init__(self):
519
+ self.total = 0
520
+
521
+ def storage(self, storage_hash, nbytes, *, device=None, dtype_hint=None):
522
+ self.total += 1
523
+
524
+ def tensor(self, *args, **kwargs):
525
+ pass
526
+
527
+ def symint(self, *args, **kwargs):
528
+ pass
529
+
530
+
531
+ # TODO: Support bundling the entire repro into a zip file for ease of
532
+ # transferring around
533
+ class InputReader:
534
+ def __init__(self, save_dir=None, *, pbar=None):
535
+ # If None, we will generate random data instead. It's important
536
+ # to natively support this use case as it will allow people to
537
+ # share repros without including the real data, if the problem
538
+ # reproduces even on random data.
539
+ if save_dir is None:
540
+ log.warning("no save_dir specified, will generate random data")
541
+ self.store = ContentStoreReader(save_dir) if save_dir is not None else None
542
+ self.args = []
543
+ self.pbar = pbar
544
+
545
+ def storage(self, storage_hash, nbytes, *, device=None, dtype_hint=None):
546
+ if self.pbar is not None:
547
+ self.pbar.update(1)
548
+ device = _device_or_default(device)
549
+ dtype_hint = _dtype_or_default(dtype_hint)
550
+ if self.store is not None and storage_hash is not None:
551
+ try:
552
+ storage = self.store.read_storage(storage_hash)
553
+ except FileNotFoundError:
554
+ pass
555
+ else:
556
+ if device != storage.device:
557
+ log.warning("device mismatch: %s != %s", device, storage.device)
558
+ # TODO: transfer it to the right device? But failing this
559
+ # way would be very mysterious! Would have been better
560
+ # not to store device in the serialized format...
561
+ return storage
562
+ log.warning("could not load %s, generating random data instead", storage_hash)
563
+ shape = (nbytes // dtype_hint.itemsize,)
564
+ stride = _stride_or_default(None, shape=shape)
565
+ return rand_strided(shape, stride, dtype_hint, device).untyped_storage()
566
+
567
+ def tensor(
568
+ self,
569
+ storage,
570
+ shape,
571
+ stride=None,
572
+ *,
573
+ storage_offset=None,
574
+ dtype=None,
575
+ requires_grad=None,
576
+ is_leaf=None,
577
+ **metadata,
578
+ ):
579
+ stride = _stride_or_default(stride, shape=shape)
580
+ storage_offset = _storage_offset_or_default(storage_offset)
581
+ dtype = _dtype_or_default(dtype)
582
+ is_leaf = _is_leaf_or_default(is_leaf)
583
+ requires_grad = _requires_grad_or_default(requires_grad)
584
+ t = torch.tensor(
585
+ [], dtype=dtype, device=storage.device, requires_grad=requires_grad
586
+ )
587
+ with torch.no_grad():
588
+ t.set_(storage, storage_offset, shape, stride)
589
+ if not is_leaf:
590
+ # Fake up some autograd history in a very naughty way
591
+ with torch.enable_grad():
592
+ t = t.clone(memory_format=torch.preserve_format)
593
+ with torch.no_grad():
594
+ t.set_(storage, storage_offset, shape, stride)
595
+ assert torch._subclasses.meta_utils.safe_is_leaf(t) == is_leaf
596
+ torch._utils.set_tensor_metadata(t, metadata)
597
+ self.args.append(t)
598
+ return t # for BC
599
+
600
+ def symint(self, val):
601
+ self.args.append(val)
602
+ return val # for BC
603
+
604
+
605
+ # Here is our writer strategy:
606
+ # 1. We will stream all of the inputs to disk
607
+ # 2. You can now deterministically randomize the inputs, or reload
608
+ # the inputs from disk
609
+ # 3. You can YOLO run the script without the inputs, in which case
610
+ # we'll fill the inputs with random data and pray. This is the
611
+ # legacy behavior, but it's also useful if you want to find out
612
+ # if we're so broken even random inputs trigger it
613
+ # 4. We could offer an in process "check if the randomized thing
614
+ # works too" but this is delicate so we don't do it
615
+
616
+
617
+ class InputWriter:
618
+ def __init__(self, save_dir, *, stable_hash=False):
619
+ self._lines = []
620
+ # TODO: consider ensuring tensor and storage counters line up?
621
+ self.storage_counter = itertools.count()
622
+ self.save_dir = save_dir
623
+ self.store = (
624
+ ContentStoreWriter(save_dir, stable_hash=stable_hash)
625
+ if save_dir is not None
626
+ else None
627
+ )
628
+ self.seen_storages = {}
629
+
630
+ def lines(self):
631
+ r = [
632
+ "def load_args(reader):",
633
+ ]
634
+ r.extend(f" {l}" for l in self._lines)
635
+ # In case we need to change the internal format of load_args
636
+ # in an FC-breaking way
637
+ r.append("load_args._version = 0")
638
+ return r
639
+
640
+ # Storages are untyped, but we need to initialize them with data if
641
+ # we don't have the real data, so we give a hint saying what kind
642
+ # of initialization may be appropriate
643
+ #
644
+ # If we had a FakeTensor, device_hint tells us what device should be
645
+ def storage(self, untyped_storage, *, dtype_hint=None, device_hint=None) -> str:
646
+ ws = StorageWeakRef(untyped_storage)
647
+ v = self.seen_storages.get(ws)
648
+ if v is not None:
649
+ return v
650
+ v = f"buf{next(self.storage_counter)}"
651
+ maybe_dtype_hint = ""
652
+ if _dtype_or_default(None) != _dtype_or_default(dtype_hint):
653
+ maybe_dtype_hint = f", dtype_hint={dtype_hint!r}"
654
+ # TODO: being optional on device is kind of pointless as the default
655
+ # is CPU but most repros we care about are CUDA
656
+ maybe_device = ""
657
+ device = untyped_storage.device
658
+ if device.type == "meta":
659
+ assert device_hint is not None
660
+ device = device_hint
661
+ if _device_or_default(None) != device:
662
+ maybe_device = f", device={device!r}"
663
+ nbytes = untyped_storage.nbytes()
664
+ storage_hash = None
665
+ if self.store is not None and untyped_storage.device.type != "meta":
666
+ storage_hash = self.store.write_storage(untyped_storage)
667
+ self._lines.append(
668
+ f"{v} = reader.storage({storage_hash!r}, {nbytes!r}{maybe_device}{maybe_dtype_hint})"
669
+ )
670
+ self.seen_storages[ws] = v
671
+ return v
672
+
673
+ def tensor(self, name, t) -> None:
674
+ storage = self.storage(
675
+ t.untyped_storage(), dtype_hint=t.dtype, device_hint=t.device
676
+ )
677
+ args = []
678
+ # NB: this is positional, must come first
679
+ if _stride_or_default(None, shape=t.shape) != t.stride():
680
+ args.append(str(tuple(t.stride())))
681
+ if _dtype_or_default(None) != t.dtype:
682
+ args.append(f"dtype={t.dtype!r}")
683
+ if _storage_offset_or_default(None) != t.storage_offset():
684
+ args.append(f"storage_offset={t.storage_offset()!r}")
685
+ tensor_metadata = torch._utils.get_tensor_metadata(t)
686
+ if tensor_metadata:
687
+ args.extend(f"{k}={v!r}" for k, v in tensor_metadata.items())
688
+ if _requires_grad_or_default(None) != t.requires_grad:
689
+ args.append(f"requires_grad={t.requires_grad!r}")
690
+ is_leaf = torch._subclasses.meta_utils.safe_is_leaf(t)
691
+ if _is_leaf_or_default(None) != is_leaf:
692
+ args.append(f"is_leaf={is_leaf!r}")
693
+ self._lines.append(
694
+ "reader.tensor("
695
+ + ", ".join([storage, str(tuple(t.shape)), *args])
696
+ + f") # {name}"
697
+ )
698
+
699
+ # TODO: this doesn't actually symint atm
700
+ def symint(self, name, val) -> None:
701
+ if isinstance(val, torch.SymInt):
702
+ val = val.node.hint
703
+ self._lines.append(f"reader.symint({val!r}) # {name}")
704
+
705
+
706
+ def aot_graph_input_parser(
707
+ func: Callable[[List[Tensor]], List[Tensor]],
708
+ device: str = "cuda",
709
+ sym_shapes: Optional[Dict[str, int]] = None,
710
+ default_sym_shape: Optional[int] = None,
711
+ ) -> Dict[str, Any]:
712
+ """
713
+ Takes in a function which has been printed with print_readable() and constructs kwargs to run it.
714
+
715
+ Handles Tensor inputs, Symints, and a graph module which might have tensor constants.
716
+
717
+ Consider a function `forward` defined as follows:
718
+
719
+ def forward(self, primals_1: "f32[1001, 6]", primals_2: "f32[s0]", primals_3: "Sym(s0)",):
720
+ _tensor_constant0: "i64[4190]" = self._tensor_constant0
721
+ # Further implementation
722
+
723
+ kwargs = aot_graph_input_parser(forward)
724
+ forward(**kwargs)
725
+ """
726
+
727
+ from torch.fx.graph import dtype_abbrs
728
+
729
+ dtype_map = {value: key for key, value in dtype_abbrs.items()}
730
+ dtype_pattern = "|".join(dtype_abbrs.values())
731
+
732
+ # Extracting the source code from the function
733
+ source = inspect.getsource(func)
734
+
735
+ # Regular expressions
736
+ tensor_assignment_regex = rf"(_tensor_constant\d+): \"({dtype_pattern})\[\s*(.*?)\s*\]\" = self\.(_tensor_constant\d+)"
737
+ tensor_regex = rf"({dtype_pattern})\[\s*(.*?)\s*\]"
738
+ sym_shape_regex = r"Sym\((s\d+)\)"
739
+
740
+ class TensorContainer:
741
+ "Container for tensors as attributes"
742
+ pass
743
+
744
+ # Dictionary for tensors from annotations
745
+ kwargs: Dict[str, Any] = {}
746
+
747
+ sym_shapes = sym_shapes or {}
748
+
749
+ def get_sym_int(symint):
750
+ torch._check(
751
+ symint in sym_shapes or default_sym_shape is not None,
752
+ lambda: f"{symint} not in symbolic_shapes and default sym shape not passed in",
753
+ )
754
+ return sym_shapes.get(symint, default_sym_shape)
755
+
756
+ def gen_tensor(shape, dtype) -> Tensor:
757
+ # Resolve symbolic shapes to concrete values
758
+ resolved_shape = []
759
+ dynamic_dims = []
760
+ for i, dim in enumerate(shape):
761
+ dim = dim.strip()
762
+ if "s" in dim:
763
+ s = get_sym_int(dim)
764
+ resolved_shape.append(s)
765
+ dynamic_dims.append(i)
766
+ else:
767
+ resolved_shape.append(int(dim))
768
+
769
+ constructor = torch.randn if dtype.is_floating_point else torch.zeros
770
+ out = constructor(resolved_shape, dtype=dtype, device=device) # type: ignore[call-arg]
771
+ for d in dynamic_dims:
772
+ torch._dynamo.mark_dynamic(out, d)
773
+ return out
774
+
775
+ # Parse function annotations for tensor generation
776
+ annotations = func.__annotations__
777
+ for param, annotation in annotations.items():
778
+ # Skip 'return' annotation
779
+ if param == "return":
780
+ continue
781
+
782
+ match = re.search(tensor_regex, annotation)
783
+ if match:
784
+ data_type, shape_str = match.groups()
785
+ shape = tuple(shape_str.split(","))
786
+ dtype = dtype_map[data_type]
787
+ kwargs[param] = gen_tensor(shape, dtype)
788
+
789
+ match = re.search(sym_shape_regex, annotation)
790
+ if match:
791
+ kwargs[param] = get_sym_int(match.group(1))
792
+
793
+ if "self" in inspect.signature(func).parameters:
794
+ container = TensorContainer()
795
+ kwargs["self"] = container
796
+ for match in re.finditer(tensor_assignment_regex, source):
797
+ attr_name, data_type, shape_str, _ = match.groups()
798
+ shape = tuple(shape_str.split(","))
799
+ dtype = dtype_map[data_type]
800
+ setattr(container, attr_name, gen_tensor(shape, dtype))
801
+
802
+ return kwargs
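For reference, a minimal usage sketch of the parser above; this is an illustration only (not part of the upstream file), and assumes `forward` is a graph that was printed with `print_readable()` and has a symbolic size `s0`:

    import torch
    from torch._dynamo.debug_utils import aot_graph_input_parser

    # Pin the symbolic dimension s0 to a concrete value and build matching inputs.
    kwargs = aot_graph_input_parser(forward, device="cuda", sym_shapes={"s0": 8})
    out = forward(**kwargs)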
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/decorators.py ADDED
@@ -0,0 +1,347 @@
1
+ from dataclasses import dataclass
2
+ from typing import TYPE_CHECKING
3
+
4
+ import torch
5
+ from torch.utils._python_dispatch import is_traceable_wrapper_subclass
6
+ from . import trace_rules, variables
7
+ from .comptime import comptime
8
+ from .eval_frame import DisableContext, innermost_fn, RunOnlyContext
9
+ from .exc import IncorrectUsage
10
+ from .external_utils import is_compiling
11
+
12
+ if TYPE_CHECKING:
13
+ from torch._C._dynamo.eval_frame import ( # noqa: F401
14
+ reset_code,
15
+ set_eval_frame,
16
+ set_guard_error_hook,
17
+ skip_code,
18
+ unsupported,
19
+ )
20
+ else:
21
+ for name in dir(torch._C._dynamo.eval_frame):
22
+ if name.startswith("__"):
23
+ continue
24
+ globals()[name] = getattr(torch._C._dynamo.eval_frame, name)
25
+
26
+
27
+ def run(fn=None):
28
+ """Don't do any dynamic compiles, just use prior optimizations"""
29
+ if fn is not None:
30
+ fn = innermost_fn(fn)
31
+ assert callable(fn)
32
+ return RunOnlyContext()(fn)
33
+ return RunOnlyContext()
34
+
35
+
36
+ def disable(fn=None, recursive=True):
37
+ """
38
+ Decorator and context manager to disable TorchDynamo
39
+
40
+ If recursive=True, Dynamo is completely skipped on the decorated function
41
+ frame as well as the recursively invoked functions.
42
+
43
+ If recursive=False, Dynamo skips frames associated with the function code,
44
+ but still processes recursively invoked frames.
45
+ """
46
+ if recursive:
47
+ if fn is not None:
48
+ fn = innermost_fn(fn)
49
+ assert callable(fn)
50
+ return DisableContext()(fn)
51
+ return DisableContext()
52
+ else:
53
+ return skip(fn)
54
+
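A short, hedged usage sketch for the decorator above (illustrative only; `helper` and `fn` are hypothetical names):

    import torch

    @torch._dynamo.disable            # recursive=True: skip this frame and everything it calls
    def helper(x):
        return x.tolist()             # data-dependent code we do not want traced

    @torch.compile
    def fn(x):
        return torch.tensor(helper(x * 2))

    fn(torch.randn(4))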
55
+
56
+ def skip(fn=None):
57
+ """
58
+ Skip frames associated with the function code, but still process recursively
59
+ invoked frames
60
+ """
61
+ if fn is None:
62
+ return skip
63
+ fn = innermost_fn(fn)
64
+ assert callable(fn)
65
+ skip_code(fn.__code__)
66
+ fn._torchdynamo_disable = True
67
+ return fn
68
+
69
+
70
+ def assume_constant_result(fn):
71
+ fn._dynamo_marked_constant = True
72
+ return fn
73
+
74
+
75
+ def allow_in_graph(fn):
76
+ """
77
+ Customize which functions TorchDynamo will include in the generated
78
+ graph. Similar to `torch.fx.wrap()`.
79
+ ::
80
+
81
+ torch._dynamo.allow_in_graph(my_custom_function)
82
+
83
+ @torch._dynamo.optimize(...)
84
+ def fn(a):
85
+ x = torch.add(a, 1)
86
+ x = my_custom_function(x)
87
+ x = torch.add(x, 1)
88
+ return x
89
+
90
+ fn(...)
91
+
92
+ Will capture a single graph containing `my_custom_function()`.
93
+ """
94
+ if isinstance(fn, (list, tuple)):
95
+ return [allow_in_graph(x) for x in fn]
96
+ assert callable(fn), "allow_in_graph expects a callable"
97
+ if trace_rules.lookup_callable(fn) != variables.TorchInGraphFunctionVariable:
98
+ trace_rules._disallowed_callable_ids.remove(id(fn))
99
+ trace_rules._allowed_callable_ids.add(id(fn))
100
+ return fn
101
+
102
+
103
+ def _disallow_in_graph_helper(throw_if_not_allowed):
104
+ def inner(fn):
105
+ if isinstance(fn, (list, tuple)):
106
+ return [disallow_in_graph(x) for x in fn]
107
+ assert callable(fn), "disallow_in_graph expects a callable"
108
+ if (
109
+ throw_if_not_allowed
110
+ and trace_rules.lookup_callable(fn)
111
+ != variables.TorchInGraphFunctionVariable
112
+ and trace_rules.lookup(fn) != variables.TorchInGraphFunctionVariable
113
+ ):
114
+ raise IncorrectUsage(
115
+ "disallow_in_graph is expected to be used on an already allowed callable (like torch.* ops). "
116
+ "Allowed callables means callables that TorchDynamo puts as-is in the extracted graph."
117
+ )
118
+ trace_rules._allowed_callable_ids.remove(id(fn))
119
+ trace_rules._disallowed_callable_ids.add(id(fn))
120
+ return fn
121
+
122
+ return inner
123
+
124
+
125
+ def disallow_in_graph(fn):
126
+ """
127
+ Customize which functions TorchDynamo will exclude in the generated
128
+ graph and force a graph break on.
129
+ ::
130
+
131
+ torch._dynamo.disallow_in_graph(torch.sub)
132
+
133
+ @torch._dynamo.optimize(...)
134
+ def fn(a):
135
+ x = torch.add(a, 1)
136
+ x = torch.sub(x, 1)
137
+ x = torch.add(x, 1)
138
+ return x
139
+
140
+ fn(...)
141
+
142
+ Will break the graph on `torch.sub`, and give two graphs each with a
143
+ single `torch.add()` op.
144
+ """
145
+ return _disallow_in_graph_helper(throw_if_not_allowed=True)(fn)
146
+
147
+
148
+ @_disallow_in_graph_helper(throw_if_not_allowed=False)
149
+ def graph_break():
150
+ """Force a graph break"""
151
+ pass
152
+
153
+
154
+ def forbid_in_graph(fn):
155
+ """
156
+ Customize which functions TorchDynamo will assert are not present while tracing.
157
+
158
+ If you want a graph break on this function instead, use disallow_in_graph.
159
+ TODO(voz): We now have allow_in_graph, disallow_in_graph, forbid_in_graph - some more robust
160
+ documentation would not be amiss.
161
+ """
162
+ if isinstance(fn, (list, tuple)):
163
+ return [forbid_in_graph(x) for x in fn]
164
+ assert callable(fn), "forbid_in_graph applies only to callables"
165
+ fn._dynamo_forbidden = True
166
+ return fn
167
+
168
+
169
+ # Helper function to flatten a tensor subclass and apply a function to
170
+ # all inner tensors that match the outer dim. Used to reduce duplication
171
+ # across the various marking APIs.
172
+ def _apply_func_to_inner_tensors_of_same_dim(func, t, *args, **kwargs):
173
+ assert is_traceable_wrapper_subclass(t)
174
+
175
+ attrs, ctx = t.__tensor_flatten__()
176
+ for attr in attrs:
177
+ inner = getattr(t, attr)
178
+ if inner.dim() == t.dim():
179
+ func(inner, *args, **kwargs)
180
+
181
+
182
+ @dataclass(frozen=True)
183
+ class _DimRange:
184
+ """
185
+ This represents a dimension of a tensor and the corresponding
186
+ min and max values it can take. Don't create this
187
+ class directly; instead, use :func:`mark_dynamic`.
188
+ """
189
+
190
+ dim: int
191
+ min: int
192
+ max: int
193
+
194
+
195
+ @forbid_in_graph
196
+ def mark_dynamic(t, index, *, min=None, max=None):
197
+ """
198
+ Mark a tensor as having a dynamic dim and set corresponding min and max range for the dim.
199
+
200
+ [Note - on the state of mark_dynamic]
201
+
202
+ The behavior of having a dynamic dimension on a tensor is governed by a few factors:
203
+
204
+ 1) torch._dynamo.config dynamic_shapes True or False.
205
+ a) dynamic_shapes=True - dynamic_shapes must be True for mark_dynamic to work.
206
+ b) dynamic_shapes=False - This config will raise an exception when used in conjunction with
207
+ mark_dynamic. We will eventually support this.
208
+
209
+ 2) If the dimension is fully constrained - as in, it does not allow more than a single value
210
+ in both eager (torch.compile, torch._dynamo.optimize) mode and export mode (torch._dynamo.export),
211
+ we will raise an error
212
+
213
+ 3) If the dimension is partially constrained - allowing at least 2 values but not the full unbounded
214
+ range of shapes, in eager we will pass it through, but export will raise an error.
215
+
216
+ 4) Attempts to trace this function will explicitly raise. As such, all calls to mark_dynamic must be made
217
+ before torch.compile.
218
+
219
+ """
220
+ if is_traceable_wrapper_subclass(t):
221
+ # default behavior: mirror mark_dynamic() on all inner tensors with same dim as t
222
+ # TODO: Make this configurable via a supported public API
223
+ _apply_func_to_inner_tensors_of_same_dim(
224
+ mark_dynamic, t, index, min=min, max=max
225
+ )
226
+
227
+ if isinstance(index, int):
228
+ if not hasattr(t, "_dynamo_dynamic_indices"):
229
+ t._dynamo_dynamic_indices = set()
230
+ t._dynamo_dynamic_range = set()
231
+ # TODO(voz): Should we bounds check?
232
+ t._dynamo_dynamic_indices.add(index)
233
+ t._dynamo_dynamic_range.add(_DimRange(index, min, max))
234
+ return
235
+
236
+ assert isinstance(index, (list, tuple))
237
+ for i in index:
238
+ mark_dynamic(t, i, min=min, max=max)
239
+
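An illustrative sketch of the call pattern described above (the compiled function is a stand-in; not part of the upstream source):

    import torch

    def fn(x):
        return x.sum(dim=0)

    x = torch.randn(8, 16)
    torch._dynamo.mark_dynamic(x, 0, min=2, max=1024)  # dim 0 may vary across calls

    compiled = torch.compile(fn)
    compiled(x)                       # compiled with a dynamic first dimension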
240
+
241
+ @forbid_in_graph
242
+ def maybe_mark_dynamic(t, index):
243
+ """
244
+ Mark a tensor as having a dynamic dim, but don't enforce it (i.e., if this
245
+ dimension ends up getting specialized, don't error).
246
+ """
247
+ if is_traceable_wrapper_subclass(t):
248
+ # default behavior: mirror maybe_mark_dynamic() on all inner tensors with same dim as t
249
+ # TODO: Make this configurable via a supported public API
250
+ _apply_func_to_inner_tensors_of_same_dim(maybe_mark_dynamic, t, index)
251
+
252
+ if isinstance(index, int):
253
+ if not hasattr(t, "_dynamo_weak_dynamic_indices"):
254
+ t._dynamo_weak_dynamic_indices = set()
255
+ # TODO(voz): Should we bounds check?
256
+ t._dynamo_weak_dynamic_indices.add(index)
257
+ return
258
+
259
+ assert isinstance(index, (list, tuple))
260
+ for i in index:
261
+ maybe_mark_dynamic(t, i)
262
+
263
+
264
+ def mark_static(t, index=None):
265
+ """
266
+ Mark a tensor as having a static dim.
267
+
268
+ This will prevent us from attempting to compile it dynamically
269
+ when dynamic=True; this can improve trace-time performance.
270
+
271
+ This has lower precedence than mark_dynamic.
272
+
273
+ Unlike mark_dynamic, this can be done inside a graph, in which case it
274
+ induces specialization on the tensor.
275
+ """
276
+ if is_compiling():
277
+ if index is None:
278
+ for s in t.size():
279
+ comptime.force_static(s)
280
+ else:
281
+ comptime.force_static(t.size(index))
282
+ return
283
+
284
+ if is_traceable_wrapper_subclass(t):
285
+ # default behavior: mirror mark_static() on all inner tensors with same dim as t
286
+ # TODO: Make this configurable via a supported public API
287
+ _apply_func_to_inner_tensors_of_same_dim(mark_static, t, index)
288
+
289
+ if isinstance(index, int):
290
+ if not hasattr(t, "_dynamo_static_indices"):
291
+ t._dynamo_static_indices = set()
292
+ # TODO(voz): Should we bounds check?
293
+ t._dynamo_static_indices.add(index)
294
+ elif index is None:
295
+ for i in range(t.dim()):
296
+ mark_static(t, i)
297
+ else:
298
+ assert isinstance(index, (list, tuple))
299
+ for i in index:
300
+ mark_static(t, i)
301
+
302
+
303
+ @forbid_in_graph
304
+ def mark_static_address(t, guard=True):
305
+ """
306
+ Marks an input tensor whose data_ptr will not change across multiple calls
307
+ to a dynamo-compiled function. This indicates to cudagraphs that an extra allocation
308
+ is not needed for this input. The data_ptr will be guarded if guard=True. Note:
309
+ Tensors marked in this way will be kept alive until `torch._dynamo.reset()` is called.
310
+ """
311
+ if not isinstance(t, torch.Tensor):
312
+ raise TypeError(f"mark_static_address expects a tensor but received {type(t)}")
313
+
314
+ if guard:
315
+ t._dynamo_static_input_type = "guarded" # type: ignore[attr-defined]
316
+ else:
317
+ t._dynamo_static_input_type = "unguarded" # type: ignore[attr-defined]
318
+
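A hedged sketch of how the marker above is typically combined with cudagraphs-style compilation (the model and buffer here are hypothetical, and a CUDA device is assumed):

    import torch

    model = torch.nn.Linear(128, 64).cuda()
    static_input = torch.zeros(32, 128, device="cuda")
    torch._dynamo.mark_static_address(static_input)    # data_ptr treated as stable (guarded)

    compiled = torch.compile(model, mode="reduce-overhead")
    compiled(static_input)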
319
+
320
+ # Note: this carefully avoids eagerly importing einops.
321
+ # TODO: we should delete this whole _allow_in_graph_einops logic by approximately 2024 Q2
322
+ def _allow_in_graph_einops():
323
+ import einops
324
+
325
+ try:
326
+ # requires einops > 0.6.1, torch >= 2.0
327
+ from einops._torch_specific import ( # type: ignore[attr-defined] # noqa: F401
328
+ _ops_were_registered_in_torchdynamo,
329
+ )
330
+
331
+ # einops > 0.6.1 will call the op registration logic as it is imported.
332
+ pass
333
+ except ImportError:
334
+ # einops <= 0.6.1
335
+ allow_in_graph(einops.rearrange)
336
+ allow_in_graph(einops.reduce)
337
+ if hasattr(einops, "repeat"):
338
+ allow_in_graph(einops.repeat) # available since einops 0.2.0
339
+ if hasattr(einops, "einsum"):
340
+ allow_in_graph(einops.einsum) # available since einops 0.5.0
341
+ if hasattr(einops, "pack"):
342
+ allow_in_graph(einops.pack) # available since einops 0.6.0
343
+ if hasattr(einops, "unpack"):
344
+ allow_in_graph(einops.unpack) # available since einops 0.6.0
345
+
346
+
347
+ trace_rules.add_module_init_func("einops", _allow_in_graph_einops)
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py ADDED
@@ -0,0 +1,1561 @@
1
+ # mypy: disable-error-code="method-assign"
2
+
3
+ """
4
+ Functions in this file are responsible for modifying the eval frame
5
+ handler at RUNTIME. Therefore, all functions in this file are hot.
6
+ Functions that only execute at compile time should be placed
7
+ in torch._dynamo.convert_frame.
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import contextlib
13
+ import functools
14
+ import inspect
15
+ import logging
16
+ import os
17
+ import sys
18
+ import textwrap
19
+ import threading
20
+ import traceback
21
+ import types
22
+ import warnings
23
+ import weakref
24
+ from enum import Enum
25
+ from os.path import dirname, join
26
+ from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Tuple, Union
27
+ from unittest.mock import patch
28
+
29
+ import torch
30
+ import torch.fx
31
+ import torch.utils._pytree as pytree
32
+ import torch.utils.checkpoint
33
+ from torch import _guards
34
+ from torch._subclasses import fake_tensor
35
+ from torch._utils_internal import log_export_usage
36
+ from torch.export import Constraint
37
+ from torch.export.dynamic_shapes import _process_dynamic_shapes
38
+ from torch.fx.experimental.proxy_tensor import make_fx, maybe_disable_fake_tensor_mode
39
+ from torch.fx.experimental.symbolic_shapes import (
40
+ ConstraintViolationError,
41
+ DimDynamic,
42
+ StatelessSymbolicContext,
43
+ )
44
+ from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo
45
+
46
+ from ..fx import GraphModule
47
+ from .backends.registry import CompilerFn, lookup_backend
48
+
49
+ from .hooks import Hooks
50
+
51
+ # see discussion at https://github.com/pytorch/pytorch/issues/120699
52
+ reset_code = torch._C._dynamo.eval_frame.reset_code # noqa: F401
53
+ set_eval_frame = torch._C._dynamo.eval_frame.set_eval_frame # noqa: F401
54
+ set_guard_error_hook = torch._C._dynamo.eval_frame.set_guard_error_hook # noqa: F401
55
+ skip_code = torch._C._dynamo.eval_frame.skip_code # noqa: F401
56
+ unsupported = torch._C._dynamo.eval_frame.unsupported # noqa: F401
57
+
58
+ from . import config, convert_frame, external_utils, trace_rules, utils
59
+ from .code_context import code_context
60
+ from .exc import CondOpArgsMismatchError, UserError, UserErrorType
61
+ from .mutation_guard import install_generation_tagging_init
62
+ from .types import CacheEntry, DynamoCallback
63
+ from .utils import common_constant_types, compile_times
64
+
65
+ log = logging.getLogger(__name__)
66
+
67
+ from torch._dispatch.python import enable_python_dispatcher
68
+
69
+ always_optimize_code_objects = utils.ExactWeakKeyDictionary()
70
+ null_context = contextlib.nullcontext
71
+
72
+
73
+ import sympy
74
+
75
+
76
+ # See https://github.com/python/typing/pull/240
77
+ class Unset(Enum):
78
+ token = 0
79
+
80
+
81
+ unset = Unset.token
82
+
83
+ guarded_backend_cache = threading.local()
84
+ cached_backends: Dict[int, CompilerFn] = {}
85
+
86
+
87
+ def check_current_backend(backend_obj_id: int):
88
+ """
89
+ Called from guards to check if we need to recompile due to a backend change
90
+ """
91
+ # TODO(jansel): we should move guarded_backend_cache to C++
92
+ try:
93
+ if guarded_backend_cache.skip_backend_check_for_run_only_mode:
94
+ return True
95
+ except AttributeError:
96
+ # Go slightly faster next time
97
+ guarded_backend_cache.skip_backend_check_for_run_only_mode = False
98
+ try:
99
+ current_backend = guarded_backend_cache.current_backend
100
+ except AttributeError:
101
+ current_backend = None
102
+ return (
103
+ # Avoid the dict lookup in case of exact same object
104
+ id(current_backend) == backend_obj_id
105
+ or current_backend == cached_backends.get(backend_obj_id, None)
106
+ )
107
+
108
+
109
+ def _reset_guarded_backend_cache():
110
+ global cached_backends
111
+ guarded_backend_cache.skip_backend_check_for_run_only_mode = False
112
+ guarded_backend_cache.current_backend = None
113
+ for backend in cached_backends.values():
114
+ if hasattr(backend, "reset"):
115
+ backend.reset()
116
+ cached_backends.clear()
117
+
118
+
119
+ def backend_cache_manager(callback: DynamoCallback):
120
+ # callback is False for RunOnlyContext. RunOnlyContext is used
121
+ # as a way to re-use the previous compiled cache.
122
+ # We therefore skip the check and re-use whatever code is already cached.
123
+ # Note: the cache that's actually used depends on the caching policy.
124
+ if callback is False:
125
+
126
+ def change():
127
+ try:
128
+ prev_skip = guarded_backend_cache.skip_backend_check_for_run_only_mode
129
+ except AttributeError:
130
+ prev_skip = False
131
+ guarded_backend_cache.skip_backend_check_for_run_only_mode = True
132
+
133
+ def revert():
134
+ guarded_backend_cache.skip_backend_check_for_run_only_mode = prev_skip
135
+
136
+ return revert
137
+
138
+ else:
139
+ backend = innermost_fn(callback)
140
+
141
+ def change():
142
+ cached_backends.setdefault(id(backend), backend)
143
+ try:
144
+ prev_backend = guarded_backend_cache.current_backend
145
+ except AttributeError:
146
+ prev_backend = None
147
+ guarded_backend_cache.current_backend = backend
148
+
149
+ def revert():
150
+ guarded_backend_cache.current_backend = prev_backend
151
+
152
+ return revert
153
+
154
+ return change
155
+
156
+
157
+ DONT_WRAP_FILES = {
158
+ # For tracing into fx modules
159
+ inspect.getsourcefile(GraphModule),
160
+ join(dirname(dirname(__file__)), "onnx/_internal/fx/dynamo_graph_extractor.py"),
161
+ }
162
+
163
+
164
+ def _debug_get_cache_entry_list(
165
+ code: Union[types.CodeType, Callable[..., Any]]
166
+ ) -> List[CacheEntry]:
167
+ """
168
+ Given a code object or a callable object, retrieve the cache entries
169
+ stored in this code.
170
+ """
171
+ if callable(code):
172
+ code = code.__code__
173
+ return torch._C._dynamo.eval_frame._debug_get_cache_entry_list(code)
174
+
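An illustrative inspection sketch for the debug helper above (assumes the function has already been compiled and run once):

    import torch

    def f(x):
        return x.sin()

    compiled = torch.compile(f)
    compiled(torch.randn(4))

    # Cache entries are attached to the original function's code object.
    entries = torch._dynamo.eval_frame._debug_get_cache_entry_list(f)
    print(len(entries))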
175
+
176
+ class OptimizedModule(torch.nn.Module):
177
+ """
178
+ Wraps the original nn.Module object and later patches its
179
+ forward method with an optimized self.forward method.
180
+ """
181
+
182
+ _torchdynamo_orig_callable: Callable[..., Any]
183
+ get_compiler_config: Callable[[], Any]
184
+
185
+ def __init__(self, mod: torch.nn.Module, dynamo_ctx):
186
+ super().__init__()
187
+ # Installs the params/buffer
188
+ self._orig_mod = mod
189
+ self.dynamo_ctx = dynamo_ctx
190
+ self._initialize()
191
+
192
+ def _initialize(self):
193
+ # Do this stuff in constructor to lower overhead slightly
194
+ if isinstance(self._orig_mod.forward, types.MethodType) and trace_rules.check(
195
+ self._orig_mod.forward
196
+ ):
197
+ # This may be a torch.nn.* instance in trace_rules.py which
198
+ # This may be a torch.nn.* instance in trace_rules.py which won't
198
+ # trigger a frame evaluation; work around that by wrapping it inline
199
+ # so there is an extra frame we can capture
201
+ else:
202
+ # Invoke hooks outside of dynamo, then pick up the inner frame
203
+ self.forward = self.dynamo_ctx(self._orig_mod.__call__)
204
+
205
+ if hasattr(self._orig_mod, "_initialize_hook"):
206
+ self._forward = self.forward
207
+ self.forward = self._call_lazy_check
208
+
209
+ def __getstate__(self):
210
+ state = dict(self.__dict__)
211
+ state.pop("forward", None)
212
+ state.pop("__call__", None)
213
+ return state
214
+
215
+ def __setstate__(self, state):
216
+ self.__dict__ = state
217
+ self._initialize()
218
+
219
+ def __getattr__(self, name):
220
+ if name == "_orig_mod":
221
+ return self._modules["_orig_mod"]
222
+ return getattr(self._orig_mod, name)
223
+
224
+ def _call_lazy_check(self, *args, **kwargs):
225
+ if hasattr(self._orig_mod, "_initialize_hook"):
226
+ # In the case of a lazy module, we want to run
227
+ # the pre-hooks which initialize it.
228
+ # Afterwards, lazy module deletes its pre-hooks
229
+ # to avoid treating it as lazy on subsequent recompile.
230
+ self._orig_mod._infer_parameters(self._orig_mod, args, kwargs)
231
+ return self._forward(*args, **kwargs)
232
+
233
+ def __dir__(self):
234
+ orig_mod_attrs = self._orig_mod.__dir__()
235
+ return orig_mod_attrs + [
236
+ attr for attr in super().__dir__() if attr not in orig_mod_attrs
237
+ ]
238
+
239
+
240
+ def remove_from_cache(f):
241
+ """
242
+ Make sure f.__code__ is not cached to force a recompile
243
+ """
244
+ if isinstance(f, types.CodeType):
245
+ reset_code(f)
246
+ elif hasattr(f, "__code__"):
247
+ reset_code(f.__code__)
248
+ elif hasattr(getattr(f, "forward", None), "__code__"):
249
+ reset_code(f.forward.__code__)
250
+ else:
251
+ from . import reset # type: ignore[attr-defined]
252
+
253
+ reset()
254
+ log.warning("could not determine __code__ for %s", f)
255
+
256
+
257
+ def nothing():
258
+ pass
259
+
260
+
261
+ def always_false():
262
+ return False
263
+
264
+
265
+ def innermost_fn(fn):
266
+ """
267
+ In case of nesting of _TorchDynamoContext calls, find the innermost
268
+ function. TorchDynamo caches on the fn.__code__ object, so it's necessary to find
269
+ the innermost function to pass on the optimize, run, disable etc.
270
+ """
271
+ unaltered_fn = fn
272
+ while hasattr(unaltered_fn, "_torchdynamo_orig_callable"):
273
+ unaltered_fn = unaltered_fn._torchdynamo_orig_callable
274
+ assert callable(unaltered_fn)
275
+ return unaltered_fn
276
+
277
+
278
+ def make_set_enable_dynamic(enable: bool):
279
+ assert isinstance(enable, bool)
280
+ if enable:
281
+ # Assume everything is dynamic by default
282
+ return config._make_closure_patcher(assume_static_by_default=False)
283
+ else:
284
+ return config._make_closure_patcher(
285
+ automatic_dynamic_shapes=False, assume_static_by_default=True
286
+ )
287
+
288
+
289
+ class _TorchDynamoContext:
290
+ def __init__(
291
+ self,
292
+ callback: DynamoCallback,
293
+ on_enter=nothing,
294
+ backend_ctx_ctor=null_context,
295
+ patch_fn=nothing,
296
+ first_ctx=False,
297
+ *,
298
+ export=False,
299
+ dynamic=None,
300
+ compiler_config=None,
301
+ ):
302
+ super().__init__()
303
+ assert callable(callback) or callback is False or callback is None
304
+ self.callback: DynamoCallback = callback
305
+ self.prior: Union[Unset, DynamoCallback] = unset
306
+ self.first_ctx = first_ctx
307
+ self.export = export
308
+ self.compiler_config = compiler_config
309
+ self.cleanup_fns: List[Callable[[], Any]] = []
310
+ self.enter_exit_hooks = [backend_cache_manager(self.callback)]
311
+ patch_fn()
312
+
313
+ if dynamic is not None:
314
+ self.enter_exit_hooks.append(make_set_enable_dynamic(dynamic))
315
+
316
+ if on_enter is not nothing:
317
+ # this case is not common
318
+ def call_on_enter():
319
+ on_enter()
320
+ return nothing
321
+
322
+ self.enter_exit_hooks.append(call_on_enter)
323
+
324
+ if backend_ctx_ctor is not contextlib.nullcontext:
325
+ # this case is not common
326
+ def call_backend_ctx():
327
+ ctx = backend_ctx_ctor()
328
+ ctx.__enter__()
329
+ return functools.partial(ctx.__exit__, None, None, None)
330
+
331
+ self.enter_exit_hooks.append(call_backend_ctx)
332
+
333
+ def __enter__(self):
334
+ if config.raise_on_ctx_manager_usage:
335
+ raise RuntimeError(
336
+ "torch._dynamo.optimize(...) is used with a context manager. "
337
+ "Please refer to https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html "
338
+ "to use torch._dynamo.optimize(...) as an annotation/decorator. "
339
+ )
340
+ self.cleanup_fns = [enter() for enter in self.enter_exit_hooks]
341
+ self.prior = set_eval_frame(self.callback)
342
+
343
+ def __exit__(self, exc_type, exc_val, exc_tb):
344
+ assert self.prior is not unset
345
+ set_eval_frame(self.prior)
346
+ self.prior = unset
347
+ for cleanup in self.cleanup_fns:
348
+ cleanup()
349
+ self.cleanup_fns.clear()
350
+
351
+ def __call__(self, fn):
352
+ # public api for compiler config/options
353
+ def get_compiler_config():
354
+ return self.compiler_config
355
+
356
+ fn = innermost_fn(fn)
357
+
358
+ # add context containing GraphModule to any GraphModule forward functions
359
+ from torch.fx._lazy_graph_module import _LazyGraphModule
360
+
361
+ if isinstance(fn, _LazyGraphModule) or (
362
+ isinstance(getattr(fn, "__self__", None), _LazyGraphModule)
363
+ and fn.__name__ == "_lazy_forward"
364
+ ):
365
+ # Since dynamo will run the forward method for the GraphModule shortly
366
+ # anyways, it does not hurt to do the real recompilation here if
367
+ # this is a _LazyGraphModule. This makes it easier for dynamo to
368
+ # optimize a _LazyGraphModule.
369
+
370
+ lazy_gm = fn if isinstance(fn, _LazyGraphModule) else fn.__self__
371
+
372
+ _LazyGraphModule.force_recompile(lazy_gm)
373
+
374
+ # Assume that the underlying node metadata of `fn`,
375
+ # a GraphModule instance, accurately represents
376
+ # all instances of type(fn).
377
+ code_context.get_context(lazy_gm.forward.__code__)[
378
+ "orig_graphmodule"
379
+ ] = weakref.ref(lazy_gm)
380
+
381
+ if not isinstance(fn, _LazyGraphModule):
382
+ # replace fn with the real forward method
383
+ fn = lazy_gm.forward
384
+ elif isinstance(fn, GraphModule):
385
+ code_context.get_context(fn.forward.__code__)[
386
+ "orig_graphmodule"
387
+ ] = weakref.ref(fn)
388
+
389
+ # Optimize the forward method of torch.nn.Module object
390
+ if isinstance(fn, torch.nn.Module):
391
+ mod = fn
392
+ new_mod = OptimizedModule(mod, self)
393
+ # Save the function pointer to find the original callable while nesting
394
+ # of decorators.
395
+ new_mod._torchdynamo_orig_callable = mod.forward
396
+
397
+ # when compiling torch.nn.Module,
398
+ # provide public api OptimizedModule.get_compiler_config()
399
+ assert not hasattr(new_mod, "get_compiler_config")
400
+ new_mod.get_compiler_config = get_compiler_config
401
+
402
+ return new_mod
403
+ assert callable(fn)
404
+
405
+ try:
406
+ filename = inspect.getsourcefile(fn)
407
+ except TypeError:
408
+ filename = None
409
+ if (
410
+ (filename is None or trace_rules.check(fn))
411
+ and (
412
+ getattr(fn, "__name__", "") not in ["_call_impl", "_wrapped_call_impl"]
413
+ )
414
+ and filename not in DONT_WRAP_FILES
415
+ ):
416
+ # call to a builtin without a frame for us to capture
417
+ fn = external_utils.wrap_inline(fn)
418
+
419
+ callback = self.callback
420
+
421
+ if isinstance(self, DisableContext):
422
+ is_jit_tracing = always_false
423
+ is_fx_tracing = always_false
424
+ else:
425
+ is_jit_tracing = torch._C._is_tracing
426
+ is_fx_tracing = torch.fx._symbolic_trace.is_fx_tracing
427
+
428
+ @functools.wraps(fn)
429
+ def _fn(*args, **kwargs):
430
+ if is_fx_tracing():
431
+ if config.error_on_nested_fx_trace:
432
+ raise RuntimeError(
433
+ "Detected that you are using FX to symbolically trace "
434
+ "a dynamo-optimized function. This is not supported at the moment."
435
+ )
436
+ else:
437
+ return fn(*args, **kwargs)
438
+
439
+ if is_jit_tracing():
440
+ if config.error_on_nested_jit_trace:
441
+ "Detected that you are calling torch.jit.trace on "
442
+ "Detected that you are using FX to torch.jit.trace "
443
+ "a dynamo-optimized function. This is not supported at the moment."
444
+ )
445
+ else:
446
+ return fn(*args, **kwargs)
447
+
448
+ cleanups = [enter() for enter in self.enter_exit_hooks]
449
+ prior = set_eval_frame(callback)
450
+ try:
451
+ return fn(*args, **kwargs)
452
+ finally:
453
+ set_eval_frame(prior)
454
+ for cleanup in cleanups:
455
+ cleanup()
456
+
457
+ # hooks to properly handle inlining
458
+ if isinstance(self, DisableContext):
459
+ _fn._torchdynamo_disable = True # type: ignore[attr-defined]
460
+ else:
461
+ _fn._torchdynamo_inline = fn # type: ignore[attr-defined]
462
+
463
+ # Save the function pointer to find the original callable while nesting
464
+ # of decorators.
465
+ _fn._torchdynamo_orig_callable = fn # type: ignore[attr-defined]
466
+
467
+ # when compiling user function instead of nn.Module
468
+ # provide public api _fn.get_compiler_config()
469
+ assert not hasattr(_fn, "get_compiler_config")
470
+ _fn.get_compiler_config = get_compiler_config # type: ignore[attr-defined]
471
+
472
+ # If the function is called using torch._dynamo.optimize decorator, we
473
+ # should prevent any type of skipping.
474
+ if callback not in (None, False):
475
+ if not hasattr(fn, "__code__"):
476
+ raise RuntimeError(
477
+ textwrap.dedent(
478
+ """
479
+
480
+ torch._dynamo.optimize is called on a non-function object.
481
+ If this is a callable class, please wrap the relevant code into a function and optimize the
482
+ wrapper function.
483
+
484
+ >> class CallableClass:
485
+ >> def __init__(self):
486
+ >> super().__init__()
487
+ >> self.relu = torch.nn.ReLU()
488
+ >>
489
+ >> def __call__(self, x):
490
+ >> return self.relu(torch.sin(x))
491
+ >>
492
+ >> def print_hello(self):
493
+ >> print("Hello world")
494
+ >>
495
+ >> mod = CallableClass()
496
+
497
+ If you want to optimize the __call__ function and other code, wrap that up in a function
498
+
499
+ >> def wrapper_fn(x):
500
+ >> y = mod(x)
501
+ >> return y.sum()
502
+
503
+ and then optimize the wrapper_fn
504
+
505
+ >> opt_wrapper_fn = torch._dynamo.optimize()(wrapper_fn)
506
+ """
507
+ )
508
+ )
509
+ always_optimize_code_objects[fn.__code__] = True
510
+
511
+ return _fn
512
+
513
+
514
+ class OptimizeContext(_TorchDynamoContext):
515
+ def __init__(
516
+ self,
517
+ callback,
518
+ backend_ctx_ctor,
519
+ first_ctx=False,
520
+ *,
521
+ export=False,
522
+ dynamic=None,
523
+ compiler_config=None,
524
+ ):
525
+ def on_enter():
526
+ install_generation_tagging_init()
527
+
528
+ super().__init__(
529
+ callback=callback,
530
+ on_enter=on_enter,
531
+ backend_ctx_ctor=backend_ctx_ctor,
532
+ patch_fn=TorchPatcher.patch,
533
+ first_ctx=first_ctx,
534
+ export=export,
535
+ dynamic=dynamic,
536
+ compiler_config=compiler_config,
537
+ )
538
+
539
+
540
+ class RunOnlyContext(_TorchDynamoContext):
541
+ def __init__(self):
542
+ # cudagraph trees relies on generation increment
543
+ def on_enter():
544
+ torch._dynamo.mutation_guard.GenerationTracker.generation += 1
545
+
546
+ super().__init__(callback=False, on_enter=on_enter)
547
+
548
+
549
+ class DisableContext(_TorchDynamoContext):
550
+ def __init__(self):
551
+ super().__init__(callback=None)
552
+
553
+
554
+ def _optimize_catch_errors(
555
+ compile_fn,
556
+ hooks: Hooks,
557
+ backend_ctx_ctor=null_context,
558
+ export=False,
559
+ dynamic=None,
560
+ compiler_config=None,
561
+ ):
562
+ return OptimizeContext(
563
+ convert_frame.catch_errors_wrapper(compile_fn, hooks),
564
+ backend_ctx_ctor=backend_ctx_ctor,
565
+ first_ctx=True,
566
+ export=export,
567
+ dynamic=dynamic,
568
+ compiler_config=compiler_config,
569
+ )
570
+
571
+
572
+ def get_compiler_fn(compiler_fn):
573
+ from .repro.after_dynamo import wrap_backend_debug
574
+
575
+ if hasattr(compiler_fn, "compiler_name"):
576
+ compiler_str = compiler_fn.compiler_name
577
+ elif isinstance(compiler_fn, str):
578
+ compiler_str = compiler_fn
579
+ else:
580
+ compiler_str = None
581
+ compiler_fn = lookup_backend(compiler_fn)
582
+ return wrap_backend_debug(compiler_fn, compiler_str)
583
+
584
+
585
+ class _NullDecorator(contextlib.nullcontext): # type: ignore[type-arg]
586
+ def __call__(self, fn):
587
+ assert callable(fn)
588
+ return fn
589
+
590
+
591
+ def check_if_dynamo_supported():
592
+ if sys.version_info >= (3, 12):
593
+ raise RuntimeError("Python 3.12+ not yet supported for torch.compile")
594
+
595
+
596
+ def is_dynamo_supported():
597
+ try:
598
+ check_if_dynamo_supported()
599
+ return True
600
+ except Exception:
601
+ return False
602
+
603
+
604
+ def check_if_inductor_supported():
605
+ check_if_dynamo_supported()
606
+
607
+ if sys.platform == "win32":
608
+ raise RuntimeError("Windows not yet supported for inductor")
609
+
610
+
611
+ def is_inductor_supported():
612
+ try:
613
+ check_if_inductor_supported()
614
+ return True
615
+ except Exception:
616
+ return False
617
+
618
+
619
+ def optimize(
620
+ backend="inductor",
621
+ *,
622
+ nopython=False,
623
+ guard_export_fn=None,
624
+ guard_fail_fn=None,
625
+ disable=False,
626
+ dynamic=None,
627
+ ):
628
+ """
629
+ The main entrypoint of TorchDynamo. Do graph capture and call
630
+ backend() to optimize extracted graphs.
631
+
632
+ Args:
633
+ backend: One of two things:
634
+ - Either, a function/callable taking a torch.fx.GraphModule and
635
+ example_inputs and returning a python callable that runs the
636
+ graph faster.
637
+ One can also provide additional context for the backend, like
638
+ torch.jit.fuser("fuser2"), by setting the backend_ctx_ctor attribute.
639
+ See AOTAutogradMemoryEfficientFusionWithContext for the usage.
640
+ - Or, a string backend name in `torch._dynamo.list_backends()`
641
+ nopython: If True, graph breaks will be errors and there will
642
+ be a single whole-program graph.
643
+ disable: If True, turn this decorator into a no-op
644
+ dynamic: If True, upfront compile as dynamic a kernel as possible. If False,
645
+ disable all dynamic shapes support (always specialize). If None, automatically
646
+ detect when sizes vary and generate dynamic kernels upon recompile.
647
+
648
+ Example Usage::
649
+
650
+ @torch._dynamo.optimize()
651
+ def toy_example(a, b):
652
+ ...
653
+ """
654
+ check_if_dynamo_supported()
655
+ # Note: The hooks object could be global instead of passed around, *however* that would make
656
+ # for a confusing API usage and plumbing story wherein we nest multiple .optimize calls.
657
+ # There is some prior art around this, w/r/t nesting backend calls are enforced to be the same
658
+ # compiler, however, this feels onerous for callback and hooks, and it feels better to give our users an
659
+ # easier to understand UX at the cost of a little more plumbing on our end.
660
+ hooks = Hooks(guard_export_fn=guard_export_fn, guard_fail_fn=guard_fail_fn)
661
+ torch._C._log_api_usage_once("torch._dynamo.optimize")
662
+ if disable or os.environ.get("TORCHDYNAMO_DISABLE", "") == "1":
663
+ return _NullDecorator()
664
+
665
+ backend = get_compiler_fn(backend)
666
+
667
+ # Find if backend has any extra context manager
668
+ backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context)
669
+
670
+ if nopython:
671
+ return optimize_assert(
672
+ backend,
673
+ dynamic=dynamic,
674
+ hooks=hooks,
675
+ )
676
+ return _optimize_catch_errors(
677
+ convert_frame.convert_frame(backend, hooks=hooks),
678
+ hooks,
679
+ backend_ctx_ctor,
680
+ dynamic=dynamic,
681
+ compiler_config=backend.get_compiler_config()
682
+ if hasattr(backend, "get_compiler_config")
683
+ else None,
684
+ )
685
+
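A short, hedged sketch of the custom-backend path that the docstring above describes; the backend here is a trivial illustrative one, not part of the upstream file:

    import torch

    def my_backend(gm: torch.fx.GraphModule, example_inputs):
        gm.graph.print_tabular()      # inspect the captured graph
        return gm.forward             # return a callable that runs the graph

    @torch._dynamo.optimize(my_backend, nopython=False)
    def toy_example(a, b):
        return (a @ b).relu()

    toy_example(torch.randn(4, 4), torch.randn(4, 4))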
686
+
687
+ # TODO(voz): Consider making "explain" output alongside a run / part of a run
688
+ @patch("torch._dynamo.symbolic_convert.explain", True)
689
+ def explain(f, *extra_args, **extra_kwargs):
690
+ def inner(*args, **kwargs):
691
+ # TODO(voz): Do we want a decorator for this?
692
+ from . import reset # type: ignore[attr-defined]
693
+
694
+ reset()
695
+
696
+ graphs: List[torch.fx.GraphModule] = []
697
+ break_reasons: List[Any] = []
698
+ op_count: int = 0
699
+ ops_per_graph: List[torch.fx.Node] = []
700
+ out_guards: List[_guards.Guard] = []
701
+
702
+ def dynamo_graph_accumulating_compiler(
703
+ gm: torch.fx.GraphModule, example_inputs
704
+ ):
705
+ from .backends.debugging import _explain_graph_detail
706
+
707
+ nonlocal graphs
708
+ nonlocal op_count
709
+ nonlocal ops_per_graph
710
+ nonlocal break_reasons
711
+
712
+ gm, graphs, op_count, ops_per_graph, break_reasons = _explain_graph_detail(
713
+ gm, graphs, op_count, ops_per_graph, break_reasons
714
+ )
715
+
716
+ return gm.forward
717
+
718
+ def guard_export_print(guards):
719
+ nonlocal out_guards
720
+ out_guards.extend(guards)
721
+
722
+ opt_f = optimize(
723
+ dynamo_graph_accumulating_compiler,
724
+ nopython=False,
725
+ guard_export_fn=guard_export_print,
726
+ # TODO(voz): We may have instances of `f` that mutate inputs; we should track side effects and reject.
727
+ # TODO(voz): We may have instances of `f` that mutate inputs, we should track sideeffects and reject.
728
+ opt_f(*args, **kwargs)
729
+
730
+ graph_count = len(graphs)
731
+
732
+ # For the explanation summary, group reasons by the innermost stack frame and dedupe by it.
733
+ deduped_reasons = {}
734
+ for reason in break_reasons:
735
+ innermost_frame = reason.user_stack[-1]
736
+ # __repr__ uniquely identifies a FrameSummary so we can use it for deduping
737
+ deduped_reasons[repr(innermost_frame)] = reason
738
+
739
+ formatted_list = ""
740
+ for idx, break_reason in enumerate(deduped_reasons.values()):
741
+ formatted_stack = "".join(traceback.format_list(break_reason.user_stack))
742
+ msg = f"{idx + 1}. Reason: {break_reason.reason}\n User Stack: {formatted_stack}\n"
743
+ formatted_list += msg
744
+
745
+ graph_break_count = graph_count - 1
746
+ compile_time = compile_times(repr="str")
747
+
748
+ # TODO(voz): Do we want a decorator for this?
749
+ reset()
750
+ from .backends.debugging import ExplainOutput
751
+
752
+ return ExplainOutput(
753
+ graphs,
754
+ graph_count,
755
+ graph_break_count,
756
+ break_reasons,
757
+ op_count,
758
+ ops_per_graph,
759
+ out_guards,
760
+ compile_time,
761
+ )
762
+
763
+ if extra_args or extra_kwargs:
764
+ warnings.warn(
765
+ "explain(f, *args, **kwargs) is deprecated, use explain(f)(*args, **kwargs) instead. "
766
+ "If you don't migrate, we may break your explain call in the future if your user defined kwargs "
767
+ "conflict with future kwargs added to explain(f)."
768
+ )
769
+ return inner(*extra_args, **extra_kwargs)
770
+ else:
771
+ return inner
772
+
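An illustrative call pattern for explain() as defined above (the example function is hypothetical):

    import torch

    def toy(x):
        x = x.sin()
        print("this print forces a graph break")
        return x.cos()

    result = torch._dynamo.explain(toy)(torch.randn(8))
    print(result.graph_count, result.graph_break_count)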
773
+
774
+ class FlattenInputOutputSignature(torch.fx.interpreter.Transformer):
775
+ def __init__(
776
+ self,
777
+ m: torch.fx.GraphModule,
778
+ flat_args: Tuple[Any],
779
+ matched_input_elements_positions: List[int],
780
+ flat_results: List[Any],
781
+ matched_output_elements_positions: List[int],
782
+ example_fake_inputs: List[torch.Tensor],
783
+ flat_args_dynamic_dims: List[Set[int]],
784
+ fake_mode: Optional[fake_tensor.FakeTensorMode] = None,
785
+ ):
786
+ super().__init__(m)
787
+
788
+ assert len(flat_args_dynamic_dims) == len(flat_args)
789
+ matched_input_elements_to_fake = {
790
+ val: example_fake_inputs[ix]
791
+ for ix, val in enumerate(matched_input_elements_positions)
792
+ }
793
+
794
+ self.new_args = []
795
+ for i in range(0, len(flat_args)):
796
+ arg = super().placeholder(f"arg{i}", (), {})
797
+ if i in matched_input_elements_to_fake:
798
+ arg.node.meta["val"] = matched_input_elements_to_fake[i]
799
+ else:
800
+ # Fill node.meta["val"] with a fake tensor from the input,
801
+ # if it's not found in matched_input_elements_positions
802
+ if fake_mode is not None and isinstance(flat_args[i], torch.Tensor):
803
+ # TODO(zhxchen17) Also preserve all the user constraints here.
804
+ arg.node.meta["val"] = fake_mode.from_tensor(
805
+ flat_args[i],
806
+ symbolic_context=StatelessSymbolicContext(
807
+ dynamic_sizes=[
808
+ DimDynamic.DYNAMIC
809
+ if d in flat_args_dynamic_dims[i]
810
+ else DimDynamic.STATIC
811
+ for d in range(len(flat_args[i].shape))
812
+ ],
813
+ constraint_sizes=[None] * len(flat_args[i].shape),
814
+ ),
815
+ )
816
+ self.new_args.append(arg)
817
+ self.old_args_gen = (self.new_args[i] for i in matched_input_elements_positions)
818
+ self.matched_output_elements_positions = matched_output_elements_positions
819
+ self.flat_results = flat_results
820
+
821
+ def placeholder(self, target, args, kwargs):
822
+ arg = next(self.old_args_gen)
823
+ if "val" in self.current_node.meta:
824
+ arg.node.meta["val"] = self.current_node.meta["val"]
825
+ if "tensor_dict" in self.current_node.meta:
826
+ arg.node.meta["tensor_dict"] = self.current_node.meta["tensor_dict"]
827
+ if "example_value" in self.current_node.meta:
828
+ arg.node.meta["example_value"] = self.current_node.meta["example_value"]
829
+ return arg
830
+
831
+ def output(self, target, args, kwargs):
832
+ dynamo_result_flat = args[0]
833
+ lookup = [*dynamo_result_flat, *self.new_args]
834
+ new_results_flat = []
835
+ for i in range(len(self.flat_results)):
836
+ if self.matched_output_elements_positions[i] is not None:
837
+ new_results_flat.append(
838
+ lookup[self.matched_output_elements_positions[i]]
839
+ )
840
+ else:
841
+ const_val = self.flat_results[i]
842
+ assert isinstance(const_val, tuple(common_constant_types))
843
+ new_results_flat.append(const_val)
844
+ return super().output(target, (new_results_flat,), {})
845
+
846
+ def run_node(self, n):
847
+ self.current_node = n
848
+ result_proxy = super().run_node(n)
849
+ if "val" in self.current_node.meta:
850
+ result_proxy.node.meta["val"] = self.current_node.meta["val"]
851
+ if "example_value" in self.current_node.meta:
852
+ result_proxy.node.meta["example_value"] = self.current_node.meta[
853
+ "example_value"
854
+ ]
855
+ if self.current_node.op != "output":
856
+ result_proxy.node._rename(
857
+ getattr(self.current_node, "name", result_proxy.node.name)
858
+ )
859
+ return result_proxy
860
+
861
+ def transform(self):
862
+ result_gm = super().transform()
863
+ if "dynamo_flat_name_to_original_fqn" in self.module.meta:
864
+ result_gm.meta["dynamo_flat_name_to_original_fqn"] = self.module.meta[
865
+ "dynamo_flat_name_to_original_fqn"
866
+ ]
867
+ return result_gm
868
+
869
+
870
+ class ExportResult(NamedTuple):
871
+ graph_module: torch.fx.GraphModule
872
+ guards: _guards.GuardsSet
873
+ # NB: Do not add new fields without overriding __iter__; people are
874
+ # destructuring so it is BC-breaking
875
+
876
+
877
+ def check_signature_rewritable(graph):
878
+ input_errors = []
879
+ for node in graph.graph.nodes:
880
+ if node.op == "placeholder":
881
+ assert hasattr(node, "_dynamo_source")
882
+ source = node._dynamo_source
883
+ user_stacks = graph._source_to_user_stacks.get(source)
884
+ if user_stacks is None:
885
+ continue
886
+ assert len(user_stacks) > 0
887
+ # In some cases we may not have a useful stack. Look for a
888
+ # useful stack
889
+ stack = None
890
+ for s in user_stacks:
891
+ if len(s) == 0:
892
+ continue
893
+ stack = s
894
+ break
895
+ if stack is None:
896
+ msg = f"{source.name()}, a closed over free variable"
897
+ else:
898
+ tb = "".join(traceback.format_list(stack))
899
+ extra = ""
900
+ if len(user_stacks) > 1:
901
+ extra = f"(elided {len(user_stacks)-1} more accesses)"
902
+ msg = f"{source.name()}, accessed at:\n{tb}{extra}"
903
+ # TODO: option to print ALL of the stack traces at once
904
+ input_errors.append(msg)
905
+
906
+ if input_errors:
907
+ raise UserError(
908
+ UserErrorType.INVALID_INPUT,
909
+ "Cannot export model which references tensors that are neither "
910
+ "buffers/parameters/constants nor are direct inputs. For each tensor, if you'd "
911
+ "like this tensor to be an explicit input, add it as a dummy argument "
912
+ "to the top-level model definition you are exporting; if you would "
913
+ "like its value to be embedded as an exported constant, wrap its access "
914
+ "in a function marked with @assume_constant_result.\n\n"
915
+ + "\n\n".join(input_errors),
916
+ )
917
+
918
+
919
+ def rewrite_signature(
920
+ f_sig,
921
+ graph,
922
+ fake_mode,
923
+ flat_args,
924
+ in_spec,
925
+ example_fake_inputs,
926
+ graph_captured_input,
927
+ graph_captured_output,
928
+ dynamo_traced_result,
929
+ flat_args_dynamic_dims,
930
+ ):
931
+ orig_args, orig_kwargs = pytree.tree_unflatten(flat_args, in_spec)
932
+
933
+ def check_user_input_output(flat_values, error_type):
934
+ supported_types = [
935
+ torch.Tensor,
936
+ torch.SymInt,
937
+ torch.SymFloat,
938
+ torch.SymBool,
939
+ torch._C.ScriptObject,
940
+ ] + list(common_constant_types)
941
+
942
+ def is_supported_type(val):
943
+ return isinstance(val, tuple(supported_types))
944
+
945
+ value_type = "input" if error_type == UserErrorType.INVALID_INPUT else "output"
946
+ # We only check that the outputs are not None. Inputs can be None.
947
+ for v in flat_values:
948
+ if not is_supported_type(v):
949
+ if error_type == UserErrorType.INVALID_INPUT and v is None:
950
+ continue
951
+
952
+ raise UserError(
953
+ error_type,
954
+ f"It looks like one of the {value_type}s with type `{type(v)}` "
955
+ "is not supported or pytree-flattenable. \n"
956
+ f"Exported graphs {value_type}s can only contain the "
957
+ f"following supported types: {supported_types}. \n"
958
+ "If you are using a custom class object, "
959
+ "please register a pytree_flatten/unflatten function "
960
+ "using `torch.utils._pytree.register_pytree_node` or "
961
+ "`torch.export.register_dataclass`.",
962
+ )
963
+
964
+ check_user_input_output(flat_args, UserErrorType.INVALID_INPUT)
965
+ flat_results_traced, out_spec_traced = pytree.tree_flatten(dynamo_traced_result)
966
+ check_user_input_output(flat_results_traced, UserErrorType.INVALID_OUTPUT)
967
+
968
+ def produce_matching(debug_type, sources, candidates):
969
+ matched_elements_positions: List[Optional[int]] = []
970
+ dict_of_source_vals = {}
971
+ for i, val in enumerate(sources):
972
+ dict_of_source_vals[id(val)] = i
973
+
974
+ for i, val in enumerate(candidates):
975
+ if isinstance(val, tuple(common_constant_types)):
976
+ matched_elements_positions.append(None)
977
+ elif id(val) not in dict_of_source_vals:
978
+ raise AssertionError(
979
+ f"Unexpectedly found a {type(val)} in the {debug_type}.\n"
980
+ 'Please file an issue along with a paste of the logs from TORCH_LOGS="+export"'
981
+ )
982
+ else:
983
+ matched_elements_positions.append(dict_of_source_vals[id(val)])
984
+
985
+ return matched_elements_positions
986
+
987
+ matched_input_elements_positions = produce_matching(
988
+ "inputs", flat_args, graph_captured_input
989
+ )
990
+
991
+ assert graph_captured_output is not None
992
+ matched_output_elements_positions = produce_matching(
993
+ "outputs", list(graph_captured_output) + flat_args, flat_results_traced
994
+ )
995
+
996
+ new_graph = FlattenInputOutputSignature(
997
+ graph,
998
+ flat_args,
999
+ matched_input_elements_positions,
1000
+ flat_results_traced,
1001
+ matched_output_elements_positions,
1002
+ example_fake_inputs,
1003
+ flat_args_dynamic_dims,
1004
+ fake_mode,
1005
+ ).transform()
1006
+
1007
+ # Make dynamo graph to have same input/output spec as user code
1008
+ def argument_names(f_sig, args, kwargs) -> List[str]:
1009
+ def signature_to_fullargspec(sig: inspect.Signature):
1010
+ # Get a list of Parameter objects from the Signature object
1011
+ params = list(sig.parameters.values())
1012
+ # Separate positional arguments, keyword-only arguments and varargs/varkw
1013
+ args = [
1014
+ p.name
1015
+ for p in params
1016
+ if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
1017
+ ]
1018
+ kwonlyargs = [
1019
+ p.name for p in params if p.kind == inspect.Parameter.KEYWORD_ONLY
1020
+ ]
1021
+ varargs = next(
1022
+ (p.name for p in params if p.kind == inspect.Parameter.VAR_POSITIONAL),
1023
+ None,
1024
+ )
1025
+ varkw = next(
1026
+ (p.name for p in params if p.kind == inspect.Parameter.VAR_KEYWORD),
1027
+ None,
1028
+ )
1029
+ # Get default values for positional arguments and keyword-only arguments
1030
+ defaults = tuple(
1031
+ p.default
1032
+ for p in params
1033
+ if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
1034
+ and p.default is not inspect.Parameter.empty
1035
+ )
1036
+ kwonlydefaults = {
1037
+ p.name: p.default
1038
+ for p in params
1039
+ if p.kind == inspect.Parameter.KEYWORD_ONLY
1040
+ and p.default is not inspect.Parameter.empty
1041
+ }
1042
+ # Get annotations for parameters and return value
1043
+ annotations = {}
1044
+ if sig.return_annotation:
1045
+ annotations = {"return": sig.return_annotation}
1046
+ for parameter in params:
1047
+ annotations[parameter.name] = parameter.annotation
1048
+ # Return a FullArgSpec object with the extracted attributes
1049
+ return inspect.FullArgSpec(
1050
+ args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations
1051
+ )
1052
+
1053
+ fullargspec = signature_to_fullargspec(f_sig)
1054
+
1055
+ # 1. Map `args` 1-to-1 to positional arguments in original signature.
1056
+ input_strs = fullargspec.args[: len(args)]
1057
+
1058
+ if len(args) > len(fullargspec.args):
1059
+ # 2. If there are more arguments left in `args`, they map to varargs in original
1060
+ # signature. Assign names as {varargs}_0, {varargs}_1, ...
1061
+ assert fullargspec.varargs is not None, "More arguments than expected"
1062
+ input_strs += [
1063
+ f"{fullargspec.varargs}_{i}"
1064
+ for i in range(0, len(args) - len(input_strs))
1065
+ ]
1066
+ elif len(args) < len(fullargspec.args):
1067
+ # 3. If there are fewer arguments in `args` than `fullargspec.args`,
1068
+ # it implies these are arguments either with default values, or provided in
1069
+ # `kwargs`. The former can be safely ignored. Because Dynamo.export does not
1070
+ # `kwargs`. The former can be safely ignored, because Dynamo.export does not
1071
+ # in the next step.
1072
+ for unprovided_arg in fullargspec.args[
1073
+ len(args) : -len(fullargspec.defaults or [])
1074
+ ]:
1075
+ assert unprovided_arg in kwargs, f"Missing argument {unprovided_arg}"
1076
+
1077
+ # 4. Keyword arguments provided in `kwargs`.
1078
+ input_strs += list(kwargs.keys())
1079
+
1080
+ # 5. Keyword-only arguments with default values if not provided are not exported
1081
+ # as part of the function signature.
1082
+ for kwonly_arg in fullargspec.kwonlyargs:
1083
+ kwonlydefaults = fullargspec.kwonlydefaults or {}
1084
+ assert (
1085
+ kwonly_arg in kwargs or kwonly_arg in kwonlydefaults
1086
+ ), f"Missing keyword only argument {kwonly_arg}"
1087
+
1088
+ return input_strs
1089
+
1090
+ new_graph.graph._codegen = _PyTreeCodeGen(
1091
+ _PyTreeInfo(
1092
+ argument_names(f_sig, orig_args, orig_kwargs),
1093
+ in_spec,
1094
+ out_spec_traced,
1095
+ )
1096
+ )
1097
+ new_graph.recompile()
1098
+ return new_graph
1099
+
1100
+
1101
+ def export(
1102
+ f: Callable[..., Any],
1103
+ *extra_args,
1104
+ aten_graph: bool = False,
1105
+ pre_dispatch: bool = False,
1106
+ decomposition_table: Optional[
1107
+ Dict[torch._ops.OpOverload, Callable[..., Any]]
1108
+ ] = None,
1109
+ tracing_mode: str = "symbolic",
1110
+ constraints: Optional[List[Constraint]] = None,
1111
+ dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None,
1112
+ assume_static_by_default: bool = False,
1113
+ same_signature: bool = True,
1114
+ disable_constraint_solver: bool = False,
1115
+ _log_export_usage: bool = True,
1116
+ **extra_kwargs,
1117
+ ) -> Callable[..., ExportResult]:
1118
+ """
1119
+ Export an input function f to a format that can be executed outside of PyTorch using the FX graph.
1120
+
1121
+ Args:
1122
+ f (callable): A PyTorch function to be exported.
1123
+
1124
+ aten_graph (bool): If True, exports a graph with ATen operators.
1125
+ If False, exports a graph with Python operators. Default is False.
1126
+
1127
+ pre_dispatch (bool): If True, exports a graph with ATen operators,
1128
+ but before any logic in the PyTorch dispatcher has run.
1129
+ This can be useful if you want to apply further transformations on a graph before running it
1130
+ through autograd, autocast, or any other functionalities that are integrated into the dispatcher.
1131
+ This flag is only valid if aten_graph=True is set.
1132
+ Default is False.
1133
+
1134
+ decomposition_table (dict): A dictionary that maps operators to their decomposition functions.
1135
+ Required if aten_graph or tracing_mode is specified. Default is None.
1136
+
1137
+ tracing_mode (str): If "symbolic", turn on dynamic shapes support. Default is "symbolic".
1138
+
1139
+ constraints: [DEPRECATED: use ``dynamic_shapes`` instead, see below]
1140
+ An optional list of constraints on the dynamic arguments
1141
+ that specify their possible range of shapes. By default, shapes of
1142
+ input torch.Tensors are assumed to be static. If an input torch.Tensor
1143
+ is expected to have dynamic shapes, please use :func:`dynamic_dim`
1144
+ to define :class:`Constraint` objects that specify the dynamics and the possible
1145
+ range of shapes. See :func:`dynamic_dim` docstring for examples on
1146
+ how to use it.
1147
+
1148
+ dynamic_shapes:
1149
+ An optional argument where the type should either be:
1150
+ 1) a dict from argument names of ``f`` to their dynamic shape specifications,
1151
+ 2) a tuple that specifies dynamic shape specifications for each input in original order.
1152
+ If you are specifying dynamism on keyword args, you will need to pass them in the order that
1153
+ is defined in the original function signature.
1154
+
1155
+ The dynamic shape of a tensor argument can be specified as either
1156
+ (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
1157
+ not required to include static dimension indices in this dict, but when they are,
1158
+ they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
1159
+ where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
1160
+ are denoted by None. Arguments that are dicts or tuples / lists of tensors are
1161
+ recursively specified by using mappings or sequences of contained specifications.
1162
+
1163
+ same_signature (bool): If True, rewrite the returned graph's signature to be the same as f.
1164
+
1165
+ disable_constraint_solver (bool): Whether the dim constraint solver must be disabled.
1166
+
1167
+ Returns:
1168
+ A function that, given args and kwargs, returns a tuple of (graph, guards)
1169
+ Graph: An FX graph representing the execution of the input PyTorch function with the provided arguments and options.
1170
+ Guards: The guards we accumulated during tracing f above
1171
+
1172
+ Raises:
1173
+ AssertionError: If decomposition_table is specified without setting aten_graph=True,
1174
+ or if graph breaks during tracing in export.
1175
+
1176
+ AssertionError: If Dynamo input and output is not consistent with traced input/output.
1177
+
1178
+ Note - this headerdoc was authored by ChatGPT, with slight modifications by the author.
1179
+ """
1180
+ if _log_export_usage:
1181
+ log_export_usage(event="export.private_api", flags={"_dynamo"})
1182
+
1183
+ # Deal with "local variable referenced before assignment"
1184
+ _f = f
1185
+ _assume_static_by_default = assume_static_by_default
1186
+
1187
+ def inner(*args, **kwargs):
1188
+ nonlocal constraints
1189
+ if constraints is not None:
1190
+ if _log_export_usage:
1191
+ warnings.warn(
1192
+ "Using `constraints` to specify dynamic shapes for export is DEPRECATED "
1193
+ "and will not be supported in the future. "
1194
+ "Please use `dynamic_shapes` instead (see docs on `torch.export.export`).",
1195
+ DeprecationWarning,
1196
+ stacklevel=2,
1197
+ )
1198
+ else:
1199
+ constraints = _process_dynamic_shapes(_f, args, kwargs, dynamic_shapes)
1200
+ f = _f
1201
+ assume_static_by_default = _assume_static_by_default
1202
+ check_if_dynamo_supported()
1203
+ torch._C._log_api_usage_once("torch._dynamo.export")
1204
+ if decomposition_table is not None:
1205
+ assert (
1206
+ aten_graph
1207
+ ), "Specifying a decomposition_table or tracing mode is illegal without setting aten_graph=True"
1208
+ if pre_dispatch:
1209
+ assert aten_graph, "pre_dispatch=True can only be used when aten_graph=True"
1210
+ f = innermost_fn(f)
1211
+ call_to_inspect = f.forward if isinstance(f, torch.nn.Module) else f
1212
+ original_signature = inspect.signature(call_to_inspect)
1213
+ graph = None
1214
+ out_guards = None
1215
+ graph_captured_input = None
1216
+ graph_captured_result: Optional[Tuple[torch.Tensor, ...]] = None
1217
+ fake_mode = None
1218
+
1219
+ def guard_export_print(guards: _guards.GuardsSet):
1220
+ nonlocal out_guards
1221
+ assert (
1222
+ out_guards is None
1223
+ ), "whole graph export entails exactly one guard export"
1224
+ out_guards = guards
1225
+
1226
+ example_inputs = []
1227
+
1228
+ def dynamo_normalization_capturing_compiler(
1229
+ gm: torch.fx.GraphModule, inner_example_inputs
1230
+ ):
1231
+ nonlocal graph
1232
+ assert (
1233
+ graph is None
1234
+ ), "Tried to emit a second graph during export. Tracing through 'f' must produce a single graph."
1235
+ graph = gm
1236
+
1237
+ nonlocal fake_mode, example_inputs
1238
+ # NB: do NOT pass inner_example_inputs here, we are detecting the
1239
+ # Dynamo allocated fake mode, which should be DISTINCT from a
1240
+ # potential outer ambient fake mode which the user provided.
1241
+ # example_inputs is always the user specified inputs, so they
1242
+ # would have the wrong fake mode attached to them
1243
+ fake_mode = _guards.detect_fake_mode()
1244
+ example_inputs = inner_example_inputs
1245
+
1246
+ def result_capturing_wrapper(*graph_inputs):
1247
+ nonlocal graph_captured_result
1248
+ nonlocal graph_captured_input
1249
+
1250
+ graph_captured_input = graph_inputs
1251
+ assert graph is not None
1252
+
1253
+ named_parameters = dict(graph.named_parameters(remove_duplicate=False))
1254
+ named_buffers = dict(graph.named_buffers(remove_duplicate=False))
1255
+
1256
+ ambient_fake_mode = (
1257
+ _guards.detect_fake_mode(graph_inputs)
1258
+ if _guards.detect_fake_mode(graph_inputs) is not None
1259
+ else fake_mode
1260
+ )
1261
+
1262
+ with ambient_fake_mode, enable_python_dispatcher():
1263
+ params_and_buffers = {
1264
+ **named_parameters,
1265
+ **named_buffers,
1266
+ }
1267
+ fake_params_buffers = dict()
1268
+
1269
+ for name, value in params_and_buffers.items():
1270
+ fake_params_buffers[name] = ambient_fake_mode.from_tensor(
1271
+ value, static_shapes=True
1272
+ )
1273
+
1274
+ fake_graph_inputs = pytree.tree_map(
1275
+ ambient_fake_mode.from_tensor, graph_inputs
1276
+ )
1277
+ graph_captured_result = torch.func.functional_call(
1278
+ graph, fake_params_buffers, fake_graph_inputs
1279
+ )
1280
+
1281
+ return graph_captured_result
1282
+
1283
+ return result_capturing_wrapper
1284
+
1285
+ # Note: This is needed by rewrite_signature. We need to put it before
1286
+ # optimize_assert since the user program may mutate the inputs.
1287
+ flat_args, in_spec = pytree.tree_flatten((args, kwargs))
1288
+
1289
+ remove_from_cache(f)
1290
+ constraint_violation_error = None
1291
+ if tracing_mode != "symbolic":
1292
+ assume_static_by_default = True
1293
+ with config.patch(
1294
+ specialize_int=True,
1295
+ assume_static_by_default=assume_static_by_default,
1296
+ automatic_dynamic_shapes=False,
1297
+ capture_dynamic_output_shape_ops=True,
1298
+ capture_scalar_outputs=True,
1299
+ ):
1300
+ opt_f = optimize_assert(
1301
+ dynamo_normalization_capturing_compiler,
1302
+ hooks=Hooks(
1303
+ guard_export_fn=guard_export_print,
1304
+ guard_fail_fn=None,
1305
+ ),
1306
+ export=True,
1307
+ export_constraints=constraints,
1308
+ )(f)
1309
+ # TODO(voz): We may have instances of `f` that mutate inputs, we should track side effects and reject.
1310
+ try:
1311
+ result_traced = opt_f(*args, **kwargs)
1312
+ except ConstraintViolationError as e:
1313
+ constraint_violation_error = e
1314
+ remove_from_cache(f)
1315
+
1316
+ if (
1317
+ not disable_constraint_solver
1318
+ and (shape_env := getattr(fake_mode, "shape_env", None)) is not None
1319
+ and (dim_constraints := shape_env.dim_constraints) is not None
1320
+ and not isinstance(
1321
+ call_to_inspect, (torch._ops.OpOverloadPacket, torch._ops.OpOverload)
1322
+ )
1323
+ and not trace_rules.check(call_to_inspect)
1324
+ ):
1325
+ dim_constraints.solve()
1326
+ dim_constraints.remove_redundant_dynamic_results()
1327
+ forced_specializations = dim_constraints.forced_specializations()
1328
+ msg = dim_constraints.prettify_results(
1329
+ original_signature, constraint_violation_error, forced_specializations
1330
+ )
1331
+ if constraint_violation_error:
1332
+ constraint_violation_error.args = (
1333
+ constraint_violation_error.args[0] + msg,
1334
+ )
1335
+ else:
1336
+ if forced_specializations:
1337
+ constraint_violation_error = ConstraintViolationError(msg)
1338
+ else:
1339
+ log.info(
1340
+ "Summary of dimension constraints:%s",
1341
+ msg,
1342
+ )
1343
+
1344
+ # Error if we have any constraints on static values
1345
+ for k in shape_env.var_to_range.keys():
1346
+ if isinstance(k, sympy.Integer):
1347
+ constraint_violation_error = ConstraintViolationError(
1348
+ f"{''.join(traceback.format_list(shape_env.var_to_stack[k]))}\n"
1349
+ "It appears that you're trying to set a constraint on a "
1350
+ f"value which we evaluated to have a static value of {k}. "
1351
+ 'Set TORCH_LOGS="+export" for more information.'
1352
+ )
1353
+ if constraint_violation_error:
1354
+ raise constraint_violation_error
1355
+
1356
+ assert (
1357
+ graph is not None
1358
+ ), "Failed to produce a graph during tracing as no tensor operations were found."
1359
+ assert hasattr(graph, "_source_to_user_stacks")
1360
+ assert out_guards is not None, "Failed to produce guards during tracing"
1361
+ assert fake_mode is not None
1362
+
1363
+ log.info(
1364
+ "Dynamo captured graph:\n\n%s", graph.print_readable(print_output=False)
1365
+ )
1366
+
1367
+ # This check needs to happen before aten_graph
1368
+ # because placeholder's _source_node attribute is not preserved by make_fx
1369
+ if same_signature:
1370
+ check_signature_rewritable(graph)
1371
+
1372
+ # NB: This is mostly hitting the cache; Dynamo already converted these
1373
+ example_fake_inputs = [fake_mode.from_tensor(t) for t in example_inputs]
1374
+
1375
+ if aten_graph:
1376
+ # Running graph with interpreter is needed for propagating the stack_trace
1377
+ def graph_with_interpreter(*args):
1378
+ with torch.fx.traceback.preserve_node_meta():
1379
+ return torch.fx.Interpreter(graph).run(*args)
1380
+
1381
+ with maybe_disable_fake_tensor_mode(), enable_python_dispatcher(), (
1382
+ fake_mode
1383
+ ):
1384
+ try:
1385
+ graph = make_fx(
1386
+ graph_with_interpreter,
1387
+ decomposition_table=decomposition_table,
1388
+ tracing_mode="real",
1389
+ _allow_non_fake_inputs=True,
1390
+ pre_dispatch=pre_dispatch,
1391
+ _allow_fake_constant=False,
1392
+ )(*example_fake_inputs)
1393
+ except CondOpArgsMismatchError as e:
1394
+ # Wrap the internal error to the user-facing error
1395
+ raise UserError( # noqa: TRY200
1396
+ UserErrorType.DYNAMIC_CONTROL_FLOW,
1397
+ str(e),
1398
+ case_name="cond_operands",
1399
+ )
1400
+
1401
+ assert graph is not None
1402
+ for node in graph.graph.nodes:
1403
+ if node.op == "get_attr" and isinstance(
1404
+ getattr(graph, node.target), torch.Tensor
1405
+ ):
1406
+ node.meta["val"] = fake_mode.from_tensor(
1407
+ getattr(graph, node.target), static_shapes=True
1408
+ )
1409
+
1410
+ if same_signature:
1411
+ flat_args_dynamic_dims = [
1412
+ {c.dim for c in (constraints or ()) if c.w_tensor() is x}
1413
+ for x in flat_args
1414
+ ]
1415
+ graph = rewrite_signature(
1416
+ original_signature,
1417
+ graph,
1418
+ fake_mode,
1419
+ flat_args,
1420
+ in_spec,
1421
+ example_fake_inputs,
1422
+ graph_captured_input,
1423
+ graph_captured_result,
1424
+ result_traced, # type: ignore[possibly-undefined]
1425
+ flat_args_dynamic_dims,
1426
+ )
1427
+ # Store constraints and inputs as metadata for user passes, e.g. turn constraints to runtime check
1428
+ assert graph is not None
1429
+ graph.meta["input_shape_constraints"] = (
1430
+ [constraint.serializable_spec for constraint in constraints]
1431
+ if constraints
1432
+ else []
1433
+ )
1434
+
1435
+ return ExportResult(graph, out_guards)
1436
+
1437
+ if extra_args or extra_kwargs:
1438
+ warnings.warn(
1439
+ "export(f, *args, **kwargs) is deprecated, use export(f)(*args, **kwargs) instead. "
1440
+ "If you don't migrate, we may break your export call in the future if your user defined kwargs "
1441
+ "conflict with future kwargs added to export(f)."
1442
+ )
1443
+ return inner(*extra_args, **extra_kwargs)
1444
+ else:
1445
+ return inner
1446
+
1447
+
1448
+ def optimize_assert(
1449
+ backend,
1450
+ *,
1451
+ hooks=Hooks(None, None),
1452
+ export=False,
1453
+ export_constraints=None,
1454
+ dynamic=None,
1455
+ ):
1456
+ """
1457
+ The same as `torch._dynamo.optimize(backend, nopython=True)`
1458
+ """
1459
+ backend = get_compiler_fn(backend)
1460
+
1461
+ # Find if backend has any extra context manager
1462
+ backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context)
1463
+
1464
+ return _optimize_catch_errors(
1465
+ convert_frame.convert_frame_assert(
1466
+ backend, export=export, export_constraints=export_constraints
1467
+ ),
1468
+ hooks,
1469
+ backend_ctx_ctor,
1470
+ export=export,
1471
+ dynamic=dynamic,
1472
+ )
1473
+
1474
+
1475
+ class TorchPatcher:
1476
+ @staticmethod
1477
+ @functools.lru_cache(None)
1478
+ def patch():
1479
+ # A better way to disable the following would be to decorate the source
1480
+ # functions with @torch._disable_dynamo. However, this causes issues
1481
+ # with torch.deploy internally.
1482
+ from .decorators import disable
1483
+
1484
+ torch.jit.trace = disable(torch.jit.trace)
1485
+ torch.jit.trace_module = disable(torch.jit.trace_module)
1486
+ torch.jit._get_trace_graph = disable(torch.jit._get_trace_graph)
1487
+ torch.fx._symbolic_trace.Tracer.trace = disable(
1488
+ torch.fx._symbolic_trace.Tracer.trace
1489
+ )
1490
+ torch.distributions.Distribution.set_default_validate_args(False)
1491
+
1492
+ from ..optim import (
1493
+ adadelta,
1494
+ adagrad,
1495
+ adam,
1496
+ adamax,
1497
+ adamw,
1498
+ asgd,
1499
+ lbfgs,
1500
+ nadam,
1501
+ radam,
1502
+ rmsprop,
1503
+ rprop,
1504
+ sgd,
1505
+ sparse_adam,
1506
+ )
1507
+
1508
+ optimizer_modules = {
1509
+ adadelta,
1510
+ adagrad,
1511
+ adam,
1512
+ adamax,
1513
+ adamw,
1514
+ asgd,
1515
+ lbfgs,
1516
+ nadam,
1517
+ radam,
1518
+ rmsprop,
1519
+ rprop,
1520
+ sgd,
1521
+ sparse_adam,
1522
+ }
1523
+
1524
+ for opt_mod in optimizer_modules:
1525
+ opt_name = opt_mod.__name__.split(".")[-1]
1526
+ fused_fn_name = f"_fused_{opt_name}"
1527
+ single_tensor_fn_name = f"_single_tensor_{opt_name}"
1528
+
1529
+ if hasattr(opt_mod, fused_fn_name):
1530
+ setattr(
1531
+ opt_mod, fused_fn_name, disable(getattr(opt_mod, fused_fn_name))
1532
+ )
1533
+
1534
+ optimizer_classes = [
1535
+ opt
1536
+ for opt in torch.optim.__dict__.values()
1537
+ if inspect.isclass(opt) and issubclass(opt, torch.optim.Optimizer)
1538
+ ]
1539
+
1540
+ # Note: we don't support sparsity or tracing through backwards
1541
+ excluded_optimizer_classes = {
1542
+ torch.optim.SparseAdam,
1543
+ torch.optim.LBFGS,
1544
+ }
1545
+
1546
+ for opt in optimizer_classes:
1547
+ if opt in excluded_optimizer_classes:
1548
+ opt.step = disable(opt.step)
1549
+
1550
+ if hasattr(opt, "_init_group"):
1551
+ opt._init_group = disable(opt._init_group)
1552
+
1553
+ @staticmethod
1554
+ def suppress_torch_distributed_warnings(fn):
1555
+ def inner_fn(*args, **kwargs):
1556
+ warnings.filterwarnings(
1557
+ "ignore", category=UserWarning, module="torch.distributed"
1558
+ )
1559
+ return fn(*args, **kwargs)
1560
+
1561
+ return inner_fn
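For orientation, the export/optimize_assert entry points above are easiest to follow from the caller's side. The sketch below is illustrative only: the toy function `fn` and its inputs are invented for this example, and it assumes the default `same_signature=True` path, so the returned graph accepts the original argument layout.

import torch
import torch._dynamo as dynamo

def fn(x, y):
    return (x + y).relu()

# export(fn) returns a callable; invoking it traces fn into a single FX graph
# and also returns the guards accumulated while tracing.
graph_module, guards = dynamo.export(fn)(torch.randn(3), torch.randn(3))

print(graph_module.print_readable(print_output=False))  # inspect the captured graph
out = graph_module(torch.randn(3), torch.randn(3))      # the graph is directly runnable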
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/hooks.py ADDED
@@ -0,0 +1,12 @@
1
+ import dataclasses
2
+
3
+ from typing import Callable, Optional
4
+
5
+ from torch._guards import GuardsSet
6
+ from .types import GuardFail
7
+
8
+
9
+ @dataclasses.dataclass
10
+ class Hooks:
11
+ guard_export_fn: Optional[Callable[[GuardsSet], None]] = None
12
+ guard_fail_fn: Optional[Callable[[GuardFail], None]] = None
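A small sketch of how these hooks are supplied in practice; the printing callback is made up for illustration, while the `hooks=` keyword of `optimize_assert` is the same wiring used by `export` in eval_frame.py above.

import torch._dynamo as dynamo
from torch._dynamo.hooks import Hooks

def on_guard_export(guards):
    # called with the GuardsSet accumulated for the captured graph
    print("guards exported:", guards)

hooks = Hooks(guard_export_fn=on_guard_export, guard_fail_fn=None)
opt = dynamo.optimize_assert("eager", hooks=hooks)  # pass the hooks to an entry point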
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/mutation_guard.py ADDED
@@ -0,0 +1,126 @@
1
+ # mypy: disable-error-code="method-assign"
2
+
3
+ import functools
4
+ import weakref
5
+
6
+ import torch.nn
7
+ from torch.nn import Module
8
+
9
+ from .utils import ExactWeakKeyDictionary, is_lazy_module
10
+
11
+
12
+ class MutationTracker:
13
+ db = ExactWeakKeyDictionary()
14
+
15
+ def __init__(self):
16
+ self.mutation_count = 0
17
+ self.watchers = []
18
+
19
+ def on_mutation(self, name):
20
+ self.mutation_count += 1
21
+ tmp = self.watchers
22
+ self.watchers = []
23
+ for ref in tmp:
24
+ guarded = ref()
25
+ if guarded is not None:
26
+ guarded.invalidate(ref)
27
+
28
+ def track(self, guarded_code):
29
+ self.watchers.append(weakref.ref(guarded_code))
30
+
31
+
32
+ def watch(obj, guarded_code):
33
+ """invalidate guarded_code when obj is mutated"""
34
+ ensure_patched(type(obj))
35
+
36
+ if obj not in MutationTracker.db:
37
+ MutationTracker.db[obj] = MutationTracker()
38
+ tracker = MutationTracker.db[obj]
39
+ tracker.track(guarded_code)
40
+
41
+
42
+ def ensure_patched(cls):
43
+ if getattr(cls, "___needs_mutation_patch", True):
44
+ cls.___needs_mutation_patch = False
45
+ original_setattr = cls.__setattr__
46
+
47
+ @functools.wraps(original_setattr)
48
+ def custom_setattr(self, key, value):
49
+ try:
50
+ MutationTracker.db[self].on_mutation(key)
51
+ except KeyError:
52
+ pass
53
+ return original_setattr(self, key, value)
54
+
55
+ cls.__setattr__ = custom_setattr
56
+
57
+
58
+ class GenerationTracker:
59
+ generation = 0
60
+ dynamic_classes = ExactWeakKeyDictionary()
61
+ generation_values = ExactWeakKeyDictionary()
62
+
63
+ @classmethod
64
+ def tag(cls, obj):
65
+ cls.generation_values[obj] = cls.generation
66
+
67
+ @staticmethod
68
+ def mark_class_dynamic(cls):
69
+ assert issubclass(cls, torch.nn.Module)
70
+ GenerationTracker.dynamic_classes[cls] = True
71
+
72
+ @classmethod
73
+ def get_generation_value(cls, obj):
74
+ if obj not in cls.generation_values:
75
+ return -1
76
+ return cls.generation_values[obj]
77
+
78
+ @classmethod
79
+ def check(cls, obj):
80
+ return (
81
+ obj in cls.generation_values
82
+ and cls.generation_values[obj] == cls.generation
83
+ )
84
+
85
+
86
+ def is_dynamic_nn_module(obj):
87
+ """Check for nn.Modules() created dynamically or mutated"""
88
+ if isinstance(obj, torch.nn.Module) and "forward" in obj.__dict__:
89
+ # A monkey patched `.forward` indicates something wacky is going on
90
+ return True
91
+ if hasattr(obj, "torchdynamo_force_dynamic"):
92
+ return obj.torchdynamo_force_dynamic
93
+ if is_lazy_module(obj):
94
+ return False
95
+ dyn = GenerationTracker.dynamic_classes.get(type(obj)) or GenerationTracker.check(
96
+ obj
97
+ )
98
+ return dyn
99
+
100
+
101
+ def install_generation_tagging_init():
102
+ """
103
+ Monkey patch torch.nn.Module.__init__ and torch.nn.Module.__setstate__
104
+ so we can detect nn.Module instances created dynamically inside forward methods.
105
+ """
106
+
107
+ if getattr(Module, "___needs_generation_tag_patch", True):
108
+ init = Module.__init__
109
+
110
+ def patched_init(self, *args, **kwargs):
111
+ init(self, *args, **kwargs)
112
+ GenerationTracker.tag(self)
113
+
114
+ Module.__init__ = patched_init
115
+
116
+ setstate = Module.__setstate__
117
+
118
+ def patched_setstate(self, state):
119
+ setstate(self, state)
120
+ GenerationTracker.tag(self)
121
+
122
+ Module.__setstate__ = patched_setstate
123
+
124
+ Module.___needs_generation_tag_patch = False # type: ignore[attr-defined]
125
+
126
+ GenerationTracker.generation += 1
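For intuition, a minimal sketch of the invalidation flow. The `FakeGuardedCode` class is a stand-in invented for this example (real callers pass Dynamo's guarded-code objects), and note that `ensure_patched` permanently patches the class's `__setattr__`.

import torch
from torch._dynamo.mutation_guard import watch

class FakeGuardedCode:
    """Stand-in exposing the invalidate() hook that MutationTracker expects."""
    def __init__(self):
        self.valid = True
    def invalidate(self, ref):
        self.valid = False

mod = torch.nn.Linear(2, 2)
guarded = FakeGuardedCode()
watch(mod, guarded)       # patches type(mod).__setattr__ and registers a watcher
mod.some_flag = True      # any attribute assignment counts as a mutation
assert not guarded.valid  # the watcher was invalidated by on_mutation()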
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/replay_record.py ADDED
@@ -0,0 +1,110 @@
1
+ import dataclasses
2
+ from dataclasses import field
3
+ from types import CodeType, ModuleType
4
+ from typing import Any, Dict
5
+
6
+ from torch.utils._import_utils import import_dill
7
+
8
+ dill = import_dill()
9
+
10
+
11
+ @dataclasses.dataclass
12
+ class ModuleRecord:
13
+ module: ModuleType
14
+ accessed_attrs: Dict[str, Any] = field(default_factory=dict)
15
+
16
+
17
+ @dataclasses.dataclass
18
+ class DummyModule:
19
+ name: str
20
+ is_torch: bool = False
21
+
22
+ @property
23
+ def __name__(self):
24
+ return self.name
25
+
26
+
27
+ @dataclasses.dataclass
28
+ class ExecutionRecord:
29
+ code: CodeType
30
+ globals: Dict[str, Any] = field(default_factory=dict)
31
+ locals: Dict[str, Any] = field(default_factory=dict)
32
+ builtins: Dict[str, Any] = field(default_factory=dict)
33
+ code_options: Dict[str, Any] = field(default_factory=dict)
34
+
35
+ def dump(self, f):
36
+ assert dill is not None, "replay_record requires `pip install dill`"
37
+ dill.dump(self, f)
38
+
39
+ @classmethod
40
+ def load(cls, f):
41
+ assert dill is not None, "replay_record requires `pip install dill`"
42
+ return dill.load(f)
43
+
44
+
45
+ @dataclasses.dataclass
46
+ class ExecutionRecorder:
47
+ LOCAL_MOD_PREFIX = "___local_mod_"
48
+
49
+ code: CodeType
50
+ globals: Dict[str, Any] = field(default_factory=dict)
51
+ locals: Dict[str, Any] = field(default_factory=dict)
52
+ builtins: Dict[str, Any] = field(default_factory=dict)
53
+ code_options: Dict[str, Any] = field(default_factory=dict)
54
+ name_to_modrec: Dict[str, Any] = field(default_factory=dict)
55
+
56
+ def add_local_var(self, name, var):
57
+ if isinstance(var, ModuleType):
58
+ self.locals[name] = self._add_mod(var)
59
+ else:
60
+ self.locals[name] = var
61
+
62
+ def add_global_var(self, name, var):
63
+ if isinstance(var, ModuleType):
64
+ self.globals[name] = self._add_mod(var)
65
+ else:
66
+ self.globals[name] = var
67
+
68
+ def add_local_mod(self, name, mod):
69
+ assert isinstance(mod, ModuleType)
70
+
71
+ self.add_global_var(name, mod)
72
+
73
+ def record_module_access(self, mod, name, val):
74
+ if isinstance(val, ModuleType):
75
+ self.name_to_modrec[mod.__name__].accessed_attrs[name] = self._add_mod(val)
76
+ return
77
+
78
+ if mod.__name__ in self.name_to_modrec:
79
+ self.name_to_modrec[mod.__name__].accessed_attrs[name] = val
80
+
81
+ def get_record(self):
82
+ return ExecutionRecord(
83
+ self.code,
84
+ ExecutionRecorder._resolve_modules(self.globals),
85
+ ExecutionRecorder._resolve_modules(self.locals),
86
+ self.builtins.copy(),
87
+ self.code_options.copy(),
88
+ )
89
+
90
+ def _add_mod(self, mod):
91
+ if mod.__name__ not in self.name_to_modrec:
92
+ self.name_to_modrec[mod.__name__] = ModuleRecord(mod)
93
+
94
+ return self.name_to_modrec[mod.__name__]
95
+
96
+ # Convert ModuleRecords -> DummyModule tree
97
+ @classmethod
98
+ def _resolve_modules(cls, vars):
99
+ def resolve_module(var):
100
+ if not isinstance(var, ModuleRecord):
101
+ return var
102
+
103
+ dummy_mod = DummyModule(var.module.__name__)
104
+ for attr_name, attr_value in var.accessed_attrs.items():
105
+ attr_value = resolve_module(attr_value)
106
+ dummy_mod.__setattr__(attr_name, attr_value)
107
+
108
+ return dummy_mod
109
+
110
+ return {k: resolve_module(v) for k, v in vars.items()}
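A short sketch of how the recorder is meant to be driven. The frame data below is fabricated for illustration; during tracing, Dynamo supplies the code object, globals, locals, and module accesses of a real frame.

import math
from torch._dynamo.replay_record import ExecutionRecorder

def sample():          # only used to obtain a CodeType object
    return math.sqrt(4.0)

recorder = ExecutionRecorder(code=sample.__code__)
recorder.add_global_var("math", math)          # modules are stored as ModuleRecords
recorder.add_local_var("x", 4.0)               # plain values are kept as-is
recorder.record_module_access(math, "sqrt", math.sqrt)

record = recorder.get_record()                 # ModuleRecords become DummyModules
print(type(record.globals["math"]).__name__)   # -> DummyModule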
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc ADDED
Binary file (24.3 kB).
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_dynamo.cpython-310.pyc ADDED
Binary file (13.2 kB).
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py ADDED
@@ -0,0 +1,932 @@
1
+ import argparse
2
+ import copy
3
+ import functools
4
+ import io
5
+ import logging
6
+ import os
7
+ import shutil
8
+ import subprocess
9
+ import sys
10
+ import textwrap
11
+ import uuid
12
+ from importlib import import_module
13
+ from tempfile import TemporaryFile
14
+ from typing import Any, Callable, Dict, Union
15
+
16
+ import torch
17
+ import torch.fx as fx
18
+ import torch.nn as nn
19
+ from torch._dynamo.debug_utils import (
20
+ _cuda_system_info_comment,
21
+ AccuracyError,
22
+ backend_accuracy_fails,
23
+ BuckTargetWriter,
24
+ cast_to_fp64,
25
+ extra_imports,
26
+ generate_config_string,
27
+ helper_for_dump_minify,
28
+ InputReader,
29
+ InputWriter,
30
+ MAX_CONSTANT_NUMEL_INLINE,
31
+ minifier_dir,
32
+ NNModuleToString,
33
+ NopInputReader,
34
+ same_two_models,
35
+ )
36
+ from torch._dynamo.utils import clone_inputs, counters, same
37
+ from torch.fx.experimental.proxy_tensor import make_fx
38
+ from torch.fx.experimental.symbolic_shapes import (
39
+ fx_placeholder_targets,
40
+ has_free_symbols,
41
+ )
42
+ from torch.hub import tqdm
43
+
44
+ from .. import config
45
+
46
+ log = logging.getLogger(__name__)
47
+
48
+
49
+ inductor_config = import_module("torch._inductor.config")
50
+ use_buck = inductor_config.is_fbcode()
51
+
52
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
53
+ # MAIN ENTRY POINT
54
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
55
+
56
+
57
+ def wrap_compiler_debug(unconfigured_compiler_fn, compiler_name: str):
58
+ """
59
+ Minifier for Fx Graph modules after Aot Autograd has finished. We wrap both
60
+ the forward and backward calls separately with the backend compiler_fn - like
61
+ inductor or nvfuser. Intercepting after Aot Autograd presents a neat
62
+ abstraction, where all the params are lifted as graph inputs, making it easy
63
+ to save the graph as a string.
64
+ """
65
+
66
+ @functools.wraps(unconfigured_compiler_fn)
67
+ def debug_wrapper(gm, example_inputs, **kwargs):
68
+ from torch._subclasses import FakeTensorMode
69
+
70
+ compiler_fn = functools.partial(unconfigured_compiler_fn, **kwargs)
71
+
72
+ from torch._functorch.aot_autograd import get_aot_graph_name
73
+
74
+ graph_name = get_aot_graph_name()
75
+
76
+ # TODO: why do we need to deepcopy the original graph?
77
+ orig_graph = copy.deepcopy(gm.graph)
78
+ assert config.repro_after in ("dynamo", "aot", None)
79
+
80
+ try:
81
+ # Call the compiler_fn - which is either aot_autograd or inductor
82
+ # with fake inputs
83
+ inner_compiled_fn = compiler_fn(gm, example_inputs)
84
+ except Exception as e:
85
+ # TODO: Failures here are troublesome because no real inputs,
86
+ # need a different serialization strategy
87
+ if config.repro_after == "aot":
88
+ if config.repro_level == 1:
89
+ dump_compiler_graph_state(
90
+ fx.GraphModule(gm, orig_graph),
91
+ example_inputs,
92
+ compiler_name,
93
+ )
94
+ elif config.repro_level == 2:
95
+ dump_to_minify(
96
+ fx.GraphModule(gm, orig_graph),
97
+ example_inputs,
98
+ compiler_name,
99
+ )
100
+ log.error("CompilerError")
101
+ raise
102
+
103
+ # We may run regular PyTorch compute that may trigger Dynamo, do NOT
104
+ # recursively attempt to accuracy minify in that case!
105
+ def deferred_for_real_inputs(real_inputs):
106
+ # This is a bit obscure: if we recursively try to accuracy minify
107
+ # the SAME function, this would trigger. But most of the time
108
+ # we should never hit this branch
109
+ if config.repro_after != "aot":
110
+ return inner_compiled_fn(real_inputs)
111
+ with config.patch(repro_after=None):
112
+ return inner_debug_fn(real_inputs)
113
+
114
+ def inner_debug_fn(real_inputs):
115
+ """
116
+ Aot Autograd fw_compiler and bw_compiler can have fake tensors. So,
117
+ example_inputs can be fake tensors. We can call compiler_fn (which is
118
+ inductor or nvfuser) with fake tensors but the actually compiled_fn
119
+ should be called with real tensors. Therefore, the actual invocation
120
+ is deferred.
121
+ """
122
+ # Copy the tensor attrs like shape, stride etc by converting to Fake Tensor
123
+ # because inductor clears the tensor list in its codegen. And example_inputs
124
+ # are available only for the first invocation.
125
+ fake_mode = FakeTensorMode()
126
+ copy_tensor_attrs = [
127
+ fake_mode.from_tensor(x) if isinstance(x, torch.Tensor) else x
128
+ for x in real_inputs
129
+ ]
130
+ if config.repro_level == 3:
131
+ # Always dump the original module in case we have segfaults
132
+ dump_to_minify(
133
+ fx.GraphModule(gm, orig_graph), real_inputs, compiler_name
134
+ )
135
+
136
+ if config.repro_level == 4:
137
+ if compiler_name != "inductor":
138
+ raise NotImplementedError(
139
+ "Accuracy minification is supported for inductor only"
140
+ )
141
+ if backend_aot_accuracy_fails(gm, real_inputs, compiler_fn):
142
+ log.warning(
143
+ "Accuracy failed for the AOT Autograd graph %s", graph_name
144
+ )
145
+ dump_compiler_graph_state(
146
+ fx.GraphModule(gm, orig_graph),
147
+ real_inputs,
148
+ f"{compiler_name}_accuracy",
149
+ )
150
+ dump_to_minify(
151
+ fx.GraphModule(gm, orig_graph),
152
+ real_inputs,
153
+ f"{compiler_name}_accuracy",
154
+ )
155
+ raise AccuracyError("Bad accuracy detected")
156
+ else:
157
+ # Call the compiled function with real inputs
158
+ return inner_compiled_fn(real_inputs)
159
+ else:
160
+ try:
161
+ # Call the compiled function with real inputs
162
+ out = inner_compiled_fn(real_inputs)
163
+ # sync cuda kernels to ensure IMA detection
164
+ for arg in example_inputs:
165
+ if isinstance(arg, torch.Tensor) and arg.is_cuda:
166
+ torch.cuda.synchronize()
167
+ break
168
+ return out
169
+ except Exception as e:
170
+ if config.repro_level == 1:
171
+ dump_compiler_graph_state(
172
+ fx.GraphModule(gm, orig_graph),
173
+ copy_tensor_attrs,
174
+ compiler_name,
175
+ )
176
+ elif config.repro_level == 2:
177
+ dump_to_minify(
178
+ fx.GraphModule(gm, orig_graph),
179
+ copy_tensor_attrs,
180
+ compiler_name,
181
+ )
182
+ raise
183
+
184
+ if config.repro_after == "aot":
185
+ compiled_fn = deferred_for_real_inputs
186
+ compiled_fn._boxed_call = True # type: ignore[attr-defined]
187
+ return compiled_fn
188
+ else:
189
+ return inner_compiled_fn
190
+
191
+ return debug_wrapper
192
+
193
+
194
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
195
+ # DUMP REPROS
196
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
197
+
198
+
199
+ def generate_compiler_repro_string(gm, args, *, stable_output=False, save_dir=None):
200
+ model_str = textwrap.dedent(
201
+ f"""
202
+ import torch
203
+ from torch import tensor, device
204
+ import torch.fx as fx
205
+ from torch._dynamo.testing import rand_strided
206
+ from math import inf
207
+ import torch._inductor.inductor_prims
208
+
209
+ {generate_config_string(stable_output=stable_output)}
210
+
211
+ isolate_fails_code_str = None
212
+
213
+ {extra_imports}
214
+
215
+ """
216
+ )
217
+ if not stable_output:
218
+ model_str += f"# torch version: {torch.version.__version__}\n"
219
+ if hasattr(torch.version, "cuda"):
220
+ model_str += f"# torch cuda version: {torch.version.cuda}\n"
221
+ if hasattr(torch.version, "git_version"):
222
+ model_str += f"# torch git version: {torch.version.git_version}\n\n\n"
223
+ model_str += _cuda_system_info_comment()
224
+
225
+ model_str += NNModuleToString.convert(gm)
226
+
227
+ # get hint shape/stride when dynamic shape enabled
228
+ def hint_if_symint(x):
229
+ return tuple(i.node.hint if isinstance(i, torch.SymInt) else i for i in x)
230
+
231
+ writer = InputWriter(save_dir)
232
+ for placeholder, arg in zip(fx_placeholder_targets(gm), args):
233
+ if isinstance(arg, (int, torch.SymInt)):
234
+ writer.symint(placeholder, arg)
235
+ elif isinstance(arg, torch.Tensor):
236
+ # TODO: improve these names with FQN
237
+ writer.tensor(placeholder, arg)
238
+ else:
239
+ raise TypeError(f"arg is neither SymInt/int nor torch.Tensor, {arg}")
240
+
241
+ model_str += "\n".join(writer.lines()) + "\n"
242
+
243
+ model_str += "mod = Repro()\n"
244
+ return model_str
245
+
246
+
247
+ def save_graph_repro(
248
+ fd,
249
+ gm,
250
+ args,
251
+ compiler_name,
252
+ *,
253
+ stable_output=False,
254
+ save_dir=None,
255
+ command="run",
256
+ accuracy=None,
257
+ tracing_mode=None,
258
+ check_str=None,
259
+ ):
260
+ fd.write(
261
+ generate_compiler_repro_string(
262
+ gm,
263
+ args,
264
+ stable_output=stable_output,
265
+ save_dir=save_dir,
266
+ )
267
+ )
268
+ if accuracy is None:
269
+ accuracy = "_accuracy" in compiler_name
270
+ if tracing_mode is None:
271
+ tracing_mode = "real"
272
+ if any(has_free_symbols(a) for a in args):
273
+ tracing_mode = "symbolic"
274
+ fd.write("if __name__ == '__main__':\n")
275
+ fd.write(" from torch._dynamo.repro.after_aot import run_repro\n")
276
+ fd.write(
277
+ f" with torch.no_grad():\n"
278
+ f" run_repro(mod, load_args, accuracy={accuracy!r}, command={command!r}, "
279
+ f"save_dir={save_dir!r}, tracing_mode={tracing_mode!r}, check_str={check_str!r}"
280
+ ")\n"
281
+ )
282
+
283
+
284
+ def dump_compiler_graph_state(gm, args, compiler_name, *, accuracy=None):
285
+ subdir = os.path.join(minifier_dir(), "checkpoints")
286
+ if not os.path.exists(subdir):
287
+ os.makedirs(subdir, exist_ok=True)
288
+ file_name = os.path.join(subdir, f"{len(gm.graph.nodes)}.py")
289
+ log.warning(
290
+ "Writing checkpoint with %s nodes to %s", len(gm.graph.nodes), file_name
291
+ )
292
+ with open(file_name, "w") as fd:
293
+ save_graph_repro(
294
+ fd, gm, args, compiler_name, save_dir=subdir, accuracy=accuracy
295
+ )
296
+ curdir = os.getcwd()
297
+ repro_path = os.path.join(curdir, "repro.py")
298
+ try:
299
+ shutil.copyfile(file_name, repro_path)
300
+ log.warning("Copying repro file for convenience to %s", repro_path)
301
+ if use_buck:
302
+ BuckTargetWriter(file_name).write()
303
+ except OSError:
304
+ log.warning("No write permissions for %s", repro_path)
305
+ pass
306
+
307
+
308
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
309
+ # DUMP MINIFIER
310
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
311
+
312
+
313
+ def dump_to_minify(gm, args, compiler_name: str):
314
+ out = io.StringIO()
315
+ # TODO: factor this out
316
+ subdir = os.path.join(minifier_dir(), "checkpoints")
317
+ if not os.path.exists(subdir):
318
+ os.makedirs(subdir, exist_ok=True)
319
+ save_graph_repro(out, gm, args, compiler_name, save_dir=subdir, command="minify")
320
+ return helper_for_dump_minify(out.getvalue())
321
+
322
+
323
+ def isolate_fails(
324
+ fx_g,
325
+ args,
326
+ compiler_name: str,
327
+ env=None,
328
+ save_dir=None,
329
+ accuracy=None,
330
+ tracing_mode=None,
331
+ check_str=None,
332
+ ):
333
+ if env is None:
334
+ env = {}
335
+ subdir = os.path.join(os.getcwd(), "isolate")
336
+ if not os.path.exists(subdir):
337
+ os.makedirs(subdir, exist_ok=True)
338
+ file_name = os.path.join(subdir, f"{str(uuid.uuid4())[:5]}.py")
339
+ with open(file_name, "w") as fd:
340
+ save_graph_repro(
341
+ fd,
342
+ fx_g,
343
+ args,
344
+ compiler_name,
345
+ save_dir=save_dir,
346
+ command="minifier-query",
347
+ accuracy=accuracy,
348
+ tracing_mode=tracing_mode,
349
+ check_str=check_str,
350
+ )
351
+ # with open(file_name, "r") as fd:
352
+ # print(fd.read())
353
+ new_env = os.environ.copy()
354
+ new_env = {**new_env, **env}
355
+ stdout, stderr = TemporaryFile(), TemporaryFile()
356
+
357
+ if use_buck:
358
+ cmd = BuckTargetWriter(file_name).write(print_msg=False)
359
+ else:
360
+ cmd = ["python", file_name]
361
+
362
+ p = subprocess.Popen(
363
+ cmd,
364
+ cwd=subdir,
365
+ stdout=stdout,
366
+ stderr=stderr,
367
+ env=new_env,
368
+ )
369
+ p.wait()
370
+
371
+ stdout.seek(0)
372
+ stderr.seek(0)
373
+ print(
374
+ textwrap.indent(stdout.read().decode("utf-8"), prefix=">> "), file=sys.stdout
375
+ )
376
+ print(
377
+ textwrap.indent(stderr.read().decode("utf-8"), prefix=">> "), file=sys.stderr
378
+ )
379
+ # print(f"Isolated test failed - {file_name}")
380
+ return p.returncode != 0
381
+
382
+
383
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
384
+ # MINIFIER TOOLS
385
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
386
+
387
+
388
+ def inductor_fails(fx_g, args, check_str=None):
389
+ has_cuda = False
390
+ for arg in args:
391
+ if isinstance(arg, torch.Tensor) and arg.is_cuda:
392
+ has_cuda = True
393
+ break
394
+
395
+ def sync():
396
+ if has_cuda:
397
+ # Ensures that segfaults are surfaced
398
+ torch.cuda.synchronize()
399
+
400
+ from torch._inductor.compile_fx import compile_fx_inner
401
+
402
+ try:
403
+ result = fx_g(*args)
404
+ assert isinstance(result, (tuple, list))
405
+ assert not any(isinstance(x, (tuple, list)) for x in result)
406
+ except Exception:
407
+ return False
408
+
409
+ sync()
410
+
411
+ try:
412
+ compile_mod = compile_fx_inner(fx_g, args)
413
+ compile_mod(args)
414
+ sync()
415
+ except Exception as e:
416
+ if check_str is not None and check_str not in repr(e):
417
+ return False
418
+ print(repr(e))
419
+ return True
420
+ return False
421
+
422
+
423
+ def inductor_accuracy_fails(
424
+ fx_g, args, check_str=None, *, require_fp64=False, ignore_non_fp=False
425
+ ):
426
+ from torch._inductor.compile_fx import compile_fx_inner
427
+
428
+ return backend_aot_accuracy_fails(
429
+ fx_g,
430
+ args,
431
+ compile_fx_inner,
432
+ require_fp64=require_fp64,
433
+ ignore_non_fp=ignore_non_fp,
434
+ )
435
+
436
+
437
+ backend_aot_accuracy_fails = functools.partial(backend_accuracy_fails, only_fwd=True)
438
+
439
+
440
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
441
+ # REPRO MAIN
442
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
443
+
444
+
445
+ def repro_common(options, mod, load_args):
446
+ # Invariant for graphs we generate with the repro script
447
+ assert not any(mod.named_parameters())
448
+ for n, b in mod.named_buffers():
449
+ if b.numel() > MAX_CONSTANT_NUMEL_INLINE:
450
+ log.warning(
451
+ "Constant %s was not serialized, generated random data instead. "
452
+ "If you think this is affecting you, please comment on "
453
+ "https://github.com/pytorch/pytorch/issues/100468",
454
+ n,
455
+ )
456
+
457
+ if not hasattr(load_args, "_version"):
458
+ log.warning(
459
+ "load_args does not have a _version attribute, please file a bug to PyTorch "
460
+ "and describe how you generate this repro script"
461
+ )
462
+ else:
463
+ if load_args._version > 0:
464
+ log.warning(
465
+ "load_args is version %s, but this version of PyTorch only supports "
466
+ "version 0. We will try to run it anyway but there may be an incompatibility; "
467
+ "if so, try upgrading your version of PyTorch.",
468
+ load_args._version,
469
+ )
470
+
471
+ nop_reader = NopInputReader()
472
+ load_args(nop_reader)
473
+
474
+ with tqdm(desc="Loading inputs", total=nop_reader.total) as pbar:
475
+ input_reader = InputReader(save_dir=options.save_dir, pbar=pbar)
476
+ load_args(input_reader)
477
+ args = input_reader.args
478
+
479
+ # Turn mod into a GraphModule the slow way
480
+ # TODO: speed this up
481
+ mod = make_fx(mod, tracing_mode=options.tracing_mode)(*args)
482
+
483
+ torch._inductor.config.generate_intermediate_hooks = True
484
+
485
+ return mod, args
486
+
487
+
488
+ ACCURACY_FAILS: Dict[str, Callable[[nn.Module, Any], bool]] = {
489
+ "": inductor_fails,
490
+ # This might look inverted but it's not. strict_accuracy means "we will
491
+ # minify any time we see anything that diverges", whereas accuracy is more
492
+ # conservative, and will only minify if there is a meaningful fp64
493
+ # divergence
494
+ "accuracy": functools.partial(
495
+ inductor_accuracy_fails, require_fp64=True, ignore_non_fp=True
496
+ ),
497
+ "strict_accuracy": inductor_accuracy_fails,
498
+ }
499
+
500
+
501
+ def repro_minifier_query(options, mod, load_args):
502
+ mod, args = repro_common(options, mod, load_args)
503
+ fail_fn = functools.partial(
504
+ ACCURACY_FAILS[options.accuracy], check_str=options.check_str
505
+ )
506
+ if fail_fn(mod, args):
507
+ sys.exit(1)
508
+ else:
509
+ sys.exit(0)
510
+
511
+
512
+ def repro_minify(options, mod, load_args):
513
+ from functorch.compile import minifier
514
+
515
+ mod, args = repro_common(options, mod, load_args)
516
+ compiler_name = "inductor_accuracy" if options.accuracy != "" else "inductor"
517
+
518
+ favored_device = 1 if torch.cuda.device_count() >= 2 else 0
519
+ env_variables = {"CUDA_VISIBLE_DEVICES": str(favored_device)}
520
+
521
+ module_fails: Any
522
+ if options.isolate:
523
+ module_fails = functools.partial(
524
+ isolate_fails,
525
+ env=env_variables,
526
+ compiler_name=compiler_name,
527
+ save_dir=options.save_dir,
528
+ accuracy=options.accuracy,
529
+ tracing_mode=options.tracing_mode,
530
+ )
531
+ else:
532
+ module_fails = ACCURACY_FAILS[options.accuracy]
533
+
534
+ minifier(
535
+ mod,
536
+ args,
537
+ module_fails=functools.partial(module_fails, check_str=options.check_str),
538
+ dump_state=functools.partial(
539
+ dump_compiler_graph_state, compiler_name=compiler_name
540
+ ),
541
+ save_dir=options.save_dir,
542
+ offload_to_disk=options.offload_to_disk,
543
+ skip_offload=options.skip_saving_eager_intermediates,
544
+ skip_sanity=options.skip_sanity,
545
+ max_granularity=options.max_granularity,
546
+ )
547
+
548
+
549
+ def repro_analyze(options, mod, load_args):
550
+ from torch._inductor.compile_fx import compile_fx_inner
551
+ from torch._inductor.hooks import intermediate_hook
552
+
553
+ mod, args = repro_common(options, mod, load_args)
554
+
555
+ # TODO: The logic for cloning inputs/models here is intentionally
556
+ # modeled off of run_fwd_maybe_bwd, but arguably it is better not to
557
+ # clone inputs (as you are doubling your effective GPU memory usage).
558
+ # It is certainly faster though! It probably makes sense to let the
559
+ # user specify the offload strategy.
560
+
561
+ with tqdm(desc="Compiling"):
562
+ compiled = compile_fx_inner(mod, args)
563
+ total = counters["inductor"]["intermediate_hooks"]
564
+
565
+ known_names = set()
566
+
567
+ def save_hook(name, val):
568
+ known_names.add(name)
569
+ if not options.skip_saving_inductor_intermediates:
570
+ writer.write_tensor(os.path.join("inductor", name), val)
571
+ pbar.update(1) # type: ignore[has-type]
572
+
573
+ writer = torch.utils._content_store.ContentStoreWriter(
574
+ options.save_dir, stable_hash=options.stable_hash
575
+ )
576
+ reader = torch.utils._content_store.ContentStoreReader(options.save_dir)
577
+
578
+ new_args = clone_inputs(args)
579
+ with intermediate_hook(save_hook), tqdm(
580
+ desc="Saving inductor intermediates", total=total
581
+ ) as pbar:
582
+ compiled(new_args)
583
+ assert not new_args
584
+
585
+ def compare_tuples(tuple1, tuple2):
586
+ diff_indices = [i for i in range(len(tuple1)) if tuple1[i] != tuple2[i]]
587
+ diff_values = [(tuple1[i], tuple2[i]) for i in diff_indices]
588
+
589
+ if not diff_values:
590
+ return None
591
+ else:
592
+ return " and ".join(f"{a} != {b}" for a, b in diff_values)
593
+
594
+ def check_hook(name, val):
595
+ meta = writer.compute_tensor_metadata(val)
596
+ meta2 = reader.read_tensor_metadata(os.path.join("inductor", name))
597
+ reason = compare_tuples(meta, meta2)
598
+ if reason is not None:
599
+ pbar.write(f"NONDETERMINISTIC INDUCTOR at {name} ({reason})")
600
+ pbar.update(1)
601
+
602
+ if not options.skip_check_deterministic:
603
+ new_args = clone_inputs(args)
604
+ with intermediate_hook(check_hook), tqdm(
605
+ desc="Checking inductor determinism", total=total
606
+ ) as pbar:
607
+ compiled(new_args)
608
+ assert not new_args
609
+
610
+ class WriterInterp(fx.Interpreter):
611
+ def __init__(self, mod, subdir):
612
+ super().__init__(mod)
613
+ self.subdir = subdir
614
+
615
+ def run_node(self, n):
616
+ r = super().run_node(n)
617
+ name = n.name
618
+ if name in known_names:
619
+ pbar.update(1)
620
+ writer.write_tensor(os.path.join(self.subdir, name), r)
621
+ return r
622
+
623
+ # NB: the module cast doesn't actually do anything, since there are no
624
+ # parameters/buffers on the module
625
+ if not options.skip_saving_float64_intermediates:
626
+ new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args))
627
+ with tqdm(desc="Saving float64 intermediates", total=total) as pbar:
628
+ WriterInterp(new_mod, "float64").boxed_run(new_args)
629
+ assert not new_args
630
+
631
+ class ExactReaderInterp(fx.Interpreter):
632
+ def run_node(self, n):
633
+ r = super().run_node(n)
634
+ name = n.name
635
+ if name in known_names:
636
+ meta = writer.compute_tensor_metadata(r)
637
+ meta2 = reader.read_tensor_metadata(os.path.join("float64", name))
638
+ reason = compare_tuples(meta, meta2)
639
+ if reason is not None:
640
+ pbar.write(f"NONDETERMINISTIC FLOAT64 at {name} ({reason})")
641
+ pbar.update(1)
642
+ return r
643
+
644
+ # TODO: check eager determinism
645
+
646
+ if not options.skip_check_deterministic:
647
+ new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args))
648
+ with tqdm(desc="Checking float64 determinism", total=total) as pbar:
649
+ ExactReaderInterp(new_mod).boxed_run(new_args)
650
+ assert not new_args
651
+
652
+ # Now that we've saved everything, interp through the eager graph
653
+ # and do comparisons
654
+ class ReaderInterp(fx.Interpreter):
655
+ def run_node(self, n):
656
+ r = super().run_node(n)
657
+ name = n.name
658
+ if name in known_names:
659
+ inductor = reader.read_tensor(os.path.join("inductor", name))
660
+ float64 = reader.read_tensor(os.path.join("float64", name))
661
+ logged = False
662
+
663
+ def log_error(msg, *args):
664
+ nonlocal logged
665
+ logged = True
666
+ pbar.write(f"DIVERGED at {name}: {msg % args}")
667
+
668
+ if not same(
669
+ r,
670
+ inductor,
671
+ float64,
672
+ tol=torch._dynamo.config.repro_tolerance,
673
+ equal_nan=True,
674
+ log_error=log_error,
675
+ ):
676
+ assert logged
677
+ pbar.update(1)
678
+ return r
679
+
680
+ with tqdm(desc="Checking divergence", total=total) as pbar:
681
+ ReaderInterp(mod).boxed_run(args)
682
+ assert not args
683
+
684
+
685
+ def repro_run(options, mod, load_args):
686
+ from torch._inductor.compile_fx import compile_fx_inner
687
+
688
+ mod, args = repro_common(options, mod, load_args)
689
+
690
+ from torch.cuda import synchronize
691
+
692
+ compiled = compile_fx_inner(mod, args)
693
+
694
+ if options.accuracy != "":
695
+ # We don't really respect --accuracy vs --strict-accuracy here, it
696
+ # seems counterintuitive
697
+ if not same_two_models(mod, compiled, args, only_fwd=True):
698
+ raise AccuracyError("Bad accuracy detected")
699
+ else:
700
+ need_sync = False
701
+ for arg in args:
702
+ if isinstance(arg, torch.Tensor) and arg.is_cuda:
703
+ need_sync = True
704
+ break
705
+ ref = compiled(list(args))
706
+ if need_sync:
707
+ synchronize() # ensure segfaults are surfaced
708
+ return lambda: compiled(list(args))
709
+
710
+
711
+ # TODO: lazily load the inputs or something, rather than cloning them
712
+ def run_repro(
713
+ mod,
714
+ load_args,
715
+ *,
716
+ command="run",
717
+ accuracy: Union[bool, str] = "",
718
+ save_dir=None,
719
+ tracing_mode=None,
720
+ patch_code=None,
721
+ check_str=None,
722
+ **kwargs,
723
+ ):
724
+ for k in kwargs:
725
+ log.warning(
726
+ "Unrecognized kwarg %s; perhaps this repro was made on a newer version of PyTorch",
727
+ k,
728
+ )
729
+
730
+ if accuracy is True:
731
+ accuracy = "accuracy"
732
+ elif accuracy is False:
733
+ accuracy = ""
734
+
735
+ if patch_code is not None:
736
+ log.warning(
737
+ "patch_code no longer works on this version of PyTorch, silently ignoring"
738
+ )
739
+
740
+ parser = argparse.ArgumentParser(
741
+ description=f"""\
742
+ An after_aot repro script, typically triggering a bug in PyTorch Inductor.
743
+ When run with no arguments, this script defaults to running '{command}'.
744
+ Extra flags may be available; to find out more, try '{command} --help'.
745
+ There are also alternate subcommands available, see below.
746
+
747
+ default settings on this script:
748
+ {accuracy=}
749
+ {tracing_mode=}
750
+ {save_dir=}
751
+ {check_str=}
752
+ """,
753
+ formatter_class=argparse.RawTextHelpFormatter,
754
+ )
755
+
756
+ def common_flags(parser):
757
+ accuracy_group = parser.add_mutually_exclusive_group()
758
+ accuracy_group.add_argument(
759
+ "--no-accuracy",
760
+ dest="accuracy",
761
+ action="store_const",
762
+ const="",
763
+ default=accuracy,
764
+ help="do not test accuracy, just run the module and see if it errors",
765
+ )
766
+ accuracy_group.add_argument(
767
+ "--accuracy",
768
+ action="store_const",
769
+ const="accuracy",
770
+ default=accuracy,
771
+ help="""\
772
+ test if the RMSE between the compiled module and the fp64 reference is greater
773
+ than that between eager and the fp64 reference. This is usually more reliable than the
774
+ standard allclose test, as we expect numeric differences from compiling, often
775
+ improving accuracy over eager. The RMSE test allows the compiled module to
776
+ diverge greatly from eager, as long as this divergence moves it closer to the
777
+ 'true' mathematical value of the network. Caveats: (1) double precision can
778
+ still suffer from rounding error, so it is not a perfect reference (see for
779
+ example 'Herbie: Automatically Improving Floating Point Accuracy' for
780
+ approaches that detect the necessary working precision and compute it in
781
+ arbitrary precision floating point); unfortunately, this is not practical for
782
+ tensor computation; (2) if there are not enough samples in the output being
783
+ compared, we may get unlucky and see a greater RMSE than eager; this
784
+ could be overcome by applying a more rigorous statistical test at some
785
+ p-value, which we leave for future work.
786
+ """,
787
+ )
788
+ accuracy_group.add_argument(
789
+ "--strict-accuracy",
790
+ dest="accuracy",
791
+ action="store_const",
792
+ const="strict_accuracy",
793
+ default=accuracy,
794
+ help="""\
795
+ by default, when doing accuracy minification we will reject reductions which
796
+ change the divergence from a floating point divergence to an integral/boolean
797
+ divergence. This is because some operations like ReLU involve temporarily
798
+ sharp boundaries that smooth out again afterwards; without requiring
799
+ divergence on floating point, the minifier will often fixate on a divergent
800
+ boolean tensor even though this is not the true source of the divergence.
801
+ However, rejecting these reductions makes it more difficult for the minifier
802
+ to make progress. Using this option will let the minifier progress for ALL
803
+ divergences--you just might not end up with a useful repro in the end.""",
804
+ )
805
+
806
+ parser.add_argument(
807
+ "--save-dir",
808
+ type=str,
809
+ default=save_dir,
810
+ metavar="DIR",
811
+ help="directory where saved inputs live",
812
+ )
813
+ parser.add_argument(
814
+ "--no-save-dir",
815
+ dest="save_dir",
816
+ action="store_const",
817
+ const=None,
818
+ help="don't use any directory for saved inputs",
819
+ )
820
+ parser.add_argument(
821
+ "--tracing-mode",
822
+ type=str,
823
+ metavar="{real,fake,symbolic}",
824
+ default=tracing_mode,
825
+ help="how to trace the repro module into a GraphModule with metadata",
826
+ )
827
+
828
+ subparsers = parser.add_subparsers(
829
+ dest="command", metavar="{run,minify,analyze}", required=True
830
+ )
831
+
832
+ parser_run = subparsers.add_parser(
833
+ "run",
834
+ help="just run the repro",
835
+ )
836
+ common_flags(parser_run)
837
+
838
+ parser_minify = subparsers.add_parser(
839
+ "minify", help="run the minifier on the repro"
840
+ )
841
+ common_flags(parser_minify)
842
+ parser_minify_isolate = parser_minify.add_mutually_exclusive_group()
843
+ parser_minify_isolate.add_argument(
844
+ "--isolate",
845
+ action="store_true",
846
+ default=True,
847
+ help="run in separate processes to avoid interference (default)",
848
+ )
849
+ parser_minify_isolate.add_argument(
850
+ "--no-isolate",
851
+ dest="isolate",
852
+ action="store_false",
853
+ help="speed up by running all compilation in same process",
854
+ )
855
+ parser_minify.add_argument(
856
+ "--skip-saving-eager-intermediates",
857
+ action="store_true",
858
+ help="skip saving eager intermediates on --minify",
859
+ )
860
+ # TODO: make this an option for --analyze too
861
+ parser_minify.add_argument(
862
+ "--offload-to-disk",
863
+ action="store_true",
864
+ help="during minification, offload delta debugging intermediates to disk. Use if you're OOMing",
865
+ )
866
+ parser_minify.add_argument(
867
+ "--skip-sanity",
868
+ action="store_true",
869
+ help="skip sanity check at beginning of minification on original graph",
870
+ )
871
+ parser_minify.add_argument(
872
+ "--max-granularity",
873
+ type=int,
874
+ default=None,
875
+ help="start at this granularity and work down; must be power of 2",
876
+ )
877
+ parser_minify.add_argument(
878
+ "--check-str",
879
+ type=str,
880
+ default=check_str,
881
+ help="require minified program to fail with error containing this string",
882
+ )
883
+
884
+ parser_analyze = subparsers.add_parser(
885
+ "analyze", help="run the accuracy analyzer on the repro"
886
+ )
887
+ common_flags(parser_analyze)
888
+ parser_analyze.add_argument(
889
+ "--skip-saving-inductor-intermediates",
890
+ action="store_true",
891
+ help="skip saving inductor intermediates on --analyze",
892
+ )
893
+ parser_analyze.add_argument(
894
+ "--skip-saving-float64-intermediates",
895
+ action="store_true",
896
+ help="skip saving float64 intermediates",
897
+ )
898
+ parser_analyze.add_argument(
899
+ "--skip-check-deterministic",
900
+ action="store_true",
901
+ help="skip checking that the network is deterministic",
902
+ )
903
+ parser_analyze.add_argument(
904
+ "--stable-hash",
905
+ action="store_true",
906
+ help="use SHA-1 checksum instead of fast (but possibly unsound) hash",
907
+ )
908
+
909
+ # Run the repro in the context of minification, inverting exit code meaning
910
+ parser_minifier_query = subparsers.add_parser(
911
+ "minifier-query",
912
+ )
913
+ common_flags(parser_minifier_query)
914
+ parser_minifier_query.add_argument(
915
+ "--check-str",
916
+ type=str,
917
+ default=check_str,
918
+ help="require minified program to fail with error containing this string",
919
+ )
920
+
921
+ args = None
922
+ if len(sys.argv) <= 1:
923
+ args = [command, *sys.argv[1:]]
924
+
925
+ options = parser.parse_args(args)
926
+ COMMAND_FNS = {
927
+ "minify": repro_minify,
928
+ "analyze": repro_analyze,
929
+ "minifier-query": repro_minifier_query,
930
+ "run": repro_run,
931
+ }
932
+ return COMMAND_FNS[options.command](options, mod, load_args)
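The run/minify/analyze (and minifier-query) subcommands above are dispatched through a plain dict keyed by the argparse subparser name. A minimal, self-contained sketch of that dispatch pattern follows; the toy handlers and the `demo` program name are illustrative only, not part of the PyTorch API.

import argparse

def toy_run(options):
    print("run: save_dir =", options.save_dir)

def toy_minify(options):
    print("minify: save_dir =", options.save_dir)

def common_flags(p):
    # shared flags are repeated on every subparser, as in the script above
    p.add_argument("--save-dir", default=None, metavar="DIR")

parser = argparse.ArgumentParser(prog="demo")
subparsers = parser.add_subparsers(dest="command", required=True)
common_flags(subparsers.add_parser("run"))
common_flags(subparsers.add_parser("minify"))

# The chosen subcommand name selects the handler, just like COMMAND_FNS above.
COMMAND_FNS = {"run": toy_run, "minify": toy_minify}
options = parser.parse_args(["minify", "--save-dir", "/tmp/inputs"])
COMMAND_FNS[options.command](options)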
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/repro/after_dynamo.py ADDED
@@ -0,0 +1,566 @@
1
+ import argparse
2
+ import copy
3
+ import functools
4
+ import logging
5
+ import os
6
+ import shutil
7
+ import sys
8
+ import textwrap
9
+ from importlib import import_module
10
+ from typing import Union
11
+
12
+ import torch
13
+ import torch.fx as fx
14
+
15
+ from torch._dynamo.debug_utils import (
16
+ AccuracyError,
17
+ backend_accuracy_fails,
18
+ BUCK_CMD_PREFIX,
19
+ BuckTargetWriter,
20
+ extra_imports,
21
+ generate_config_string,
22
+ helper_for_dump_minify,
23
+ InputReader,
24
+ InputWriter,
25
+ minifier_dir,
26
+ NNModuleToString,
27
+ NopInputReader,
28
+ run_fwd_maybe_bwd,
29
+ same_two_models,
30
+ )
31
+ from torch.fx.experimental.symbolic_shapes import fx_placeholder_targets
32
+ from torch.hub import tqdm
33
+
34
+ from .. import config
35
+ from ..backends.registry import lookup_backend, register_debug_backend
36
+ from ..debug_utils import clone_inputs_retaining_gradness
37
+
38
+ log = logging.getLogger(__name__)
39
+
40
+
41
+ inductor_config = import_module("torch._inductor.config")
42
+ use_buck = inductor_config.is_fbcode()
43
+
44
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
45
+ # MAIN ENTRY POINT
46
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
47
+
48
+
49
+ def wrap_backend_debug(unconfigured_compiler_fn, compiler_name: str):
50
+ """
51
+ A minifier decorator that wraps the TorchDynamo-produced Fx graph modules.
52
+ As opposed to wrap_compiler_debug, this wrapper intercepts at the
53
+ TorchDynamo-produced Fx GraphModule. This makes it backend-agnostic to some
54
+ degree, e.g., it is useful for minifying issues related to AOTAutograd
55
+ tracing. If an error is found, we minify and save the minified repro in
56
+ repro.tar.gz.
57
+ """
58
+
59
+ @functools.wraps(unconfigured_compiler_fn)
60
+ def debug_wrapper(gm, example_inputs, **kwargs):
61
+ compiler_fn = functools.partial(unconfigured_compiler_fn, **kwargs)
62
+ assert config.repro_after in ("dynamo", "aot", None)
63
+
64
+ if config.repro_after == "dynamo":
65
+
66
+ def add_paths(exc):
67
+ exc.minifier_path = os.path.join(minifier_dir(), "minifier_launcher.py")
68
+ if use_buck:
69
+ exc.buck_command = " ".join(
70
+ BUCK_CMD_PREFIX
71
+ + [BuckTargetWriter(exc.minifier_path).cmd_line_path]
72
+ )
73
+
74
+ if config.repro_level == 3:
75
+ dump_to_minify_after_dynamo(gm, example_inputs, compiler_name)
76
+
77
+ # Check for either accuracy (level 4) or other type of failures.
78
+ if config.repro_level == 4:
79
+ # Check Accuracy
80
+ compiled_gm = compiler_fn(copy.deepcopy(gm), example_inputs)
81
+ if backend_accuracy_fails(gm, example_inputs, compiler_fn):
82
+ log.warning(
83
+ "Accuracy failed for the TorchDynamo produced graph. Creating script to minify the error."
84
+ )
85
+ dump_to_minify_after_dynamo(
86
+ fx.GraphModule(gm, copy.deepcopy(gm.graph)),
87
+ example_inputs,
88
+ compiler_name,
89
+ )
90
+ exc = AccuracyError("Bad accuracy detected.")
91
+ add_paths(exc)
92
+ raise exc
93
+ else:
94
+ try:
95
+ compiled_gm = compiler_fn(copy.deepcopy(gm), example_inputs)
96
+ run_fwd_maybe_bwd(compiled_gm, example_inputs)
97
+ except Exception as exc:
98
+ log.warning(
99
+ "Compiled Fx GraphModule failed. Creating script to minify the error."
100
+ )
101
+ if config.repro_level == 1:
102
+ dump_state_fn = functools.partial(
103
+ dump_backend_state, compiler_name=compiler_name
104
+ )
105
+ dump_state_fn(
106
+ fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs
107
+ )
108
+ elif config.repro_level == 2:
109
+ dump_to_minify_after_dynamo(
110
+ fx.GraphModule(gm, copy.deepcopy(gm.graph)),
111
+ example_inputs,
112
+ compiler_name,
113
+ )
114
+ add_paths(exc)
115
+ raise
116
+ else:
117
+ compiled_gm = compiler_fn(gm, example_inputs)
118
+
119
+ return compiled_gm
120
+
121
+ debug_wrapper._torchdynamo_orig_callable = unconfigured_compiler_fn # type: ignore[attr-defined]
122
+ if hasattr(unconfigured_compiler_fn, "compiler_name"):
123
+ debug_wrapper.__name__ = unconfigured_compiler_fn.compiler_name
124
+ if hasattr(unconfigured_compiler_fn, "get_compiler_config"):
125
+ debug_wrapper.get_compiler_config = unconfigured_compiler_fn.get_compiler_config # type: ignore[attr-defined]
126
+ return debug_wrapper
127
+
128
+
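The decorator above follows a standard pattern: wrap the backend with functools.wraps, intercept the (gm, example_inputs) call to do extra work, then delegate to the original compiler. A stripped-down sketch of that convention, with an illustrative no-op backend:

import functools

def wrap_with_logging(compiler_fn):
    # Intercept the compile call, add side effects, then delegate,
    # mirroring the debug_wrapper structure above.
    @functools.wraps(compiler_fn)
    def debug_wrapper(gm, example_inputs, **kwargs):
        print(f"compiling a graph with {len(example_inputs)} example inputs")
        return compiler_fn(gm, example_inputs, **kwargs)

    debug_wrapper._torchdynamo_orig_callable = compiler_fn
    return debug_wrapper

# Illustrative usage with a backend that just returns the module unchanged:
identity_backend = wrap_with_logging(lambda gm, example_inputs: gm)
identity_backend(None, [1, 2, 3])  # logs, then returns gm (None here)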
129
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
130
+ # REPRO DUMPERS
131
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
132
+
133
+
134
+ def generate_dynamo_fx_repro_string(
135
+ gm,
136
+ args,
137
+ compiler_name,
138
+ check_accuracy=False,
139
+ *,
140
+ stable_output=False,
141
+ save_dir=None,
142
+ command="run",
143
+ ):
144
+ """
145
+ Generate a repro string for backend-agnostic minified version.
146
+ """
147
+
148
+ model_str = NNModuleToString.convert(gm)
149
+
150
+ # TODO: Figure out why torch.compile'd hash isn't working on this codepath
151
+ writer = InputWriter(save_dir, stable_hash=True)
152
+ for placeholder, arg in zip(fx_placeholder_targets(gm), args):
153
+ if isinstance(arg, (int, torch.SymInt)):
154
+ writer.symint(placeholder, arg)
155
+ elif isinstance(arg, torch.Tensor):
156
+ # TODO: improve these names with FQN
157
+ writer.tensor(placeholder, arg)
158
+ else:
159
+ raise TypeError(f"arg is neither SymInt/int nor torch.Tensor, {arg}")
160
+ load_args = "\n".join(writer.lines())
161
+
162
+ return textwrap.dedent(
163
+ f"""
164
+ from math import inf
165
+ import torch
166
+ from torch import tensor, device
167
+ import torch.fx as fx
168
+ import torch._dynamo
169
+ from torch._dynamo.testing import rand_strided
170
+ from torch._dynamo.debug_utils import run_fwd_maybe_bwd
171
+
172
+ {generate_config_string(stable_output=stable_output)}
173
+
174
+ {extra_imports}
175
+
176
+ {model_str}
177
+ mod = Repro()
178
+
179
+ {load_args}
180
+
181
+ if __name__ == '__main__':
182
+ from torch._dynamo.repro.after_dynamo import run_repro
183
+ run_repro(mod, load_args, accuracy={check_accuracy!r}, command={command!r},
184
+ save_dir={save_dir!r}, autocast={torch.is_autocast_enabled()!r}, backend={compiler_name!r})
185
+ """
186
+ )
187
+
188
+
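The repro string above is produced with textwrap.dedent over an f-string, using !r so the embedded values are written back out as valid Python literals. A small illustration with made-up values:

import textwrap

save_dir = "/tmp/saved_inputs"
check_accuracy = False

# {x!r} interpolates repr(x), so strings, bools and None round-trip as valid
# Python literals when the generated script is executed later.
script = textwrap.dedent(
    f"""
    if __name__ == '__main__':
        run_repro(accuracy={check_accuracy!r}, save_dir={save_dir!r})
    """
)
print(script)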
189
+ def dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy=False):
190
+ """
191
+ Saves the repro to a repro.py file
192
+ """
193
+ curdir = os.getcwd()
194
+ subdir = os.path.join(os.getcwd(), "checkpoints")
195
+ if not os.path.exists(subdir):
196
+ os.makedirs(subdir, exist_ok=True)
197
+ file_name = os.path.join(subdir, f"minified_{len(gm.graph.nodes)}_nodes.py")
198
+ log.warning(
199
+ "Writing checkpoint with %s nodes to %s", len(gm.graph.nodes), file_name
200
+ )
201
+
202
+ with open(file_name, "w") as fd:
203
+ fd.write(
204
+ generate_dynamo_fx_repro_string(
205
+ gm, args, compiler_name, check_accuracy, save_dir=subdir
206
+ )
207
+ )
208
+ latest_repro = os.path.join(curdir, "repro.py")
209
+ log.warning("Copying %s to %s for convenience", file_name, latest_repro)
210
+
211
+ if use_buck:
212
+ BuckTargetWriter(latest_repro).write()
213
+
214
+ shutil.copyfile(file_name, latest_repro)
215
+
216
+
217
+ def dump_backend_state(gm, args, compiler_name, check_accuracy=False):
218
+ """
219
+ Dumps the dynamo graph to repro the issue.
220
+ 1) It tries to convert the Fx GraphModule to a string. If it can, it writes to a
221
+ repro.py file.
222
+ 2) If it can't convert the Fx GraphModule to a string, it uses to_folder to save
223
+ the module and saves a tar file.
224
+ """
225
+ assert NNModuleToString.can_convert_to_string(gm)
226
+ return dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy)
227
+ # return dump_backend_repro_as_tarfile(gm, args, compiler_name)
228
+
229
+
230
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
231
+ # MINIFIER DUMPER
232
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
233
+
234
+
235
+ def dump_to_minify_after_dynamo(gm, args, compiler_name):
236
+ # TODO: factor this out
237
+ subdir = os.path.join(minifier_dir(), "checkpoints")
238
+ if not os.path.exists(subdir):
239
+ os.makedirs(subdir, exist_ok=True)
240
+ helper_for_dump_minify(
241
+ generate_dynamo_fx_repro_string(
242
+ gm,
243
+ args,
244
+ compiler_name,
245
+ check_accuracy=config.repro_level == 4,
246
+ save_dir=subdir,
247
+ command="minify",
248
+ )
249
+ )
250
+
251
+
252
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
253
+ # MINIFIER BACKENDS
254
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
255
+
256
+
257
+ @register_debug_backend
258
+ def dynamo_minifier_backend(gm, example_inputs, compiler_name):
259
+ from functorch.compile import minifier
260
+
261
+ compiler_fn = lookup_backend(compiler_name)
262
+
263
+ # TODO: It's inconsistent to pass SymInt inputs but REAL tensors.
264
+ # We should pass ints and look at the GraphModule placeholders
265
+ # to resolve them to SymInt (if necessary)
266
+ example_inputs = [
267
+ i.node.hint if isinstance(i, torch.SymInt) else i for i in example_inputs
268
+ ]
269
+
270
+ try:
271
+ compiled_gm = compiler_fn(gm, example_inputs)
272
+ run_fwd_maybe_bwd(compiled_gm, example_inputs)
273
+ raise ValueError("No issue was detected")
274
+ except Exception as exc:
275
+ orig_failure = str(exc)
276
+ log.warning(
277
+ "Compiled Fx GraphModule failed. Creating script to minify the error."
278
+ )
279
+ dump_state_fn = functools.partial(
280
+ dump_backend_state, compiler_name=compiler_name
281
+ )
282
+ dump_state_fn(fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs)
283
+ fails_fn = functools.partial(
284
+ backend_fails,
285
+ compiler_fn=compiler_fn,
286
+ orig_failure=orig_failure,
287
+ )
288
+ minifier(
289
+ gm,
290
+ example_inputs,
291
+ module_fails=fails_fn,
292
+ dump_state=dump_state_fn,
293
+ )
294
+ return gm
295
+
296
+
297
+ @register_debug_backend
298
+ def dynamo_accuracy_minifier_backend(gm, example_inputs, compiler_name):
299
+ from functorch.compile import minifier
300
+
301
+ compiler_fn = lookup_backend(compiler_name)
302
+
303
+ # Set the eval mode to remove randomness.
304
+ gm.eval()
305
+
306
+ # Check Accuracy
307
+ if backend_accuracy_fails(
308
+ gm, example_inputs, compiler_fn, only_fwd=config.repro_forward_only
309
+ ):
310
+ log.warning("Accuracy failed for the TorchDynamo produced graph")
311
+ dump_state_fn = functools.partial(
312
+ dump_backend_state, compiler_name=compiler_name, check_accuracy=True
313
+ )
314
+ fails_fn = functools.partial(
315
+ backend_accuracy_fails,
316
+ compiler_fn=compiler_fn,
317
+ only_fwd=config.repro_forward_only,
318
+ )
319
+ dump_state_fn(fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs)
320
+ minifier(
321
+ gm,
322
+ example_inputs,
323
+ module_fails=fails_fn,
324
+ dump_state=dump_state_fn,
325
+ )
326
+ else:
327
+ log.error("Input graph does not fail accuracy testing")
328
+ return gm
329
+
330
+
331
+ def backend_fails(gm, example_inputs, compiler_fn, orig_failure):
332
+ """
333
+ Minifier uses this function to identify if the minified graph module fails
334
+ with the same error.
335
+
336
+ One caveat is that the minifier can potentially go in the wrong direction when
337
+ the resulting graph module fails for a different reason. To avoid this, we
338
+ save the string of the original exception and check the similarity between the
339
+ new and old exceptions. They can differ somewhat in some cases, e.g. when the
340
+ exception string depends on the failing node's information. So, we use a
341
+ loose similarity metric to guide the minifier's path.
342
+ """
343
+ from difflib import SequenceMatcher
344
+
345
+ try:
346
+ # Run the original gm to check eager validity
347
+ run_fwd_maybe_bwd(gm, clone_inputs_retaining_gradness(example_inputs))
348
+ compiled_gm = compiler_fn(gm, example_inputs)
349
+ run_fwd_maybe_bwd(compiled_gm, clone_inputs_retaining_gradness(example_inputs))
350
+ return False
351
+ except Exception as e:
352
+ new_failure = str(e)
353
+ if SequenceMatcher(None, orig_failure, new_failure).ratio() > 0.5:
354
+ return True
355
+ return False
356
+
357
+
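The 0.5 threshold above is a loose similarity check between the original and the new exception text. A small self-contained illustration (the error strings here are made up):

from difflib import SequenceMatcher

orig_failure = "RuntimeError: shape mismatch at node add_3: got [8, 4], expected [8, 2]"
new_failure = "RuntimeError: shape mismatch at node add_7: got [4, 4], expected [4, 2]"
unrelated = "IndexError: list index out of range"

# ratio() returns a similarity score in [0, 1]; near-identical messages that
# differ only in node/shape details stay above the threshold, unrelated ones fall below it.
print(SequenceMatcher(None, orig_failure, new_failure).ratio())  # well above 0.5
print(SequenceMatcher(None, orig_failure, unrelated).ratio())    # well below 0.5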
358
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
359
+ # REPRO MAIN
360
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
361
+
362
+
363
+ def run_load_args(options, mod, load_args):
364
+ if not hasattr(load_args, "_version"):
365
+ log.warning(
366
+ "load_args does not have a _version attribute, please file a bug to PyTorch "
367
+ "and describe how you generate this repro script"
368
+ )
369
+ else:
370
+ if load_args._version > 0:
371
+ log.warning(
372
+ "load_args is version %s, but this version of PyTorch only supports "
373
+ "version 0. We will try to run it anyway but there may be an incompatibility; "
374
+ "if so, try upgrading your version of PyTorch.",
375
+ load_args._version,
376
+ )
377
+
378
+ nop_reader = NopInputReader()
379
+ load_args(nop_reader)
380
+
381
+ with tqdm(desc="Loading inputs", total=nop_reader.total) as pbar:
382
+ input_reader = InputReader(save_dir=options.save_dir, pbar=pbar)
383
+ load_args(input_reader)
384
+ args = input_reader.args
385
+
386
+ return args
387
+
388
+
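run_load_args above makes two passes over load_args: a first pass with a counting-only reader so the progress bar knows its total, and a second pass that actually reads the inputs. A toy version of that two-pass pattern (the reader classes and argument names here are stand-ins, not the real InputReader API):

from torch.hub import tqdm

class CountingReader:
    # First pass: only count how many items load_args will request.
    def __init__(self):
        self.total = 0

    def tensor(self, name):
        self.total += 1

class LoadingReader:
    # Second pass: "load" each item while advancing the progress bar.
    def __init__(self, pbar):
        self.pbar = pbar
        self.args = []

    def tensor(self, name):
        self.pbar.update(1)
        self.args.append(name)  # stand-in for reading a saved tensor

def load_args(reader):
    reader.tensor("arg0")
    reader.tensor("arg1")

counter = CountingReader()
load_args(counter)
with tqdm(desc="Loading inputs", total=counter.total) as pbar:
    reader = LoadingReader(pbar)
    load_args(reader)
print(reader.args)  # ['arg0', 'arg1']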
389
+ def repro_minify(options, mod, load_args):
390
+ args = run_load_args(options, mod, load_args)
391
+
392
+ # Setup debug minifier compiler
393
+ if not options.accuracy:
394
+ compiler_fn = lookup_backend("dynamo_minifier_backend")
395
+ else:
396
+ compiler_fn = lookup_backend("dynamo_accuracy_minifier_backend")
397
+
398
+ if options.backend is None:
399
+ raise RuntimeError(
400
+ "Compiler name is None - this likely means that a custom compiler "
401
+ "was called by torchdynamo. Please remove this error, import your "
402
+ "custom compiler function, and replace the backend=None "
403
+ "line in run_repro to backend=<my_imported_custom_function>"
404
+ )
405
+
406
+ dynamo_minifier_backend = functools.partial(
407
+ compiler_fn,
408
+ compiler_name=options.backend,
409
+ )
410
+ opt_mod = torch._dynamo.optimize(dynamo_minifier_backend)(mod)
411
+
412
+ with torch.cuda.amp.autocast(enabled=options.autocast):
413
+ opt_mod(*args)
414
+
415
+
416
+ def repro_run(options, mod, load_args):
417
+ opt_mod = torch._dynamo.optimize(options.backend)(mod)
418
+
419
+ if options.accuracy != "":
420
+ mod.eval()
421
+ opt_mod.eval()
422
+
423
+ with torch.cuda.amp.autocast(enabled=options.autocast):
424
+ # TODO: disable clone
425
+ args = run_load_args(options, mod, load_args)
426
+ assert same_two_models(mod, mod, args), "Eager itself failed"
427
+ if not same_two_models(mod, opt_mod, args):
428
+ raise AccuracyError("Dynamo failed")
429
+ else:
430
+ with torch.cuda.amp.autocast(enabled=options.autocast):
431
+ args = run_load_args(options, mod, load_args)
432
+ ref = run_fwd_maybe_bwd(
433
+ mod, args, only_fwd=options.only_fwd, disable_clone=True
434
+ )
435
+ del args
436
+
437
+ args = run_load_args(options, mod, load_args)
438
+ res = run_fwd_maybe_bwd(
439
+ opt_mod, args, only_fwd=options.only_fwd, disable_clone=True
440
+ )
441
+
442
+
443
+ def run_repro(
444
+ mod,
445
+ load_args,
446
+ *,
447
+ command="run",
448
+ accuracy: Union[bool, str] = "",
449
+ save_dir=None,
450
+ autocast=False,
451
+ backend="inductor",
452
+ **kwargs,
453
+ ):
454
+ for k in kwargs:
455
+ log.warning(
456
+ "Unrecognized kwarg %s; perhaps this repro was made on a newer version of PyTorch",
457
+ k,
458
+ )
459
+
460
+ if accuracy is True:
461
+ accuracy = "accuracy"
462
+ elif accuracy is False:
463
+ accuracy = ""
464
+
465
+ parser = argparse.ArgumentParser(
466
+ description=f"""\
467
+ An after_dynamo repro script, typically triggering a bug in Dynamo or
468
+ AOTAutograd. When run with no arguments, this script defaults to running
469
+ '{command}'. Extra flags may be available; to find out more, try '{command}
470
+ --help'. There are also alternate subcommands available, see below.
471
+
472
+ default settings on this script:
473
+ {accuracy=}
474
+ {save_dir=}
475
+ """,
476
+ formatter_class=argparse.RawTextHelpFormatter,
477
+ )
478
+
479
+ def common_flags(parser):
480
+ accuracy_group = parser.add_mutually_exclusive_group()
481
+ accuracy_group.add_argument(
482
+ "--no-accuracy",
483
+ dest="accuracy",
484
+ action="store_const",
485
+ const="",
486
+ default=accuracy,
487
+ help="do not test accuracy, just run the module and see if it errors",
488
+ )
489
+ accuracy_group.add_argument(
490
+ "--accuracy",
491
+ action="store_const",
492
+ const="accuracy",
493
+ default=accuracy,
494
+ help="test accuracy",
495
+ )
496
+ parser.add_argument(
497
+ "--save-dir",
498
+ type=str,
499
+ default=save_dir,
500
+ metavar="DIR",
501
+ help="directory where saved inputs live",
502
+ )
503
+ parser.add_argument(
504
+ "--no-save-dir",
505
+ dest="save_dir",
506
+ action="store_const",
507
+ const=None,
508
+ help="don't use any directory for saved inputs",
509
+ )
510
+ parser.add_argument(
511
+ "--no-isolate",
512
+ dest="isolate",
513
+ action="store_false",
514
+ default=False,
515
+ help="no isolate (doesn't do anything for after_dynamo)",
516
+ )
517
+ parser.add_argument(
518
+ "--autocast",
519
+ default=autocast,
520
+ action="store_true",
521
+ help="use torch.cuda.amp.autocast",
522
+ )
523
+ parser.add_argument(
524
+ "--no-autocast",
525
+ dest="autocast",
526
+ action="store_false",
527
+ help="don't use torch.cuda.amp.autocast",
528
+ )
529
+ parser.add_argument(
530
+ "--backend",
531
+ type=str,
532
+ default=backend,
533
+ metavar="BACKEND",
534
+ help="torch.compile backend to use",
535
+ )
536
+
537
+ subparsers = parser.add_subparsers(
538
+ dest="command", metavar="{run,minify}", required=True
539
+ )
540
+
541
+ parser_run = subparsers.add_parser(
542
+ "run",
543
+ help="just run the repro",
544
+ )
545
+ common_flags(parser_run)
546
+ parser_run.add_argument(
547
+ "--only-fwd",
548
+ action="store_true",
549
+ help="don't run backwards compilation for testing",
550
+ )
551
+
552
+ parser_minify = subparsers.add_parser(
553
+ "minify", help="run the minifier on the repro"
554
+ )
555
+ common_flags(parser_minify)
556
+
557
+ args = None
558
+ if len(sys.argv) <= 1:
559
+ args = [command, *sys.argv[1:]]
560
+
561
+ options = parser.parse_args(args)
562
+ COMMAND_FNS = {
563
+ "minify": repro_minify,
564
+ "run": repro_run,
565
+ }
566
+ COMMAND_FNS[options.command](options, mod, load_args)
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/source.py ADDED
@@ -0,0 +1,545 @@
1
+ import collections
2
+ import dataclasses
3
+ import enum
4
+ from typing import Any, Optional, Union
5
+
6
+ from torch._guards import ChainedSource, GuardSource, Source
7
+
8
+ from . import utils
9
+ from .bytecode_transformation import create_call_function, create_instruction
10
+ from .utils import enum_repr
11
+
12
+ # Constructing an NNModuleVariable inside an FSDP module shouldn't be supported,
13
+ # so those cases are intentionally omitted
14
+ _GUARD_SOURCE_NN_MODULE = {
15
+ GuardSource.LOCAL: GuardSource.LOCAL_NN_MODULE,
16
+ GuardSource.GLOBAL: GuardSource.GLOBAL_NN_MODULE,
17
+ GuardSource.LOCAL_NN_MODULE: GuardSource.LOCAL_NN_MODULE,
18
+ GuardSource.GLOBAL_NN_MODULE: GuardSource.GLOBAL_NN_MODULE,
19
+ }
20
+
21
+ _GUARD_SOURCE_FSDP_MODULE = {
22
+ GuardSource.LOCAL: GuardSource.LOCAL_FSDP_MODULE,
23
+ GuardSource.GLOBAL: GuardSource.GLOBAL_FSDP_MODULE,
24
+ GuardSource.LOCAL_NN_MODULE: GuardSource.LOCAL_FSDP_MODULE,
25
+ GuardSource.GLOBAL_NN_MODULE: GuardSource.GLOBAL_FSDP_MODULE,
26
+ GuardSource.LOCAL_FSDP_MODULE: GuardSource.LOCAL_FSDP_MODULE,
27
+ GuardSource.GLOBAL_FSDP_MODULE: GuardSource.GLOBAL_FSDP_MODULE,
28
+ }
29
+
30
+ _GUARD_SOURCE_NOT_NN_MODULE = {
31
+ GuardSource.LOCAL: GuardSource.LOCAL,
32
+ GuardSource.GLOBAL: GuardSource.GLOBAL,
33
+ GuardSource.LOCAL_NN_MODULE: GuardSource.LOCAL,
34
+ GuardSource.GLOBAL_NN_MODULE: GuardSource.GLOBAL,
35
+ GuardSource.LOCAL_FSDP_MODULE: GuardSource.LOCAL,
36
+ GuardSource.GLOBAL_FSDP_MODULE: GuardSource.GLOBAL,
37
+ }
38
+
39
+
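These tables are consulted by the wrapper sources defined later in this file (NNModuleSource, NotNNModuleSource, FSDPNNModuleSource) to promote or demote the guard source of their base. A small sketch of the effect, assuming this module's public names:

from torch._dynamo.source import LocalSource, NNModuleSource
from torch._guards import GuardSource

base = LocalSource("self")
wrapped = NNModuleSource(base)

# Wrapping does not change the name, only how the guard is categorized.
assert base.guard_source() is GuardSource.LOCAL
assert wrapped.guard_source() is GuardSource.LOCAL_NN_MODULE
print(wrapped.name())  # L['self']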
40
+ def is_constant_source(source):
41
+ if isinstance(source, ConstantSource):
42
+ return True
43
+ try:
44
+ if source.guard_source() == GuardSource.CONSTANT:
45
+ return True
46
+ except NotImplementedError:
47
+ pass
48
+
49
+ return False
50
+
51
+
52
+ def reconstruct_getitem(
53
+ source: Union["GetItemSource", "ODictGetItemSource"], codegen, index_is_slice
54
+ ):
55
+ source.base.reconstruct(codegen)
56
+ if isinstance(source.index, Source):
57
+ source.index.reconstruct(codegen)
58
+ else:
59
+ if index_is_slice:
60
+ assert isinstance(source, GetItemSource)
61
+ codegen.append_output(codegen.create_load_const(source.unpack_slice()))
62
+ else:
63
+ codegen.append_output(codegen.create_load_const(source.index))
64
+
65
+
66
+ @dataclasses.dataclass(frozen=True)
67
+ class LocalSource(Source):
68
+ local_name: str
69
+ cell_or_freevar: bool = False
70
+
71
+ def reconstruct(self, codegen):
72
+ codegen.append_output(codegen.create_load(self.local_name))
73
+
74
+ def guard_source(self):
75
+ return GuardSource.LOCAL
76
+
77
+ def name(self):
78
+ return f"L[{repr(self.local_name)}]"
79
+
80
+
81
+ @dataclasses.dataclass(frozen=True)
82
+ class SyntheticLocalSource(Source):
83
+ local_name: str
84
+
85
+ def reconstruct(self, codegen):
86
+ codegen.append_output(codegen.create_load(self.local_name))
87
+
88
+ def guard_source(self):
89
+ return GuardSource.SYNTHETIC_LOCAL
90
+
91
+ def name(self):
92
+ return f"SYNTHETIC_LOCAL[{self.local_name!r}]"
93
+
94
+
95
+ @dataclasses.dataclass(frozen=True)
96
+ class RandomValueSource(Source):
97
+ random_call_index: int
98
+
99
+ def guard_source(self):
100
+ return GuardSource.RANDOM_VALUE
101
+
102
+ def reconstruct(self, codegen):
103
+ codegen.append_output(codegen.create_load(codegen.tx.output.random_values_var))
104
+ codegen.append_output(codegen.create_load_const(self.random_call_index))
105
+ codegen.append_output(create_instruction("BINARY_SUBSCR"))
106
+
107
+ def name(self):
108
+ return f"random_value_{self.random_call_index}"
109
+
110
+
111
+ @dataclasses.dataclass(frozen=True)
112
+ class GlobalSource(Source):
113
+ global_name: str
114
+
115
+ def reconstruct(self, codegen):
116
+ codegen.append_output(
117
+ codegen.create_load_global(self.global_name, False, add=True)
118
+ )
119
+
120
+ def guard_source(self):
121
+ return GuardSource.GLOBAL
122
+
123
+ def name(self):
124
+ return f"G[{repr(self.global_name)}]"
125
+
126
+
127
+ @dataclasses.dataclass(frozen=True)
128
+ class GlobalWeakRefSource(Source):
129
+ global_name: str
130
+
131
+ def reconstruct(self, codegen):
132
+ codegen.append_output(
133
+ codegen.create_load_global(self.global_name, True, add=True)
134
+ )
135
+ codegen.extend_output(create_call_function(0, False))
136
+
137
+ def guard_source(self):
138
+ return GuardSource.GLOBAL
139
+
140
+ def name(self):
141
+ return f"G[{repr(self.global_name)}]()"
142
+
143
+
144
+ @dataclasses.dataclass(frozen=True)
145
+ class AttrSource(ChainedSource):
146
+ member: str
147
+ get_static: bool = False
148
+
149
+ def __post_init__(self):
150
+ assert self.base, "Can't construct an AttrSource without a valid base source"
151
+ if "." in self.member:
152
+ member_parts = self.member.split(".")
153
+ object.__setattr__(
154
+ self, "base", AttrSource(self.base, ".".join(member_parts[:-1]))
155
+ )
156
+ object.__setattr__(self, "member", member_parts[-1])
157
+
158
+ def reconstruct(self, codegen):
159
+ self.base.reconstruct(codegen)
160
+ codegen.extend_output(codegen.create_load_attrs(self.member))
161
+
162
+ def guard_source(self):
163
+ return self.base.guard_source()
164
+
165
+ def name(self):
166
+ if self.get_static:
167
+ return f"inspect.getattr_static({self.base.name()}, {self.member!r})"
168
+ elif not self.member.isidentifier():
169
+ return f"getattr({self.base.name()}, {self.member!r})"
170
+ return f"{self.base.name()}.{self.member}"
171
+
172
+
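Because __post_init__ splits dotted members into a chain of AttrSources, the rendered guard name walks the full attribute path. For example (the attribute names are illustrative):

from torch._dynamo.source import AttrSource, LocalSource

# Internally this becomes AttrSource(AttrSource(AttrSource(L['self'], 'sub'), 'linear'), 'weight')
src = AttrSource(LocalSource("self"), "sub.linear.weight")
print(src.name())  # L['self'].sub.linear.weight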
173
+ @dataclasses.dataclass(frozen=True)
174
+ class ParamBufferSource(AttrSource):
175
+ def guard_source(self):
176
+ return _GUARD_SOURCE_NN_MODULE[self.base.guard_source()]
177
+
178
+
179
+ # This source is intended to be used in places where a source is needed but it is expected
180
+ # that the symbol will be simplified out later on. Symbols with ephemeral sources are
181
+ # prioritized to be simplified out when e.g. compared against a symbol without an ephemeral
182
+ # source. Guarding on this source is an error.
183
+ #
184
+ # Example: During subclass view fake-ification, any closed-over ViewFunc state should be
185
+ # symbolicized / fake-ified to avoid invalid specialization during view replay. This source
186
+ # is useful for symbols utilized in the middle of the view chain that are not expected to be
187
+ # present within the final view shape metadata.
188
+ @dataclasses.dataclass(frozen=True)
189
+ class EphemeralSource(Source):
190
+ desc: Optional[str] = None
191
+
192
+ def guard_source(self):
193
+ return GuardSource.EPHEMERAL
194
+
195
+ def name(self):
196
+ return f"<ephemeral{': ' + self.desc if self.desc is not None else ''}>"
197
+
198
+ def make_guard(self):
199
+ raise NotImplementedError()
200
+
201
+ def is_ephemeral(self):
202
+ return True
203
+
204
+
205
+ class TensorProperty(enum.Enum):
206
+ SIZE = 0
207
+ STRIDE = 1
208
+ STORAGE_OFFSET = 2
209
+
210
+ def method_name(self):
211
+ if self is TensorProperty.SIZE:
212
+ return "size"
213
+ elif self is TensorProperty.STRIDE:
214
+ return "stride"
215
+ elif self is TensorProperty.STORAGE_OFFSET:
216
+ return "storage_offset"
217
+
218
+
219
+ @dataclasses.dataclass(frozen=True)
220
+ class TensorPropertySource(ChainedSource):
221
+ prop: TensorProperty
222
+ idx: Optional[int] = None # None for STORAGE_OFFSET
223
+
224
+ def __post_init__(self):
225
+ assert self.base is not None
226
+ if self.prop is TensorProperty.STORAGE_OFFSET:
227
+ assert self.idx is None
228
+ else:
229
+ assert self.idx is not None
230
+
231
+ def reconstruct(self, codegen):
232
+ self.base.reconstruct(codegen)
233
+ codegen.append_output(codegen.create_load_attr(self.prop.method_name()))
234
+ if self.idx is not None:
235
+ codegen.append_output(codegen.create_load_const(self.idx))
236
+ codegen.extend_output(
237
+ create_call_function(1 if self.idx is not None else 0, True)
238
+ )
239
+
240
+ def guard_source(self):
241
+ return self.base.guard_source()
242
+
243
+ def name(self):
244
+ if self.prop is TensorProperty.SIZE:
245
+ return f"{self.base.name()}.size()[{self.idx}]"
246
+ elif self.prop is TensorProperty.STRIDE:
247
+ return f"{self.base.name()}.stride()[{self.idx}]"
248
+ elif self.prop is TensorProperty.STORAGE_OFFSET:
249
+ assert self.idx is None
250
+ return f"{self.base.name()}.storage_offset()"
251
+ else:
252
+ raise AssertionError(f"unhandled {self.prop}")
253
+
254
+
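TensorPropertySource renders guard expressions that call size()/stride()/storage_offset() on its base; for example:

from torch._dynamo.source import LocalSource, TensorProperty, TensorPropertySource

base = LocalSource("x")
print(TensorPropertySource(base, TensorProperty.SIZE, 0).name())         # L['x'].size()[0]
print(TensorPropertySource(base, TensorProperty.STRIDE, 1).name())       # L['x'].stride()[1]
print(TensorPropertySource(base, TensorProperty.STORAGE_OFFSET).name())  # L['x'].storage_offset()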
255
+ @dataclasses.dataclass(frozen=True)
256
+ class NegateSource(ChainedSource):
257
+ def __post_init__(self):
258
+ assert self.base is not None
259
+
260
+ def reconstruct(self, codegen):
261
+ raise NotImplementedError()
262
+
263
+ def guard_source(self):
264
+ return self.base.guard_source()
265
+
266
+ def name(self):
267
+ # NB: use method call so that function stripping regexes work
268
+ return f"{self.base.name()}.__neg__()"
269
+
270
+
271
+ @dataclasses.dataclass(frozen=True)
272
+ class ConvertIntSource(ChainedSource):
273
+ def __post_init__(self):
274
+ assert self.base is not None
275
+
276
+ def reconstruct(self, codegen):
277
+ self.base.reconstruct(codegen)
278
+
279
+ def guard_source(self):
280
+ return self.base.guard_source()
281
+
282
+ def name(self):
283
+ return f"cast_symbool_to_symint_guardless({self.base.name()})"
284
+
285
+
286
+ @dataclasses.dataclass(frozen=True)
287
+ class DefaultsSource(ChainedSource):
288
+ idx_key: Union[int, str]
289
+ is_kw: bool = False
290
+ field: str = dataclasses.field(init=False, repr=False, compare=False)
291
+ _name: str = dataclasses.field(init=False, repr=False, compare=False)
292
+
293
+ def __post_init__(self):
294
+ assert (
295
+ self.base
296
+ ), "Base must be a valid source in order to properly track and guard this Defaults to its origin."
297
+ if self.is_kw:
298
+ assert isinstance(self.idx_key, str)
299
+ object.__setattr__(self, "field", "__kwdefaults__")
300
+ object.__setattr__(
301
+ self, "_name", f"{self.base.name()}.{self.field}['{self.idx_key}']"
302
+ )
303
+ else:
304
+ assert isinstance(self.idx_key, int)
305
+ object.__setattr__(self, "field", "__defaults__")
306
+ object.__setattr__(
307
+ self, "_name", f"{self.base.name()}.{self.field}[{self.idx_key}]"
308
+ )
309
+
310
+ def reconstruct(self, codegen):
311
+ self.base.reconstruct(codegen)
312
+ codegen.extend_output(codegen.create_load_attrs(self.field))
313
+ codegen.append_output(codegen.create_load_const(self.idx_key))
314
+ codegen.append_output(create_instruction("BINARY_SUBSCR"))
315
+
316
+ def guard_source(self):
317
+ return self.base.guard_source()
318
+
319
+ def name(self):
320
+ return self._name
321
+
322
+
323
+ @dataclasses.dataclass(frozen=True)
324
+ class GetItemSource(ChainedSource):
325
+ index: Any
326
+ index_is_slice: bool = False
327
+
328
+ def __post_init__(self):
329
+ assert self.base is not None
330
+ if isinstance(self.index, slice):
331
+ # store the hashable version of the slice so the whole GetItemSource is hashable
332
+ super().__setattr__("index", self.index.__reduce__())
333
+ super().__setattr__("index_is_slice", True)
334
+
335
+ def reconstruct(self, codegen):
336
+ reconstruct_getitem(self, codegen, index_is_slice=self.index_is_slice)
337
+ codegen.append_output(create_instruction("BINARY_SUBSCR"))
338
+
339
+ def guard_source(self):
340
+ return self.base.guard_source()
341
+
342
+ def unpack_slice(self):
343
+ assert self.index_is_slice
344
+ slice_class, slice_args = self.index
345
+ return slice_class(*slice_args)
346
+
347
+ def name(self):
348
+ # Index can be of the following types
349
+ # 1) ConstDictKeySource
350
+ # 2) enum.Enum
351
+ # 3) index is a slice - example 1:4
352
+ # 4) index is a constant - example string, integer
353
+ if isinstance(self.index, Source):
354
+ if not isinstance(self.index, ConstDictKeySource):
355
+ raise ValueError(
356
+ "GetItemSource index must be a constant, enum or ConstDictKeySource"
357
+ )
358
+ return f"{self.base.name()}[{self.index.name()}]"
359
+ elif self.index_is_slice:
360
+ return f"{self.base.name()}[{self.unpack_slice()!r}]"
361
+ elif isinstance(self.index, enum.Enum):
362
+ return f"{self.base.name()}[{enum_repr(self.index, self.guard_source().is_local())}]"
363
+ else:
364
+ return f"{self.base.name()}[{self.index!r}]"
365
+
366
+
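The slice handling above exists because slices are not hashable on CPython 3.10 (which this tree targets), so storing slice.__reduce__() keeps a hashable (type, args) pair that unpack_slice() can rebuild. A small illustration:

s = slice(1, 4)
try:
    hash(s)
except TypeError as e:
    print(e)                     # unhashable type: 'slice' (on CPython 3.10)

reduced = s.__reduce__()
print(reduced)                   # (<class 'slice'>, (1, 4, None))
slice_class, slice_args = reduced
print(slice_class(*slice_args))  # slice(1, 4, None), what unpack_slice() rebuilds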
367
+ @dataclasses.dataclass(frozen=True)
368
+ class ConstDictKeySource(GetItemSource):
369
+ def is_dict_key(self):
370
+ return True
371
+
372
+ def reconstruct(self, codegen):
373
+ codegen.load_import_from(utils.__name__, "dict_keys_getitem")
374
+ self.base.reconstruct(codegen)
375
+ codegen.append_output(codegen.create_load_const(self.index))
376
+ codegen.extend_output(create_call_function(2, True))
377
+
378
+ def name(self):
379
+ # The list creation will be CSE'd by PyExprCSEPass
380
+ return f"list({self.base.name()}.keys())[{self.index!r}]"
381
+
382
+
383
+ @dataclasses.dataclass(frozen=True)
384
+ class TupleIteratorGetItemSource(GetItemSource):
385
+ def reconstruct(self, codegen):
386
+ codegen.load_import_from(utils.__name__, "tuple_iterator_getitem")
387
+ self.base.reconstruct(codegen)
388
+ codegen.append_output(codegen.create_load_const(self.index))
389
+ codegen.extend_output(create_call_function(2, True))
390
+
391
+ def name(self):
392
+ return f"___tuple_iterator_getitem({self.base.name()}, {self.index!r})"
393
+
394
+
395
+ @dataclasses.dataclass(frozen=True)
396
+ class TypeSource(ChainedSource):
397
+ def __post_init__(self):
398
+ assert self.base is not None
399
+
400
+ def reconstruct(self, codegen):
401
+ codegen.load_import_from("builtins", "type")
402
+ self.base.reconstruct(codegen)
403
+ codegen.extend_output(create_call_function(1, True))
404
+
405
+ def guard_source(self):
406
+ return self.base.guard_source()
407
+
408
+ def name(self):
409
+ return f"type({self.base.name()})"
410
+
411
+
412
+ @dataclasses.dataclass(frozen=True)
413
+ class ODictGetItemSource(ChainedSource):
414
+ index: Any
415
+
416
+ def __post_init__(self):
417
+ assert self.base is not None
418
+
419
+ def reconstruct(self, codegen):
420
+ codegen.append_output(
421
+ codegen._create_load_const(collections.OrderedDict.__getitem__)
422
+ )
423
+ reconstruct_getitem(self, codegen, index_is_slice=False)
424
+ codegen.extend_output(create_call_function(2, True))
425
+
426
+ def guard_source(self):
427
+ return self.base.guard_source()
428
+
429
+ def name(self):
430
+ if isinstance(self.index, type):
431
+ rep = f'__load_module("{self.index.__module__}").{self.index.__qualname__}'
432
+ return f"___odict_getitem({self.base.name()}, {rep})"
433
+ elif isinstance(self.index, Source):
434
+ return f"___odict_getitem({self.base.name()}, {self.index.name()})"
435
+ else:
436
+ return f"___odict_getitem({self.base.name()}, {self.index!r})"
437
+
438
+
439
+ @dataclasses.dataclass(frozen=True)
440
+ class NNModuleSource(ChainedSource):
441
+ def reconstruct(self, codegen):
442
+ self.base.reconstruct(codegen)
443
+
444
+ def guard_source(self):
445
+ return _GUARD_SOURCE_NN_MODULE[self.base.guard_source()]
446
+
447
+ def name(self):
448
+ return self.base.name()
449
+
450
+
451
+ @dataclasses.dataclass(frozen=True)
452
+ class NotNNModuleSource(NNModuleSource):
453
+ def guard_source(self):
454
+ return _GUARD_SOURCE_NOT_NN_MODULE[self.base.guard_source()]
455
+
456
+
457
+ @dataclasses.dataclass(frozen=True)
458
+ class FSDPNNModuleSource(NNModuleSource):
459
+ def guard_source(self):
460
+ return _GUARD_SOURCE_FSDP_MODULE[self.base.guard_source()]
461
+
462
+
463
+ @dataclasses.dataclass(frozen=True)
464
+ class GlobalStateSource(Source):
465
+ def name(self):
466
+ return ""
467
+
468
+ def guard_source(self):
469
+ return GuardSource.GLOBAL
470
+
471
+
472
+ @dataclasses.dataclass(frozen=True)
473
+ class ConstantSource(Source):
474
+ source_name: str
475
+
476
+ def reconstruct(self, codegen):
477
+ codegen.append_output(
478
+ codegen.create_load_global(self.source_name, False, add=False)
479
+ )
480
+
481
+ def guard_source(self):
482
+ return GuardSource.CONSTANT
483
+
484
+ def name(self):
485
+ return self.source_name
486
+
487
+ def make_guard(self, fn):
488
+ raise NotImplementedError()
489
+
490
+
491
+ @dataclasses.dataclass(frozen=True)
492
+ class NumpyTensorSource(ChainedSource):
493
+ def name(self) -> str:
494
+ return f"___from_numpy({self.base.name()})"
495
+
496
+ def guard_source(self):
497
+ return self.base.guard_source()
498
+
499
+ def reconstruct(self, codegen):
500
+ codegen.load_import_from("torch", "as_tensor")
501
+ self.base.reconstruct(codegen)
502
+ codegen.extend_output(create_call_function(1, True))
503
+
504
+
505
+ # This is a synthetic source that is associated with the singleton
506
+ # shape env guard we always register for all frames. We get the actual
507
+ # guard contents from the ambient ShapeEnv
508
+ @dataclasses.dataclass(frozen=True)
509
+ class ShapeEnvSource(Source):
510
+ def name(self):
511
+ return ""
512
+
513
+ def guard_source(self):
514
+ return GuardSource.SHAPE_ENV
515
+
516
+
517
+ @dataclasses.dataclass(frozen=True)
518
+ class BackwardStateSource(Source):
519
+ def name(self):
520
+ return ""
521
+
522
+ def guard_source(self):
523
+ return GuardSource.BACKWARD_STATE
524
+
525
+
526
+ def is_from_local_source(source: Source, *, allow_cell_or_freevar=True):
527
+ if isinstance(source, ChainedSource):
528
+ return is_from_local_source(
529
+ source.base, allow_cell_or_freevar=allow_cell_or_freevar
530
+ )
531
+ if not isinstance(source, LocalSource):
532
+ return False
533
+ if not allow_cell_or_freevar and source.cell_or_freevar:
534
+ return False
535
+ return True
536
+
537
+
538
+ # TODO: can probably write a generic "test this on everything in the chain"
539
+ # helper
540
+ def is_from_defaults(source: Source):
541
+ if isinstance(source, DefaultsSource):
542
+ return True
543
+ if isinstance(source, ChainedSource):
544
+ return is_from_defaults(source.base)
545
+ return False
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/test_case.py ADDED
@@ -0,0 +1,78 @@
1
+ import contextlib
2
+ import importlib
3
+ import logging
4
+ import sys
5
+
6
+ import torch
7
+ import torch.testing
8
+ from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
9
+ IS_WINDOWS,
10
+ TEST_WITH_CROSSREF,
11
+ TEST_WITH_TORCHDYNAMO,
12
+ TestCase as TorchTestCase,
13
+ )
14
+
15
+ from . import config, reset, utils
16
+
17
+ log = logging.getLogger(__name__)
18
+
19
+
20
+ def run_tests(needs=()):
21
+ from torch.testing._internal.common_utils import run_tests
22
+
23
+ if (
24
+ TEST_WITH_TORCHDYNAMO
25
+ or IS_WINDOWS
26
+ or TEST_WITH_CROSSREF
27
+ or sys.version_info >= (3, 12)
28
+ ):
29
+ return # skip testing
30
+
31
+ if isinstance(needs, str):
32
+ needs = (needs,)
33
+ for need in needs:
34
+ if need == "cuda" and not torch.cuda.is_available():
35
+ return
36
+ else:
37
+ try:
38
+ importlib.import_module(need)
39
+ except ImportError:
40
+ return
41
+ run_tests()
42
+
43
+
44
+ class TestCase(TorchTestCase):
45
+ _exit_stack: contextlib.ExitStack
46
+
47
+ @classmethod
48
+ def tearDownClass(cls):
49
+ cls._exit_stack.close()
50
+ super().tearDownClass()
51
+
52
+ @classmethod
53
+ def setUpClass(cls):
54
+ super().setUpClass()
55
+ cls._exit_stack = contextlib.ExitStack() # type: ignore[attr-defined]
56
+ cls._exit_stack.enter_context( # type: ignore[attr-defined]
57
+ config.patch(
58
+ raise_on_ctx_manager_usage=True,
59
+ suppress_errors=False,
60
+ log_compilation_metrics=False,
61
+ ),
62
+ )
63
+
64
+ def setUp(self):
65
+ self._prior_is_grad_enabled = torch.is_grad_enabled()
66
+ super().setUp()
67
+ reset()
68
+ utils.counters.clear()
69
+
70
+ def tearDown(self):
71
+ for k, v in utils.counters.items():
72
+ print(k, v.most_common())
73
+ reset()
74
+ utils.counters.clear()
75
+ super().tearDown()
76
+ if self._prior_is_grad_enabled is not torch.is_grad_enabled():
77
+ log.warning("Running test changed grad mode")
78
+ torch.set_grad_enabled(self._prior_is_grad_enabled)
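The class-level ExitStack used above is a common way to hold a config patch open for the lifetime of a test class and revert it exactly once. The same idiom outside of unittest, with a stand-in context manager:

import contextlib

class DemoPatch:
    # Stand-in for config.patch(...); any context manager works here.
    def __enter__(self):
        print("patch applied")
        return self

    def __exit__(self, *exc):
        print("patch reverted")

stack = contextlib.ExitStack()    # setUpClass
stack.enter_context(DemoPatch())  # patch stays active...
stack.close()                     # ...until tearDownClass closes the stack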
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/types.py ADDED
@@ -0,0 +1,99 @@
1
+ import dataclasses
2
+ import sys
3
+ import types
4
+ from typing import Any, Callable, Dict, List, NamedTuple, Optional, Protocol, Union
5
+
6
+ from typing_extensions import TypeAlias
7
+
8
+
9
+ if sys.version_info >= (3, 11):
10
+ from torch._C._dynamo import eval_frame
11
+
12
+ DynamoFrameType: TypeAlias = eval_frame._PyInterpreterFrame
13
+ else:
14
+ DynamoFrameType: TypeAlias = types.FrameType
15
+
16
+ import torch
17
+
18
+ # This class has a `check_fn` field for the guard,
19
+ # and a `code` field for the code object.
20
+ CacheEntry = torch._C._dynamo.eval_frame._CacheEntry
21
+
22
+ ExtraState = torch._C._dynamo.eval_frame._ExtraState
23
+
24
+ # We use a dict to store additional data per frame.
25
+ FrameState = Dict[Any, Any]
26
+
27
+
28
+ class GuardFail(NamedTuple):
29
+ # A string repr of the piece of failed guard code we eval-ed
30
+ reason: str
31
+ # A code object where we failed a guard
32
+ orig_code: types.CodeType
33
+
34
+
35
+ class GuardFn(Protocol):
36
+ closure_vars: Dict[str, object]
37
+ args: List[str]
38
+ code_parts: List[str]
39
+ verbose_code_parts: List[str]
40
+ global_scope: Dict[str, object]
41
+ guard_fail_fn: Optional[Callable[[GuardFail], None]]
42
+ cache_entry: Optional[CacheEntry]
43
+ extra_state: Optional[ExtraState]
44
+
45
+ # maps locals of user function to bool
46
+ def __call__(self, f_locals: Dict[str, object]) -> bool:
47
+ ...
48
+
49
+
50
+ @dataclasses.dataclass
51
+ class GuardedCode:
52
+ code: types.CodeType
53
+ check_fn: GuardFn
54
+
55
+
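GuardFn is a typing.Protocol, so any object with these attributes and a __call__(f_locals) -> bool satisfies it structurally; nothing has to inherit from it. A toy guard that always passes (a real check_fn evaluates generated guard expressions against f_locals):

class AlwaysTrueGuard:
    def __init__(self):
        self.closure_vars = {}
        self.args = []
        self.code_parts = ["True"]
        self.verbose_code_parts = ["True  # toy guard"]
        self.global_scope = {}
        self.guard_fail_fn = None
        self.cache_entry = None
        self.extra_state = None

    def __call__(self, f_locals):
        return True

guard = AlwaysTrueGuard()
print(guard({"x": 1}))  # True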
56
+ class DynamoCallbackFn(Protocol):
57
+ def __call__(
58
+ self,
59
+ frame: DynamoFrameType,
60
+ cache_entry: Optional[CacheEntry],
61
+ frame_state: FrameState,
62
+ ) -> Optional[GuardedCode]:
63
+ ...
64
+
65
+
66
+ DynamoCallback = Union[DynamoCallbackFn, None, bool]
67
+
68
+
69
+ class DynamoGuardHook(Protocol):
70
+ def __call__(
71
+ self,
72
+ guard_fn: GuardFn,
73
+ code: types.CodeType,
74
+ f_locals: Dict[str, object],
75
+ index: int,
76
+ last: bool,
77
+ ) -> None:
78
+ ...
79
+
80
+
81
+ class ProfilerStartHook(Protocol):
82
+ def __call__(
83
+ self,
84
+ name: str,
85
+ # TODO(whc) how do I annotate a _RecordFunction here?
86
+ ) -> Any:
87
+ ...
88
+
89
+
90
+ class ProfilerEndHook(Protocol):
91
+ def __call__(self, record: Any) -> None:
92
+ ...
93
+
94
+
95
+ class BytecodeHook(Protocol):
96
+ def __call__(
97
+ self, code: types.CodeType, new_code: types.CodeType
98
+ ) -> Optional[types.CodeType]:
99
+ ...
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__init__.py ADDED
@@ -0,0 +1,151 @@
1
+ # mypy: ignore-errors
2
+
3
+ from .base import VariableTracker
4
+ from .builtin import BuiltinVariable
5
+ from .constant import ConstantVariable, EnumVariable
6
+ from .ctx_manager import (
7
+ ContextWrappingVariable,
8
+ DeterministicAlgorithmsVariable,
9
+ DisabledSavedTensorsHooksVariable,
10
+ GradIncrementNestingCtxManagerVariable,
11
+ GradInplaceRequiresGradCtxManagerVariable,
12
+ GradModeVariable,
13
+ InferenceModeVariable,
14
+ StreamContextVariable,
15
+ StreamVariable,
16
+ VmapIncrementNestingCtxManagerVariable,
17
+ WithExitFunctionVariable,
18
+ )
19
+ from .dicts import (
20
+ ConstDictVariable,
21
+ CustomizedDictVariable,
22
+ DataClassVariable,
23
+ DefaultDictVariable,
24
+ SetVariable,
25
+ )
26
+ from .distributed import BackwardHookVariable
27
+ from .functions import (
28
+ FunctoolsPartialVariable,
29
+ NestedUserFunctionVariable,
30
+ SkipFunctionVariable,
31
+ UserFunctionVariable,
32
+ UserMethodVariable,
33
+ )
34
+ from .higher_order_ops import (
35
+ FunctorchHigherOrderVariable,
36
+ TorchHigherOrderOperatorVariable,
37
+ )
38
+ from .iter import (
39
+ CountIteratorVariable,
40
+ CycleIteratorVariable,
41
+ IteratorVariable,
42
+ ItertoolsVariable,
43
+ RepeatIteratorVariable,
44
+ )
45
+ from .lazy import LazyVariableTracker
46
+ from .lists import (
47
+ BaseListVariable,
48
+ ListIteratorVariable,
49
+ ListVariable,
50
+ NamedTupleVariable,
51
+ RangeVariable,
52
+ RestrictedListSubclassVariable,
53
+ SliceVariable,
54
+ TupleIteratorVariable,
55
+ TupleVariable,
56
+ )
57
+ from .misc import (
58
+ AutogradFunctionContextVariable,
59
+ AutogradFunctionVariable,
60
+ ClosureVariable,
61
+ DeletedVariable,
62
+ GetAttrVariable,
63
+ InspectSignatureVariable,
64
+ LambdaVariable,
65
+ MethodWrapperVariable,
66
+ NewCellVariable,
67
+ NewGlobalVariable,
68
+ NumpyVariable,
69
+ PythonModuleVariable,
70
+ StringFormatVariable,
71
+ SuperVariable,
72
+ TypingVariable,
73
+ UnknownVariable,
74
+ )
75
+ from .nn_module import NNModuleVariable, UnspecializedNNModuleVariable
76
+ from .sdpa import SDPAParamsVariable
77
+ from .tensor import (
78
+ FakeItemVariable,
79
+ NumpyNdarrayVariable,
80
+ SymNodeVariable,
81
+ TensorVariable,
82
+ UnspecializedPythonVariable,
83
+ UntypedStorageVariable,
84
+ )
85
+ from .torch import TorchCtxManagerClassVariable, TorchInGraphFunctionVariable
86
+ from .user_defined import (
87
+ RemovableHandleVariable,
88
+ UserDefinedClassVariable,
89
+ UserDefinedObjectVariable,
90
+ )
91
+
92
+ __all__ = [
93
+ "AutogradFunctionContextVariable",
94
+ "AutogradFunctionVariable",
95
+ "BackwardHookVariable",
96
+ "BaseListVariable",
97
+ "BuiltinVariable",
98
+ "ClosureVariable",
99
+ "ConstantVariable",
100
+ "ConstDictVariable",
101
+ "ContextWrappingVariable",
102
+ "CountIteratorVariable",
103
+ "CustomizedDictVariable",
104
+ "CycleIteratorVariable",
105
+ "DataClassVariable",
106
+ "DefaultDictVariable",
107
+ "DeletedVariable",
108
+ "DeterministicAlgorithmsVariable",
109
+ "EnumVariable",
110
+ "FakeItemVariable",
111
+ "GetAttrVariable",
112
+ "GradModeVariable",
113
+ "InspectSignatureVariable",
114
+ "IteratorVariable",
115
+ "ItertoolsVariable",
116
+ "LambdaVariable",
117
+ "LazyVariableTracker",
118
+ "ListIteratorVariable",
119
+ "ListVariable",
120
+ "NamedTupleVariable",
121
+ "NestedUserFunctionVariable",
122
+ "NewCellVariable",
123
+ "NewGlobalVariable",
124
+ "NNModuleVariable",
125
+ "NumpyNdarrayVariable",
126
+ "NumpyVariable",
127
+ "PythonModuleVariable",
128
+ "RangeVariable",
129
+ "RemovableHandleVariable",
130
+ "RepeatIteratorVariable",
131
+ "RestrictedListSubclassVariable",
132
+ "SDPAParamsVariable",
133
+ "SkipFunctionVariable",
134
+ "SliceVariable",
135
+ "StringFormatVariable",
136
+ "SuperVariable",
137
+ "TensorVariable",
138
+ "TorchCtxManagerClassVariable",
139
+ "TorchInGraphFunctionVariable",
140
+ "TupleVariable",
141
+ "UnknownVariable",
142
+ "UnspecializedNNModuleVariable",
143
+ "UnspecializedPythonVariable",
144
+ "UntypedStorageVariable",
145
+ "UserDefinedClassVariable",
146
+ "UserDefinedObjectVariable",
147
+ "UserFunctionVariable",
148
+ "UserMethodVariable",
149
+ "VariableTracker",
150
+ "WithExitFunctionVariable",
151
+ ]
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.17 kB)
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc ADDED
Binary file (14.2 kB)
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc ADDED
Binary file (41.4 kB)
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builtin.cpython-310.pyc ADDED
Binary file (43.6 kB)
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc ADDED
Binary file (7.88 kB)
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc ADDED
Binary file (26.9 kB)
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/dicts.cpython-310.pyc ADDED
Binary file (28.8 kB)
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/distributed.cpython-310.pyc ADDED
Binary file (12.4 kB)
 
llmeval-env/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/functions.cpython-310.pyc ADDED
Binary file (28.2 kB)