Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- env-llmeval/lib/python3.10/site-packages/torch/_export/error.py +56 -0
- env-llmeval/lib/python3.10/site-packages/torch/_export/pass_base.py +429 -0
- env-llmeval/lib/python3.10/site-packages/torch/_export/unflatten.py +642 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/comm_analysis.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/comms.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/constant_folding.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/coordinate_descent_tuner.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/debug.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/decomposition.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/dependencies.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/freezing.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/metrics.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/pattern_matcher.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/quantized_lowerings.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/select_algorithm.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/sizevars.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/test_operators.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_helpers.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_heuristics.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/virtualized.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_10.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_9.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/bmm.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/conv.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_common.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_plus_mm.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/unpack_mixed_mm.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/bmm.py +128 -0
- env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/mm_common.py +222 -0
env-llmeval/lib/python3.10/site-packages/torch/_export/error.py
ADDED
@@ -0,0 +1,56 @@
from enum import Enum


class ExportErrorType(Enum):
    # User providing invalid inputs to either tracer, or other public facing APIs
    INVALID_INPUT_TYPE = 1

    # User returning values from their models that we don’t support.
    INVALID_OUTPUT_TYPE = 2

    # Generated IR does not conform to Export IR Specification.
    VIOLATION_OF_SPEC = 3

    # User’s code contains types and functionalities we don’t support.
    NOT_SUPPORTED = 4

    # User's code didn't provide necessary details for us to successfully trace and export.
    # For example, we use a lot of decorators and ask users to annotate their model.
    MISSING_PROPERTY = 5

    # User is using an API without proper initialization step.
    UNINITIALIZED = 6


def internal_assert(pred: bool, assert_msg: str) -> None:
    """
    This is exir's custom assert method. It internally just throws InternalError.
    Note that the sole purpose is to throw our own error while maintaining similar syntax
    as python assert.
    """

    if not pred:
        raise InternalError(assert_msg)


class InternalError(Exception):
    """
    Raised when an internal invariant is violated in the EXIR stack.
    Should hint users to report a bug to dev and expose the original
    error message.
    """

    def __init__(self, message: str) -> None:
        super().__init__(message)


class ExportError(Exception):
    """
    This type of exception is raised for errors that are directly caused by the user
    code. In general, user errors happen during model authoring, tracing, using our public
    facing APIs, and writing graph passes.
    """

    def __init__(self, error_code: ExportErrorType, message: str) -> None:
        prefix = f"[{error_code}]: "
        super().__init__(prefix + message)
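For context, a brief sketch (not part of this diff) of how these error types are meant to be used; the check and the messages are illustrative only.

# Illustrative sketch only: user-facing vs. internal errors from this module.
import torch
from torch._export.error import ExportError, ExportErrorType, internal_assert

def check_user_input(x):
    if not isinstance(x, torch.Tensor):
        # Caller error: carries an ExportErrorType code in the message prefix.
        raise ExportError(
            ExportErrorType.INVALID_INPUT_TYPE,
            f"Expected a Tensor input, got {type(x)}",
        )
    # Internal invariant: raises InternalError, hinting at a bug in the stack.
    internal_assert(x.dim() >= 1, "traced value unexpectedly lost its dimensions")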
env-llmeval/lib/python3.10/site-packages/torch/_export/pass_base.py
ADDED
@@ -0,0 +1,429 @@
import operator
import traceback
import typing
from contextlib import nullcontext
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from functorch.experimental.control_flow import _unstack_pytree
from torch import fx
from torch._dispatch.python import enable_python_dispatcher
from torch._export.pass_infra.node_metadata import NodeMetadata
from torch._export.pass_infra.proxy_value import ProxyValue
from torch._subclasses import FakeTensor, UnsupportedFakeTensorException
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx import traceback as fx_traceback
from torch.fx.experimental.proxy_tensor import PythonKeyTracer
from torch.fx.graph import CodeGen
from torch.fx.passes.infra.pass_base import PassBase, PassResult
from torch.fx.passes.shape_prop import _extract_tensor_metadata, TensorMetadata
from torch.utils import _pytree as pytree


__all__ = ["_ExportPassBase"]


Argument = Any
Value = Any
Fn = Callable[..., Any]
PassType = Callable[[torch.fx.GraphModule], Optional[PassResult]]


class ExportPassBaseError(RuntimeError):
    pass


class _ExportPassBase(PassBase):
    """
    Interpreter-based pass class to help users maintain the IR spec while writing
    transformations.
    """

    @staticmethod
    def _create_dummy_node_metadata():
        return NodeMetadata({"stack_trace": "".join(traceback.format_stack(limit=1))})

    class ExportTracer(PythonKeyTracer):
        """
        Tracer used to create nodes during the retracing part of the _ExportPassBase.
        """

        def __init__(self, callback: "_ExportPassBase", codegen: CodeGen) -> None:
            super().__init__()
            self.callback = callback
            self.root = torch.nn.Module()
            self.graph = torch.fx.Graph()
            self.graph.set_codegen(codegen)
            self.tensor_attrs: Dict[str, torch.Tensor] = {}  # type: ignore[assignment]
            self.fake_tensor_mode: Optional[FakeTensorMode] = None
            self.submodules: Dict[torch.nn.Module, str] = {}

        def trace(self) -> None:
            raise ExportPassBaseError("ExportTracer doesn't support trace().")

        def create_arg(self, a: Argument) -> torch.fx.Node:
            if isinstance(a, torch.nn.Module):
                if a not in self.submodules:
                    name_submodule = f"submodule_{len(self.submodules)}"
                    self.root.add_module(name_submodule, a)
                    self.submodules[a] = name_submodule
            elif isinstance(a, FakeTensor):
                if not hasattr(a, "constant") or a.constant is None:
                    raise ExportPassBaseError(f"Cannot add {a} to graph.")
                a = a.constant
            node = super().create_arg(a)
            if (
                isinstance(a, torch.Tensor)
                and isinstance(node, torch.fx.Node)
                and node.op == "get_attr"
            ):
                self.set_metadata(node, a)
                self.callback.on_attr(ProxyValue(a, node))
            return node

        def set_metadata(
            self, node: torch.fx.Node, value: Argument,
        ) -> None:
            # propagate the fake tensor or sym nodes
            def make_val(
                x: Argument,
            ) -> Union[FakeTensor, torch.SymInt, torch.SymFloat, torch.SymBool, int, float, bool, str, None]:
                if isinstance(x, FakeTensor):
                    return x
                elif isinstance(x, torch.Tensor):
                    if x.is_quantized:
                        # TODO (tmanlaibaatar) properly support Quantized FakeTensor
                        x = torch.dequantize(x)

                    try:
                        assert self.fake_tensor_mode is not None
                        # TODO we should allocate static shapes
                        # for param/buffer values
                        if isinstance(x, torch.nn.Parameter):
                            fake_tensor = self.fake_tensor_mode.from_tensor(
                                x, static_shapes=True
                            )
                        else:
                            fake_tensor = self.fake_tensor_mode.from_tensor(x)
                    except UnsupportedFakeTensorException:
                        # TODO: This is just a workaround to get over the
                        # x.as_subclass error
                        print(
                            "Fakeifying a Tensor subclass is not supported \
                            right now. Instead a TensorMetadata is used."
                        )
                        fake_tensor = None
                    return fake_tensor
                elif isinstance(x, (torch.SymInt, torch.SymFloat, torch.SymBool, int, float, bool, str)):
                    return x
                else:
                    return None

            node.meta["val"] = pytree.tree_map(make_val, value)

            # Set the tensor_metadata for values that do not have a corresponding FakeTensor
            def make_tensor_meta(x: Argument) -> Optional[TensorMetadata]:
                if not isinstance(x, FakeTensor) and isinstance(x, torch.Tensor):
                    if x.is_quantized:
                        # TODO (tmanlaibaatar) properly support Quantized FakeTensor
                        x = torch.dequantize(x)

                    try:
                        assert self.fake_tensor_mode is not None
                        _ = self.fake_tensor_mode.from_tensor(x)
                        tensor_meta = None
                    except UnsupportedFakeTensorException:
                        # TODO: This is just a workaround to get over the
                        # x.as_subclass error
                        tensor_meta = _extract_tensor_metadata(x)
                    return tensor_meta
                else:
                    return None

            node.meta["tensor_meta"] = pytree.tree_map(make_tensor_meta, value)

    class ExportInterpreter(fx.Interpreter):
        """
        Interpreter to callback on any _ExportPassBase functions
        """

        def __init__(self, callback: "_ExportPassBase", gm: fx.GraphModule) -> None:
            super().__init__(gm)
            self.callback = callback
            self.node: torch.fx.Node = next(iter(gm.graph.nodes))

        def placeholder(
            self,
            target: str,
            args: Tuple[Argument, ...],
            kwargs: Dict[str, Argument],
        ) -> ProxyValue:
            arg = super().placeholder(target, args, kwargs)
            return self.callback.placeholder(target, arg, NodeMetadata(self.node.meta))

        def output(
            self,
            target: torch.fx.node.Target,
            args: Tuple[Argument, ...],
            kwargs: Dict[str, Argument],
        ) -> ProxyValue:
            return self.callback.output(args[0], NodeMetadata(self.node.meta)).data

        def call_function(
            self,
            target: torch.fx.node.Target,
            args: Tuple[Argument, ...],
            kwargs: Dict[str, Argument],
        ) -> ProxyValue:
            meta = NodeMetadata(self.node.meta)

            if target == operator.getitem:
                value, key = args
                return self.callback.call_getitem(value, key, meta)
            elif getattr(target, "__module__", None) == "_operator":
                assert callable(target)
                return self.callback.call_sym(target, args, meta)
            elif isinstance(target, (torch._ops.OpOverload, torch._ops.OpOverloadPacket)):
                return self.callback.call_operator(
                    target,
                    args,
                    kwargs,
                    meta,
                )
            elif target == torch.ops.higher_order.cond:
                pred, true_fn, false_fn, inputs = args
                return self.callback.call_cond(pred, true_fn, false_fn, inputs, meta)
            elif target == torch.ops.higher_order.map_impl:
                f, num_args, *rest = args  # type: ignore[assignment]
                return self.callback.call_map(f, num_args, list(rest), meta)
            # For other unregistered HigherOrderOps, just interpret them blindly
            elif isinstance(target, torch._ops.HigherOrderOperator):
                return self.callback._fx(
                    "call_function",
                    target,
                    args,
                    kwargs,
                    meta,
                )
            else:
                raise ExportPassBaseError(f"Unsupported target type: {target}")

        def get_attr(
            self, target: str, args: Tuple[Argument, ...], kwargs: Dict[str, Argument]
        ) -> Argument:
            return super().get_attr(target, args, kwargs)

        def call_module(
            self,
            target: torch.fx.node.Target,
            args: Tuple[Argument, ...],
            kwargs: Dict[str, Argument],
        ) -> None:
            raise ExportPassBaseError("call_module is not supported.")

        def call_method(
            self, target: str, args: Tuple[Argument, ...], kwargs: Dict[str, Argument]
        ) -> None:
            raise ExportPassBaseError("call_method is not supported.")

        def run_node(self, n: torch.fx.Node) -> Argument:
            self.node = n
            self.callback.node_debug_str = n.format_node()
            return super().run_node(n)

    def __init__(self) -> None:
        self.interpreter = torch.fx.Interpreter(
            torch.fx.GraphModule(torch.nn.Module(), torch.fx.Graph())
        )
        self.tracer = self.ExportTracer(self, CodeGen())
        self.fake_tensor_mode: Optional[FakeTensorMode] = None
        self._initialized = True
        self.node_debug_str: typing.Optional[str] = None

    def _fx(
        self,
        kind: str,
        target: torch.fx.node.Target,
        args: Tuple[Argument, ...],
        kwargs: Dict[str, Argument],
        meta: NodeMetadata,
    ) -> ProxyValue:
        args_data, kwargs_data = pytree.tree_map_only(
            ProxyValue, lambda x: x.data, (args, kwargs)
        )
        res_data = getattr(self.interpreter, kind)(target, args_data, kwargs_data)
        args_proxy, kwargs_proxy = pytree.tree_map_only(
            ProxyValue, lambda x: x.proxy, (args, kwargs)
        )

        name = None
        if isinstance(target, torch._ops.OpOverload):
            name = self.tracer.graph._target_to_str(target.overloadpacket.__name__)

        res_proxy = self.tracer.create_proxy(kind, target, args_proxy, kwargs_proxy, name=name)
        res_proxy.node.meta.update(meta.data)
        self.tracer.set_metadata(res_proxy.node, res_data)
        return ProxyValue(res_data, res_proxy)

    def inputs(self, graph_module: torch.fx.GraphModule) -> List[Argument]:
        # TODO(angelayi): Update this with what we decide to do for metadata in
        # the exported graph module
        if (args := graph_module.meta.get("args", None)) is not None:
            return list(args)

        def extract_input(node: torch.fx.Node) -> Optional[FakeTensor]:
            if "val" in node.meta:
                fake = node.meta["val"]
                if hasattr(fake, "constant") and fake.constant is not None:
                    return fake.constant
                return fake
            elif tensor_meta := node.meta.get("tensor_meta"):
                assert self.fake_tensor_mode is not None
                return FakeTensor(
                    self.fake_tensor_mode,
                    torch.empty(
                        tensor_meta.shape,
                        dtype=tensor_meta.dtype,
                        device="meta",
                        requires_grad=tensor_meta.requires_grad,
                        memory_format=tensor_meta.memory_format,
                    ),
                    torch.device("cpu"),
                )
            elif len(node.users) == 0:
                return None
            raise ExportPassBaseError(
                f"Cannot construct an input for graph module: {graph_module}.",
            )

        return [
            extract_input(node)
            for node in graph_module.graph.nodes
            if node.op == "placeholder"
        ]

    def on_attr(self, attr: ProxyValue) -> None:
        pass

    def placeholder(self, name: str, arg: Argument, meta: NodeMetadata) -> ProxyValue:
        arg_proxy = self.tracer.create_proxy("placeholder", name, (), {})
        arg_proxy.node.meta = meta.data
        self.tracer.set_metadata(arg_proxy.node, arg)
        return ProxyValue(arg, arg_proxy)

    def call_operator(
        self,
        op,
        args: Tuple[Argument, ...],
        kwargs: Dict[str, Argument],
        meta: NodeMetadata,
    ) -> ProxyValue:
        return self._fx("call_function", op, args, kwargs, meta)

    def call_sym(
        self,
        target: Fn,
        args: Tuple[Argument, ...],
        meta: NodeMetadata,
    ) -> ProxyValue:
        return self._fx("call_function", target, args, {}, meta)

    def call_cond(
        self,
        pred: ProxyValue,
        true_fn: torch.fx.GraphModule,
        false_fn: torch.fx.GraphModule,
        inputs: List[Argument],
        meta: NodeMetadata,
    ) -> ProxyValue:
        true_branch = self.call_submodule(true_fn, tuple(inputs))
        false_branch = self.call_submodule(false_fn, tuple(inputs))
        assert true_branch is not None
        assert false_branch is not None
        return self._fx(
            "call_function",
            torch.ops.higher_order.cond,
            (pred, true_branch.graph_module, false_branch.graph_module, list(inputs)),
            {},
            meta,
        )

    def call_map(
        self,
        f: torch.fx.GraphModule,
        num_args: int,
        args: List[ProxyValue],
        meta: NodeMetadata,
    ) -> ProxyValue:
        xs = _unstack_pytree([arg.data for arg in args[:num_args]])[0]
        pos_args = args[num_args:]
        f_branch = self.call_submodule(f, tuple(xs + [arg.data for arg in pos_args]))
        assert f_branch is not None
        return self._fx(
            "call_function",
            torch.ops.higher_order.map_impl,
            (f_branch.graph_module, num_args, *args),
            {},
            meta,
        )

    def call_getitem(
        self, value: ProxyValue, key: int, meta: NodeMetadata
    ) -> ProxyValue:
        return self._fx("call_function", operator.getitem, (value, key), {}, meta)

    def output(self, results: List[Argument], meta: NodeMetadata) -> ProxyValue:
        return self._fx("output", "output", (results,), {}, meta)

    def call_submodule(
        self, graph_module: fx.GraphModule, inputs: Tuple[Argument, ...]
    ) -> PassResult:
        prev_tracer, self.tracer = self.tracer, self.ExportTracer(
            self, graph_module.graph._codegen
        )
        self.tracer.fake_tensor_mode = prev_tracer.fake_tensor_mode
        interpreter = self.ExportInterpreter(self, graph_module)
        prev_interpreter, self.interpreter = self.interpreter, torch.fx.Interpreter(
            torch.fx.GraphModule(torch.nn.Module(), torch.fx.Graph())
        )
        inputs_data = pytree.tree_map_only(ProxyValue, lambda x: x.data, inputs)
        with fx_traceback.preserve_node_meta():
            interpreter.run(*inputs_data)

        new_graph_module = torch.fx.GraphModule(self.tracer.root, self.tracer.graph)

        self.tracer = prev_tracer
        self.interpreter = prev_interpreter
        return PassResult(
            new_graph_module,
            True,
        )

    def call(self, graph_module: fx.GraphModule) -> PassResult:
        if not getattr(self, "_initialized", False):
            raise ExportPassBaseError(
                "ExportPass is not initialized with __init__().",
            )

        inputs = self.inputs(graph_module)

        fake_tensor_mode = None
        for i in inputs:
            if isinstance(i, FakeTensor):
                assert (
                    fake_tensor_mode is None or fake_tensor_mode is i.fake_mode
                ), "Multiple fake tensor mode detected."
                fake_tensor_mode = i.fake_mode
        if fake_tensor_mode is None:
            self.tracer.fake_tensor_mode = FakeTensorMode(allow_non_fake_inputs=True)
            fake_tensor_mode = nullcontext()  # type: ignore[assignment]
            dispatcher_mode = nullcontext()  # type: ignore[assignment]
        else:
            fake_tensor_mode.allow_non_fake_inputs = True
            self.tracer.fake_tensor_mode = fake_tensor_mode
            dispatcher_mode = enable_python_dispatcher()  # type: ignore[assignment]
        self.fake_tensor_mode = self.tracer.fake_tensor_mode

        with fake_tensor_mode, dispatcher_mode:  # type: ignore[assignment, union-attr]
            result = self.call_submodule(graph_module, tuple(inputs))

        return result
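For orientation, here is a minimal sketch (not part of this diff) of how a transformation might be written on top of _ExportPassBase: the interpreter replays the exported graph, and call_operator is the hook where individual ops can be rewritten. The pass name and the add-to-sub rewrite are illustrative only, and the input is assumed to be a graph module produced by torch.export, so placeholders carry fake-tensor "val" metadata.

# Illustrative sketch only: a pass that rewrites aten.add into aten.sub.
import torch
from torch._export.pass_base import _ExportPassBase

class ReplaceAddWithSubPass(_ExportPassBase):  # hypothetical pass name
    def call_operator(self, op, args, kwargs, meta):
        if op == torch.ops.aten.add.Tensor:
            # Re-emit the node with a different target; the base class propagates
            # metadata through its tracer when the node is recreated.
            return super().call_operator(torch.ops.aten.sub.Tensor, args, kwargs, meta)
        return super().call_operator(op, args, kwargs, meta)

# Assumed usage on a graph module that came out of torch.export:
#   result = ReplaceAddWithSubPass()(exported_program.graph_module)
#   new_gm = result.graph_module  # PassResult from torch.fx.passes.infra.pass_base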
env-llmeval/lib/python3.10/site-packages/torch/_export/unflatten.py
ADDED
@@ -0,0 +1,642 @@
import copy
import operator
from copy import deepcopy
from typing import cast, Dict, List, Optional, Union

import torch
import torch.fx._pytree as fx_pytree
import torch.utils._pytree as pytree
from torch.export import ExportedProgram
from torch.export.exported_program import (
    ConstantArgument,
    ModuleCallSignature,
    SymIntArgument,
    TensorArgument,
)
from torch.fx import GraphModule
from .utils import _check_input_constraints_pre_hook


# Assign attribute 'from_obj' to the qualified name 'target' on 'to_module'.
# This installs empty Modules where none exist yet if they are subpaths of target
def _assign_attr(
    from_obj: torch.Tensor,
    to_module: torch.nn.Module,
    target: str,
    is_parameter: bool,
):
    *prefix, field = target.split(".")
    for item in prefix:
        t = getattr(to_module, item, None)

        if t is None:
            t = torch.nn.Module()
            setattr(to_module, item, t)
        to_module = t

    # If it is a tensor and not a parameter attribute of a module, it should be a named buffer.
    # So, we register it as a named buffer in the target module.
    if not isinstance(from_obj, torch.Tensor):
        raise ValueError("Expected only parameters or buffers, got:", type(from_obj))

    if is_parameter:
        to_module.register_parameter(field, torch.nn.Parameter(from_obj))
    else:
        to_module.register_buffer(field, from_obj)


class _UnflattenedModule(torch.fx.GraphModule):
    def __init__(self, export_module: ExportedProgram):
        if export_module.graph_signature.backward_signature is not None:
            raise ValueError("Unflattening on JointExportModule NYI")
        super().__init__({}, torch.fx.Graph(), "_UnflattenedModule")

        export_graph = deepcopy(export_module.graph)
        self.graph_signature = deepcopy(export_module.graph_signature)
        self.module_call_graph = deepcopy(export_module.module_call_graph)
        _inplace_buffer_mutations(export_graph, self.graph_signature)
        _outline_submodules(export_graph, self)

        self.range_constraints = export_module.range_constraints
        self.equality_constraints = export_module.equality_constraints

        state_dict = export_module.state_dict
        for name in self.graph_signature.parameters:
            cloned = state_dict[name].clone()
            _assign_attr(
                cloned,
                self,
                name,
                is_parameter=True,
            )
        for name in self.graph_signature.buffers:
            cloned = state_dict[name].clone()
            _assign_attr(
                cloned,
                self,
                name,
                is_parameter=False,
            )

        inputs_to_state: Dict[str, str] = {
            **self.graph_signature.inputs_to_parameters,
            **self.graph_signature.inputs_to_buffers,
        }

        _sink_params(self, inputs_to_state, [])
        # Check that all input nodes have been processed.
        for module in self.modules():
            if not isinstance(module, torch.fx.GraphModule):
                continue
            for node in module.graph.nodes:
                if node.op != "placeholder":
                    continue
                assert node.name not in inputs_to_state

    def __call__(self, *args, **kwargs):
        flat_args, in_spec = pytree.tree_flatten((args, kwargs))
        assert self.module_call_graph[0].fqn == ""
        signature = self.module_call_graph[0].signature
        if in_spec != signature.in_spec:
            raise TypeError(
                f"Input treespec does not match with exported module's. "
                "Are you sure you are calling this with the right arguments? "
                f"Input treespec: {in_spec}. ",
                f"Exported module treespec: {signature.in_spec}",
            )

        # TODO(zhxchen17) Use lineno map to dump the original stacktrace during error handling.
        tree_out = super().__call__(*flat_args)
        return pytree.tree_unflatten(tree_out, signature.out_spec)


def unflatten(module: ExportedProgram) -> _UnflattenedModule:
    """Unflatten an ExportedProgram, producing a module with the same module
    hierarchy as the original eager module.
    """
    module = _UnflattenedModule(module)
    module.register_forward_pre_hook(_check_input_constraints_pre_hook)
    return module


def _inplace_buffer_mutations(graph: torch.fx.Graph, graph_signature) -> None:
    """Transform buffer mutations from their functionalized form into a copy_
    node in the graph.

    Functionalization represents buffer mutation by passing the buffer as an input and output. So for example, the eager code:
        def forward(self, x):
            self.buffer += x
            return x * x

    Will become a graph that looks like:
        def forward(self, buffer, x):
            mutated_buffer = aten.add(buffer, x)
            mul = aten.mul(x, x)
            return (mutated_buffer, mul)

    We want to inplace this into something that looks like the original eager code:
        def forward(self, buffer, x):
            mutated_buffer = aten.add(buffer, x)
            buffer.copy_(mutated_buffer)
            mul = aten.mul(x, x)
            return (mul,)
    """
    output_node = next(iter(reversed(graph.nodes)))
    assert output_node.op == "output" and len(output_node.args) == 1
    return_args = output_node.args[0]

    mutation_node_to_buffer = graph_signature.buffers_to_mutate
    mutations = return_args[: len(mutation_node_to_buffer)]
    buffers_to_inputs = {v: k for k, v in graph_signature.inputs_to_buffers.items()}
    input_name_to_node = {
        node.name: node for node in graph.nodes if node.op == "placeholder"
    }

    for mutation in mutations:
        buffer_name = mutation_node_to_buffer[mutation.name]
        input_name = buffers_to_inputs[buffer_name]
        input_node = input_name_to_node[input_name]

        with graph.inserting_after(mutation):
            new_node = graph.create_node(
                "call_function", torch.ops.aten.copy_, (input_node, mutation)
            )
            for k, v in mutation.meta.items():
                new_node.meta[k] = v
            # Replace all uses of the previously functional mutation with our copy_ output.
            mutation.replace_all_uses_with(new_node, lambda x: x is not new_node)

    # Remove the mutated buffer from the graph outputs, since we don't need to
    # thread it through anymore. We don't need to handle the inputs, which will
    # be handled by _sink_params.
    user_outputs = tuple(
        return_args[len(mutation_node_to_buffer) :],
    )
    output_node.args = ((user_outputs),)


def is_prefix(candidate, target):
    """Check whether `candidate` is a prefix of `target`."""
    return len(candidate) < len(target) and target[: len(candidate)] == candidate


def compute_accessor(parent_fqn: str, child_fqn: str) -> str:
    if parent_fqn == "":
        # Handle the root module correctly.
        return child_fqn

    parent_split = parent_fqn.split(".")
    child_split = child_fqn.split(".")

    assert (
        child_split[: len(parent_split)] == parent_split
    ), f"Child module '{child_fqn}' is not a descendant of parent module '{parent_fqn}'"
    return ".".join(child_split[len(parent_split) :])


def _verify_graph_equivalence(x: torch.fx.GraphModule, y: torch.fx.GraphModule):
    def graph_dump(graph: torch.fx.Graph) -> str:
        ret = []
        nodes_idx: Dict[int, int] = {}

        def arg_dump(arg) -> str:
            if isinstance(arg, torch.fx.Node):
                return "%" + str(nodes_idx[id(arg)])
            return str(arg)

        for i, node in enumerate(graph.nodes):
            args_dump = [str(arg) for arg in pytree.tree_map(arg_dump, node.args)]
            args_dump += [
                f"{key}={value}"
                for key, value in pytree.tree_map(arg_dump, node.kwargs).items()
            ]
            target = node.target if node.op == "call_function" else ""
            ret.append(f"{i}: {node.op}[{target}]({', '.join(args_dump)})")
            nodes_idx[id(node)] = i
        return "\n".join(ret)

    assert graph_dump(x.graph) == graph_dump(y.graph)


def _add_spec(gm: torch.fx.GraphModule, spec) -> str:
    i = 0
    while hasattr(gm, f"_spec_{i}"):
        i += 1
    name = f"_spec_{i}"
    setattr(gm, name, spec)
    return name


def _generate_flatten(gm: torch.fx.GraphModule, node, spec) -> torch.fx.Node:
    name = _add_spec(gm, spec)
    spec_node = gm.graph.get_attr(name)
    return gm.graph.call_function(fx_pytree.tree_flatten_spec, (node, spec_node))


def _generate_unflatten(gm: torch.fx.GraphModule, nodes, spec) -> torch.fx.Node:
    name = _add_spec(gm, spec)
    spec_node = gm.graph.get_attr(name)
    return gm.graph.call_function(pytree.tree_unflatten, (nodes, spec_node))


class ModuleFrame:
    def __init__(
        self,
        flat_graph,
        seen_nodes,
        seen_modules,
        parent,
        module_stack,
        module_id,
        module_call_graph: Dict[str, ModuleCallSignature],
        graph_module=None,
    ):
        self.flat_graph = flat_graph
        self.seen_nodes = seen_nodes
        self.seen_modules = seen_modules
        self.parent = parent
        self.module_stack = module_stack
        self.module_id = module_id

        self.module_call_graph = module_call_graph
        self.verbose = False

        self.fqn = self.module_stack[-1]
        if graph_module is not None:
            self.graph_module = graph_module
        else:
            # InterpreterModule doesn't work with torch.compile:
            # 1. in-place compile: nn.Module compile the forward function, and if we overwrite __call__,
            #    in-place compile will not be effective
            # 2. out-of-place compile: there are a lot of graph guard failures on "self" in the
            #    InterpreterModule
            # self.graph_module = InterpreterModule(
            self.graph_module = torch.fx.GraphModule(
                {},
                torch.fx.Graph(),
                self.fqn,
            )
            self.graph_module.meta["module_call_signature"] = module_call_graph.get(
                self.fqn
            )

        if self.module_id in self.seen_modules:
            self.cached_graph_module = self.seen_modules[self.module_id]
        else:
            self.cached_graph_module = None
            self.seen_modules[self.module_id] = self.graph_module

        self.nodes = list(self.flat_graph.nodes)
        self.graph = self.graph_module.graph

        # Mapping of nodes in the flat graph to nodes in this graph.
        self.node_map: Dict[torch.fx.Node, torch.fx.Node] = {}
        self.node_to_placeholder = {}

        self.parent_call_module: Optional[torch.fx.Node] = None
        if parent is not None:
            accessor = compute_accessor(parent.fqn, self.fqn)
            parent.graph_module.add_submodule(
                accessor,
                self.graph_module
                if self.cached_graph_module is None
                else self.cached_graph_module,
            )
            self.parent_call_module = parent.graph.call_module(accessor)

        signature = module_call_graph.get(self.fqn)
        if signature is not None and self.parent is not None:
            assert len(signature.in_spec.children_specs) == 2
            args_spec = signature.in_spec.children_specs[0]
            kwargs_spec = signature.in_spec.children_specs[1]
            assert args_spec.context is None
            assert kwargs_spec.context is not None

            with self.graph_module.graph.inserting_after(None):
                arg_nodes = []
                for idx in range(len(args_spec.children_specs)):
                    arg_nodes.append(
                        self.graph_module.graph.placeholder(f"_positional_arg_{idx}")
                    )
                kwarg_nodes = {}
                for name in kwargs_spec.context:
                    kwarg_nodes[name] = self.graph_module.graph.placeholder(name)
                flat_args = _generate_flatten(
                    self.graph_module,
                    (tuple(arg_nodes), kwarg_nodes),
                    signature.in_spec,
                )
                for idx, arg in enumerate(signature.inputs):
                    flat_arg_node = self.graph_module.graph.create_node(
                        op="call_function",
                        target=operator.getitem,
                        args=(flat_args, idx),
                        name=arg.name
                        if not isinstance(arg, ConstantArgument)
                        else f"_constant_{idx}",
                    )
                    if isinstance(arg, ConstantArgument):
                        continue
                    flat_arg_node.meta = copy.copy(self.seen_nodes[arg.name].meta)
                    self.node_to_placeholder[self.seen_nodes[arg.name]] = flat_arg_node

            with self.parent.graph.inserting_before(self.parent_call_module):
                nodes: List[Optional[torch.fx.Node]] = []
                for input in signature.inputs:
                    if isinstance(input, ConstantArgument) and input.value is None:
                        nodes.append(None)
                    else:
                        assert isinstance(input, (TensorArgument, SymIntArgument))
                        nodes.append(
                            self.parent.remap_input(self.seen_nodes[input.name])
                        )

                inputs_node = _generate_unflatten(
                    self.parent.graph_module,
                    nodes,
                    signature.in_spec,
                )

                args_node = self.parent.graph.call_function(
                    operator.getitem, (inputs_node, 0)
                )
                kwargs_node = self.parent.graph.call_function(
                    operator.getitem, (inputs_node, 1)
                )
                arg_nodes = [
                    self.parent.graph.call_function(operator.getitem, (args_node, i))
                    for i in range(len(args_spec.children_specs))
                ]
                kwarg_nodes = {
                    k: self.parent.graph.call_function(
                        operator.getitem, (kwargs_node, k)
                    )
                    for k in kwargs_spec.context
                }
            assert self.parent_call_module is not None
            self.parent_call_module.args = tuple(arg_nodes)
            self.parent_call_module.kwargs = kwarg_nodes

    def add_placeholder(self, x):
        assert x.graph is self.flat_graph
        # x is not in subgraph, create a new placeholder for subgraph
        with self.graph.inserting_before(None):
            placeholder_node = self.graph.placeholder(x.name, type_expr=x.type)
        # copy all meta fields, even if some fields might be irrelevant for
        # the placeholder node
        placeholder_node.meta = copy.copy(x.meta)
        self.node_to_placeholder[x] = placeholder_node

    def remap_input(self, x):
        assert x.graph is self.flat_graph
        if x in self.node_map:
            return self.node_map[x]
        if x not in self.node_to_placeholder:
            self.add_placeholder(x)
            if self.parent_call_module is not None:
                # Important to *prepend* the output to match how we are
                # inserting placeholder nodes.
                self.parent_call_module.insert_arg(0, self.parent.remap_input(x))
        return self.node_to_placeholder[x]

    def finalize_outputs(self):
        orig_outputs = []

        signature = self.module_call_graph.get(self.fqn)
        if signature is not None and self.parent is not None:
            for output in signature.outputs:
                if isinstance(output, (TensorArgument, SymIntArgument)):
                    orig_outputs.append(self.seen_nodes[output.name])
                else:
                    raise RuntimeError(
                        f"Unsupported data type for output node: {output}"
                    )

            tree_out_node = _generate_unflatten(
                self.graph_module,
                tuple(
                    self.node_map[self.seen_nodes[output.name]]
                    for output in orig_outputs
                ),
                signature.out_spec,
            )
            parent_out: Optional[torch.fx.Node] = _generate_flatten(
                self.parent.graph_module, self.parent_call_module, signature.out_spec
            )
            graph_outputs: Union[torch.fx.Node, List[torch.fx.Node]] = tree_out_node
        else:
            graph_outputs = []
            # Iterate through nodes we have copied into self.graph.
            for orig_node in self.node_map.keys():
                for user_node in orig_node.users:
                    if user_node.name not in self.seen_nodes:
                        # external user node, need to expose as an output
                        orig_outputs.append(orig_node)
                        graph_outputs.append(self.node_map[orig_node])
                        break

            parent_out = self.parent_call_module
            if len(graph_outputs) == 1:
                graph_outputs = graph_outputs[0]

        assert isinstance(graph_outputs, (list, torch.fx.Node))

        self.graph.output(graph_outputs)

        # lint to ensure correctness
        self.graph.lint()
        self.graph_module.recompile()

        # Rewrite outputs in parent module
        if parent_out is None:
            return

        if len(orig_outputs) == 1 and signature is None:
            self.parent.node_map[orig_outputs[0]] = parent_out
        else:
            for i, orig_output in enumerate(orig_outputs):
                # Use Proxy to record getitem access.
                proxy_out = torch.fx.Proxy(parent_out)[i].node  # type: ignore[index]
                self.parent.node_map[orig_output] = proxy_out

        if self.cached_graph_module is not None:
            _verify_graph_equivalence(self.cached_graph_module, self.graph_module)

    def copy_node(self, node):
        self.print("copying", node.format_node())
        self.node_map[node] = self.graph.node_copy(node, self.remap_input)
        self.seen_nodes[node.name] = node

    def run_outer(self):
        i = 0
        for node in self.flat_graph.nodes:
            self.print(i, node.meta.get("nn_module_stack"), node.format_node())
            i += 1

        # Copy all graph inputs
        node_idx: int = 0
        node = self.nodes[node_idx]
        while node.op == "placeholder":
            self.copy_node(node)
            node_idx += 1
            node = self.nodes[node_idx]

        self.run_from(node_idx)

        # Copy graph outputs
        for node in self.flat_graph.nodes:
            if node.op == "output":
                self.copy_node(node)

    def print(self, *args, **kwargs):
        if self.verbose:
            print(*args, **kwargs)

    def run_from(self, node_idx):
        module_idx = 0
        # Walk through the graph, building up a new graph with the right submodules
        while node_idx < len(self.nodes):
            node = self.nodes[node_idx]
            assert node.op != "placeholder"

            self.print()
            self.print("STEP", node_idx, node.format_node())
            self.print(self.module_stack)
            if node.op == "output":
                if len(self.module_stack) == 1:
                    # We want the output node of the original graph to be handled
                    # specially by the outermost stack frame (in run_outer). So
                    # skip finalization here.
                    return node_idx

                # We've reached the end of the graph. Wrap up all the existing stack frames.
                self.finalize_outputs()
                return node_idx

            node_module_stack = (
                [path for path, ty in node.meta["nn_module_stack"].values()]
                if "nn_module_stack" in node.meta
                else self.module_stack
            )
            if node_module_stack[: len(self.module_stack)] != self.module_stack:
                # This means that the current module is done executing and the
                # current node is the beginning of a new module.
                #
                # In this case, we should finalize this module and return without
                # incrementing the node counter.
                self.finalize_outputs()
                self.print("outlining", self.fqn)
                self.print(self.graph)
                return node_idx

            assert node_module_stack is not None

            if is_prefix(self.module_stack, node_module_stack):
                # This means that the current node represents the execution of a new
                # module.
                next_module = node_module_stack[len(self.module_stack)]
                self.print("Creating new stack frame for", next_module)
                # Run a nested version of module outliner from the current node
                # counter. Once it is complete, continue from that point.
                node_idx = ModuleFrame(
                    self.flat_graph,
                    self.seen_nodes,
                    self.seen_modules,
                    self,
                    self.module_stack + [next_module],
                    list(node.meta["nn_module_stack"].keys())[len(self.module_stack)],
                    self.module_call_graph,
                ).run_from(node_idx)
                module_idx += 1
                continue

            # The only remaining possibility is that we are in the right stack
            # frame. Copy the node into this frame's graph and increment the node counter.
            assert node_module_stack == self.module_stack
            self.copy_node(node)
            node_idx += 1


def _outline_submodules(orig_graph: torch.fx.Graph, root_module: torch.fx.GraphModule):
    seen_nodes: Dict[str, torch.fx.Node] = {}
    seen_modules: Dict[int, torch.nn.Module] = {}
    ModuleFrame(
        orig_graph,
        seen_nodes,
        seen_modules,
        None,
        [""],
        "",
        {
            entry.fqn: entry.signature
            for entry in root_module.module_call_graph
            if entry.signature
        },
        graph_module=root_module,
    ).run_outer()


def _sink_params(
    module: GraphModule,
    inputs_to_state: Dict[str, str],
    scope: List[str],
):
    """Sink params and buffers from graph inputs into get_attr nodes.

    Exported modules are purely functional, so they pass their parameters and
    buffers in as inputs to the graph.

    To replicate eager's semantics, we need to get them from the module state
    via get_attr instead.

    module: GraphModule, potentially containing nested submodules.
    inputs_to_state: mapping graph input names to the corresponding key in the state_dict.
    scope: tracks where we are in the module hierarchy, so that we can emit the
        right `getattr(self, "foo.bar")` calls, etc.
    """
    for name, submodule in module._modules.items():
        _sink_params(cast(GraphModule, submodule), inputs_to_state, scope + [name])

    if not isinstance(module, GraphModule):
        return

    graph = module.graph
    inputs = filter(lambda n: n.op == "placeholder", graph.nodes)

    # Also remove from call_module nodes
    call_module_nodes = filter(lambda n: n.op == "call_module", graph.nodes)
    for node in call_module_nodes:
        node.args = tuple(filter(lambda n: n.name not in inputs_to_state, node.args))

    for node in inputs:
        if node.name not in inputs_to_state:
            continue

        if len(node.users) > 0:
            state_name = inputs_to_state[node.name].split(".")
            # If there's a mismatch between scope name and state name, then there must be multiple scopes
            # pointing to the same state name, meaning some modules are shared. In such case, we can simply
            # skip updating the current node because another later iteration will take care of this input
            # node when the unique match between scope and state name occurs.
            # To make sure this always happens, we should enforce the invariant that no placeholder node
            # in the unflattened graph appears in inputs_to_state dict, which means all the extra input
            # nodes have been handled.
            if state_name[: len(scope)] != scope:
                continue
            attr_path = state_name[len(scope) :]
            state_attr = _recursive_getattr(module, attr_path)
            assert isinstance(state_attr, torch.Tensor)

            with graph.inserting_after(node):
                new_node = graph.create_node("get_attr", ".".join(attr_path))

            node.replace_all_uses_with(new_node, propagate_meta=True)
        graph.erase_node(node)
    module.recompile()


def _recursive_getattr(obj, attr_path):
    for attr in attr_path:
        obj = getattr(obj, attr)

    return obj
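As a usage note (not part of this diff), a minimal sketch of what unflatten is for: exporting a small module and recovering a module hierarchy that mirrors the eager one. The module definitions here are illustrative, and torch.export.export from the same PyTorch release is assumed as the entry point.

# Illustrative sketch only: round-trip a tiny module through export + unflatten.
import torch
from torch._export.unflatten import unflatten

class Child(torch.nn.Module):
    def forward(self, x):
        return x + 1

class Parent(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.child = Child()

    def forward(self, x):
        return self.child(x) * 2

ep = torch.export.export(Parent(), (torch.randn(3),))  # assumed export entry point
unflat = unflatten(ep)        # _UnflattenedModule exposing a `child` submodule
out = unflat(torch.randn(3))  # called with the same signature as the eager module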
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.62 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc
ADDED
Binary file (17.2 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc
ADDED
Binary file (4.48 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc
ADDED
Binary file (68.6 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/comm_analysis.cpython-310.pyc
ADDED
Binary file (4.49 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/comms.cpython-310.pyc
ADDED
Binary file (10.1 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc
ADDED
Binary file (31.5 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc
ADDED
Binary file (8.95 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/constant_folding.cpython-310.pyc
ADDED
Binary file (5.16 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/coordinate_descent_tuner.cpython-310.pyc
ADDED
Binary file (7.25 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc
ADDED
Binary file (60.8 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/debug.cpython-310.pyc
ADDED
Binary file (16.7 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/decomposition.cpython-310.pyc
ADDED
Binary file (15.2 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/dependencies.cpython-310.pyc
ADDED
Binary file (15.8 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc
ADDED
Binary file (4.3 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/freezing.cpython-310.pyc
ADDED
Binary file (9.3 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc
ADDED
Binary file (5.57 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc
ADDED
Binary file (30.3 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc
ADDED
Binary file (621 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc
ADDED
Binary file (10.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc
ADDED
Binary file (3.57 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc
ADDED
Binary file (202 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc
ADDED
Binary file (133 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/metrics.cpython-310.pyc
ADDED
Binary file (4.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc
ADDED
Binary file (2.63 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/pattern_matcher.cpython-310.pyc
ADDED
Binary file (46.2 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/quantized_lowerings.cpython-310.pyc
ADDED
Binary file (486 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc
ADDED
Binary file (72 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/select_algorithm.cpython-310.pyc
ADDED
Binary file (29.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/sizevars.cpython-310.pyc
ADDED
Binary file (19.6 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/test_operators.cpython-310.pyc
ADDED
Binary file (1.27 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_helpers.cpython-310.pyc
ADDED
Binary file (4.14 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_heuristics.cpython-310.pyc
ADDED
Binary file (29.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (39.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/virtualized.cpython-310.pyc
ADDED
Binary file (14.3 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc
ADDED
Binary file (8.89 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_10.cpython-310.pyc
ADDED
Binary file (5.38 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_9.cpython-310.pyc
ADDED
Binary file (5.95 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (281 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/bmm.cpython-310.pyc
ADDED
Binary file (3.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/conv.cpython-310.pyc
ADDED
Binary file (11.2 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm.cpython-310.pyc
ADDED
Binary file (7.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_common.cpython-310.pyc
ADDED
Binary file (5.41 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_plus_mm.cpython-310.pyc
ADDED
Binary file (5.26 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/unpack_mixed_mm.cpython-310.pyc
ADDED
Binary file (3 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/bmm.py
ADDED
@@ -0,0 +1,128 @@
+import torch
+
+from ..lowering import register_lowering
+from ..select_algorithm import (
+    autotune_select_algorithm,
+    ExternKernelChoice,
+    TritonTemplate,
+)
+from ..utils import ceildiv as cdiv, use_aten_gemm_kernels, use_triton_template
+
+from .mm_common import addmm_epilogue, mm_args, mm_configs, mm_options
+
+aten = torch.ops.aten
+
+
+def bmm_grid(b, m, n, meta):
+    return (cdiv(m, meta["BLOCK_M"]) * cdiv(n, meta["BLOCK_N"]), b, 1)
+
+
+bmm_template = TritonTemplate(
+    name="bmm",
+    grid=bmm_grid,
+    source=r"""
+{{def_kernel("A", "B")}}
+    M = {{size("A", -2)}}
+    N = {{size("B", -1)}}
+    K = {{size("A", -1)}}
+
+    stride_aq = {{stride("A", 0)}}
+    stride_am = {{stride("A", 1)}}
+    stride_ak = {{stride("A", 2)}}
+
+    stride_bq = {{stride("B", 0)}}
+    stride_bk = {{stride("B", 1)}}
+    stride_bn = {{stride("B", 2)}}
+
+    # based on triton.ops.matmul
+    pid = tl.program_id(0)
+    grid_m = (M + BLOCK_M - 1) // BLOCK_M
+    grid_n = (N + BLOCK_N - 1) // BLOCK_N
+
+    # re-order program ID for better L2 performance
+    width = GROUP_M * grid_n
+    group_id = pid // width
+    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
+    pid_m = group_id * GROUP_M + (pid % group_size)
+    pid_n = (pid % width) // (group_size)
+
+    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
+    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
+    ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
+    rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
+    rk = tl.arange(0, BLOCK_K)
+
+    idx_q = tl.program_id(1)  # batch dimension for BMM
+    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak + idx_q*stride_aq)
+    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn + idx_q*stride_bq)
+
+    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
+    for k in range(K, 0, -BLOCK_K):
+        if EVEN_K:
+            a = tl.load(A)
+            b = tl.load(B)
+        else:
+            a = tl.load(A, mask=rk[None, :] < k, other=0.)
+            b = tl.load(B, mask=rk[:, None] < k, other=0.)
+        acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
+        A += BLOCK_K * stride_ak
+        B += BLOCK_K * stride_bk
+
+    # rematerialize rm and rn to save registers
+    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
+    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
+    idx_q = tl.program_id(1)  # batch dimension for BMM
+    idx_m = rm[:, None]
+    idx_n = rn[None, :]
+    mask = (idx_m < M) & (idx_n < N)
+
+    # inductor generates a suffix
+    {{store_output(("idx_q", "idx_m", "idx_n"), "acc", "mask")}}
+""",
+)
+
+aten_bmm = ExternKernelChoice(torch.bmm, "at::bmm_out")
+aten_baddbmm = ExternKernelChoice(torch.baddbmm, "at::baddbmm_out")
+
+
+@register_lowering(aten.bmm)
+def tuned_bmm(mat1, mat2, *, layout=None):
+    m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout)
+
+    # options to tune from
+    choices = [aten_bmm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else []
+    if use_triton_template(layout):
+        for config in mm_configs(m, n, k):
+            bmm_template.maybe_append_choice(
+                choices,
+                input_nodes=(mat1, mat2),
+                layout=layout,
+                **mm_options(config, k, layout),
+            )
+
+    return autotune_select_algorithm("bmm", choices, [mat1, mat2], layout)
+
+
+# Don't register this since it is slower than decomposing it
+# @register_lowering(aten.baddbmm)
+def tuned_baddbmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):
+    m, n, k, layout, mat1, mat2, inp = mm_args(mat1, mat2, inp, layout=layout)
+
+    # options to tune from
+    choices = (
+        [aten_baddbmm.bind((inp, mat1, mat2), layout, alpha=alpha, beta=beta)]
+        if use_aten_gemm_kernels()
+        else []
+    )
+    if use_triton_template(layout):
+        for config in mm_configs(m, n, k):
+            bmm_template.maybe_append_choice(
+                choices,
+                input_nodes=(inp, mat1, mat2),
+                layout=layout,
+                **mm_options(config, k, layout),
+                prefix_args=1,
+                epilogue_fn=addmm_epilogue(layout.dtype, alpha, beta),
+            )
+
+    return autotune_select_algorithm("baddbmm", choices, [inp, mat1, mat2], layout)
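For context, a small self-contained sketch of the launch-grid arithmetic used by the bmm template above: the first grid axis packs every (M, N) output tile of one batch element, and the second axis is the batch dimension read back as tl.program_id(1) inside the kernel. ceildiv is restated in plain Python here, standing in for the cdiv helper imported from ..utils.

def ceildiv(a, b):
    # Integer ceiling division, mirroring the cdiv helper used by bmm_grid.
    return -(a // -b)


def bmm_grid(b, m, n, meta):
    # One program per (BLOCK_M x BLOCK_N) output tile, replicated over the batch.
    return (ceildiv(m, meta["BLOCK_M"]) * ceildiv(n, meta["BLOCK_N"]), b, 1)


# Example: batch=8, M=512, N=768 with 128x64 blocks launches 4 * 12 = 48 tile
# programs on the first axis and 8 batch slices on the second.
print(bmm_grid(8, 512, 768, {"BLOCK_M": 128, "BLOCK_N": 64}))  # (48, 8, 1)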
env-llmeval/lib/python3.10/site-packages/torch/_inductor/kernel/mm_common.py
ADDED
@@ -0,0 +1,222 @@
+import functools
+import logging
+from typing import cast, List, Tuple
+
+import sympy
+
+import torch
+from torch._inductor.select_algorithm import realize_inputs
+from torch._inductor.virtualized import V
+
+from ..utils import ceildiv as cdiv, next_power_of_2
+
+log = logging.getLogger(__name__)
+
+
+def triton_config(num_stages, num_warps, **kwargs):
+    from triton import Config
+
+    return Config(kwargs, num_stages=num_stages, num_warps=num_warps)
+
+
+def filtered_configs(
+    m: int,
+    n: int,
+    k: int,
+    configs: List[Tuple[int, int, int, int, int]],
+    has_int8_tensor=False,
+):
+    """Heuristic to shrink configs when they are bigger than the input size"""
+
+    # According to https://github.com/openai/triton/issues/2156#issuecomment-1695897424
+    # it's safer to use at least [32, 32] block size for int8/uint8
+    # tensors
+    min_block_size = 32 if has_int8_tensor else 16
+    m = max(
+        next_power_of_2(
+            V.graph.sizevars.size_hint(
+                m, fallback=torch._inductor.config.unbacked_symint_fallback
+            )
+        ),
+        min_block_size,
+    )
+    n = max(
+        next_power_of_2(
+            V.graph.sizevars.size_hint(
+                n, fallback=torch._inductor.config.unbacked_symint_fallback
+            )
+        ),
+        min_block_size,
+    )
+    k = max(
+        next_power_of_2(
+            V.graph.sizevars.size_hint(
+                k, fallback=torch._inductor.config.unbacked_symint_fallback
+            )
+        ),
+        min_block_size,
+    )
+    used = set()
+    for block_m, block_n, block_k, num_stages, num_warps in configs:
+        # shrink configs for small sizes
+        block_m = max(min(block_m, m), min_block_size)
+        block_n = max(min(block_n, n), min_block_size)
+        block_k = max(min(block_k, k), min_block_size)
+        # each warp computes 16x16 tile = 256
+        num_warps = min(num_warps, block_m * block_n // 256)
+        if (block_m, block_n, block_k, num_stages, num_warps) not in used:
+            used.add((block_m, block_n, block_k, num_stages, num_warps))
+            yield triton_config(
+                BLOCK_M=block_m,
+                BLOCK_N=block_n,
+                BLOCK_K=block_k,
+                num_stages=num_stages,
+                num_warps=num_warps,
+            )
+
+
+# List of dictionaries to store the kernel configs. Configs that evaluate to true
+# will be utilised on the target platform
+mm_kernel_configs = [
+    # "BLOCK_M", "BLOCK_N", "BLOCK_K", "num_stages", "num_warps"
+    {"config": (64, 64, 32, 2, 4), "cond": True},
+    {"config": (64, 128, 32, 3, 4), "cond": True},
+    {"config": (128, 64, 32, 3, 4), "cond": True},
+    {"config": (64, 128, 32, 4, 8), "cond": True},
+    {"config": (128, 64, 32, 4, 8), "cond": True},
+    {"config": (64, 32, 32, 5, 8), "cond": True},
+    {"config": (32, 64, 32, 5, 8), "cond": True},
+    {"config": (128, 128, 32, 2, 8), "cond": True},
+    {"config": (64, 64, 64, 3, 8), "cond": True},
+    {"config": (32, 32, 128, 2, 4), "cond": torch.version.hip is None},
+    {"config": (64, 64, 16, 2, 4), "cond": True},
+    {"config": (32, 32, 16, 1, 2), "cond": True},
+]
+
+int8_mm_kernel_configs = [
+    {"config": (64, 64, 32, 2, 4), "cond": True},
+    {"config": (64, 128, 32, 3, 4), "cond": True},
+    {"config": (128, 64, 32, 3, 4), "cond": True},
+    {"config": (64, 128, 32, 4, 8), "cond": True},
+    {"config": (128, 64, 32, 4, 8), "cond": True},
+    {"config": (64, 32, 32, 5, 8), "cond": True},
+    {"config": (32, 64, 32, 5, 8), "cond": True},
+    {"config": (128, 128, 32, 2, 8), "cond": True},
+    {"config": (64, 64, 64, 3, 8), "cond": True},
+    # {"config": (32, 32, 128, 2, 4), "cond": True},
+    # {"config": (64, 64, 16, 2, 4), "cond": True},
+    # {"config": (32, 32, 16, 1, 2), "cond": True},
+    {"config": (128, 256, 128, 3, 8), "cond": torch.version.hip is None},
+    {"config": (256, 128, 128, 3, 8), "cond": torch.version.hip is None},
+]
+
+# Create filtered list of configs based on cond evaluation
+
+
+mm_platform_configs = tuple(
+    cast(Tuple[int, int, int, int, int], config["config"])
+    for config in mm_kernel_configs
+    if config["cond"]
+)
+int8_platform_configs = tuple(
+    cast(Tuple[int, int, int, int, int], config["config"])
+    for config in int8_mm_kernel_configs
+    if config["cond"]
+)
+
+# On ROCm convert num_stages to 1 as pipelining provides no benefit
+if torch.version.hip:
+    mm_platform_configs = tuple(
+        (config[0], config[1], config[2], 1, config[4])
+        for config in mm_platform_configs
+    )
+    int8_platform_configs = tuple(
+        (config[0], config[1], config[2], 1, config[4])
+        for config in mm_platform_configs
+    )
+
+mm_configs = functools.partial(
+    filtered_configs,
+    configs=mm_platform_configs,
+)
+
+int8_mm_configs = functools.partial(
+    filtered_configs,
+    configs=int8_platform_configs,
+)
+
+
+def mm_grid(m, n, meta):
+    """
+    The CUDA grid size for matmul triton templates.
+    """
+    return (cdiv(m, meta["BLOCK_M"]) * cdiv(n, meta["BLOCK_N"]), 1, 1)
+
+
+def acc_type(dtype):
+    if dtype in (torch.float16, torch.bfloat16):
+        return "tl.float32"
+    return f"tl.{dtype}".replace("torch.", "")
+
+
+def mm_options(config, sym_k, layout, b_prologue_cast_type=None):
+    """
+    Common options to matmul triton templates.
+    """
+    even_k_symbolic = (
+        # it isn't worth guarding on this
+        sympy.gcd(sym_k, config.kwargs["BLOCK_K"])
+        == config.kwargs["BLOCK_K"]
+    )
+    return dict(
+        GROUP_M=8,
+        EVEN_K=even_k_symbolic,
+        ALLOW_TF32=torch.backends.cuda.matmul.allow_tf32,
+        ACC_TYPE=acc_type(layout.dtype),
+        B_PROLOGUE_CAST_TYPE=b_prologue_cast_type,
+        num_stages=config.num_stages,
+        num_warps=config.num_warps,
+        **config.kwargs,
+    )
+
+
+def mm_args(mat1, mat2, *others, layout=None, out_dtype=None, use_4x2_dim=False):
+    """
+    Common arg processing for mm, bmm, addmm, etc
+    """
+    mat1, mat2 = realize_inputs(mat1, mat2)
+    *b1, m, k1 = mat1.get_size()
+    *b2, k2, n = mat2.get_size()
+    b = [V.graph.sizevars.guard_equals(a, b) for a, b in zip(b1, b2)]
+    if use_4x2_dim:
+        k2 = k2 * 2
+    k = V.graph.sizevars.guard_equals(k1, k2)
+    if layout is None:
+        from torch._inductor.ir import FixedLayout
+
+        if out_dtype is None:
+            out_dtype = mat1.get_dtype()
+        layout = FixedLayout(
+            mat1.get_device(),
+            out_dtype,
+            [*b, m, n],
+        )
+    else:
+        assert out_dtype is None, "out_dtype is ignored if layout is specified."
+
+    from ..lowering import expand
+
+    others = [realize_inputs(expand(x, layout.size)) for x in others]
+
+    return [m, n, k, layout, mat1, mat2, *others]
+
+
+def addmm_epilogue(dtype, alpha, beta):
+    def epilogue(acc, bias):
+        if alpha != 1:
+            acc = V.ops.mul(acc, V.ops.constant(alpha, dtype))
+        if beta != 1:
+            bias = V.ops.mul(bias, V.ops.constant(beta, dtype))
+        return V.ops.add(acc, bias)
+
+    return epilogue
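For context, a minimal self-contained sketch of the block-shrinking heuristic in filtered_configs above, with plain Python ints standing in for the size_hint / next_power_of_2 machinery (so m, n, k are assumed to already be power-of-two rounded): oversized (BLOCK_M, BLOCK_N, BLOCK_K) candidates are clamped to the problem size, and num_warps is capped so each warp still has a 16x16 tile of the output block to compute.

def shrink_config(block_m, block_n, block_k, num_warps, m, n, k, min_block_size=16):
    # Shrink the candidate blocks for small problems, but never below min_block_size.
    block_m = max(min(block_m, m), min_block_size)
    block_n = max(min(block_n, n), min_block_size)
    block_k = max(min(block_k, k), min_block_size)
    # Each warp computes a 16x16 tile (256 elements), so cap num_warps accordingly.
    num_warps = min(num_warps, block_m * block_n // 256)
    return block_m, block_n, block_k, num_warps


# A (128, 128, 32) config with 8 warps applied to a tiny 32x32x32 matmul
# collapses to (32, 32, 32) blocks with at most 32*32//256 = 4 warps.
print(shrink_config(128, 128, 32, 8, 32, 32, 32))  # (32, 32, 32, 4)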