applied-ai-018 committed
Commit 7e3c8e3 · verified · 1 Parent(s): 85a7cec

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/_pytree.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__init__.py +11 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/__init__.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/annotate_getitem_nodes.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/fake_tensor_prop.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_drawer.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_manipulation.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/net_min_base.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/operator_support.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/param_fetch.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/pass_manager.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/reinplace.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/shape_prop.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_module.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_utils.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/splitter_base.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/tools_common.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/annotate_getitem_nodes.py +44 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/backends/__init__.py +0 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/__init__.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/cudagraphs.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/backends/cudagraphs.py +56 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py +0 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/dialect/__pycache__/__init__.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__init__.py +0 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/__init__.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/cse_pass.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/dialect/common/cse_pass.py +112 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/fake_tensor_prop.py +61 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/graph_manipulation.py +110 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/__init__.py +2 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/__init__.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/partitioner.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_base.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_manager.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/partitioner.py +278 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/pass_base.py +75 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/pass_manager.py +303 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/net_min_base.py +686 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/operator_support.py +220 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/param_fetch.py +66 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/pass_manager.py +247 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/reinplace.py +675 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/shape_prop.py +193 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/split_module.py +507 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/split_utils.py +298 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/splitter_base.py +871 -0
env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/_pytree.cpython-310.pyc ADDED
Binary file (3.5 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc ADDED
Binary file (24.2 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc ADDED
Binary file (2.64 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc ADDED
Binary file (19.8 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__init__.py ADDED
@@ -0,0 +1,11 @@
+ from . import graph_drawer
+ from . import graph_manipulation
+ from . import net_min_base
+ from . import operator_support
+ from . import param_fetch
+ from . import reinplace
+ from . import shape_prop
+ from . import split_module
+ from . import split_utils
+ from . import splitter_base
+ from . import tools_common
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (571 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/annotate_getitem_nodes.cpython-310.pyc ADDED
Binary file (1.43 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/fake_tensor_prop.cpython-310.pyc ADDED
Binary file (2.89 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_drawer.cpython-310.pyc ADDED
Binary file (11.9 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_manipulation.cpython-310.pyc ADDED
Binary file (3.58 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/net_min_base.cpython-310.pyc ADDED
Binary file (19.4 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/operator_support.cpython-310.pyc ADDED
Binary file (7.55 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/param_fetch.cpython-310.pyc ADDED
Binary file (2.71 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/pass_manager.cpython-310.pyc ADDED
Binary file (7.32 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/reinplace.cpython-310.pyc ADDED
Binary file (18.7 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/shape_prop.cpython-310.pyc ADDED
Binary file (5.75 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_module.cpython-310.pyc ADDED
Binary file (13.3 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_utils.cpython-310.pyc ADDED
Binary file (6.89 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/splitter_base.cpython-310.pyc ADDED
Binary file (25.2 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/__pycache__/tools_common.cpython-310.pyc ADDED
Binary file (7.15 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/annotate_getitem_nodes.py ADDED
@@ -0,0 +1,44 @@
+ import operator
+
+ import torch
+
+
+ def annotate_getitem_nodes(graph: torch.fx.Graph) -> None:
+     """
+     Annotate the type of getitem nodes, inferred from the type of the sequence node.
+     If the sequence node is not annotated with a type, do nothing.
+     Currently supports getitem nodes whose sequence node is a Tuple, List, or NamedTuple.
+
+     This is helpful since annotations on local names within a function are lost during FX transforms.
+     Adding back known type annotations for getitem nodes improves jit scriptability.
+
+     Args:
+         graph (Graph): The graph to be annotated
+     """
+     for node in graph.nodes:
+         if node.target == operator.getitem:
+             sequence_node, index_node = node.args
+             if not sequence_node.type:
+                 continue
+             # container types
+             if hasattr(sequence_node.type, "_name"):
+                 parameterized_types = sequence_node.type.__args__
+                 if sequence_node.type._name == "Tuple":
+                     if len(parameterized_types) == 2 and isinstance(
+                         parameterized_types[1], type(...)
+                     ):
+                         node.type = parameterized_types[0]
+                     else:
+                         assert len(parameterized_types) > index_node
+                         node_type = parameterized_types[index_node]
+                         node.type = node_type
+                 elif sequence_node.type._name == "List":
+                     assert len(parameterized_types) == 1
+                     node.type = parameterized_types[0]
+             # NamedTuple type
+             elif hasattr(sequence_node.type, "__annotations__"):
+                 if sequence_node.type == torch.Tensor:
+                     continue
+                 sequence_node_field_types = sequence_node.type.__annotations__
+                 field_name = sequence_node.type._fields[index_node]
+                 node.type = sequence_node_field_types[field_name]
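A minimal usage sketch of the pass above (not part of this commit; the traced function, its typed signature, and the printed output are illustrative):

    from typing import Tuple
    import torch
    from torch.fx import symbolic_trace
    from torch.fx.passes.annotate_getitem_nodes import annotate_getitem_nodes

    def f(pair: Tuple[torch.Tensor, int]) -> torch.Tensor:
        # pair[0] / pair[1] become operator.getitem nodes after tracing
        return pair[0] * pair[1]

    gm = symbolic_trace(f)
    annotate_getitem_nodes(gm.graph)   # getitem nodes now carry Tensor / int annotations
    for node in gm.graph.nodes:
        print(node.name, node.type)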
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/backends/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/cudagraphs.cpython-310.pyc ADDED
Binary file (2.2 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/backends/cudagraphs.py ADDED
@@ -0,0 +1,56 @@
+ import torch
+ from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
+ from torch.fx.passes.operator_support import OperatorSupport
+ from torch.fx.passes.tools_common import CALLABLE_NODE_OPS
+ from torch.fx.passes.fake_tensor_prop import FakeTensorProp
+ from torch.utils import _pytree as pytree
+
+ import operator
+
+ class CudaGraphsSupport(OperatorSupport):
+     # TODO: why is submodules passed here
+     def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
+         if node.op not in CALLABLE_NODE_OPS:
+             return False
+
+         if node.target in [torch.ops.aten.embedding_dense_backward.default]:
+             return False
+
+         if node.target in [operator.getitem]:
+             return True
+
+         found_not_cuda = False
+
+         def meta_fk(meta):
+             return meta["val"] if "val" in meta else meta["fake_result"]
+
+         def find_not_cuda(t):
+             nonlocal found_not_cuda
+             if isinstance(t, torch.Tensor) and t.device.type != 'cuda':
+                 found_not_cuda = True
+
+         for n in node.all_input_nodes:
+             pytree.tree_map_(find_not_cuda, meta_fk(n.meta))
+
+         pytree.tree_map_(find_not_cuda, meta_fk(node.meta))
+
+         # NB: factory function is accounted for because the result would be
+         # cpu or cuda
+
+         return not found_not_cuda
+
+ def partition_cudagraphs(gm, inputs):
+     """
+     Partition an FX graph into sub-GraphModules that can be validly run under
+     CUDA graphs. For a subgraph to be runnable under CUDA graphs, all of the
+     operations must involve CUDA tensors only.
+     """
+
+     FakeTensorProp(gm).propagate(*inputs)
+     supported_ops = CudaGraphsSupport()
+     # TODO: single node partition may be wrong due to the pessimization
+     # from copying in and out the data. Check in benchmarks, perhaps
+     partitioner = CapabilityBasedPartitioner(gm, supported_ops, allows_single_node_partition=True)
+     partitions = partitioner.propose_partitions()
+     fused_graph = partitioner.fuse_partitions(partitions)
+     return fused_graph
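A hedged sketch of how partition_cudagraphs might be driven (illustrative only; assumes a CUDA device is available and an aten-level graph produced by make_fx):

    import torch
    from torch.fx.experimental.proxy_tensor import make_fx
    from torch.fx.passes.backends.cudagraphs import partition_cudagraphs

    def f(x, y):
        return (x + y).relu() * 2

    inputs = [torch.randn(8, device="cuda"), torch.randn(8, device="cuda")]
    gm = make_fx(f)(*inputs)                  # aten-level GraphModule
    fused = partition_cudagraphs(gm, inputs)  # CUDA-only subgraphs become fused submodules
    print(fused.graph)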
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/dialect/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/cse_pass.cpython-310.pyc ADDED
Binary file (3.82 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/dialect/common/cse_pass.py ADDED
@@ -0,0 +1,112 @@
+ from typing import Dict, Tuple, Any
+
+ import torch
+ from torch.fx.passes.infra.pass_base import PassBase, PassResult
+ from torch.utils._pytree import tree_flatten
+
+ from torch.fx import GraphModule, Graph
+ from torch.fx import Node
+
+ aten = torch.ops.aten
+
+
+ # stateful ops are banned from CSE
+ rand_ops = {aten.dropout, aten._fused_dropout, aten._standard_gamma, aten.bernoulli, aten.multinomial, aten.native_dropout, aten.normal, aten.poisson, aten.binomial, aten.rrelu, aten.rand_like, aten.rand, aten.randint, aten.randn, aten.randperm}  # noqa: E501,B950
+
+ inplace_ops = {aten.add_, aten.sub_, aten.mul_, aten.div_, aten.pow_, aten.lerp_, aten.relu_, aten.sigmoid_, aten.tanh_}  # noqa: E501
+
+
+ @torch.fx._compatibility.compatibility(is_backward_compatible=False)
+ def get_CSE_banned_ops():
+     return rand_ops.union(inplace_ops)
+
+
+ @torch.fx._compatibility.compatibility(is_backward_compatible=False)
+ class CSEPass(PassBase):
+
+     def __init__(self, banned_ops=None):
+         """
+         This version of the CSE pass aims to be dialect agnostic; it is implemented purely in terms of the connectivity between fx.Node objects.
+
+         For functional dialects, users only need to specify the random ops in the ban list.
+
+         Warning: the CSE pass cannot be safely applied to an FX graph in a non-functional dialect.
+         If your dialect contains stateful operators, please customize banned_ops.
+
+         """
+         if banned_ops is None:
+             banned_ops = set()
+         self.banned_ops = banned_ops
+         super().__init__()
+
+     def call(self, graph_module: GraphModule) -> PassResult:
+         """
+         Return a new copy of torch.fx.GraphModule with CSE applied to the input graph.
+
+         Example usage:
+
+         from torch.fx.experimental.proxy_tensor import make_fx
+         def f(a):
+             b = a * a
+             c = a * a
+             return b + c
+
+         p = CSEPass()
+         traced_graph = make_fx(f)(torch.tensor(1))
+         print(traced_graph)
+         result = p(traced_graph)
+         print(result.graph_module)
+         """
+         def get_aten_target(node):
+             if hasattr(node.target, 'overloadpacket'):
+                 return node.target.overloadpacket
+             return node.target
+
+         modified = False
+         new_graph = Graph()
+         env: Dict[Node, Node] = {}  # map from node in the old graph to node in the new graph
+         hash_env: Dict[Tuple[torch._ops.OpOverload, int], Node] = {}  # map from hash to a node in the new graph
+         token_map: Dict[Tuple[torch._ops.OpOverload, int], Dict[str, Any]] = {}  # map from hash to token
+         for n in graph_module.graph.nodes:
+             # The placeholder, output, and get_attr nodes are copied to the new graph without change
+             # do not CSE away random operations
+             if n.op == 'placeholder' or n.op == 'output' or n.op == 'get_attr' or get_aten_target(n) in self.banned_ops:
+                 new_node = new_graph.node_copy(n, lambda x: env[x])
+                 env[n] = new_node
+             else:  # n.op == 'call_function', should never see n.op == 'call_module' or 'call_method'
+                 # substitute args and kwargs members to their mapping in env if it exists
+                 # specs can be used to reconstruct nested lists/dictionaries
+                 def substitute(arg_list):
+                     arg_list, spec = tree_flatten(arg_list)
+                     for i in range(len(arg_list)):
+                         v = arg_list[i]
+                         if isinstance(v, Node) and v in env:
+                             arg_list[i] = env[v]
+                     return tuple(arg_list), spec
+                 args, args_spec = substitute(n.args)
+                 kwargs, kwargs_spec = substitute(n.kwargs)
+
+                 # each token corresponds to a unique node
+                 # nodes with the same token can be substituted
+                 token = {"target": n.target, "args": args, "args_spec": args_spec,
+                          "kwargs": kwargs, "kwargs_spec": kwargs_spec}
+
+                 # hash substituted args to a number, do not hash specs because specs are not hashable
+                 hash_arg = hash((args, kwargs))
+                 hash_val = (n.target, hash_arg)
+
+                 # check if a node has a substitute and can be eliminated
+                 hash_val_in_hash_env = hash_val in hash_env
+                 if hash_val_in_hash_env and token_map[hash_val] == token:
+                     modified = True  # substitution happens and the graph is modified
+                     env[n] = hash_env[hash_val]
+                     continue
+
+                 new_node = new_graph.node_copy(n, lambda x: env[x])
+                 env[n] = new_node
+                 if not hash_val_in_hash_env:
+                     hash_env[hash_val] = new_node
+                     token_map[hash_val] = token
+
+         csed_gm = GraphModule(graph_module, new_graph)
+         return PassResult(csed_gm, modified)
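The docstring above already carries a usage example; a slightly fuller sketch (illustrative, combining the pass with the default ban list exported by get_CSE_banned_ops) might look like:

    import torch
    from torch.fx.experimental.proxy_tensor import make_fx
    from torch.fx.passes.dialect.common.cse_pass import CSEPass, get_CSE_banned_ops

    def f(a):
        b = a * a
        c = a * a          # duplicate of b, a CSE candidate
        return b + c

    gm = make_fx(f)(torch.randn(3))
    result = CSEPass(banned_ops=get_CSE_banned_ops())(gm)
    print(result.modified)              # True when a substitution happened
    print(result.graph_module.graph)    # the duplicated multiply is eliminated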
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/fake_tensor_prop.py ADDED
@@ -0,0 +1,61 @@
+ from typing import Optional
+
+ import torch.fx
+ from torch.fx import Node
+ from torch.fx._compatibility import compatibility
+ from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
+ from torch.fx.experimental.proxy_tensor import py_sym_types, snapshot_fake
+ from torch.fx.node import map_aggregate
+
+ __all__ = ['FakeTensorProp']
+
+ @compatibility(is_backward_compatible=False)
+ class FakeTensorProp(torch.fx.Interpreter):
+     """
+     Execute an FX graph Node-by-Node and record a fake tensor representing
+     the metadata for the node. Unlike ShapeProp, (1) this propagation
+     is cheap--it does the propagation with meta tensors which do not actually
+     store data, and (2) the fake tensors have much more fine grained information,
+     e.g., they have accurate alias information that can be consulted by looking
+     at the storages.
+
+     Args:
+         module (GraphModule): The module to be executed
+         mode (Optional[FakeTensorMode]): The dispatch mode used to execute computation indicated by each FX Node.
+     """
+     def __init__(self, module: torch.fx.GraphModule, mode: Optional[FakeTensorMode] = None):
+         super().__init__(module)
+         if mode is None:
+             mode = FakeTensorMode()
+         self._mode = mode
+
+     def run_node(self, n: Node):
+         result = super().run_node(n)
+
+         def extract_val(obj):
+             if isinstance(obj, FakeTensor):
+                 return snapshot_fake(obj)
+             elif isinstance(obj, torch.Tensor):
+                 # TODO: How is it possible that we get a non fake tensor? We
+                 # should be running under the mode...
+                 return snapshot_fake(self._mode.from_tensor(obj, static_shapes=True))
+             elif isinstance(obj, py_sym_types):
+                 return obj
+             else:
+                 return None
+
+         meta = map_aggregate(result, extract_val)
+         if meta is not None:
+             n.meta['val'] = meta
+         return result
+
+     def propagate(self, *args):
+         fake_args = [
+             self._mode.from_tensor(a) if isinstance(a, torch.Tensor) else a
+             for a in args
+         ]
+         return self.propagate_dont_convert_inputs(*fake_args)
+
+     def propagate_dont_convert_inputs(self, *args):
+         with self._mode:
+             return super().run(*args)
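A short sketch of the propagation (illustrative; the traced function and shapes are arbitrary):

    import torch
    from torch.fx import symbolic_trace
    from torch.fx.passes.fake_tensor_prop import FakeTensorProp

    def f(x):
        return x.relu() + 1

    gm = symbolic_trace(f)
    FakeTensorProp(gm).propagate(torch.randn(2, 3))   # executes on fake tensors only
    for node in gm.graph.nodes:
        print(node.name, node.meta.get("val"))        # fake tensors carrying shape/dtype/device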
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/graph_manipulation.py ADDED
@@ -0,0 +1,110 @@
+ from typing import Any, Dict, List, NamedTuple, Optional
+
+ import torch
+ from torch.fx._compatibility import compatibility
+ from torch.fx.graph import Graph
+ from torch.fx.graph_module import GraphModule
+ from torch.fx.node import (
+     map_arg,
+     Node,
+     Target,
+ )
+ from torch.fx.passes.shape_prop import ShapeProp
+
+ __all__ = ['replace_target_nodes_with', 'size_bytes', 'get_size_of_all_nodes', 'get_tensor_meta',
+            'get_size_of_node']
+
+ @compatibility(is_backward_compatible=False)
+ def replace_target_nodes_with(
+     fx_module: GraphModule,
+     old_op: str,
+     old_target: Target,
+     new_op: str,
+     new_target: Target,
+ ):
+     """Modifies all nodes in fx_module.graph.nodes which match the specified op code and target,
+     and updates them to match the new op code and target"""
+     new_graph = Graph()
+     val_map: Dict[Node, Node] = {}
+     for node in fx_module.graph.nodes:
+         if node.op == old_op and node.target == old_target:
+             args = map_arg(node.args, lambda n: val_map[n])
+             kwargs = map_arg(node.kwargs, lambda n: val_map[n])
+             assert isinstance(args, tuple)
+             assert isinstance(kwargs, dict)
+             val_map[node] = new_graph.create_node(
+                 new_op, new_target, args, kwargs, node.name
+             )
+         else:
+             val_map[node] = new_graph.node_copy(node, lambda n: val_map[n])
+     fx_module.graph = new_graph
+
+
+ @compatibility(is_backward_compatible=False)
+ class size_bytes(NamedTuple):
+     output_size: int
+     total_size: int
+
+
+ @compatibility(is_backward_compatible=False)
+ def get_size_of_all_nodes(
+     fx_module: GraphModule, args: Optional[List[torch.Tensor]] = None
+ ) -> None:
+     """Given an fx graph module, update each node with its total size (weights + bias + output)
+     and its output_size (output). For a non-module node, the total size is the output size.
+     """
+     if args is not None:
+         # Mark shape and dtype for each node (node.shape and node.dtype)
+         ShapeProp(fx_module).propagate(*args)
+     # Calculate the total size of the whole fx graph
+     total_size_of_graph = 0.0
+     for node in fx_module.graph.nodes:
+         if node.op == "output":
+             break
+         node.size_bytes = get_size_of_node(fx_module, node)
+     return
+
+
+ @compatibility(is_backward_compatible=False)
+ def get_tensor_meta(node: Node) -> Any:
+     tensor_meta = node.meta.get("tensor_meta")
+
+     if not tensor_meta:
+         raise RuntimeError(
+             f"Node {node} has no tensor metadata associated with it! "
+             f"Check that shape propagation has run."
+         )
+
+     return tensor_meta
+
+
+ @compatibility(is_backward_compatible=False)
+ def get_size_of_node(fx_module: GraphModule, node: Node) -> size_bytes:
+     """Given a node with node.dtype and node.shape, return its total size and its output size.
+     total_size = weights + bias + output_size
+     """
+     # Total num of elements
+     total_num_of_elems = 0
+     # For a module, consider all parameters
+     if node.op == "call_module":
+         submodule_dict = dict(fx_module.named_modules())
+         submodule = submodule_dict[node.target]
+         parameters = submodule.named_parameters()
+         # Parameters are named tuples
+         for name, p in parameters:
+             total_num_of_elems += p.numel()
+     # Don't forget the output size
+     # node.shape is the shape of this node's output
+     tensor_meta = get_tensor_meta(node)
+     output_elem = tensor_meta.shape.numel()
+     total_num_of_elems += output_elem
+     # Assume for now if it's quantized then it's qint8 or quint8
+     if tensor_meta.is_quantized:
+         size_per_elem_bytes = torch._empty_affine_quantized(
+             [], dtype=tensor_meta.dtype
+         ).element_size()
+     else:
+         size_per_elem_bytes = torch.tensor([], dtype=tensor_meta.dtype).element_size()
+     total_size = size_per_elem_bytes * total_num_of_elems
+     output_size = size_per_elem_bytes * output_elem
+     return size_bytes(output_size, total_size)
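A hedged sketch of get_size_of_all_nodes on a traced module (the module and input shapes are illustrative):

    import torch
    from torch.fx import symbolic_trace
    from torch.fx.passes.graph_manipulation import get_size_of_all_nodes

    model = torch.nn.Sequential(torch.nn.Linear(8, 4), torch.nn.ReLU())
    gm = symbolic_trace(model)
    get_size_of_all_nodes(gm, [torch.randn(2, 8)])     # runs ShapeProp, then sizes each node
    for node in gm.graph.nodes:
        if node.op != "output":
            print(node.name, node.size_bytes)          # size_bytes(output_size, total_size)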
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/__init__.py ADDED
@@ -0,0 +1,2 @@
+
+ from . import pass_manager
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (223 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/partitioner.cpython-310.pyc ADDED
Binary file (7.8 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_base.cpython-310.pyc ADDED
Binary file (3.07 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_manager.cpython-310.pyc ADDED
Binary file (9.58 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/partitioner.py ADDED
@@ -0,0 +1,278 @@
+ from typing import Dict, List, Set, Iterable, Sequence, Optional, Deque
+
+ from torch.fx.passes.utils.fuser_utils import fuse_by_partitions
+
+ from torch.fx.graph_module import GraphModule
+ from torch.fx.node import Node, _get_qualified_name
+ from torch.fx.passes.operator_support import OperatorSupportBase
+
+ import logging
+ import itertools
+ from copy import copy
+ from collections import deque
+
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.WARNING)
+
+ class Partition:
+     def __init__(self, id: Optional[int] = None, nodes: Optional[Iterable[Node]] = None):
+         self.id = id
+         self.nodes: Set[Node] = set(nodes) if nodes is not None else set()
+
+     def __repr__(self) -> str:
+         return str(self.nodes)
+
+     def add_node(self, node: Node):
+         self.nodes.add(node)
+
+     def remove_node(self, node: Node):
+         self.nodes.remove(node)
+
+     def size(self):
+         return len(self.nodes)
+
+ class CapabilityBasedPartitioner:
+
+     def __init__(self,
+                  graph_module: GraphModule,
+                  operator_support: OperatorSupportBase,
+                  allows_single_node_partition: bool = False,
+                  non_compute_ops: Optional[Sequence[str]] = None,
+                  allowed_single_node_partition_ops: Optional[Sequence[str]] = None,
+                  ) -> None:
+         self.graph_module = graph_module
+         self.operator_support = operator_support
+         self.allows_single_node_partition = allows_single_node_partition
+         self.non_compute_ops = non_compute_ops if non_compute_ops is not None else []
+         self.allowed_single_node_partition_ops = (
+             allowed_single_node_partition_ops
+             if allowed_single_node_partition_ops is not None
+             else []
+         )
+
+     def __is_node_supported(self, node: Node) -> bool:
+         return (
+             self.operator_support.is_node_supported(dict(self.graph_module.named_modules()), node)
+         )
+
+     def propose_partitions(self) -> List[Partition]:
+         # assumption: nodes in the candidate list are sorted in topological order
+         assignment: Dict[Node, int] = {}   # mapping from node to partition_id
+         partitions_by_id: Dict[int, Partition] = {}  # mapping from partition_id to partition
+         new_partition_id = itertools.count()
+
+         # try to merge partition other_id into partition self_id
+         # merge only happens if the resulting graph doesn't contain a cyclic dependency
+         # returns `True` when the merge happens, `False` otherwise.
+         def maybe_merge_partition(self_id: int, other_id: int):
+             # merged_nodes is the union of nodes in the two partitions to be merged
+             merged_nodes = copy(partitions_by_id[self_id].nodes)
+             merged_nodes.update(partitions_by_id[other_id].nodes)
+
+             # Note it's ok to use `set` here, since we only query whether a node
+             # has been visited. We are NEVER going to iterate on nodes inside
+             # the set.
+             visited: Set[Node] = set()
+
+             def dfs_iter_find_cycle(root_node):
+                 stack : Deque[Node] = deque()
+                 stack.append(root_node)
+
+                 while stack:
+                     node = stack.pop()
+
+                     if node in visited:
+                         continue
+                     if node in merged_nodes:
+                         return True  # found cycle, return
+
+                     # branching on hitting partition or not
+                     if node in assignment:
+                         # Since the partition is not merged in the graph yet, when we
+                         # hit a node in a partition through DFS, we need to
+                         # traverse all nodes in the partition to properly reflect
+                         # dependencies after the fusion
+                         for p_node in partitions_by_id[assignment[node]].nodes:
+                             for user_node in p_node.users:
+                                 if user_node not in partitions_by_id[assignment[node]].nodes:
+                                     stack.append(user_node)
+                     else:
+                         for user_node in node.users:
+                             stack.append(user_node)
+
+                     visited.add(node)
+
+                 return False
+
+             # check if the merge would create a cyclic dependency.
+             for node in merged_nodes:
+                 for user_node in node.users:
+                     if user_node not in merged_nodes and dfs_iter_find_cycle(user_node):
+                         # return False: a cyclic dependency was found and the merge is aborted
+                         return False
+
+             # no cyclic dependency found, move forward with the merge
+             # updating partition nodes
+             partitions_by_id[self_id].nodes = merged_nodes
+             # updating assignment map
+             for node in partitions_by_id[other_id].nodes:
+                 assignment[node] = self_id
+             # delete other partition
+             del partitions_by_id[other_id]
+
+             return True
+
+         def merge_single_node(node: Node, id: Optional[int]):
+             if node in assignment:
+                 partitions_by_id[assignment[node]].remove_node(node)
+
+             if id is None:
+                 assignment.pop(node)
+             elif id not in partitions_by_id:
+                 assignment[node] = id
+                 partitions_by_id[id] = Partition(id=id, nodes=[node])
+             else:
+                 assignment[node] = id
+                 partitions_by_id[id].add_node(node)
+
+         logger.debug("Proposing partitions...")
+
+         for node in reversed(self.graph_module.graph.nodes):
+             # use Dict as an ordered set to ensure deterministic partitioning result, don't care value
+             merge_candidates: Dict[int, None] = {}
+
+             # Note a limited horizontal fusion is enabled:
+             # when `node` is not supported, the code below attempts to fuse consumers of `node`.
+             #
+             # I don't see a need to add a knob to disable horizontal fusion yet, we can short-cut
+             # the fusion by adding an `else` block here to skip horizontal fusion.
+             if self.__is_node_supported(node) and node not in assignment:
+                 partition_id = next(new_partition_id)
+                 merge_single_node(node, partition_id)
+                 merge_candidates[partition_id] = None
+
+             # merge all possible partitions
+             for node in assignment:
+                 merge_candidates[assignment[node]] = None
+
+             merge_candidates_list = list(merge_candidates.keys())
+             if len(merge_candidates_list) > 1:
+                 self_id = merge_candidates_list[0]
+                 for other_id in merge_candidates_list[1:]:
+                     # note: merge partition `other_id` into partition `self_id` if
+                     # it doesn't create a cyclic dependency in the graph, otherwise,
+                     # this is a no-op
+                     maybe_merge_partition(self_id, other_id)
+
+         # post processing to re-assign "getitem" nodes into upstream partition
+         logger.debug("Reassigning getitem nodes to its producer node's partition...")
+         nodes_reassignment: Dict[Node, int] = {}
+         for node in self.graph_module.graph.nodes:
+             is_tuple_output = True
+             for user in node.users:
+                 if user.op != "call_function" or \
+                    _get_qualified_name(user.target) != "_operator.getitem":     # type: ignore[arg-type]
+                     is_tuple_output = False
+                     break
+
+             # node has tuple outputs, re-assign all following getitem nodes into node's partition
+             if is_tuple_output:
+                 id = assignment.get(node, None)     # type: ignore[arg-type]
+                 for user in node.users:
+                     if assignment.get(user, None) != id:    # type: ignore[arg-type]
+                         nodes_reassignment[user] = id  # type: ignore[assignment]
+         for node, id in nodes_reassignment.items():
+             merge_single_node(node, id)
+
+         # filter out single node partitions
+         if not self.allows_single_node_partition:
+             logger.debug("Filtering out single node partitions...")
+             default_non_compute_ops = {"torch.ops.aten.view", "_operator.getitem"}
+             non_compute_ops = default_non_compute_ops.union(set(self.non_compute_ops))
+             partitions_to_remove: List[int] = []
+             for id, partition in partitions_by_id.items():
+                 compute_node_count = 0
+                 for node in partition.nodes:
+                     if node.op == "call_function":
+                         assert callable(node.target)
+                         if _get_qualified_name(node.target) not in non_compute_ops:
+                             compute_node_count += 1
+                         if _get_qualified_name(node.target) in self.allowed_single_node_partition_ops:
+                             compute_node_count += 1
+                 if compute_node_count <= 1:
+                     partitions_to_remove.append(id)
+             for id in partitions_to_remove:
+                 del partitions_by_id[id]
+
+         logger.debug("Partitions proposed:")
+         for id, partition in partitions_by_id.items():
+             logger.debug("partition #%s: %s", id, [node.name for node in partition.nodes])
+
+         return list(partitions_by_id.values())
+
+     def fuse_partitions(self, partitions: List[Partition]) -> GraphModule:
+         logger.debug("Fusing partitions...")
+         # fuse_by_partitions expects partitions in List[List[Node]]: [ [node0, node1], [node2, node3] ]
+         return fuse_by_partitions(self.graph_module, [list(partition.nodes) for partition in partitions])
+
+     # remove non-compute ops that sit at the boundary of a partition.
+     def remove_bookend_non_compute_ops(self, partitions: List[Partition]):
+         non_compute_ops = set(self.non_compute_ops)
+
+         def is_non_compute_node(node: Node):
+             return node.op == "call_function" and \
+                 _get_qualified_name(node.target) in non_compute_ops  # type: ignore[arg-type]
+
+         # cache transparent nodes
+         transparent_input_nodes: Dict[Node, bool] = {}
+         transparent_output_nodes: Dict[Node, bool] = {}
+
+         def is_transparent_input_node(node: Node, partition: Set[Node], removed_nodes: Set[Node]):
+             if node.op == "placeholder" or (node not in partition) or (node in removed_nodes):
+                 return True
+             if node in transparent_input_nodes:
+                 return transparent_input_nodes[node]
+             if is_non_compute_node(node):
+                 for input_n in node.all_input_nodes:
+                     if not is_transparent_input_node(input_n, partition, removed_nodes):
+                         transparent_input_nodes[node] = False
+                         return False
+                 transparent_input_nodes[node] = True
+                 return True
+             transparent_input_nodes[node] = False
+             return False
+
+         def is_transparent_output_node(node: Node, partition: Set[Node], removed_nodes: Set[Node]):
+             if node.op == "placeholder" or (node not in partition) or (node in removed_nodes):
+                 return True
+             if node in transparent_output_nodes:
+                 return transparent_output_nodes[node]
+             if is_non_compute_node(node):
+                 for output_n in node.users:
+                     if not is_transparent_output_node(output_n, partition, removed_nodes):
+                         transparent_output_nodes[node] = False
+                         return False
+                 transparent_output_nodes[node] = True
+                 return True
+             transparent_output_nodes[node] = False
+             return False
+
+         for partition in partitions:
+             # Note it's ok to use `set` here, since we only query whether a node
+             # has been removed. We are NEVER going to iterate on nodes inside
+             # the set.
+             remove_node: Set[Node] = set()
+             for node in partition.nodes:
+                 if is_non_compute_node(node) and \
+                     (is_transparent_input_node(node, partition.nodes, remove_node) or
+                      is_transparent_output_node(node, partition.nodes, remove_node)):
+                     remove_node.add(node)
+
+             if len(remove_node) != 0:
+                 partition.nodes = partition.nodes - remove_node
+
+     def partition_and_fuse(self) -> GraphModule:
+         partitions = self.propose_partitions()
+         fused_gm = self.fuse_partitions(partitions)
+         return fused_gm
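A hedged sketch of driving the partitioner with a toy support policy (the policy class and traced function below are illustrative, not part of this commit):

    import operator
    import torch
    from torch.fx import symbolic_trace
    from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
    from torch.fx.passes.operator_support import OperatorSupport

    class AddMulSupport(OperatorSupport):
        """Only Python-level add/mul calls are considered supported."""
        def is_node_supported(self, submodules, node):
            return node.op == "call_function" and node.target in (operator.add, operator.mul)

    def f(x, y):
        return (x + y) * y - 1      # add/mul should fuse, sub stays outside

    gm = symbolic_trace(f)
    partitioner = CapabilityBasedPartitioner(gm, AddMulSupport(), allows_single_node_partition=True)
    partitions = partitioner.propose_partitions()
    fused = partitioner.fuse_partitions(partitions)
    print(fused.graph)               # the supported add/mul nodes now live in a fused submodule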
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/pass_base.py ADDED
@@ -0,0 +1,75 @@
+ import abc
+ from collections import namedtuple
+ from typing import Optional
+
+ from torch.fx.graph_module import GraphModule
+ from torch.fx._compatibility import compatibility
+
+
+ __all__ = ['PassResult', 'PassBase']
+
+ @compatibility(is_backward_compatible=False)
+ class PassResult(namedtuple("PassResult", ["graph_module", "modified"])):
+     """
+     Result of a pass:
+         graph_module: The modified graph module
+         modified: A flag for if the pass has modified the graph module
+     """
+     def __new__(cls, graph_module, modified):
+         return super().__new__(cls, graph_module, modified)
+
+ @compatibility(is_backward_compatible=False)
+ class PassBase(abc.ABC):
+     """
+     Base interface for implementing passes.
+
+     It is required to implement the `call` function so that we can pass
+     instances of the Pass directly to the PassManager and call them as a
+     function.
+
+     We can directly pass an instance of a class implementing this interface into
+     the PassManager's `passes` attribute.
+     """
+
+     def __call__(self, graph_module: GraphModule) -> Optional[PassResult]:
+         """
+         Runs the precondition check, the pass itself, and the postcondition check.
+         """
+
+         self.requires(graph_module)
+         res = self.call(graph_module)
+         self.ensures(graph_module)
+         return res
+
+     @abc.abstractmethod
+     def call(self, graph_module: GraphModule) -> Optional[PassResult]:
+         """
+         The pass that is run through the given graph module. To implement a
+         pass, it is required to implement this function.
+
+         Args:
+             graph_module: The graph module we will run a pass on
+         """
+         pass
+
+     def requires(self, graph_module: GraphModule) -> None:  # noqa: B027
+         """
+         This function will be called before the pass is run and will check that
+         the given graph module contains the preconditions needed to run the
+         pass. It is not required to implement this function.
+
+         Args:
+             graph_module: The graph module we will run checks on
+         """
+         pass
+
+     def ensures(self, graph_module: GraphModule) -> None:  # noqa: B027
+         """
+         This function will be called after the pass is run and will check that
+         the given graph module contains the postconditions needed to run the
+         pass. It is not required to implement this function.
+
+         Args:
+             graph_module: The graph module we will run checks on
+         """
+         pass
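A hedged sketch of a pass written against this interface (the transformation itself is purely illustrative):

    import torch
    from torch.fx import GraphModule, symbolic_trace
    from torch.fx.passes.infra.pass_base import PassBase, PassResult

    class ReplaceAddWithMul(PassBase):
        """Illustrative pass: rewrite every torch.add call into torch.mul."""
        def call(self, graph_module: GraphModule) -> PassResult:
            modified = False
            for node in graph_module.graph.nodes:
                if node.op == "call_function" and node.target == torch.add:
                    node.target = torch.mul
                    modified = True
            graph_module.recompile()
            return PassResult(graph_module, modified)

    gm = symbolic_trace(lambda x, y: torch.add(x, y))
    result = ReplaceAddWithMul()(gm)      # runs requires(), call(), ensures()
    print(result.modified)                # True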
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/infra/pass_manager.py ADDED
@@ -0,0 +1,303 @@
+ import inspect
+ import logging
+ from queue import Queue
+ from functools import wraps
+ from typing import Callable, Dict, List
+
+ import torch.nn as nn
+ from torch.fx.graph_module import GraphModule
+ from torch.fx._compatibility import compatibility
+ from torch.fx.passes.infra.pass_base import PassResult
+
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.WARNING)
+
+ __all__ = ['pass_result_wrapper', 'this_before_that_pass_constraint', 'PassManager']
+
+ @compatibility(is_backward_compatible=False)
+ def pass_result_wrapper(fn: Callable) -> Callable:
+     """
+     Wrapper for passes which currently do not return a PassResult.
+     This wrapper makes them return a PassResult containing the modified object
+     and True for the "modified" flag.
+
+     Args:
+         fn (Callable[Module, Any])
+
+     Returns:
+         wrapped_fn (Callable[Module, PassResult])
+     """
+     if fn is None:
+         return None
+
+     @wraps(fn)
+     def wrapped_fn(gm):
+         res = fn(gm)
+         if res is None:
+             return PassResult(gm, True)
+         if isinstance(res, PassResult):
+             return res
+         elif isinstance(res, nn.Module):
+             return PassResult(res, True)
+
+     if not inspect.isfunction(fn):
+         wrapped_fn.__name__ = type(fn).__name__
+
+     return wrapped_fn
+
+ def _validate_pass_schedule_constraint(
+     constraint: Callable[[Callable, Callable], bool], passes: List[Callable]
+ ) -> None:
+     for i, a in enumerate(passes):
+         for j, b in enumerate(passes[i + 1 :]):
+             if constraint(a, b):
+                 continue
+             raise RuntimeError(
+                 f"pass schedule constraint violated. Expected {a} before {b}"
+                 f" but found {a} at index {i} and {b} at index {j} in pass"
+                 f" list."
+             )
+
+ def _topological_sort_passes(
+     passes: List[Callable], constraints: List[Callable]
+ ) -> List[Callable]:
+     """
+     Args
+         passes: Passes that we are ordering
+         constraints: Constraints applied on these passes
+
+     Returns
+         A sorted list of callables; raises a RuntimeError if a circular
+         dependency exists among the constraints
+     """
+     if len(constraints) == 0:
+         return passes
+
+     # Construct a graph mapping nodes to a list of their users
+     graph: Dict[Callable, List[Callable]] = {p : [] for p in passes}
+     indegree_map: Dict[Callable, int] = {p : 0 for p in passes}
+     candidates: Queue = Queue()
+     for a in passes:
+         for b in passes:
+             if a == b:
+                 continue
+
+             for constraint in constraints:
+                 if not constraint(a, b):
+                     graph[b].append(a)
+                     indegree_map[a] += 1
+
+         if indegree_map[a] == 0:
+             candidates.put(a)
+
+     visited: Dict[Callable, bool] = {p : False for p in passes}
+     sorted_passes: List[Callable] = []
+
+     while not candidates.empty():
+         p = candidates.get()
+         sorted_passes.append(p)
+         visited[p] = True
+
+         for n in graph[p]:
+             if not visited[n]:
+                 indegree_map[n] -= 1
+                 if indegree_map[n] == 0:
+                     candidates.put(n)
+
+     # Check if there are unvisited nodes (aka cycles in the graph)
+     cycle_passes = list(filter(lambda p: indegree_map[p] != 0, indegree_map.keys()))
+     if len(cycle_passes) != 0:
+         error = f"Circular dependency detected within the following passes: {cycle_passes}"
+         raise RuntimeError(error)
+
+     return sorted_passes
+
+ @compatibility(is_backward_compatible=False)
+ def this_before_that_pass_constraint(this: Callable, that: Callable) -> Callable:
+     """
+     Defines a partial order ('depends on' function) where `this` must occur
+     before `that`.
+
+     For example, the following pass list and constraint list would be invalid.
+     ```
+     passes = [pass_b, pass_a]
+
+     constraints = [
+         this_before_that_pass_constraint(pass_a, pass_b)
+     ]
+     ```
+
+     Args:
+         this (Callable): pass which should occur first
+         that (Callable): pass which should occur later
+
+     Returns:
+         depends_on (Callable[[Object, Object], bool])
+     """
+
+     def depends_on(a: Callable, b: Callable):
+         if a == that and b == this:
+             return False
+         return True
+
+     return depends_on
+
+
+ @compatibility(is_backward_compatible=False)
+ class PassManager:
+     """
+     Construct a PassManager.
+
+     Collects passes and constraints. This defines the pass schedule, manages
+     pass constraints and pass execution.
+
+     Args:
+         passes (Optional[List[Callable]]): List of passes. A pass is a
+             callable which modifies an object and returns a PassResult
+         constraint (Optional[List[Callable]]): List of constraints. A
+             constraint is a callable which takes two passes (A, B) and returns
+             True if A depends on B and False otherwise. See implementation of
+             `this_before_that_pass_constraint` for example.
+         steps (int): Max number of times we run the passes (default = 1).
+         run_checks_after_each_pass (bool): Whether to run checks and linting
+             after each pass
+         suppress_check_failures (bool): Whether to raise errors when running
+             checks
+     """
+
+     passes: List[Callable[[nn.Module], PassResult]]
+     constraints: List[Callable[[Callable, Callable], bool]]
+     _validated: bool = False
+     steps: int = 1
+
+     def __init__(
+         self,
+         passes=None,
+         constraints=None,
+         steps=None,
+         run_checks_after_each_pass: bool = False,
+         suppress_check_failures: bool = False,
+     ):
+         self.passes = passes or []
+         self.constraints = constraints or []
+         if steps:
+             self.steps = steps
+
+         self.run_checks_after_each_pass = run_checks_after_each_pass
+         self.suppress_check_failures = suppress_check_failures
+
+     def add_pass(self, _pass: Callable):
+         """
+         Adds a pass into the current list of passes.
+         """
+         self.passes.append(_pass)
+         self._validated = False
+
+     def add_constraint(self, constraint: Callable):
+         """
+         Adds a constraint into the current list of constraints.
+         """
+         self.constraints.append(constraint)
+         self._validated = False
+
+     def validate_constraints(self):
+         """
+         Validates that the current pass schedule defined by `self.passes` is valid
+         according to all constraints in `self.constraints`
+         """
+         if self._validated:
+             return
+         for constraint in self.constraints:
+             _validate_pass_schedule_constraint(constraint, self.passes)
+         self._validated = True
+
+     def solve_constraints(self):
+         """
+         Finds a valid traversal order based on the given constraints and orders
+         the passes based on this order.
+
+         If a circular dependency exists between the constraints and steps = 1,
+         then we will raise an error because if steps != 1 this means that we
+         will re-run the passes, allowing for circular dependencies.
+         """
+         self.passes = _topological_sort_passes(self.passes, self.constraints)
+         self._validated = True
+
+     def add_checks(self, check: Callable) -> None:
+         """
+         Adds a function which runs various checks on a given graph module.
+         This function is run before and after each pass if the
+         `run_checks_after_each_pass` flag is enabled.
+         """
+         sig = inspect.signature(check)
+
+         if len(list(sig.parameters.values())) != 1:
+             raise TypeError("PassManager check function should only take in one variable, a module")
+
+         setattr(self, "check", check)  # noqa: B010
+
+     def check(self, module: nn.Module) -> None:
+         pass
+
+     def __call__(self, module: nn.Module) -> PassResult:
+         """
+         Runs a list of passes in the order based on `self.passes` on the given
+         graph module. Each time a pass is run, checks and linting will be run on
+         the graph module if `run_checks_after_each_pass` is set.
+
+         If the module is a graph module, we will run the list of passes until
+         the graph stops changing, or until `steps` number of times.
+         """
+         # Order the passes based on the constraints
+         if not self._validated:
+             self.solve_constraints()
+
+         # Check graph invariants
+         self.check(module)
+
+         # Run the set of passes `steps` number of times or until the graph stops
+         # changing
+         overall_modified = False
+         for _ in range(self.steps):
+             modified = False
+
+             # Run the set of passes on the graph module
+             for i, fn in enumerate(self.passes):
+                 fn_name = fn.__name__ if inspect.isfunction(fn) else type(fn).__name__
+                 logger.debug("Running pass '%s'", fn_name)
+
+                 try:
+                     res = fn(module)
+
+                     if not isinstance(res, PassResult) and not hasattr(
+                         res, "graph_module"
+                     ):
+                         raise TypeError(
+                             f"The result of the pass {fn_name} should be type PassResult. "
+                             + "Please wrap it with pass_result_wrapper()"
+                         )
+                     module = res.graph_module
+                     modified = modified or res.modified
+
+                     if isinstance(module, GraphModule):
+                         logger.debug("Graph after pass '%s': %s", fn_name, module.graph)
+                         module.recompile()
+
+                     # Check graph invariants
+                     if self.run_checks_after_each_pass:
+                         self.check(module)
+
+                 except Exception as e:
+                     prev_pass_names = [
+                         p.__name__ if inspect.isfunction(p) else type(p).__name__
+                         for p in self.passes[:i]
+                     ]
+                     msg = f"An error occurred when running the '{fn_name}' pass after the following passes: {prev_pass_names}"
+                     raise Exception(msg) from e
+
+             # If the graph no longer changes, then we can stop running these passes
+             overall_modified = overall_modified or modified
+             if not modified:
+                 break
+
+         return PassResult(module, overall_modified)
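A hedged sketch of scheduling two passes with an ordering constraint (the pass bodies below are placeholders; a real pass would transform the module):

    import torch
    from torch.fx import symbolic_trace
    from torch.fx.passes.infra.pass_base import PassResult
    from torch.fx.passes.infra.pass_manager import PassManager, this_before_that_pass_constraint

    def early_pass(gm):
        return PassResult(gm, False)      # would normally transform gm

    def late_pass(gm):
        return PassResult(gm, False)

    pm = PassManager(
        passes=[late_pass, early_pass],   # deliberately mis-ordered
        constraints=[this_before_that_pass_constraint(early_pass, late_pass)],
    )
    result = pm(symbolic_trace(lambda x: x + 1))   # solve_constraints() reorders to [early_pass, late_pass]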
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/net_min_base.py ADDED
@@ -0,0 +1,686 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from dataclasses import dataclass
3
+ from typing import Any, Callable, Dict, List, Optional, Tuple
4
+
5
+ import torch
6
+ import torch.fx
7
+ from torch.fx._compatibility import compatibility
8
+ from torch.fx.node import map_arg
9
+
10
+ from .shape_prop import ShapeProp
11
+ from .split_utils import split_by_tags
12
+ from .tools_common import (
13
+ CALLABLE_NODE_OPS,
14
+ FxNetAccFusionsFinder,
15
+ Names,
16
+ NodeList,
17
+ NodeSet,
18
+ TensorOrTensors,
19
+ Tensors,
20
+ )
21
+
22
+ __all__ = [
23
+ "FxNetMinimizerBadModuleError",
24
+ "FxNetMinimizerRunFuncError",
25
+ "FxNetMinimizerResultMismatchError",
26
+ ]
27
+
28
+ _LOGGER = logging.getLogger(__name__)
29
+
30
+
31
+ @compatibility(is_backward_compatible=False)
32
+ class FxNetMinimizerBadModuleError(Exception):
33
+ """
34
+ Raised if failed to split out a minimize module
35
+ """
36
+
37
+ pass
38
+
39
+
40
+ @compatibility(is_backward_compatible=False)
41
+ class FxNetMinimizerRunFuncError(Exception):
42
+ """
43
+ Raised if error occurs during run_a or run_b functions
44
+ """
45
+
46
+ pass
47
+
48
+
49
+ @compatibility(is_backward_compatible=False)
50
+ class FxNetMinimizerResultMismatchError(Exception):
51
+ """
52
+ Raised if comparing function thinks the results are mismatching.
53
+ """
54
+
55
+ pass
56
+
57
+
58
+ @dataclass
59
+ class _MinimizerSettingBase:
60
+ """
61
+ Args:
62
+ `accumulate_error`: Instead of using a's input for both converted module to verify
63
+ , use the previous outputs of each converted module as input to accumulate the
64
+ errors.
65
+
66
+ `traverse_method`: "sequential" or "binary" or "accumulate"
67
+ Determine the way of traverse the nodes in FX module.
68
+
69
+ `find_all`: Minimizer will go through the entire model and return all problematic nodes.
70
+
71
+ `return_intermediate`: If true, when using `run_nodes()` function to run the
72
+ model, intermediate results of all the ops will be returned as output.
73
+ """
74
+
75
+ accumulate_error: bool = False
76
+ traverse_method: str = "sequential"
77
+ find_all: bool = False
78
+ return_intermediate: bool = False
79
+
80
+ def __str__(self):
81
+ settings_str = "FX Minimizer Settings:\n"
82
+
83
+ for k, v in vars(self).items():
84
+ settings_str += f"\t{k}: {v}\n"
85
+
86
+ return settings_str
87
+
88
+
89
+ class _MinimizerBase:
90
+ """
91
+ This class is used to automatically find problematic nodes in a model. It takes a FX
92
+ graphmodule and generate some submodules while traverse the graph. Then two functions
93
+ `run_a` and `run_b` will be used to run the same submodule and a function `compare_fn`
94
+ will be used to compare the results.
95
+
96
+ Currently we provides two ways to traverse the graph and generate submodules.
97
+ 1. Sequential traversal: this will traverse the graph node by node and generate
98
+ one submodule with one sigle node.
99
+ 2. Binary searching: this will do a binary search style traversal on the graph.
100
+
101
+ For internal Users, a guide can be found here https://fb.quip.com/HDtuAgiKGfkP.
102
+ """
103
+
104
+ def __init__(
105
+ self,
106
+ module: torch.fx.GraphModule,
107
+ sample_input: Tensors,
108
+ compare_fn: Callable[
109
+ [TensorOrTensors, TensorOrTensors, Names], Tuple[float, bool]
110
+ ],
111
+ settings: _MinimizerSettingBase,
112
+ ):
113
+ assert isinstance(module, torch.fx.GraphModule)
114
+
115
+ self.module = module
116
+ self.sample_input = sample_input
117
+ self.compare_fn = compare_fn
118
+ self.settings = settings
119
+
120
+ # Stores outputs of run_a function
121
+ self.a_outputs: Dict[str, Any] = {}
122
+
123
+ # Stores outputs of run_b function
124
+ self.b_outputs: Dict[str, Any] = {}
125
+
126
+ # Stores the results of compare_fn
127
+ self.results: Dict[Any, Any] = {}
128
+
129
+ # Stores the report for the runs
130
+ self.reports: List[List[str]] = []
131
+
132
+ # Current iteration
133
+ self.iteration: int = 0
134
+
135
+ callable_nodes = {
136
+ node for node in self.module.graph.nodes if node.op in CALLABLE_NODE_OPS
137
+ }
138
+ ShapeProp(self.module).propagate(*self.sample_input)
139
+ self.fusions = FxNetAccFusionsFinder(self.module, callable_nodes)()
140
+
141
+ # Check if number of input in sample_input matches the number of placeholders
142
+ placeholders = [
143
+ node.name for node in self.module.graph.nodes if node.op == "placeholder"
144
+ ]
145
+ assert len(placeholders) == len(self.sample_input)
146
+
147
+ # Store sample_input
148
+ for i, name in enumerate(placeholders):
149
+ self.a_outputs[name] = sample_input[i]
150
+ self.b_outputs[name] = sample_input[i]
151
+
152
+ def run_a(self, mod: torch.fx.GraphModule, inputs: Tensors) -> TensorOrTensors:
153
+ """
154
+ Run `mod` with `inputs` and generate output. The output will be compared with
155
+ output of run_b().
156
+ """
157
+ raise RuntimeError("run_a() is not implemented.")
158
+
159
+ def run_b(self, mod: torch.fx.GraphModule, inputs: Tensors) -> TensorOrTensors:
160
+ """
161
+ Run `mod` with `inputs` and generate output. The output will be compared with
162
+ output of run_a().
163
+ """
164
+ raise RuntimeError("run_b() is not implemented.")
165
+
166
+ def _store_outputs(
167
+ self,
168
+ a_result: TensorOrTensors,
169
+ b_result: TensorOrTensors,
170
+ submodule: torch.fx.GraphModule,
171
+ ):
172
+ """
173
+ Store the outputs of self.run_a() and self.run_b() into self.a_outputs and
+ self.b_outputs, so that we can use them when executing subsequent nodes that
+ take those outputs as inputs.
176
+
177
+ Args:
178
+ a_result: Output of self.run_a(). Could be a tensor or tensors.
179
+ b_result: Output of self.run_b(). Could be a tensor or tensors.
180
+ submodule: The module that generates a_result and b_result.
181
+ """
182
+ output_node = next(
183
+ node for node in submodule.graph.nodes if node.op == "output"
184
+ )
185
+
186
+ # Only one output
187
+ if isinstance(output_node.args[0], torch.fx.Node):
188
+ self.a_outputs[output_node.args[0].name] = a_result
189
+ self.b_outputs[output_node.args[0].name] = b_result
190
+ # Multiple outputs
191
+ else:
192
+ for i, arg in enumerate(output_node.args[0]):
193
+ self.a_outputs[arg.name] = a_result[i]
194
+ self.b_outputs[arg.name] = b_result[i]
195
+
196
+ def _get_submod_inputs(
197
+ self, main_module: torch.fx.GraphModule, submod_path: str
198
+ ) -> Tuple[Tensors, Tensors]:
199
+ """
200
+ Try to get the submodule inputs from stored outputs. If they are not found, then use
+ torch_glow.get_submod_inputs to get the inputs.
+
+ If accumulate_error is False, use a_input for both run_a() and run_b();
+ otherwise use a_input for run_a() and b_input for run_b().
205
+
206
+ Args:
207
+ main_module: Top-level fx module.
208
+ submod_path: Path to the submodule we want to run and compare results.
209
+
210
+ Returns:
211
+ a_input: List of tensor(s) that will be used by run_a() as submodule inputs.
212
+ b_input: List of tensor(s) that will be used by run_b() as submodule inputs.
213
+ """
214
+ a_input = []
215
+ b_input = []
216
+ submodule = getattr(main_module, submod_path)
217
+ placeholders = [
218
+ node.name for node in submodule.graph.nodes if node.op == "placeholder"
219
+ ]
220
+
221
+ # If all placeholders can be found in stored outputs, use the stored
+ # outputs as inputs. Otherwise, use `torch_glow.get_submod_inputs`
+ # to get the inputs.
224
+ if set(placeholders) <= self.a_outputs.keys():
225
+ for name in placeholders:
226
+ a_input.append(self.a_outputs[name])
227
+ b_input.append(self.b_outputs[name])
228
+ else:
229
+ if self.settings.accumulate_error:
230
+ print(f"Can't find previous stored outputs named {placeholders}!")
231
+
232
+ def get_inputs(self: torch.nn.Module, inputs: Any):
233
+ nonlocal a_input
234
+ a_input = inputs
235
+
236
+ # Use forward hook to get the inputs to the submodule
237
+ handle = submodule.register_forward_pre_hook(get_inputs)
238
+ main_module(*self.sample_input)
239
+ handle.remove()
240
+
241
+ b_input = a_input
242
+
243
+ if not self.settings.accumulate_error:
244
+ return a_input, a_input
245
+
246
+ return a_input, b_input
247
+
248
+ def _tag_nodes(self, selected_nodes: NodeSet):
249
+ """
250
+ Tag selected nodes with the tag "minimize". Nodes with the same tag will
+ be split into the same submodule afterwards.
252
+
253
+ Args:
254
+ selected_nodes: Nodes that we want to minimize. We will tag those nodes
255
+ with "minimize", all preceding nodes with "main_0" and all following
256
+ nodes with "main_1".
257
+ """
258
+ for node in self.module.graph.nodes:
259
+ if node.op not in CALLABLE_NODE_OPS:
260
+ continue
261
+
262
+ if node in selected_nodes:
263
+ node.tag = "minimize"
264
+ elif any(
265
+ n.tag in {"minimize", "main_1"}
266
+ for n in node.all_input_nodes
267
+ if n.op in CALLABLE_NODE_OPS
268
+ ):
269
+ node.tag = "main_1"
270
+ else:
271
+ node.tag = "main_0"
272
+
273
+ def _build_submodule(self, nodes: NodeSet) -> Tuple[torch.fx.GraphModule, str]:
274
+ """
275
+ Split self.module so that one submodule consists of `nodes` and only `nodes`.
276
+
277
+ Args:
278
+ nodes: Nodes that we want to include in the minimize submodule.
279
+
280
+ Returns:
281
+ split_module (torch.fx.GraphModule): the module after split.
282
+ submodule_name (str): the name of the submodule that consists of `nodes`.
283
+ """
284
+ # Color provided nodes
285
+ self._tag_nodes(nodes)
286
+
287
+ # Split module based on coloring
288
+ split_module = split_by_tags(self.module, ["main_0", "minimize", "main_1"])
289
+
290
+ # Find submodule containing colored nodes
291
+ submodule_name: str = ""
292
+ for child_name, _ in split_module.named_children():
293
+ # Skip submodules we're not interested in at the moment
294
+ if "minimize" not in child_name:
295
+ continue
296
+
297
+ if submodule_name == "":
298
+ submodule_name = child_name
299
+ else:
300
+ raise FxNetMinimizerBadModuleError(
301
+ f"Expected only one minimize submodule with nodes {nodes}"
302
+ )
303
+
304
+ if submodule_name == "":
305
+ raise FxNetMinimizerBadModuleError(
306
+ f"Minimize submodule was not found with nodes {nodes}"
307
+ )
308
+
309
+ return split_module, submodule_name
310
+
311
+ def _run_and_compare(
312
+ self, split_module: torch.fx.GraphModule, submod_name: str, output_names: Names
313
+ ):
314
+ """
315
+ Run the submodule in `split_module` that has name `submod_name`
316
+ using `self.run_a` and `self.run_b` and compare their results.
317
+
318
+ Args:
319
+ split_module: Main module that contains the minimize submodule.
320
+ submod_name: Name of the minimize submodule.
321
+ output_names: Names of the nodes we want to output. If None, we
322
+ will use the original output.
323
+ """
324
+ submodule = getattr(split_module, submod_name)
325
+ a_input, b_input = self._get_submod_inputs(split_module, submod_name)
326
+
327
+ if len(self.reports) == 0:
328
+ self.reports.append([])
329
+ self.iteration = 1
330
+
331
+ report = self.reports[self.iteration - 1]
332
+ report.append("Run and compare ...")
333
+
334
+ if output_names:
335
+ output_nodes: NodeList = []
336
+ for node in submodule.graph.nodes:
337
+ if node.op == "output":
338
+ submodule.graph.erase_node(node)
339
+
340
+ if node.name in output_names:
341
+ output_nodes.append(node)
342
+
343
+ submodule.graph.output(
344
+ output_nodes[0] if len(output_nodes) == 1 else tuple(output_nodes)
345
+ )
346
+ submodule.graph.lint()
347
+ submodule.recompile()
348
+
349
+ # Use name of args in output node as key to store comparison result
350
+ for node in submodule.graph.nodes:
351
+ if node.op == "output":
352
+ result_key = map_arg(node.args, lambda x: x.name)
353
+
354
+ a_result = self.run_a(submodule, a_input)
355
+ b_result = self.run_b(submodule, b_input)
356
+ self._store_outputs(a_result, b_result, submodule)
357
+
358
+ # Compare results
359
+ names: Names = output_names
360
+ if output_names is None:
361
+ names = [str(v) for v in result_key]
362
+
363
+ numeric_result, bool_result = self.compare_fn(a_result, b_result, names)
364
+
365
+ self.results[result_key] = numeric_result
366
+ report.append(f"Numerical accuracy = {numeric_result}")
367
+ if not bool_result:
368
+ report.append(f"Result mismatch for {result_key}")
369
+ raise FxNetMinimizerResultMismatchError(f"Result mismatch for {result_key}")
370
+
371
+ def _binary_search_impl(
372
+ self, all_nodes: NodeList, start_idx: int, end_idx: int
373
+ ) -> NodeSet:
374
+ """
375
+ Recursive binary search implementation.
376
+ """
377
+ nodes: NodeList = all_nodes[start_idx:end_idx]
378
+
379
+ report: List[str] = []
380
+ self.reports.append(report)
381
+ self.iteration += 1
382
+ report.append(f"Binary search iteration {self.iteration}.")
383
+ report.append(
384
+ f"From node index {start_idx} to {end_idx-1}. "
385
+ f"Size of the interested node list is {len(nodes)}"
386
+ )
387
+
388
+ cur_nodes: NodeSet = set(nodes)
389
+
390
+ for node in nodes:
391
+ if node in self.fusions:
392
+ cur_nodes.update(self.fusions[node])
393
+
394
+ try:
395
+ split_module, submod_name = self._build_submodule(cur_nodes)
396
+ self._run_and_compare(split_module, submod_name, [])
397
+ except (FxNetMinimizerRunFuncError, FxNetMinimizerResultMismatchError):
398
+
399
+ if len(nodes) == 1:
400
+ report.append(
401
+ f"This is the last node in the sub-module. "
402
+ f"Search in the current branch is successful with culprit = {cur_nodes}."
403
+ )
404
+ self.print_report(report)
405
+ return cur_nodes
406
+
407
+ report.append(
408
+ "Proceed to split and lower the halves of the current "
409
+ "sub-module individually."
410
+ )
411
+ self.print_report(report)
412
+
413
+ mid = len(nodes) // 2
414
+ culprits = self._binary_search_impl(all_nodes, start_idx, start_idx + mid)
415
+
416
+ if len(culprits) != 0 and not self.settings.find_all:
417
+ return culprits
418
+
419
+ culprits = self._binary_search_impl(all_nodes, start_idx + mid, end_idx)
420
+
421
+ if len(culprits) == 0:
422
+ report.append(
423
+ f"Further split and lowering found no errors. "
424
+ f"Unable to minimize the submodule with list of nodes: {nodes}"
425
+ )
426
+ self.print_report(report)
427
+
428
+ return culprits
429
+ else:
430
+ report.append("No discrepancy found.")
431
+ self.print_report(report)
432
+ return set()
433
+
434
+ def _binary_traverse(self, nodes: NodeList) -> NodeSet:
435
+ """
436
+ Binary search on `nodes` for culprit.
437
+ """
438
+ return self._binary_search_impl(nodes, 0, len(nodes))
439
+
440
+ def _sequential_traverse(self, nodes: NodeList) -> NodeSet:
441
+ """
442
+ Traverse `nodes` one by one and determine if any of them is a culprit.
443
+ """
444
+ culprits: NodeSet = set()
445
+
446
+ for node in nodes:
447
+ report: List[str] = []
448
+ self.reports.append(report)
449
+ self.iteration += 1
450
+ report.append(f"Sequential traverse iteration {self.iteration}.")
451
+ report.append(f"Visit node: {node.name}")
452
+
453
+ _LOGGER.info("Visit node: %s", node.name)
454
+ cur_nodes: NodeSet = {node}
455
+
456
+ if node in self.fusions:
457
+ cur_nodes = self.fusions[node]
458
+
459
+ try:
460
+ split_module, submod_name = self._build_submodule(cur_nodes)
461
+ self._run_and_compare(split_module, submod_name, [node.name])
462
+ self.print_report(report)
463
+ except FxNetMinimizerResultMismatchError:
464
+ culprits.add(node)
465
+ report.append(f"Found culprit from numeric error: {node}")
466
+ self.print_report(report)
467
+ if not self.settings.find_all:
468
+ return culprits
469
+ except FxNetMinimizerRunFuncError:
470
+ culprits.update(cur_nodes)
471
+ report.append(f"Found culprit from run error: {node}")
472
+ self.print_report(report)
473
+ if not self.settings.find_all:
474
+ return culprits
475
+
476
+ return culprits
477
+
478
+ def _accumulate_traverse(self, nodes: NodeList) -> NodeSet:
479
+ culprits: NodeSet = set()
480
+ nodes_to_run: NodeSet = set()
481
+
482
+ # find_all is not supported for accumulate traversal because all the
483
+ # ops run on NNPI. So we return after the first op that raises an error.
484
+ if self.settings.find_all:
485
+ print("'Find All' mode is not supported in accumulate traversal.")
486
+ return culprits
487
+
488
+ for node in nodes:
489
+ report: List[str] = []
490
+ self.reports.append(report)
491
+ self.iteration += 1
492
+ report.append(f"Accumulate traverse iteration {self.iteration}.")
493
+
494
+ nodes_to_run.add(node)
495
+
496
+ node_name = node.name
497
+ if node_name is not None and isinstance(node_name, tuple):
498
+ node_name = node_name[0]
499
+ assert node_name is not None and isinstance(
500
+ node_name, str
501
+ ), f"minimize: node_name: {node_name}"
502
+
503
+ report.append(f"Add node: {node_name}")
504
+
505
+ try:
506
+ split_module, submod_name = self._build_submodule(nodes_to_run)
507
+ self._run_and_compare(split_module, submod_name, [node_name])
508
+ self.print_report(report)
509
+ except (FxNetMinimizerResultMismatchError, FxNetMinimizerRunFuncError):
510
+ culprits.add(node)
511
+ report.append(f"Found culprit {node}")
512
+ self.print_report(report)
513
+ return culprits
514
+
515
+ return culprits
516
+
517
+ def _skip_traverse_impl(self, all_nodes: NodeList, start_idx: int, end_idx: int) -> NodeSet:
518
+ """
519
+ Run and compare one contiguous block of nodes (a block delimited by skipped nodes).
520
+ """
521
+ culprits: NodeSet = set()
522
+ nodes: NodeList = all_nodes[start_idx:end_idx]
523
+
524
+ report: List[str] = []
525
+ self.reports.append(report)
526
+ self.iteration += 1
527
+ report.append(f" Nodes block {self.iteration}.")
528
+ report.append(
529
+ f"From node index {start_idx} to {end_idx-1}. "
530
+ f"Size of the interested node list is {len(nodes)}"
531
+ )
532
+
533
+ cur_nodes: NodeSet = set(nodes)
534
+
535
+ for node in nodes:
536
+ if node in self.fusions:
537
+ cur_nodes.update(self.fusions[node])
538
+
539
+ try:
540
+ split_module, submod_name = self._build_submodule(cur_nodes)
541
+ self._run_and_compare(split_module, submod_name, [])
542
+ except FxNetMinimizerResultMismatchError:
543
+ culprits.update(cur_nodes)
544
+ report.append(f"Found culprit from numeric error: {cur_nodes}")
545
+ self.print_report(report)
546
+ return culprits
547
+ except FxNetMinimizerRunFuncError:
548
+ culprits.update(cur_nodes)
549
+ report.append(f"Found culprit from run error: {node}")
550
+ self.print_report(report)
551
+ return culprits
552
+ else:
553
+ report.append("No discrepancy found.")
554
+ self.print_report(report)
555
+ return set()
556
+
557
+
558
+ def _skip_traverse(self, all_nodes: NodeList, skip_nodes: List) -> NodeSet:
559
+ """
560
+ Traverse the graph in blocks, skipping the nodes whose names appear in `skip_nodes`.
561
+ """
562
+ start_idx = 0
563
+ num_nodes = len(all_nodes)
564
+ idx = 0
565
+ culprits = set()
566
+ while idx < num_nodes:
567
+ node = all_nodes[idx]
568
+ if node.name in skip_nodes: # skip the node
569
+ if idx > start_idx:
570
+ culprits = self._skip_traverse_impl(all_nodes, start_idx, idx)
571
+ start_idx = idx + 1
572
+ elif idx == num_nodes - 1 and start_idx <= idx: # last node
573
+ culprits = self._skip_traverse_impl(all_nodes, start_idx, idx + 1)
574
+ idx += 1
575
+
576
+ return culprits
577
+
578
+
579
+
580
+ def _collect_nodes(self, start: Optional[str], end: Optional[str]) -> NodeList:
581
+ """
582
+ Collect the nodes in the model that lie between the nodes named `start` and `end`.
583
+ These two nodes are also included.
584
+ """
585
+ nodes: NodeList = []
586
+ add_node = start is None
587
+
588
+ for node in self.module.graph.nodes:
589
+ if node.op not in CALLABLE_NODE_OPS:
590
+ continue
591
+
592
+ if node.name == start:
593
+ add_node = True
594
+
595
+ if add_node:
596
+ nodes.append(node)
597
+
598
+ if node.name == end:
599
+ break
600
+
601
+ return nodes
602
+
603
+ def run_nodes(self, start: Optional[str] = None, end: Optional[str] = None):
604
+ """
605
+ Run part of the model from `start` node to `end` node. If `start` is None
606
+ then we start from the beginning of the model. If `end` is None then we
607
+ stop at the end of the model.
608
+
609
+ Args:
610
+ start: The name of the node which is the first node of the submodule
611
+ we want to run. If set to None, then we'll start with the first
612
+ node of the model.
613
+ end: The name of the node which is the last node of the submodule we
614
+ want to run. If set to None, we'll end with the last node of the
615
+ model.
616
+ """
617
+ nodes = self._collect_nodes(start, end)
618
+ cur_nodes = set(nodes)
619
+
620
+ for node in nodes:
621
+ if node in self.fusions:
622
+ cur_nodes.update(self.fusions[node])
623
+
624
+ output_names = []
625
+ if self.settings.return_intermediate:
626
+ output_names = [node.name for node in nodes]
627
+
628
+ try:
629
+ split_module, submod_name = self._build_submodule(cur_nodes)
630
+ self._run_and_compare(split_module, submod_name, output_names)
631
+ except (
632
+ FxNetMinimizerRunFuncError,
633
+ FxNetMinimizerResultMismatchError,
634
+ ) as e:
635
+ print(e)
636
+
637
+ def print_report(self, report: List[str]):
638
+ for i in range(len(report)):
639
+ if i > 0:
640
+ print(" . " + report[i])
641
+ else:
642
+ print(report[i])
643
+
644
+ def print_reports(self):
645
+ for report in self.reports:
646
+ self.print_report(report)
647
+
648
+ def minimize(
649
+ self, start: Optional[str] = None, end: Optional[str] = None, skip_nodes: Optional[List] = None,
650
+ ) -> NodeSet:
651
+ """
652
+ Minimize the model from the node named `start` to the node named `end`, based
+ on self.settings. Find culprits that cause FxNetMinimizerRunFuncError or
654
+ FxNetMinimizerResultMismatchError errors.
655
+
656
+ Args:
657
+ start: The name of the node where we want to start minimizing. If set
658
+ to None, then we'll start with the first node of the model.
659
+ end: The name of the node where we want to terminate minimizing. If
660
+ set to None, we'll end with the last node of the model.
661
+
662
+ Returns:
663
+ nodes: A list of nodes that cause FxNetMinimizerRunFuncError or
664
+ FxNetMinimizerResultMismatchError errors during minimizing.
665
+ """
666
+
667
+ print(self.settings)
668
+ print(self.module.graph)
669
+
670
+ nodes = self._collect_nodes(start, end)
671
+
672
+ if self.settings.traverse_method == "sequential":
673
+ return self._sequential_traverse(nodes)
674
+
675
+ if self.settings.traverse_method == "binary":
676
+ return self._binary_traverse(nodes)
677
+
678
+ if self.settings.traverse_method == "accumulate":
679
+ return self._accumulate_traverse(nodes)
680
+
681
+ if self.settings.traverse_method == "skip":
+ if skip_nodes is None:
683
+ raise RuntimeError("'skip_nodes' can't be None when 'traverse_method' is 'skip'.")
684
+ return self._skip_traverse(nodes, skip_nodes)
685
+
686
+ raise RuntimeError(f"Unknown traverse method {self.settings.traverse_method}!")
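A minimal usage sketch for the minimizer defined above (not part of the original file). `run_a`/`run_b` are the two execution paths being compared; in a real setup `run_b` would run a lowered or compiled copy of the submodule. The toy model, tolerance and compare function below are assumptions made for illustration:

```python
import torch
import torch.fx

class ToyModel(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x + 1).sigmoid()

gm = torch.fx.symbolic_trace(ToyModel())

class MyMinimizer(_MinimizerBase):
    def run_a(self, mod, inputs):
        # Reference path: plain eager execution.
        return mod(*inputs)

    def run_b(self, mod, inputs):
        # In practice this would run a lowered/compiled version of `mod`;
        # running eagerly again keeps the sketch self-contained.
        return mod(*inputs)

def compare_fn(a, b, names):
    # Assumes single-tensor outputs; returns (numeric error, do the results match?).
    err = float((a - b).abs().max())
    return err, err < 1e-3

minimizer = MyMinimizer(
    gm,
    (torch.randn(2, 3),),
    compare_fn,
    _MinimizerSettingBase(traverse_method="sequential"),
)
culprits = minimizer.minimize()  # set of nodes that mismatch or fail to run
```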
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/operator_support.py ADDED
@@ -0,0 +1,220 @@
1
+ import abc
2
+ import typing as t
3
+
4
+ import torch
5
+ import torch.fx
6
+ from torch.fx._compatibility import compatibility
7
+ from .shape_prop import TensorMetadata
8
+ from .tools_common import get_node_target, CALLABLE_NODE_OPS
9
+
10
+
11
+ __all__ = ['OperatorSupportBase', 'OperatorSupport', 'create_op_support', 'chain', 'OpSupports', 'any_chain']
12
+
13
+ # fx.Node.target typename, as returned by `get_node_target()`
14
+ TargetTypeName = str
15
+
16
+ # Arguments' dtypes for a given node, see `OperatorSupport`
17
+ SupportedArgumentDTypes = t.Optional[
18
+ t.Tuple[
19
+ t.Sequence[t.Sequence[torch.dtype]],
20
+ t.Dict[str, t.Sequence[torch.dtype]],
21
+ ]
22
+ ]
23
+
24
+ SupportDict = t.Mapping[TargetTypeName, SupportedArgumentDTypes]
25
+
26
+
27
+ @compatibility(is_backward_compatible=False)
28
+ class OperatorSupportBase(abc.ABC):
29
+ """Interface for determining if a fx.Node is supported by a backend"""
30
+ @abc.abstractmethod
31
+ def is_node_supported(
32
+ self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
33
+ ) -> bool:
34
+ raise NotImplementedError()
35
+
36
+
37
+ @compatibility(is_backward_compatible=False)
38
+ class OperatorSupport(OperatorSupportBase):
39
+ """
40
+ `_support_dict` maps node.target typename to supported inputs dtypes.
41
+
42
+ node.target typename is retrieved using helper function `get_node_target()`
43
+
44
+ If the supported input dtypes entry is None, it means any dtype is supported; otherwise
+ we should see a tuple like (([dtypes], ...), {"name": [dtypes], ...}).
46
+
47
+ The first tuple ([dtypes], ...) indicates what dtypes are supported for
48
+ inputs in node.args and the second dict {"name": [dtypes], ...} indicates
49
+ what dtypes are supported for inputs in node.kwargs.
50
+
51
+ For inputs in args, if we don't want to check one, we can put None there;
+ e.g. (None, [torch.float]) indicates that we don't care about the type of
+ the first input in args. Inputs in kwargs that are not listed will not
+ be checked.
55
+ """
56
+
57
+ _support_dict: SupportDict
58
+
59
+ def __init__(
60
+ self,
61
+ support_dict: t.Optional[SupportDict] = None
62
+ ):
63
+ self._support_dict = support_dict or {}
64
+
65
+ def is_node_supported(
66
+ self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
67
+ ) -> bool:
68
+ """
69
+ Args:
70
+ `submodules`: mapping from module name to the module. This can be
71
+ retrieved by calling model.named_modules().
72
+
73
+ `node`: a Fx node that we want to determine whether it's supported.
74
+
75
+ Returns:
76
+ `is_supported`: whether the arg `node` is supported.
77
+ """
78
+ if node.op not in CALLABLE_NODE_OPS:
79
+ return True
80
+
81
+ target = get_node_target(submodules, node)
82
+
83
+ # Target not found in _support_dict meaning that we don't support this op at all
84
+ if target not in self._support_dict:
85
+ return False
86
+
87
+ # The rule for target is None meaning that we accept any dtype
88
+ if self._support_dict[target] is None:
89
+ return True
90
+
91
+ args_dtypes, kwargs_dtypes = self._support_dict[target] # type: ignore[misc]
92
+
93
+ # Check args dtypes
94
+ for i, dtypes in enumerate(args_dtypes):
95
+ if len(node.args) <= i:
96
+ break
97
+
98
+ # None indicates we don't care about the dtype of args[i]
99
+ if dtypes is None:
100
+ continue
101
+
102
+ # If arg is not a node then we don't check it
103
+ if not isinstance(node.args[i], torch.fx.Node):
104
+ continue
105
+
106
+ arg_dtype = _get_arg_dtype(node.args[i]) # type: ignore[arg-type]
107
+ if arg_dtype not in dtypes:
108
+ return False
109
+
110
+ # Check kwargs dtypes
111
+ for k, dtypes in kwargs_dtypes.items():
112
+ if k not in node.kwargs:
113
+ continue
114
+
115
+ # If arg is not a node then we don't check it
116
+ if not isinstance(node.kwargs[k], torch.fx.Node):
117
+ continue
118
+
119
+ kwarg_dtype = _get_arg_dtype(node.kwargs[k]) # type: ignore[arg-type]
120
+ if kwarg_dtype not in dtypes:
121
+ return False
122
+
123
+ return True
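To make the `_support_dict` format above concrete, here is a hedged construction sketch (the target typenames and dtype rules are invented examples; real keys are whatever `get_node_target()` returns for the nodes in your graph):

```python
import torch

example_support_dict = {
    # Any input dtype is accepted for this target (rule is None).
    "acc_ops.linear": None,
    # First positional arg is unchecked (None); second must be fp32/fp16.
    # The kwarg "alpha", if present, must be fp32.
    "acc_ops.add": (
        (None, [torch.float32, torch.float16]),
        {"alpha": [torch.float32]},
    ),
}
op_support = OperatorSupport(example_support_dict)
# op_support.is_node_supported(dict(gm.named_modules()), node) can then be queried
# for each node of a GraphModule `gm` during partitioning.
```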
124
+
125
+
126
+ # ======================================================================
127
+ # Functional interfaces and utils for defining basic operator support logic
128
+ # and composing them into more complex ones
129
+ # ======================================================================
130
+
131
+ IsNodeSupported = t.Callable[[t.Mapping[str, torch.nn.Module], torch.fx.Node], bool]
132
+
133
+
134
+ @compatibility(is_backward_compatible=False)
135
+ def create_op_support(is_node_supported: IsNodeSupported) -> OperatorSupportBase:
136
+ """Wraps a `IsNodeSupported` function into an `OperatorSupportBase` instance
137
+
138
+ `IsNodeSupported` has the same call signature as
139
+ `OperatorSupportBase.is_node_supported`
140
+ """
141
+ class FunctionalOperatorSupport(OperatorSupportBase):
142
+ def is_node_supported(
143
+ self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
144
+ ) -> bool:
145
+ return is_node_supported(submodules, node)
146
+ return FunctionalOperatorSupport()
147
+
148
+
149
+ @compatibility(is_backward_compatible=False)
150
+ def chain(*op_support: OperatorSupportBase) -> OperatorSupportBase:
151
+ """Combines a sequence of `OperatorSupportBase` instances to form a single `OperatorSupportBase`
152
+ instance by evaluating each input `OperatorSupportBase` instance, and returns False if
153
+ any of it reports False.
154
+ """
155
+ def _chain(submods, node) -> bool:
156
+ return all(
157
+ x.is_node_supported(submods, node)
158
+ for x in op_support
159
+ )
160
+ return create_op_support(_chain)
161
+
162
+
163
+ @compatibility(is_backward_compatible=False)
164
+ def any_chain(*op_support: OperatorSupportBase) -> OperatorSupportBase:
165
+ """Combines a sequence of `OperatorSupportBase` instances to form a single `OperatorSupportBase`
166
+ instance by evaluating each input `OperatorSupportBase` instance, and returns True if
167
+ any of it reports True.
168
+ """
169
+ def _any_chain(submods, node) -> bool:
170
+ return any(
171
+ x.is_node_supported(submods, node)
172
+ for x in op_support
173
+ )
174
+ return create_op_support(_any_chain)
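A short composition sketch (illustrative; `op_support` is the instance from the previous example, and `OpSupports` is defined just below):

```python
import torch

combined = chain(
    op_support,                                          # must be in the support table
    OpSupports.decline_if_input_dtype(torch.float64),    # and take no float64 inputs
    OpSupports.decline_if_node_in_names({"add_3"}),      # and not be explicitly denied
)
# combined.is_node_supported(submodules, node) is True only if every rule passes;
# any_chain(...) would instead accept a node as soon as one rule passes.
```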
175
+
176
+
177
+ @compatibility(is_backward_compatible=False)
178
+ class OpSupports:
179
+ """A set of atomic `OperatorSupportBase` instances that can be combined together
180
+ to form more complex operator support logic.
181
+ """
182
+ @classmethod
183
+ def decline_if_input_dtype(cls, dtype: torch.dtype) -> OperatorSupportBase:
184
+ """Report a node as non-supported, if any of its arguments is of dtype"""
185
+
186
+ def _decline_if_input_dtype(
187
+ submodules: t.Mapping[str, torch.nn.Module],
188
+ node: torch.fx.Node,
189
+ ) -> bool:
190
+ for arg in node.all_input_nodes:
191
+ # escape dtype check for get_attr node
192
+ if arg.op == "get_attr":
193
+ continue
194
+ arg_dtype = _get_arg_dtype(arg)
195
+ if arg_dtype == dtype:
196
+ return False
197
+ return True
198
+ return create_op_support(_decline_if_input_dtype)
199
+
200
+ @classmethod
201
+ def decline_if_node_in_names(cls, disallow_set: t.Set[str]) -> OperatorSupportBase:
202
+ """
203
+ If a node has a name that is in the disallow set, report it as non-supported.
204
+ """
205
+ def _decline_if_node_in_names(
206
+ submodules: t.Mapping[str, torch.nn.Module],
207
+ node: torch.fx.Node,
208
+ ) -> bool:
209
+ if node.name in disallow_set:
210
+ return False
211
+ else:
212
+ return True
213
+ return create_op_support(_decline_if_node_in_names)
214
+
215
+
216
+ def _get_arg_dtype(arg: torch.fx.Node) -> t.Any:
217
+ assert isinstance(arg, torch.fx.Node)
218
+ tensor_meta = arg.meta.get("tensor_meta") # type: ignore[union-attr]
219
+ dtype = tensor_meta.dtype if isinstance(tensor_meta, TensorMetadata) else arg.meta["type"]
220
+ return dtype
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/param_fetch.py ADDED
@@ -0,0 +1,66 @@
1
+ from torch.fx.graph_module import GraphModule
2
+ from typing import Any, Callable, Dict, List, Tuple, Type
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+ from torch.fx._compatibility import compatibility
7
+
8
+ __all__ = ['default_matching', 'extract_attrs_for_lowering', 'lift_lowering_attrs_to_nodes']
9
+
10
+ # A matching method maps an attribute name in the current version to the attribute name in `target_version`
11
+ @compatibility(is_backward_compatible=False)
12
+ def default_matching(name: str, target_version: int) -> str:
13
+ """Default matching method
14
+ """
15
+ return name
16
+
17
+ # This dict maps the nn.Module class name to the attribute name list that we want to fetch for lowering.
18
+ # The first integer in the tuple is the version number of the nn.Module class when we create the parameter list.
19
+ # If there's a version mismatch then it means the parameter names in the book might be mismatched with nn.Module.
20
+ module_fetch_book: Dict[Type, Tuple[int, List[str], Callable[[str, int], str]]] = {
21
+ torch.nn.modules.linear.Linear: (1, ["weight", "bias"], default_matching),
22
+ torch.nn.modules.conv.Conv2d: (
23
+ 1, ["weight", "bias", "kernel_size", "stride", "padding", "dilation", "groups", "padding_mode"], default_matching
24
+ ),
25
+ torch.nn.modules.batchnorm.BatchNorm2d: (2, ["weight", "bias", "running_mean", "running_var", "eps"], default_matching),
26
+ torch.nn.modules.pooling.AdaptiveAvgPool2d: (1, [], default_matching),
27
+ torch.nn.modules.pooling.MaxPool2d: (
28
+ 1, ["kernel_size", "stride", "padding", "dilation", "return_indices", "ceil_mode"], default_matching
29
+ ),
30
+ torch.nn.modules.activation.ReLU: (1, ["inplace"], default_matching),
31
+ }
32
+
33
+ @compatibility(is_backward_compatible=False)
34
+ def extract_attrs_for_lowering(mod: nn.Module) -> Dict[str, Any]:
35
+ """If `mod` is in `module_fetch_book`, fetch the attributes of `mod` that are listed there,
+ after checking that the module's version is compatible with the `module_fetch_book`.
37
+ """
38
+ attrs_for_lowering: Dict[str, Any] = {}
39
+ attrs_for_lowering["name"] = torch.typename(mod)
40
+
41
+ if type(mod) in module_fetch_book:
42
+ version, param_to_fetch, matching_method = module_fetch_book[type(mod)]
43
+ if version < mod._version:
44
+ raise RuntimeError(f"Fetcher version {version} tries to fetch {torch.typename(mod)} version {mod._version}, "
45
+ "please upgrade the module_fetch_book, open an issue and @842974287 "
46
+ "or report a bug to AIACC team directly.")
47
+ for attr in param_to_fetch:
48
+ attrs_for_lowering[attr] = getattr(mod, matching_method(attr, mod._version))
49
+ else:
50
+ raise RuntimeError(f"{torch.typename(mod)} is not in the module_fetch_book yet, "
51
+ "please add it to the module_fetch_book, open an issue and @842974287 "
52
+ "or report a bug to AIACC team directly.")
53
+ return attrs_for_lowering
54
+
55
+ @compatibility(is_backward_compatible=False)
56
+ def lift_lowering_attrs_to_nodes(fx_module: GraphModule) -> None:
57
+ """Recursively traverse all `fx_module` nodes and fetch the module's attributes if the node is a leaf module.
58
+ """
59
+ submodules = dict(fx_module.named_modules())
60
+
61
+ for node in fx_module.graph.nodes:
62
+ if node.op == "call_module":
63
+ if isinstance(submodules[node.target], GraphModule):
64
+ lift_lowering_attrs_to_nodes(submodules[node.target])
65
+ else:
66
+ node.attrs_for_lowering = extract_attrs_for_lowering(submodules[node.target])
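A brief usage sketch (not part of the original file); the toy model below is made up, but `Linear` and `ReLU` both have entries in `module_fetch_book`:

```python
import torch
import torch.fx

model = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU())
traced = torch.fx.symbolic_trace(model)

# Attach each leaf module's lowering attributes to its call_module node.
lift_lowering_attrs_to_nodes(traced)

for node in traced.graph.nodes:
    if node.op == "call_module":
        # e.g. dict_keys(['name', 'weight', 'bias']) for the Linear node
        print(node.target, node.attrs_for_lowering.keys())
```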
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/pass_manager.py ADDED
@@ -0,0 +1,247 @@
1
+ from functools import wraps
2
+ from inspect import unwrap
3
+ from typing import Callable, List, Optional
4
+ import logging
5
+
6
+ logger = logging.getLogger(__name__)
7
+
8
+ __all__ = [
9
+ "PassManager",
10
+ "inplace_wrapper",
11
+ "log_hook",
12
+ "loop_pass",
13
+ "this_before_that_pass_constraint",
14
+ "these_before_those_pass_constraint",
15
+ ]
16
+
17
+ # for callables which modify object inplace and return something other than
18
+ # the object on which they act
19
+ def inplace_wrapper(fn: Callable) -> Callable:
20
+ """
21
+ Convenience wrapper for passes which modify an object inplace. This
22
+ wrapper makes them return the modified object instead.
23
+
24
+ Args:
25
+ fn (Callable[Object, Any])
26
+
27
+ Returns:
28
+ wrapped_fn (Callable[Object, Object])
29
+ """
30
+
31
+ @wraps(fn)
32
+ def wrapped_fn(gm):
33
+ val = fn(gm)
34
+ return gm
35
+
36
+ return wrapped_fn
37
+
38
+ def log_hook(fn: Callable, level=logging.INFO) -> Callable:
39
+ """
40
+ Logs callable output.
41
+
42
+ This is useful for logging output of passes. Note inplace_wrapper replaces
43
+ the pass output with the modified object. If we want to log the original
44
+ output, apply this wrapper before inplace_wrapper.
45
+
46
+
47
+ ```
48
+ def my_pass(d: Dict) -> bool:
49
+ changed = False
50
+ if 'foo' in d:
51
+ d['foo'] = 'bar'
52
+ changed = True
53
+ return changed
54
+
55
+ pm = PassManager(
56
+ passes=[
57
+ inplace_wrapper(log_hook(my_pass))
58
+ ]
59
+ )
60
+ ```
61
+
62
+ Args:
63
+ fn (Callable[Type1, Type2])
64
+ level: logging level (e.g. logging.INFO)
65
+
66
+ Returns:
67
+ wrapped_fn (Callable[Type1, Type2])
68
+ """
69
+ @wraps(fn)
70
+ def wrapped_fn(gm):
71
+ val = fn(gm)
72
+ logger.log(level, "Ran pass %s\t Return value: %s", fn, val)
73
+ return val
74
+
75
+ return wrapped_fn
76
+
77
+
78
+
79
+ def loop_pass(base_pass: Callable, n_iter: Optional[int] = None, predicate: Optional[Callable] = None):
80
+ """
81
+ Convenience wrapper for passes which need to be applied multiple times.
82
+
83
+ Exactly one of `n_iter` or `predicate` must be specified.
84
+
85
+ Args:
86
+ base_pass (Callable[Object, Object]): pass to be applied in loop
87
+ n_iter (int, optional): number of times to loop pass
88
+ predicate (Callable[Object, bool], optional): keep applying the pass while this returns True on the current output
89
+
90
+ """
91
+ assert (n_iter is not None) ^ (
92
+ predicate is not None
93
+ ), "Exactly one of `n_iter` or `predicate` must be specified."
94
+
95
+ @wraps(base_pass)
96
+ def new_pass(source):
97
+ output = source
98
+ if n_iter is not None and n_iter > 0:
99
+ for _ in range(n_iter):
100
+ output = base_pass(output)
101
+ elif predicate is not None:
102
+ while predicate(output):
103
+ output = base_pass(output)
104
+ else:
105
+ raise RuntimeError(
106
+ f"loop_pass must be given positive int n_iter (given "
107
+ f"{n_iter}) xor predicate (given {predicate})"
108
+ )
109
+ return output
110
+
111
+ return new_pass
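For illustration, the two ways of bounding the loop, using a toy pass that just increments an integer (real passes would transform a GraphModule or similar object):

```python
def bump(x: int) -> int:
    return x + 1

run_three_times = loop_pass(bump, n_iter=3)
assert run_three_times(0) == 3

# With a predicate, the pass keeps running while the predicate holds on the output.
until_ten = loop_pass(bump, predicate=lambda x: x < 10)
assert until_ten(0) == 10
```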
112
+
113
+
114
+ # Pass Schedule Constraints:
115
+ #
116
+ # Implemented as 'depends on' operators. A constraint is satisfied iff a list
117
+ # has a valid partial ordering according to this comparison operator.
118
+ def _validate_pass_schedule_constraint(
119
+ constraint: Callable[[Callable, Callable], bool], passes: List[Callable]
120
+ ):
121
+ for i, a in enumerate(passes):
122
+ for j, b in enumerate(passes[i + 1 :]):
123
+ if constraint(a, b):
124
+ continue
125
+ raise RuntimeError(
+ f"pass schedule constraint violated. Expected {a} before {b}"
+ f" but found {a} at index {i} and {b} at index {j} in pass"
128
+ f" list."
129
+ )
130
+
131
+
132
+ def this_before_that_pass_constraint(this: Callable, that: Callable):
133
+ """
134
+ Defines a partial order ('depends on' function) where `this` must occur
135
+ before `that`.
136
+ """
137
+
138
+ def depends_on(a: Callable, b: Callable):
139
+ if a == that and b == this:
140
+ return False
141
+ return True
142
+
143
+ return depends_on
144
+
145
+
146
+ def these_before_those_pass_constraint(these: Callable, those: Callable):
147
+ """
148
+ Defines a partial order ('depends on' function) where `these` must occur
149
+ before `those`. Where the inputs are 'unwrapped' before comparison.
150
+
151
+ For example, the following pass list and constraint list would be invalid.
152
+ ```
153
+ passes = [
154
+ loop_pass(pass_b, 3),
155
+ loop_pass(pass_a, 5),
156
+ ]
157
+
158
+ constraints = [
159
+ these_before_those_pass_constraint(pass_a, pass_b)
160
+ ]
161
+ ```
162
+
163
+ Args:
164
+ these (Callable): pass which should occur first
165
+ those (Callable): pass which should occur later
166
+
167
+ Returns:
168
+ depends_on (Callable[[Object, Object], bool]
169
+ """
170
+
171
+ def depends_on(a: Callable, b: Callable):
172
+ if unwrap(a) == those and unwrap(b) == these:
173
+ return False
174
+ return True
175
+
176
+ return depends_on
177
+
178
+
179
+ class PassManager:
180
+ """
181
+ Construct a PassManager.
182
+
183
+ Collects passes and constraints. This defines the pass schedule, manages
184
+ pass constraints and pass execution.
185
+
186
+ Args:
187
+ passes (Optional[List[Callable]]): list of passes. A pass is a
188
+ callable which modifies an object and returns modified object
189
+ constraints (Optional[List[Callable]]): list of constraints. A
190
+ constraint is a callable which takes two passes (A, B) and returns
191
+ True if A depends on B and False otherwise. See implementation of
192
+ `this_before_that_pass_constraint` for example.
193
+ """
194
+
195
+ passes: List[Callable]
196
+ constraints: List[Callable]
197
+ _validated: bool = False
198
+
199
+ def __init__(
200
+ self,
201
+ passes=None,
202
+ constraints=None,
203
+ ):
204
+ self.passes = passes or []
205
+ self.constraints = constraints or []
206
+
207
+ @classmethod
208
+ def build_from_passlist(cls, passes):
209
+ pm = PassManager(passes)
210
+ # TODO(alexbeloi): add constraint management/validation
211
+ return pm
212
+
213
+ def add_pass(self, _pass: Callable):
214
+ self.passes.append(_pass)
215
+ self._validated = False
216
+
217
+ def add_constraint(self, constraint):
218
+ self.constraints.append(constraint)
219
+ self._validated = False
220
+
221
+ def remove_pass(self, _passes: List[str]):
222
+ if _passes is None:
223
+ return
224
+ passes_left = []
225
+ for ps in self.passes:
226
+ if ps.__name__ not in _passes:
227
+ passes_left.append(ps)
228
+ self.passes = passes_left
229
+ self._validated = False
230
+
231
+ def validate(self):
232
+ """
233
+ Validates that current pass schedule defined by `self.passes` is valid
234
+ according to all constraints in `self.constraints`
235
+ """
236
+ if self._validated:
237
+ return
238
+ for constraint in self.constraints:
239
+ _validate_pass_schedule_constraint(constraint, self.passes)
240
+ self._validated = True
241
+
242
+ def __call__(self, source):
243
+ self.validate()
244
+ out = source
245
+ for _pass in self.passes:
246
+ out = _pass(out)
247
+ return out
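A short end-to-end sketch (illustrative; the passes are toy functions on a dict, in the spirit of the `log_hook` docstring example):

```python
def set_foo(d):
    d["foo"] = "bar"
    return d

def use_foo(d):
    d["baz"] = d.get("foo", "") + "!"
    return d

pm = PassManager(passes=[set_foo, use_foo])
# `use_foo` relies on `set_foo` having run first, so record that ordering constraint.
pm.add_constraint(this_before_that_pass_constraint(set_foo, use_foo))
result = pm({})   # validate() checks the schedule, then each pass runs in order
```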
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/reinplace.py ADDED
@@ -0,0 +1,675 @@
1
+ import torch
2
+ from torch.fx import Node
3
+ from torch.fx._compatibility import compatibility
4
+ from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
5
+ from torch.utils._pytree import tree_map_only
6
+ from torch.utils import _pytree as pytree
7
+ from torch.multiprocessing.reductions import StorageWeakRef
8
+
9
+ import _operator
10
+ from enum import Enum
11
+ import itertools
12
+ from typing import Set, Dict
13
+ from collections import defaultdict
14
+
15
+ __all__ = ['reinplace']
16
+
17
+ class _ViewType(Enum):
18
+ NonView = 0
19
+ SingleOutputView = 1
20
+ MultiOutputView = 2
21
+
22
+ def _is_view_op(tgt):
23
+ if tgt is not None and isinstance(tgt, torch._ops.OpOverload):
24
+ schema = tgt._schema
25
+ if len(schema.arguments) > 0:
26
+ first_arg = schema.arguments[0]
27
+ # check if op is a view
28
+ return first_arg.alias_info is not None and not first_arg.alias_info.is_write
29
+
30
+ def _get_view_type(tgt) -> _ViewType:
31
+ if tgt is not None and isinstance(tgt, torch._ops.OpOverload):
32
+ schema = tgt._schema
33
+ if len(schema.arguments) > 0:
34
+ first_arg = schema.arguments[0]
35
+ # check if op is a view
36
+ if first_arg.alias_info is not None and not first_arg.alias_info.is_write:
37
+ # check if op is a multi-output view
38
+ if '*' in first_arg.alias_info.after_set:
39
+ return _ViewType.MultiOutputView
40
+ else:
41
+ return _ViewType.SingleOutputView
42
+ return _ViewType.NonView
43
+
44
+
45
+ # Stores a bunch of metadata related to functionalization for each node.
46
+ # Relevant metadata:
47
+ # n.meta['fake_result']: FakeTensor (same type as the output of the node, but with FakeTensors instead of Tensors)
48
+ # The fake tensor output from running the current node
49
+ # n.meta['view_of']: Node
50
+ # If the current node n is a view of some base tensor, the 'view_of' field tells us which
51
+ # view node was used to generate the current node (a view tensor).
52
+ # This information actually makes `fake_result` redundant, but we can use `fake_result`
53
+ # to sanity check that our aliasing information is correct.
54
+ @compatibility(is_backward_compatible=False)
55
+ class _FunctionalizationMetadataProp(torch.fx.Interpreter):
56
+
57
+ def run_node(self, node: Node):
58
+ self.node_counter += 1
59
+ result = super().run_node(node)
60
+ node.meta['fake_result'] = result
61
+ node.meta['node_idx'] = self.node_counter
62
+
63
+ # (1) Update metadata with the list of nodes that are used by this node
64
+ # copy_() doesn't read from its first argument; it writes to it, overwriting previous data.
65
+ # We don't want to treat it as "being used as an input".
66
+ node_args = node.args
67
+ if node.target is torch.ops.aten.copy_.default:
68
+ node_args = node_args[1:]
69
+
70
+ # (2) Update metadata to track aliasing information about view tensor nodes.
71
+ if node.op == 'call_function':
72
+ view_type = _get_view_type(node.target)
73
+ if view_type == _ViewType.SingleOutputView:
74
+ assert isinstance(node.args[0], Node)
75
+ node.meta['view_of'] = node.args[0]
76
+ elif view_type == _ViewType.MultiOutputView:
77
+ self.multi_output_view_nodes[node] = node.args[0]
78
+
79
+ # Check if we returned a multi-output view,
80
+ # and we're now grabbing the individual views from the output.
81
+ #
82
+ # For multi-output views, we want to map each output view to the base,
83
+ # but this mapping involves two separate nodes in FX IR.
84
+ # e.g. "a, b = x_1.split(...)" becomes:
85
+ # %split_tensor : [num_users=2] = call_function[target=torch.ops.aten.split.Tensor](args = (%x_1, 2), kwargs = {})
86
+ # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%split_tensor, 0), kwargs = {})
87
+ # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%split_tensor, 1), kwargs = {})
88
+ # And we'd like to set:
89
+ # getitem1.meta['view_of'] = x_1
90
+ elif node.target is _operator.getitem:
91
+ list_arg = node.args[0]
92
+ maybe_base_of_view = self.multi_output_view_nodes.get(list_arg, None)
93
+ if maybe_base_of_view is not None:
94
+ # Note: we could also track indexing info here for multi-output views.
95
+ # I don't think this metadata is strictly needed for de-functionalization.
96
+ assert isinstance(maybe_base_of_view, Node)
97
+ node.meta['view_of'] = maybe_base_of_view
98
+
99
+ if 'view_of' in node.meta:
100
+ # We're linking the current node with its first argument as views.
101
+ # Assert here that this is actually the case, and their storages are the same.
102
+ assert isinstance(node.meta['fake_result'], FakeTensor)
103
+ assert isinstance(node.meta['view_of'].meta['fake_result'], FakeTensor)
104
+ view_storage = StorageWeakRef(node.meta['fake_result']._typed_storage())
105
+ base_storage = StorageWeakRef(node.meta['view_of'].meta['fake_result']._typed_storage())
106
+ assert view_storage == base_storage
107
+ return result
108
+
109
+
110
+
111
+ def propagate(self, *args):
112
+ self.multi_output_view_nodes = {}
113
+ self.node_counter = -1
114
+
115
+ with FakeTensorMode() as mode:
116
+ fake_args = [mode.from_tensor(a) for a in args]
117
+ return super().run(*fake_args)
118
+
119
+ def _schemas_match(functional_schema, inplace_schema):
120
+ names_match = inplace_schema.name.endswith("_") and inplace_schema.name[:-1] == functional_schema.name
121
+ arg_types_match = len(functional_schema.arguments) == len(inplace_schema.arguments) and all(
122
+ a1.type == a2.type for a1, a2 in zip(functional_schema.arguments, inplace_schema.arguments))
123
+ # for the inplace op, its first argument should be mutable
124
+ assert inplace_schema.arguments[0].alias_info is not None and inplace_schema.arguments[0].alias_info.is_write
125
+ # and its remaining arguments shouldn't be.
126
+ assert all(a.alias_info is None for a in inplace_schema.arguments[1:])
127
+ return names_match and arg_types_match
128
+
129
+ # TODO: this should be beefed up to be able to properly re-inplace with:
130
+ # - mutating ops (e.g. _fused_moving_avg_obs_fq_helper)
131
+ # - out= ops (e.g. angle -> angle.out)
132
+ # TODO: we should also figure this info out using torchgen.
133
+ def _maybe_get_inplace_op(op):
134
+ # __module__ seems broken; it returns torch._ops.aten which doesn't exist
135
+ if not isinstance(op, torch._ops.OpOverload):
136
+ return None
137
+ # Some view ops have inplace variants (as_strided_, etc),
138
+ # but we do NOT want the reinplacing pass to directly add these into the program.
139
+ # (they'll require extra special handling, and aren't really useful for perf anyway)
140
+ if _is_view_op(op):
141
+ return None
142
+ op_namespace = op.__module__.split(".")[-1]
143
+ op_base_name = op.overloadpacket.__name__
144
+ maybe_namespace_module = getattr(torch.ops, op_namespace)
145
+ maybe_inplace_op = None if maybe_namespace_module is None else getattr(maybe_namespace_module, f'{op_base_name}_', None)
146
+ if maybe_inplace_op is None:
147
+ return None
148
+
149
+ inplace_overloads = [
150
+ getattr(maybe_inplace_op, overload_name) for overload_name in maybe_inplace_op.overloads()
151
+ ]
152
+ inplace_overloads_with_matching_schemas = [
153
+ f
154
+ for f in inplace_overloads
155
+ if _schemas_match(op._schema, f._schema)
156
+ ]
157
+ # Just because foo() and foo_() are both existing operators,
158
+ # They aren't guaranteed to have compatible schemas.
159
+ # For example, pow.Scalar(Scalar self, Tensor exponent) has no valid inplace variant,
160
+ # Even though several overloads of pow_ exist.
161
+ if len(inplace_overloads_with_matching_schemas) == 0:
162
+ return None
163
+ assert len(inplace_overloads_with_matching_schemas) == 1
164
+ inplace_op = inplace_overloads_with_matching_schemas[0]
165
+ return inplace_op
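For a concrete sense of what this helper does (illustrative; the exact result depends on the op registry of the installed torch build):

```python
# A functional ATen overload whose in-place counterpart has a matching schema:
inplace = _maybe_get_inplace_op(torch.ops.aten.add.Tensor)
# Here `inplace` would be torch.ops.aten.add_.Tensor.

# View ops (and ops with no matching `<name>_` variant) yield None:
assert _maybe_get_inplace_op(torch.ops.aten.view.default) is None
```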
166
+
167
+ _VIEW_INVERSE_MAP = {
168
+ torch.ops.aten.diagonal_scatter.default: torch.ops.aten.diagonal.default,
169
+ torch.ops.aten.select_scatter.default: torch.ops.aten.select.int,
170
+ torch.ops.aten.slice_scatter.default: torch.ops.aten.slice.Tensor,
171
+ torch.ops.aten.as_strided_scatter.default: torch.ops.aten.as_strided.default,
172
+ }
173
+
174
+ # This function, given a set of set of (aliased) tensor nodes,
175
+ # Returns any nodes in the graph that *use* any of the aliases, that occur *after* op_index
176
+ # in the node ordering.
177
+ def _get_all_later_node_usages(tensor_aliases: Set[Node], op_index: int):
178
+ def _add_if_tensor(x, set_):
179
+ if isinstance(x, FakeTensor):
180
+ set_.add(StorageWeakRef(x._typed_storage()))
181
+
182
+ nodes_used_after = set()
183
+ for t in tensor_aliases:
184
+ # get all nodes that use the current alias
185
+ usage_nodes = t.users
186
+ for n in usage_nodes:
187
+ # We only care about usages after the current node
188
+ if 'node_idx' not in n.meta or n.meta['node_idx'] <= op_index:
189
+ continue
190
+ # We also don't care about intermediate view ops.
191
+ # They only matter if their output is then used elsewhere
192
+ # (either in an out-of-place op, or as an output to the function).
193
+ if n in tensor_aliases:
194
+ if isinstance(n.target, torch._ops.OpOverload) or n.target == _operator.getitem:
195
+ continue
196
+ nodes_used_after.add(n)
197
+ return nodes_used_after
198
+
199
+ # Given an op that we're trying to re-inplace, "b = foo(a)",
200
+ # And given a {view}_scatter op that shows up later in the graph, "y = {view}_scatter(base, x, args...)"
201
+ # Then re-inplacing `foo()` would allow us to remove the `{view}_scatter` op entirely, IF:
202
+ # If there are any aliases in the alias_set(a) that satisfy:
203
+ # (1) The base of "alias", "alias_base", has the same size/stride/offset metadata as "base"
204
+ # (2) The output of running {view}(alias, args...) gives you the same size/stride/offset metadata
205
+ # as "alias"
206
+ def _get_view_inverse_node_usages(later_node_usages: Set[Node], self_aliases: Set[Node]) -> Set[Node]:
207
+ def matching_view_metadata(a, b):
208
+ return a.size() == b.size() and \
209
+ a.stride() == b.stride() and \
210
+ a.storage_offset() == b.storage_offset()
211
+
212
+ view_inverse_nodes = set()
213
+ # Go through them in node order, so we can see chains of view_scatter ops.
214
+ for n in sorted(later_node_usages, key=lambda x: x.meta['node_idx']):
215
+ if n.target not in _VIEW_INVERSE_MAP:
216
+ continue
217
+ base = n.args[0]
218
+ mutated_view = n.args[1]
219
+ assert isinstance(base, Node)
220
+ assert isinstance(base.meta['fake_result'], FakeTensor)
221
+ assert isinstance(mutated_view, Node)
222
+ assert isinstance(mutated_view.meta['fake_result'], FakeTensor)
223
+ # Check that this view_inverse op actually corresponds to doing the inverse
224
+ # of one of our existing self_alias nodes.
225
+ original_view = _VIEW_INVERSE_MAP[n.target]
226
+ for self_alias in self_aliases:
227
+ # We're looking for some alias of the self arg, "alias",
228
+ # that was created from some op `alias = foo(base, args...)`
229
+ # such that the current _scatter op "inverts" that foo call.
230
+ # We can check that by running the original op again, and checking that the strides match.
231
+ if 'view_of' not in self_alias.meta:
232
+ continue
233
+ self_alias_base = self_alias.meta['view_of']
234
+ try:
235
+ # Here we're trying to re-use the args from the view_scatter call inside of the corresponding
236
+ # view op, which might throw. This just indicates that view_scatter op isn't a valid inverse
237
+ # of the current alias we're looking at.
238
+ view_replay_metadata = original_view(self_alias_base.meta['fake_result'], *n.args[2:], **n.kwargs)
239
+ expected_metadata = self_alias.meta['fake_result']
240
+ # If the alias and its base both have matching metadata, then this view_scatter op is valid to re-inplace.
241
+ if matching_view_metadata(self_alias_base.meta['fake_result'], base.meta['fake_result']) and \
242
+ matching_view_metadata(view_replay_metadata, expected_metadata):
243
+ view_inverse_nodes.add(n)
244
+ except Exception:
245
+ continue
246
+
247
+ return view_inverse_nodes
248
+
249
+
250
+ @compatibility(is_backward_compatible=True)
251
+ def reinplace(gm, *sample_args):
252
+ """
253
+ Given an fx.GraphModule, modifies it to perform "reinplacing",
254
+ mutating the nodes of the graph.
255
+ We look for out-of-place op call sites like `b = a.add(...)`,
256
+ and convert them to be inplace (`b = a.add_(...)`),
257
+ as long as the input to the current operator ("a") isn't re-used
258
+ anywhere later in the graph.
259
+
260
+ This pass currently expects to operate on a **functional, ATen** graph.
261
+ This can be obtained by running `make_fx(functionalize(f))`.
262
+
263
+ Sample inputs are needed to determine aliasing relationships of the inputs.
264
+ In general, we can't reinplace node `b = a.add(...)` if "a" aliases any of the
265
+ inputs to the program.
266
+
267
+ Given a node "b = foo(a, args...) the algorithm for re-inplacing is as follows:
268
+
269
+ (1) Perform some initial checks on the metadata of "a" and "args..."
270
+ that can disqualify them from being reinplaced.
271
+
272
+ (1a) Check that the self argument we're attempting to reinplace
273
+ has acceptable dtype/size metadata to reinplace with.
274
+
275
+ For example, if we have:
276
+ a = torch.ones(1)
277
+ b = torch.ones(10)
278
+ out = torch.add(a, b)
279
+ We can't turn that into
280
+ a.add_(b)
281
+ Because that would require resizing "a".
282
+
283
+ Similarly, we can't convert torch.ge(a, b) into a.ge_(b),
284
+ because that would require changing a's dtype (from e.g. float32 to bool).
285
+ Note that in this specific example, we could technically do better..
286
+
287
+ If we see the pattern:
288
+ a_1 = a.ge(b)
289
+ a_2 = aten._to_copy(a_1, a.dtype)
290
+ Then this should be valid to completely re-inplace
291
+ (this is exactly what functionalization will emit when it sees a.ge_(b)).
292
+
293
+ This optimization is only really important for user programs
294
+ that directly use inplace comparison ops though.
295
+
296
+ We also cannot re-inplace on tensors that have overlapping memory,
297
+ e.g. torch.ones(1).expand(4, 4).add_(1)
298
+
299
+ (1b) Check if "a" is an alias of any of the program inputs.
300
+
301
+ If it is, skip and move to the next node.
302
+ Inplace'ing an op that would cause it to mutate a program is not sound,
303
+ because that would be a side effect visible to the user.
304
+
305
+ NOTE: there's a future optimization that we should make:
306
+ if "a" is a (alias of a) program input, but later in the program
307
+ there is a node that looks like "a.copy_(...)",
308
+ Then re-inplacing is ok to do - we are temporarily re-using a's buffer,
309
+ which will later be overwritten by the copy_() call.
310
+
311
+ This will be an important optimization to have for programs that mutate
312
+ their inputs. It currently isn't implemented though.
313
+
314
+ (1c) Check if "a" and "args..." alias
315
+
316
+ For example, re-inplacing to create code like the below
317
+ isn't guaranteed to be sound:
318
+
319
+ aten.mul_(a, a)
320
+
321
+ (2) Check that "a" and all of its outstanding aliases are not used anywhere
322
+ later in the graph. If this is the case, then it's safe to re-inplace
323
+ to "b = foo_(a)".
324
+
325
+ There are a few caveats to this, explained in more detail below:
326
+ (a) If "a" is used later as an argument to a view op, that is okay.
327
+ It's only a problem if "a" (or that view) is later passed
328
+ into a normal operator, or if it is returned as the program output.
329
+ (b) If "a" is a repeat argument in `foo()`, then don't reinplace.
330
+ Most ATen kernels don't make any guarantees that this is sound,
331
+ e.g. if you do aten.mul_(a, a).
332
+ So we'll just ban re-inplacing in this case.
333
+ It's only a problem if "a" (or that view) is later passed
334
+ (c) If "a" is used as an input into a view "inverse" / "scatter"
335
+ operator, it is potentially fine to re-inplace
336
+ (and remove that scatter operator from the graph).
337
+ See below for a more detailed example.
338
+
339
+ NOTE: there is an optimization in this step that is crucial
340
+ to fully recovering performance from functionalization.
341
+
342
+ Given this program:
343
+ def f(x):
344
+ a = torch.ops.aten.add(x, x)
345
+ b = torch.ops.aten.diagonal(a)
346
+ torch.ops.aten.fill_(b, 0)
347
+ return d
348
+
349
+ Functionalization will emit the following:
350
+ def f(x):
351
+ a = torch.ops.aten.add(x, x)
352
+ b = torch.ops.aten.diagonal(a, 0, 1)
353
+ b_updated = torch.ops.aten.fill(b, 0)
354
+ a_updated = torch.ops.aten.diagonal_scatter(a, b_updated, 0, 1)
355
+ return a_updated
356
+
357
+ Ordinarily, we would not be able to reinplace the fill,
358
+ because "b" aliases with "a" which is used by the diagonal_scatter call.
359
+
360
+ "re-inplacing" is on the hook for figuring out that it is ok to
361
+ completely remove the expensive diagonal_scatter call, if we re-inplace the add().
362
+
363
+ So, for every `alias in alias_set(a)`, instead of checking
364
+ that "alias" is not used anywhere later in the graph,
365
+ we check that
366
+ EITHER:
367
+ (a) alias is not used anywhere later in the graph
368
+ OR:
369
+ (b) alias is used exactly once later on in the graph,
370
+ in the following op:
371
+
372
+ out = foo_scatter(alias, x, args...)
373
+
374
+ where the following must hold:
375
+ (i) "foo_scatter" is the "inverse" operator for foo.
376
+ This only applies to "foo" ops that are view operators,
377
+ which view into a subset of the original tensor's memory.
378
+ In practice, there are ~4 operators where this applies:
379
+ diagonal -> diagonal_scatter
380
+ slice -> slice_scatter
381
+ select -> select_scatter
382
+ as_strided -> as_strided_scatter
383
+ (ii) "args..." are the same between the foo() and foo_scatter() calls.
384
+
385
+ (3) Perform the actual re-inplacing on foo!
386
+
387
+ (3b) is the common case, but special care is needed for {view}_scatter (3a)
388
+
389
+ (3a) {view}_scatter ops.
390
+
391
+ Consider this program:
392
+ a = torch.zeros(2, 2)
393
+ b = torch.ones(2)
394
+ a[0] = b
395
+
396
+ Post functionalization, that will look like:
397
+ a = torch.zeros(2, 2)
398
+ b = torch.ones(2)
399
+ a_updated = torch.select_scatter(a, b, 0, 0)
400
+
401
+ In this case though, there is no "functional" op to re-inplace!
402
+ Instead, we'd like to directly remove the select_scatter call.
403
+ We already know from (2) that this is valid,
404
+ because "a" has no later usages in the graph.
405
+
406
+ We perform the re-inplacing on the {view}_scatter op like so
407
+ Before:
408
+ a_updated = torch.select_scatter(a, b, args...)
409
+ After:
410
+ a_slice = torch.select(a, args...)
411
+ a_slice.copy_(b)
412
+
413
+ (3b) Otherwise, replace the functional op with its inplace variant.
414
+ Before:
415
+ b = foo(a, args...)
416
+ After:
417
+ a.foo_(args...)
418
+
419
+ (4) Finally, after converting either:
420
+ Before:
421
+ b = foo(a)
422
+ After:
423
+ foo_(a)
424
+ or
425
+ Before:
426
+ b = {slice}_scatter(a, mutated_slice, args...)
427
+ After:
428
+ slice = {slice}(a, args...)
429
+ slice.copy_(mutated_slice)
430
+
431
+ We now need to find all later nodes that use "b" as an argument
432
+ and update them to take in "a" instead.
433
+
434
+ Note that for the majority of inplace ops, this isn't actually necessary
435
+ (because most inplace ops return "self" as their output).
436
+ This isn't generally true for all mutable ops though, which is why
437
+ we need to actually replace all of the arguments.
438
+
439
+ We also need to update our Dict[StorageWeakRef, Set[Node]] metadata,
440
+ which maps a given tensor storage to the set of all nodes that take in that storage
441
+ as an input.
442
+ Specifically, re-inplacing `b = foo(a)` causes "a" and "b"'s sets to get fused
443
+ together.
444
+
445
+ (5) Any "view_inverse/scatter" nodes that were identified as "it's ok to ignore them"
446
+ during step (3) get manually deleted from the graph.
447
+ Their outputs are no longer used, so technically standard DCE would be able
448
+ to do this, but we can no longer run FX's DCE pass now that we have mutable
449
+ ops in the graph.
450
+ """
451
+ _FunctionalizationMetadataProp(gm).propagate(*sample_args)
452
+
453
+ # Useful debug printing
454
+ # def _print(x):
455
+ # if isinstance(x, FakeTensor):
456
+ # print(f'fake_result: {StorageWeakRef(x._typed_storage()).cdata}')
457
+
458
+ # for n in gm.graph.nodes:
459
+ # print(n.format_node())
460
+ # if hasattr(n, 'meta'):
461
+ # print(f'node_idx: {n.meta["node_idx"]}')
462
+ # if 'fake_result' in n.meta:
463
+ # tree_map(_print, n.meta['fake_result'])
464
+ # if 'view_of' in n.meta:
465
+ # print(f'view_of: {str(n.meta["view_of"])}')
466
+ # print()
467
+
468
+ # We need to know which nodes correspond to inputs (or their aliases)
469
+ # so we know not to re-inplace them.
470
+ # NOTE: later, we'll need to add an optimization for fully recovering performance
471
+ # on programs that mutate inputs.
472
+ input_storages = {
473
+ StorageWeakRef(
474
+ node.meta['fake_result']._typed_storage()
475
+ ) for node in gm.graph.nodes if node.op == 'placeholder'}
476
+
477
+
478
+ # We also need to know for a given node, what are all of its aliasing nodes.
479
+ storage_to_nodes: Dict[StorageWeakRef, Set[Node]] = defaultdict(set)
480
+ for n in gm.graph.nodes:
481
+ if 'fake_result' in n.meta:
482
+ # Tree-mapping because some ops can return lists of tensors.
483
+ def _add_to_map(x):
484
+ if isinstance(x, FakeTensor):
485
+ storage_to_nodes[StorageWeakRef(x._typed_storage())].add(n)
486
+ pytree.tree_map_(_add_to_map, n.meta['fake_result'])
487
+
488
+ # inplace-ify functional ops, subject to the constraints written below.
489
+ all_later_view_inverse_nodes_to_delete = set()
490
+ for idx, node in enumerate(gm.graph.nodes):
491
+ if node.op == 'call_function':
492
+
493
+ # Today, the re-inplace pass directly acts on:
494
+ # - functional ops with an inplace variant
495
+ # - {view}_scatter ops that can be potentially removed from the graph.
496
+ # Both of these ops take in tensor first args, so filtering on this condition
497
+ # makes the later code simpler.
498
+ # We should revisit this at some point though, particularly when we also want
499
+ # the reinplacer to be able to handle out= and mutable operators
500
+ # and tensorlist first args (like `_foreach_` ops).
501
+ if not isinstance(node.target, torch._ops.OpOverload):
502
+ continue
503
+ if len(node.target._schema.arguments) < 1:
504
+ continue
505
+ if type(node.target._schema.arguments[0].type) != torch.TensorType:
506
+ continue
507
+
508
+ # Step 1a: Check that the self argument we're attempting to reinplace
509
+ # has the same size/stride as the output.
510
+ # For example, we shouldn't try to reinplace torch.add(scalar_tensor, larger_tensor)
511
+ # As it would require resizing scalar_tensor.
512
+ # (We could potentially swizzle this into larger_tensor.add_(scalar_tensor),
513
+ # this is probably an optimization to revisit later).
514
+ self_arg = node.args[0]
515
+ self_flattened = pytree.tree_leaves(self_arg.meta['fake_result'])
516
+ node_flattened = pytree.tree_leaves(node.meta['fake_result'])
517
+ self_has_wrong_metadata = False
518
+ if len(self_flattened) == len(node_flattened):
519
+ for self_meta, node_meta in zip(self_flattened, node_flattened):
520
+ if self_meta.numel() != node_meta.numel():
521
+ self_has_wrong_metadata = True
522
+ if self_meta.dtype != node_meta.dtype:
523
+ self_has_wrong_metadata = True
524
+ # We also cannot re-inplace on tensors that have internal memory overlap.
525
+ # e.g. torch.ones(1).expand(4, 4).add_(1)
526
+ if torch._debug_has_internal_overlap(self_meta) == 1:
527
+ self_has_wrong_metadata = True
528
+ # Here, we (optimistically) assume that a.resize(b) is valid to re-inplace,
529
+ # Since users should never really be calling the functional "torch.ops.aten.resize"
530
+ # op directly in their programs.
531
+ if self_has_wrong_metadata and node.target != torch.ops.aten.resize.default:
532
+ continue
533
+
534
+ # Step 1b: ensure that the op we're trying to re-inplace isn't a program input
535
+ self_arg_name = self_arg.name
536
+ self_arg_storage = StorageWeakRef(self_arg.meta['fake_result']._typed_storage())
537
+ if self_arg_storage in input_storages:
538
+ # TODO: later, add the optimization for handling `copy_()` calls in the graph.
539
+ continue
540
+ if len([x for x in node.args if x is self_arg]) > 1:
541
+ # Step 1c:
542
+ # Calling stuff like aten.mul_(a, a) isn't guaranteed to be sound,
543
+ # so we prevent re-inplacing in this case.
544
+ continue
545
+
546
+ self_arg_storage = StorageWeakRef(self_arg.meta['fake_result']._typed_storage())
547
+ self_aliases = storage_to_nodes[self_arg_storage]
548
+
549
+ # First, we find all later usages of any of the aliases of self_arg.
550
+ later_node_usages = _get_all_later_node_usages(self_aliases, node.meta['node_idx'])
551
+ # Then, we check if any of those later usages are actually view_scatter ops
552
+ # that are safe to fully remove.
553
+ later_view_inverse_node_usages = _get_view_inverse_node_usages(later_node_usages, self_aliases)
554
+
555
+ # Step 2: Check to see if the input to the op is re-used later in the graph.
556
+ # If not (same goes for its aliases), then this op is safe to re-in place.
557
+ # This is a slightly roundabout way to check that there are no later usages of the current self argument.
558
+ # (later_view_inverse_node_usages corresponds to "view_scatter" nodes that we are allowed to delete)
559
+ can_reinplace = len(later_node_usages - later_view_inverse_node_usages) == 0
560
+ if not can_reinplace:
561
+ continue
562
+
563
+ # Step 3a: Special handling for when we see *_scatter operators.
564
+ # When we see an operator like `b = torch.slice_scatter(a, ...)`,
565
+ # instead of trying to "inplace" it into a.slice_scatter_(...),
566
+ # we would prefer to remove it from the graph entirely,
567
+ # and instead copy_() the slice directly into the larger tensor.
568
+ # See the description of the algorithm for a full example.
569
+ if node.target in _VIEW_INVERSE_MAP and node not in all_later_view_inverse_nodes_to_delete:
570
+ view_op = _VIEW_INVERSE_MAP[node.target]
571
+ # Before:
572
+ # base_updated = torch.ops.aten.slice_scatter.default(base, mutated_slice, args...)
573
+ # After:
574
+ # slice = torch.ops.aten.slice.default(base, args...)
575
+ # slice.copy_(mutated_slice)
576
+ with gm.graph.inserting_before(node):
577
+ mutated_slice_node = node.args[1]
578
+ remaining_slice_args = node.args[2:]
579
+ slice_node = gm.graph.create_node(
580
+ 'call_function', view_op, (self_arg,) + tuple(remaining_slice_args), node.kwargs)
581
+ copy_node = gm.graph.create_node(
582
+ 'call_function', torch.ops.aten.copy_.default, (slice_node, mutated_slice_node,), {})
583
+ # Add the slice_scatter node to our "nodes to delete" list.
584
+ all_later_view_inverse_nodes_to_delete.add(node)
585
+
586
+
587
+ else:
588
+ # Step 3b: Check to see if this operator has an inplace variant.
589
+ maybe_inplace_op = _maybe_get_inplace_op(node.target)
590
+ if maybe_inplace_op is None:
591
+ continue
592
+ # And if so, replace it with its inplace variant.
593
+ node.target = maybe_inplace_op
594
+
595
+ # At this point, 'storage_to_nodes' will be stale.
596
+ # Now that we're inplacing `b = foo(a)`, we need to effectively
597
+ # union together the dict values for b and a's storage.
598
+ # Hmm... morally I think we also want to keep the `fake_result` metadata
599
+ # up to date here, but I'm not sure how easy it is to do.
600
+ # Maybe it's fine to wait until the end of the pass to update it.
601
+ curr_node_storage = StorageWeakRef(node.meta['fake_result']._typed_storage())
602
+ storage_to_nodes[self_arg_storage].update(storage_to_nodes[curr_node_storage])
603
+ storage_to_nodes[curr_node_storage].update(storage_to_nodes[self_arg_storage])
604
+
605
+ # Need to remember the view_scatter view nodes we found so we can remove them later.
606
+ all_later_view_inverse_nodes_to_delete.update(later_view_inverse_node_usages)
607
+
608
+ # Step 4:
609
+ # Now that we've replaced b = a.foo() with a.foo_(),
610
+ # We need to replace any later usages of "b" with "a"
611
+ for old in itertools.chain([node], later_view_inverse_node_usages):
612
+ new = old.args[0]
613
+ nodes_to_update = [n for n in old.users if n.meta['node_idx'] > node.meta['node_idx']]
614
+ for node_to_update in nodes_to_update:
615
+ new_args = []
616
+ args = node_to_update.args
617
+
618
+ def replace_arg(a):
619
+ if a == old:
620
+ return new
621
+ return a
622
+
623
+ # First, replace usages of "b" with "a"
624
+ node_to_update.args = tree_map_only(Node, replace_arg, node_to_update.args)
625
+ node_to_update.kwargs = tree_map_only(Node, replace_arg, node_to_update.kwargs)
626
+
627
+ # Second, update our storage_to_nodes data structure.
628
+ old_flattened_res = pytree.tree_leaves(old.meta['fake_result'])
629
+ node_flattened_res = pytree.tree_leaves(node_to_update.meta['fake_result'])
630
+
631
+ old_res_storage = {
632
+ StorageWeakRef(
633
+ x._typed_storage()
634
+ ) for x in old_flattened_res if isinstance(x, FakeTensor)}
635
+ node_res_storage = {
636
+ StorageWeakRef(
637
+ x._typed_storage()
638
+ ) for x in node_flattened_res if isinstance(x, FakeTensor)}
639
+
640
+ # This will happen if we're updating a view op, e.g.
641
+ # replacing
642
+ # x = view(old)
643
+ # with x = view(new)
644
+ # When that happens, we need to make sure to keep our
645
+ # storage mapping up to date.
646
+ #
647
+ # We're checking for len(...) == 1 here because all view ops are guaranteed to return either a single tensor,
648
+ # or multiple tensors that all share the same storage.
649
+ # We can't just check equality because we might encounter FX nodes that return zero tensor outputs.
650
+ if len(old_res_storage) == 1 and len(node_res_storage) == 1 and old_res_storage == node_res_storage:
651
+ new_flattened_res = pytree.tree_leaves(new.meta['fake_result'])
652
+ new_res_storage = {
653
+ StorageWeakRef(
654
+ x._typed_storage()
655
+ ) for x in new_flattened_res if isinstance(x, FakeTensor)}
656
+ assert len(new_res_storage) == 1
657
+ (old_ref,) = old_res_storage
658
+ (new_ref,) = new_res_storage
659
+ (node_ref,) = node_res_storage
660
+ # Technically, "old_ref" and all its aliases will remain
661
+ # in our mapping.
662
+ # That should be fine though, since we deleted "old"
663
+ # from the graph at this point.
664
+ storage_to_nodes[node_ref].update(storage_to_nodes[new_ref])
665
+ storage_to_nodes[new_ref].update(storage_to_nodes[node_ref])
666
+
667
+ # Step 5: delete any _scatter nodes that we de-functionalized
668
+ # Need to take care not to delete any of these nodes until after *all* modifications
669
+ # to the graph are finished.
670
+ for to_delete in all_later_view_inverse_nodes_to_delete:
671
+ gm.graph.erase_node(to_delete)
672
+
673
+
674
+ gm.recompile()
675
+ return gm
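+
+ # A minimal usage sketch (illustrative): how a functionalized graph might be
+ # produced and then handed to this pass. The entry-point name `reinplace` is an
+ # assumption based on the (gm, *sample_args) signature used above; `make_fx` and
+ # `torch.func.functionalize` are standard PyTorch utilities.
+ #
+ #   import torch
+ #   from torch.fx.experimental.proxy_tensor import make_fx
+ #   from torch.func import functionalize
+ #
+ #   def f(x):
+ #       a = x + x
+ #       a.relu_()   # in-place op that functionalization rewrites into functional form
+ #       return a
+ #
+ #   sample = torch.randn(4)
+ #   gm = make_fx(functionalize(f))(sample)   # graph now contains only functional ops
+ #   gm = reinplace(gm, sample)               # this pass restores the in-place variant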
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/shape_prop.py ADDED
@@ -0,0 +1,193 @@
1
+ import torch
2
+ import torch.fx
3
+ import traceback
4
+
5
+ from torch._dispatch.python import enable_python_dispatcher
6
+ from torch.fx.node import Node, map_aggregate
7
+ from typing import Any, Tuple, NamedTuple, Optional, Dict
8
+ from torch.fx._compatibility import compatibility
9
+ from torch._guards import detect_fake_mode
10
+
11
+ __all__ = ['TensorMetadata', 'ShapeProp']
12
+
13
+ @compatibility(is_backward_compatible=True)
14
+ class TensorMetadata(NamedTuple):
15
+ # TensorMetadata is a structure containing pertinent information
16
+ # about a tensor within a PyTorch program.
17
+
18
+ # General Tensor metadata
19
+ shape : torch.Size
20
+ dtype : torch.dtype
21
+ requires_grad : bool
22
+ stride : Tuple[int, ...]
23
+ memory_format : Optional[torch.memory_format]
24
+
25
+ # Quantization metadata
26
+ is_quantized : bool
27
+ qparams: Dict[str, Any]
28
+
29
+ def _extract_tensor_metadata(result : torch.Tensor) -> TensorMetadata:
30
+ """
31
+ Extract a TensorMetadata NamedTuple describing `result`.
32
+ """
33
+ shape = result.shape
34
+ dtype = result.dtype
35
+ requires_grad = result.requires_grad
36
+ stride = result.stride()
37
+
38
+ memory_formats = {
39
+ torch.contiguous_format,
40
+ torch.channels_last,
41
+ torch.channels_last_3d,
42
+ }
43
+
44
+ memory_format = None
45
+
46
+ for query_format in memory_formats:
47
+ if result.is_contiguous(memory_format=query_format):
48
+ memory_format = query_format
49
+ break
50
+
51
+ is_quantized = result.is_quantized
52
+ qparams: Dict[str, Any] = {}
53
+ if is_quantized:
54
+ qscheme = result.qscheme()
55
+ qparams["qscheme"] = qscheme
56
+ if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
57
+ qparams["scale"] = result.q_scale() # type: ignore[assignment]
58
+ qparams["zero_point"] = result.q_zero_point() # type: ignore[assignment]
59
+ elif qscheme in {torch.per_channel_affine, torch.per_channel_affine_float_qparams, torch.per_channel_symmetric}:
60
+ # In this branch, scale and zero_point are expected to be tensors,
61
+ # we store the values as immutable_list in TensorMetadata for
62
+ # easier serialization downstream
63
+ qparams["scale"] = result.q_per_channel_scales().tolist() # type: ignore[assignment]
64
+ qparams["zero_point"] = result.q_per_channel_zero_points().tolist() # type: ignore[assignment]
65
+ qparams["axis"] = result.q_per_channel_axis() # type: ignore[assignment]
66
+
67
+ return TensorMetadata(
68
+ shape, dtype, requires_grad, stride, memory_format, is_quantized, qparams)
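+
+ # Illustrative sketch of typical output (values assumed for a small contiguous
+ # float tensor): _extract_tensor_metadata(torch.randn(2, 3)) yields roughly
+ #   TensorMetadata(shape=torch.Size([2, 3]), dtype=torch.float32,
+ #                  requires_grad=False, stride=(3, 1),
+ #                  memory_format=torch.contiguous_format,
+ #                  is_quantized=False, qparams={})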
69
+
70
+ @compatibility(is_backward_compatible=True)
71
+ class ShapeProp(torch.fx.Interpreter):
72
+ """
73
+ Execute an FX graph Node-by-Node and
74
+ record the shape and type of the result
75
+ into the corresponding node.
76
+
77
+ Example:
78
+ In this example, we record the shape
79
+ and data type of a module given
80
+ an example input ``torch.randn(50, D_in)``.
81
+ We print the name, shape and dtype of each node.
82
+
83
+ class TwoLayerNet(torch.nn.Module):
84
+ def __init__(self, D_in, H, D_out):
85
+ super().__init__()
86
+ self.linear1 = torch.nn.Linear(D_in, H)
87
+ self.linear2 = torch.nn.Linear(H, D_out)
88
+ def forward(self, x):
89
+ h_relu = self.linear1(x).clamp(min=0)
90
+ y_pred = self.linear2(h_relu)
91
+ return y_pred
92
+ N, D_in, H, D_out = 64, 1000, 100, 10
93
+ x = torch.randn(N, D_in)
94
+ y = torch.randn(N, D_out)
95
+ model = TwoLayerNet(D_in, H, D_out)
96
+ gm = torch.fx.symbolic_trace(model)
97
+ sample_input = torch.randn(50, D_in)
98
+ ShapeProp(gm).propagate(sample_input)
99
+
100
+ for node in gm.graph.nodes:
101
+ print(node.name, node.meta['tensor_meta'].dtype,
102
+ node.meta['tensor_meta'].shape)
103
+
104
+ The output of this code is:
105
+
106
+ x torch.float32 torch.Size([50, 1000])
107
+ linear1 torch.float32 torch.Size([50, 100])
108
+ clamp_1 torch.float32 torch.Size([50, 100])
109
+ linear2 torch.float32 torch.Size([50, 10])
110
+ output torch.float32 torch.Size([50, 10])
111
+
112
+ Args:
113
+ module (GraphModule): The module to be executed
114
+ fake_mode (FakeTensorMode): A fake mode for copying the gm
115
+
116
+ """
117
+ def __init__(self, gm, fake_mode=None):
118
+ super().__init__(gm)
119
+ if fake_mode is None:
120
+ fake_mode = detect_fake_mode()
121
+ if fake_mode is not None:
122
+ from torch._dynamo.utils import deepcopy_to_fake_tensor
123
+ # Note:
124
+ # We need fake execution because the inputs are fake; however, we cannot fakify the module
125
+ # - because we need to write to the tensor_meta of the real module. So we fakify to
126
+ # produce a result (L131 below), to extract tensor meta, and then keep going.
127
+ #
128
+ # If we were to fakify, we would write to the wrong node, and then downstream fusion
129
+ # would be missing the tensor_meta.
130
+ #
131
+ # See torch/_inductor/overrides.py for where this is called upstream of fusion.
132
+ self.fake_module = deepcopy_to_fake_tensor(self.module, fake_mode)
133
+ self.fake_mode = fake_mode
134
+ else:
135
+ self.fake_module = None
136
+ self.fake_mode = None
137
+
138
+ self.real_module = self.module
139
+
140
+ def run_node(self, n : Node) -> Any:
141
+ try:
142
+ if self.fake_module is not None:
143
+ # Hacky swap. Alternatively, we could do this with overriding
144
+ # call_module and get_attr.
145
+ self.module = self.fake_module
146
+ try:
147
+ if self.fake_mode is not None:
148
+ with self.fake_mode, enable_python_dispatcher():
149
+ result = super().run_node(n)
150
+ else:
151
+ result = super().run_node(n)
152
+ finally:
153
+ self.module = self.real_module
154
+ except Exception as e:
155
+ traceback.print_exc()
156
+ raise RuntimeError(
157
+ f"ShapeProp error for: node={n.format_node()} with "
158
+ f"meta={n.meta}"
159
+ ) from e
160
+
161
+ found_tensor = False
162
+
163
+ def extract_tensor_meta(obj):
164
+ if isinstance(obj, torch.Tensor):
165
+ nonlocal found_tensor
166
+ found_tensor = True
167
+ return _extract_tensor_metadata(obj)
168
+ else:
169
+ return obj
170
+
171
+ meta = map_aggregate(result, extract_tensor_meta)
172
+ if found_tensor:
173
+ n.meta['tensor_meta'] = meta
174
+
175
+ n.meta['type'] = type(result)
176
+ return result
177
+
178
+ def propagate(self, *args):
179
+ """
180
+ Run `module` via interpretation, recording the shape and type
181
+ of each node, and return the result.
182
+
183
+ Args:
184
+ *args (Tensor): the sample input.
185
+
186
+ Returns:
187
+ Any: The value returned from executing the Module
188
+ """
189
+ if self.fake_mode is not None:
190
+ fake_args = [self.fake_mode.from_tensor(t) if isinstance(t, torch.Tensor) else t for t in args]
191
+ else:
192
+ fake_args = args
193
+ return super().run(*fake_args)
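+
+ # Fake-mode usage sketch (illustrative, assuming a traced `gm` and `sample_input`
+ # as in the class docstring): shapes can be propagated without materializing real
+ # intermediate tensors.
+ #
+ #   from torch._subclasses.fake_tensor import FakeTensorMode
+ #   ShapeProp(gm, fake_mode=FakeTensorMode()).propagate(sample_input)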
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/split_module.py ADDED
@@ -0,0 +1,507 @@
1
+ import inspect
2
+ from typing import Any, Callable, Dict, List, Optional, Set, TYPE_CHECKING
3
+ from collections import OrderedDict
4
+ import logging
5
+
6
+ import torch
7
+ from torch.fx._compatibility import compatibility
8
+ from torch.fx.graph_module import GraphModule
9
+ from torch.fx.node import Node
10
+
11
+ if TYPE_CHECKING:
12
+ import sympy # noqa: F401
13
+
14
+ __all__ = ["Partition", "split_module"]
15
+ _LOGGER = logging.getLogger(__name__)
16
+
17
+ @compatibility(is_backward_compatible=True)
18
+ class Partition:
19
+ def __init__(self, name: str):
20
+ self.name: str = name
21
+ self.submod_name = f"submod_{name}"
22
+ self.node_names: List[str] = []
23
+ self.inputs: Dict[str, None] = {}
24
+ self.outputs: Dict[str, None] = {}
25
+ self.dependencies: Dict[str, None] = {}
26
+ self.dependents: Dict[str, None] = {}
27
+ self.graph: torch.fx.graph.Graph = torch.fx.graph.Graph()
28
+ self.environment: Dict[Node, Node] = {}
29
+ self.targets: Dict[str, Any] = {}
30
+
31
+ def __repr__(self) -> str:
32
+ return (
33
+ f"name: {self.name},\n"
34
+ f" nodes: {self.node_names},\n"
35
+ f" inputs: {self.inputs},\n"
36
+ f" outputs: {self.outputs},\n"
37
+ f" partitions depended on: {self.dependencies},\n"
38
+ f" partition dependents: {self.dependents}"
39
+ )
40
+
41
+
42
+ # Creates subgraphs out of main graph
43
+ @compatibility(is_backward_compatible=True)
44
+ def split_module(
45
+ m: GraphModule,
46
+ root_m: torch.nn.Module,
47
+ split_callback: Callable[[Node], int],
48
+ qualname_map: Optional[Dict[str, str]] = None,
49
+ keep_original_order: Optional[bool] = False,
50
+ ):
51
+ """
52
+ Creates subgraphs out of main graph
53
+
54
+ Args:
55
+ m (GraphModule): Graph module to split
56
+ root_m (torch.nn.Module): root nn module. Not currently used. Included
57
+ because the root nn module is usually transformed via
58
+ torch.fx._symbolic_trace.symbolic_trace (see example below)
59
+ split_callback (Callable[[Node], int]): Callable function
60
+ that maps a given Node instance to a numeric partition identifier.
61
+ split_module will use this function as the policy for which operations
62
+ appear in which partitions in the output Module.
63
+ qualname_map: Optional[Dict[str, str]]: optional output parameter that returns a
64
+ mapping from new target names in the module after split to old target
65
+ names in the original module.
66
+ keep_original_order: Optional[bool]: keep the original order of the GraphModule
67
+ or use the topological order of the newly constructed GraphModule
68
+
69
+
70
+ Returns:
71
+ GraphModule: the module after split.
72
+
73
+ Example:
74
+
75
+ This is a sample setup:
76
+
77
+ import torch
78
+ from torch.fx.symbolic_trace import symbolic_trace
79
+ from torch.fx.graph_module import GraphModule
80
+ from torch.fx.node import Node
81
+ from torch.fx.passes.split_module import split_module
82
+
83
+ class MyModule(torch.nn.Module):
84
+ def __init__(self):
85
+ super().__init__()
86
+ self.param = torch.nn.Parameter(torch.rand(3, 4))
87
+ self.linear = torch.nn.Linear(4, 5)
88
+
89
+ def forward(self, x, y):
90
+ z = self.linear(x + self.param).clamp(min=0.0, max=1.0)
91
+ w = self.linear(y).clamp(min=0.0, max=1.0)
92
+ return z + w
93
+
94
+ # symbolically trace model
95
+ my_module = MyModule()
96
+ my_module_traced = symbolic_trace(my_module)
97
+
98
+ # random mod partitioning
99
+ partition_counter = 0
100
+ NPARTITIONS = 3
101
+
102
+ def mod_partition(node: Node):
103
+ global partition_counter
104
+ partition = partition_counter % NPARTITIONS
105
+ partition_counter = (partition_counter + 1) % NPARTITIONS
106
+ return partition
107
+
108
+ # split module in module with submodules
109
+ module_with_submodules = split_module(
110
+ my_module_traced, my_module, mod_partition
111
+ )
112
+
113
+ Output looks like this. Original graph is broken into partitions
114
+
115
+ > print(module_with_submodules)
116
+ GraphModule(
117
+ (submod_0): GraphModule(
118
+ (linear): Linear(in_features=4, out_features=5, bias=True)
119
+ )
120
+ (submod_1): GraphModule(
121
+ (linear): Linear(in_features=4, out_features=5, bias=True)
122
+ )
123
+ (submod_2): GraphModule()
124
+ )
125
+
126
+ def forward(self, x, y):
127
+ param = self.param
128
+ submod_0 = self.submod_0(x, param, y); x = param = y = None
129
+ getitem = submod_0[0]
130
+ getitem_1 = submod_0[1]; submod_0 = None
131
+ submod_1 = self.submod_1(getitem, getitem_1); getitem = getitem_1 = None
132
+ getitem_2 = submod_1[0]
133
+ getitem_3 = submod_1[1]; submod_1 = None
134
+ submod_2 = self.submod_2(getitem_2, getitem_3); getitem_2 = getitem_3 = None
135
+ return submod_2
136
+
137
+ The output of the split module is the same as the output of the input traced module.
138
+ This is an example within a test setting:
139
+
140
+ > orig_out = my_module_traced(x, y)
141
+ > submodules_out = module_with_submodules(x, y)
142
+ > self.assertEqual(orig_out, submodules_out)
143
+ True
144
+ """
145
+
146
+ def construct_graph(
147
+ node: Node,
148
+ base_mod_env: Dict[str, Node],
149
+ base_mod_attrs: Dict[str, torch.fx.graph_module.GraphModule],
150
+ ):
151
+ if node.op == "placeholder":
152
+ default_value = (
153
+ node.args[0] if len(node.args) > 0 else inspect.Signature.empty
154
+ )
155
+ base_mod_env[node.name] = base_mod_graph.placeholder(
156
+ node.target, type_expr=node.type, default_value=default_value
157
+ )
158
+ base_mod_env[node.name].meta = node.meta.copy()
159
+ elif node.op == "get_attr":
160
+ base_mod_env[node.name] = base_mod_graph.get_attr(node.target)
161
+ base_mod_env[node.name].meta = node.meta.copy()
162
+ attr_val = m
163
+ for atom in node.target.split("."): # type: ignore[union-attr]
164
+ if not hasattr(attr_val, atom):
165
+ raise AttributeError(f"Node target {node.target} not found!")
166
+ attr_val = getattr(attr_val, atom)
167
+ base_mod_attrs[node.target] = attr_val # type: ignore[index]
168
+ return base_mod_env, base_mod_attrs
169
+
170
+ partitions: Dict[str, Partition] = {}
171
+ orig_nodes: Dict[str, Node] = {}
172
+ symbol_to_node: Dict["sympy.Symbol", Node] = {}
173
+
174
+ def record_cross_partition_use(
175
+ def_node: Node, use_node: Optional[Node]
176
+ ): # noqa: B950
177
+ from torch.fx.experimental.symbolic_shapes import free_symbols
178
+
179
+ defined = getattr(def_node, "_fx_partition", None)
180
+ used = getattr(use_node, "_fx_partition", None)
181
+ if defined != used:
182
+ if defined is not None:
183
+ def_partition = partitions[defined]
184
+ def_partition.outputs.setdefault(def_node.name)
185
+ if used is not None:
186
+ def_partition.dependents.setdefault(used)
187
+
188
+ if used is not None:
189
+ use_partition = partitions[used]
190
+ use_partition.inputs.setdefault(def_node.name)
191
+ if (def_val := def_node.meta.get("example_value")) is not None:
192
+ for s in sorted(free_symbols(def_val)):
193
+ use_partition.inputs.setdefault(symbol_to_node[s].name)
194
+ if defined is not None:
195
+ use_partition.dependencies.setdefault(defined)
196
+
197
+ def instantiate_node_partition_mapping(node):
198
+ partition_name = str(split_callback(node))
199
+
200
+ # add node to partitions
201
+ partition = partitions.get(partition_name)
202
+ if partition is None:
203
+ partitions[partition_name] = partition = Partition(partition_name)
204
+
205
+ partition.node_names.append(node.name)
206
+ node._fx_partition = partition_name
207
+
208
+ # Global State Nodes are nodes which, by their global state effects,
209
+ # "taint" all downstream nodes while they are active.
210
+ GLOBAL_STATE_NODES = [
211
+ torch.amp._enter_autocast,
212
+ torch.amp._exit_autocast,
213
+ torch._C._set_grad_enabled
214
+ ]
215
+
216
+ # For grad regions:
217
+ # ------------------------
218
+ # 1. first region: we do nothing
219
+ # 2. subsequent regions: we insert the set_grad at the beginning
220
+ grad_regions: OrderedDict[Node, Set[int]] = OrderedDict()
221
+
222
+ # For autocast regions:
223
+ # ------------------------
224
+ # 1. first region: we will only insert the _exit at the end
225
+ # 2. intermediate regions: we will insert both the
226
+ # _enter at the beginning and _exit at the end
227
+ # 3. last region: we will only insert _enter at the beginning
228
+ # We will do so in the order in which the autocasts were instantiated.
229
+ autocast_regions: OrderedDict[Node, Set[int]] = OrderedDict()
230
+ autocast_exits: Dict[Node, Optional[Node]] = {}
231
+
232
+ active_grad = None
233
+ active_autocasts = set()
234
+
235
+ import sympy # noqa: F811
236
+
237
+ for node in m.graph.nodes:
238
+ if node.op in ["placeholder", "get_attr", "output"]:
239
+ if (
240
+ node.op == "placeholder" and
241
+ (val := node.meta.get("example_value")) is not None and
242
+ isinstance(val, torch.SymInt) and
243
+ isinstance(val.node.expr, sympy.Symbol)
244
+ ):
245
+ symbol_to_node[val.node.expr] = node
246
+ continue
247
+
248
+ instantiate_node_partition_mapping(node)
249
+
250
+ if node.op == "call_function" and node.target in GLOBAL_STATE_NODES:
251
+ if node.target == torch._C._set_grad_enabled:
252
+ assert len(node.args) == 1
253
+ assert isinstance(node.args[0], bool)
254
+ active_grad = node
255
+ grad_regions[active_grad] = set({split_callback(node)})
256
+ elif node.target == torch.amp._enter_autocast:
257
+ # Should all be python constants
258
+ assert all(not isinstance(arg, Node) for arg in node.args)
259
+ active_autocasts.add(node)
260
+ autocast_regions[node] = set({split_callback(node)})
261
+ autocast_exits[node] = None
262
+ elif node.target == torch.amp._exit_autocast:
263
+ assert len(node.args) == 1
264
+ autocast_regions[node.args[0]].add(split_callback(node))
265
+ active_autocasts.remove(node.args[0])
266
+ autocast_exits[node.args[0]] = node
267
+
268
+ if active_grad is not None:
269
+ grad_regions[active_grad].add(split_callback(node))
270
+
271
+ for a in active_autocasts:
272
+ autocast_regions[a].add(split_callback(node))
273
+
274
+ assert all(v is not None for v in autocast_exits.values()), "autocast must exit"
275
+
276
+ autocast_regions = {k: sorted(v) for k, v in autocast_regions.items()}
277
+ grad_regions = {k: sorted(v) for k, v in grad_regions.items()}
278
+
279
+ if _LOGGER.isEnabledFor(logging.DEBUG):
280
+ _LOGGER.debug("autocast_regions: %s", autocast_regions)
281
+ _LOGGER.debug("grad_regions: %s", grad_regions)
282
+
283
+ assert_monotonically_increasing = bool(autocast_regions) or bool(grad_regions)
284
+
285
+ # split nodes into partitions
286
+ highest_partition = -1
287
+ for node in m.graph.nodes:
288
+ orig_nodes[node.name] = node
289
+
290
+ # TODO currently placeholders/parameters aren't put into random partitions,
291
+ # rather they're added to the graphs where they are used down below
292
+ if node.op in ["placeholder", "get_attr"]:
293
+ continue
294
+ if node.op == "output":
295
+ torch.fx.graph.map_arg(
296
+ node.args[0], lambda n: record_cross_partition_use(n, None)
297
+ )
298
+ continue
299
+
300
+ if assert_monotonically_increasing:
301
+ pid = split_callback(node)
302
+ assert highest_partition <= pid,\
303
+ ("autocast or set_grad_enabled require monotonically increasing partitions: "
304
+ f"highest: {highest_partition}, this node's: {pid}")
305
+ highest_partition = pid
306
+
307
+ # do not capture cross-partition dependencies for global state nodes as they will be
308
+ # self-contained - their setup and unwind will be isolated to each partition submodule.
309
+ if node.target not in GLOBAL_STATE_NODES:
310
+ torch.fx.graph.map_arg(
311
+ node.args, lambda def_node: record_cross_partition_use(def_node, node)
312
+ )
313
+ torch.fx.graph.map_arg(
314
+ node.kwargs, lambda def_node: record_cross_partition_use(def_node, node)
315
+ ) # noqa: B950
316
+
317
+ original_partition_order = list(partitions.keys())
318
+ # find partitions with no dependencies
319
+ root_partitions: List[str] = []
320
+ for partition_name, partition in partitions.items():
321
+ if not len(partition.dependencies):
322
+ root_partitions.append(partition_name)
323
+
324
+ # check partitions for circular dependencies and create topological partition ordering
325
+ sorted_partitions: List[str] = []
326
+ while root_partitions:
327
+ root_partition = root_partitions.pop()
328
+ sorted_partitions.append(root_partition)
329
+ for dependent in partitions[root_partition].dependents:
330
+ partitions[dependent].dependencies.pop(root_partition)
331
+ if not partitions[dependent].dependencies:
332
+ root_partitions.append(dependent)
333
+ if len(sorted_partitions) != len(partitions):
334
+ raise RuntimeError("cycle exists between partitions!")
335
+
336
+ # Enter prelude
337
+ for regions_mapping in [autocast_regions, grad_regions]:
338
+ for node, regions in regions_mapping.items():
339
+ assert len(regions) > 0
340
+ partitions[str(regions[0])].environment[node] = node
341
+ for r in regions[1:]:
342
+ partition = partitions[str(r)]
343
+ new_node = partition.graph.create_node(
344
+ op=node.op,
345
+ target=node.target,
346
+ args=tuple(arg for arg in node.args),
347
+ kwargs={},
348
+ type_expr=node.type,
349
+ )
350
+ new_node.meta = node.meta.copy() # is it really a good idea to copy this?
351
+ partition.environment[node] = new_node
352
+
353
+ # add placeholders to partition inputs
354
+ for partition_name in sorted_partitions:
355
+ partition = partitions[partition_name]
356
+ for inp in partition.inputs:
357
+ placeholder = partition.graph.placeholder(
358
+ inp,
359
+ type_expr=orig_nodes[inp].type,
360
+ )
361
+ placeholder.meta = orig_nodes[inp].meta.copy()
362
+ partition.environment[orig_nodes[inp]] = placeholder
363
+
364
+ # Transform nodes and collect targets for partition's submodule
365
+ for node in m.graph.nodes:
366
+ if hasattr(node, "_fx_partition"):
367
+ partition = partitions[node._fx_partition]
368
+
369
+ # swap out old graph nodes in kw/args with references to new nodes in this submodule
370
+ environment = partition.environment
371
+ gathered_args = torch.fx.graph.map_arg(node.args, lambda n: environment[n])
372
+ gathered_kwargs = torch.fx.graph.map_arg(
373
+ node.kwargs, lambda n: environment[n]
374
+ )
375
+
376
+ if node.op not in ["call_module", "get_attr"]:
377
+ target = node.target
378
+ else:
379
+ target_atoms = node.target.split(".")
380
+ target_attr = m
381
+ for atom in target_atoms:
382
+ if not hasattr(target_attr, atom):
383
+ raise AttributeError(f"Operator target {node.target} not found!")
384
+ target_attr = getattr(target_attr, atom)
385
+ # target = target_atoms[-1]
386
+ target = "_".join(target_atoms)
387
+ partition.targets[target] = target_attr
388
+ # Fill in the passed-in mapping from new qualname to old qualname
389
+ if qualname_map is not None:
390
+ # When creating the split module later, the submodules will have
391
+ # path prefix matching the corresponding partition's submod_name
392
+ qualname = f"{partition.submod_name}.{target}"
393
+ qualname_map[qualname] = node.target
394
+
395
+ assert isinstance(gathered_args, tuple)
396
+ assert isinstance(gathered_kwargs, dict)
397
+ new_node = partition.graph.create_node(
398
+ op=node.op,
399
+ target=target,
400
+ args=gathered_args,
401
+ kwargs=gathered_kwargs,
402
+ type_expr=node.type,
403
+ )
404
+ new_node.meta = node.meta.copy()
405
+ partition.environment[node] = new_node
406
+
407
+ # Exit epilogue
408
+ for regions_mapping in [autocast_regions]:
409
+ for node in reversed(regions_mapping):
410
+ regions = regions_mapping[node]
411
+ assert len(regions) > 0
412
+ for r in regions[:-1]:
413
+ partition = partitions[str(r)]
414
+ exit_node = autocast_exits[node]
415
+ assert exit_node is not None, "Missing exit node"
416
+ new_node = partition.graph.create_node(
417
+ op=exit_node.op,
418
+ target=exit_node.target,
419
+ args=(partition.environment[node],),
420
+ kwargs={},
421
+ type_expr=exit_node.type,
422
+ )
423
+ new_node.meta = exit_node.meta.copy() # is it really a good idea to copy this?
424
+
425
+ # original module environment dict mapping node names to nodes
426
+ orig_mod_env: Dict[str, Node] = {}
427
+ # Set up values to construct base module
428
+ base_mod_env: Dict[str, Node] = {}
429
+ base_mod_graph: torch.fx.graph.Graph = torch.fx.graph.Graph()
430
+ base_mod_attrs: Dict[str, torch.fx.graph_module.GraphModule] = {}
431
+ if not keep_original_order:
432
+ for node in m.graph.nodes:
433
+ base_mod_env, base_mod_attrs = construct_graph(
434
+ node, base_mod_env, base_mod_attrs
435
+ )
436
+
437
+ else:
438
+ # Go through the graph to construct the mapping dict
439
+ for node in m.graph.nodes:
440
+ orig_mod_env[node.name] = node
441
+
442
+ # Do some things iterating over the partitions in topological order again:
443
+ # 1) Finish off submodule Graphs by setting corresponding outputs
444
+ # 2) Construct GraphModules for each submodule
445
+ # 3) Construct the base graph by emitting calls to those submodules in
446
+ # topological order or original order specified by keep_original_order
447
+
448
+ construct_order_partitions = (
449
+ sorted_partitions if not keep_original_order else original_partition_order
450
+ )
451
+
452
+ already_constructed_attr_nodes = set()
453
+ for partition_name in construct_order_partitions:
454
+ partition = partitions[partition_name]
455
+
456
+ # Set correct output values
457
+ output_vals = tuple(
458
+ partition.environment[orig_nodes[name]] for name in partition.outputs
459
+ )
460
+
461
+ # skip output node generation if there are no output values
462
+ num_output_vals = len(output_vals)
463
+ if num_output_vals == 1:
464
+ partition.graph.output(output_vals[0])
465
+ elif num_output_vals > 1:
466
+ partition.graph.output(output_vals)
467
+
468
+ if keep_original_order:
469
+ # first get the attr nodes required by this partition
470
+ orig_mod_attr_nodes: List[Node] = [
471
+ orig_mod_env[key] for key in partition.inputs
472
+ ]
473
+ # Construct GraphModule for this partition
474
+ for node in orig_mod_attr_nodes: # type: ignore[attr-defined]
475
+ if node in already_constructed_attr_nodes:
476
+ continue
477
+ base_mod_env, base_mod_attrs = construct_graph(
478
+ node, base_mod_env, base_mod_attrs
479
+ )
480
+ already_constructed_attr_nodes.add(node)
481
+
482
+ base_mod_attrs[partition.submod_name] = torch.fx.graph_module.GraphModule(
483
+ partition.targets, partition.graph
484
+ ) # noqa: B950
485
+
486
+ # Emit call in base graph to this submodule
487
+ output_val = base_mod_graph.call_module(
488
+ partition.submod_name,
489
+ tuple(base_mod_env[name] for name in partition.inputs),
490
+ )
491
+
492
+ num_outputs = len(partition.outputs)
493
+ if num_outputs > 1:
494
+ # Unpack multiple return values from submodule
495
+ output_val_proxy = torch.fx.proxy.Proxy(output_val)
496
+ for i, output_name in enumerate(partition.outputs):
497
+ base_mod_env[output_name] = output_val_proxy[i].node # type: ignore[index]
498
+ elif num_outputs == 1:
499
+ base_mod_env[next(iter(partition.outputs))] = output_val
500
+
501
+ for node in m.graph.nodes:
502
+ if node.op == "output":
503
+ base_mod_graph.output(
504
+ torch.fx.graph.map_arg(node.args[0], lambda n: base_mod_env[n.name])
505
+ ) # noqa: B950
506
+
507
+ return torch.fx.graph_module.GraphModule(base_mod_attrs, base_mod_graph)
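+
+ # Illustrative sketch of the optional qualname_map output (names reuse the
+ # MyModule example from the docstring above):
+ #
+ #   qualname_map = {}
+ #   module_with_submodules = split_module(
+ #       my_module_traced, my_module, mod_partition, qualname_map=qualname_map
+ #   )
+ #   # qualname_map now maps new targets to original ones, e.g. "submod_0.linear" -> "linear"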
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/split_utils.py ADDED
@@ -0,0 +1,298 @@
1
+ import copy
2
+ from dataclasses import dataclass, field
3
+ from typing import Dict, List, Optional, Tuple, Type, Union
4
+
5
+ import torch.fx
6
+ from torch.fx._compatibility import compatibility
7
+ from torch.fx.graph import map_arg
8
+ from torch.fx.passes.utils import HolderModule, lift_subgraph_as_module
9
+
10
+ from .tools_common import NodeList
11
+
12
+ __all__ = ["getattr_recursive", "setattr_recursive", "Component", "split_by_tags"]
13
+
14
+
15
+ @compatibility(is_backward_compatible=False)
16
+ def getattr_recursive(obj, name):
17
+ for layer in name.split("."):
18
+ if hasattr(obj, layer):
19
+ obj = getattr(obj, layer)
20
+ else:
21
+ return None
22
+ return obj
23
+
24
+
25
+ @compatibility(is_backward_compatible=False)
26
+ def setattr_recursive(obj, attr, value):
27
+ if "." not in attr:
28
+ setattr(obj, attr, value)
29
+ else:
30
+ layer = attr.split(".")
31
+ setattr_recursive(getattr(obj, layer[0]), ".".join(layer[1:]), value)
32
+
33
+
34
+ @compatibility(is_backward_compatible=False)
35
+ @dataclass
36
+ class Component:
37
+ """
38
+ A component serves as a container for a subgraph we want to create afterwards.
39
+ """
40
+
41
+ graph: torch.fx.Graph
42
+ order: int
43
+ name: str
44
+
45
+ # Stores the placeholder nodes in `graph`.
46
+ input_placeholders: List = field(default_factory=list)
47
+
48
+ # Store the nodes in original graph that are placeholder in `graph`.
49
+ orig_inputs: List = field(default_factory=list)
50
+
51
+ # Store the nodes in original graph that are outputs in `graph`.
52
+ orig_outputs: List = field(default_factory=list)
53
+
54
+ # Mapping from get_attr node in original graph to get_attr node in `graph`.
55
+ getattr_maps: Dict[torch.fx.Node, torch.fx.Node] = field(default_factory=dict)
56
+ constructor_args: List[str] = field(default_factory=list)
57
+ gm: Optional[torch.fx.GraphModule] = None
58
+
59
+
60
+ @compatibility(is_backward_compatible=False)
61
+ def split_by_tags(
62
+ gm: torch.fx.GraphModule,
63
+ tags: List[str],
64
+ return_fqn_mapping: bool = False,
65
+ GraphModuleCls: Type[torch.fx.GraphModule] = torch.fx.GraphModule,
66
+ ) -> Union[torch.fx.GraphModule, Tuple[torch.fx.GraphModule, Dict[str, str]]]:
67
+ """
68
+ Splits a GraphModule using tags on its graph nodes. We honor the order of
69
+ tags. For example, we have tags = ["a", "b", "c"], the function will create
70
+ the initial submodules in the order of "a", "b", "c".
71
+
72
+ To set a tag:
73
+ gm.graph.nodes[idx].tag = "mytag"
74
+
75
+ This will result in all nodes with the same tag being extracted and placed in their
76
+ own submodule. For placeholder, output and get_attr nodes, the tag is ignored. Placeholder
77
+ and output nodes are created when needed while get_attr nodes get copied to submodules
78
+ where they are used.
79
+
80
+ Given the following module def:
81
+
82
+ class SimpleModule(torch.nn.Module):
83
+ def __init__(self):
84
+ super().__init__()
85
+ self.linear1 = torch.nn.Linear(...)
86
+ self.linear2 = torch.nn.Linear(...)
87
+ self.linear3 = torch.nn.Linear(...)
88
+
89
+ def forward(self, in1, in2):
90
+ r1 = self.linear1(in1)
91
+ r2 = self.linear2(in2)
92
+ r3 = torch.cat([r1, r2])
93
+ return self.linear3(r3)
94
+
95
+ Marking the node corresponding to in1 with the tag sc.REQUEST_ONLY.lower() results in the following split:
96
+
97
+ ro:
98
+ def forward(self, in1):
99
+ self = self.root
100
+ linear1 = self.linear1(in1)
101
+ return linear1
102
+
103
+ main:
104
+ def forward(self, in2, linear1):
105
+ self = self.root
106
+ linear2 = self.linear2(in2)
107
+ cat_1 = torch.cat([linear1, linear2])
108
+ linear3 = self.linear3(cat_1)
109
+ return linear3
110
+
111
+ main (the root module after split):
112
+ def forward(self, in1, in2):
113
+ self = self.root
114
+ ro_0 = self.ro_0(in1)
115
+ main_1 = self.main_1(in2, ro_0)
116
+ return main_1
117
+
118
+ Returns:
119
+ split_gm: torch fx graph after split
120
+ orig_to_split_fqn_mapping: a map between the original fqn and the fqn
121
+ after split for call_module and get_attr.
122
+ """
123
+
124
+ def flatten(x: torch.fx.node.Argument) -> NodeList:
125
+ """
126
+ Stores nodes in x to a list and returns the list.
127
+ """
128
+ r: NodeList = []
129
+ map_arg(x, r.append)
130
+ return r
131
+
132
+ # Mapping from node in original module to node in created submodule.
133
+ node_remapping: Dict[torch.fx.Node, torch.fx.Node] = {}
134
+
135
+ # Mapping from node in original module or created submodules to
136
+ # corresponding component.
137
+ node_to_component: Dict[torch.fx.Node, Component] = {}
138
+
139
+ # Mapping from tag to the corresponding component.
140
+ tag_to_component: Dict[str, Component] = {}
141
+
142
+ # Stores all components.
143
+ all_components: List[Component] = []
144
+
145
+ # Stores nodes that will be used in main graph.
146
+ used_in_main: Dict[torch.fx.Node, None] = {}
147
+
148
+ # Main graph after split.
149
+ main_g = torch.fx.Graph()
150
+
151
+ # Mapping from node in original module to node in main graph after split.
152
+ main_remapping: Dict[torch.fx.Node, torch.fx.Node] = {}
153
+
154
+ # Output node of original module.
155
+ output_node: Optional[torch.fx.Node] = None
156
+
157
+ # Create a component for each tag, we don't expect to create other components afterwards.
158
+ for tag in tags:
159
+ comp = Component(torch.fx.Graph(), len(all_components), f"{tag}")
160
+ all_components.append(comp)
161
+ tag_to_component[tag] = comp
162
+
163
+ # Traverse the nodes in original graph and take care of them.
164
+ for node in gm.graph.nodes:
165
+ if node.op == "output":
166
+ if output_node is not None:
167
+ raise RuntimeError("Multiple output nodes in graph!")
168
+ output_node = node
169
+ continue
170
+
171
+ # Placeholders in the original graph get copied to main graph.
172
+ if node.op == "placeholder":
173
+ main_remapping[node] = main_g.placeholder(node.name, type_expr=node.type)
174
+ main_remapping[node].meta = copy.copy(node.meta)
175
+ continue
176
+
177
+ # Get_attr nodes are ignored because we are not tagging them.
178
+ # Instead, we copy them directly to the submodules that use them afterwards.
179
+ if node.op == "get_attr":
180
+ continue
181
+
182
+ # Now we process callable nodes which are nodes with op of call_module,
183
+ # call_function or call_method. Every callable node should be tagged.
184
+ assert hasattr(node, "tag")
185
+
186
+ upstream_components = [
187
+ node_to_component[x]
188
+ for x in flatten(node.args) + flatten(node.kwargs)
189
+ if x.op not in {"placeholder", "get_attr"}
190
+ ]
191
+
192
+ comp = tag_to_component[node.tag]
193
+ node_to_component[node] = comp
194
+
195
+ # Max order of upstream components.
196
+ mx = max((c.order for c in upstream_components), default=0)
197
+
198
+ # Expect the component for `node` to have a higher order than its upstream components.
199
+ assert comp.order >= mx
200
+
201
+ # Map an input of `node` to nodes in the component's graph.
202
+ def remap_func(x):
203
+ # If input is a get_attr node, copy it to current component's graph.
204
+ # Returns the get_attr node in current component's graph.
205
+ if x.op == "get_attr":
206
+ if x not in comp.getattr_maps:
207
+ comp.getattr_maps[x] = comp.graph.get_attr(
208
+ x.target, type_expr=x.type
209
+ )
210
+ return comp.getattr_maps[x]
211
+
212
+ # If input is not a placeholder, it should have been put into a component
213
+ # already. If it's the current component then we return the corresponding
214
+ # node in the component.
215
+ if x.op != "placeholder" and node_to_component[x] == comp:
216
+ return node_remapping[x]
217
+
218
+ # If input is a placeholder or it's in other components, we want to make it
219
+ # a placeholder in the current component's graph.
220
+ if x not in comp.orig_inputs:
221
+ comp.orig_inputs.append(x)
222
+ placeholder = comp.graph.placeholder(x.name, type_expr=x.type)
223
+ placeholder.meta = copy.copy(x.meta)
224
+ comp.input_placeholders.append(placeholder)
225
+ used_in_main[x] = None
226
+
227
+ return comp.input_placeholders[comp.orig_inputs.index(x)]
228
+
229
+ n = comp.graph.node_copy(node, remap_func)
230
+ n.tag = node.tag # type: ignore[attr-defined]
231
+ node_remapping[node] = n
232
+ node_to_component[n] = comp
233
+
234
+ if output_node is None:
235
+ raise RuntimeError("Graph had no output node!")
236
+
237
+ for x in flatten(output_node.args[0]):
238
+ if x.op == "get_attr":
239
+ # We don't need components mapping for nodes of type "get_attr"
240
+ # that are consumed by the output. Only need to make sure we create
241
+ # corresponding counterparts in the resulting graph.
242
+ main_remapping[x] = main_g.get_attr(x.name, type_expr=x.type)
243
+ else:
244
+ # All component results consumed by the output node should be
245
+ # marked as "used in main".
246
+ used_in_main[x] = None
247
+
248
+ # If a node is used in main graph then we mark it as an output in the component
249
+ # it belongs to.
250
+ for n in used_in_main:
251
+ if n.op != "placeholder":
252
+ node_to_component[n].orig_outputs.append(n)
253
+
254
+ # Now we create a graphmodule for each component.
255
+ orig_to_split_fqn_mapping: Dict[str, str] = {}
256
+ for comp in all_components:
257
+ outs = tuple(map(node_remapping.__getitem__, comp.orig_outputs))
258
+
259
+ # Take care of the args of FX output node. If there's a single
260
+ # output then the output node args is like (output_single), else
261
+ # if there're multiple outputs then the output node args is like
262
+ # ((output_0, output_1, ...)).
263
+ comp.graph.output(outs[0] if len(outs) == 1 else outs)
264
+
265
+ comp.gm, comp_orig_to_split_fqn_mapping = lift_subgraph_as_module(
266
+ gm, subgraph=comp.graph, comp_name=comp.name
267
+ )
268
+ orig_to_split_fqn_mapping.update(comp_orig_to_split_fqn_mapping)
269
+
270
+ # Create a call_module node in main graph.
271
+ main_node = main_g.call_module(
272
+ comp.name,
273
+ args=tuple(map(main_remapping.__getitem__, comp.orig_inputs)),
274
+ kwargs=None,
275
+ )
276
+
277
+ if len(outs) == 1:
278
+ main_remapping[comp.orig_outputs[0]] = main_node
279
+ else:
280
+ for i, o in enumerate(comp.orig_outputs):
281
+ # Use Proxy to record getitem access.
282
+ main_remapping[o] = torch.fx.Proxy(main_node)[i].node # type: ignore[index]
283
+
284
+ main_g.output(map_arg(output_node.args[0], main_remapping.__getitem__))
285
+ main_root = HolderModule({comp.name: comp.gm for comp in all_components})
286
+ main_g._codegen = gm.graph._codegen
287
+
288
+ # If the output node consumes a get_attr directly in the original graph,
289
+ # then we need to make sure get_attr is copied to the new graph.
290
+ for x in flatten(output_node.args[0]):
291
+ if x.op == "get_attr":
292
+ setattr(main_root, x.name, getattr_recursive(gm, x.target)) # type: ignore[arg-type]
293
+
294
+ result_gm = GraphModuleCls(main_root, main_g)
295
+ if return_fqn_mapping:
296
+ return result_gm, orig_to_split_fqn_mapping
297
+
298
+ return result_gm
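+
+ # Illustrative tagging sketch (reusing the SimpleModule from the docstring above,
+ # traced with torch.fx.symbolic_trace). Tags must respect dependency order: a node
+ # may only depend on nodes whose tag appears no later in `tags` than its own.
+ #
+ #   gm = torch.fx.symbolic_trace(SimpleModule())
+ #   for n in gm.graph.nodes:
+ #       if n.op in ("call_module", "call_function", "call_method"):
+ #           n.tag = "first_half" if n.target in ("linear1", "linear2") else "second_half"
+ #   split_gm = split_by_tags(gm, ["first_half", "second_half"])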
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/splitter_base.py ADDED
@@ -0,0 +1,871 @@
1
+ import argparse
2
+ import copy
3
+ from collections import defaultdict
4
+ from dataclasses import dataclass
5
+ from typing import NamedTuple, Sequence, Iterable, Any, List, Dict, Optional, Tuple
6
+ import logging
7
+
8
+ import torch
9
+ from torch.fx.passes.graph_manipulation import get_size_of_node
10
+ from torch.fx.node import map_arg
11
+ from torch.fx._compatibility import compatibility
12
+
13
+ from .operator_support import (
14
+ get_node_target,
15
+ OperatorSupportBase,
16
+ )
17
+ from .graph_drawer import FxGraphDrawer
18
+ from .shape_prop import ShapeProp
19
+ from .split_utils import split_by_tags
20
+ from .tools_common import (
21
+ FxNetAccFusionsFinder,
22
+ CALLABLE_NODE_OPS,
23
+ Tensors,
24
+ NodeList,
25
+ NodeSet,
26
+ is_node_output_tensor,
27
+ )
28
+
29
+
30
+ __all__ = ['FxNetAccNodesFinder', 'FxNetSplitterInternalError', 'Subgraph', 'SplitResult', 'generate_inputs_for_submodules']
31
+ _LOGGER = logging.getLogger(__name__)
32
+
33
+ DEFAULT_MIN_ACC_MODULE_SIZE = 1
34
+ DEFAULT_SKIP_FUSION = False
35
+ DEFAULT_ALLOW_NON_TENSOR = False
36
+
37
+ class _SplitterSettingBase:
38
+ def __init__(
39
+ self,
40
+ min_acc_module_size=DEFAULT_MIN_ACC_MODULE_SIZE,
41
+ skip_fusion=DEFAULT_SKIP_FUSION,
42
+ allow_non_tensor=DEFAULT_ALLOW_NON_TENSOR
43
+ ):
44
+ parser = argparse.ArgumentParser()
45
+ parser.add_argument(
46
+ "--min-acc-module-size",
47
+ "--min_acc_module_size",
48
+ required=False,
49
+ type=int,
50
+ help="Minimum size limit of an accelerator subgraph.",
51
+ )
52
+ parser.add_argument(
53
+ "--skip-fusion",
54
+ "--skip_fusion",
55
+ default=False,
56
+ action="store_true",
57
+ help="If true then no fusion groups. Fusion group is used to "
58
+ "enforce no non-tensor data flow between submodules. If we don't "
59
+ "need this constraint, setting this to true is recommended as it "
60
+ "can reduce overhead.",
61
+ )
62
+ parser.add_argument(
63
+ "--allow-non-tensor",
64
+ "--allow_non_tensor",
65
+ default=False,
66
+ action="store_true",
67
+ help="For some backends, non-tensor data flow between them and the cpu "
68
+ "is not allowed. Therefore, if a node is supported by the accelerator but "
69
+ "has non-tensor inputs or outputs to a cpu node, we would want to "
70
+ "consider it as a cpu node during splitting. However, for some backends "
71
+ "we might not care about non-tensor data flow, and we can set this option "
72
+ "to true to disable the functionality that prevents non-tensor data flow.",
73
+ )
74
+ args, unknown = parser.parse_known_args()
75
+
76
+ self.min_acc_module_size: int = args.min_acc_module_size if args.min_acc_module_size else min_acc_module_size
77
+ self.skip_fusion: bool = args.skip_fusion if args.skip_fusion else skip_fusion
78
+ self.allow_non_tensor: bool = args.allow_non_tensor if args.allow_non_tensor else allow_non_tensor
79
+
80
+
81
+ @compatibility(is_backward_compatible=False)
82
+ class FxNetAccNodesFinder:
83
+ """
84
+ Finds a set of nodes that can be supported on ACC, excluding nodes that have non-tensor
85
+ input/output to cpu nodes to prevent non-tensor data flow between backends and cpu.
86
+
87
+ I.e. if we have a chain:
88
+
89
+ ACC_NODE_1 -> ACC_NODE_2 -> ACC_NODE_3 -> CPU_NODE_1
90
+
91
+ where every ACC node produces non-tensor output, then they all should be treated as CPU nodes.
92
+
93
+ This behavior can be turned off by passing allow_non_tensor=True.
94
+ """
95
+
96
+ def __init__(
97
+ self,
98
+ module: torch.fx.GraphModule,
99
+ operator_support: OperatorSupportBase,
100
+ allow_non_tensor: bool,
101
+ ):
102
+ self.module = module
103
+ self.operator_support = operator_support
104
+ self.allow_non_tensor = allow_non_tensor
105
+
106
+ def reduce_acc_nodes_non_tensor_input_helper(
107
+ self, cpu_worklist: NodeList
108
+ ):
109
+ """
110
+ Transitively excludes nodes from ACC supported set.
111
+ For every node in the worklist:
112
+ - removes its downstream ACC nodes from ACC supported set,
113
+ - if any downstream ACC node produces non-tensor output,
114
+ then it gets added into the worklist.
115
+ """
116
+ while cpu_worklist:
117
+ node = cpu_worklist.pop(0)
118
+
119
+ for user in node.users:
120
+ if user in self.acc_nodes:
121
+ self.acc_nodes.remove(user)
122
+ if not is_node_output_tensor(user):
123
+ cpu_worklist.append(user)
124
+
125
+ def reduce_acc_nodes_non_tensor_input(self):
126
+ """
127
+ Excludes nodes from ACC supported set that have direct
128
+ upstream CPU nodes that produce non-tensor outputs.
129
+ """
130
+ non_tensor_cpu_nodes: NodeList = []
131
+
132
+ for node in self.module.graph.nodes:
133
+ if node.op not in CALLABLE_NODE_OPS:
134
+ continue
135
+ if node in self.acc_nodes:
136
+ continue
137
+ if is_node_output_tensor(node):
138
+ continue
139
+ non_tensor_cpu_nodes.append(node)
140
+
141
+ self.reduce_acc_nodes_non_tensor_input_helper(non_tensor_cpu_nodes)
142
+
143
+ def reduce_acc_nodes_non_tensor_output(self):
144
+ """
145
+ Excludes nodes from ACC supported set that produce non-tensor
146
+ outputs and have downstream CPU nodes.
147
+ """
148
+ while True:
149
+ new_cpu_nodes: NodeList = []
150
+
151
+ for acc_node in self.acc_nodes:
152
+ if is_node_output_tensor(acc_node):
153
+ continue
154
+ for user in acc_node.users:
155
+ if user not in self.acc_nodes:
156
+ new_cpu_nodes.append(acc_node)
157
+ break
158
+
159
+ if not new_cpu_nodes:
160
+ break
161
+
162
+ for new_cpu_node in new_cpu_nodes:
163
+ self.acc_nodes.remove(new_cpu_node)
164
+
165
+ self.reduce_acc_nodes_non_tensor_input_helper(new_cpu_nodes)
166
+
167
+ def __call__(self) -> NodeSet:
168
+ submodules = dict(self.module.named_modules())
169
+ self.acc_nodes = {
170
+ n
171
+ for n in self.module.graph.nodes
172
+ if n.op in CALLABLE_NODE_OPS
173
+ and self.operator_support.is_node_supported(submodules, n)
174
+ }
175
+
176
+ if not self.allow_non_tensor:
177
+ self.reduce_acc_nodes_non_tensor_input()
178
+ self.reduce_acc_nodes_non_tensor_output()
179
+
180
+ return self.acc_nodes
181
+
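# Example (illustrative sketch): FxNetAccNodesFinder only needs a GraphModule, an
# OperatorSupportBase implementation, and the allow_non_tensor flag. The names
# MyOperatorSupport and my_model below are hypothetical.
#
#     class MyOperatorSupport(OperatorSupportBase):
#         def is_node_supported(self, submodules, node) -> bool:
#             # e.g. treat every call_function node as an ACC node
#             return node.op == "call_function"
#
#     traced = torch.fx.symbolic_trace(my_model)
#     acc_nodes = FxNetAccNodesFinder(traced, MyOperatorSupport(), allow_non_tensor=False)()
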
@compatibility(is_backward_compatible=False)
class FxNetSplitterInternalError(Exception):
    pass

@compatibility(is_backward_compatible=False)
@dataclass
class Subgraph:
    is_acc: bool
    nodes: NodeList


@compatibility(is_backward_compatible=False)
class SplitResult(NamedTuple):
    """
    Stores the results of the splitter.

    Attributes:
        split_module: root module after splitting.
        submodule_inputs: a dict that maps submodule name to its inputs.
        non_acc_submodule_prefix: the prefix for non-acc submodules. For
            acc submodules the prefix is always "_run_on_acc_".
    """

    split_module: torch.fx.GraphModule
    submodule_inputs: Dict[str, Any]
    non_acc_submodule_prefix: str


@compatibility(is_backward_compatible=False)
def generate_inputs_for_submodules(
    model: torch.nn.Module,
    inputs: Sequence[Any],
    target_submodules: Iterable[str],
    deepcopy: bool = False,
) -> Dict[str, Any]:
    """
    Generate inputs for the target submodules in the given model. Note that if two
    submodules refer to the same object, this function doesn't work.

    Args:
        model: root model.
        inputs: inputs to the root model.
        target_submodules: submodules that we want to generate inputs for.

    Returns:
        A dict that maps from submodule name to its inputs.
    """

    handles = []
    results = {}
    submodule_to_names = {mod: name for name, mod in model.named_modules()}

    def pre_forward(module, module_inputs):
        results[submodule_to_names[module]] = copy.deepcopy(module_inputs) if deepcopy else module_inputs

    for name, mod in model.named_modules():
        if name in target_submodules:
            handles.append(mod.register_forward_pre_hook(pre_forward))

    def clean_up_handles():
        for h in handles:
            h.remove()

    try:
        with torch.no_grad():
            model(*inputs)
    except Exception as e:
        clean_up_handles()
        raise e

    clean_up_handles()
    return results


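# Example (illustrative sketch): inputs for individual submodules are captured by running
# the root module once with forward pre-hooks attached. The names split_mod and
# sample_inputs below are hypothetical.
#
#     submodule_inputs = generate_inputs_for_submodules(
#         split_mod,
#         sample_inputs,
#         target_submodules=["_run_on_acc_0", "_run_on_cpu_1"],
#         deepcopy=True,
#     )
#     acc_inputs = submodule_inputs["_run_on_acc_0"]
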
class _SplitterBase:
    """
    Splits a GraphModule into sub-GraphModules for execution on CPU or the accelerator.
    Output is a GraphModule with supported and unsupported operators grouped into as few
    sub-GraphModules as possible. Assumes that only "call_module", "call_function" and
    "call_method" nodes from FX IR can potentially be executed on the accelerator.

    Given the following graph:
          ==> b ==>
        //         \\
       a             d
        \\         //
          ==> c ==>

    class SimpleModule(torch.nn.Module):
        def forward(self, a):
            b = torch.sin(a)
            c = torch.cos(a)
            d = b + c
            return d

    and providing "operator_support" that indicates that 'b' and 'c' can be executed on the
    accelerator, we will get the following split result:

    main:
    def forward(self, a):
        run_on_acc_0_0 = self._run_on_acc_0_0(a)
        getitem = run_on_acc_0_0[0]
        getitem_1 = run_on_acc_0_0[1]
        run_on_cpu_1_1 = self._run_on_cpu_1_1(getitem, getitem_1)
        return run_on_cpu_1_1

    _run_on_acc_0_0:
    def forward(self, a):
        sin_1 = torch.sin(a)
        cos_1 = torch.cos(a)
        return (sin_1, cos_1)

    _run_on_cpu_1_1:
    def forward(self, sin_1, cos_1):
        add_1 = sin_1 + cos_1
        return add_1
    """

    # PCIe bandwidth for the backend, default to 100 GB/s
    PCIe_BW = 100 * 2 ** 30

    def __init__(
        self,
        module: torch.fx.GraphModule,
        sample_input: Sequence[Any],
        operator_support: OperatorSupportBase,
        settings: _SplitterSettingBase,
        non_acc_submodule_name: str = "_run_on_cpu_",
    ):
        """
        Preprocesses the graph before splitting:
        - finds nodes supported by ACC,
        - finds fusion groups for ACC nodes having non-tensor IO,
        - builds a graph of direct dependencies,
        - builds a map of fused nodes to their fusions.
        As a result we get self.acc_nodes, self.deps and self.fusions.
        """
        assert isinstance(module, torch.fx.GraphModule)

        self.module = module
        ShapeProp(self.module).propagate(*sample_input)

        self.settings = settings
        self.operator_support = operator_support
        self.sample_input = sample_input
        self.acc_nodes = FxNetAccNodesFinder(self.module, self.operator_support, self.settings.allow_non_tensor)()

        if self.settings.skip_fusion:
            self.fusions = {}
        else:
            self.fusions = FxNetAccFusionsFinder(module, self.acc_nodes)()

        # Modify deps to add more deps for fused nodes
        self.deps = self.find_deps()
        self.update_deps_for_fusions()

        self.non_acc_submodule_name = non_acc_submodule_name
        self._node_submodule_map: Dict[str, str] = {}

340
+ # ===============================================================
341
+ # Helpers for ctor and initial state
342
+ # ===============================================================
343
+
344
+ def get_node_submodule_map(self) -> Dict[str, str]:
345
+ """ Returns a map from node name to submodule name, e.g.
346
+ node: main_module_impl_impl_over_arch_unary_multiple_embedding
347
+ _pooling_embedding_pooling_sparse_entity_equivalence_key
348
+ _proxy_embedding_bag
349
+ maps to submodule name of: _run_on_acc_1
350
+ """
351
+ return self._node_submodule_map
352
+
353
+ def find_deps(self) -> Dict[torch.fx.Node, NodeSet]:
354
+ """
355
+ Builds a graph of node dependencies. Leaf nodes don't have any
356
+ dependencies and the "output" node doesn't have nodes depending on it.
357
+
358
+ Resulting graph has only direct dependencies, i.e. there are no
359
+ transitive dependencies.
360
+ """
361
+ deps: Dict[torch.fx.Node, NodeSet] = defaultdict(set)
362
+ for node in self.module.graph.nodes:
363
+ if node.op not in CALLABLE_NODE_OPS:
364
+ continue
365
+
366
+ for user in node.users:
367
+ if user.op != "output":
368
+ deps[user].add(node)
369
+ return deps
370
+
371
+ def update_deps_for_fusions(self):
372
+ """
373
+ Updates graph of dependencies so that:
374
+ - nodes from the same fusion depend on the same set of outer nodes,
375
+ - outer nodes depending on a fusion depend on all nodes in that fusion.
376
+ """
377
+ for node in self.fusions:
378
+ fusion = self.fusions[node]
379
+ for fused_neighbor in fusion:
380
+ self.deps[node].update(self.deps[fused_neighbor] - fusion)
381
+
382
+ for user in fused_neighbor.users:
383
+ if user not in fusion:
384
+ self.deps[user].add(node)
385
+
386
+ # ===============================================================
387
+ # Helpers for preview
388
+ # ===============================================================
389
+
390
+ def _lower_model_to_backend(
391
+ self, mod: torch.fx.GraphModule, inputs: Tensors
392
+ ) -> torch.nn.Module:
393
+ """
394
+ Lower the model to a backend.
395
+ """
396
+
397
+ return mod
398
+
399
+ def _find_culprit(
400
+ self, mod: torch.fx.GraphModule, inputs: Tensors
401
+ ) -> str:
402
+ """
403
+ When an error occurs during lowering or running the lowered mod, we use this
404
+ function to find culprits in the `mod` that causes the error.
405
+ """
406
+
407
+ return "Unable to find a culprit because _find_culprit() function is not implemented."
408
+
409
+ def _draw_graph_based_on_node_support(
410
+ self, mod: torch.fx.GraphModule, supported_nodes: NodeList
411
+ ):
412
+ color_map = {
413
+ "default": "AliceBlue",
414
+ "supported": "chartreuse1",
415
+ "unsupported": "crimson",
416
+ }
417
+
418
+ class CustomDrawer(FxGraphDrawer):
419
+ def _get_node_style(self, node):
420
+ template = super()._get_node_style(node)
421
+ if node in supported_nodes:
422
+ template["fillcolor"] = color_map["supported"]
423
+ elif node.op in CALLABLE_NODE_OPS:
424
+ template["fillcolor"] = color_map["unsupported"]
425
+ else:
426
+ template["fillcolor"] = color_map["default"]
427
+
428
+ return template
429
+
430
+ drawer = CustomDrawer(mod, "node_support", ignore_getattr=True)
431
+ dot_graph = drawer.get_main_dot_graph()
432
+ dot_graph.write_raw("node_support.dot")
433
+
    def node_support_preview(self, dump_graph: bool = False):
        submodules = dict(self.module.named_modules())

        supported_nodes: NodeList = []
        supported_node_types = defaultdict(set)
        unsupported_node_types = defaultdict(set)

        def get_dtype(arg):
            tensor_meta = arg.meta.get("tensor_meta")
            return getattr(tensor_meta, "dtype", None)

        for node in self.module.graph.nodes:
            if node.op not in CALLABLE_NODE_OPS:
                continue

            target = get_node_target(submodules, node)

            # Store dtype of arg in node.args. If arg doesn't have dtype, i.e. not a tensor, we'll store None.
            arg_dtypes = [
                get_dtype(arg) if isinstance(arg, torch.fx.Node) else None
                for arg in node.args
            ]

            # Find the last non-None element. If all elements are None, return max_len.
            last_index = len(arg_dtypes) - next(
                (
                    i
                    for i, dtype in enumerate(reversed(arg_dtypes))
                    if dtype is not None
                ),
                len(arg_dtypes),
            )

            # Strip None elements at the end.
            arg_dtypes_tuple = tuple(arg_dtypes[:last_index])
            kwarg_dtypes_tuple = tuple(
                (k, get_dtype(arg))
                for k, arg in node.kwargs.items()
                if isinstance(arg, torch.fx.Node)
            )

            if self.operator_support.is_node_supported(submodules, node):
                supported_nodes.append(node)
                supported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple))
            else:
                unsupported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple))

        if dump_graph:
            self._draw_graph_based_on_node_support(self.module, supported_nodes)

        reports = "\nSupported node types in the model:\n"
        for t, dtypes in supported_node_types.items():
            for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes:
                reports += f"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\n"

        reports += "\nUnsupported node types in the model:\n"
        for t, dtypes in unsupported_node_types.items():
            for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes:
                reports += f"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\n"

        print(reports)

        # Return reports for testing purposes
        return reports

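    # Example (illustrative): calling `splitter.node_support_preview(dump_graph=True)`
    # prints the supported/unsupported node-type report built above and, because of
    # dump_graph=True, also writes the color-coded graph to "node_support.dot".
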
    def split_preview(self, dump_graph: bool = False):
        reports = ""
        subgraphs = self.put_nodes_into_subgraphs()
        acc_subgraphs_num = len([g for g in subgraphs if g.is_acc])
        cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num
        reports += f"Before removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:"
        reports += f" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\n"

        subgraphs = self.remove_small_acc_subgraphs(subgraphs)
        acc_subgraphs_num = len([g for g in subgraphs if g.is_acc])
        cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num
        reports += f"After removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:"
        reports += f" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\n"

        for i, subgraph in enumerate(subgraphs):
            reports += f"_run_on_acc_{i}: " if subgraph.is_acc else f"{self.non_acc_submodule_name}{i}: "
            reports += f"{len(subgraph.nodes)} node(s)\n"

        self.tag(subgraphs)
        split_mod = self.split(remove_tag=True)
        split_mod.eval()

        if dump_graph:
            drawer = FxGraphDrawer(
                split_mod, "preview", ignore_getattr=True
            )
            dot_graphs = drawer.get_all_dot_graphs()
            for name, dot_graph in dot_graphs.items():
                dot_graph.write_raw(f"{name}.dot")

        max_qps: float = self.PCIe_BW
        bottleneck_module = ""

        for node in split_mod.graph.nodes:
            if node.op == "call_module" and "acc" in node.target:
                reports += f"\nProcessing acc submodule {node.target}\n"

                submod = getattr(split_mod, node.target)

                def get_submod_inputs(main_mod, submod, example_inputs):
                    sub_inputs = None

                    def get_inputs(self, inputs):
                        nonlocal sub_inputs
                        sub_inputs = inputs

                    handle = submod.register_forward_pre_hook(get_inputs)
                    main_mod(*example_inputs)
                    handle.remove()
                    return sub_inputs

                submod_inputs = get_submod_inputs(
                    split_mod, submod, self.sample_input
                )
                ShapeProp(submod).propagate(*submod_inputs)

                total_input_bytes = 0
                total_output_bytes = 0

                reports += "Checking inputs...\n"
                for n in submod.graph.nodes:
                    if n.op == "placeholder":
                        if not is_node_output_tensor(n):
                            reports += f"Input {n.name} is not a tensor, this might cause problems during lowering!\n"
                        else:
                            total_input_bytes += get_size_of_node(submod, n)[0]
                    if n.op == "output":
                        output_node = n

                reports += "Checking outputs...\n"

                def get_bytes(node: torch.fx.Node):
                    nonlocal total_output_bytes
                    nonlocal reports
                    if not is_node_output_tensor(node):
                        reports += f"Output {node.name} is not a tensor, this might cause problems during lowering!\n"
                    else:
                        total_output_bytes += get_size_of_node(submod, node)[0]

                map_arg(output_node.args, get_bytes)
                qps = self.PCIe_BW / max(total_input_bytes, total_output_bytes)
                reports += f"Total input size in bytes is {total_input_bytes}, total output size in bytes is {total_output_bytes},"
                reports += f" theoretical max qps (bounded by PCIe bandwidth) for this submodule is {qps}.\n"

                if qps < max_qps:
                    max_qps = qps
                    bottleneck_module = node.target

                try:
                    lowered_submod = self._lower_model_to_backend(submod, submod_inputs)
                except RuntimeError:
                    reports += "Ran into an error during lowering!\n"
                    reports += self._find_culprit(submod, submod_inputs)
                    continue

                try:
                    lowered_submod(*submod_inputs)
                except RuntimeError:
                    reports += "Ran into an error during inference!\n"
                    reports += self._find_culprit(submod, submod_inputs)
                else:
                    reports += "Lowering and running succeeded!\n"

        reports += f"\nTheoretical max qps (bounded by PCIe bandwidth) for this model is {max_qps},"
        reports += f" bottleneck is submodule {bottleneck_module}."
        print(reports)

        # Return the reports for testing purposes
        return reports

    # ===============================================================
    # Helpers for extend_acc_subgraph() method
    # ===============================================================

    def find_reverse_deps(
        self, tag_id: Optional[int] = None
    ) -> Dict[torch.fx.Node, NodeSet]:
        """
        Builds reversed topological node dependencies. If tag_id is specified,
        we ignore nodes that are in a later subgraph, i.e. nodes with a greater tag_id.
        """
        result: Dict[torch.fx.Node, NodeSet] = defaultdict(set)

        for node in self.module.graph.nodes:
            if node.op not in CALLABLE_NODE_OPS:
                continue

            for user in node.users:
                if user.op not in CALLABLE_NODE_OPS:
                    continue

                if tag_id is None or (int(user.tag.split("_")[-1]) < tag_id):
                    result[node].add(user)

        return result

    def update_reverse_deps_for_fusions(
        self, deps: Dict[torch.fx.Node, NodeSet]
    ):
        processed_node = set()

        for node, fusion in self.fusions.items():
            if node in processed_node:
                continue

            new_dep = set()

            # Create a new dependency set which includes all the
            # dependencies of the nodes in the fusion group
            for n in fusion:
                new_dep.update(deps[n])

            # Exclude nodes in the fusion
            new_dep.difference_update(fusion)

            # Update dependency
            for n in fusion:
                deps[n] = new_dep

                for arg in n.all_input_nodes:
                    if arg not in fusion:
                        deps[arg].update(fusion)

                processed_node.add(n)

    def find_parent_nodes_of_subgraph(self, tag: str) -> NodeSet:
        """
        Finds parent nodes of the `tag` subgraph.

        Traverses the inputs of nodes in the subgraph. If an input doesn't belong to the
        subgraph and is not a placeholder, we consider it a parent node of the subgraph.
        """
        parent_nodes = set()

        for node in self.module.graph.nodes:
            if node.op in CALLABLE_NODE_OPS and node.tag == tag:
                for arg in node.all_input_nodes:
                    if arg.op in CALLABLE_NODE_OPS and arg.tag != tag:
                        parent_nodes.add(arg)

        return parent_nodes

    def extend_acc_subgraph(self, tag: str):
        """
        Extend the acc subgraph with `tag`, going in the reversed topological direction.
        """
        # Dict that maps a node to its users, ignoring users that
        # are in a subgraph with a greater tag
        deps = self.find_reverse_deps(tag_id=int(tag.split("_")[-1]))
        self.update_reverse_deps_for_fusions(deps)

        # Parent nodes of the subgraph
        parent_nodes = self.find_parent_nodes_of_subgraph(tag)

        visited_nodes: NodeSet = set()

        while parent_nodes:
            node = None

            # Find an acc node that depends on visited nodes only
            for n in parent_nodes:
                if deps[n] <= visited_nodes and n in self.acc_nodes:
                    node = n
                    break

            if node is None:
                break

            # Put the node into the `tag` subgraph
            node.tag = tag  # type: ignore[attr-defined]
            parent_nodes.remove(node)
            visited_nodes.add(node)

            # If the node is in a fusion group, add all fusion buddies to parent nodes
            if node in self.fusions:
                for fusion_node in self.fusions[node]:
                    if fusion_node not in visited_nodes:
                        parent_nodes.add(fusion_node)

            # Add inputs of the node to parent nodes
            for arg in node.all_input_nodes:
                if arg.op in CALLABLE_NODE_OPS and arg not in visited_nodes:
                    parent_nodes.add(arg)

    # ===============================================================
    # Helpers for split() method
    # ===============================================================

    def starter_nodes(self) -> Tuple[NodeSet, NodeSet]:
        """
        Finds nodes that consume module inputs or get_attr nodes.
        """
        starter_cpu_nodes: NodeSet = set()
        starter_acc_nodes: NodeSet = set()
        for node in self.module.graph.nodes:
            if node.op not in {"placeholder", "get_attr"}:
                continue
            for user in node.users:
                if user in self.acc_nodes:
                    starter_acc_nodes.add(user)
                else:
                    starter_cpu_nodes.add(user)
        return starter_cpu_nodes, starter_acc_nodes

    def put_nodes_into_subgraphs(self) -> List[Subgraph]:
        # We start graph traversal from leaf nodes
        current_cpu_nodes, current_acc_nodes = self.starter_nodes()
        visited_nodes: NodeSet = set()

        # Determine which subgraph to start from based on which subgraph has
        # a 0-dep node
        acc_subgraph: bool = not any(len(self.deps[n]) == 0 for n in current_cpu_nodes)

        current_subgraph_nodes: NodeList = []

        # Result accumulator
        subgraphs: List[Subgraph] = []
        while current_cpu_nodes or current_acc_nodes:
            # Find the first node that should belong to the current subgraph and has all dependencies resolved
            current_nodes = current_acc_nodes if acc_subgraph else current_cpu_nodes
            node = next(
                (n for n in current_nodes if self.deps[n] <= visited_nodes),
                None,
            )

            # If nothing was found, then it's time to flip the mode and start a new subgraph
            if node is None:
                if not current_subgraph_nodes:
                    raise FxNetSplitterInternalError("Subgraph can't be empty")

                subgraphs.append(
                    Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes)
                )
                acc_subgraph = not acc_subgraph
                current_subgraph_nodes = []
                continue

            current_nodes.remove(node)
            visited_nodes.add(node)
            current_subgraph_nodes.append(node)

            # Add fusion buddies
            if node in self.fusions:
                if node in self.acc_nodes:
                    current_acc_nodes.update(self.fusions[node] - visited_nodes)
                else:
                    current_cpu_nodes.update(self.fusions[node] - visited_nodes)

            # Put depending nodes into the queue
            for user in node.users:
                if user.op not in CALLABLE_NODE_OPS:
                    continue

                # Add downstream nodes
                if user in self.acc_nodes:
                    current_acc_nodes.add(user)
                else:
                    current_cpu_nodes.add(user)

        # Check if the last subgraph was not created
        if current_subgraph_nodes:
            subgraphs.append(
                Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes)
            )

        if not subgraphs:
            raise FxNetSplitterInternalError("Couldn't create subgraphs")

        return subgraphs

    def remove_small_acc_subgraphs(self, subgraphs: List[Subgraph]) -> List[Subgraph]:
        """
        This pass finds ACC submodules smaller than the specified size and merges
        them with adjacent CPU submodules.
        """
        result: List[Subgraph] = []
        for subgraph in subgraphs:
            if subgraph.is_acc:
                if len(subgraph.nodes) >= self.settings.min_acc_module_size:
                    result.append(subgraph)
                else:
                    print(
                        "Eliminating acc subgraph because it's smaller than the threshold: "
                        f"{len(subgraph.nodes)} < {self.settings.min_acc_module_size}"
                    )
                    if result:
                        result[-1].nodes.extend(subgraph.nodes)
                    else:
                        subgraph.is_acc = False
                        result.append(subgraph)
            else:
                if result and not result[-1].is_acc:
                    result[-1].nodes.extend(subgraph.nodes)
                else:
                    result.append(subgraph)
        return result

    def tag(self, subgraphs: List[Subgraph]):
        self.tags: List[str] = []
        for subgraph in subgraphs:
            tag = f"_run_on_acc_{len(self.tags)}" if subgraph.is_acc else f"{self.non_acc_submodule_name}{len(self.tags)}"
            self.tags.append(tag)
            for node in subgraph.nodes:
                if hasattr(node, "tag"):
                    raise FxNetSplitterInternalError(f"Node {node} was already tagged")

                node.tag = tag  # type: ignore[attr-defined]
                self._node_submodule_map[node.name] = tag

    def split(self, remove_tag: bool = False) -> torch.fx.GraphModule:
        split_module = split_by_tags(self.module, self.tags)
        if remove_tag:
            for node in self.module.graph.nodes:
                if hasattr(node, "tag"):
                    del node.tag
        return split_module

    def __call__(self) -> torch.fx.GraphModule:
        subgraphs = self.put_nodes_into_subgraphs()
        subgraphs = self.remove_small_acc_subgraphs(subgraphs)
        acc_subgraphs_count = len([s for s in subgraphs if s.is_acc])
        non_acc_subgraphs_count = len(subgraphs) - acc_subgraphs_count
        print(f"Got {acc_subgraphs_count} acc subgraphs and {non_acc_subgraphs_count} non-acc subgraphs")
        self.tag(subgraphs)
        return self.split()

    def generate_split_results(self) -> SplitResult:
        split_module = self()
        submodule_names = []
        for name, mod in split_module.named_children():
            submodule_names.append(name)
        submodule_inputs = generate_inputs_for_submodules(split_module, self.sample_input, submodule_names)
        return SplitResult(split_module, submodule_inputs, self.non_acc_submodule_name)
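

# Illustrative end-to-end sketch (hypothetical names MyOperatorSupport, ExampleSplitter,
# and my_model; not part of this module's API): a minimal splitter pairs _SplitterBase
# with an OperatorSupportBase implementation, and generate_split_results() returns both
# the split module and the captured per-submodule inputs.
#
#     class MyOperatorSupport(OperatorSupportBase):
#         def is_node_supported(self, submodules, node) -> bool:
#             return node.op == "call_function" and node.target in {torch.sin, torch.cos}
#
#     class ExampleSplitter(_SplitterBase):
#         pass
#
#     traced = torch.fx.symbolic_trace(my_model)
#     sample_input = (torch.randn(4, 8),)
#     splitter = ExampleSplitter(traced, sample_input, MyOperatorSupport(), _SplitterSettingBase())
#     result = splitter.generate_split_results()
#     split_mod, submodule_inputs = result.split_module, result.submodule_inputs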