applied-ai-018 committed
Commit 270d531 · verified · 1 parent: bed2b1d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_sym_dispatch_mode.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/proxy_tensor.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/recording.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/schema_type_annotation.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__init__.py +4 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/variable.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/dispatch.py +6 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/match.py +121 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/more.py +117 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/unification_tools.py +395 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/utils.py +105 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/variable.py +85 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__init__.py +11 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/__init__.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/annotate_getitem_nodes.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/fake_tensor_prop.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_drawer.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_manipulation.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/net_min_base.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/operator_support.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/param_fetch.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/pass_manager.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/reinplace.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/shape_prop.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_module.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_utils.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/splitter_base.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/tools_common.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/annotate_getitem_nodes.py +44 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/backends/__init__.py +0 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/backends/cudagraphs.py +56 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py +0 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/dialect/__pycache__/__init__.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__init__.py +0 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/__init__.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/cse_pass.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/dialect/common/cse_pass.py +112 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/fake_tensor_prop.py +73 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py +421 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/graph_manipulation.py +110 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/__init__.py +2 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/__init__.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/partitioner.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_base.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_manager.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/partitioner.py +329 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/pass_base.py +75 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/pass_manager.py +303 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/fx/passes/net_min_base.py +731 -0
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_sym_dispatch_mode.cpython-310.pyc ADDED
Binary file (1.51 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc ADDED
Binary file (8.35 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/proxy_tensor.cpython-310.pyc ADDED
Binary file (33 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/recording.cpython-310.pyc ADDED
Binary file (9.28 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/schema_type_annotation.cpython-310.pyc ADDED
Binary file (4.08 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__init__.py ADDED
@@ -0,0 +1,4 @@
+# mypy: disable-error-code=attr-defined
+from .core import unify, reify  # noqa: F403
+from .more import unifiable  # noqa: F403
+from .variable import var, isvar, vars, variables, Var  # noqa: F403
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/variable.cpython-310.pyc ADDED
Binary file (2.9 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/dispatch.py ADDED
@@ -0,0 +1,6 @@
+from functools import partial
+from .multipledispatch import dispatch  # type: ignore[import]
+
+namespace = {}  # type: ignore[var-annotated]
+
+dispatch = partial(dispatch, namespace=namespace)
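
Editor's note: rebinding `dispatch` with a package-private `namespace` keeps every `@dispatch` registration in this package in one shared dict instead of multipledispatch's library-wide global registry. A minimal sketch of the same partial-binding pattern, with hypothetical names:

    from functools import partial

    def record(func, namespace):
        # register the function in the given registry and return it unchanged
        namespace[func.__name__] = func
        return func

    private_namespace = {}
    register = partial(record, namespace=private_namespace)

    @register
    def greet():
        return "hi"

    assert "greet" in private_namespace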
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/match.py ADDED
@@ -0,0 +1,121 @@
+from .core import unify, reify  # type: ignore[attr-defined]
+from .variable import isvar
+from .utils import _toposort, freeze
+from .unification_tools import groupby, first  # type: ignore[import]
+
+
+class Dispatcher:
+    def __init__(self, name):
+        self.name = name
+        self.funcs = {}
+        self.ordering = []
+
+    def add(self, signature, func):
+        self.funcs[freeze(signature)] = func
+        self.ordering = ordering(self.funcs)
+
+    def __call__(self, *args, **kwargs):
+        func, s = self.resolve(args)
+        return func(*args, **kwargs)
+
+    def resolve(self, args):
+        n = len(args)
+        for signature in self.ordering:
+            if len(signature) != n:
+                continue
+            s = unify(freeze(args), signature)
+            if s is not False:
+                result = self.funcs[signature]
+                return result, s
+        raise NotImplementedError("No match found. \nKnown matches: "
+                                  + str(self.ordering) + "\nInput: " + str(args))
+
+    def register(self, *signature):
+        def _(func):
+            self.add(signature, func)
+            return self
+        return _
+
+
+class VarDispatcher(Dispatcher):
+    """ A dispatcher that calls functions with variable names
+    >>> # xdoctest: +SKIP
+    >>> d = VarDispatcher('d')
+    >>> x = var('x')
+    >>> @d.register('inc', x)
+    ... def f(x):
+    ...     return x + 1
+    >>> @d.register('double', x)
+    ... def f(x):
+    ...     return x * 2
+    >>> d('inc', 10)
+    11
+    >>> d('double', 10)
+    20
+    """
+    def __call__(self, *args, **kwargs):
+        func, s = self.resolve(args)
+        d = {k.token: v for k, v in s.items()}
+        return func(**d)
+
+
+global_namespace = {}  # type: ignore[var-annotated]
+
+
+def match(*signature, **kwargs):
+    namespace = kwargs.get('namespace', global_namespace)
+    dispatcher = kwargs.get('Dispatcher', Dispatcher)
+
+    def _(func):
+        name = func.__name__
+
+        if name not in namespace:
+            namespace[name] = dispatcher(name)
+        d = namespace[name]
+
+        d.add(signature, func)
+
+        return d
+    return _
+
+
+def supercedes(a, b):
+    """ ``a`` is a more specific match than ``b`` """
+    if isvar(b) and not isvar(a):
+        return True
+    s = unify(a, b)
+    if s is False:
+        return False
+    s = {k: v for k, v in s.items() if not isvar(k) or not isvar(v)}
+    if reify(a, s) == a:
+        return True
+    if reify(b, s) == b:
+        return False
+
+
+# Taken from multipledispatch
+def edge(a, b, tie_breaker=hash):
+    """ A should be checked before B
+    Tie broken by tie_breaker, defaults to ``hash``
+    """
+    if supercedes(a, b):
+        if supercedes(b, a):
+            return tie_breaker(a) > tie_breaker(b)
+        else:
+            return True
+    return False
+
+
+# Taken from multipledispatch
+def ordering(signatures):
+    """ A sane ordering of signatures to check, first to last
+    Topological sort of edges as given by ``edge`` and ``supercedes``
+    """
+    signatures = list(map(tuple, signatures))
+    edges = [(a, b) for a in signatures for b in signatures if edge(a, b)]
+    edges = groupby(first, edges)
+    for s in signatures:
+        if s not in edges:
+            edges[s] = []
+    edges = {k: [b for a, b in v] for k, v in edges.items()}  # type: ignore[attr-defined, assignment]
+    return _toposort(edges)
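
Editor's note: `match` keeps one `Dispatcher` per function name and, on each call, unifies the frozen argument tuple against the registered signatures in specificity order. A minimal usage sketch, assuming the in-tree import path and constructing `Var` directly (the vendored `var()` returns a factory rather than a variable):

    from torch.fx.experimental.unification.match import match
    from torch.fx.experimental.unification.variable import Var

    n = Var('n')

    @match('inc', n)
    def op(tag, value):
        return value + 1

    @match('double', n)      # same __name__, so it joins the same Dispatcher
    def op(tag, value):
        return value * 2

    print(op('inc', 10))     # 11
    print(op('double', 10))  # 20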
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/more.py ADDED
@@ -0,0 +1,117 @@
+from .core import unify, reify  # type: ignore[attr-defined]
+from .dispatch import dispatch
+
+
+def unifiable(cls):
+    """ Register standard unify and reify operations on class
+    This uses the type and __dict__ or __slots__ attributes to define the
+    nature of the term
+    See Also:
+    >>> # xdoctest: +SKIP
+    >>> class A(object):
+    ...     def __init__(self, a, b):
+    ...         self.a = a
+    ...         self.b = b
+    >>> unifiable(A)
+    <class 'unification.more.A'>
+    >>> x = var('x')
+    >>> a = A(1, 2)
+    >>> b = A(1, x)
+    >>> unify(a, b, {})
+    {~x: 2}
+    """
+    _unify.add((cls, cls, dict), unify_object)
+    _reify.add((cls, dict), reify_object)
+
+    return cls
+
+
+#########
+# Reify #
+#########
+
+
+def reify_object(o, s):
+    """ Reify a Python object with a substitution
+    >>> # xdoctest: +SKIP
+    >>> class Foo(object):
+    ...     def __init__(self, a, b):
+    ...         self.a = a
+    ...         self.b = b
+    ...     def __str__(self):
+    ...         return "Foo(%s, %s)"%(str(self.a), str(self.b))
+    >>> x = var('x')
+    >>> f = Foo(1, x)
+    >>> print(f)
+    Foo(1, ~x)
+    >>> print(reify_object(f, {x: 2}))
+    Foo(1, 2)
+    """
+    if hasattr(o, '__slots__'):
+        return _reify_object_slots(o, s)
+    else:
+        return _reify_object_dict(o, s)
+
+
+def _reify_object_dict(o, s):
+    obj = object.__new__(type(o))
+    d = reify(o.__dict__, s)
+    if d == o.__dict__:
+        return o
+    obj.__dict__.update(d)
+    return obj
+
+
+def _reify_object_slots(o, s):
+    attrs = [getattr(o, attr) for attr in o.__slots__]
+    new_attrs = reify(attrs, s)
+    if attrs == new_attrs:
+        return o
+    else:
+        newobj = object.__new__(type(o))
+        for slot, attr in zip(o.__slots__, new_attrs):
+            setattr(newobj, slot, attr)
+        return newobj
+
+
+@dispatch(slice, dict)
+def _reify(o, s):
+    """ Reify a Python ``slice`` object """
+    return slice(*reify((o.start, o.stop, o.step), s))
+
+
+#########
+# Unify #
+#########
+
+
+def unify_object(u, v, s):
+    """ Unify two Python objects
+    Unifies their type and ``__dict__`` attributes
+    >>> # xdoctest: +SKIP
+    >>> class Foo(object):
+    ...     def __init__(self, a, b):
+    ...         self.a = a
+    ...         self.b = b
+    ...     def __str__(self):
+    ...         return "Foo(%s, %s)"%(str(self.a), str(self.b))
+    >>> x = var('x')
+    >>> f = Foo(1, x)
+    >>> g = Foo(1, 2)
+    >>> unify_object(f, g, {})
+    {~x: 2}
+    """
+    if type(u) != type(v):
+        return False
+    if hasattr(u, '__slots__'):
+        return unify([getattr(u, slot) for slot in u.__slots__],
+                     [getattr(v, slot) for slot in v.__slots__],
+                     s)
+    else:
+        return unify(u.__dict__, v.__dict__, s)
+
+
+@dispatch(slice, slice, dict)
+def _unify(u, v, s):
+    """ Unify a Python ``slice`` object """
+    return unify((u.start, u.stop, u.step), (v.start, v.stop, v.step), s)
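
Editor's note: `unifiable` hooks a class into the generic `_unify`/`_reify` dispatchers so instances unify attribute-by-attribute via `__dict__` (or `__slots__`). A minimal sketch, assuming the in-tree import path and a hypothetical `Point` class:

    from torch.fx.experimental.unification import unify, unifiable, Var

    @unifiable
    class Point:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    v = Var('v')
    print(unify(Point(1, 2), Point(1, v), {}))  # {~v: 2}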
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/unification_tools.py ADDED
@@ -0,0 +1,395 @@
+import collections
+import operator
+from functools import reduce
+from collections.abc import Mapping
+
+__all__ = ('merge', 'merge_with', 'valmap', 'keymap', 'itemmap',
+           'valfilter', 'keyfilter', 'itemfilter',
+           'assoc', 'dissoc', 'assoc_in', 'update_in', 'get_in')
+
+
+def _get_factory(f, kwargs):
+    factory = kwargs.pop('factory', dict)
+    if kwargs:
+        raise TypeError(f"{f.__name__}() got an unexpected keyword argument '{kwargs.popitem()[0]}'")
+    return factory
+
+
+def merge(*dicts, **kwargs):
+    """ Merge a collection of dictionaries
+
+    >>> merge({1: 'one'}, {2: 'two'})
+    {1: 'one', 2: 'two'}
+
+    Later dictionaries have precedence
+
+    >>> merge({1: 2, 3: 4}, {3: 3, 4: 4})
+    {1: 2, 3: 3, 4: 4}
+
+    See Also:
+        merge_with
+    """
+    if len(dicts) == 1 and not isinstance(dicts[0], Mapping):
+        dicts = dicts[0]
+    factory = _get_factory(merge, kwargs)
+
+    rv = factory()
+    for d in dicts:
+        rv.update(d)
+    return rv
+
+
+def merge_with(func, *dicts, **kwargs):
+    """ Merge dictionaries and apply function to combined values
+
+    A key may occur in more than one dict, and all values mapped from the key
+    will be passed to the function as a list, such as func([val1, val2, ...]).
+
+    >>> merge_with(sum, {1: 1, 2: 2}, {1: 10, 2: 20})
+    {1: 11, 2: 22}
+
+    >>> merge_with(first, {1: 1, 2: 2}, {2: 20, 3: 30})  # doctest: +SKIP
+    {1: 1, 2: 2, 3: 30}
+
+    See Also:
+        merge
+    """
+    if len(dicts) == 1 and not isinstance(dicts[0], Mapping):
+        dicts = dicts[0]
+    factory = _get_factory(merge_with, kwargs)
+
+    result = factory()
+    for d in dicts:
+        for k, v in d.items():
+            if k not in result:
+                result[k] = [v]
+            else:
+                result[k].append(v)
+    return valmap(func, result, factory)
+
+
+def valmap(func, d, factory=dict):
+    """ Apply function to values of dictionary
+
+    >>> bills = {"Alice": [20, 15, 30], "Bob": [10, 35]}
+    >>> valmap(sum, bills)  # doctest: +SKIP
+    {'Alice': 65, 'Bob': 45}
+
+    See Also:
+        keymap
+        itemmap
+    """
+    rv = factory()
+    rv.update(zip(d.keys(), map(func, d.values())))
+    return rv
+
+
+def keymap(func, d, factory=dict):
+    """ Apply function to keys of dictionary
+
+    >>> bills = {"Alice": [20, 15, 30], "Bob": [10, 35]}
+    >>> keymap(str.lower, bills)  # doctest: +SKIP
+    {'alice': [20, 15, 30], 'bob': [10, 35]}
+
+    See Also:
+        valmap
+        itemmap
+    """
+    rv = factory()
+    rv.update(zip(map(func, d.keys()), d.values()))
+    return rv
+
+
+def itemmap(func, d, factory=dict):
+    """ Apply function to items of dictionary
+
+    >>> accountids = {"Alice": 10, "Bob": 20}
+    >>> itemmap(reversed, accountids)  # doctest: +SKIP
+    {10: "Alice", 20: "Bob"}
+
+    See Also:
+        keymap
+        valmap
+    """
+    rv = factory()
+    rv.update(map(func, d.items()))
+    return rv
+
+
+def valfilter(predicate, d, factory=dict):
+    """ Filter items in dictionary by value
+
+    >>> iseven = lambda x: x % 2 == 0
+    >>> d = {1: 2, 2: 3, 3: 4, 4: 5}
+    >>> valfilter(iseven, d)
+    {1: 2, 3: 4}
+
+    See Also:
+        keyfilter
+        itemfilter
+        valmap
+    """
+    rv = factory()
+    for k, v in d.items():
+        if predicate(v):
+            rv[k] = v
+    return rv
+
+
+def keyfilter(predicate, d, factory=dict):
+    """ Filter items in dictionary by key
+
+    >>> iseven = lambda x: x % 2 == 0
+    >>> d = {1: 2, 2: 3, 3: 4, 4: 5}
+    >>> keyfilter(iseven, d)
+    {2: 3, 4: 5}
+
+    See Also:
+        valfilter
+        itemfilter
+        keymap
+    """
+    rv = factory()
+    for k, v in d.items():
+        if predicate(k):
+            rv[k] = v
+    return rv
+
+
+def itemfilter(predicate, d, factory=dict):
+    """ Filter items in dictionary by item
+
+    >>> def isvalid(item):
+    ...     k, v = item
+    ...     return k % 2 == 0 and v < 4
+
+    >>> d = {1: 2, 2: 3, 3: 4, 4: 5}
+    >>> itemfilter(isvalid, d)
+    {2: 3}
+
+    See Also:
+        keyfilter
+        valfilter
+        itemmap
+    """
+    rv = factory()
+    for item in d.items():
+        if predicate(item):
+            k, v = item
+            rv[k] = v
+    return rv
+
+
+def assoc(d, key, value, factory=dict):
+    """ Return a new dict with new key value pair
+
+    New dict has d[key] set to value. Does not modify the initial dictionary.
+
+    >>> assoc({'x': 1}, 'x', 2)
+    {'x': 2}
+    >>> assoc({'x': 1}, 'y', 3)  # doctest: +SKIP
+    {'x': 1, 'y': 3}
+    """
+    d2 = factory()
+    d2.update(d)
+    d2[key] = value
+    return d2
+
+
+def dissoc(d, *keys, **kwargs):
+    """ Return a new dict with the given key(s) removed.
+
+    New dict has d[key] deleted for each supplied key.
+    Does not modify the initial dictionary.
+
+    >>> dissoc({'x': 1, 'y': 2}, 'y')
+    {'x': 1}
+    >>> dissoc({'x': 1, 'y': 2}, 'y', 'x')
+    {}
+    >>> dissoc({'x': 1}, 'y')  # Ignores missing keys
+    {'x': 1}
+    """
+    factory = _get_factory(dissoc, kwargs)
+    d2 = factory()
+
+    if len(keys) < len(d) * .6:
+        d2.update(d)
+        for key in keys:
+            if key in d2:
+                del d2[key]
+    else:
+        remaining = set(d)
+        remaining.difference_update(keys)
+        for k in remaining:
+            d2[k] = d[k]
+    return d2
+
+
+def assoc_in(d, keys, value, factory=dict):
+    """ Return a new dict with new, potentially nested, key value pair
+
+    >>> purchase = {'name': 'Alice',
+    ...             'order': {'items': ['Apple', 'Orange'],
+    ...                       'costs': [0.50, 1.25]},
+    ...             'credit card': '5555-1234-1234-1234'}
+    >>> assoc_in(purchase, ['order', 'costs'], [0.25, 1.00])  # doctest: +SKIP
+    {'credit card': '5555-1234-1234-1234',
+     'name': 'Alice',
+     'order': {'costs': [0.25, 1.00], 'items': ['Apple', 'Orange']}}
+    """
+    return update_in(d, keys, lambda x: value, value, factory)
+
+
+def update_in(d, keys, func, default=None, factory=dict):
+    """ Update value in a (potentially) nested dictionary
+
+    inputs:
+    d - dictionary on which to operate
+    keys - list or tuple giving the location of the value to be changed in d
+    func - function to operate on that value
+
+    If keys == [k0,..,kX] and d[k0]..[kX] == v, update_in returns a copy of the
+    original dictionary with v replaced by func(v), but does not mutate the
+    original dictionary.
+
+    If k0 is not a key in d, update_in creates nested dictionaries to the depth
+    specified by the keys, with the innermost value set to func(default).
+
+    >>> inc = lambda x: x + 1
+    >>> update_in({'a': 0}, ['a'], inc)
+    {'a': 1}
+
+    >>> transaction = {'name': 'Alice',
+    ...                'purchase': {'items': ['Apple', 'Orange'],
+    ...                             'costs': [0.50, 1.25]},
+    ...                'credit card': '5555-1234-1234-1234'}
+    >>> update_in(transaction, ['purchase', 'costs'], sum)  # doctest: +SKIP
+    {'credit card': '5555-1234-1234-1234',
+     'name': 'Alice',
+     'purchase': {'costs': 1.75, 'items': ['Apple', 'Orange']}}
+
+    >>> # updating a value when k0 is not in d
+    >>> update_in({}, [1, 2, 3], str, default="bar")
+    {1: {2: {3: 'bar'}}}
+    >>> update_in({1: 'foo'}, [2, 3, 4], inc, 0)
+    {1: 'foo', 2: {3: {4: 1}}}
+    """
+    ks = iter(keys)
+    k = next(ks)
+
+    rv = inner = factory()
+    rv.update(d)
+
+    for key in ks:
+        if k in d:
+            d = d[k]
+            dtemp = factory()
+            dtemp.update(d)
+        else:
+            d = dtemp = factory()
+
+        inner[k] = inner = dtemp
+        k = key
+
+    if k in d:
+        inner[k] = func(d[k])
+    else:
+        inner[k] = func(default)
+    return rv
+
+
+def get_in(keys, coll, default=None, no_default=False):
+    """ Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys.
+
+    If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless
+    ``no_default`` is specified, then it raises KeyError or IndexError.
+
+    ``get_in`` is a generalization of ``operator.getitem`` for nested data
+    structures such as dictionaries and lists.
+
+    >>> transaction = {'name': 'Alice',
+    ...                'purchase': {'items': ['Apple', 'Orange'],
+    ...                             'costs': [0.50, 1.25]},
+    ...                'credit card': '5555-1234-1234-1234'}
+    >>> get_in(['purchase', 'items', 0], transaction)
+    'Apple'
+    >>> get_in(['name'], transaction)
+    'Alice'
+    >>> get_in(['purchase', 'total'], transaction)
+    >>> get_in(['purchase', 'items', 'apple'], transaction)
+    >>> get_in(['purchase', 'items', 10], transaction)
+    >>> get_in(['purchase', 'total'], transaction, 0)
+    0
+    >>> get_in(['y'], {}, no_default=True)
+    Traceback (most recent call last):
+        ...
+    KeyError: 'y'
+
+    See Also:
+        itertoolz.get
+        operator.getitem
+    """
+    try:
+        return reduce(operator.getitem, keys, coll)
+    except (KeyError, IndexError, TypeError):
+        if no_default:
+            raise
+        return default
+
+
+def getter(index):
+    if isinstance(index, list):
+        if len(index) == 1:
+            index = index[0]
+            return lambda x: (x[index],)
+        elif index:
+            return operator.itemgetter(*index)
+        else:
+            return lambda x: ()
+    else:
+        return operator.itemgetter(index)
+
+
+def groupby(key, seq):
+    """ Group a collection by a key function
+
+    >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
+    >>> groupby(len, names)  # doctest: +SKIP
+    {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
+
+    >>> iseven = lambda x: x % 2 == 0
+    >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8])  # doctest: +SKIP
+    {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
+
+    Non-callable keys imply grouping on a member.
+
+    >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
+    ...                    {'name': 'Bob', 'gender': 'M'},
+    ...                    {'name': 'Charlie', 'gender': 'M'}])  # doctest:+SKIP
+    {'F': [{'gender': 'F', 'name': 'Alice'}],
+     'M': [{'gender': 'M', 'name': 'Bob'},
+           {'gender': 'M', 'name': 'Charlie'}]}
+
+    Not to be confused with ``itertools.groupby``
+
+    See Also:
+        countby
+    """
+    if not callable(key):
+        key = getter(key)
+    d = collections.defaultdict(lambda: [].append)  # type: ignore[var-annotated]
+    for item in seq:
+        d[key(item)](item)
+    rv = {}
+    for k, v in d.items():
+        rv[k] = v.__self__  # type: ignore[var-annotated, attr-defined]
+    return rv
+
+
+def first(seq):
+    """ The first element in a sequence
+
+    >>> first('ABC')
+    'A'
+    """
+    return next(iter(seq))
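
Editor's note: these helpers are a vendored subset of toolz's dict/iter utilities; every dict-building function accepts a `factory` keyword so callers can get back an `OrderedDict` or another mapping type, and `groupby` stores bound `list.append` methods, unwrapping them via `__self__`. A small sketch (hypothetical values):

    from collections import OrderedDict

    # merge_with collects all values per key, then reduces them with the function
    totals = merge_with(sum, {'a': 1}, {'a': 10, 'b': 2}, factory=OrderedDict)
    print(totals)                            # OrderedDict([('a', 11), ('b', 2)])

    print(groupby(len, ['ab', 'c', 'de']))   # {2: ['ab', 'de'], 1: ['c']}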
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/utils.py ADDED
@@ -0,0 +1,105 @@
+__all__ = ["hashable", "transitive_get", "raises", "reverse_dict", "xfail", "freeze"]
+def hashable(x):
+    try:
+        hash(x)
+        return True
+    except TypeError:
+        return False
+
+
+def transitive_get(key, d):
+    """ Transitive dict.get
+    >>> d = {1: 2, 2: 3, 3: 4}
+    >>> d.get(1)
+    2
+    >>> transitive_get(1, d)
+    4
+    """
+    while hashable(key) and key in d:
+        key = d[key]
+    return key
+
+
+def raises(err, lamda):
+    try:
+        lamda()
+        return False
+    except err:
+        return True
+
+
+# Taken from theano/theano/gof/sched.py
+# Avoids licensing issues because this was written by Matthew Rocklin
+def _toposort(edges):
+    """ Topological sort algorithm by Kahn [1] - O(nodes + vertices)
+    inputs:
+        edges - a dict of the form {a: {b, c}} where b and c depend on a
+    outputs:
+        L - an ordered list of nodes that satisfy the dependencies of edges
+    >>> # xdoctest: +SKIP
+    >>> _toposort({1: (2, 3), 2: (3, )})
+    [1, 2, 3]
+    Closely follows the wikipedia page [2]
+    [1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
+    Communications of the ACM
+    [2] http://en.wikipedia.org/wiki/Toposort#Algorithms
+    """
+    incoming_edges = reverse_dict(edges)
+    incoming_edges = {k: set(val) for k, val in incoming_edges.items()}
+    S = ({v for v in edges if v not in incoming_edges})
+    L = []
+
+    while S:
+        n = S.pop()
+        L.append(n)
+        for m in edges.get(n, ()):
+            assert n in incoming_edges[m]
+            incoming_edges[m].remove(n)
+            if not incoming_edges[m]:
+                S.add(m)
+    if any(incoming_edges.get(v, None) for v in edges):
+        raise ValueError("Input has cycles")
+    return L
+
+
+def reverse_dict(d):
+    """Reverses direction of dependence dict
+    >>> d = {'a': (1, 2), 'b': (2, 3), 'c':()}
+    >>> reverse_dict(d)  # doctest: +SKIP
+    {1: ('a',), 2: ('a', 'b'), 3: ('b',)}
+    :note: dict order are not deterministic. As we iterate on the
+        input dict, it make the output of this function depend on the
+        dict order. So this function output order should be considered
+        as undeterministic.
+    """
+    result = {}  # type: ignore[var-annotated]
+    for key in d:
+        for val in d[key]:
+            result[val] = result.get(val, tuple()) + (key, )
+    return result
+
+
+def xfail(func):
+    try:
+        func()
+        raise Exception("XFailed test passed")  # pragma: nocover
+    except Exception:
+        pass
+
+
+def freeze(d):
+    """ Freeze container to hashable form
+    >>> freeze(1)
+    1
+    >>> freeze([1, 2])
+    (1, 2)
+    >>> freeze({1: 2})  # doctest: +SKIP
+    frozenset([(1, 2)])
+    """
+    if isinstance(d, dict):
+        return frozenset(map(freeze, d.items()))
+    if isinstance(d, set):
+        return frozenset(map(freeze, d))
+    if isinstance(d, (tuple, list)):
+        return tuple(map(freeze, d))
+    return d
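
Editor's note: `_toposort` is the Kahn's-algorithm sort that `ordering` in match.py relies on; `reverse_dict` turns a forward dependency map into an incoming-edge map, and `freeze` makes nested containers hashable. A quick sketch with hypothetical node names:

    deps = {'read': ('parse',), 'parse': ('eval',), 'eval': ()}
    print(reverse_dict(deps))     # {'parse': ('read',), 'eval': ('parse',)}
    print(_toposort(deps))        # ['read', 'parse', 'eval']
    print(freeze({'k': [1, 2]}))  # frozenset({('k', (1, 2))})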
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/variable.py ADDED
@@ -0,0 +1,85 @@
+from contextlib import contextmanager
+from .utils import hashable
+from .dispatch import dispatch
+
+_global_logic_variables = set()  # type: ignore[var-annotated]
+_glv = _global_logic_variables
+
+
+class Var:
+    """ Logic Variable """
+
+    _id = 1
+
+    def __new__(cls, *token):
+        if len(token) == 0:
+            token = f"_{Var._id}"  # type: ignore[assignment]
+            Var._id += 1
+        elif len(token) == 1:
+            token = token[0]
+
+        obj = object.__new__(cls)
+        obj.token = token  # type: ignore[attr-defined]
+        return obj
+
+    def __str__(self):
+        return "~" + str(self.token)  # type: ignore[attr-defined]
+    __repr__ = __str__
+
+    def __eq__(self, other):
+        return type(self) == type(other) and self.token == other.token  # type: ignore[attr-defined]
+
+    def __hash__(self):
+        return hash((type(self), self.token))  # type: ignore[attr-defined]
+
+
+def var():
+    return lambda *args: Var(*args)
+
+
+def vars():
+    return lambda n: [var() for i in range(n)]
+
+
+@dispatch(Var)
+def isvar(v):
+    return True
+
+isvar
+
+
+@dispatch(object)  # type: ignore[no-redef]
+def isvar(o):
+    return not not _glv and hashable(o) and o in _glv
+
+
+@contextmanager
+def variables(*variables):
+    """
+    Context manager for logic variables
+
+    Example:
+        >>> # xdoctest: +SKIP("undefined vars")
+        >>> from __future__ import with_statement
+        >>> with variables(1):
+        ...     print(isvar(1))
+        True
+        >>> print(isvar(1))
+        False
+        >>> # Normal approach
+        >>> from unification import unify
+        >>> x = var('x')
+        >>> unify(x, 1)
+        {~x: 1}
+        >>> # Context Manager approach
+        >>> with variables('x'):
+        ...     print(unify('x', 1))
+        {'x': 1}
+    """
+    old_global_logic_variables = _global_logic_variables.copy()
+    _global_logic_variables.update(set(variables))
+    try:
+        yield
+    finally:
+        _global_logic_variables.clear()
+        _global_logic_variables.update(old_global_logic_variables)
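
Editor's note: in this vendored copy `var()` returns a constructor rather than a `Var`, so variables are usually built with `Var(...)` directly; `variables` temporarily registers arbitrary hashable values as logic variables for the duration of the `with` block. A minimal sketch, assuming the in-tree import path:

    from torch.fx.experimental.unification import Var, isvar, variables

    x = Var('x')
    print(isvar(x))        # True: dispatched on the Var type
    print(isvar('x'))      # False: plain string, not registered
    with variables('x'):
        print(isvar('x'))  # True: 'x' is in the global logic-variable set
    print(isvar('x'))      # False again: the set is restored on exit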
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__init__.py ADDED
@@ -0,0 +1,11 @@
+from . import graph_drawer
+from . import graph_manipulation
+from . import net_min_base
+from . import operator_support
+from . import param_fetch
+from . import reinplace
+from . import shape_prop
+from . import split_module
+from . import split_utils
+from . import splitter_base
+from . import tools_common
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (579 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/annotate_getitem_nodes.cpython-310.pyc ADDED
Binary file (1.44 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/fake_tensor_prop.cpython-310.pyc ADDED
Binary file (3.17 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_drawer.cpython-310.pyc ADDED
Binary file (12 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_manipulation.cpython-310.pyc ADDED
Binary file (3.59 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/net_min_base.cpython-310.pyc ADDED
Binary file (20.3 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/operator_support.cpython-310.pyc ADDED
Binary file (7.53 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/param_fetch.cpython-310.pyc ADDED
Binary file (2.72 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/pass_manager.cpython-310.pyc ADDED
Binary file (7.57 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/reinplace.cpython-310.pyc ADDED
Binary file (18.8 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/shape_prop.cpython-310.pyc ADDED
Binary file (5.79 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_module.cpython-310.pyc ADDED
Binary file (13.5 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_utils.cpython-310.pyc ADDED
Binary file (6.96 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/splitter_base.cpython-310.pyc ADDED
Binary file (25.2 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/__pycache__/tools_common.cpython-310.pyc ADDED
Binary file (7.19 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/annotate_getitem_nodes.py ADDED
@@ -0,0 +1,44 @@
+import operator
+
+import torch
+
+
+def annotate_getitem_nodes(graph: torch.fx.Graph) -> None:
+    """
+    Annotate the type of getitem nodes, inferred from the type of sequence node.
+    If sequence node is not annotated with a type, do nothing.
+    Currently support getitem nodes from Tuple, List, and NamedTuple sequence node.
+
+    This is helpful since annotations on local names within function are lost during FX transforms.
+    Adding back known type annotation for getitem nodes to improve jit scriptability.
+
+    Args:
+        graph (Graph): The graph to be annotated
+    """
+    for node in graph.nodes:
+        if node.target == operator.getitem:
+            sequence_node, index_node = node.args
+            if not sequence_node.type:
+                continue
+            # container types
+            if hasattr(sequence_node.type, "_name"):
+                parameterized_types = sequence_node.type.__args__
+                if sequence_node.type._name == "Tuple":
+                    if len(parameterized_types) == 2 and isinstance(
+                        parameterized_types[1], type(...)
+                    ):
+                        node.type = parameterized_types[0]
+                    else:
+                        assert len(parameterized_types) > index_node
+                        node_type = parameterized_types[index_node]
+                        node.type = node_type
+                elif sequence_node.type._name == "List":
+                    assert len(parameterized_types) == 1
+                    node.type = parameterized_types[0]
+            # NamedTuple type
+            elif hasattr(sequence_node.type, "__annotations__"):
+                if sequence_node.type == torch.Tensor:
+                    continue
+                sequence_node_field_types = sequence_node.type.__annotations__
+                field_name = sequence_node.type._fields[index_node]
+                node.type = sequence_node_field_types[field_name]
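
Editor's note: a minimal sketch of the pass, assuming a forward whose parameter annotation survives tracing so the `getitem` nodes can inherit the tuple's element type:

    import operator
    from typing import Tuple

    import torch
    from torch.fx.passes.annotate_getitem_nodes import annotate_getitem_nodes

    class M(torch.nn.Module):
        def forward(self, pair: Tuple[torch.Tensor, torch.Tensor]):
            return pair[0] + pair[1]

    gm = torch.fx.symbolic_trace(M())
    annotate_getitem_nodes(gm.graph)
    for n in gm.graph.nodes:
        if n.target is operator.getitem:
            print(n.name, n.type)  # both getitem nodes annotated as torch.Tensor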
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/backends/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/backends/cudagraphs.py ADDED
@@ -0,0 +1,56 @@
+import torch
+from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
+from torch.fx.passes.operator_support import OperatorSupport
+from torch.fx.passes.tools_common import CALLABLE_NODE_OPS
+from torch.fx.passes.fake_tensor_prop import FakeTensorProp
+from torch.utils import _pytree as pytree
+
+import operator
+
+class CudaGraphsSupport(OperatorSupport):
+    # TODO: why is submodules passed here
+    def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
+        if node.op not in CALLABLE_NODE_OPS:
+            return False
+
+        if node.target in [torch.ops.aten.embedding_dense_backward.default]:
+            return False
+
+        if node.target in [operator.getitem]:
+            return True
+
+        found_not_cuda = False
+
+        def meta_fk(meta):
+            return meta["val"] if "val" in meta else meta["fake_result"]
+
+        def find_not_cuda(t):
+            nonlocal found_not_cuda
+            if isinstance(t, torch.Tensor) and t.device.type != 'cuda':
+                found_not_cuda = True
+
+        for n in node.all_input_nodes:
+            pytree.tree_map_(find_not_cuda, meta_fk(n.meta))
+
+        pytree.tree_map_(find_not_cuda, meta_fk(node.meta))
+
+        # NB: factory function is accounted for because the result would be
+        # cpu or cuda
+
+        return not found_not_cuda
+
+def partition_cudagraphs(gm, inputs):
+    """
+    Partition an FX graph into sub-GraphModules that can be validly run under
+    CUDA graphs.  For a subgraph to be runnable under CUDA, all of the operations
+    must involve CUDA tensors only/
+    """
+
+    FakeTensorProp(gm).propagate(*inputs)
+    supported_ops = CudaGraphsSupport()
+    # TODO: single node partition may be wrong due to the pessimization
+    # from copying in and out the data.  Check in benchmarks, perhaps
+    partitioner = CapabilityBasedPartitioner(gm, supported_ops, allows_single_node_partition=True)
+    partitions = partitioner.propose_partitions()
+    fused_graph = partitioner.fuse_partitions(partitions)
+    return fused_graph
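
Editor's note: a minimal usage sketch, assuming a CUDA device is available; `partition_cudagraphs` first runs `FakeTensorProp` so `is_node_supported` can inspect the device of every node's fake value:

    import torch
    from torch.fx.passes.backends.cudagraphs import partition_cudagraphs

    def f(x):
        return torch.relu(x) + 1

    gm = torch.fx.symbolic_trace(f)
    inputs = [torch.randn(8, device='cuda')]
    fused = partition_cudagraphs(gm, inputs)
    print(fused.graph)  # CUDA-only regions fused into call_module submodules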
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/dialect/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (203 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/cse_pass.cpython-310.pyc ADDED
Binary file (3.83 kB)
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/dialect/common/cse_pass.py ADDED
@@ -0,0 +1,112 @@
+from typing import Dict, Tuple, Any
+
+import torch
+from torch.fx.passes.infra.pass_base import PassBase, PassResult
+from torch.utils._pytree import tree_flatten
+
+from torch.fx import GraphModule, Graph
+from torch.fx import Node
+
+aten = torch.ops.aten
+
+
+# stateful ops are banned from CSE
+rand_ops = {aten.dropout, aten._fused_dropout, aten._standard_gamma, aten.bernoulli, aten.multinomial, aten.native_dropout, aten.normal, aten.poisson, aten.binomial, aten.rrelu, aten.rand_like, aten.rand, aten.randint, aten.randn, aten.randperm}  # noqa: E501,B950
+
+inplace_ops = {aten.add_, aten.sub_, aten.mul_, aten.div_, aten.pow_, aten.lerp_, aten.relu_, aten.sigmoid_, aten.tanh_}  # noqa: E501
+
+
+@torch.fx._compatibility.compatibility(is_backward_compatible=False)
+def get_CSE_banned_ops():
+    return rand_ops.union(inplace_ops)
+
+
+@torch.fx._compatibility.compatibility(is_backward_compatible=False)
+class CSEPass(PassBase):
+
+    def __init__(self, banned_ops=None):
+        """
+        This version of CSE Pass aims to be dialect agnostic, and it's implemented purely based on the connectivity between fx.Node.
+
+        For functional dialects, user would only need to specify the random ops in ban list.
+
+        Warning: CSE Pass cannot be safely applied on a FX graph in non-functional dialects.
+        If your dialect contains stateful operators, please customized the banned_ops.
+
+        """
+        if banned_ops is None:
+            banned_ops = set()
+        self.banned_ops = banned_ops
+        super().__init__()
+
+    def call(self, graph_module: GraphModule) -> PassResult:
+        """
+        Return a new copy of torch.fx.GraphModule with CSE applied to the input graph
+
+        Example usage:
+
+        from torch.fx.experimental.proxy_tensor import make_fx
+        def f(a):
+            b = a * a
+            c = a * a
+            return b + c
+
+        p = CSEPass()
+        traced_graph = make_fx(f)(torch.tensor(1))
+        print(traced_graph)
+        result = p(traced_graph)
+        print(result.graph_module)
+        """
+        def get_aten_target(node):
+            if hasattr(node.target, 'overloadpacket'):
+                return node.target.overloadpacket
+            return node.target
+
+        modified = False
+        new_graph = Graph()
+        env: Dict[Node, Node] = {}  # map from node in the old graph to node in the new graph
+        hash_env: Dict[Tuple[torch._ops.OpOverload, int], Node] = {}  # map from hash to a node in the new graph
+        token_map: Dict[Tuple[torch._ops.OpOverload, int], Dict[str, Any]] = {}  # map from hash to token
+        for n in graph_module.graph.nodes:
+            # The placeholder, output, and get_attr nodes are copied to the new graph without change
+            # do not CSE away random operations
+            if n.op == 'placeholder' or n.op == 'output' or n.op == 'get_attr' or get_aten_target(n) in self.banned_ops:
+                new_node = new_graph.node_copy(n, lambda x: env[x])
+                env[n] = new_node
+            else:  # n.op == 'call_function', should never see n.op == 'call_module' or 'call_method'
+                # substitute args and kwargs members to their mapping in env if exists
+                # specs can be used to reconstruct nested list/dictionaries
+                def substitute(arg_list):
+                    arg_list, spec = tree_flatten(arg_list)
+                    for i in range(len(arg_list)):
+                        v = arg_list[i]
+                        if isinstance(v, Node) and v in env:
+                            arg_list[i] = env[v]
+                    return tuple(arg_list), spec
+                args, args_spec = substitute(n.args)
+                kwargs, kwargs_spec = substitute(n.kwargs)
+
+                # each token corresponds to a unique node
+                # nodes with the same token can be substituted
+                token = {"target": n.target, "args": args, "args_spec": args_spec,
+                         "kwargs": kwargs, "kwargs_spec": kwargs_spec}
+
+                # hash substituted args to a number, do not hash specs because specs are not hashable
+                hash_arg = hash((args, kwargs))
+                hash_val = (n.target, hash_arg)
+
+                # check if a node has a substitute and can be eliminated
+                hash_val_in_hash_env = hash_val in hash_env
+                if hash_val_in_hash_env and token_map[hash_val] == token:
+                    modified = True  # substitution happens and the graph is modified
+                    env[n] = hash_env[hash_val]
+                    continue
+
+                new_node = new_graph.node_copy(n, lambda x: env[x])
+                env[n] = new_node
+                if not hash_val_in_hash_env:
+                    hash_env[hash_val] = new_node
+                    token_map[hash_val] = token
+
+        csed_gm = GraphModule(graph_module, new_graph)
+        return PassResult(csed_gm, modified)
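
Editor's note: a minimal usage sketch mirroring the docstring, extended with the banned-op set for ATen graphs so random and in-place ops survive CSE:

    import torch
    from torch.fx.experimental.proxy_tensor import make_fx
    from torch.fx.passes.dialect.common.cse_pass import CSEPass, get_CSE_banned_ops

    def f(a):
        b = a * a
        c = a * a          # duplicate subexpression: eliminated
        return b + c

    p = CSEPass(banned_ops=get_CSE_banned_ops())
    result = p(make_fx(f)(torch.tensor(1.0)))
    print(result.modified)            # True
    print(result.graph_module.graph)  # a single mul feeds both uses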
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/fake_tensor_prop.py ADDED
@@ -0,0 +1,73 @@
+from typing import Optional
+
+import torch.fx
+from torch.fx import Node
+from torch.fx._compatibility import compatibility
+from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
+from torch.fx.experimental.proxy_tensor import py_sym_types, snapshot_fake
+from torch.fx.node import map_aggregate
+
+__all__ = ['FakeTensorProp']
+
+@compatibility(is_backward_compatible=False)
+class FakeTensorProp(torch.fx.Interpreter):
+    """
+    Execute an FX graph Node-by-Node and record a fake tensor representing
+    the metadata for the node.  Unlike ShapeProp, (1) this propagation
+    is cheap--it does the propagation with meta tensors which do not actually
+    store data, and (2) the fake tensors have much more fine grained information,
+    e.g., they have accurate alias information that can be consulted by looking
+    at the storages.
+
+    Args:
+         module (GraphModule): The module to be executed
+         mode (Optional[FakeTensorMode]): The dispatch mode used to execute computation indicated by each FX Node.
+    """
+    def __init__(self, module: torch.fx.GraphModule, mode: Optional[FakeTensorMode] = None):
+        super().__init__(module)
+        if mode is None:
+            mode = FakeTensorMode()
+        self._mode = mode
+
+    def run_node(self, n: Node):
+        import sympy
+        from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
+
+        result = super().run_node(n)
+        sym = None
+        if (
+            'val' in n.meta and
+            isinstance(v := n.meta['val'], torch.SymInt) and
+            isinstance(v.node.expr, sympy.Symbol) and free_unbacked_symbols(v)
+        ):
+            sym = v
+
+        def extract_val(obj):
+            if isinstance(obj, FakeTensor):
+                return snapshot_fake(obj)
+            elif isinstance(obj, torch.Tensor):
+                # TODO: How is it possible that we get a non fake tensor?  We
+                # should be running under the mode...
+                return snapshot_fake(self._mode.from_tensor(obj, static_shapes=True))
+            elif isinstance(obj, py_sym_types):
+                return obj
+            else:
+                return None
+
+        meta = map_aggregate(result, extract_val)
+        if meta is not None:
+            n.meta['val'] = meta
+            if sym is not None:
+                torch._check(meta == v)
+        return result
+
+    def propagate(self, *args):
+        fake_args = [
+            self._mode.from_tensor(a) if isinstance(a, torch.Tensor) else a
+            for a in args
+        ]
+        return self.propagate_dont_convert_inputs(*fake_args)
+
+    def propagate_dont_convert_inputs(self, *args):
+        with self._mode:
+            return super().run(*args)
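
Editor's note: a minimal usage sketch; after `propagate`, each node's `meta['val']` holds a fake tensor carrying shape/dtype/device but no storage:

    import torch
    from torch.fx.passes.fake_tensor_prop import FakeTensorProp

    def f(x):
        return x.relu().sum()

    gm = torch.fx.symbolic_trace(f)
    FakeTensorProp(gm).propagate(torch.randn(8, 4))
    for n in gm.graph.nodes:
        print(n.name, n.meta.get('val'))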
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py ADDED
@@ -0,0 +1,421 @@
+
+import hashlib
+import torch
+import torch.fx
+from typing import Any, Dict, Optional, TYPE_CHECKING
+from torch.fx.node import _get_qualified_name, _format_arg
+from torch.fx.graph import _parse_stack_trace
+from torch.fx.passes.shape_prop import TensorMetadata
+from torch.fx._compatibility import compatibility
+from itertools import chain
+
+__all__ = ['FxGraphDrawer']
+try:
+    import pydot
+    HAS_PYDOT = True
+except ImportError:
+    HAS_PYDOT = False
+
+_COLOR_MAP = {
+    "placeholder": '"AliceBlue"',
+    "call_module": "LemonChiffon1",
+    "get_param": "Yellow2",
+    "get_attr": "LightGrey",
+    "output": "PowderBlue",
+}
+
+_HASH_COLOR_MAP = [
+    "CadetBlue1",
+    "Coral",
+    "DarkOliveGreen1",
+    "DarkSeaGreen1",
+    "GhostWhite",
+    "Khaki1",
+    "LavenderBlush1",
+    "LightSkyBlue",
+    "MistyRose1",
+    "MistyRose2",
+    "PaleTurquoise2",
+    "PeachPuff1",
+    "Salmon",
+    "Thistle1",
+    "Thistle3",
+    "Wheat1",
+]
+
+_WEIGHT_TEMPLATE = {
+    "fillcolor": "Salmon",
+    "style": '"filled,rounded"',
+    "fontcolor": "#000000",
+}
+
+if HAS_PYDOT:
+    @compatibility(is_backward_compatible=False)
+    class FxGraphDrawer:
+        """
+        Visualize a torch.fx.Graph with graphviz
+        Basic usage:
+            g = FxGraphDrawer(symbolic_traced, "resnet18")
+            g.get_dot_graph().write_svg("a.svg")
+        """
+
+        def __init__(
+            self,
+            graph_module: torch.fx.GraphModule,
+            name: str,
+            ignore_getattr: bool = False,
+            ignore_parameters_and_buffers: bool = False,
+            skip_node_names_in_args: bool = True,
+            parse_stack_trace: bool = False,
+            dot_graph_shape: Optional[str] = None,
+        ):
+            self._name = name
+            self.dot_graph_shape = (
+                dot_graph_shape if dot_graph_shape is not None else "record"
+            )
+            _WEIGHT_TEMPLATE["shape"] = self.dot_graph_shape
+
+            self._dot_graphs = {
+                name: self._to_dot(
+                    graph_module, name, ignore_getattr, ignore_parameters_and_buffers, skip_node_names_in_args, parse_stack_trace
+                )
+            }
+
+            for node in graph_module.graph.nodes:
+                if node.op != "call_module":
+                    continue
+
+                leaf_node = self._get_leaf_node(graph_module, node)
+
+                if not isinstance(leaf_node, torch.fx.GraphModule):
+                    continue
+
+
+                self._dot_graphs[f"{name}_{node.target}"] = self._to_dot(
+                    leaf_node,
+                    f"{name}_{node.target}",
+                    ignore_getattr,
+                    ignore_parameters_and_buffers,
+                    skip_node_names_in_args,
+                    parse_stack_trace,
+                )
+
+        def get_dot_graph(self, submod_name=None) -> pydot.Dot:
+            """
+            Visualize a torch.fx.Graph with graphviz
+            Example:
+                >>> # xdoctest: +REQUIRES(module:pydot)
+                >>> # define module
+                >>> class MyModule(torch.nn.Module):
+                >>>     def __init__(self):
+                >>>         super().__init__()
+                >>>         self.linear = torch.nn.Linear(4, 5)
+                >>>     def forward(self, x):
+                >>>         return self.linear(x).clamp(min=0.0, max=1.0)
+                >>> module = MyModule()
+                >>> # trace the module
+                >>> symbolic_traced = torch.fx.symbolic_trace(module)
+                >>> # setup output file
+                >>> import ubelt as ub
+                >>> dpath = ub.Path.appdir('torch/tests/FxGraphDrawer').ensuredir()
+                >>> fpath = dpath / 'linear.svg'
+                >>> # draw the graph
+                >>> g = FxGraphDrawer(symbolic_traced, "linear")
+                >>> g.get_dot_graph().write_svg(fpath)
+            """
+            if submod_name is None:
+                return self.get_main_dot_graph()
+            else:
+                return self.get_submod_dot_graph(submod_name)
+
+        def get_main_dot_graph(self) -> pydot.Dot:
+            return self._dot_graphs[self._name]
+
+        def get_submod_dot_graph(self, submod_name) -> pydot.Dot:
+            return self._dot_graphs[f"{self._name}_{submod_name}"]
+
+        def get_all_dot_graphs(self) -> Dict[str, pydot.Dot]:
+            return self._dot_graphs
+
+        def _get_node_style(self, node: torch.fx.Node) -> Dict[str, str]:
+
+            template = {
+                "shape": self.dot_graph_shape,
+                "fillcolor": "#CAFFE3",
+                "style": '"filled,rounded"',
+                "fontcolor": "#000000",
+            }
+            if node.op in _COLOR_MAP:
+                template["fillcolor"] = _COLOR_MAP[node.op]
+            else:
+                # Use a random color for each node; based on its name so it's stable.
+                target_name = node._pretty_print_target(node.target)
+                target_hash = int(hashlib.md5(target_name.encode()).hexdigest()[:8], 16)
+                template["fillcolor"] = _HASH_COLOR_MAP[target_hash % len(_HASH_COLOR_MAP)]
+            return template
+
+        def _get_leaf_node(
+            self, module: torch.nn.Module, node: torch.fx.Node
+        ) -> torch.nn.Module:
+            py_obj = module
+            assert isinstance(node.target, str)
+            atoms = node.target.split(".")
+            for atom in atoms:
+                if not hasattr(py_obj, atom):
+                    raise RuntimeError(
+                        str(py_obj) + " does not have attribute " + atom + "!"
+                    )
+                py_obj = getattr(py_obj, atom)
+            return py_obj
+
+        def _typename(self, target: Any) -> str:
+            if isinstance(target, torch.nn.Module):
+                ret = torch.typename(target)
+            elif isinstance(target, str):
+                ret = target
+            else:
+                ret = _get_qualified_name(target)
+
+            # Escape "{" and "}" to prevent dot files like:
+            # https://gist.github.com/SungMinCho/1a017aab662c75d805c5954d62c5aabc
+            # which triggers `Error: bad label format (...)` from dot
+            return ret.replace("{", r"\{").replace("}", r"\}")
+
+        # shorten path to avoid drawing long boxes
+        # for full path = '/home/weif/pytorch/test.py'
+        # return short path = 'pytorch/test.py'
+        def _shorten_file_name(
+            self,
+            full_file_name: str,
+            truncate_to_last_n: int = 2,
+        ):
+            splits = full_file_name.split('/')
+            if len(splits) >= truncate_to_last_n:
+                return '/'.join(splits[-truncate_to_last_n:])
+            return full_file_name
+
+
+        def _get_node_label(
+            self,
+            module: torch.fx.GraphModule,
+            node: torch.fx.Node,
+            skip_node_names_in_args: bool,
+            parse_stack_trace: bool,
+        ) -> str:
+            def _get_str_for_args_kwargs(arg):
+                if isinstance(arg, tuple):
+                    prefix, suffix = r"|args=(\l", r",\n)\l"
+                    arg_strs_list = [_format_arg(a, max_list_len=8) for a in arg]
+                elif isinstance(arg, dict):
+                    prefix, suffix = r"|kwargs={\l", r",\n}\l"
+                    arg_strs_list = [
+                        f"{k}: {_format_arg(v, max_list_len=8)}"
+                        for k, v in arg.items()
+                    ]
+                else:  # Fall back to nothing in unexpected case.
+                    return ""
+
+                # Strip out node names if requested.
+                if skip_node_names_in_args:
+                    arg_strs_list = [a for a in arg_strs_list if "%" not in a]
+                if len(arg_strs_list) == 0:
+                    return ""
+                arg_strs = prefix + r",\n".join(arg_strs_list) + suffix
+                if len(arg_strs_list) == 1:
+                    arg_strs = arg_strs.replace(r"\l", "").replace(r"\n", "")
+                return arg_strs.replace("{", r"\{").replace("}", r"\}")
+
+
+            label = "{" + f"name=%{node.name}|op_code={node.op}\n"
+
+            if node.op == "call_module":
+                leaf_module = self._get_leaf_node(module, node)
+                label += r"\n" + self._typename(leaf_module) + r"\n|"
+                extra = ""
+                if hasattr(leaf_module, "__constants__"):
+                    extra = r"\n".join(
+                        [f"{c}: {getattr(leaf_module, c)}" for c in leaf_module.__constants__]  # type: ignore[union-attr]
+                    )
+                label += extra + r"\n"
+            else:
+                label += f"|target={self._typename(node.target)}" + r"\n"
+                if len(node.args) > 0:
+                    label += _get_str_for_args_kwargs(node.args)
+                if len(node.kwargs) > 0:
+                    label += _get_str_for_args_kwargs(node.kwargs)
+                label += f"|num_users={len(node.users)}" + r"\n"
+
+            tensor_meta = node.meta.get('tensor_meta')
+            label += self._tensor_meta_to_label(tensor_meta)
+
+            # for original fx graph
+            # print buf=buf0, n_origin=6
+            buf_meta = node.meta.get('buf_meta', None)
+            if buf_meta is not None:
+                label += f"|buf={buf_meta.name}" + r"\n"
+                label += f"|n_origin={buf_meta.n_origin}" + r"\n"
+
+            # for original fx graph
+            # print file:lineno code
+            if parse_stack_trace and node.stack_trace is not None:
+                parsed_stack_trace = _parse_stack_trace(node.stack_trace)
+                fname = self._shorten_file_name(parsed_stack_trace.file)
+                label += f"|file={fname}:{parsed_stack_trace.lineno} {parsed_stack_trace.code}" + r"\n"
+
+
+            return label + "}"
+
+        def _tensor_meta_to_label(self, tm) -> str:
+            if tm is None:
+                return ""
+            elif isinstance(tm, TensorMetadata):
+                return self._stringify_tensor_meta(tm)
+            elif isinstance(tm, list):
+                result = ""
+                for item in tm:
+                    result += self._tensor_meta_to_label(item)
+                return result
+            elif isinstance(tm, dict):
+                result = ""
+                for v in tm.values():
+                    result += self._tensor_meta_to_label(v)
+                return result
+            elif isinstance(tm, tuple):
+                result = ""
+                for item in tm:
+                    result += self._tensor_meta_to_label(item)
+                return result
+            else:
+                raise RuntimeError(f"Unsupported tensor meta type {type(tm)}")
+
+        def _stringify_tensor_meta(self, tm: TensorMetadata) -> str:
+            result = ""
+            if not hasattr(tm, "dtype"):
+                print("tm", tm)
+            result += "|" + "dtype" + "=" + str(tm.dtype) + r"\n"
+            result += "|" + "shape" + "=" + str(tuple(tm.shape)) + r"\n"
+            result += "|" + "requires_grad" + "=" + str(tm.requires_grad) + r"\n"
+            result += "|" + "stride" + "=" + str(tm.stride) + r"\n"
+            if tm.is_quantized:
+                assert tm.qparams is not None
+                assert "qscheme" in tm.qparams
+                qscheme = tm.qparams["qscheme"]
+                if qscheme in {
+                    torch.per_tensor_affine,
+                    torch.per_tensor_symmetric,
+                }:
+                    result += "|" + "q_scale" + "=" + str(tm.qparams["scale"]) + r"\n"
+                    result += "|" + "q_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n"
+                elif qscheme in {
+                    torch.per_channel_affine,
+                    torch.per_channel_symmetric,
+                    torch.per_channel_affine_float_qparams,
+                }:
+                    result += "|" + "q_per_channel_scale" + "=" + str(tm.qparams["scale"]) + r"\n"
+                    result += "|" + "q_per_channel_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n"
+                    result += "|" + "q_per_channel_axis" + "=" + str(tm.qparams["axis"]) + r"\n"
+                else:
+                    raise RuntimeError(f"Unsupported qscheme: {qscheme}")
+                result += "|" + "qscheme" + "=" + str(tm.qparams["qscheme"]) + r"\n"
+            return result
+
+        def _get_tensor_label(self, t: torch.Tensor) -> str:
+            return str(t.dtype) + str(list(t.shape)) + r"\n"
+
+        # when parse_stack_trace=True
+        # print file:lineno code
+        def _to_dot(
+            self,
+            graph_module: torch.fx.GraphModule,
+            name: str,
+            ignore_getattr: bool,
+            ignore_parameters_and_buffers: bool,
+            skip_node_names_in_args: bool,
+            parse_stack_trace: bool,
+        ) -> pydot.Dot:
+            """
+            Actual interface to visualize a fx.Graph. Note that it takes in the GraphModule instead of the Graph.
+            If ignore_parameters_and_buffers is True, the parameters and buffers
+            created with the module will not be added as nodes and edges.
+            """
+
+            # "TB" means top-to-bottom rank direction in layout
+            dot_graph = pydot.Dot(name, rankdir="TB")
+
+
+            buf_name_to_subgraph = {}
+
+            for node in graph_module.graph.nodes:
+                if ignore_getattr and node.op == "get_attr":
+                    continue
+
+                style = self._get_node_style(node)
+                dot_node = pydot.Node(
+                    node.name, label=self._get_node_label(graph_module, node, skip_node_names_in_args, parse_stack_trace), **style
+                )
+
+                current_graph = dot_graph
+
+                buf_meta = node.meta.get('buf_meta', None)
+                if buf_meta is not None and buf_meta.n_origin > 1:
+                    buf_name = buf_meta.name
+                    if buf_name not in buf_name_to_subgraph:
+                        buf_name_to_subgraph[buf_name] = pydot.Cluster(buf_name, label=buf_name)
+                    current_graph = buf_name_to_subgraph.get(buf_name)
+
+                current_graph.add_node(dot_node)
+
+                def get_module_params_or_buffers():
+                    for pname, ptensor in chain(
+                        leaf_module.named_parameters(), leaf_module.named_buffers()
+                    ):
+                        pname1 = node.name + "." + pname
+                        label1 = (
+                            pname1 + "|op_code=get_" + "parameter"
+                            if isinstance(ptensor, torch.nn.Parameter)
+                            else "buffer" + r"\l"
+                        )
+                        dot_w_node = pydot.Node(
+                            pname1,
+                            label="{" + label1 + self._get_tensor_label(ptensor) + "}",
+                            **_WEIGHT_TEMPLATE,
+                        )
+                        dot_graph.add_node(dot_w_node)
+                        dot_graph.add_edge(pydot.Edge(pname1, node.name))
+
+                if node.op == "call_module":
+                    leaf_module = self._get_leaf_node(graph_module, node)
388
+
389
+ if not ignore_parameters_and_buffers and not isinstance(leaf_module, torch.fx.GraphModule):
390
+ get_module_params_or_buffers()
391
+
392
+ for subgraph in buf_name_to_subgraph.values():
393
+ subgraph.set('color', 'royalblue')
394
+ subgraph.set('penwidth', '2')
395
+ dot_graph.add_subgraph(subgraph)
396
+
397
+ for node in graph_module.graph.nodes:
398
+ if ignore_getattr and node.op == "get_attr":
399
+ continue
400
+
401
+ for user in node.users:
402
+ dot_graph.add_edge(pydot.Edge(node.name, user.name))
403
+
404
+ return dot_graph
405
+
406
+ else:
407
+ if not TYPE_CHECKING:
408
+ @compatibility(is_backward_compatible=False)
409
+ class FxGraphDrawer:
410
+ def __init__(
411
+ self,
412
+ graph_module: torch.fx.GraphModule,
413
+ name: str,
414
+ ignore_getattr: bool = False,
415
+ ignore_parameters_and_buffers: bool = False,
416
+ skip_node_names_in_args: bool = True,
417
+ parse_stack_trace: bool = False,
418
+ dot_graph_shape: Optional[str] = None,
419
+ ):
420
+ raise RuntimeError('FXGraphDrawer requires the pydot package to be installed. Please install '
421
+ 'pydot through your favorite Python package manager.')
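For orientation, a minimal usage sketch of the drawer added above (not part of the diff); it assumes pydot is installed, and the toy module and names are illustrative:

```python
# Render an FX graph with FxGraphDrawer; get_dot_graph() calls _to_dot() above.
import torch
from torch.fx import symbolic_trace
from torch.fx.passes.graph_drawer import FxGraphDrawer

class TwoLayer(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.lin = torch.nn.Linear(4, 4)

    def forward(self, x):
        return torch.relu(self.lin(x))

gm = symbolic_trace(TwoLayer())
drawer = FxGraphDrawer(gm, "two_layer")
dot = drawer.get_dot_graph()      # a pydot.Dot; node labels come from _get_node_label()
print(dot.to_string()[:200])      # or dot.write_svg("two_layer.svg")
```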
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/graph_manipulation.py ADDED
@@ -0,0 +1,110 @@
+from typing import Any, Dict, List, NamedTuple, Optional
+
+import torch
+from torch.fx._compatibility import compatibility
+from torch.fx.graph import Graph
+from torch.fx.graph_module import GraphModule
+from torch.fx.node import (
+    map_arg,
+    Node,
+    Target,
+)
+from torch.fx.passes.shape_prop import ShapeProp
+
+__all__ = ['replace_target_nodes_with', 'size_bytes', 'get_size_of_all_nodes', 'get_tensor_meta',
+           'get_size_of_node']
+
+@compatibility(is_backward_compatible=False)
+def replace_target_nodes_with(
+    fx_module: GraphModule,
+    old_op: str,
+    old_target: Target,
+    new_op: str,
+    new_target: Target,
+):
+    """Modifies all nodes in fx_module.graph.nodes which match the specified op code and target,
+    and updates them to match the new op code and target"""
+    new_graph = Graph()
+    val_map: Dict[Node, Node] = {}
+    for node in fx_module.graph.nodes:
+        if node.op == old_op and node.target == old_target:
+            args = map_arg(node.args, lambda n: val_map[n])
+            kwargs = map_arg(node.kwargs, lambda n: val_map[n])
+            assert isinstance(args, tuple)
+            assert isinstance(kwargs, dict)
+            val_map[node] = new_graph.create_node(
+                new_op, new_target, args, kwargs, node.name
+            )
+        else:
+            val_map[node] = new_graph.node_copy(node, lambda n: val_map[n])
+    fx_module.graph = new_graph
+
+
+@compatibility(is_backward_compatible=False)
+class size_bytes(NamedTuple):
+    output_size: int
+    total_size: int
+
+
+@compatibility(is_backward_compatible=False)
+def get_size_of_all_nodes(
+    fx_module: GraphModule, args: Optional[List[torch.Tensor]] = None
+) -> None:
+    """Given an fx graph module, update each node with its total size (weights + bias + output)
+    and its output size (output). For a non-module node, the total size is the output size.
+    The sizes are attached to the nodes; nothing is returned."""
+    if args is not None:
+        # Mark shape and dtype for each node (node.shape and node.dtype)
+        ShapeProp(fx_module).propagate(*args)
+    # Calculate the size of each node in the fx graph
+    for node in fx_module.graph.nodes:
+        if node.op == "output":
+            break
+        node.size_bytes = get_size_of_node(fx_module, node)
+    return
+
+
+@compatibility(is_backward_compatible=False)
+def get_tensor_meta(node: Node) -> Any:
+    tensor_meta = node.meta.get("tensor_meta")
+
+    if not tensor_meta:
+        raise RuntimeError(
+            f"Node {node} has no tensor metadata associated with it! "
+            f"Check that shape propagation has run."
+        )
+
+    return tensor_meta
+
+
+@compatibility(is_backward_compatible=False)
+def get_size_of_node(fx_module: GraphModule, node: Node) -> size_bytes:
+    """Given a node with node.dtype and node.shape, return its total size and its output size.
+    total_size = weights + bias + output_size
+    """
+    # Total num of elements
+    total_num_of_elems = 0
+    # For a module, consider all parameters
+    if node.op == "call_module":
+        submodule_dict = dict(fx_module.named_modules())
+        submodule = submodule_dict[node.target]
+        parameters = submodule.named_parameters()
+        # Parameters are named tuples
+        for name, p in parameters:
+            total_num_of_elems += p.numel()
+    # Don't forget the output size
+    # node.shape is the shape of this node's output
+    tensor_meta = get_tensor_meta(node)
+    output_elem = tensor_meta.shape.numel()
+    total_num_of_elems += output_elem
+    # Assume for now if it's quantized then it's qint8 or quint8
+    if tensor_meta.is_quantized:
+        size_per_elem_bytes = torch._empty_affine_quantized(
+            [], dtype=tensor_meta.dtype
+        ).element_size()
+    else:
+        size_per_elem_bytes = torch.tensor([], dtype=tensor_meta.dtype).element_size()
+    total_size = size_per_elem_bytes * total_num_of_elems
+    output_size = size_per_elem_bytes * output_elem
+    return size_bytes(output_size, total_size)
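A short sketch of how the size annotation above is typically driven (not part of the diff; the toy module and shapes are assumptions):

```python
# Annotate per-node sizes; get_size_of_all_nodes runs ShapeProp when args are given.
import torch
from torch.fx import symbolic_trace
from torch.fx.passes.graph_manipulation import get_size_of_all_nodes

class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.lin = torch.nn.Linear(8, 2)

    def forward(self, x):
        return self.lin(x)

gm = symbolic_trace(Net())
get_size_of_all_nodes(gm, [torch.randn(1, 8)])
for node in gm.graph.nodes:
    if hasattr(node, "size_bytes"):
        # size_bytes is the NamedTuple defined above: (output_size, total_size)
        print(node.name, node.size_bytes.output_size, node.size_bytes.total_size)
```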
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/__init__.py ADDED
@@ -0,0 +1,2 @@
+
+from . import pass_manager
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (231 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/partitioner.cpython-310.pyc ADDED
Binary file (9.35 kB).
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_base.cpython-310.pyc ADDED
Binary file (3.08 kB).
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_manager.cpython-310.pyc ADDED
Binary file (9.43 kB).
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/partitioner.py ADDED
@@ -0,0 +1,329 @@
+from torch.fx.passes.utils.fuser_utils import fuse_by_partitions
+import collections
+import itertools
+import logging
+
+from copy import copy
+from typing import Dict, Iterable, List, Optional, Sequence, Set
+
+from torch.fx.graph_module import GraphModule
+from torch.fx.node import Node, _get_qualified_name
+from torch.fx.passes.operator_support import OperatorSupportBase
+
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.WARNING)
+
+class Partition:
+    def __init__(self, id: Optional[int] = None, nodes: Optional[Iterable[Node]] = None):
+        self.id = id
+        self.nodes: Set[Node] = set(nodes) if nodes is not None else set()
+
+    def __repr__(self) -> str:
+        return str(self.nodes)
+
+    def add_node(self, node: Node):
+        self.nodes.add(node)
+
+    def remove_node(self, node: Node):
+        self.nodes.remove(node)
+
+    def size(self):
+        return len(self.nodes)
+
+class _DependencyViewer:
+    def __init__(self, graph_module: GraphModule):
+        self.upstreams = collections.defaultdict(set)
+        self.downstreams = collections.defaultdict(set)
+
+        for node in graph_module.graph.nodes:
+            for input_node in node.all_input_nodes:
+                # add input_node and input_node's upstream dependency
+                self.upstreams[node].add(input_node)
+                self.upstreams[node].update(self.upstreams[input_node])
+
+        for node in reversed(graph_module.graph.nodes):
+            for output_node in node.users:
+                # add output_node and output_node's downstream dependency
+                self.downstreams[node].add(output_node)
+                self.downstreams[node].update(self.downstreams[output_node])
+
+    def downstreams_of(self, node: Node) -> Set[Node]:
+        return self.downstreams[node]
+
+    def upstreams_of(self, node: Node) -> Set[Node]:
+        return self.upstreams[node]
+
+class CapabilityBasedPartitioner:
+
+    def __init__(self,
+                 graph_module: GraphModule,
+                 operator_support: OperatorSupportBase,
+                 allows_single_node_partition: bool = False,
+                 non_compute_ops: Optional[Sequence[str]] = None,
+                 allowed_single_node_partition_ops: Optional[Sequence[str]] = None,
+                 ) -> None:
+        self.graph_module = graph_module
+        self.operator_support = operator_support
+        self.allows_single_node_partition = allows_single_node_partition
+        self.non_compute_ops = non_compute_ops if non_compute_ops is not None else []
+        self.allowed_single_node_partition_ops = (
+            allowed_single_node_partition_ops
+            if allowed_single_node_partition_ops is not None
+            else []
+        )
+        self.dependency_viewer = _DependencyViewer(graph_module)
+
+    def __is_node_supported(self, node: Node) -> bool:
+        return (
+            self.operator_support.is_node_supported(dict(self.graph_module.named_modules()), node)
+        )
+
+    def propose_partitions(self) -> List[Partition]:
+        # partition_map is a mapping from partition id to a set of partition ids.
+        # The value set contains all the partition ids that can be reached by doing a
+        # DFS starting from the partition id in the key.
+        partition_map: Dict[int, Set] = collections.defaultdict(set)
+
+        # assumption: nodes in the candidate list are sorted in topological order
+        assignment: Dict[Node, int] = {}   # mapping from node to partition_id
+        partitions_by_id: Dict[int, Partition] = {}  # mapping from partition_id to partition
+        new_partition_id = itertools.count()
+
+        # try to merge partition other_id into partition self_id
+        # merge only happens if the end graph doesn't contain a cyclic dependency
+        # returns `True` when the merge happens, `False` otherwise.
+        def maybe_merge_partition(self_id: int, other_id: int):
+            # merged_nodes is the union of nodes in the two partitions to be merged
+            merged_nodes = copy(partitions_by_id[self_id].nodes)
+            merged_nodes.update(partitions_by_id[other_id].nodes)
+
+            def dfs_iter_find_cycle(all_user_nodes: List[Node]):
+                for user_node in all_user_nodes:
+                    visited_partition_ids = set()
+
+                    for path_node in self.dependency_viewer.downstreams_of(user_node):
+                        # If any of the nodes in the dfs path of this node are in the merged_nodes
+                        # list then there is a cycle in the graph.
+                        if path_node in merged_nodes:
+                            return True
+
+                        # If any of the nodes in the dfs path of this node are in the assignment
+                        # map then we have to make sure that the partitions that these nodes belong
+                        # to do not form a cycle with the current partitions being merged. This means
+                        # iterating through all the nodes in all the partitions that are traversed in
+                        # the dfs path and checking if they are in the merged_nodes list.
+                        if path_node in assignment:
+                            partition_id = assignment[path_node]
+                            # If the partition id has already been visited then we know that it doesn't
+                            # form a cycle with the current partitions being merged.
+                            if partition_id in visited_partition_ids:
+                                continue
+                            p_map = partition_map[partition_id]
+                            if self_id in p_map or other_id in p_map:
+                                return True
+
+                            visited_partition_ids.add(partition_id)
+
+                return False
+
+            # check if the merge would create a cyclic dependency.
+            all_user_nodes = []
+            for node in merged_nodes:
+                for user_node in node.users:
+                    if user_node not in merged_nodes:
+                        all_user_nodes.append(user_node)
+
+            if dfs_iter_find_cycle(all_user_nodes):
+                # return False indicating a cyclic dependency was found and
+                # the merge is aborted
+                return False
+
+            # no cyclic dependency found, move forward with the merge
+            # updating partition nodes
+            partitions_by_id[self_id].nodes = merged_nodes
+            # updating assignment map
+            for node in partitions_by_id[other_id].nodes:
+                assignment[node] = self_id
+            # delete other partition
+            del partitions_by_id[other_id]
+
+            partition_map[self_id] = partition_map[self_id].union(partition_map[other_id])
+            del partition_map[other_id]
+
+            return True
+
+        def merge_single_node(node: Node, id: Optional[int]):
+            def _update_partition_map(node: Node, id: int):
+                # Iterate through all the downstream nodes of this node and update the partition map
+                # to indicate that there is a path from the partition id of this node to the target
+                # partition id.
+                downstream_nodes = self.dependency_viewer.downstreams_of(node)
+                for curr_node in downstream_nodes:
+                    target_id = assignment.get(curr_node, None)
+                    if target_id is not None:
+                        partition_map[id].add(target_id)
+
+                # Iterate through all the upstream nodes of this node and update the partition map
+                # to indicate that there is a path from the partition id of the upstream node to the
+                # current node's partition id.
+                upstream_nodes = self.dependency_viewer.upstreams_of(node)
+                for curr_node in upstream_nodes:
+                    source_id = assignment.get(curr_node, None)
+                    if source_id is not None:
+                        partition_map[source_id].add(id)
+
+            if node in assignment:
+                partitions_by_id[assignment[node]].remove_node(node)
+
+            if id is None:
+                assignment.pop(node)
+            elif id not in partitions_by_id:
+                assignment[node] = id
+                partitions_by_id[id] = Partition(id=id, nodes=[node])
+                _update_partition_map(node, id)
+            else:
+                assignment[node] = id
+                partitions_by_id[id].add_node(node)
+                _update_partition_map(node, id)
+
+        logger.debug("Proposing partitions...")
+
+        for node in reversed(self.graph_module.graph.nodes):
+            # use a Dict as an ordered set to ensure a deterministic partitioning result; values don't matter
+            merge_candidates: Dict[int, None] = {}
+
+            # Note that a limited horizontal fusion is enabled:
+            # when `node` is not supported, the code below attempts to fuse the consumers of `node`.
+            #
+            # I don't see a need to add a knob to disable horizontal fusion yet; we can short-cut
+            # the fusion by adding an `else` block here to skip horizontal fusion.
+            if self.__is_node_supported(node) and node not in assignment:
+                partition_id = next(new_partition_id)
+                merge_single_node(node, partition_id)
+                merge_candidates[partition_id] = None
+
+            # merge all possible partitions
+            for node in assignment:
+                merge_candidates[assignment[node]] = None
+
+            merge_candidates_list = list(merge_candidates.keys())
+            if len(merge_candidates_list) > 1:
+                self_id = merge_candidates_list[0]
+                for other_id in merge_candidates_list[1:]:
+                    # note: merge partition `other_id` into partition `self_id` if
+                    # it doesn't create a cyclic dependency in the graph; otherwise,
+                    # this is a no-op
+                    maybe_merge_partition(self_id, other_id)
+
+        # post processing to re-assign "getitem" nodes into the upstream partition
+        logger.debug("Reassigning getitem nodes to their producer node's partition...")
+        nodes_reassignment: Dict[Node, int] = {}
+        for node in self.graph_module.graph.nodes:
+            is_tuple_output = True
+            for user in node.users:
+                if user.op != "call_function" or \
+                   _get_qualified_name(user.target) != "_operator.getitem":     # type: ignore[arg-type]
+                    is_tuple_output = False
+                    break
+
+            # node has tuple outputs, re-assign all following getitem nodes into node's partition
+            if is_tuple_output:
+                id = assignment.get(node, None)     # type: ignore[arg-type]
+                for user in node.users:
+                    if assignment.get(user, None) != id:    # type: ignore[arg-type]
+                        nodes_reassignment[user] = id  # type: ignore[assignment]
+        for node, id in nodes_reassignment.items():
+            merge_single_node(node, id)
+
+        # filter out single node partitions
+        if not self.allows_single_node_partition:
+            logger.debug("Filtering out single node partitions...")
+            default_non_compute_ops = {"torch.ops.aten.view", "_operator.getitem"}
+            non_compute_ops = default_non_compute_ops.union(set(self.non_compute_ops))
+            partitions_to_remove: List[int] = []
+            for id, partition in partitions_by_id.items():
+                compute_node_count = 0
+                for node in partition.nodes:
+                    if node.op == "call_function":
+                        assert callable(node.target)
+                        if _get_qualified_name(node.target) not in non_compute_ops:
+                            compute_node_count += 1
+                        if _get_qualified_name(node.target) in self.allowed_single_node_partition_ops:
+                            compute_node_count += 1
+                if compute_node_count <= 1:
+                    partitions_to_remove.append(id)
+            for id in partitions_to_remove:
+                del partitions_by_id[id]
+
+        logger.debug("Partitions proposed:")
+        for id, partition in partitions_by_id.items():
+            logger.debug("partition #%s: %s", id, [node.name for node in partition.nodes])
+
+        return list(partitions_by_id.values())
+
+    def fuse_partitions(self, partitions: List[Partition]) -> GraphModule:
+        logger.debug("Fusing partitions...")
+        # fuse_by_partitions expects partitions in List[List[Node]]: [ [node0, node1], [node2, node3] ]
+        return fuse_by_partitions(self.graph_module, [list(partition.nodes) for partition in partitions])
+
+    # remove non-compute ops that sit at the boundary of a partition.
+    def remove_bookend_non_compute_ops(self, partitions: List[Partition]):
+        non_compute_ops = set(self.non_compute_ops)
+
+        def is_non_compute_node(node: Node):
+            return node.op == "call_function" and \
+                _get_qualified_name(node.target) in non_compute_ops  # type: ignore[arg-type]
+
+        # cache transparent nodes
+        transparent_input_nodes: Dict[Node, bool] = {}
+        transparent_output_nodes: Dict[Node, bool] = {}
+
+        def is_transparent_input_node(node: Node, partition: Set[Node], removed_nodes: Set[Node]):
+            if node.op == "placeholder" or (node not in partition) or (node in removed_nodes):
+                return True
+            if node in transparent_input_nodes:
+                return transparent_input_nodes[node]
+            if is_non_compute_node(node):
+                for input_n in node.all_input_nodes:
+                    if not is_transparent_input_node(input_n, partition, removed_nodes):
+                        transparent_input_nodes[node] = False
+                        return False
+                transparent_input_nodes[node] = True
+                return True
+            transparent_input_nodes[node] = False
+            return False
+
+        def is_transparent_output_node(node: Node, partition: Set[Node], removed_nodes: Set[Node]):
+            if node.op == "placeholder" or (node not in partition) or (node in removed_nodes):
+                return True
+            if node in transparent_output_nodes:
+                return transparent_output_nodes[node]
+            if is_non_compute_node(node):
+                for output_n in node.users:
+                    if not is_transparent_output_node(output_n, partition, removed_nodes):
+                        transparent_output_nodes[node] = False
+                        return False
+                transparent_output_nodes[node] = True
+                return True
+            transparent_output_nodes[node] = False
+            return False
+
+        for partition in partitions:
+            # Note it's ok to use `set` here, since we are only querying whether a node
+            # has been removed. We are NEVER going to iterate over the nodes inside
+            # the set.
+            remove_node: Set[Node] = set()
+            for node in partition.nodes:
+                if is_non_compute_node(node) and \
+                    (is_transparent_input_node(node, partition.nodes, remove_node) or
+                     is_transparent_output_node(node, partition.nodes, remove_node)):
+                    remove_node.add(node)
+
+            if len(remove_node) != 0:
+                partition.nodes = partition.nodes - remove_node
+
+    def partition_and_fuse(self) -> GraphModule:
+        partitions = self.propose_partitions()
+        fused_gm = self.fuse_partitions(partitions)
+        return fused_gm
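A minimal sketch of driving the partitioner above (not part of the diff); the "support everything" rule is an assumption for demonstration, real backends implement a meaningful `is_node_supported`:

```python
# Partition a traced function so that supported call_function nodes are fused together.
import torch
from torch.fx import symbolic_trace
from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
from torch.fx.passes.operator_support import OperatorSupportBase

class SupportEverything(OperatorSupportBase):
    def is_node_supported(self, submodules, node) -> bool:
        # Toy rule: claim every call_function node is supported by the "backend".
        return node.op == "call_function"

def f(x):
    return torch.relu(x + 1) * 2

gm = symbolic_trace(f)
partitioner = CapabilityBasedPartitioner(
    gm, SupportEverything(), allows_single_node_partition=True
)
fused = partitioner.partition_and_fuse()  # GraphModule with partitions fused into submodules
print(fused.graph)
```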
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/pass_base.py ADDED
@@ -0,0 +1,75 @@
+import abc
+from collections import namedtuple
+from typing import Optional
+
+from torch.fx.graph_module import GraphModule
+from torch.fx._compatibility import compatibility
+
+
+__all__ = ['PassResult', 'PassBase']
+
+@compatibility(is_backward_compatible=False)
+class PassResult(namedtuple("PassResult", ["graph_module", "modified"])):
+    """
+    Result of a pass:
+        graph_module: The modified graph module
+        modified: A flag for if the pass has modified the graph module
+    """
+    def __new__(cls, graph_module, modified):
+        return super().__new__(cls, graph_module, modified)
+
+@compatibility(is_backward_compatible=False)
+class PassBase(abc.ABC):
+    """
+    Base interface for implementing passes.
+
+    It is required to implement the `call` function so that we can pass
+    instances of the Pass directly to the PassManager and call them as a
+    function.
+
+    We can directly pass an instance of a class implementing this interface into
+    the PassManager's `passes` attribute.
+    """
+
+    def __call__(self, graph_module: GraphModule) -> Optional[PassResult]:
+        """
+        Runs the precondition check, the pass itself, and the postcondition check.
+        """
+
+        self.requires(graph_module)
+        res = self.call(graph_module)
+        self.ensures(graph_module)
+        return res
+
+    @abc.abstractmethod
+    def call(self, graph_module: GraphModule) -> Optional[PassResult]:
+        """
+        The pass that is run through the given graph module. To implement a
+        pass, it is required to implement this function.
+
+        Args:
+            graph_module: The graph module we will run a pass on
+        """
+        pass
+
+    def requires(self, graph_module: GraphModule) -> None:  # noqa: B027
+        """
+        This function will be called before the pass is run and will check that
+        the given graph module contains the preconditions needed to run the
+        pass. It is not required to implement this function.
+
+        Args:
+            graph_module: The graph module we will run checks on
+        """
+        pass
+
+    def ensures(self, graph_module: GraphModule) -> None:  # noqa: B027
+        """
+        This function will be called after the pass is run and will check that
+        the given graph module contains the postconditions needed to run the
+        pass. It is not required to implement this function.
+
+        Args:
+            graph_module: The graph module we will run checks on
+        """
+        pass
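A small sketch of the interface above in use (not part of the diff); the pass name and traced lambda are illustrative:

```python
# The smallest useful PassBase subclass: a dead-code-elimination pass.
import torch
from torch.fx import symbolic_trace
from torch.fx.passes.infra.pass_base import PassBase, PassResult

class DeadCodeElimPass(PassBase):
    def call(self, graph_module):
        # Graph.eliminate_dead_code() returns whether anything was removed.
        modified = graph_module.graph.eliminate_dead_code()
        graph_module.recompile()
        return PassResult(graph_module, modified)

# x * 0 is never used, so DCE removes it.
gm = symbolic_trace(lambda x: (x + 1, x * 0)[0])
result = DeadCodeElimPass()(gm)   # __call__ runs requires -> call -> ensures
print(result.modified, result.graph_module.graph)
```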
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/infra/pass_manager.py ADDED
@@ -0,0 +1,303 @@
+import inspect
+import logging
+from queue import Queue
+from functools import wraps
+from typing import Callable, Dict, List
+
+import torch.nn as nn
+from torch.fx.graph_module import GraphModule
+from torch.fx._compatibility import compatibility
+from torch.fx.passes.infra.pass_base import PassResult
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.WARNING)
+
+__all__ = ['pass_result_wrapper', 'this_before_that_pass_constraint', 'PassManager']
+
+@compatibility(is_backward_compatible=False)
+def pass_result_wrapper(fn: Callable) -> Callable:
+    """
+    Wrapper for passes which currently do not return a PassResult.
+    This wrapper makes them return a PassResult containing the modified object
+    and True for the "modified" flag.
+
+    Args:
+        fn (Callable[Module, Any])
+
+    Returns:
+        wrapped_fn (Callable[Module, PassResult])
+    """
+    if fn is None:
+        return None
+
+    @wraps(fn)
+    def wrapped_fn(gm):
+        res = fn(gm)
+        if res is None:
+            return PassResult(gm, True)
+        if isinstance(res, PassResult):
+            return res
+        elif isinstance(res, nn.Module):
+            return PassResult(res, True)
+
+    if not inspect.isfunction(fn):
+        wrapped_fn.__name__ = type(fn).__name__
+
+    return wrapped_fn
+
+def _validate_pass_schedule_constraint(
+    constraint: Callable[[Callable, Callable], bool], passes: List[Callable]
+) -> None:
+    for i, a in enumerate(passes):
+        for j, b in enumerate(passes[i + 1 :]):
+            if constraint(a, b):
+                continue
+            raise RuntimeError(
+                f"pass schedule constraint violated. Expected {a} before {b}"
+                f" but found {a} at index {i} and {b} at index {j} in the pass"
+                f" list."
+            )
+
+def _topological_sort_passes(
+    passes: List[Callable], constraints: List[Callable]
+) -> List[Callable]:
+    """
+    Args
+        passes: Passes that we are ordering
+        constraints: Constraints applied on these passes
+
+    Returns
+        A sorted list of callables; raises RuntimeError if a circular
+        dependency exists among the constraints
+    """
+    if len(constraints) == 0:
+        return passes
+
+    # Construct a graph mapping nodes to a list of their users
+    graph: Dict[Callable, List[Callable]] = {p: [] for p in passes}
+    indegree_map: Dict[Callable, int] = dict.fromkeys(passes, 0)
+    candidates: Queue = Queue()
+    for a in passes:
+        for b in passes:
+            if a == b:
+                continue
+
+            for constraint in constraints:
+                if not constraint(a, b):
+                    graph[b].append(a)
+                    indegree_map[a] += 1
+
+        if indegree_map[a] == 0:
+            candidates.put(a)
+
+    visited: Dict[Callable, bool] = dict.fromkeys(passes, False)
+    sorted_passes: List[Callable] = []
+
+    while not candidates.empty():
+        p = candidates.get()
+        sorted_passes.append(p)
+        visited[p] = True
+
+        for n in graph[p]:
+            if not visited[n]:
+                indegree_map[n] -= 1
+                if indegree_map[n] == 0:
+                    candidates.put(n)
+
+    # Check if there are unvisited nodes (aka cycles in the graph)
+    cycle_passes = list(filter(lambda p: indegree_map[p] != 0, indegree_map.keys()))
+    if len(cycle_passes) != 0:
+        error = f"Circular dependency detected within the following passes: {cycle_passes}"
+        raise RuntimeError(error)
+
+    return sorted_passes
+
+@compatibility(is_backward_compatible=False)
+def this_before_that_pass_constraint(this: Callable, that: Callable) -> Callable:
+    """
+    Defines a partial order ('depends on' function) where `this` must occur
+    before `that`.
+
+    For example, the following pass list and constraint list would be invalid.
+    ```
+    passes = [pass_b, pass_a]
+
+    constraints = [
+        this_before_that_pass_constraint(pass_a, pass_b)
+    ]
+    ```
+
+    Args:
+        this (Callable): pass which should occur first
+        that (Callable): pass which should occur later
+
+    Returns:
+        depends_on (Callable[[Object, Object], bool])
+    """
+
+    def depends_on(a: Callable, b: Callable):
+        if a == that and b == this:
+            return False
+        return True
+
+    return depends_on
+
+
+@compatibility(is_backward_compatible=False)
+class PassManager:
+    """
+    Construct a PassManager.
+
+    Collects passes and constraints. This defines the pass schedule, manages
+    pass constraints and pass execution.
+
+    Args:
+        passes (Optional[List[Callable]]): List of passes. A pass is a
+            callable which modifies an object and returns a PassResult
+        constraints (Optional[List[Callable]]): List of constraints. A
+            constraint is a callable which takes two passes (A, B) and returns
+            True if A depends on B and False otherwise. See the implementation of
+            `this_before_that_pass_constraint` for an example.
+        steps (int): Max number of times we run the passes (default = 1).
+        run_checks_after_each_pass (bool): Whether to run checks and linting
+            after each pass
+        suppress_check_failures (bool): Whether to raise errors when running
+            checks
+    """
+
+    passes: List[Callable[[nn.Module], PassResult]]
+    constraints: List[Callable[[Callable, Callable], bool]]
+    _validated: bool = False
+    steps: int = 1
+
+    def __init__(
+        self,
+        passes=None,
+        constraints=None,
+        steps=None,
+        run_checks_after_each_pass: bool = False,
+        suppress_check_failures: bool = False,
+    ):
+        self.passes = passes or []
+        self.constraints = constraints or []
+        if steps:
+            self.steps = steps
+
+        self.run_checks_after_each_pass = run_checks_after_each_pass
+        self.suppress_check_failures = suppress_check_failures
+
+    def add_pass(self, _pass: Callable):
+        """
+        Adds a pass into the current list of passes.
+        """
+        self.passes.append(_pass)
+        self._validated = False
+
+    def add_constraint(self, constraint: Callable):
+        """
+        Adds a constraint into the current list of constraints.
+        """
+        self.constraints.append(constraint)
+        self._validated = False
+
+    def validate_constraints(self):
+        """
+        Validates that the current pass schedule defined by `self.passes` is valid
+        according to all constraints in `self.constraints`
+        """
+        if self._validated:
+            return
+        for constraint in self.constraints:
+            _validate_pass_schedule_constraint(constraint, self.passes)
+        self._validated = True
+
+    def solve_constraints(self):
+        """
+        Finds a valid traversal order based on the given constraints and orders
+        the passes based on this order.
+
+        If a circular dependency exists between the constraints and steps = 1,
+        then we will raise an error because if steps != 1 this means that we
+        will re-run the passes, allowing for circular dependencies.
+        """
+        self.passes = _topological_sort_passes(self.passes, self.constraints)
+        self._validated = True
+
+    def add_checks(self, check: Callable) -> None:
+        """
+        Adds a function which runs various checks on a given graph module.
+        This function is run before and after each pass if the
+        `run_checks_after_each_pass` flag is enabled.
+        """
+        sig = inspect.signature(check)
+
+        if len(list(sig.parameters.values())) != 1:
+            raise TypeError("PassManager check function should only take in one variable, a module")
+
+        setattr(self, "check", check)  # noqa: B010
+
+    def check(self, module: nn.Module) -> None:
+        pass
+
+    def __call__(self, module: nn.Module) -> PassResult:
+        """
+        Runs the list of passes in the order given by `self.passes` on the given
+        graph module. Each time a pass is run, checks and linting will be run on
+        the graph module if `run_checks_after_each_pass` is set.
+
+        If the module is a graph module, we will run the list of passes until
+        the graph stops changing, or until `steps` number of times.
+        """
+        # Order the passes based on the constraints
+        if not self._validated:
+            self.solve_constraints()
+
+        # Check graph invariants
+        self.check(module)
+
+        # Run the set of passes `steps` number of times or until the graph stops
+        # changing
+        overall_modified = False
+        for _ in range(self.steps):
+            modified = False
+
+            # Run the set of passes on the graph module
+            for i, fn in enumerate(self.passes):
+                fn_name = fn.__name__ if inspect.isfunction(fn) else type(fn).__name__
+                logger.debug("Running pass '%s'", fn_name)
+
+                try:
+                    res = fn(module)
+
+                    if not isinstance(res, PassResult) and not hasattr(
+                        res, "graph_module"
+                    ):
+                        raise TypeError(
+                            f"The result of the pass {fn_name} should be type PassResult. "
+                            "Please wrap it with pass_result_wrapper()"
+                        )
+                    module = res.graph_module
+                    modified = modified or res.modified
+
+                    if isinstance(module, GraphModule):
+                        logger.debug("Graph after pass '%s': %s", fn_name, module.graph)
+                        module.recompile()
+
+                    # Check graph invariants
+                    if self.run_checks_after_each_pass:
+                        self.check(module)
+
+                except Exception as e:
+                    prev_pass_names = [
+                        p.__name__ if inspect.isfunction(p) else type(p).__name__
+                        for p in self.passes[:i]
+                    ]
+                    msg = f"An error occurred when running the '{fn_name}' pass after the following passes: {prev_pass_names}"
+                    raise Exception(msg) from e
+
+            # If the graph no longer changes, then we can stop running these passes
+            overall_modified = overall_modified or modified
+            if not modified:
+                break
+
+        return PassResult(module, overall_modified)
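A sketch of the scheduling machinery above (not part of the diff); the two toy passes are assumptions, and note the constraint must reference the same callables that appear in `passes` (the wrapped functions here):

```python
# Schedule two passes with PassManager and force an order via a constraint.
import torch
from torch.fx import symbolic_trace
from torch.fx.passes.infra.pass_manager import (
    PassManager, pass_result_wrapper, this_before_that_pass_constraint,
)

def lint_pass(gm):        # returns None -> wrapped into PassResult(gm, True)
    gm.graph.lint()

def dce_pass(gm):
    gm.graph.eliminate_dead_code()
    gm.recompile()

lint = pass_result_wrapper(lint_pass)
dce = pass_result_wrapper(dce_pass)

pm = PassManager(
    passes=[dce, lint],   # wrong order on purpose
    constraints=[this_before_that_pass_constraint(lint, dce)],
)
result = pm(symbolic_trace(lambda x: x + 1))  # solve_constraints() reorders to [lint, dce]
print(result.modified)
```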
llmeval-env/lib/python3.10/site-packages/torch/fx/passes/net_min_base.py ADDED
@@ -0,0 +1,731 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from dataclasses import dataclass
3
+ from typing import Any, Callable, Dict, List, Optional, Tuple
4
+
5
+ import torch
6
+ import torch.fx
7
+
8
+ from torch.fx._compatibility import compatibility
9
+ from torch.fx.node import map_arg
10
+
11
+ from .shape_prop import ShapeProp
12
+ from .split_utils import split_by_tags
13
+ from .tools_common import (
14
+ CALLABLE_NODE_OPS,
15
+ FxNetAccFusionsFinder,
16
+ Names,
17
+ NodeList,
18
+ NodeSet,
19
+ TensorOrTensors,
20
+ Tensors,
21
+ )
22
+
23
+ __all__ = [
24
+ "FxNetMinimizerBadModuleError",
25
+ "FxNetMinimizerRunFuncError",
26
+ "FxNetMinimizerResultMismatchError",
27
+ ]
28
+
29
+ _LOGGER = logging.getLogger(__name__)
30
+
31
+
32
+ @compatibility(is_backward_compatible=False)
33
+ class FxNetMinimizerBadModuleError(Exception):
34
+ """
35
+ Raised if failed to split out a minimize module
36
+ """
37
+
38
+ pass
39
+
40
+
41
+ @compatibility(is_backward_compatible=False)
42
+ class FxNetMinimizerRunFuncError(Exception):
43
+ """
44
+ Raised if error occurs during run_a or run_b functions
45
+ """
46
+
47
+ pass
48
+
49
+
50
+ @compatibility(is_backward_compatible=False)
51
+ class FxNetMinimizerResultMismatchError(Exception):
52
+ """
53
+ Raised if comparing function thinks the results are mismatching.
54
+ """
55
+
56
+ pass
57
+
58
+
59
+ @dataclass
60
+ class _MinimizerSettingBase:
61
+ """
62
+ Args:
63
+ `accumulate_error`: Instead of using a's input for both converted module to verify
64
+ , use the previous outputs of each converted module as input to accumulate the
65
+ errors.
66
+
67
+ `traverse_method`: "sequential" or "binary" or "accumulate"
68
+ Determine the way of traverse the nodes in FX module.
69
+
70
+ `find_all`: Minimizer will go through the entire model and return all problematic nodes.
71
+
72
+ `return_intermediate`: If true, when using `run_nodes()` function to run the
73
+ model, intermediate results of all the ops will be returned as output.
74
+ """
75
+
76
+ accumulate_error: bool = False
77
+ traverse_method: str = "sequential"
78
+ find_all: bool = False
79
+ return_intermediate: bool = False
80
+
81
+ def __str__(self):
82
+ settings_str = "FX Minimizer Settings:\n"
83
+
84
+ for k, v in vars(self).items():
85
+ settings_str += f"\t{k}: {v}\n"
86
+
87
+ return settings_str
88
+
89
+
90
+ class _MinimizerBase:
91
+ """
92
+ This class is used to automatically find problematic nodes in a model. It takes a FX
93
+ graphmodule and generate some submodules while traverse the graph. Then two functions
94
+ `run_a` and `run_b` will be used to run the same submodule and a function `compare_fn`
95
+ will be used to compare the results.
96
+
97
+ Currently we provides two ways to traverse the graph and generate submodules.
98
+ 1. Sequential traversal: this will traverse the graph node by node and generate
99
+ one submodule with one sigle node.
100
+ 2. Binary searching: this will do a binary search style traversal on the graph.
101
+
102
+ For internal Users, a guide can be found here https://fb.quip.com/HDtuAgiKGfkP.
103
+ """
104
+
105
+ def __init__(
106
+ self,
107
+ module: torch.fx.GraphModule,
108
+ sample_input: Tensors,
109
+ compare_fn: Callable[
110
+ [TensorOrTensors, TensorOrTensors, Names], Tuple[float, bool]
111
+ ],
112
+ settings: _MinimizerSettingBase,
113
+ module_exporter: Optional[
114
+ Callable[
115
+ [List[torch.Tensor], torch.fx.GraphModule, str],
116
+ None
117
+ ]
118
+ ] = None,
119
+ ):
120
+ assert isinstance(module, torch.fx.GraphModule)
121
+
122
+ self.module = module
123
+ self.sample_input = sample_input
124
+ self.compare_fn = compare_fn
125
+ self.module_exporter = module_exporter
126
+ self.settings = settings
127
+
128
+ # Stores outputs of run_a function
129
+ self.a_outputs: Dict[str, Any] = {}
130
+
131
+ # Stores outputs of run_b function
132
+ self.b_outputs: Dict[str, Any] = {}
133
+
134
+ # Stores the results of compare_fn
135
+ self.results: Dict[Any, Any] = {}
136
+
137
+ # Stores the report for the runs
138
+ self.reports: List[List[str]] = []
139
+
140
+ # Current iteration
141
+ self.iteration: int = 0
142
+
143
+ callable_nodes = {
144
+ node for node in self.module.graph.nodes if node.op in CALLABLE_NODE_OPS
145
+ }
146
+ ShapeProp(self.module).propagate(*self.sample_input)
147
+ self.fusions = FxNetAccFusionsFinder(self.module, callable_nodes)()
148
+
149
+ # Check if number of input in sample_input matches the number of placeholders
150
+ placeholders = [
151
+ node.name for node in self.module.graph.nodes if node.op == "placeholder"
152
+ ]
153
+ assert len(placeholders) == len(self.sample_input)
154
+
155
+ # Store sample_input
156
+ for i, name in enumerate(placeholders):
157
+ self.a_outputs[name] = sample_input[i]
158
+ self.b_outputs[name] = sample_input[i]
159
+
160
+ def run_a(self, mod: torch.fx.GraphModule, inputs: Tensors) -> TensorOrTensors:
161
+ """
162
+ Run `mod` with `inputs` and generate output. The output will be compared with
163
+ output of run_b().
164
+ """
165
+ raise RuntimeError("run_a() is not implemented.")
166
+
167
+ def run_b(self, mod: torch.fx.GraphModule, inputs: Tensors) -> TensorOrTensors:
168
+ """
169
+ Run `mod` with `inputs` and generate output. The output will be compared with
170
+ output of run_a().
171
+ """
172
+ raise RuntimeError("run_b() is not implemented.")
173
+
174
+ def _store_outputs(
175
+ self,
176
+ a_result: TensorOrTensors,
177
+ b_result: TensorOrTensors,
178
+ submodule: torch.fx.GraphModule,
179
+ ):
180
+ """
181
+ Store the outputs of self.run_a() and self.run_b() into self.a_outputs and
182
+ self.b_outputs, so that we can use them when execute preceding nodes that
183
+ use those outputs as inputs.
184
+
185
+ Args:
186
+ a_result: Output of self.run_a(). Could be a tensor or tensors.
187
+ b_result: Output of self.run_b(). Could be a tensor or tensors.
188
+ submodule: The module that generates a_result and b_result.
189
+ """
190
+ output_node = next(
191
+ node for node in submodule.graph.nodes if node.op == "output"
192
+ )
193
+
194
+ # Only one output
195
+ if isinstance(output_node.args[0], torch.fx.Node):
196
+ self.a_outputs[output_node.args[0].name] = a_result
197
+ self.b_outputs[output_node.args[0].name] = b_result
198
+ # Multiple outputs
199
+ else:
200
+ for i, arg in enumerate(output_node.args[0]):
201
+ self.a_outputs[arg.name] = a_result[i]
202
+ self.b_outputs[arg.name] = b_result[i]
203
+
204
+ def _get_submod_inputs(
205
+ self, main_module: torch.fx.GraphModule, submod_path: str
206
+ ) -> Tuple[Tensors, Tensors]:
207
+ """
208
+ Try get submodule inputs from stored outputs. If not found then use
209
+ torch_glow.get_submod_inputs to get the inputs.
210
+
211
+ If accumulate_error is False, use a_input for run_a() and run_b()
212
+ otherwise use a_input for run_a and b_input for run_b.
213
+
214
+ Args:
215
+ main_module: Top-levlel fx module.
216
+ submod_path: Path to the submodule we want to run and compare results.
217
+
218
+ Returns:
219
+ a_input: List of tensor(s) that will be used by run_a() as submodule inputs.
220
+ b_input: List of tensor(s) that will be used by run_b() as submodule inputs.
221
+ """
222
+ a_input = []
223
+ b_input = []
224
+ submodule = getattr(main_module, submod_path)
225
+ placeholders = [
226
+ node.name for node in submodule.graph.nodes if node.op == "placeholder"
227
+ ]
228
+
229
+ # If all placeholder can be found in stored outputs, use stored
230
+ # outputs as inputs. Otherwise, use `torch_glow.get_submod_inputs`
231
+ # to get the inputs.
232
+ if set(placeholders) <= self.a_outputs.keys():
233
+ for name in placeholders:
234
+ a_input.append(self.a_outputs[name])
235
+ b_input.append(self.b_outputs[name])
236
+ else:
237
+ if self.settings.accumulate_error:
238
+ print(f"Can't find previous stored outputs named {placeholders}!")
239
+
240
+ def get_inputs(self: torch.nn.Module, inputs: Any):
241
+ nonlocal a_input
242
+ a_input = inputs
243
+
244
+ # Use forward hook to get the inputs to the submodule
245
+ handle = submodule.register_forward_pre_hook(get_inputs)
246
+ main_module(*self.sample_input)
247
+ handle.remove()
248
+
249
+ b_input = a_input
250
+
251
+ if not self.settings.accumulate_error:
252
+ return a_input, a_input
253
+
254
+ return a_input, b_input
255
+
256
+ def _tag_nodes(self, selected_nodes: NodeSet):
257
+ """
258
+ Tag selected nodes with tag "minimize". Nodes with the same tags will
259
+ be split to the same submodule afterwards.
260
+
261
+ Args:
262
+ selected_nodes: Nodes that we want to minimize. We will tag those nodes
263
+ with "minimize", all preceding nodes with "main_0" and all following
264
+ nodes with "main_1".
265
+ """
266
+ for node in self.module.graph.nodes:
267
+ if node.op not in CALLABLE_NODE_OPS:
268
+ continue
269
+
270
+ if node in selected_nodes:
271
+ node.tag = "minimize"
272
+ elif any(
273
+ n.tag in {"minimize", "main_1"}
274
+ for n in node.all_input_nodes
275
+ if n.op in CALLABLE_NODE_OPS
276
+ ):
277
+ node.tag = "main_1"
278
+ else:
279
+ node.tag = "main_0"
280
+
281
+ def _build_submodule(self, nodes: NodeSet) -> Tuple[torch.fx.GraphModule, str]:
282
+ """
283
+ Split self.module so that one submodule consists of `nodes` and only `nodes`.
284
+
285
+ Args:
286
+ nodes: Nodes that we want to include in the minimize submodule.
287
+
288
+ Returns:
289
+ split_module (torch.fx.GraphModule): the module after split.
290
+ submodule_name (str): the name of the submodule that consists of `nodes`.
291
+ """
292
+ # Color provided nodes
293
+ self._tag_nodes(nodes)
294
+
295
+ # Split module based on coloring
296
+ split_module = split_by_tags(self.module, ["main_0", "minimize", "main_1"])
297
+
298
+ # Find submodule containing colored nodes
299
+ submodule_name: str = ""
300
+ for child_name, _ in split_module.named_children():
301
+ # Skip submodules we're not interested in at the moment
302
+ if "minimize" not in child_name:
303
+ continue
304
+
305
+ if submodule_name == "":
306
+ submodule_name = child_name
307
+ else:
308
+ raise FxNetMinimizerBadModuleError(
309
+ f"Expected only one minimize submodule with nodes {nodes}"
310
+ )
311
+
312
+ if submodule_name == "":
313
+ raise FxNetMinimizerBadModuleError(
314
+ f"Minimize submodule was not found with nodes {nodes}"
315
+ )
316
+
317
+ return split_module, submodule_name
318
+
319
+ def _run_and_compare(
320
+ self, split_module: torch.fx.GraphModule, submod_name: str, output_names: Names
321
+ ):
322
+ """
323
+ Run the submodule in `split_module` that has name `submod_name`
324
+ using `self.run_a` and `self.run_b` and compare their results.
325
+
326
+ Args:
327
+ split_module: Main module that contains the minimize submodule.
328
+ submod_name: Name of the minimize submodule.
329
+ output_names: Names of the node we want to output. If None, we
330
+ will use the original output.
331
+ """
332
+ submodule = getattr(split_module, submod_name)
333
+ a_input, b_input = self._get_submod_inputs(split_module, submod_name)
334
+
335
+ if len(self.reports) == 0:
336
+ self.reports.append([])
337
+ self.iteration = 1
338
+
339
+ report = self.reports[self.iteration - 1]
340
+ report.append("Run and compare ...")
341
+
342
+ if output_names:
343
+ output_nodes: NodeList = []
344
+ for node in submodule.graph.nodes:
345
+ if node.op == "output":
346
+ submodule.graph.erase_node(node)
347
+
348
+ if node.name in output_names:
349
+ output_nodes.append(node)
350
+
351
+ submodule.graph.output(
352
+ output_nodes[0] if len(output_nodes) == 1 else tuple(output_nodes)
353
+ )
354
+ submodule.graph.lint()
355
+ submodule.recompile()
356
+
357
+ # Use name of args in output node as key to store comparison result
358
+ for node in submodule.graph.nodes:
359
+ if node.op == "output":
360
+ result_key = map_arg(node.args, lambda x: x.name)
361
+
362
+ try:
363
+ a_result = self.run_a(submodule, a_input)
364
+ b_result = self.run_b(submodule, b_input)
365
+ self._store_outputs(a_result, b_result, submodule)
366
+ except Exception as e:
367
+ report.append(f"Exception raised when running {submod_name}: {e}")
368
+ raise FxNetMinimizerRunFuncError( # noqa: TRY200
369
+ f"Exception raised when running {submod_name}: {e}"
370
+ )
371
+
372
+ # Compare results
373
+ names: Names = output_names
374
+ if output_names is None:
375
+ names = [str(v) for v in result_key] # type: ignore[possibly-undefined]
376
+
377
+ numeric_result, bool_result = self.compare_fn(a_result, b_result, names)
378
+
379
+ self.results[result_key] = numeric_result # type: ignore[possibly-undefined]
380
+ report.append(f"Numerical accuracy = {numeric_result}")
381
+ if not bool_result:
382
+ report.append(f"Result mismatch for {result_key}")
383
+ if self.module_exporter:
384
+ self.module_exporter(
385
+ List[torch.Tensor](a_input), submodule, str(result_key[0]) + "_cpu",
386
+ )
387
+ self.module_exporter(
388
+ List[torch.Tensor](b_input), submodule, str(result_key[0]) + "_acc",
389
+ )
390
+ raise FxNetMinimizerResultMismatchError(f"Result mismatch for {result_key}")
391
+
392
+ def _binary_search_impl(
393
+ self, all_nodes: NodeList, start_idx: int, end_idx: int
394
+ ) -> NodeSet:
395
+ """
396
+ Recursive binary search implementation.
397
+ """
398
+ nodes: NodeList = all_nodes[start_idx:end_idx]
399
+
400
+ report: List[str] = []
401
+ self.reports.append(report)
402
+ self.iteration += 1
403
+ report.append(f"Binary search iteration {self.iteration}.")
404
+ report.append(
405
+ f"From node index {start_idx} to {end_idx-1}. "
406
+ f"Size of the interested node list is {len(nodes)}"
407
+ )
408
+
409
+ cur_nodes: NodeSet = set(nodes)
410
+
411
+ for node in nodes:
412
+ if node in self.fusions:
413
+ cur_nodes.update(self.fusions[node])
414
+
415
+ try:
416
+ split_module, submod_name = self._build_submodule(cur_nodes)
417
+ self._run_and_compare(split_module, submod_name, [])
418
+ except (FxNetMinimizerRunFuncError, FxNetMinimizerResultMismatchError):
419
+
420
+ if len(nodes) == 1:
421
+ report.append(
422
+ f"This is the last node in the sub-module. "
423
+ f"Search in the current branch is successful with culprit = {cur_nodes}."
424
+ )
425
+ self.print_report(report)
426
+ return cur_nodes
427
+
428
+ report.append(
429
+ "Proceed to split and lower the halves of the current "
430
+ "sub-module individually."
431
+ )
432
+ self.print_report(report)
433
+
434
+ mid = len(nodes) // 2
435
+ culprits = self._binary_search_impl(all_nodes, start_idx, start_idx + mid)
436
+
437
+ if len(culprits) != 0 and not self.settings.find_all:
438
+ return culprits
439
+
440
+ culprits = self._binary_search_impl(all_nodes, start_idx + mid, end_idx)
441
+
442
+ if len(culprits) == 0:
443
+ report.append(
444
+ f"Further split and lowering found no errors. "
445
+ f"Unable to minimize the submodule with list of nodes: {nodes}"
446
+ )
447
+ self.print_report(report)
448
+
449
+ return culprits
450
+ else:
451
+ report.append("No discrepancy found.")
452
+ self.print_report(report)
453
+ return set()
454
+
455
+ def _binary_traverse(self, nodes: NodeList) -> NodeSet:
456
+ """
457
+ Binary search on `nodes` for culprit.
458
+ """
459
+ return self._binary_search_impl(nodes, 0, len(nodes))
460
+
461
+ def _sequential_traverse(self, nodes: NodeList) -> NodeSet:
462
+ """
463
+ Traverse `nodes` one by one and determine if any of them is a culprit.
464
+ """
465
+ culprits: NodeSet = set()
466
+
467
+ for node in nodes:
468
+ report: List[str] = []
469
+ self.reports.append(report)
470
+ self.iteration += 1
471
+ report.append(f"Sequential traverse iteration {self.iteration}.")
472
+ report.append(f"Visit node: {node.name}")
473
+
474
+ _LOGGER.info("Visit node: %s", node.name)
475
+ cur_nodes: NodeSet = {node}
476
+
477
+ if node in self.fusions:
478
+ cur_nodes = self.fusions[node]
479
+
480
+ try:
481
+ split_module, submod_name = self._build_submodule(cur_nodes)
482
+ self._run_and_compare(split_module, submod_name, [node.name])
483
+ self.print_report(report)
484
+ except (FxNetMinimizerResultMismatchError):
485
+ culprits.add(node)
486
+ report.append(f"Found culprit from numeric error: {node}")
487
+ self.print_report(report)
488
+ if not self.settings.find_all:
489
+ return culprits
490
+ except (FxNetMinimizerRunFuncError):
491
+ culprits.update(cur_nodes)
492
+ report.append(f"Found culprit from run error: {node}")
493
+ self.print_report(report)
494
+ if not self.settings.find_all:
495
+ return culprits
496
+
497
+ return culprits
498
+
499
+ def _defined_traverse(self, nodes: NodeList) -> NodeSet:
500
+ """
501
+ run user defined `nodes` and determine if it is a culprit.
502
+ """
503
+ culprits: NodeSet = set()
504
+
505
+ first_node_name = nodes[0].name
506
+ output_node_name = nodes[-1].name
507
+ report = [f"Defined graph from {first_node_name} to {output_node_name}"]
508
+ cur_nodes: NodeSet = set(nodes)
509
+ try:
510
+ split_module, submod_name = self._build_submodule(cur_nodes)
511
+ self._run_and_compare(split_module, submod_name, [output_node_name])
512
+ self.print_report(report)
513
+ except (FxNetMinimizerResultMismatchError, FxNetMinimizerRunFuncError):
514
+ report.append(f"Found culprit {cur_nodes}")
515
+ self.print_report(report)
516
+ return culprits
517
+
518
+ return culprits
519
+
520
+    def _accumulate_traverse(self, nodes: NodeList) -> NodeSet:
+        culprits: NodeSet = set()
+        nodes_to_run: NodeSet = set()
+
+        # find_all is not supported for accumulate traversal because all the
+        # ops run on NNPI. So we return after the first op that raises error.
+        if self.settings.find_all:
+            print("'Find All' mode is not supported in accumulate traversal.")
+            return culprits
+
+        for node in nodes:
+            report: List[str] = []
+            self.reports.append(report)
+            self.iteration += 1
+            report.append(f"Accumulate traverse iteration {self.iteration}.")
+
+            nodes_to_run.add(node)
+
+            node_name = node.name
+            if node_name is not None and isinstance(node_name, tuple):
+                node_name = node_name[0]
+            assert node_name is not None and isinstance(
+                node_name, str
+            ), f"minimize: node_name: {node_name}"
+
+            report.append(f"Add node: {node_name}")
+
+            try:
+                split_module, submod_name = self._build_submodule(nodes_to_run)
+                self._run_and_compare(split_module, submod_name, [node_name])
+                self.print_report(report)
+            except (FxNetMinimizerResultMismatchError, FxNetMinimizerRunFuncError):
+                culprits.add(node)
+                report.append(f"Found culprit {node}")
+                self.print_report(report)
+                return culprits
+
+        return culprits
+
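+    # Sketch of the accumulation, assuming hypothetical nodes [n0, n1, n2]:
+    #   iteration 1: run {n0}          -> ok
+    #   iteration 2: run {n0, n1}      -> ok
+    #   iteration 3: run {n0, n1, n2}  -> error -> return {n2}
+    # i.e. the submodule grows by one node per iteration, and the traversal
+    # stops at the first node whose addition breaks the run or the comparison.
+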
+    def _skip_traverse_impl(self, all_nodes: NodeList, start_idx: int, end_idx: int) -> NodeSet:
+        """
+        Run and compare the block of nodes between `start_idx` and `end_idx`
+        (a stretch of the graph that contains no skipped nodes).
+        """
+        culprits: NodeSet = set()
+        nodes: NodeList = all_nodes[start_idx:end_idx]
+
+        report: List[str] = []
+        self.reports.append(report)
+        self.iteration += 1
+        report.append(f"Nodes block {self.iteration}.")
+        report.append(
+            f"From node index {start_idx} to {end_idx - 1}. "
+            f"Size of the interested node list is {len(nodes)}"
+        )
+
+        cur_nodes: NodeSet = set(nodes)
+
+        for node in nodes:
+            if node in self.fusions:
+                cur_nodes.update(self.fusions[node])
+
+        try:
+            split_module, submod_name = self._build_submodule(cur_nodes)
+            self._run_and_compare(split_module, submod_name, [])
+        except FxNetMinimizerResultMismatchError:
+            culprits.update(cur_nodes)
+            report.append(f"Found culprit from numeric error: {cur_nodes}")
+            self.print_report(report)
+            return culprits
+        except FxNetMinimizerRunFuncError:
+            culprits.update(cur_nodes)
+            report.append(f"Found culprit from run error: {node}")
+            self.print_report(report)
+            return culprits
+        else:
+            report.append("No discrepancy found.")
+            self.print_report(report)
+            return set()
+
+    def _skip_traverse(self, all_nodes: NodeList, skip_nodes: List) -> NodeSet:
+        """
+        Traverse `all_nodes`, running and comparing each maximal block of
+        consecutive nodes whose names are not in `skip_nodes`.
+        """
+        start_idx = 0
+        num_nodes = len(all_nodes)
+        idx = 0
+        culprits = set()
+        while idx < num_nodes:
+            node = all_nodes[idx]
+            if node.name in skip_nodes:  # skip the node
+                if idx > start_idx:
+                    # Accumulate culprits across blocks instead of overwriting
+                    # the results of earlier blocks.
+                    culprits.update(self._skip_traverse_impl(all_nodes, start_idx, idx))
+                start_idx = idx + 1
+            elif idx == num_nodes - 1 and start_idx <= idx:  # last node
+                culprits.update(self._skip_traverse_impl(all_nodes, start_idx, idx + 1))
+            idx += 1
+
+        return culprits
+
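+    # Sketch of the partitioning, assuming hypothetical node names: with
+    # all_nodes = [a, b, c, d, e] and skip_nodes = ["c"], the traversal runs
+    # _skip_traverse_impl over the block [a, b] and then over [d, e]; node "c"
+    # itself is never lowered, and culprits from both blocks are accumulated.
+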
+    def _collect_nodes(self, start: Optional[str], end: Optional[str]) -> NodeList:
+        """
+        Collect the nodes in the model between the node named `start` and the
+        node named `end`, inclusive of both endpoints.
+        """
+        nodes: NodeList = []
+        add_node = start is None
+
+        for node in self.module.graph.nodes:
+            if node.op not in CALLABLE_NODE_OPS:
+                continue
+
+            if node.name == start:
+                add_node = True
+
+            if add_node:
+                nodes.append(node)
+
+            if node.name == end:
+                break
+
+        return nodes
+
+    def run_nodes(self, start: Optional[str] = None, end: Optional[str] = None):
+        """
+        Run part of the model from `start` node to `end` node. If `start` is None
+        then we start from the beginning of the model. If `end` is None then we
+        stop at the end of the model.
+
+        Args:
+            start: The name of the node which is the first node of the submodule
+                we want to run. If set to None, then we'll start with the first
+                node of the model.
+            end: The name of the node which is the last node of the submodule we
+                want to run. If set to None, we'll end with the last node of the
+                model.
+        """
+        nodes = self._collect_nodes(start, end)
+        cur_nodes = set(nodes)
+
+        for node in nodes:
+            if node in self.fusions:
+                cur_nodes.update(self.fusions[node])
+
+        output_names = []
+        if self.settings.return_intermediate:
+            output_names = [node.name for node in nodes]
+
+        try:
+            split_module, submod_name = self._build_submodule(cur_nodes)
+            self._run_and_compare(split_module, submod_name, output_names)
+        except (
+            FxNetMinimizerRunFuncError,
+            FxNetMinimizerResultMismatchError,
+        ) as e:
+            print(e)
+
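+    # Hypothetical usage (the node names below are assumptions, not taken from
+    # any real graph); `minimizer` would be an instance of a concrete subclass
+    # that implements the framework-specific run functions:
+    #
+    #   minimizer.run_nodes(start="conv1", end="relu_2")  # run a sub-range
+    #   minimizer.run_nodes()                             # run the whole model
+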
+    def print_report(self, report: List[str]):
+        for i, line in enumerate(report):
+            if i > 0:
+                print(" . " + line)
+            else:
+                print(line)
+
+    def print_reports(self):
+        for report in self.reports:
+            self.print_report(report)
+
+    def minimize(
+        self, start: Optional[str] = None, end: Optional[str] = None, skip_nodes: Optional[List] = None,
+    ) -> NodeSet:
+        """
+        Minimize the model from the node named `start` to the node named `end`,
+        based on self.settings. Find culprits that cause FxNetMinimizerRunFuncError
+        or FxNetMinimizerResultMismatchError errors.
+
+        Args:
+            start: The name of the node where we want to start minimizing. If set
+                to None, then we'll start with the first node of the model.
+            end: The name of the node where we want to terminate minimizing. If
+                set to None, we'll end with the last node of the model.
+            skip_nodes: The names of nodes to skip when `traverse_method` is
+                "skip". Required in that mode.
+
+        Returns:
+            nodes: A set of nodes that cause FxNetMinimizerRunFuncError or
+                FxNetMinimizerResultMismatchError errors during minimizing.
+        """
+
+        print(self.settings)
+        print(self.module.graph)
+
+        nodes = self._collect_nodes(start, end)
+
+        if self.settings.traverse_method == "sequential":
+            return self._sequential_traverse(nodes)
+
+        if self.settings.traverse_method == "binary":
+            return self._binary_traverse(nodes)
+
+        if self.settings.traverse_method == "accumulate":
+            return self._accumulate_traverse(nodes)
+
+        if self.settings.traverse_method == "skip":
+            if skip_nodes is None:
+                raise RuntimeError("'skip_nodes' can't be None when 'traverse_method' is 'skip'.")
+            return self._skip_traverse(nodes, skip_nodes)
+
+        if self.settings.traverse_method == "defined":
+            return self._defined_traverse(nodes)
+
+        raise RuntimeError(f"Unknown traverse method {self.settings.traverse_method}!")