Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
- env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/interpreter.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/traceback.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_sym_dispatch_mode.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/debug.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/merge_matmul.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/proxy_tensor.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/recording.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/sym_node.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/_config.py +42 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/_sym_dispatch_mode.py +58 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/accelerator_partitioner.py +1078 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/const_fold.py +289 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/debug.py +31 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/graph_gradual_typechecker.py +914 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/merge_matmul.py +171 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/meta_tracer.py +268 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/z3_types.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py +557 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_generator.py +1281 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py +1040 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/operation.py +14 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py +348 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/util.py +52 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/z3_types.py +29 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/normalize.py +162 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/optimization.py +405 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/partitioner_utils.py +317 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/proxy_tensor.py +924 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/recording.py +453 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/refinement_types.py +16 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/rewriter.py +121 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/schema_type_annotation.py +111 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/sym_node.py +1145 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/symbolic_shapes.py +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__init__.py +4 -0
env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.11 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc
ADDED
Binary file (1.21 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc
ADDED
Binary file (33.8 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc
ADDED
Binary file (823 Bytes)

env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc
ADDED
Binary file (216 Bytes)

env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc
ADDED
Binary file (54.7 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/interpreter.cpython-310.pyc
ADDED
Binary file (20 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc
ADDED
Binary file (25.5 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc
ADDED
Binary file (14.2 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc
ADDED
Binary file (10.3 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc
ADDED
Binary file (3.77 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/traceback.cpython-310.pyc
ADDED
Binary file (2.34 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__init__.py
ADDED
File without changes

env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_sym_dispatch_mode.cpython-310.pyc
ADDED
Binary file (1.45 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc
ADDED
Binary file (29.1 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/debug.cpython-310.pyc
ADDED
Binary file (1.23 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/merge_matmul.cpython-310.pyc
ADDED
Binary file (4.52 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc
ADDED
Binary file (9.19 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/proxy_tensor.cpython-310.pyc
ADDED
Binary file (26.3 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/recording.cpython-310.pyc
ADDED
Binary file (9.1 kB)

env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/sym_node.cpython-310.pyc
ADDED
Binary file (26.6 kB)
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/_config.py
ADDED
@@ -0,0 +1,42 @@
+import os
+import sys
+
+# [@compile_ignored: debug] Uses z3 for validating the guard optimizations transformations.
+translation_validation = (
+    os.environ.get("TORCHDYNAMO_TRANSLATION_VALIDATION", "0") == "1"
+)
+# Timeout (in milliseconds) for z3 finding a solution.
+# [@compile_ignored: debug]
+translation_validation_timeout = int(
+    os.environ.get("TORCHDYNAMO_TRANSLATION_VALIDATION_TIMEOUT", "600000")
+)
+# Disables bisection for translation validation.
+#
+# Translation validation bisection is enabled by default, if translation validation
+# is also enabled. This should help finding guard simplification issues. However,
+# since validation uses Z3 for bisecting, it might take a lot of time.
+#
+# Set this configuration option so as to avoid bisecting.
+# [@compile_ignored: debug]
+translation_validation_no_bisect = (
+    os.environ.get("TORCHDYNAMO_TRANSLATION_NO_BISECT", "0") == "1"
+)
+# Checks whether replaying ShapeEnv events on a freshly constructed one yields
+# the a ShapeEnv with the same state. This should be used only in testing.
+check_shape_env_recorded_events = False
+
+
+# [@compile_ignored: debug] Show a warning for every specialization
+print_specializations = False
+
+# wraps (un)equalities with 'Not' class after recording the correct expression
+# in the FX graph. This should incorrectly construct the divisible and replacement
+# lists, and incorrectly issue guards.
+inject_EVALUATE_EXPR_flip_equality_TESTING_ONLY = False
+
+# [@compile_ignored: debug] Validate that ShapeEnv's version key is updated correctly
+validate_shape_env_verison_key = False
+
+from torch.utils._config_module import install_config_module
+
+install_config_module(sys.modules[__name__])
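The flags above are plain module globals read from the environment at import time, and install_config_module turns the module into a patchable config object. A minimal sketch of driving them (assuming a PyTorch build that ships this module; the printed values are what the defaults above imply):

import os

# Env vars must be set before the module is first imported.
os.environ["TORCHDYNAMO_TRANSLATION_VALIDATION"] = "1"
os.environ["TORCHDYNAMO_TRANSLATION_VALIDATION_TIMEOUT"] = "120000"

from torch.fx.experimental import _config

print(_config.translation_validation)           # True
print(_config.translation_validation_timeout)   # 120000
# install_config_module makes the flags patchable at runtime as well:
_config.print_specializations = True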
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/_sym_dispatch_mode.py
ADDED
@@ -0,0 +1,58 @@
+from typing import List, Type
+
+__all__ = ["SymDispatchMode", "handle_sym_dispatch", "sym_function_mode"]
+
+SYM_FUNCTION_MODE = None
+
+
+# SymDispatchMode gets invoked whenever an operation is processed on
+# a PySymInt. When this occurs, you get called at __sym_dispatch__
+# with the operation in question. This is symmetric to TorchDispatchMode
+# but with some caveats:
+#
+# - In TorchDispatchMode, you get the same arguments as what a user
+#   invoked your API with; e.g., if you call torch.ops.aten.foo(a, b),
+#   you get (a, b) as args to your call. In SymDispatchMode, if
+#   you call a + b (where a and b are SymInts), you will get
+#   (a.node, b.node) as your args (these are PySymInts)
+#
+# - SymInt/PySymInt don't have FX proxy support (unlike, e.g., Tensor).
+#   So you have to manually call Tracer/create_node to write into
+#   the graph. See ProxySymDispatchMode for an example
+#
+class SymDispatchMode:
+    def __sym_dispatch__(self, func, types, args, kwargs):
+        raise NotImplementedError()
+
+    def __enter__(self):
+        global SYM_FUNCTION_MODE
+        old = SYM_FUNCTION_MODE
+        if hasattr(self, "inner"):
+            raise RuntimeError(
+                f"{self} has already been used as a mode. Please use a fresh version"
+            )
+        else:
+            self.inner = old
+        SYM_FUNCTION_MODE = self
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        global SYM_FUNCTION_MODE
+        SYM_FUNCTION_MODE = self.inner
+
+
+def handle_sym_dispatch(func, args, kwargs):
+    global SYM_FUNCTION_MODE
+    mode = sym_function_mode()
+    assert mode
+    SYM_FUNCTION_MODE = mode.inner
+    try:
+        # TODO: properly compute types
+        types: List[Type] = []
+        return mode.__sym_dispatch__(func, types, args, kwargs)
+    finally:
+        SYM_FUNCTION_MODE = mode
+
+
+def sym_function_mode():
+    return SYM_FUNCTION_MODE
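As the comment block explains, a SymDispatchMode subclass only needs __sym_dispatch__; note that handle_sym_dispatch swaps SYM_FUNCTION_MODE back to the outer mode while the handler runs, so the handler can re-invoke the operation without recursing into itself. A minimal logging sketch follows (a hypothetical subclass, not PyTorch's own ProxySymDispatchMode; it assumes func can simply be re-invoked on the node arguments, as the internal callers arrange):

from torch.fx.experimental._sym_dispatch_mode import SymDispatchMode


class LoggingSymDispatchMode(SymDispatchMode):
    def __sym_dispatch__(self, func, types, args, kwargs):
        # args/kwargs hold PySymInt nodes, not SymInts (see the caveats above).
        print(f"sym dispatch: {getattr(func, '__name__', func)} on {args}")
        # Re-invoking func here does not recurse: handle_sym_dispatch has
        # already restored SYM_FUNCTION_MODE to self.inner for this call.
        return func(*args, **kwargs)


# Usage sketch: any SymInt arithmetic performed while the mode is active
# (e.g. during dynamic-shape tracing) is routed through __sym_dispatch__:
#
# with LoggingSymDispatchMode():
#     ...  # code that combines SymInts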
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/accelerator_partitioner.py
ADDED
@@ -0,0 +1,1078 @@
+import operator
+from collections import deque
+from typing import Dict, List, Set, NamedTuple, Tuple, Deque
+
+import torch
+from torch.fx.passes.graph_manipulation import get_size_of_all_nodes
+from torch.fx.experimental.partitioner_utils import (
+    Partition,
+    Device,
+    PartitionerConfig,
+    get_partition_to_latency_mapping,
+    get_latency_of_partitioned_graph,
+    NodeLatency,
+    get_extra_size_of,
+    PartitionMode,
+)
+from torch.fx.graph_module import GraphModule
+from torch.fx.node import Node, map_arg
+from torch.fx.passes.split_module import split_module
+
+
+class DAGNode:
+    """DAGNode class maintains useful information for a partition (submodule),
+    and its input submodules and output submodules.
+    """
+
+    def __init__(
+        self,
+        submodule_node: Node,
+        input_nodes: List[Node],
+        output_nodes: List[Node],
+        logical_device_ids: List[int],
+        size_bytes: int,
+    ) -> None:
+        self.submodule_node: Node = submodule_node
+        self.input_nodes: List[Node] = input_nodes
+        self.output_nodes: List[Node] = output_nodes
+        self.logical_device_ids: List[int] = logical_device_ids
+        self.size_bytes = size_bytes
+
+    def __str__(self) -> str:
+        return str(self.submodule_node)
+
+
+class DAG:
+    """DAG class contains all the DAG nodes"""
+
+    def __init__(self) -> None:
+        self.nodes: List[DAGNode] = []
+
+    def create_node(
+        self,
+        submodule_node: Node,
+        input_nodes: List[Node],
+        output_nodes: List[Node],
+        logical_devices: List[int],
+        size_bytes: int,
+    ) -> None:
+        node = DAGNode(
+            submodule_node, input_nodes, output_nodes, logical_devices, size_bytes
+        )
+        self.nodes.append(node)
+
+
+class PartitionResult(NamedTuple):
+    """NameTuple used for returning DAG and a new fx module"""
+
+    dag: DAG
+    module_with_submodules: GraphModule
+
+
+"""Followings are some helper functions for partition manipulation"""
+
+
+def reset_partition_device(partitions):
+    for partition in partitions:
+        partition.logical_device_ids = []
+
+
+def combine_two_partitions(
+    partition_0: Partition, partition_1: Partition, partitions: List[Partition]
+) -> None:
+    """Given a list of partitions and its two partitions,
+    combine these two partitions into a new one appending to the partitions
+    and remove the previous two partitions from the list of partitions
+    """
+    partition = Partition(len(partitions))
+    partition.nodes = partition_0.nodes.union(partition_1.nodes)
+    partition.recalculate_mem_size()
+    partitions.append(partition)
+    partitions.remove(partition_0)
+    partitions.remove(partition_1)
+    reorganize_partitions(partitions)
+    return
+
+
+def set_parents_and_children(partitions: List[Partition]) -> None:
+    """Given a list of partitions, mark parents and children for each partition"""
+    # Go through all nodes in a partition.
+    # If a node's user is in other partition,
+    # then the other partition is this partition's children.
+    # This partition is the other partition's parent
+    for partition in partitions:
+        partition.children = set()
+        partition.parents = set()
+    for partition in partitions:
+        for node in partition.nodes:
+            # For each node in the current partition, find its users
+            users = node.users
+            for n in users:
+                # Find which the partition the user node belongs to.
+                # Note that if the node itself is also belongs to that partition,
+                # that partition is not the child of the current partition
+                for p in partitions:
+                    if p != partition and n in p.nodes and node not in p.nodes:
+                        partition.children.add(p)
+                        p.parents.add(partition)
+    return
+
+
+def reorganize_partitions(partitions: List[Partition]) -> None:
+    """Given a list of partitions, reorganize partition id,
+    its parents and its children for each partition
+    """
+    # Rearrange partition ids
+    for i, partition in enumerate(partitions):
+        partition.partition_id = i
+    set_parents_and_children(partitions)
+    return
+
+
+def get_bfs_level_partition(partitions: List[Partition]) -> None:
+    """Given a list of partitions,
+    mark the bfs level for each partition
+    """
+    current_level: Set[Partition] = set()
+    visited: Set[Partition] = set()
+    for partition in partitions:
+        # If a partition has no parent, it should be in root level
+        if len(partition.parents) == 0:
+            current_level.add(partition)
+    next_level: Set[Partition] = set()
+    level = 0
+    # bfs
+    while current_level:
+        partition = current_level.pop()
+        partition.bfs_level = level
+        visited.add(partition)
+        children = partition.children
+        for child in children:
+            if child not in next_level:
+                next_level.add(child)
+        if not current_level:
+            current_level = next_level.copy()
+            next_level = set()
+            level += 1
+    return
+
+
+def get_node_to_partition_mapping(partitions: List[Partition]) -> Dict[Node, int]:
+    """Given a list of partitions,return node to partition mapping"""
+    node_to_partition: Dict[Node, int] = {}
+    for partition in partitions:
+        for node in partition.nodes:
+            node_to_partition[node] = partition.partition_id
+    return node_to_partition
+
+
+def get_logical_id_to_device(devices: List[Device]) -> Dict[int, Device]:
+    """Get a mapping from device logical ID to Device object."""
+    logical_id_to_device: Dict[int, Device] = {}
+    for d in devices:
+        logical_id_to_device[d.logical_id] = d
+    return logical_id_to_device
+
+
+def get_device_partition_stats(
+    partitions: List[Partition], devices: List[Device]
+) -> Tuple[Dict[Device, List[Partition]], Dict[Device, int], List[Partition]]:
+    """Given a list of partitions and a list of devices, returns:
+    1. A mapping from device to partitions on it;
+    2. A mapping from device to its remaining memory size;
+    3. A list of partitions that do not have a device.
+    """
+    # logical id to device
+    logical_id_to_device = get_logical_id_to_device(devices)
+    # Track partitions on device
+    device_to_partitions: Dict[Device, List[Partition]] = {}
+    # Track device's left mem size
+    device_to_left_mem_bytes: Dict[Device, int] = {}
+    for d in devices:
+        device_to_partitions[d] = []
+        device_to_left_mem_bytes[d] = d.available_mem_bytes
+
+    # Deal with the partitions that already have a device
+    # and also collect all partitions without a device (no_device_partitions)
+    no_device_partitions = []
+    for partition in partitions:
+        if partition.logical_device_ids != []:
+            for logical_id in partition.logical_device_ids:
+                device = logical_id_to_device[logical_id]
+                device_to_partitions[device].append(partition)
+                device_to_left_mem_bytes[device] -= partition.used_mem_bytes
+        else:
+            no_device_partitions.append(partition)
+
+    return (
+        device_to_partitions,
+        device_to_left_mem_bytes,
+        no_device_partitions,
+    )
+
+
+def get_device_to_partitions_mapping(
+    partitions: List[Partition], devices: List[Device]
+):
+    """Given a list of partitions and a list of devices,
+    map each partition into a device.
+    """
+
+    def calculate_extra_mem_bytes_needed_for(
+        partition: Partition, partitions: List[Partition]
+    ):
+        all_nodes: Set[Node] = set()
+        for p in partitions:
+            all_nodes = all_nodes.union(p.nodes)
+        if len(all_nodes) == 0:
+            return partition.used_mem_bytes
+        all_nodes = all_nodes.union(partition.nodes)
+        extra_size_needed = 0
+        for node in partition.nodes:
+            extra_size_needed += get_extra_size_of(node, all_nodes)
+        return extra_size_needed
+
+    def find_device_for(partition: Partition):
+        """Given a partition, find a logical device for the partition
+        The algorithm is to put the partition on the device
+        that has just enough mem left for that partition.
+        device_to_left_mem_bytes is a dictionary between device and its left mem size
+        sorted by its left mem size
+        """
+        for d in device_to_left_mem_bytes:
+            extra_size_needed = calculate_extra_mem_bytes_needed_for(
+                partition, device_to_partitions[d]
+            )
+            if extra_size_needed < device_to_left_mem_bytes[d]:
+                device_to_partitions[d].append(partition)
+                partition.logical_device_ids.append(d.logical_id)
+                device_to_left_mem_bytes[d] -= extra_size_needed
+                return True
+        return False
+
+    (
+        device_to_partitions,
+        device_to_left_mem_bytes,
+        no_device_partitions,
+    ) = get_device_partition_stats(partitions, devices)
+
+    # Find devices for all the partitions without a device
+    found_device = True
+    for partition in no_device_partitions:
+        device_to_left_mem_bytes = dict(sorted(device_to_left_mem_bytes.items(), key=lambda item: item[1]))
+        found_device = find_device_for(partition)
+        if not found_device:
+            break
+    return found_device
+
+
+def check_dependency(partition):
+    """Given a partition,check if there is a circular dependency on
+    this partition using bfs
+    """
+    visited: Set[Partition] = {partition}
+    queue: Deque[Partition] = deque([partition])
+    while queue:
+        p = queue.popleft()
+        for child in p.children:
+            if child == partition:
+                return True
+            else:
+                if child not in visited:
+                    visited.add(child)
+                    queue.append(child)
+    return False
+
+
+class Partitioner:
+    """A fx module may not fit into one device.
+    Partitioner class helps partition one fx module into submodules (partitions),
+    so that the submodules can be executed crossing different accelerators.
+    The main function of this class is self.partition_graph.
+    It partitions the fx module based on the scheme specified in partition_config
+    A DAG structure is returned
+    along with a new fx module with submodule nodes.
+    """
+
+    def __init__(self) -> None:
+        self.partitions: List[Partition] = []
+        self.node_to_partition: Dict[Node, int] = {}
+        self.devices: List[Device] = []
+
+    def partition_graph(
+        self,
+        fx_module: GraphModule,
+        torch_module: torch.nn.Module,
+        partitioner_config: PartitionerConfig,
+    ) -> PartitionResult:
+        """Given the fx module, torch module and partitioner_config,
+        find the partitions, do the partitions,
+        and then return a DAG and a new fx module with submodule nodes (partitions)
+        """
+        self.graph_module = fx_module
+        self.torch_module = torch_module
+        self.devices = partitioner_config.devices
+        if len(self.devices) == 0:
+            raise RuntimeError("No devices")
+        # Tag the size in bytes to all nodes in the graph_module.
+        get_size_of_all_nodes(self.graph_module)
+        # Check if there are op nodes in the fx module
+        nodes = self.graph_module.graph.nodes
+        if all(node.op in {"placeholder", "get_attr", "output"} for node in nodes):
+            raise RuntimeError("No Partition since no operations in the module")
+        # Calculate total size of the fx module
+        total_size_of_graph = 0
+        for node in nodes:
+            if node.op == "output":
+                break
+            total_size_of_graph += node.size_bytes.total_size
+        # Find the device with the max mem size
+        device_with_max_mem = max(self.devices, key=lambda d: d.available_mem_bytes)
+        # AOT based partition
+        if partitioner_config.mode == PartitionMode.aot_based:
+            self.aot_based_partition(
+                partitioner_config.node_to_partition_mapping,
+                partitioner_config.partition_to_logical_device_mapping,
+            )
+        # Single partition if the whole module can be fit into one device
+        elif total_size_of_graph <= device_with_max_mem.available_mem_bytes:
+            self.find_single_partition(
+                total_size_of_graph, logical_device_id=device_with_max_mem.logical_id
+            )
+        elif total_size_of_graph > sum([d.available_mem_bytes for d in self.devices]):
+            raise RuntimeError("Devices have no enough memory for the module")
+        else:
+            # Sparse nn based partition
+            if partitioner_config.mode == PartitionMode.sparse_nn:
+                available_mem_bytes = self.devices[0].available_mem_bytes
+                if not all(
+                    device.available_mem_bytes == available_mem_bytes
+                    for device in self.devices
+                ):
+                    raise RuntimeError("All devices must have same memory size!")
+                # sparse_nn_partition only support same memory size
+                # TODO: add different size support for sparse_nn_partition
+                self.sparse_nn_partition(available_mem_bytes)
+            # Cost aware partition
+            elif partitioner_config.mode == PartitionMode.cost_aware:
+                self.cost_aware_partition(
+                    partitioner_config.transfer_rate_bytes_per_sec,
+                    partitioner_config.node_to_latency_mapping,
+                )
+            # KL based partition
+            elif partitioner_config.mode == PartitionMode.kl_based:
+                self.kl_based_partition(
+                    partitioner_config.transfer_rate_bytes_per_sec,
+                    partitioner_config.node_to_latency_mapping,
+                )
+            else:
+                self.size_based_partition()
+
+        # Saturate host if possible.
+        if partitioner_config.saturate_host:
+            self.saturate_host()
+
+        # Partition the graph module based on the partition assignment.
+        module_with_submodules = self.do_partition()
+
+        # The DAG contains DAGNodes with info of each partition's input nodes, output nodes
+        # and how partitions are connected.
+        dag = self.dump_dag(module_with_submodules)
+        ret = PartitionResult(dag, module_with_submodules)
+        return ret
+
+    def find_single_partition(
+        self, total_size_of_graph, logical_device_id: int = 0
+    ) -> None:
+        """Fit the whole fx module into one device"""
+        partition_0 = self.create_partition()
+        for node in self.graph_module.graph.nodes:
+            if node.op == "output":
+                # Skip the output node, but there can
+                # be nodes after the output in certain cases.
+                continue
+            partition_0.nodes.add(node)
+        partition_0.used_mem_bytes = total_size_of_graph
+        partition_0.logical_device_ids = [logical_device_id]
+        # Get the node to partition mapping
+        self.node_to_partition = get_node_to_partition_mapping(self.partitions)
+        return
+
+    def size_based_partition(self) -> None:
+        """This method is to partition the fx module based on memory size.
+        It uses greedy approach. The result may not be the best.
+        The basic idea is:
+        Step 1:
+        Find a device which has enough memory to fit the current node, create a empty partition
+        with the size of that device.
+        Then keep adding the following nodes into the partition until the partition is full.
+        Step 2:
+        Repeat Step 1 until no device left
+        Step 3:
+        If some nodes are left, create a partition for each left node (single node partition).
+        and then try to map those partitions into logical devices with enough mem left.
+        """
+
+        def find_device_based_on_size(node) -> Device:
+            """Given a node, this function is to find a logical device
+            that could fit the node.
+            """
+            mem_size_needed = get_extra_size_of(node, set())
+            device = Device("", -1, -1)
+            for d in self.devices:
+                if (
+                    d not in occupied_devices
+                    and d.available_mem_bytes >= mem_size_needed
+                ):
+                    device = d
+                    break
+            if device.available_mem_bytes < 0:
+                raise RuntimeError(str(node) + "is too large to fit any device")
+            occupied_devices.append(device)
+            return device
+
+        # Track partition and its left mem size
+        partition_to_left_mem_bytes: Dict[Partition, int] = {}
+        # Track all the devices that have been used
+        occupied_devices: List[Device] = []
+        partition = self.create_partition()
+        for node in self.graph_module.graph.nodes:
+            if node.op in {"call_module", "call_method", "call_function"}:
+                # Check if there are devices left
+                if len(self.partitions) <= len(self.devices):
+                    total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
+                    # Check if the current partition is the very first partition
+                    if partition.used_mem_bytes == 0:
+                        # Find a device to fit the first node, return available mem size
+                        device = find_device_based_on_size(node)
+                        occupied_devices.append(device)
+                        # Update partition and its left mem size
+                        partition_to_left_mem_bytes[
+                            partition
+                        ] = device.available_mem_bytes
+                        # Update available mem for the current partition
+                        partition.logical_device_ids.append(device.logical_id)
+                    else:
+                        # The current partition is not the first partition
+                        # Check if the current node can fit into current partition
+                        if (
+                            partition_to_left_mem_bytes[partition]
+                            < total_size_of_input_nodes
+                        ):
+                            # Check if no device is left
+                            if len(self.partitions) == len(self.devices):
+                                # No device is left
+                                # Put the previous partitions into a list (non_single_node_partitions)
+                                non_single_node_partitions = self.partitions[:]
+                                # Create the first single node partition for the current node
+                                self.create_single_node_partition(node)
+                                continue
+                            # Some devices are still left
+                            # Create a new partition with a mem size that is enough for the current node
+                            device = find_device_based_on_size(node)
+                            partition = self.create_partition()
+                            total_size_of_input_nodes = get_extra_size_of(
+                                node, partition.nodes
+                            )
+                            partition_to_left_mem_bytes[
+                                partition
+                            ] = device.available_mem_bytes
+                            partition.logical_device_ids.append(device.logical_id)
+                    partition.add_node(node)
+                    partition_to_left_mem_bytes[partition] -= total_size_of_input_nodes
+                # Create single node partitions if no device is left
+                else:
+                    self.create_single_node_partition(node)
+        reorganize_partitions(self.partitions)
+        # Get the node to partition mapping
+        self.node_to_partition = get_node_to_partition_mapping(self.partitions)
+        # Mapping all partitions into device
+        found_partition_to_device_mapping = get_device_to_partitions_mapping(
+            self.partitions, self.devices
+        )
+        if not found_partition_to_device_mapping:
+            raise RuntimeError("Cannot Get a Valid Partition to Logical Device Mapping")
+        return
+
+    def saturate_host(self) -> None:
+        """Saturate host by assigning replicates to unused devices with enough memory.
+        It uses a greedy approach to find a next available set of devices to place all split
+        partitions: For each used device, it searches for an idle device with minimal memory
+        size that can hold all the partition located on that device; If the search is successful
+        for all used devices, it then assigns the new devices' logical ID to the corresponding
+        partition.
+        """
+        (
+            device_to_partitions,
+            device_to_left_mem_bytes,
+            no_device_partitions,
+        ) = get_device_partition_stats(self.partitions, self.devices)
+
+        assert (
+            len(no_device_partitions) == 0
+        ), f"Expect no_device_partitions has 0 device, but get {len(no_device_partitions)}"
+
+        # Devices that hold partitions
+        used_devices = [d for d in self.devices if len(device_to_partitions[d]) > 0]
+        # Track replicates of the assigned devices
+        replicated_device_to_used_device: Dict[Device, Device] = {}
+
+        while len(used_devices) * 2 + len(replicated_device_to_used_device) <= len(
+            self.devices
+        ):
+            # Success flag for this round
+            success = True
+            # Devices that have not been assigned
+            idle_devices = [
+                d
+                for d in self.devices
+                if d not in used_devices and d not in replicated_device_to_used_device
+            ]
+            # Temporary mapping from replicated device to original device
+            temp_replicate_mapping = {}
+
+            # Find a new device to replicate all partitions on an used device
+            for used_device in used_devices:
+                # Idle devices that have enough memory
+                available_devices = [
+                    d
+                    for d in idle_devices
+                    if d.available_mem_bytes
+                    >= used_device.available_mem_bytes
+                    - device_to_left_mem_bytes[used_device]
+                ]
+                if len(available_devices) == 0:
+                    success = False
+                    break
+                new_device = min(available_devices, key=lambda d: d.available_mem_bytes)
+                idle_devices.remove(new_device)
+                temp_replicate_mapping[new_device] = used_device
+
+            if not success:
+                break
+            replicated_device_to_used_device.update(temp_replicate_mapping)
+
+        # Update logical device IDs assigned to the partitions
+        for (
+            replicate_device,
+            original_device,
+        ) in replicated_device_to_used_device.items():
+            logical_id = replicate_device.logical_id
+            for partition in device_to_partitions[original_device]:
+                partition.logical_device_ids.append(logical_id)
+        for p in self.partitions:
+            print(p.logical_device_ids)
+
+    def do_partition(self) -> GraphModule:
+        """Return a new fx module with submodule nodes (partitions)."""
+        module_with_submodules = split_module(
+            self.graph_module,
+            self.torch_module,
+            lambda node: self.node_to_partition[node],
+        )
+        return module_with_submodules
+
+    def dump_dag(self, module_with_submodules: GraphModule) -> DAG:
+        """Return the dag structure and the new fx module with submodules."""
+        dag = DAG()
+        for node in module_with_submodules.graph.nodes:
+            if node.op == "output":
+                break
+            if node.op in {"placeholder", "get_attr"}:
+                continue
+            if node.target == operator.__getitem__:
+                continue
+            input_nodes: Dict[Node, None] = {}
+            map_arg(node.args, input_nodes.setdefault)
+            map_arg(node.kwargs, input_nodes.setdefault)
+            # When a node has two or more output nodes,
+            # it outputs its result to 'getitem' nodes.
+            # Those 'getitem' nodes are the output node for this node.
+            # Otherwise, the output node is this node itself.
+            if len(node.users) > 1:
+                output_nodes = list(node.users)
+            else:
+                output_nodes = [node]
+            partition_id = int(node.name.rsplit("_", 1)[-1])
+            device_ids = self.partitions[partition_id].logical_device_ids
+            size_bytes = self.partitions[partition_id].used_mem_bytes
+            dag.create_node(
+                node, list(input_nodes), output_nodes, device_ids, size_bytes
+            )
+        return dag
+
+    def create_partition(self) -> Partition:
+        """Create a partition and append it to self.partitions."""
+        partition_id = len(self.partitions)
+        partition = Partition(partition_id)
+        self.partitions.append(partition)
+        return partition
+
+    def create_single_node_partition(self, node):
+        """Create a partition for a single node"""
+        partition = self.create_partition()
+        partition.add_node(node)
+        return
+
+    def sparse_nn_partition(self, available_mem_bytes: int) -> None:
+        """This method partition a sparse nn module.
+        It is size based partition but different from size_based_partition,
+        it only works when all the devices have same memory size (available_mem_bytes).
+        In the future, devices with different mem sizes will be supported like size_based_partition.
+        It first traverse all the nodes and do the partitions based on the same memory size.
+        If the current partition has no enough memory left for a new op node
+        (call_module, call_method, call_function), a new partition is created.
+        When crossing the boundary between non-embedding nodes and embedding nodes,
+        a new partition is created regardlessly.
+        For example, if the current node is a non-embedding node but the next node is an
+        embedding node, a new partition is created for the next node.
+        After the partition, the partitions are combined as much as possible.
+        The rule is that a non-embedding partition only
+        combines with another non-embedding one.
+        So as the embedding partitions.
+        """
+
+        def combine_partitions_based_on_size(
+            partitions: List[Partition], available_mem_bytes: int
+        ) -> None:
+            """Combining small partitions together to keep as less partitions as possible.
+            Here is an example of the algorithm to do this:
+            Assume some partitions, we first sort them based on partition used memory size.
+            [(partition_4, 1), (partition_3, 1), (partition_2, 2), (partition_1, 7), (partition_0, 9)]
+            The available memory is 10.
+            step 1: self.find_partition_to_combine_based_on_size()
+            First, mark bfs level for each partition
+            Second, look the smallest partition, partition_4: 10 - 1 = 9
+            It means any partition has a used memory equal or less than 9 could combine this partition
+            We go from the largest and selection partition_0.
+            Check the bfs level for two partitions, if the level difference is less than 2,
+            it can be combined.
+            step 2: repeat step 1 until no partitions can be combined
+            """
+            find_combination = True
+            while find_combination:
+                # Sort partitions based on memory size
+                sorted_partitions = sorted(partitions, key=lambda p: p.used_mem_bytes)
+                # Mark bfs level
+                get_bfs_level_partition(self.partitions)
+                find_combination, partitions = find_partition_to_combine_based_on_size(
+                    sorted_partitions, available_mem_bytes, partitions
+                )
+            return
+
+        def calculate_mem_bytes_needed(p1, p2):
+            """Given two partitions, calculate how many mem bytes
+            are needed if two partitions are combined
+            """
+            nodes = p1.nodes.union(p2.nodes)
+            mem_bytes_needed = 0
+            for node in nodes:
+                mem_bytes_needed += get_extra_size_of(node, nodes)
+            return mem_bytes_needed
+
+        def find_partition_to_combine_based_on_size(
+            sorted_partitions: List[Partition],
+            available_mem_bytes: int,
+            partitions: List[Partition],
+        ) -> Tuple[bool, List[Partition]]:
+            """step 1 in combine_partition_based_on_size()"""
+            find_combination = False
+            smallest_partition = sorted_partitions.pop(0)
+            for p in sorted_partitions[::-1]:
+                if abs(smallest_partition.bfs_level - p.bfs_level) <= 1:
+                    # Calculate how many bytes needed if combined
+                    mem_bytes_needed = calculate_mem_bytes_needed(p, smallest_partition)
+                    if mem_bytes_needed <= available_mem_bytes:
+                        combine_two_partitions(p, smallest_partition, self.partitions)
+                        partitions.remove(smallest_partition)
+                        partitions.remove(p)
+                        partitions.append(self.partitions[-1])
+                        find_combination = True
+                        break
+            return find_combination, partitions
+
+        def reset_partition_in_sparse_nn(partition, new_partition=True):
+            """If crossing the boundary between non-embedding nodes and
+            embedding nodes, create a new partition
+            """
+            if in_embedding_region:
+                embedding_partitions.append(partition)
+            else:
+                non_embedding_partitions.append(partition)
+            if new_partition:
+                partition = self.create_partition()
+                partition.left_mem_bytes = available_mem_bytes
+                return partition
+            return None
+
+        def is_embedding_node(node: Node) -> bool:
+            """Check if a node is an embedding node"""
+            if node.op == "call_module":
+                submodule = self.graph_module
+                for atom in str(node.target).split("."):
+                    if not hasattr(submodule, atom):
+                        raise RuntimeError(
+                            f"Module {submodule} has no attribute {atom}"
+                        )
+                    submodule = getattr(submodule, atom)
+                    if "Embedding" in str(submodule):
+                        return True
+            return False
+
+        # Track embedding partitions and non-embedding partitions separately
+        embedding_partitions: List[Partition] = []
+        non_embedding_partitions: List[Partition] = []
+        # A Flag to check the boundary
+        in_embedding_region: bool = False
+        partition = self.create_partition()
+        for node in self.graph_module.graph.nodes:
+            if node.op in {"call_module", "call_method", "call_function"}:
+                # Check if crossing the boundary between embedding nodes and non embedding nodes
+                if is_embedding_node(node) != in_embedding_region:
+                    # Crossing the boundary
+                    # Check if the current partition is an empty partition
+                    if partition.used_mem_bytes != 0:
+                        # The current partition isn't an empty partition. Create a new one.
+                        partition = reset_partition_in_sparse_nn(partition)
+                    in_embedding_region = not in_embedding_region
+                total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
+                if (
+                    total_size_of_input_nodes + partition.used_mem_bytes
+                    > available_mem_bytes
+                ):
+                    partition = reset_partition_in_sparse_nn(partition)
+                    total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
+                    if total_size_of_input_nodes > available_mem_bytes:
+                        raise RuntimeError(
+                            node.target + "is too large to fit into a device"
+                        )
+                partition.add_node(node)
+        reset_partition_in_sparse_nn(partition, new_partition=False)
+        # Set parents and children for partitions
+        set_parents_and_children(self.partitions)
+        # Combining non-embedding partitions
+        combine_partitions_based_on_size(non_embedding_partitions, available_mem_bytes)
+        # Combining embedding partitions
+        combine_partitions_based_on_size(embedding_partitions, available_mem_bytes)
+        total_size_of_non_embedding_partitions = 0
+        for partition in non_embedding_partitions:
+            total_size_of_non_embedding_partitions += partition.used_mem_bytes
+        # Check if devices are enough for all partitions
+        if len(embedding_partitions) > len(self.devices):
+            msg = (
+                "Need "
+                + str(len(embedding_partitions))
+                + " devices, but only "
+                + str(len(self.devices))
+                + " provided"
+            )
+            raise RuntimeError(msg)
+        occupied_devices = []
+        for i, partition in enumerate(embedding_partitions):
+            # Check if all non-embedding partitions can fit into embedding partition devices
+            if (
+                total_size_of_non_embedding_partitions + partition.used_mem_bytes
+                > available_mem_bytes
+            ):
+                raise RuntimeError(
+                    "partition_"
+                    + str(partition.partition_id)
+                    + "(embedding partition) and non embedding partitions can not fit into one device"
+                )
+            else:
+                # Add logical device to the partition
+                partition.logical_device_ids = [self.devices[i].logical_id]
+                occupied_devices.append(self.devices[i].logical_id)
+        # Add logical devices to the non_embedding_partitions
+        for partition in non_embedding_partitions:
+            partition.logical_device_ids = occupied_devices
+        # Get the node to partition mapping
+        self.node_to_partition = get_node_to_partition_mapping(self.partitions)
+        return
+
+    def cost_aware_partition(
+        self,
+        transfer_rate_bytes_per_sec: float,
+        node_to_latency_mapping: Dict[Node, NodeLatency],
+    ) -> None:
+        """This method is to partition the fx module based on the cost.
+        The cost is the total latency of running the whole fx module.
+        In partitioner_utils.py, the cost model is built.
+        The cost aware partition algorithm is:
+        #1. At every beginning, each node is a partition.
+            Then we map all the partitions to the devices
+            and calculate the cost
+        #2. Then try to pre-combine any two of the partitions if the two
+            partitions can be combined.
+            (the bfs level is less than 2 or two partitions are connected and
+            can find partition to device mapping)
+            See if any partition pair could reduce the current cost.
+            Choose the pair that shows the minimum cost and then combine them
+        #3. Repeat #2 until the cost cannot be reduced.
+        """
+
+        def try_combining_partitions(p0_index, p1_index, partitions) -> float:
+            """Given two partitions and a list of partitions, combine these two partitions
+            and see what is the cost of the modified partition list
+            """
+            p0 = partitions[p0_index]
+            p1 = partitions[p1_index]
+            """If two partitions' bfs level are less than 2 or two partitions are connected to each other,
+               then they can be combined
+            """
+            if (
+                (abs(p0.bfs_level - p1.bfs_level) <= 1)
+                or (p0 in p1.parents)
+                or p0 in (p1.children)
+            ):
+                combine_two_partitions(p0, p1, partitions)
+                # Check if a circular dependency exists after combining
+                if check_dependency(partitions[-1]):
+                    return float("inf")
+                # Check if the modified partition list can be mapped to devices after combination
+                reset_partition_device(partitions)
+                found_deivce = get_device_to_partitions_mapping(
+                    partitions, self.devices
+                )
+                if not found_deivce:
+                    return float("inf")
+                # Calculate the new cost
+                partition_to_latency_mapping = get_partition_to_latency_mapping(
+                    partitions, node_to_latency_mapping
+                )
+                cost = get_latency_of_partitioned_graph(
+                    partitions,
+                    partition_to_latency_mapping,
+                    transfer_rate_bytes_per_sec,
+                )
+                return cost
+            # If two partition can not be combined, the cost is inf
+            return float("inf")
+
+        def search_combination(
+            transfer_rate_bytes_per_sec, node_to_latency_mapping
+        ) -> bool:
+            """Given transfer rate between partitions and each node's latency,
+            find two partitions to combine so the cost of the partitions can
+            be reduced.
+            The algorithm is :
+            1. Go through all the partition pairs and see
+            if any pair of partitions can be combined.
+            2. Calculate the cost after the combination.
+            3. Select the minimum cost and combine its corresponding partition pair.
+            """
+            partition_to_latency_mapping = get_partition_to_latency_mapping(
+                self.partitions, node_to_latency_mapping
+            )
+            cost = get_latency_of_partitioned_graph(
+                self.partitions,
+                partition_to_latency_mapping,
+                transfer_rate_bytes_per_sec,
+            )
+            if len(self.partitions) == 1:
+                return False
+            partition_pair: List[int] = []
+            for i in range(len(self.partitions) - 1):
+                for j in range(i + 1, len(self.partitions)):
+                    # Try to combine the partition pair
+                    # and see the new cost after combination
+                    new_cost = try_combining_partitions(i, j, self.partitions[:])
+                    if new_cost <= cost:
+                        partition_pair = [i, j]
+                        cost = new_cost
+                    reorganize_partitions(self.partitions)
+            # If a partition pair is found, combine them
+            if len(partition_pair) != 0:
+                p0 = self.partitions[partition_pair[0]]
+                p1 = self.partitions[partition_pair[1]]
+                combine_two_partitions(p0, p1, self.partitions)
+            get_bfs_level_partition(self.partitions)
+            reset_partition_device(self.partitions)
+            get_device_to_partitions_mapping(self.partitions, self.devices)
+            return len(partition_pair) != 0
+
+        for node in self.graph_module.graph.nodes:
+            if node.op not in {"placeholder", "get_attr", "output"}:
+                self.create_single_node_partition(node)
+        # Set up parent partitions and children partitions for each partition
+        set_parents_and_children(self.partitions)
+        # Get bfs level for each partition
+        get_bfs_level_partition(self.partitions)
+        find_combination = True
+        while find_combination:
+            # Search for a pair partition to generate the minimum new cost,
+            # then combine them
+            find_combination = search_combination(
+                transfer_rate_bytes_per_sec, node_to_latency_mapping
+            )
+        # Make sure all partitions are set up correctly
+        reorganize_partitions(self.partitions)
+        # Set up node to partition mapping
+        self.node_to_partition = get_node_to_partition_mapping(self.partitions)
+        return
+
+    def kl_based_partition(
+        self,
+        transfer_rate_bytes_per_sec: float,
+        node_to_latency_mapping: Dict[Node, NodeLatency],
+    ) -> None:
+        """This function is a cost aware partition based
+        on Kernighan-Lin algorithm.
+        First, the graph is partitioned using size_based_partition.
+        Then, each node is swapped with any other node in a different
+        partition, and at the same time, the cost is estimated after
+        the swapping.
+        For example, we have nodes n0, n1, n2, n3 and n4.
+        Using size_based_partition, n0 and n1 are in Partition p0.
+        n2, n3 and n4 in Partition p1. The current cost is estimated.
+        We first tried using n0 to swap with n2 from the other partition.
+        Then we see that swapping n0 and n2 shows a lower cost
+        than the current cost and it is the minimum among other pairs like
+        (n0, None)(This means moving n0 to Partition without swapping other nodes),
+        (n0, n3) and (n0, n4). We swap n0 and n2 and set the new cost
+        as the current cost.
+        Then We repeat this process for all the other nodes until all swapping pairs
+        are tried.
+        """
+
+        def swap_nodes(n0, n1, p0, p1):
+            # Either n0 or n1 could be None
+            # That means we simply move the node
+            # to another partition
+            if n0 is not None:
+                p0.remove_node(n0)
+                p1.add_node(n0)
+            if n1 is not None:
+                p0.add_node(n1)
+                p1.remove_node(n1)
+
+        def try_swap_nodes(
+            n0, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
+        ):
+            cost = float("inf")
+            swap_nodes(n0, n1, p0, p1)
+            # Reorganize partitions after swapping
+            reorganize_partitions(self.partitions)
+            # Check if there is a circular dependency after swapping
+            if (not check_dependency(p0)) and (not check_dependency(p1)):
+                reset_partition_device(self.partitions)
+                partition_to_latency_mapping = get_partition_to_latency_mapping(
+                    self.partitions, node_to_latency_mapping
+                )
+                # Check if all partitions can be mapped to logical devices after swapping
+                found_device = get_device_to_partitions_mapping(
+                    self.partitions, self.devices
+                )
+                if not found_device:
+                    cost = float("inf")
+                else:
+                    cost = get_latency_of_partitioned_graph(
+                        self.partitions,
+                        partition_to_latency_mapping,
+                        transfer_rate_bytes_per_sec,
+                    )
+            # Swap back and reset all partitions back to original
+            swap_nodes(n1, n0, p0, p1)
+            reorganize_partitions(self.partitions)
+            reset_partition_device(self.partitions)
+            get_device_to_partitions_mapping(self.partitions, self.devices)
+            return cost
+
+        def swap_node_to_partition(
+            node, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
+        ):
+            """This function helps to swap one node from partition p0
+            with all the nodes in another partition p1
+            """
+            p1_nodes = list(p1.nodes) + [None]
+            min_cost = float("inf")
|
989 |
+
node_pair: List[Node] = []
|
990 |
+
for n1 in p1_nodes:
|
991 |
+
# Ignore the node if it is not a op node
|
992 |
+
if n1 is not None and n1.op in {"placeholder", "get_attr"}:
|
993 |
+
continue
|
994 |
+
# Try swapping node in p0 with n1 in p1
|
995 |
+
cost = try_swap_nodes(
|
996 |
+
node, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
|
997 |
+
)
|
998 |
+
if cost < min_cost:
|
999 |
+
node_pair = [node, n1]
|
1000 |
+
min_cost = cost
|
1001 |
+
return cost, node_pair
|
1002 |
+
|
1003 |
+
# First use size_base_partition
|
1004 |
+
self.size_based_partition()
|
1005 |
+
partition_to_latency_mapping = get_partition_to_latency_mapping(
|
1006 |
+
self.partitions, node_to_latency_mapping
|
1007 |
+
)
|
1008 |
+
# Calculate the cost of the partitions
|
1009 |
+
cost = get_latency_of_partitioned_graph(
|
1010 |
+
self.partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec
|
1011 |
+
)
|
1012 |
+
# Keep tracking the node pair that shows the better cost
|
1013 |
+
node_pair: List[Node] = []
|
1014 |
+
# Keep tracking the partition pair of node pair
|
1015 |
+
partition_pair: List[Partition] = []
|
1016 |
+
# Collect all the op nodes from the graph
|
1017 |
+
op_nodes = []
|
1018 |
+
for n in self.graph_module.graph.nodes:
|
1019 |
+
if n.op not in {"placeholder", "get_attr", "output"}:
|
1020 |
+
op_nodes.append(n)
|
1021 |
+
for node in op_nodes:
|
1022 |
+
# Find which partition the current node belongs
|
1023 |
+
p0_index = self.node_to_partition[node]
|
1024 |
+
p0 = self.partitions[p0_index]
|
1025 |
+
# Go through all the other partitions to swap
|
1026 |
+
# with other nodes from those partitions
|
1027 |
+
for p1_index, _ in enumerate(self.partitions):
|
1028 |
+
if p0_index != p1_index:
|
1029 |
+
p1 = self.partitions[p1_index]
|
1030 |
+
new_cost, new_node_pair = swap_node_to_partition(
|
1031 |
+
node,
|
1032 |
+
p0,
|
1033 |
+
p1,
|
1034 |
+
node_to_latency_mapping,
|
1035 |
+
transfer_rate_bytes_per_sec,
|
1036 |
+
)
|
1037 |
+
# Update the cost
|
1038 |
+
# Track the swapped node pair and their partitions
|
1039 |
+
if new_cost < cost:
|
1040 |
+
cost = new_cost
|
1041 |
+
node_pair = new_node_pair
|
1042 |
+
partition_pair = [p0, p1]
|
1043 |
+
# Do the swapping after trying all the nodes from a partition
|
1044 |
+
if len(node_pair) != 0:
|
1045 |
+
swap_nodes(
|
1046 |
+
node_pair[0], node_pair[1], partition_pair[0], partition_pair[1]
|
1047 |
+
)
|
1048 |
+
reorganize_partitions(self.partitions)
|
1049 |
+
get_device_to_partitions_mapping(self.partitions, self.devices)
|
1050 |
+
reorganize_partitions(self.partitions)
|
1051 |
+
# Mapping the device to the partition
|
1052 |
+
get_device_to_partitions_mapping(self.partitions, self.devices)
|
1053 |
+
return
|
1054 |
+
|
1055 |
+
def aot_based_partition(
|
1056 |
+
self, node_to_partition_mapping, partition_to_logical_device_mapping
|
1057 |
+
):
|
1058 |
+
"""This function helps to rebuild the partitions given the nodes and its
|
1059 |
+
corresponding partition id
|
1060 |
+
"""
|
1061 |
+
partition_id_to_partition_mapping: Dict[int, Partition] = {}
|
1062 |
+
self.node_to_partition = node_to_partition_mapping
|
1063 |
+
for node in self.node_to_partition:
|
1064 |
+
partition_id = self.node_to_partition[node]
|
1065 |
+
# If the requested partition has not been created, create the partition
|
1066 |
+
if partition_id not in partition_id_to_partition_mapping:
|
1067 |
+
partition = Partition(partition_id)
|
1068 |
+
self.partitions.append(partition)
|
1069 |
+
partition_id_to_partition_mapping[partition_id] = partition
|
1070 |
+
partition.logical_device_ids = partition_to_logical_device_mapping[
|
1071 |
+
partition_id
|
1072 |
+
]
|
1073 |
+
else:
|
1074 |
+
partition = partition_id_to_partition_mapping[
|
1075 |
+
self.node_to_partition[node]
|
1076 |
+
]
|
1077 |
+
# Add the current node into the partition
|
1078 |
+
partition.add_node(node)
|
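To make the swap/evaluate/revert pattern in kl_based_partition concrete, here is a minimal standalone sketch. It operates on plain sets of node names with a caller-supplied cost function; both kl_style_pass and cost_of are hypothetical stand-ins for the Partition objects and get_latency_of_partitioned_graph used above, not part of the actual API.

def kl_style_pass(partitions, cost_of):
    # `partitions` is a list of sets of hashable node ids.
    # `cost_of(partitions)` is a hypothetical stand-in for the latency estimate.
    cost = cost_of(partitions)
    for i, p0 in enumerate(partitions):
        for node in list(p0):
            best = None
            for j, p1 in enumerate(partitions):
                if i == j:
                    continue
                # None plays the same role as in swap_node_to_partition:
                # move `node` without swapping anything back.
                for other in list(p1) + [None]:
                    # Tentatively swap, measure the cost, then revert.
                    p0.remove(node); p1.add(node)
                    if other is not None:
                        p1.remove(other); p0.add(other)
                    new_cost = cost_of(partitions)
                    p1.remove(node); p0.add(node)
                    if other is not None:
                        p0.remove(other); p1.add(other)
                    if new_cost < cost:
                        cost, best = new_cost, (p1, other)
            if best is not None:
                # Apply the best swap found for this node.
                p1, other = best
                p0.remove(node); p1.add(node)
                if other is not None:
                    p1.remove(other); p0.add(other)
    return cost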
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/const_fold.py
ADDED
@@ -0,0 +1,289 @@
import re
from typing import Callable, Dict, Optional, Set, Union

import torch.fx
from torch.fx.node import map_arg
from torch.fx.passes.split_module import split_module


__all__ = ['FoldedGraphModule', 'get_unique_attr_name_in_module', 'split_const_subgraphs']

class FoldedGraphModule(torch.fx.GraphModule):
    """
    FoldedGraphModule is a GraphModule which also contains another
    `const_subgraph_module` representing a subgraph which has all const attr
    inputs and which can be run once before running the main standard
    `graph`. `const_output_names` is the ordered list of attr names on which
    each respective output from the const_subgraph should be set.
    """

    def __init__(
        self,
        root: torch.nn.Module,
        graph: torch.fx.Graph,
        const_subgraph: Optional[torch.fx.Graph] = None,
        fx_const_folded_attrs_name: Optional[str] = None,
        device_for_folded_attrs: str = "cuda",
    ):
        super().__init__(root, graph)
        self.const_subgraph_module = (
            None
            if const_subgraph is None
            else torch.fx.GraphModule(root, const_subgraph)
        )
        self.has_folding_been_run = False
        self.fx_const_folded_attrs_name = fx_const_folded_attrs_name
        self.device_for_folded_attrs = device_for_folded_attrs

    def __call__(self, *args, **kwargs):
        if not self.has_folding_been_run:
            self.run_folding()
        return super().__call__(*args)

    def run_folding(self):
        # If there's no const subgraph module or attr output names to use, return
        # early as there is no const folding to perform.
        if (
            self.const_subgraph_module is None
            or self.fx_const_folded_attrs_name is None
        ):
            return

        assert not self.has_folding_been_run
        self.has_folding_been_run = True

        # Actually run const folding subgraph. Note that single attr const fold
        # subgraphs output a single Tensor while multiple outputs are returned as
        # Tuple[Tensor,].
        folded_attrs = self.const_subgraph_module()

        def _create_param(i):
            return torch.nn.Parameter(
                i
                if not isinstance(i, int)
                else torch.Tensor([i]).to(device=self.device_for_folded_attrs),
                requires_grad=i.requires_grad if isinstance(i, torch.Tensor) else False,
            )

        params = (
            torch.nn.ParameterList([_create_param(i) for i in folded_attrs])
            if isinstance(folded_attrs, tuple)
            else _create_param(folded_attrs)
        )
        setattr(self, self.fx_const_folded_attrs_name, params)


def _inline_module(gm: torch.fx.GraphModule, inline_mod_name: str):
    """
    Given `gm` and some graph module which is called with target name `inline_mod_name`,
    this helper will inline all of the nodes from that called graph module into `gm`.
    """
    # Fetch the inner graph module that we want to inline inside `gm`.
    inline_mod = dict(gm.named_modules())[inline_mod_name]
    assert isinstance(inline_mod, torch.fx.GraphModule)
    call_mod_node_to_replace = None
    for node in gm.graph.nodes:
        if node.op == "call_module" and node.target == inline_mod_name:
            call_mod_node_to_replace = node
            break
    assert call_mod_node_to_replace is not None

    # Now actually do the swap. Note that we have to keep track of new nodes that are
    # copied into `gm` -- we do this via replacement_mapping.
    call_mod_args = call_mod_node_to_replace.args
    replacement_mapping: Dict[torch.fx.Node, torch.fx.Node] = {}
    ph_count = 0

    def replacement_fn(node):
        new_node = replacement_mapping[node]
        new_node.meta = node.meta.copy()
        return new_node

    for inline_node in inline_mod.graph.nodes:
        if inline_node.op == "placeholder":
            replacement_mapping[inline_node] = call_mod_args[ph_count]
            ph_count += 1
            continue

        if inline_node.op == "output":
            outputs = inline_node.args[0]
            output_replacements = map_arg(outputs, replacement_fn)
            call_mod_node_to_replace.replace_all_uses_with(output_replacements)
            continue

        with gm.graph.inserting_before(call_mod_node_to_replace):
            new_node = gm.graph.node_copy(inline_node, replacement_fn)
            replacement_mapping[inline_node] = new_node

    gm.graph.eliminate_dead_code()


def get_unique_attr_name_in_module(mod_traced: torch.fx.GraphModule, name: str) -> str:
    """
    Make sure the name is unique (in a module) and can represent an attr.
    """
    # Delete all characters that are illegal in a Python identifier.
    name = re.sub("[^0-9a-zA-Z_]+", "_", name)
    if name[0].isdigit():
        name = f"_{name}"
    # Now make sure it is in fact unique to the module by incrementing suffix value.
    while hasattr(mod_traced, name):
        match = re.match(r"(.*)_(\d+)$", name)
        if match is None:
            name = name + "_1"
        else:
            base, num = match.group(1, 2)
            name = f"{base}_{int(num) + 1}"

    return name


def split_const_subgraphs(
    module: Union[torch.nn.Module, torch.fx.GraphModule],
    skip_folding_node_fn: Optional[Callable[[torch.fx.Node], bool]] = None,
    device_for_folded_attrs: str = "cpu",
) -> FoldedGraphModule:
    """
    Looks through `module` for any nodes that have all constant attribute inputs
    and separates them out into their own constant subgraph, and returns a
    FoldedGraphModule which runs that constant subgraph on the first run to set
    attributes on the module prior to running the non-constant portion of the
    graph.
    """
    if not isinstance(module, torch.fx.GraphModule):
        mod_traced = torch.fx.symbolic_trace(module)
    else:
        mod_traced = module

    # Build up a list of const_nodes, defined as nodes that are themselves
    # get_attrs, or have all get_attr or other constant node inputs.
    const_nodes: Set[torch.fx.Node] = set()
    found_const_folding = False
    for node in mod_traced.graph.nodes:
        # Skip over placeholders/outputs because they can't be const folded and
        # we don't want to add tags to them.
        if node.op in {"placeholder", "output"}:
            continue

        # If the node itself is constant, or all of its inputs are constant,
        # then tag it as constant.
        if node.op != "get_attr" and not set(node.all_input_nodes).issubset(
            const_nodes
        ):
            continue

        # If the provided skip folding function says to skip, then skip.
        if skip_folding_node_fn and skip_folding_node_fn(node):
            continue

        # Skip folding side-effectful functions
        if node.is_impure():
            continue

        # Must be a constant foldable node at this point.
        const_nodes.add(node)
        if node.op != "get_attr":
            found_const_folding = True

    # If we did not find any const folding then return early without a const fold subgraph.
    if not found_const_folding:
        return FoldedGraphModule(mod_traced, mod_traced.graph)

    # Partition the module into two: submod_0 for constant folding subgraph, and
    # submod_1 for the rest.
    def mod_partition(node: torch.fx.Node):
        return 0 if node in const_nodes else 1

    split = split_module(mod_traced, module, mod_partition)

    const_gm, non_const_gm = split.submod_0, split.submod_1
    const_mod_name, non_const_mod_name = "submod_0", "submod_1"

    # The module that a call_module node refers to gets copied to submodules during split.
    # The path to the module also gets inlined, i.e. mod.a.b -> mod_a_b. Here we need to
    # attach inlined modules to `split` as it's the owning module now.
    for node in non_const_gm.graph.nodes:
        if node.op == "call_module":
            setattr(split, node.target, getattr(non_const_gm, node.target))
    for node in const_gm.graph.nodes:
        if node.op == "call_module":
            setattr(split, node.target, getattr(const_gm, node.target))

    # split_module currently does not use get_attrs for attrs. Instead it passes
    # them in as args from the parent module, which used get_attrs. Here we set
    # them as get_attrs inside const_gm, allowing for running folding without
    # somehow a priori knowing the attrs that should be passed as args. We can
    # unconditionally do this for all placeholders because we know all
    # placeholders to const_gm must be constants accessible via get_attr.
    call_const_gm_args = None
    for node in split.graph.nodes:
        if node.op == "call_module":
            if node.target == const_mod_name:
                call_const_gm_args = node.args
                break
    assert call_const_gm_args is not None

    # Here we do the actual replacement of placeholders to get_attrs. Note that here we
    # set the const_gm.graph into a new root_const_gm with split as the root module,
    # because we are fetching attributes directly from the root module, instead of
    # fetching them from const_gm. Example: The const_gm must have some format like:
    # graph():
    #    %inp : [num_users=1] = placeholder[target=const_inp]
    #    %add : [num_users=1] = call_function[target=operator.add](args = (%inp, %inp), kwargs = {})
    #    return add
    # We replace that with the following, which does not have any placeholders:
    # graph():
    #    %inp_1 : [num_users=1] = get_attr[target=const_inp]
    #    %add : [num_users=1] = call_function[target=operator.add](args = (%inp_1, %inp_1), kwargs = {})
    #    return add
    root_const_gm = torch.fx.GraphModule(split, const_gm.graph)
    for node in root_const_gm.graph.nodes:
        if node.op == "output":
            multiple_outputs = isinstance(node.args[0], tuple)
            continue
        if node.op != "placeholder":
            continue
        in_node = next(n for n in call_const_gm_args if n.name == node.target)
        assert in_node.op == "get_attr"
        with root_const_gm.graph.inserting_before(node):
            new_node = root_const_gm.graph.get_attr(in_node.target)
            new_node.meta = node.meta.copy()
            node.replace_all_uses_with(new_node)
            root_const_gm.graph.erase_node(node)
    assert "multiple_outputs" in locals()

    # Now find the call to const_gm inside split, and replace it with a getattr to the
    # folded tensor(s) that result from constant folding. Note that we don't need to
    # worry about whether this is one or more tensors because the original graph
    # correctly uses getitem to extract individual tensors if there are multiple folded.
    fx_const_folded_attrs_name = get_unique_attr_name_in_module(
        split, "_FX_CONST_FOLDED_ATTRS"
    )
    setattr(
        split,
        fx_const_folded_attrs_name,
        torch.nn.ParameterList() if multiple_outputs else torch.nn.Parameter(),
    )
    for node in split.graph.nodes:
        if node.op == "call_module" and node.target == const_mod_name:
            with node.graph.inserting_before(node):
                folded_attrs = node.graph.get_attr(fx_const_folded_attrs_name)
                folded_attrs.meta = node.meta.copy()
            node.replace_all_uses_with(folded_attrs)
            break

    split.graph.eliminate_dead_code()

    # Finally, inline the non-constant submod into the split submod. This is so that the
    # original caller who may have passed in a graph module will get back out a graph
    # module whose graph is traced to the same granularity.
    _inline_module(split, non_const_mod_name)

    return FoldedGraphModule(
        split,
        split.graph,
        root_const_gm.graph,
        fx_const_folded_attrs_name,
        device_for_folded_attrs,
    )
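A minimal usage sketch for the file above. The module, buffer name, and shapes are illustrative, not from the source; the point is that a subexpression with only constant (get_attr) inputs is split into the const subgraph and executed once, on the first call.

import torch
from torch.fx.experimental.const_fold import split_const_subgraphs

class AddFoldable(torch.nn.Module):  # hypothetical example module
    def __init__(self):
        super().__init__()
        self.register_buffer("w", torch.randn(3, 3))

    def forward(self, x):
        # `self.w + self.w` depends only on constants, so it gets folded.
        return x + (self.w + self.w)

folded = split_const_subgraphs(AddFoldable())
out = folded(torch.randn(3, 3))  # first call runs the const subgraph once
assert folded.has_folding_been_run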
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/debug.py
ADDED
@@ -0,0 +1,31 @@
import torch.fx as fx

def set_trace(gm: fx.GraphModule) -> fx.GraphModule:
    """
    Sets a breakpoint in `gm`'s generated python code. It drops into pdb when
    `gm` gets run.

    Args:
        gm: graph module to insert breakpoint. It is then recompiled for it to
            take effect.

    Returns:
        the `gm` with breakpoint inserted.
    """
    def insert_pdb(body):
        return ["import pdb; pdb.set_trace()\n", *body]

    with gm.graph.on_generate_code(
        make_transformer=lambda cur_transform: (
            # new code transformer to register
            lambda body: (
                insert_pdb(
                    cur_transform(body) if cur_transform
                    else body
                )
            )
        )
    ):
        gm.recompile()

    return gm
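A short usage sketch for set_trace; the traced function is illustrative. After recompilation, the generated forward contains the injected pdb.set_trace() line as its first statement.

import torch
import torch.fx as fx
from torch.fx.experimental.debug import set_trace

def f(x):  # hypothetical function to trace
    return x * 2 + 1

gm = set_trace(fx.symbolic_trace(f))
print(gm.code)        # the generated code now starts with "import pdb; pdb.set_trace()"
# gm(torch.randn(3))  # uncomment to drop into pdb inside the generated forward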
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/graph_gradual_typechecker.py
ADDED
@@ -0,0 +1,914 @@
from functools import reduce
import torch
import operator
from torch.fx.tensor_type import Dyn, is_consistent, TensorType, is_more_precise
from typing import Callable, Dict
from torch.fx.node import Target, Node
from torch.nn.modules.batchnorm import BatchNorm2d
from torch.nn.modules.conv import Conv2d
from torch.fx.experimental.refinement_types import Equality
import itertools

from torch.fx.experimental.unification import Var  # type: ignore[attr-defined]

import sympy

_INFERENCE_RULES: Dict[Target, Callable] = {}
_REFINEMENT_RULES: Dict[Target, Callable] = {}
_RULES: Dict[Target, Callable] = {}


def expand_to_tensor_dim(t, n):
    """
    Expand a type to the desired tensor dimension if possible.
    Raise an error otherwise.
    - t is the given type
    - n is the number of dimensions to expand to
    """
    if t == Dyn:
        dims = [Dyn] * n
        return TensorType(tuple(dims))
    elif isinstance(t, TensorType):
        if len(t.__args__) != n:
            raise TypeError(f'Cannot extend tensor. Tensor {t} has rank {len(t.__args__)}. It should have rank {n}')
        return t
    else:
        raise TypeError(f'Cannot match the type {t}')


def broadcast_types(t1, t2):
    """
    Applies broadcasting to both given types such that they
    become consistent with each other and returns the two new
    resulting types
    """

    # if either type is Dyn, do nothing since the types are already consistent
    if t1 == Dyn or t2 == Dyn or isinstance(t1, Var) or isinstance(t2, Var):
        return t1, t2

    if isinstance(t1, TensorType) and isinstance(t2, TensorType):
        s1 = len(t1.__args__)
        s2 = len(t2.__args__)

        new_t1 = list(t1.__args__)
        new_t2 = list(t2.__args__)

        # We make the types the same length, which is the first requirement
        # for consistency
        if s1 > s2:
            for i in range(s1 - s2):
                new_t2.insert(0, 1)

        elif s2 > s1:
            for i in range(s2 - s1):
                new_t1.insert(0, 1)

        # we replace occurrences of "1" in each tensor with
        # the corresponding type from the other tensor
        for i, (x, y) in enumerate(zip(new_t1, new_t2)):
            if x == 1:
                new_t1[i] = y
            elif y == 1:
                new_t2[i] = x

        # at this point our tensors should be consistent
        # and we can apply the element-wise operation and find the right dimension
        # for the output of the operation
        (t1, t2) = TensorType(tuple(new_t1)), TensorType(tuple(new_t2))
        return (t1, t2)
    else:
        raise TypeError(f'Cannot broadcast types {t1} and {t2}')

def register_inference_rule(call_target):
    def register(fn):
        if call_target in _INFERENCE_RULES:
            raise RuntimeError(f'Inference rule already registered for {call_target}!')
        _INFERENCE_RULES[call_target] = fn
        return fn
    return register

def register_refinement_rule(call_target):
    def register(fn):
        if call_target in _REFINEMENT_RULES:
            raise RuntimeError(f'Refinement rule already registered for {call_target}!')
        _REFINEMENT_RULES[call_target] = fn
        return fn
    return register

def register_algebraic_expressions_inference_rule(call_target):
    def register(fn):
        if call_target in _RULES:
            raise RuntimeError(f'Rule already registered for {call_target}!')
        _RULES[call_target] = fn
        return fn
    return register

@register_inference_rule(torch.add)
@register_inference_rule(operator.add)
def add_inference_rule(n: Node):
    """
    Apply the addition inference rule. This includes:
    - scalar addition
    - broadcasting semantics

    Note that we always return the least precise type between
    the operands (after applying broadcasting) to be the final type of the operation

    Note that we do not modify the operand types themselves after applying broadcasting
    to them. We only use them to calculate the final type
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], Node)
    t1 = n.args[0].type
    t2 = n.args[1].type

    # handle scalar addition
    if t1 == int and isinstance(t2, TensorType):
        n.type = t2
        return n.type

    # handle scalar addition
    elif t2 == int and isinstance(t1, TensorType):
        n.type = t1
        return n.type

    # we bring the new types to the point where
    # we can check for consistency
    # any inconsistency would not have been caused
    # by broadcasting at this point
    (new_t1, new_t2) = broadcast_types(t1, t2)

    if new_t1 != t1 or new_t2 != t2:
        n.meta['broadcast'] = True
        n.meta[str(n.args[0])] = new_t1
        n.meta[str(n.args[1])] = new_t2

    else:
        n.meta['broadcast'] = False

    new_t1 = t1 if not n.meta['broadcast'] else new_t1
    new_t2 = t2 if not n.meta['broadcast'] else new_t2

    # we check for consistency between the new types
    if is_consistent(new_t1, new_t2):
        # we return the less precise type because
        # broadcasting may have happened
        # for operands with shape [1,2,Dyn] and [1,2,1]
        # we have to assign the node [1,2,Dyn]
        if is_more_precise(new_t1, new_t2):
            n.type = new_t2
        else:
            n.type = new_t1
        return n.type
    else:
        raise TypeError(f'Cannot add arguments {n.args[0]} ({n.args[0].type}) and {n.args[1]} ({n.args[1].type}) in node {n}.'
                        f' Types should match ')

@register_inference_rule(getattr)
def get_attr_inference_rule(n: Node, traced):
    """
    The current getattr rule only handles the shape attribute.
    It can be extended to other attributes.
    The most representative type we have is "Dyn", but the system
    can be extended with more types, such as a type to represent shapes
    """
    attr_node = n.args[0]
    attr_name = n.args[1]

    if attr_name == "shape":
        n.type = Dyn
    else:
        raise TypeError("Not yet implemented")

    # TODO. We leave it like this till we add a type to represent tensor sizes
    return n.type

@register_inference_rule(torch.transpose)
def transpose_inference_rule(n: Node):
    """
    We check that the dimensions for the transpose operation
    are within range of the tensor type of the node
    """
    if n.target == torch.transpose:
        assert isinstance(n.args[0], Node)
        t = n.args[0].type

        assert isinstance(n.args[1], int)
        assert isinstance(n.args[2], int)
        dim1, dim2 = n.args[1], n.args[2]

        if t == Dyn:
            n.type = Dyn
            return n.type

        elif isinstance(t, TensorType):
            if 0 <= dim1 < len(t.__args__) and 0 <= dim2 < len(t.__args__):
                new_type = list(t.__args__)
                new_type[dim1], new_type[dim2] = new_type[dim2], new_type[dim1]
                final = TensorType(new_type)
                n.type = get_greatest_upper_bound(n.type, final)
                return n.type
            else:
                raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')
        else:
            raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')


@register_inference_rule(torch.reshape)
def reshape_inference_rule(n: Node):
    """
    Without dynamism, the rule checks that the
    product of the elements of the argument tensor
    type is equal to the product of the elements
    of the required shape. We gradualize this rule
    by adding a case to handle fully dynamic input
    as well as input where some of the tensor dimensions
    are unknown. In that case we check for divisibility
    """
    assert isinstance(n.args[0], Node)
    t1 = n.args[0].type

    assert isinstance(n.args[1], list)
    t2 = n.args[1]
    t2_type = TensorType([Dyn if elem == -1 else elem for elem in t2])

    # if we do not know the original tensor dimension,
    # we return the required dimension
    if t1 == Dyn:
        n.type = t2_type
        return t2_type

    # if any of the dimensions are unknown,
    # we check for divisibility
    elif isinstance(t1, TensorType):
        assert isinstance(t1, TensorType)
        a = [e if e != Dyn else 1 for e in t1.__args__]
        p1 = reduce(lambda x, y: x * y, a)
        p2 = reduce(lambda x, y: x * y, t2)
        if p1 % p2 == 0 or p2 % p1 == 0:
            n.type = t2_type
            return t2_type
        else:
            raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')
    else:
        raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')

@register_inference_rule(BatchNorm2d)
def bn2d_inference_rule(n: Node, module_instance):
    """
    Given a BatchNorm2D instance and a node check the following conditions:
    - the input type can be expanded to a size 4 tensor: t = (x_1, x_2, x_3, x_4)
    - the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4')
    - t is consistent with t'
    - x_2 is consistent with the module's num_features
    - x_2' is consistent with the module's num_features
    output type: the more precise type of t and t'
    """
    assert isinstance(n.args[0], Node)
    n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)
    arg_type = n.args[0].type
    n.type = expand_to_tensor_dim(n.type, 4)

    # we check the conditions on the incoming argument
    # and any existing annotation
    # we also check for consistency between both annotations
    if is_consistent(arg_type.__args__[1], module_instance.num_features) and \
            is_consistent(n.type.__args__[1], module_instance.num_features) and \
            is_consistent(arg_type, n.type):

        # we choose the more precise type
        # to be the node type
        # so if an incoming argument has more type information
        # we set this node's type to be the argument type
        n.type = get_greatest_upper_bound(arg_type, n.type)
        return n.type
    else:
        raise TypeError(f'Cannot apply {module_instance} with input type {arg_type} and existing type {n.type} on {n}')


def calculate_out_dimension(d_in, module_instance, index):
    """
    For calculating h_out and w_out according to the conv2D documentation
    """
    padding = (module_instance.padding, module_instance.padding) \
        if isinstance(module_instance.padding, int) else module_instance.padding
    kernel_size = (module_instance.kernel_size, module_instance.kernel_size) \
        if isinstance(module_instance.kernel_size, int) else module_instance.kernel_size
    stride = (module_instance.stride, module_instance.stride) \
        if isinstance(module_instance.stride, int) else module_instance.stride
    dilation = (module_instance.dilation, module_instance.dilation) \
        if isinstance(module_instance.dilation, int) else module_instance.dilation

    DIMENSION_TYPES = (int, sympy.Symbol)

    if d_in == Dyn:
        return Dyn

    elif isinstance(d_in, DIMENSION_TYPES):
        n = d_in + 2 * padding[index] - \
            dilation[index] * \
            (kernel_size[index] - 1) - 1

        return (n // stride[0]) + 1

    else:
        raise TypeError(f'{d_in} in {module_instance} must be a number or Dyn. Received {type(d_in)}')


def get_greatest_upper_bound(type1, type2):
    """
    Get the most precise type that's consistent with the given types
    """
    if type1 == Dyn:
        return type2
    elif type2 == Dyn:
        return type1
    elif isinstance(type1, TensorType) and isinstance(type2, TensorType):
        if not is_consistent(type1, type2):
            raise TypeError(f'Inconsistent types {type1}, {type2}')
        gub = [t1 if is_more_precise(t1, t2) else t2 for (t1, t2) in zip(type1.__args__, type2.__args__)]
        return TensorType(tuple(gub))


@register_inference_rule(Conv2d)
def conv2d_inference_rule(n: Node, module_instance):
    """
    Given a Conv2D instance and a node check the following conditions:
    - the input type can be expanded to a size 4 tensor: t = (x_1, x_2, H, W)
    - the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4')
    - x_2 is consistent with the module's in_channels
    - let o = (x_1, out_channels, H_out, W_out)
      then the output is the greatest upper bound of o and the existing node type t'.
    """
    assert isinstance(n.args[0], Node)
    n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)
    arg_type = n.args[0].type
    curr_node_type = expand_to_tensor_dim(n.type, 4)

    if is_consistent(arg_type.__args__[1], module_instance.in_channels):
        w_in = arg_type.__args__[3]
        h_in = arg_type.__args__[2]
        h_out = calculate_out_dimension(h_in, module_instance, 0)
        w_out = calculate_out_dimension(w_in, module_instance, 1)
        new_type = TensorType((arg_type.__args__[0], module_instance.out_channels, h_out, w_out))
        gub = get_greatest_upper_bound(new_type, curr_node_type)
        n.type = gub
        return n.type
    else:
        raise TypeError(f'Cannot apply {module_instance} with input type {arg_type} and existing type {n.type} on {n}')


@register_inference_rule(torch.nn.ReLU)
def relu_inference_rule(n: Node, module_instance):
    """
    Input and output shapes should be equal.
    """
    assert isinstance(n.args[0], Node)

    if n.args[0].type == Dyn and isinstance(n.type, TensorType):
        n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))

    if isinstance(n.args[0].type, TensorType):
        n.type = get_greatest_upper_bound(n.args[0].type, n.type)
    return n.type


def maxpool2d_check(typ, module_instance):
    """
    Applies the maxpool2d shape information to the input;
    this affects the last two dimensions
    """
    new_type_list = list(typ.__args__)
    if len(new_type_list) == 4 or len(new_type_list) == 3:
        w_in = new_type_list[-1]
        h_in = new_type_list[-2]

        h_out = calculate_out_dimension(h_in, module_instance, 0)
        w_out = calculate_out_dimension(w_in, module_instance, 1)

        new_type_list[-1] = w_out
        new_type_list[-2] = h_out
        return TensorType(tuple(new_type_list))

    else:
        raise TypeError(f'Wrong size {typ} for {module_instance}')


@register_inference_rule(torch.nn.MaxPool2d)
def maxpool2d_inference_rule(n: Node, module_instance):
    """
    Given a MaxPool2D instance and a node check the following conditions:
    - Input size matches size 3 or 4
    - Current node type is consistent with the output type we will calculate
    - Input size matches output size and the last two dimensions of the output
      are w_out and h_out. The remaining dimensions are the same as the input
    - Our final result is the greatest upper bound of the output we calculate
      and the current node type.
    """
    assert isinstance(n.args[0], Node)

    if n.args[0].type == Dyn and isinstance(n.type, TensorType):
        n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
    if isinstance(n.args[0].type, TensorType):
        output = maxpool2d_check(n.args[0].type, module_instance)
        n.type = get_greatest_upper_bound(output, n.type)
    return n.type


def linear_check(tensor_type, module_instance):
    """
    Checks that an input tensor type satisfies the conditions for the linear operation
    and returns the output type based on the in and out features given by module_instance
    """
    if len(tensor_type.__args__) >= 2:
        if is_consistent(module_instance.in_features, tensor_type.__args__[-1]):
            new_type_args = list(tensor_type.__args__)
            new_type_args[-1] = module_instance.out_features
            return TensorType(tuple(new_type_args))
        else:
            raise TypeError(f'Inconsistent {module_instance.in_features} and {tensor_type.__args__[-1]} in {module_instance}')
    else:
        raise TypeError(f'Type {tensor_type} must have rank 2 or more.')


@register_inference_rule(torch.nn.Linear)
def linear_inference_rule(n: Node, module_instance):
    """
    Applies the shape information to the input then gets the greatest upper bound
    of the resulting type and the existing type
    """
    assert isinstance(n.args[0], Node)
    if n.args[0].type == Dyn and isinstance(n.type, TensorType):
        n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
    if isinstance(n.args[0].type, TensorType):
        output_type = linear_check(n.args[0].type, module_instance)
        n.type = get_greatest_upper_bound(output_type, n.type)
    return n.type


def adaptiveavgpool2d_check(tensor_type, module_instance):
    output_size = module_instance.output_size
    if isinstance(output_size, int):
        output_size = [output_size, output_size]
    elif isinstance(output_size, tuple):
        output_size = list(output_size)
        if output_size[0] is None:
            output_size[0] = output_size[1]
        if output_size[1] is None:
            output_size[1] = output_size[0]

    new_type_list = list(tensor_type.__args__)

    if len(tensor_type.__args__) == 4 or len(tensor_type.__args__) == 3:
        new_type_list[-1] = output_size[1]
        new_type_list[-2] = output_size[0]

        return TensorType(tuple(new_type_list))

    else:
        raise TypeError(f'Tensor ranks must be 3 or 4. Got {tensor_type}')

@register_inference_rule(torch.nn.AdaptiveAvgPool2d)
def adaptiveavgpool2d_inference_rule(n: Node, module_instance):
    """
    The input and output sizes should be the same except for the last
    two dimensions taken from the input, which represent width and height
    """
    assert isinstance(n.args[0], Node)
    if n.args[0].type == Dyn and isinstance(n.type, TensorType):
        n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
    if isinstance(n.args[0].type, TensorType):
        output_type = adaptiveavgpool2d_check(n.args[0].type, module_instance)
        n.type = get_greatest_upper_bound(n.type, output_type)
    return n.type

def flatten_check(tensor_type, start_dim, end_dim):
    l = len(tensor_type.__args__)

    start_dim = l if start_dim == -1 else abs(start_dim)
    end_dim = l + end_dim + 1 if end_dim < 0 else end_dim + 1

    if 0 <= start_dim <= (l - 1) and 0 <= end_dim <= l and start_dim < end_dim:
        my_args = list(tensor_type.__args__)
        lhs = my_args[0:start_dim]
        rhs = my_args[end_dim:]
        mid = my_args[start_dim:end_dim]
        if Dyn in mid:
            mid = [Dyn]
        else:
            mid = [reduce(lambda x, y: x * y, my_args[start_dim:end_dim])]
        new_type_list = lhs + mid + rhs
        return TensorType(tuple(new_type_list))
    else:
        raise TypeError(f'Incompatible dimensions {start_dim}, {end_dim - 1} in type {tensor_type}')

@register_inference_rule(torch.flatten)
def flatten_inference_rule(n: Node):
    """
    Applies the flatten shape information to the input then gets the
    greatest upper bound of the resulting type and the existing type
    """
    assert isinstance(n.args[0], Node)

    # set the default start and end dims
    start_dim = 1
    end_dim = -1

    if len(n.args) > 1:
        assert isinstance(n.args[1], int)
        start_dim = n.args[1]

    if len(n.args) > 2:
        assert isinstance(n.args[2], int)
        end_dim = n.args[2]

    if n.args[0].type == Dyn and isinstance(n.type, TensorType):
        n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))

    if isinstance(n.args[0].type, TensorType):
        output_type = flatten_check(n.args[0].type, start_dim, end_dim)
        n.type = get_greatest_upper_bound(output_type, n.type)

    return n.type

class GraphTypeChecker:
    def __init__(self, env, traced):
        self.env = env
        self.traced = traced

    def type_check(self):
        """
        A gradual type checker for graphs
        Effect: every node's field type will be
        populated with a type after type-checking is done
        """
        graph = self.traced.graph

        # type check every node with gradual type rules
        # if any node does not type check return false
        for n in graph.nodes:
            self.type_check_node(n)
        return True

    def type_check_node(self, n: Node):
        """
        Type check a given fx node.
        Current operations:
        - Reshape
        - Transpose
        - Add
        - Relu
        - conv2d
        - batchnorm2d
        - flatten
        - maxpool2d
        - adaptiveavgpool2d
        - linear
        """
        if n.type is None:
            n.type = Dyn

        if n.op == 'placeholder':
            return n.type

        elif n.op == 'get_attr':
            t = get_parameter(self.traced, n.target)  # type: ignore[arg-type]
            if isinstance(t.data, torch.Tensor):
                n.type = TensorType(t.data.shape)
            return n.type

        elif n.op == 'call_function':
            if n.target == getattr:
                assert getattr in _INFERENCE_RULES
                return _INFERENCE_RULES[n.target](n, self.traced)

            elif n.target in _INFERENCE_RULES:
                return _INFERENCE_RULES[n.target](n)
            else:
                raise RuntimeError(f'No inference rule registered for target {n.target}!')

        elif n.op == 'call_module':
            module_instance = self.traced.get_submodule(n.target)
            if type(module_instance) in _INFERENCE_RULES:
                return _INFERENCE_RULES[type(module_instance)](n, module_instance)
            else:
                raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')

        elif n.op == 'output':
            def get_node_type(a):
                return a.type
            n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
            return n.type

        else:
            raise NotImplementedError(f"Method {n.op} not yet implemented")


@register_refinement_rule(Conv2d)
def conv_refinement_rule(n: Node):
    """
    The equality constraints are between the first dimension of
    the input and output
    """
    res = []
    assert isinstance(n.args[0], Node)
    arg_type = n.args[0].type
    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
        res = [Equality(arg_type.__args__[0], n.type.__args__[0])]
    return res


@register_refinement_rule(torch.nn.Linear)
def linear_refinement_rule(n: Node):
    """
    The equality constraints are between the first dimension of
    the input and output
    """
    res = []
    assert isinstance(n.args[0], Node)
    arg_type = n.args[0].type
    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
        res = [Equality(arg_type.__args__[0], n.type.__args__[0])]
    return res

@register_refinement_rule(BatchNorm2d)
@register_refinement_rule(torch.nn.ReLU)
def all_eq(n: Node):
    """
    For operations where the input shape is equal to the output shape
    """
    res = []
    assert isinstance(n.args[0], Node)
    arg_type = n.args[0].type
    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
        args1 = arg_type.__args__
        args2 = n.type.__args__
        res = [Equality(args1[i], args2[i]) for i in range(len(args1))]
    return res


@register_refinement_rule(torch.nn.AdaptiveAvgPool2d)
@register_refinement_rule(torch.nn.MaxPool2d)
def first_two_eq(n: Node):
    """
    For operations where the first two dimensions of the input and output shape
    are equal
    """
    res = []
    assert isinstance(n.args[0], Node)
    arg_type = n.args[0].type
    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
        args1 = arg_type.__args__
        args2 = n.type.__args__
        res = [Equality(args1[0], args2[0]), Equality(args1[1], args2[1])]
    return res


@register_refinement_rule(torch.add)
@register_refinement_rule(operator.add)
def element_wise_eq(n: Node):
    """
    For element-wise operations; handles broadcasting.
    Note that after applying broadcasting to the arguments
    we are able to determine that certain dimensions have not been broadcast
    if they are symbolically equal.

    In this case, we can establish equality between those dimensions and the
    corresponding output dimensions.

    Note that it takes two iterations for this result. One iteration to establish
    equality between certain dimensions of the operands (requiring the whole solver
    including unification) and another iteration to establish equality between the operands
    and the resulting type, requiring another round of constraint generation and unification.
    """
    res = []
    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        arg_type1 = n.args[0].type
        arg_type2 = n.args[1].type
        if isinstance(arg_type1, TensorType) and isinstance(arg_type2, TensorType) and isinstance(n.type, TensorType):
            args1, args2 = broadcast_types(arg_type1, arg_type2)
            # by this point, we know that args1 and args2 are the same size.
            a1 = args1.__args__
            a2 = args2.__args__
            a3 = n.type.__args__

            # we would be here in the second iteration where we establish equality
            # between operand type dimensions and the resulting type dimensions
            r = []
            for x, y, z in zip(a1, a2, a3):
                if x == y:
                    r.append(Equality(x, z))
            res = r
    return res


@register_refinement_rule(torch.flatten)
def flatten_refinement_rule(n: Node):
    """
    Generates equality constraints between the dimensions of the input and output
    that will not be involved in the flatten operation
    """
    assert isinstance(n.args[0], Node)

    eq_const = []

    start_dim = 1
    end_dim = -1

    if len(n.args) > 1:
        assert isinstance(n.args[1], int)
        start_dim = n.args[1]

    if len(n.args) > 2:
        assert isinstance(n.args[2], int)
        end_dim = n.args[2]

    if isinstance(n.type, TensorType) and isinstance(n.args[0].type, TensorType):
        l = len(n.type.__args__)
        arg_type = n.args[0].type
        start_dim = l if start_dim == -1 else start_dim
        end_dim = l + end_dim + 1 if end_dim < 0 else end_dim + 1

        for t1, t2 in zip(n.type.__args__[0:start_dim], arg_type.__args__[0:start_dim]):
            eq_const.append(Equality(t1, t2))

        for t1, t2 in zip(n.type.__args__[end_dim:], arg_type.__args__[end_dim:]):
            eq_const.append(Equality(t1, t2))
    return eq_const


@register_algebraic_expressions_inference_rule(Conv2d)
def conv_rule(n: Node, module_instance):
    """
    Represents the output in terms of an algebraic expression w.r.t.
    the input when possible
    """
    assert isinstance(n.args[0], Node)
    arg_type = n.args[0].type
    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
        w_in = arg_type.__args__[3]
        h_in = arg_type.__args__[2]
        h_out = calculate_out_dimension(h_in, module_instance, 0)
        w_out = calculate_out_dimension(w_in, module_instance, 1)
        new_type = TensorType((n.type.__args__[0], n.type.__args__[1], h_out, w_out))
        n.type = new_type
        return new_type

class Refine:
    """
    Symbolic shape inference.
    Generates constraints over type variables.
    Currently all constraints are equality constraints.
    """
    def __init__(self, traced):
        self.constraints = []
        self.traced = traced
        self.symbol_iter = itertools.count(start=0, step=1)

    def refine(self):
        """
        Generates constraints for
        every node in the graph based on
        the operation.
        """
        graph = self.traced.graph
        for n in graph.nodes:
            self.refine_node(n)
        return True

    def symbolic_relations(self):
        """
        Infers algebraic relations
        """
        graph = self.traced.graph
        for n in graph.nodes:
            self.infer_symbolic_relations(n)
        return True

    def replace_dyn_with_fresh_var(self, typ):
        """
        Replace all unknown types with fresh type variables.
        """
        if typ == Dyn:
            new_symbol = Var(next(self.symbol_iter))
            return new_symbol
        elif isinstance(typ, TensorType):
            new_args = [self.replace_dyn_with_fresh_var(a) for a in typ.__args__]
            return TensorType(tuple(new_args))
        elif isinstance(typ, list):
            return [self.replace_dyn_with_fresh_var(t) for t in typ]
        elif isinstance(typ, tuple):
            return (self.replace_dyn_with_fresh_var(t) for t in typ)
        else:
            return typ


    def convert_to_sympy_symbols(self, typ):
        """
        Replace all type variables with sympy symbols.
        """
        if isinstance(typ, Var):
            return sympy.symbols(str(typ))
        elif isinstance(typ, TensorType):
            new_args = [self.convert_to_sympy_symbols(a) for a in typ.__args__]
            return TensorType(tuple(new_args))
        elif isinstance(typ, list):
            return [self.convert_to_sympy_symbols(t) for t in typ]
        elif isinstance(typ, tuple):
            return (self.convert_to_sympy_symbols(t) for t in typ)
        else:
            return typ

    def refine_node(self, n: Node):
        """
        Returns a list of equality constraints for
        call_module and call_function nodes.
        Models the relation between input and output dimensions
        using constraints in case they are both tensors.
        All operations used in resnet50 are defined.
        """
        if n.type is None:
            n.type = Dyn

        n.type = self.replace_dyn_with_fresh_var(n.type)

        if n.op == 'call_function':
            if n.target in _REFINEMENT_RULES:
                self.constraints += _REFINEMENT_RULES[n.target](n)
            else:
                pass

        if n.op == 'call_module':
            module_instance = self.traced.get_submodule(n.target)
            if type(module_instance) in _REFINEMENT_RULES:
                self.constraints += _REFINEMENT_RULES[type(module_instance)](n)
|
847 |
+
else:
|
848 |
+
pass
|
849 |
+
|
850 |
+
if n.op == 'output':
|
851 |
+
def get_node_type(a):
|
852 |
+
return a.type
|
853 |
+
n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
|
854 |
+
return n.type
|
855 |
+
|
856 |
+
else:
|
857 |
+
pass
|
858 |
+
|
859 |
+
def infer_symbolic_relations(self, n: Node):
|
860 |
+
n.type = self.convert_to_sympy_symbols(n.type)
|
861 |
+
if n.op == 'call_function':
|
862 |
+
if n.target in _RULES:
|
863 |
+
return _RULES[n.target](n)
|
864 |
+
else:
|
865 |
+
pass
|
866 |
+
|
867 |
+
if n.op == 'call_module':
|
868 |
+
module_instance = self.traced.get_submodule(n.target)
|
869 |
+
if type(module_instance) in _RULES:
|
870 |
+
return _RULES[type(module_instance)](n, module_instance)
|
871 |
+
else:
|
872 |
+
pass
|
873 |
+
|
874 |
+
if n.op == 'output':
|
875 |
+
def get_node_type(a):
|
876 |
+
return a.type
|
877 |
+
n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
|
878 |
+
return n.type
|
879 |
+
|
880 |
+
else:
|
881 |
+
pass
|
882 |
+
|
883 |
+
def get_parameter(traced, target: str):
|
884 |
+
"""
|
885 |
+
Returns the parameter given by ``target`` if it exists,
|
886 |
+
otherwise throws an error.
|
887 |
+
|
888 |
+
See the docstring for ``get_submodule`` for a more detailed
|
889 |
+
explanation of this method's functionality as well as how to
|
890 |
+
correctly specify ``target``.
|
891 |
+
|
892 |
+
Args:
|
893 |
+
target: The fully-qualified string name of the Parameter
|
894 |
+
to look for. (See ``get_submodule`` for how to specify a
|
895 |
+
fully-qualified string.)
|
896 |
+
|
897 |
+
Returns:
|
898 |
+
torch.nn.Parameter: The Parameter referenced by ``target``
|
899 |
+
|
900 |
+
Raises:
|
901 |
+
AttributeError: If the target string references an invalid
|
902 |
+
path or resolves to something that is not an
|
903 |
+
``nn.Parameter``
|
904 |
+
"""
|
905 |
+
module_path, _, param_name = target.rpartition(".")
|
906 |
+
|
907 |
+
mod: torch.nn.Module = traced.get_submodule(module_path)
|
908 |
+
|
909 |
+
if not hasattr(mod, param_name):
|
910 |
+
raise AttributeError(mod._get_name() + " has no attribute `" + param_name + "`")
|
911 |
+
|
912 |
+
param: torch.nn.Parameter = getattr(mod, param_name)
|
913 |
+
|
914 |
+
return param
|
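As a quick orientation for the Refine pass above, here is a minimal, hypothetical usage sketch (the toy TwoLayer module is invented for illustration; the Refine import path follows the file above, and symbolic_relations assumes sympy is installed):

import torch
from torch.fx import symbolic_trace
from torch.fx.experimental.graph_gradual_typechecker import Refine

class TwoLayer(torch.nn.Module):
    # toy model used only for illustration
    def __init__(self):
        super().__init__()
        self.lin = torch.nn.Linear(4, 8)

    def forward(self, x):
        return torch.relu(self.lin(x))

traced = symbolic_trace(TwoLayer())
r = Refine(traced)
r.refine()              # walks the graph, collecting Equality constraints
print(r.constraints)    # may be empty if no refinement rule matched a node
r.symbolic_relations()  # optional: rewrite type variables as sympy symbols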
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/merge_matmul.py
ADDED
@@ -0,0 +1,171 @@
import torch

from torch.fx.node import Node
from torch.fx._symbolic_trace import symbolic_trace
from torch.fx.passes.tools_common import legalize_graph
import itertools
import operator

from typing import Dict, List, Tuple


def split_result_tensors(
    result: torch.Tensor, inputs: List[torch.Tensor]
) -> Tuple[torch.Tensor, ...]:
    """
    A free function for use in the merge_matmul graph transformation below that
    splits the output from a merged matmul into the individual results for each
    input tensor.

    Arguments:
        result: The merged matmul result tensor.
        inputs: The list of inputs that were merged into one for the matmul.

    Returns:
        List of matmul results for each input tensor.
    """
    # When fx tracer is running, x.shape[0] will be torch.fx.Attribute but we
    # need an int even when tracing
    if isinstance(result, torch.fx.Proxy):
        splits = [0] * len(inputs)
    else:
        splits = [x.shape[0] for x in inputs]

    return torch.split(result, splits)


def may_depend_on(a: Node, b: Node, search_depth: int = 6):
    """
    Determine if one node depends on another in a torch.fx.Graph.

    Arguments:
        a: The node that may have a dependency on b.
        b: The node that a may have a dependency on.
        search_depth: In the case of an indirect dependency, this function
                        searches up to this many nodes away in search of a
                        data dependency. If none is found, the function
                        makes the conservative assumption that there is a
                        dependency.

    Returns:
        True if a may depend on b, False if it definitely does not.
    """
    # Equivalence is defined as dependence.
    if a == b:
        return True

    # If a has no inputs, it cannot depend on b.
    if len(a.all_input_nodes) == 0:
        return False

    # If the search depth has been exhausted and no conclusion has been
    # reached, assume that there is a data dependency.
    if search_depth == 0:
        return True

    # Recursively check all inputs of a.
    for inp in a.all_input_nodes:
        if may_depend_on(inp, b, search_depth - 1):
            return True

    return False


def are_nodes_independent(nodes: List[Node]):
    """
    Check if all of the given nodes are pairwise-data independent.

    Arguments:
        nodes: The nodes to check for data dependencies.

    Returns:
        True if no pair in nodes has a data dependency, False otherwise.
    """
    # For each pair in nodes:
    for i, j in itertools.combinations(nodes, 2):
        if may_depend_on(i, j) or may_depend_on(j, i):
            return False

    return True


def merge_matmul(in_mod: torch.nn.Module):
    """
    A graph transformation that merges matrix multiplication operations that share the same right-hand
    side operand into one large matrix multiplication.
               ____      _________        _________
      ----    |    |    |         |     M|  A * C  |
     M|  A |  T|  B |  * K|   C   |  =    |---------|
      ---- ,  |    |    |         |     T|  B * C  |
       K       ----      ---------        ---------
                K            R                R
    """
    gm = symbolic_trace(in_mod)

    rhs_users: Dict[Node, List[Node]] = {}
    lhs_users: Dict[Node, List[Node]] = {}

    # Populate rhs_users and lhs_users - maps from LHS/RHS matrix multiply operands to
    # the matmul of which they are the LHS/RHS.
    for node in gm.graph.nodes:
        if node.op != "call_function" or node.target is not torch.matmul:
            continue

        lhs, rhs = node.args

        # TODO: Properly handle aliasing caused by get_attr. For now,
        # use the attribute name as the operand if the node is a
        # get_attr.
        lhs = lhs.target if lhs.op == "get_attr" else lhs
        rhs = rhs.target if rhs.op == "get_attr" else rhs

        lhs_users.setdefault(lhs, []).append(node)
        rhs_users.setdefault(rhs, []).append(node)

    for rhs, mms in rhs_users.items():
        # There must be at least two matmuls for a merge to make sense.
        if len(mms) < 2:
            continue

        # All matmuls must not depend on each other directly or indirectly
        # in order for the merge to be possible.
        if not are_nodes_independent(mms):
            continue

        lhs_vals = [mm.args[0] for mm in mms]

        # Merge the matmul.
        # Collect a list of LHS operands and the single RHS operand.
        lhs = [gm.graph.get_attr(l) if isinstance(l, str) else l for l in lhs_vals]
        rhs = gm.graph.get_attr(rhs) if isinstance(rhs, str) else rhs

        # Concatenate all the LHS operands.
        merge_mm_cat = gm.graph.call_function(torch.cat, (lhs,), {})

        # Multiply the concatenated LHS operands with the one RHS. This will produce
        # the same results as all the individual matmuls involving rhs in the original graph,
        # but they will all be concatenated together.
        merge_mm = gm.graph.call_function(torch.matmul, (merge_mm_cat, rhs,), {})

        # Split the result of the merged matmul using the shapes of the LHS operands
        # to ascertain how large each chunk should be.
        merge_mm_split = gm.graph.call_function(
            split_result_tensors, (merge_mm, lhs), {}
        )
        merge_mm_res = [
            gm.graph.call_function(operator.getitem, (merge_mm_split, out), {})
            for out in range(len(lhs))
        ]

        # Replace all uses of the original, unmerged matmuls with the equivalent split chunk from the merged matmul.
        for old, new in zip(mms, merge_mm_res):
            old.replace_all_uses_with(new)
            gm.graph.erase_node(old)

    # All of the new nodes created above were inserted at the end, so we need to sort
    # the nodes topologically to make sure all definitions precede uses.
    legalize_graph(gm)

    gm.recompile()
    gm.graph.lint()
    return gm
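To make the transformation above concrete, here is a minimal, hypothetical usage sketch (the ThreeMatmuls module is invented for illustration): three matmuls sharing one right-hand side collapse into a single cat -> matmul -> split chain.

import torch
from torch.fx.experimental.merge_matmul import merge_matmul

class ThreeMatmuls(torch.nn.Module):
    # toy module: three matmuls that share the same RHS parameter
    def __init__(self):
        super().__init__()
        self.rhs = torch.nn.Parameter(torch.randn(4, 5))

    def forward(self, a, b, c):
        return (torch.matmul(a, self.rhs),
                torch.matmul(b, self.rhs),
                torch.matmul(c, self.rhs))

gm = merge_matmul(ThreeMatmuls())
print(gm.graph)  # expect one torch.cat -> torch.matmul -> split_result_tensors chain
outs = gm(torch.randn(2, 4), torch.randn(3, 4), torch.randn(1, 4))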
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/meta_tracer.py
ADDED
@@ -0,0 +1,268 @@
import torch
import torch.fx
import warnings
import functools
import builtins

from typing import Any, Callable, Dict, Optional, Union

def embedding_override(self, input):
    return torch.empty(*input.shape, self.weight.shape[-1], device='meta')


def nn_layernorm_override(self, input):
    return input


def torch_relu_override(x):
    return x


def torch_nn_relu_override(self, x):
    return x


def functional_relu_override(x, inplace=False):
    assert not inplace, "don't support in-place functional.relu for MetaTensor analysis"
    return x


def torch_where_override(condition, x, y):
    # torch.where returns the broadcasted tensor of condition, x, and y,
    # so hack it by using addition
    return condition.to(device='meta') + x.to(device='meta') + y.to(device='meta')


def torch_abs_override(input, *, out=None):
    assert out is None, "don't support in-place abs for MetaTensor analysis"
    return input

manual_meta_overrides : Dict[Callable, Callable] = {
    torch.nn.Embedding: embedding_override,
    torch.nn.LayerNorm: nn_layernorm_override,
    torch.relu: torch_relu_override,
    torch.nn.functional.relu: functional_relu_override,
    torch.nn.ReLU: torch_nn_relu_override,
    torch.where: torch_where_override,
    torch.abs: torch_abs_override,
}
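Each entry in manual_meta_overrides swaps a real op for a shape-faithful stand-in that runs on 'meta' tensors. As a hypothetical sketch of extending the table (not part of the original file), a shape-preserving op can reuse the identity pattern above:

def torch_sigmoid_override(x):
    # sigmoid preserves shape and dtype, so the input meta tensor stands in
    return x

manual_meta_overrides[torch.sigmoid] = torch_sigmoid_override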
def gen_constructor_wrapper(target):
    @functools.wraps(target)
    def wrapper(*args, **kwargs):
        proxy = None

        def check_has_proxy(v):
            if isinstance(v, torch.fx.Proxy):
                nonlocal proxy
                proxy = v
        torch.fx.node.map_aggregate(args, check_has_proxy)
        torch.fx.node.map_aggregate(kwargs, check_has_proxy)

        if proxy is not None:
            return proxy.tracer.create_proxy('call_function', target, args, kwargs)
        else:
            return target(*args, **kwargs)
    return wrapper, target

class MetaProxy(torch.fx.Proxy):
    def install_tensor_meta(self, tensor_meta):
        self._tensor_meta = tensor_meta

    def size(self, dim=None):
        # note: compare against None so that dim=0 is handled correctly
        if hasattr(self, '_tensor_meta') and self._tensor_meta is not None:
            return self._tensor_meta.size(*([dim] if dim is not None else []))
        return self.tracer.create_proxy('call_method', 'size', (self, dim) if dim is not None else (self,), {})

    def dim(self):
        if hasattr(self, '_tensor_meta') and self._tensor_meta is not None:
            return self._tensor_meta.dim()
        return self.tracer.create_proxy('call_method', 'dim', (self,), {})

    @property
    def shape(self):
        if hasattr(self, '_tensor_meta') and self._tensor_meta is not None:
            return self._tensor_meta.shape
        return self.tracer.create_proxy('call_function', builtins.getattr, (self, 'shape'), {})

    @property
    def dtype(self):
        if hasattr(self, '_tensor_meta') and self._tensor_meta is not None:
            return self._tensor_meta.dtype
        return self.tracer.create_proxy('call_function', builtins.getattr, (self, 'dtype'), {})

    @property
    def device(self):
        # Hack so we can track when devices are used. During meta-tensor propagation,
        # replace these values with a constant 'meta'
        return MetaDeviceAttribute(self, 'device')

    def __getattr__(self, k):
        if k == '_tensor_meta':
            return self.__getattribute__(k)
        # note: not added to the graph yet, if this is a method call
        # we peephole optimize to the method invocation
        return MetaAttribute(self, k)

class MetaAttribute(MetaProxy):
    def __init__(self, root, attr: str):
        self.root = root
        self.attr = attr
        self.tracer = root.tracer
        self._node = None

    @property
    def node(self):
        # the node for attributes is added lazily, since most will just be method calls
        # which do not rely on the getitem call
        if self._node is None:
            self._node = self.tracer.create_proxy('call_function', getattr, (self.root, self.attr), {}).node
        return self._node

    def __call__(self, *args, **kwargs):
        return self.tracer.create_proxy('call_method', self.attr, (self.root,) + args, kwargs)

class MetaDeviceAttribute(MetaAttribute):
    pass

def proxys_to_metas(v):
    if isinstance(v, MetaDeviceAttribute):
        return 'meta'
    if isinstance(v, torch.fx.Proxy):
        assert isinstance(v, MetaProxy), f'Expected MetaProxy but got {type(v)}'
        assert hasattr(v, '_tensor_meta'), 'MetaProxy does not have an associated meta'
        return v._tensor_meta
    return v

class MetaTracer(torch.fx.Tracer):
    allow_insert_stateless_mods : bool = True

    _TORCH_METHODS_TO_PATCH = ['arange', 'zeros', 'ones', 'full_like', 'eye']

    def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None):
        rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn)

        if kind == 'placeholder' and target in self.meta_args:
            rv.install_tensor_meta(self.meta_args[target])
            return rv

        if target in self.orig_fns:
            # NOTE: tensor constructors in PyTorch define the `device` argument as
            # *kwargs-only*. That is why this works. If you add methods to
            # _TORCH_METHODS_TO_PATCH that do not define `device` as kwarg-only,
            # this will break and you will likely see issues where we cannot infer
            # the size of the output.
            if 'device' in kwargs:
                kwargs['device'] = 'meta'

        try:
            args_metas = torch.fx.node.map_aggregate(args, proxys_to_metas)
            kwargs_metas = torch.fx.node.map_aggregate(kwargs, proxys_to_metas)

            if kind == 'call_function':
                meta_target = manual_meta_overrides.get(target, target)
                meta_out = meta_target(*args_metas, **kwargs_metas)
            elif kind == 'call_method':
                meta_out = getattr(args_metas[0], target)(*args_metas[1:], **kwargs_metas)
            elif kind == 'call_module':
                assert hasattr(self, 'orig_forward')
                self._disable_module_getattr = True
                try:
                    mod = self.root.get_submodule(target)
                    mod_type = type(mod)
                    if mod_type in manual_meta_overrides:
                        meta_out = manual_meta_overrides[mod_type](mod, *args_metas, **kwargs_metas)
                    else:
                        meta_out = self.orig_forward(*args_metas, **kwargs_metas)
                finally:
                    self._disable_module_getattr = False
            elif kind == 'get_attr':
                self._disable_module_getattr = True
                try:
                    attr_itr = self.root
                    atoms = target.split('.')
                    for atom in atoms:
                        attr_itr = getattr(attr_itr, atom)
                    assert isinstance(attr_itr, torch.Tensor)
                    meta_out = attr_itr.to(device='meta')
                finally:
                    self._disable_module_getattr = False
            else:
                return rv

            # TODO
            assert isinstance(rv, torch.fx.Proxy), "don't support composite output yet"
            rv.install_tensor_meta(meta_out)
        except Exception as e:
            warnings.warn(f'Could not compute metadata for {kind} target {target}: {e}')

        return rv

    def getattr(self, attr, attr_val, parameter_proxy_cache):
        if getattr(self, '_disable_module_getattr', False):
            return attr_val
        else:
            return super().getattr(attr, attr_val, parameter_proxy_cache)

    def call_module(self, m, forward, args, kwargs):
        self.orig_forward = forward
        return super().call_module(m, forward, args, kwargs)

    def _insert_module_as_submodule(self, mod: torch.nn.Module) -> str:
        """
        Helper method which tries to insert a module that was not declared as a submodule.
        """
        idx = 0
        mod_name = mod.__class__.__name__.lower()
        path = f"{mod_name}_{idx}"
        while hasattr(self.root, path):
            # advance the index before recomputing the path so each candidate
            # name is only checked once
            idx += 1
            path = f"{mod_name}_{idx}"

        self.root.add_module(path, mod)
        return path

    def path_of_module(self, mod: torch.nn.Module) -> str:
        try:
            return super().path_of_module(mod)
        except NameError:
            if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0:
                path = self._insert_module_as_submodule(mod)
                self.prev_module = path
                return path
            raise

    def proxy(self, node):
        return MetaProxy(node, self)

    def trace(self, root, meta_args : Dict[str, torch.Tensor], concrete_args=None):
        assert isinstance(meta_args, dict)
        self.meta_args = meta_args

        self.patched_torch_methods = {
            target: gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH
        }
        self.orig_fns = set()

        for name, (wrapper, orig) in self.patched_torch_methods.items():
            setattr(torch, name, wrapper)
            self.orig_fns.add(orig)

        try:
            graph = super().trace(root, concrete_args)
            graph._tracer_extras = {'meta_args': meta_args}
            return graph
        finally:
            for name, (_, orig) in self.patched_torch_methods.items():
                setattr(torch, name, orig)


def symbolic_trace(root : Union[torch.nn.Module, Callable[..., Any]],
                   meta_args : Optional[Dict[str, torch.Tensor]] = None,
                   concrete_args: Optional[Dict[str, Any]] = None) -> torch.fx.GraphModule:
    tracer = MetaTracer()
    graph = tracer.trace(root, meta_args, concrete_args)
    name = root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__
    gm = torch.fx.GraphModule(tracer.root, graph, name)
    return gm
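A minimal usage sketch for the tracer above (the Embed module is a hypothetical example): meta_args supplies zero-storage 'meta' tensors standing in for real inputs, letting shape metadata propagate during tracing.

import torch
from torch.fx.experimental import meta_tracer

class Embed(torch.nn.Module):
    # toy module used only for illustration
    def __init__(self):
        super().__init__()
        self.emb = torch.nn.Embedding(10, 3)

    def forward(self, ids):
        return torch.relu(self.emb(ids))

meta_ids = torch.zeros(2, 4, dtype=torch.long, device='meta')
gm = meta_tracer.symbolic_trace(Embed(), meta_args={'ids': meta_ids})
print(gm.graph)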
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__init__.py
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc
ADDED
Binary file (1.9 kB)
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/z3_types.cpython-310.pyc
ADDED
Binary file (715 Bytes)
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py
ADDED
@@ -0,0 +1,557 @@
from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_sub, op_mul, op_div, \
    op_mod, op_gt, op_lt, op_neq, op_eq
from torch.fx.tensor_type import TensorType, Dyn


class Constraint:
    pass


class Conj(Constraint):
    def __init__(self, conjuncts):
        """
        :param conjuncts: Conjunction of constraints
        """
        self.conjucts = conjuncts

    def __eq__(self, other):
        if isinstance(other, Conj):
            return self.conjucts == other.conjucts
        else:
            return False

    def __repr__(self):
        return f'And({self.conjucts})'


class Disj(Constraint):
    def __init__(self, disjuncts):
        """
        :param disjuncts: Disjunction of constraints
        """
        self.disjuncts = disjuncts

    def __eq__(self, other):
        if isinstance(other, Disj):
            return self.disjuncts == other.disjuncts
        else:
            return False

    def __repr__(self):
        return f'Or({self.disjuncts})'


class Prod(Constraint):
    def __init__(self, products):
        """
        :param products: lists of dimensions to multiply
        """
        self.products = products

    def __eq__(self, other):
        if isinstance(other, Prod):
            return self.products == other.products
        else:
            return False

    def __repr__(self):
        return f'Product({self.products})'


class T(Constraint):
    """
    True
    """
    def __init__(self):
        pass

    def __eq__(self, other):
        return isinstance(other, T)

    def __repr__(self):
        return 'True'

class F(Constraint):
    """
    False
    """
    def __init__(self):
        pass

    def __eq__(self, other):
        return isinstance(other, F)

    def __repr__(self):
        return 'False'


class BinaryConstraint(Constraint):
    """
    Represents all binary operations
    """
    def __init__(self, lhs, rhs, op):
        """
        :param lhs: lhs of the constraint
        :param rhs: rhs of the constraint
        :param op: string representing the operation
        """
        self.lhs = lhs
        self.rhs = rhs
        self.op = op

    def __eq__(self, other):
        if isinstance(other, BinaryConstraint):
            return self.lhs == other.lhs and self.rhs == other.rhs and self.op == other.op
        else:
            return False

    def __repr__(self):
        return f'({self.lhs} {self.op} {self.rhs})'


class BinConstraintT(BinaryConstraint):
    """
    Binary constraints about tensors
    """
    def __init__(self, lhs, rhs, op):
        assert (isinstance(lhs, (TVar, TensorType, int)) or lhs == Dyn) and \
               (isinstance(rhs, (TVar, TensorType, int)) or rhs == Dyn)
        super().__init__(lhs, rhs, op)

    def __eq__(self, other):
        return super().__eq__(other)


class BinConstraintD(BinaryConstraint):
    """
    Binary constraints about dimensions
    """
    def __init__(self, lhs, rhs, op):
        assert is_algebraic_expression(lhs) or is_dim(lhs) or is_bool_expr(lhs)
        assert is_algebraic_expression(rhs) or is_dim(rhs) or is_bool_expr(rhs)

        super().__init__(lhs, rhs, op)

    def __eq__(self, other):
        return super().__eq__(other)


class TGreatestUpperBound(Constraint):
    """
    Greatest upper bound for tensors with dynamic type
    """
    def __init__(self, res, rhs1, rhs2):
        """
        :param res: tensor variable that stores the result of the output
        :param rhs1: tensor or tensor variable
        :param rhs2: tensor or tensor variable
        """
        self.res = res
        self.rhs1 = rhs1
        self.rhs2 = rhs2

    def __repr__(self):
        return f'{self.res} = {self.rhs1}⊔*{self.rhs2}'

    def __eq__(self, other):
        if isinstance(other, TGreatestUpperBound):
            return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2
        else:
            return False


class DGreatestUpperBound(Constraint):
    """
    Greatest upper bound for dimensions
    """
    def __init__(self, res, rhs1, rhs2):
        """
        :param res: Dimension variable to store the result
        :param rhs1: dimension variable 1
        :param rhs2: dimension variable 2
        """
        assert is_dim(res)
        assert is_dim(rhs1)
        assert is_dim(rhs2)

        self.res = res
        self.rhs1 = rhs1
        self.rhs2 = rhs2

    def __repr__(self):
        return f'{self.res} = {self.rhs1}⊔{self.rhs2}'

    def __eq__(self, other):
        if isinstance(other, DGreatestUpperBound):
            return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2
        else:
            return False


class CanReshape(Constraint):
    """
    can_reshape constraint
    """
    def __init__(self, src, target):
        """
        :param src: tensor variable
        :param target: tensor
        """
        self.src = src
        self.target = target

    def __repr__(self):
        return f'can-reshape({self.src}, {self.target})'

    def __eq__(self, other):
        if isinstance(other, CanReshape):
            return self.src == other.src and self.target == other.target
        else:
            return False


class IndexSelect(Constraint):

    def __init__(self, tensor_size, input_var, dim_replace, index, output):
        """
        Args:
            input_var: input to index_select
            tensor_size: tensor size we are considering
            dim_replace: the dimension of the output at "index"
            index: location of the dimensions to replace in the input
            output: variable to store the result
        """
        assert isinstance(input_var, TVar)
        assert isinstance(output, TVar)
        assert isinstance(dim_replace, DVar) or dim_replace == Dyn
        assert isinstance(index, int)

        self.input_var = input_var
        self.tensor_size = tensor_size
        self.dim_replace = dim_replace
        self.index = index
        self.output = output

    def __repr__(self):
        return f' {self.output} = ' \
               f'IndexSelect({self.input_var}, ' \
               f'tensor_size: {self.tensor_size}, ' \
               f'{self.dim_replace}, ' \
               f'{self.index})'

    def __eq__(self, other):
        if isinstance(other, IndexSelect):
            return self.tensor_size == other.tensor_size and \
                self.dim_replace == other.dim_replace and \
                self.index == other.index and \
                self.output == other.output and \
                self.input_var == other.input_var
        else:
            return False


class Transpose(Constraint):

    def __init__(self, tensor_size, input_var, index1, index2, output):
        """
        Args:
            tensor_size: current tensor size
            input_var: variable to hold input
            index1: dimension 1
            index2: dimension 2
            output: output that stores result
        """
        assert isinstance(input_var, TVar)
        assert isinstance(output, TVar)
        assert isinstance(index1, int)
        assert isinstance(index2, int)

        self.input_var = input_var
        self.tensor_size = tensor_size
        self.index1 = index1
        self.index2 = index2
        self.output = output

    def __repr__(self):
        return f' {self.output} = ' \
               f'Transpose({self.input_var}, ' \
               f'tensor_size: {self.tensor_size}, ' \
               f'{self.index1}, ' \
               f'{self.index2})'

    def __eq__(self, other):
        if isinstance(other, Transpose):
            return self.tensor_size == other.tensor_size and \
                self.index1 == other.index1 and \
                self.index2 == other.index2 and \
                self.output == other.output and \
                self.input_var == other.input_var
        else:
            return False


class GetItem(Constraint):

    def __init__(self, tensor_size, index, res, input_var):
        """
        Constraint for getting item given a tensor size
        :param tensor_size: actual number
        :param index: actual number representing the index
        :param res: dimension variable to carry the item we get
        :param input_var: a tensor variable from which we will get item
        """
        assert isinstance(res, DVar)

        self.res = res
        self.tensor_size = tensor_size
        self.index = index
        self.input_var = input_var

    def __repr__(self):
        return f' {self.res} = GetItem({self.input_var}, tensor_size: {self.tensor_size}, {self.index})'

    def __eq__(self, other):
        if isinstance(other, GetItem):
            return self.res == other.res and \
                self.tensor_size == other.tensor_size and \
                self.index == other.index and \
                self.input_var == other.input_var
        else:
            return False

class GetItemTensor(Constraint):

    def __init__(self, tensor_size, index_tuple, res, input_var):
        """
        Constraint for getting item given a tensor size.
        However, when the argument is a tuple, we will
        expect a tensor
        :param tensor_size: actual number representing the rank
        :param index_tuple: tuple for indexing
        :param res: tensor variable to carry the item we get
        :param input_var: a tensor variable from which we will get item
        """
        assert isinstance(res, TVar)

        self.res = res
        self.tensor_size = tensor_size
        self.index_tuple = index_tuple
        self.input_var = input_var

    def __repr__(self):
        return f' {self.res} = GetItemT({self.input_var}, tensor_size: {self.tensor_size}, {self.index_tuple})'

    def __eq__(self, other):
        if isinstance(other, GetItemTensor):
            return self.res == other.res and \
                self.tensor_size == other.tensor_size and \
                self.index_tuple == other.index_tuple and \
                self.input_var == other.input_var
        else:
            return False

class CalcConv(Constraint):

    def __init__(self, conv_result, input_var, c_out, kernel, padding, stride, dilation, matching_constraint_vars):
        """
        :param conv_result: the convolution result
        :param input_var: input to convolution
        :param c_out: output channel type
        :param kernel: kernel tuple
        """
        self.conv_result = conv_result
        self.input_var = input_var
        self.c_out = c_out
        self.kernel = kernel
        self.padding = padding
        self.stride = stride
        self.dilation = dilation
        self.matching_constraint = matching_constraint_vars

    def __repr__(self):
        return f'{self.conv_result} =' \
               f' calc-conv({self.input_var},' \
               f' {self.c_out}, {self.kernel}, ' \
               f'{self.padding}, {self.stride},' \
               f' {self.dilation})'

    def __eq__(self, other):
        if isinstance(other, CalcConv):
            return self.conv_result == other.conv_result and self.input_var == other.input_var and \
                self.c_out == other.c_out and self.kernel == other.kernel and self.padding == other.padding \
                and self.stride == other.stride and self.dilation == other.dilation \
                and self.matching_constraint == other.matching_constraint
        else:
            return False


class CalcMaxPool(Constraint):

    def __init__(self, maxpool_result, input_var, kernel, padding, stride, dilation, matching_constraint_vars):
        """
        :param maxpool_result: the result of maxpool
        :param input_var: input to convolution
        :param kernel: kernel tuple
        """
        self.maxpool_result = maxpool_result
        self.input_var = input_var
        self.kernel = kernel
        self.padding = padding
        self.stride = stride
        self.dilation = dilation
        self.matching_constraint = matching_constraint_vars

    def __repr__(self):
        return f'{self.maxpool_result} =' \
               f' calc-maxpool({self.input_var},' \
               f' {self.kernel}, ' \
               f'{self.padding}, {self.stride},' \
               f' {self.dilation})'

    def __eq__(self, other):
        if isinstance(other, CalcMaxPool):
            return self.maxpool_result == other.maxpool_result and self.input_var == other.input_var \
                and self.kernel == other.kernel and self.padding == other.padding \
                and self.stride == other.stride and self.dilation == other.dilation \
                and self.matching_constraint == other.matching_constraint
        else:
            return False


class ApplyBroadcasting(Constraint):
    def __init__(self, res1, res2, input1, input2):
        """
        :param res1: resulting tensor 1
        :param res2: resulting tensor 2
        :param input1: tensor variable 1
        :param input2: tensor variable 2
        """
        self.res1 = res1
        self.res2 = res2
        self.input1 = input1
        self.input2 = input2

    def __eq__(self, other):
        if isinstance(other, ApplyBroadcasting):
            return self.res1 == other.res1 \
                and self.res2 == other.res2 \
                and self.input1 == other.input1 \
                and self.input2 == other.input2
        else:
            return False

    def __repr__(self):
        return f'{self.res1}, {self.res2} = apply-broadcasting({self.input1}, {self.input2})'


class CalcProduct(Constraint):
    """
    Given correct dimensions, calculate the product for flatten accounting for Dyn
    """
    def __init__(self, start, end, flattened, dims_to_flatten):
        """
        :param start: start index
        :param end: end index
        :param flattened: variable to store the product
        :param dims_to_flatten: the type which we will flatten
        """
        assert isinstance(dims_to_flatten, list)
        assert isinstance(flattened, TVar)
        assert isinstance(start, int)
        assert isinstance(end, int)

        self.start = start
        self.end = end
        self.dims_to_flatten = dims_to_flatten
        self.flattened = flattened

    def __eq__(self, other):
        if isinstance(other, CalcProduct):
            return self.start == other.start and self.end == other.end and \
                self.dims_to_flatten == other.dims_to_flatten and self.flattened == other.flattened

        else:
            return False

    def __repr__(self):
        return f'{self.flattened} = CalcProduct({self.start}, {self.end}, {self.dims_to_flatten})'


class TVar:
    """
    Tensor variable with no tensor constructor
    """
    def __init__(self, tvar):
        """
        :param tvar: tensor variable
        """
        self.tvar = tvar

    def __repr__(self):
        return f'TV({self.tvar})'

    def __eq__(self, other):
        if isinstance(other, TVar):
            return self.tvar == other.tvar
        else:
            return False


class DVar:
    """
    Dimension variable
    """
    def __init__(self, c):
        """
        :param c: character or number
        """
        self.c = c

    def __repr__(self):
        return f'DV({self.c})'

    def __eq__(self, other):
        if isinstance(other, DVar):
            return self.c == other.c
        else:
            return False


class BVar:
    """
    Boolean variable
    """
    def __init__(self, c):
        """
        :param c: character or number
        """
        self.c = c

    def __repr__(self):
        return f'BV({self.c})'

    def __eq__(self, other):
        if isinstance(other, BVar):
            return self.c == other.c
        else:
            return False


def is_algebraic_expression(constraint):
    if isinstance(constraint, BinConstraintD):
        return constraint.op in [op_add, op_sub, op_div, op_mul, op_mod]
    else:
        return isinstance(constraint, Prod)


def is_bool_expr(constraint):
    if isinstance(constraint, BinConstraintD):
        return constraint.op in [op_gt, op_lt, op_neq, op_eq]
    else:
        return isinstance(constraint, (BVar, Conj, Disj))

def is_dim(d):
    return isinstance(d, (DVar, int)) or d == Dyn
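A small, hypothetical sketch of composing these classes by hand (the variable ids are arbitrary): constraints nest freely, so a disjunction can state that a tensor variable is either fully dynamic or a rank-2 tensor with a non-negative first dimension.

from torch.fx.experimental.migrate_gradual_types.constraint import (
    BinConstraintD, BinConstraintT, Conj, Disj, DVar, TVar)
from torch.fx.experimental.migrate_gradual_types.operation import op_eq, op_leq
from torch.fx.tensor_type import Dyn, TensorType

t1, d1 = TVar(1), DVar(2)
c = Disj([
    BinConstraintT(t1, Dyn, op_eq),                           # t1 is fully dynamic, or ...
    Conj([BinConstraintT(t1, TensorType((d1, Dyn)), op_eq),   # ... rank-2 with first dim d1
          BinConstraintD(0, d1, op_leq)]),                    # and d1 is non-negative
])
print(c)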
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_generator.py
ADDED
@@ -0,0 +1,1281 @@
import torch
import operator
import warnings
from typing import Callable, Dict, Iterable

from torch.fx._symbolic_trace import _assert_is_none
from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, CalcProduct, \
    Disj, TGreatestUpperBound, CalcMaxPool, CalcConv, Conj, BinConstraintT, CanReshape, BinConstraintD, GetItem, T, F, \
    TVar, DVar, GetItemTensor, IndexSelect, Transpose, DGreatestUpperBound
from torch.fx.experimental.migrate_gradual_types.operation import \
    op_eq, op_matching, op_consistency, op_leq, op_precision, op_gt, op_div, op_sub, op_neq, op_lt, op_add, op_mul
from torch.fx.node import Target, Node
from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar, gen_tvar, \
    gen_bvar

from torch.fx.tensor_type import Dyn, TensorType
from torch.nn.modules.conv import Conv2d
from torch.nn.modules.batchnorm import BatchNorm2d

_INFERENCE_RULES: Dict[Target, Callable] = {}

MAX_TENSOR_RANK = 4

def register_inference_rule(call_target):
    def register(fn):
        if call_target in _INFERENCE_RULES:
            raise RuntimeError(f'Inference rule already registered for {call_target}!')
        _INFERENCE_RULES[call_target] = fn
        return fn
    return register

+
def generate_flatten_constraints(start_dim, end_dim, input, flattened, n, counter):
|
34 |
+
d, counter = gen_tensor_dims(n, counter)
|
35 |
+
c1 = BinConstraintT(input, TensorType(d), op_eq)
|
36 |
+
start_dim = n if start_dim == -1 else abs(start_dim)
|
37 |
+
end_dim = n + end_dim + 1 if end_dim < 0 else end_dim + 1
|
38 |
+
c2 = CalcProduct(start_dim, end_dim, flattened, d)
|
39 |
+
nat_constraints = gen_nat_constraints(d)
|
40 |
+
return Conj([c1, c2, *nat_constraints]), counter
|
41 |
+
|
42 |
+
|
43 |
+
@register_inference_rule(getattr)
|
44 |
+
def get_attr_inference_rule(n: Node, symbols, constraints, counter):
|
45 |
+
"""
|
46 |
+
If the attribute is "device" then the tensor shape is preserved
|
47 |
+
"""
|
48 |
+
assert isinstance(n.args[0], Node)
|
49 |
+
assert isinstance(n.args[1], str)
|
50 |
+
output, counter = gen_tvar(counter)
|
51 |
+
symbols[n] = output
|
52 |
+
|
53 |
+
input = symbols[n.args[0]]
|
54 |
+
attr = n.args[1]
|
55 |
+
|
56 |
+
if attr == 'device':
|
57 |
+
return [BinConstraintT(input, output, op_eq)], counter
|
58 |
+
else:
|
59 |
+
raise NotImplementedError('Not yet implemented')
|
60 |
+
|
61 |
+
@register_inference_rule(torch.bmm)
|
62 |
+
def bmm_inference_rule(n: Node, symbols, constraints, counter):
|
63 |
+
"""
|
64 |
+
Constraints that match the input to a size 3 tensor
|
65 |
+
and switch the dimensions according to the rules
|
66 |
+
of batch multiplication
|
67 |
+
"""
|
68 |
+
assert isinstance(n.args[0], Node)
|
69 |
+
assert isinstance(n.args[1], Node)
|
70 |
+
|
71 |
+
bmm_output, counter = gen_tvar(counter)
|
72 |
+
symbols[n] = bmm_output
|
73 |
+
|
74 |
+
bmm_input1 = symbols[n.args[0]]
|
75 |
+
bmm_input2 = symbols[n.args[1]]
|
76 |
+
|
77 |
+
dims_input1, counter = gen_tensor_dims(3, counter)
|
78 |
+
dims_input2, counter = gen_tensor_dims(3, counter)
|
79 |
+
|
80 |
+
inputs_dyn = Conj([BinConstraintT(bmm_input1, Dyn, op_eq),
|
81 |
+
BinConstraintT(bmm_input2, Dyn, op_eq),
|
82 |
+
BinConstraintT(bmm_output, Dyn, op_eq)])
|
83 |
+
|
84 |
+
input1_dyn = Conj([BinConstraintT(bmm_input1, Dyn, op_eq),
|
85 |
+
BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
|
86 |
+
BinConstraintT(bmm_output, TensorType([dims_input2[0], Dyn, dims_input2[2]]), op_eq)])
|
87 |
+
|
88 |
+
input2_dyn = Conj([BinConstraintT(bmm_input2, Dyn, op_eq),
|
89 |
+
BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
|
90 |
+
BinConstraintT(bmm_output, TensorType([dims_input1[0], dims_input1[1], Dyn]), op_eq)])
|
91 |
+
|
92 |
+
consistency_constraints = [BinConstraintD(dims_input1[0], dims_input2[0], op_consistency)]
|
93 |
+
|
94 |
+
batch_size, counter = gen_dvar(counter)
|
95 |
+
|
96 |
+
inputs_are_tensors = Conj([BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
|
97 |
+
BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
|
98 |
+
BinConstraintT(bmm_output, TensorType([batch_size, dims_input1[1], dims_input2[2]]), op_eq),
|
99 |
+
*consistency_constraints, DGreatestUpperBound(batch_size, dims_input1[0], dims_input2[0])])
|
100 |
+
|
101 |
+
return [Disj([inputs_dyn, input1_dyn, input2_dyn, inputs_are_tensors])], counter
|
102 |
+
|
103 |
+
|
@register_inference_rule("index_select")
def index_select_inference_rule(n: Node, symbols, constraints, counter):
    """
    We constrain the second argument to a vector or Dyn.
    The output replaces the input dimension at the position given
    by the index (first argument) with the size of that vector
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], int)
    assert isinstance(n.args[2], Node)

    index_select, counter = gen_tvar(counter)
    symbols[n] = index_select

    dims, counter = gen_tensor_dims(1, counter)

    # equality constraint
    is_size_1 = BinConstraintT(symbols[n.args[2]], TensorType(dims), op_eq)
    is_dyn = BinConstraintT(symbols[n.args[2]], Dyn, op_eq)

    c2 = Conj([is_size_1, Disj([IndexSelect(i + 1, symbols[n.args[0]], dims[0], n.args[1], index_select)
                                for i in range(MAX_TENSOR_RANK)])])
    c3 = Conj([is_dyn, Disj([IndexSelect(i + 1, symbols[n.args[0]], Dyn, n.args[1], index_select)
                             for i in range(MAX_TENSOR_RANK)])])

    return [Disj([c2, c3])], counter

@register_inference_rule("expand")
def expand_inference_rule(n: Node, symbols, constraints, counter):
    """
    We generate the same constraints as we do for tensor addition, but we
    constrain the rank of this expression to be equal to len(n.args[1:]) so
    that only those cases get considered for the output
    """
    assert isinstance(n.args[0], Node)

    # define the output for expand
    expand, counter = gen_tvar(counter)
    symbols[n] = expand

    # since we do not have two nodes here, we will construct an argument variable
    e1 = symbols[n.args[0]]
    e2, counter = gen_tvar(counter)

    e2_nat_constraints = []
    for arg in n.args[1:]:
        assert isinstance(arg, (Node, int))
        if isinstance(arg, Node):
            assert isinstance(symbols[arg], DVar)
            e2_nat_constraints.append(BinConstraintD(0, symbols[arg], op_leq))

    e2_constraint = BinConstraintT(e2, TensorType([arg if isinstance(arg, int) else symbols[arg] for arg in n.args[1:]]), op_eq)

    constraints, counter = gen_broadcasting_constraints(e1, e2, symbols, counter, expand)

    # constrain the output size
    dims, counter = gen_tensor_dims(len(n.args[1:]), counter)
    nat_constraints = gen_nat_constraints(dims)
    c = [BinConstraintT(expand, TensorType(dims), op_eq), *nat_constraints, e2_constraint, *e2_nat_constraints]
    constraints += c

    return constraints, counter

@register_inference_rule(torch.nn.functional.gelu)
@register_inference_rule(torch.nn.functional.dropout)
@register_inference_rule(torch.nn.functional.softmax)
@register_inference_rule("detach")
@register_inference_rule("to")
@register_inference_rule("int")
@register_inference_rule("long")
@register_inference_rule("contiguous")
@register_inference_rule(torch.ones)
@register_inference_rule(torch.zeros)
def equality_inference_rule(n: Node, symbols, constraints, counter):
    """
    We generate the constraint: input = output.
    When the argument is instead a size (dimension variables or a tuple),
    the output is constrained to a tensor of that size.
    """
    output, counter = gen_tvar(counter)
    symbols[n] = output

    if isinstance(n.args[0], Node):
        input = symbols[n.args[0]]
        if isinstance(input, TVar):
            return [BinConstraintT(input, output, op_eq)], counter

        # then we have dimension variables
        else:
            for arg in n.args:
                assert isinstance(symbols[arg], DVar)
            my_size = [symbols[arg] for arg in n.args]
            return [BinConstraintT(output, TensorType(my_size), op_eq)], counter

    elif isinstance(n.args[0], tuple):
        # then the tuple is the size
        assert len(n.args[0]) <= 4
        my_size = [symbols[arg] for arg in n.args[0]]
        return [BinConstraintT(output, TensorType(my_size), op_eq)], counter
    else:
        raise NotImplementedError('Method not yet implemented')

@register_inference_rule("transpose")
def transpose_inference_rule(n: Node, symbols, constraints, counter):
    """
    Can be considered as a sequence of two index selects, so we generate constraints accordingly
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], int)
    assert isinstance(n.args[2], int)

    output, counter = gen_tvar(counter)
    symbols[n] = output

    from_arg = symbols[n.args[0]]
    assert isinstance(from_arg, TVar)

    # input and output are dyn
    is_dyn = Conj([BinConstraintT(from_arg, Dyn, op_eq), BinConstraintT(output, Dyn, op_eq)])

    # or input is a tensor and we actually do the replacement
    c3 = Disj([Transpose(i + 1, from_arg, n.args[1], n.args[2], output) for i in range(MAX_TENSOR_RANK)])

    return [Disj([is_dyn, c3])], counter

@register_inference_rule("type_as")
def type_inference_rule(n: Node, symbols, constraints, counter):
    """
    We constrain the input to be consistent with the target
    and equate the output with the target
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], Node)

    output, counter = gen_tvar(counter)
    symbols[n] = output

    from_arg = symbols[n.args[0]]
    to_arg = symbols[n.args[1]]

    assert isinstance(from_arg, TVar)
    assert isinstance(to_arg, TVar)

    return [BinConstraintT(from_arg, to_arg, op_consistency),
            BinConstraintT(output, to_arg, op_eq)], counter

@register_inference_rule("masked_fill_")
def masked_fill_inference_rule(n: Node, symbols, constraints, counter):
    """
    Similar to addition. For now we implement the constraints when
    the argument is a boolean tensor. There is also a case for when
    it is a condition. We will leave this out for now.
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], Node)

    # We will retrieve the type variables from the symbol table
    # and confirm they are tensor variables
    e1 = symbols[n.args[0]]
    e2 = symbols[n.args[1]]

    if isinstance(e1, TVar) and isinstance(e2, TVar):
        masked_fill_tensor, counter = gen_tvar(counter)
        symbols[n] = masked_fill_tensor
        return gen_broadcasting_constraints(e1, e2, symbols, counter, masked_fill_tensor)
    else:
        raise NotImplementedError('Not yet implemented')

@register_inference_rule(torch.nn.functional.embedding)
def embedding_inference_rule_functional(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], Node)

    embedding_dim_weights = symbols[n.args[1]]

    # will treat this as a static shape. So we will not use matching.
    weight_dims, counter = gen_tensor_dims(2, counter)
    equality_constraint = BinConstraintT(embedding_dim_weights, TensorType(weight_dims), op_eq)
    embedding_dim = weight_dims[1]
    constraints, counter = gen_embedding_rules(n, symbols, embedding_dim, counter)
    return [equality_constraint] + constraints, counter


@register_inference_rule(torch.nn.modules.sparse.Embedding)
def embedding_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    The output shape differs from the input shape in the last dimension
    """
    assert isinstance(n.args[0], Node)
    return gen_embedding_rules(n, symbols, module_instance.embedding_dim, counter)


def gen_embedding_rules(n: Node, symbols, embedding_dim, counter):

    embedding_output, counter = gen_tvar(counter)
    symbols[n] = embedding_output
    embedding_input = symbols[n.args[0]]

    input_dyn = BinConstraintT(embedding_input, Dyn, op_eq)
    output_dyn = BinConstraintT(embedding_output, Dyn, op_eq)

    c1 = Conj([input_dyn, output_dyn])
    c2 = []

    for i in range(1, MAX_TENSOR_RANK):
        new_dims, counter = gen_tensor_dims(i, counter)
        nat_constraints = gen_nat_constraints(new_dims)

        # we consider all tensor sizes and append embedding_dim to the end of the output dimension in all cases
        c_tensor_i = Conj([BinConstraintT(embedding_input, TensorType(new_dims), op_eq),
                           BinConstraintT(embedding_output, TensorType(new_dims + [embedding_dim]), op_eq)] +
                          nat_constraints)
        c2.append(c_tensor_i)

    return [Disj([c1, Disj(c2)])], counter

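# Example (added commentary, illustrative shapes only): for an Embedding
# module with embedding_dim = 8 and an input of type TensorType([b, s]),
# the i = 2 disjunct above forces the output to TensorType([b, s, 8]);
# every other rank up to MAX_TENSOR_RANK - 1 gets its own analogous disjunct.
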
@register_inference_rule(torch.tensor)
def tensor_inference_rule(n: Node, symbols, constraints, counter):
    """
    If the tensor is a scalar, we will skip it since we
    do not support scalars yet. We will add support in the future
    if it's needed. For our examples so far, scalars are not needed.
    """
    return [], counter

@register_inference_rule("reshape")
@register_inference_rule("view")
def view_inference_rule(n: Node, symbols, constraints, counter):
    """
    Similar to reshape but with an extra condition on the strides
    """
    assert isinstance(n.args[0], Node)

    # generate the new variable
    my_view, counter = gen_tvar(counter)
    symbols[n] = my_view

    src_var = symbols[n.args[0]]
    t2 = [symbols[elem] if isinstance(elem, Node) else elem for elem in n.args[1:]]  # target shape
    t2_type = []
    num_constraints = []

    for t in t2:
        if t == -1:
            var, counter = gen_dvar(counter)
            t2_type.append(var)
            num_constraints.append(BinConstraintD(var, Dyn, op_neq))

        else:
            num_constraints.append(BinConstraintD(t, Dyn, op_neq))
            t2_type.append(t)

    t2_type = TensorType(t2_type)  # type: ignore[assignment]

    c1 = BinConstraintT(my_view, t2_type, op_eq)
    c2 = CanReshape(src_var, t2_type)

    # TODO: add the extra check mentioned here:
    # https://pytorch.org/docs/stable/generated/torch.Tensor.view.html#torch.Tensor.view

    return [c1, c2] + num_constraints, counter  # type: ignore[operator]

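# Example (added commentary): for a call like `x.view(2, -1)` the target type
# above becomes TensorType([2, d]) for a fresh dimension variable d that is
# constrained to be != Dyn, together with CanReshape(src_var, TensorType([2, d]))
# requiring the element counts to match.
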
@register_inference_rule("size")
def size_inference_rule(n: Node, symbols, constraints, counter):
    """
    The constraint is just lhs = rhs.
    Ex: size = input_ids.size()
    """

    if len(n.args) == 1:
        # generate the new variable
        size, counter = gen_tvar(counter)
        symbols[n] = size
        input = symbols[n.args[0]]
        c = BinConstraintT(input, size, op_eq)
        return [c], counter

    elif len(n.args) == 2:
        # TODO: review this rule; should input = dyn; output = dyn be included here?
        if isinstance(n.args[1], int):
            # generate the new variable
            size_index, counter = gen_dvar(counter)
            symbols[n] = size_index
            input = symbols[n.args[0]]
            c2 = [GetItem(i + 1, n.args[1], size_index, input) for i in range(MAX_TENSOR_RANK)]
            c3 = BinConstraintD(0, size_index, op_leq)

            input_dyn = BinConstraintT(input, Dyn, op_eq)
            output_dyn = BinConstraintD(size_index, Dyn, op_eq)
            c1 = Conj([input_dyn, output_dyn])

            return [Disj([c1, Conj([Disj(c2), c3])])], counter

        else:
            raise NotImplementedError

    else:
        raise NotImplementedError

def range_check(i, n):
    """
    Checks if an index i is within range of a size n list
    Args:
        i: index
        n: list size

    Returns: Boolean
    """
    if i >= 0:
        return T() if i < n else F()
    else:
        # negative indices are valid down to -n, matching Python semantics
        return T() if i >= -n else F()

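# A few checks (added for illustration; T and F are the constant constraints
# imported from the constraint module, which compare structurally):
def _range_check_examples():
    # positive indices must be strictly below the size
    assert range_check(2, 3) == T() and range_check(3, 3) == F()
    # negative indices are valid down to -n
    assert range_check(-3, 3) == T() and range_check(-4, 3) == F()
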
@register_inference_rule(torch.cumsum)
def cumsum_inference_rule(n: Node, symbols, constraints, counter):
    """
    Input and output shapes should be equal.
    We should verify that the index is valid
    """
    assert isinstance(n.args[0], Node)
    arg_1 = n.args[1] if len(n.args) > 1 else n.kwargs["dim"]
    assert isinstance(arg_1, int)

    output, counter = gen_tvar(counter)
    symbols[n] = output
    input = symbols[n.args[0]]

    input_dyn = BinConstraintT(input, Dyn, op_eq)
    output_dyn = BinConstraintT(output, Dyn, op_eq)
    c1 = Conj([input_dyn, output_dyn])
    c2 = []
    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims, counter = gen_tensor_dims(i, counter)

        nat_constraints = gen_nat_constraints(new_dims)

        c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims), op_eq),
                           BinConstraintT(output, TensorType(new_dims), op_eq)] +
                          [range_check(arg_1, i)] + nat_constraints)

        c2.append(c_tensor_i)
    dyn_or_tensor = Disj([c1, Disj(c2)])
    return [dyn_or_tensor], counter


@register_inference_rule(_assert_is_none)
def assert_inference_rule(n: Node, symbols, constraints, counter):
    assert len(n.users) == 0
    return [], counter

@register_inference_rule(operator.getitem)
def getitem_inference_rule(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], Node)

    # dimension output case
    if isinstance(n.args[1], int):
        # create and store the new dimension variable
        get_item_output, counter = gen_dvar(counter)
        symbols[n] = get_item_output

        # retrieve arg variables
        get_item_arg = symbols[n.args[0]]
        assert isinstance(get_item_arg, TVar)

        # if the input is dynamic, we accept any index and return
        # a dynamic dimension as output
        input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq)
        output_dyn = BinConstraintD(get_item_output, Dyn, op_eq)
        c1 = Conj([input_dyn, output_dyn])

        # if the input is a tensor,
        # generate a getItem constraint which will be expanded based on the
        # tensor dimension.
        c2 = [GetItem(i + 1, n.args[1], get_item_output, get_item_arg) for i in range(MAX_TENSOR_RANK)]

        # since the output is a dimension, we make sure it's a natural number
        # added as a conjunction to the disjunction of c2
        c3 = BinConstraintD(0, get_item_output, op_leq)
        return [Disj([c1, Conj([Disj(c2), c3])])], counter

    # tensor output case
    elif isinstance(n.args[1], tuple):
        # create and store the new tensor variable
        get_item_output, counter = gen_tvar(counter)
        symbols[n] = get_item_output

        # retrieve arg variables
        if n.args[0] in symbols:
            get_item_arg = symbols[n.args[0]]
            assert isinstance(get_item_arg, TVar)

            input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq)
            output_dyn = BinConstraintT(get_item_output, Dyn, op_eq)  # type: ignore[assignment]
            c1 = Conj([input_dyn, output_dyn])

            c2 = [GetItemTensor(i + 1, n.args[1], get_item_output, get_item_arg)  # type: ignore[misc]
                  for i in range(MAX_TENSOR_RANK)]
        else:
            # TODO: we should figure out why there is a key-error here.
            return [], counter

        return [Disj([c1, *c2])], counter

    else:
        raise RuntimeError('Method not yet implemented')

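# Example (added commentary): `t.size()[0]` reaches the dimension-output case
# above with index 0; the result is either (input = Dyn and output = Dyn) or
# one GetItem(rank, 0, output, input) disjunct per possible rank, conjoined
# with 0 <= output so the dimension stays a natural number.
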
@register_inference_rule(operator.gt)
def gt_inference_rule(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], (Node, int))
    assert isinstance(n.args[1], (Node, int))

    # We make sure this node will not be used again. We do not
    # generate a constraint about that node. Only about the operands.

    e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
    e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(e1, TVar) and isinstance(e2, TVar):
            gt_tensor, counter = gen_tvar(counter)
            symbols[n] = gt_tensor
            return gen_broadcasting_constraints(e1, e2, symbols, counter, gt_tensor)

        elif isinstance(e1, DVar) and isinstance(e2, DVar):
            # This is meant to be used for flow analysis only
            gt_constraint = BinConstraintD(e1, e2, op_gt)

            my_gt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise RuntimeError('Sort Mismatch')

    elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
        if isinstance(e1, DVar):
            # This is meant to be used for flow analysis only
            gt_constraint = BinConstraintD(e1, e2, op_gt)

            my_gt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
            return [equality_constraint], counter

        elif isinstance(e1, TVar) and isinstance(e2, int):
            # then we made the wrong assumption about the argument being a tensor
            # so we should fix the assumption
            warnings.warn(f'Made the wrong assumption for node {n}. Correctness not guaranteed.')

            new_e1, counter = gen_dvar(counter)
            symbols[n.args[0]] = new_e1

            gt_constraint = BinConstraintD(new_e1, e2, op_gt)

            my_gt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise NotImplementedError('Method not yet implemented')

    else:
        raise NotImplementedError('Method not yet implemented')

@register_inference_rule(operator.eq)
def eq_inference_rule(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], (Node, int))
    assert isinstance(n.args[1], (Node, int))

    e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
    e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(e1, TVar) and isinstance(e2, TVar):
            eq_tensor, counter = gen_tvar(counter)
            symbols[n] = eq_tensor
            return gen_broadcasting_constraints(e1, e2, symbols, counter, eq_tensor)

        elif isinstance(e1, DVar) and isinstance(e2, DVar):
            # This is meant to be used for flow analysis only
            eq_constraint = BinConstraintD(e1, e2, op_eq)

            my_eq, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_eq, eq_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise RuntimeError('Sort Mismatch')

    elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
        if isinstance(e1, DVar):
            # This is meant to be used for flow analysis only
            eq_constraint = BinConstraintD(e1, e2, op_eq)

            my_eq, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_eq, eq_constraint, op_eq)
            return [equality_constraint], counter
        else:
            raise NotImplementedError('Method not yet implemented')
    else:
        raise NotImplementedError('Method not yet implemented')

@register_inference_rule(operator.ne)
def neq_inference_rule(n: Node, symbols, constraints, counter):
    """
    Translates to inconsistent in gradual types.
    To prove inequality, we should prove that
    tensors are either different sizes or
    disagree on at least one dimension.

    This is a WIP: it currently works when the condition is false;
    we are working on making this operation work when the
    condition is true as well.
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], tuple)

    # implementing for size 3 and 4
    if len(n.args[1]) == 3:

        assert isinstance(n.args[1][0], (Node, int))
        assert isinstance(n.args[1][1], (Node, int))
        assert isinstance(n.args[1][2], (Node, int))

        lhs = symbols[n.args[0]]

        b, counter = gen_tensor_dims(4, counter)
        input_is_size3 = BinConstraintT(lhs, TensorType([b[0], b[1], b[2]]), op_eq)

        d1 = n.args[1][0] if isinstance(n.args[1][0], int) else symbols[n.args[1][0]]
        d2 = n.args[1][1] if isinstance(n.args[1][1], int) else symbols[n.args[1][1]]
        d3 = n.args[1][2] if isinstance(n.args[1][2], int) else symbols[n.args[1][2]]

        # dimensions not equal
        neq_1 = BinConstraintD(d1, b[0], op_neq)
        neq_2 = BinConstraintD(d2, b[1], op_neq)
        neq_3 = BinConstraintD(d3, b[2], op_neq)

        # dimensions inconsistent
        dims_inconsistent1 = Conj([BinConstraintD(d1, Dyn, op_neq), BinConstraintD(b[0], Dyn, op_neq), neq_1])
        dims_inconsistent2 = Conj([BinConstraintD(d2, Dyn, op_neq), BinConstraintD(b[1], Dyn, op_neq), neq_2])
        dims_inconsistent3 = Conj([BinConstraintD(d3, Dyn, op_neq), BinConstraintD(b[2], Dyn, op_neq), neq_3])

        dims_inconsistent = Disj([dims_inconsistent1, dims_inconsistent2, dims_inconsistent3])

        # we are covering size 3 and 4 only for now
        ne_constraint = Conj([input_is_size3, dims_inconsistent])

        my_ne, counter = gen_bvar(counter)
        equality_constraint = BinConstraintD(my_ne, ne_constraint, op_eq)

    elif len(n.args[1]) == 4:

        assert isinstance(n.args[1][0], (Node, int))
        assert isinstance(n.args[1][1], (Node, int))
        assert isinstance(n.args[1][2], (Node, int))
        assert isinstance(n.args[1][3], (Node, int))

        lhs = symbols[n.args[0]]

        b1, counter = gen_dvar(counter)
        b2, counter = gen_dvar(counter)
        b3, counter = gen_dvar(counter)
        b4, counter = gen_dvar(counter)

        input_is_size4 = BinConstraintT(lhs, TensorType([b1, b2, b3, b4]), op_eq)

        d1 = n.args[1][0] if isinstance(n.args[1][0], int) else symbols[n.args[1][0]]
        d2 = n.args[1][1] if isinstance(n.args[1][1], int) else symbols[n.args[1][1]]
        d3 = n.args[1][2] if isinstance(n.args[1][2], int) else symbols[n.args[1][2]]
        d4 = n.args[1][3] if isinstance(n.args[1][3], int) else symbols[n.args[1][3]]

        # dimensions not equal
        neq_1 = BinConstraintD(d1, b1, op_neq)
        neq_2 = BinConstraintD(d2, b2, op_neq)
        neq_3 = BinConstraintD(d3, b3, op_neq)
        neq_4 = BinConstraintD(d4, b4, op_neq)

        # dimensions inconsistent
        dims_inconsistent1 = Conj([BinConstraintD(d1, Dyn, op_neq), BinConstraintD(b1, Dyn, op_neq), neq_1])
        dims_inconsistent2 = Conj([BinConstraintD(d2, Dyn, op_neq), BinConstraintD(b2, Dyn, op_neq), neq_2])
        dims_inconsistent3 = Conj([BinConstraintD(d3, Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq), neq_3])
        dims_inconsistent4 = Conj([BinConstraintD(d4, Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq), neq_4])

        dims_inconsistent = Disj([dims_inconsistent1, dims_inconsistent2, dims_inconsistent3, dims_inconsistent4])

        ne_constraint = Conj([input_is_size4, dims_inconsistent])

        my_ne, counter = gen_bvar(counter)

        equality_constraint = BinConstraintD(my_ne, ne_constraint, op_eq)

    else:
        raise NotImplementedError('Method not yet implemented')

    return [equality_constraint], counter

@register_inference_rule(operator.lt)
def lt_inference_rule(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], (Node, int))
    assert isinstance(n.args[1], (Node, int))

    # We make sure this node will not be used again. We do not
    # generate a constraint about that node. Only about the operands.

    e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
    e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(e1, TVar) and isinstance(e2, TVar):
            lt_tensor, counter = gen_tvar(counter)
            symbols[n] = lt_tensor
            return gen_broadcasting_constraints(e1, e2, symbols, counter, lt_tensor)

        elif isinstance(e1, DVar) and isinstance(e2, DVar):
            # This is meant to be used for flow analysis only
            lt_constraint = BinConstraintD(e1, e2, op_lt)

            my_lt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_lt, lt_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise RuntimeError('Sort Mismatch')

    elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
        if isinstance(e1, DVar):
            # This is meant to be used for flow analysis only
            lt_constraint = BinConstraintD(e1, e2, op_lt)

            my_lt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_lt, lt_constraint, op_eq)
            return [equality_constraint], counter
        else:
            raise NotImplementedError('Method not yet implemented')

    else:
        raise NotImplementedError('Method not yet implemented')

@register_inference_rule(torch.full)
def full_inference_rule(n: Node, symbols, constraints, counter):
    full, counter = gen_tvar(counter)
    symbols[n] = full
    res = []

    assert isinstance(n.args[0], Iterable)
    for arg in n.args[0]:
        dim = arg if isinstance(arg, int) else symbols[arg]
        res.append(dim)
    c = BinConstraintT(full, TensorType(list(res)), op_eq)  # type: ignore[arg-type]
    return [c], counter


# TODO normalize index
@register_inference_rule(torch.arange)
def arange_inference_rule(n: Node, symbols, constraints, counter):
    start = 0
    step = 1

    if len(n.args) == 1:
        end = symbols[n.args[0]]
    else:
        raise NotImplementedError('Not yet implemented')

    # int((end - start) / step)
    d1, counter = gen_dvar(counter)
    size_constraint = BinConstraintD(d1, BinConstraintD(BinConstraintD(end, start, op_sub), step, op_div), op_eq)
    arange, counter = gen_tvar(counter)
    symbols[n] = arange

    # either at least one parameter is Dyn, in which case the size is Dyn...
    c1 = Disj([BinConstraintD(end, Dyn, op_eq),
               BinConstraintD(start, Dyn, op_eq),
               BinConstraintD(step, Dyn, op_eq)])
    c2 = BinConstraintD(d1, Dyn, op_eq)
    both_dyn = Conj([c1, c2])

    # ...or all parameters are numbers, in which case the size constraint applies
    c11 = Conj([BinConstraintD(end, Dyn, op_neq),
                BinConstraintD(start, Dyn, op_neq),
                BinConstraintD(step, Dyn, op_neq)])
    c22 = BinConstraintD(d1, Dyn, op_neq)
    both_numbers = Conj([c11, c22, size_constraint])

    return [BinConstraintT(arange, TensorType([d1]), op_eq), Disj([both_dyn, both_numbers])], counter

def gen_broadcasting_constraints(e1, e2, symbols, counter, output_var):
    # additional vars that don't correspond to expressions
    e11, counter = gen_tvar(counter)
    e22, counter = gen_tvar(counter)

    # generate constraints
    c1 = TGreatestUpperBound(output_var, e11, e22)
    c2 = ApplyBroadcasting(e11, e22, e1, e2)
    c3 = BinConstraintT(e11, e22, op_consistency)
    return [c1, c2, c3], counter

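# Usage sketch (added for illustration): broadcasting two tensor variables
# into an output variable yields exactly three constraints: the broadcast
# adjustment, consistency between the adjusted variables, and the output as
# their greatest upper bound. The empty dict stands in for the symbol table,
# which this helper does not consult.
def _broadcasting_sketch(counter=0):
    e1, counter = gen_tvar(counter)
    e2, counter = gen_tvar(counter)
    out, counter = gen_tvar(counter)
    return gen_broadcasting_constraints(e1, e2, {}, counter, out)
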
@register_inference_rule(operator.mul)
@register_inference_rule(torch.ne)
@register_inference_rule("ne")
@register_inference_rule(torch.add)
@register_inference_rule(operator.add)
def broadcasting_inference_rule(n: Node, symbols, constraints, counter):

    op_code = None
    if n.target == operator.add or n.target == torch.add:
        op_code = op_add
    elif n.target == operator.mul:
        op_code = op_mul

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(symbols[n.args[0]], TVar) and isinstance(symbols[n.args[1]], TVar):
            my_output, counter = gen_tvar(counter)
            symbols[n] = my_output
            e1 = symbols[n.args[0]]
            e2 = symbols[n.args[1]]

            return gen_broadcasting_constraints(e1, e2, symbols, counter, my_output)
        else:
            raise NotImplementedError('Method not yet implemented')

    elif isinstance(n.args[0], Node) and isinstance(n.args[1], (int, float)):
        if isinstance(symbols[n.args[0]], TVar):
            my_output, counter = gen_tvar(counter)
            symbols[n] = my_output
            e1 = symbols[n.args[0]]
            return [BinConstraintT(my_output, e1, op_eq)], counter
        elif isinstance(symbols[n.args[0]], DVar):
            my_output, counter = gen_dvar(counter)
            symbols[n] = my_output
            e1 = symbols[n.args[0]]

            # we will propagate the runtime value here since this is regular addition
            c = Conj([BinConstraintD(my_output, BinConstraintD(e1, n.args[1], op_code), op_eq),
                      BinConstraintD(0, my_output, op_leq)])
            return [c], counter

    elif isinstance(n.args[1], Node) and isinstance(n.args[0], (int, float)):
        if isinstance(symbols[n.args[1]], TVar):
            my_output, counter = gen_tvar(counter)
            symbols[n] = my_output
            e2 = symbols[n.args[1]]
            return [BinConstraintT(my_output, e2, op_eq)], counter
        elif isinstance(symbols[n.args[1]], DVar):
            my_output, counter = gen_dvar(counter)
            symbols[n] = my_output
            e2 = symbols[n.args[1]]

            # we will propagate the runtime value here since this is regular addition
            c = Conj([BinConstraintD(my_output, BinConstraintD(e2, n.args[0], op_code), op_eq),
                      BinConstraintD(0, my_output, op_leq)])
            return [c], counter

        else:
            raise NotImplementedError('Method not yet implemented')

    else:
        # TODO generate add constraints for scalar addition
        raise NotImplementedError('Addition not yet implemented')

@register_inference_rule(torch.flatten)
def flatten_inference_rule(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], Node)

    # generate the new variable
    flattened, counter = gen_tvar(counter)
    symbols[n] = flattened

    input = symbols[n.args[0]]

    # set the default start and end dims
    start_dim = 1
    end_dim = -1

    if len(n.args) > 1:
        assert isinstance(n.args[1], int)
        start_dim = n.args[1]

    if len(n.args) > 2:
        assert isinstance(n.args[2], int)
        end_dim = n.args[2]

    c1 = BinConstraintT(input, Dyn, op_eq)
    c2 = BinConstraintT(flattened, Dyn, op_eq)
    both_dyn = Conj([c1, c2])

    const = []
    for i in range(1, MAX_TENSOR_RANK + 1):
        c, counter = generate_flatten_constraints(start_dim, end_dim, input, flattened, i, counter)
        const.append(c)

    return [Disj([both_dyn, *const])], counter

@register_inference_rule(torch.nn.functional.layer_norm)
def layer_norm_functional(n: Node, symbols, constraints, counter):
    """
    Input and output shapes should be equal;
    the input should be consistent with the normalized_shape argument
    """
    assert isinstance(n.args[0], Node)
    return gen_layer_norm_constraints(n, n.args[1], symbols, counter)


@register_inference_rule(torch.nn.LayerNorm)
def layer_norm_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    Input and output shapes should be equal.
    Input should be consistent with the normalized_shape
    """
    assert isinstance(n.args[0], Node)
    return gen_layer_norm_constraints(n, module_instance.normalized_shape, symbols, counter)


def gen_layer_norm_constraints(n: Node, normalized_shape, symbols, counter):
    output, counter = gen_tvar(counter)
    symbols[n] = output
    input = symbols[n.args[0]]

    input_dyn = BinConstraintT(input, Dyn, op_eq)
    output_dyn = BinConstraintT(output, Dyn, op_eq)

    c1 = Conj([input_dyn, output_dyn])

    c2 = []
    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims_rhs, counter = gen_tensor_dims(i, counter)
        nat_constraints = gen_nat_constraints(new_dims_rhs)

        c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims_rhs), op_eq),
                           BinConstraintT(output, TensorType(new_dims_rhs), op_eq)] +
                          add_layer_norm_constraints(new_dims_rhs, list(normalized_shape)) +
                          nat_constraints)
        c2.append(c_tensor_i)
    return [Disj([c1, Disj(c2)])], counter

@register_inference_rule(torch.nn.Dropout)
@register_inference_rule(torch.nn.ReLU)
def relu_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    Input and output shapes should be equal.
    """
    assert isinstance(n.args[0], Node)
    output, counter = gen_tvar(counter)
    symbols[n] = output
    input = symbols[n.args[0]]
    assert isinstance(input, TVar)
    return [BinConstraintT(input, output, op_eq)], counter

@register_inference_rule(torch.nn.Linear)
def linear_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    Input and output sizes should be the same except for the last dimension.
    If the input is Dyn, then so should the output
    """
    assert isinstance(n.args[0], Node)
    return linear_constraints(n, module_instance.in_features, module_instance.out_features, symbols, counter)


@register_inference_rule("dim")  # type: ignore[attr-defined]
def torch_dim_inference_rule(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], Node)
    my_dim, counter = gen_dvar(counter)
    symbols[n] = my_dim
    input = symbols[n.args[0]]

    input_dyn = BinConstraintT(input, Dyn, op_eq)
    output_dyn = BinConstraintD(my_dim, Dyn, op_eq)

    c1 = []

    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims_rhs_1, counter = gen_tensor_dims(i, counter)

        c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims_rhs_1), op_eq),
                           BinConstraintD(my_dim, i, op_eq)])
        c1.append(c_tensor_i)

    return [Disj([Conj([input_dyn, output_dyn]), Disj(c1)])], counter

@register_inference_rule(torch._C._nn.linear)  # type: ignore[attr-defined]
def torch_linear_inference_rule(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], Node)
    weight_dims, counter = gen_tensor_dims(2, counter)
    equality_constraint = BinConstraintT(symbols[n.args[1]], TensorType(weight_dims), op_eq)
    constraints, counter = linear_constraints(n, weight_dims[1], weight_dims[0], symbols, counter)
    return [equality_constraint] + constraints, counter


def linear_constraints(n: Node, in_features, out_features, symbols, counter):
    linear_output, counter = gen_tvar(counter)
    symbols[n] = linear_output
    linear_input = symbols[n.args[0]]

    input_dyn = BinConstraintT(linear_input, Dyn, op_eq)
    output_dyn = BinConstraintT(linear_output, Dyn, op_eq)

    c1 = Conj([input_dyn, output_dyn])

    c2 = []
    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
        new_dims_rhs_2, counter = gen_tensor_dims(i, counter)

        nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)

        c_tensor_i = Conj([BinConstraintT(linear_input, TensorType(new_dims_rhs_1), op_eq),
                           BinConstraintT(linear_output, TensorType(new_dims_rhs_2), op_eq)] +
                          add_linear_constraints(new_dims_rhs_1, new_dims_rhs_2, in_features, out_features) +
                          nat_constraints)
        c2.append(c_tensor_i)
    return [Disj([c1, Disj(c2)])], counter

def add_layer_norm_constraints(input_dim, normalized_dim):
    """
    The constraints say that the type has the form [*, 1024, 1024]
    while normalized_dim has the form [1024, 1024]
    Args:
        input_dim: Input shape of layer norm
        normalized_dim: normalized_shape parameter of the module instance

    """

    # in this case we return false since there's a pattern mismatch
    if len(normalized_dim) > len(input_dim):
        return [F()]

    else:
        constraints = []
        for i, n in zip(reversed(input_dim), reversed(normalized_dim)):
            constraints.append(BinConstraintD(i, n, op_consistency))
        return constraints

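# Example (added commentary, shapes are illustrative): with
# input_dim = [d1, d2, d3] and normalized_dim = [1024, 1024], the helper
# above pairs dimensions from the right and yields the consistency
# constraints d3 ~ 1024 and d2 ~ 1024, leaving the leading d1 unconstrained.
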
def add_linear_constraints(dims1, dims2, in_features, out_features):
    assert len(dims1) == len(dims2)
    constraints = []
    for i in range(len(dims1)):
        if i == len(dims1) - 1:
            constraints.append(BinConstraintD(dims1[i], in_features, op_consistency))
            constraints.append(BinConstraintD(dims2[i], out_features, op_eq))
        else:
            constraints.append(BinConstraintD(dims1[i], dims2[i], op_eq))

    return constraints

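# A minimal sketch (added for illustration; the feature sizes are made up):
# only the last dimension changes across a Linear layer, so for rank-2 inputs
# the constraints are dims1[0] == dims2[0], dims1[1] ~ in_features, and
# dims2[1] == out_features.
def _linear_constraints_sketch(counter=0):
    dims1, counter = gen_tensor_dims(2, counter)
    dims2, counter = gen_tensor_dims(2, counter)
    return add_linear_constraints(dims1, dims2, 3, 5), counter
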
@register_inference_rule(torch.reshape)
def reshape_inference_rule(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], Node)

    # generate the new variable
    my_reshape, counter = gen_tvar(counter)
    symbols[n] = my_reshape

    src_var = symbols[n.args[0]]
    t2 = n.args[1]
    t2_type = TensorType([Dyn if elem == -1 else elem for elem in t2])  # type: ignore[union-attr]
    c1 = BinConstraintT(my_reshape, t2_type, op_eq)  # type: ignore[union-attr]
    c2 = CanReshape(src_var, t2_type)

    return [c1, c2], counter

@register_inference_rule(BatchNorm2d)
def batchnorm_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    assert isinstance(n.args[0], Node)

    # generate the new variable
    batchnorm_output, counter = gen_tvar(counter)
    symbols[n] = batchnorm_output
    batchnorm_input = symbols[n.args[0]]

    # dim vars
    d1, counter = gen_dvar(counter)
    d2, counter = gen_dvar(counter)
    d3, counter = gen_dvar(counter)
    d4, counter = gen_dvar(counter)

    nat_constraints = gen_nat_constraints([d1, d2, d3, d4])

    c1 = BinConstraintT(batchnorm_input, TensorType([d1, d2, d3, d4]), op_matching)
    c2 = BinConstraintT(batchnorm_input, batchnorm_output, op_eq)
    return [c1, c2, *nat_constraints], counter


@register_inference_rule(torch.nn.AdaptiveAvgPool2d)
def adaptive_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    assert isinstance(n.args[0], Node)

    avg_pool, counter = gen_tvar(counter)

    symbols[n] = avg_pool
    input_var = symbols[n.args[0]]

    # dim vars
    d1, counter = gen_dvar(counter)
    d2, counter = gen_dvar(counter)
    d3, counter = gen_dvar(counter)
    d4, counter = gen_dvar(counter)
    nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
    c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)
    c2 = BinConstraintT(avg_pool, TensorType([d1, d2, module_instance.output_size[0], module_instance.output_size[1]]), op_eq)

    return [c1, c2, *nat_constraints], counter

@register_inference_rule(Conv2d)
def conv2d_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    assert isinstance(n.args[0], Node)

    my_conv, counter = gen_tvar(counter)
    symbols[n] = my_conv
    input_var = symbols[n.args[0]]

    # dim vars
    [d1, d2, d3, d4], counter = gen_tensor_dims(MAX_TENSOR_RANK, counter)

    # the input must match a rank-4 tensor
    c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)

    # the channel dimension must be consistent with in_channels
    c2 = BinConstraintD(module_instance.in_channels, d2, op_consistency)

    c3 = CalcConv(my_conv, input_var,
                  module_instance.out_channels,
                  module_instance.kernel_size,
                  module_instance.padding,
                  module_instance.stride,
                  module_instance.dilation, [d1, d2, d3, d4])

    nat_constraints = gen_nat_constraints([d1, d2, d3, d4])

    return [c1, c2, c3, *nat_constraints], counter


@register_inference_rule(torch.nn.MaxPool2d)
def maxpool_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    assert isinstance(n.args[0], Node)
    maxpool, counter = gen_tvar(counter)
    symbols[n] = maxpool
    input_var = symbols[n.args[0]]

    # dim vars
    [d1, d2, d3, d4], counter = gen_tensor_dims(MAX_TENSOR_RANK, counter)

    c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)

    c2 = CalcMaxPool(maxpool, input_var, module_instance.kernel_size, module_instance.padding,
                     module_instance.stride, module_instance.dilation, [d1, d2, d3, d4])

    nat_constraints = gen_nat_constraints([d1, d2, d3, d4])

    return [c1, c2, *nat_constraints], counter

class ConstraintGenerator:
    def __init__(self, traced, graph=None):
        self.traced = traced  # traced or tracer.root
        self.traced_params = dict(self.traced.named_parameters())
        self.constraints = []
        self.symbol_dict = {}
        self.graph = traced.graph if hasattr(traced, 'graph') else graph

    def generate_constraints(self, counter=0):
        """
        Iterate through every node and generate constraints.
        Effect: self.constraints will be populated with the final constraints
        """
        graph = self.graph

        all_constraints = []

        for n in graph.nodes:
            (constraints, counter) = self.generate_constraints_node(n, counter)
            all_constraints += constraints

        return Conj(all_constraints), counter

    def generate_constraints_node(self, n: Node, counter):
        """
        Generate constraints for the given node.
        Currently supported operations:
        - Reshape
        - Add
        - conv2d
        """

        if n.op == 'placeholder':
            x, counter = gen_tvar(counter)
            self.symbol_dict[n] = x

            my_type = n.type

            if n.type != Dyn and (not isinstance(n.type, TensorType)):
                if n.type == torch.nn.parameter.Parameter:
                    # since we have a parameter, the shape must be static
                    assert 'example_value' in n.meta
                    my_type = TensorType(n.meta['example_value'].size())
                else:
                    my_type = Dyn

            c1 = BinConstraintT(my_type, x, op_precision)
            c2 = BinConstraintT(x, MAX_TENSOR_RANK, op_leq)
            return [c1, c2], counter

        elif n.op == 'call_function':
            if n.target in _INFERENCE_RULES:
                return _INFERENCE_RULES[n.target](n, self.symbol_dict, self.constraints, counter)
            else:
                raise RuntimeError(f'No inference rule registered for target {n.target}!')

        elif n.op == 'call_module':

            module_instance = self.traced.get_submodule(n.target)
            if type(module_instance) in _INFERENCE_RULES:
                return _INFERENCE_RULES[type(module_instance)](n,
                                                               module_instance,
                                                               self.symbol_dict,
                                                               self.constraints, counter)
            else:
                raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')

        elif n.op == 'call_method':
            if n.target in _INFERENCE_RULES:
                return _INFERENCE_RULES[n.target](n, self.symbol_dict, self.constraints, counter)
            else:
                raise RuntimeError(f'No inference rule registered for target {n.target}!')

        elif n.op == 'get_attr':
            t = self.traced_params.get(n.target, None)

            if isinstance(t, torch.Tensor):
                if len(t.shape) > 0:
                    res = []
                    for d in t.shape:
                        res.append(d)
                    attr_type = TensorType(res)
                    output, counter = gen_tvar(counter)
                    self.symbol_dict[n] = output
                    return [BinConstraintT(output, attr_type, op_eq)], counter
                else:
                    # scalar attribute, no tensor constraints to generate
                    return [], counter
            else:
                return [], counter

        elif n.op == 'output':
            return [], counter

        else:
            raise NotImplementedError(f"Method {n.op} not yet implemented")
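# Usage sketch (added commentary; `MyModule` is a hypothetical user model):
#
#     traced = torch.fx.symbolic_trace(MyModule())
#     generator = ConstraintGenerator(traced)
#     constraints, counter = generator.generate_constraints(counter=0)
#
# `constraints` is a single Conj over the per-node constraints, ready to be
# simplified by the transformation rules in constraint_transformation.py below.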
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py
ADDED
@@ -0,0 +1,1040 @@
# mypy: ignore-errors
import copy
import itertools
from torch.fx.experimental.migrate_gradual_types.constraint_generator import BinConstraintT, MAX_TENSOR_RANK
from torch.fx.experimental.migrate_gradual_types.constraint import T, BinConstraintD, Conj, Constraint, DVar, TVar, \
    Transpose
from torch.fx.experimental.migrate_gradual_types.constraint import Disj, TGreatestUpperBound
from torch.fx.experimental.migrate_gradual_types.constraint import DGreatestUpperBound
from torch.fx.experimental.migrate_gradual_types.constraint import CalcConv, CalcMaxPool
from torch.fx.experimental.migrate_gradual_types.constraint import CalcProduct, CanReshape
from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, Prod, F, GetItem, GetItemTensor, IndexSelect
from torch.fx.experimental.migrate_gradual_types.operation import op_eq, op_precision, op_leq, op_matching
from torch.fx.experimental.migrate_gradual_types.operation import op_consistency, op_neq
from torch.fx.experimental.migrate_gradual_types.operation import op_mul, op_add, op_sub, op_div, op_mod
from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar
from torch.fx.tensor_type import TensorType, Dyn
from typing import Callable, Dict, List

_TRANSFORMATION_RULES: Dict[Constraint, Callable] = {}


def register_transformation_rule(call_target):
    def register(fn):
        if call_target in _TRANSFORMATION_RULES:
            raise RuntimeError(f'Transformation rule already registered for {call_target}!')
        _TRANSFORMATION_RULES[call_target] = fn
        return fn
    return register

def valid_index(index, dims):
    """
    Given a list of dimensions, checks if an index is valid in the list
    """
    try:
        dims[index]
        return T()
    except IndexError:
        return F()

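# Examples (added commentary): for dims of length 3, valid_index(2, dims)
# evaluates to T() while valid_index(3, dims) gives F(); negative Python
# indices also work through the list lookup, so valid_index(-1, dims) is T().
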
@register_transformation_rule(Transpose)
def transform_transpose(constraint, counter):
    """
    Similar to a sequence of two index-selects
    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    is_valid_index1 = valid_index(constraint.index1, dims)
    is_valid_index2 = valid_index(constraint.index2, dims)
    new_dims = copy.deepcopy(dims)
    nat_constraints = gen_nat_constraints(dims)

    if is_valid_index1 == T() and is_valid_index2 == T():
        new_dims[constraint.index1] = dims[constraint.index2]
        new_dims[constraint.index2] = dims[constraint.index1]

    transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                                   *nat_constraints,
                                   is_valid_index1, is_valid_index2,
                                   BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])
    return transformed_constraint, counter

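# Example (added commentary): a Transpose constraint with tensor_size = 3,
# index1 = 0 and index2 = 2 expands to input = [d1, d2, d3] and
# output = [d3, d2, d1], plus the naturalness and index-validity clauses.
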
@register_transformation_rule(IndexSelect)
def transform_index_select(constraint, counter):
    """
    The constraints consider the given tensor size, check if the index is
    valid and, if so, generate a constraint for replacing the input dimension
    with the required dimension
    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    is_valid_index = valid_index(constraint.index, dims)
    nat_constraints = gen_nat_constraints(dims)

    # if the index is valid then replace the input dimension with the new dimension;
    # otherwise the dimension is left as-is and the clause will contain False
    new_dims = copy.deepcopy(dims)
    if is_valid_index == T():
        new_dims[constraint.index] = constraint.dim_replace

    transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                                   *nat_constraints,
                                   is_valid_index,
                                   BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])
    return transformed_constraint, counter

@register_transformation_rule(GetItem)
def transform_get_item(constraint, counter):
    """
    Generate an equality of the form:
    t = [a1, ..., an]
    then generate constraints that check if the given index is valid
    given this particular tensor size.
    If the index is valid, generate a constraint to get the item.
    Note that we already handled the Dyn input case in the previous
    step.
    Args:
        constraint: GetItem which assumes we are getting an item from a tensor (not Dyn)
        counter: variable tracking
    Returns: simplified constraints for GetItem

    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    nat_constraints = gen_nat_constraints(dims)

    is_valid_index = valid_index(constraint.index, dims)

    all_constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                       *nat_constraints,
                       is_valid_index]

    # if the index is valid, we generate a constraint for getting an item
    # otherwise this clause will have been UNSAT due to the wrong index
    if is_valid_index == T():
        all_constraints.append(BinConstraintD(constraint.res, dims[constraint.index], op_eq))

    return Conj(all_constraints), counter

123 |
+
def valid_index_tensor(index, dims):
    """
    If the slice instances exceed the length of the dimensions,
    this is a type error, so we return False
    """
    slice_count = 0
    for s in index:
        if isinstance(s, slice):
            slice_count += 1
    if slice_count > len(dims):
        return F()
    else:
        return T()


@register_transformation_rule(GetItemTensor)
def transform_get_item_tensor(constraint, counter):
    """
    When the index is a tuple, the output will be a tensor
    TODO: we have to check if this is the case for all HF models

    The cases we are covering here are a tuple with one of:
    - slice with default argument
    - None

    None appends 1 to the input tensor dimensions,
    so each occurrence of 'None' increases the rank by 1.

    A slice with default arguments does not change the rank.
    """
    assert isinstance(constraint.index_tuple, tuple)

    # generate a result tensor of the expected size
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    nat_constraints = gen_nat_constraints(dims)

    # generate a place-holder list of the right rank
    # where "slice" does not contribute to the rank and "None" does
    none_c = constraint.index_tuple.count(None)
    resulting_tensor_dims = (none_c + len(dims)) * [None]

    for i in range(len(constraint.index_tuple)):

        # append 1 to the right location of the resulting tensor
        if constraint.index_tuple[i] is None:
            resulting_tensor_dims[i] = 1

        elif constraint.index_tuple[i] == slice(None, None, None):
            pass

        else:
            raise NotImplementedError('Method not yet implemented')

    # append the remaining dimensions to the right location
    dim_index = 0
    for i in range(len(resulting_tensor_dims)):
        if resulting_tensor_dims[i] is None:
            resulting_tensor_dims[i] = dims[dim_index]
            dim_index += 1

    # check if the index is valid
    is_valid_index = valid_index_tensor(constraint.index_tuple, dims)

    # check if the resulting tensor is within bounds
    if len(resulting_tensor_dims) > 4:
        return F(), counter

    else:
        constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                       BinConstraintT(constraint.res, TensorType(resulting_tensor_dims), op_eq),
                       *nat_constraints,
                       is_valid_index]
        return Conj(constraints), counter

@register_transformation_rule(BinConstraintT)
def generate_binconstraint_t(constraint, counter):
    """
    Transform binary constraints for tensors
    """

    # precision constraints
    if constraint.op == op_precision:
        if constraint.lhs == Dyn:
            return T(), counter
        elif isinstance(constraint.lhs, TensorType):
            is_fully_static = all(d != Dyn for d in constraint.lhs.__args__)
            if is_fully_static:
                return BinConstraintT(constraint.lhs, constraint.rhs, op_eq), counter
            else:
                new_dims = []

                for _ in range(len(constraint.lhs.__args__)):
                    dim, counter = gen_dvar(counter)
                    new_dims.append(dim)

                new_dim_constraints = [BinConstraintD(old_dim, new_dim, op_precision) for
                                       new_dim, old_dim in zip(new_dims, constraint.lhs.__args__)] + \
                    [BinConstraintT(constraint.rhs, TensorType(new_dims), op_eq)] + \
                    [BinConstraintD(1, new_dim, op_leq) for
                     new_dim in new_dims]
                return Conj(new_dim_constraints), counter

    # matching
    elif constraint.op == op_matching:
        assert isinstance(constraint.rhs, TensorType)
        d1 = constraint.rhs.__args__[0]
        d2 = constraint.rhs.__args__[1]
        d3 = constraint.rhs.__args__[2]
        d4 = constraint.rhs.__args__[3]

        conj = [BinConstraintT(constraint.lhs, Dyn, op_eq),
                BinConstraintD(d1, Dyn, op_eq),
                BinConstraintD(d2, Dyn, op_eq),
                BinConstraintD(d3, Dyn, op_eq),
                BinConstraintD(d4, Dyn, op_eq)]
        return Disj([Conj(conj),
                     BinConstraintT(constraint.lhs, TensorType([d1, d2, d3, d4]), op_eq)]), counter

    elif constraint.op == op_consistency:
        c_dyn = Disj([BinConstraintT(constraint.lhs, Dyn, op_eq), BinConstraintT(constraint.rhs, Dyn, op_eq)])
        [c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4], counter = gen_consistency_constraints(constraint, counter)

        return Disj([c_dyn, c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4]), counter

    elif constraint.op == op_leq:
        assert isinstance(constraint.rhs, int)
        disj = [BinConstraintT(constraint.lhs, Dyn, op_eq)]
        for i in range(1, constraint.rhs + 1):
            dims = []
            for j in range(1, i + 1):
                dim_var, counter = gen_dvar(counter)
                dims.append(dim_var)
            disj.append(BinConstraintT(constraint.lhs, TensorType(dims), op_eq))
        return Disj(disj), counter
    else:
        return constraint, counter

@register_transformation_rule(BinConstraintD)
def generate_binconstraint_d(constraint, counter):
    """
    Transform binary constraints for dimensions
    """
    if constraint.op == op_precision:
        if isinstance(constraint.lhs, int):
            return BinConstraintD(constraint.lhs, constraint.rhs, op_eq), counter
        elif constraint.lhs == Dyn:
            return T(), counter

    elif constraint.op == op_consistency:
        return Disj([BinConstraintD(constraint.lhs, constraint.rhs, op_eq),
                     BinConstraintD(constraint.rhs, Dyn, op_eq), BinConstraintD(constraint.lhs, Dyn, op_eq)]), counter

    else:
        return constraint, counter

@register_transformation_rule(Conj)
def generate_conj(constraint, counter):
    """
    Transform conjunctions
    """
    new = []
    for c in constraint.conjucts:
        new_c, counter = transform_constraint(c, counter)
        new.append(new_c)
    return Conj(new), counter


@register_transformation_rule(Disj)
def generate_disj(constraint, counter):
    """
    Transform disjunctions
    """
    new = []
    for c in constraint.disjuncts:
        new_c, counter = transform_constraint(c, counter)
        new.append(new_c)
    return Disj(new), counter

@register_transformation_rule(TGreatestUpperBound)
def generate_gub(constraint, counter):
    """
    Transform a greatest upper bound on tensors into equality and
    greatest-upper-bound constraints on dimensions
    """
    c1 = Conj([Disj([BinConstraintT(constraint.rhs1, Dyn, op_eq),
                     BinConstraintT(constraint.rhs2, Dyn, op_eq)]), BinConstraintT(constraint.res, Dyn, op_eq)])

    [c2, c3, c4, c5], counter = gen_greatest_upper_bound(constraint, counter)

    return Disj([c1, c2, c3, c4, c5]), counter


@register_transformation_rule(DGreatestUpperBound)
def generate_d_gub(constraint, counter):
    """
    Transform a greatest upper bound on dimensions into equality constraints
    """
    c1 = Conj([BinConstraintD(constraint.rhs1, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs2, op_eq)])
    c2 = Conj([BinConstraintD(constraint.rhs2, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)])
    c3 = Conj([BinConstraintD(constraint.rhs2, constraint.rhs1, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)])
    return Disj([c1, c2, c3]), counter

@register_transformation_rule(CalcConv)
def generate_calc_conv(constraint, counter):
    """
    Transform convolution constraints
    """
    d, counter = gen_tensor_dims(4, counter)
    conv_result = TensorType([d[0], d[1], d[2], d[3]])

    # the convolution result is a tensor of size 4
    c1 = BinConstraintT(constraint.conv_result, conv_result, op_eq)

    # the second dimension of the output is equal to the output channels
    c2 = Conj([BinConstraintD(d[1], constraint.c_out, op_eq), BinConstraintD(d[1], Dyn, op_neq)])

    # the input corresponds to the output in the first dimension of the convolution
    c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)

    c4, c5 = calc_last_two_dims(constraint, d)

    leq_constraints = Conj([BinConstraintD(0, d[0], op_leq),
                            BinConstraintD(0, d[1], op_leq),
                            BinConstraintD(0, d[2], op_leq),
                            BinConstraintD(0, d[3], op_leq)])

    return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter


@register_transformation_rule(CalcMaxPool)
def generate_calc_maxpool(constraint, counter):
    """
    Transform maxpool constraints
    """
    d, counter = gen_tensor_dims(4, counter)
    maxpool_result = TensorType([d[0], d[1], d[2], d[3]])

    # the maxpool result is a tensor of size 4
    c1 = BinConstraintT(constraint.maxpool_result, maxpool_result, op_eq)

    # the input corresponds to the output in the first and second dimensions of maxpool
    c2 = BinConstraintD(constraint.matching_constraint[1], d[1], op_eq)
    c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)
    c4, c5 = calc_last_two_dims(constraint, d)

    leq_constraints = Conj([BinConstraintD(0, d[0], op_leq),
                            BinConstraintD(0, d[1], op_leq),
                            BinConstraintD(0, d[2], op_leq),
                            BinConstraintD(0, d[3], op_leq)])

    return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter

@register_transformation_rule(CalcProduct)
def generate_calc_product(constraint, counter):
    """
    Transform flatten constraints
    """
    start = constraint.start
    end = constraint.end
    dims = constraint.dims_to_flatten
    flattened = constraint.flattened
    n = len(constraint.dims_to_flatten)

    # this will be evaluated right here
    boundary_check = 0 <= start < end <= n

    c_boundary = T() if boundary_check else F()

    lhs = dims[0:start]
    rhs = dims[end:]
    mid = dims[start:end]

    all_possibilities = generate_all_int_dyn_dim_possibilities(mid)

    all_constraints = []

    for p in all_possibilities:
        p = list(p)
        # this tells us there is a dynamic variable
        contains_dyn = not all(c.op == op_neq for c in p)
        if contains_dyn:
            mid_var = [Dyn]
            total_constraints = lhs + mid_var + rhs
            if len(total_constraints) > 4:
                all_constraints.append(F())
            else:
                all_constraints.append(Conj([BinConstraintT(flattened, TensorType(total_constraints), op_eq)] + p))
        else:
            new_var, counter = gen_dvar(counter)
            mid_eq_prod = Conj([BinConstraintD(new_var, Prod(mid), op_eq), BinConstraintD(new_var, Dyn, op_neq)])
            mid_var = [new_var]
            total_constraints = lhs + mid_var + rhs
            if len(total_constraints) > 4:
                all_constraints.append(F())
            else:
                all_constraints.append(Conj([BinConstraintT(flattened, TensorType(total_constraints), op_eq), mid_eq_prod] + p))

    return Conj([Disj(all_constraints), c_boundary]), counter

@register_transformation_rule(CanReshape)
def generate_reshape(constraint, counter):
    """
    Transform reshape constraints
    """
    d, counter = gen_tensor_dims(4, counter)

    d1 = d[0]
    d2 = d[1]
    d3 = d[2]
    d4 = d[3]

    target = constraint.target.__args__

    is_fully_static = all(t != Dyn for t in target)

    # dynamic tensor
    c1_dyn = BinConstraintT(constraint.src, Dyn, op_eq)
    c2_tensor1 = BinConstraintT(constraint.src, TensorType([d1]), op_eq)
    c2_tensor2 = BinConstraintT(constraint.src, TensorType([d1, d2]), op_eq)
    c2_tensor3 = BinConstraintT(constraint.src, TensorType([d1, d2, d3]), op_eq)
    c2_tensor4 = BinConstraintT(constraint.src, TensorType([d1, d2, d3, d4]), op_eq)

    d1_eq_dyn = BinConstraintD(d1, Dyn, op_eq)
    d1_neq_dyn = BinConstraintD(d1, Dyn, op_neq)

    d2_eq_dyn = BinConstraintD(d2, Dyn, op_eq)
    d2_neq_dyn = BinConstraintD(d2, Dyn, op_neq)

    d3_eq_dyn = BinConstraintD(d3, Dyn, op_eq)
    d3_neq_dyn = BinConstraintD(d3, Dyn, op_neq)

    # note: the original bound these to d3; they must refer to d4
    d4_eq_dyn = BinConstraintD(d4, Dyn, op_eq)
    d4_neq_dyn = BinConstraintD(d4, Dyn, op_neq)

    nat_d1 = BinConstraintD(0, d1, op_leq)
    nat_d2 = BinConstraintD(0, d2, op_leq)
    nat_d3 = BinConstraintD(0, d3, op_leq)
    nat_d4 = BinConstraintD(0, d4, op_leq)

    if is_fully_static:
        # size 1 tensor
        c3_tensor1 = Disj([d1_eq_dyn,
                           (Conj([d1_neq_dyn,
                                  BinConstraintD(d1, Prod(target), op_eq)]))])
        all_tensor_1 = Conj([c2_tensor1, c3_tensor1])

        # size 2 tensor
        all_tensor_2 = Conj([c2_tensor2, gen_all_reshape_possibilities([d1, d2], target)])

        # size 3 tensor
        all_tensor_3 = Conj([c2_tensor3, gen_all_reshape_possibilities([d1, d2, d3], target)])

        # size 4 tensor
        all_tensor_4 = Conj([c2_tensor4, gen_all_reshape_possibilities([d1, d2, d3, d4], target)])

        return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]),
                     nat_d1, nat_d2, nat_d3, nat_d4]), counter

    # otherwise there must be exactly one occurrence of dyn in the target
    else:
        new_target = []

        for n in target:
            if n != Dyn:
                new_target.append(n)

        # tensor 1
        c3_tensor1 = Disj([d1_eq_dyn,
                           (Conj([d1_neq_dyn,
                                  is_dim_div_by_target(new_target, d1)]))])
        all_tensor_1 = Conj([c2_tensor1, c3_tensor1])

        # tensor 2
        c21 = Disj([d1_eq_dyn, d2_eq_dyn])
        c22 = Conj([d1_neq_dyn, d2_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2]))])
        all_tensor_2 = Conj([c2_tensor2, Disj([c21, c22])])

        # tensor 3
        c31 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn])
        c32 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3]))])
        all_tensor_3 = Conj([c2_tensor3, Disj([c31, c32])])

        # tensor 4
        c41 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn, d4_eq_dyn])
        c42 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, d4_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3, d4]))])
        all_tensor_4 = Conj([c2_tensor4, Disj([c41, c42])])

        return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]),
                     nat_d1, nat_d2, nat_d3, nat_d4]), counter

@register_transformation_rule(ApplyBroadcasting)
def generate_broadcasting(constraint, counter):
    """
    Transform broadcasting constraints
    """
    e11, e12 = constraint.res1, constraint.res2
    e1, e2 = constraint.input1, constraint.input2

    e1_dyn = BinConstraintT(e1, Dyn, op_eq)
    e2_dyn = BinConstraintT(e2, Dyn, op_eq)

    # Introduce dimensions
    e1_equal_e11 = BinConstraintT(e1, e11, op_eq)
    e2_equal_e12 = BinConstraintT(e2, e12, op_eq)

    # dyn possibility
    e1_dyn_constraint = Conj([e1_dyn, e1_equal_e11, e2_equal_e12])
    e2_dyn_constraint = Conj([e2_dyn, e1_equal_e11, e2_equal_e12])

    # tensor possibility
    # generate dimensions to create tensors of size 1
    final_tensor_1_constraint, _, _, nat_dims_1, counter = \
        gen_broadcasting_constraints(e1, e2, e11, e12, 1, counter)

    # generate dimensions to create tensors of size 2
    final_tensor_2_constraint_no_padding, final_tensor_2_constraint_padding_arg1, \
        final_tensor_2_constraint_padding_arg2, nat_dims_2, counter = \
        gen_broadcasting_constraints(e1, e2, e11, e12, 2, counter)

    # generate dimensions to create tensors of size 3
    final_tensor_3_constraint_no_padding, final_tensor_3_constraint_padding_arg1, \
        final_tensor_3_constraint_padding_arg2, nat_dims_3, counter = \
        gen_broadcasting_constraints(e1, e2, e11, e12, 3, counter)

    # generate dimensions to create tensors of size 4
    final_tensor_4_constraint_no_padding, final_tensor_4_constraint_padding_arg1, \
        final_tensor_4_constraint_padding_arg2, nat_dims_4, counter = \
        gen_broadcasting_constraints(e1, e2, e11, e12, 4, counter)

    final_result = Disj([
        e1_dyn_constraint,
        e2_dyn_constraint,
        final_tensor_1_constraint,
        final_tensor_2_constraint_no_padding,
        final_tensor_2_constraint_padding_arg1,
        final_tensor_2_constraint_padding_arg2,
        final_tensor_3_constraint_no_padding,
        final_tensor_3_constraint_padding_arg1,
        final_tensor_3_constraint_padding_arg2,
        final_tensor_4_constraint_no_padding,
        final_tensor_4_constraint_padding_arg1,
        final_tensor_4_constraint_padding_arg2
    ])

    return Conj([final_result, *nat_dims_1, *nat_dims_2, *nat_dims_3, *nat_dims_4]), counter

def transform_constraint(constraint: Constraint, counter: int):
    """
    Transforms a constraint into a simpler constraint.
    Ex: precision and consistency are transformed to equality
    Args:
        constraint: constraint to be transformed
        counter: for variable tracking

    Returns: Constraint
    """
    if type(constraint) in _TRANSFORMATION_RULES:
        return _TRANSFORMATION_RULES[type(constraint)](constraint, counter)

    else:
        return constraint, counter

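A minimal usage sketch: a single pass may leave further reducible constraints (a precision constraint, for instance, expands into dimension constraints), so callers re-apply transform_constraint until a fixed point, as transform_to_z3.py later in this diff does. The concrete constraint here is illustrative, and assumes TVar is in scope as in the signatures below:

counter = 0
c = BinConstraintT(TensorType([Dyn, 4]), TVar(1), op_precision)
old = None
while old != c:  # the constraint classes define structural equality
    old = c
    c, counter = transform_constraint(c, counter)
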
def calc_last_two_dims(constraint, d: List[DVar]):
    """
    Generate constraints for the last two dimensions of a convolution or a maxpool output
    Args:
        constraint: CalcConv or CalcMaxPool
        d: The list of output dimensions

    Returns: Constraints for calculating the last two dimensions of the output
    """

    assert isinstance(constraint, (CalcConv, CalcMaxPool))

    b3 = constraint.matching_constraint[2]
    b4 = constraint.matching_constraint[3]

    b3_dyn = Conj([BinConstraintD(d[2], Dyn, op_eq), BinConstraintD(b3, Dyn, op_eq)])
    b4_dyn = Conj([BinConstraintD(d[3], Dyn, op_eq), BinConstraintD(b4, Dyn, op_eq)])

    d3_not_dyn = Conj([BinConstraintD(d[2], Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq)])
    d4_not_dyn = Conj([BinConstraintD(d[3], Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq)])

    # transform parameters into tuples in case they are not already
    padding = (constraint.padding, constraint.padding) \
        if isinstance(constraint.padding, int) else constraint.padding
    kernel = (constraint.kernel, constraint.kernel) \
        if isinstance(constraint.kernel, int) else constraint.kernel
    stride = (constraint.stride, constraint.stride) \
        if isinstance(constraint.stride, int) else constraint.stride
    dilation = (constraint.dilation, constraint.dilation) \
        if isinstance(constraint.dilation, int) else constraint.dilation

    f1 = BinConstraintD(b3, BinConstraintD(2, padding[0], op_mul), op_add)
    f2 = BinConstraintD(dilation[0], BinConstraintD(kernel[0], 1, op_sub), op_mul)
    f3 = BinConstraintD(BinConstraintD(BinConstraintD(f1, f2, op_sub), 1, op_sub), stride[0], op_div)
    f4 = BinConstraintD(f3, 1, op_add)

    c4 = Disj([b3_dyn, Conj([d3_not_dyn, BinConstraintD(d[2], f4, op_eq)])])

    f11 = BinConstraintD(b4, BinConstraintD(2, padding[1], op_mul), op_add)
    f22 = BinConstraintD(dilation[1], BinConstraintD(kernel[1], 1, op_sub), op_mul)
    f33 = BinConstraintD(BinConstraintD(BinConstraintD(f11, f22, op_sub), 1, op_sub), stride[1], op_div)
    f44 = BinConstraintD(f33, 1, op_add)

    c5 = Disj([b4_dyn, Conj([d4_not_dyn, BinConstraintD(d[3], f44, op_eq)])])

    return c4, c5

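For reference, f4/f44 above encode the standard convolution/maxpool output-size formula as constraint terms; a quick numeric check of the same arithmetic (the values are illustrative):

h_in, pad, dil, ker, stri = 32, 1, 1, 3, 2
h_out = (h_in + 2 * pad - dil * (ker - 1) - 1) // stri + 1
assert h_out == 16
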
def generate_all_int_dyn_dim_possibilities(my_list: List[DVar]):
    """
    Generate all possibilities of being equal or not equal to dyn for my_list
    Args:
        my_list: List of tensor dimensions

    Returns: A list of lists of constraints. Each list of constraints corresponds to
             one possibility about the values of the dimension variables
    """
    eq_possibilities = [BinConstraintD(my_list[i], Dyn, op_eq) for i in range(len(my_list))]
    neq_possibilities = [BinConstraintD(my_list[i], Dyn, op_neq) for i in range(len(my_list))]
    d_possibilities = []

    for i in zip(eq_possibilities, neq_possibilities):
        d_possibilities.append(list(i))
    all_possibilities = list(itertools.product(*d_possibilities))
    return all_possibilities

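Each dimension is independently either Dyn or not, so the enumeration grows as 2^n in the rank; a small sanity sketch:

dims, _ = gen_tensor_dims(2, 0)
cases = generate_all_int_dyn_dim_possibilities(dims)
assert len(cases) == 2 ** len(dims)  # all eq/neq combinations
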
def is_target_div_by_dim(target: List[int], dim: List[DVar]):
    """
    Generate constraints to check if the target dimensions are divisible by the input dimensions
    Args:
        target: Target dimensions
        dim: Input dimensions

    Returns: Constraints to check divisibility
    """
    return BinConstraintD(BinConstraintD(Prod(target), dim, op_mod), 0, op_eq)


def is_dim_div_by_target(target: List[int], dim: List[DVar]):
    """
    Generate constraints to check if the input dimensions are divisible by the target dimensions
    Args:
        target: Target dimensions
        dim: Input dimensions

    Returns: Constraints to check divisibility
    """
    return BinConstraintD(BinConstraintD(dim, Prod(target), op_mod), 0, op_eq)

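Both helpers reduce divisibility to a mod-equals-zero constraint over dimension terms; for example, with a fresh dimension variable:

d, _ = gen_dvar(0)
c = is_dim_div_by_target([2, 3], d)  # encodes d % (2 * 3) == 0
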
def gen_all_reshape_possibilities(list_of_dims, target):
    """
    Consider all possibilities of what the input dimensions could be (number or dynamic),
    then generate the appropriate constraints using multiplication or mod depending on the possibility.
    The possibilities we consider here are the cross product of being equal to dyn or not equal to dyn
    for the input. Target is fixed because at most one dimension could be dyn.
    We have different cases for this.

    Args:
        list_of_dims: The input list of dimensions
        target: The tensor we want to reshape to

    Returns: A disjunction of transformed reshape constraints
    """
    all_possibilities = generate_all_int_dyn_dim_possibilities(list_of_dims)

    all_constraints = []

    for p in all_possibilities:
        to_multiply = []

        p = list(p)

        for constraint in p:
            assert isinstance(constraint, BinConstraintD)
            if constraint.op == op_neq:
                to_multiply.append(constraint.lhs)

        if not to_multiply:
            all_constraints.append(Conj(p))

        elif len(to_multiply) < len(list_of_dims):
            all_constraints.append(Conj(p + [is_target_div_by_dim(target, Prod(to_multiply))]))
        else:
            all_constraints.append(Conj(p + [BinConstraintD(Prod(list_of_dims),
                                                            Prod(target), op_eq)]))

    return Disj(all_constraints)

def broadcast_dim(tensor_input1, tensor_input2, res1, res2, index, padding=False):
    """
    Apply broadcasting to the 'index' dimension of tensor_input1.
    Args:
        tensor_input1: should represent [d1, ..., d_index, ...] where d_index = 1
        tensor_input2: represents the second input
        res1: broadcasted result 1
        res2: broadcasted result 2
        index: the index to broadcast
        padding: If padding was used, then tensor_input1[index] does not exist

    Returns: Constraints for broadcasting the 'index' dimension
    """
    if tensor_input1[index] is None:
        assert padding

    if not padding:
        # the inputs have the same length, so they all have dimensions at "index"
        return Conj([BinConstraintD(tensor_input1[index], 1, op_eq),
                     BinConstraintD(res1[index], res2[index], op_eq),
                     BinConstraintD(res2[index], tensor_input2[index], op_eq)])

    else:
        # we don't set the input dimension to 1, since it doesn't exist
        return Conj([BinConstraintD(res1[index], res2[index], op_eq),
                     BinConstraintD(res2[index], tensor_input2[index], op_eq)])

def apply_padding(e1_var: TVar,
                  e11: BinConstraintT,
                  e2: BinConstraintT,
                  e12: BinConstraintT,
                  d2: List[DVar],
                  d11: List[DVar],
                  d12: List[DVar],
                  counter: int):
    """
    We are considering the possibility where one input has fewer dimensions than
    the other, so we apply padding to the broadcasted results

    Args:
        e1_var: Variable representing the first input, where padding will be applied
        e11: constraint of the form e11 = TensorType[d1, ..., dn]
        e2: constraint of the form e2 = TensorType[d1, ..., dn]
        e12: constraint of the form e12 = TensorType[d1, ..., dn]
        d2: Tensor variables for the second input
        d11: Tensor variables for the broadcasted first input
        d12: Tensor variables for the broadcasted second input
        counter: variable tracking

    Returns: A new constraint whose goal is to apply padding to the broadcasted result
    """

    res = []

    # pad the shorter input with None so we can pass it to the broadcasting helper function
    for i in range(1, len(d2)):

        d1, counter = gen_tensor_dims(i, counter)

        nat_constraints = gen_nat_constraints(d1 + d2 + d11 + d12)

        e1 = BinConstraintT(e1_var, TensorType(d1), op_eq)

        simulate_padding = [None] * (len(d2) - i)

        assert len(simulate_padding + d1) == len(d2)

        broadcast_padding = []

        # for every padding size, we also consider broadcasting
        for j in range(len(d2) - i):
            broadcast_padding.append(broadcast_dim(simulate_padding, d2, d11, d12, j, True))

        # we consider the possibilities for broadcasting for every dimension. Since we already
        # padded d1, we do not consider it while broadcasting
        all_broadcasting_possibilities = generate_all_broadcasting_possibilities_no_padding(d1,
                                                                                            d2[(len(d2) - i):],
                                                                                            d11[(len(d2) - i):],
                                                                                            d12[(len(d2) - i):])
        # combine all constraints into a conjunction
        c = Conj([e1, e11, e2, e12,
                  *broadcast_padding,
                  all_broadcasting_possibilities,
                  *nat_constraints
                  ])
        res.append(c)

    return Disj(res), counter

def no_broadcast_dim_with_index(d1: List[DVar],
                                d2: List[DVar],
                                d3: List[DVar],
                                d4: List[DVar],
                                i: int):
    """
    Args:
        d1: input 1
        d2: input 2
        d3: simulated broadcasting for input 1
        d4: simulated broadcasting for input 2
        i: the rank of the resulting tensor addition

    Returns: Constraints for when no broadcasting occurs
    """
    return Conj([
        Disj([
            Conj([BinConstraintD(d1[i], 1, op_eq),
                  BinConstraintD(d2[i], 1, op_eq)]),

            Conj([BinConstraintD(d1[i], 1, op_neq),
                  BinConstraintD(d2[i], 1, op_neq)])]),

        BinConstraintD(d1[i], d3[i], op_eq),
        BinConstraintD(d2[i], d4[i], op_eq)])

def gen_lists_of_dims(num_tensors: int, dim_size: int, counter: int):
    """
    Generate lists of DVar to represent tensor dimensions
    Args:
        num_tensors: the required number of tensors
        dim_size: the number of dimensions for each tensor
        counter: variable tracking

    Returns: A list of lists of tensor dimensions
    """
    res = []

    for _ in range(num_tensors):
        dims, counter = gen_tensor_dims(dim_size, counter)
        res.append(dims)

    return res, counter

def create_equality_constraints_for_broadcasting(e1: TVar,
                                                 e2: TVar,
                                                 e11: TVar,
                                                 e12: TVar,
                                                 d1: List[DVar],
                                                 d2: List[DVar],
                                                 d11: List[DVar],
                                                 d12: List[DVar]):
    """
    Create equality constraints for when no broadcasting occurs
    Args:
        e1: Input 1
        e2: Input 2
        e11: Broadcasted input 1
        e12: Broadcasted input 2
        d1: Variables that store dimensions for e1
        d2: Variables that store dimensions for e2
        d11: Variables that store dimensions for e11
        d12: Variables that store dimensions for e12

    Returns: Four equality constraints
    """

    e1_tensor = BinConstraintT(e1, TensorType(d1), op_eq)
    e11_tensor = BinConstraintT(e11, TensorType(d11), op_eq)
    e2_tensor = BinConstraintT(e2, TensorType(d2), op_eq)
    e12_tensor = BinConstraintT(e12, TensorType(d12), op_eq)
    return [e1_tensor, e11_tensor, e2_tensor, e12_tensor]

def gen_consistency_constraints(constraint: Constraint, counter: int):
    """
    Args:
        constraint: Consistency constraint on tensors
        counter: for variable tracking

    Returns: Equality and consistency constraints on dimensions
    """

    all_constraints = []

    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
        new_dims_rhs_2, counter = gen_tensor_dims(i, counter)

        nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)

        c_tensor_i = Conj([BinConstraintT(constraint.lhs, TensorType(new_dims_rhs_1), op_eq),
                           BinConstraintT(constraint.rhs, TensorType(new_dims_rhs_2), op_eq)] +
                          [BinConstraintD(d1, d2, op_consistency) for
                           d1, d2 in zip(new_dims_rhs_1, new_dims_rhs_2)] + nat_constraints)

        all_constraints.append(c_tensor_i)

    return all_constraints, counter

def gen_greatest_upper_bound(constraint: TGreatestUpperBound, counter: int):
    """
    Args:
        constraint: Greatest upper bound on tensors
        counter: variable tracking

    Returns: A set of equality constraints and DGreatestUpperBound constraints
    """

    all_constraints = []

    for i in range(1, MAX_TENSOR_RANK + 1):
        c = []
        dims1, counter = gen_tensor_dims(i, counter)
        c1tensor = TensorType(dims1)

        dims2, counter = gen_tensor_dims(i, counter)
        c2tensor = TensorType(dims2)

        dims3, counter = gen_tensor_dims(i, counter)
        c3tensor = TensorType(dims3)

        c += [BinConstraintT(constraint.rhs1, c1tensor, op_eq),
              BinConstraintT(constraint.rhs2, c2tensor, op_eq),
              BinConstraintT(constraint.res, c3tensor, op_eq)] + \
            gen_nat_constraints(dims1 + dims2 + dims3)

        assert len(c3tensor.__args__) == len(c1tensor.__args__) == len(c2tensor.__args__)
        # use a separate index so we do not shadow the rank variable `i` above
        for j in range(len(c3tensor.__args__)):
            c.append(DGreatestUpperBound(c3tensor.__args__[j],
                                         c1tensor.__args__[j],
                                         c2tensor.__args__[j]))

        all_constraints.append(Conj(c))
    return all_constraints, counter

def generate_all_broadcasting_possibilities_no_padding(d1: List[DVar], d2: List[DVar], d11: List[DVar], d12: List[DVar]):
    """
    Generate broadcasting constraints assuming no padding. Broadcasting can happen at any dimension.
    We look at all combinations for all dimensions in d1 and d2
    Args:
        d1: input1 dimensions
        d2: input2 dimensions
        d11: broadcasted input1 dimensions
        d12: broadcasted input2 dimensions

    Returns: broadcasting constraints relating the input dimensions to the broadcasted dimensions
    """

    size = len(d1)

    res2 = []

    for i in range(size):
        t1 = broadcast_dim(d1, d2, d11, d12, i)
        t2 = broadcast_dim(d2, d1, d12, d11, i)
        t3 = no_broadcast_dim_with_index(d1, d2, d11, d12, i)

        res2.append(Disj([t1, t2, t3]))

    return Conj(res2)

def gen_broadcasting_constraints(e1: TVar, e2: TVar, e11: TVar, e12: TVar, i: int, counter: int):
    """
    Simulate broadcasting on e1 and e2, storing the results in e11 and e12
    respectively. Because of gradual types, e1 and e2 may not be equal.
    Similarly, e11 and e12 may not be equal. e11 and e12 should be guaranteed
    to be consistent as they represent the shapes of the tensors to be added
    after broadcasting.
    Args:
        e1: TVar representing the type of input 1
        e2: TVar representing the type of input 2
        e11: TVar representing the broadcasted input 1
        e12: TVar representing the broadcasted input 2
        i: The rank of the resulting type of addition
        counter: for variable tracking

    Returns: Simplified broadcasting constraints
    """
    dims, counter = gen_lists_of_dims(4, i, counter)
    [d1, d2, d3, d4] = dims
    nat_dims_i = gen_nat_constraints(list(itertools.chain(*dims)))

    initialize_tensors_constraints = create_equality_constraints_for_broadcasting(e1, e2, e11, e12,
                                                                                  d1, d2, d3, d4)

    [e1_tensor, e11_tensor, e2_tensor, e12_tensor] = initialize_tensors_constraints

    # without padding, broadcast all possibilities for tensors of size i
    final_tensor_constraint_no_padding = Conj([*initialize_tensors_constraints,
                                               generate_all_broadcasting_possibilities_no_padding(d1, d2, d3, d4)])

    # with padding, broadcast all possibilities for tensors of size i
    final_tensor_constraint_padding_arg1, counter = \
        apply_padding(e1, e11_tensor, e2_tensor, e12_tensor, d2, d3, d4, counter)

    final_tensor_constraint_padding_arg2, counter = \
        apply_padding(e2, e12_tensor, e1_tensor, e11_tensor, d1, d4, d3, counter)

    return final_tensor_constraint_no_padding, \
        final_tensor_constraint_padding_arg1, \
        final_tensor_constraint_padding_arg2, nat_dims_i, counter
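
A minimal sketch of calling the broadcasting entry point above for rank-2 inputs (gen_tvar comes from the util module shown later in this diff; the rank 2 is illustrative):

from torch.fx.experimental.migrate_gradual_types.util import gen_tvar

e1, counter = gen_tvar(0)
e2, counter = gen_tvar(counter)
e11, counter = gen_tvar(counter)
e12, counter = gen_tvar(counter)
no_pad, pad1, pad2, nat_dims, counter = gen_broadcasting_constraints(e1, e2, e11, e12, 2, counter)
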
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/operation.py
ADDED
@@ -0,0 +1,14 @@
op_add = '+'
op_sub = '-'
op_mul = '*'
op_div = '/'
op_eq = '='
op_neq = '!='
op_imp = '=>'
op_matching = '⊳'
op_consistency = '~'
op_precision = '⊑'
op_leq = '≤'
op_lt = '<'
op_gt = '>'
op_mod = '%'
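
The operators are plain strings compared by equality inside the transformation rules; e.g. a natural-number constraint on a dimension variable is built as:

from torch.fx.experimental.migrate_gradual_types.constraint import BinConstraintD, DVar

c = BinConstraintD(0, DVar(1), op_leq)  # encodes 0 <= d1
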
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py
ADDED
@@ -0,0 +1,348 @@
from torch.fx.experimental.migrate_gradual_types.constraint import Conj, Disj, T, F, BinConstraintT, BVar, is_bool_expr
from torch.fx.experimental.migrate_gradual_types.constraint import BinConstraintD, TVar, DVar
from torch.fx.experimental.migrate_gradual_types.constraint import Prod, is_algebraic_expression, is_dim
from torch.fx.experimental.migrate_gradual_types.constraint_generator import ConstraintGenerator
from torch.fx.experimental.migrate_gradual_types.constraint_transformation import transform_constraint
from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_eq, op_neq, op_gt, op_lt
from torch.fx.experimental.migrate_gradual_types.operation import op_leq, op_sub, op_div, op_mul, op_mod
from torch.fx.tensor_type import TensorType, Dyn

try:
    import z3  # type: ignore[import]
    from torch.fx.experimental.migrate_gradual_types.z3_types import tensor_type, z3_dyn, D
    HAS_Z3 = True

    def transform_to_z3(constraint, counter, dimension_dict):
        if isinstance(constraint, Conj):
            conjuncts = []
            for c in constraint.conjucts:
                new_c, counter = transform_to_z3(c, counter, dimension_dict)
                conjuncts.append(new_c)
            return z3.And(conjuncts), counter

        elif isinstance(constraint, Disj):
            disjuncts = []
            for c in constraint.disjuncts:
                new_c, counter = transform_to_z3(c, counter, dimension_dict)
                disjuncts.append(new_c)
            return z3.Or(disjuncts), counter

        elif isinstance(constraint, T):
            return True, counter

        elif isinstance(constraint, F):
            return False, counter

        elif isinstance(constraint, BinConstraintT):
            if constraint.op == op_eq:
                lhs, counter = transform_var(constraint.lhs, counter, dimension_dict)
                rhs, counter = transform_var(constraint.rhs, counter, dimension_dict)
                return (lhs == rhs), counter

            else:
                raise NotImplementedError('Method not yet implemented')

        elif isinstance(constraint, BinConstraintD):
            if constraint.op == op_eq:

                if isinstance(constraint.lhs, BVar) and is_bool_expr(constraint.rhs):
                    transformed_rhs, counter = transform_to_z3(constraint.rhs, counter, dimension_dict)
                    transformed_lhs = z3.Bool(constraint.lhs.c)
                    return transformed_lhs == transformed_rhs, counter

                elif is_dim(constraint.lhs) and is_dim(constraint.rhs):
                    # with dimension transformations we consider the encoding
                    lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict)
                    rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict)
                    return lhs == rhs, counter

                else:
                    # then we have an algebraic expression, which means that we disregard
                    # the first element of the encoding
                    lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
                    rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
                    return lhs == rhs, counter

            # The assumption here is that the LHS and RHS must be dimensions
            elif constraint.op == op_neq:
                assert is_dim(constraint.lhs)
                assert is_dim(constraint.rhs)
                lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict)
                rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict)
                if constraint.rhs == Dyn or constraint.lhs == Dyn:
                    if constraint.rhs == Dyn:
                        return lhs.arg(0) == 1, counter
                    elif constraint.lhs == Dyn:
                        return rhs.arg(0) == 1, counter

                # if one of the instances is a number
                elif isinstance(constraint.lhs, int) or isinstance(constraint.rhs, int):
                    if isinstance(constraint.lhs, int):
                        return z3.Or([rhs.arg(0) == 0, z3.And([rhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter

                    elif isinstance(constraint.rhs, int):
                        return z3.Or([lhs.arg(0) == 0, z3.And([lhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter

                else:
                    return z3.Or([z3.And([lhs.arg(0) == 0, rhs.arg(0) != 0]),
                                  z3.And([lhs.arg(0) != 0, rhs.arg(0) == 0]),
                                  z3.And([lhs.arg(0) != 0, rhs.arg(0) != 0, lhs.arg(1) != rhs.arg(1)])]), counter

            elif constraint.op == op_leq:
                # if the dimensions are not dyn, this will come into effect;
                # there would have been another constraint specifying whether a
                # given dimension is dyn or not
                assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
                lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
                rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
                return lhs <= rhs, counter

            elif constraint.op == op_gt:
                assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
                lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
                rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
                return lhs > rhs, counter

            elif constraint.op == op_lt:
                assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
                lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
                rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
                return lhs < rhs, counter

            else:
                raise NotImplementedError('operation not yet implemented')

        else:
            raise NotImplementedError('Operation not yet implemented')

    def transform_var(tensor, counter, dimension_dict):
        """
        Transform tensor variables to a format understood by z3
        Args:
            tensor: Tensor variable or a tensor type potentially with variable dimensions
        Returns: Transformed variable in a z3 format
        """
        if isinstance(tensor, TensorType):
            res = []
            for t in tensor.__args__:
                transformed, counter = transform_dimension(t, counter, dimension_dict)
                res.append(transformed)

            assert len(res) <= 4
            if len(tensor.__args__) == 1:
                return tensor_type.tensor1(res[0]), counter
            elif len(tensor.__args__) == 2:
                return tensor_type.tensor2(res[0], res[1]), counter
            elif len(tensor.__args__) == 3:
                return tensor_type.tensor3(res[0], res[1], res[2]), counter
            elif len(tensor.__args__) == 4:
                return tensor_type.tensor4(res[0], res[1], res[2], res[3]), counter

        elif tensor == Dyn:
            return z3_dyn, counter

        elif isinstance(tensor, TVar):
            return z3.Const(tensor.tvar, tensor_type), counter

    def transform_dimension(dimension, counter, dimension_dict):
        """
        Take a dimension variable or a number and transform it to a tuple
        according to our scheme
        Args:
            dimension: The dimension to be transformed
            counter: variable tracking

        Returns: tuple and the current counter
        """
        if dimension == Dyn:
            counter += 1
            return D(0, z3.Int(counter)), counter
        elif isinstance(dimension, int):
            return D(1, dimension), counter
        elif isinstance(dimension, DVar):
            if dimension.c in dimension_dict:
                return D(z3.Int(dimension_dict[dimension.c]), z3.Int(dimension.c)), counter
            else:
                counter += 1
                dimension_dict[dimension.c] = counter
                return D(z3.Int(counter), z3.Int(dimension.c)), counter

    def transform_algebraic_expression(expr, counter, dimension_dict):
        """
        Transform an algebraic expression to z3 format
        Args:
            expr: An expression is either a dimension variable or an algebraic expression

        Returns: the transformed expression
        """
        assert is_algebraic_expression(expr) or is_dim(expr)

        if is_dim(expr):
            transformed, counter = transform_dimension(expr, counter, dimension_dict)
            return transformed.arg(1), counter

        elif isinstance(expr, Prod):

            dims = []
            for dim in expr.products:
                assert is_dim(dim)
                d, counter = transform_dimension(dim, counter, dimension_dict)
                dims.append(d.arg(1))
            return z3.Product(dims), counter

        elif is_algebraic_expression(expr):

            lhs, counter = transform_algebraic_expression(expr.lhs, counter, dimension_dict)
            rhs, counter = transform_algebraic_expression(expr.rhs, counter, dimension_dict)

            if expr.op == op_sub:
                c = lhs - rhs

            elif expr.op == op_add:
                c = lhs + rhs

            elif expr.op == op_div:
                c = lhs / rhs

            elif expr.op == op_mul:
                c = lhs * rhs

            elif expr.op == op_mod:
                c = lhs % rhs

            else:
                raise NotImplementedError('operation not yet implemented')

            return c, counter

        else:
            raise RuntimeError(f'Cannot transform expression {expr}')

    def transform_all_constraints(traced, counter=0):
        """
        Given a trace, generate constraints and transform them to z3 format
        """
        dimension_dict = {}  # type: ignore[var-annotated]

        generator = ConstraintGenerator(traced)
        new_constraints, counter = generator.generate_constraints(counter)

        # transform precision, matching and consistency until a fixed point is reached
        new_constraints, counter = iterate_till_fixed_point(new_constraints, counter)

        transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict)
        return transformed

    def iterate_till_fixed_point(constraints, counter):
        """
        Transform constraints until a fixed point is reached
        """
        old_c = None
        while old_c != constraints:
            old_c = constraints
            constraints, counter = transform_constraint(constraints, counter)
        return constraints, counter

    def transform_all_constraints_trace_time(tracer_root, graph, node, counter=0):
        """
        Take a node and a graph and generate two sets of constraints:
        one conjoined with the node's conditional constraint and the
        other conjoined with its negation
        Args:
            tracer_root: the root for getting the module instances
            graph: the graph so far in the tracing process
            node: node that represents a conditional
            counter: variable tracking

        Returns: Two sets of constraints. One with a conjunction with the
        conditional constraint and the other with a conjunction with
        its negation.
        """
        dimension_dict = {}  # type: ignore[var-annotated]

        generator = ConstraintGenerator(tracer_root, graph)
        new_constraints, counter = generator.generate_constraints(counter)

        condition_constraint = new_constraints.conjucts[-1]

        # we know the constraint is a conjunction where the last constraint is about the conditional,
        # so remove the last constraint
        new_constraints.conjucts = new_constraints.conjucts[:-1]

        # transform precision, matching and consistency until a fixed point is reached
        new_constraints, counter = iterate_till_fixed_point(new_constraints, counter)

        # since the function returns a list of one element, we get the first element;
        # we are only interested in the RHS in this case because the LHS just stores
        # the result

        # we make sure the constraint is of the form:
        # c = b where b is a boolean expression
        # and we consider b (constraint.rhs) for transformation
        assert isinstance(condition_constraint.lhs, BVar)
        assert is_bool_expr(condition_constraint.rhs)
        condition_constraint_rhs = condition_constraint.rhs

        # transform the condition constraint
        condition_constraint_rhs, counter = iterate_till_fixed_point(condition_constraint_rhs, counter)

        transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict)

        transformed_condition_constraint, counter = transform_to_z3(condition_constraint_rhs, counter, dimension_dict)

        negation_transformed_condition_constraint = z3.Not(transformed_condition_constraint)

        return z3.And([transformed, transformed_condition_constraint]), \
            z3.And([transformed, negation_transformed_condition_constraint])

    def evaluate_conditional_with_constraints(tracer_root, graph, node, counter=0, user_constraints=None):
        """
        Given an IR and a node representing a conditional, evaluate the conditional
        and its negation
        Args:
            tracer_root: Tracer root for module instances
            graph: the graph so far in the tracing process
            node: The node to be evaluated
            counter: variable tracking
            user_constraints: additional constraints provided by the user

        Returns: the results of evaluating the condition and the negation with
        the rest of the constraints
        """

        transformed_positive, transformed_negative = \
            transform_all_constraints_trace_time(tracer_root, graph, node, counter)

        s = z3.Solver()
        s.add(transformed_positive)
        if user_constraints is not None:
            s.add(user_constraints)
        condition = s.check()

        s = z3.Solver()
        s.add(transformed_negative)
        if user_constraints is not None:
            s.add(user_constraints)
        negation = s.check()
        return condition, negation

except ImportError:
    HAS_Z3 = False
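
A minimal end-to-end sketch of this module (assuming z3 is installed; MyModule is a placeholder user module and symbolic_trace is the standard torch.fx tracer):

import torch.fx
import z3

traced = torch.fx.symbolic_trace(MyModule())
constraints = transform_all_constraints(traced)
s = z3.Solver()
s.add(constraints)
print(s.check())  # z3.sat if the traced program is typable
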
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/util.py
ADDED
@@ -0,0 +1,52 @@
from torch.fx.experimental.migrate_gradual_types.constraint import TVar, DVar, BinConstraintD, \
|
2 |
+
BVar
|
3 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_leq
|
4 |
+
|
5 |
+
|
6 |
+
def gen_tvar(curr):
|
7 |
+
"""
|
8 |
+
Generate a tensor variable
|
9 |
+
:param curr: The current counter
|
10 |
+
:return: a tensor variable and the updated counter
|
11 |
+
"""
|
12 |
+
curr += 1
|
13 |
+
return TVar(curr), curr
|
14 |
+
|
15 |
+
|
16 |
+
def gen_dvar(curr):
|
17 |
+
"""
|
18 |
+
Generate a dimension variable
|
19 |
+
:param curr: the current counter
|
20 |
+
:return: a dimension variable and an updated counter
|
21 |
+
"""
|
22 |
+
curr += 1
|
23 |
+
return DVar(curr), curr
|
24 |
+
|
25 |
+
def gen_bvar(curr):
|
26 |
+
"""
|
27 |
+
Generate a boolean variable
|
28 |
+
:param curr: the current counter
|
29 |
+
:return: a boolean variable and an updated counter
|
30 |
+
"""
|
31 |
+
curr += 1
|
32 |
+
return BVar(curr), curr
|
33 |
+
|
34 |
+
def gen_tensor_dims(n, curr):
|
35 |
+
"""
|
36 |
+
Generate a list of tensor dimensions
|
37 |
+
:param n: the number of dimensions
|
38 |
+
:param curr: the current counter
|
39 |
+
:return: a list of dimension variables and an updated counter
|
40 |
+
"""
|
41 |
+
dims = []
|
42 |
+
for _ in range(n):
|
43 |
+
dvar, curr = gen_dvar(curr)
|
44 |
+
dims.append(dvar)
|
45 |
+
return dims, curr
|
46 |
+
|
47 |
+
|
48 |
+
def gen_nat_constraints(list_of_dims):
|
49 |
+
"""
|
50 |
+
Generate natural number constraints for dimensions
|
51 |
+
"""
|
52 |
+
return [BinConstraintD(0, d, op_leq) for d in list_of_dims]
|
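A minimal sketch (illustration only, not part of the diff) chaining the helpers above: allocate three fresh dimension variables, threading the counter explicitly, then constrain each one to be a natural number.

from torch.fx.experimental.migrate_gradual_types.util import (
    gen_tensor_dims,
    gen_nat_constraints,
)

dims, counter = gen_tensor_dims(3, 0)       # three fresh DVars; counter advances to 3
nat_constraints = gen_nat_constraints(dims)  # one 0 <= d constraint per DVar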
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/z3_types.py
ADDED
@@ -0,0 +1,29 @@
try:
    import z3  # type: ignore[import]
    HAS_Z3 = True
    # dynamic type
    dyn = z3.DeclareSort('Dyn')
    dyn_type = z3.Const('dyn', dyn)

    # dimension
    dim = z3.Datatype('dim')
    dim.declare('dim', ('0', z3.IntSort()), ('1', z3.IntSort()))
    dim = dim.create()

    # tensors
    tensor_type = z3.Datatype('TensorType')
    tensor_type.declare('Dyn', ('dyn', dyn))
    tensor_type.declare('tensor1', ('0', dim))
    tensor_type.declare('tensor2', ('0', dim), ('1', dim))
    tensor_type.declare('tensor3', ('0', dim), ('1', dim), ('2', dim))
    tensor_type.declare('tensor4', ('0', dim), ('1', dim), ('2', dim), ('3', dim))
    tensor_type = tensor_type.create()

    # create dimension
    D = dim.dim

    z3_dyn = tensor_type.Dyn(dyn_type)


except ImportError:
    HAS_Z3 = False
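A minimal sketch (illustration only, not part of the diff; assumes z3 is installed, and that the first Int field of `dim` acts as the static/dynamic marker used elsewhere in this package): build a concrete rank-2 tensor type from the datatypes above and solve for a variable equal to it.

import z3
from torch.fx.experimental.migrate_gradual_types.z3_types import tensor_type, D

t = z3.Const('t', tensor_type)                 # an unknown tensor type
rank2 = tensor_type.tensor2(D(1, 3), D(1, 4))  # rank-2 type with static dims 3 and 4
s = z3.Solver()
s.add(t == rank2)
print(s.check())  # sat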
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/normalize.py
ADDED
@@ -0,0 +1,162 @@
import operator
from typing import Any, Callable, Dict, Tuple, Optional

import torch
import torch.fx
import torch.fx as fx
from torch.fx import Transformer, Proxy
from torch.fx.node import Argument, Target, Node, map_aggregate
from torch.fx.operator_schemas import (
    normalize_module,
    normalize_function,
    create_type_hint,
)

from .schema_type_annotation import AnnotateTypesWithSchema


class NormalizeArgs(Transformer):
    """
    Normalize arguments to Python targets. This means that
    `args/kwargs` will be matched up to the module/functional's
    signature and rewritten to exclusively kwargs in positional order
    if `normalize_to_only_use_kwargs` is true. Also populates default
    values. Does not support positional-only parameters or varargs
    parameters (*args, **kwargs).

    If the nodes have 'type' metadata, it will use it to disambiguate
    overloads. Otherwise, it will throw an error.

    Example usage:
        m = torchvision.models.resnet18()
        traced = torch.fx.symbolic_trace(m)
        traced = NormalizeArgs(traced).transform()
    """

    def __init__(
        self, module: torch.fx.GraphModule, normalize_to_only_use_kwargs: bool = True
    ):
        super().__init__(module)
        self.node_map: Dict[Proxy, Node] = {}
        self.normalize_to_only_use_kwargs = normalize_to_only_use_kwargs

    def run_node(self, n: Node) -> Any:
        args, kwargs = self.fetch_args_kwargs_from_env(n)

        def get_type(arg):
            if isinstance(arg, fx.Node):
                return n.meta["type"] if "type" in n.meta else None
            return type(arg)

        arg_types = map_aggregate(n.args, get_type)
        assert isinstance(arg_types, tuple)
        arg_types = tuple([create_type_hint(i) for i in arg_types])
        kwarg_types = {k: get_type(v) for k, v in kwargs.items()}
        if n.op == "call_function":
            out = self.call_function(n.target, args, kwargs, arg_types, kwarg_types)
        else:
            out = super().run_node(n)
        if n.op != "output":
            self.node_map[out] = n
            out.node.meta = n.meta
            out.node.type = n.type
        return out

    def call_function(
        self,
        target: Target,
        args: Tuple[Argument, ...],
        kwargs: Dict[str, Any],
        arg_types: Optional[Tuple[Any, ...]] = None,
        kwarg_types: Optional[Dict[str, Any]] = None,
    ):
        assert callable(target)
        new_args_and_kwargs = normalize_function(
            target,
            args,  # type: ignore[arg-type]
            kwargs,
            arg_types,  # type: ignore[arg-type]
            kwarg_types,
            self.normalize_to_only_use_kwargs,
        )
        if new_args_and_kwargs:
            new_args, new_kwargs = new_args_and_kwargs
            return self.tracer.create_proxy(
                "call_function", target, new_args, new_kwargs
            )
        else:
            return super().call_function(target, args, kwargs)

    def call_module(
        self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any]
    ):
        assert isinstance(target, str)
        new_args_and_kwargs = normalize_module(
            self.module,
            target,
            args,  # type: ignore[arg-type]
            kwargs,
            self.normalize_to_only_use_kwargs,
        )
        if new_args_and_kwargs:
            new_args, new_kwargs = new_args_and_kwargs
            return super().call_module(target, new_args, new_kwargs)
        else:
            return super().call_module(target, args, kwargs)


class NormalizeOperators(AnnotateTypesWithSchema):
    """
    Normalize callsites that are different ways of "spelling" the same
    invocation into a single, canonical call. Currently supports:

    1. Normalize operators (e.g. operator.add) to the `torch` ops they
       ultimately invoke (e.g. torch.add) when it is possible to statically
       reason that

    Example usage:

        m = torchvision.models.resnet18()

        traced = torch.fx.symbolic_trace(m)

        traced = NormalizeOperators(traced).transform()
    """

    binary_magic_method_remap: Dict[
        Callable[[Any, Any], Any], Callable[[Any, Any], Any]
    ] = {
        torch.add: operator.add,
        torch.mul: operator.mul,
        torch.sub: operator.sub,
        torch.div: operator.truediv,
        torch.floor_divide: operator.floordiv,
        torch.remainder: operator.mod,
        torch.eq: operator.eq,
        torch.ne: operator.ne,
        torch.lt: operator.lt,
        torch.le: operator.le,
        torch.gt: operator.gt,
        torch.ge: operator.ge,
    }

    def call_function(
        self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any]
    ):
        # Normalize operators according to the magic methods implemented on tensors here:
        # https://github.com/pytorch/pytorch/blob/28c5d90b679c6b38bf4183ec99f16d933c2f1bcd/tools/autograd/templates/python_variable_methods.cpp#L1137 # noqa: B950

        assert callable(target)

        if target in self.binary_magic_method_remap:
            if len(args) != 2:
                return super().call_function(target, args, kwargs)
            lhs, rhs = args

            return super().call_function(
                target=self.binary_magic_method_remap[target],
                args=(lhs, rhs),
                kwargs={},
            )

        return super().call_function(target, args, kwargs)
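A minimal sketch (illustration only, not part of the diff) of `NormalizeOperators` rewriting a `torch.add` callsite into its canonical `operator.add` spelling:

import operator

import torch
import torch.fx
from torch.fx.experimental.normalize import NormalizeOperators

class M(torch.nn.Module):
    def forward(self, x, y):
        return torch.add(x, y)

traced = torch.fx.symbolic_trace(M())
normalized = NormalizeOperators(traced).transform()
# the binary_magic_method_remap table maps torch.add -> operator.add
assert any(n.target is operator.add for n in normalized.graph.nodes)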
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/optimization.py
ADDED
@@ -0,0 +1,405 @@
import torch.fx as fx
from torch.fx.node import Argument, Target
from torch.nn.utils.fusion import fuse_conv_bn_eval
from typing import Type, Dict, Any, Tuple, Iterable, Optional, List, cast
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.fx.passes.shape_prop import ShapeProp
import copy
from collections import defaultdict
import torch.utils.mkldnn as th_mkldnn
import operator
import time
import logging
from enum import Enum

def _parent_name(target : str) -> Tuple[str, str]:
    """
    Splits a qualname into parent path and last atom.
    For example, `foo.bar.baz` -> (`foo.bar`, `baz`)
    """
    *parent, name = target.rsplit('.', 1)
    return parent[0] if parent else '', name

# Works for length 2 patterns with 2 modules
def matches_module_pattern(pattern: Iterable[Type], node: fx.Node, modules: Dict[str, Any]):
    if len(node.args) == 0:
        return False
    nodes: Tuple[Any, fx.Node] = (node.args[0], node)
    for expected_type, current_node in zip(pattern, nodes):
        if not isinstance(current_node, fx.Node):
            return False
        if current_node.op != 'call_module':
            return False
        if not isinstance(current_node.target, str):
            return False
        if current_node.target not in modules:
            return False
        if type(modules[current_node.target]) is not expected_type:
            return False
    return True


def replace_node_module(node: fx.Node, modules: Dict[str, Any], new_module: torch.nn.Module):
    assert(isinstance(node.target, str))
    parent_name, name = _parent_name(node.target)
    modules[node.target] = new_module
    setattr(modules[parent_name], name, new_module)

def fuse(model: torch.nn.Module, inplace=False) -> torch.nn.Module:
    """
    Fuses convolution/BN layers for inference purposes. Will deepcopy your
    model by default, but can modify the model inplace as well.
    """
    patterns = [(nn.Conv1d, nn.BatchNorm1d),
                (nn.Conv2d, nn.BatchNorm2d),
                (nn.Conv3d, nn.BatchNorm3d)]
    if not inplace:
        model = copy.deepcopy(model)
    fx_model = fx.symbolic_trace(model)
    modules = dict(fx_model.named_modules())
    new_graph = copy.deepcopy(fx_model.graph)

    for pattern in patterns:
        for node in new_graph.nodes:
            if matches_module_pattern(pattern, node, modules):
                if len(node.args[0].users) > 1:  # Output of conv is used by other nodes
                    continue
                conv = modules[node.args[0].target]
                bn = modules[node.target]
                if not bn.track_running_stats:
                    continue
                fused_conv = fuse_conv_bn_eval(conv, bn)
                replace_node_module(node.args[0], modules, fused_conv)
                node.replace_all_uses_with(node.args[0])
                new_graph.erase_node(node)
    return fx.GraphModule(fx_model, new_graph)

def remove_dropout(model: nn.Module) -> nn.Module:
    """
    Removes all dropout layers from the module.
    """
    fx_model = fx.symbolic_trace(model)

    class DropoutRemover(torch.fx.Transformer):
        def call_module(self, target : Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
            if isinstance(self.submodules[target], nn.Dropout):
                assert len(args) == 1
                return args[0]
            else:
                return super().call_module(target, args, kwargs)
    return DropoutRemover(fx_model).transform()

def extract_subgraph(orig_module: nn.Module, nodes: List[fx.Node], inputs: List[fx.Node], outputs: List[fx.Node]):
    """
    Given lists of nodes from an existing graph that represent a subgraph, returns a submodule that executes that subgraph.
    """
    new_graph = fx.Graph()
    env: Dict[fx.Node, fx.Node] = {}
    for input in inputs:
        new_node = new_graph.placeholder(input.name)
        env[input] = new_node
    for node in nodes:
        new_node = new_graph.node_copy(node, lambda x: env[x])
        env[node] = new_node
    new_graph.output([env[output] for output in outputs])
    new_graph.lint()
    return fx.GraphModule(orig_module, new_graph)

mkldnn_supported = [
    nn.Conv2d, nn.Linear, nn.BatchNorm2d, nn.ReLU, nn.MaxPool2d, nn.AvgPool2d, nn.AdaptiveAvgPool2d,
    torch.relu, torch.transpose, torch.sigmoid,
    F.relu, F.avg_pool2d, F.adaptive_avg_pool2d
]
# These are operators that may not be convertible into MKLDNN ops (e.g. the
# args are scalar values). Thus, we only include them in the subgraph if their
# arguments are already in MKLDNN.
# TODO: Determine whether this can be removed after type inference.
mkldnn_supported_unknown = [operator.add, operator.mul]
mkldnn_map = {
    nn.Conv2d: th_mkldnn.MkldnnConv2d,
    nn.Linear: th_mkldnn.MkldnnLinear,
    nn.BatchNorm2d: lambda a, _: th_mkldnn.MkldnnBatchNorm(a)
}


def modules_to_mkldnn(nodes: List[fx.Node], modules: Dict[str, nn.Module]):
    """
    For each node, if it's a module that can be preconverted into MKLDNN,
    then we do so and create a mapping to allow us to convert from the MKLDNN
    version of the module to the original.
    """
    old_modules: Dict[nn.Module, nn.Module] = {}
    for node in nodes:
        if node.op == 'call_module':
            assert(isinstance(node.target, str))
            cur_module = modules[node.target]
            if type(cur_module) in mkldnn_map:
                new_module = mkldnn_map[type(cur_module)](cur_module, torch.float)
                assert(isinstance(new_module, nn.Module))
                old_modules[new_module] = copy.deepcopy(cur_module)
                replace_node_module(node, modules, new_module)
    return old_modules

def reset_modules(nodes: List[fx.Node], modules: Dict[str, nn.Module], old_modules: Dict[nn.Module, nn.Module]):
    """
    Maps each module that's been changed with `modules_to_mkldnn` back to its
    original.
    """
    for node in nodes:
        if node.op == 'call_module':
            assert(isinstance(node.target, str))
            cur_module = modules[node.target]
            if cur_module in old_modules:
                replace_node_module(node, modules, old_modules[cur_module])

class MklSubgraph:
    def __init__(self, fx_graph: fx.Graph):
        self.fx_graph = fx_graph
        self.nodes: List[fx.Node] = []
        self.start_nodes: List[fx.Node] = []
        self.end_nodes: List[fx.Node] = []

def gen_mkl_autotuner(example_inputs, iters=10, warmup=1):
    """
    This generates a heuristic that can be passed into `optimize_for_inference` that
    determines whether a subgraph should be run in MKL by running it with the example_inputs.

    Example usage:
        heuristic = gen_mkl_autotuner(example_inputs, iters=10)
        fast_model = optimization.optimize_for_inference(model, heuristic)
    """
    fx_model = None
    old_modules = None

    def use_mkl_heuristic(graph: MklSubgraph) -> bool:
        nonlocal fx_model, old_modules
        input_nodes = graph.start_nodes
        if fx_model is None:
            fx_model = graph.fx_graph.owning_module
            old_modules = graph.fx_graph.old_modules  # type: ignore[attr-defined]
            ShapeProp(fx_model).propagate(example_inputs)
        sample_inputs = [torch.randn(node.shape) for node in input_nodes]  # type: ignore[attr-defined]
        output_args = cast(List[fx.Node], [node.args[0] for node in graph.end_nodes])
        submodule = extract_subgraph(fx_model, graph.nodes, input_nodes, output_args)

        def benchmark(f):
            for _ in range(warmup):
                f()
            begin = time.time()
            for _ in range(iters):
                out = f()
            return time.time() - begin

        mkl_time = benchmark(lambda: [i.to_dense() for i in submodule(*[i.to_mkldnn() for i in sample_inputs])])

        reset_modules(submodule.graph.nodes, dict(submodule.named_modules()), old_modules)
        no_mkl_time = benchmark(lambda: submodule(*sample_inputs))
        return mkl_time < no_mkl_time
    return use_mkl_heuristic

def use_mkl_length(graph: MklSubgraph) -> bool:
    """
    This is a heuristic that can be passed into `optimize_for_inference` that
    determines whether a subgraph should be run in MKL by checking if there
    are more than 2 nodes in it
    """
    return len(graph.nodes) > 2

class UnionFind:
    def __init__(self, n):
        self.parent: List[Optional[int]] = [None] * n
        self.size: List[int] = [0] * n

    def make_set(self, v: int):
        self.parent[v] = v
        self.size[v] = 1

    def find(self, v: int) -> int:
        par = self.parent[v]
        if v == par:
            return v
        assert(par is not None)
        self.parent[v] = self.find(par)
        return cast(int, self.parent[v])

    def join(self, a: int, b: int):
        a, b = self.find(a), self.find(b)
        if a == b:
            return a
        if self.size[a] < self.size[b]:
            a, b = b, a
        self.parent[b] = a
        self.size[a] += self.size[b]

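# Usage sketch (comments only, illustrative, not in the upstream file): the
# UnionFind above is later used to merge overlapping MKLDNN subgraph "colors":
#     uf = UnionFind(2)
#     uf.make_set(0); uf.make_set(1)
#     uf.join(0, 1)
#     assert uf.find(0) == uf.find(1)  # both nodes now share one subgraph id
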
def optimize_for_inference(
    model: torch.nn.Module,
    pass_config: Optional[Dict[str, Any]] = None,
    tracer: Type[fx.Tracer] = fx.Tracer
) -> torch.nn.Module:
    """
    Performs a set of optimization passes to optimize a model for the
    purposes of inference. Specifically, the passes that are run are:
    1. Conv/BN fusion
    2. Dropout removal
    3. MKL layout optimizations

    The third optimization takes a function `use_mkl_heuristic` that's used
    to determine whether a subgraph should be explicitly run in MKL layout.

    Note: As FX does not currently handle aliasing, this pass currently
    assumes nothing aliases. If that isn't true, use at your own risk.
    """
    default_pass_config = {
        "conv_bn_fuse": True,
        "remove_dropout": True,
        "mkldnn_layout_optimize": {'heuristic': use_mkl_length},
    }
    if pass_config is None:
        pass_config = {}
    default_pass_config.update(pass_config)

    if default_pass_config["conv_bn_fuse"]:
        model = fuse(model)
    if default_pass_config["remove_dropout"]:
        model = remove_dropout(model)
    if default_pass_config["mkldnn_layout_optimize"] is False:
        return model
    if not isinstance(default_pass_config["mkldnn_layout_optimize"], dict):
        raise RuntimeError("mkldnn_layout_optimize config is not a dict")
    if "heuristic" not in default_pass_config["mkldnn_layout_optimize"]:
        raise RuntimeError("Heuristic not found in mkldnn_layout_optimize config")
    use_mkl_heuristic = default_pass_config["mkldnn_layout_optimize"]["heuristic"]

    cur_tracer = tracer()
    fx_graph = cur_tracer.trace(copy.deepcopy(model))
    fx_model = fx.GraphModule(cur_tracer.root, fx_graph)
    modules: Dict[str, nn.Module] = dict(model.named_modules())

    class MklSupport(Enum):
        NO = 1
        YES = 2
        UNKNOWN = 3

    # Inserts to_mkldnn and to_dense around every node we want to be a MKLDNN node.
    # If the op is in `mkldnn_supported` then we always treat it as a MKLDNN node.
    # However, if it's in `mkldnn_supported_unknown`, then we only treat it as
    # a MKLDNN node if its inputs are MKLDNN nodes.
    for node in list(fx_graph.nodes):
        supports_mkldnn = MklSupport.NO
        if node.op == 'call_module':
            cur_module = modules[node.target]
            if type(cur_module) in mkldnn_supported:
                supports_mkldnn = MklSupport.YES
                sample_parameter = next(cur_module.parameters(), None)
                if sample_parameter is not None:
                    assert(sample_parameter.dtype == torch.float), "this pass is only for torch.float modules"
                    assert(sample_parameter.device == torch.device('cpu')), "this pass is only for CPU modules"
        elif node.op == 'call_function':
            if node.target in mkldnn_supported:
                supports_mkldnn = MklSupport.YES
            elif node.target in mkldnn_supported_unknown:
                supports_mkldnn = MklSupport.UNKNOWN

        if supports_mkldnn != MklSupport.NO:
            if supports_mkldnn == MklSupport.UNKNOWN:
                if not any(arg.target == 'to_dense' for arg in node.args):
                    continue
            with fx_graph.inserting_before(node):
                mkldnn_args = fx.map_arg(node.args, lambda n: fx_graph.call_method('to_mkldnn', (n, )))

            node.args = cast(Tuple[fx.node.Argument], mkldnn_args)

            with fx_graph.inserting_after(node):
                dense_x = fx_graph.create_node('call_method', 'to_dense', (node,))
                node.replace_all_uses_with(dense_x)
                dense_x.args = (node,)

    # Does pre-conversion of all modules into MKLDNN (when possible)
    old_modules = modules_to_mkldnn(list(fx_graph.nodes), modules)
    fx_graph.old_modules = old_modules  # type: ignore[attr-defined]

    # optimizes all a -> to_dense -> to_mkldnn -> b patterns into a -> b
    for node in fx_graph.nodes:
        if node.op == 'call_method' and node.target == 'to_dense':
            prv_node = node.args[0]
            users = list(node.users)
            for user in users:
                if user.op == 'call_method' and user.target == 'to_mkldnn':
                    user.replace_all_uses_with(prv_node)
                    fx_graph.erase_node(user)
            if len(node.users) == 0:
                fx_graph.erase_node(node)


    num_nodes = len(fx_graph.nodes)
    uf = UnionFind(num_nodes)

    def get_color(n):
        if hasattr(n, 'color'):  # Current node is part of a MKL subgraph
            return uf.find(n.color)
        if hasattr(n, 'start_color'):  # Current node is input to MKL subgraph
            return uf.find(n.start_color)
        return None


    # This code is to find each MKLDNN subgraph. Each MKLDNN subgraph consists
    # of input nodes (which are only `to_mkldnn` calls), output nodes
    # (`to_dense` calls), and intermediate nodes, which are run entirely on
    # MKLDNN layout tensors.
    #
    # Specifically, this code does a flood fill on a directed acyclic graph
    # (DAG), starting from each possible "start node" (i.e: `to_mkldnn` nodes).
    # If every node only had one input, this would be sufficient. However, in
    # the case that a node has multiple inputs coming from different start
    # nodes (i.e. colors), we need to join these 2 colors into 1. That's done
    # using a Disjoint Set Union.
    for cur_idx, node in enumerate(fx_graph.nodes):
        if node.op == 'call_method' and node.target == 'to_mkldnn':
            node.start_color = cur_idx
            uf.make_set(cur_idx)
        elif node.op == 'call_method' and node.target == 'to_dense':
            assert(get_color(node.args[0]) is not None)
            node.end_color = get_color(node.args[0])
        else:
            cur_colors = [get_color(i) for i in node.all_input_nodes if isinstance(i, fx.Node) if get_color(i) is not None]

            if len(cur_colors) == 0:
                continue
            assert(not any(i is None for i in cur_colors))
            cur_colors = sorted(cur_colors)
            node.color = cur_colors[0]
            for other_color in cur_colors[1:]:
                uf.join(cur_colors[0], other_color)


    mkldnn_graphs: Dict[int, MklSubgraph] = defaultdict(lambda: MklSubgraph(fx_graph))
    for node in fx_graph.nodes:
        if hasattr(node, 'color'):
            mkldnn_graphs[uf.find(node.color)].nodes.append(node)
        if hasattr(node, 'start_color'):
            mkldnn_graphs[uf.find(node.start_color)].start_nodes.append(node)
        if hasattr(node, 'end_color'):
            mkldnn_graphs[uf.find(node.end_color)].end_nodes.append(node)


    # Now that we have all the subgraphs, we need to decide which MKLDNN
    # subgraphs we actually want to keep in MKLDNN.
    for graph in mkldnn_graphs.values():
        if not use_mkl_heuristic(graph):
            for node in graph.start_nodes + graph.end_nodes:
                prv = node.args[0]
                node.replace_all_uses_with(prv)
                fx_graph.erase_node(node)
            reset_modules(graph.nodes, modules, old_modules)

    mkldnn_conversions = 0
    for node in fx_graph.nodes:
        if node.target == 'to_mkldnn' or node.target == 'to_dense':
            mkldnn_conversions += 1

    logging.getLogger(__name__).info(f"mkldnn conversions: {mkldnn_conversions}")
    fx_graph.lint()
    result = fx.GraphModule(model, fx_graph)
    return result
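A minimal sketch (illustration only, not part of the diff) of the conv/BN fusion pass defined above; fusion only applies in eval mode and is numerically equivalent up to floating-point error:

import torch
import torch.nn as nn
from torch.fx.experimental.optimization import fuse

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)).eval()
fused = fuse(model)  # deepcopies by default; pass inplace=True to mutate
x = torch.randn(1, 3, 16, 16)
assert torch.allclose(model(x), fused(x), atol=1e-6)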
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/partitioner_utils.py
ADDED
@@ -0,0 +1,317 @@
from enum import Enum
from typing import NamedTuple, Dict, List, Set

from torch.fx.node import Node, map_arg


class Partition:
    """Partition class contains all the information about an individual partition.
    It also provides necessary methods for manipulation the partition.
    """

    def __init__(self, partition_id: int) -> None:
        self.nodes: Set[Node] = set()
        self.partition_id = partition_id
        self.parents: Set[Partition] = set()
        self.children: Set[Partition] = set()
        self.bfs_level: int = -1
        self.used_mem_bytes: int = 0
        self.logical_device_ids: List[int] = []

    def __str__(self):
        return str(self.partition_id)

    def recalculate_mem_size(self):
        self.used_mem_bytes = 0
        for node in self.nodes:
            self.used_mem_bytes += get_extra_size_of(node, self.nodes)

    def add_node(self, node):
        input_nodes: Dict[Node, None] = {}
        map_arg(node.args, input_nodes.setdefault)
        map_arg(node.kwargs, input_nodes.setdefault)
        # Add current node's input nodes if they are placeholder or constants
        for n in input_nodes:
            if n.op in {"placeholder", "get_attr"}:
                self.nodes.add(n)
        self.nodes.add(node)
        self.recalculate_mem_size()

    def remove_node(self, node):
        # Remove a node only if the node is in the partition
        if node in self.nodes:
            self.nodes.remove(node)
            # Collect the node's input nodes
            input_nodes: Dict[Node, None] = {}
            map_arg(node.args, input_nodes.setdefault)
            map_arg(node.kwargs, input_nodes.setdefault)
            # Check if an input node is a placeholder or get_attr,
            # and this input node is not used by some other nodes in this partition,
            # the remove this input node
            for input_node in input_nodes:
                if all(
                    n not in self.nodes for n in input_node.users
                ) and input_node.op in {"placeholder", "get_attr"}:
                    self.nodes.remove(input_node)
            self.recalculate_mem_size()


class Device(NamedTuple):
    name: str
    available_mem_bytes: int
    logical_id: int


class NodeLatency(NamedTuple):
    # Latency due to the memory bandwidth
    mem_latency_sec: float
    # Latency due to the computation
    computer_latency_sec: float


class PartitionLatency(NamedTuple):
    # Sum of all nodes' memory latency on the critical path
    mem_latency_sec: float
    # Sum of all nodes' compute latency on the critical path
    computer_latency_sec: float
    # Latency of the critical path
    overall_latency_sec: float


class PartitionMode(Enum):
    size_based = 0
    sparse_nn = 1
    cost_aware = 2
    kl_based = 3
    aot_based = 4


class PartitionerConfig(NamedTuple):
    devices: List[Device]
    mode: PartitionMode = PartitionMode.size_based
    transfer_rate_bytes_per_sec: float = 0.0
    node_to_latency_mapping: Dict[Node, NodeLatency] = {}
    node_to_partition_mapping: Dict[Node, int] = {}
    partition_to_logical_device_mapping: Dict[int, List[int]] = {}
    # Saturate host by replicating partitions to the remaining idle devices.
    saturate_host: bool = False


def get_extra_size_of(node: Node, nodes: Set[Node]) -> int:
    """Given a node and a set of nodes,
    this function return the extra size that needed
    if this node is included in this set.
    """
    # Find all its input nodes
    input_nodes: Dict[Node, None] = {}
    map_arg(node.args, input_nodes.setdefault)
    map_arg(node.kwargs, input_nodes.setdefault)
    # Calculate total size of related nodes
    total_size_of_input_nodes = 0
    for n in input_nodes:
        # Make sure this node hasn't been in this set yet
        if n not in nodes:
            size_bytes = getattr(n, "size_bytes", None)
            if size_bytes:
                total_size_of_input_nodes += size_bytes.output_size
            else:
                raise RuntimeError("node has no size_bytes attr")
    # Don't forget the op node itself
    size_bytes = getattr(node, "size_bytes", None)
    if size_bytes:
        total_size_of_input_nodes += size_bytes.total_size
    else:
        raise RuntimeError("node has no size_bytes attr")
    return total_size_of_input_nodes

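# Worked example (comments only, illustrative, not in the upstream file): if a
# candidate node's own size_bytes.total_size is 40 bytes and it reads one input
# node outside `nodes` whose size_bytes.output_size is 16 bytes, then
# get_extra_size_of returns 40 + 16 = 56 bytes.
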
def get_latency_of_one_partition(
    partition: Partition, node_to_latency_mapping: Dict[Node, NodeLatency]
) -> PartitionLatency:
    """Given a partition and its nodes' latency, return a PartitionLatency for this partition"""

    def get_top_nodes(partition: Partition) -> List[Node]:
        """Given a partition, return a list of nodes on the top bfs level"""
        top_nodes: List[Node] = []
        for node in partition.nodes:
            # Skip placeholder and get_attr nodes
            if node.op in {"placeholder", "get_attr"}:
                continue
            input_nodes: Dict[Node, None] = {}
            map_arg(node.args, input_nodes.setdefault)
            map_arg(node.kwargs, input_nodes.setdefault)
            # If a node has no input nodes in this partition,
            # or its input nodes in this partition are placeholders and get_attrs
            # this node is on the top bfs level in this partition
            if not any(
                n in partition.nodes and n.op not in {"placeholder", "get_attr"}
                for n in input_nodes
            ):
                top_nodes.append(node)
        return top_nodes

    def dfs_helper(node: Node, partition_latency) -> PartitionLatency:
        """Given a top node of a partition, this function returns
        the latency of the critical path in the partition
        """
        node_latency = node_to_latency_mapping[node]
        # Calculate the current overall latency of the partition
        overall_latency_sec = partition_latency.overall_latency_sec + max(
            node_latency.computer_latency_sec, node_latency.mem_latency_sec
        )
        # Update the mem latency of this path
        mem_latency_sec = (
            partition_latency.mem_latency_sec + node_latency.mem_latency_sec
        )
        # Update the compute latency of this path
        computer_latency_sec = (
            partition_latency.computer_latency_sec + node_latency.computer_latency_sec
        )
        # Get all users of this node that are in this partition
        users = set(node.users).intersection(partition.nodes)
        if users:
            max_latency = PartitionLatency(
                mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0
            )
            for n in users:
                # Get new partition latency recursively
                new_partition_latency = dfs_helper(
                    n,
                    PartitionLatency(
                        mem_latency_sec, computer_latency_sec, overall_latency_sec
                    ),
                )
                if (
                    new_partition_latency.overall_latency_sec
                    > max_latency.overall_latency_sec
                ):
                    max_latency = new_partition_latency
            return max_latency
        # If there is no user, the node is at bottom of the partition
        return PartitionLatency(
            mem_latency_sec, computer_latency_sec, overall_latency_sec
        )

    # Main part starts
    # Get all top level nodes of this partition
    top_nodes = get_top_nodes(partition)
    critical_path_latency = PartitionLatency(
        mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0
    )
    # Go through all top nodes and find the largest latency (critical pass latency)
    for node in top_nodes:
        partition_latency = dfs_helper(
            node,
            PartitionLatency(
                mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0
            ),
        )
        if (
            partition_latency.overall_latency_sec
            > critical_path_latency.overall_latency_sec
        ):
            critical_path_latency = partition_latency
    return critical_path_latency


def get_partition_to_latency_mapping(
    partitions: List[Partition], node_to_latency_mapping: Dict[Node, NodeLatency]
) -> Dict[Partition, PartitionLatency]:
    """Given all the partitions and node_to_latency_mapping dictionary,
    return a mapping dictionary of each partition to its overall latency
    """
    partition_to_latency_mapping: Dict[Partition, PartitionLatency] = {}
    # Go through each partition and get its latency
    for partition in partitions:
        partition_latency = get_latency_of_one_partition(
            partition, node_to_latency_mapping
        )
        partition_to_latency_mapping[partition] = partition_latency
    return partition_to_latency_mapping


def get_comm_latency_between(
    parent_partition: Partition,
    child_partition: Partition,
    transfer_rate_bytes_per_sec: float,
):
    """Given two partitions (parent and child),
    calculate the communication latency between the two.
    """
    # If two partitions are on the same device, the comm latency is 0.
    if (
        parent_partition.logical_device_ids != []
        and child_partition.logical_device_ids != []
        and parent_partition.logical_device_ids == child_partition.logical_device_ids
    ):
        return 0.0
    # Keep tracking the communication size between parent and child
    comm_size = 0
    # Keep tracking all the counted node
    visited_nodes = set()
    # Go through all nodes in the child partition
    # If a node has input nodes from the parent partition,
    # the output size of those input nodes will be counted
    # and added to comm_size
    for node in child_partition.nodes:
        input_nodes: Dict[Node, None] = {}
        map_arg(node.args, input_nodes.setdefault)
        map_arg(node.kwargs, input_nodes.setdefault)
        for n in input_nodes:
            if n in parent_partition.nodes and n not in visited_nodes:
                size_bytes = getattr(n, "size_bytes", None)
                if size_bytes is not None:
                    comm_size += size_bytes.output_size
                visited_nodes.add(n)
    return comm_size / transfer_rate_bytes_per_sec


def get_latency_of_partitioned_graph(
    partitions: List[Partition],
    partition_to_latency_mapping: Dict[Partition, PartitionLatency],
    transfer_rate_bytes_per_sec: float,
):
    """Given all partitions in a graph, find the critical path among all partitions
    and return its latency as the latency of the whole graph
    """

    def dfs_helper(partition: Partition, latency_so_far_sec: float) -> float:
        """This function helps to recursively get the latency of a path of partitions"""
        # Update latency by adding current partition's latency
        latency_so_far_sec += partition_to_latency_mapping[
            partition
        ].overall_latency_sec
        children = partition.children
        if partition.children:
            max_latency_sec = 0.0
            for child in partition.children:
                # Calculate latency between
                comm_latency_sec = get_comm_latency_between(
                    partition, child, transfer_rate_bytes_per_sec
                )
                new_latency_sec = dfs_helper(
                    child, latency_so_far_sec + comm_latency_sec
                )
                if new_latency_sec > max_latency_sec:
                    max_latency_sec = new_latency_sec
            return max_latency_sec
        return latency_so_far_sec

    def get_top_partitions(partitions: List[Partition]) -> List[Partition]:
        """This function is to return all the partitions without parents
        as the starting points of all the paths
        """
        top_partitions = []
        for partition in partitions:
            # If a partition has no parents, then it is a top partition
            if len(partition.parents) == 0:
                top_partitions.append(partition)
        return top_partitions

    top_partitions = get_top_partitions(partitions)
    critical_path_latency_sec = 0.0
    for partition in top_partitions:
        latency_sec = dfs_helper(partition, 0.0)
        if latency_sec > critical_path_latency_sec:
            critical_path_latency_sec = latency_sec
    return critical_path_latency_sec
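A minimal sketch (illustration only, not part of the diff) of assembling a `PartitionerConfig` for two hypothetical 1 GiB devices, using the NamedTuples defined above:

from torch.fx.experimental.partitioner_utils import (
    Device,
    PartitionMode,
    PartitionerConfig,
)

devices = [
    Device(name="dev_0", available_mem_bytes=2 ** 30, logical_id=0),
    Device(name="dev_1", available_mem_bytes=2 ** 30, logical_id=1),
]
config = PartitionerConfig(devices=devices, mode=PartitionMode.size_based)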
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/proxy_tensor.py
ADDED
@@ -0,0 +1,924 @@
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import functools
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.utils._pytree as pytree
from torch.fx import Tracer, GraphModule
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode, unset_fake_temporarily, is_fake
from torch._dispatch.python import enable_python_dispatcher, enable_pre_dispatch
import torch.fx as fx
from torch.fx.passes.shape_prop import _extract_tensor_metadata
from contextlib import contextmanager, nullcontext
import inspect
from dataclasses import dataclass
import weakref
import operator
from torch.utils._stats import count
import logging

from torch.overrides import TorchFunctionMode

from torch.utils._python_dispatch import (
    TorchDispatchMode,
    _pop_mode,
    _push_mode,
)

from .sym_node import SymNode
from ._sym_dispatch_mode import SymDispatchMode
from torch.fx import Proxy
import torch.fx.traceback as fx_traceback
from torch import SymInt, SymFloat, SymBool
from torch.utils.weak import WeakTensorKeyDictionary

__all__ = ["PythonKeyTracer", "dispatch_trace", "make_fx", "DecompositionInterpreter", "py_sym_types", "get_innermost_proxy_mode"]
aten = torch.ops.aten
prim = torch.ops.prim

log = logging.getLogger(__name__)
not_implemented_log = torch._logging.getArtifactLogger(__name__, "not_implemented")

CURRENT_DECOMPOSITION_TABLE: Dict[torch._ops.OperatorBase, Callable] = {}

CONSTANT_NUMEL_LIMIT = 1

# We currently convert all SymInt to proxies before we use them.
# This could plausibly be handled at the Dynamo level.
pytree.register_pytree_node(torch.Size, lambda x: (list(x), None), lambda xs, _: tuple(xs))

def fake_signature(fn, nargs):
    """FX gets confused by varargs, de-confuse it"""
    argnames = ",".join(f"arg{i}" for i in range(nargs))
    return eval(f"lambda {argnames}: fn({argnames})", {"fn": fn})

@contextmanager
def decompose(decomposition_table):
    global CURRENT_DECOMPOSITION_TABLE
    old_decomposition_table = CURRENT_DECOMPOSITION_TABLE
    CURRENT_DECOMPOSITION_TABLE = decomposition_table
    try:
        yield CURRENT_DECOMPOSITION_TABLE
    finally:
        CURRENT_DECOMPOSITION_TABLE = old_decomposition_table

# ensure we cannot collide with other properties
proxy_slot = object()
no_default = object()

py_sym_types = (SymInt, SymFloat, SymBool)
def is_sym_node(node):
    assert hasattr(node, 'meta'), "All nodes traced with proxy_tensor should have meta"
    return "val" in node.meta and isinstance(node.meta['val'], py_sym_types)

def set_proxy_slot(obj, tracer, proxy):
    if isinstance(obj, torch.Tensor):
        # We DO want to clobber proxies whenever we run an inplace operation
        # on a tensor, and it affects the metadata on the proxy.
        tracer.tensor_tracker[obj] = proxy
    else:
        # NB: Never clobber pre-existing proxy. Although the proxies
        # are in principle equivalent, when we do graph partitioning
        # we need there not to be spurious dependencies on tangent inputs.
        # This works because primals get their SymInts set first, and
        # THEN later we allocate tangent inputs. Make sure if a SymInt
        # is derivable from a primal that we use that.
        assert isinstance(obj, SymNode), type(obj)
        if obj not in tracer.symnode_tracker:
            tracer.symnode_tracker[obj] = proxy

def has_proxy_slot(obj, tracer):
    assert isinstance(obj, (torch.Tensor, SymNode)), type(obj)
    return get_proxy_slot(obj, tracer, False, lambda _: True)

# the default argument is what to return if the slot is not set.
# the transform argument is handy if you need to extract a subfield from
# the successfully looked up result (but NOT the default.)
def get_proxy_slot(obj, tracer, default=no_default, transform=lambda x: x):
    if isinstance(obj, torch.Tensor):
        tracker = tracer.tensor_tracker
    else:
        assert isinstance(obj, SymNode), type(obj)
        tracker = tracer.symnode_tracker

    if obj not in tracker:
        if default is no_default:
            raise RuntimeError(f"{obj} is not tracked with proxy for {tracer}")
        return default
    return transform(tracker[obj])

def snapshot_fake(val):
    return val.detach()

def extract_val(val):
    if is_fake(val):
        return snapshot_fake(val)
    elif isinstance(val, py_sym_types):
        return val
    elif isinstance(val, (list, tuple)):
        return val.__class__([extract_val(x) for x in val])
    elif isinstance(val, torch.Tensor):
        if not val.is_sparse:
            # NB: Kinda hacky, but we should try to get val as the metadata
            # everywhere
            # TODO: This doesn't properly track storages. A more robust
            # approach would be to maintain a per-trace FakeTensorMode and
            # from_real_tensor to create fake values (don't forget to
            # snapshot_fake)
            fake_tensor_mode = FakeTensorMode(allow_fallback_kernels=True)
            with fake_tensor_mode:
                return torch.empty_strided(val.shape, val.stride(), device=val.device, dtype=val.dtype)
        else:
            return None
    elif isinstance(val, (int, float, bool)):
        return val

# What invariants do we have for the 'val' set on the FX node? It has accurate
# metadata... but only for metadata that exists "below" all other subsystems
# (most notably autograd, but also vmap, functorch transforms, etc). This means
# you can get the dtype, shape, stride, storage, but you CANNOT get requires_grad,
# grad_fn, _base (_base actually may be set due to recursive call to
# ADInplaceOrView, but you shouldn't rely on it.)
def set_meta(proxy, val):
    proxy.node.meta['val'] = extract_val(val)
    # Best effort tensor_meta setting; prefer using val!
    if is_fake(val):
        proxy.node.meta['tensor_meta'] = _extract_tensor_metadata(val)
    elif isinstance(val, torch.Tensor) and not val.is_sparse:
        proxy.node.meta['tensor_meta'] = _extract_tensor_metadata(val)
    return proxy

def thunkify(f, *args, **kwargs):
    """
    Delays computation of f until it's called again
    Also caches the result
    """
    return functools.lru_cache(1)(functools.partial(f, *args, **kwargs))

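# Usage sketch (comments only, illustrative, not in the upstream file):
# thunkify defers a call and memoizes its result, e.g. with a hypothetical
# helper build_proxy:
#     t = thunkify(build_proxy, sym_int)  # nothing runs yet
#     p1 = t(); p2 = t()                  # build_proxy runs once; p1 is p2
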
def track_tensor(tensor, proxy, *, constant, tracer):
    def try_set_proxy_slot(outer_s, proxy_callable, *args):
        assert callable(proxy_callable)
        if isinstance(outer_s, SymInt):
            inner_s = outer_s.node
            set_proxy_slot(inner_s, tracer, thunkify(proxy_callable, outer_s, *args))

    # The basic idea is that we need to associate each tensor/SymInt
    # with a Proxy. How do we setup this association? We just store
    # the proxy on the proxy slot of the object, keyed on the tracer
    # (so that if we have multiple tracers at the same time, they
    # don't clobber each other.)
    for i, s in enumerate(tensor.shape):
        try_set_proxy_slot(s, lambda x, i: set_meta(torch.ops.aten.sym_size.int(proxy, i), x), i)

    for i, s in enumerate(tensor.stride()):
        try_set_proxy_slot(s, lambda x, i: set_meta(torch.ops.aten.sym_stride.int(proxy, i), x), i)

    try_set_proxy_slot(tensor.numel(), lambda x: set_meta(torch.ops.aten.sym_numel.default(proxy), x))
    try_set_proxy_slot(tensor.storage_offset(), lambda x: set_meta(torch.ops.aten.sym_storage_offset.default(proxy), x))
    set_proxy_slot(tensor, tracer, _ProxyTensor(proxy, constant))

def track_tensor_tree(inner_res, proxy_res, *, constant, tracer):
    def wrap_with_proxy(e, proxy, constant):
        if isinstance(e, torch.Tensor):
            track_tensor(e, proxy, tracer=tracer, constant=constant)
            set_meta(proxy, e)
        elif isinstance(e, py_sym_types):
            # NB: eagerly set meta here, so that the numbering is in order
            set_meta(proxy, e)
            set_proxy_slot(e.node, tracer, lambda: proxy)
        elif isinstance(e, (tuple, list)):
            if isinstance(proxy, fx.Proxy):
                set_meta(proxy, e)

            # example use case: allreduce_ returns ([tensor], work)
            for idx, ee in enumerate(e):
                wrap_with_proxy(ee, proxy[idx], get_constant(idx))
        elif isinstance(e, dict):
            # In theory we could support const-prop when proxy-tensor-tracing
            # operators that returns dicts of tensors, but we have no use case
            # for it today (since the only op we currently trace that can
            # return a dict is triton_kernel_wrapper_functional/mutation,
            # which does not participate in const-prop)
            assert constant is None

            if isinstance(proxy, fx.Proxy):
                set_meta(proxy, e)

            # example use case: triton_kernel_wrapper takes arguments as kwargs
            for key, val in e.items():
                wrap_with_proxy(val, proxy[key], None)
        else:
            # intentionally pass on primitives
            pass


    def get_constant(idx):
        if constant is None:
            return None
        else:
            return constant[idx]

    wrap_with_proxy(inner_res, proxy_res, constant)

    return inner_res


def maybe_disable_fake_tensor_mode():
    # TODO: figure out if this API generally makes sense and bake it into the
    # library
    return unset_fake_temporarily()


@dataclass
class _ProxyTensor:
    proxy: Proxy
    constant: Optional[torch.Tensor]


def fetch_sym_proxy(tracer):
    def inner(e):
        n = e.node
        if n.constant is not None:
            return n.constant
        else:
            # NB: we REQUIRE all symints to be tracked
            return get_proxy_slot(n, tracer)()
    return inner


def fetch_tensor_proxy(tracer):
    return lambda t: get_proxy_slot(t, tracer, t)

HANDLED_TYPES = (torch.Tensor, torch.nn.Parameter, FakeTensor)

def proxy_call(proxy_mode, func, pre_dispatch, args, kwargs):
    unrecognized_types = []

    def can_handle_tensor(x):
        r = type(x) in HANDLED_TYPES or has_proxy_slot(x, proxy_mode.tracer)
        if proxy_mode._allow_fake_constant:
            r = r or type(x) in (torch._subclasses.FakeTensor,)
        if not r:
            unrecognized_types.append(type(x))
        return r

    # If there are any tensor subclasses, we need to handle those tensor subclasses first
    # TODO: we could use types to test this
    if not pytree.tree_all_only(torch.Tensor, can_handle_tensor, (args, kwargs)):
        not_implemented_log.debug("ProxyTensorMode tensors without proxy had unrecognized subclasses: %s", unrecognized_types)
        return NotImplemented

    r = maybe_handle_decomp(proxy_mode, func, args, kwargs)
    if r is not NotImplemented:
        return r

    # For pre-autograd tracing, we do not want to run CompositeImplicit decomps.
    if not pre_dispatch and func not in [
        torch.ops.aten.size.default, torch.ops.aten.stride.default, torch.ops.aten.storage_offset.default
    ]:
        with proxy_mode:
            r = func.decompose(*args, **kwargs)
            if r is not NotImplemented:
                return r

    tracer = proxy_mode.tracer
    f_args, f_kwargs = pytree.tree_map_only(torch.Tensor, fetch_tensor_proxy(tracer), (args, kwargs))

    # If there are SymInts, we also should not consider this constant.
    # However, fake tensor handling of SymInts is sufficiently broken that
    # I couldn't write a test for this case
    all_constant = (
        pytree.tree_all_only(_ProxyTensor, lambda t: t.constant is not None, (f_args, f_kwargs))
        # TODO: maybe constant SymInts should also be allowed? Not sure if
        # this can happen
        and pytree.tree_all_only((SymInt, SymFloat, SymBool), lambda _: False, (args, kwargs))
    )

    if torch.Tag.data_dependent_output in func.tags:
        # Check if all of the Tensor inputs are constants
        if all_constant:
            const_args, const_kwargs = pytree.tree_map_only(
                _ProxyTensor, lambda t: t.constant, (f_args, f_kwargs)
            )
            with maybe_disable_fake_tensor_mode():
                return func(*const_args, **const_kwargs)
        # If any of the Tensor inputs are "real" (not FakeTensor), we may
        # incorrectly burn in constants by allowing this access. Raise
        # an error in this case
+
if proxy_mode._error_on_data_dependent_ops and pytree.tree_all_only(torch.Tensor, lambda t: not is_fake(t), (args, kwargs)):
|
313 |
+
raise RuntimeError(
|
314 |
+
f"It appears that you're trying to get value out of a tracing tensor with {func} - erroring out! "
|
315 |
+
"It's likely that this is caused by data-dependent control flow or similar. "
|
316 |
+
"It may be possible to trace this with dynamic shapes; try setting tracing_mode='symbolic' "
|
317 |
+
"in your make_fx call."
|
318 |
+
)
|
319 |
+
proxy_args, proxy_kwargs = pytree.tree_map_only(
|
320 |
+
(SymInt, SymFloat, SymBool),
|
321 |
+
fetch_sym_proxy(proxy_mode.tracer),
|
322 |
+
pytree.tree_map_only(_ProxyTensor, lambda e: e.proxy, (f_args, f_kwargs))
|
323 |
+
)
|
324 |
+
|
325 |
+
# When we trace through a torch.tensor invocation, you never actually
|
326 |
+
# see a torch.ops.aten.tensor call. Instead, the way this function is
|
327 |
+
# implemented internally is that we allocate a plain tensor (this is
|
328 |
+
# *guaranteed* to be a plain tensor, we disable all modes when doing
|
329 |
+
# so), and then call at::lift_fresh on it (to give modes a chance to do
|
330 |
+
# their stuff). Furthermore, the tensor argument to lift_fresh is guaranteed
|
331 |
+
# to be freshly allocated, so we want lift_fresh to be a no-op (directly
|
332 |
+
# returning the input argument).
|
333 |
+
#
|
334 |
+
# Here is the basic problem: when we trace this sequence of executions
|
335 |
+
# into an FX graph, what happens to this call sequence? Traditionally,
|
336 |
+
# tensor constants get interned as buffers on the FX GraphModule. But
|
337 |
+
# this is dangerous. Consider:
|
338 |
+
#
|
339 |
+
# x = torch.tensor(1)
|
340 |
+
# x.add_(2)
|
341 |
+
#
|
342 |
+
# Naively, this traces into:
|
343 |
+
#
|
344 |
+
# t = self._tensor_constant0 # initialized to torch.tensor(1)
|
345 |
+
# x = torch.ops.aten.lift_fresh(t)
|
346 |
+
# x.add_(2)
|
347 |
+
#
|
348 |
+
# If lift_fresh returns t directly, the subsequent add_ call will
|
349 |
+
# modify the tensor constant. Really, the problem is we've violated
|
350 |
+
# the invariant the argument to lift is fresh. So what we should
|
351 |
+
# preserve the invariant by replacing lift_fresh with lift_fresh_copy:
|
352 |
+
#
|
353 |
+
# t = self._tensor_constant0 # initialized to torch.tensor(1)
|
354 |
+
# x = torch.ops.aten.lift_fresh_copy(t)
|
355 |
+
# x.add_(2)
|
356 |
+
#
|
357 |
+
# This is what the overload modification does.
|
358 |
+
if func is torch.ops.aten.lift_fresh.default:
|
359 |
+
func = torch.ops.aten.lift_fresh_copy.default
|
360 |
+
|
361 |
+
proxy_out = proxy_mode.tracer.create_proxy('call_function', func, proxy_args, proxy_kwargs,
|
362 |
+
name=proxy_mode.tracer.graph._target_to_str(func.overloadpacket.__name__))
|
363 |
+
|
364 |
+
# This makes DCE marginally less likely to DCE inplace operations.
|
365 |
+
# It is not strictly necessary
|
366 |
+
# Kind of a hacky way to test if an op is in-place or not
|
367 |
+
if func.overloadpacket.__name__[-1] == "_" and func.overloadpacket.__name__[0] != "_":
|
368 |
+
if isinstance(args[0], List):
|
369 |
+
# e.g., c10d::allreduce_ returns a list of tensors as the first element
|
370 |
+
# in the output.
|
371 |
+
for i, a in enumerate(args[0]):
|
372 |
+
a.proxy = proxy_out[0][i]
|
373 |
+
else:
|
374 |
+
args[0].proxy = proxy_out
|
375 |
+
|
376 |
+
out = func(*args, **kwargs)
|
377 |
+
|
378 |
+
# In some circumstances, we will be tracing in a situation where a tensor
|
379 |
+
# is *statically* known to be a constant (currently, this only happens if
|
380 |
+
# you run torch.tensor; deterministic factory functions like torch.arange
|
381 |
+
# don't get this treatment). When the tensor in question is small, it's
|
382 |
+
# helpful to due constant propagation in case we call item() (in which
|
383 |
+
# case we can return the constant value that is known, rather than give
|
384 |
+
# an error.) The logic here tests if constant propagation is possible
|
385 |
+
# (because all of the inputs are constant). If so, we disable fake tensor
|
386 |
+
# mode (if it is on) and do true compute on the constant.
|
387 |
+
#
|
388 |
+
# It's worth highlighting that we're making a policy decision here.
|
389 |
+
# There is a potential that the tensor is actually quite large, and we
|
390 |
+
# don't actually want to run the compute. The tensor being quite large
|
391 |
+
# is one of the reasons why factory functions don't get this treatment
|
392 |
+
# (since they can be quite large; if a parameter is initialized to a
|
393 |
+
# constant value it will be!) Similarly, there is also a potential
|
394 |
+
# to run an operator that blows up the size of a small tensor; we don't
|
395 |
+
# protect against this case, but we could force, e.g., only single
|
396 |
+
# element constant computation by testing the numel of the result before
|
397 |
+
# propagating const-ness. Similarly, we don't require the constant to
|
398 |
+
# live on CPU, but we could.
|
399 |
+
any_constant = pytree.tree_any_only(_ProxyTensor, lambda t: t.constant is not None, (f_args, f_kwargs))
|
400 |
+
|
401 |
+
constant = None
|
402 |
+
|
403 |
+
# If this is a lift, the input tensor is guaranteed to be a
|
404 |
+
# constant, so we keep a copy of the original argument along so
|
405 |
+
# we can query it if we're asked to item() it at some later point
|
406 |
+
if func is torch.ops.aten.lift_fresh_copy.default and out.numel() <= CONSTANT_NUMEL_LIMIT:
|
407 |
+
with maybe_disable_fake_tensor_mode():
|
408 |
+
constant = args[0].clone()
|
409 |
+
elif (
|
410 |
+
torch.Tag.nondeterministic_seeded not in func.tags
|
411 |
+
and all_constant
|
412 |
+
and any_constant
|
413 |
+
and pytree.tree_all_only(torch.Tensor, lambda t: t.numel() <= CONSTANT_NUMEL_LIMIT, out)
|
414 |
+
):
|
415 |
+
# NB: do NOT include factories as constants
|
416 |
+
with maybe_disable_fake_tensor_mode():
|
417 |
+
const_args, const_kwargs = pytree.tree_map_only(
|
418 |
+
_ProxyTensor, lambda t: t.constant, (f_args, f_kwargs)
|
419 |
+
)
|
420 |
+
constant = func(*const_args, **const_kwargs)
|
421 |
+
else:
|
422 |
+
constant = None
|
423 |
+
|
424 |
+
track_tensor_tree(out, proxy_out, constant=constant, tracer=tracer)
|
425 |
+
return out
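
# The constant-propagation policy above is observable end to end: calling
# .item() on a small torch.tensor literal succeeds even under fake-tensor
# tracing, because the lifted constant is propagated. A sketch (toy function
# and shape, not part of this module):
#
#     >>> def f(x):
#     ...     return x + torch.tensor(2).item()
#     >>> gm = make_fx(f, tracing_mode="fake")(torch.randn(3))  # doctest: +SKIP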


class PythonKeyTracer(Tracer):
    def __init__(self):
        super().__init__(autowrap_modules=())
        self.tensor_tracker = WeakTensorKeyDictionary()
        self.symnode_tracker = weakref.WeakKeyDictionary()  # type: ignore[var-annotated]

    # In general, we don't want to make modules leaves. In principle, users of
    # this tracer might want to override this in order to turn a couple specific
    # modules into leaves in the traced graph.
    def call_module(
            self, m: torch.nn.Module, forward: Callable[..., Any], args: Tuple[Any, ...], kwargs: Dict[str, Any]
    ) -> Any:
        return forward(*args, **kwargs)

    # We don't want to turn getattr calls into proxies. So we just return the actual value.
    def getattr(self, attr, attr_val, parameter_proxy_cache):
        return attr_val

    def create_arg(self, a: Any):
        if isinstance(a, torch.nn.Parameter):
            for n, p in self.root.named_parameters():
                if a is p:
                    return self.create_node('get_attr', n, (), {})
            qualname: Optional[str] = None

            if not qualname:
                i = 0
                while True:
                    qualname = f'_param_constant{i}'
                    if not hasattr(self.root, qualname):
                        break
                    i += 1
                setattr(self.root, qualname, a)

            return self.create_node('get_attr', qualname, (), {})
        elif isinstance(a, (SymInt, SymFloat, SymBool)):
            assert a.node.constant is not None
            return a.node.constant
        return super().create_arg(a)

    def unwrap_proxy(self, e):
        if isinstance(e, torch.Tensor):
            return get_proxy_slot(e, self, e, lambda e: e.proxy)
        elif isinstance(e, (torch.SymInt, torch.SymFloat, torch.SymBool)):
            return get_proxy_slot(e.node, self, e, lambda e: e())
        else:
            return e


@torch._disable_dynamo
def dispatch_trace(
        root: Union[torch.nn.Module, Callable],
        tracer: Tracer,
        concrete_args: Optional[Tuple[Any, ...]] = None,
) -> GraphModule:
    graph = tracer.trace(root, concrete_args)
    name = root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__
    return GraphModule(tracer.root, graph, name)


@contextlib.contextmanager
def _pop_proxy_mode_temporarily(dk):
    # This is a shim around the existing per-dispatch-key-mode logic.
    # I'll delete the per-dispatch-key-mode logic in a followup PR
    if dk is not None:
        # During pre_dispatch, pop off of the PreDispatch mode stack
        old = _pop_mode(dk)
        try:
            yield old
        finally:
            _push_mode(old, dk)
    else:
        # During normal tracing, pop off of the dedicated proxy mode stack
        old = torch._C._unset_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY)
        try:
            yield old
        finally:
            torch._C._set_dispatch_mode(old)

def wrap_key(f, tensors, tracer, pre_dispatch: bool):
    flat_tensors, tensors_spec = pytree.tree_flatten(tensors)
    dk = torch._C.DispatchKey.PreDispatch if pre_dispatch else None

    @functools.wraps(f)
    def wrapped(*proxies):
        flat_proxies, proxies_spec = pytree.tree_flatten(proxies)
        assert len(flat_proxies) == len(flat_tensors)
        with _pop_proxy_mode_temporarily(dk) as m:
            assert isinstance(m, ProxyTorchDispatchMode)
            track_tensor_tree(flat_tensors, flat_proxies, constant=None, tracer=tracer)

        out = f(*tensors)
        out = pytree.tree_map_only(
            torch.Tensor,
            lambda t: get_proxy_slot(t, tracer, t, lambda x: x.proxy),
            out
        )
        out = pytree.tree_map_only(
            (SymInt, SymFloat, SymBool),
            lambda t: get_proxy_slot(t.node, tracer)(),
            out
        )
        return out

    return wrapped

ORIGINAL_ATEN = None
@contextmanager
def set_original_aten_op(func):
    global ORIGINAL_ATEN
    if ORIGINAL_ATEN is None and fx_traceback.has_preserved_node_meta():
        ORIGINAL_ATEN = func
        fx_traceback.current_meta['original_aten'] = func
        try:
            yield
        finally:
            ORIGINAL_ATEN = None
            fx_traceback.current_meta['original_aten'] = None
    else:
        yield


# This mode is **only** used for pre_dispatch tracing.
# In particular, we need to make sure that autograd/autocast APIs
# that do not desugar into dispatcher operators stay in the graph.
class PreDispatchTorchFunctionMode(TorchFunctionMode):

    def __init__(self, tracer):
        self.tracer = tracer

    def __torch_function__(self, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        pre_dispatch_ops = [
            torch._C._set_grad_enabled,
            torch.amp._enter_autocast,
            torch.amp._exit_autocast,
        ]
        if func in pre_dispatch_ops:
            # Don't actually run the function! We just want to trace the call
            # into a graph. We don't actually want to change global autograd state.
            return self.tracer.create_node("call_function", func, args, {})
        return func(*args, **kwargs)

class ProxyTorchDispatchMode(TorchDispatchMode):
    def __init__(self, tracer, tracing_mode, pre_dispatch=False, _allow_fake_constant=False, _error_on_data_dependent_ops=True):
        dk = torch._C.DispatchKey.PreDispatch if pre_dispatch else None
        super().__init__(dk)
        self.tracer = tracer
        self.tracing_mode = tracing_mode
        self.enable_tracing = True
        self.pre_dispatch = pre_dispatch
        self._allow_fake_constant = _allow_fake_constant
        self._error_on_data_dependent_ops = _error_on_data_dependent_ops
        self.sym_mode = ProxySymDispatchMode(tracer)
        self.trace_state = {}
        self._managers = []
        # Indicates to our torch_dispatch dispatching infra that
        # this is an "infra" mode with lower dispatching precedence.
        self._mode_key = torch._C._TorchDispatchModeKey.PROXY
        # Every time we enter a mode, we maintain a stack telling us what the previous
        # ProxyTorchDispatchMode state was (if there was any).
        # This lets us properly reset the state on exit.
        self.enter_stack: List[Optional[ProxyTorchDispatchMode]] = []

    @count
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        with self.sym_mode.enable(False), set_original_aten_op(func):
            return self.inner_torch_dispatch(func, types, args, kwargs)

    def __enter__(self):
        # sym mode first, then us...
        m = self.sym_mode.enable(True)
        self._managers.append(m)
        m.__enter__()
        # Stash and store the previous proxy mode (there may or may not be one)
        maybe_prev_proxy_mode = torch._C._unset_dispatch_mode(self._mode_key)
        self.enter_stack.append(maybe_prev_proxy_mode)
        return super().__enter__()

    def __exit__(self, exc_type, exc_value, traceback):
        m = self._managers.pop()
        # ...exit us first, then sym mode
        b = super().__exit__(exc_type, exc_value, traceback)

        # Re-enable the previous proxy mode, if there was one.
        mb_previous_proxy_mode = self.enter_stack.pop()
        if mb_previous_proxy_mode is not None:
            torch._C._set_dispatch_mode(mb_previous_proxy_mode)

        if not b:
            return m.__exit__(exc_type, exc_value, traceback)
        else:
            return m.__exit__(None, None, None)

    def inner_torch_dispatch(self, func, types, args=(), kwargs=None):
        if not self.enable_tracing:
            return func(*args, **kwargs)

        if func in [prim.device.default]:
            return func(*args, **kwargs)

        return proxy_call(self, func, self.pre_dispatch, args, kwargs)


class ProxySymDispatchMode(SymDispatchMode):
    def __init__(self, tracer):
        super().__init__()
        self.tracer = tracer
        # When false, we don't trace operations. If you do this, you MUST
        # call track_tensor/track_tensor_tree on all results of the operation
        # to ensure we can adequately track the results
        self.enable_tracing = True

    @contextmanager
    def enable(self, b):
        old = self.enable_tracing
        self.enable_tracing = b
        try:
            yield
        finally:
            self.enable_tracing = old

    def _compute_proxy(self, func, args, out: Union[SymInt, SymFloat, SymBool]):
        n_args = tuple(
            get_proxy_slot(a.node, self.tracer)().node if isinstance(a, py_sym_types) else a
            for a in args
        )

        # func doesn't have a __torch_function__ that Proxy can interpose, so
        # we gotta do it manually
        n_out = self.tracer.create_node("call_function", func, n_args, {})
        p_out = fx.Proxy(n_out, self.tracer)
        set_meta(p_out, out)
        return p_out

    def __sym_dispatch__(self, func, types, args, kwargs):
        if not self.enable_tracing:
            return func(*args, **kwargs)

        # Peephole optimize multiply by one
        # NB: be careful not to trigger guards here!
        if func == operator.mul:
            if isinstance(args[1], int) and args[1] == 1:
                return args[0]
            elif isinstance(args[0], int) and args[0] == 1:
                return args[1]

        # For speed, we assume there are no nested data structures
        # (otherwise we could use tree_map).
        # We also assume there are no keyword arguments.
        assert not kwargs
        out = func(*args, **kwargs)

        # If func returned a constant, we don't need to trace; we have
        # determined that the result is constant (no matter if the inputs
        # were symbolic) and it is no longer necessary to trace the
        # computation. This could occur if func triggered some guards.
        if isinstance(out, py_sym_types):
            # Delays tracing out the proxies on this op until we actually need it
            p_out_thunk = thunkify(self._compute_proxy, func=func, args=args, out=out)
            set_proxy_slot(out.node, self.tracer, p_out_thunk)

        return out


# TODO: I'm not sure what the point of this class is; you can just
# make_fx through a regular Interpreter
class DecompositionInterpreter(torch.fx.Interpreter):
    def __init__(self, module: torch.fx.GraphModule, new_graph: torch.fx.Graph, decomposition_table=None, **kwargs):
        super().__init__(module, **kwargs)
        self.new_graph = new_graph
        self.tracer = torch.fx.proxy.GraphAppendingTracer(self.new_graph)
        # Blegh
        self.tracer.tensor_tracker = WeakTensorKeyDictionary()  # type: ignore[attr-defined]
        self.tracer.symnode_tracker = weakref.WeakKeyDictionary()  # type: ignore[attr-defined]
        self.decomposition_table = decomposition_table
        if self.decomposition_table is None:
            self.decomposition_table = {}
        self.mode = ProxyTorchDispatchMode(self.tracer, tracing_mode="real")

    def placeholder(self, target, args, kwargs):
        out = super().placeholder(target, args, kwargs)
        proxy = torch.fx.Proxy(self.new_graph.placeholder(target), self.tracer)
        track_tensor_tree(out, proxy, constant=None, tracer=self.tracer)
        # TODO handle case where the first character of target is '*'
        return out

    def get_attr(self, target, args, kwargs):
        out = super().get_attr(target, args, kwargs)
        proxy = torch.fx.Proxy(self.new_graph.get_attr(target), self.tracer)
        track_tensor_tree(out, proxy, constant=None, tracer=self.tracer)
        return out

    # call_function, call_method, call_module get traced automatically by the outer mode.

    def output(self, target, args, kwargs):
        out = super().output(target, args, kwargs)

        def unwrap(e):
            return get_proxy_slot(e, self.tracer, e, lambda x: x.proxy.node)
        self.new_graph.output(pytree.tree_map(unwrap, out))
        return out

    def run(self, *args, **kwargs):
        # Should enter the mode at least once for being able to restore it later
        # See: https://github.com/pytorch/pytorch/pull/82549#discussion_r934782025
        with decompose(self.decomposition_table), self.mode:
            return super().run(*args, **kwargs)
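
# Rough usage sketch (`gm`, `decomp_table`, and `example_inputs` are
# illustrative names): re-run an existing GraphModule, recording the
# (decomposed) ops into a fresh graph.
#
#     >>> new_graph = torch.fx.Graph()
#     >>> DecompositionInterpreter(gm, new_graph, decomposition_table=decomp_table).run(*example_inputs)  # doctest: +SKIP
#     >>> new_gm = torch.fx.GraphModule(gm, new_graph)  # doctest: +SKIP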


def wrapper_and_args_for_make_fx(func, args, kwargs):
    # make_fx doesn't support kwargs, so we need to do this flattening
    # and then unflatten the args before calling func
    flat_args, spec = pytree.tree_flatten((args, kwargs))

    def wrapped(flat_args):
        fn_args, fn_kwargs = pytree.tree_unflatten(flat_args, spec)
        return func(*fn_args, **fn_kwargs)
    return wrapped, flat_args
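
# Usage sketch (`f` and `x` are hypothetical): trace a function that takes
# kwargs by flattening them into positional arguments first.
#
#     >>> wrapped, all_args = wrapper_and_args_for_make_fx(f, (x,), {"scale": 2.0})
#     >>> gm = make_fx(wrapped)(all_args)  # doctest: +SKIP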

@contextmanager
def disable_autocast_cache():
    old_value = torch.is_autocast_cache_enabled()
    torch.set_autocast_cache_enabled(False)
    try:
        yield
    finally:
        torch.set_autocast_cache_enabled(old_value)


def make_fx(f,
            decomposition_table=None,
            tracing_mode="real",
            _allow_non_fake_inputs=False,
            *,
            pre_dispatch=False,
            _allow_fake_constant=False,
            _error_on_data_dependent_ops=True):
    assert tracing_mode in ["real", "fake", "symbolic"]

    if decomposition_table is None:
        decomposition_table = {}

    @functools.wraps(f)
    def wrapped(*args):
        # Avoid importing sympy at a module level
        from .symbolic_shapes import ShapeEnv

        phs = pytree.tree_map(lambda _: fx.PH, args)  # type: ignore[attr-defined]
        fx_tracer = PythonKeyTracer()
        fake_tensor_mode: Any = nullcontext()
        if tracing_mode == "real":
            fake_tensor_mode = nullcontext()
        elif tracing_mode == "fake":
            import torch._dynamo
            fake_tensor_mode = torch._dynamo.utils.detect_fake_mode(args)
            if fake_tensor_mode is None:
                fake_tensor_mode = FakeTensorMode(
                    allow_fallback_kernels=True,
                    allow_non_fake_inputs=_allow_non_fake_inputs,
                    shape_env=ShapeEnv(),
                    static_shapes=True,
                )
        elif tracing_mode == "symbolic":
            import torch._dynamo
            fake_tensor_mode = torch._dynamo.utils.detect_fake_mode(args)
            if fake_tensor_mode is None:
                shape_env = ShapeEnv()
                fake_tensor_mode = FakeTensorMode(
                    allow_fallback_kernels=False,
                    allow_non_fake_inputs=_allow_non_fake_inputs,
                    shape_env=shape_env)
            else:
                shape_env = fake_tensor_mode.shape_env
                assert shape_env is not None, "shape_env should be set if tracing with 'symbolic'"

        else:
            raise AssertionError(f"Unexpected tracing type: {tracing_mode}")

        python_dispatcher_mode: Any = nullcontext()
        pre_dispatch_mode: Any = nullcontext()
        # pre-autograd tracing uses per-dispatch-key modes,
        # which requires the python dispatcher
        if tracing_mode == "symbolic" or pre_dispatch:
            python_dispatcher_mode = enable_python_dispatcher()
        if pre_dispatch:
            pre_dispatch_mode = enable_pre_dispatch()

        proxy_function_mode: Any = nullcontext()
        if pre_dispatch:
            proxy_function_mode = PreDispatchTorchFunctionMode(fx_tracer)

        proxy_mode = ProxyTorchDispatchMode(fx_tracer,
                                            tracing_mode,
                                            pre_dispatch=pre_dispatch,
                                            _allow_fake_constant=_allow_fake_constant,
                                            _error_on_data_dependent_ops=_error_on_data_dependent_ops)

        arg_count = 0

        def wrap_fake(x):
            nonlocal arg_count
            # TODO: it would be nice to line these up with the names
            # FX will choose for the placeholders, but we don't
            # actually know what the names will be at this point yet
            # NB: the Source here is actually meaningless
            from torch._dynamo.source import ConstantSource
            source = ConstantSource(f"input{arg_count}")
            if isinstance(x, torch.Tensor):
                arg_count += 1
                return fake_tensor_mode.from_tensor(x, source=source)  # type: ignore[attr-defined]
            # NB: don't match on bools
            elif type(x) is int and tracing_mode == "symbolic":
                return shape_env.create_symintnode(shape_env.create_symbol(x, source, positive=None), hint=x, source=source)

            return x

        sym_mode = proxy_mode.sym_mode

        wrap_fn_map = {
            "real": lambda x: x,
            "fake": wrap_fake,
            "symbolic": wrap_fake,
        }
        args = pytree.tree_map(wrap_fn_map[tracing_mode], args)

        if not hasattr(inspect.unwrap(f), '__code__') or inspect.unwrap(f).__code__.co_flags & inspect.CO_VARARGS:
            # FX doesn't support varargs, so we gotta fake up a wrapper
            # TODO: Would be nice to fix this at the source...
            func = fake_signature(f, len(phs))
        else:
            func = f

        # We disable the autocast cache as the autocast cache causes type conversions on parameters to
        # check a cache, which introduces untracked tensors into the graph
        #
        # We also disable tracing by any other tensor proxy-based tracers except the current. The
        # purpose of `make_fx` is to produce graphmodules as a side effect; its internal execution is
        # thus irrelevant to any external functional trace.
        with decompose(decomposition_table), fake_tensor_mode, python_dispatcher_mode, pre_dispatch_mode, proxy_function_mode, \
             sym_mode, proxy_mode, disable_autocast_cache():
            t = dispatch_trace(wrap_key(func, args, fx_tracer, pre_dispatch), tracer=fx_tracer, concrete_args=tuple(phs))

        # TODO: kind of a bad way to do it, should maybe figure out a better way
        if tracing_mode == "symbolic":
            t.shape_env = shape_env  # type: ignore[assignment]
        return t

    return wrapped
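
# Typical entry point; a sketch with a toy function and input shape:
#
#     >>> def f(x):
#     ...     return torch.sin(x) + x
#     >>> gm = make_fx(f, tracing_mode="symbolic")(torch.randn(4))  # doctest: +SKIP
#     >>> print(gm.code)  # doctest: +SKIP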


def get_torch_dispatch_modes():
    return torch.utils._python_dispatch._get_current_dispatch_mode_stack()


def get_innermost_proxy_mode():
    return torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY)


@contextlib.contextmanager
def disable_proxy_modes_tracing(enable_current=False):
    # enable_current=True is now a no-op, since only one proxy mode
    # can live on the stack at a time.
    # We should kill this API in a future PR.
    maybe_old = None
    if not enable_current:
        # Only one proxy_mode can be "active" at a time.
        # So we simply remove our active mode.
        maybe_old = torch._C._unset_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY)
    try:
        yield
    finally:
        if maybe_old is not None:
            torch._C._set_dispatch_mode(maybe_old)


def maybe_handle_decomp(proxy_mode, op, args, kwargs):
    if op in CURRENT_DECOMPOSITION_TABLE:
        with proxy_mode:
            return CURRENT_DECOMPOSITION_TABLE[op](*args, **kwargs)
    return NotImplemented


def get_isolated_graphmodule(func, args, kwargs, tracing_mode="real"):
    """A helper function used to get the GraphModule for the given func.

    It's expected to be used in the ProxyTensor tracing context.
    It detaches the args and kwargs from the current tracer so that the trace of
    the current graph module can be created without any side-effects.
    """
    wrapped, all_args = wrapper_and_args_for_make_fx(func, args, kwargs)

    with disable_proxy_modes_tracing():
        gm = make_fx(wrapped, tracing_mode=tracing_mode)(all_args)
    return gm
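
# Usage sketch (hypothetical tensors `x` and `y`): trace a call in isolation,
# without leaking nodes into any trace currently in progress.
#
#     >>> gm = get_isolated_graphmodule(torch.add, (x, y), {})  # doctest: +SKIP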
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/recording.py
ADDED
@@ -0,0 +1,453 @@
import functools
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
import torch.utils._pytree as pytree


__all__ = [
    "ShapeEnvEvent",
    "record_shapeenv_event",
    "replay_shape_env_events",
    "FakeTensorMeta",
    "shape_env_check_state_equal",
    "NotEqualError",
]

# [Note: Recording ShapeEnv Events]
# =================================
#
# What is a ShapeEnv event?
# -------------------------
# We consider a ShapeEnv event every function call (ShapeEnv method or
# independent function) that modifies the state of the ShapeEnv instance.
# Such calls are recorded alongside their positional and keyword arguments,
# so that they may be replayed over a different ShapeEnv instance.
#
# See [Note: ShapeEnv State Equality] for what is considered the state
# of a ShapeEnv instance.
#
# What is it for?
# ---------------
# ShapeEnv event recording is used for reconstructing the ShapeEnv at an
# arbitrary point in time.
#
# Being able to arbitrarily replay events like so is useful, mainly for
# translation validation bisection, i.e. if a ValidationException has been
# raised, find the earliest point in time where the translation validation
# fails.
#
# Besides that, it also allows us to inspect the given instance and,
# for example, check the guards that would actually be issued at that point.
#
# What kind of arguments can be stored in an event?
# -------------------------------------------------
# There's no specific rule for what cannot be used as an argument.
# That said, pay special attention to the following cases:
#
#   1. Tensor inputs: there are some tests that check whether the inputs
#      were garbage collected after execution. These will fail if there's
#      an event that is holding a reference to those inputs.
#
#   2. ShapeEnv arguments: if there is an argument of ShapeEnv type, that
#      will be automatically replaced by the new given ShapeEnv instance.
#
#   3. SymTypes arguments: they also hold references to ShapeEnv. So,
#      whenever we see them, we create a new instance, replacing the
#      ShapeEnv reference.
#
#   4. FX nodes: specifically, FX nodes from the FX graph for symbolic
#      shapes. That argument must be replaced when replaying the event at
#      ShapeEnvEvent.run, since it has to reference a node from the given
#      instance, and not from the recorded instance.


# Event class for reconstructing ShapeEnv at arbitrary time.
#
# Represents a method call that mutates ShapeEnv in a way that affects the
# issued guards, when ShapeEnv.produce_guards is called.
@dataclass
class ShapeEnvEvent:
    # ShapeEnv method.
    f: Callable

    # Arguments and keyword arguments called with.
    args: Optional[List[Any]] = None
    kwargs: Optional[Dict[str, Any]] = None

    # List of tracked_fakes at the time the method was called.
    tracked_fakes: Optional[List[Any]] = None

    # Name of the captured event.
    # Used for special handling of particular methods.
    name: Optional[str] = None

    # Replay itself, but using shape_env as self.
    def run(self, shape_env=None) -> Any:
        from torch.fx.experimental.symbolic_shapes import ShapeEnv, SymTypes

        # Special handling for the constructor event.
        if self.f is ShapeEnv:
            assert shape_env is None and self.args is None and self.kwargs is not None
            return ShapeEnv(**self.kwargs)

        assert shape_env is not None
        args = list(self.args or list())
        kwargs = dict(self.kwargs or dict())

        # Replace any argument of type ShapeEnv by the given one.
        args, kwargs = pytree.tree_map_only(
            ShapeEnv, lambda _: shape_env, (args, kwargs)
        )

        # Replace any argument of type SymTypes by a new instance,
        # replacing its ShapeEnv reference.
        args, kwargs = pytree.tree_map_only(
            SymTypes,
            lambda a: type(a)(a.node.with_shape_env(shape_env)),
            (args, kwargs),
        )

        # Converts FX nodes using the mapping argument.
        def maybe_convert_node(x: Any) -> Any:
            if not isinstance(x, torch.fx.Node):
                # Don't do anything to x if it's not an FX node.
                return x
            # If, at some point, we created an FX node, it means that translation validation is on.
            # It also means we are building an FX graph for symbolic shapes at shape_env.graph, and
            # we are tracking node names at shape_env.name_to_node.
            assert hasattr(shape_env, "name_to_node")
            name_to_node = shape_env.name_to_node  # type: ignore[attr-defined]
            assert x.name in name_to_node
            return name_to_node[x.name]

        # Replaces the value of a specific argument by the result of fn.
        def replacearg(index: int, key: str, fn: Callable):
            if index < len(args):
                args[index] = fn(args[index])
            if key in kwargs:
                kwargs[key] = fn(kwargs[key])

        if self.is_create_fx_call_function():
            # ShapeEnv.create_fx_call_function:
            # "args" parameter is a tuple of FX nodes from the FX graph of the old ShapeEnv.
            # They must be replaced, since a "call_function" FX node with this tuple as argument
            # will be added to the FX graph of the new shape_env.
            replacearg(
                index=2,
                key="args",
                fn=lambda args: tuple(maybe_convert_node(a) for a in args),
            )
        if self.is_evaluate_expr() or self.is_defer_runtime_assert():
            # ShapeEnv.evaluate_expr and ShapeEnv.defer_runtime_assert:
            # "fx_node" parameter is an (optional) FX node that represents the evaluate expression.
            # It must be replaced, since it will be part of a "call_function" FX node for
            # torch._assert, which will be added to the FX graph of the new shape_env.
            replacearg(index=3, key="fx_node", fn=maybe_convert_node)

        # Actually call the method with the converted arguments.
        return self.f(*args, **kwargs)

    def __str__(self) -> str:
        name = self.name if self.name is not None else self.f.__name__
        return f"event: {name} ({self.args}, {self.kwargs})"

    def is_create_fx_call_function(self) -> bool:
        return self.name == "create_fx_call_function"

    def is_evaluate_expr(self) -> bool:
        return self.name == "evaluate_expr"

    def is_defer_runtime_assert(self) -> bool:
        return self.name == "defer_runtime_assert"


# Extracts a ShapeEnv instance inside args and kwargs.
# Specifically, it looks for:
#   1. ShapeEnv arguments
#   2. SymInt, SymFloat, or SymBool arguments
# If we find more than one object of any of the above types, we
# also check that the ShapeEnv instance is the same for all of them.
def _extract_shape_env_and_assert_equal(args, kwargs):
    from torch.fx.experimental.symbolic_shapes import ShapeEnv, SymTypes

    def assert_equal(old: Optional[ShapeEnv], new: ShapeEnv) -> ShapeEnv:
        if old is not None:
            assert old is new, "call with different ShapeEnv"
        return new

    shape_env = None
    for val in itertools.chain(args, kwargs.values()):
        if isinstance(val, ShapeEnv):
            shape_env = assert_equal(shape_env, val)
        if isinstance(val, SymTypes):
            shape_env = assert_equal(shape_env, val.node.shape_env)

    return shape_env


# Decorator for recording the given function as a replayable event.
#
# This decorator should be used on every function that mutates the state of
# ShapeEnv in some way that affects the resulting issued guards (i.e. when
# ShapeEnv.produce_guards is called).
#
# save_tracked_fakes: saves a snapshot of the TrackedFake list.
# This is used when calling ShapeEnv.produce_guards at arbitrary points in time.
#
# When to save the list of TrackedFake?
# =====================================
# We should save the list of TrackedFake whenever the translation validation
# bisection may actually stop and call the produce_guards method at the moment
# right after the recorded function was played. In other words, since the
# bisection bisects through torch._assert calls, we should save in all methods
# that add a torch._assert call to the symbolic shapes FX graph.
#
# At the moment, there are 2 methods that save the list:
#   - ShapeEnv.evaluate_expr
#   - ShapeEnv.defer_runtime_assert
def record_shapeenv_event(*, save_tracked_fakes: bool = False) -> Callable:
    def decorator(fn: Callable) -> Callable:
        assert callable(fn)
        name = fn.__name__

        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            from torch.fx.experimental.symbolic_shapes import ShapeEnv

            if isinstance(args[0], ShapeEnv) and args[0].is_recording:  # type: ignore[has-type]
                # If ShapeEnv is already recording an event, call the wrapped
                # function directly.
                #
                # NB: here, we skip the check of whether all ShapeEnv instances
                # are equal, in favor of a faster dispatch.
                return fn(*args, **kwargs)

            # Retrieve an instance of ShapeEnv.
            # Assumption: the collection of args and kwargs may not reference
            # different ShapeEnv instances.
            self = _extract_shape_env_and_assert_equal(args, kwargs)

            # If we are calling this function without any ShapeEnv instance
            # alive in its arguments, we don't record and call the original.
            if self is None:
                return fn(*args, **kwargs)

            # Otherwise, start recording and call the function.
            with self.recording():
                # Take a snapshot of the current tracked_fakes.
                tracked_fakes = (
                    self.snapshot_tracked_fakes() if save_tracked_fakes else None
                )
                # Record the event for 'fn'.
                event = ShapeEnvEvent(
                    fn, list(args), kwargs, tracked_fakes, name=fn.__name__
                )
                self.events.append(event)
                # Play the event on this ShapeEnv.
                return event.run(self)

        return wrapper

    return decorator
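
# A sketch of how the decorator is meant to be applied, mirroring its use on
# ShapeEnv methods (the method body here is illustrative only):
#
#     class ShapeEnv:
#         @record_shapeenv_event(save_tracked_fakes=True)
#         def evaluate_expr(self, expr):
#             ...  # mutates state; may add a torch._assert to the FX graph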


# Replays the ShapeEnvEvents list.
# It assumes the first event is the constructor call.
def replay_shape_env_events(events):
    from torch.fx.experimental.symbolic_shapes import ShapeEnv

    constructor_event = events[0]
    assert constructor_event.f == ShapeEnv

    # Constructs the new ShapeEnv.
    shape_env = constructor_event.run()

    for event in events[1:]:
        try:
            # Actually replays each event.
            # FX nodes are remapped on every event, since the node list might
            # change after each event is replayed.
            event.run(shape_env)
        except Exception as e:
            raise RuntimeError(f"failed when running event: {event}") from e

    return shape_env
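
# Usage sketch (`old_env` is a hypothetical ShapeEnv with recorded events):
#
#     >>> new_env = replay_shape_env_events(old_env.events)  # doctest: +SKIP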


# FakeTensor metadata.
# This is to be used in place of FakeTensor placeholders when calling
# ShapeEnv.produce_guards.
@dataclass
class FakeTensorMeta:
    tensor_size: Tuple[Union[int, torch.SymInt], ...]
    tensor_stride: Tuple[Union[int, torch.SymInt], ...]
    tensor_storage_offset: Union[int, torch.SymInt]
    is_nested: bool

    def size(self) -> Tuple[Union[int, torch.SymInt], ...]:
        return self.tensor_size

    def stride(self) -> Tuple[Union[int, torch.SymInt], ...]:
        return self.tensor_stride

    def storage_offset(self) -> Union[int, torch.SymInt]:
        return self.tensor_storage_offset

    def dim(self) -> int:
        return len(self.tensor_size)

    @staticmethod
    def from_fake(fake) -> "FakeTensorMeta":
        return FakeTensorMeta(
            fake.size(), fake.stride(), fake.storage_offset(), fake.is_nested
        )


# [Note: ShapeEnv State Equality]
# ===============================
#
# What is considered ShapeEnv state?
# ----------------------------------
# We consider the state of a ShapeEnv instance to be everything that
# is not in the inlined tuple inside the remove_nonstate_variables function,
# i.e. the fields within ShapeEnv that modify the flow of execution
# of the program.
#
# So, for example: the replacements field might influence how an
# expression is simplified. That, in turn, may result in a guard being
# statically known (i.e. not added).
#
# On the other hand, var_to_stack only changes what is printed on the
# screen, i.e. it is used only for debugging purposes. Therefore, we
# should not consider it when comparing states.
#
# What to do on NotEqualError?
# ----------------------------
# Here are a few possible causes for getting a NotEqualError raised:
#
#   1. New field that does not belong in the ShapeEnv state.
#      For example: log field of type ShapeEnvLoggerAdapter. Different
#      ShapeEnv instances will always have different ShapeEnvLoggerAdapter
#      instances, i.e. equality comparison would fail.
#      Solution: add it to the inlined tuple inside remove_nonstate_variables
#      function inside check_equal method.
#
#   2. New field that is not directly comparable across instances.
#      For example: guards field of type List[ShapeGuard]. More specifically,
#      the ShapeGuard type holds an expression and stack information
#      for debugging purposes. When replaying the event on a new ShapeEnv
#      instance, the stack would be different, which would trigger this error.
#      Solution: add a special case to the map_value function inside
#      check_equal function.
#
#   3. Mutation of ShapeEnv on some not recorded function.
#      If a mutation of the state of ShapeEnv happens inside a function
#      that is not recorded (or that no caller in the stack is recorded),
#      then the replayed ShapeEnv won't catch that.
#      Solution: decorate the function with record_shapeenv_event.


# Checks whether the states of two ShapeEnv are equal w.r.t. the guards
# returned by ShapeEnv.produce_guards.
def shape_env_check_state_equal(env1, env2, non_state_variable_names, map_value):
    # Collect and remove variables that don't necessarily represent the state
    # of a ShapeEnv. Note: we copy the dictionary so that we don't modify the
    # instance itself.
    env1_vars = vars(env1).copy()
    env2_vars = vars(env2).copy()

    for v in non_state_variable_names:
        if v in env1_vars:
            env1_vars.pop(v)
        if v in env2_vars:
            env2_vars.pop(v)

    # Function for transforming the mismatched values into string.
    # Needed, since dict and set entries order might not be the same every time.
    def value_to_str(value: Any) -> str:
        if isinstance(value, dict):
            return (
                "{"
                + ", ".join(f"{k}: {value[k]}" for k in sorted(value.keys(), key=str))
                + "}"
            )
        if isinstance(value, set):
            return "{" + ", ".join(f"{v}" for v in sorted(value)) + "}"
        return str(value)

    # Compares env1_vars with env2_vars.
    # Here, we allow the value of each field to be mapped, so that we appropriately
    # compare the two values.
    def compare_vars(
        map_value: Callable[[str, Any], Any]
    ) -> List[Tuple[str, str, str]]:
        env1_set, env2_set = set(env1_vars), set(env2_vars)

        # First, compare the set of keys in each vars dictionary.
        if env1_set != env2_set:
            raise NotEqualError(
                "field set mismatch:",
                [
                    (
                        "found unique fields:",
                        str(sorted(env1_set - env2_set)),
                        str(sorted(env2_set - env1_set)),
                    ),
                ],
            )

        # Then, sort the keys, and compare the mapped values of each key.
        sorted_keys = list(env1_set)
        sorted_keys.sort()

        mapped_dict = [
            (k, map_value(k, env1_vars[k]), map_value(k, env2_vars[k]))
            for k in sorted_keys
        ]

        # Return a list of tuples representing the fields that did not match
        # alongside their respective mapped values.
        return [
            (f"{k}: values don't match.", value_to_str(val1), value_to_str(val2))
            for k, val1, val2 in mapped_dict
            if val1 != val2
        ]

    # Accumulate the mismatching fields.
    errors = compare_vars(map_value)

    if len(errors) > 0:
        raise NotEqualError("field values don't match:", errors)


class NotEqualError(Exception):
    def __init__(
        self,
        msg: str,
        mismatched: List[Tuple[str, str, str]],
    ) -> None:
        details = "\n".join(
            [
                "\n".join(
                    [
                        f"==> {inner_msg}",
                        f"  > Left: {str1}",
                        f"  > Right: {str2}",
                    ]
                )
                for inner_msg, str1, str2 in mismatched
            ]
        )

        super().__init__(
            f"""\
ShapeEnv not equal: {msg}

{details}
"""
        )
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/refinement_types.py
ADDED
@@ -0,0 +1,16 @@
class Equality:
    def __init__(self, lhs, rhs):
        self.lhs = lhs
        self.rhs = rhs

    def __str__(self):
        return f'{self.lhs} = {self.rhs}'

    def __repr__(self):
        return f'{self.lhs} = {self.rhs}'

    def __eq__(self, other):
        if isinstance(other, Equality):
            return self.lhs == other.lhs and self.rhs == other.rhs
        else:
            return False
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/rewriter.py
ADDED
@@ -0,0 +1,121 @@
import ast
import inspect
import textwrap
import copy
import functools
from types import FunctionType
from typing import cast, Union, Callable, Dict, Optional, Any
from torch.fx._symbolic_trace import Tracer
from torch.fx.graph import Graph
from torch._sources import normalize_source_lines
import torch

class AST_Rewriter(ast.NodeTransformer):
    """
    Take a FunctionType object representing a `forward` method, then
    perform an AST rewrite to swap out nodes that are not symbolically
    traceable with a callsite to the FX alternative.

    To support swapping out an AST node, define a new `visit` method on
    that node. For more details, see:
    https://docs.python.org/3/library/ast.html#ast.NodeTransformer
    """

    def rewrite(self, fn: FunctionType):

        # Normalize the source lines
        sourcelines, _ = inspect.getsourcelines(fn)
        sourcelines = normalize_source_lines(sourcelines)
        source = ''.join(sourcelines)
        normalized_str = textwrap.dedent(source)

        # Rewrite the original AST
        source_ast = ast.parse(normalized_str)
        dest_ast = ast.fix_missing_locations(self.visit(source_ast))

        # Pull out the compiled function from the newly-created Module
        code = compile(dest_ast, "", "exec")
        globals_dict = copy.copy(fn.__globals__)
        keys_before = set(globals_dict.keys())
        exec(code, globals_dict)
        new_keys = list(set(globals_dict.keys()) - keys_before)
        assert len(new_keys) == 1
        fn_compiled = globals_dict[new_keys[0]]

        # Return the compiled function with the original globals
        def change_func_globals(f, globals):
            """Based on https://stackoverflow.com/a/13503277/2988730 (@unutbu)"""
            # __globals__ is a private member of the function class,
            # so we have to copy the function f and all of its members,
            # except f.__globals__
            g = FunctionType(
                f.__code__,
                globals,
                name=f.__name__,
                argdefs=f.__defaults__,
                closure=f.__closure__,
            )
            g = functools.update_wrapper(g, f)
            g.__kwdefaults__ = copy.copy(f.__kwdefaults__)
            return g
        # Return the correct FunctionType object
        return change_func_globals(fn_compiled, globals=fn.__globals__)

    def visit_Assert(self, node):
        """
        Swap out the Assert node (Python's `assert`) with a callsite to the
        symbolically-traceable torch._assert function
        """
        # Create the Call node
        n = ast.parse('torch._assert()', mode='eval')
        assert isinstance(n, ast.Expression)
        call_node = n.body
        assert isinstance(call_node, ast.Call)
        msg = node.msg if node.msg else ast.Constant(value="", kind=None)
        call_node.args = [node.test, msg]

        # Ensure that the new node conforms to the Python AST grammar
        expr_wrapper = ast.Expr(value=call_node)

        # Return the new Call node to signify that we want to use it as
        # a replacement for the original _assert node
        return ast.copy_location(expr_wrapper, node)

    def visit_AnnAssign(self, node):
        """
        Swap out Python's AnnAssign with an Assign node where the annotation function is called.
        Example:
            Original:
                y: Tensor_Type(1,2,3, Dyn) = f2(x)
            Output:
                y = annotate(f2(x), Tensor_Type((1,2,3,Dyn)))
        """
        return ast.Assign(targets=[node.target], value=ast.Call(
            func=ast.Name(id='annotate', ctx=ast.Load()),
            args=[node.value, node.annotation], keywords=[]))


class RewritingTracer(Tracer):
    def trace(self, root: Union[torch.nn.Module, Callable], concrete_args: Optional[Dict[str, Any]] = None) -> Graph:
        return super().trace(_rewrite(root), concrete_args)


def _rewrite(fn: Union[torch.nn.Module, Callable]) -> Union[torch.nn.Module, Callable]:
    if isinstance(fn, torch.nn.Module):
        # Rewrite this module's `forward` as well as the `forward`s of
        # all of this module's recursive descendants. Return the new,
        # rewritten module hierarchy.
        def rewrite_module(m : torch.nn.Module):
            class RewrittenModule(torch.nn.Module):
                def __init__(self, orig):
                    super().__init__()
                    for k, v in orig.__dict__.items():
                        if isinstance(v, torch.nn.Module):
                            self.__dict__[k] = copy.copy(rewrite_module(v))
                        else:
                            self.__dict__[k] = copy.copy(v)
            RewrittenModule.forward = AST_Rewriter().rewrite(cast(FunctionType, m.forward))
            return RewrittenModule(m)
        return rewrite_module(fn)
    else:
        # Rewrite this single free function
        return AST_Rewriter().rewrite(cast(FunctionType, fn))
|
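A minimal usage sketch of the tracer above, assuming the file lands at torch.fx.experimental.rewriter as the path suggests; the module `M` and the printed output are illustrative, not part of the diff:

    import torch
    from torch.fx import GraphModule
    from torch.fx.experimental.rewriter import RewritingTracer  # assumed import path

    class M(torch.nn.Module):  # hypothetical example module
        def forward(self, x):
            # A plain `assert` is normally untraceable; the AST rewrite turns it
            # into a call to the traceable torch._assert before tracing begins.
            assert x.shape[0] > 0, "batch must be non-empty"
            return x + 1

    tracer = RewritingTracer()
    graph = tracer.trace(M())             # rewrites forward, then symbolically traces it
    gm = GraphModule(tracer.root, graph)
    print(gm.graph)                       # should contain a call_function node for torch._assert
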
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/schema_type_annotation.py
ADDED
@@ -0,0 +1,111 @@
+import torch
+import torch.fx
+import inspect
+from typing import Any, Dict, Optional, Tuple
+from torch.fx.node import Argument, Target
+from torch._jit_internal import boolean_dispatched
+from torch.fx.operator_schemas import _torchscript_type_to_python_type
+
+from torch.fx import Transformer
+
+class AnnotateTypesWithSchema(Transformer):
+    """
+    Use Python function signatures to annotate types for `Nodes` within an FX graph.
+    This pulls out Python function signatures for:
+
+        1. Standard `torch.nn` Module calls
+        2. `torch.nn.functional` calls
+        3. Attribute fetches via `get_attr`
+
+    Example usage:
+
+        m = torchvision.models.resnet18()
+
+        traced = torch.fx.symbolic_trace(m)
+
+        traced = AnnotateTypesWithSchema(traced).transform()
+
+    """
+    def __init__(self, module : torch.nn.Module, annotate_functionals : bool = True,
+                 annotate_modules : bool = True, annotate_get_attrs : bool = True):
+        super().__init__(module)
+        self.annotate_functionals = annotate_functionals
+        self.annotate_modules = annotate_modules
+        self.annotate_get_attrs = annotate_get_attrs
+
+    def call_function(self, target : Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]):
+        python_ret_type = None
+        if self.annotate_functionals and target.__module__ == 'torch.nn.functional':
+            target_for_analysis = target
+            if target in boolean_dispatched:
+                # HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have
+                # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false`
+                # branches of the dispatch have exactly the same signature. If they do, use the `true`
+                # branch signature for analysis. Otherwise, leave this un-normalized
+                assert not isinstance(target, str)
+                dispatched = boolean_dispatched[target]
+                if_true, if_false = dispatched['if_true'], dispatched['if_false']
+                # TODO: can we emit the union of these? What are the implications on TorchScript
+                # compilation?
+                if inspect.signature(if_true).return_annotation != inspect.signature(if_false).return_annotation:
+                    return super().call_function(target, args, kwargs)
+                target_for_analysis = if_true
+
+            python_ret_type = self._extract_python_return_type(target_for_analysis)
+
+        return_proxy = super().call_function(target, args, kwargs)
+        return_proxy.node.type = return_proxy.node.type if return_proxy.node.type else python_ret_type
+        return return_proxy
+
+    def call_module(self, target : Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]):
+        python_ret_type = None
+        assert isinstance(target, str)
+        submod = self.fetch_attr(target)
+        if self.annotate_modules and hasattr(submod.__class__, '__name__'):
+            classname = submod.__class__.__name__
+            if getattr(torch.nn, classname, None) == submod.__class__:
+                python_ret_type = self._extract_python_return_type(submod.forward)
+        return_proxy = super().call_module(target, args, kwargs)
+        return_proxy.node.type = return_proxy.node.type if return_proxy.node.type else python_ret_type
+        return return_proxy
+
+    def get_attr(self, target : torch.fx.node.Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]):
+        attr_proxy = super().get_attr(target, args, kwargs)
+
+        if self.annotate_get_attrs:
+            module_itr = self.module
+            assert isinstance(target, str)
+            atoms = target.split('.')
+            for i, atom in enumerate(atoms):
+                if not hasattr(module_itr, atom):
+                    raise RuntimeError(f'Node referenced nonexistent target {".".join(atoms[:i])}!')
+                module_itr = getattr(module_itr, atom)
+
+            maybe_inferred_ts_type = torch._C._jit_try_infer_type(module_itr)
+            if maybe_inferred_ts_type.success():
+                python_type = _torchscript_type_to_python_type(maybe_inferred_ts_type.type())
+                attr_proxy.node.type = python_type if not attr_proxy.node.type else attr_proxy.node.type
+
+        return attr_proxy
+
+    def _extract_python_return_type(self, target : Target) -> Optional[Any]:
+        """
+        Given a Python call target, try to extract the Python return annotation
+        if it is available, otherwise return None
+
+        Args:
+
+            target (Callable): Python callable to get return annotation for
+
+        Returns:
+
+            Optional[Any]: Return annotation from the `target`, or None if it was
+                not available.
+        """
+        assert callable(target)
+        try:
+            sig = inspect.signature(target)
+        except (ValueError, TypeError):
+            return None
+
+        return sig.return_annotation if sig.return_annotation is not inspect.Signature.empty else None
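A short sketch of the pass in action, assuming the file is importable as torch.fx.experimental.schema_type_annotation; `Net` is an illustrative module, and F.relu is used because its signature carries a `-> Tensor` return annotation:

    import torch
    import torch.fx
    from torch.fx.experimental.schema_type_annotation import AnnotateTypesWithSchema  # assumed path

    class Net(torch.nn.Module):  # hypothetical example module
        def __init__(self):
            super().__init__()
            self.relu = torch.nn.ReLU()  # standard torch.nn module, annotated via forward's signature

        def forward(self, x):
            return torch.nn.functional.relu(self.relu(x))

    traced = torch.fx.symbolic_trace(Net())
    annotated = AnnotateTypesWithSchema(traced).transform()
    for node in annotated.graph.nodes:
        # call_module / call_function nodes should now carry a Tensor type
        print(node.op, node.name, node.type)
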
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/sym_node.py
ADDED
@@ -0,0 +1,1145 @@
+"""
+This file does three things:
+- Contains the definition of SymNode
+- Installs all the magic methods into SymBool, SymInt, SymFloat at import time
+- Does not depend on sympy at import time
+
+As this file is imported from within torch/__init__.py we do not want it to depend on SymPy
+to avoid having to load SymPy at import time, as doing so is *very* slow.
+"""
+
+import builtins
+import itertools
+import logging
+import math
+import operator
+import sys
+from functools import lru_cache
+from typing import Optional, Type, TYPE_CHECKING, Union
+
+# NB: The sym_* functions are used via getattr() and must be imported here.
+from torch import (  # noqa: F401
+    sym_float,
+    sym_ite,
+    sym_max,
+    sym_min,
+    sym_not,
+    sym_sqrt,
+    SymBool,
+    SymFloat,
+    SymInt,
+)
+
+from torch.fx.experimental._sym_dispatch_mode import (
+    handle_sym_dispatch,
+    sym_function_mode,
+)
+
+if TYPE_CHECKING:
+    from torch.fx.experimental.symbolic_shapes import ShapeEnv
+
+log = logging.getLogger(__name__)
+
+
+__all__ = ["SymNode", "method_to_operator", "magic_methods", "sym_sqrt"]
+
+
+SymTypes = (SymInt, SymFloat, SymBool)
+
+
+def _to_symtype(t):
+    if t is bool:
+        return SymBool
+    if t is int:
+        return SymInt
+    if t is float:
+        return SymFloat
+    return t
+
+
+# TODO: An incomplete list
+# 1. Set variables to be equal when we do equality
+# 2. Specialize on 0/1 when we do subtraction
+class SymNode:
+    """
+    This is a type erased SymInt/SymFloat which we use to do actual operations.
+    End users don't touch this. Magic methods are NOT defined on this object.
+    """
+
+    def __init__(
+        self,
+        expr,
+        shape_env,
+        pytype,
+        hint: Optional[Union[int, float, bool]],
+        constant=None,
+        fx_node=None,
+    ):
+        self._expr = expr
+        self.shape_env = shape_env
+        self.pytype = pytype
+        # What's the difference between hint and constant?
+        #
+        # - A constant is known to be invariant across invocations of the model;
+        #   it will always be this value. We only really know this when we
+        #   encounter an honest-to-goodness literal (when wrapping it into
+        #   a SymNode, we set constant.) Most of the time, constant is None
+        #
+        # - A hint is a *particular* value from the particular run we are
+        #   tracing, but it may vary the next time around. It's useful to
+        #   keep this around, as if we need a concrete value from a SymNode,
+        #   we will return the hint and guard on the expression that produced
+        #   it giving the same hint next time around. The hint is not
+        #   guaranteed to be set either: if you have an unbacked SymNode,
+        #   there won't be any hint; it was the result of some tensor-dependent
+        #   computation, but we don't know what it actually is because we
+        #   haven't actually run the tensor computation.
+        #
+        # If _hint is None, we will query maybe_evaluate_static(compute_hint=True)
+        # in hopes that we've learned enough about the unbacked symints to
+        # discharge the hint; otherwise, you're likely to just error out.
+        #
+        # (A previous version of this system had some optimizations to only
+        # recompute when it was possible we had learned enough about the
+        # unbacked symint that a hint was now possible, but as we added more
+        # potential refinements to unbacked symints this got harder to keep
+        # in sync, so we've deleted it for now.)
+        if hint is not None:
+            assert type(hint) is pytype or type(hint) is _to_symtype(pytype), (
+                "Cannot create SymNode of type "
+                f"{pytype} with incompatible hint of type {type(hint)}"
+            )
+        self._hint = hint
+        self.constant: Optional[Union[int, float, bool]] = constant
+
+        # Record the FX node of the current node if we are doing translation
+        # validation. They will be used for building the input assertions for
+        # the translation validation problem.
+        self.fx_node = (
+            fx_node if self.shape_env._translation_validation_enabled else None
+        )
+
+    def with_shape_env(self, shape_env: "ShapeEnv") -> "SymNode":
+        return SymNode(
+            self._expr, shape_env, self.pytype, self._hint, self.constant, self.fx_node
+        )
+
+    @property
+    def expr(self):
+        return self.shape_env.replace(self._expr)
+
+    # Recompute the hint and see if we've got it now
+    # Precondition: self._hint is None
+    def _update_hint(self):
+        r = self.shape_env._maybe_evaluate_static(self.expr, compute_hint=True)
+        if r is not None:
+            self._hint = self.pytype(r) if not isinstance(r, SymTypes) else r
+
+    @property
+    def hint(self):
+        if self._hint is None:
+            self._update_hint()
+        return self._hint
+
+    def has_hint(self):
+        if self._hint is None:
+            self._update_hint()
+        return self._hint is not None
+
+    def require_hint(self, fallback=None):
+        if self._hint is None:
+            self._update_hint()
+        if self._hint is None:
+            if fallback is not None:
+                return fallback
+            # NB: we expect this to raise
+            return self.shape_env.size_hint(self.expr)
+        return self._hint
+
+    def maybe_as_int(self):
+        if self.expr.is_number:
+            return int(self.expr)
+        else:
+            return None
+
+    def is_int(self):
+        return self.pytype is int
+
+    def is_float(self):
+        return self.pytype is float
+
+    def is_bool(self):
+        return self.pytype is bool
+
+    def wrap_int(self, num):
+        assert type(num) is int
+        import sympy
+
+        return SymNode(
+            sympy.Integer(num), self.shape_env, int, num, constant=num, fx_node=num
+        )
+
+    def wrap_float(self, num):
+        assert type(num) is float
+        import sympy
+
+        return SymNode(
+            sympy.Float(num), self.shape_env, float, num, constant=num, fx_node=num
+        )
+
+    def wrap_bool(self, num):
+        assert type(num) is bool
+        import sympy
+
+        return SymNode(
+            sympy.true if num else sympy.false,
+            self.shape_env,
+            bool,
+            num,
+            constant=num,
+            fx_node=num,
+        )
+
+    def clone(self):
+        return self
+
+    def str(self):
+        return f"{self.expr}"
+
+    def __str__(self):
+        return self.str()
+
+    def __repr__(self):
+        return self.str()
+
+    # These methods call the metaprogrammed methods, they're hand written
+    # here so we get good stack traces
+    def abs(self) -> "SymNode":
+        return self._abs()  # type: ignore[attr-defined]
+
+    def add(self, other) -> "SymNode":
+        return self._add(other)  # type: ignore[attr-defined]
+
+    def sub(self, other) -> "SymNode":
+        return self._sub(other)  # type: ignore[attr-defined]
+
+    def mul(self, other) -> "SymNode":
+        return self._mul(other)  # type: ignore[attr-defined]
+
+    def mod(self, other) -> "SymNode":
+        return self._mod(other)  # type: ignore[attr-defined]
+
+    def pow(self, other) -> "SymNode":
+        return self._pow(other)  # type: ignore[attr-defined]
+
+    def and_(self, other) -> "SymNode":
+        return self._and_(other)  # type: ignore[attr-defined]
+
+    def or_(self, other) -> "SymNode":
+        return self._or_(other)  # type: ignore[attr-defined]
+
+    def truediv(self, other) -> "SymNode":
+        return self._truediv(other)  # type: ignore[attr-defined]
+
+    def floordiv(self, other) -> "SymNode":
+        return self._floordiv(other)  # type: ignore[attr-defined]
+
+    def lshift(self, other) -> "SymNode":
+        return self._lshift(other)  # type: ignore[attr-defined]
+
+    def rshift(self, other) -> "SymNode":
+        return self._rshift(other)  # type: ignore[attr-defined]
+
+    def sym_not(self) -> "SymNode":  # noqa: F811
+        return self._sym_not()  # type: ignore[attr-defined]
+
+    def eq(self, other) -> "SymNode":
+        return self._eq(other)  # type: ignore[attr-defined]
+
+    def ne(self, other) -> "SymNode":
+        return self._ne(other)  # type: ignore[attr-defined]
+
+    def gt(self, other) -> "SymNode":
+        return self._gt(other)  # type: ignore[attr-defined]
+
+    def lt(self, other) -> "SymNode":
+        return self._lt(other)  # type: ignore[attr-defined]
+
+    def le(self, other) -> "SymNode":
+        return self._le(other)  # type: ignore[attr-defined]
+
+    def ge(self, other) -> "SymNode":
+        return self._ge(other)  # type: ignore[attr-defined]
+
+    def floor(self) -> "SymNode":
+        return self._floor()  # type: ignore[attr-defined]
+
+    def sym_float(self) -> "SymNode":  # noqa: F811
+        return self._sym_float()  # type: ignore[attr-defined]
+
+    def sym_int(self) -> "SymNode":
+        return self._sym_int()  # type: ignore[attr-defined]
+
+    def ceil(self) -> "SymNode":
+        return self._ceil()  # type: ignore[attr-defined]
+
+    def neg(self) -> "SymNode":
+        return self._neg()  # type: ignore[attr-defined]
+
+    def sym_min(self, other) -> "SymNode":  # noqa: F811
+        return self._sym_min(other)  # type: ignore[attr-defined]
+
+    def sym_max(self, other) -> "SymNode":  # noqa: F811
+        return self._sym_max(other)  # type: ignore[attr-defined]
+
+    def sym_ite(self, then_val, else_val) -> "SymNode":
+        return self._sym_ite(then_val, else_val)  # type: ignore[attr-defined]
+
+    def sym_sqrt(self) -> "SymNode":
+        return self._sym_sqrt()  # type: ignore[attr-defined]
+
+    def is_contiguous(self, sizes, strides) -> "SymNode":
+        return self._is_contiguous(sizes, strides)  # type: ignore[attr-defined]
+
+    def is_channels_last_contiguous_2d(self, sizes, strides) -> "SymNode":
+        return self._is_channels_last_contiguous_2d(sizes, strides)  # type: ignore[attr-defined]
+
+    def is_channels_last_contiguous_3d(self, sizes, strides) -> "SymNode":
+        return self._is_channels_last_contiguous_3d(sizes, strides)  # type: ignore[attr-defined]
+
+    def is_channels_last_strides_2d(self, sizes, strides) -> "SymNode":
+        return self._is_channels_last_strides_2d(sizes, strides)  # type: ignore[attr-defined]
+
+    def is_channels_last_strides_3d(self, sizes, strides) -> "SymNode":
+        return self._is_channels_last_strides_3d(sizes, strides)  # type: ignore[attr-defined]
+
+    def is_non_overlapping_and_dense_indicator(self, sizes, strides) -> "SymNode":
+        return self._is_non_overlapping_and_dense_indicator(sizes, strides)  # type: ignore[attr-defined]
+
+    # Make C++ happy
+    def sym_or(self, other):
+        return self.or_(other)
+
+    def sym_and(self, other):
+        return self.and_(other)
+
+    def is_non_overlapping_and_dense(self, sizes, strides):
+        return self.is_non_overlapping_and_dense_indicator(sizes, strides).eq(to_node(self, 1))  # type: ignore[attr-defined]
+
+    def int_(self):
+        return self.guard_int("", 0)  # NB: uses Python backtrace
+
+    # You can manually trigger a guard with this function
+    def guard_int(self, file, line):
+        # TODO: use the file/line for some useful diagnostic on why a
+        # guard occurred
+        r = self.shape_env.evaluate_expr(self.expr, self.hint, fx_node=self.fx_node)
+        try:
+            return int(r)
+        except Exception:
+            log.warning("Failed to convert to int: %s", r)
+            raise
+
+    def guard_float(self, file, line):
+        # TODO: use the file/line for some useful diagnostic on why a
+        # guard occurred
+        r = self.shape_env.evaluate_expr(
+            self.expr, self.hint, fx_node=self.fx_node, expect_rational=False
+        )
+        try:
+            return float(r)
+        except Exception:
+            log.warning("Failed to convert to float: %s", r)
+            raise
+
+    def guard_bool(self, file, line):
+        # TODO: use the file/line for some useful diagnostic on why a
+        # guard occurred
+        r = self.shape_env.evaluate_expr(self.expr, self.hint, fx_node=self.fx_node)
+        try:
+            return bool(r)
+        except Exception:
+            log.warning("Failed to convert to bool: %s", r)
+            raise
+
+    def expect_true(self, file, line):
+        if self.has_hint():
+            # OK to generate guards
+            return self.guard_bool(file, line)
+        # Generate a deferred runtime assert (this might actually end up doing
+        # a regular guard if we can!)
+        # TODO: file/line here is very important, because the assert has been
+        # deferred so you can't backtrace easily
+        return self.shape_env.defer_runtime_assert(
+            self.expr, f"{file}:{line}", fx_node=self.fx_node
+        )
+
+    def expect_size(self, file, line):
+        from torch.fx.experimental.symbolic_shapes import _advise_is_size
+
+        b = self.ge(self.wrap_int(0))
+        # Generate a deferred runtime assert
+        r = b.expect_true(file, line)
+        # Refine compile time range, but only if it's unbacked.
+        # If you refine range for hinted variables, you can end up making
+        # improper deductions since compile time reasoning may be
+        # incompatible with runtime reasoning.
+        if r and not self.has_hint():
+            _advise_is_size(SymInt(self))
+        return r
+
+    def bool_(self):
+        return self.guard_bool("", 0)
+
+    def is_symbolic(self):
+        return True
+
+    def singleton_int(self):
+        return None
+
+    def is_constant(self):
+        return False
+
+
+# TODO: this probably needs the sizes-strides eval functions
+METHOD_TO_OPERATOR = {
+    "abs": operator.abs,
+    "add": operator.add,
+    "and": operator.and_,
+    "ceil": math.ceil,
+    "eq": operator.eq,
+    "floor": math.floor,
+    "floordiv": operator.floordiv,
+    "ge": operator.ge,
+    "gt": operator.gt,
+    "le": operator.le,
+    "lshift": operator.lshift,
+    "lt": operator.lt,
+    "mod": operator.mod,
+    "mul": operator.mul,
+    "ne": operator.ne,
+    "neg": operator.neg,
+    "or": operator.or_,
+    "pow": operator.pow,
+    "rshift": operator.rshift,
+    "sub": operator.sub,
+    "sym_float": sym_float,
+    "sym_ite": sym_ite,
+    "sym_max": sym_max,
+    "sym_min": sym_min,
+    "sym_not": sym_not,
+    "sym_sqrt": sym_sqrt,
+    "truediv": operator.truediv,
+}
+
+unary_magic_methods = {
+    "abs",
+    "sym_float",
+    "ceil",
+    "floor",
+    "neg",
+    "sym_sqrt",
+    "sym_not",
+}
+
+# Most methods are only registered on SymInt and SymFloat
+# Some methods are only registered on SymBool
+only_bool_magic_methods = {"and", "or", "sym_not", "sym_ite"}
+# Methods that implicitly convert SymBool into SymInt
+bool_becomes_int_magic_methods = {"add", "sub", "mul"}
+# Methods that are also on SymBool, in addition to on SymInt and SymFloat
+also_bool_magic_methods = {"eq"}
+bool_magic_methods = only_bool_magic_methods | also_bool_magic_methods
+
+
+magic_methods_on_operator_with_trailing_underscore = {"and", "or"}
+
+
+always_float_magic_methods = {"truediv", "sym_float", "sym_sqrt", "pow"}
+always_int_magic_methods = {"ceil", "floor"}
+always_bool_magic_methods = {
+    "eq",
+    "ne",
+    "gt",
+    "lt",
+    "le",
+    "ge",
+    "and",
+    "or",
+    "sym_not",
+    "is_non_overlapping_and_dense",
+}
+
+# Methods that have a `__foo__` as well as `__rfoo__`
+
+
+def _sympy_truediv(a, b):
+    from torch.utils._sympy.functions import TrueDiv
+
+    return TrueDiv(a, b)
+
+
+def _sympy_floordiv(a, b):
+    from torch.utils._sympy.functions import FloorDiv
+
+    return FloorDiv(a, b)
+
+
+def _sympy_mod(a, b):
+    from torch.utils._sympy.functions import Mod
+
+    return Mod(a, b)
+
+
+def _sympy_pow(a, b):
+    from torch.utils._sympy.functions import Pow
+
+    return Pow(a, b)
+
+
+def _sympy_and(a, b):
+    import sympy
+
+    return sympy.And(a, b)
+
+
+def _sympy_or(a, b):
+    import sympy
+
+    return sympy.Or(a, b)
+
+
+def _sympy_lshift(a, b):
+    from torch.utils._sympy.functions import LShift
+
+    return LShift(a, b)
+
+
+def _sympy_rshift(a, b):
+    from torch.utils._sympy.functions import RShift
+
+    return RShift(a, b)
+
+
+reflectable_magic_methods = {
+    "add": lambda a, b: a + b,
+    "sub": lambda a, b: a - b,
+    "mul": lambda a, b: a * b,
+    "mod": _sympy_mod,
+    "pow": _sympy_pow,
+    "and": _sympy_and,
+    "or": _sympy_or,
+    "truediv": _sympy_truediv,
+    "floordiv": _sympy_floordiv,
+    "lshift": _sympy_lshift,
+    "rshift": _sympy_rshift,
+}
+
+
+def _floor_ceil_helper(a, fn):
+    import sympy
+
+    if isinstance(a, sympy.Mul):
+        aa = a.args
+        if len(aa) == 2 and isinstance(aa[0], sympy.Float) and aa[1].is_integer:
+            coef = sympy.Integer(aa[0])
+            if aa[0] == coef:  # structural equality test
+                return coef * aa[1]
+    if (
+        isinstance(a, sympy.Float)
+        and a == sympy.Integer(a)
+        or isinstance(a, sympy.Integer)
+    ):
+        return sympy.Integer(a)
+    return fn(a)
+
+
+def _sympy_floor(a):
+    import sympy
+
+    return _floor_ceil_helper(a, sympy.floor)
+
+
+def _sympy_ceil(a):
+    import sympy
+
+    return _floor_ceil_helper(a, sympy.ceiling)
+
+
+def _sympy_eq(a, b):
+    import sympy
+
+    return sympy.Eq(a, b)
+
+
+def _sympy_ne(a, b):
+    import sympy
+
+    return sympy.Ne(a, b)
+
+
+def _sympy_gt(a, b):
+    import sympy
+
+    return sympy.Gt(a, b)
+
+
+def _sympy_lt(a, b):
+    import sympy
+
+    return sympy.Lt(a, b)
+
+
+def _sympy_le(a, b):
+    import sympy
+
+    return sympy.Le(a, b)
+
+
+def _sympy_ge(a, b):
+    import sympy
+
+    return sympy.Ge(a, b)
+
+
+def _sympy_min(a, b):
+    import sympy
+
+    return sympy.Min(a, b)
+
+
+def _sympy_max(a, b):
+    import sympy
+
+    return sympy.Max(a, b)
+
+
+def _sympy_ite(a, t, f):
+    import sympy
+
+    return sympy.Piecewise((t, a), (f, True))
+
+
+def _sympy_sqrt(a):
+    import sympy
+
+    return sympy.sqrt(a)
+
+
+def _sympy_abs(a):
+    import sympy
+
+    return sympy.Abs(a)
+
+
+def _sympy_sym_float(a):
+    # Cannot use sympy.Float(a) here, because it expects python literals
+    # Multiply by 1.0 to cast to float. This is needed when the input
+    # is a SymInt which has the assumption that it is integer and
+    # SymPy will otherwise assume that return value cannot be a float.
+    return a * 1.0
+
+
+magic_methods = {
+    **reflectable_magic_methods,
+    "sym_not": lambda a: ~a,
+    "eq": _sympy_eq,
+    "ne": _sympy_ne,
+    "gt": _sympy_gt,
+    "lt": _sympy_lt,
+    "le": _sympy_le,
+    "ge": _sympy_ge,
+    "floor": _sympy_floor,
+    "sym_float": _sympy_sym_float,
+    "ceil": _sympy_ceil,
+    "neg": lambda a: -a,
+    "sym_min": _sympy_min,
+    "sym_max": _sympy_max,
+    "sym_ite": _sympy_ite,
+    "sym_sqrt": _sympy_sqrt,
+    "abs": _sympy_abs,
+}
+
+
+def sympy_is_contiguous(sizes, strides):
+    dim = len(sizes)
+    return sympy_is_contiguous_generic(sizes, strides, list(range(dim - 1, -1, -1)))
+
+
+def sympy_is_contiguous_generic(sizes, strides, dim_order):
+    import sympy
+
+    dim = len(sizes)
+
+    if len(dim_order) != dim:
+        return sympy.false
+
+    is_contiguous = sympy.true
+    z = sympy.Integer(1)
+    # Contiguous if the strides make sense (or the dim is size 1)
+    for d in dim_order:
+        is_contiguous &= sympy.Eq(sizes[d], sympy.Integer(1)) | sympy.Eq(strides[d], z)
+        z *= sizes[d]
+    # OR if any size is zero
+    for d in range(dim):
+        is_contiguous |= sympy.Eq(sizes[d], sympy.Integer(0))
+    return is_contiguous
+
+
+# NB: There is a TODO in C++ to allow omitting the batch dim. If that
+# happens you will need to refactor this
+
+
+def sympy_is_channels_last_contiguous_2d(sizes, strides):
+    return sympy_is_contiguous_generic(sizes, strides, [1, 3, 2, 0])
+
+
+def sympy_is_channels_last_contiguous_3d(sizes, strides):
+    return sympy_is_contiguous_generic(sizes, strides, [1, 4, 3, 2, 0])
+
+
+def sympy_is_channels_last_strides_generic(sizes, strides, dim_order):
+    import sympy
+
+    dim = len(sizes)
+
+    if dim != len(dim_order):
+        return sympy.false
+
+    m = sympy.Integer(0)
+    r = sympy.true
+
+    # special case for trivial C dimension. default to NCHW
+    r &= sympy.Ne(strides[1], 0)
+
+    for d in dim_order:
+        r &= sympy.Ne(sizes[d], 0) & (strides[d] >= m)
+        # Fallback to NCHW as default layout for ambiguous cases
+        # This is the flaw of implicit memory_format from strides.
+        # N111 tensor with identical strides for size 1 dimension;
+        # Two cases could lead us here:
+        # a. N111 contiguous Tensor ([N,1,1,1]@[1,1,1,1])
+        # b. N11W contiguous Tensor sliced on the W-dimension.
+        # ([N,1,1,1]@[W,W,W,W])
+        if d == 0:
+            r &= sympy.Ne(m, strides[1])
+        # This is necessary to:
+        # 1. distinguish the memory_format of N1H1;
+        #     [H, 1, 1, 1] channels_last stride
+        #     [H, H, 1, 1] contiguous stride
+        # 2. permutation of 1C1W:
+        #     [1, C, 1, H]@[HC, H, H, 1] transpose(1, 3)
+        #     [1, H, 1, C]@[HC, 1, H, H] shouldn't be identified as
+        #     channels_last
+        m = strides[d] * sympy.Max(sizes[d], 1)
+
+    return r
+
+
+def sympy_is_channels_last_strides_2d(sizes, strides):
+    return sympy_is_channels_last_strides_generic(sizes, strides, [1, 3, 2, 0])
+
+
+def sympy_is_channels_last_strides_3d(sizes, strides):
+    return sympy_is_channels_last_strides_generic(sizes, strides, [1, 4, 3, 2, 0])
+
+
+def _sympy_is_non_overlapping_and_dense_indicator(sizes, strides):
+    from torch.utils._sympy.functions import IsNonOverlappingAndDenseIndicator
+
+    return IsNonOverlappingAndDenseIndicator(*sizes, *strides)
+
+
+sizes_strides_methods = {
+    # TODO: These could also be done with indicators, maybe it is better
+    # for reasoning to do it that way
+    "is_contiguous": sympy_is_contiguous,
+    "is_channels_last_contiguous_2d": sympy_is_channels_last_contiguous_2d,
+    "is_channels_last_contiguous_3d": sympy_is_channels_last_contiguous_3d,
+    "is_channels_last_strides_2d": sympy_is_channels_last_strides_2d,
+    "is_channels_last_strides_3d": sympy_is_channels_last_strides_3d,
+    "is_non_overlapping_and_dense_indicator": _sympy_is_non_overlapping_and_dense_indicator,
+}
+
+alternate_impl_if_hinted_methods = {
+    "sym_min": builtins.min,
+    "sym_max": builtins.max,
+}
+
+
+def to_node(self, num):
+    if isinstance(num, SymTypes):
+        return num.node
+    elif type(num) is bool:
+        return self.wrap_bool(num)
+    elif type(num) is int:
+        return self.wrap_int(num)
+    elif type(num) is float:
+        return self.wrap_float(num)
+    else:
+        # NotImplemented is important so that Python tries the
+        # other magic method
+        return NotImplemented
+
+
+def wrap_node(x):
+    # TODO: let C++ also take advantage of this
+    if isinstance(x, SymNode) and x.constant is not None:
+        return x.constant
+    if x.is_int():
+        return SymInt(x)
+    elif x.is_float():
+        return SymFloat(x)
+    elif x.is_bool():
+        return SymBool(x)
+    else:
+        raise AssertionError(f"unrecognized return type {x}")
+
+
+def method_to_operator(method):
+    return METHOD_TO_OPERATOR[method]
+
+
+def _make_node_magic(method, func):
+    func = lru_cache(256)(func)
+
+    if method in magic_methods_on_operator_with_trailing_underscore:
+        method_attr = f"{method}_"
+    else:
+        method_attr = method
+
+    def binary_magic_impl(self, other):
+        from torch.fx.experimental.symbolic_shapes import safe_expand
+
+        op = method_to_operator(method)
+
+        out_hint = None
+        if self.hint is not None and other.hint is not None:
+            out_hint = op(self.hint, other.hint)
+
+        alternate_impl = alternate_impl_if_hinted_methods.get(method)
+        if alternate_impl and out_hint is not None:
+            return to_node(self, alternate_impl(wrap_node(self), wrap_node(other)))
+
+        if sym_function_mode():
+            return to_node(
+                self, handle_sym_dispatch(op, (wrap_node(self), wrap_node(other)), {})
+            )
+        assert isinstance(other, SymNode)
+        # TODO: consider constant prop here
+        try:
+            out = func(self.expr, other.expr)
+        except Exception:
+            log.warning("failed to eval %s(%s, %s)", method, self.expr, other.expr)
+            raise
+        out = safe_expand(out)
+        pytype: Type
+        # This is not strictly correct. In Python, a**b may return complex when
+        # a < 0 and b is a float: (-1)**2.1. Same for sympy.sqrt(-3.14). This
+        # returns a float while both arguments are ints: 2**(-1). Also, max and
+        # min do not type promote. To avoid having data-dependent control flow
+        # here, we just set the type to float if one of the args is a float. In
+        # case of a type mismatch, we assume that it will be detected during
+        # evaluation.
+        if method in always_float_magic_methods:
+            pytype = float
+        elif method in always_bool_magic_methods:
+            pytype = bool
+        elif self.pytype is float or other.pytype is float:
+            pytype = float
+        else:
+            pytype = self.pytype
+
+        if (
+            pytype is not None
+            and out_hint is not None
+            and not isinstance(out_hint, SymTypes)
+        ):
+            out_hint = pytype(out_hint)
+
+        # Create a FX node that corresponds to the operation being applied to
+        # this node.
+        fx_node, _ = self.shape_env.create_fx_call_function(
+            op, (self.fx_node, other.fx_node)
+        )
+        return SymNode(out, self.shape_env, pytype, out_hint, fx_node=fx_node)
+
+    def unary_magic_impl(self):
+        from torch.fx.experimental.symbolic_shapes import safe_expand
+
+        op = method_to_operator(method)
+        if sym_function_mode():
+            return to_node(self, handle_sym_dispatch(op, (wrap_node(self),), {}))
+        # TODO: consider constant prop here
+        expr = self.expr
+        if method == "floor" or method == "ceiling":
+            expr = self.shape_env._simplify_floor_div(expr)
+
+        try:
+            out = func(expr)
+        except Exception:
+            log.warning("failed to eval %s(%s)", method, expr)
+            raise
+
+        out_hint = None
+        if self.hint is not None:
+            out_hint = op(self.hint)
+        out = safe_expand(out)
+        pytype: Type
+        if method in always_int_magic_methods:
+            pytype = int
+        elif method in always_float_magic_methods:
+            pytype = float
+        else:
+            pytype = self.pytype
+
+        fx_node, _ = self.shape_env.create_fx_call_function(op, (self.fx_node,))
+        return SymNode(out, self.shape_env, pytype, out_hint, fx_node=fx_node)
+
+    if method in unary_magic_methods:
+        setattr(SymNode, f"_{method_attr}", unary_magic_impl)
+    elif method == "sym_ite":
+
+        def sym_ite_impl(pred_node, then_node, else_node):
+            from torch.fx.experimental.symbolic_shapes import safe_expand
+
+            out_hint = then_node.hint if pred_node.hint else else_node.hint
+            if sym_function_mode():
+                return to_node(
+                    pred_node,
+                    handle_sym_dispatch(
+                        sym_ite,
+                        (
+                            wrap_node(pred_node),
+                            wrap_node(then_node),
+                            wrap_node(else_node),
+                        ),
+                        {},
+                    ),
+                )
+
+            try:
+                out = func(pred_node.expr, then_node.expr, else_node.expr)
+            except Exception:
+                log.warning(
+                    "failed to eval %s(%s, %s, %s)",
+                    method,
+                    pred_node.expr,
+                    then_node.expr,
+                    else_node.expr,
+                )
+                raise
+
+            out = safe_expand(out)
+            fx_node, _ = pred_node.shape_env.create_fx_call_function(
+                sym_ite, (pred_node.fx_node, then_node.fx_node, else_node.fx_node)
+            )
+            return SymNode(
+                out, pred_node.shape_env, then_node.pytype, out_hint, fx_node=fx_node
+            )
+
+        setattr(SymNode, f"_{method_attr}", sym_ite_impl)
+    else:
+        setattr(SymNode, f"_{method_attr}", binary_magic_impl)
+
+
+def _make_node_sizes_strides(method, func):
+    # NB: don't LRU cache, lots of arguments
+
+    def sizes_strides_impl(self, sizes, strides):
+        op = getattr(sys.modules[__name__], method)
+        if sym_function_mode():
+            return to_node(
+                self,
+                handle_sym_dispatch(
+                    op,
+                    ([wrap_node(s) for s in sizes], [wrap_node(s) for s in strides]),
+                    {},
+                ),
+            )
+        size_exprs = [s.expr for s in sizes]
+        stride_exprs = [s.expr for s in strides]
+        try:
+            out = func(size_exprs, stride_exprs)
+        except Exception:
+            log.warning("failed to eval %s(%s, %s)", method, size_exprs, stride_exprs)
+            raise
+        # bool is never expandable
+
+        size_hints = []
+        out_hint = None
+        for s in sizes:
+            if s.hint is None:
+                break
+            size_hints.append(s.hint)
+        else:
+            stride_hints = []
+            for s in strides:
+                if s.hint is None:
+                    break
+                stride_hints.append(s.hint)
+            else:
+                out_hint = op(size_hints, stride_hints)
+
+        # NB: This is the indicator function, not the actual bool!
+        pytype: Type
+        if method.endswith("_indicator"):
+            pytype = int
+        else:
+            pytype = bool
+        return SymNode(out, self.shape_env, pytype, out_hint)
+
+    setattr(SymNode, f"_{method}", sizes_strides_impl)
+
+    # TODO: This is technically hotpath, but in the ideal end state
+    # guards on this will resolve at a higher level so you never
+    # spend time in this code
+    def sizes_strides_user(sizes, strides):
+        import sympy
+
+        from torch.fx.experimental.symbolic_shapes import (
+            eval_is_non_overlapping_and_dense,
+        )
+
+        for a in itertools.chain(sizes, strides):
+            if isinstance(a, SymInt):
+                return wrap_node(
+                    getattr(a.node, method)(
+                        [to_node(a.node, b) for b in sizes],
+                        [to_node(a.node, b) for b in strides],
+                    )
+                )
+        if method == "is_non_overlapping_and_dense_indicator":
+            return eval_is_non_overlapping_and_dense(sizes, strides)
+        else:
+            # TODO: this is an awful implementation
+            return bool(
+                func(
+                    [sympy.sympify(a) for a in sizes],
+                    [sympy.sympify(a) for a in strides],
+                )
+            )
+
+    # Skip for is_non_overlapping_and_dense_indicator
+    if not hasattr(sys.modules[__name__], method):
+        setattr(sys.modules[__name__], method, sizes_strides_user)
+
+
+for method, func in magic_methods.items():
+    _make_node_magic(method, func)
+
+for method, func in sizes_strides_methods.items():
+    _make_node_sizes_strides(method, func)
+
+
+def _make_user_magic(method, user_type):
+    # User magic takes care of wrapping the other operand into a node,
+    # so that our internal logic can assume everything is nodes
+
+    if method in magic_methods_on_operator_with_trailing_underscore:
+        method_attr = f"{method}_"
+    else:
+        method_attr = method
+
+    def get_constant(x: Union[SymInt, int, SymFloat, float, SymBool, bool]):
+        if isinstance(x, (int, float, bool)):
+            return x
+        if isinstance(x, SymBool):
+            return x.node.guard_bool("", 0)
+        raise AssertionError("expect to be called with constant SymBools")
+
+    def is_constant(x):
+        if isinstance(x, (int, float, bool)):
+            return True
+        if isinstance(x, (SymInt, SymFloat, SymBool)):
+            return x.node.is_constant()
+        return False
+
+    if method in bool_becomes_int_magic_methods:
+
+        def promote(x):
+            """Implements True+True=2, which works in python but not sympy"""
+            if isinstance(x, SymBool):
+                return SymInt(x.node.wrap_int(int(x)))
+            return x
+
+    else:
+
+        def promote(x):
+            return x
+
+    # Before and after performing the operation, check if any operands are constant.
+    # If so, extract out the constant values first. If `self` itself is a
+    # constant, then "redispatch" by calling back into the operator. Sometimes
+    # this means that operations involving SymBool return plain bools.
+    # Alternatively, we could also rewrap into constant Symbool (i.e. by
+    # implementing wrap_bool in ConstantSymNodeImpl), but we're not doing that
+    # today for no particular reason.
+    def unary_magic_impl(self):
+        self = promote(self)
+        if is_constant(self):
+            return (method_to_operator(method))(get_constant(self))
+        return wrap_node(getattr(self.node, method_attr)())
+
+    def binary_magic_impl(self, other):
+        self = promote(self)
+        other = promote(other)
+        if is_constant(self):
+            return (method_to_operator(method))(get_constant(self), other)
+        if is_constant(other):
+            other = get_constant(other)
+        other_node = to_node(self.node, other)
+        if other_node is NotImplemented:
+            return NotImplemented
+        ret = wrap_node(getattr(self.node, method_attr)(other_node))
+        return get_constant(ret) if is_constant(ret) else ret
+
+    def rbinary_magic_impl(self, other):
+        self = promote(self)
+        other = promote(other)
+        if is_constant(self):
+            return (method_to_operator(method))(get_constant(self), other)
+        if is_constant(other):
+            other = get_constant(other)
+        other_node = to_node(self.node, other)
+        if other_node is NotImplemented:
+            return NotImplemented
+        ret = wrap_node(getattr(other_node, method_attr)(self.node))
+        return get_constant(ret) if is_constant(ret) else ret
+
+    if method in unary_magic_methods:
+        setattr(user_type, f"__{method}__", unary_magic_impl)
+    elif method == "sym_ite":
+
+        def sym_ite_magic_impl(pred, then_val, else_val):
+            pred_node = pred.node
+            then_node = to_node(pred_node, then_val)
+            else_node = to_node(pred_node, else_val)
+            if then_node is NotImplemented or else_node is NotImplemented:
+                return NotImplemented
+            assert (
+                isinstance(then_node, SymNode)
+                and isinstance(else_node, SymNode)
+                and then_node.pytype == else_node.pytype
+            )
+            ret = wrap_node(getattr(pred.node, method_attr)(then_node, else_node))
+            return get_constant(ret) if ret.node.is_constant() else ret
+
+        setattr(user_type, f"__{method}__", sym_ite_magic_impl)
+    else:
+        setattr(user_type, f"__{method}__", binary_magic_impl)
+        if method in reflectable_magic_methods:
+            setattr(user_type, f"__r{method}__", rbinary_magic_impl)
+
+
+for method, func in magic_methods.items():  # type: ignore[assignment]
+    if method in only_bool_magic_methods:
+        _make_user_magic(method, SymBool)
+        continue
+    if method in also_bool_magic_methods or method in bool_becomes_int_magic_methods:
+        _make_user_magic(method, SymBool)
+    _make_user_magic(method, SymInt)
+    _make_user_magic(method, SymFloat)
+
+del method
+del func
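A small sketch of the end result, assuming the ShapeEnv from the companion symbolic_shapes module (next file) and its create_unbacked_symint helper; the printed output in the comments is illustrative:

    import torch
    from torch.fx.experimental.symbolic_shapes import ShapeEnv  # assumed companion module

    shape_env = ShapeEnv()
    u0 = shape_env.create_unbacked_symint()   # SymInt backed by a SymNode with no hint
    expr = u0 * 2 + 1                         # dispatches through the installed __mul__/__add__
    print(type(expr))                         # <class 'torch.SymInt'>
    print(expr)                               # something like 2*u0 + 1
    print(expr.node.pytype, expr.node.has_hint())  # <class 'int'> False
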
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/symbolic_shapes.py
ADDED
The diff for this file is too large to render.
See raw diff
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__init__.py
ADDED
@@ -0,0 +1,4 @@
+# mypy: disable-error-code=attr-defined
+from .core import unify, reify  # noqa: F403
+from .more import unifiable  # noqa: F403
+from .variable import var, isvar, vars, variables, Var  # noqa: F403
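A quick illustration of the re-exported unification API (a sketch; the exact repr of the substitution map may differ):

    from torch.fx.experimental.unification import unify, reify, var

    x = var('x')
    s = unify((1, x), (1, 2), {})   # solve for x, starting from an empty substitution: {~x: 2}
    print(s)
    print(reify((1, x), s))         # substitute back: (1, 2)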