applied-ai-018 committed on
Commit 25d27a1 · verified · 1 Parent(s): facc6c1

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step120/zero/19.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/19.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step120/zero/5.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step120/zero/5.post_attention_layernorm.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step120/zero/8.attention.query_key_value.weight/exp_avg.pt +3 -0
  6. venv/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/fx/__pycache__/_lazy_graph_module.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/fx/__pycache__/_pytree.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/__init__.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_backward_state.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_config.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_sym_dispatch_mode.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/const_fold.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/debug.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/graph_gradual_typechecker.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/merge_matmul.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/normalize.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/optimization.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/proxy_tensor.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/recording.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/refinement_types.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/schema_type_annotation.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/sym_node.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/unify_refinements.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/validator.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/fx/experimental/_config.py +76 -0
  36. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__init__.py +0 -0
  37. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_transformation.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/operation.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/transform_to_z3.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/z3_types.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py +557 -0
  46. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_generator.py +1279 -0
  47. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py +1040 -0
  48. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/operation.py +14 -0
  49. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py +348 -0
  50. venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/util.py +52 -0
ckpts/universal/global_step120/zero/19.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8dab19f81a96390c80aad7c00a62f881c820d2b0e7f8cdf3b65a10fc354c1a4b
+size 33555612
ckpts/universal/global_step120/zero/19.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb4b562042d46e92045107acdb60e2f801966785f94d775dc5b323be07f6efc1
+size 33555627
ckpts/universal/global_step120/zero/5.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4083b9e344764589ad5ff7e0c7bce6d645a8862c5bd7fa8bc19b900128638b3
+size 9387
ckpts/universal/global_step120/zero/5.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1fdce3bc60013d6d1d96195115a6ab7160d1b6cbfa42082f59b521708decf46
+size 9293
ckpts/universal/global_step120/zero/8.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:debbfd0ebbeefda585b5c4d8364a4a4553a5cec6eb0afee6cae4d22a8952fc48
+size 50332828
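Each checkpoint file above is committed as a Git LFS pointer: the repository stores only the three-line stub (version, oid, size), while the tensor data itself lives in LFS storage. A minimal sketch, not part of the commit, of reading such a stub in Python (the local path is hypothetical; the field names match the pointer format shown above):

# Parse a Git LFS pointer stub into its key/value fields.
pointer = {}
with open("exp_avg.pt") as f:  # the LFS stub, not the real tensor
    for line in f:
        key, _, value = line.partition(" ")
        pointer[key] = value.strip()

print(pointer["oid"])   # e.g. "sha256:8dab19f8..."
print(pointer["size"])  # e.g. "33555612" (bytes)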
venv/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc ADDED
Binary file (1.21 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/__pycache__/_lazy_graph_module.cpython-310.pyc ADDED
Binary file (6.52 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/__pycache__/_pytree.cpython-310.pyc ADDED
Binary file (3.63 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc ADDED
Binary file (34.3 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc ADDED
Binary file (219 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc ADDED
Binary file (14.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc ADDED
Binary file (19.8 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_backward_state.cpython-310.pyc ADDED
Binary file (1.35 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_config.cpython-310.pyc ADDED
Binary file (1.17 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_sym_dispatch_mode.cpython-310.pyc ADDED
Binary file (1.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc ADDED
Binary file (29.1 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/const_fold.cpython-310.pyc ADDED
Binary file (6.77 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/debug.cpython-310.pyc ADDED
Binary file (1.23 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/graph_gradual_typechecker.cpython-310.pyc ADDED
Binary file (25.7 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/merge_matmul.cpython-310.pyc ADDED
Binary file (4.52 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc ADDED
Binary file (9.19 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/normalize.cpython-310.pyc ADDED
Binary file (5.15 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/optimization.cpython-310.pyc ADDED
Binary file (14.3 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc ADDED
Binary file (8.34 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/proxy_tensor.cpython-310.pyc ADDED
Binary file (33 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/recording.cpython-310.pyc ADDED
Binary file (9.27 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/refinement_types.cpython-310.pyc ADDED
Binary file (932 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc ADDED
Binary file (4.87 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/schema_type_annotation.cpython-310.pyc ADDED
Binary file (4.08 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/sym_node.cpython-310.pyc ADDED
Binary file (30.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc ADDED
Binary file (112 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/unify_refinements.cpython-310.pyc ADDED
Binary file (2.92 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/validator.cpython-310.pyc ADDED
Binary file (20.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/_config.py ADDED
@@ -0,0 +1,76 @@
+import os
+import sys
+
+from typing import Optional
+
+# [@compile_ignored: debug] Uses z3 for validating the guard optimizations transformations.
+translation_validation = (
+    os.environ.get("TORCHDYNAMO_TRANSLATION_VALIDATION", "0") == "1"
+)
+# Timeout (in milliseconds) for z3 finding a solution.
+# [@compile_ignored: debug]
+translation_validation_timeout = int(
+    os.environ.get("TORCHDYNAMO_TRANSLATION_VALIDATION_TIMEOUT", "600000")
+)
+# Disables bisection for translation validation.
+#
+# Translation validation bisection is enabled by default, if translation validation
+# is also enabled. This should help finding guard simplification issues. However,
+# since validation uses Z3 for bisecting, it might take a lot of time.
+#
+# Set this configuration option so as to avoid bisecting.
+# [@compile_ignored: debug]
+translation_validation_no_bisect = (
+    os.environ.get("TORCHDYNAMO_TRANSLATION_NO_BISECT", "0") == "1"
+)
+# Checks whether replaying ShapeEnv events on a freshly constructed one yields
+# the a ShapeEnv with the same state. This should be used only in testing.
+check_shape_env_recorded_events = False
+
+# TODO: Perhaps consider allowing unions for the configs below (so you can hit
+# multiple reps at the same time)
+
+# Give extended debug information if the string representation of a guard
+# matches this. For example, set this to "Ne(s0, 10)" and whenever we issue
+# this guard, we will generate full Python and C++ backtrace
+# [@compile_ignored: debug]
+extended_debug_guard_added = os.environ.get(
+    "TORCHDYNAMO_EXTENDED_DEBUG_GUARD_ADDED", None
+)
+
+# Give extended debug information when a particular symbol is allocated. For
+# example, set this to "u2" and whenever we create this symbol, we will
+# generate full Python and C++ backtrace
+# [@compile_ignored: debug]
+extended_debug_create_symbol = os.environ.get(
+    "TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL", None
+)
+
+# Give extended debug information (C++ backtrace) for all extended debug
+# settings as well as errors. The C++ backtrace is slow and very spammy so we
+# don't include it by default even when you're requesting extended debug.
+# [@compile_ignored: debug]
+extended_debug_cpp = os.environ.get("TORCHDYNAMO_EXTENDED_DEBUG_CPP", "") != ""
+
+# [@compile_ignored: debug] Show a warning for every specialization
+print_specializations = False
+
+# wraps (un)equalities with 'Not' class after recording the correct expression
+# in the FX graph. This should incorrectly construct the divisible and replacement
+# lists, and incorrectly issue guards.
+inject_EVALUATE_EXPR_flip_equality_TESTING_ONLY = False
+
+# [@compile_ignored: debug] Validate that ShapeEnv's version key is updated correctly
+validate_shape_env_version_key = False
+
+# If we produce more than this many guards on a symbol, force the symbol to
+# get specialized and bail out if this many guards mention this particular
+# symbol. This may be slightly more aggressive than the true number of guards
+# issued (as we test if we've hit the limit on-the-fly, whereas we may
+# do further simplifications at final guard issuance time that make guards
+# irrelevant.)
+symbol_guard_limit_before_specialize: Optional[int] = None
+
+from torch.utils._config_module import install_config_module
+
+install_config_module(sys.modules[__name__])
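The flags above are plain module-level values populated from environment variables at import time, then wrapped by install_config_module. A minimal sketch, not part of the commit, of how they might be exercised (names are taken from the file above; behavior after install_config_module is an assumption about the config wrapper):

import os

# Env vars are read when the module is first imported, so set them beforehand.
os.environ["TORCHDYNAMO_TRANSLATION_VALIDATION"] = "1"

from torch.fx.experimental import _config as fx_config

print(fx_config.translation_validation)          # True, per the == "1" check above
print(fx_config.translation_validation_timeout)  # 600000 unless overridden

# Flags can also be toggled directly as module attributes after import.
fx_config.print_specializations = True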
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (211 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc ADDED
Binary file (17.1 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc ADDED
Binary file (30.7 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_transformation.cpython-310.pyc ADDED
Binary file (26.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/operation.cpython-310.pyc ADDED
Binary file (478 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/transform_to_z3.cpython-310.pyc ADDED
Binary file (8.11 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc ADDED
Binary file (1.91 kB). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/z3_types.cpython-310.pyc ADDED
Binary file (718 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py ADDED
@@ -0,0 +1,557 @@
+from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_sub, op_mul, op_div, \
+    op_mod, op_gt, op_lt, op_neq, op_eq
+from torch.fx.tensor_type import TensorType, Dyn
+
+
+class Constraint:
+    pass
+
+
+class Conj(Constraint):
+    def __init__(self, conjuncts):
+        """
+        :param conjuncts: Conjunction of constraints
+        """
+        self.conjucts = conjuncts
+
+    def __eq__(self, other):
+        if isinstance(other, Conj):
+            return self.conjucts == other.conjucts and self.conjucts == other.conjucts
+        else:
+            return False
+
+    def __repr__(self):
+        return f'And({self.conjucts})'
+
+
+class Disj(Constraint):
+    def __init__(self, disjuncts):
+        """
+        :param disjuncts: Disjunction of constraints
+        """
+        self.disjuncts = disjuncts
+
+    def __eq__(self, other):
+        if isinstance(other, Disj):
+            return self.disjuncts == other.disjuncts and self.disjuncts == other.disjuncts
+        else:
+            return False
+
+    def __repr__(self):
+        return f'Or({self.disjuncts})'
+
+
+class Prod(Constraint):
+    def __init__(self, products):
+        """
+        :param products: lists of dimensions to multiply
+        """
+        self.products = products
+
+    def __eq__(self, other):
+        if isinstance(other, Prod):
+            return self.products == other.products and self.products == other.products
+        else:
+            return False
+
+    def __repr__(self):
+        return f'Product({self.products})'
+
+
+class T(Constraint):
+    """
+    True
+    """
+    def __init__(self):
+        pass
+
+    def __eq__(self, other):
+        return isinstance(other, T)
+
+    def __repr__(self):
+        return 'True'
+
+class F(Constraint):
+    """
+    False
+    """
+    def __init__(self):
+        pass
+
+    def __eq__(self, other):
+        return isinstance(other, F)
+
+    def __repr__(self):
+        return 'False'
+
+
+class BinaryConstraint(Constraint):
+    """
+    Represents all binary operations
+    """
+    def __init__(self, lhs, rhs, op):
+        """
+        :param lhs: lhs of the constraint
+        :param rhs: rhs of the constraint
+        :param op: string representing the operation
+        """
+        self.lhs = lhs
+        self.rhs = rhs
+        self.op = op
+
+    def __eq__(self, other):
+        if isinstance(other, BinaryConstraint):
+            return self.lhs == other.lhs and self.rhs == other.rhs and self.op == other.op
+        else:
+            return False
+
+    def __repr__(self):
+        return f'({self.lhs} {self.op} {self.rhs})'
+
+
+class BinConstraintT(BinaryConstraint):
+    """
+    Binary constraints about tensors
+    """
+    def __init__(self, lhs, rhs, op):
+        assert (isinstance(lhs, (TVar, TensorType, int)) or lhs == Dyn) and \
+            (isinstance(rhs, (TVar, TensorType, int)) or rhs == Dyn)
+        super().__init__(lhs, rhs, op)
+
+    def __eq__(self, other):
+        return super().__eq__(other)
+
+
+class BinConstraintD(BinaryConstraint):
+    """
+    Binary constraints about dimensions
+    """
+    def __init__(self, lhs, rhs, op):
+        assert is_algebraic_expression(lhs) or is_dim(lhs) or is_bool_expr(lhs)
+        assert is_algebraic_expression(rhs) or is_dim(rhs) or is_bool_expr(rhs)
+
+        super().__init__(lhs, rhs, op)
+
+    def __eq__(self, other):
+        return super().__eq__(other)
+
+
+
+class TGreatestUpperBound(Constraint):
+    """
+    Greatest Upper bound for tensors with dynamic type
+    """
+    def __init__(self, res, rhs1, rhs2):
+        """
+        :param res: tensor variable that stores the result of the outout
+        :param rhs1: tensor or tensor variable
+        :param rhs2: tensor or tensor variabke
+        """
+        self.res = res
+        self.rhs1 = rhs1
+        self.rhs2 = rhs2
+
+    def __repr__(self):
+        return f'{self.res} = {self.rhs1}⊔*{self.rhs2}'
+
+    def __eq__(self, other):
+        if isinstance(other, TGreatestUpperBound):
+            return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2
+        else:
+            return False
+
+
+class DGreatestUpperBound(Constraint):
+    """
+    Greatest Upper bound for dimensions
+    """
+    def __init__(self, res, rhs1, rhs2):
+        """
+        :param res: Dimension variable to store the result
+        :param rhs1: dimension variable 1
+        :param rhs2: dimension variable 2
+        """
+        assert is_dim(res)
+        assert is_dim(rhs1)
+        assert is_dim(rhs2)
+
+        self.res = res
+        self.rhs1 = rhs1
+        self.rhs2 = rhs2
+
+    def __repr__(self):
+        return f'{self.res} = {self.rhs1}⊔{self.rhs2}'
+
+    def __eq__(self, other):
+        if isinstance(other, DGreatestUpperBound):
+            return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2
+        else:
+            return False
+
+
+class CanReshape(Constraint):
+    """
+    can_reshape constraint
+    """
+    def __init__(self, src, target):
+        """
+        :param src: tensor variable
+        :param target: tensor
+        """
+        self.src = src
+        self.target = target
+
+    def __repr__(self):
+        return f'can-reshape({self.src}, {self.target})'
+
+    def __eq__(self, other):
+        if isinstance(other, CanReshape):
+            return self.src == other.src and self.target == other.target
+        else:
+            return False
+
+
+class IndexSelect(Constraint):
+
+    def __init__(self, tensor_size, input_var, dim_replace, index, output):
+        """
+        Args:
+            input_var: input to index_select
+            tensor_size: tensor size we are considering
+            dim_replace: the dimension of the output at "index"
+            index: location of the dimensions to replace in the input
+            output: variable to store the result
+        """
+        assert isinstance(input_var, TVar)
+        assert isinstance(output, TVar)
+        assert isinstance(dim_replace, DVar) or dim_replace == Dyn
+        assert isinstance(index, int)
+
+        self.input_var = input_var
+        self.tensor_size = tensor_size
+        self.dim_replace = dim_replace
+        self.index = index
+        self.output = output
+
+    def __repr__(self):
+
+        return f' {self.output} = ' \
+            f'IndexSelect({self.input_var}, ' \
+            f'tensor_size: {self.tensor_size}, ' \
+            f'{self.dim_replace}, ' \
+            f'{self.index})'
+
+    def __eq__(self, other):
+        if isinstance(other, IndexSelect):
+            return self.tensor_size == other.tensor_size and \
+                self.dim_replace == other.dim_replace and \
+                self.index == other.index and \
+                self.output == other.output and \
+                self.input_var == other.input_var
+        else:
+            return False
+
+
+class Transpose(Constraint):
+
+    def __init__(self, tensor_size, input_var, index1, index2, output):
+        """
+        Args:
+            tensor_size: current tensor size
+            input_var: variable to hold input
+            index1: dimension 1
+            index2: dimension 2
+            output: output that stores result
+        """
+        assert isinstance(input_var, TVar)
+        assert isinstance(output, TVar)
+        assert isinstance(index1, int)
+        assert isinstance(index2, int)
+
+        self.input_var = input_var
+        self.tensor_size = tensor_size
+        self.index1 = index1
+        self.index2 = index2
+        self.output = output
+
+    def __repr__(self):
+
+        return f' {self.output} = ' \
+            f'Transpose({self.input_var}, ' \
+            f'tensor_size: {self.tensor_size}, ' \
+            f'{self.index1}, ' \
+            f'{self.index2})'
+
+    def __eq__(self, other):
+        if isinstance(other, Transpose):
+            return self.tensor_size == other.tensor_size and \
+                self.index1 == other.index1 and \
+                self.index2 == other.index2 and \
+                self.output == other.output and \
+                self.input_var == other.input_var
+        else:
+            return False
+
+
+class GetItem(Constraint):
+
+    def __init__(self, tensor_size, index, res, input_var):
+        """
+        Constraint for getting item given a tensor size
+        :param tensor_size: actual number
+        :param index: actual number representing the index
+        :param res: dimension variable to carry the item we get
+        :param input_var: a tensor variable from which we will get item
+        """
+        assert isinstance(res, DVar)
+
+        self.res = res
+        self.tensor_size = tensor_size
+        self.index = index
+        self.input_var = input_var
+
+    def __repr__(self):
+        return f' {self.res} = GetItem({self.input_var}, tensor_size: {self.tensor_size}, {self.index})'
+
+    def __eq__(self, other):
+        if isinstance(other, GetItem):
+            return self.res == other.res and \
+                self.tensor_size == other.tensor_size and \
+                self.index == other.index and \
+                self.input_var == other.input_var
+        else:
+            return False
+
+class GetItemTensor(Constraint):
+
+    def __init__(self, tensor_size, index_tuple, res, input_var):
+        """
+        Constraint for getting item given a tensor size
+        However, when the argument is a tuple, we will
+        expect a tensor
+        :param tensor_size: actual number representing the rank
+        :param index_tuple: tuple for indexing
+        :param res: tensor variable to carry the item we get
+        :param input_var: a tensor variable from which we will get item
+        """
+        assert isinstance(res, TVar)
+
+        self.res = res
+        self.tensor_size = tensor_size
+        self.index_tuple = index_tuple
+        self.input_var = input_var
+
+    def __repr__(self):
+        return f' {self.res} = GetItemT({self.input_var}, tensor_size: {self.tensor_size}, {self.index_tuple})'
+
+    def __eq__(self, other):
+        if isinstance(other, GetItemTensor):
+            return self.res == other.res and \
+                self.tensor_size == other.tensor_size and \
+                self.index_tuple == other.index_tuple and \
+                self.input_var == other.input_var
+        else:
+            return False
+
+class CalcConv(Constraint):
+
+    def __init__(self, conv_result, input_var, c_out, kernel, padding, stride, dilation, matching_constraint_vars):
+        """
+        :param conv_result: the convolution result
+        :param input_var: input to convolution
+        :param c_out: output chanel type
+        :param kernel: kernel tuple
+        """
+        self.conv_result = conv_result
+        self.input_var = input_var
+        self.c_out = c_out
+        self.kernel = kernel
+        self.padding = padding
+        self.stride = stride
+        self.dilation = dilation
+        self.matching_constraint = matching_constraint_vars
+
+    def __repr__(self):
+        return f'{self.conv_result} =' \
+            f' calc-conv({self.input_var},' \
+            f' {self.c_out}, {self.kernel}, ' \
+            f'{self.padding}, {self.stride},' \
+            f' {self.dilation})'
+
+    def __eq__(self, other):
+        if isinstance(other, CalcConv):
+            return self.conv_result == other.conv_result and self.input_var == other.input_var and \
+                self.c_out == other.c_out and self.kernel == other.kernel and self.padding == other.padding \
+                and self.stride == other.stride and self.dilation == other.dilation \
+                and self.matching_constraint == other.matching_constraint
+        else:
+            return False
+
+
+class CalcMaxPool(Constraint):
+
+    def __init__(self, maxpool_result, input_var, kernel, padding, stride, dilation, matching_constraint_vars):
+        """
+        :param maxpool_result: the result of maxpool
+        :param input_var: input to convolution
+        :param kernel: kernel tuple
+        """
+        self.maxpool_result = maxpool_result
+        self.input_var = input_var
+        self.kernel = kernel
+        self.padding = padding
+        self.stride = stride
+        self.dilation = dilation
+        self.matching_constraint = matching_constraint_vars
+
+    def __repr__(self):
+        return f'{self.maxpool_result} =' \
+            f' calc-maxpool({self.input_var},' \
+            f' {self.kernel}, ' \
+            f'{self.padding}, {self.stride},' \
+            f' {self.dilation})'
+
+    def __eq__(self, other):
+        if isinstance(other, CalcMaxPool):
+            return self.maxpool_result == other.maxpool_result and self.input_var == other.input_var \
+                and self.kernel == other.kernel and self.padding == other.padding \
+                and self.stride == other.stride and self.dilation == other.dilation \
+                and self.matching_constraint == other.matching_constraint
+        else:
+            return False
+
+
+class ApplyBroadcasting(Constraint):
+    def __init__(self, res1, res2, input1, input2):
+        """
+        :param res1: resulting tensor 1
+        :param res2: resulting tensor 2
+        :param input1: tensor variable 1
+        :param input2: tensor variable 2
+        """
+        self.res1 = res1
+        self.res2 = res2
+        self.input1 = input1
+        self.input2 = input2
+
+    def __eq__(self, other):
+        if isinstance(other, ApplyBroadcasting):
+            return self.res1 == other.res1 \
+                and self.res2 == other.res2 \
+                and self.input1 == other.input1 \
+                and self.input2 == other.input2
+        else:
+            return False
+
+    def __repr__(self):
+        return f'{self.res1}, {self.res2} ='f' apply-broadcasting({self.input1},' f' {self.input2})'
+
+
+class CalcProduct(Constraint):
+    """
+    Given correct dimensions, calculate the product for flatten accounting for Dyn
+    """
+    def __init__(self, start, end, flattened, dims_to_flatten):
+        """
+        :param start: start index
+        :param end: end index
+        :param flattened: variable to store the product
+        :param dims_to_flatten: the type which we will flatten
+        """
+        assert isinstance(dims_to_flatten, list)
+        assert isinstance(flattened, TVar)
+        assert isinstance(start, int)
+        assert isinstance(end, int)
+
+        self.start = start
+        self.end = end
+        self.dims_to_flatten = dims_to_flatten
+        self.flattened = flattened
+
+    def __eq__(self, other):
+        if isinstance(other, CalcProduct):
+            return self.start == other.start and self.end == other.end and \
+                self.dims_to_flatten == other.dims_to_flatten and self.flattened == other.flattened
+
+        else:
+            return False
+
+    def __repr__(self):
+        return f'{self.flattened} = CalcProduct({self.start}, {self.end}, {self.dims_to_flatten})'
+
+
+class TVar:
+    """
+    Tensor variable with no tensor constructor
+    """
+    def __init__(self, tvar):
+        """
+        :param tvar: tensor variable
+        """
+        self.tvar = tvar
+
+    def __repr__(self):
+        return f'TV({self.tvar})'
+
+    def __eq__(self, other):
+        if isinstance(other, TVar):
+            return self.tvar == other.tvar
+        else:
+            return False
+
+
+class DVar:
+    """
+    Dimension variable
+    """
+    def __init__(self, c):
+        """
+        :param c: character or number
+        """
+        self.c = c
+
+    def __repr__(self):
+        return f'DV({self.c})'
+
+    def __eq__(self, other):
+        if isinstance(other, DVar):
+            return self.c == other.c
+        else:
+            return False
+
+
+class BVar:
+    """
+    Boolean variable
+    """
+    def __init__(self, c):
+        """
+        :param c: character or number
+        """
+        self.c = c
+
+    def __repr__(self):
+        return f'BV({self.c})'
+
+    def __eq__(self, other):
+        if isinstance(other, BVar):
+            return self.c == other.c
+        else:
+            return False
+
+
+def is_algebraic_expression(constraint):
+    if isinstance(constraint, BinConstraintD):
+        return constraint.op in [op_add, op_sub, op_div, op_mul, op_mod]
+    else:
+        return isinstance(constraint, Prod)
+
+
+def is_bool_expr(constraint):
+    if isinstance(constraint, BinConstraintD):
+        return constraint.op in [op_gt, op_lt, op_neq, op_eq]
+    else:
+        return isinstance(constraint, (BVar, Conj, Disj))
+
+def is_dim(d):
+    return isinstance(d, (DVar, int)) or d == Dyn
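For orientation, a minimal sketch, not part of the commit, of how the classes above compose, using only names defined in this file and the op_* constants it imports from the sibling operation module:

from torch.fx.experimental.migrate_gradual_types.constraint import (
    BinConstraintD, Conj, DVar, is_dim,
)
from torch.fx.experimental.migrate_gradual_types.operation import op_eq, op_lt

d1, d2 = DVar(1), DVar(2)

# "d1 == 3 and d1 < d2": two dimension constraints joined by a conjunction
c = Conj([BinConstraintD(d1, 3, op_eq), BinConstraintD(d1, d2, op_lt)])

print(c)           # And([...]) via the __repr__ methods defined above
print(is_dim(d1))  # True: DVar, int, and Dyn all count as dimensions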
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_generator.py ADDED
@@ -0,0 +1,1279 @@
1
+ import torch
2
+ import operator
3
+ import warnings
4
+ from typing import Callable, Dict, Iterable
5
+
6
+ from torch.fx._symbolic_trace import _assert_is_none
7
+ from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, CalcProduct, \
8
+ Disj, TGreatestUpperBound, CalcMaxPool, CalcConv, Conj, BinConstraintT, CanReshape, BinConstraintD, GetItem, T, F, \
9
+ TVar, DVar, GetItemTensor, IndexSelect, Transpose, DGreatestUpperBound
10
+ from torch.fx.experimental.migrate_gradual_types.operation import \
11
+ op_eq, op_matching, op_consistency, op_leq, op_precision, op_gt, op_div, op_sub, op_neq, op_lt, op_add, op_mul
12
+ from torch.fx.node import Target, Node
13
+ from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar, gen_tvar, \
14
+ gen_bvar
15
+
16
+ from torch.fx.tensor_type import Dyn, TensorType
17
+ from torch.nn.modules.conv import Conv2d
18
+ from torch.nn.modules.batchnorm import BatchNorm2d
19
+
20
+ _INFERENCE_RULES: Dict[Target, Callable] = {}
21
+
22
+ MAX_TENSOR_RANK = 4
23
+
24
+ def register_inference_rule(call_target):
25
+ def register(fn):
26
+ if call_target in _INFERENCE_RULES:
27
+ raise RuntimeError(f'Inference rule already registered for {call_target}!')
28
+ _INFERENCE_RULES[call_target] = fn
29
+ return fn
30
+ return register
31
+
32
+
33
+ def generate_flatten_constraints(start_dim, end_dim, input, flattened, n, counter):
34
+ d, counter = gen_tensor_dims(n, counter)
35
+ c1 = BinConstraintT(input, TensorType(d), op_eq)
36
+ start_dim = n if start_dim == -1 else abs(start_dim)
37
+ end_dim = n + end_dim + 1 if end_dim < 0 else end_dim + 1
38
+ c2 = CalcProduct(start_dim, end_dim, flattened, d)
39
+ nat_constraints = gen_nat_constraints(d)
40
+ return Conj([c1, c2, *nat_constraints]), counter
41
+
42
+
43
+ @register_inference_rule(getattr)
44
+ def get_attr_inference_rule(n: Node, symbols, constraints, counter):
45
+ """
46
+ If the attribute is "device" then the tensor shape is preserved
47
+ """
48
+ assert isinstance(n.args[0], Node)
49
+ assert isinstance(n.args[1], str)
50
+ output, counter = gen_tvar(counter)
51
+ symbols[n] = output
52
+
53
+ input = symbols[n.args[0]]
54
+ attr = n.args[1]
55
+
56
+ if attr == 'device':
57
+ return [BinConstraintT(input, output, op_eq)], counter
58
+ else:
59
+ raise NotImplementedError('Not yet implemented')
60
+
61
+ @register_inference_rule(torch.bmm)
62
+ def bmm_inference_rule(n: Node, symbols, constraints, counter):
63
+ """
64
+ Constraints that match the input to a size 3 tensor
65
+ and switch the dimensions according to the rules
66
+ of batch multiplication
67
+ """
68
+ assert isinstance(n.args[0], Node)
69
+ assert isinstance(n.args[1], Node)
70
+
71
+ bmm_output, counter = gen_tvar(counter)
72
+ symbols[n] = bmm_output
73
+
74
+ bmm_input1 = symbols[n.args[0]]
75
+ bmm_input2 = symbols[n.args[1]]
76
+
77
+ dims_input1, counter = gen_tensor_dims(3, counter)
78
+ dims_input2, counter = gen_tensor_dims(3, counter)
79
+
80
+ inputs_dyn = Conj([BinConstraintT(bmm_input1, Dyn, op_eq),
81
+ BinConstraintT(bmm_input2, Dyn, op_eq),
82
+ BinConstraintT(bmm_output, Dyn, op_eq)])
83
+
84
+ input1_dyn = Conj([BinConstraintT(bmm_input1, Dyn, op_eq),
85
+ BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
86
+ BinConstraintT(bmm_output, TensorType([dims_input2[0], Dyn, dims_input2[2]]), op_eq)])
87
+
88
+ input2_dyn = Conj([BinConstraintT(bmm_input2, Dyn, op_eq),
89
+ BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
90
+ BinConstraintT(bmm_output, TensorType([dims_input1[0], dims_input1[1], Dyn]), op_eq)])
91
+
92
+ consistency_constraints = [BinConstraintD(dims_input1[0], dims_input2[0], op_consistency)]
93
+
94
+ batch_size, counter = gen_dvar(counter)
95
+
96
+ inputs_are_tensors = Conj([BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
97
+ BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
98
+ BinConstraintT(bmm_output, TensorType([batch_size, dims_input1[1], dims_input2[2]]), op_eq),
99
+ *consistency_constraints, DGreatestUpperBound(batch_size, dims_input1[0], dims_input2[0])])
100
+
101
+ return [Disj([inputs_dyn, input1_dyn, input2_dyn, inputs_are_tensors])], counter
102
+
103
+
104
+ @register_inference_rule("index_select")
105
+ def index_select_inference_rule(n: Node, symbols, constraints, counter):
106
+ """
107
+ We constrain the second argument to a vector or Dyn.
108
+ The output replaces the input with the shape of the vector
109
+ at the position given by the index (first argument)
110
+ """
111
+ # print(n.args)
112
+ assert isinstance(n.args[0], Node)
113
+ assert isinstance(n.args[1], int)
114
+ assert isinstance(n.args[2], Node)
115
+
116
+
117
+
118
+ index_select, counter = gen_tvar(counter)
119
+ symbols[n] = index_select
120
+
121
+ dims, counter = gen_tensor_dims(1, counter)
122
+
123
+ # equality constraint
124
+ is_size_1 = BinConstraintT(symbols[n.args[2]], TensorType(dims), op_eq)
125
+ is_dyn = BinConstraintT(symbols[n.args[2]], Dyn, op_eq)
126
+
127
+ c2 = Conj([is_size_1, Disj([IndexSelect(i + 1, symbols[n.args[0]], dims[0], n.args[1], index_select)
128
+ for i in range(MAX_TENSOR_RANK)])])
129
+ c3 = Conj([is_dyn, Disj([IndexSelect(i + 1, symbols[n.args[0]], Dyn, n.args[1], index_select)
130
+ for i in range(MAX_TENSOR_RANK)])])
131
+
132
+ return [Disj([c2, c3])], counter
133
+
134
+
135
+ @register_inference_rule("expand")
136
+ def expand_inference_rule(n: Node, symbols, constraints, counter):
137
+ """
138
+ We generate the exact constraints as we do for tensor additions but we constraint
139
+ the rank of this expression to be equal to len(n.args[1:]) so that only
140
+ those cases get considered for the output
141
+ """
142
+ assert isinstance(n.args[0], Node)
143
+
144
+ # define the output for expand
145
+ expand, counter = gen_tvar(counter)
146
+ symbols[n] = expand
147
+
148
+ # since we do not have two nodes here, we will construct an argument variable
149
+ e1 = symbols[n.args[0]]
150
+ e2, counter = gen_tvar(counter)
151
+
152
+ e2_nat_constraints = []
153
+ for arg in n.args[1:]:
154
+ assert isinstance(arg, (Node, int))
155
+ if isinstance(arg, Node):
156
+ assert isinstance(symbols[arg], DVar)
157
+ e2_nat_constraints.append(BinConstraintD(0, symbols[arg], op_leq))
158
+
159
+ e2_constraint = BinConstraintT(e2, TensorType([arg if isinstance(arg, int) else symbols[arg] for arg in n.args[1:]]), op_eq)
160
+
161
+ constraints, counter = gen_broadcasting_constraints(e1, e2, symbols, counter, expand)
162
+
163
+ # constraint the output size
164
+ dims, counter = gen_tensor_dims(len(n.args[1:]), counter)
165
+ nat_constraints = gen_nat_constraints(dims)
166
+ c = [BinConstraintT(expand, TensorType(dims), op_eq), *nat_constraints, e2_constraint, *e2_nat_constraints]
167
+ constraints += c
168
+
169
+ return constraints, counter
170
+
171
+
172
+ @register_inference_rule(torch.nn.functional.gelu)
173
+ @register_inference_rule(torch.nn.functional.dropout)
174
+ @register_inference_rule(torch.nn.functional.softmax)
175
+ @register_inference_rule("detach")
176
+ @register_inference_rule("to")
177
+ @register_inference_rule("int")
178
+ @register_inference_rule("long")
179
+ @register_inference_rule("contiguous")
180
+ @register_inference_rule(torch.ones)
181
+ @register_inference_rule(torch.zeros)
182
+ def equality_inference_rule(n: Node, symbols, constraints, counter):
183
+ """
184
+ We generate the constraint: input = output
185
+ """
186
+ output, counter = gen_tvar(counter)
187
+ symbols[n] = output
188
+
189
+ if isinstance(n.args[0], Node):
190
+ input = symbols[n.args[0]]
191
+ if isinstance(input, TVar):
192
+ return [BinConstraintT(input, output, op_eq)], counter
193
+
194
+ # then we have dimension variables
195
+ else:
196
+ for arg in n.args:
197
+ assert isinstance(symbols[arg], DVar)
198
+ my_size = [symbols[arg] for arg in n.args]
199
+ return [BinConstraintT(output, TensorType(my_size), op_eq)], counter
200
+
201
+ elif isinstance(n.args[0], tuple):
202
+ # then the tuple is the size
203
+ assert len(n.args[0]) <= 4
204
+ my_size = [symbols[arg] for arg in n.args[0]]
205
+ return [BinConstraintT(output, TensorType(my_size), op_eq)], counter
206
+ else:
207
+ raise NotImplementedError('Method not yet implemented')
208
+
209
+
210
+ @register_inference_rule("transpose")
211
+ def transpose_inference_rule(n: Node, symbols, constraints, counter):
212
+ """
213
+ Can be considered as a sequence of two index selects, so we generate constraints accordingly
214
+ """
215
+ assert isinstance(n.args[0], Node)
216
+ assert isinstance(n.args[1], int)
217
+ assert isinstance(n.args[2], int)
218
+
219
+ output, counter = gen_tvar(counter)
220
+ symbols[n] = output
221
+
222
+ from_arg = symbols[n.args[0]]
223
+ assert isinstance(from_arg, TVar)
224
+
225
+ # input and output are dyn
226
+ is_dyn = Conj([BinConstraintT(from_arg, Dyn, op_eq), BinConstraintT(output, Dyn, op_eq)])
227
+
228
+ # or input is a tensor and we actually do the replacement
229
+ c3 = Disj([Transpose(i + 1, from_arg, n.args[1], n.args[2], output) for i in range(MAX_TENSOR_RANK)])
230
+
231
+ return [Disj([is_dyn, c3])], counter
232
+
233
+
234
+ @register_inference_rule("type_as")
235
+ def type_inference_rule(n: Node, symbols, constraints, counter):
236
+ """
237
+ We generate the constraint: input = output
238
+ """
239
+ assert isinstance(n.args[0], Node)
240
+ assert isinstance(n.args[1], Node)
241
+
242
+ output, counter = gen_tvar(counter)
243
+ symbols[n] = output
244
+
245
+ from_arg = symbols[n.args[0]]
246
+ to_arg = symbols[n.args[1]]
247
+
248
+ assert isinstance(from_arg, TVar)
249
+ assert isinstance(to_arg, TVar)
250
+
251
+ return [BinConstraintT(from_arg, to_arg, op_consistency),
252
+ BinConstraintT(output, to_arg, op_eq)], counter
253
+
254
+ @register_inference_rule("masked_fill_")
255
+ def masked_fill_inference_rule(n: Node, symbols, constraints, counter):
256
+ """
257
+ Similar to addition. For now we implement the constraints when
258
+ the argument is a boolean tensor. There is also a case for when
259
+ it is a condition. We will leave this out for now.
260
+ """
261
+
262
+ assert isinstance(n.args[0], Node)
263
+ assert isinstance(n.args[1], Node)
264
+
265
+ # We will retrieve the type variables from the symbol table
266
+ # and confirm they are tensor variables
267
+
268
+ e1 = symbols[n.args[0]]
269
+ e2 = symbols[n.args[1]]
270
+
271
+ if isinstance(e1, TVar) and isinstance(e2, TVar):
272
+ masked_fill_tensor, counter = gen_tvar(counter)
273
+ symbols[n] = masked_fill_tensor
274
+ return gen_broadcasting_constraints(e1, e2, symbols, counter, masked_fill_tensor)
275
+ else:
276
+ raise NotImplementedError('Not yet implemented')
277
+
278
+
279
+ @register_inference_rule(torch.nn.functional.embedding)
280
+ def embedding_inference_rule_functional(n: Node, symbols, constraints, counter):
281
+ assert isinstance(n.args[0], Node)
282
+
283
+ embedding_dim_weights = symbols[n.args[1]]
284
+
285
+ # will treat this as a static shape. So we will not use matching.
286
+ weight_dims, counter = gen_tensor_dims(2, counter)
287
+ equality_constraint = BinConstraintT(embedding_dim_weights, TensorType(weight_dims), op_eq)
288
+ embedding_dim = weight_dims[1]
289
+ constraints, counter = gen_embedding_rules(n, symbols, embedding_dim, counter)
290
+ return [equality_constraint] + constraints, counter
291
+
292
+
293
+ @register_inference_rule(torch.nn.modules.sparse.Embedding)
294
+ def embedding_inference_rule(n: Node, module_instance, symbols, constraints, counter):
295
+ """
296
+ The output shape differs from the input shape in the last dimension
297
+ """
298
+ assert isinstance(n.args[0], Node)
299
+ return gen_embedding_rules(n, symbols, module_instance.embedding_dim, counter)
300
+
301
+
302
+ def gen_embedding_rules(n: Node, symbols, embedding_dim, counter):
303
+
304
+ embedding_output, counter = gen_tvar(counter)
305
+ symbols[n] = embedding_output
306
+ embedding_input = symbols[n.args[0]]
307
+
308
+ input_dyn = BinConstraintT(embedding_input, Dyn, op_eq)
309
+ output_dyn = BinConstraintT(embedding_output, Dyn, op_eq)
310
+
311
+ c1 = Conj([input_dyn, output_dyn])
312
+ c2 = []
313
+
314
+ for i in range(1, MAX_TENSOR_RANK):
315
+ new_dims, counter = gen_tensor_dims(i, counter)
316
+ nat_constraints = gen_nat_constraints(new_dims)
317
+
318
+ # we consider all tensor sizes and append embedding_dim to the end of the output dimension in all cases
319
+ c_tensor_i = Conj([BinConstraintT(embedding_input, TensorType(new_dims), op_eq),
320
+ BinConstraintT(embedding_output, TensorType(new_dims + [embedding_dim]), op_eq)] +
321
+ nat_constraints)
322
+ c2.append(c_tensor_i)
323
+
324
+ return [Disj([c1, Disj(c2)])], counter
325
+
326
+
327
+ @register_inference_rule(torch.tensor)
328
+ def tensor_inference_rule(n: Node, symbols, constraints, counter):
329
+ """
330
+ If the tensor is a scalar, we will skip it since we
331
+ do not support scalars yet. We will add support in the future
332
+ if it's needed. For our examples so far, scalars are not needed.
333
+ """
334
+ return [], counter
335
+
336
+
337
+ @register_inference_rule("reshape")
338
+ @register_inference_rule("view")
339
+ def view_inference_rule(n: Node, symbols, constraints, counter):
340
+ """
341
+ Similar to reshape but with an extra condition on the strides
342
+ """
343
+ assert isinstance(n.args[0], Node)
344
+
345
+ # generate the new variable
346
+ my_view, counter = gen_tvar(counter)
347
+ symbols[n] = my_view
348
+
349
+
350
+ src_var = symbols[n.args[0]]
351
+ t2 = [symbols[elem] if isinstance(elem, Node) else elem for elem in n.args[1:]] # target shape
352
+ t2_type = []
353
+ num_constraints = []
354
+
355
+ for t in t2:
356
+ if t == -1:
357
+ var, counter = gen_dvar(counter)
358
+ t2_type.append(var)
359
+ num_constraints.append(BinConstraintD(var, Dyn, op_neq))
360
+
361
+ else:
362
+ num_constraints.append(BinConstraintD(t, Dyn, op_neq))
363
+ t2_type.append(t)
364
+
365
+ t2_type = TensorType(t2_type) # type: ignore[assignment]
366
+
367
+ c1 = BinConstraintT(my_view, t2_type, op_eq)
368
+ c2 = CanReshape(src_var, t2_type)
369
+
370
+ # TODO: add the extra check mentioned here:
371
+ # https://pytorch.org/docs/stable/generated/torch.Tensor.view.html#torch.Tensor.view
372
+
373
+ return [c1, c2] + num_constraints, counter # type: ignore[operator]
374
+
375
+
376
+ @register_inference_rule("size")
377
+ def size_inference_rule(n: Node, symbols, constraints, counter):
378
+ """
379
+ The constraint is just lhs = rhs.
380
+ Ex: size = input_ids.size()
381
+ """
382
+
383
+
384
+ if len(n.args) == 1:
385
+ # generate the new variable
386
+ size, counter = gen_tvar(counter)
387
+ symbols[n] = size
388
+ input = symbols[n.args[0]]
389
+ c = BinConstraintT(input, size, op_eq)
390
+ return [c], counter
391
+
392
+ elif len(n.args) == 2:
393
+ # TODO: review this rule; should input = dyn; output = dyn be included here?
394
+ if isinstance(n.args[1], int):
395
+ # generate the new variable
396
+ size_index, counter = gen_dvar(counter)
397
+ symbols[n] = size_index
398
+ input = symbols[n.args[0]]
399
+ c2 = [GetItem(i + 1, n.args[1], size_index, input) for i in range(MAX_TENSOR_RANK)]
400
+ c3 = BinConstraintD(0, size_index, op_leq)
401
+
402
+ input_dyn = BinConstraintT(input, Dyn, op_eq)
403
+ output_dyn = BinConstraintD(size_index, Dyn, op_eq)
404
+ c1 = Conj([input_dyn, output_dyn])
405
+
406
+ return [Disj([c1, Conj([Disj(c2), c3])])], counter
407
+
408
+ else:
409
+ raise NotImplementedError
410
+
411
+ else:
412
+ raise NotImplementedError
413
+
414
+
415
+ def range_check(i, n):
416
+ """
417
+ Checks if an index i is within range of a size n list
418
+ Args:
419
+ i: index
420
+ n: list size
421
+
422
+ Returns: Boolean
423
+ """
424
+ if i >= 0:
425
+ return T() if i < n else F()
426
+ else:
427
+ return T() if i >= n else F()
428
+
429
+
430
+ @register_inference_rule(torch.cumsum)
431
+ def cumsum_inference_rule(n: Node, symbols, constraints, counter):
432
+ """
433
+ Input and output shapes should be equal
434
+ We should verify that the index is valid
435
+ """
436
+ assert isinstance(n.args[0], Node)
437
+ arg_1 = n.args[1] if len(n.args) > 1 else n.kwargs["dim"]
438
+ assert isinstance(arg_1, int)
439
+
440
+ output, counter = gen_tvar(counter)
441
+ symbols[n] = output
442
+ input = symbols[n.args[0]]
443
+
444
+ input_dyn = BinConstraintT(input, Dyn, op_eq)
445
+ output_dyn = BinConstraintT(output, Dyn, op_eq)
446
+ c1 = Conj([input_dyn, output_dyn])
447
+ c2 = []
448
+ for i in range(1, MAX_TENSOR_RANK + 1):
449
+ new_dims, counter = gen_tensor_dims(i, counter)
450
+
451
+ nat_constraints = gen_nat_constraints(new_dims)
452
+
453
+ c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims), op_eq),
454
+ BinConstraintT(output, TensorType(new_dims), op_eq)] +
455
+ [range_check(arg_1, i)] + nat_constraints)
456
+
457
+ c2.append(c_tensor_i)
458
+ dyn_or_tensor = Disj([c1, Disj(c2)])
459
+ return [dyn_or_tensor], counter
460
+
461
+
462
+ @register_inference_rule(_assert_is_none)
463
+ def assert_inference_rule(n: Node, symbols, constraints, counter):
464
+ assert len(n.users) == 0
465
+ return [], counter
466
+
467
+
468
+ @register_inference_rule(operator.getitem)
469
+ def getitem_inference_rule(n: Node, symbols, constraints, counter):
470
+ assert isinstance(n.args[0], Node)
471
+
472
+ # dimension output case
473
+ if isinstance(n.args[1], int):
474
+ # create and store the new dimension variable
475
+ get_item_output, counter = gen_dvar(counter)
476
+ symbols[n] = get_item_output
477
+
478
+ # retrieve arg variables
479
+ get_item_arg = symbols[n.args[0]]
480
+ assert isinstance(get_item_arg, TVar)
481
+
482
+
483
+ # if the input is dynamic, we accept any index and return
484
+ # a dynamic dimension as output
485
+ input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq)
486
+ output_dyn = BinConstraintD(get_item_output, Dyn, op_eq)
487
+ c1 = Conj([input_dyn, output_dyn])
488
+
489
+ # if the input is a tensor,
490
+ # generate a getItem constraint which will be expanded based on the
491
+ # tensor dimension.
492
+
493
+ c2 = [GetItem(i + 1, n.args[1], get_item_output, get_item_arg) for i in range(MAX_TENSOR_RANK)]
494
+
495
+
496
+ # since the output is a dimension, we make sure it's a natural number
497
+ # added as a conjunction to the disjunction of c2
498
+ c3 = BinConstraintD(0, get_item_output, op_leq)
499
+ return [Disj([c1, Conj([Disj(c2), c3])])], counter
500
+
501
+ # tensor output case
502
+ elif isinstance(n.args[1], tuple):
503
+ # create and store the new tensor variable
504
+ get_item_output, counter = gen_tvar(counter)
505
+ symbols[n] = get_item_output
506
+
507
+ # retrieve arg variables
508
+ if n.args[0] in symbols:
509
+ get_item_arg = symbols[n.args[0]]
510
+ assert isinstance(get_item_arg, TVar)
511
+
512
+ input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq)
513
+ output_dyn = BinConstraintT(get_item_output, Dyn, op_eq) # type: ignore[assignment]
514
+ c1 = Conj([input_dyn, output_dyn])
515
+
516
+ c2 = [GetItemTensor(i + 1, n.args[1], get_item_output, get_item_arg) # type: ignore[misc]
517
+ for i in range(MAX_TENSOR_RANK)]
518
+ else:
519
+ # TODO: we should figure out why there is a key-error here.
520
+ return [], counter
521
+
522
+ return [Disj([c1, *c2])], counter
523
+
524
+ else:
525
+ raise RuntimeError('Method not yet implemented')
526
+
527
+
528
+ @register_inference_rule(operator.gt)
529
+ def gt_inference_rule(n: Node, symbols, constraints, counter):
530
+ assert isinstance(n.args[0], (Node, int))
531
+ assert isinstance(n.args[1], (Node, int))
532
+
533
+ # We make sure this node will not be used again. We do not
534
+ # generate a constraint about that node. Only about the operands.
535
+
536
+ e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
537
+ e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]
538
+
539
+ if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
540
+ if isinstance(e1, TVar) and isinstance(e2, TVar):
541
+ gt_tensor, counter = gen_tvar(counter)
542
+ symbols[n] = gt_tensor
543
+ return gen_broadcasting_constraints(e1, e2, symbols, counter, gt_tensor)
544
+
545
+ elif isinstance(e1, DVar) and isinstance(e2, DVar):
546
+ # This is meant to be used for flow analysis only
547
+ gt_constraint = BinConstraintD(e1, e2, op_gt)
548
+
549
+ my_gt, counter = gen_bvar(counter)
550
+ equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
551
+ return [equality_constraint], counter
552
+
553
+ else:
554
+ raise RuntimeError('Sort Mismatch')
555
+
556
+ elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
557
+ if isinstance(e1, DVar):
558
+ # This is meant to be used for flow analysis only
559
+ gt_constraint = BinConstraintD(e1, e2, op_gt)
560
+
561
+ my_gt, counter = gen_bvar(counter)
562
+ equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
563
+ return [equality_constraint], counter
564
+
565
+ elif isinstance(e1, TVar) and isinstance(e2, int):
566
+ # then we made the wrong assumption about the argument being a tensor
567
+ # so we should fix the assumption
568
+ warnings.warn(f'Made the wrong assumption for node {n}. Correctness not guaranteed.')
569
+
570
+ new_e1, counter = gen_dvar(counter)
571
+ symbols[n.args[0]] = new_e1
573
+
574
+ gt_constraint = BinConstraintD(new_e1, e2, op_gt)
575
+
576
+ my_gt, counter = gen_bvar(counter)
577
+ equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
578
+ return [equality_constraint], counter
579
+
580
+ else:
581
+ raise NotImplementedError('Method not yet implemented')
582
+
583
+ else:
584
+ raise NotImplementedError('Method not yet implemented')
585
+
586
+
587
+ @register_inference_rule(operator.eq)
588
+ def eq_inference_rule(n: Node, symbols, constraints, counter):
589
+ assert isinstance(n.args[0], (Node, int))
590
+ assert isinstance(n.args[1], (Node, int))
591
+
592
+ e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
593
+ e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]
594
+
595
+ if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
596
+ if isinstance(e1, TVar) and isinstance(e2, TVar):
597
+ eq_tensor, counter = gen_tvar(counter)
598
+ symbols[n] = eq_tensor
599
+ return gen_broadcasting_constraints(e1, e2, symbols, counter, eq_tensor)
600
+
601
+ elif isinstance(e1, DVar) and isinstance(e2, DVar):
602
+ # This is meant to be used for flow analysis only
603
+ eq_constraint = BinConstraintD(e1, e2, op_eq)
604
+
605
+ my_eq, counter = gen_bvar(counter)
606
+ equality_constraint = BinConstraintD(my_eq, eq_constraint, op_eq)
607
+ return [equality_constraint], counter
608
+
609
+ else:
610
+ raise RuntimeError('Sort Mismatch')
611
+
612
+ elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
613
+ if isinstance(e1, DVar):
614
+ # This is meant to be used for flow analysis only
615
+ eq_constraint = BinConstraintD(e1, e2, op_eq)
616
+
617
+ my_eq, counter = gen_bvar(counter)
618
+ equality_constraint = BinConstraintD(my_eq, eq_constraint, op_eq)
619
+ return [equality_constraint], counter
620
+ else:
621
+ raise NotImplementedError('Method not yet implemented')
622
+ else:
623
+ raise NotImplementedError('Method not yet implemented')
624
+
625
+ @register_inference_rule(operator.ne)
626
+ def neq_inference_rule(n: Node, symbols, constraints, counter):
627
+ """
628
+ Translates to inconsistent in gradual types.
629
+ To prove inequality, we should prove that
630
+ tensors are either different sizes or
631
+ disagree on at least one dimension
632
+
633
+ This is a WIP (it works when the condition
634
+ is false; we are working on making this operation work
635
+ when the condition is true as well)
636
+ """
637
+ assert isinstance(n.args[0], Node)
638
+ assert isinstance(n.args[1], tuple)
639
+
640
+ # implementing for size 3 and 4
641
+ if len(n.args[1]) == 3:
642
+
643
+ assert isinstance(n.args[1][0], (Node, int))
644
+ assert isinstance(n.args[1][1], (Node, int))
645
+ assert isinstance(n.args[1][2], (Node, int))
646
+
647
+ lhs = symbols[n.args[0]]
648
+
649
+ b, counter = gen_tensor_dims(4, counter)
650
+ input_is_size3 = BinConstraintT(lhs, TensorType([b[0], b[1], b[2]]), op_eq)
651
+
652
+ d1 = n.args[1][0] if isinstance(n.args[1][0], int) else symbols[n.args[1][0]]
653
+ d2 = n.args[1][1] if isinstance(n.args[1][1], int) else symbols[n.args[1][1]]
654
+ d3 = n.args[1][2] if isinstance(n.args[1][2], int) else symbols[n.args[1][2]]
655
+
656
+ # dimensions not equal
657
+ my_ne, counter = gen_bvar(counter)
658
+ neq_1 = BinConstraintD(d1, b[0], op_neq)
659
+ neq_2 = BinConstraintD(d2, b[1], op_neq)
660
+ neq_3 = BinConstraintD(d3, b[2], op_neq)
661
+
662
+ # dimensions inconsistent
663
+ dims_inconsistent1 = Conj([BinConstraintD(d1, Dyn, op_neq), BinConstraintD(b[0], Dyn, op_neq), neq_1])
664
+ dims_inconsistent2 = Conj([BinConstraintD(d2, Dyn, op_neq), BinConstraintD(b[1], Dyn, op_neq), neq_2])
665
+ dims_inconsistent3 = Conj([BinConstraintD(d3, Dyn, op_neq), BinConstraintD(b[2], Dyn, op_neq), neq_3])
666
+
667
+ dims_inconsistent = Disj([dims_inconsistent1, dims_inconsistent2, dims_inconsistent3])
668
+
669
+ # we are covering size 3 and 4 only for now
670
+ ne_constraint = Conj([input_is_size3, dims_inconsistent])
671
+
672
+ my_ne, counter = gen_bvar(counter)
673
+ equality_constraint = BinConstraintD(my_ne, ne_constraint, op_eq)
674
+
675
+ elif len(n.args[1]) == 4:
676
+
677
+ assert isinstance(n.args[1][0], (Node, int))
678
+ assert isinstance(n.args[1][1], (Node, int))
679
+ assert isinstance(n.args[1][2], (Node, int))
680
+ assert isinstance(n.args[1][3], (Node, int))
681
+
682
+ lhs = symbols[n.args[0]]
683
+
684
+ b1, counter = gen_dvar(counter)
685
+ b2, counter = gen_dvar(counter)
686
+ b3, counter = gen_dvar(counter)
687
+ b4, counter = gen_dvar(counter)
688
+
689
+ input_is_size4 = BinConstraintT(lhs, TensorType([b1, b2, b3, b4]), op_eq)
690
+
691
+ d1 = n.args[1][0] if isinstance(n.args[1][0], int) else symbols[n.args[1][0]]
692
+ d2 = n.args[1][1] if isinstance(n.args[1][1], int) else symbols[n.args[1][1]]
693
+ d3 = n.args[1][2] if isinstance(n.args[1][2], int) else symbols[n.args[1][2]]
694
+ d4 = n.args[1][3] if isinstance(n.args[1][3], int) else symbols[n.args[1][3]]
695
+
696
+ # dimensions not equal
697
+ my_ne, counter = gen_bvar(counter)
698
+ neq_1 = BinConstraintD(d1, b1, op_neq)
699
+ neq_2 = BinConstraintD(d2, b2, op_neq)
700
+ neq_3 = BinConstraintD(d3, b3, op_neq)
701
+ neq_4 = BinConstraintD(d4, b4, op_neq)
702
+
703
+ # dimensions to inconsistent
704
+ dims_inconsistent1 = Conj([BinConstraintD(d1, Dyn, op_neq), BinConstraintD(b1, Dyn, op_neq), neq_1])
705
+ dims_inconsistent2 = Conj([BinConstraintD(d2, Dyn, op_neq), BinConstraintD(b2, Dyn, op_neq), neq_2])
706
+ dims_inconsistent3 = Conj([BinConstraintD(d3, Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq), neq_3])
707
+ dims_inconsistent4 = Conj([BinConstraintD(d4, Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq), neq_4])
708
+
709
+ dims_inconsistent = Disj([dims_inconsistent1, dims_inconsistent2, dims_inconsistent3, dims_inconsistent4])
710
+
711
+ ne_constraint = Conj([input_is_size4, dims_inconsistent])
712
+
713
+ my_ne, counter = gen_bvar(counter)
714
+
715
+ equality_constraint = BinConstraintD(my_ne, ne_constraint, op_eq)
716
+
717
+ else:
718
+ raise NotImplementedError('Method not yet implemented')
719
+
720
+ return [equality_constraint], counter
721
+
722
+
723
+ @register_inference_rule(operator.lt)
724
+ def lt_inference_rule(n: Node, symbols, constraints, counter):
725
+ assert isinstance(n.args[0], (Node, int))
726
+ assert isinstance(n.args[1], (Node, int))
727
+
728
+ # We make sure this node will not be used again. We do not
729
+ # generate a constraint about that node. Only about the operands.
730
+
731
+ e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
732
+ e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]
733
+
734
+ if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
735
+ if isinstance(e1, TVar) and isinstance(e2, TVar):
736
+ lt_tensor, counter = gen_tvar(counter)
737
+ symbols[n] = lt_tensor
738
+ return gen_broadcasting_constraints(e1, e2, symbols, counter, lt_tensor)
739
+
740
+ elif isinstance(e1, DVar) and isinstance(e2, DVar):
741
+ # This is meant to be used for flow analysis only
742
+ lt_constraint = BinConstraintD(e1, e2, op_lt)
743
+
744
+ my_lt, counter = gen_bvar(counter)
745
+ equality_constraint = BinConstraintD(my_lt, lt_constraint, op_eq)
746
+ return [equality_constraint], counter
747
+
748
+ else:
749
+ raise RuntimeError('Sort Mismatch')
750
+
751
+ elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
752
+ if isinstance(e1, DVar):
753
+ # This is meant to be used for flow analysis only
754
+ lt_constraint = BinConstraintD(e1, e2, op_lt)
755
+
756
+ my_lt, counter = gen_bvar(counter)
757
+ equality_constraint = BinConstraintD(my_lt, lt_constraint, op_eq)
758
+ return [equality_constraint], counter
759
+ else:
760
+ raise NotImplementedError('Method not yet implemented')
761
+
762
+ else:
763
+ raise NotImplementedError('Method not yet implemented')
764
+
765
+
766
+ @register_inference_rule(torch.full)
767
+ def full_inference_rule(n: Node, symbols, constraints, counter):
768
+ full, counter = gen_tvar(counter)
769
+ symbols[n] = full
770
+ res = []
771
+
772
+ assert isinstance(n.args[0], Iterable)
773
+ for arg in n.args[0]:
774
+ dim = arg if isinstance(arg, int) else symbols[arg]
775
+ res.append(dim)
776
+ c = BinConstraintT(full, TensorType(list(res)), op_eq) # type: ignore[arg-type]
777
+ return [c], counter
778
+
779
+
780
+ # TODO normalize index
781
+ @register_inference_rule(torch.arange)
782
+ def arange_inference_rule(n: Node, symbols, constraints, counter):
783
+ start = 0
784
+ step = 1
785
+
786
+ if len(n.args) == 1:
787
+ end = symbols[n.args[0]]
788
+ else:
789
+ raise NotImplementedError('Not yet implemented')
790
+
791
+ # int((end - start) / step)
792
+ d1, counter = gen_dvar(counter)
793
+ size_constraint = BinConstraintD(d1, BinConstraintD(BinConstraintD(end, start, op_sub), step, op_div), op_eq)
794
+ arange, counter = gen_tvar(counter)
795
+ symbols[n] = arange
796
+
797
+ # either one of the parameters is Dyn (then the output size is Dyn) or they are all numbers
798
+ c1 = Disj([BinConstraintD(end, Dyn, op_eq),
799
+ BinConstraintD(start, Dyn, op_eq),
800
+ BinConstraintD(step, Dyn, op_eq)])
801
+ c2 = BinConstraintD(d1, Dyn, op_eq)
802
+ both_dyn = Conj([c1, c2])
803
+
804
+ c11 = Conj([BinConstraintD(end, Dyn, op_neq),
805
+ BinConstraintD(start, Dyn, op_neq),
806
+ BinConstraintD(step, Dyn, op_neq)])
807
+ c22 = BinConstraintD(d1, Dyn, op_neq)
808
+ both_numbers = Conj([c11, c22, size_constraint])
809
+
810
+ return [BinConstraintT(arange, TensorType([d1]), op_eq), Disj([both_dyn, both_numbers])], counter
811
+
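The size constraint above encodes `size == int((end - start) / step)` with the defaults `start = 0` and `step = 1`, and the two disjuncts cover the case where a parameter (and hence the size) is Dyn versus the case where everything is a concrete number. A quick numeric check of the static case (illustrative only):

    import torch

    start, step, end = 0, 1, 7
    assert len(torch.arange(end)) == int((end - start) / step) == 7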
812
+ def gen_broadcasting_constraints(e1, e2, symbols, counter, output_var):
813
+ # additional vars that don't correspond to expressions
814
+ e11, counter = gen_tvar(counter)
815
+ e22, counter = gen_tvar(counter)
816
+
817
+ # generate constraints
818
+ c1 = TGreatestUpperBound(output_var, e11, e22)
819
+ c2 = ApplyBroadcasting(e11, e22, e1, e2)
820
+ c3 = BinConstraintT(e11, e22, op_consistency)
821
+ return [c1, c2, c3], counter
822
+
823
+
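The helper introduces fresh variables `e11`/`e22` for the broadcasted operands, requires the output to be their greatest upper bound, and requires the two broadcasted types to be consistent. The shape arithmetic these constraints approximate is ordinary NumPy/PyTorch broadcasting; a small pure-Python sketch of that arithmetic (illustrative only, this is not the solver's algorithm):

    def broadcast_shape(s1, s2):
        # right-align the two shapes; where they differ, one side must be 1
        rank = max(len(s1), len(s2))
        s1 = (1,) * (rank - len(s1)) + tuple(s1)
        s2 = (1,) * (rank - len(s2)) + tuple(s2)
        out = []
        for a, b in zip(s1, s2):
            if a != b and 1 not in (a, b):
                raise ValueError("shapes are not broadcastable")
            out.append(max(a, b))
        return tuple(out)

    assert broadcast_shape((2, 1, 4), (3, 1)) == (2, 3, 4)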
824
+ @register_inference_rule(operator.mul)
825
+ @register_inference_rule(torch.ne)
826
+ @register_inference_rule("ne")
827
+ @register_inference_rule(torch.add)
828
+ @register_inference_rule(operator.add)
829
+ def broadcasting_inference_rule(n: Node, symbols, constraints, counter):
830
+
831
+ op_code = None
832
+ if n.target == operator.add or n.target == torch.add:
833
+ op_code = op_add
834
+ elif n.target == operator.mul:
835
+ op_code = op_mul
836
+
837
+ if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
838
+ if isinstance(symbols[n.args[0]], TVar) and isinstance(symbols[n.args[1]], TVar):
839
+ my_output, counter = gen_tvar(counter)
840
+ symbols[n] = my_output
841
+ e1 = symbols[n.args[0]]
842
+ e2 = symbols[n.args[1]]
843
+
844
+ return gen_broadcasting_constraints(e1, e2, symbols, counter, my_output)
845
+ else:
846
+ raise NotImplementedError('Method not yet implemented')
847
+
848
+ elif isinstance(n.args[0], Node) and isinstance(n.args[1], (int, float)):
849
+ if isinstance(symbols[n.args[0]], TVar):
850
+ my_output, counter = gen_tvar(counter)
851
+ symbols[n] = my_output
852
+ e1 = symbols[n.args[0]]
853
+ return [BinConstraintT(my_output, e1, op_eq)], counter
854
+ elif isinstance(symbols[n.args[0]], DVar):
855
+ my_output, counter = gen_dvar(counter)
856
+ symbols[n] = my_output
857
+ e1 = symbols[n.args[0]]
858
+
859
+ # we will propagate the runtime value here since this is regular scalar arithmetic
860
+ c = Conj([BinConstraintD(my_output, BinConstraintD(e1, n.args[1], op_code), op_eq),
861
+ BinConstraintD(0, my_output, op_leq)])
862
+ return [c], counter
863
+
864
+ elif isinstance(n.args[1], Node) and isinstance(n.args[0], (int, float)):
865
+ if isinstance(symbols[n.args[1]], TVar):
866
+ my_output, counter = gen_tvar(counter)
867
+ symbols[n] = my_output
868
+ e2 = symbols[n.args[1]]
869
+ return [BinConstraintT(my_output, e2, op_eq)], counter
870
+ elif isinstance(symbols[n.args[1]], DVar):
871
+ my_output, counter = gen_dvar(counter)
872
+ symbols[n] = my_output
873
+ e2 = symbols[n.args[1]]
874
+
875
+ # we will propagate the runtime value here since this is regular scalar arithmetic
876
+ c = Conj([BinConstraintD(my_output, BinConstraintD(e2, n.args[0], op_code), op_eq),
877
+ BinConstraintD(0, my_output, op_leq)])
878
+ return [c], counter
879
+
880
+ else:
881
+ raise NotImplementedError('Method not yet implemented')
882
+
883
+ else:
884
+ # TODO generate add constraints for scalar addition
885
+ raise NotImplementedError('Addition not yet implemented')
886
+
887
+
888
+ @register_inference_rule(torch.flatten)
889
+ def flatten_inference_rule(n: Node, symbols, constraints, counter):
890
+ assert isinstance(n.args[0], Node)
891
+
892
+ # generate the new variable
893
+ flattened, counter = gen_tvar(counter)
894
+ symbols[n] = flattened
895
+
896
+ input = symbols[n.args[0]]
897
+
898
+ # set the default start and end dims
899
+ start_dim = 1
900
+ end_dim = -1
901
+
902
+ if len(n.args) > 1:
903
+ assert isinstance(n.args[1], int)
904
+ start_dim = n.args[1]
905
+
906
+ if len(n.args) > 2:
907
+ assert isinstance(n.args[2], int)
908
+ end_dim = n.args[2]
909
+
910
+ c1 = BinConstraintT(input, Dyn, op_eq)
911
+ c2 = BinConstraintT(flattened, Dyn, op_eq)
912
+ both_dyn = Conj([c1, c2])
913
+
914
+ const = []
915
+ for i in range(1, MAX_TENSOR_RANK + 1):
916
+ c, counter = generate_flatten_constraints(start_dim, end_dim, input, flattened, i, counter)
917
+ const.append(c)
918
+
919
+ return [Disj([both_dyn, *const])], counter
920
+
921
+
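With the defaults `start_dim = 1` and `end_dim = -1`, flattening keeps the batch dimension and collapses everything after it; the `generate_flatten_constraints` calls above enumerate this behaviour for every possible input rank. Runtime intuition (illustrative):

    import torch

    x = torch.ones(2, 3, 4)
    assert torch.flatten(x, 1).shape == (2, 12)   # default start_dim=1 keeps the batch dim
    assert torch.flatten(x).shape == (24,)        # start_dim=0 collapses everything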
922
+ @register_inference_rule(torch.nn.functional.layer_norm)
923
+ def layer_norm_functional(n: Node, symbols, constraints, counter):
924
+ """
925
+ We generate the constraint: input = output
926
+ """
927
+ assert isinstance(n.args[0], Node)
928
+ return gen_layer_norm_constraints(n, n.args[1], symbols, counter)
929
+
930
+
931
+ @register_inference_rule(torch.nn.LayerNorm)
932
+ def layer_norm_inference_rule(n: Node, module_instance, symbols, constraints, counter):
933
+ """
934
+ Input and output shapes should be equal.
935
+ Input should be consistent with the normalized_shape
936
+ """
937
+ assert isinstance(n.args[0], Node)
938
+ return gen_layer_norm_constraints(n, module_instance.normalized_shape, symbols, counter)
939
+
940
+
941
+ def gen_layer_norm_constraints(n: Node, normalized_shape, symbols, counter):
942
+ output, counter = gen_tvar(counter)
943
+ symbols[n] = output
944
+ input = symbols[n.args[0]]
945
+
946
+ input_dyn = BinConstraintT(input, Dyn, op_eq)
947
+ output_dyn = BinConstraintT(output, Dyn, op_eq)
948
+
949
+ c1 = Conj([input_dyn, output_dyn])
950
+
951
+ c2 = []
952
+ for i in range(1, MAX_TENSOR_RANK + 1):
953
+ new_dims_rhs, counter = gen_tensor_dims(i, counter)
954
+ nat_constraints = gen_nat_constraints(new_dims_rhs)
955
+
956
+ c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims_rhs), op_eq),
957
+ BinConstraintT(output, TensorType(new_dims_rhs), op_eq)] +
958
+ add_layer_norm_constraints(new_dims_rhs, list(normalized_shape)) +
959
+ nat_constraints)
960
+ c2.append(c_tensor_i)
961
+ return [Disj([c1, Disj(c2)])], counter
962
+
963
+ @register_inference_rule(torch.nn.Dropout)
964
+ @register_inference_rule(torch.nn.ReLU)
965
+ def relu_inference_rule(n: Node, module_instance, symbols, constraints, counter):
966
+ """
967
+ Input and output shapes should be equal.
968
+ """
969
+ assert isinstance(n.args[0], Node)
970
+ output, counter = gen_tvar(counter)
971
+ symbols[n] = output
972
+ input = symbols[n.args[0]]
973
+ assert isinstance(input, TVar)
974
+ return [BinConstraintT(input, output, op_eq)], counter
975
+
976
+
977
+ @register_inference_rule(torch.nn.Linear)
978
+ def linear_inference_rule(n: Node, module_instance, symbols, constraints, counter):
979
+ """
980
+ Input and output sizes should be the same except for the last dimension
981
+ If the input is Dyn, then so should the output
982
+ """
983
+ assert isinstance(n.args[0], Node)
984
+ return linear_constraints(n, module_instance.in_features, module_instance.out_features, symbols, counter)
985
+
986
+
987
+ @register_inference_rule("dim") # type: ignore[attr-defined]
988
+ def torch_dim_inference_rule(n: Node, symbols, constraints, counter):
989
+ assert isinstance(n.args[0], Node)
990
+ my_dim, counter = gen_dvar(counter)
991
+ symbols[n] = my_dim
992
+ input = symbols[n.args[0]]
993
+
994
+ input_dyn = BinConstraintT(input, Dyn, op_eq)
995
+ output_dyn = BinConstraintD(my_dim, Dyn, op_eq)
996
+
997
+ c1 = []
998
+
999
+ for i in range(1, MAX_TENSOR_RANK + 1):
1000
+ new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
1001
+
1002
+ c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims_rhs_1), op_eq),
1003
+ BinConstraintD(my_dim, i, op_eq)])
1004
+ c1.append(c_tensor_i)
1005
+
1006
+ return [Disj([Conj([input_dyn, output_dyn]), Disj(c1)])], counter
1007
+
1008
+
1009
+ @register_inference_rule(torch._C._nn.linear) # type: ignore[attr-defined]
1010
+ def torch_linear_inference_rule(n: Node, symbols, constraints, counter):
1011
+ assert isinstance(n.args[0], Node)
1012
+ weight_dims, counter = gen_tensor_dims(2, counter)
1013
+ equality_constraint = BinConstraintT(symbols[n.args[1]], TensorType(weight_dims), op_eq)
1014
+ constraints, counter = linear_constraints(n, weight_dims[1], weight_dims[0], symbols, counter)
1015
+ return [equality_constraint] + constraints, counter
1016
+
1017
+
1018
+ def linear_constraints(n: Node, in_features, out_features, symbols, counter):
1019
+ linear_output, counter = gen_tvar(counter)
1020
+ symbols[n] = linear_output
1021
+ linear_input = symbols[n.args[0]]
1022
+
1023
+ input_dyn = BinConstraintT(linear_input, Dyn, op_eq)
1024
+ output_dyn = BinConstraintT(linear_output, Dyn, op_eq)
1025
+
1026
+ c1 = Conj([input_dyn, output_dyn])
1027
+
1028
+ c2 = []
1029
+ for i in range(1, MAX_TENSOR_RANK + 1):
1030
+ new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
1031
+ new_dims_rhs_2, counter = gen_tensor_dims(i, counter)
1032
+
1033
+ nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)
1034
+
1035
+ c_tensor_i = Conj([BinConstraintT(linear_input, TensorType(new_dims_rhs_1), op_eq),
1036
+ BinConstraintT(linear_output, TensorType(new_dims_rhs_2), op_eq)] +
1037
+ add_linear_constraints(new_dims_rhs_1, new_dims_rhs_2, in_features, out_features) +
1038
+ nat_constraints)
1039
+ c2.append(c_tensor_i)
1040
+ return [Disj([c1, Disj(c2)])], counter
1041
+
1042
+ def add_layer_norm_constraints(input_dim, normalized_dim):
1043
+ """
1044
+ The constraints say that the type has the form: [*, 1024, 1024]
1045
+ while the normalized_dim has the form [1024, 1024]
1046
+ Args:
1047
+ input_dim: Input shape of layer norm
1048
+ normalized_dim: normalized_dim parameter of the module instance
1049
+
1050
+ """
1051
+
1052
+ # in this case we return false since there's a pattern mismatch
1053
+ if len(normalized_dim) > len(input_dim):
1054
+ return [F()]
1055
+
1056
+ else:
1057
+ constraints = []
1058
+ for i, n in zip(reversed(input_dim), reversed(normalized_dim)):
1059
+ constraints.append(BinConstraintD(i, n, op_consistency))
1060
+ return constraints
1061
+
1062
+
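Concretely, the zip over the reversed lists aligns the input shape with `normalized_shape` from the right, so an input of type [*, 1024, 1024] is compatible with normalized_shape = [1024, 1024], while a mismatch in the trailing dimensions makes the conjunct false. The runtime behaviour this models (illustrative):

    import torch

    ln = torch.nn.LayerNorm([4, 5])
    assert ln(torch.ones(2, 3, 4, 5)).shape == (2, 3, 4, 5)   # trailing dims match normalized_shape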
1063
+ def add_linear_constraints(dims1, dims2, in_features, out_features):
1064
+ assert len(dims1) == len(dims2)
1065
+ constraints = []
1066
+ for i in range(len(dims1)):
1067
+ if i == len(dims1) - 1:
1068
+ constraints.append(BinConstraintD(dims1[i], in_features, op_consistency))
1069
+ constraints.append(BinConstraintD(dims2[i], out_features, op_eq))
1070
+ else:
1071
+ constraints.append(BinConstraintD(dims1[i], dims2[i], op_eq))
1072
+
1073
+ return constraints
1074
+
1075
+
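The loop above only constrains the last dimension: on the input it must be consistent with `in_features`, on the output it must equal `out_features`, and every other dimension is carried through unchanged, matching how nn.Linear behaves on batched inputs (illustrative):

    import torch

    lin = torch.nn.Linear(8, 16)
    assert lin(torch.ones(2, 5, 8)).shape == (2, 5, 16)   # only the last dimension changes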
1076
+ @register_inference_rule(torch.reshape)
1077
+ def reshape_inference_rule(n: Node, symbols, constraints, counter):
1078
+ assert isinstance(n.args[0], Node)
1079
+
1080
+ # generate the new variable
1081
+ my_reshape, counter = gen_tvar(counter)
1082
+ symbols[n] = my_reshape
1083
+
1084
+ src_var = symbols[n.args[0]]
1085
+ t2 = n.args[1]
1086
+ t2_type = TensorType([Dyn if elem == -1 else elem for elem in t2]) # type: ignore[union-attr]
1087
+ c1 = BinConstraintT(my_reshape, t2_type, op_eq) # type: ignore[union-attr]
1088
+ c2 = CanReshape(src_var, t2_type)
1089
+
1090
+ return [c1, c2], counter
1091
+
1092
+
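Note that -1 in the reshape target is mapped to Dyn, so that dimension is left unconstrained and `CanReshape` only needs to check that the remaining sizes are compatible with the input. The runtime behaviour being modelled (illustrative):

    import torch

    x = torch.ones(2, 6)
    assert torch.reshape(x, (-1, 4)).shape == (3, 4)   # -1 is inferred, like Dyn in the constraint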
1093
+ @register_inference_rule(BatchNorm2d)
1094
+ def batchnorm_inference_rule(n: Node, module_instance, symbols, constraints, counter):
1095
+ assert isinstance(n.args[0], Node)
1096
+
1097
+ # generate the new variable
1098
+ batchnorm_output, counter = gen_tvar(counter)
1099
+ symbols[n] = batchnorm_output
1100
+ batchnorm_input = symbols[n.args[0]]
1101
+
1102
+ # dim vars
1103
+ d1, counter = gen_dvar(counter)
1104
+ d2, counter = gen_dvar(counter)
1105
+ d3, counter = gen_dvar(counter)
1106
+ d4, counter = gen_dvar(counter)
1107
+
1108
+ nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
1109
+
1110
+ c1 = BinConstraintT(batchnorm_input, TensorType([d1, d2, d3, d4]), op_matching)
1111
+ c2 = BinConstraintT(batchnorm_input, batchnorm_output, op_eq)
1112
+ return [c1, c2, *nat_constraints], counter
1113
+
1114
+
1115
+ @register_inference_rule(torch.nn.AdaptiveAvgPool2d)
1116
+ def adaptive_inference_rule(n: Node, module_instance, symbols, constraints, counter):
1117
+ assert isinstance(n.args[0], Node)
1118
+
1119
+ avg_pool, counter = gen_tvar(counter)
1120
+
1121
+ symbols[n] = avg_pool
1122
+ input_var = symbols[n.args[0]]
1123
+
1124
+ # dim vars
1125
+ d1, counter = gen_dvar(counter)
1126
+ d2, counter = gen_dvar(counter)
1127
+ d3, counter = gen_dvar(counter)
1128
+ d4, counter = gen_dvar(counter)
1129
+ nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
1130
+ c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)
1131
+ c2 = BinConstraintT(avg_pool, TensorType([d1, d2, module_instance.output_size[0], module_instance.output_size[1]]), op_eq)
1132
+
1133
+ return [c1, c2, *nat_constraints], counter
1134
+
1135
+
1136
+ @register_inference_rule(Conv2d)
1137
+ def conv2d_inference_rule(n: Node, module_instance, symbols, constraints, counter):
1138
+ assert isinstance(n.args[0], Node)
1139
+
1140
+ my_conv, counter = gen_tvar(counter)
1141
+ symbols[n] = my_conv
1142
+ input_var = symbols[n.args[0]]
1143
+
1144
+ # dim vars
1145
+ [d1, d2, d3, d4], counter = gen_tensor_dims(MAX_TENSOR_RANK, counter)
1146
+
1147
+ # c1 = Matching(input_var, TensorType([d1, d2, d3, d4]))
1148
+ c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)
1149
+
1150
+ # c2 = DConsistency(module_instance.in_channels, d2)
1151
+ c2 = BinConstraintD(module_instance.in_channels, d2, op_consistency)
1152
+
1153
+ c3 = CalcConv(my_conv, input_var,
1154
+ module_instance.out_channels,
1155
+ module_instance.kernel_size,
1156
+ module_instance.padding,
1157
+ module_instance.stride,
1158
+ module_instance.dilation, [d1, d2, d3, d4])
1159
+
1160
+ nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
1161
+
1162
+ return [c1, c2, c3, *nat_constraints], counter
1163
+
1164
+
1165
+ @register_inference_rule(torch.nn.MaxPool2d)
1166
+ def maxpool_inference_rule(n: Node, module_instance, symbols, constraints, counter):
1167
+ assert isinstance(n.args[0], Node)
1168
+ maxpool, counter = gen_tvar(counter)
1169
+ symbols[n] = maxpool
1170
+ input_var = symbols[n.args[0]]
1171
+
1172
+ # dim vars
1173
+ [d1, d2, d3, d4], counter = gen_tensor_dims(MAX_TENSOR_RANK, counter)
1174
+
1175
+ c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)
1176
+
1177
+ c2 = CalcMaxPool(maxpool, input_var, module_instance.kernel_size, module_instance.padding,
1178
+ module_instance.stride, module_instance.dilation, [d1, d2, d3, d4])
1179
+
1180
+ nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
1181
+
1182
+ return [c1, c2, *nat_constraints], counter
1183
+
1184
+
1185
+ class ConstraintGenerator:
1186
+ def __init__(self, traced, graph=None):
1187
+ self.traced = traced # traced or tracer.root
1188
+ self.traced_params = dict(self.traced.named_parameters())
1189
+ self.constraints = []
1190
+ self.symbol_dict = {}
1191
+ self.graph = traced.graph if hasattr(traced, 'graph') else graph
1192
+
1193
+
1194
+ def generate_constraints(self, counter=0):
1195
+ """
1196
+ Iterate through every node and generate constraints
1197
+ Effect: self.constraints will be populated with the final constraints
1198
+ """
1199
+ graph = self.graph
1200
+
1201
+ all_constraints = []
1202
+
1203
+ for n in graph.nodes:
1204
+ (constraints, counter) = self.generate_constraints_node(n, counter)
1205
+ all_constraints += constraints
1206
+
1207
+ return Conj(all_constraints), counter
1208
+
1209
+ def generate_constraints_node(self, n: Node, counter):
1210
+ """
1211
+ Generate constraints for the given node:
1212
+ Currently supported operations:
1213
+ - Reshape
1214
+ - Add
1215
+ - conv2d
1216
+ """
1217
+
1218
+ if n.op == 'placeholder':
1219
+ x, counter = gen_tvar(counter)
1220
+ self.symbol_dict[n] = x
1221
+
1222
+ my_type = n.type
1223
+
1224
+ if n.type != Dyn and (not isinstance(n.type, TensorType)):
1225
+ if n.type == torch.nn.parameter.Parameter:
1226
+ # since we have a parameter, the shape must be static
1227
+ assert 'example_value' in n.meta
1228
+ my_type = TensorType(n.meta['example_value'].size())
1229
+ else:
1230
+ my_type = Dyn
1231
+
1232
+ c1 = BinConstraintT(my_type, x, op_precision)
1233
+ c2 = BinConstraintT(x, MAX_TENSOR_RANK, op_leq)
1234
+ return [c1, c2], counter
1235
+
1236
+ elif n.op == 'call_function':
1237
+ if n.target in _INFERENCE_RULES:
1238
+ return _INFERENCE_RULES[n.target](n, self.symbol_dict, self.constraints, counter)
1239
+ else:
1240
+ raise RuntimeError(f'No inference rule registered for target {n.target}!')
1241
+
1242
+ elif n.op == 'call_module':
1243
+
1244
+ module_instance = self.traced.get_submodule(n.target)
1245
+ if type(module_instance) in _INFERENCE_RULES:
1246
+ return _INFERENCE_RULES[type(module_instance)](n,
1247
+ module_instance,
1248
+ self.symbol_dict,
1249
+ self.constraints, counter)
1250
+ else:
1251
+ raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')
1252
+
1253
+ elif n.op == 'call_method':
1254
+ if n.target in _INFERENCE_RULES:
1255
+ return _INFERENCE_RULES[n.target](n, self.symbol_dict, self.constraints, counter)
1256
+ else:
1257
+ raise RuntimeError(f'No inference rule registered for target {n.target}!')
1258
+
1259
+ elif n.op == 'get_attr':
1260
+ t = self.traced_params.get(n.target, None)
1261
+
1262
+ if isinstance(t, torch.Tensor):
1263
+ if len(t.shape) > 0:
1264
+ res = list(t.shape)
1265
+ attr_type = TensorType(res)
1266
+ output, counter = gen_tvar(counter)
1267
+ self.symbol_dict[n] = output
1268
+ return [BinConstraintT(output, attr_type, op_eq)], counter
1269
+ else:
1270
+ # scalar?
1271
+ return [], counter
1272
+ else:
1273
+ return [], counter
1274
+
1275
+ elif n.op == 'output':
1276
+ return [], counter
1277
+
1278
+ else:
1279
+ raise NotImplementedError(f"Method {n.op} not yet implemented")
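A minimal usage sketch of this class under the rules defined above; `ExampleModule` is a hypothetical module chosen so that every node in its graph (placeholder, the Linear call_module, output) has a registered rule:

    import torch
    from torch.fx import symbolic_trace
    from torch.fx.experimental.migrate_gradual_types.constraint_generator import ConstraintGenerator

    class ExampleModule(torch.nn.Module):          # hypothetical example module
        def __init__(self):
            super().__init__()
            self.lin = torch.nn.Linear(8, 16)

        def forward(self, x):
            return self.lin(x)

    traced = symbolic_trace(ExampleModule())
    constraints, counter = ConstraintGenerator(traced).generate_constraints(counter=0)
    print(constraints)    # a Conj of the per-node constraints, ready for transformation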
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py ADDED
@@ -0,0 +1,1040 @@
1
+ # mypy: ignore-errors
2
+ import copy
3
+ import itertools
4
+ from torch.fx.experimental.migrate_gradual_types.constraint_generator import BinConstraintT, MAX_TENSOR_RANK
5
+ from torch.fx.experimental.migrate_gradual_types.constraint import T, BinConstraintD, Conj, Constraint, DVar, TVar, \
6
+ Transpose
7
+ from torch.fx.experimental.migrate_gradual_types.constraint import Disj, TGreatestUpperBound
8
+ from torch.fx.experimental.migrate_gradual_types.constraint import DGreatestUpperBound
9
+ from torch.fx.experimental.migrate_gradual_types.constraint import CalcConv, CalcMaxPool
10
+ from torch.fx.experimental.migrate_gradual_types.constraint import CalcProduct, CanReshape
11
+ from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, Prod, F, GetItem, GetItemTensor, IndexSelect
12
+ from torch.fx.experimental.migrate_gradual_types.operation import op_eq, op_precision, op_leq, op_matching
13
+ from torch.fx.experimental.migrate_gradual_types.operation import op_consistency, op_neq
14
+ from torch.fx.experimental.migrate_gradual_types.operation import op_mul, op_add, op_sub, op_div, op_mod
15
+ from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar
16
+ from torch.fx.tensor_type import TensorType, Dyn
17
+ from typing import Callable, Dict, List
18
+
19
+ _TRANSFORMATION_RULES: Dict[Constraint, Callable] = {}
20
+
21
+
22
+ def register_transformation_rule(call_target):
23
+ def register(fn):
24
+ if call_target in _TRANSFORMATION_RULES:
25
+ raise RuntimeError(f'Transformation rule already registered for {call_target}!')
26
+ _TRANSFORMATION_RULES[call_target] = fn
27
+ return fn
28
+ return register
29
+
30
+
31
+ def valid_index(index, dims):
32
+ """
33
+ Given a list of dimensions, checks if an index is valid in the list
34
+ """
35
+ try:
36
+ dims[index]
37
+ return T()
38
+ except IndexError:
39
+ return F()
40
+
41
+
42
+ @register_transformation_rule(Transpose)
43
+ def transform_transpose(constraint, counter):
44
+ """
45
+ Similar to a sequence of two index-selects
46
+ """
47
+ dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
48
+ is_valid_index1 = valid_index(constraint.index1, dims)
49
+ is_valid_index2 = valid_index(constraint.index2, dims)
50
+ new_dims = copy.deepcopy(dims)
51
+ nat_constraints = gen_nat_constraints(dims)
52
+
53
+ if is_valid_index1 == T() and is_valid_index2 == T():
54
+ new_dims[constraint.index1] = dims[constraint.index2]
55
+ new_dims[constraint.index2] = dims[constraint.index1]
56
+
57
+ transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
58
+ *nat_constraints,
59
+ is_valid_index1, is_valid_index2,
60
+ BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])
61
+ return transformed_constraint, counter
62
+
63
+
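The swap performed on `new_dims` mirrors what transpose does to shapes at runtime (illustrative):

    import torch

    x = torch.ones(2, 3, 4)
    assert x.transpose(0, 2).shape == (4, 3, 2)   # index1 and index2 are exchanged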
64
+ @register_transformation_rule(IndexSelect)
65
+ def transform_index_select(constraint, counter):
66
+ """
67
+ The constraint considers the given tensor size, checks if the index is valid
68
+ and if so, generates a constraint for replacing the input dimension
69
+ with the required dimension
70
+ """
71
+ dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
72
+ is_valid_index = valid_index(constraint.index, dims)
73
+ nat_constraints = gen_nat_constraints(dims)
74
+
75
+ # if the index is valid then replace the input dimension with the new dimension
76
+ # otherwise the dimension will not be replaced and the clause will contain False
77
+ if is_valid_index == T():
78
+ new_dims = copy.deepcopy(dims)
79
+ new_dims[constraint.index] = constraint.dim_replace
80
+
81
+ transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
82
+ *nat_constraints,
83
+ is_valid_index,
84
+ BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])
85
+
86
+ # print(constraints)
87
+ return transformed_constraint, counter
88
+
89
+
90
+ @register_transformation_rule(GetItem)
91
+ def transform_get_item(constraint, counter):
92
+ """
93
+ generate an equality of the form:
94
+ t = [a1, ..., an]
95
+ then generate constraints that check if the given index is valid
96
+ given this particular tensor size.
97
+ If the index is valid, generate a constraint to get the item
98
+ Note that we already handled the Dyn input case in the previous
99
+ step.
100
+ Args:
101
+ constraint: GetItem which assumes we are getting an item from a tensor (not Dyn)
102
+ counter: variable tracking
103
+ Returns: simplified constraints for GetItem
104
+
105
+ """
106
+ dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
107
+ nat_constraints = gen_nat_constraints(dims)
108
+
109
+
110
+ is_valid_index = valid_index(constraint.index, dims)
111
+
112
+ all_constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
113
+ *nat_constraints,
114
+ is_valid_index]
115
+
116
+ # if the index is valid, we generate a constraint for getting an item
117
+ # otherwise this clause will have been UNSAT due to the wrong index
118
+ if is_valid_index == T():
119
+ all_constraints.append(BinConstraintD(constraint.res, dims[constraint.index], op_eq))
120
+
121
+ return Conj(all_constraints), counter
122
+
123
+ def valid_index_tensor(index, dims):
124
+ """
125
+ if the slice instances exceed the length of the dimensions
126
+ then this is a type error so we return False
127
+ """
128
+ slice_count = 0
129
+ for s in index:
130
+ if isinstance(s, slice):
131
+ slice_count += 1
132
+ if slice_count > len(dims):
133
+ return F()
134
+ else:
135
+ return T()
136
+
137
+ @register_transformation_rule(GetItemTensor)
138
+ def transform_get_item_tensor(constraint, counter):
139
+ """
140
+ When the index is a tuple, then the output will be a tensor
141
+ TODO: we have to check if this is the case for all HF models
142
+
143
+ The cases we are covering here are a tuple with one of:
144
+ - slice with default argument
145
+ - None
146
+
147
+ None appends 1 to the input tensor dimensions
148
+ so each occurrence of 'None' increases the rank by 1
149
+
150
+ slice with default arguments does not change the rank
151
+ """
152
+ assert isinstance(constraint.index_tuple, tuple)
153
+
154
+
155
+ # generate a result tensor of the expected size
156
+ dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
157
+ nat_constraints = gen_nat_constraints(dims)
158
+
159
+ # generate a place-holder list of the right rank
160
+ # where "slice" does not contribute to the rank and "None" does
161
+ none_c = constraint.index_tuple.count(None)
162
+ resulting_tensor_dims = (none_c + len(dims)) * [None]
163
+
164
+ dim_index = 0
165
+ for i in range(len(constraint.index_tuple)):
166
+
167
+ # append 1 to the right location of the resulting tensor
168
+ if constraint.index_tuple[i] is None:
169
+ resulting_tensor_dims[i] = 1
170
+
171
+ elif constraint.index_tuple[i] == slice(None, None, None):
172
+ pass
173
+
174
+ else:
175
+ raise NotImplementedError('Method not yet implemented')
176
+
177
+ # append the remaining dimensions to the right location
178
+ dim_index = 0
179
+ for i in range(len(resulting_tensor_dims)):
180
+ if resulting_tensor_dims[i] is None:
181
+ resulting_tensor_dims[i] = dims[dim_index]
182
+ dim_index += 1
183
+
184
+ # check if the index is valid
185
+ is_valid_index = valid_index_tensor(constraint.index_tuple, dims)
186
+
187
+ # check if the resulting tensor is within bounds
188
+ if len(resulting_tensor_dims) > 4:
189
+ return F(), counter
190
+
191
+ else:
192
+ constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
193
+ BinConstraintT(constraint.res, TensorType(resulting_tensor_dims), op_eq),
194
+ *nat_constraints,
195
+ is_valid_index]
196
+ return Conj(constraints), counter
197
+
198
+
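The placeholder list built above therefore gains one entry per None in the index tuple and keeps an original dimension for every default slice, which is exactly what indexing does at runtime (illustrative):

    import torch

    x = torch.ones(3, 4)
    assert x[None, :, :].shape == (1, 3, 4)   # each None inserts a size-1 dimension
    assert x[:, :].shape == (3, 4)            # default slices leave the rank unchanged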
199
+ @register_transformation_rule(BinConstraintT)
200
+ def generate_binconstraint_t(constraint, counter):
201
+ """
202
+ Transform binary constraints for tensors
203
+ """
204
+
205
+ # precision constraints
206
+ if constraint.op == op_precision:
207
+ if constraint.lhs == Dyn:
208
+ return T(), counter
209
+ elif isinstance(constraint.lhs, TensorType):
210
+ is_fully_static = all(d != Dyn for d in constraint.lhs.__args__)
211
+ if is_fully_static:
212
+ return BinConstraintT(constraint.lhs, constraint.rhs, op_eq), counter
213
+ else:
214
+ new_dims = []
215
+
216
+ for _ in range(len(constraint.lhs.__args__)):
217
+ dim, counter = gen_dvar(counter)
218
+ new_dims.append(dim)
219
+
220
+ new_dim_constraints = [BinConstraintD(old_dim, new_dim, op_precision) for
221
+ new_dim, old_dim in zip(new_dims, constraint.lhs.__args__)] + \
222
+ [BinConstraintT(constraint.rhs, TensorType(new_dims), op_eq)] + \
223
+ [BinConstraintD(1, new_dim, op_leq) for
224
+ new_dim in new_dims]
225
+ return Conj(new_dim_constraints), counter
226
+
227
+ # matching
228
+ elif constraint.op == op_matching:
229
+ assert isinstance(constraint.rhs, TensorType)
230
+ d1 = constraint.rhs.__args__[0]
231
+ d2 = constraint.rhs.__args__[1]
232
+ d3 = constraint.rhs.__args__[2]
233
+ d4 = constraint.rhs.__args__[3]
234
+
235
+ conj = [BinConstraintT(constraint.lhs, Dyn, op_eq),
236
+ BinConstraintD(d1, Dyn, op_eq),
237
+ BinConstraintD(d2, Dyn, op_eq),
238
+ BinConstraintD(d3, Dyn, op_eq),
239
+ BinConstraintD(d4, Dyn, op_eq)]
240
+ return Disj([Conj(conj),
241
+ BinConstraintT(constraint.lhs, TensorType([d1, d2, d3, d4]), op_eq)]), counter
242
+
243
+ elif constraint.op == op_consistency:
244
+ c_dyn = Disj([BinConstraintT(constraint.lhs, Dyn, op_eq), BinConstraintT(constraint.rhs, Dyn, op_eq)])
245
+ [c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4], counter = gen_consistency_constraints(constraint, counter)
246
+
247
+ return Disj([c_dyn, c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4]), counter
248
+
249
+ elif constraint.op == op_leq:
250
+ assert isinstance(constraint.rhs, int)
251
+ disj = [BinConstraintT(constraint.lhs, Dyn, op_eq)]
252
+ for i in range(1, constraint.rhs + 1):
253
+ dims = []
254
+ for j in range(1, i + 1):
255
+ dim_var, counter = gen_dvar(counter)
256
+ dims.append(dim_var)
257
+ disj.append(BinConstraintT(constraint.lhs, TensorType(dims), op_eq))
258
+ return Disj(disj), counter
259
+ else:
260
+ return constraint, counter
261
+
262
+
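For instance, the rank bound `t <= 4` emitted for every placeholder is expanded into a disjunction: either t = Dyn, or t equals a tensor type of rank 1, 2, 3 or 4 built from fresh dimension variables. A schematic sketch of that expansion, with strings standing in for the generated variables (illustrative only):

    def expand_rank_bound(max_rank):
        # mirrors the op_leq branch above: Dyn, or a TensorType of each rank up to max_rank
        disjuncts = ["t = Dyn"]
        for rank in range(1, max_rank + 1):
            dims = [f"d{rank}_{j}" for j in range(1, rank + 1)]
            disjuncts.append(f"t = TensorType([{', '.join(dims)}])")
        return disjuncts

    print(expand_rank_bound(4))   # 5 disjuncts in total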
263
+ @register_transformation_rule(BinConstraintD)
264
+ def generate_binconstraint_d(constraint, counter):
265
+ """
266
+ Transform binary constraints for dimensions
267
+ """
268
+ if constraint.op == op_precision:
269
+ if isinstance(constraint.lhs, int):
270
+ return BinConstraintD(constraint.lhs, constraint.rhs, op_eq), counter
271
+ elif constraint.lhs == Dyn:
272
+ return T(), counter
273
+
274
+ elif constraint.op == op_consistency:
275
+ return Disj([BinConstraintD(constraint.lhs, constraint.rhs, op_eq),
276
+ BinConstraintD(constraint.rhs, Dyn, op_eq), BinConstraintD(constraint.lhs, Dyn, op_eq)]), counter
277
+
278
+ else:
279
+ return constraint, counter
280
+
281
+
282
+ @register_transformation_rule(Conj)
283
+ def generate_conj(constraint, counter):
284
+ """
285
+ Transform conjunctions
286
+ """
287
+ new = []
288
+ for c in constraint.conjucts:
289
+ new_c, counter = transform_constraint(c, counter)
290
+ new.append(new_c)
291
+ return Conj(new), counter
292
+
293
+
294
+ @register_transformation_rule(Disj)
295
+ def generate_disj(constraint, counter):
296
+ """
297
+ Transform disjunctions
298
+ """
299
+ new = []
300
+ for c in constraint.disjuncts:
301
+ new_c, counter = transform_constraint(c, counter)
302
+ new.append(new_c)
303
+ return Disj(new), counter
304
+
305
+
306
+ @register_transformation_rule(TGreatestUpperBound)
307
+ def generate_gub(constraint, counter):
308
+ """
309
+ Transform greatest upper bound for tensors. Results in equality and Greatest Upper Bound
310
+ on dimensions
311
+ """
312
+ c1 = Conj([Disj([BinConstraintT(constraint.rhs1, Dyn, op_eq),
313
+ BinConstraintT(constraint.rhs2, Dyn, op_eq)]), BinConstraintT(constraint.res, Dyn, op_eq)])
314
+
315
+ [c2, c3, c4, c5], counter = gen_greatest_upper_bound(constraint, counter)
316
+
317
+ return Disj([c1, c2, c3, c4, c5]), counter
318
+
319
+
320
+ @register_transformation_rule(DGreatestUpperBound)
321
+ def generate_d_gub(constraint, counter):
322
+ """
323
+ Transform greatest upper bound for dimensions into equality constraints
324
+ """
325
+ c1 = Conj([BinConstraintD(constraint.rhs1, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs2, op_eq)])
326
+ c2 = Conj([BinConstraintD(constraint.rhs2, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)])
327
+ c3 = Conj([BinConstraintD(constraint.rhs2, constraint.rhs1, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)])
328
+ return Disj([c1, c2, c3]), counter
329
+
330
+
331
+ @register_transformation_rule(CalcConv)
332
+ def generate_calc_conv(constraint, counter):
333
+ d, counter = gen_tensor_dims(4, counter)
334
+ conv_result = TensorType([d[0], d[1], d[2], d[3]])
335
+
336
+ # the convolution result is a tensor of size 4
337
+ c1 = BinConstraintT(constraint.conv_result, conv_result, op_eq)
338
+
339
+ # the second dimension of the output is equal to the output channels
340
+ c2 = Conj([BinConstraintD(d[1], constraint.c_out, op_eq), BinConstraintD(d[1], Dyn, op_neq)])
341
+
342
+ # the input corresponds to the output in the first dimension of the convolution
343
+ c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)
344
+
345
+ c4, c5 = calc_last_two_dims(constraint, d)
346
+
347
+ leq_constraints = Conj([BinConstraintD(0, d[0], op_leq),
348
+ BinConstraintD(0, d[1], op_leq),
349
+ BinConstraintD(0, d[2], op_leq),
350
+ BinConstraintD(0, d[3], op_leq)])
351
+
352
+ return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter
353
+
354
+
355
+ @register_transformation_rule(CalcMaxPool)
356
+ def generate_calc_maxpool(constraint, counter):
357
+ """
358
+ Transform maxpool constraints
359
+ """
360
+ d, counter = gen_tensor_dims(4, counter)
361
+ maxpool_result = TensorType([d[0], d[1], d[2], d[3]])
362
+
363
+ # the maxpool result is a tensor of size 4
364
+ c1 = BinConstraintT(constraint.maxpool_result, maxpool_result, op_eq)
365
+
366
+ # the input corresponds to the output in the first and second dimension of maxpool
367
+ c2 = BinConstraintD(constraint.matching_constraint[1], d[1], op_eq)
368
+ c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)
369
+ c4, c5 = calc_last_two_dims(constraint, d)
370
+
371
+ leq_constraints = Conj([BinConstraintD(0, d[0], op_leq),
372
+ BinConstraintD(0, d[1], op_leq),
373
+ BinConstraintD(0, d[2], op_leq),
374
+ BinConstraintD(0, d[3], op_leq)])
375
+
376
+ return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter
377
+
378
+
379
+ @register_transformation_rule(CalcProduct)
380
+ def generate_calc_product(constraint, counter):
381
+ """
382
+ Transform flatten constraints
383
+ """
384
+ start = constraint.start
385
+ end = constraint.end
386
+ dims = constraint.dims_to_flatten
387
+ flattened = constraint.flattened
388
+ n = len(constraint.dims_to_flatten)
389
+
390
+ # this will be evaluated right here
391
+ boundary_check = (0 <= start and start < end and end <= n)
392
+
393
+ c_boundary = T() if boundary_check else F()
394
+
395
+ lhs = dims[0:start]
396
+ rhs = dims[end:]
397
+ mid = dims[start:end]
398
+
399
+ all_possibilities = generate_all_int_dyn_dim_possibilities(mid)
400
+
401
+ all_constraints = []
402
+
403
+ for p in all_possibilities:
404
+ p = list(p)
405
+ # this tells us there is a dynamic variable
406
+ contains_dyn = not all(constraint.op == op_neq for constraint in p)
407
+ if contains_dyn:
408
+ mid_var = [Dyn]
409
+ total_constraints = lhs + mid_var + rhs
410
+ if len(total_constraints) > 4:
411
+ all_constraints.append(F())
412
+ else:
413
+ all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq)] + p))
414
+ else:
415
+ new_var, counter = gen_dvar(counter)
416
+ mid_eq_prod = Conj([BinConstraintD(new_var, Prod(mid), op_eq), BinConstraintD(new_var, Dyn, op_neq)])
417
+ mid_var = [new_var]
418
+ total_constraints = lhs + mid_var + rhs
419
+ if len(total_constraints) > 4:
420
+ all_constraints.append(F())
421
+ else:
422
+ all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq), mid_eq_prod] + p))
423
+
424
+ return Conj([Disj(all_constraints), c_boundary]), counter
425
+
426
+
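For intuition, flattening dimensions start..end-1 replaces that slice of the shape with a single dimension equal to its product (or Dyn if any member is dynamic), subject to the boundary check and the overall rank-4 cap. A small sketch of the fully static case (illustrative):

    def flatten_dims(dims, start, end):
        # mirrors the non-Dyn branch: replace dims[start:end] with their product
        assert 0 <= start < end <= len(dims)
        prod = 1
        for d in dims[start:end]:
            prod *= d
        return dims[:start] + [prod] + dims[end:]

    assert flatten_dims([2, 3, 4, 5], 1, 3) == [2, 12, 5]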
427
+ @register_transformation_rule(CanReshape)
428
+ def generate_reshape(constraint, counter):
429
+ """
430
+ Transform reshape constraints
431
+ """
432
+ d, counter = gen_tensor_dims(4, counter)
433
+
434
+ d1 = d[0]
435
+ d2 = d[1]
436
+ d3 = d[2]
437
+ d4 = d[3]
438
+
439
+ target = constraint.target.__args__
440
+
441
+ is_fully_static = all(d != Dyn for d in target)
442
+
443
+ # dynamic tensor
444
+ c1_dyn = BinConstraintT(constraint.src, Dyn, op_eq)
445
+ c2_tensor1 = BinConstraintT(constraint.src, TensorType([d1]), op_eq)
446
+ c2_tensor2 = BinConstraintT(constraint.src, TensorType([d1, d2]), op_eq)
447
+ c2_tensor3 = BinConstraintT(constraint.src, TensorType([d1, d2, d3]), op_eq)
448
+ c2_tensor4 = BinConstraintT(constraint.src, TensorType([d1, d2, d3, d4]), op_eq)
449
+
450
+ d1_eq_dyn = BinConstraintD(d1, Dyn, op_eq)
451
+ d1_neq_dyn = BinConstraintD(d1, Dyn, op_neq)
452
+
453
+ d2_eq_dyn = BinConstraintD(d2, Dyn, op_eq)
454
+ d2_neq_dyn = BinConstraintD(d2, Dyn, op_neq)
455
+
456
+ d3_eq_dyn = BinConstraintD(d3, Dyn, op_eq)
457
+ d3_neq_dyn = BinConstraintD(d3, Dyn, op_neq)
458
+
459
+ d4_eq_dyn = BinConstraintD(d4, Dyn, op_eq)
460
+ d4_neq_dyn = BinConstraintD(d4, Dyn, op_neq)
461
+
462
+ nat_d1 = BinConstraintD(0, d1, op_leq)
463
+ nat_d2 = BinConstraintD(0, d2, op_leq)
464
+ nat_d3 = BinConstraintD(0, d3, op_leq)
465
+ nat_d4 = BinConstraintD(0, d4, op_leq)
466
+
467
+ if is_fully_static:
468
+ # size 1 tensor
469
+ c3_tensor1 = Disj([d1_eq_dyn,
470
+ (Conj([d1_neq_dyn,
471
+ BinConstraintD(d1, Prod(target), op_eq)]))])
472
+ all_tensor_1 = Conj([c2_tensor1, c3_tensor1])
473
+
474
+ # size 2 tensor
475
+ all_tensor_2 = Conj([c2_tensor2, gen_all_reshape_possibilities([d1, d2], target)])
476
+
477
+ # size 3 tensor
478
+ all_tensor_3 = Conj([c2_tensor3, gen_all_reshape_possibilities([d1, d2, d3], target)])
479
+
480
+ # size 4 tensor
481
+ all_tensor_4 = Conj([c2_tensor4, gen_all_reshape_possibilities([d1, d2, d3, d4], target)])
482
+
483
+ return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]),
484
+ nat_d1, nat_d2, nat_d3, nat_d4]), counter
485
+
486
+ # then there must be exactly one occurrence of dyn
487
+ else:
488
+ new_target = []
489
+
490
+ for n in target:
491
+ if n != Dyn:
492
+ new_target.append(n)
493
+
494
+ # tensor 1
495
+ c3_tensor1 = Disj([d1_eq_dyn,
496
+ (Conj([d1_neq_dyn,
497
+ is_dim_div_by_target(new_target, d1)]))])
498
+ all_tensor_1 = Conj([c2_tensor1, c3_tensor1])
499
+
500
+ # tensor 2
501
+ c21 = Disj([d1_eq_dyn, d2_eq_dyn])
502
+ c22 = Conj([d1_neq_dyn, d2_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2]))])
503
+ all_tensor_2 = Conj([c2_tensor2, Disj([c21, c22])])
504
+
505
+ # tensor 3
506
+ c31 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn])
507
+ c32 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3]))])
508
+ all_tensor_3 = Conj([c2_tensor3, Disj([c31, c32])])
509
+
510
+ # tensor 4
511
+ c41 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn, d4_eq_dyn])
512
+ c42 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, d4_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3, d4]))])
513
+ all_tensor_4 = Conj([c2_tensor4, Disj([c41, c42])])
514
+
515
+ return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]),
516
+ nat_d1, nat_d2, nat_d3, nat_d4]), counter
517
+
518
+
519
+ @register_transformation_rule(ApplyBroadcasting)
520
+ def generate_broadcasting(constraint, counter):
521
+ """
522
+ Transform broadcasting constraints
523
+ """
524
+ e11, e12 = constraint.res1, constraint.res2
525
+ e1, e2 = constraint.input1, constraint.input2
526
+
527
+ e1_dyn = BinConstraintT(e1, Dyn, op_eq)
528
+ e2_dyn = BinConstraintT(e2, Dyn, op_eq)
529
+
530
+ # Introduce dimensions
531
+ e1_equal_e11 = BinConstraintT(e1, e11, op_eq)
532
+ e2_equal_e12 = BinConstraintT(e2, e12, op_eq)
533
+
534
+ # dyn possibility
535
+ e1_dyn_constraint = Conj([e1_dyn, e1_equal_e11, e2_equal_e12])
536
+ e2_dyn_constraint = Conj([e2_dyn, e1_equal_e11, e2_equal_e12])
537
+
538
+ # tensor possibility
539
+ # generate dimensions to create tensors of size 1
540
+ final_tensor_1_constraint, _, _, nat_dims_1, counter = \
541
+ gen_broadcasting_constraints(e1, e2, e11, e12, 1, counter)
542
+
543
+ # generate dimensions to create tensors of size 2
544
+ final_tensor_2_constraint_no_padding, final_tensor_2_constraint_padding_arg1, \
545
+ final_tensor_2_constraint_padding_arg2, nat_dims_2, counter = \
546
+ gen_broadcasting_constraints(e1, e2, e11, e12, 2, counter)
547
+
548
+ # generate dimensions to create tensors of size 3
549
+ final_tensor_3_constraint_no_padding, final_tensor_3_constraint_padding_arg1, \
550
+ final_tensor_3_constraint_padding_arg2, nat_dims_3, counter = \
551
+ gen_broadcasting_constraints(e1, e2, e11, e12, 3, counter)
552
+
553
+ # generate dimensions to create tensors of size 4
554
+ final_tensor_4_constraint_no_padding, final_tensor_4_constraint_padding_arg1, \
555
+ final_tensor_4_constraint_padding_arg2, nat_dims_4, counter = \
556
+ gen_broadcasting_constraints(e1, e2, e11, e12, 4, counter)
557
+
558
+ final_result = Disj([
559
+ e1_dyn_constraint,
560
+ e2_dyn_constraint,
561
+ final_tensor_1_constraint,
562
+ final_tensor_2_constraint_no_padding,
563
+ final_tensor_2_constraint_padding_arg1,
564
+ final_tensor_2_constraint_padding_arg2,
565
+ final_tensor_3_constraint_no_padding,
566
+ final_tensor_3_constraint_padding_arg1,
567
+ final_tensor_3_constraint_padding_arg2,
568
+ final_tensor_4_constraint_no_padding,
569
+ final_tensor_4_constraint_padding_arg1,
570
+ final_tensor_4_constraint_padding_arg2
571
+ ])
572
+
573
+ return Conj([final_result, *nat_dims_1, *nat_dims_2, *nat_dims_3, *nat_dims_4]), counter
574
+
575
+
576
+ def transform_constraint(constraint: Constraint, counter: int):
577
+ """
578
+ Transforms a constraint into a simpler constraint.
579
+ Ex: precision and consistency are transformed to equality
580
+ Args:
581
+ constraint: constraint to be transformed
582
+ counter: for variable tracking
583
+
584
+ Returns: Constraint
585
+
586
+ """
587
+ if type(constraint) in _TRANSFORMATION_RULES:
588
+ return _TRANSFORMATION_RULES[type(constraint)](constraint, counter)
589
+
590
+ else:
591
+ return constraint, counter
592
+
593
+
594
+
595
+
596
+ def calc_last_two_dims(constraint, d: List[DVar]):
597
+ """
598
+ Generates constraints for the last two dimensions of a convolution or a maxpool output
599
+ Args:
600
+ constraint: CalcConv or CalcMaxPool
601
+ d: The list of output dimensions
602
+
603
+ Returns: Constraints for calculating the last two dimensions of the output
604
+
605
+ """
606
+
607
+ assert isinstance(constraint, (CalcConv, CalcMaxPool))
608
+
609
+ b3 = constraint.matching_constraint[2]
610
+ b4 = constraint.matching_constraint[3]
611
+
612
+ b3_dyn = Conj([BinConstraintD(d[2], Dyn, op_eq), BinConstraintD(b3, Dyn, op_eq)])
613
+ b4_dyn = Conj([BinConstraintD(d[3], Dyn, op_eq), BinConstraintD(b4, Dyn, op_eq)])
614
+
615
+ d3_not_dyn = Conj([BinConstraintD(d[2], Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq)])
616
+ d4_not_dyn = Conj([BinConstraintD(d[3], Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq)])
617
+
618
+ # transform parameters into tuples in case they are not already
619
+ padding = (constraint.padding, constraint.padding) \
620
+ if isinstance(constraint.padding, int) else constraint.padding
621
+ kernel = (constraint.kernel, constraint.kernel) \
622
+ if isinstance(constraint.kernel, int) else constraint.kernel
623
+ stride = (constraint.stride, constraint.stride) \
624
+ if isinstance(constraint.stride, int) else constraint.stride
625
+ dilation = (constraint.dilation, constraint.dilation) \
626
+ if isinstance(constraint.dilation, int) else constraint.dilation
627
+
628
+ f1 = BinConstraintD(b3, BinConstraintD(2, padding[0], op_mul), op_add)
629
+ f2 = BinConstraintD(dilation[0], BinConstraintD(kernel[0], 1, op_sub), op_mul)
630
+ f3 = BinConstraintD(BinConstraintD(BinConstraintD(f1, f2, op_sub), 1, op_sub), stride[0], op_div)
631
+ f4 = BinConstraintD(f3, 1, op_add)
632
+
633
+ c4 = Disj([b3_dyn, Conj([d3_not_dyn, BinConstraintD(d[2], f4, op_eq)])])
634
+
635
+ f11 = BinConstraintD(b4, BinConstraintD(2, padding[1], op_mul), op_add)
636
+ f22 = BinConstraintD(dilation[1], BinConstraintD(kernel[1], 1, op_sub), op_mul)
637
+ f33 = BinConstraintD(BinConstraintD(BinConstraintD(f11, f22, op_sub), 1, op_sub), stride[1], op_div)
638
+ f44 = BinConstraintD(f33, 1, op_add)
639
+
640
+ c5 = Disj([b4_dyn, Conj([d4_not_dyn, BinConstraintD(d[3], f44, op_eq)])])
641
+
642
+ return c4, c5
643
+
644
+
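Unfolding f1..f4 (and f11..f44), the non-dynamic branch computes the usual output-size formula out = (in + 2*padding - dilation*(kernel - 1) - 1) / stride + 1, the same expression that appears in the Conv2d and MaxPool2d documentation. A quick numeric check on concrete integers (illustrative):

    def conv_out(size, padding, dilation, kernel, stride):
        # same arithmetic as f1..f4 above, evaluated on concrete values
        return (size + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

    assert conv_out(32, padding=1, dilation=1, kernel=3, stride=1) == 32
    assert conv_out(32, padding=0, dilation=1, kernel=2, stride=2) == 16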
645
+ def generate_all_int_dyn_dim_possibilities(my_list: List[DVar]):
646
+ """
647
+ Generate all possibilities of being equal or not equal to dyn for my_list
648
+ Args:
649
+ my_list: List of tensor dimensions
650
+
651
+ Returns: A list of a list of constraints. Each list of constraints corresponds to
652
+ one possibility about the values of the dimension variables
653
+ """
654
+ # generate all possibilities of being equal or not equal to dyn for my_list
655
+ eq_possibilities = [BinConstraintD(my_list[i], Dyn, op_eq) for i in range(len(my_list))]
656
+ neq_possibilities = [BinConstraintD(my_list[i], Dyn, op_neq) for i in range(len(my_list))]
657
+ d_possibilities = []
658
+
659
+ for i in zip(eq_possibilities, neq_possibilities):
660
+ d_possibilities.append(list(i))
661
+ all_possibilities = list(itertools.product(*d_possibilities))
662
+ return all_possibilities
663
+
664
+
665
+ def is_target_div_by_dim(target: List[int], dim: List[DVar]):
666
+ """
667
+ Generate constraints to check if the target dimensions are divisible by the input dimensions
668
+ Args:
669
+ target: Target dimensions
670
+ dim: Input dimensions
671
+
672
+ Returns: Constraints to check divisibility
673
+
674
+ """
675
+ return BinConstraintD(BinConstraintD(Prod(target), dim, op_mod), 0, op_eq)
676
+
677
+
678
+ def is_dim_div_by_target(target: List[int], dim: List[DVar]):
679
+ """
680
+ Generate constraints to check if the input dimensions is divisible by the target dimensions
681
+ Args:
682
+ target: Target dimensions
683
+ dim: Input dimensions
684
+
685
+ Returns: Constraints to check divisibility
686
+
687
+ """
688
+ return BinConstraintD(BinConstraintD(dim, Prod(target), op_mod), 0, op_eq)
689
+
690
+
691
+ def gen_all_reshape_possibilities(list_of_dims, target):
692
+ """
693
+ Consider all possibilities of what the input dimensions could be (a number or dynamic),
694
+ then generate the appropriate constraints using multiplication or mod depending on the possibility.
695
+ The possibilities we consider here are the cross product of being equal or not equal to dyn
696
+ for the input. The target is fixed because at most one dimension could be dyn.
697
+ We have different cases for this.
698
+
699
+ Args:
700
+ list_of_dims: The input list of dimensions
701
+ target: The tensor we want to reshape to
702
+
703
+ Returns: A disjunction of transformed reshape constraints
704
+
705
+ """
706
+ all_possibilities = generate_all_int_dyn_dim_possibilities(list_of_dims)
707
+
708
+ all_constraints = []
709
+
710
+ for p in all_possibilities:
711
+ to_multiply = []
712
+
713
+ p = list(p)
714
+
715
+ for constraint in p:
716
+ assert isinstance(constraint, BinConstraintD)
717
+ if constraint.op == op_neq:
718
+ to_multiply.append(constraint.lhs)
719
+
720
+ if not to_multiply:
721
+ all_constraints.append(Conj(p))
722
+
723
+ elif len(to_multiply) < len(list_of_dims):
724
+ all_constraints.append(Conj(p + [is_target_div_by_dim(target, Prod(to_multiply))]))
725
+ else:
726
+ all_constraints.append(Conj(p + [BinConstraintD(Prod(list_of_dims),
727
+ Prod(target), op_eq)]))
728
+
729
+ return Disj(all_constraints)
730
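As a concrete (hypothetical) instance of the case analysis: reshaping an input whose dimensions are [Dyn, 4] to a target of total size 8 only requires the product of the known input dimensions to divide the target product; when every input dimension is known, the two products must be equal instead.

    known_product = 4            # product of input dimensions that are not Dyn
    target_product = 2 * 2 * 2   # product of the reshape target
    assert target_product % known_product == 0   # the is_target_div_by_dim case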
+
731
+
732
+ def broadcast_dim(tensor_input1, tensor_input2, res1, res2, index, padding=False):
733
+ """
734
+ Apply broadcasting to the 'index' dimension of tensor_input1.
735
+ Args:
736
+ tensor_input1: should represent [d1, ..., d_index, ...] where d_index = 1
737
+ tensor_input2: represents the second input
738
+ res1: broadcasted result 1
739
+ res2: broadcasted result 2
740
+ index: the index to broadcast
741
+ padding: If padding was used, then tensor_input1[index] does not exist
742
+
743
+ Returns: Constraints relating the 'index' dimensions of the inputs and the broadcasted results
744
+
745
+ """
746
+ if tensor_input1[index] is None:
747
+ assert padding
748
+
749
+
750
+ if not padding:
751
+ # then the inputs are the same length so they all have dimensions at "index"
752
+ return Conj([BinConstraintD(tensor_input1[index], 1, op_eq),
753
+ BinConstraintD(res1[index], res2[index], op_eq),
754
+ BinConstraintD(res2[index], tensor_input2[index], op_eq)])
755
+
756
+ else:
757
+ # we don't set the input dimension to 1, since it doesn't exist.
758
+ return Conj([BinConstraintD(res1[index], res2[index], op_eq),
759
+ BinConstraintD(res2[index], tensor_input2[index], op_eq)])
760
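The constraint family mirrors the usual NumPy/PyTorch broadcasting rule for a single position: a size-1 dimension stretches to the other input's size, and both broadcasted results agree there. A plain-Python sketch with assumed sizes:

    a_dim, b_dim = 1, 5                       # tensor_input1 has size 1 at 'index'
    res_dim = b_dim if a_dim == 1 else a_dim  # both broadcasted results take 5
    assert res_dim == 5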
+
761
+
762
+ def apply_padding(e1_var: TVar,
763
+ e11: BinConstraintT,
764
+ e2: BinConstraintT,
765
+ e12: BinConstraintT,
766
+ d2: List[DVar],
767
+ d11: List[DVar],
768
+ d12: List[DVar],
769
+ counter: int):
770
+ """
771
+ We are considering the possibility where one input has fewer dimensions than
772
+ another input, so we apply padding to the broadcasted results
773
+
774
+ Args:
775
+ e1_var: Variable representing the first input where padding will be
776
+ e11: constraint of the form e11 = TensorType[d1, ..., dn]
777
+ e2: constraint of the form e2 = TensorType[d1, ..., dn]
778
+ e12: constraint of the form e12 = TensorType[d1, ..., dn]
779
+ d2: Tensor variables for the second input
780
+ d11: Tensor variables for the broadcasted first input
781
+ d12: Tensor variables for the broadcasted second input
782
+ counter: variable tracking
783
+
784
+ Returns: A new constraint whose goal is to apply padding to the broadcasted result
785
+
786
+ """
787
+
788
+ res = []
789
+
790
+ # pad the shorter input with None so we can pass it to the broadcasting helper function
791
+ for i in range(1, len(d2)):
792
+
793
+ d1, counter = gen_tensor_dims(i, counter)
794
+
795
+ nat_constraints = gen_nat_constraints(d1 + d2 + d11 + d12)
796
+
797
+ e1 = BinConstraintT(e1_var, TensorType(d1), op_eq)
798
+
799
+ simulate_padding = [None] * (len(d2) - i)
800
+
801
+ assert len(simulate_padding + d1) == len(d2)
802
+
803
+ broadcast_padding = []
804
+
805
+ # for every padding size, we also consider broadcasting
806
+ for j in range(len(d2) - i):
807
+ broadcast_padding.append(broadcast_dim(simulate_padding, d2, d11, d12, j, True))
808
+
809
+ # we consider the possibilities for broadcasting for every dimension. Since we already
810
+ # padded d1, we do not consider it while broadcasting
811
+ all_broadcasting_possibilities = generate_all_broadcasting_possibilities_no_padding(d1,
812
+ d2[(len(d2) - i):],
813
+ d11[(len(d2) - i):],
814
+ d12[(len(d2) - i):])
815
+ # combine all constraints into a conjunction
816
+ c = Conj([e1, e11, e2, e12,
817
+ *broadcast_padding,
818
+ all_broadcasting_possibilities,
819
+ *nat_constraints
820
+ ])
821
+ res.append(c)
822
+
823
+ return Disj(res), counter
824
+
825
+
826
+ def no_broadcast_dim_with_index(d1: List[DVar],
827
+ d2: List[DVar],
828
+ d3: List[DVar],
829
+ d4: List[DVar],
830
+ i: int):
831
+ """
832
+ Args:
833
+ d1: input 1
834
+ d2: input 2
835
+ d3: simulated broadcasting for input 1
836
+ d4: simulated broadcasting for input 2
837
+ i: the index of the dimension being considered
838
+
839
+ Returns: Constraints for when no broadcasting occurs
840
+ """
841
+ return Conj([
842
+ Disj([
843
+ Conj([BinConstraintD(d1[i], 1, op_eq),
844
+ BinConstraintD(d2[i], 1, op_eq)]),
845
+
846
+ Conj([BinConstraintD(d1[i], 1, op_neq),
847
+ BinConstraintD(d2[i], 1, op_neq)])]),
848
+
849
+ BinConstraintD(d1[i], d3[i], op_eq),
850
+ BinConstraintD(d2[i], d4[i], op_eq)])
851
+
852
+
853
+
854
+ def gen_lists_of_dims(num_tensors: int, dim_size: int, counter: int):
855
+ """
856
+ Generate lists of DVar to represent tensor dimensions
857
+ Args:
858
+ num_tensors: the required number of tensors
859
+ dim_size: the number of dimensions for each tensor
860
+ counter: variable tracking
861
+
862
+ Returns: A list of a list of tensor dimensions
863
+
864
+ """
865
+ res = []
866
+
867
+ for _ in range(num_tensors):
868
+ dims, counter = gen_tensor_dims(dim_size, counter)
869
+ res.append(dims)
870
+
871
+ return res, counter
872
+
873
+
874
+ def create_equality_constraints_for_broadcasting(e1: TVar,
875
+ e2: TVar,
876
+ e11: TVar,
877
+ e12: TVar,
878
+ d1: List[DVar],
879
+ d2: List[DVar],
880
+ d11: List[DVar],
881
+ d12: List[DVar]):
882
+ """
883
+ Create equality constraints for when no broadcasting occurs
884
+ Args:
885
+ e1: Input 1
886
+ e2: Input 2
887
+ e11: Broadcasted input 1
888
+ e12: Broadcasted input 2
889
+ d1: Variables that store dimensions for e1
890
+ d2: Variables that store dimensions for e2
891
+ d11: Variables that store dimensions for e11
892
+ d12: Variables that store dimensions for e12
893
+
894
+ Returns: Four equality constraints
895
+
896
+ """
897
+
898
+ e1_tensor = BinConstraintT(e1, TensorType(d1), op_eq)
899
+ e11_tensor = BinConstraintT(e11, TensorType(d11), op_eq)
900
+ e2_tensor = BinConstraintT(e2, TensorType(d2), op_eq)
901
+ e12_tensor = BinConstraintT(e12, TensorType(d12), op_eq)
902
+ return [e1_tensor, e11_tensor, e2_tensor, e12_tensor]
903
+
904
+
905
+ def gen_consistency_constraints(constraint: Constraint, counter: int):
906
+ """
907
+ Args:
908
+ constraint: Consistency constraint on tensors
909
+ counter: for variable tracking
910
+
911
+ Returns: Equality and consistency constraints on dimensions
912
+
913
+ """
914
+
915
+ all_constraints = []
916
+
917
+ for i in range(1, MAX_TENSOR_RANK + 1):
918
+ new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
919
+ new_dims_rhs_2, counter = gen_tensor_dims(i, counter)
920
+
921
+ nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)
922
+
923
+ c_tensor_i = Conj([BinConstraintT(constraint.lhs, TensorType(new_dims_rhs_1), op_eq),
924
+ BinConstraintT(constraint.rhs, TensorType(new_dims_rhs_2), op_eq)] +
925
+ [BinConstraintD(d1, d2, op_consistency) for
926
+ d1, d2 in zip(new_dims_rhs_1, new_dims_rhs_2)] + nat_constraints)
927
+
928
+ all_constraints.append(c_tensor_i)
929
+
930
+ return all_constraints, counter
931
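For intuition, dimension consistency (op_consistency) is intended to hold when two dimensions are equal or when either one is Dyn. A rough plain-Python analogue (not the library's implementation):

    DYN = "Dyn"

    def dims_consistent(d1, d2):
        # equal dimensions, or either side dynamic
        return d1 == DYN or d2 == DYN or d1 == d2

    assert dims_consistent(3, 3) and dims_consistent(DYN, 7)
    assert not dims_consistent(3, 4)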
+
932
+
933
+ def gen_greatest_upper_bound(constraint: TGreatestUpperBound, counter: int):
934
+ """
935
+ Args:
936
+ constraint: Greatest upper bound on tensors
937
+ counter: variable tracking
938
+
939
+ Returns: A set of equality constraints and DGreatestUpperBound constraints
940
+
941
+ """
942
+
943
+ all_constraints = []
944
+
945
+ for i in range(1, MAX_TENSOR_RANK + 1):
946
+ c = []
947
+ dims1, counter = gen_tensor_dims(i, counter)
948
+ c1tensor = TensorType(dims1)
949
+
950
+ dims2, counter = gen_tensor_dims(i, counter)
951
+ c2tensor = TensorType(dims2)
952
+
953
+ dims3, counter = gen_tensor_dims(i, counter)
954
+ c3tensor = TensorType(dims3)
955
+
956
+ c += [BinConstraintT(constraint.rhs1, c1tensor, op_eq),
957
+ BinConstraintT(constraint.rhs2, c2tensor, op_eq),
958
+ BinConstraintT(constraint.res, c3tensor, op_eq)] + \
959
+ gen_nat_constraints(dims1 + dims2 + dims3)
960
+
961
+ assert len(c3tensor.__args__) == len(c1tensor.__args__) == len(c2tensor.__args__)
962
+ for i in range(len(c3tensor.__args__)):
963
+ c.append(DGreatestUpperBound(c3tensor.__args__[i],
964
+ c1tensor.__args__[i],
965
+ c2tensor.__args__[i]))
966
+
967
+ all_constraints.append(Conj(c))
968
+ return all_constraints, counter
969
+
970
+
971
+ def generate_all_broadcasting_possibilities_no_padding(d1: List[DVar], d2: List[DVar], d11: List[DVar], d12: List[DVar]):
972
+ """
973
+ Generate broadcasting constraints assuming no padding. Broadcasting can happen at any dimension.
974
+ We look at all combinations for all dimensions in d1 and d2
975
+ Args:
976
+ d1: input1 dimensions
977
+ d2: input2 dimensions
978
+ d11: broadcasted input1 dimensions
979
+ d12: broadcasted input2 dimensions
980
+
981
+ Returns: broadcasting constraints relating the input dimensions to the broadcasted dimensions
982
+
983
+ """
984
+
985
+ size = len(d1)
986
+
987
+ res2 = []
988
+
989
+ for i in range(size):
990
+ t1 = broadcast_dim(d1, d2, d11, d12, i)
991
+ t2 = broadcast_dim(d2, d1, d12, d11, i)
992
+ t3 = no_broadcast_dim_with_index(d1, d2, d11, d12, i)
993
+
994
+ res2.append(Disj([t1, t2, t3]))
995
+
996
+ return Conj(res2)
997
+
998
+
999
+ def gen_broadcasting_constraints(e1: TVar, e2: TVar, e11: TVar, e12: TVar, i: int, counter: int):
1000
+ """
1001
+ Simulates broadcasting on e1 and e2 and returns the results
1002
+ respectively in e11 and e12. Because of gradual types,
1003
+ e1 and e2 may not be equal. Similarly, e11 and e12 may not
1004
+ be equal. e11 and e12 should be guaranteed to be consistent
1005
+ as they represent the shapes of the tensors to be added after
1006
+ broadcasting.
1007
+ Args:
1008
+ e1: TVar representing the type of input 1
1009
+ e2: TVar representing the type of input 2
1010
+ e11: TVar representing the broadcasted input 1
1011
+ e12: TVar representing the broadcasted input 2
1012
+ i: The rank of the resulting type of addition
1013
+ counter: for variable tracking
1014
+
1015
+ Returns: Simplified broadcasting constraints
1016
+
1017
+ """
1018
+ dims, counter = gen_lists_of_dims(4, i, counter)
1019
+ [d1, d2, d3, d4] = dims
1020
+ nat_dims_i = gen_nat_constraints(list(itertools.chain.from_iterable(dims)))
1021
+
1022
+ initialize_tensors_constraints = create_equality_constraints_for_broadcasting(e1, e2, e11, e12,
1023
+ d1, d2, d3, d4)
1024
+
1025
+ [e1_tensor, e11_tensor, e2_tensor, e12_tensor] = initialize_tensors_constraints
1026
+
1027
+ # without padding, broadcast all possibilities for tensors of size i
1028
+ final_tensor_constraint_no_padding = Conj([*initialize_tensors_constraints,
1029
+ generate_all_broadcasting_possibilities_no_padding(d1, d2, d3, d4)])
1030
+
1031
+ # with padding, broadcast all possibilities for tensors of size i
1032
+ final_tensor_constraint_padding_arg1, counter = \
1033
+ apply_padding(e1, e11_tensor, e2_tensor, e12_tensor, d2, d3, d4, counter)
1034
+
1035
+ final_tensor_constraint_padding_arg2, counter = \
1036
+ apply_padding(e2, e12_tensor, e1_tensor, e11_tensor, d1, d4, d3, counter)
1037
+
1038
+ return final_tensor_constraint_no_padding, \
1039
+ final_tensor_constraint_padding_arg1, \
1040
+ final_tensor_constraint_padding_arg2, nat_dims_i, counter
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/operation.py ADDED
@@ -0,0 +1,14 @@
1
+ op_add = '+'
2
+ op_sub = '-'
3
+ op_mul = '*'
4
+ op_div = '/'
5
+ op_eq = '='
6
+ op_neq = '!='
7
+ op_imp = '=>'
8
+ op_matching = '⊳'
9
+ op_consistency = '~'
10
+ op_precision = '⊑'
11
+ op_leq = '≤'
12
+ op_lt = '<'
13
+ op_gt = '>'
14
+ op_mod = '%'
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py ADDED
@@ -0,0 +1,348 @@
1
+ from torch.fx.experimental.migrate_gradual_types.constraint import Conj, Disj, T, F, BinConstraintT, BVar, is_bool_expr
2
+ from torch.fx.experimental.migrate_gradual_types.constraint import BinConstraintD, TVar, DVar
3
+ from torch.fx.experimental.migrate_gradual_types.constraint import Prod, is_algebraic_expression, is_dim
4
+ from torch.fx.experimental.migrate_gradual_types.constraint_generator import ConstraintGenerator
5
+ from torch.fx.experimental.migrate_gradual_types.constraint_transformation import transform_constraint
6
+ from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_eq, op_neq, op_gt, op_lt
7
+ from torch.fx.experimental.migrate_gradual_types.operation import op_leq, op_sub, op_div, op_mul, op_mod
8
+ from torch.fx.tensor_type import TensorType, Dyn
9
+
10
+ try:
11
+ import z3 # type: ignore[import]
12
+ from torch.fx.experimental.migrate_gradual_types.z3_types import tensor_type, z3_dyn, D
13
+ HAS_Z3 = True
14
+
15
+ def transform_to_z3(constraint, counter, dimension_dict):
16
+ if isinstance(constraint, Conj):
17
+ conjuncts = []
18
+ for c in constraint.conjucts:
19
+ new_c, counter = transform_to_z3(c, counter, dimension_dict)
20
+ conjuncts.append(new_c)
21
+ return z3.And(conjuncts), counter
22
+
23
+ elif isinstance(constraint, Disj):
24
+ disjuncts = []
25
+ for c in constraint.disjuncts:
26
+ new_c, counter = transform_to_z3(c, counter, dimension_dict)
27
+ disjuncts.append(new_c)
28
+ return z3.Or(disjuncts), counter
29
+
30
+ elif isinstance(constraint, T):
31
+ return True, counter
32
+
33
+ elif isinstance(constraint, F):
34
+ return False, counter
35
+
36
+ elif isinstance(constraint, BinConstraintT):
37
+ if constraint.op == op_eq:
38
+ lhs, counter = transform_var(constraint.lhs, counter, dimension_dict)
39
+ rhs, counter = transform_var(constraint.rhs, counter, dimension_dict)
40
+ return (lhs == rhs), counter
41
+
42
+ else:
43
+ raise NotImplementedError('Method not yet implemented')
44
+
45
+ elif isinstance(constraint, BinConstraintD):
46
+ if constraint.op == op_eq:
47
+
48
+ if isinstance(constraint.lhs, BVar) and is_bool_expr(constraint.rhs):
49
+ transformed_rhs, counter = transform_to_z3(constraint.rhs, counter, dimension_dict)
50
+ transformed_lhs = z3.Bool(constraint.lhs.c)
51
+ return transformed_lhs == transformed_rhs, counter
52
+
53
+ elif is_dim(constraint.lhs) and is_dim(constraint.rhs):
54
+ # with dimension transformations we consider the encoding
55
+ lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict)
56
+ rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict)
57
+ return lhs == rhs, counter
58
+
59
+ else:
60
+ # then we have an algebraic expression which means that we disregard the
61
+ # first element of the encoding
62
+ lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
63
+ rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
64
+ return lhs == rhs, counter
65
+
66
+ # The assumption here is that the LHS and RHS must be dimensions
67
+ elif constraint.op == op_neq:
68
+ assert is_dim(constraint.lhs)
69
+ assert is_dim(constraint.rhs)
70
+ lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict)
71
+ rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict)
72
+ if constraint.rhs == Dyn or constraint.lhs == Dyn:
73
+ if constraint.rhs == Dyn:
74
+ return lhs.arg(0) == 1, counter
75
+ elif constraint.lhs == Dyn:
76
+ return rhs.arg(0) == 1, counter
77
+
78
+ # if one of the instances is a number
79
+ elif isinstance(constraint.lhs, int) or isinstance(constraint.rhs, int):
80
+ if isinstance(constraint.lhs, int):
81
+ return z3.Or([rhs.arg(0) == 0, z3.And([rhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter
82
+
83
+ elif isinstance(constraint.rhs, int):
84
+ return z3.Or([lhs.arg(0) == 0, z3.And([lhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter
85
+
86
+ else:
87
+ return z3.Or([z3.And([lhs.arg(0) == 0, rhs.arg(0) != 0]),
88
+ z3.And([lhs.arg(0) != 0, rhs.arg(0) == 0]),
89
+ z3.And([lhs.arg(0) != 0, rhs.arg(0) != 0, lhs.arg(1) != rhs.arg(1)])]), counter
90
+
91
+
92
+ elif constraint.op == op_leq:
93
+ # if the dimensions are not dyn, this will come into effect
94
+ # there would have been another constraint specifying if a given dimension
95
+ # is dyn or not
96
+ assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
97
+ lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
98
+ rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
99
+ return lhs <= rhs, counter
100
+
101
+ elif constraint.op == op_gt:
102
+ assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
103
+ lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
104
+ rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
105
+ return lhs > rhs, counter
106
+
107
+ elif constraint.op == op_lt:
108
+ assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
109
+ lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
110
+ rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
111
+ return lhs < rhs, counter
112
+
113
+ else:
114
+ raise NotImplementedError('operation not yet implemented')
115
+
116
+ else:
117
+ raise NotImplementedError('Operation not yet implemented')
118
+
119
+
120
+ def transform_var(tensor, counter, dimension_dict):
121
+ """
122
+ Transforms tensor variables to a format understood by z3
123
+ Args:
124
+ tensor: Tensor variable or a tensor type potentially with variable dimensions
125
+ Returns: Transformed variable to a z3 format
126
+
127
+ """
128
+ if isinstance(tensor, TensorType):
129
+ res = []
130
+ for t in tensor.__args__:
131
+ transformed, counter = transform_dimension(t, counter, dimension_dict)
132
+ res.append(transformed)
133
+
134
+ assert len(res) <= 4
135
+ if len(tensor.__args__) == 1:
136
+ return tensor_type.tensor1(res[0]), counter
137
+ elif len(tensor.__args__) == 2:
138
+ return tensor_type.tensor2(res[0], res[1]), counter
139
+ elif len(tensor.__args__) == 3:
140
+ return tensor_type.tensor3(res[0], res[1], res[2]), counter
141
+ elif len(tensor.__args__) == 4:
142
+ return tensor_type.tensor4(res[0], res[1], res[2], res[3]), counter
143
+
144
+ elif tensor == Dyn:
145
+ return z3_dyn, counter
146
+
147
+ elif isinstance(tensor, TVar):
148
+ return z3.Const(tensor.tvar, tensor_type), counter
149
+
150
+ def transform_dimension(dimension, counter, dimension_dict):
151
+ """
152
+ Takes a dimension variable or a number and transforms it to a tuple
153
+ according to our scheme
154
+ Args:
155
+ dimension: The dimension to be transformed
156
+ counter: variable tracking
157
+
158
+ Returns: tuple and the current counter
159
+
160
+ """
161
+ if dimension == Dyn:
162
+ counter += 1
163
+ return D(0, z3.Int(counter)), counter
164
+ elif isinstance(dimension, int):
165
+ return D(1, dimension), counter
166
+ elif isinstance(dimension, DVar):
167
+ if dimension.c in dimension_dict:
168
+ return D(z3.Int(dimension_dict[dimension.c]), z3.Int(dimension.c)), counter
169
+ else:
170
+ counter += 1
171
+ dimension_dict[dimension.c] = counter
172
+ return D(z3.Int(counter), z3.Int(dimension.c)), counter
173
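A brief sketch of the pair encoding this helper produces, assuming z3 and the sibling z3_types module are importable: every dimension becomes D(flag, value), where flag 0 marks Dyn (paired with a fresh integer) and flag 1 marks a concrete size.

    import z3
    from torch.fx.experimental.migrate_gradual_types.z3_types import D

    encoded_dyn = D(0, z3.Int(7))   # Dyn: flag 0, hypothetical fresh counter 7
    encoded_five = D(1, 5)          # a concrete size: flag 1, the size itself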
+
174
+
175
+ def transform_algebraic_expression(expr, counter, dimension_dict):
176
+ """
177
+ Transforms an algebraic expression to z3 format
178
+ Args:
179
+ expr: Either a dimension variable or an algebraic expression
180
+
181
+
182
+ Returns: the transformed expression
183
+
184
+ """
185
+ assert is_algebraic_expression(expr) or is_dim(expr)
186
+
187
+ if is_dim(expr):
188
+ transformed, counter = transform_dimension(expr, counter, dimension_dict)
189
+ return transformed.arg(1), counter
190
+
191
+ elif isinstance(expr, Prod):
192
+
193
+ dims = []
194
+ for dim in expr.products:
195
+ assert is_dim(dim)
196
+ d, counter = transform_dimension(dim, counter, dimension_dict)
197
+ dims.append(d.arg(1))
198
+ return z3.Product(dims), counter
199
+
200
+ elif is_algebraic_expression(expr):
201
+
202
+ lhs, counter = transform_algebraic_expression(expr.lhs, counter, dimension_dict)
203
+ rhs, counter = transform_algebraic_expression(expr.rhs, counter, dimension_dict)
204
+
205
+ if expr.op == op_sub:
206
+ c = lhs - rhs
207
+
208
+ elif expr.op == op_add:
209
+ c = lhs + rhs
210
+
211
+ elif expr.op == op_div:
212
+ c = lhs / rhs
213
+
214
+ elif expr.op == op_mul:
215
+ c = lhs * rhs
216
+
217
+ elif expr.op == op_mod:
218
+ c = lhs % rhs
219
+
220
+ else:
221
+ raise NotImplementedError('operation not yet implemented')
222
+
223
+ return c, counter
224
+
225
+ else:
226
+ raise RuntimeError
227
+
228
+
229
+ def transform_all_constraints(traced, counter=0):
230
+ """
231
+ Given a trace, generates constraints and transforms them to z3 format
232
+
233
+ """
234
+ dimension_dict = {} # type: ignore[var-annotated]
235
+
236
+ generator = ConstraintGenerator(traced)
237
+ new_constraints, counter = generator.generate_constraints(counter)
238
+
239
+ # print(new_constraints.conjucts[0])
240
+ # print(*new_constraints.conjucts, sep='\n')
241
+
242
+ # transform precision, matching, consistency till obtaining a fixed point
243
+ new_constraints, counter = iterate_till_fixed_point(new_constraints, counter)
244
+ # print(new_constraints)
245
+ # print(new_constraints.conjucts)
246
+ # new_constraints.conjucts = new_constraints.conjucts[:-1]
247
+ # print(*new_constraints.conjucts, sep='\n')
248
+
249
+ transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict)
250
+ # print(transformed)
251
+ return transformed
252
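A hedged usage sketch: trace a module, transform its constraints, and ask z3 whether they are satisfiable. The module M here is hypothetical, and the snippet assumes z3 is installed and the graph is supported by the constraint generator.

    import torch
    import z3
    from torch.fx import symbolic_trace
    from torch.fx.experimental.migrate_gradual_types.transform_to_z3 import transform_all_constraints

    class M(torch.nn.Module):
        def forward(self, x):
            return torch.add(x, x)

    constraints = transform_all_constraints(symbolic_trace(M()))
    solver = z3.Solver()
    solver.add(constraints)
    print(solver.check())   # z3.sat when the graph admits consistent shapes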
+
253
+ def iterate_till_fixed_point(constraints, counter):
254
+ """
255
+ Transform constraints till reaching a fixed point
256
+ """
257
+ old_c = None
258
+ while old_c != constraints:
259
+ old_c = constraints
260
+ constraints, counter = transform_constraint(constraints, counter)
261
+ return constraints, counter
262
+
263
+ def transform_all_constraints_trace_time(tracer_root, graph, node, counter=0):
264
+ """
265
+ Takes a node and a graph and generates two sets of constraints.
266
+ One set contains the node's constraints and the other set
267
+ contains the negation of the node's constraints.
268
+ Args:
269
+ tracer_root: the root for getting the module instances
270
+ graph: the graph so far in the tracing process
271
+ node: node that represents a conditional
272
+ counter: variable tracking
273
+
274
+ Returns: Two sets of constraints. One with a conjunction with the
275
+ conditional constraint and the other with a conjunction with
276
+ its negation.
277
+
278
+ """
279
+ dimension_dict = {} # type: ignore[var-annotated]
280
+
281
+ generator = ConstraintGenerator(tracer_root, graph)
282
+ new_constraints, counter = generator.generate_constraints(counter)
283
+
284
+ condition_constraint = new_constraints.conjucts[-1]
285
+
286
+ # we know the constraint is a conjunction where the last constraint is about the conditional
287
+ # so remove the last constraint
288
+ new_constraints.conjucts = new_constraints.conjucts[:-1]
289
+
290
+ # transform precision, matching, consistency till obtaining a fixed point
291
+ new_constraints, counter = iterate_till_fixed_point(new_constraints, counter)
292
+
293
+
294
+ # since the function returns a list of one element, we get the first element
295
+ # we are only interested in the RHS in this case because the LHS just stores
296
+ # the result
297
+
298
+ # we make sure the constraint is of the form:
299
+ # c = b where b is a boolean expression
300
+ # and we consider b (constraint.rhs) for transformation
301
+ assert isinstance(condition_constraint.lhs, BVar)
302
+ assert is_bool_expr(condition_constraint.rhs)
303
+ condition_constraint_rhs = condition_constraint.rhs
304
+
305
+ # transform the condition constraint
306
+ condition_constraint_rhs, counter = iterate_till_fixed_point(condition_constraint_rhs, counter)
307
+
308
+ transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict)
309
+
310
+ transformed_condition_constraint, counter = transform_to_z3(condition_constraint_rhs, counter, dimension_dict)
311
+
312
+ negation_transformed_condition_constraint = z3.Not(transformed_condition_constraint)
313
+
314
+ return z3.And([transformed, transformed_condition_constraint]), \
315
+ z3.And([transformed, negation_transformed_condition_constraint])
316
+
317
+
318
+ def evaluate_conditional_with_constraints(tracer_root, graph, node, counter=0, user_constraints=None):
319
+ """
320
+ Given an IR and a node representing a conditional, evaluate the conditional
321
+ and its negation
322
+ Args:
323
+ tracer_root: Tracer root for module instances
324
+ node: The node to be evaluated
325
+
326
+ Returns: the results of evaluating the condition and the negation with
327
+ the rest of the constraints
328
+
329
+ """
330
+
331
+ transformed_positive, transformed_negative = \
332
+ transform_all_constraints_trace_time(tracer_root, graph, node, counter)
333
+
334
+ s = z3.Solver()
335
+ s.add(transformed_positive)
336
+ if user_constraints is not None:
337
+ s.add(user_constraints)
338
+ condition = s.check()
339
+
340
+ s = z3.Solver()
341
+ s.add(transformed_negative)
342
+ if user_constraints is not None:
343
+ s.add(user_constraints)
344
+ negation = s.check()
345
+ return condition, negation
346
+
347
+ except ImportError:
348
+ HAS_Z3 = False
venv/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/util.py ADDED
@@ -0,0 +1,52 @@
1
+ from torch.fx.experimental.migrate_gradual_types.constraint import TVar, DVar, BinConstraintD, \
2
+ BVar
3
+ from torch.fx.experimental.migrate_gradual_types.operation import op_leq
4
+
5
+
6
+ def gen_tvar(curr):
7
+ """
8
+ Generate a tensor variable
9
+ :param curr: The current counter
10
+ :return: a tensor variable and the updated counter
11
+ """
12
+ curr += 1
13
+ return TVar(curr), curr
14
+
15
+
16
+ def gen_dvar(curr):
17
+ """
18
+ Generate a dimension variable
19
+ :param curr: the current counter
20
+ :return: a dimension variable and an updated counter
21
+ """
22
+ curr += 1
23
+ return DVar(curr), curr
24
+
25
+ def gen_bvar(curr):
26
+ """
27
+ Generate a boolean variable
28
+ :param curr: the current counter
29
+ :return: a boolean variable and an updated counter
30
+ """
31
+ curr += 1
32
+ return BVar(curr), curr
33
+
34
+ def gen_tensor_dims(n, curr):
35
+ """
36
+ Generate a list of tensor dimensions
37
+ :param n: the number of dimensions
38
+ :param curr: the current counter
39
+ :return: a list of dimension variables and an updated counter
40
+ """
41
+ dims = []
42
+ for _ in range(n):
43
+ dvar, curr = gen_dvar(curr)
44
+ dims.append(dvar)
45
+ return dims, curr
46
+
47
+
48
+ def gen_nat_constraints(list_of_dims):
49
+ """
50
+ Generate natural number constraints for dimensions
51
+ """
52
+ return [BinConstraintD(0, d, op_leq) for d in list_of_dims]
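A small usage sketch of the helpers above (counter values are illustrative):

    from torch.fx.experimental.migrate_gradual_types.util import (
        gen_tensor_dims, gen_nat_constraints)

    dims, counter = gen_tensor_dims(3, 0)   # three fresh dimension variables
    nat = gen_nat_constraints(dims)         # one "0 <= d" constraint per variable
    assert counter == 3 and len(nat) == 3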