applied-ai-018 commited on
Commit
bed2b1d
·
verified ·
1 Parent(s): cacbf08

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/torch/fx/__init__.pyi +11 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/fx/config.py +6 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_backward_state.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_config.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/const_fold.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/debug.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/graph_gradual_typechecker.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/merge_matmul.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/normalize.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/optimization.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/refinement_types.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/sym_node.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/unify_refinements.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/validator.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/_sym_dispatch_mode.py +58 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/debug.py +31 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/graph_gradual_typechecker.py +914 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__init__.py +0 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_transformation.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/operation.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/transform_to_z3.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/z3_types.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py +557 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_generator.py +1279 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py +1040 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/operation.py +14 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py +348 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/util.py +52 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/z3_types.py +29 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/normalize.py +162 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/__init__.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/core.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/dispatch.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/match.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/more.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/unification_tools.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/utils.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/core.py +118 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__init__.py +3 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/__init__.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/conflict.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/torch/fx/__init__.pyi ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ._symbolic_trace import (
2
+ symbolic_trace as symbolic_trace,
3
+ Tracer as Tracer,
4
+ wrap as wrap,
5
+ )
6
+ from .graph import Graph as Graph
7
+ from .graph_module import GraphModule as GraphModule
8
+ from .interpreter import Interpreter as Interpreter, Transformer as Transformer
9
+ from .node import has_side_effect as has_side_effect, map_arg as map_arg, Node as Node
10
+ from .proxy import Proxy as Proxy
11
+ from .subgraph_rewriter import replace_pattern as replace_pattern
llmeval-env/lib/python3.10/site-packages/torch/fx/config.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Whether to disable showing progress on compilation passes
2
+ # Need to add a new config otherwise wil get a circular import if dynamo config is imported here
3
+ disable_progress = True
4
+
5
+ # If True this also shows the node names in each pass, for small models this is great but larger models it's quite noisy
6
+ verbose_progress = False
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_backward_state.cpython-310.pyc ADDED
Binary file (1.35 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_config.cpython-310.pyc ADDED
Binary file (1.17 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc ADDED
Binary file (29.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/const_fold.cpython-310.pyc ADDED
Binary file (6.78 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/debug.cpython-310.pyc ADDED
Binary file (1.23 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/graph_gradual_typechecker.cpython-310.pyc ADDED
Binary file (25.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/merge_matmul.cpython-310.pyc ADDED
Binary file (4.53 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc ADDED
Binary file (9.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/normalize.cpython-310.pyc ADDED
Binary file (5.16 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/optimization.cpython-310.pyc ADDED
Binary file (14.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/refinement_types.cpython-310.pyc ADDED
Binary file (937 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc ADDED
Binary file (4.87 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/sym_node.cpython-310.pyc ADDED
Binary file (30.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc ADDED
Binary file (112 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/unify_refinements.cpython-310.pyc ADDED
Binary file (2.93 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/validator.cpython-310.pyc ADDED
Binary file (20.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/_sym_dispatch_mode.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional, Type
2
+
3
+ __all__ = ["SymDispatchMode", "handle_sym_dispatch", "sym_function_mode"]
4
+
5
+ SYM_FUNCTION_MODE: Optional["SymDispatchMode"] = None
6
+
7
+
8
+ # SymDispatchMode gets invoked whenever an operation is processed on
9
+ # a PySymInt. When this occurs, you get called at __sym_dispatch__
10
+ # with the operation in question. This is symmetric to TorchDispatchMode
11
+ # but with some caveats:
12
+ #
13
+ # - In TorchDispatchMode, you get the same arguments as what a user
14
+ # invoked your API with; e.g., if you call torch.ops.aten.foo(a, b),
15
+ # you get (a, b) as args to your call. In SymDispatchMode, if
16
+ # you call a + b (where a and b are SymInts), you will get
17
+ # (a.node, b.node) as your args (these are PySymInts)
18
+ #
19
+ # - SymInt/PySymInt don't have FX proxy support (unlike, e.g., Tensor).
20
+ # So you have to manually call Tracer/create_node to write into
21
+ # the graph. See ProxySymDispatchMode for an example
22
+ #
23
+ class SymDispatchMode:
24
+ def __sym_dispatch__(self, func, types, args, kwargs):
25
+ raise NotImplementedError()
26
+
27
+ def __enter__(self):
28
+ global SYM_FUNCTION_MODE
29
+ old = SYM_FUNCTION_MODE
30
+ if hasattr(self, "inner"):
31
+ raise RuntimeError(
32
+ f"{self} has already been used as a mode. Please use a fresh version"
33
+ )
34
+ else:
35
+ self.inner = old
36
+ SYM_FUNCTION_MODE = self
37
+ return self
38
+
39
+ def __exit__(self, exc_type, exc_val, exc_tb):
40
+ global SYM_FUNCTION_MODE
41
+ SYM_FUNCTION_MODE = self.inner
42
+
43
+
44
+ def handle_sym_dispatch(func, args, kwargs):
45
+ global SYM_FUNCTION_MODE
46
+ mode = sym_function_mode()
47
+ assert mode
48
+ SYM_FUNCTION_MODE = mode.inner
49
+ try:
50
+ # TODO: properly compute types
51
+ types: List[Type] = []
52
+ return mode.__sym_dispatch__(func, types, args, kwargs)
53
+ finally:
54
+ SYM_FUNCTION_MODE = mode
55
+
56
+
57
+ def sym_function_mode():
58
+ return SYM_FUNCTION_MODE
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/debug.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch.fx as fx
2
+
3
+ def set_trace(gm: fx.GraphModule) -> fx.GraphModule:
4
+ """
5
+ Sets a breakpoint in `gm`'s generated python code. It drops into pdb when
6
+ `gm` gets run.
7
+
8
+ Args:
9
+ gm: graph module to insert breakpoint. It is then recompiled for it to
10
+ take effect.
11
+
12
+ Returns:
13
+ the `gm` with breakpoint inserted.
14
+ """
15
+ def insert_pdb(body):
16
+ return ["import pdb; pdb.set_trace()\n", *body]
17
+
18
+ with gm.graph.on_generate_code(
19
+ make_transformer=lambda cur_transform: (
20
+ # new code transformer to register
21
+ lambda body: (
22
+ insert_pdb(
23
+ cur_transform(body) if cur_transform
24
+ else body
25
+ )
26
+ )
27
+ )
28
+ ):
29
+ gm.recompile()
30
+
31
+ return gm
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/graph_gradual_typechecker.py ADDED
@@ -0,0 +1,914 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import reduce
2
+ import torch
3
+ import operator
4
+ from torch.fx.tensor_type import Dyn, is_consistent, TensorType, is_more_precise
5
+ from typing import Callable, Dict
6
+ from torch.fx.node import Target, Node
7
+ from torch.nn.modules.batchnorm import BatchNorm2d
8
+ from torch.nn.modules.conv import Conv2d
9
+ from torch.fx.experimental.refinement_types import Equality
10
+ import itertools
11
+
12
+ from torch.fx.experimental.unification import Var # type: ignore[attr-defined]
13
+
14
+ import sympy
15
+
16
+ _INFERENCE_RULES: Dict[Target, Callable] = {}
17
+ _REFINEMENT_RULES: Dict[Target, Callable] = {}
18
+ _RULES: Dict[Target, Callable] = {}
19
+
20
+
21
+ def expand_to_tensor_dim(t, n):
22
+ """
23
+ Expand a type to the desired tensor dimension if possible
24
+ Raise an error otherwise.
25
+ - t is the given type
26
+ - n is a number of dimensions to expand to
27
+ """
28
+ if t == Dyn:
29
+ dims = [Dyn] * n
30
+ return TensorType(tuple(dims))
31
+ elif isinstance(t, TensorType):
32
+ if len(t.__args__) != n:
33
+ raise TypeError(f'Cannot extend tensor. Tensor {t} has rank {len(t.__args__)}. It should have rank {n}')
34
+ return t
35
+ else:
36
+ raise TypeError(f'Cannot match the type {t}')
37
+
38
+
39
+ def broadcast_types(t1, t2):
40
+ """
41
+ Applies broadcasting to both given types such that they
42
+ become consistent with eachother and returns two new
43
+ resulting types
44
+ """
45
+
46
+ # if either type is Dyn, do nothing since the types are already consistent
47
+ if t1 == Dyn or t2 == Dyn or isinstance(t1, Var) or isinstance(t2, Var):
48
+ return t1, t2
49
+
50
+ if isinstance(t1, TensorType) and isinstance(t2, TensorType):
51
+ s1 = len(t1.__args__)
52
+ s2 = len(t2.__args__)
53
+
54
+ new_t1 = list(t1.__args__)
55
+ new_t2 = list(t2.__args__)
56
+
57
+ # We make the types the same length which is the first requirement
58
+ # for consistency
59
+ if s1 > s2:
60
+ for i in range(s1 - s2):
61
+ new_t2.insert(0, 1)
62
+
63
+ elif s2 > s1:
64
+ for i in range(s2 - s1):
65
+ new_t1.insert(0, 1)
66
+
67
+ # we replace occurrences of "1" with each tensor with
68
+ # the corresponding type from the other tensor
69
+ for i, (x, y) in enumerate(zip(new_t1, new_t2)):
70
+ if x == 1:
71
+ new_t1[i] = y
72
+ elif y == 1:
73
+ new_t2[i] = x
74
+
75
+ # at this point our tensors should be consistent
76
+ # and we can apply the element-wise operation and find the right dimension
77
+ # for the output of the operation
78
+ (t1, t2) = TensorType(tuple(new_t1)), TensorType(tuple(new_t2))
79
+ return (t1, t2)
80
+ else:
81
+ raise TypeError(f'Cannot broadcast types {t1} and {t2}')
82
+
83
+ def register_inference_rule(call_target):
84
+ def register(fn):
85
+ if call_target in _INFERENCE_RULES:
86
+ raise RuntimeError(f'Inference rule already registered for {call_target}!')
87
+ _INFERENCE_RULES[call_target] = fn
88
+ return fn
89
+ return register
90
+
91
+ def register_refinement_rule(call_target):
92
+ def register(fn):
93
+ if call_target in _REFINEMENT_RULES:
94
+ raise RuntimeError(f'Refinement rule already registered for {call_target}!')
95
+ _REFINEMENT_RULES[call_target] = fn
96
+ return fn
97
+ return register
98
+
99
+ def register_algebraic_expressions_inference_rule(call_target):
100
+ def register(fn):
101
+ if call_target in _RULES:
102
+ raise RuntimeError(f'Rule already registered for {call_target}!')
103
+ _RULES[call_target] = fn
104
+ return fn
105
+ return register
106
+
107
+ @register_inference_rule(torch.add)
108
+ @register_inference_rule(operator.add)
109
+ def add_inference_rule(n: Node):
110
+ """
111
+ Apply the addition inference rule. This includes:
112
+ - scalar addition
113
+ - broadcasting semantics
114
+
115
+ Note that we always return the least precise type between
116
+ the operands (after applying broadcasting) to be the final type of the operation
117
+
118
+ Note that we do not modify the operand types themselves after applying broadcasting
119
+ to them. We only use them to calculate the final type
120
+ """
121
+ assert isinstance(n.args[0], Node)
122
+ assert isinstance(n.args[1], Node)
123
+ t1 = n.args[0].type
124
+ t2 = n.args[1].type
125
+
126
+ # handle scalar addition
127
+ if t1 == int and isinstance(t2, TensorType):
128
+ n.type = t2
129
+ return n.type
130
+
131
+ # handle scalar addition
132
+ elif t2 == int and isinstance(t1, TensorType):
133
+ n.type = t1
134
+ return n.type
135
+
136
+ # we bring the new types to the point where
137
+ # we can check for consistency
138
+ # any inconsistency would not have been caused
139
+ # by broadcasting at this point
140
+ (new_t1, new_t2) = broadcast_types(t1, t2)
141
+
142
+ if new_t1 != t1 or new_t2 != t2:
143
+ n.meta['broadcast'] = True
144
+ n.meta[str(n.args[0])] = new_t1
145
+ n.meta[str(n.args[1])] = new_t2
146
+
147
+ else:
148
+ n.meta['broadcast'] = False
149
+
150
+ new_t1 = t1 if not n.meta['broadcast'] else new_t1
151
+ new_t2 = t2 if not n.meta['broadcast'] else new_t2
152
+
153
+ # we check for consistency between the new types
154
+ if is_consistent(new_t1, new_t2):
155
+ # we return the less precise type because
156
+ # broadcasting may have happened
157
+ # for operands with shape [1,2,Dyn] and [1,2,1]
158
+ # we have to assign the node [1,2,Dyn]
159
+ if is_more_precise(new_t1, new_t2):
160
+ n.type = new_t2
161
+ else:
162
+ n.type = new_t1
163
+ return n.type
164
+ else:
165
+ raise TypeError(f'Cannot add arguments {n.args[0]} ({ n.args[0].type}) and {n.args[1]} ({ n.args[1].type}) in node {n}.'
166
+ f' Types should match ')
167
+
168
+ @register_inference_rule(getattr)
169
+ def get_attr_inference_rule(n: Node, traced):
170
+ """
171
+ The current getattr rule only handles the shape attribute
172
+ Can be extended to other attributes
173
+ The most representitive type we have is "Dyn" but the system
174
+ can be extended with more types, such as a type to represent shapes
175
+ """
176
+ attr_node = n.args[0]
177
+ attr_name = n.args[1]
178
+
179
+ if attr_name == "shape":
180
+ n.type = Dyn
181
+ else:
182
+ raise TypeError("Not yet implemented")
183
+
184
+ # TODO. We leave it like this till we add a type to represent tensor sizes
185
+ return n.type
186
+
187
+ @register_inference_rule(torch.transpose)
188
+ def transpose_inference_rule(n: Node):
189
+ """
190
+ We check that dimensions for the transpose operations
191
+ are within range of the tensor type of the node
192
+ """
193
+ if n.target == torch.transpose:
194
+ assert isinstance(n.args[0], Node)
195
+ t = n.args[0].type
196
+
197
+ assert isinstance(n.args[1], int)
198
+ assert isinstance(n.args[2], int)
199
+ dim1, dim2 = n.args[1], n.args[2]
200
+
201
+ if t == Dyn:
202
+ n.type = Dyn
203
+ return n.type
204
+
205
+ elif isinstance(t, TensorType):
206
+ if 0 <= dim1 < len(t.__args__) and 0 <= dim2 < len(t.__args__):
207
+ new_type = list(t.__args__)
208
+ new_type[dim1], new_type[dim2] = new_type[dim2], new_type[dim1]
209
+ final = TensorType(new_type)
210
+ n.type = get_greatest_upper_bound(n.type, final)
211
+ return n.type
212
+ else:
213
+ raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')
214
+ else:
215
+ raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')
216
+
217
+
218
+ @register_inference_rule(torch.reshape)
219
+ def reshape_inference_rule(n: Node):
220
+ """
221
+ Without dynamism, the rule checks that the
222
+ product of the elements of the argument tensor
223
+ type is equal to the product of the elements
224
+ of the required shape. We gradualize this rule
225
+ by adding a case to handle fully dynamic input
226
+ as well as input where some of the tensor dimensions
227
+ are unknown. In this case we check for divisibility
228
+ """
229
+ assert isinstance(n.args[0], Node)
230
+ t1 = n.args[0].type
231
+
232
+ assert isinstance(n.args[1], list)
233
+ t2 = n.args[1]
234
+ t2_type = TensorType([Dyn if elem == -1 else elem for elem in t2])
235
+
236
+ # if we do not know the original tensor dimension,
237
+ # we return the required dimension
238
+ if t1 == Dyn:
239
+ n.type = t2_type
240
+ return t2_type
241
+
242
+ # if any of the dimensions are unknown,
243
+ # we check for divisibility
244
+ elif isinstance(t1, TensorType):
245
+ assert isinstance(t1, TensorType)
246
+ a = [e if e != Dyn else 1 for e in t1.__args__]
247
+ p1 = reduce(operator.mul, a)
248
+ p2 = reduce(operator.mul, t2)
249
+ if p1 % p2 == 0 or p2 % p1 == 0:
250
+ n.type = t2_type
251
+ return t2_type
252
+ else:
253
+ raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')
254
+ else:
255
+ raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')
256
+
257
+ @register_inference_rule(BatchNorm2d)
258
+ def bn2d_inference_rule(n: Node, module_instance):
259
+ """
260
+ Given a BatchNorm2D instance and a node check the following conditions:
261
+ - the input type can be expanded to a size 4 tensor: t = (x_1, x_2, x_3, x_4)
262
+ - the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4')
263
+ - t is consistent with t'
264
+ - x_2 is consistent with the module's num_features
265
+ - x_2' is consistent with the module's num_features
266
+ output type: the more precise type of t and t'
267
+ """
268
+ assert isinstance(n.args[0], Node)
269
+ n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)
270
+ arg_type = n.args[0].type
271
+ n.type = expand_to_tensor_dim(n.type, 4)
272
+
273
+ # we check the conditions on the incoming argument
274
+ # and any existing annotation
275
+ # we also check for consistency between both annotations
276
+ if is_consistent(arg_type.__args__[1], module_instance.num_features) and \
277
+ is_consistent(n.type.__args__[1], module_instance.num_features) and \
278
+ is_consistent(arg_type, n.type):
279
+
280
+ # we choose the more precise type
281
+ # to be the node type
282
+ # so if an incoming argument has more type information
283
+ # we set this node's type to be the argument type
284
+ n.type = get_greatest_upper_bound(arg_type, n.type)
285
+ return n.type
286
+ else:
287
+ raise TypeError(f'Cannot apply {module_instance} with input type {arg_type} and existing type {n.type} on {n}')
288
+
289
+
290
+ def calculate_out_dimension(d_in, module_instance, index):
291
+ """
292
+ For calculating h_in and w_out according to the conv2D documentation
293
+ """
294
+ padding = (module_instance.padding, module_instance.padding) \
295
+ if isinstance(module_instance.padding, int) else module_instance.padding
296
+ kernel_size = (module_instance.kernel_size, module_instance.kernel_size) \
297
+ if isinstance(module_instance.kernel_size, int) else module_instance.kernel_size
298
+ stride = (module_instance.stride, module_instance.stride) \
299
+ if isinstance(module_instance.stride, int) else module_instance.stride
300
+ dilation = (module_instance.dilation, module_instance.dilation) \
301
+ if isinstance(module_instance.dilation, int) else module_instance.dilation
302
+
303
+ DIMENSION_TYPES = (int, sympy.Symbol)
304
+
305
+ if d_in == Dyn:
306
+ return Dyn
307
+
308
+ elif isinstance(d_in, DIMENSION_TYPES):
309
+ n = d_in + 2 * padding[index] - \
310
+ dilation[index] * \
311
+ (kernel_size[index] - 1) - 1
312
+
313
+ return (n // stride[0]) + 1
314
+
315
+ else:
316
+ raise TypeError(f'{d_in} in {module_instance} must be a number or Dyn. Received {type(d_in)}')
317
+
318
+
319
+ def get_greatest_upper_bound(type1, type2):
320
+ """
321
+ Get the most precise type that's consistent with the given types
322
+ """
323
+ if type1 == Dyn:
324
+ return type2
325
+ elif type2 == Dyn:
326
+ return type1
327
+ elif isinstance(type1, TensorType) and isinstance(type2, TensorType):
328
+ if not is_consistent(type1, type2):
329
+ raise TypeError(f'Inconsistent types {type1}, {type2}')
330
+ gub = [t1 if is_more_precise(t1, t2) else t2 for (t1, t2) in zip(type1.__args__, type2.__args__)]
331
+ return TensorType(tuple(gub))
332
+
333
+
334
+ @register_inference_rule(Conv2d)
335
+ def conv2d_inference_rule(n: Node, module_instance):
336
+ """
337
+ Given a Conv2D instance and a node check the following conditions:
338
+ - the input type can be expanded to a size 4 tensor: t = (x_1, x_2, H, W)
339
+ - the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4')
340
+ - x_2 is consistent with the module's in_channels
341
+ - let o = (x_1, out_channels, H_out, W_out)
342
+ then the output is the greatest upper bound of o and the existing node type t'.
343
+ """
344
+ assert isinstance(n.args[0], Node)
345
+ n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)
346
+ arg_type = n.args[0].type
347
+ curr_node_type = expand_to_tensor_dim(n.type, 4)
348
+
349
+ if is_consistent(arg_type.__args__[1], module_instance.in_channels):
350
+ w_in = arg_type.__args__[3]
351
+ h_in = arg_type.__args__[2]
352
+ h_out = calculate_out_dimension(h_in, module_instance, 0)
353
+ w_out = calculate_out_dimension(w_in, module_instance, 1)
354
+ new_type = TensorType((arg_type.__args__[0], module_instance.out_channels, h_out, w_out))
355
+ gub = get_greatest_upper_bound(new_type, curr_node_type)
356
+ n.type = gub
357
+ return n.type
358
+ else:
359
+ raise TypeError(f'Cannot apply {module_instance} with input type { arg_type} and existing type {n.type} on {n}')
360
+
361
+
362
+ @register_inference_rule(torch.nn.ReLU)
363
+ def relu_inference_rule(n: Node, module_instance):
364
+ """
365
+ Input and output shapes should be equal.
366
+ """
367
+ assert isinstance(n.args[0], Node)
368
+
369
+ if n.args[0].type == Dyn and isinstance(n.type, TensorType):
370
+ n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
371
+
372
+ if isinstance(n.args[0].type, TensorType):
373
+ n.type = get_greatest_upper_bound(n.args[0].type, n.type)
374
+ return n.type
375
+
376
+
377
+ def maxpool2d_check(typ, module_instance):
378
+ """
379
+ Applies the maxpool2d shape information to the input
380
+ this affects the last two dimensions
381
+ """
382
+ new_type_list = list(typ.__args__)
383
+ if len(new_type_list) == 4 or len(new_type_list) == 3:
384
+ w_in = new_type_list[-1]
385
+ h_in = new_type_list[-2]
386
+
387
+ h_out = calculate_out_dimension(h_in, module_instance, 0)
388
+ w_out = calculate_out_dimension(w_in, module_instance, 1)
389
+
390
+ new_type_list[-1] = w_out
391
+ new_type_list[-2] = h_out
392
+ return TensorType(tuple(new_type_list))
393
+
394
+ else:
395
+ raise TypeError(f'Wrong size {typ} for {module_instance}')
396
+
397
+
398
+ @register_inference_rule(torch.nn.MaxPool2d)
399
+ def maxpool2d_inference_rule(n: Node, module_instance):
400
+ """
401
+ Given a MaxPool2D instance and a node check the following conditions:
402
+ - Input size matches size 3 or 4
403
+ - Current node type is consistent with the output type we will calculate
404
+ - Input size matches output size and the last two dimensions of the output
405
+ are w_out and h_out. The remaining dimensions are the same as the input
406
+ - Our final result is the greatest upper bound of the output we calculate
407
+ and the current node type.
408
+ """
409
+ assert isinstance(n.args[0], Node)
410
+
411
+ if n.args[0].type == Dyn and isinstance(n.type, TensorType):
412
+ n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
413
+ if isinstance(n.args[0].type, TensorType):
414
+ output = maxpool2d_check(n.args[0].type, module_instance)
415
+ n.type = get_greatest_upper_bound(output, n.type)
416
+ return n.type
417
+
418
+
419
+
420
+ def linear_check(tensor_type, module_instance):
421
+ """
422
+ Checks that an input tensor type satisfies the conditions for linear operation
423
+ and returns the output type based on in and out features given by module_instance
424
+ """
425
+ if len(tensor_type.__args__) >= 2:
426
+ if is_consistent(module_instance.in_features, tensor_type.__args__[-1]):
427
+ new_type_args = list(tensor_type.__args__)
428
+ new_type_args[-1] = module_instance.out_features
429
+ return TensorType(tuple(new_type_args))
430
+ else:
431
+ raise TypeError(f'Inconsistent {module_instance.in_features} and {tensor_type.__args__[-1]} in {module_instance}')
432
+ else:
433
+ raise TypeError(f'Type {tensor_type} must have rank 2 or more.')
434
+
435
+
436
+ @register_inference_rule(torch.nn.Linear)
437
+ def linear_inference_rule(n: Node, module_instance):
438
+ """
439
+ Applies the shape information to the input then gets the greatest upper bound
440
+ of the resulting type and the existing type
441
+ """
442
+ assert isinstance(n.args[0], Node)
443
+ if n.args[0].type == Dyn and isinstance(n.type, TensorType):
444
+ n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
445
+ if isinstance(n.args[0].type, TensorType):
446
+ output_type = linear_check(n.args[0].type, module_instance)
447
+ n.type = get_greatest_upper_bound(output_type, n.type)
448
+ return n.type
449
+
450
+
451
def adaptiveavgpool2d_check(tensor_type, module_instance):
    """
    Compute the output type of an ``AdaptiveAvgPool2d`` application.

    The input must have rank 3 or 4; the last two dimensions are replaced
    by ``module_instance.output_size`` (an int means a square output, a
    ``None`` entry in a tuple means "same as the other entry").

    Raises:
        TypeError: when the input rank is neither 3 nor 4.
    """
    target = module_instance.output_size
    if isinstance(target, int):
        target = [target, target]
    elif isinstance(target, tuple):
        target = list(target)
        # None entries mirror the other dimension
        if target[0] is None:
            target[0] = target[1]
        if target[1] is None:
            target[1] = target[0]

    dims = list(tensor_type.__args__)

    if len(dims) not in (3, 4):
        raise TypeError(f'Tensor ranks must be 3 or 4. Got {tensor_type}')

    # only height and width change; batch/channel dims are preserved
    dims[-2] = target[0]
    dims[-1] = target[1]
    return TensorType(tuple(dims))
472
+
473
@register_inference_rule(torch.nn.AdaptiveAvgPool2d)
def adaptiveavgpool2d_inference_rule(n: Node, module_instance):
    """
    The input and output sizes should be the same except for the last
    two dimensions taken from the input, which represent width and height.

    Args:
        n: fx node calling an ``AdaptiveAvgPool2d`` module
        module_instance: the pooling module instance

    Returns:
        the (possibly updated) type stored on ``n``
    """
    assert isinstance(n.args[0], Node)
    # expand a fully dynamic input to the rank of the annotated output first
    if n.args[0].type == Dyn and isinstance(n.type, TensorType):
        n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
    if isinstance(n.args[0].type, TensorType):
        output_type = adaptiveavgpool2d_check(n.args[0].type, module_instance)
        n.type = get_greatest_upper_bound(n.type, output_type)
    return n.type
486
+
487
def flatten_check(tensor_type, start_dim, end_dim):
    """
    Compute the type resulting from flattening ``tensor_type`` between
    ``start_dim`` and ``end_dim`` (inclusive, ``torch.flatten`` semantics).

    Dimensions inside the flattened range are multiplied together; if any
    of them is ``Dyn`` the collapsed dimension is ``Dyn``.

    Raises:
        TypeError: when the normalized dimensions are out of range.

    NOTE(review): ``abs(start_dim)`` maps e.g. -2 to 2 rather than rank-2;
    preserved as-is from the original behavior.
    """
    rank = len(tensor_type.__args__)

    # normalize negative indices to the [0, rank] range
    start_dim = rank if start_dim == -1 else abs(start_dim)
    end_dim = rank + end_dim + 1 if end_dim < 0 else end_dim + 1

    if not (0 <= start_dim <= (rank - 1) and 0 <= end_dim <= rank and start_dim < end_dim):
        raise TypeError(f'Incompatible dimensions {start_dim}, {end_dim - 1} in type {tensor_type}')

    dims = list(tensor_type.__args__)
    flattened_region = dims[start_dim:end_dim]
    if Dyn in flattened_region:
        collapsed = [Dyn]
    else:
        collapsed = [reduce(operator.mul, flattened_region)]
    return TensorType(tuple(dims[0:start_dim] + collapsed + dims[end_dim:]))
506
+
507
@register_inference_rule(torch.flatten)
def flatten_inference_rule(n: Node):
    """
    Applies the flatten shape information to the input then gets the
    greatest upper bound of the resulting type and the existing type.

    Optional positional args 1 and 2 of the call are ``start_dim`` and
    ``end_dim``; they default to 1 and -1 as in ``torch.flatten``.
    """
    assert isinstance(n.args[0], Node)

    # set the default start and end dims
    start_dim = 1
    end_dim = -1

    if len(n.args) > 1:
        assert isinstance(n.args[1], int)
        start_dim = n.args[1]

    if len(n.args) > 2:
        assert isinstance(n.args[2], int)
        end_dim = n.args[2]

    # a fully dynamic input is expanded to the rank of the annotated output
    if n.args[0].type == Dyn and isinstance(n.type, TensorType):
        n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))

    if isinstance(n.args[0].type, TensorType):
        output_type = flatten_check(n.args[0].type, start_dim, end_dim)
        n.type = get_greatest_upper_bound(output_type , n.type)

    return n.type
535
+
536
class GraphTypeChecker:
    """Gradual type checker that annotates every node of a traced graph."""

    def __init__(self, env, traced):
        # env: type environment handed in by the caller
        self.env = env
        # traced: fx GraphModule whose nodes will be type-checked in place
        self.traced = traced

    def type_check(self):
        """
        A gradual type checker for graphs
        Effect: every node's field type will be
        populated with a type after type-checking is done
        """
        graph = self.traced.graph

        # type check every node with gradual type rules
        # if any node does not type check return false
        for n in graph.nodes:
            self.type_check_node(n)
        return True

    def type_check_node(self, n: Node):
        """
        Type check a given fx node.
        Current operations:
        - Reshape
        - Transpose
        - Add
        - Relu
        - conv2d
        - batchnorm2d
        - flatten
        - maxpool2d
        - adaptiveavgpool2d
        - linear
        """
        # untyped nodes start at the dynamic type
        if n.type is None:
            n.type = Dyn

        if n.op == 'placeholder':
            return n.type

        elif n.op == 'get_attr':
            t = get_parameter(self.traced, n.target)  # type: ignore[arg-type]
            if isinstance(t.data, torch.Tensor):
                n.type = TensorType(t.data.shape)
            return n.type

        elif n.op == 'call_function':
            # getattr's rule additionally needs access to the traced module
            if n.target == getattr:
                assert getattr in _INFERENCE_RULES
                return _INFERENCE_RULES[n.target](n, self.traced)

            elif n.target in _INFERENCE_RULES:
                return _INFERENCE_RULES[n.target](n)
            else:
                raise RuntimeError(f'No inference rule registered for target {n.target}!')

        elif n.op == 'call_module':
            module_instance = self.traced.get_submodule(n.target)
            if type(module_instance) in _INFERENCE_RULES:
                return _INFERENCE_RULES[type(module_instance)](n, module_instance)
            else:
                raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')

        elif n.op == 'output':
            # the output node's type mirrors the types of its producers
            def get_node_type(a):
                return a.type
            n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
            return n.type

        else:
            raise NotImplementedError(f"Method {n.op} not yet implemented")
607
+
608
+
609
@register_refinement_rule(Conv2d)
def conv_refinement_rule(n: Node):
    """
    The equality constraints are between the first dimension of
    the input and output (the batch dimension is preserved by conv2d).
    """
    res = []
    assert isinstance(n.args[0], Node)
    arg_type = n.args[0].type
    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
        res = [Equality(arg_type.__args__[0], n.type.__args__[0])]
    return res
621
+
622
+
623
@register_refinement_rule(torch.nn.Linear)
def linear_refinement_rule(n: Node):
    """
    The equality constraints are between the first dimension of
    the input and output (linear only changes the last dimension).
    """
    res = []
    assert isinstance(n.args[0], Node)
    arg_type = n.args[0].type
    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
        res = [Equality(arg_type.__args__[0], n.type.__args__[0])]
    return res
635
+
636
@register_refinement_rule(BatchNorm2d)
@register_refinement_rule(torch.nn.ReLU)
def all_eq(n: Node):
    """
    For operations where the input shape is equal to the output shape:
    emits one equality constraint per dimension.
    """
    res = []
    assert isinstance(n.args[0], Node)
    arg_type = n.args[0].type
    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
        args1 = arg_type.__args__
        args2 = n.type.__args__
        res = [Equality(args1[i], args2[i]) for i in range(len(args1))]
    return res
650
+
651
+
652
@register_refinement_rule(torch.nn.AdaptiveAvgPool2d)
@register_refinement_rule(torch.nn.MaxPool2d)
def first_two_eq(n: Node):
    """
    For operations where the first two dimensions of the input and output shape
    are equal (batch and channel are preserved; height/width may change).
    """
    res = []
    assert isinstance(n.args[0], Node)
    arg_type = n.args[0].type
    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
        args1 = arg_type.__args__
        args2 = n.type.__args__
        res = [Equality(args1[0], args2[0]), Equality(args1[1], args2[1])]
    return res
667
+
668
+
669
@register_refinement_rule(torch.add)
@register_refinement_rule(operator.add)
def element_wise_eq(n: Node):
    """
    For element-wise operations and handles broadcasting.
    Note that after applying broadcasting to the arguments
    we are able to determine if certain dimensions have not been broadcast
    if they are symbolically equal.

    In this case, we can establish equality between those dimensions and the
    corresponding output dimensions.

    Note that it takes two iterations for this result. One iteration to establish
    equality between certain dimensions of the operands (requiring the whole solver
    including unification) and another iteration to establish equality between the operands
    and the resulting type, requiring another round of constraint generation and unification.
    """
    res = []
    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        arg_type1 = n.args[0].type
        arg_type2 = n.args[1].type
        if isinstance(arg_type1, TensorType) and isinstance(arg_type2, TensorType) and isinstance(n.type, TensorType):
            args1, args2 = broadcast_types(arg_type1, arg_type2)
            # by this point, we know that args1 and args2 are the same size.
            a1 = args1.__args__
            a2 = args2.__args__
            a3 = n.type.__args__

            # we would be here in the second iteration where we establish equality
            # between operand type dimensions and the resulting type dimensions
            r = []
            for x, y, z in zip(a1, a2, a3):
                if x == y:
                    r.append(Equality(x, z))
            res = r
    return res
705
+
706
+
707
@register_refinement_rule(torch.flatten)
def flatten_refinement_rule(n: Node):
    """
    Generates equality constraints between the dimensions of the input and output
    that will not be involved in the flatten operation (those before
    ``start_dim`` and after ``end_dim``).
    """
    assert isinstance(n.args[0], Node)

    eq_const = []

    # defaults match torch.flatten
    start_dim = 1
    end_dim = -1

    if len(n.args) > 1:
        assert isinstance(n.args[1], int)
        start_dim = n.args[1]

    if len(n.args) > 2:
        assert isinstance(n.args[2], int)
        end_dim = n.args[2]

    if isinstance(n.type, TensorType) and isinstance(n.args[0].type, TensorType):
        l = len(n.type.__args__)
        arg_type = n.args[0].type
        # normalize negative dims against the output rank
        start_dim = l if start_dim == -1 else start_dim
        end_dim = l + end_dim + 1 if end_dim < 0 else end_dim + 1

        for t1, t2 in zip(n.type.__args__[0:start_dim], arg_type.__args__[0:start_dim]):
            eq_const.append(Equality(t1, t2))

        for t1, t2 in zip(n.type.__args__[end_dim:], arg_type.__args__[end_dim:]):
            eq_const.append(Equality(t1, t2))
    return eq_const
740
+
741
+
742
@register_algebraic_expressions_inference_rule(Conv2d)
def conv_rule(n: Node, module_instance):
    """
    Represents the output in terms of an algebraic expression w.r.t
    the input when possible.
    """
    assert isinstance(n.args[0], Node)
    arg_type = n.args[0].type
    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
        # NCHW layout: index 2 is height, index 3 is width
        w_in = arg_type.__args__[3]
        h_in = arg_type.__args__[2]
        h_out = calculate_out_dimension(h_in, module_instance, 0)
        w_out = calculate_out_dimension(w_in, module_instance, 1)
        new_type = TensorType((n.type.__args__[0], n.type.__args__[1], h_out, w_out))
        n.type = new_type
        return new_type
758
+
759
class Refine:
    """
    Symbolic shape inference.
    Generates constraints over type variables.
    Currently all constraints are equality constraints.
    """
    def __init__(self, traced):
        # accumulated Equality constraints produced by refine()
        self.constraints = []
        # traced fx GraphModule whose node types are refined in place
        self.traced = traced
        # source of fresh ids for type variables
        self.symbol_iter = itertools.count(start=0, step=1)

    def refine(self):
        """
        Generates constraints for
        every node in the graph based on
        the operation.
        """
        graph = self.traced.graph
        for n in graph.nodes:
            self.refine_node(n)
        return True

    def symbolic_relations(self):
        """
        Infers algebraic relations
        """
        graph = self.traced.graph
        for n in graph.nodes:
            self.infer_symbolic_relations(n)
        return True

    def replace_dyn_with_fresh_var(self, typ):
        """
        Replace all unknown types (Dyn) with fresh type variables.
        """
        if typ == Dyn:
            new_symbol = Var(next(self.symbol_iter))
            return new_symbol
        elif isinstance(typ, TensorType):
            new_args = [self.replace_dyn_with_fresh_var(a) for a in typ.__args__]
            return TensorType(tuple(new_args))
        elif isinstance(typ, list):
            return [self.replace_dyn_with_fresh_var(t) for t in typ]
        elif isinstance(typ, tuple):
            # BUG FIX: previously returned a generator expression instead of a
            # tuple, silently replacing tuple-typed annotations with a one-shot
            # generator object
            return tuple(self.replace_dyn_with_fresh_var(t) for t in typ)
        else:
            return typ

    def convert_to_sympy_symbols(self, typ):
        """
        Replace all type variables with sympy symbols.
        """
        if isinstance(typ, Var):
            return sympy.symbols(str(typ))
        elif isinstance(typ, TensorType):
            new_args = [self.convert_to_sympy_symbols(a) for a in typ.__args__]
            return TensorType(tuple(new_args))
        elif isinstance(typ, list):
            return [self.convert_to_sympy_symbols(t) for t in typ]
        elif isinstance(typ, tuple):
            # BUG FIX: same generator-vs-tuple issue as in
            # replace_dyn_with_fresh_var
            return tuple(self.convert_to_sympy_symbols(t) for t in typ)
        else:
            return typ

    def refine_node(self, n: Node):
        """
        Returns a list of equality constraints for
        call_module and call_function nodes.
        Models the relation between input and output dimensions
        using constraints in case they are both tensors.
        All operations used in resnet50 are defined.
        """
        if n.type is None:
            n.type = Dyn

        n.type = self.replace_dyn_with_fresh_var(n.type)

        if n.op == 'call_function':
            if n.target in _REFINEMENT_RULES:
                self.constraints += _REFINEMENT_RULES[n.target](n)
            else:
                pass

        if n.op == 'call_module':
            module_instance = self.traced.get_submodule(n.target)
            if type(module_instance) in _REFINEMENT_RULES:
                self.constraints += _REFINEMENT_RULES[type(module_instance)](n)
            else:
                pass

        if n.op == 'output':
            def get_node_type(a):
                return a.type
            n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
            return n.type

        else:
            pass

    def infer_symbolic_relations(self, n: Node):
        # rewrite the node's type over sympy symbols, then dispatch to the
        # registered algebraic rule when one exists
        n.type = self.convert_to_sympy_symbols(n.type)
        if n.op == 'call_function':
            if n.target in _RULES:
                return _RULES[n.target](n)
            else:
                pass

        if n.op == 'call_module':
            module_instance = self.traced.get_submodule(n.target)
            if type(module_instance) in _RULES:
                return _RULES[type(module_instance)](n, module_instance)
            else:
                pass

        if n.op == 'output':
            def get_node_type(a):
                return a.type
            n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
            return n.type

        else:
            pass
882
+
883
def get_parameter(traced, target: str):
    """
    Returns the parameter given by ``target`` if it exists,
    otherwise throws an error.

    See the docstring for ``get_submodule`` for a more detailed
    explanation of this method's functionality as well as how to
    correctly specify ``target``.

    Args:
        target: The fully-qualified string name of the Parameter
            to look for. (See ``get_submodule`` for how to specify a
            fully-qualified string.)

    Returns:
        torch.nn.Parameter: The Parameter referenced by ``target``

    Raises:
        AttributeError: If the target string references an invalid
            path or resolves to something that is not an
            ``nn.Parameter``
    """
    # split "a.b.c" into owner path "a.b" and attribute name "c"
    owner_path, _, attr_name = target.rpartition(".")

    owner: torch.nn.Module = traced.get_submodule(owner_path)

    if not hasattr(owner, attr_name):
        raise AttributeError(owner._get_name() + " has no attribute `" + attr_name + "`")

    param: torch.nn.Parameter = getattr(owner, attr_name)

    return param
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (216 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc ADDED
Binary file (17.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc ADDED
Binary file (30.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_transformation.cpython-310.pyc ADDED
Binary file (26.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/operation.cpython-310.pyc ADDED
Binary file (483 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/transform_to_z3.cpython-310.pyc ADDED
Binary file (8.12 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc ADDED
Binary file (1.91 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/z3_types.cpython-310.pyc ADDED
Binary file (723 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py ADDED
@@ -0,0 +1,557 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_sub, op_mul, op_div, \
2
+ op_mod, op_gt, op_lt, op_neq, op_eq
3
+ from torch.fx.tensor_type import TensorType, Dyn
4
+
5
+
6
class Constraint:
    """Base class for all constraint kinds in this module."""
    pass
8
+
9
+
10
class Conj(Constraint):
    """Conjunction (logical AND) of a list of constraints."""

    def __init__(self, conjuncts):
        """
        :param conjuncts: Conjunction of constraints
        """
        # NOTE: the attribute keeps its historical spelling "conjucts"
        # because external code reads it directly.
        self.conjucts = conjuncts

    def __eq__(self, other):
        if isinstance(other, Conj):
            # fixed: the comparison was duplicated ("a == b and a == b")
            return self.conjucts == other.conjucts
        else:
            return False

    def __repr__(self):
        return f'And({self.conjucts})'
25
+
26
+
27
class Disj(Constraint):
    """Disjunction (logical OR) of a list of constraints."""

    def __init__(self, disjuncts):
        """
        :param disjuncts: Disjunction of constraints
        """
        self.disjuncts = disjuncts

    def __eq__(self, other):
        if isinstance(other, Disj):
            # fixed: the comparison was duplicated ("a == b and a == b")
            return self.disjuncts == other.disjuncts
        else:
            return False

    def __repr__(self):
        return f'Or({self.disjuncts})'
42
+
43
+
44
class Prod(Constraint):
    """Product of a list of dimensions."""

    def __init__(self, products):
        """
        :param products: lists of dimensions to multiply
        """
        self.products = products

    def __eq__(self, other):
        if isinstance(other, Prod):
            # fixed: the comparison was duplicated ("a == b and a == b")
            return self.products == other.products
        else:
            return False

    def __repr__(self):
        return f'Product({self.products})'
59
+
60
+
61
class T(Constraint):
    """
    True: the trivially satisfied constraint.
    """
    def __init__(self):
        pass

    def __eq__(self, other):
        # all T instances are interchangeable
        return isinstance(other, T)

    def __repr__(self):
        return 'True'
73
+
74
class F(Constraint):
    """
    False: the unsatisfiable constraint.
    """
    def __init__(self):
        pass

    def __eq__(self, other):
        # all F instances are interchangeable
        return isinstance(other, F)

    def __repr__(self):
        return 'False'
86
+
87
+
88
class BinaryConstraint(Constraint):
    """
    Represents all binary operations
    """
    def __init__(self, lhs, rhs, op):
        """
        :param lhs: lhs of the constraint
        :param rhs: rhs of the constraint
        :param op: string representing the operation
        """
        self.lhs = lhs
        self.rhs = rhs
        self.op = op

    def __eq__(self, other):
        # NOTE: compares against any BinaryConstraint, so a BinConstraintT
        # and a BinConstraintD with identical fields compare equal
        if isinstance(other, BinaryConstraint):
            return self.lhs == other.lhs and self.rhs == other.rhs and self.op == other.op
        else:
            return False

    def __repr__(self):
        return f'({self.lhs} {self.op} {self.rhs})'
110
+
111
+
112
class BinConstraintT(BinaryConstraint):
    """
    Binary constraints about tensors
    """
    def __init__(self, lhs, rhs, op):
        # operands must be tensor variables, tensor types, ints, or Dyn
        assert (isinstance(lhs, (TVar, TensorType, int)) or lhs == Dyn) and \
            (isinstance(rhs, (TVar, TensorType, int)) or rhs == Dyn)
        super().__init__(lhs, rhs, op)

    def __eq__(self, other):
        return super().__eq__(other)
123
+
124
+
125
class BinConstraintD(BinaryConstraint):
    """
    Binary constraints about dimensions
    """
    def __init__(self, lhs, rhs, op):
        # operands must be algebraic expressions, dimensions, or boolean expressions
        assert is_algebraic_expression(lhs) or is_dim(lhs) or is_bool_expr(lhs)
        assert is_algebraic_expression(rhs) or is_dim(rhs) or is_bool_expr(rhs)

        super().__init__(lhs, rhs, op)

    def __eq__(self, other):
        return super().__eq__(other)
137
+
138
+
139
+
140
class TGreatestUpperBound(Constraint):
    """
    Greatest Upper bound for tensors with dynamic type
    """
    def __init__(self, res, rhs1, rhs2):
        """
        :param res: tensor variable that stores the result of the output
        :param rhs1: tensor or tensor variable
        :param rhs2: tensor or tensor variable
        """
        self.res = res
        self.rhs1 = rhs1
        self.rhs2 = rhs2

    def __repr__(self):
        return f'{self.res} = {self.rhs1}⊔*{self.rhs2}'

    def __eq__(self, other):
        if isinstance(other, TGreatestUpperBound):
            return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2
        else:
            return False
162
+
163
+
164
class DGreatestUpperBound(Constraint):
    """
    Greatest Upper bound for dimensions
    """
    def __init__(self, res, rhs1, rhs2):
        """
        :param res: Dimension variable to store the result
        :param rhs1: dimension variable 1
        :param rhs2: dimension variable 2
        """
        # all three operands must be dimensions (DVar, int, or Dyn)
        assert is_dim(res)
        assert is_dim(rhs1)
        assert is_dim(rhs2)

        self.res = res
        self.rhs1 = rhs1
        self.rhs2 = rhs2

    def __repr__(self):
        return f'{self.res} = {self.rhs1}⊔{self.rhs2}'

    def __eq__(self, other):
        if isinstance(other, DGreatestUpperBound):
            return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2
        else:
            return False
190
+
191
+
192
class CanReshape(Constraint):
    """
    can_reshape constraint: holds when ``src`` can be reshaped to ``target``.
    """
    def __init__(self, src, target):
        """
        :param src: tensor variable
        :param target: tensor
        """
        self.src = src
        self.target = target

    def __repr__(self):
        return f'can-reshape({self.src}, {self.target})'

    def __eq__(self, other):
        if isinstance(other, CanReshape):
            return self.src == other.src and self.target == other.target
        else:
            return False
212
+
213
+
214
class IndexSelect(Constraint):
    """Constraint relating the input and output of an index-select at a given rank."""

    def __init__(self, tensor_size, input_var, dim_replace, index, output):
        """
        Args:
            input_var: input to index_select
            tensor_size: tensor size we are considering
            dim_replace: the dimension of the output at "index"
            index: location of the dimensions to replace in the input
            output: variable to store the result
        """
        assert isinstance(input_var, TVar)
        assert isinstance(output, TVar)
        assert isinstance(dim_replace, DVar) or dim_replace == Dyn
        assert isinstance(index, int)

        self.input_var = input_var
        self.tensor_size = tensor_size
        self.dim_replace = dim_replace
        self.index = index
        self.output = output

    def __repr__(self):

        return f' {self.output} = ' \
               f'IndexSelect({self.input_var}, ' \
               f'tensor_size: {self.tensor_size}, ' \
               f'{self.dim_replace}, ' \
               f'{self.index})'

    def __eq__(self, other):
        if isinstance(other, IndexSelect):
            return self.tensor_size == other.tensor_size and \
                self.dim_replace == other.dim_replace and \
                self.index == other.index and \
                self.output == other.output and \
                self.input_var == other.input_var
        else:
            return False
253
+
254
+
255
class Transpose(Constraint):
    """Constraint relating the input and output of a transpose at a given rank."""

    def __init__(self, tensor_size, input_var, index1, index2, output):
        """
        Args:
            tensor_size: current tensor size
            input_var: variable to hold input
            index1: dimension 1
            index2: dimension 2
            output: output that stores result
        """
        assert isinstance(input_var, TVar)
        assert isinstance(output, TVar)
        assert isinstance(index1, int)
        assert isinstance(index2, int)

        self.input_var = input_var
        self.tensor_size = tensor_size
        self.index1 = index1
        self.index2 = index2
        self.output = output

    def __repr__(self):

        return f' {self.output} = ' \
               f'Transpose({self.input_var}, ' \
               f'tensor_size: {self.tensor_size}, ' \
               f'{self.index1}, ' \
               f'{self.index2})'

    def __eq__(self, other):
        if isinstance(other, Transpose):
            return self.tensor_size == other.tensor_size and \
                self.index1 == other.index1 and \
                self.index2 == other.index2 and \
                self.output == other.output and \
                self.input_var == other.input_var
        else:
            return False
294
+
295
+
296
class GetItem(Constraint):
    """Constraint for extracting one dimension of a tensor variable."""

    def __init__(self, tensor_size, index, res, input_var):
        """
        Constraint for getting item given a tensor size
        :param tensor_size: actual number
        :param index: actual number representing the index
        :param res: dimension variable to carry the item we get
        :param input_var: a tensor variable from which we will get item
        """
        assert isinstance(res, DVar)

        self.res = res
        self.tensor_size = tensor_size
        self.index = index
        self.input_var = input_var

    def __repr__(self):
        return f' {self.res} = GetItem({self.input_var}, tensor_size: {self.tensor_size}, {self.index})'

    def __eq__(self, other):
        if isinstance(other, GetItem):
            return self.res == other.res and \
                self.tensor_size == other.tensor_size and \
                self.index == other.index and \
                self.input_var == other.input_var
        else:
            return False
324
+
325
class GetItemTensor(Constraint):
    """Like GetItem, but tuple indexing yields a tensor rather than a dimension."""

    def __init__(self, tensor_size, index_tuple, res, input_var):
        """
        Constraint for getting item given a tensor size
        However, when the argument is a tuple, we will
        expect a tensor
        :param tensor_size: actual number representing the rank
        :param index_tuple: tuple for indexing
        :param res: tensor variable to carry the item we get
        :param input_var: a tensor variable from which we will get item
        """
        assert isinstance(res, TVar)

        self.res = res
        self.tensor_size = tensor_size
        self.index_tuple = index_tuple
        self.input_var = input_var

    def __repr__(self):
        return f' {self.res} = GetItemT({self.input_var}, tensor_size: {self.tensor_size}, {self.index_tuple})'

    def __eq__(self, other):
        if isinstance(other, GetItemTensor):
            return self.res == other.res and \
                self.tensor_size == other.tensor_size and \
                self.index_tuple == other.index_tuple and \
                self.input_var == other.input_var
        else:
            return False
355
+
356
class CalcConv(Constraint):
    """Constraint that computes the result type of a convolution."""

    def __init__(self, conv_result, input_var, c_out, kernel, padding, stride, dilation, matching_constraint_vars):
        """
        :param conv_result: the convolution result
        :param input_var: input to convolution
        :param c_out: output channel type
        :param kernel: kernel tuple
        """
        self.conv_result = conv_result
        self.input_var = input_var
        self.c_out = c_out
        self.kernel = kernel
        self.padding = padding
        self.stride = stride
        self.dilation = dilation
        # note: stored under a different name than the constructor argument
        self.matching_constraint = matching_constraint_vars

    def __repr__(self):
        return f'{self.conv_result} =' \
               f' calc-conv({self.input_var},' \
               f' {self.c_out}, {self.kernel}, ' \
               f'{self.padding}, {self.stride},' \
               f' {self.dilation})'

    def __eq__(self, other):
        if isinstance(other, CalcConv):
            return self.conv_result == other.conv_result and self.input_var == other.input_var and \
                self.c_out == other.c_out and self.kernel == other.kernel and self.padding == other.padding \
                and self.stride == other.stride and self.dilation == other.dilation \
                and self.matching_constraint == other.matching_constraint
        else:
            return False
389
+
390
+
391
class CalcMaxPool(Constraint):
    """Constraint that computes the result type of a max-pool."""

    def __init__(self, maxpool_result, input_var, kernel, padding, stride, dilation, matching_constraint_vars):
        """
        :param maxpool_result: the result of maxpool
        :param input_var: input to convolution
        :param kernel: kernel tuple
        """
        self.maxpool_result = maxpool_result
        self.input_var = input_var
        self.kernel = kernel
        self.padding = padding
        self.stride = stride
        self.dilation = dilation
        # note: stored under a different name than the constructor argument
        self.matching_constraint = matching_constraint_vars

    def __repr__(self):
        return f'{self.maxpool_result} =' \
               f' calc-maxpool({self.input_var},' \
               f' {self.kernel}, ' \
               f'{self.padding}, {self.stride},' \
               f' {self.dilation})'

    def __eq__(self, other):
        if isinstance(other, CalcMaxPool):
            return self.maxpool_result == other.maxpool_result and self.input_var == other.input_var \
                and self.kernel == other.kernel and self.padding == other.padding \
                and self.stride == other.stride and self.dilation == other.dilation \
                and self.matching_constraint == other.matching_constraint
        else:
            return False
422
+
423
+
424
class ApplyBroadcasting(Constraint):
    """Constraint that broadcasts two input tensor variables to common shapes."""

    def __init__(self, res1, res2, input1, input2):
        """
        :param res1: resulting tensor 1
        :param res2: resulting tensor 2
        :param input1: tensor variable 1
        :param input2: tensor variable 2
        """
        self.res1 = res1
        self.res2 = res2
        self.input1 = input1
        self.input2 = input2

    def __eq__(self, other):
        if isinstance(other, ApplyBroadcasting):
            return self.res1 == other.res1 \
                and self.res2 == other.res2 \
                and self.input1 == other.input1 \
                and self.input2 == other.input2
        else:
            return False

    def __repr__(self):
        return f'{self.res1}, {self.res2} ='f' apply-broadcasting({self.input1},' f' {self.input2})'
448
+
449
+
450
class CalcProduct(Constraint):
    """
    Given correct dimensions, calculate the product for flatten accounting for Dyn
    """
    def __init__(self, start, end, flattened, dims_to_flatten):
        """
        :param start: start index
        :param end: end index
        :param flattened: variable to store the product
        :param dims_to_flatten: the type which we will flatten
        """
        assert isinstance(dims_to_flatten, list)
        assert isinstance(flattened, TVar)
        assert isinstance(start, int)
        assert isinstance(end, int)

        self.start = start
        self.end = end
        self.dims_to_flatten = dims_to_flatten
        self.flattened = flattened

    def __eq__(self, other):
        if isinstance(other, CalcProduct):
            return self.start == other.start and self.end == other.end and \
                self.dims_to_flatten == other.dims_to_flatten and self.flattened == other.flattened

        else:
            return False

    def __repr__(self):
        return f'{self.flattened} = CalcProduct({self.start}, {self.end}, {self.dims_to_flatten})'
481
+
482
+
483
class TVar:
    """
    Tensor variable with no tensor constructor
    """
    def __init__(self, tvar):
        """
        :param tvar: tensor variable
        """
        self.tvar = tvar

    def __repr__(self):
        return f'TV({self.tvar})'

    def __eq__(self, other):
        # equal only to another TVar wrapping an equal value
        return self.tvar == other.tvar if isinstance(other, TVar) else False
501
+
502
+
503
class DVar:
    """
    Dimension variable
    """
    def __init__(self, c):
        """
        :param c: character or number
        """
        self.c = c

    def __repr__(self):
        return f'DV({self.c})'

    def __eq__(self, other):
        # equal only to another DVar wrapping an equal value
        return self.c == other.c if isinstance(other, DVar) else False
521
+
522
+
523
class BVar:
    """
    Boolean variable
    """
    def __init__(self, c):
        """
        :param c: character or number
        """
        self.c = c

    def __repr__(self):
        return f'BV({self.c})'

    def __eq__(self, other):
        # equal only to another BVar wrapping an equal value
        return self.c == other.c if isinstance(other, BVar) else False
541
+
542
+
543
def is_algebraic_expression(constraint):
    """True iff ``constraint`` is an arithmetic BinConstraintD (+, -, /, *, %) or a Prod."""
    if isinstance(constraint, BinConstraintD):
        return constraint.op in [op_add, op_sub, op_div, op_mul, op_mod]
    else:
        return isinstance(constraint, Prod)
548
+
549
+
550
def is_bool_expr(constraint):
    """True iff ``constraint`` is a comparison BinConstraintD (>, <, !=, ==) or a BVar/Conj/Disj."""
    if isinstance(constraint, BinConstraintD):
        return constraint.op in [op_gt, op_lt, op_neq, op_eq]
    else:
        return isinstance(constraint, (BVar, Conj, Disj))
555
+
556
def is_dim(d):
    """True iff ``d`` is a dimension: a DVar, a concrete int, or Dyn."""
    return isinstance(d, (DVar, int)) or d == Dyn
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_generator.py ADDED
@@ -0,0 +1,1279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import operator
3
+ import warnings
4
+ from typing import Callable, Dict, Iterable
5
+
6
+ from torch.fx._symbolic_trace import _assert_is_none
7
+ from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, CalcProduct, \
8
+ Disj, TGreatestUpperBound, CalcMaxPool, CalcConv, Conj, BinConstraintT, CanReshape, BinConstraintD, GetItem, T, F, \
9
+ TVar, DVar, GetItemTensor, IndexSelect, Transpose, DGreatestUpperBound
10
+ from torch.fx.experimental.migrate_gradual_types.operation import \
11
+ op_eq, op_matching, op_consistency, op_leq, op_precision, op_gt, op_div, op_sub, op_neq, op_lt, op_add, op_mul
12
+ from torch.fx.node import Target, Node
13
+ from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar, gen_tvar, \
14
+ gen_bvar
15
+
16
+ from torch.fx.tensor_type import Dyn, TensorType
17
+ from torch.nn.modules.conv import Conv2d
18
+ from torch.nn.modules.batchnorm import BatchNorm2d
19
+
20
# Registry mapping an FX call target (function, method name string or module
# class) to the function that generates its type constraints.
_INFERENCE_RULES: Dict[Target, Callable] = {}

# Maximum tensor rank considered when enumerating candidate shapes.
MAX_TENSOR_RANK = 4
23
+
24
def register_inference_rule(call_target):
    """Decorator factory: register the decorated function as the constraint
    generator for *call_target* in ``_INFERENCE_RULES``.

    Raises RuntimeError if a rule for the target already exists.
    """
    def register(fn):
        already_present = call_target in _INFERENCE_RULES
        if already_present:
            # Refuse to silently overwrite an existing rule.
            raise RuntimeError(f'Inference rule already registered for {call_target}!')
        _INFERENCE_RULES[call_target] = fn
        return fn
    return register
31
+
32
+
33
def generate_flatten_constraints(start_dim, end_dim, input, flattened, n, counter):
    """
    Generate constraints for flattening a rank-n tensor *input* into *flattened*.

    The dimensions in the normalized, exclusive-end range [start_dim, end_dim)
    are collapsed into their product via a CalcProduct constraint.

    :param start_dim: first dimension to flatten (may be negative)
    :param end_dim: last dimension to flatten (may be negative)
    :param input: tensor variable for the input
    :param flattened: tensor variable for the flattened result
    :param n: assumed rank of the input tensor
    :param counter: fresh-variable counter
    :return: (constraint, updated counter)
    """
    d, counter = gen_tensor_dims(n, counter)
    # The input is pinned to a rank-n tensor of fresh dimension variables.
    c1 = BinConstraintT(input, TensorType(d), op_eq)
    # Normalize negative indices to positive positions.
    # NOTE(review): abs(start_dim) is only correct for start_dim in {-1, 0,
    # positives}; e.g. start_dim=-2 would need n - 2, not 2 — confirm callers
    # never pass other negative values.
    start_dim = n if start_dim == -1 else abs(start_dim)
    end_dim = n + end_dim + 1 if end_dim < 0 else end_dim + 1
    c2 = CalcProduct(start_dim, end_dim, flattened, d)
    # Every generated dimension must be a natural number.
    nat_constraints = gen_nat_constraints(d)
    return Conj([c1, c2, *nat_constraints]), counter
41
+
42
+
43
@register_inference_rule(getattr)
def get_attr_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraint generation for ``getattr``.

    Only the "device" attribute is supported; reading it leaves the tensor
    shape unchanged, so the output type is tied to the input type.
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], str)

    output, counter = gen_tvar(counter)
    symbols[n] = output

    source, attr = symbols[n.args[0]], n.args[1]

    if attr == 'device':
        # ".device" does not alter the shape: output equals input.
        return [BinConstraintT(source, output, op_eq)], counter
    raise NotImplementedError('Not yet implemented')
60
+
61
@register_inference_rule(torch.bmm)
def bmm_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraints that match the input to a size 3 tensor
    and switch the dimensions according to the rules
    of batch multiplication
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], Node)

    bmm_output, counter = gen_tvar(counter)
    symbols[n] = bmm_output

    bmm_input1 = symbols[n.args[0]]
    bmm_input2 = symbols[n.args[1]]

    # bmm operates on rank-3 tensors, so generate 3 fresh dims per operand.
    dims_input1, counter = gen_tensor_dims(3, counter)
    dims_input2, counter = gen_tensor_dims(3, counter)

    # Case 1: both inputs fully dynamic -> output fully dynamic.
    inputs_dyn = Conj([BinConstraintT(bmm_input1, Dyn, op_eq),
                       BinConstraintT(bmm_input2, Dyn, op_eq),
                       BinConstraintT(bmm_output, Dyn, op_eq)])

    # Case 2: first input dynamic; batch and last dims come from input 2,
    # the middle output dimension is unknown.
    input1_dyn = Conj([BinConstraintT(bmm_input1, Dyn, op_eq),
                       BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
                       BinConstraintT(bmm_output, TensorType([dims_input2[0], Dyn, dims_input2[2]]), op_eq)])

    # Case 3: second input dynamic (mirror of case 2).
    input2_dyn = Conj([BinConstraintT(bmm_input2, Dyn, op_eq),
                       BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
                       BinConstraintT(bmm_output, TensorType([dims_input1[0], dims_input1[1], Dyn]), op_eq)])

    # Case 4: both inputs are rank-3 tensors; the batch sizes must be
    # consistent and the output batch size is their greatest upper bound.
    consistency_constraints = [BinConstraintD(dims_input1[0], dims_input2[0], op_consistency)]

    batch_size, counter = gen_dvar(counter)

    inputs_are_tensors = Conj([BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
                               BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
                               BinConstraintT(bmm_output, TensorType([batch_size, dims_input1[1], dims_input2[2]]), op_eq),
                               *consistency_constraints, DGreatestUpperBound(batch_size, dims_input1[0], dims_input2[0])])

    return [Disj([inputs_dyn, input1_dyn, input2_dyn, inputs_are_tensors])], counter
102
+
103
+
104
@register_inference_rule("index_select")
def index_select_inference_rule(n: Node, symbols, constraints, counter):
    """
    We constrain the second argument to a vector or Dyn.
    The output replaces the input with the shape of the vector
    at the position given by the index (first argument)
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], int)
    assert isinstance(n.args[2], Node)

    index_select, counter = gen_tvar(counter)
    symbols[n] = index_select

    # The index argument is a rank-1 tensor (or Dyn, handled below).
    dims, counter = gen_tensor_dims(1, counter)

    # equality constraint
    is_size_1 = BinConstraintT(symbols[n.args[2]], TensorType(dims), op_eq)
    is_dyn = BinConstraintT(symbols[n.args[2]], Dyn, op_eq)

    # Try every candidate input rank 1..MAX_TENSOR_RANK; IndexSelect replaces
    # the selected dimension with the index vector's length (or Dyn).
    c2 = Conj([is_size_1, Disj([IndexSelect(i + 1, symbols[n.args[0]], dims[0], n.args[1], index_select)
                                for i in range(MAX_TENSOR_RANK)])])
    c3 = Conj([is_dyn, Disj([IndexSelect(i + 1, symbols[n.args[0]], Dyn, n.args[1], index_select)
                             for i in range(MAX_TENSOR_RANK)])])

    return [Disj([c2, c3])], counter
133
+
134
+
135
@register_inference_rule("expand")
def expand_inference_rule(n: Node, symbols, constraints, counter):
    """
    We generate the exact constraints as we do for tensor additions but we constraint
    the rank of this expression to be equal to len(n.args[1:]) so that only
    those cases get considered for the output
    """
    assert isinstance(n.args[0], Node)

    # define the output for expand
    expand, counter = gen_tvar(counter)
    symbols[n] = expand

    # since we do not have two nodes here, we will construct an argument variable
    e1 = symbols[n.args[0]]
    e2, counter = gen_tvar(counter)

    e2_nat_constraints = []
    for arg in n.args[1:]:
        assert isinstance(arg, (Node, int))
        if isinstance(arg, Node):
            assert isinstance(symbols[arg], DVar)
            # dimension variables used as sizes must be natural numbers
            e2_nat_constraints.append(BinConstraintD(0, symbols[arg], op_leq))

    # e2 is pinned to the target size given by the trailing arguments
    e2_constraint = BinConstraintT(e2, TensorType([arg if isinstance(arg, int) else symbols[arg] for arg in n.args[1:]]), op_eq)

    constraints, counter = gen_broadcasting_constraints(e1, e2, symbols, counter, expand)

    # constraint the output size
    dims, counter = gen_tensor_dims(len(n.args[1:]), counter)
    nat_constraints = gen_nat_constraints(dims)
    c = [BinConstraintT(expand, TensorType(dims), op_eq), *nat_constraints, e2_constraint, *e2_nat_constraints]
    constraints += c

    return constraints, counter
170
+
171
+
172
@register_inference_rule(torch.nn.functional.gelu)
@register_inference_rule(torch.nn.functional.dropout)
@register_inference_rule(torch.nn.functional.softmax)
@register_inference_rule("detach")
@register_inference_rule("to")
@register_inference_rule("int")
@register_inference_rule("long")
@register_inference_rule("contiguous")
@register_inference_rule(torch.ones)
@register_inference_rule(torch.zeros)
def equality_inference_rule(n: Node, symbols, constraints, counter):
    """
    We generate the constraint: input = output

    For the shape-preserving targets the output type equals the input type;
    for torch.ones/torch.zeros the arguments are the size of the output.
    """
    output, counter = gen_tvar(counter)
    symbols[n] = output

    if isinstance(n.args[0], Node):
        input = symbols[n.args[0]]
        if isinstance(input, TVar):
            # shape-preserving op: output type equals input type
            return [BinConstraintT(input, output, op_eq)], counter

        # then we have dimension variables
        else:
            # e.g. torch.ones(d0, d1, ...): all args are dimension variables
            for arg in n.args:
                assert isinstance(symbols[arg], DVar)
            my_size = [symbols[arg] for arg in n.args]
            return [BinConstraintT(output, TensorType(my_size), op_eq)], counter

    elif isinstance(n.args[0], tuple):
        # then the tuple is the size
        assert len(n.args[0]) <= 4
        my_size = [symbols[arg] for arg in n.args[0]]
        return [BinConstraintT(output, TensorType(my_size), op_eq)], counter
    else:
        raise NotImplementedError('Method not yet implemented')
208
+
209
+
210
@register_inference_rule("transpose")
def transpose_inference_rule(n: Node, symbols, constraints, counter):
    """
    Generate constraints for ``Tensor.transpose(dim0, dim1)``.

    Behaves like a sequence of two index selects: either the input (and
    hence the output) is fully dynamic, or the input is a tensor of some
    rank and the two given dimensions are swapped.
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], int)
    assert isinstance(n.args[2], int)

    result, counter = gen_tvar(counter)
    symbols[n] = result

    source = symbols[n.args[0]]
    assert isinstance(source, TVar)

    # Case 1: a fully dynamic input propagates to a fully dynamic output.
    both_dyn = Conj([BinConstraintT(source, Dyn, op_eq),
                     BinConstraintT(result, Dyn, op_eq)])

    # Case 2: the input has some rank in 1..MAX_TENSOR_RANK and the two
    # dimensions are actually swapped.
    swap_cases = []
    for rank in range(1, MAX_TENSOR_RANK + 1):
        swap_cases.append(Transpose(rank, source, n.args[1], n.args[2], result))

    return [Disj([both_dyn, Disj(swap_cases)])], counter
232
+
233
+
234
@register_inference_rule("type_as")
def type_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraints for ``type_as``: the two operands must be consistent and
    the output takes the type of the second operand.
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], Node)

    result, counter = gen_tvar(counter)
    symbols[n] = result

    src = symbols[n.args[0]]
    dst = symbols[n.args[1]]
    assert isinstance(src, TVar)
    assert isinstance(dst, TVar)

    consistency = BinConstraintT(src, dst, op_consistency)
    output_eq = BinConstraintT(result, dst, op_eq)
    return [consistency, output_eq], counter
253
+
254
@register_inference_rule("masked_fill_")
def masked_fill_inference_rule(n: Node, symbols, constraints, counter):
    """
    Similar to addition. For now we implement the constraints when
    the argument is a boolean tensor. There is also a case for when
    it is a condition. We will leave this out for now.
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], Node)

    # Look up the operand variables; both must be tensor variables for the
    # broadcasting treatment below.
    lhs = symbols[n.args[0]]
    rhs = symbols[n.args[1]]

    if not (isinstance(lhs, TVar) and isinstance(rhs, TVar)):
        raise NotImplementedError('Not yet implemented')

    result, counter = gen_tvar(counter)
    symbols[n] = result
    return gen_broadcasting_constraints(lhs, rhs, symbols, counter, result)
277
+
278
+
279
@register_inference_rule(torch.nn.functional.embedding)
def embedding_inference_rule_functional(n: Node, symbols, constraints, counter):
    """
    Functional form of embedding: the embedding dimension is read from the
    weight matrix (second argument), which is constrained to be rank 2.
    """
    assert isinstance(n.args[0], Node)

    embedding_dim_weights = symbols[n.args[1]]

    # will treat this as a static shape. So we will not use matching.
    weight_dims, counter = gen_tensor_dims(2, counter)
    equality_constraint = BinConstraintT(embedding_dim_weights, TensorType(weight_dims), op_eq)
    # the second weight dimension is the embedding dimension
    embedding_dim = weight_dims[1]
    constraints, counter = gen_embedding_rules(n, symbols, embedding_dim, counter)
    return [equality_constraint] + constraints, counter
291
+
292
+
293
@register_inference_rule(torch.nn.modules.sparse.Embedding)
def embedding_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    The output shape differs from the input shape in the last dimension
    """
    assert isinstance(n.args[0], Node)
    # the module instance already knows its embedding dimension
    return gen_embedding_rules(n, symbols, module_instance.embedding_dim, counter)
300
+
301
+
302
def gen_embedding_rules(n: Node, symbols, embedding_dim, counter):
    """
    Shared constraint generation for embedding lookups: the output is the
    input shape with *embedding_dim* appended, or Dyn if the input is Dyn.
    """
    embedding_output, counter = gen_tvar(counter)
    symbols[n] = embedding_output
    embedding_input = symbols[n.args[0]]

    # Case 1: dynamic input -> dynamic output.
    input_dyn = BinConstraintT(embedding_input, Dyn, op_eq)
    output_dyn = BinConstraintT(embedding_output, Dyn, op_eq)

    c1 = Conj([input_dyn, output_dyn])
    c2 = []

    # Case 2: one disjunct per candidate input rank 1..MAX_TENSOR_RANK-1.
    for i in range(1, MAX_TENSOR_RANK):
        new_dims, counter = gen_tensor_dims(i, counter)
        nat_constraints = gen_nat_constraints(new_dims)

        # we consider all tensor sizes and append embedding_dim to the end of the output dimension in all cases
        c_tensor_i = Conj([BinConstraintT(embedding_input, TensorType(new_dims), op_eq),
                           BinConstraintT(embedding_output, TensorType(new_dims + [embedding_dim]), op_eq)] +
                          nat_constraints)
        c2.append(c_tensor_i)

    return [Disj([c1, Disj(c2)])], counter
325
+
326
+
327
@register_inference_rule(torch.tensor)
def tensor_inference_rule(n: Node, symbols, constraints, counter):
    """
    No constraints are generated for torch.tensor: scalar construction is
    not modelled yet (scalars have not been needed in the examples so far).
    """
    return [], counter
335
+
336
+
337
@register_inference_rule("reshape")
@register_inference_rule("view")
def view_inference_rule(n: Node, symbols, constraints, counter):
    """
    Similar to reshape but with an extra condition on the strides
    """
    assert isinstance(n.args[0], Node)

    # generate the new variable
    my_view, counter = gen_tvar(counter)
    symbols[n] = my_view

    src_var = symbols[n.args[0]]
    t2 = [symbols[elem] if isinstance(elem, Node) else elem for elem in n.args[1:]]  # target shape
    t2_type = []
    num_constraints = []

    for t in t2:
        if t == -1:
            # -1 means "infer this dimension": introduce a fresh dimension
            # variable, which must resolve to a concrete (non-Dyn) value
            var, counter = gen_dvar(counter)
            t2_type.append(var)
            num_constraints.append(BinConstraintD(var, Dyn, op_neq))

        else:
            # explicitly given sizes must also be concrete
            num_constraints.append(BinConstraintD(t, Dyn, op_neq))
            t2_type.append(t)

    t2_type = TensorType(t2_type)  # type: ignore[assignment]

    c1 = BinConstraintT(my_view, t2_type, op_eq)
    c2 = CanReshape(src_var, t2_type)

    # TODO: add the extra check mentioned here:
    # https://pytorch.org/docs/stable/generated/torch.Tensor.view.html#torch.Tensor.view

    return [c1, c2] + num_constraints, counter  # type: ignore[operator]
374
+
375
+
376
@register_inference_rule("size")
def size_inference_rule(n: Node, symbols, constraints, counter):
    """
    The constraint is just lhs = rhs.
    Ex: size = input_ids.size()
    """

    if len(n.args) == 1:
        # t.size(): the result carries the whole shape, so it simply
        # equals the input's tensor type.
        # generate the new variable
        size, counter = gen_tvar(counter)
        symbols[n] = size
        input = symbols[n.args[0]]
        c = BinConstraintT(input, size, op_eq)
        return [c], counter

    elif len(n.args) == 2:
        # t.size(dim): the result is a single dimension.
        # TODO: review this rule; should input = dyn; output = dyn be included here?
        if isinstance(n.args[1], int):
            # generate the new variable
            size_index, counter = gen_dvar(counter)
            symbols[n] = size_index
            input = symbols[n.args[0]]
            # one GetItem candidate per possible input rank
            c2 = [GetItem(i + 1, n.args[1], size_index, input) for i in range(MAX_TENSOR_RANK)]
            # the resulting dimension is a natural number
            c3 = BinConstraintD(0, size_index, op_leq)

            # dynamic input -> dynamic dimension
            input_dyn = BinConstraintT(input, Dyn, op_eq)
            output_dyn = BinConstraintD(size_index, Dyn, op_eq)
            c1 = Conj([input_dyn, output_dyn])

            return [Disj([c1, Conj([Disj(c2), c3])])], counter

        else:
            raise NotImplementedError

    else:
        raise NotImplementedError
413
+
414
+
415
def range_check(i, n):
    """
    Checks if an index i is within range of a size n list.

    Supports Python-style negative indexing: a negative index i refers to
    position n + i, so it is valid exactly when i >= -n.

    Args:
        i: index
        n: list size

    Returns: T() if the index is in range, F() otherwise
    """
    if i >= 0:
        return T() if i < n else F()
    else:
        # Bug fix: a negative index is in range when i >= -n. The previous
        # test (i >= n) is never true for n > 0, so every negative index
        # was rejected.
        return T() if i >= -n else F()
428
+
429
+
430
@register_inference_rule(torch.cumsum)
def cumsum_inference_rule(n: Node, symbols, constraints, counter):
    """
    Input and output shapes should be equal
    We should verify that the index is valid
    """
    assert isinstance(n.args[0], Node)
    # the dim may be positional or passed as a keyword argument
    arg_1 = n.args[1] if len(n.args) > 1 else n.kwargs["dim"]
    assert isinstance(arg_1, int)

    output, counter = gen_tvar(counter)
    symbols[n] = output
    input = symbols[n.args[0]]

    # Case 1: dynamic input -> dynamic output.
    input_dyn = BinConstraintT(input, Dyn, op_eq)
    output_dyn = BinConstraintT(output, Dyn, op_eq)
    c1 = Conj([input_dyn, output_dyn])
    c2 = []
    # Case 2: for each candidate rank, the shape is preserved and the dim
    # argument must be a valid index for that rank.
    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims, counter = gen_tensor_dims(i, counter)

        nat_constraints = gen_nat_constraints(new_dims)

        c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims), op_eq),
                           BinConstraintT(output, TensorType(new_dims), op_eq)] +
                          [range_check(arg_1, i)] + nat_constraints)

        c2.append(c_tensor_i)
    dyn_or_tensor = Disj([c1, Disj(c2)])
    return [dyn_or_tensor], counter
460
+
461
+
462
@register_inference_rule(_assert_is_none)
def assert_inference_rule(n: Node, symbols, constraints, counter):
    """Assertion nodes contribute no typing constraints; they must be
    dead ends in the graph (no users)."""
    assert len(n.users) == 0
    return [], counter
466
+
467
+
468
@register_inference_rule(operator.getitem)
def getitem_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraint generation for ``operator.getitem``.

    Two index shapes are handled: an int index extracts a single dimension
    from a tensor type, and a tuple index extracts a sub-tensor.
    """
    assert isinstance(n.args[0], Node)

    # dimension output case
    if isinstance(n.args[1], int):
        # create and store the new dimension variable
        get_item_output, counter = gen_dvar(counter)
        symbols[n] = get_item_output

        # retrieve arg variables
        get_item_arg = symbols[n.args[0]]
        assert isinstance(get_item_arg, TVar)

        # if the input is dynamic, we accept any index and return
        # a dynamic dimension as output
        input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq)
        output_dyn = BinConstraintD(get_item_output, Dyn, op_eq)
        c1 = Conj([input_dyn, output_dyn])

        # if the input is a tensor,
        # generate a getItem constraint which will be expanded based on the
        # tensor dimension.
        c2 = [GetItem(i + 1, n.args[1], get_item_output, get_item_arg) for i in range(MAX_TENSOR_RANK)]

        # since the output is a dimension, we make sure it's a natural number
        # added as a conjunction to the disjunction of c2
        c3 = BinConstraintD(0, get_item_output, op_leq)
        return [Disj([c1, Conj([Disj(c2), c3])])], counter

    # tensor output case
    elif isinstance(n.args[1], tuple):
        # create and store the new tensor variable
        get_item_output, counter = gen_tvar(counter)
        symbols[n] = get_item_output

        # retrieve arg variables
        if n.args[0] in symbols:
            get_item_arg = symbols[n.args[0]]
            assert isinstance(get_item_arg, TVar)

            # dynamic input -> dynamic output
            input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq)
            output_dyn = BinConstraintT(get_item_output, Dyn, op_eq)  # type: ignore[assignment]
            c1 = Conj([input_dyn, output_dyn])

            # otherwise, one GetItemTensor candidate per possible input rank
            c2 = [GetItemTensor(i + 1, n.args[1], get_item_output, get_item_arg)  # type: ignore[misc]
                  for i in range(MAX_TENSOR_RANK)]
        else:
            # TODO: we should figure out why there is a key-error here.
            return [], counter

        return [Disj([c1, *c2])], counter

    else:
        raise RuntimeError('Method not yet implemented')
526
+
527
+
528
@register_inference_rule(operator.gt)
def gt_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraint generation for ``operator.gt``.

    Tensor operands are handled like broadcasting arithmetic; dimension
    operands produce a boolean variable equated to the comparison, meant
    for flow analysis only.
    """
    assert isinstance(n.args[0], (Node, int))
    assert isinstance(n.args[1], (Node, int))

    # We make sure this node will not be used again. We do not
    # generate a constraint about that node. Only about the operands.

    e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
    e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(e1, TVar) and isinstance(e2, TVar):
            gt_tensor, counter = gen_tvar(counter)
            symbols[n] = gt_tensor
            return gen_broadcasting_constraints(e1, e2, symbols, counter, gt_tensor)

        elif isinstance(e1, DVar) and isinstance(e2, DVar):
            # This is meant to be used for flow analysis only
            gt_constraint = BinConstraintD(e1, e2, op_gt)

            my_gt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise RuntimeError('Sort Mismatch')

    elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
        if isinstance(e1, DVar):
            # This is meant to be used for flow analysis only
            gt_constraint = BinConstraintD(e1, e2, op_gt)

            my_gt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
            return [equality_constraint], counter

        elif isinstance(e1, TVar) and isinstance(e2, int):
            # then we made the wrong assumption about the argument being a tensor
            # so we should fix the assumption
            warnings.warn(f'Made the wrong assumption for node {n}. Correctness not guaranteed.')

            # replace the tensor variable with a fresh dimension variable
            # (a dead no-op re-read of symbols[n.args[0]] was removed here)
            new_e1, counter = gen_dvar(counter)
            symbols[n.args[0]] = new_e1

            gt_constraint = BinConstraintD(new_e1, e2, op_gt)

            my_gt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise NotImplementedError('Method not yet implemented')

    else:
        raise NotImplementedError('Method not yet implemented')
585
+
586
+
587
@register_inference_rule(operator.eq)
def eq_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraint generation for ``operator.eq``.

    Tensor operands are handled via broadcasting; dimension operands
    produce a boolean variable equated to the comparison, meant for flow
    analysis only.
    """
    assert isinstance(n.args[0], (Node, int))
    assert isinstance(n.args[1], (Node, int))

    e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
    e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(e1, TVar) and isinstance(e2, TVar):
            eq_tensor, counter = gen_tvar(counter)
            symbols[n] = eq_tensor
            return gen_broadcasting_constraints(e1, e2, symbols, counter, eq_tensor)

        elif isinstance(e1, DVar) and isinstance(e2, DVar):
            # This is meant to be used for flow analysis only
            eq_constraint = BinConstraintD(e1, e2, op_eq)

            my_eq, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_eq, eq_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise RuntimeError('Sort Mismatch')

    elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
        if isinstance(e1, DVar):
            # This is meant to be used for flow analysis only
            eq_constraint = BinConstraintD(e1, e2, op_eq)

            my_eq, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_eq, eq_constraint, op_eq)
            return [equality_constraint], counter
        else:
            raise NotImplementedError('Method not yet implemented')
    else:
        raise NotImplementedError('Method not yet implemented')
624
+
625
@register_inference_rule(operator.ne)
def neq_inference_rule(n: Node, symbols, constraints, counter):
    """
    Translates to inconsistent in gradual types.
    To prove inequality, we should prove that
    tensors are either different sizes or
    disagree on at least one dimension

    This is a WIP (works when the condition
    is false. We are working on making this operation work
    when the condition is true as well)
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], tuple)

    # implementing for size 3 and 4
    if len(n.args[1]) == 3:

        assert isinstance(n.args[1][0], (Node, int))
        assert isinstance(n.args[1][1], (Node, int))
        assert isinstance(n.args[1][2], (Node, int))

        lhs = symbols[n.args[0]]

        b, counter = gen_tensor_dims(4, counter)
        input_is_size3 = BinConstraintT(lhs, TensorType([b[0], b[1], b[2]]), op_eq)

        d1 = n.args[1][0] if isinstance(n.args[1][0], int) else symbols[n.args[1][0]]
        d2 = n.args[1][1] if isinstance(n.args[1][1], int) else symbols[n.args[1][1]]
        d3 = n.args[1][2] if isinstance(n.args[1][2], int) else symbols[n.args[1][2]]

        # dimensions not equal
        neq_1 = BinConstraintD(d1, b[0], op_neq)
        neq_2 = BinConstraintD(d2, b[1], op_neq)
        neq_3 = BinConstraintD(d3, b[2], op_neq)

        # dimensions inconsistent: both sides static (non-Dyn) and different
        dims_inconsistent1 = Conj([BinConstraintD(d1, Dyn, op_neq), BinConstraintD(b[0], Dyn, op_neq), neq_1])
        dims_inconsistent2 = Conj([BinConstraintD(d2, Dyn, op_neq), BinConstraintD(b[1], Dyn, op_neq), neq_2])
        dims_inconsistent3 = Conj([BinConstraintD(d3, Dyn, op_neq), BinConstraintD(b[2], Dyn, op_neq), neq_3])

        dims_inconsistent = Disj([dims_inconsistent1, dims_inconsistent2, dims_inconsistent3])

        # we are covering size 3 and 4 only for now
        ne_constraint = Conj([input_is_size3, dims_inconsistent])

        my_ne, counter = gen_bvar(counter)
        equality_constraint = BinConstraintD(my_ne, ne_constraint, op_eq)

    elif len(n.args[1]) == 4:

        assert isinstance(n.args[1][0], (Node, int))
        assert isinstance(n.args[1][1], (Node, int))
        assert isinstance(n.args[1][2], (Node, int))
        assert isinstance(n.args[1][3], (Node, int))

        lhs = symbols[n.args[0]]

        b1, counter = gen_dvar(counter)
        b2, counter = gen_dvar(counter)
        b3, counter = gen_dvar(counter)
        b4, counter = gen_dvar(counter)

        input_is_size4 = BinConstraintT(lhs, TensorType([b1, b2, b3, b4]), op_eq)

        d1 = n.args[1][0] if isinstance(n.args[1][0], int) else symbols[n.args[1][0]]
        d2 = n.args[1][1] if isinstance(n.args[1][1], int) else symbols[n.args[1][1]]
        d3 = n.args[1][2] if isinstance(n.args[1][2], int) else symbols[n.args[1][2]]
        d4 = n.args[1][3] if isinstance(n.args[1][3], int) else symbols[n.args[1][3]]

        # dimensions not equal
        neq_1 = BinConstraintD(d1, b1, op_neq)
        neq_2 = BinConstraintD(d2, b2, op_neq)
        neq_3 = BinConstraintD(d3, b3, op_neq)
        neq_4 = BinConstraintD(d4, b4, op_neq)

        # dimensions inconsistent: both sides static (non-Dyn) and different
        dims_inconsistent1 = Conj([BinConstraintD(d1, Dyn, op_neq), BinConstraintD(b1, Dyn, op_neq), neq_1])
        dims_inconsistent2 = Conj([BinConstraintD(d2, Dyn, op_neq), BinConstraintD(b2, Dyn, op_neq), neq_2])
        dims_inconsistent3 = Conj([BinConstraintD(d3, Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq), neq_3])
        # bug fix: this clause previously tested b3 for Dyn instead of b4
        dims_inconsistent4 = Conj([BinConstraintD(d4, Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq), neq_4])

        dims_inconsistent = Disj([dims_inconsistent1, dims_inconsistent2, dims_inconsistent3, dims_inconsistent4])

        ne_constraint = Conj([input_is_size4, dims_inconsistent])

        my_ne, counter = gen_bvar(counter)

        equality_constraint = BinConstraintD(my_ne, ne_constraint, op_eq)

    else:
        raise NotImplementedError('Method not yet implemented')

    return [equality_constraint], counter
721
+
722
+
723
@register_inference_rule(operator.lt)
def lt_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraint generation for ``operator.lt``.

    Tensor operands are handled like broadcasting arithmetic; dimension
    operands produce a boolean variable equated to the comparison, meant
    for flow analysis only.
    """
    assert isinstance(n.args[0], (Node, int))
    assert isinstance(n.args[1], (Node, int))

    # We make sure this node will not be used again. We do not
    # generate a constraint about that node. Only about the operands.

    e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
    e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(e1, TVar) and isinstance(e2, TVar):
            lt_tensor, counter = gen_tvar(counter)
            symbols[n] = lt_tensor
            return gen_broadcasting_constraints(e1, e2, symbols, counter, lt_tensor)

        elif isinstance(e1, DVar) and isinstance(e2, DVar):
            # This is meant to be used for flow analysis only
            lt_constraint = BinConstraintD(e1, e2, op_lt)

            my_lt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_lt, lt_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise RuntimeError('Sort Mismatch')

    elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
        if isinstance(e1, DVar):
            # This is meant to be used for flow analysis only
            lt_constraint = BinConstraintD(e1, e2, op_lt)

            my_lt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_lt, lt_constraint, op_eq)
            return [equality_constraint], counter
        else:
            raise NotImplementedError('Method not yet implemented')

    else:
        raise NotImplementedError('Method not yet implemented')
764
+
765
+
766
@register_inference_rule(torch.full)
def full_inference_rule(n: Node, symbols, constraints, counter):
    """
    The first argument of torch.full is the target size, so the output
    tensor type is exactly that size (ints kept as-is, Nodes mapped to
    their dimension variables).
    """
    full, counter = gen_tvar(counter)
    symbols[n] = full
    res = []

    assert isinstance(n.args[0], Iterable)
    for arg in n.args[0]:
        dim = arg if isinstance(arg, int) else symbols[arg]
        res.append(dim)
    c = BinConstraintT(full, TensorType(list(res)), op_eq)  # type: ignore[arg-type]
    return [c], counter
778
+
779
+
780
# TODO normalize index
@register_inference_rule(torch.arange)
def arange_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraint rule for ``torch.arange(end)``.

    The output is a rank-1 tensor whose single dimension is either Dyn
    (when any of start/end/step is Dyn) or ``int((end - start) / step)``.
    Only the single-argument form is supported.
    """
    start = 0
    step = 1

    if len(n.args) == 1:
        end = symbols[n.args[0]]
    else:
        raise NotImplementedError('Not yet implemented')

    # int((end - start) / step)
    d1, counter = gen_dvar(counter)
    size_constraint = BinConstraintD(d1, BinConstraintD(BinConstraintD(end, start, op_sub), step, op_div), op_eq)
    arange, counter = gen_tvar(counter)
    symbols[n] = arange

    # either a parameter is Dyn (and then so is the output dimension) ...
    c1 = Disj([BinConstraintD(end, Dyn, op_eq),
               BinConstraintD(start, Dyn, op_eq),
               BinConstraintD(step, Dyn, op_eq)])
    c2 = BinConstraintD(d1, Dyn, op_eq)
    both_dyn = Conj([c1, c2])

    # ... or all parameters are numbers and the size formula applies
    c11 = Conj([BinConstraintD(end, Dyn, op_neq),
                BinConstraintD(start, Dyn, op_neq),
                BinConstraintD(step, Dyn, op_neq)])
    c22 = BinConstraintD(d1, Dyn, op_neq)
    both_numbers = Conj([c11, c22, size_constraint])

    return [BinConstraintT(arange, TensorType([d1]), op_eq), Disj([both_dyn, both_numbers])], counter
811
+
812
def gen_broadcasting_constraints(e1, e2, symbols, counter, output_var):
    """
    Constrain ``output_var`` to be the greatest upper bound of the
    broadcasted, mutually-consistent versions of ``e1`` and ``e2``.
    """
    # Fresh tensor variables for the operands after broadcasting;
    # they do not correspond to any graph expression.
    lhs_bc, counter = gen_tvar(counter)
    rhs_bc, counter = gen_tvar(counter)

    upper_bound = TGreatestUpperBound(output_var, lhs_bc, rhs_bc)
    broadcast = ApplyBroadcasting(lhs_bc, rhs_bc, e1, e2)
    consistent = BinConstraintT(lhs_bc, rhs_bc, op_consistency)
    return [upper_bound, broadcast, consistent], counter
822
+
823
+
824
@register_inference_rule(operator.mul)
@register_inference_rule(torch.ne)
@register_inference_rule("ne")
@register_inference_rule(torch.add)
@register_inference_rule(operator.add)
def broadcasting_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraint rule for elementwise binary operations that broadcast
    (add, mul, ne). Tensor/tensor operands produce broadcasting
    constraints; tensor/scalar keeps the tensor's type; dimension/scalar
    propagates the arithmetic result.
    """

    op_code = None
    if n.target == operator.add or n.target == torch.add:
        op_code = op_add
    elif n.target == operator.mul:
        op_code = op_mul
    # NOTE(review): op_code stays None for torch.ne / "ne"; the DVar-scalar
    # branches below would then build a BinConstraintD with a None op —
    # presumably ne never reaches those branches, but confirm.

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(symbols[n.args[0]], TVar) and isinstance(symbols[n.args[1]], TVar):
            my_output, counter = gen_tvar(counter)
            symbols[n] = my_output
            e1 = symbols[n.args[0]]
            e2 = symbols[n.args[1]]

            return gen_broadcasting_constraints(e1, e2, symbols, counter, my_output)
        else:
            raise NotImplementedError('Method not yet implemented')

    elif isinstance(n.args[0], Node) and isinstance(n.args[1], (int, float)):
        if isinstance(symbols[n.args[0]], TVar):
            # tensor op scalar: shape is unchanged
            my_output, counter = gen_tvar(counter)
            symbols[n] = my_output
            e1 = symbols[n.args[0]]
            return [BinConstraintT(my_output, e1, op_eq)], counter
        elif isinstance(symbols[n.args[0]], DVar):
            my_output, counter = gen_dvar(counter)
            symbols[n] = my_output
            e1 = symbols[n.args[0]]

            # we will propagate the runtime value here since this is regular addition
            c = Conj([BinConstraintD(my_output, BinConstraintD(e1, n.args[1], op_code), op_eq),
                      BinConstraintD(0, my_output, op_leq)])
            return [c], counter

    elif isinstance(n.args[1], Node) and isinstance(n.args[0], (int, float)):
        if isinstance(symbols[n.args[1]], TVar):
            # scalar op tensor: shape is unchanged
            my_output, counter = gen_tvar(counter)
            symbols[n] = my_output
            e2 = symbols[n.args[1]]
            return [BinConstraintT(my_output, e2, op_eq)], counter
        elif isinstance(symbols[n.args[1]], DVar):
            my_output, counter = gen_dvar(counter)
            symbols[n] = my_output
            e2 = symbols[n.args[1]]

            # we will propagate the runtime value here since this is regular addition
            c = Conj([BinConstraintD(my_output, BinConstraintD(e2, n.args[0], op_code), op_eq),
                      BinConstraintD(0, my_output, op_leq)])
            return [c], counter

        else:
            raise NotImplementedError('Method not yet implemented')

    else:
        # TODO generate add constraints for scalar addition
        raise NotImplementedError('Addition not yet implemented')
886
+
887
+
888
@register_inference_rule(torch.flatten)
def flatten_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraint rule for ``torch.flatten``.

    Either both input and output are Dyn, or, for each candidate input
    rank up to MAX_TENSOR_RANK, the flatten constraints for the given
    start/end dims hold.
    """
    assert isinstance(n.args[0], Node)

    # generate the new variable
    flattened, counter = gen_tvar(counter)
    symbols[n] = flattened

    input = symbols[n.args[0]]

    # set the default start and end dims
    start_dim = 1
    end_dim = -1

    if len(n.args) > 1:
        assert isinstance(n.args[1], int)
        start_dim = n.args[1]

    if len(n.args) > 2:
        assert isinstance(n.args[2], int)
        end_dim = n.args[2]

    # fully-dynamic case: input and output are both Dyn
    c1 = BinConstraintT(input, Dyn, op_eq)
    c2 = BinConstraintT(flattened, Dyn, op_eq)
    both_dyn = Conj([c1, c2])

    # one disjunct per possible input rank
    const = []
    for i in range(1, MAX_TENSOR_RANK + 1):
        c, counter = generate_flatten_constraints(start_dim, end_dim, input, flattened, i, counter)
        const.append(c)

    return [Disj([both_dyn, *const])], counter
920
+
921
+
922
@register_inference_rule(torch.nn.functional.layer_norm)
def layer_norm_functional(n: Node, symbols, constraints, counter):
    """
    We generate the constraint: input = output
    """
    assert isinstance(n.args[0], Node)
    # second positional argument is the normalized_shape
    normalized_shape = n.args[1]
    return gen_layer_norm_constraints(n, normalized_shape, symbols, counter)
929
+
930
+
931
@register_inference_rule(torch.nn.LayerNorm)
def layer_norm_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    Input and output shapes should be equal.
    Input should be consistent with the normalized_shape
    """
    assert isinstance(n.args[0], Node)
    # delegate to the shared helper, using the module's configured shape
    shape = module_instance.normalized_shape
    return gen_layer_norm_constraints(n, shape, symbols, counter)
939
+
940
+
941
def gen_layer_norm_constraints(n: Node, normalized_shape, symbols, counter):
    """
    Shared constraint generation for LayerNorm (module and functional).

    Either input and output are both Dyn, or, for each candidate rank,
    input and output share the same dimensions and the trailing dimensions
    are consistent with ``normalized_shape``.
    """
    output, counter = gen_tvar(counter)
    symbols[n] = output
    input = symbols[n.args[0]]

    input_dyn = BinConstraintT(input, Dyn, op_eq)
    output_dyn = BinConstraintT(output, Dyn, op_eq)

    c1 = Conj([input_dyn, output_dyn])

    # one disjunct per possible input rank
    c2 = []
    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims_rhs, counter = gen_tensor_dims(i, counter)
        nat_constraints = gen_nat_constraints(new_dims_rhs)

        c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims_rhs), op_eq),
                           BinConstraintT(output, TensorType(new_dims_rhs), op_eq)] +
                          add_layer_norm_constraints(new_dims_rhs, list(normalized_shape)) +
                          nat_constraints)
        c2.append(c_tensor_i)
    return [Disj([c1, Disj(c2)])], counter
962
+
963
@register_inference_rule(torch.nn.Dropout)
@register_inference_rule(torch.nn.ReLU)
def relu_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    Input and output shapes should be equal.
    """
    assert isinstance(n.args[0], Node)
    out_var, counter = gen_tvar(counter)
    symbols[n] = out_var
    in_var = symbols[n.args[0]]
    assert isinstance(in_var, TVar)
    # element-wise ops preserve the type exactly
    shape_eq = BinConstraintT(in_var, out_var, op_eq)
    return [shape_eq], counter
975
+
976
+
977
@register_inference_rule(torch.nn.Linear)
def linear_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    Input and output sizes should be the same except for the last dimension
    If the input is Dyn, then so should the output
    """
    assert isinstance(n.args[0], Node)
    # delegate to the shared helper using the module's configured features
    in_feat = module_instance.in_features
    out_feat = module_instance.out_features
    return linear_constraints(n, in_feat, out_feat, symbols, counter)
985
+
986
+
987
@register_inference_rule("dim")  # type: ignore[attr-defined]
def torch_dim_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraint rule for ``Tensor.dim()``: the result is a dimension
    variable equal to the input's rank, or Dyn when the input is Dyn.
    """
    assert isinstance(n.args[0], Node)
    my_dim, counter = gen_dvar(counter)
    symbols[n] = my_dim
    input = symbols[n.args[0]]

    input_dyn = BinConstraintT(input, Dyn, op_eq)
    output_dyn = BinConstraintD(my_dim, Dyn, op_eq)

    c1 = []

    # for every candidate rank i, input being a rank-i tensor forces my_dim == i
    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims_rhs_1, counter = gen_tensor_dims(i, counter)

        c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims_rhs_1), op_eq),
                           BinConstraintD(my_dim, i, op_eq)])
        c1.append(c_tensor_i)

    return [Disj([Conj([input_dyn, output_dyn]), Disj(c1)])], counter
1007
+
1008
+
1009
@register_inference_rule(torch._C._nn.linear)  # type: ignore[attr-defined]
def torch_linear_inference_rule(n: Node, symbols, constraints, counter):
    """
    Functional linear: the weight must be rank 2; its dims play the role of
    (out_features, in_features) in the shared linear constraints.
    """
    assert isinstance(n.args[0], Node)
    weight_dims, counter = gen_tensor_dims(2, counter)
    weight_eq = BinConstraintT(symbols[n.args[1]], TensorType(weight_dims), op_eq)
    linear_cons, counter = linear_constraints(n, weight_dims[1], weight_dims[0], symbols, counter)
    return [weight_eq] + linear_cons, counter
1016
+
1017
+
1018
def linear_constraints(n: Node, in_features, out_features, symbols, counter):
    """
    Shared constraint generation for Linear (module and functional).

    Either input and output are both Dyn, or, for each candidate rank,
    input and output agree on all but the last dimension, where the input
    must be consistent with ``in_features`` and the output equal to
    ``out_features``.
    """
    linear_output, counter = gen_tvar(counter)
    symbols[n] = linear_output
    linear_input = symbols[n.args[0]]

    input_dyn = BinConstraintT(linear_input, Dyn, op_eq)
    output_dyn = BinConstraintT(linear_output, Dyn, op_eq)

    c1 = Conj([input_dyn, output_dyn])

    # one disjunct per possible input rank
    c2 = []
    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
        new_dims_rhs_2, counter = gen_tensor_dims(i, counter)

        nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)

        c_tensor_i = Conj([BinConstraintT(linear_input, TensorType(new_dims_rhs_1), op_eq),
                           BinConstraintT(linear_output, TensorType(new_dims_rhs_2), op_eq)] +
                          add_linear_constraints(new_dims_rhs_1, new_dims_rhs_2, in_features, out_features) +
                          nat_constraints)
        c2.append(c_tensor_i)
    return [Disj([c1, Disj(c2)])], counter
1041
+
1042
def add_layer_norm_constraints(input_dim, normalized_dim):
    """
    The constraints say that the type has te form: [*, 1024, 1024]
    while the normalized_dim have the form [1024, 1024]
    Args:
        input_dim: Input shape of layer norm
        normalized_dim: normalized_dim parameter of the module instance

    """

    # in this case we return false since there's a pattern mismatch
    if len(normalized_dim) > len(input_dim):
        return [F()]

    # align the trailing dimensions and require pairwise consistency
    return [BinConstraintD(d, m, op_consistency)
            for d, m in zip(reversed(input_dim), reversed(normalized_dim))]
1061
+
1062
+
1063
def add_linear_constraints(dims1, dims2, in_features, out_features):
    """
    Pairwise constraints for a Linear layer: every leading dimension of the
    input equals the corresponding output dimension; the final input
    dimension must be consistent with ``in_features`` and the final output
    dimension equal to ``out_features``.
    """
    assert len(dims1) == len(dims2)
    last = len(dims1) - 1
    constraints = []
    for idx, (d_in, d_out) in enumerate(zip(dims1, dims2)):
        if idx == last:
            constraints.append(BinConstraintD(d_in, in_features, op_consistency))
            constraints.append(BinConstraintD(d_out, out_features, op_eq))
        else:
            constraints.append(BinConstraintD(d_in, d_out, op_eq))

    return constraints
1074
+
1075
+
1076
@register_inference_rule(torch.reshape)
def reshape_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constrain the output to the requested target shape (with -1 mapped to
    Dyn) and require that the input can be reshaped to it.
    """
    assert isinstance(n.args[0], Node)

    # generate the new variable
    out_var, counter = gen_tvar(counter)
    symbols[n] = out_var

    in_var = symbols[n.args[0]]
    target = n.args[1]
    # -1 is the wildcard dimension, modeled with Dyn
    target_type = TensorType([Dyn if d == -1 else d for d in target])  # type: ignore[union-attr]
    eq_c = BinConstraintT(out_var, target_type, op_eq)  # type: ignore[union-attr]
    reshape_c = CanReshape(in_var, target_type)

    return [eq_c, reshape_c], counter
1091
+
1092
+
1093
@register_inference_rule(BatchNorm2d)
def batchnorm_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    BatchNorm2d: the input must match a rank-4 tensor pattern and the
    output type equals the input type.
    """
    assert isinstance(n.args[0], Node)

    # generate the new variable
    batchnorm_output, counter = gen_tvar(counter)
    symbols[n] = batchnorm_output
    batchnorm_input = symbols[n.args[0]]

    # dim vars
    d1, counter = gen_dvar(counter)
    d2, counter = gen_dvar(counter)
    d3, counter = gen_dvar(counter)
    d4, counter = gen_dvar(counter)

    nat_constraints = gen_nat_constraints([d1, d2, d3, d4])

    # input must match an NCHW-style rank-4 pattern; output is unchanged
    c1 = BinConstraintT(batchnorm_input, TensorType([d1, d2, d3, d4]), op_matching)
    c2 = BinConstraintT(batchnorm_input, batchnorm_output, op_eq)
    return [c1, c2, *nat_constraints], counter
1113
+
1114
+
1115
@register_inference_rule(torch.nn.AdaptiveAvgPool2d)
def adaptive_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    AdaptiveAvgPool2d: the input must match a rank-4 tensor pattern; the
    output keeps the first two dimensions and replaces the spatial
    dimensions with the module's configured output_size.
    """
    assert isinstance(n.args[0], Node)

    avg_pool, counter = gen_tvar(counter)

    symbols[n] = avg_pool
    input_var = symbols[n.args[0]]

    # dim vars
    d1, counter = gen_dvar(counter)
    d2, counter = gen_dvar(counter)
    d3, counter = gen_dvar(counter)
    d4, counter = gen_dvar(counter)
    nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
    c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)
    c2 = BinConstraintT(avg_pool, TensorType([d1, d2, module_instance.output_size[0], module_instance.output_size[1]]), op_eq)

    return [c1, c2, *nat_constraints], counter
1134
+
1135
+
1136
@register_inference_rule(Conv2d)
def conv2d_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    Conv2d: the input must match a rank-4 tensor pattern, its channel
    dimension must be consistent with in_channels, and the output shape
    is delegated to the CalcConv constraint.
    """
    assert isinstance(n.args[0], Node)

    my_conv, counter = gen_tvar(counter)
    symbols[n] = my_conv
    input_var = symbols[n.args[0]]

    # dim vars
    [d1, d2, d3, d4], counter = gen_tensor_dims(MAX_TENSOR_RANK, counter)

    # c1 = Matching(input_var, TensorType([d1, d2, d3, d4]))
    c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)

    # c2 = DConsistency(module_instance.in_channels, d2)
    c2 = BinConstraintD(module_instance.in_channels, d2, op_consistency)

    # output shape computation from the conv hyperparameters
    c3 = CalcConv(my_conv, input_var,
                  module_instance.out_channels,
                  module_instance.kernel_size,
                  module_instance.padding,
                  module_instance.stride,
                  module_instance.dilation, [d1, d2, d3, d4])

    nat_constraints = gen_nat_constraints([d1, d2, d3, d4])

    return [c1, c2, c3, *nat_constraints], counter
1163
+
1164
+
1165
@register_inference_rule(torch.nn.MaxPool2d)
def maxpool_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    MaxPool2d: the input must match a rank-4 tensor pattern; the output
    shape is delegated to the CalcMaxPool constraint.
    """
    assert isinstance(n.args[0], Node)
    maxpool, counter = gen_tvar(counter)
    symbols[n] = maxpool
    input_var = symbols[n.args[0]]

    # dim vars
    [d1, d2, d3, d4], counter = gen_tensor_dims(MAX_TENSOR_RANK, counter)

    c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)

    # output shape computation from the pooling hyperparameters
    c2 = CalcMaxPool(maxpool, input_var, module_instance.kernel_size, module_instance.padding,
                     module_instance.stride, module_instance.dilation, [d1, d2, d3, d4])

    nat_constraints = gen_nat_constraints([d1, d2, d3, d4])

    return [c1, c2, *nat_constraints], counter
1183
+
1184
+
1185
class ConstraintGenerator:
    """
    Walks an FX graph and emits gradual-typing constraints for every node,
    dispatching to the registered inference rules in _INFERENCE_RULES.
    """

    def __init__(self, traced, graph=None):
        self.traced = traced  # traced or tracer.root
        self.traced_params = dict(self.traced.named_parameters())
        self.constraints = []   # populated as a side-channel by some rules
        self.symbol_dict = {}   # Node -> type/dimension variable
        # fall back to the explicitly supplied graph when `traced` has none
        self.graph = traced.graph if hasattr(traced, 'graph') else graph


    def generate_constraints(self, counter=0):
        """
        Iterate through every node and generate constraints
        Effect: self.constraints will be populated with the final constraints
        """
        graph = self.graph

        all_constraints = []

        for n in graph.nodes:
            (constraints, counter) = self.generate_constraints_node(n, counter)
            all_constraints += constraints

        return Conj(all_constraints), counter

    def generate_constraints_node(self, n: Node, counter):
        """
        Generate constraints the given node:
        Currently supported operations:
        - Reshape
        - Add
        - conv2d
        """

        if n.op == 'placeholder':
            x, counter = gen_tvar(counter)
            self.symbol_dict[n] = x

            my_type = n.type

            # anything that is neither Dyn nor a TensorType is normalized
            if n.type != Dyn and (not isinstance(n.type, TensorType)):
                if n.type == torch.nn.parameter.Parameter:
                    # since we have a parameter, the shape must be static
                    assert 'example_value' in n.meta
                    my_type = TensorType(n.meta['example_value'].size())
                else:
                    my_type = Dyn

            c1 = BinConstraintT(my_type, x, op_precision)
            c2 = BinConstraintT(x, MAX_TENSOR_RANK, op_leq)
            return [c1, c2], counter

        elif n.op == 'call_function':
            if n.target in _INFERENCE_RULES:
                return _INFERENCE_RULES[n.target](n, self.symbol_dict, self.constraints, counter)
            else:
                raise RuntimeError(f'No inference rule registered for target {n.target}!')

        elif n.op == 'call_module':

            module_instance = self.traced.get_submodule(n.target)
            if type(module_instance) in _INFERENCE_RULES:
                return _INFERENCE_RULES[type(module_instance)](n,
                                                               module_instance,
                                                               self.symbol_dict,
                                                               self.constraints, counter)
            else:
                raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')

        elif n.op == 'call_method':
            if n.target in _INFERENCE_RULES:
                return _INFERENCE_RULES[n.target](n, self.symbol_dict, self.constraints, counter)
            else:
                raise RuntimeError(f'No inference rule registered for target {n.target}!')

        elif n.op == 'get_attr':
            t = self.traced_params.get(n.target, None)

            if isinstance(t, torch.Tensor):
                if len(t.shape) > 0:
                    res = list(t.shape)
                    attr_type = TensorType(res)
                    output, counter = gen_tvar(counter)
                    self.symbol_dict[n] = output
                    return [BinConstraintT(output, attr_type, op_eq)], counter
                else:
                    # scalar?
                    return [], counter
            else:
                # non-tensor attributes contribute no constraints
                return [], counter

        elif n.op == 'output':
            return [], counter

        else:
            raise NotImplementedError(f"Method {n.op} not yet implemented")
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py ADDED
@@ -0,0 +1,1040 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+ import copy
3
+ import itertools
4
+ from torch.fx.experimental.migrate_gradual_types.constraint_generator import BinConstraintT, MAX_TENSOR_RANK
5
+ from torch.fx.experimental.migrate_gradual_types.constraint import T, BinConstraintD, Conj, Constraint, DVar, TVar, \
6
+ Transpose
7
+ from torch.fx.experimental.migrate_gradual_types.constraint import Disj, TGreatestUpperBound
8
+ from torch.fx.experimental.migrate_gradual_types.constraint import DGreatestUpperBound
9
+ from torch.fx.experimental.migrate_gradual_types.constraint import CalcConv, CalcMaxPool
10
+ from torch.fx.experimental.migrate_gradual_types.constraint import CalcProduct, CanReshape
11
+ from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, Prod, F, GetItem, GetItemTensor, IndexSelect
12
+ from torch.fx.experimental.migrate_gradual_types.operation import op_eq, op_precision, op_leq, op_matching
13
+ from torch.fx.experimental.migrate_gradual_types.operation import op_consistency, op_neq
14
+ from torch.fx.experimental.migrate_gradual_types.operation import op_mul, op_add, op_sub, op_div, op_mod
15
+ from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar
16
+ from torch.fx.tensor_type import TensorType, Dyn
17
+ from typing import Callable, Dict, List
18
+
19
+ _TRANSFORMATION_RULES: Dict[Constraint, Callable] = {}
20
+
21
+
22
def register_transformation_rule(call_target):
    """Return a decorator that registers a transformation rule for *call_target*."""
    def _register(fn):
        # refuse double registration to avoid silently shadowing a rule
        if call_target in _TRANSFORMATION_RULES:
            raise RuntimeError(f'Transformation rule already registered for {call_target}!')
        _TRANSFORMATION_RULES[call_target] = fn
        return fn
    return _register
29
+
30
+
31
def valid_index(index, dims):
    """
    Given a list of dimensions, checks if an index is valid in the list
    """
    try:
        dims[index]
    except IndexError:
        return F()
    return T()
40
+
41
+
42
@register_transformation_rule(Transpose)
def transform_transpose(constraint, counter):
    """
    Similar to a sequence of two index-selects
    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    is_valid_index1 = valid_index(constraint.index1, dims)
    is_valid_index2 = valid_index(constraint.index2, dims)
    new_dims = copy.deepcopy(dims)
    nat_constraints = gen_nat_constraints(dims)

    # only swap when both indices are in range; otherwise the invalid-index
    # F() terms below make the conjunction unsatisfiable
    if is_valid_index1 == T() and is_valid_index2 == T():
        new_dims[constraint.index1] = dims[constraint.index2]
        new_dims[constraint.index2] = dims[constraint.index1]

    transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                                   *nat_constraints,
                                   is_valid_index1, is_valid_index2,
                                   BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])
    return transformed_constraint, counter
62
+
63
+
64
@register_transformation_rule(IndexSelect)
def transform_index_select(constraint, counter):
    """
    The constraints consider the given tensor size, checks if the index is valid
    and if so, generates a constraint for replacing the input dimension
    with the required dimension
    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    is_valid_index = valid_index(constraint.index, dims)
    nat_constraints = gen_nat_constraints(dims)

    # if the index is valid then replace the input dimension with the new dimension
    # otherwise the dimension will not be replaced and the clause will contain False
    if is_valid_index == T():
        new_dims = copy.deepcopy(dims)
        new_dims[constraint.index] = constraint.dim_replace

    # NOTE: when the index is invalid, `new_dims` references the unmodified
    # deepcopy only via the branch above; the F() term makes the clause UNSAT
    transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                                   *nat_constraints,
                                   is_valid_index,
                                   BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])

    # print(constraints)
    return transformed_constraint, counter
88
+
89
+
90
@register_transformation_rule(GetItem)
def transform_get_item(constraint, counter):
    """
    generate an equality of the form:
    t = [a1, ..., an]
    then generate constraints that check if the given index is valid
    given this particular tensor size.
    If the index is valid, generate a constraint to get the item
    Note that we already handled the Dyn input case in the previous
    step.
    Args:
        constraint: GetItem which assumes we are getting an item from a tensor (not Dyn)
        counter: variable tracking
    Returns: simplified constraints for GetItem

    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    nat_constraints = gen_nat_constraints(dims)


    is_valid_index = valid_index(constraint.index, dims)

    all_constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                       *nat_constraints,
                       is_valid_index]

    # if the index is valid, we generate a constraint for getting an item
    # otherwise this clause will have been UNSAT due to the wrong index
    if is_valid_index == T():
        all_constraints.append(BinConstraintD(constraint.res, dims[constraint.index], op_eq))

    return Conj(all_constraints), counter
122
+
123
def valid_index_tensor(index, dims):
    """
    if the slice instances exceed the length of the dimensions
    then this is a type error so we return False
    """
    # count only slice entries; None entries add rank and are not indexed
    slice_count = sum(1 for entry in index if isinstance(entry, slice))
    return F() if slice_count > len(dims) else T()
136
+
137
@register_transformation_rule(GetItemTensor)
def transform_get_item_tensor(constraint, counter):
    """
    When the index is a tuple, then the output will be a tensor
    TODO: we have to check if this is the case for all HF models

    The cases we are covering here are a tuple with one of:
     - slice with default argument
     - None

     None appends 1 to the input tensor dimensions
     so each occurrence of 'None' increases the rank by 1

     slice with default arguments does not change the rank
    """
    assert isinstance(constraint.index_tuple, tuple)


    # generate a result tensor of the expected size
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    nat_constraints = gen_nat_constraints(dims)

    # generate a place-holder list of the right rank
    # where "slice" does not contribute to the rank and "None" does
    none_c = constraint.index_tuple.count(None)
    resulting_tensor_dims = (none_c + len(dims)) * [None]

    # NOTE(review): this first assignment is dead — dim_index is reset before
    # the second loop below and never read in the first loop
    dim_index = 0
    for i in range(len(constraint.index_tuple)):

        # append 1 to the right location of the resulting tensor
        if constraint.index_tuple[i] is None:
            resulting_tensor_dims[i] = 1

        elif constraint.index_tuple[i] == slice(None, None, None):
            pass

        else:
            raise NotImplementedError('Method not yet implemented')

    # append the remaining dimensions to the right location
    dim_index = 0
    for i in range(len(resulting_tensor_dims)):
        if resulting_tensor_dims[i] is None:
            resulting_tensor_dims[i] = dims[dim_index]
            dim_index += 1

    # check if the index is valid
    is_valid_index = valid_index_tensor(constraint.index_tuple, dims)

    # check if the resulting tensor is within bounds
    if len(resulting_tensor_dims) > 4:
        return F(), counter

    else:
        constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                       BinConstraintT(constraint.res, TensorType(resulting_tensor_dims), op_eq),
                       *nat_constraints,
                       is_valid_index]
        return Conj(constraints), counter
197
+
198
+
199
@register_transformation_rule(BinConstraintT)
def generate_binconstraint_t(constraint, counter):
    """
    Transform binary constraints for tensors
    """

    # precision constraints
    if constraint.op == op_precision:
        if constraint.lhs == Dyn:
            # Dyn is less precise than everything: always satisfiable
            return T(), counter
        elif isinstance(constraint.lhs, TensorType):
            is_fully_static = all(d != Dyn for d in constraint.lhs.__args__)
            if is_fully_static:
                # a fully static type can only be refined to itself
                return BinConstraintT(constraint.lhs, constraint.rhs, op_eq), counter
            else:
                # replace each dimension with a fresh variable constrained
                # to be at least as precise as the original dimension
                new_dims = []

                for _ in range(len(constraint.lhs.__args__)):
                    dim, counter = gen_dvar(counter)
                    new_dims.append(dim)

                new_dim_constraints = [BinConstraintD(old_dim, new_dim, op_precision) for
                                       new_dim, old_dim in zip(new_dims, constraint.lhs.__args__)] + \
                    [BinConstraintT(constraint.rhs, TensorType(new_dims), op_eq)] + \
                    [BinConstraintD(1, new_dim, op_leq) for
                     new_dim in new_dims]
                return Conj(new_dim_constraints), counter

    # matching
    elif constraint.op == op_matching:
        assert isinstance(constraint.rhs, TensorType)
        d1 = constraint.rhs.__args__[0]
        d2 = constraint.rhs.__args__[1]
        d3 = constraint.rhs.__args__[2]
        d4 = constraint.rhs.__args__[3]

        # either the lhs is Dyn (and so are all pattern dims) ...
        conj = [BinConstraintT(constraint.lhs, Dyn, op_eq),
                BinConstraintD(d1, Dyn, op_eq),
                BinConstraintD(d2, Dyn, op_eq),
                BinConstraintD(d3, Dyn, op_eq),
                BinConstraintD(d4, Dyn, op_eq)]
        # ... or it equals the rank-4 pattern exactly
        return Disj([Conj(conj),
                     BinConstraintT(constraint.lhs, TensorType([d1, d2, d3, d4]), op_eq)]), counter

    elif constraint.op == op_consistency:
        c_dyn = Disj([BinConstraintT(constraint.lhs, Dyn, op_eq), BinConstraintT(constraint.rhs, Dyn, op_eq)])
        [c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4], counter = gen_consistency_constraints(constraint, counter)

        return Disj([c_dyn, c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4]), counter

    elif constraint.op == op_leq:
        # rank bound: lhs is Dyn or a tensor of some rank up to rhs
        assert isinstance(constraint.rhs, int)
        disj = [BinConstraintT(constraint.lhs, Dyn, op_eq)]
        for i in range(1, constraint.rhs + 1):
            dims = []
            for j in range(1, i + 1):
                dim_var, counter = gen_dvar(counter)
                dims.append(dim_var)
            disj.append(BinConstraintT(constraint.lhs, TensorType(dims), op_eq))
        return Disj(disj), counter
    else:
        # other tensor ops pass through unchanged
        return constraint, counter
261
+
262
+
263
@register_transformation_rule(BinConstraintD)
def generate_binconstraint_d(constraint, counter):
    """
    Transform binary constraints for dimensions
    """
    if constraint.op == op_precision:
        if isinstance(constraint.lhs, int):
            # a concrete dimension can only be refined to itself
            return BinConstraintD(constraint.lhs, constraint.rhs, op_eq), counter
        elif constraint.lhs == Dyn:
            # Dyn is less precise than any dimension: always satisfiable
            return T(), counter

    elif constraint.op == op_consistency:
        # dimensions are consistent when equal or when either is Dyn
        return Disj([BinConstraintD(constraint.lhs, constraint.rhs, op_eq),
                     BinConstraintD(constraint.rhs, Dyn, op_eq), BinConstraintD(constraint.lhs, Dyn, op_eq)]), counter

    else:
        # other dimension ops pass through unchanged
        return constraint, counter
280
+
281
+
282
+ @register_transformation_rule(Conj)
283
+ def generate_conj(constraint, counter):
284
+ """
285
+ Transform conjunctions
286
+ """
287
+ new = []
288
+ for c in constraint.conjucts:
289
+ new_c, counter = transform_constraint(c, counter)
290
+ new.append(new_c)
291
+ return Conj(new), counter
292
+
293
+
294
+ @register_transformation_rule(Disj)
295
+ def generate_disj(constraint, counter):
296
+ """
297
+ Transform disjunctions
298
+ """
299
+ new = []
300
+ for c in constraint.disjuncts:
301
+ new_c, counter = transform_constraint(c, counter)
302
+ new.append(new_c)
303
+ return Disj(new), counter
304
+
305
+
306
+ @register_transformation_rule(TGreatestUpperBound)
307
+ def generate_gub(constraint, counter):
308
+ """
309
+ Transform greatest upper bound for tensors. Results in equality and Greatest Upper Bound
310
+ on dimensions
311
+ """
312
+ c1 = Conj([Disj([BinConstraintT(constraint.rhs1, Dyn, op_eq),
313
+ BinConstraintT(constraint.rhs2, Dyn, op_eq)]), BinConstraintT(constraint.res, Dyn, op_eq)])
314
+
315
+ [c2, c3, c4, c5], counter = gen_greatest_upper_bound(constraint, counter)
316
+
317
+ return Disj([c1, c2, c3, c4, c5]), counter
318
+
319
+
320
+ @register_transformation_rule(DGreatestUpperBound)
321
+ def generate_d_gub(constraint, counter):
322
+ """
323
+ Transform greatest upper bound for dimensions into equality constraints
324
+ """
325
+ c1 = Conj([BinConstraintD(constraint.rhs1, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs2, op_eq)])
326
+ c2 = Conj([BinConstraintD(constraint.rhs2, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)])
327
+ c3 = Conj([BinConstraintD(constraint.rhs2, constraint.rhs1, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)])
328
+ return Disj([c1, c2, c3]), counter
329
+
330
+
331
+ @register_transformation_rule(CalcConv)
332
+ def generate_calc_conv(constraint, counter):
333
+ d, counter = gen_tensor_dims(4, counter)
334
+ conv_result = TensorType([d[0], d[1], d[2], d[3]])
335
+
336
+ # the convolution result is a tensor of size 4
337
+ c1 = BinConstraintT(constraint.conv_result, conv_result, op_eq)
338
+
339
+ # the second dimension of the output is equal to the output channels
340
+ c2 = Conj([BinConstraintD(d[1], constraint.c_out, op_eq), BinConstraintD(d[1], Dyn, op_neq)])
341
+
342
+ # the input corresponds to the output in the first dimension of the convolution
343
+ c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)
344
+
345
+ c4, c5 = calc_last_two_dims(constraint, d)
346
+
347
+ leq_constraints = Conj([BinConstraintD(0, d[0], op_leq),
348
+ BinConstraintD(0, d[1], op_leq),
349
+ BinConstraintD(0, d[2], op_leq),
350
+ BinConstraintD(0, d[3], op_leq)])
351
+
352
+ return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter
353
+
354
+
355
+ @register_transformation_rule(CalcMaxPool)
356
+ def generate_calc_maxpool(constraint, counter):
357
+ """
358
+ Transform maxpool constraints
359
+ """
360
+ d, counter = gen_tensor_dims(4, counter)
361
+ maxpool_result = TensorType([d[0], d[1], d[2], d[3]])
362
+
363
+ # the maxpool result is a tensor of size 4
364
+ c1 = BinConstraintT(constraint.maxpool_result, maxpool_result, op_eq)
365
+
366
+ # the input corresponds to the output in the first and second dimension of maxpool
367
+ c2 = BinConstraintD(constraint.matching_constraint[1], d[1], op_eq)
368
+ c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)
369
+ c4, c5 = calc_last_two_dims(constraint, d)
370
+
371
+ leq_constraints = Conj([BinConstraintD(0, d[0], op_leq),
372
+ BinConstraintD(0, d[1], op_leq),
373
+ BinConstraintD(0, d[2], op_leq),
374
+ BinConstraintD(0, d[3], op_leq)])
375
+
376
+ return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter
377
+
378
+
379
+ @register_transformation_rule(CalcProduct)
380
+ def generate_calc_product(constraint, counter):
381
+ """
382
+ Transform flatten constraints
383
+ """
384
+ start = constraint.start
385
+ end = constraint.end
386
+ dims = constraint.dims_to_flatten
387
+ flattened = constraint.flattened
388
+ n = len(constraint.dims_to_flatten)
389
+
390
+ # this will be evaluated right here
391
+ boundary_check = (0 <= start and start < end and end <= n)
392
+
393
+ c_boundary = T() if boundary_check else F()
394
+
395
+ lhs = dims[0:start]
396
+ rhs = dims[end:]
397
+ mid = dims[start:end]
398
+
399
+ all_possibilities = generate_all_int_dyn_dim_possibilities(mid)
400
+
401
+ all_constraints = []
402
+
403
+ for p in all_possibilities:
404
+ p = list(p)
405
+ # this tells us there is a dynamic variable
406
+ contains_dyn = not all(constraint.op == op_neq for constraint in p)
407
+ if contains_dyn:
408
+ mid_var = [Dyn]
409
+ total_constraints = lhs + mid_var + rhs
410
+ if len(total_constraints) > 4:
411
+ all_constraints.append(F())
412
+ else:
413
+ all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq)] + p))
414
+ else:
415
+ new_var, counter = gen_dvar(counter)
416
+ mid_eq_prod = Conj([BinConstraintD(new_var, Prod(mid), op_eq), BinConstraintD(new_var, Dyn, op_neq)])
417
+ mid_var = [new_var]
418
+ total_constraints = lhs + mid_var + rhs
419
+ if len(total_constraints) > 4:
420
+ all_constraints.append(F())
421
+ else:
422
+ all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq), mid_eq_prod] + p))
423
+
424
+ return Conj([Disj(all_constraints), c_boundary]), counter
425
+
426
+
427
+ @register_transformation_rule(CanReshape)
428
+ def generate_reshape(constraint, counter):
429
+ """
430
+ Transform reshape constraints
431
+ """
432
+ d, counter = gen_tensor_dims(4, counter)
433
+
434
+ d1 = d[0]
435
+ d2 = d[1]
436
+ d3 = d[2]
437
+ d4 = d[3]
438
+
439
+ target = constraint.target.__args__
440
+
441
+ is_fully_static = all(d != Dyn for d in target)
442
+
443
+ # dynamic tensor
444
+ c1_dyn = BinConstraintT(constraint.src, Dyn, op_eq)
445
+ c2_tensor1 = BinConstraintT(constraint.src, TensorType([d1]), op_eq)
446
+ c2_tensor2 = BinConstraintT(constraint.src, TensorType([d1, d2]), op_eq)
447
+ c2_tensor3 = BinConstraintT(constraint.src, TensorType([d1, d2, d3]), op_eq)
448
+ c2_tensor4 = BinConstraintT(constraint.src, TensorType([d1, d2, d3, d4]), op_eq)
449
+
450
+ d1_eq_dyn = BinConstraintD(d1, Dyn, op_eq)
451
+ d1_neq_dyn = BinConstraintD(d1, Dyn, op_neq)
452
+
453
+ d2_eq_dyn = BinConstraintD(d2, Dyn, op_eq)
454
+ d2_neq_dyn = BinConstraintD(d2, Dyn, op_neq)
455
+
456
+ d3_eq_dyn = BinConstraintD(d3, Dyn, op_eq)
457
+ d3_neq_dyn = BinConstraintD(d3, Dyn, op_neq)
458
+
459
+ d4_eq_dyn = BinConstraintD(d3, Dyn, op_eq)
460
+ d4_neq_dyn = BinConstraintD(d3, Dyn, op_neq)
461
+
462
+ nat_d1 = BinConstraintD(0, d1, op_leq)
463
+ nat_d2 = BinConstraintD(0, d2, op_leq)
464
+ nat_d3 = BinConstraintD(0, d3, op_leq)
465
+ nat_d4 = BinConstraintD(0, d4, op_leq)
466
+
467
+ if is_fully_static:
468
+ # size 1 tensor
469
+ c3_tensor1 = Disj([d1_eq_dyn,
470
+ (Conj([d1_neq_dyn,
471
+ BinConstraintD(d1, Prod(target), op_eq)]))])
472
+ all_tensor_1 = Conj([c2_tensor1, c3_tensor1])
473
+
474
+ # size 2 tensor
475
+ all_tensor_2 = Conj([c2_tensor2, gen_all_reshape_possibilities([d1, d2], target)])
476
+
477
+ # size 3 tensor
478
+ all_tensor_3 = Conj([c2_tensor3, gen_all_reshape_possibilities([d1, d2, d3], target)])
479
+
480
+ # size 4 tensor
481
+ all_tensor_4 = Conj([c2_tensor4, gen_all_reshape_possibilities([d1, d2, d3, d4], target)])
482
+
483
+ return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]),
484
+ nat_d1, nat_d2, nat_d3, nat_d4]), counter
485
+
486
+ # then there must be exactly one occurrence of dyn
487
+ else:
488
+ new_target = []
489
+
490
+ for n in target:
491
+ if n != Dyn:
492
+ new_target.append(n)
493
+
494
+ # tensor 1
495
+ c3_tensor1 = Disj([d1_eq_dyn,
496
+ (Conj([d1_neq_dyn,
497
+ is_dim_div_by_target(new_target, d1)]))])
498
+ all_tensor_1 = Conj([c2_tensor1, c3_tensor1])
499
+
500
+ # tensor 2
501
+ c21 = Disj([d1_eq_dyn, d2_eq_dyn])
502
+ c22 = Conj([d1_neq_dyn, d2_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2]))])
503
+ all_tensor_2 = Conj([c2_tensor2, Disj([c21, c22])])
504
+
505
+ # tensor 3
506
+ c31 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn])
507
+ c32 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3]))])
508
+ all_tensor_3 = Conj([c2_tensor3, Disj([c31, c32])])
509
+
510
+ # tensor 4
511
+ c41 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn, d4_eq_dyn])
512
+ c42 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, d4_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3, d4]))])
513
+ all_tensor_4 = Conj([c2_tensor4, Disj([c41, c42])])
514
+
515
+ return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]),
516
+ nat_d1, nat_d2, nat_d3, nat_d4]), counter
517
+
518
+
519
+ @register_transformation_rule(ApplyBroadcasting)
520
+ def generate_broadcasting(constraint, counter):
521
+ """
522
+ Transform broadcasting constraints
523
+ """
524
+ e11, e12 = constraint.res1, constraint.res2
525
+ e1, e2 = constraint.input1, constraint.input2
526
+
527
+ e1_dyn = BinConstraintT(e1, Dyn, op_eq)
528
+ e2_dyn = BinConstraintT(e2, Dyn, op_eq)
529
+
530
+ # Introduce dimensions
531
+ e1_equal_e11 = BinConstraintT(e1, e11, op_eq)
532
+ e2_equal_e12 = BinConstraintT(e2, e12, op_eq)
533
+
534
+ # dyn possibility
535
+ e1_dyn_constraint = Conj([e1_dyn, e1_equal_e11, e2_equal_e12])
536
+ e2_dyn_constraint = Conj([e2_dyn, e1_equal_e11, e2_equal_e12])
537
+
538
+ # tensor possibility
539
+ # generate dimensions to create tensors of size 1
540
+ final_tensor_1_constraint, _, _, nat_dims_1, counter = \
541
+ gen_broadcasting_constraints(e1, e2, e11, e12, 1, counter)
542
+
543
+ # generate dimensions to create tensors of size 2
544
+ final_tensor_2_constraint_no_padding, final_tensor_2_constraint_padding_arg1, \
545
+ final_tensor_2_constraint_padding_arg2, nat_dims_2, counter = \
546
+ gen_broadcasting_constraints(e1, e2, e11, e12, 2, counter)
547
+
548
+ # generate dimensions to create tensors of size 3
549
+ final_tensor_3_constraint_no_padding, final_tensor_3_constraint_padding_arg1, \
550
+ final_tensor_3_constraint_padding_arg2, nat_dims_3, counter = \
551
+ gen_broadcasting_constraints(e1, e2, e11, e12, 3, counter)
552
+
553
+ # generate dimensions to create tensors of size 4
554
+ final_tensor_4_constraint_no_padding, final_tensor_4_constraint_padding_arg1, \
555
+ final_tensor_4_constraint_padding_arg2, nat_dims_4, counter = \
556
+ gen_broadcasting_constraints(e1, e2, e11, e12, 4, counter)
557
+
558
+ final_result = Disj([
559
+ e1_dyn_constraint,
560
+ e2_dyn_constraint,
561
+ final_tensor_1_constraint,
562
+ final_tensor_2_constraint_no_padding,
563
+ final_tensor_2_constraint_padding_arg1,
564
+ final_tensor_2_constraint_padding_arg2,
565
+ final_tensor_3_constraint_no_padding,
566
+ final_tensor_3_constraint_padding_arg1,
567
+ final_tensor_3_constraint_padding_arg2,
568
+ final_tensor_4_constraint_no_padding,
569
+ final_tensor_4_constraint_padding_arg1,
570
+ final_tensor_4_constraint_padding_arg2
571
+ ])
572
+
573
+ return Conj([final_result, *nat_dims_1, *nat_dims_2, *nat_dims_3, *nat_dims_4]), counter
574
+
575
+
576
+ def transform_constraint(constraint: Constraint, counter: int):
577
+ """
578
+ Transforms a constraint into a simpler constraint.
579
+ Ex: precision and consistency are transformed to equality
580
+ Args:
581
+ constraint: constraint to be transformed
582
+ counter: for variable tracking
583
+
584
+ Returns: Constraint
585
+
586
+ """
587
+ if type(constraint) in _TRANSFORMATION_RULES:
588
+ return _TRANSFORMATION_RULES[type(constraint)](constraint, counter)
589
+
590
+ else:
591
+ return constraint, counter
592
+
593
+
594
+
595
+
596
+ def calc_last_two_dims(constraint, d: List[DVar]):
597
+ """
598
+ Generates constraints for the last two dimensions of a convolution or a maxpool output
599
+ Args:
600
+ constraint: CalcConv or CalcMaxPool
601
+ d: The list of output dimensions
602
+
603
+ Returns: Constraints for calculating the last two dimensions of the output
604
+
605
+ """
606
+
607
+ assert isinstance(constraint, (CalcConv, CalcMaxPool))
608
+
609
+ b3 = constraint.matching_constraint[2]
610
+ b4 = constraint.matching_constraint[3]
611
+
612
+ b3_dyn = Conj([BinConstraintD(d[2], Dyn, op_eq), BinConstraintD(b3, Dyn, op_eq)])
613
+ b4_dyn = Conj([BinConstraintD(d[3], Dyn, op_eq), BinConstraintD(b4, Dyn, op_eq)])
614
+
615
+ d3_not_dyn = Conj([BinConstraintD(d[2], Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq)])
616
+ d4_not_dyn = Conj([BinConstraintD(d[3], Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq)])
617
+
618
+ # transform parameters into tuples incase they are not already
619
+ padding = (constraint.padding, constraint.padding) \
620
+ if isinstance(constraint.padding, int) else constraint.padding
621
+ kernel = (constraint.kernel, constraint.kernel) \
622
+ if isinstance(constraint.kernel, int) else constraint.kernel
623
+ stride = (constraint.stride, constraint.stride) \
624
+ if isinstance(constraint.stride, int) else constraint.stride
625
+ dilation = (constraint.dilation, constraint.dilation) \
626
+ if isinstance(constraint.dilation, int) else constraint.dilation
627
+
628
+ f1 = BinConstraintD(b3, BinConstraintD(2, padding[0], op_mul), op_add)
629
+ f2 = BinConstraintD(dilation[0], BinConstraintD(kernel[0], 1, op_sub), op_mul)
630
+ f3 = BinConstraintD(BinConstraintD(BinConstraintD(f1, f2, op_sub), 1, op_sub), stride[0], op_div)
631
+ f4 = BinConstraintD(f3, 1, op_add)
632
+
633
+ c4 = Disj([b3_dyn, Conj([d3_not_dyn, BinConstraintD(d[2], f4, op_eq)])])
634
+
635
+ f11 = BinConstraintD(b4, BinConstraintD(2, padding[1], op_mul), op_add)
636
+ f22 = BinConstraintD(dilation[1], BinConstraintD(kernel[1], 1, op_sub), op_mul)
637
+ f33 = BinConstraintD(BinConstraintD(BinConstraintD(f11, f22, op_sub), 1, op_sub), stride[1], op_div)
638
+ f44 = BinConstraintD(f33, 1, op_add)
639
+
640
+ c5 = Disj([b4_dyn, Conj([d4_not_dyn, BinConstraintD(d[3], f44, op_eq)])])
641
+
642
+ return c4, c5
643
+
644
+
645
+ def generate_all_int_dyn_dim_possibilities(my_list: List[DVar]):
646
+ """
647
+ Generate all possibilities of being equal or not equal to dyn for my_list
648
+ Args:
649
+ my_list: List of tensor dimensions
650
+
651
+ Returns: A list of a list of constraints. Each list of constraints corresponds to
652
+ one possibility about the values of the dimension variables
653
+ """
654
+ # generate all possibilities of being equal or not equal to dyn for my_list
655
+ eq_possibilities = [BinConstraintD(my_list[i], Dyn, op_eq) for i in range(len(my_list))]
656
+ neq_possibilities = [BinConstraintD(my_list[i], Dyn, op_neq) for i in range(len(my_list))]
657
+ d_possibilities = []
658
+
659
+ for i in zip(eq_possibilities, neq_possibilities):
660
+ d_possibilities.append(list(i))
661
+ all_possibilities = list(itertools.product(*d_possibilities))
662
+ return all_possibilities
663
+
664
+
665
+ def is_target_div_by_dim(target: List[int], dim: List[DVar]):
666
+ """
667
+ Generate constraints to check if the target dimensions are divisible by the input dimensions
668
+ Args:
669
+ target: Target dimensions
670
+ dim: Input dimensions
671
+
672
+ Returns: Constraints to check divisibility
673
+
674
+ """
675
+ return BinConstraintD(BinConstraintD(Prod(target), dim, op_mod), 0, op_eq)
676
+
677
+
678
+ def is_dim_div_by_target(target: List[int], dim: List[DVar]):
679
+ """
680
+ Generate constraints to check if the input dimensions is divisible by the target dimensions
681
+ Args:
682
+ target: Target dimensions
683
+ dim: Input dimensions
684
+
685
+ Returns: Constraints to check divisibility
686
+
687
+ """
688
+ return BinConstraintD(BinConstraintD(dim, Prod(target), op_mod), 0, op_eq)
689
+
690
+
691
+ def gen_all_reshape_possibilities(list_of_dims, target):
692
+ """
693
+ Consider all possibilities what the input dimensions could be (number or dynamic)
694
+ Then generate the appropriate constraints using multiplication or mod depending on the possibility
695
+ The possibilities we consider here are the cross product of being equal to dyn or not equal to dyn
696
+ for the input. Target is fixed because at most one dimension could be dyn.
697
+ We have different cases for this.
698
+
699
+ Args:
700
+ list_of_dims: The input list of dimensions
701
+ target: The tensor we want to reshape to
702
+
703
+ Returns: A disjunction of transformed reshape constraints
704
+
705
+ """
706
+ all_possibilities = generate_all_int_dyn_dim_possibilities(list_of_dims)
707
+
708
+ all_constraints = []
709
+
710
+ for p in all_possibilities:
711
+ to_multiply = []
712
+
713
+ p = list(p)
714
+
715
+ for constraint in p:
716
+ assert isinstance(constraint, BinConstraintD)
717
+ if constraint.op == op_neq:
718
+ to_multiply.append(constraint.lhs)
719
+
720
+ if not to_multiply:
721
+ all_constraints.append(Conj(p))
722
+
723
+ elif len(to_multiply) < len(list_of_dims):
724
+ all_constraints.append(Conj(p + [is_target_div_by_dim(target, Prod(to_multiply))]))
725
+ else:
726
+ all_constraints.append(Conj(p + [BinConstraintD(Prod(list_of_dims),
727
+ Prod(target), op_eq)]))
728
+
729
+ return Disj(all_constraints)
730
+
731
+
732
+ def broadcast_dim(tensor_input1, tensor_input2, res1, res2, index, padding=False):
733
+ """
734
+ Apply broadcasting to the 'index' dimension of tensor_input1.
735
+ Args:
736
+ tensor_input1: should represent [d1, ..., d_index, ...] where d_index = 1
737
+ tensor_input2: represents the second input
738
+ res1: broadcasted result 1
739
+ res2: broadcasted result 2
740
+ index: the index to broadcast
741
+ padding: If padding was used, then tensor_input1[index] does not exist
742
+
743
+ Returns:
744
+
745
+ """
746
+ if tensor_input1[index] is None:
747
+ assert padding
748
+
749
+
750
+ if not padding:
751
+ # then the inputs are the same length so they all have dimensions at "index"
752
+ return Conj([BinConstraintD(tensor_input1[index], 1, op_eq),
753
+ BinConstraintD(res1[index], res2[index], op_eq),
754
+ BinConstraintD(res2[index], tensor_input2[index], op_eq)])
755
+
756
+ else:
757
+ # we don't set the input dimension to 1, since it doesn't exist.
758
+ return Conj([BinConstraintD(res1[index], res2[index], op_eq),
759
+ BinConstraintD(res2[index], tensor_input2[index], op_eq)])
760
+
761
+
762
+ def apply_padding(e1_var: TVar,
763
+ e11: BinConstraintT,
764
+ e2: BinConstraintT,
765
+ e12: BinConstraintT,
766
+ d2: List[DVar],
767
+ d11: List[DVar],
768
+ d12: List[DVar],
769
+ counter: int):
770
+ """
771
+ We are considering the possibility where one input has less dimensions than
772
+ another input, so we apply padding to the broadcasted results
773
+
774
+ Args:
775
+ e1_var: Variable representing the first input where padding will be
776
+ e11: constraint of the form e11 = Tensortype[d1, ..., dn]
777
+ e2: constraint of the form e2 = Tensortype[d1, ..., dn]
778
+ e12: constraint of the form e11 = Tensortype[d1, ..., dn]
779
+ d2: Tensor variables for the second input
780
+ d11: Tensor variables for the broadcasted first input
781
+ d12: Tensor variables for the broadcasted second input
782
+ counter: variable tracking
783
+
784
+ Returns: A new constraint whose goal is to apply padding to the broadcasted result
785
+
786
+ """
787
+
788
+ res = []
789
+
790
+ # pad the shorter input with None so we can pass it to the broadcasting helper function
791
+ for i in range(1, len(d2)):
792
+
793
+ d1, counter = gen_tensor_dims(i, counter)
794
+
795
+ nat_constraints = gen_nat_constraints(d1 + d2 + d11 + d12)
796
+
797
+ e1 = BinConstraintT(e1_var, TensorType(d1), op_eq)
798
+
799
+ simulate_padding = [None] * (len(d2) - i)
800
+
801
+ assert len(simulate_padding + d1) == len(d2)
802
+
803
+ broadcast_padding = []
804
+
805
+ # for every padding size, we also consider broadcasting
806
+ for j in range(len(d2) - i):
807
+ broadcast_padding.append(broadcast_dim(simulate_padding, d2, d11, d12, j, True))
808
+
809
+ # we consider the possibilities for broadcasting for every dimension. Since we already
810
+ # padded d1, we do not consider it while broadcasting
811
+ all_broadcasting_possibilities = generate_all_broadcasting_possibilities_no_padding(d1,
812
+ d2[(len(d2) - i):],
813
+ d11[(len(d2) - i):],
814
+ d12[(len(d2) - i):])
815
+ # combine all constraints into a conjunction
816
+ c = Conj([e1, e11, e2, e12,
817
+ *broadcast_padding,
818
+ all_broadcasting_possibilities,
819
+ *nat_constraints
820
+ ])
821
+ res.append(c)
822
+
823
+ return Disj(res), counter
824
+
825
+
826
+ def no_broadcast_dim_with_index(d1: List[DVar],
827
+ d2: List[DVar],
828
+ d3: List[DVar],
829
+ d4: List[DVar],
830
+ i: int):
831
+ """
832
+ Args:
833
+ d1: input 1
834
+ d2: input 2
835
+ d3: simulated broadcasting for input 1
836
+ d4: simulated broadcasting for input 2
837
+ i: the rank of the resulting tensor addition
838
+
839
+ Returns: Constraints for when no broadcasting occurs
840
+ """
841
+ return Conj([
842
+ Disj([
843
+ Conj([BinConstraintD(d1[i], 1, op_eq),
844
+ BinConstraintD(d2[i], 1, op_eq)]),
845
+
846
+ Conj([BinConstraintD(d1[i], 1, op_neq),
847
+ BinConstraintD(d2[i], 1, op_neq)])]),
848
+
849
+ BinConstraintD(d1[i], d3[i], op_eq),
850
+ BinConstraintD(d2[i], d4[i], op_eq)])
851
+
852
+
853
+
854
+ def gen_lists_of_dims(num_tensors: int, dim_size: int, counter: int):
855
+ """
856
+ Generate lists of DVar to represent tensor dimensions
857
+ Args:
858
+ num_tensors: the required number of tensors
859
+ dim_size: the number of dimensions for each tensor
860
+ counter: variable tracking
861
+
862
+ Returns: A list of a list of tensor dimensions
863
+
864
+ """
865
+ res = []
866
+
867
+ for _ in range(num_tensors):
868
+ dims, counter = gen_tensor_dims(dim_size, counter)
869
+ res.append(dims)
870
+
871
+ return res, counter
872
+
873
+
874
+ def create_equality_constraints_for_broadcasting(e1: TVar,
875
+ e2: TVar,
876
+ e11: TVar,
877
+ e12: TVar,
878
+ d1: List[DVar],
879
+ d2: List[DVar],
880
+ d11: List[DVar],
881
+ d12: List[DVar]):
882
+ """
883
+ Create equality constraints for when no broadcasting occurs
884
+ Args:
885
+ e1: Input 1
886
+ e2: Input 2
887
+ e11: Broadcasted input 1
888
+ e12: Broadcasted input 2
889
+ d1: Variables that store dimensions for e1
890
+ d2: Variables that store dimensions for e2
891
+ d11: Variables that store dimensions for e11
892
+ d12: Variables that store dimensions for e22
893
+
894
+ Returns: Four equality constraints
895
+
896
+ """
897
+
898
+ e1_tensor = BinConstraintT(e1, TensorType(d1), op_eq)
899
+ e11_tensor = BinConstraintT(e11, TensorType(d11), op_eq)
900
+ e2_tensor = BinConstraintT(e2, TensorType(d2), op_eq)
901
+ e12_tensor = BinConstraintT(e12, TensorType(d12), op_eq)
902
+ return [e1_tensor, e11_tensor, e2_tensor, e12_tensor]
903
+
904
+
905
+ def gen_consistency_constraints(constraint: Constraint, counter: int):
906
+ """
907
+ Args:
908
+ constraint: Consistency constraint on tensors
909
+ counter: for variable tracking
910
+
911
+ Returns: Equality and consistency constraints on dimensions
912
+
913
+ """
914
+
915
+ all_constraints = []
916
+
917
+ for i in range(1, MAX_TENSOR_RANK + 1):
918
+ new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
919
+ new_dims_rhs_2, counter = gen_tensor_dims(i, counter)
920
+
921
+ nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)
922
+
923
+ c_tensor_i = Conj([BinConstraintT(constraint.lhs, TensorType(new_dims_rhs_1), op_eq),
924
+ BinConstraintT(constraint.rhs, TensorType(new_dims_rhs_2), op_eq)] +
925
+ [BinConstraintD(d1, d2, op_consistency) for
926
+ d1, d2 in zip(new_dims_rhs_1, new_dims_rhs_2)] + nat_constraints)
927
+
928
+ all_constraints.append(c_tensor_i)
929
+
930
+ return all_constraints, counter
931
+
932
+
933
+ def gen_greatest_upper_bound(constraint: TGreatestUpperBound, counter: int):
934
+ """
935
+ Args:
936
+ constraint: Greatest upper bound on tensors
937
+ counter: variable tracking
938
+
939
+ Returns: A set of equality constraints and DGreatestUpperBound constraints
940
+
941
+ """
942
+
943
+ all_constraints = []
944
+
945
+ for i in range(1, MAX_TENSOR_RANK + 1):
946
+ c = []
947
+ dims1, counter = gen_tensor_dims(i, counter)
948
+ c1tensor = TensorType(dims1)
949
+
950
+ dims2, counter = gen_tensor_dims(i, counter)
951
+ c2tensor = TensorType(dims2)
952
+
953
+ dims3, counter = gen_tensor_dims(i, counter)
954
+ c3tensor = TensorType(dims3)
955
+
956
+ c += [BinConstraintT(constraint.rhs1, c1tensor, op_eq),
957
+ BinConstraintT(constraint.rhs2, c2tensor, op_eq),
958
+ BinConstraintT(constraint.res, c3tensor, op_eq)] + \
959
+ gen_nat_constraints(dims1 + dims2 + dims3)
960
+
961
+ assert len(c3tensor.__args__) == len(c1tensor.__args__) == len(c2tensor.__args__)
962
+ for i in range(len(c3tensor.__args__)):
963
+ c.append(DGreatestUpperBound(c3tensor.__args__[i],
964
+ c1tensor.__args__[i],
965
+ c2tensor.__args__[i]))
966
+
967
+ all_constraints.append(Conj(c))
968
+ return all_constraints, counter
969
+
970
+
971
+ def generate_all_broadcasting_possibilities_no_padding(d1: List[DVar], d2: List[DVar], d11: List[DVar], d12: List[DVar]):
972
+ """
973
+ Generate broadcasting constraints assuming no padding. Broadcasting can happen at any dimension.
974
+ We look at all combinations for all dimensions in d1 and d2
975
+ Args:
976
+ d1: input1 dimensions
977
+ d2: input2 dimensions
978
+ d11: broadcasted input1 dimensions
979
+ d12: broadcasted input2 dimensions
980
+
981
+ Returns: broadcasting constraints relating the input dimensions to the broadcasted dimensions
982
+
983
+ """
984
+
985
+ size = len(d1)
986
+
987
+ res2 = []
988
+
989
+ for i in range(size):
990
+ t1 = broadcast_dim(d1, d2, d11, d12, i)
991
+ t2 = broadcast_dim(d2, d1, d12, d11, i)
992
+ t3 = no_broadcast_dim_with_index(d1, d2, d11, d12, i)
993
+
994
+ res2.append(Disj([t1, t2, t3]))
995
+
996
+ return Conj(res2)
997
+
998
+
999
+ def gen_broadcasting_constraints(e1: TVar, e2: TVar, e11: TVar, e12: TVar, i: int, counter: int):
1000
+ """
1001
+ Simulates broadcasting on e1 and e2 and returns the results
1002
+ respectively in e11 and e12. Because of gradual types,
1003
+ e1 and e2 may not be equal. Similarly, e11 and e12 may not
1004
+ be equal. e11 and e12 should be guaranteed to be consistent
1005
+ as they represent the shapes of the tensors to be added after
1006
+ broadcasting.
1007
+ Args:
1008
+ e1: TVar representing the type of input 1
1009
+ e2: TVar representing the type of input 2
1010
+ e11: TVar representing the representing broadcasted input 1
1011
+ e12: TVar representing the representing broadcasted input 2
1012
+ i: The rank of the resulting type of addition
1013
+ counter: for variable tracking
1014
+
1015
+ Returns: Simplified broadcasting constraints
1016
+
1017
+ """
1018
+ dims, counter = gen_lists_of_dims(4, i, counter)
1019
+ [d1, d2, d3, d4] = dims
1020
+ nat_dims_i = gen_nat_constraints(list(itertools.chain.from_iterable(dims)))
1021
+
1022
+ initialize_tensors_constraints = create_equality_constraints_for_broadcasting(e1, e2, e11, e12,
1023
+ d1, d2, d3, d4)
1024
+
1025
+ [e1_tensor, e11_tensor, e2_tensor, e12_tensor] = initialize_tensors_constraints
1026
+
1027
+ # without padding, broadcast all possibilities for tensors of size i
1028
+ final_tensor_constraint_no_padding = Conj([*initialize_tensors_constraints,
1029
+ generate_all_broadcasting_possibilities_no_padding(d1, d2, d3, d4)])
1030
+
1031
+ # with padding, broadcast all possibilities for tensors of size i
1032
+ final_tensor_constraint_padding_arg1, counter = \
1033
+ apply_padding(e1, e11_tensor, e2_tensor, e12_tensor, d2, d3, d4, counter)
1034
+
1035
+ final_tensor_constraint_padding_arg2, counter = \
1036
+ apply_padding(e2, e12_tensor, e1_tensor, e11_tensor, d1, d4, d3, counter)
1037
+
1038
+ return final_tensor_constraint_no_padding, \
1039
+ final_tensor_constraint_padding_arg1, \
1040
+ final_tensor_constraint_padding_arg2, nat_dims_i, counter
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/operation.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ op_add = '+'
2
+ op_sub = '-'
3
+ op_mul = '*'
4
+ op_div = '/'
5
+ op_eq = '='
6
+ op_neq = '!='
7
+ op_imp = '=>'
8
+ op_matching = '⊳'
9
+ op_consistency = '~'
10
+ op_precision = '⊑'
11
+ op_leq = '≤'
12
+ op_lt = '<'
13
+ op_gt = '>'
14
+ op_mod = '%'
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py ADDED
@@ -0,0 +1,348 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch.fx.experimental.migrate_gradual_types.constraint import Conj, Disj, T, F, BinConstraintT, BVar, is_bool_expr
2
+ from torch.fx.experimental.migrate_gradual_types.constraint import BinConstraintD, TVar, DVar
3
+ from torch.fx.experimental.migrate_gradual_types.constraint import Prod, is_algebraic_expression, is_dim
4
+ from torch.fx.experimental.migrate_gradual_types.constraint_generator import ConstraintGenerator
5
+ from torch.fx.experimental.migrate_gradual_types.constraint_transformation import transform_constraint
6
+ from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_eq, op_neq, op_gt, op_lt
7
+ from torch.fx.experimental.migrate_gradual_types.operation import op_leq, op_sub, op_div, op_mul, op_mod
8
+ from torch.fx.tensor_type import TensorType, Dyn
9
+
10
+ try:
11
+ import z3 # type: ignore[import]
12
+ from torch.fx.experimental.migrate_gradual_types.z3_types import tensor_type, z3_dyn, D
13
+ HAS_Z3 = True
14
+
15
def transform_to_z3(constraint, counter, dimension_dict):
    """
    Recursively lower a constraint from the gradual-typing constraint IR to a
    z3 expression.

    Args:
        constraint: the constraint to lower (Conj, Disj, T, F,
            BinConstraintT or BinConstraintD)
        counter: counter used to generate fresh z3 variable names
        dimension_dict: maps a dimension-variable name to the name of the z3
            variable encoding whether that dimension is static or dynamic

    Returns: a pair of the z3 expression (a Python bool for T/F) and the
    updated counter

    Raises:
        NotImplementedError: for constraint operations not handled yet
    """
    if isinstance(constraint, Conj):
        # conjunction: lower each conjunct, then AND them in z3
        conjuncts = []
        for c in constraint.conjucts:  # NOTE: 'conjucts' (sic) is the IR field name
            new_c, counter = transform_to_z3(c, counter, dimension_dict)
            conjuncts.append(new_c)
        return z3.And(conjuncts), counter

    elif isinstance(constraint, Disj):
        # disjunction: lower each disjunct, then OR them in z3
        disjuncts = []
        for c in constraint.disjuncts:
            new_c, counter = transform_to_z3(c, counter, dimension_dict)
            disjuncts.append(new_c)
        return z3.Or(disjuncts), counter

    elif isinstance(constraint, T):
        # the trivially-true constraint
        return True, counter

    elif isinstance(constraint, F):
        # the trivially-false constraint
        return False, counter

    elif isinstance(constraint, BinConstraintT):
        # tensor-level binary constraint; only equality is supported
        if constraint.op == op_eq:
            lhs, counter = transform_var(constraint.lhs, counter, dimension_dict)
            rhs, counter = transform_var(constraint.rhs, counter, dimension_dict)
            return (lhs == rhs), counter

        else:
            raise NotImplementedError('Method not yet implemented')

    elif isinstance(constraint, BinConstraintD):
        # dimension-level binary constraint
        if constraint.op == op_eq:

            if isinstance(constraint.lhs, BVar) and is_bool_expr(constraint.rhs):
                # boolean variable bound to a boolean expression
                transformed_rhs, counter = transform_to_z3(constraint.rhs, counter, dimension_dict)
                transformed_lhs = z3.Bool(constraint.lhs.c)
                return transformed_lhs == transformed_rhs, counter

            elif is_dim(constraint.lhs) and is_dim(constraint.rhs):
                # with dimension transformations we consider the encoding
                # (both the is-static flag and the value component)
                lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict)
                rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict)
                return lhs == rhs, counter

            else:
                # then we have an algebraic expression which means that we disregard the
                # first element of the encoding
                lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
                rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
                return lhs == rhs, counter

        # The assumption here is that the LHS and RHS must be dimensions
        elif constraint.op == op_neq:
            assert is_dim(constraint.lhs)
            assert is_dim(constraint.rhs)
            lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict)
            rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict)
            if constraint.rhs == Dyn or constraint.lhs == Dyn:
                # one side is Dyn: the other side is unequal iff it is static,
                # i.e. its is-static flag (arg(0)) equals 1
                if constraint.rhs == Dyn:
                    return lhs.arg(0) == 1, counter
                elif constraint.lhs == Dyn:
                    return rhs.arg(0) == 1, counter

            # if one of the instances is a number
            elif isinstance(constraint.lhs, int) or isinstance(constraint.rhs, int):
                if isinstance(constraint.lhs, int):
                    # unequal if the other side is dynamic, or static with a different value
                    return z3.Or([rhs.arg(0) == 0, z3.And([rhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter

                elif isinstance(constraint.rhs, int):
                    return z3.Or([lhs.arg(0) == 0, z3.And([lhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter

            else:
                # general case: unequal if exactly one side is dynamic, or both
                # are static with different values
                return z3.Or([z3.And([lhs.arg(0) == 0, rhs.arg(0) != 0]),
                              z3.And([lhs.arg(0) != 0, rhs.arg(0) == 0]),
                              z3.And([lhs.arg(0) != 0, rhs.arg(0) != 0, lhs.arg(1) != rhs.arg(1)])]), counter


        elif constraint.op == op_leq:
            # if the dimensions are not dyn, this will come into effect
            # there would have been another constraint specifying if a given dimension
            # is dyn or not
            assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
            lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
            rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
            return lhs <= rhs, counter

        elif constraint.op == op_gt:
            assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
            lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
            rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
            return lhs > rhs, counter

        elif constraint.op == op_lt:
            assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
            lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
            rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
            return lhs < rhs, counter

        else:
            raise NotImplementedError('operation not yet implemented')

    else:
        raise NotImplementedError('Operation not yet implemented')
118
+
119
+
120
def transform_var(tensor, counter, dimension_dict):
    """
    Lower a tensor type or tensor variable to its z3 representation.

    Args:
        tensor: a TensorType (possibly with variable dimensions), Dyn, or a TVar
        counter: counter used to generate fresh z3 variable names
        dimension_dict: mapping threaded through transform_dimension

    Returns: the z3 term for the tensor and the updated counter
    """
    if isinstance(tensor, TensorType):
        # encode every dimension of the concrete tensor type
        encoded_dims = []
        for arg in tensor.__args__:
            encoded, counter = transform_dimension(arg, counter, dimension_dict)
            encoded_dims.append(encoded)

        rank = len(encoded_dims)
        assert rank <= 4
        # pick the z3 constructor matching the tensor's rank
        constructors = {1: tensor_type.tensor1,
                        2: tensor_type.tensor2,
                        3: tensor_type.tensor3,
                        4: tensor_type.tensor4}
        if rank in constructors:
            return constructors[rank](*encoded_dims), counter

    elif tensor == Dyn:
        # the fully dynamic tensor has a dedicated z3 constant
        return z3_dyn, counter

    elif isinstance(tensor, TVar):
        # tensor variables become uninterpreted z3 constants of the tensor sort
        return z3.Const(tensor.tvar, tensor_type), counter
149
+
150
def transform_dimension(dimension, counter, dimension_dict):
    """
    Encode a dimension (Dyn, an int literal, or a DVar) as a z3 pair
    D(is_static, value) according to our scheme.

    Args:
        dimension: The dimension to be transformed
        counter: variable tracking

    Returns: the encoded pair and the current counter
    """
    if dimension == Dyn:
        # dynamic: flag 0, value is a fresh unconstrained integer variable
        counter += 1
        return D(0, z3.Int(counter)), counter

    if isinstance(dimension, int):
        # static literal: flag 1, value is the literal itself
        return D(1, dimension), counter

    if isinstance(dimension, DVar):
        key = dimension.c
        if key not in dimension_dict:
            # first time we see this variable: allocate a fresh flag variable
            counter += 1
            dimension_dict[key] = counter
        return D(z3.Int(dimension_dict[key]), z3.Int(key)), counter
173
+
174
+
175
def transform_algebraic_expression(expr, counter, dimension_dict):
    """
    Lower an algebraic expression over dimensions to z3.

    Args:
        expr: either a dimension (Dyn/int/DVar), a Prod, or a binary
            algebraic expression

    Returns: the transformed z3 expression and the updated counter
    """
    assert is_algebraic_expression(expr) or is_dim(expr)

    if is_dim(expr):
        # only the value component (arg(1)) of the encoding participates in
        # arithmetic; the is-static flag is disregarded here
        encoded, counter = transform_dimension(expr, counter, dimension_dict)
        return encoded.arg(1), counter

    if isinstance(expr, Prod):
        # product of dimensions: multiply their value components
        factors = []
        for d in expr.products:
            assert is_dim(d)
            encoded, counter = transform_dimension(d, counter, dimension_dict)
            factors.append(encoded.arg(1))
        return z3.Product(factors), counter

    if is_algebraic_expression(expr):
        lhs, counter = transform_algebraic_expression(expr.lhs, counter, dimension_dict)
        rhs, counter = transform_algebraic_expression(expr.rhs, counter, dimension_dict)

        # dispatch on the operator symbol
        combine = {
            op_sub: lambda a, b: a - b,
            op_add: lambda a, b: a + b,
            op_div: lambda a, b: a / b,
            op_mul: lambda a, b: a * b,
            op_mod: lambda a, b: a % b,
        }
        if expr.op not in combine:
            raise NotImplementedError('operation not yet implemented')
        return combine[expr.op](lhs, rhs), counter

    raise RuntimeError
227
+
228
+
229
def transform_all_constraints(traced, counter=0):
    """
    Generate typing constraints for a traced module and lower them to a
    single z3 expression.

    Args:
        traced: the traced module to generate constraints for
        counter: counter used to generate fresh variable names

    Returns: the z3 expression encoding all constraints
    """
    dimension_dict = {}  # type: ignore[var-annotated]

    generator = ConstraintGenerator(traced)
    constraints, counter = generator.generate_constraints(counter)

    # rewrite precision, matching and consistency constraints until the
    # constraint set stops changing
    constraints, counter = iterate_till_fixed_point(constraints, counter)

    z3_constraints, counter = transform_to_z3(constraints, counter, dimension_dict)
    return z3_constraints
252
+
253
def iterate_till_fixed_point(constraints, counter):
    """
    Repeatedly apply transform_constraint until the constraints stop changing.
    """
    previous = None
    while previous != constraints:
        previous = constraints
        constraints, counter = transform_constraint(constraints, counter)
    return constraints, counter
262
+
263
def transform_all_constraints_trace_time(tracer_root, graph, node, counter=0):
    """
    Takes a node and a graph and generates two sets of constraints.
    One set contains the node's constraints and another set
    contains the negation of the node's constraints
    Args:
        tracer_root: the root for getting the module instances
        graph: the graph so far in the tracing process
        node: node that represents a conditional
        counter: variable tracking

    Returns: Two sets of constraints. One with a conjunction with the
    conditional constraint and the other with a conjunction with
    its negation.

    """
    dimension_dict = {}  # type: ignore[var-annotated]

    generator = ConstraintGenerator(tracer_root, graph)
    new_constraints, counter = generator.generate_constraints(counter)

    # the generator emits a conjunction whose final conjunct is the
    # constraint for the conditional node itself
    condition_constraint = new_constraints.conjucts[-1]

    # we know the constraint is a conjunction where the last constraint is about the conditional
    # so remove the last constraint
    new_constraints.conjucts = new_constraints.conjucts[:-1]

    # transform precision, matching, consistency till obtaining a fixed point
    new_constraints, counter = iterate_till_fixed_point(new_constraints, counter)


    # since the function returns a list of one element, we get the first element
    # we are only interested in the RHS in this case because the LHS just stores
    # the result

    # we make sure the constraint is of the form:
    # c = b where b is a boolean expression
    # and we consider b (constraint.rhs) for transformation
    assert isinstance(condition_constraint.lhs, BVar)
    assert is_bool_expr(condition_constraint.rhs)
    condition_constraint_rhs = condition_constraint.rhs

    # transform the condition constraint
    condition_constraint_rhs, counter = iterate_till_fixed_point(condition_constraint_rhs, counter)

    # lower both the remaining constraints and the conditional to z3
    transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict)

    transformed_condition_constraint, counter = transform_to_z3(condition_constraint_rhs, counter, dimension_dict)

    negation_transformed_condition_constraint = z3.Not(transformed_condition_constraint)

    # positive branch: all constraints AND the condition;
    # negative branch: all constraints AND NOT(condition)
    return z3.And([transformed, transformed_condition_constraint]), \
        z3.And([transformed, negation_transformed_condition_constraint])
316
+
317
+
318
def evaluate_conditional_with_constraints(tracer_root, graph, node, counter=0, user_constraints=None):
    """
    Given an IR and a node representing a conditional, check satisfiability
    of the conditional and of its negation under the program's constraints.

    Args:
        tracer_root: Tracer root for module instances
        node: The node to be evaluated
        user_constraints: optional extra z3 constraints added to both checks

    Returns: the z3 check results for the condition and for its negation
    """
    positive, negative = \
        transform_all_constraints_trace_time(tracer_root, graph, node, counter)

    # run the same satisfiability check for the condition and its negation
    outcomes = []
    for formula in (positive, negative):
        solver = z3.Solver()
        solver.add(formula)
        if user_constraints is not None:
            solver.add(user_constraints)
        outcomes.append(solver.check())

    condition, negation = outcomes
    return condition, negation
346
+
347
+ except ImportError:
348
+ HAS_Z3 = False
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/util.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch.fx.experimental.migrate_gradual_types.constraint import TVar, DVar, BinConstraintD, \
2
+ BVar
3
+ from torch.fx.experimental.migrate_gradual_types.operation import op_leq
4
+
5
+
6
def gen_tvar(curr):
    """
    Create a fresh tensor variable.
    :param curr: the current counter
    :return: the new TVar and the updated counter
    """
    curr = curr + 1
    return TVar(curr), curr
14
+
15
+
16
def gen_dvar(curr):
    """
    Create a fresh dimension variable.
    :param curr: the current counter
    :return: the new DVar and the updated counter
    """
    curr = curr + 1
    return DVar(curr), curr
24
+
25
def gen_bvar(curr):
    """
    Create a fresh boolean variable.
    :param curr: the current counter
    :return: the new BVar and the updated counter
    """
    curr = curr + 1
    return BVar(curr), curr
33
+
34
def gen_tensor_dims(n, curr):
    """
    Create a list of fresh dimension variables.
    :param n: how many dimensions to generate
    :param curr: the current counter
    :return: the list of DVars and the updated counter
    """
    new_dims = []
    while len(new_dims) < n:
        dim_var, curr = gen_dvar(curr)
        new_dims.append(dim_var)
    return new_dims, curr
46
+
47
+
48
def gen_nat_constraints(list_of_dims):
    """
    Constrain every given dimension to be a natural number (0 ≤ d).
    """
    constraints = []
    for dim in list_of_dims:
        constraints.append(BinConstraintD(0, dim, op_leq))
    return constraints
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/z3_types.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# z3 sort/datatype declarations shared by the gradual-typing constraint
# solver. Everything is declared inside a try so the module imports cleanly
# (with HAS_Z3 = False) when z3 is not installed.
try:
    import z3  # type: ignore[import]
    HAS_Z3 = True
    # dynamic type: an uninterpreted sort with a single distinguished constant
    dyn = z3.DeclareSort('Dyn')
    dyn_type = z3.Const('dyn', dyn)

    # dimension: a pair of integers. Field '0' encodes whether the dimension
    # is static (1) or dynamic (0); field '1' holds the dimension's value
    # (see how D(...) is constructed by the transformation code).
    dim = z3.Datatype('dim')
    dim.declare('dim', ('0', z3.IntSort()), ('1', z3.IntSort()))
    dim = dim.create()

    # tensors: either fully dynamic, or a fixed-rank tensor of 1 to 4 dims
    tensor_type = z3.Datatype('TensorType')
    tensor_type.declare('Dyn', ('dyn', dyn))
    tensor_type.declare('tensor1', ('0', dim))
    tensor_type.declare('tensor2', ('0', dim), ('1', dim))
    tensor_type.declare('tensor3', ('0', dim), ('1', dim), ('2', dim))
    tensor_type.declare('tensor4', ('0', dim), ('1', dim), ('2', dim), ('3', dim))
    tensor_type = tensor_type.create()

    # create dimension: constructor for the 'dim' pair
    D = dim.dim

    # the canonical z3 term for the fully dynamic tensor
    z3_dyn = tensor_type.Dyn(dyn_type)


except ImportError:
    HAS_Z3 = False
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/normalize.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import operator
2
+ from typing import Any, Callable, Dict, Tuple, Optional
3
+
4
+ import torch
5
+ import torch.fx
6
+ import torch.fx as fx
7
+ from torch.fx import Transformer, Proxy
8
+ from torch.fx.node import Argument, Target, Node, map_aggregate
9
+ from torch.fx.operator_schemas import (
10
+ normalize_module,
11
+ normalize_function,
12
+ create_type_hint,
13
+ )
14
+
15
+ from .schema_type_annotation import AnnotateTypesWithSchema
16
+
17
+
18
class NormalizeArgs(Transformer):
    """
    Normalize arguments to Python targets. This means that
    `args/kwargs` will be matched up to the module/functional's
    signature and rewritten to exclusively kwargs in positional order
    if `normalize_to_only_use_kwargs` is true. Also populates default
    values. Does not support positional-only parameters or varargs
    parameters (*args, **kwargs).

    If the nodes have 'type' metadata, it will use it to disambiguate
    overloads. Otherwise, it will throw an error.

    Example usage:
        m = torchvision.models.resnet18()
        traced = torch.fx.symbolic_trace(m)
        traced = NormalizeArgs(traced).transform()
    """

    def __init__(
        self, module: torch.fx.GraphModule, normalize_to_only_use_kwargs: bool = True
    ):
        super().__init__(module)
        # maps each produced Proxy back to the Node it was created from
        self.node_map: Dict[Proxy, Node] = {}
        # when True, calls are rewritten to use keyword arguments exclusively
        self.normalize_to_only_use_kwargs = normalize_to_only_use_kwargs

    def run_node(self, n: Node) -> Any:
        """Execute one node, normalizing call_function nodes with type info."""
        args, kwargs = self.fetch_args_kwargs_from_env(n)

        def get_type(arg):
            # NOTE(review): for Node arguments this reads the *enclosing*
            # node's meta (n.meta), not the argument node's meta — looks
            # suspicious; confirm this is intended before relying on it.
            if isinstance(arg, fx.Node):
                return n.meta["type"] if "type" in n.meta else None
            return type(arg)

        # collect type hints for positional and keyword arguments so
        # normalize_function can disambiguate overloads
        arg_types = map_aggregate(n.args, get_type)
        assert isinstance(arg_types, tuple)
        arg_types = tuple([create_type_hint(i) for i in arg_types])
        kwarg_types = {k: get_type(v) for k, v in kwargs.items()}
        if n.op == "call_function":
            out = self.call_function(n.target, args, kwargs, arg_types, kwarg_types)
        else:
            out = super().run_node(n)
        if n.op != "output":
            # remember the source node and carry its metadata/type over to
            # the newly created node
            self.node_map[out] = n
            out.node.meta = n.meta
            out.node.type = n.type
        return out

    def call_function(
        self,
        target: Target,
        args: Tuple[Argument, ...],
        kwargs: Dict[str, Any],
        arg_types: Optional[Tuple[Any, ...]] = None,
        kwarg_types: Optional[Dict[str, Any]] = None,
    ):
        """Emit a call_function with normalized args/kwargs when possible."""
        assert callable(target)
        new_args_and_kwargs = normalize_function(
            target,
            args,  # type: ignore[arg-type]
            kwargs,
            arg_types,  # type: ignore[arg-type]
            kwarg_types,
            self.normalize_to_only_use_kwargs,
        )
        if new_args_and_kwargs:
            new_args, new_kwargs = new_args_and_kwargs
            return self.tracer.create_proxy(
                "call_function", target, new_args, new_kwargs
            )
        else:
            # normalization failed (e.g. unsupported signature): keep as-is
            return super().call_function(target, args, kwargs)

    def call_module(
        self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any]
    ):
        """Emit a call_module with normalized args/kwargs when possible."""
        assert isinstance(target, str)
        new_args_and_kwargs = normalize_module(
            self.module,
            target,
            args,  # type: ignore[arg-type]
            kwargs,
            self.normalize_to_only_use_kwargs,
        )
        if new_args_and_kwargs:
            new_args, new_kwargs = new_args_and_kwargs
            return super().call_module(target, new_args, new_kwargs)
        else:
            # normalization failed: keep the original call untouched
            return super().call_module(target, args, kwargs)
106
+
107
+
108
class NormalizeOperators(AnnotateTypesWithSchema):
    """
    Normalize callsites that are different ways of "spelling" the same
    invocation into a single, canonical call. Currently supports:

    1. Normalize operators (e.g. operator.add) to the `torch` ops they
    ultimately invoke (e.g. torch.add) when it is possible to statically
    reason that

    Example usage:

    m = torchvision.models.resnet18()

    traced = torch.fx.symbolic_trace(m)

    traced = NormalizeOperators(traced).transform()
    """

    # torch-level binary ops and the operator-module equivalents they are
    # rewritten to (mirroring the magic methods implemented on tensors)
    binary_magic_method_remap: Dict[
        Callable[[Any, Any], Any], Callable[[Any, Any], Any]
    ] = {
        torch.add: operator.add,
        torch.mul: operator.mul,
        torch.sub: operator.sub,
        torch.div: operator.truediv,
        torch.floor_divide: operator.floordiv,
        torch.remainder: operator.mod,
        torch.eq: operator.eq,
        torch.ne: operator.ne,
        torch.lt: operator.lt,
        torch.le: operator.le,
        torch.gt: operator.gt,
        torch.ge: operator.ge,
    }

    def call_function(
        self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any]
    ):
        # Normalize operators according to the magic methods implemented on tensors here:
        # https://github.com/pytorch/pytorch/blob/28c5d90b679c6b38bf4183ec99f16d933c2f1bcd/tools/autograd/templates/python_variable_methods.cpp#L1137 # noqa: B950

        assert callable(target)

        replacement = self.binary_magic_method_remap.get(target)
        if replacement is None or len(args) != 2:
            # not a remappable binary call, or called with unexpected arity
            return super().call_function(target, args, kwargs)

        lhs, rhs = args
        return super().call_function(
            target=replacement,
            args=(lhs, rhs),
            kwargs={},
        )
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (395 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/core.cpython-310.pyc ADDED
Binary file (2.47 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/dispatch.cpython-310.pyc ADDED
Binary file (339 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/match.cpython-310.pyc ADDED
Binary file (4.55 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/more.cpython-310.pyc ADDED
Binary file (3.48 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/unification_tools.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.41 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/core.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Iterator # type: ignore[import]
2
+ from functools import partial
3
+
4
+ from .unification_tools import assoc # type: ignore[import]
5
+ from .utils import transitive_get as walk
6
+ from .variable import isvar
7
+ from .dispatch import dispatch
8
+
9
+ __all__ = ["reify", "unify"]
10
+
11
+ ###############
12
+ # Reification #
13
+ ###############
14
+
15
# Type-dispatched reification helpers. Each overload rebuilds its container
# with variables substituted via `reify`. The bare `_reify` expressions after
# each definition have no runtime effect (presumably kept to appease linters
# about the intentional redefinitions).
@dispatch(Iterator, dict)
def _reify(t, s):
    # lazily substitute through each element of the iterator
    return map(partial(reify, s=s), t)
    # return (reify(arg, s) for arg in t)
_reify

@dispatch(tuple, dict)  # type: ignore[no-redef]
def _reify(t, s):
    # rebuild a tuple with each element reified
    return tuple(reify(iter(t), s))
_reify

@dispatch(list, dict)  # type: ignore[no-redef]
def _reify(t, s):
    # rebuild a list with each element reified
    return list(reify(iter(t), s))
_reify

@dispatch(dict, dict)  # type: ignore[no-redef]
def _reify(d, s):
    # reify values only; keys are left untouched
    return {k: reify(v, s) for k, v in d.items()}
_reify

@dispatch(object, dict)  # type: ignore[no-redef]
def _reify(o, s):
    return o  # catch all, just return the object
39
+
40
def reify(e, s):
    """ Replace variables of expression with substitution
    >>> # xdoctest: +SKIP
    >>> x, y = var(), var()
    >>> e = (1, x, (3, y))
    >>> s = {x: 2, y: 4}
    >>> reify(e, s)
    (1, 2, (3, 4))
    >>> e = {1: x, 3: (y, 5)}
    >>> reify(e, s)
    {1: 2, 3: (4, 5)}
    """
    if not isvar(e):
        # structured value: recurse via the type-dispatched helper
        return _reify(e, s)
    if e in s:
        # bound variable: chase its binding, which may itself contain variables
        return reify(s[e], s)
    # unbound variable stays as-is
    return e
55
+
56
+ ###############
57
+ # Unification #
58
+ ###############
59
+
60
# sequence types that unify elementwise
seq = tuple, list, Iterator

@dispatch(seq, seq, dict)
def _unify(u, v, s):
    # Sequences unify only if their lengths agree; otherwise fail with False.
    if len(u) != len(v):
        return False
    for uu, vv in zip(u, v):  # avoiding recursion
        # thread the substitution through each element pair
        s = unify(uu, vv, s)
        if s is False:
            return False
    return s
71
+ #
72
+ # @dispatch((set, frozenset), (set, frozenset), dict)
73
+ # def _unify(u, v, s):
74
+ # i = u & v
75
+ # u = u - i
76
+ # v = v - i
77
+ # return _unify(sorted(u), sorted(v), s)
78
+ #
79
+ #
80
+ # @dispatch(dict, dict, dict)
81
+ # def _unify(u, v, s):
82
+ # if len(u) != len(v):
83
+ # return False
84
+ # for key, uval in iteritems(u):
85
+ # if key not in v:
86
+ # return False
87
+ # s = unify(uval, v[key], s)
88
+ # if s is False:
89
+ # return False
90
+ # return s
91
+ #
92
+ #
93
+ # @dispatch(object, object, dict)
94
+ # def _unify(u, v, s):
95
+ # return False # catch all
96
+
97
+
98
@dispatch(object, object, dict)
def unify(u, v, s):  # no check at the moment
    """ Find substitution so that u == v while satisfying s
    >>> # xdoctest: +SKIP
    >>> x = var('x')
    >>> unify((1, x), (1, 2), {})
    {~x: 2}
    """
    # resolve both sides through the substitution chain first
    u = walk(u, s)
    v = walk(v, s)
    if u == v:
        return s
    # a free variable on either side gets bound to the other side
    if isvar(u):
        return assoc(s, u, v)
    if isvar(v):
        return assoc(s, v, u)
    # otherwise fall back to structural (type-dispatched) unification
    return _unify(u, v, s)
unify

@dispatch(object, object)  # type: ignore[no-redef]
def unify(u, v):
    # convenience overload: unify under an empty substitution
    return unify(u, v, {})
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .core import dispatch
2
+ from .dispatcher import (Dispatcher, halt_ordering, restart_ordering,
3
+ MDNotImplementedError)
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (388 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/conflict.cpython-310.pyc ADDED
Binary file (4.62 kB). View file