Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff for the complete change set.
See raw diff
- ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
- ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/_content_store.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/_python_dispatch.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/_pytree.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/_traceback.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/_typing_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/_zip.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/bundled_inputs.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/checkpoint.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/collect_env.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/dlpack.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/flop_counter.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/model_zoo.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/__pycache__/show_pickle.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/__init__.py +0 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/functions.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/interp.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/reference.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/singleton_int.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/solve.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/value_ranges.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/functions.py +353 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/interp.py +118 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/reference.py +214 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/singleton_int.py +94 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/solve.py +175 -0
- venv/lib/python3.10/site-packages/torch/utils/_sympy/value_ranges.py +782 -0
- venv/lib/python3.10/site-packages/torch/utils/backcompat/__init__.py +21 -0
- venv/lib/python3.10/site-packages/torch/utils/backcompat/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__init__.py +0 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/blas_compare_setup.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/compare.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/fuzzer.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/op_benchmark.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/simple_timeit.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/spectral_ops_fuzz_test.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/blas_compare_setup.py +221 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/compare.py +98 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/fuzzer.py +85 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/op_benchmark.py +103 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/simple_timeit.py +25 -0
- venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/spectral_ops_fuzz_test.py +113 -0
ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:255b1e73d291ef09187bd33a97bef7e0a99d45a78d2c50ef8c5373492ef112a0
|
3 |
+
size 33555612
|
ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d91b7c85c573039abde02e42940f67d909e777dcd2921634e46998eb5e35ed25
|
3 |
+
size 33555533
|
ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:abe02cc8d63470ba51c596e8fabac7b894dedb01b303de7bace6acb43d8b93b7
|
3 |
+
size 9372
|
ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:a15e7453f2541b71d2984e7da86894f6e2cae3c37cd00dd4bb072e0e142bc7cc
|
3 |
+
size 9387
|
ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:43c4bbc0928e7a01beda5b136cfc1ce0227c43088b057671a39987f7ee304358
|
3 |
+
size 9293
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (2.17 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_content_store.cpython-310.pyc
ADDED
Binary file (6.12 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_python_dispatch.cpython-310.pyc
ADDED
Binary file (18 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_pytree.cpython-310.pyc
ADDED
Binary file (44.9 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_traceback.cpython-310.pyc
ADDED
Binary file (5.91 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_typing_utils.cpython-310.pyc
ADDED
Binary file (540 Bytes). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_zip.cpython-310.pyc
ADDED
Binary file (1.96 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/bundled_inputs.cpython-310.pyc
ADDED
Binary file (17.1 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/checkpoint.cpython-310.pyc
ADDED
Binary file (43.4 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/collect_env.cpython-310.pyc
ADDED
Binary file (15.4 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/dlpack.cpython-310.pyc
ADDED
Binary file (3.86 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/flop_counter.cpython-310.pyc
ADDED
Binary file (15.6 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/model_zoo.cpython-310.pyc
ADDED
Binary file (263 Bytes). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/__pycache__/show_pickle.cpython-310.pyc
ADDED
Binary file (5.08 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/_sympy/__init__.py
ADDED
File without changes
|
venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (186 Bytes). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/functions.cpython-310.pyc
ADDED
Binary file (9.69 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/interp.cpython-310.pyc
ADDED
Binary file (3 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/reference.cpython-310.pyc
ADDED
Binary file (6.58 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/singleton_int.cpython-310.pyc
ADDED
Binary file (2.82 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/solve.cpython-310.pyc
ADDED
Binary file (2.88 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/value_ranges.cpython-310.pyc
ADDED
Binary file (23.3 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/_sympy/functions.py
ADDED
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sympy
|
2 |
+
from sympy import S
|
3 |
+
from sympy.core.logic import fuzzy_and, fuzzy_not, fuzzy_or
|
4 |
+
|
5 |
+
__all__ = [
|
6 |
+
"FloorDiv", "ModularIndexing", "CleanDiv", "CeilDiv", "Pow", "TrueDiv",
|
7 |
+
"LShift", "RShift", "IsNonOverlappingAndDenseIndicator", "Round", "RoundDecimal",
|
8 |
+
]
|
9 |
+
|
10 |
+
|
11 |
+
def fuzzy_eq(x, y):
    """Three-valued equality for fuzzy booleans.

    Returns None (unknown) if either operand is None; otherwise returns
    the ordinary ``==`` comparison of the two values.
    """
    if x is None or y is None:
        return None
    return x == y
15 |
+
|
16 |
+
|
17 |
+
class FloorDiv(sympy.Function):
    """
    We maintain this so that:
    1. We can use divisibility guards to simplify FloorDiv(a, b) to a / b.
    2. Printing out the expression is nicer (compared to say, representing a//b as (a - a % b) / b)
    """
    nargs = (2,)
    precedence = 50  # precedence of mul  # noqa: F811

    # Default return type for SymPy assumptions.
    # https://docs.sympy.org/latest/guides/assumptions.html#implementing-assumptions-handlers
    is_real = True

    @property
    def base(self):
        # Numerator of the floor-division.
        return self.args[0]

    @property
    def divisor(self):
        # Denominator of the floor-division.
        return self.args[1]

    def _sympystr(self, printer):
        # Print as "(base//divisor)" instead of SymPy's default FloorDiv(a, b).
        base = printer.parenthesize(self.base, self.precedence)
        divisor = printer.parenthesize(self.divisor, self.precedence)
        return f"({base}//{divisor})"

    # SymPy assumptions based on argument types.
    def _eval_is_real(self):
        return fuzzy_or([self.base.is_real, self.divisor.is_real])

    def _eval_is_integer(self):
        return fuzzy_and([self.base.is_integer, self.divisor.is_integer])

    # Automatic evaluation.
    # https://docs.sympy.org/latest/guides/custom-functions.html#best-practices-for-eval
    @classmethod
    def eval(cls, base, divisor):
        # Reject complex (non-integer, non-real) and boolean operands up front.
        def check_supported_type(x):
            if (x.is_integer is False and x.is_real is False and x.is_complex) or x.is_Boolean:
                raise TypeError(
                    f"unsupported operand type(s) for //: "
                    f"'{type(base).__name__}' and '{type(divisor).__name__}'"
                    f", expected integer or real")

        check_supported_type(base)
        check_supported_type(divisor)

        # We don't provide the same error message as in Python because SymPy
        # makes it difficult to check the types.
        if divisor.is_zero:
            raise ZeroDivisionError("division by zero")

        # Constant/identity fast paths, checked before any structural rewrites.
        if base.is_zero:
            return sympy.S.Zero
        if base.is_integer and divisor == 1:
            return base
        if base.is_real and divisor == 1:
            return sympy.floor(base)
        if base.is_integer and divisor == -1:
            return sympy.Mul(base, -1)
        if isinstance(base, sympy.Integer) and isinstance(divisor, sympy.Integer):
            return base // divisor
        if isinstance(base, (sympy.Integer, sympy.Float)) and isinstance(divisor, (sympy.Integer, sympy.Float)):
            return sympy.floor(base / divisor)
        # Nested FloorDiv: (a // b) // c -> a // (b * c).
        if isinstance(base, FloorDiv):
            return FloorDiv(base.args[0], base.args[1] * divisor)
        # Dividing by 1/q is multiplying by q, then taking floor.
        if isinstance(divisor, sympy.Rational) and divisor.p == 1:
            return sympy.floor(base * divisor.q)

        # Pull addends that are exactly divisible by the divisor out of the floor.
        if isinstance(base, sympy.Add):
            for a in base.args:
                gcd = sympy.gcd(a, divisor)
                if gcd == divisor:
                    return FloorDiv(base - a, divisor) + a / gcd

        # Cancel a common factor of numerator and denominator.
        try:
            gcd = sympy.gcd(base, divisor)
            if gcd != 1:
                return FloorDiv(
                    sympy.simplify(base / gcd), sympy.simplify(divisor / gcd)
                )
        except sympy.PolynomialError:
            pass  # https://github.com/pytorch/pytorch/issues/108276
100 |
+
|
101 |
+
|
102 |
+
class ModularIndexing(sympy.Function):
    """
    ModularIndexing(a, b, c) => (a // b) % c where % is the C modulus
    """

    nargs = (3,)
    is_integer = True

    @classmethod
    def eval(cls, base, divisor, modulus):
        # (0 // b) % c == 0, and anything % 1 == 0.
        if base == 0 or modulus == 1:
            return sympy.Integer(0)

        # Fully constant: evaluate directly.
        if (
            isinstance(base, sympy.Integer)
            and isinstance(divisor, sympy.Integer)
            and isinstance(modulus, sympy.Integer)
        ):
            return (base // divisor) % modulus

        # Cancel a common factor of base and divisor.
        try:
            if divisor != 1:
                gcd = sympy.gcd(base, divisor)
                if gcd != 1:
                    return ModularIndexing(
                        sympy.simplify(base / gcd), sympy.simplify(divisor / gcd), modulus
                    )
        except sympy.PolynomialError:
            pass  # https://github.com/pytorch/pytorch/issues/108276

        # Drop addends that are multiples of modulus * divisor: they contribute
        # nothing to (base // divisor) % modulus — but only when all remaining
        # terms are non-negative (see the Triton issue below).
        if isinstance(base, sympy.Add):
            new_terms = []
            all_positive = True
            for term in base.args:
                if sympy.gcd(term, modulus * divisor) != modulus * divisor:
                    if (isinstance(term, sympy.Integer) and term < 0) or (
                        isinstance(term, sympy.Mul)
                        and isinstance(term.args[0], sympy.Integer)
                        and term.args[0] < 0
                    ):
                        # workaround for https://github.com/openai/triton/issues/619,
                        # if there are negative terms, // produces wrong result
                        # TODO if https://github.com/openai/triton/issues/619 is fixed
                        # this optimization would become valid
                        all_positive = False
                        break
                    else:
                        new_terms.append(term)

            if len(new_terms) != len(base.args) and all_positive:
                return ModularIndexing(sum(new_terms), divisor, modulus)

        # Nested floor-division folds into the divisor: ((a // b) // d) % c
        # == (a // (b * d)) % c.
        if isinstance(base, FloorDiv):
            return ModularIndexing(base.args[0], base.args[1] * divisor, modulus)

    def _eval_is_nonnegative(self):
        p, q = self.args[:2]
        return fuzzy_eq(p.is_nonnegative, q.is_nonnegative)  # type: ignore[attr-defined]

    def _eval_is_positive(self):
        p, q = self.args[:2]
        return fuzzy_eq(p.is_positive, q.is_positive)  # type: ignore[attr-defined]
164 |
+
|
165 |
+
|
166 |
+
class Where(sympy.Function):
    """
    Good ol' ternary operator
    """

    nargs = (3,)

    @classmethod
    def eval(cls, c, p, q):
        # Fold only when the condition is a known boolean constant; for a
        # symbolic condition we return nothing, leaving the node unevaluated.
        if c == sympy.true:
            return p
        if c == sympy.false:
            return q
179 |
+
|
180 |
+
class Mod(sympy.Function):
    """
    We maintain this so that we avoid SymPy correctness issues, such as:
    https://github.com/sympy/sympy/issues/25146
    """

    nargs = (2,)

    @classmethod
    def eval(cls, p, q):
        # This was adapted from: sympy/core/mod.py

        if q.is_zero:
            raise ZeroDivisionError("Modulo by zero")
        # If either of them is NaN or infinite.
        if p is S.NaN or q is S.NaN or p.is_finite is False or q.is_finite is False:
            return S.NaN
        # Three cases:
        #   1. p == 0
        #   2. p is either q or -q
        #   3. p is integer and q == 1
        if p is S.Zero or p in (q, -q) or (p.is_integer and q == 1):
            return S.Zero

        # Evaluate if they are both literals.
        if q.is_Number and p.is_Number:
            return p % q

        # If q == 2, it's a matter of whether p is odd or even.
        if q.is_Number and q == 2:
            if p.is_even:
                return S.Zero
            if p.is_odd:
                return S.One

        # If p is a multiple of q.
        r = p / q
        if r.is_integer:
            return S.Zero

        # If p < q and its ratio is positive, then:
        #   - floor(p / q) = 0
        #   - p % q = p - floor(p / q) * q = p
        less = p < q
        if less.is_Boolean and bool(less) and r.is_positive:
            return p

    def _eval_is_integer(self):
        p, q = self.args
        return fuzzy_and([p.is_integer, q.is_integer, fuzzy_not(q.is_zero)])  # type: ignore[attr-defined]

    def _eval_is_nonnegative(self):
        # Python % takes the sign of the divisor: positive divisor => result >= 0.
        return True if self.args[1].is_positive else None  # type: ignore[attr-defined]

    def _eval_is_nonpositive(self):
        # Python % takes the sign of the divisor: negative divisor => result <= 0.
        return True if self.args[1].is_negative else None  # type: ignore[attr-defined]
236 |
+
|
237 |
+
|
238 |
+
class CleanDiv(FloorDiv):
    """
    Floor-division that is known to divide exactly (no rounding occurs).

    Kept as a distinct node so future optimizations can exploit the
    exactness guarantee; behavior is inherited unchanged from FloorDiv.
    """
245 |
+
|
246 |
+
|
247 |
+
class CeilDiv(sympy.Function):
    """
    Div used in indexing that rounds up.
    """

    is_integer = True

    def __new__(cls, base, divisor):
        # Exact division: produce a CleanDiv so downstream passes know
        # there is no rounding at all.
        if sympy.gcd(base, divisor) == divisor:
            return CleanDiv(base, divisor)
        # Otherwise use the identity ceil(a / b) == floor((a + b - 1) / b).
        return FloorDiv(base + (divisor - 1), divisor)
259 |
+
|
260 |
+
|
261 |
+
class LShift(sympy.Function):
    """Left shift: LShift(base, shift) evaluates to base * 2**shift."""

    @classmethod
    def eval(cls, base, shift):
        # Match Python's << semantics for negative shift counts.
        if shift < 0:
            raise ValueError('negative shift count')
        return base * (2 ** shift)
267 |
+
|
268 |
+
|
269 |
+
class RShift(sympy.Function):
    """Right shift: RShift(base, shift) evaluates to base // 2**shift."""

    @classmethod
    def eval(cls, base, shift):
        # Match Python's >> semantics for negative shift counts.
        if shift < 0:
            raise ValueError('negative shift count')
        return base // (2 ** shift)
275 |
+
|
276 |
+
# Overloaded to be compatible with regular Python.
# https://github.com/pytorch/pytorch/issues/90900
class Pow(sympy.Function):
    """Exponentiation with Python semantics: x**0 == 1, 0**negative raises."""

    @classmethod
    def eval(cls, base, exp):
        # Python defines anything ** 0 == 1, including 0 ** 0.
        if exp.is_zero:
            return sympy.Integer(1)
        if base.is_zero and exp < 0:
            raise ZeroDivisionError(f"{base} cannot be raised to a negative power")
        return base ** exp
287 |
+
|
288 |
+
# Overloaded to be compatible with regular Python.
# https://github.com/pytorch/pytorch/issues/90900
class TrueDiv(sympy.Function):
    """True division with Python semantics: dividing by zero raises
    ZeroDivisionError instead of producing SymPy's complex infinity."""

    @classmethod
    def eval(cls, base, divisor):
        if divisor.is_zero:
            raise ZeroDivisionError("division by zero")
        return base / divisor
297 |
+
|
298 |
+
|
299 |
+
# TODO: As an indicator, this != 0 implies == 1 (and vice versa).
# Because we do not have the ability to guard on the stride permutation
# at the moment, it is hard to make further inferences when this is true,
# as although we know the tensor is contiguous in *some* layout, we don't
# know which one (however, you could, for example, make the inference that
# reshaping this to a 1D tensor can be guard-free.)
class IsNonOverlappingAndDenseIndicator(sympy.Function):
    """0/1 indicator over interleaved (sizes..., strides...) arguments.

    Takes 2*dim arguments: the first dim are sizes, the last dim are strides.
    Evaluates only when every argument is a concrete integer; otherwise the
    node stays symbolic (eval returns None).
    """

    is_integer = True

    @classmethod
    def eval(cls, *args):
        assert len(args) % 2 == 0
        dim = len(args) // 2
        # TODO: it is possible to make progress evaluating this guard
        # even if not all of the inputs are known. For example, a 2D
        # tensor with non-0/1 sizes but strides (0, 1) is definitely
        # false, because we know its numel > 1 but it's broadcasted
        # in dim 0.
        if all(isinstance(a, sympy.Integer) for a in args):
            # sym_node imported in torch.__init__. Local import to avoid an import cycle
            from torch.fx.experimental.symbolic_shapes import eval_is_non_overlapping_and_dense

            size_args = args[0:dim]
            stride_args = args[dim:]
            return eval_is_non_overlapping_and_dense(
                [int(a) for a in size_args],
                [int(a) for a in stride_args]
            )
        return None
328 |
+
|
329 |
+
|
330 |
+
class Round(sympy.Function):
    """Python round() (banker's rounding) lifted to a SymPy node."""

    is_integer = True

    @classmethod
    def eval(cls, number):
        # Integers round to themselves.
        if number.is_integer:
            return number
        # Concrete numbers evaluate via Python's round().
        if isinstance(number, sympy.Number):
            return sympy.Integer(round(float(number)))

    def __int__(self):
        # This will only ever be called when computing size hints. At that point,
        # self.args[0] should be a number and no longer an expression. If it were,
        # the float call would fail and the caller would handle this further.
        return round(float(self.args[0]))  # type: ignore[arg-type]
344 |
+
|
345 |
+
|
346 |
+
class RoundDecimal(sympy.Function):
    """Python round(number, ndigits) lifted to a SymPy node."""

    @classmethod
    def eval(cls, number, ndigits):
        # Rounding an integer to a non-negative digit count is a no-op.
        if number.is_integer and ndigits >= 0:
            return number
        if isinstance(number, sympy.Number) and isinstance(ndigits, sympy.Integer):
            # Round ints as ints and floats as floats so the output type
            # matches the input type, as in Python's round().
            if isinstance(number, sympy.Integer):
                value_type, output_type = int, sympy.Integer
            else:
                value_type, output_type = float, sympy.Float
            return output_type(round(value_type(number), int(ndigits)))
venv/lib/python3.10/site-packages/torch/utils/_sympy/interp.py
ADDED
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This is a simple interpreter for Sympy expressions that dispatches to
|
3 |
+
classes following the torch._inductor.virtualized calling convention.
|
4 |
+
For directness, the interpreter takes the handler directly rather than
|
5 |
+
consulting the TLS. It does not use most of the methods on the full
|
6 |
+
handler; only those with corresponding Sympy expressions. To see an example
|
7 |
+
of a full handler, see torch.utils._sympy.value_ranges.ValueRangeAnalysis.
|
8 |
+
"""
|
9 |
+
|
10 |
+
import functools
|
11 |
+
from typing import Any, Dict, Union
|
12 |
+
|
13 |
+
import sympy
|
14 |
+
from sympy.logic.boolalg import Boolean as SympyBoolean, BooleanAtom
|
15 |
+
|
16 |
+
import torch
|
17 |
+
from .functions import (
|
18 |
+
CleanDiv,
|
19 |
+
FloorDiv,
|
20 |
+
IsNonOverlappingAndDenseIndicator,
|
21 |
+
Mod,
|
22 |
+
ModularIndexing,
|
23 |
+
Pow,
|
24 |
+
Round,
|
25 |
+
RoundDecimal,
|
26 |
+
TrueDiv,
|
27 |
+
Where,
|
28 |
+
)
|
29 |
+
|
30 |
+
|
31 |
+
# TODO: Dedupe this with SYMPY_INTERP


@functools.lru_cache(None)
def handlers():
    """Return the SymPy-node-type -> analysis-method-name dispatch table.

    Built once and cached; sympy_interp looks up expr.func here to find
    which method to call on the analysis handler.
    """
    # TODO add CeilDiv (it doesn't appear in the index_expr)

    # TODO default to some decompositions if the interpreter doesn't have them
    # like decomposing ModularIndexing or implementing Le(a,b) as Ge(b, a)

    HANDLERS = {
        sympy.Or: "or_",
        sympy.And: "and_",
        sympy.Eq: "eq",
        sympy.Ne: "ne",
        sympy.Lt: "lt",
        sympy.Gt: "gt",
        sympy.Le: "le",
        sympy.Ge: "ge",
        sympy.Not: "not_",
        TrueDiv: "truediv",
        FloorDiv: "floordiv",
        CleanDiv: "div",
        Where: "where",
        sympy.Add: "add",
        sympy.Mul: "mul",
        Pow: "pow",
        sympy.Pow: "pow",
        Mod: "mod",
        sympy.Mod: "mod",
        sympy.Abs: "abs",
        sympy.log: "log",
        sympy.exp: "exp",
        sympy.floor: "floor",
        sympy.ceiling: "ceil",
        sympy.Min: "minimum",
        sympy.Max: "maximum",
        ModularIndexing: "modular_indexing",
        sympy.functions.elementary.piecewise.ExprCondPair: "expr_cond_pair",
        sympy.Piecewise: "piecewise",
        IsNonOverlappingAndDenseIndicator: "is_non_overlapping_and_dense_indicator",
        Round: "round",
        RoundDecimal: "round",
    }
    # Trig functions map to analysis methods of the same name.
    for name in ["cos", "sin", "tan", "sinh", "cosh", "tanh", "asin", "acos", "atan"]:
        HANDLERS[getattr(sympy, name)] = name

    return HANDLERS
79 |
+
|
80 |
+
|
81 |
+
# Handler methods that take exactly two operands but whose SymPy node may
# carry more; sympy_interp folds them left-to-right.
ASSOCIATIVE_OPS = {"minimum", "maximum", "mul", "add", "and_", "or_"}


def sympy_interp(
    analysis, env: Dict[sympy.Symbol, Any], expr: Union[sympy.Expr, SympyBoolean]
):
    """Recursively evaluate *expr*, dispatching each node to *analysis*.

    Constants become analysis.constant(...) with an inferred torch dtype,
    symbols are looked up in *env*, and every other node is dispatched via
    the handlers() table. Raises KeyError for node types with no handler.
    """
    # Handle base cases
    dtype = None
    if isinstance(expr, BooleanAtom):
        dtype = torch.bool
    elif isinstance(expr, sympy.Integer):
        dtype = torch.int64
    elif isinstance(expr, sympy.Number):
        dtype = torch.double

    if dtype is not None:
        return analysis.constant(expr, dtype)
    elif isinstance(expr, sympy.Symbol):
        return env[expr]

    # Special cases
    # x ** (1/2) is dispatched to sqrt rather than the generic pow handler.
    if isinstance(expr, sympy.Pow) and isinstance(
        expr.args[1], sympy.core.numbers.Half
    ):
        return analysis.sqrt(sympy_interp(analysis, env, expr.args[0]))

    # Recursive case
    args = [sympy_interp(analysis, env, arg) for arg in expr.args]  # type: ignore[arg-type]
    handler_name = handlers()[expr.func]
    handler = getattr(analysis, handler_name)
    if handler_name in ASSOCIATIVE_OPS:
        # Binary handler, possibly n-ary node: fold left-to-right.
        assert len(args) > 1
        acc = handler(args[0], args[1])
        for i in range(2, len(args)):
            acc = handler(acc, args[i])
        return acc
    else:
        return handler(*args)
venv/lib/python3.10/site-packages/torch/utils/_sympy/reference.py
ADDED
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
|
3 |
+
import sympy
|
4 |
+
|
5 |
+
import torch
|
6 |
+
|
7 |
+
|
8 |
+
# The sympy interpretation of operators. It will also sometimes work with
|
9 |
+
# plain int/float, but if you do certain operations you will get out a
|
10 |
+
# sympy.Basic in the end. If you want the Python/FX traceable interpretation,
|
11 |
+
# check PythonReferenceAnalysis.
|
12 |
+
# NB: For magic methods this needs to use normal magic methods
|
13 |
+
# so that test_magic_methods works
|
14 |
+
class ReferenceAnalysis:
|
15 |
+
@staticmethod
|
16 |
+
def constant(c, dtype):
|
17 |
+
return sympy.sympify(c)
|
18 |
+
|
19 |
+
@staticmethod
|
20 |
+
def or_(a, b):
|
21 |
+
return a | b
|
22 |
+
|
23 |
+
@staticmethod
|
24 |
+
def and_(a, b):
|
25 |
+
return a & b
|
26 |
+
|
27 |
+
@staticmethod
|
28 |
+
def eq(a, b):
|
29 |
+
if isinstance(a, sympy.Expr) or isinstance(b, sympy.Expr):
|
30 |
+
return sympy.Eq(a, b)
|
31 |
+
return a == b
|
32 |
+
|
33 |
+
@classmethod
|
34 |
+
def ne(cls, a, b):
|
35 |
+
return cls.not_(cls.eq(a, b))
|
36 |
+
|
37 |
+
@staticmethod
|
38 |
+
def lt(a, b):
|
39 |
+
return a < b
|
40 |
+
|
41 |
+
@staticmethod
|
42 |
+
def gt(a, b):
|
43 |
+
return a > b
|
44 |
+
|
45 |
+
@staticmethod
|
46 |
+
def le(a, b):
|
47 |
+
return a <= b
|
48 |
+
|
49 |
+
@staticmethod
|
50 |
+
def ge(a, b):
|
51 |
+
return a >= b
|
52 |
+
|
53 |
+
@staticmethod
|
54 |
+
def not_(a):
|
55 |
+
assert not isinstance(a, bool)
|
56 |
+
return ~a
|
57 |
+
|
58 |
+
@staticmethod
|
59 |
+
def reciprocal(x):
|
60 |
+
return 1 / x
|
61 |
+
|
62 |
+
@staticmethod
|
63 |
+
def square(x):
|
64 |
+
return x * x
|
65 |
+
|
66 |
+
@staticmethod
|
67 |
+
def mod(x, y):
|
68 |
+
return x % y
|
69 |
+
|
70 |
+
@staticmethod
|
71 |
+
def abs(x):
|
72 |
+
return abs(x)
|
73 |
+
|
74 |
+
@staticmethod
|
75 |
+
def neg(x):
|
76 |
+
return -x
|
77 |
+
|
78 |
+
@staticmethod
|
79 |
+
def truediv(a, b):
|
80 |
+
return a / b
|
81 |
+
|
82 |
+
@staticmethod
|
83 |
+
def div(a, b):
|
84 |
+
return ReferenceAnalysis.truediv(a, b)
|
85 |
+
|
86 |
+
@staticmethod
|
87 |
+
def floordiv(a, b):
|
88 |
+
if b == 0:
|
89 |
+
return sympy.nan if a == 0 else sympy.zoo
|
90 |
+
return a // b
|
91 |
+
|
92 |
+
@staticmethod
|
93 |
+
def truncdiv(a, b):
|
94 |
+
result = a / b
|
95 |
+
if result.is_finite:
|
96 |
+
result = sympy.Integer(result)
|
97 |
+
|
98 |
+
return result
|
99 |
+
|
100 |
+
@staticmethod
|
101 |
+
def add(a, b):
|
102 |
+
return a + b
|
103 |
+
|
104 |
+
@staticmethod
|
105 |
+
def mul(a, b):
|
106 |
+
return a * b
|
107 |
+
|
108 |
+
@staticmethod
|
109 |
+
def sub(a, b):
|
110 |
+
return a - b
|
111 |
+
|
112 |
+
@staticmethod
|
113 |
+
def exp(x):
|
114 |
+
return sympy.exp(x)
|
115 |
+
|
116 |
+
@staticmethod
|
117 |
+
def log(x):
|
118 |
+
return sympy.log(x)
|
119 |
+
|
120 |
+
@staticmethod
|
121 |
+
def sqrt(x):
|
122 |
+
return sympy.sqrt(x)
|
123 |
+
|
124 |
+
@staticmethod
|
125 |
+
def pow(a, b):
|
126 |
+
return a**b
|
127 |
+
|
128 |
+
    @staticmethod
    def minimum(a, b):
        """Return min(a, b) after upcasting both operands to one numeric type.

        Upcasts to Float when either side is a Float or non-finite; otherwise
        both sides must be Integers.  Mirrors maximum().
        """
        # Poorman's version of upcasting in Sympy
        # This won't do for sympy.Expr as the casting does nothing for those
        if a.is_Float or not a.is_finite or b.is_Float or not b.is_finite:
            result_type = sympy.Float
        else:
            assert a.is_Integer
            assert b.is_Integer
            result_type = sympy.Integer
        return sympy.Min(result_type(a), result_type(b))
|
139 |
+
|
140 |
+
    @staticmethod
    def maximum(a, b):
        """Return max(a, b) after upcasting both operands to one numeric type.

        Upcasts to Float when either side is a Float or non-finite; otherwise
        both sides must be Integers.  Mirrors minimum().
        """
        # Poorman's version of upcasting in Sympy
        # This won't do for sympy.Expr as the casting does nothing for those
        if a.is_Float or not a.is_finite or b.is_Float or not b.is_finite:
            result_type = sympy.Float
        else:
            assert a.is_Integer
            assert b.is_Integer
            result_type = sympy.Integer
        return sympy.Max(result_type(a), result_type(b))
|
151 |
+
|
152 |
+
    @staticmethod
    def floor(x):
        """Return the symbolic floor of x."""
        return sympy.floor(x)
|
155 |
+
|
156 |
+
    @staticmethod
    def ceil(x):
        """Return the symbolic ceiling of x."""
        return sympy.ceiling(x)
|
159 |
+
|
160 |
+
|
161 |
+
# Unlike ReferenceAnalysis, does NOT sympyify, instead, works with plain
|
162 |
+
# Python types and is FX traceable. Inheritance here is purely for code
|
163 |
+
# sharing (TODO: considering splitting out a BaseReferenceAnalysis).
|
164 |
+
class PythonReferenceAnalysis(ReferenceAnalysis):
    """Reference semantics over plain Python values / torch symbolic numbers.

    Overrides the sympy-based methods of ReferenceAnalysis with plain-Python
    and torch.sym_* equivalents so the resulting operations are FX traceable.
    Methods not overridden here fall through to ReferenceAnalysis.
    """

    @staticmethod
    def constant(c, dtype):
        """Cast constant c to the Python type corresponding to dtype."""
        if dtype is torch.int64:
            return int(c)
        elif dtype is torch.double:
            return float(c)
        elif dtype is torch.bool:
            return bool(c)
        else:
            raise AssertionError(f"unrecognized dtype {dtype}")

    @staticmethod
    def not_(a):
        # torch.sym_not works for both plain bools and symbolic bools,
        # unlike the ~ used by the base class.
        return torch.sym_not(a)

    @staticmethod
    def floordiv(a, b):
        """Plain Python floor division (no sympy zoo/nan special cases)."""
        return a // b

    @staticmethod
    def truncdiv(a, b):
        # NOTE(review): this returns true division, not a truncated quotient,
        # unlike ReferenceAnalysis.truncdiv which truncates toward zero --
        # presumably intentional for shape expressions; confirm upstream intent.
        return a / b

    @staticmethod
    def exp(x):
        raise AssertionError("exp is not valid shape sympy expr")

    @staticmethod
    def log(x):
        raise AssertionError("log is not valid shape sympy expr")

    @staticmethod
    def sqrt(x):
        return torch._sym_sqrt(x)  # type: ignore[attr-defined]

    @staticmethod
    def minimum(a, b):
        return torch.sym_min(a, b)

    @staticmethod
    def maximum(a, b):
        return torch.sym_max(a, b)

    @staticmethod
    def floor(x):
        return math.floor(x)

    @staticmethod
    def ceil(x):
        return math.ceil(x)
|
venv/lib/python3.10/site-packages/torch/utils/_sympy/singleton_int.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sympy
|
2 |
+
from sympy.multipledispatch import dispatch
|
3 |
+
|
4 |
+
__all__ = ["SingletonInt"]
|
5 |
+
|
6 |
+
|
7 |
+
class SingletonInt(sympy.AtomicExpr):
    """A sympy atom standing for an opaque per-instance integer (nested int),
    scaled by an integer coefficient.

    Instances compare equal only to SingletonInts with the same val and coeff;
    most arithmetic is deliberately unsupported so an expression tree is never
    silently built around one.
    """

    # This is probably not super important unless we are in multiple dispatch
    # situations with other more exotic Expr types.
    _op_priority = 99999

    def __new__(cls, *args, coeff=None, **kwargs):
        # coeff is consumed here (handled by __init__) so it is not forwarded
        # to AtomicExpr.__new__, which would not understand it.
        instance = super().__new__(cls, *args, **kwargs)
        return instance

    # The semantics of this class should match that of NestedIntSymNodeImpl in
    # c10/core/NestedIntSymNodeImpl.h
    def __init__(self, val, *, coeff=1):
        self._val = val    # identity of the underlying nested int
        self._coeff = coeff  # integer multiplier applied via __mul__/__rmul__
        super().__init__()

    # See NOTE [ Inequalities with nested int ]
    def _eval_Eq(self, other):
        # Equal only to another SingletonInt with identical val and coeff;
        # anything else (including plain integers) is unconditionally unequal.
        if (
            isinstance(other, SingletonInt)
            and other._val == self._val
            and self._coeff == other._coeff
        ):
            return sympy.true
        else:
            return sympy.false

    # This is necessary so that calling expr.free_symbols on exprs that contain
    # this Singleton does not error
    @property
    def free_symbols(self):
        return set()

    def __mul__(self, other):
        # Scaling by a plain number folds into the coefficient; scaling two
        # singletons together has no defined meaning.
        if isinstance(other, SingletonInt):
            raise ValueError(
                "SingletonInt cannot be multiplied by another SingletonInt"
            )
        return SingletonInt(self._val, coeff=self._coeff * other)

    def __rmul__(self, other):
        if isinstance(other, SingletonInt):
            raise ValueError(
                "SingletonInt cannot be multiplied by another SingletonInt"
            )
        return SingletonInt(self._val, coeff=self._coeff * other)

    # Make sure we promptly raise an error instead of falling back to building
    # an expression tree. There are probably more ops, how can we be exhaustive?
    def __add__(self, other):
        raise NotImplementedError("NYI")

    def __sub__(self, other):
        raise NotImplementedError("NYI")

    def __truediv__(self, other):
        raise NotImplementedError("NYI")

    def __floordiv__(self, other):
        raise NotImplementedError("NYI")

    def __mod__(self, other):
        raise NotImplementedError("NYI")
|
70 |
+
|
71 |
+
|
72 |
+
# See NOTE [ Inequalities with nested int ]
|
73 |
+
@dispatch(sympy.Integer, SingletonInt)
def _eval_is_ge(a, b):
    """Is Integer a >= SingletonInt b?

    A nested int's runtime value is at least 2 (the companion overload below
    answers True for any bound <= 2), so an integer below 2 is definitely
    smaller; every other case is indeterminate.
    """
    if a < 2:
        return sympy.false
    raise ValueError("Symbolic SingletonInt: Relation is indeterminate")
|
78 |
+
|
79 |
+
|
80 |
+
@dispatch(SingletonInt, sympy.Integer)  # type: ignore[no-redef]
def _eval_is_ge(a, b):  # noqa: F811
    """Is SingletonInt a >= Integer b?  True whenever b <= 2, since a nested
    int's runtime value is at least 2; otherwise indeterminate."""
    if b <= 2:
        return sympy.true
    raise ValueError("Symbolic SingletonInt: Relation is indeterminate")
|
85 |
+
|
86 |
+
|
87 |
+
@dispatch(SingletonInt, SingletonInt)  # type: ignore[no-redef]
def _eval_is_ge(a, b):  # noqa: F811
    """Compare two SingletonInts.

    Decidable only when they wrap the same underlying val, in which case the
    comparison reduces to comparing coefficients; different vals are
    symbolically incomparable.
    """
    if a._val == b._val:
        if a._coeff >= b._coeff:
            return sympy.true
        else:
            return sympy.false
    raise ValueError("Symbolic SingletonInt: Relation is indeterminate")
|
venv/lib/python3.10/site-packages/torch/utils/_sympy/solve.py
ADDED
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import logging
|
2 |
+
|
3 |
+
from typing import Dict, Optional, Tuple, Type
|
4 |
+
|
5 |
+
import sympy
|
6 |
+
|
7 |
+
from torch.utils._sympy.functions import FloorDiv
|
8 |
+
|
9 |
+
log = logging.getLogger(__name__)
|
10 |
+
|
11 |
+
_MIRROR_REL_OP: Dict[Type[sympy.Basic], Type[sympy.Rel]] = {
|
12 |
+
sympy.Eq: sympy.Eq,
|
13 |
+
sympy.Ne: sympy.Ne,
|
14 |
+
sympy.Ge: sympy.Le,
|
15 |
+
sympy.Gt: sympy.Lt,
|
16 |
+
sympy.Le: sympy.Ge,
|
17 |
+
sympy.Lt: sympy.Gt,
|
18 |
+
}
|
19 |
+
|
20 |
+
INEQUALITY_TYPES = (sympy.Gt, sympy.Ge, sympy.Lt, sympy.Le)
|
21 |
+
|
22 |
+
|
23 |
+
def mirror_rel_op(type: Type) -> Optional[Type[sympy.Rel]]:
    """Return the relational class obtained by swapping the operands of ``type``.

    E.g. ``Gt`` maps to ``Lt`` (a > b  <=>  b < a); symmetric relations
    (``Eq``, ``Ne``) map to themselves.  Returns ``None`` for unrecognized
    relation classes.
    """
    return _MIRROR_REL_OP.get(type)
|
25 |
+
|
26 |
+
|
27 |
+
# Tries to simplify 'expr', so as to leave only 'thing' in the left-hand side.
|
28 |
+
#
|
29 |
+
# Returns a tuple of:
|
30 |
+
# 1. The simplified expression
|
31 |
+
# 2. The expression on the right-hand side
|
32 |
+
#
|
33 |
+
# Returns 'None' if it can't reach a state where the only thing in the left
|
34 |
+
# hand side is 'thing'.
|
35 |
+
#
|
36 |
+
# 'trials': number of times 'try_solve' will try to isolate 'thing' to the
|
37 |
+
# left-hand side.
|
38 |
+
#
|
39 |
+
# 'floordiv_inequality': flag to enable conversion of 'FloorDiv' into
|
40 |
+
# inequalities.
|
41 |
+
def try_solve(
    expr: sympy.Basic,
    thing: sympy.Basic,
    trials: int = 5,
    floordiv_inequality: bool = True,
) -> Optional[Tuple[sympy.Rel, sympy.Basic]]:
    """Try to rewrite relational 'expr' so that 'thing' is alone on the LHS.

    Returns (simplified relation, its RHS) on success, or None when 'expr'
    is not a supported relation, 'thing' appears on both sides, or 'trials'
    rounds of _try_isolate_lhs fail to isolate it.
    """
    mirror = mirror_rel_op(type(expr))

    # Ignore unsupported expressions:
    #   - Those that are not relational operations
    #   - Those that don't have a mirror (just avoiding unexpected classes)
    if not isinstance(expr, sympy.Rel) or mirror is None:
        log.debug("expression with unsupported type: %s", type(expr))
        return None

    lhs_has_thing = expr.lhs.has(thing)
    rhs_has_thing = expr.rhs.has(thing)

    # Give up when 'thing' appears on both sides of the relational expression.
    # That is because, as is, we assume the thing we are trying to isolate is
    # only on the right-hand side.
    if lhs_has_thing and rhs_has_thing:
        log.debug("thing (%s) found in both sides of expression: %s", thing, expr)
        return None

    # Try considering both LHS and RHS by mirroring the original expression:
    # a < b ==> b > a
    expressions = []

    # Add each version of 'expr' if 'thing' is in its left-hand side.
    if lhs_has_thing:
        expressions.append(expr)
    if rhs_has_thing:
        expressions.append(mirror(expr.rhs, expr.lhs))

    for e in expressions:
        if e is None:
            continue

        assert isinstance(e, sympy.Rel)

        # Iterate to a fixed point (or until 'trials' is exhausted).
        for _ in range(trials):
            trial = _try_isolate_lhs(e, thing, floordiv_inequality=floordiv_inequality)
            # Stop if there was no change in this trial.
            if trial == e:
                break
            e = trial  # type: ignore[assignment]

        # Return if we were able to isolate 'thing' on the left-hand side.
        if isinstance(e, sympy.Rel) and e.lhs == thing:
            return e, e.rhs

    return None
|
94 |
+
|
95 |
+
|
96 |
+
def _try_isolate_lhs(
    expr: sympy.Basic, thing: sympy.Basic, floordiv_inequality: bool
) -> sympy.Basic:
    """One rewriting step toward isolating 'thing' on the LHS of a relation.

    Applies, in order: (1) move additive terms without 'thing' to the RHS,
    (2) divide out multiplicative factors without 'thing' (mirroring the
    relation when dividing by a negative), and (3) when enabled, turn a
    FloorDiv LHS into equivalent inequalities.  Returns the (possibly
    unchanged) expression; try_solve calls this repeatedly.
    """
    e = expr
    op = type(expr)

    if isinstance(e, sympy.Rel):
        # Move any constants in the left-hand side to the right-hand side.
        lhs_not_thing = (
            sum([a for a in e.lhs.args if not a.has(thing)])
            if isinstance(e.lhs, sympy.Add)
            else 0
        )
        e = op(expr.lhs - lhs_not_thing, expr.rhs - lhs_not_thing)  # type: ignore[attr-defined]

    # Divide both sides by the factors that don't contain thing.
    if isinstance(e, sympy.Rel) and isinstance(e.lhs, sympy.Mul):
        lhs, rhs = e.args
        other = sympy.Mul(*[a for a in lhs.args if not a.has(thing)])

        # If we can't tell whether 'other' is negative or positive, we do nothing.
        # That is because we don't know whether we have to mirror the operation or not.
        if not (isinstance(e, INEQUALITY_TYPES) and other.is_negative is None):
            # Divide both sides by 'other'.
            lhs = lhs / other
            rhs = rhs / other

            # If 'e' is an inequality and 'other' is negative, we have to
            # mirror the expression.
            if isinstance(e, INEQUALITY_TYPES) and other.is_negative:
                op = mirror_rel_op(op)  # type: ignore[assignment]

            assert op is not None
            e = op(lhs, rhs)

    ################################################################################
    # left-hand side is FloorDiv
    ################################################################################
    #
    # Given the expression: a // b op c
    # where 'op' is a relational operation, these rules only work if:
    #   - b > 0
    #   - c is an integer
    if (
        floordiv_inequality
        and isinstance(e, sympy.Rel)
        and isinstance(e.lhs, FloorDiv)
        and e.lhs.divisor.is_positive
        and e.rhs.is_integer
    ):
        # a // b == expr
        # => a >= (b * expr) and a < (b * (expr + 1))
        if isinstance(expr, sympy.Eq):
            numerator, denominator = e.lhs.args
            return sympy.And(
                sympy.Ge(numerator, (e.rhs * denominator)),  # type: ignore[arg-type]
                sympy.Lt(numerator, ((e.rhs + 1) * denominator)),  # type: ignore[arg-type]
            )
        # a // b != expr
        # => a < (b * expr) or a >= (b * (expr + 1))
        if isinstance(expr, sympy.Ne):
            numerator, denominator = e.lhs.args
            return sympy.Or(
                sympy.Lt(numerator, (e.rhs * denominator)),  # type: ignore[arg-type]
                sympy.Ge(numerator, ((e.rhs + 1) * denominator)),  # type: ignore[arg-type]
            )
        # The transformations below only work if b is positive.
        # Note: we only have this information for constants.
        # a // b > expr  => a >= b * (expr + 1)
        # a // b >= expr => a >= b * expr
        if isinstance(expr, (sympy.Gt, sympy.Ge)):
            quotient = e.rhs if isinstance(expr, sympy.Ge) else (e.rhs + 1)  # type: ignore[arg-type]
            return sympy.Ge(e.lhs.args[0], (quotient * e.lhs.args[1]))  # type: ignore[arg-type]
        # a // b < expr  => a < b * expr
        # a // b <= expr => a < b * (expr + 1)
        if isinstance(expr, (sympy.Lt, sympy.Le)):
            quotient = e.rhs if isinstance(expr, sympy.Lt) else (e.rhs + 1)  # type: ignore[arg-type]
            return sympy.Lt(e.lhs.args[0], (quotient * e.lhs.args[1]))  # type: ignore[arg-type]

    return e
|
venv/lib/python3.10/site-packages/torch/utils/_sympy/value_ranges.py
ADDED
@@ -0,0 +1,782 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import dataclasses
|
4 |
+
import itertools
|
5 |
+
import sympy
|
6 |
+
from sympy.logic.boolalg import BooleanAtom, Boolean as SympyBoolean
|
7 |
+
import operator
|
8 |
+
import math
|
9 |
+
import logging
|
10 |
+
import torch
|
11 |
+
from typing import Dict, Optional, SupportsFloat, TypeVar, Generic, Union, overload, Callable, TYPE_CHECKING
|
12 |
+
from typing_extensions import TypeGuard
|
13 |
+
|
14 |
+
from torch._prims_common import dtype_to_type
|
15 |
+
from .interp import sympy_interp
|
16 |
+
from .functions import Round, RoundDecimal
|
17 |
+
|
18 |
+
log = logging.getLogger(__name__)
|
19 |
+
|
20 |
+
__all__ = ["ValueRanges", "ValueRangeAnalysis", "bound_sympy"]
|
21 |
+
|
22 |
+
_T = TypeVar('_T', sympy.Expr, SympyBoolean)
|
23 |
+
|
24 |
+
class ValueRangeError(RuntimeError):
|
25 |
+
pass
|
26 |
+
|
27 |
+
|
28 |
+
# Like sympify, but supports less stuff, and also ensures that direct
|
29 |
+
# sympy expressions don't have free variables
|
30 |
+
def simple_sympify(e):
    """Convert e to a constant sympy atom.

    Accepts Python bool/int/float, already-constant sympy.Expr (no free
    symbols, no NaN), and sympy BooleanAtoms; anything else is an error.
    """
    if isinstance(e, bool):
        return sympy.true if e else sympy.false
    elif isinstance(e, int):
        return sympy.Integer(e)
    elif isinstance(e, float):
        # infinity is special; we use it to bracket integers as well
        if math.isinf(e):
            return sympy.oo if e > 0 else -sympy.oo
        return sympy.Float(e)
    elif isinstance(e, sympy.Expr):
        assert e.is_number, e
        # NaNs can occur when doing things like 0 * sympy.oo, but it is better
        # if the operator notices this and takes care of it, because sometimes
        # the NaN is inappropriate (for example, for ints, the [-oo, oo] range
        # should go to zero when multiplied with [0, 0])
        assert e != sympy.nan
        return e
    elif isinstance(e, BooleanAtom):
        return e
    else:
        raise AssertionError(f"not simple sympy type {type(e)}: {e}")
|
52 |
+
|
53 |
+
|
54 |
+
# Sympy atomics only. Unlike <=, it also works on Sympy bools.
|
55 |
+
def sympy_generic_le(lower, upper):
    """'lower <= upper' for sympy atoms, extended to booleans with the
    ordering False <= True."""
    if isinstance(lower, sympy.Expr):
        assert isinstance(upper, sympy.Expr)
        return lower <= upper
    else:
        # only negative condition is True > False
        assert isinstance(lower, SympyBoolean) and isinstance(upper, SympyBoolean)
        return not (lower and not upper)
|
63 |
+
|
64 |
+
|
65 |
+
def vr_is_bool(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[SympyBoolean]]:
    # TypeGuard lets callers narrow ValueRanges[_T] to the boolean variant.
    return vr.is_bool
|
67 |
+
|
68 |
+
|
69 |
+
def vr_is_expr(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[sympy.Expr]]:
    # TypeGuard lets callers narrow ValueRanges[_T] to the numeric variant.
    return not vr.is_bool
|
71 |
+
|
72 |
+
|
73 |
+
ExprIn = Union[int, float, sympy.Expr]
|
74 |
+
BoolIn = Union[bool, SympyBoolean]
|
75 |
+
AllIn = Union[ExprIn, BoolIn]
|
76 |
+
ExprFn = Callable[[sympy.Expr], sympy.Expr]
|
77 |
+
ExprFn2 = Callable[[sympy.Expr, sympy.Expr], sympy.Expr]
|
78 |
+
BoolFn = Callable[[SympyBoolean], SympyBoolean]
|
79 |
+
BoolFn2 = Callable[[SympyBoolean, SympyBoolean], SympyBoolean]
|
80 |
+
AllFn = Union[ExprFn, BoolFn]
|
81 |
+
AllFn2 = Union[ExprFn2, BoolFn2]
|
82 |
+
|
83 |
+
|
84 |
+
@dataclasses.dataclass(frozen=True)
class ValueRanges(Generic[_T]):
    """An inclusive interval [lower, upper] of constant sympy values.

    Comes in two flavors, numeric (sympy.Expr bounds) and boolean
    (SympyBoolean bounds, ordered False <= True); is_bool records which.
    Provides lattice ops (& intersection, | union) and *_map combinators
    that bound a function applied over a range.
    """

    if TYPE_CHECKING:
        # ruff doesn't understand circular references but mypy does
        ExprVR = ValueRanges[sympy.Expr]  # noqa: F821
        BoolVR = ValueRanges[SympyBoolean]  # noqa: F821
        AllVR = Union[ExprVR, BoolVR]

    # Although the type signature here suggests you can pass any
    # sympy expression, in practice the analysis here only works
    # with constant sympy expressions
    lower: _T
    upper: _T
    is_bool: bool

    @overload
    def __init__(self: ValueRanges[sympy.Expr], lower: ExprIn, upper: ExprIn) -> None:
        ...

    @overload
    def __init__(self: ValueRanges[SympyBoolean], lower: BoolIn, upper: BoolIn) -> None:
        ...

    def __init__(self, lower: AllIn, upper: AllIn) -> None:
        lower = simple_sympify(lower)
        upper = simple_sympify(upper)
        # TODO: when the bounds have free variables, this may be
        # nontrivial to actually verify
        if not sympy_generic_le(lower, upper):
            raise ValueRangeError(f"Invalid ranges [{lower}:{upper}]")
        # Because this is a frozen class
        object.__setattr__(self, "lower", lower)
        object.__setattr__(self, "upper", upper)
        object.__setattr__(self, "is_bool", isinstance(lower, SympyBoolean))
        # Mixed bool/non-bool bounds are never valid.
        assert isinstance(upper, SympyBoolean) == self.is_bool

    def boolify(self) -> ValueRanges[SympyBoolean]:
        """Coerce to the boolean variant; unknown() becomes unknown_bool()."""
        if vr_is_bool(self):
            return self
        elif self == ValueRanges.unknown():
            return ValueRanges.unknown_bool()
        else:
            raise AssertionError(f"not bool like {self}")

    def __contains__(self, x: AllIn) -> bool:
        x = simple_sympify(x)
        return sympy_generic_le(self.lower, x) and sympy_generic_le(x, self.upper)

    def issubset(self, other):
        """True if this range lies entirely within 'other'."""
        return sympy_generic_le(other.lower, self.lower) and sympy_generic_le(self.upper, other.upper)

    def tighten(self, other) -> ValueRanges:
        """Given two ValueRanges, returns their intersection"""
        return self & other

    # Intersection
    @overload
    def __and__(self: ValueRanges[sympy.Expr], other: ValueRanges[sympy.Expr]) -> ValueRanges[sympy.Expr]:
        ...

    @overload
    def __and__(self: ValueRanges[SympyBoolean], other: ValueRanges[SympyBoolean]) -> ValueRanges[SympyBoolean]:
        ...

    def __and__(self: AllVR, other: AllVR) -> AllVR:
        if other == ValueRanges.unknown():
            return self
        if self == ValueRanges.unknown():
            return other
        assert self.is_bool == other.is_bool, (self, other)
        if self.is_bool:
            # In the boolean lattice, max is Or and min is And.
            return ValueRanges(sympy.Or(self.lower, other.lower), sympy.And(self.upper, other.upper))
        else:
            return ValueRanges(sympy.Max(self.lower, other.lower), sympy.Min(self.upper, other.upper))

    # Union
    @overload
    def __or__(self: ValueRanges[sympy.Expr], other: ValueRanges[sympy.Expr]) -> ValueRanges[sympy.Expr]:
        ...

    @overload
    def __or__(self: ValueRanges[SympyBoolean], other: ValueRanges[SympyBoolean]) -> ValueRanges[SympyBoolean]:
        ...

    def __or__(self: AllVR, other: AllVR) -> AllVR:
        if ValueRanges.unknown() in (self, other):
            return ValueRanges.unknown()
        assert self.is_bool == other.is_bool, (self, other)
        if self.is_bool:
            # In the boolean lattice, min is And and max is Or.
            return ValueRanges(sympy.And(self.lower, other.lower), sympy.Or(self.upper, other.upper))
        else:
            return ValueRanges(sympy.Min(self.lower, other.lower), sympy.Max(self.upper, other.upper))

    def is_singleton(self) -> bool:
        return self.lower == self.upper

    # TODO: this doesn't work with bools but arguably it should
    @staticmethod
    def unknown() -> ValueRanges[sympy.Expr]:
        return ValueRanges(-sympy.oo, sympy.oo)

    @staticmethod
    def unknown_bool() -> ValueRanges[SympyBoolean]:
        return ValueRanges(sympy.false, sympy.true)

    @overload
    @staticmethod
    # work around the fact that bool and int overlap
    def wrap(arg: Union[ExprIn, ExprVR]) -> ExprVR:  # type: ignore[overload-overlap]
        ...

    @overload
    @staticmethod
    def wrap(arg: Union[BoolIn, BoolVR]) -> BoolVR:
        ...

    @staticmethod
    def wrap(arg: Union[AllIn, AllVR]) -> AllVR:
        """Lift a scalar into a singleton range; pass ranges through as-is."""
        if isinstance(arg, ValueRanges):
            return arg
        # arg is either ExprIn or BoolIn, but we don't know it here
        return ValueRanges(arg, arg)  # type: ignore[arg-type]

    @staticmethod
    def increasing_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:
        """Increasing: x <= y => f(x) <= f(y)."""
        x = ValueRanges.wrap(x)
        return ValueRanges(fn(x.lower), fn(x.upper))

    @overload
    @staticmethod
    def decreasing_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:
        ...

    @overload
    @staticmethod
    def decreasing_map(x: Union[BoolIn, BoolVR], fn: BoolFn) -> BoolVR:
        ...

    @staticmethod
    def decreasing_map(x: Union[AllIn, AllVR], fn: AllFn) -> AllVR:
        """Decreasing: x <= y => f(x) >= f(y)."""
        x = ValueRanges.wrap(x)
        # consistently either Expr or Bool, but we don't know it here
        return ValueRanges(fn(x.upper), fn(x.lower))  # type: ignore[arg-type]

    @staticmethod
    def monotone_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:
        """It's increasing or decreasing."""
        x = ValueRanges.wrap(x)
        l = fn(x.lower)
        u = fn(x.upper)
        return ValueRanges(min(l, u), max(l, u))

    @staticmethod
    def convex_min_zero_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:
        """Fn is convex and has a minimum at 0."""
        x = ValueRanges.wrap(x)
        if 0 in x:
            # The minimum is attained at 0; the maximum at one of the endpoints.
            return ValueRanges(0, max(fn(x.lower), fn(x.upper)))
        else:
            return ValueRanges.monotone_map(x, fn)

    @overload
    @staticmethod
    def coordinatewise_increasing_map(x: Union[ExprIn, ExprVR], y: Union[ExprIn, ExprVR], fn: ExprFn2) -> ExprVR:
        ...

    @overload
    @staticmethod
    def coordinatewise_increasing_map(x: Union[BoolIn, BoolVR], y: Union[BoolIn, BoolVR], fn: BoolFn2) -> BoolVR:
        ...

    @staticmethod
    def coordinatewise_increasing_map(x: Union[AllIn, AllVR], y: Union[AllIn, AllVR], fn: AllFn2) -> AllVR:
        """
        It's increasing on each coordinate.

        Mathematically:
        For every 1 <= i <= n and x_i <= y_i we have that
        f(x1, .., xn) <= f(x1, , yi, ..., xn)
        """
        x, y = ValueRanges.wrap(x), ValueRanges.wrap(y)
        return ValueRanges(
            fn(x.lower, y.lower),  # type: ignore[arg-type]
            fn(x.upper, y.upper),  # type: ignore[arg-type]
        )

    @classmethod
    def coordinatewise_monotone_map(cls, x, y, fn):
        """It's increasing or decreasing on each coordinate."""
        x, y = cls.wrap(x), cls.wrap(y)
        # Evaluate fn at all four corner combinations and take the extremes.
        products = [
            fn(a, b)
            for a, b in itertools.product([x.lower, x.upper], [y.lower, y.upper])
        ]
        return ValueRanges(min(products), max(products))
|
281 |
+
|
282 |
+
class SymPyValueRangeAnalysis:
|
283 |
+
"""
|
284 |
+
It gives bounds on a SymPy operator given bounds on its arguments
|
285 |
+
See the function `bound_sympy` for a function that applies this logic to a full SymPy expression
|
286 |
+
"""
|
287 |
+
|
288 |
+
@staticmethod
|
289 |
+
def constant(value, dtype):
|
290 |
+
# NB: value is NOT a sympy expression, it's a constant!
|
291 |
+
is_python = isinstance(value, (int, float, bool))
|
292 |
+
assert is_python or isinstance(value, (BooleanAtom, sympy.Integer, sympy.Number))
|
293 |
+
|
294 |
+
# using nan makes subsequent computation throw, and for the purposes of optimization
|
295 |
+
# returning -math.inf - math.inf is equivalent to giving up
|
296 |
+
if isinstance(value, SupportsFloat) and math.isnan(value):
|
297 |
+
return ValueRanges.unknown()
|
298 |
+
|
299 |
+
if is_python:
|
300 |
+
type_ = dtype_to_type(dtype)
|
301 |
+
value = type_(value)
|
302 |
+
else:
|
303 |
+
# We do a type check on a best-effort basis
|
304 |
+
# We don't want to force a cast to sympy.Float if the value is Rational to avoid losing precision
|
305 |
+
if dtype == torch.bool:
|
306 |
+
assert isinstance(value, BooleanAtom)
|
307 |
+
elif dtype.is_floating_point:
|
308 |
+
assert not value.is_finite or value.is_real
|
309 |
+
else:
|
310 |
+
# dtype is intXX
|
311 |
+
assert value.is_integer
|
312 |
+
|
313 |
+
return ValueRanges.wrap(value)
|
314 |
+
|
315 |
+
@staticmethod
|
316 |
+
def not_(a):
|
317 |
+
a = ValueRanges.wrap(a)
|
318 |
+
a = a.boolify()
|
319 |
+
assert a.is_bool
|
320 |
+
return ValueRanges.decreasing_map(a, sympy.Not)
|
321 |
+
|
322 |
+
@staticmethod
|
323 |
+
def or_(a, b):
|
324 |
+
return ValueRanges.coordinatewise_increasing_map(a, b, sympy.Or)
|
325 |
+
|
326 |
+
@staticmethod
|
327 |
+
def and_(a, b):
|
328 |
+
return ValueRanges.coordinatewise_increasing_map(a, b, sympy.And)
|
329 |
+
|
330 |
+
@staticmethod
|
331 |
+
def eq(a, b):
|
332 |
+
a = ValueRanges.wrap(a)
|
333 |
+
b = ValueRanges.wrap(b)
|
334 |
+
if a.is_singleton() and b.is_singleton() and a.lower == b.lower:
|
335 |
+
return ValueRanges.wrap(sympy.true)
|
336 |
+
elif a.lower > b.upper or b.lower > a.upper: # ranges disjoint
|
337 |
+
return ValueRanges.wrap(sympy.false)
|
338 |
+
return ValueRanges(sympy.false, sympy.true)
|
339 |
+
|
340 |
+
@classmethod
|
341 |
+
def ne(cls, a, b):
|
342 |
+
return cls.not_(cls.eq(a, b))
|
343 |
+
|
344 |
+
@classmethod
|
345 |
+
def lt(cls, a, b):
|
346 |
+
a = ValueRanges.wrap(a)
|
347 |
+
b = ValueRanges.wrap(b)
|
348 |
+
assert a.is_bool == b.is_bool
|
349 |
+
if a.is_bool:
|
350 |
+
return cls.and_(cls.not_(a), b)
|
351 |
+
else:
|
352 |
+
if a.upper < b.lower:
|
353 |
+
return ValueRanges.wrap(sympy.true)
|
354 |
+
elif a.lower >= b.upper:
|
355 |
+
return ValueRanges.wrap(sympy.false)
|
356 |
+
return ValueRanges(sympy.false, sympy.true)
|
357 |
+
|
358 |
+
@classmethod
|
359 |
+
def gt(cls, a, b):
|
360 |
+
return cls.lt(b, a)
|
361 |
+
|
362 |
+
@classmethod
|
363 |
+
def le(cls, a, b):
|
364 |
+
return cls.not_(cls.gt(a, b))
|
365 |
+
|
366 |
+
@classmethod
|
367 |
+
def ge(cls, a, b):
|
368 |
+
return cls.not_(cls.lt(a, b))
|
369 |
+
|
370 |
+
@staticmethod
|
371 |
+
def add(a, b):
|
372 |
+
return ValueRanges.coordinatewise_increasing_map(a, b, operator.add)
|
373 |
+
|
374 |
+
@classmethod
|
375 |
+
def mul(cls, a, b):
|
376 |
+
a = ValueRanges.wrap(a)
|
377 |
+
b = ValueRanges.wrap(b)
|
378 |
+
|
379 |
+
assert a.is_bool == b.is_bool
|
380 |
+
if a.is_bool:
|
381 |
+
return cls.and_(a, b)
|
382 |
+
|
383 |
+
def safe_mul(a, b):
|
384 |
+
# Make unknown() * wrap(0) == wrap(0)
|
385 |
+
if a == 0:
|
386 |
+
return a
|
387 |
+
elif b == 0:
|
388 |
+
return b
|
389 |
+
else:
|
390 |
+
return a * b
|
391 |
+
|
392 |
+
return ValueRanges.coordinatewise_monotone_map(a, b, safe_mul)
|
393 |
+
|
394 |
+
@classmethod
|
395 |
+
def div(cls, a, b):
|
396 |
+
return cls.truediv(a, b)
|
397 |
+
|
398 |
+
@staticmethod
|
399 |
+
def truediv(a, b):
|
400 |
+
a = ValueRanges.wrap(a)
|
401 |
+
b = ValueRanges.wrap(b)
|
402 |
+
if 0 in b or ((-sympy.oo in a or sympy.oo in a) and (-sympy.oo in b or sympy.oo in b)):
|
403 |
+
return ValueRanges.unknown()
|
404 |
+
else:
|
405 |
+
return ValueRanges.coordinatewise_monotone_map(a, b, operator.truediv)
|
406 |
+
|
407 |
+
@staticmethod
|
408 |
+
def floordiv(a, b):
|
409 |
+
a = ValueRanges.wrap(a)
|
410 |
+
b = ValueRanges.wrap(b)
|
411 |
+
if 0 in b or ((-sympy.oo in a or sympy.oo in a) and (-sympy.oo in b or sympy.oo in b)):
|
412 |
+
return ValueRanges.unknown()
|
413 |
+
else:
|
414 |
+
return ValueRanges.coordinatewise_monotone_map(a, b, operator.floordiv)
|
415 |
+
|
416 |
+
@staticmethod
|
417 |
+
def mod(x, y):
|
418 |
+
x = ValueRanges.wrap(x)
|
419 |
+
y = ValueRanges.wrap(y)
|
420 |
+
if x.is_singleton() and y.is_singleton() and y.lower != 0:
|
421 |
+
return ValueRanges.wrap(x.lower % y.lower)
|
422 |
+
if y.lower <= 0:
|
423 |
+
return ValueRanges.unknown()
|
424 |
+
return ValueRanges(0, y.upper)
|
425 |
+
|
426 |
+
@classmethod
|
427 |
+
def modular_indexing(cls, a, b, c):
|
428 |
+
return cls.mod(cls.floordiv(a, b), c)
|
429 |
+
|
430 |
+
@classmethod
|
431 |
+
def is_non_overlapping_and_dense_indicator(cls, *args):
|
432 |
+
return ValueRanges.unknown()
|
433 |
+
|
434 |
+
@classmethod
|
435 |
+
def pow(cls, a, b):
|
436 |
+
def is_integer(val):
|
437 |
+
return isinstance(val, int) or (
|
438 |
+
hasattr(val, "is_integer") and val.is_integer
|
439 |
+
)
|
440 |
+
|
441 |
+
a = ValueRanges.wrap(a)
|
442 |
+
b = ValueRanges.wrap(b)
|
443 |
+
# Not implemented yet. It's a bit tricky
|
444 |
+
# If you want to implement it, compute the partial derivatives of a ** b
|
445 |
+
# and check the ranges where the function is increasing / decreasing
|
446 |
+
# Another non-tight way of doing this is defaulting to doing noting that for a > 0, a ** b == exp(b * log(a))
|
447 |
+
# If this second option is implemented, by carefult about the types and possible infinities here and there.
|
448 |
+
if not b.is_singleton():
|
449 |
+
return ValueRanges.unknown()
|
450 |
+
|
451 |
+
b = b.lower
|
452 |
+
if a.is_singleton():
|
453 |
+
a = a.lower
|
454 |
+
r = a ** b
|
455 |
+
if not r.is_finite:
|
456 |
+
return ValueRanges.unknown()
|
457 |
+
return ValueRanges.wrap(r)
|
458 |
+
|
459 |
+
if b == 0:
|
460 |
+
if not a.lower.is_finite:
|
461 |
+
return ValueRanges.unknown()
|
462 |
+
type_ = sympy.Float if a.lower.is_real else sympy.Integer
|
463 |
+
return ValueRanges.wrap(type_(1))
|
464 |
+
|
465 |
+
if b < 0:
|
466 |
+
a = cls.reciprocal(a)
|
467 |
+
b = -b
|
468 |
+
|
469 |
+
if a == ValueRanges.unknown():
|
470 |
+
return ValueRanges.unknown()
|
471 |
+
|
472 |
+
# Here b > 0
|
473 |
+
if not is_integer(b):
|
474 |
+
# If the base is positive, then we're good, otherwise nothing's defined
|
475 |
+
if a.lower >= 0:
|
476 |
+
return ValueRanges.increasing_map(a, lambda x: x ** b)
|
477 |
+
else:
|
478 |
+
return ValueRanges.unknown()
|
479 |
+
else:
|
480 |
+
# b > 0 integer
|
481 |
+
if b % 2 == 0:
|
482 |
+
# x^n where n is even
|
483 |
+
return ValueRanges.convex_min_zero_map(a, lambda x: x ** b)
|
484 |
+
else:
|
485 |
+
# x^n where n is odd
|
486 |
+
return ValueRanges.increasing_map(a, lambda x: x ** b)
|
487 |
+
|
488 |
+
@staticmethod
|
489 |
+
def reciprocal(x):
|
490 |
+
""" Needed as it's used in pow, but it won't appear on a SymPy expression """
|
491 |
+
x = ValueRanges.wrap(x)
|
492 |
+
if 0 in x:
|
493 |
+
return ValueRanges.unknown()
|
494 |
+
else:
|
495 |
+
return ValueRanges.decreasing_map(x, lambda y: 1 / y)
|
496 |
+
|
497 |
+
@staticmethod
|
498 |
+
def abs(x):
|
499 |
+
return ValueRanges.convex_min_zero_map(x, abs)
|
500 |
+
|
501 |
+
@staticmethod
|
502 |
+
def exp(x):
|
503 |
+
return ValueRanges.increasing_map(x, sympy.functions.elementary.exponential.exp)
|
504 |
+
|
505 |
+
@staticmethod
|
506 |
+
def log(x):
|
507 |
+
x = ValueRanges.wrap(x)
|
508 |
+
if x.lower <= 0:
|
509 |
+
return ValueRanges.unknown()
|
510 |
+
return ValueRanges.increasing_map(x, sympy.log)
|
511 |
+
|
512 |
+
@classmethod
|
513 |
+
def minimum(cls, a, b):
|
514 |
+
return cls.min_or_max(a, b, sympy.Min)
|
515 |
+
|
516 |
+
@classmethod
|
517 |
+
def maximum(cls, a, b):
|
518 |
+
return cls.min_or_max(a, b, sympy.Max)
|
519 |
+
|
520 |
+
@staticmethod
|
521 |
+
def min_or_max(a, b, fn):
|
522 |
+
a = ValueRanges.wrap(a)
|
523 |
+
b = ValueRanges.wrap(b)
|
524 |
+
|
525 |
+
# Performs upcasting first
|
526 |
+
def fn_(x: sympy.Expr, y: sympy.Expr) -> sympy.Expr:
|
527 |
+
# Poorman's version of upcasting in Sympy
|
528 |
+
# Inf is not a float...
|
529 |
+
if x.is_Integer and y.is_Integer:
|
530 |
+
result_type = sympy.Integer
|
531 |
+
elif x.is_rational and y.is_rational:
|
532 |
+
result_type = sympy.Rational
|
533 |
+
else:
|
534 |
+
assert x.is_real or not x.is_finite or y.is_real or not y.is_finite
|
535 |
+
result_type = sympy.Float
|
536 |
+
return fn(result_type(x), result_type(y))
|
537 |
+
|
538 |
+
return ValueRanges.coordinatewise_increasing_map(a, b, fn_)
|
539 |
+
|
540 |
+
@classmethod
|
541 |
+
def floor(cls, x):
|
542 |
+
return ValueRanges.increasing_map(x, sympy.functions.elementary.integers.floor)
|
543 |
+
|
544 |
+
@classmethod
|
545 |
+
def ceil(cls, x):
|
546 |
+
return ValueRanges.increasing_map(x, sympy.functions.elementary.integers.ceiling)
|
547 |
+
|
548 |
+
@classmethod
|
549 |
+
def round(cls, number, ndigits=None):
|
550 |
+
if ndigits is None:
|
551 |
+
fn = Round
|
552 |
+
else:
|
553 |
+
assert ndigits.is_singleton()
|
554 |
+
ndigits = ndigits.lower
|
555 |
+
# We can't use functools.partial here since sympy doesn't support keyword arguments, but we have to bind
|
556 |
+
# the second parameter.
|
557 |
+
fn = lambda number: RoundDecimal(number, ndigits) # type: ignore[misc, assignment] # noqa: E731
|
558 |
+
|
559 |
+
return ValueRanges.increasing_map(number, fn)
|
560 |
+
|
561 |
+
# It's used in some models on symints
|
562 |
+
@staticmethod
|
563 |
+
def sqrt(x):
|
564 |
+
x = ValueRanges.wrap(x)
|
565 |
+
if x.lower < 0:
|
566 |
+
return ValueRanges.unknown()
|
567 |
+
return ValueRanges.increasing_map(x, sympy.sqrt)
|
568 |
+
|
569 |
+
@staticmethod
|
570 |
+
def where(a, b, c):
|
571 |
+
b = ValueRanges.wrap(b)
|
572 |
+
c = ValueRanges.wrap(c)
|
573 |
+
a = a.boolify()
|
574 |
+
assert b.is_bool == c.is_bool
|
575 |
+
if b.is_bool:
|
576 |
+
return ValueRanges(sympy.And(b.lower, c.lower), sympy.Or(b.upper, c.upper))
|
577 |
+
else:
|
578 |
+
return ValueRanges(sympy.Min(b.lower, c.lower), sympy.Max(b.upper, c.upper))
|
579 |
+
|
580 |
+
# expr_cond_pair is used to represent a single (expr, condition) pair in piecewise.
|
581 |
+
# We just return the value range of the expression and its corresponding condition as a tuple
|
582 |
+
# and defer the analysis to piecewise
|
583 |
+
@staticmethod
|
584 |
+
def expr_cond_pair(a, b):
|
585 |
+
b = b.boolify()
|
586 |
+
return (a, b)
|
587 |
+
|
588 |
+
# piecewise function can be used to convert a SymBool to SymInt:
|
589 |
+
# int_expr = Piecewise((1, bool_expr), (0, True)), it evalutes to 1 when sym_bool is True and 0 otherwise.
|
590 |
+
#
|
591 |
+
# ranges is a sequence of (expr_range, condition_range) pairs. The range pair is constructed in expr_cond_pair.
|
592 |
+
# The ValueRange of Piecewise is just the union of all expr ranges whose condition expr can be True.
|
593 |
+
@staticmethod
|
594 |
+
def piecewise(*ranges):
|
595 |
+
init_range = None
|
596 |
+
for expr_range, cond_range in ranges:
|
597 |
+
if sympy.true in cond_range:
|
598 |
+
if init_range is None:
|
599 |
+
init_range = expr_range
|
600 |
+
else:
|
601 |
+
init_range = init_range | expr_range
|
602 |
+
return init_range
|
603 |
+
|
604 |
+
@staticmethod
|
605 |
+
def cos(x):
|
606 |
+
# TODO: We should tighten value ranges
|
607 |
+
# If input range span is pi + 2*pi*k, then output range is (-1, 1)
|
608 |
+
# otherwise the minimum of the value of the function on the extremes
|
609 |
+
return ValueRanges(-1.0, 1.0)
|
610 |
+
|
611 |
+
@staticmethod
|
612 |
+
def cosh(x):
|
613 |
+
x = ValueRanges.wrap(x)
|
614 |
+
if x.lower > 0:
|
615 |
+
return ValueRanges.increasing_map(x, sympy.cosh)
|
616 |
+
elif x.upper < 0:
|
617 |
+
return ValueRanges.decreasing_map(x, sympy.cosh)
|
618 |
+
return ValueRanges(0.0, sympy.oo)
|
619 |
+
|
620 |
+
@staticmethod
|
621 |
+
def sin(x):
|
622 |
+
# TODO: We should tighten value ranges
|
623 |
+
# See details on cos
|
624 |
+
return ValueRanges(-1.0, 1.0)
|
625 |
+
|
626 |
+
@staticmethod
|
627 |
+
def sinh(x):
|
628 |
+
return ValueRanges.increasing_map(x, sympy.sinh)
|
629 |
+
|
630 |
+
@staticmethod
|
631 |
+
def tan(x):
|
632 |
+
return ValueRanges(-sympy.oo, sympy.oo)
|
633 |
+
|
634 |
+
@staticmethod
|
635 |
+
def tanh(x):
|
636 |
+
return ValueRanges.increasing_map(x, sympy.tanh)
|
637 |
+
|
638 |
+
@staticmethod
|
639 |
+
def asin(x):
|
640 |
+
x = ValueRanges.wrap(x)
|
641 |
+
if -1 <= x.lower and x.upper <= 1:
|
642 |
+
return ValueRanges.increasing_map(x, sympy.asin)
|
643 |
+
return ValueRanges.unknown()
|
644 |
+
|
645 |
+
@staticmethod
|
646 |
+
def acos(x):
|
647 |
+
x = ValueRanges.wrap(x)
|
648 |
+
if -1 <= x.lower and x.upper <= 1:
|
649 |
+
return ValueRanges.decreasing_map(x, sympy.acos)
|
650 |
+
return ValueRanges.unknown()
|
651 |
+
|
652 |
+
@staticmethod
|
653 |
+
def atan(x):
|
654 |
+
return ValueRanges.increasing_map(x, sympy.atan)
|
655 |
+
|
656 |
+
|
657 |
+
class ValueRangeAnalysis(SymPyValueRangeAnalysis):
|
658 |
+
def __init__(self):
|
659 |
+
self.name = "ValueRangeAnalysis"
|
660 |
+
boolean_operators = (
|
661 |
+
"xor",
|
662 |
+
"logical_and",
|
663 |
+
"logical_or",
|
664 |
+
"logical_not",
|
665 |
+
)
|
666 |
+
for op in boolean_operators:
|
667 |
+
setattr(self, op, self.bool_handler)
|
668 |
+
|
669 |
+
@staticmethod
|
670 |
+
def bool_handler(*args, **kwargs):
|
671 |
+
# just assuming bools can have both values
|
672 |
+
return ValueRanges(sympy.false, sympy.true) # type: ignore[arg-type]
|
673 |
+
|
674 |
+
@staticmethod
|
675 |
+
def default_handler(*args, **kwargs):
|
676 |
+
# many ops are unlikely to show up in optimizable indexing compute,
|
677 |
+
# so we dont have full coverage
|
678 |
+
return ValueRanges.unknown()
|
679 |
+
|
680 |
+
def load(self, name: str, index: sympy.Expr):
|
681 |
+
return ValueRanges.unknown()
|
682 |
+
|
683 |
+
def store(self, name, index, value, mode=None):
|
684 |
+
return
|
685 |
+
|
686 |
+
def reduction(self, name, dtype, src_dtype, reduction_type, index, value):
|
687 |
+
return ValueRanges.unknown()
|
688 |
+
|
689 |
+
def index_expr(self, index, dtype):
|
690 |
+
assert isinstance(index, ValueRanges)
|
691 |
+
return index
|
692 |
+
|
693 |
+
@staticmethod
|
694 |
+
def to_dtype(x, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None):
|
695 |
+
x = ValueRanges.wrap(x)
|
696 |
+
|
697 |
+
if dtype == torch.bool:
|
698 |
+
if x.is_singleton():
|
699 |
+
return ValueRanges.wrap(x.lower != 0)
|
700 |
+
elif 0 not in x:
|
701 |
+
return ValueRanges.wrap(sympy.true)
|
702 |
+
else:
|
703 |
+
return ValueRanges(sympy.false, sympy.true)
|
704 |
+
|
705 |
+
def cast(x, dtype):
|
706 |
+
# dtype is int or float
|
707 |
+
if dtype.is_floating_point:
|
708 |
+
return sympy.Float(x)
|
709 |
+
else:
|
710 |
+
try:
|
711 |
+
return sympy.Integer(x)
|
712 |
+
except TypeError:
|
713 |
+
# inf cannot be cast to Integer
|
714 |
+
return x
|
715 |
+
|
716 |
+
if x.is_bool:
|
717 |
+
if x.is_singleton():
|
718 |
+
val = 1 if x.lower else 0
|
719 |
+
return ValueRanges.wrap(cast(val, dtype))
|
720 |
+
else:
|
721 |
+
return ValueRanges(cast(0, dtype), cast(1, dtype))
|
722 |
+
else:
|
723 |
+
# int to float or float to int
|
724 |
+
return ValueRanges(cast(x.lower, dtype), cast(x.upper, dtype))
|
725 |
+
|
726 |
+
@staticmethod
|
727 |
+
def square(x):
|
728 |
+
return ValueRanges.convex_min_zero_map(x, lambda y: y * y)
|
729 |
+
|
730 |
+
@staticmethod
|
731 |
+
def neg(x):
|
732 |
+
return ValueRanges.decreasing_map(x, operator.neg)
|
733 |
+
|
734 |
+
@classmethod
|
735 |
+
def truncdiv(cls, a, b):
|
736 |
+
x = cls.truediv(a, b)
|
737 |
+
if x == ValueRanges.unknown():
|
738 |
+
return x
|
739 |
+
|
740 |
+
def trunc(x):
|
741 |
+
return sympy.Integer(x) if x.is_finite else x
|
742 |
+
|
743 |
+
return ValueRanges.increasing_map(x, trunc)
|
744 |
+
|
745 |
+
@classmethod
|
746 |
+
def sub(cls, a, b):
|
747 |
+
return cls.add(a, cls.neg(b))
|
748 |
+
|
749 |
+
def __getattr__(self, name):
|
750 |
+
log.debug("unhandled ValueRange op %s", name)
|
751 |
+
return self.default_handler
|
752 |
+
|
753 |
+
|
754 |
+
def bound_sympy(expr: sympy.Expr, ranges: Optional[Dict[sympy.Symbol, ValueRanges]] = None) -> ValueRanges:
|
755 |
+
if isinstance(expr, sympy.Number):
|
756 |
+
return ValueRanges.wrap(expr)
|
757 |
+
|
758 |
+
ranges = ranges or {}
|
759 |
+
|
760 |
+
# If there's a tracing context, augment available constrained ranges.
|
761 |
+
context = torch._guards.TracingContext.try_get()
|
762 |
+
if context and context.fake_mode.shape_env:
|
763 |
+
ranges = {**context.fake_mode.shape_env.var_to_range, **ranges}
|
764 |
+
|
765 |
+
unbounded_vars = expr.free_symbols - ranges.keys()
|
766 |
+
if unbounded_vars:
|
767 |
+
# Give some bounds to the free variables via their SymPy assumptions
|
768 |
+
# TODO A better way of doing this would be to assign them a range upon creation, as
|
769 |
+
# size variables can come with a lower bound of 2, as we specialise on 0 and 1
|
770 |
+
unbounded_ranges: Dict[sympy.Symbol, ValueRanges] = {}
|
771 |
+
for s in unbounded_vars:
|
772 |
+
assert s.is_integer # type: ignore[attr-defined]
|
773 |
+
if s.is_positive: # type: ignore[attr-defined]
|
774 |
+
lower = 1
|
775 |
+
elif s.is_nonnegative: # type: ignore[attr-defined]
|
776 |
+
lower = 0
|
777 |
+
else:
|
778 |
+
lower = -math.inf # type: ignore[assignment]
|
779 |
+
unbounded_ranges[s] = ValueRanges(lower, math.inf) # type: ignore[index]
|
780 |
+
ranges = {**ranges, **unbounded_ranges}
|
781 |
+
|
782 |
+
return sympy_interp(SymPyValueRangeAnalysis, ranges, expr)
|
venv/lib/python3.10/site-packages/torch/utils/backcompat/__init__.py
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from torch._C import _set_backcompat_broadcast_warn
|
2 |
+
from torch._C import _get_backcompat_broadcast_warn
|
3 |
+
from torch._C import _set_backcompat_keepdim_warn
|
4 |
+
from torch._C import _get_backcompat_keepdim_warn
|
5 |
+
|
6 |
+
|
7 |
+
class Warning:
|
8 |
+
def __init__(self, setter, getter):
|
9 |
+
self.setter = setter
|
10 |
+
self.getter = getter
|
11 |
+
|
12 |
+
def set_enabled(self, value):
|
13 |
+
self.setter(value)
|
14 |
+
|
15 |
+
def get_enabled(self):
|
16 |
+
return self.getter()
|
17 |
+
|
18 |
+
enabled = property(get_enabled, set_enabled)
|
19 |
+
|
20 |
+
broadcast_warning = Warning(_set_backcompat_broadcast_warn, _get_backcompat_broadcast_warn)
|
21 |
+
keepdim_warning = Warning(_set_backcompat_keepdim_warn, _get_backcompat_keepdim_warn)
|
venv/lib/python3.10/site-packages/torch/utils/backcompat/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.09 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (503 Bytes). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__init__.py
ADDED
File without changes
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (198 Bytes). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/blas_compare_setup.cpython-310.pyc
ADDED
Binary file (4.06 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/compare.cpython-310.pyc
ADDED
Binary file (3.53 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/fuzzer.cpython-310.pyc
ADDED
Binary file (2.79 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/op_benchmark.cpython-310.pyc
ADDED
Binary file (4.08 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/simple_timeit.cpython-310.pyc
ADDED
Binary file (892 Bytes). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/spectral_ops_fuzz_test.cpython-310.pyc
ADDED
Binary file (4.49 kB). View file
|
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/blas_compare_setup.py
ADDED
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import collections
|
2 |
+
import os
|
3 |
+
import shutil
|
4 |
+
import subprocess
|
5 |
+
|
6 |
+
try:
|
7 |
+
# no type stub for conda command line interface
|
8 |
+
import conda.cli.python_api # type: ignore[import]
|
9 |
+
from conda.cli.python_api import Commands as conda_commands
|
10 |
+
except ImportError:
|
11 |
+
# blas_compare.py will fail to import these when it's inside a conda env,
|
12 |
+
# but that's fine as it only wants the constants.
|
13 |
+
pass
|
14 |
+
|
15 |
+
|
16 |
+
WORKING_ROOT = "/tmp/pytorch_blas_compare_environments"
|
17 |
+
MKL_2020_3 = "mkl_2020_3"
|
18 |
+
MKL_2020_0 = "mkl_2020_0"
|
19 |
+
OPEN_BLAS = "open_blas"
|
20 |
+
EIGEN = "eigen"
|
21 |
+
|
22 |
+
|
23 |
+
GENERIC_ENV_VARS = ("USE_CUDA=0", "USE_ROCM=0")
|
24 |
+
BASE_PKG_DEPS = (
|
25 |
+
"cmake",
|
26 |
+
"hypothesis",
|
27 |
+
"ninja",
|
28 |
+
"numpy",
|
29 |
+
"pyyaml",
|
30 |
+
"setuptools",
|
31 |
+
"typing_extensions",
|
32 |
+
)
|
33 |
+
|
34 |
+
|
35 |
+
SubEnvSpec = collections.namedtuple(
|
36 |
+
"SubEnvSpec", (
|
37 |
+
"generic_installs",
|
38 |
+
"special_installs",
|
39 |
+
"environment_variables",
|
40 |
+
|
41 |
+
# Validate install.
|
42 |
+
"expected_blas_symbols",
|
43 |
+
"expected_mkl_version",
|
44 |
+
))
|
45 |
+
|
46 |
+
|
47 |
+
SUB_ENVS = {
|
48 |
+
MKL_2020_3: SubEnvSpec(
|
49 |
+
generic_installs=(),
|
50 |
+
special_installs=("intel", ("mkl=2020.3", "mkl-include=2020.3")),
|
51 |
+
environment_variables=("BLAS=MKL",) + GENERIC_ENV_VARS,
|
52 |
+
expected_blas_symbols=("mkl_blas_sgemm",),
|
53 |
+
expected_mkl_version="2020.0.3",
|
54 |
+
),
|
55 |
+
|
56 |
+
MKL_2020_0: SubEnvSpec(
|
57 |
+
generic_installs=(),
|
58 |
+
special_installs=("intel", ("mkl=2020.0", "mkl-include=2020.0")),
|
59 |
+
environment_variables=("BLAS=MKL",) + GENERIC_ENV_VARS,
|
60 |
+
expected_blas_symbols=("mkl_blas_sgemm",),
|
61 |
+
expected_mkl_version="2020.0.0",
|
62 |
+
),
|
63 |
+
|
64 |
+
OPEN_BLAS: SubEnvSpec(
|
65 |
+
generic_installs=("openblas",),
|
66 |
+
special_installs=(),
|
67 |
+
environment_variables=("BLAS=OpenBLAS",) + GENERIC_ENV_VARS,
|
68 |
+
expected_blas_symbols=("exec_blas",),
|
69 |
+
expected_mkl_version=None,
|
70 |
+
),
|
71 |
+
|
72 |
+
# EIGEN: SubEnvSpec(
|
73 |
+
# generic_installs=(),
|
74 |
+
# special_installs=(),
|
75 |
+
# environment_variables=("BLAS=Eigen",) + GENERIC_ENV_VARS,
|
76 |
+
# expected_blas_symbols=(),
|
77 |
+
# ),
|
78 |
+
}
|
79 |
+
|
80 |
+
|
81 |
+
def conda_run(*args):
|
82 |
+
"""Convenience method."""
|
83 |
+
stdout, stderr, retcode = conda.cli.python_api.run_command(*args)
|
84 |
+
if retcode:
|
85 |
+
raise OSError(f"conda error: {str(args)} retcode: {retcode}\n{stderr}")
|
86 |
+
|
87 |
+
return stdout
|
88 |
+
|
89 |
+
|
90 |
+
def main():
|
91 |
+
if os.path.exists(WORKING_ROOT):
|
92 |
+
print("Cleaning: removing old working root.")
|
93 |
+
shutil.rmtree(WORKING_ROOT)
|
94 |
+
os.makedirs(WORKING_ROOT)
|
95 |
+
|
96 |
+
git_root = subprocess.check_output(
|
97 |
+
"git rev-parse --show-toplevel",
|
98 |
+
shell=True,
|
99 |
+
cwd=os.path.dirname(os.path.realpath(__file__))
|
100 |
+
).decode("utf-8").strip()
|
101 |
+
|
102 |
+
for env_name, env_spec in SUB_ENVS.items():
|
103 |
+
env_path = os.path.join(WORKING_ROOT, env_name)
|
104 |
+
print(f"Creating env: {env_name}: ({env_path})")
|
105 |
+
conda_run(
|
106 |
+
conda_commands.CREATE,
|
107 |
+
"--no-default-packages",
|
108 |
+
"--prefix", env_path,
|
109 |
+
"python=3",
|
110 |
+
)
|
111 |
+
|
112 |
+
print("Testing that env can be activated:")
|
113 |
+
base_source = subprocess.run(
|
114 |
+
f"source activate {env_path}",
|
115 |
+
shell=True,
|
116 |
+
capture_output=True,
|
117 |
+
check=False,
|
118 |
+
)
|
119 |
+
if base_source.returncode:
|
120 |
+
raise OSError(
|
121 |
+
"Failed to source base environment:\n"
|
122 |
+
f" stdout: {base_source.stdout.decode('utf-8')}\n"
|
123 |
+
f" stderr: {base_source.stderr.decode('utf-8')}"
|
124 |
+
)
|
125 |
+
|
126 |
+
print("Installing packages:")
|
127 |
+
conda_run(
|
128 |
+
conda_commands.INSTALL,
|
129 |
+
"--prefix", env_path,
|
130 |
+
*(BASE_PKG_DEPS + env_spec.generic_installs)
|
131 |
+
)
|
132 |
+
|
133 |
+
if env_spec.special_installs:
|
134 |
+
channel, channel_deps = env_spec.special_installs
|
135 |
+
print(f"Installing packages from channel: {channel}")
|
136 |
+
conda_run(
|
137 |
+
conda_commands.INSTALL,
|
138 |
+
"--prefix", env_path,
|
139 |
+
"-c", channel, *channel_deps
|
140 |
+
)
|
141 |
+
|
142 |
+
if env_spec.environment_variables:
|
143 |
+
print("Setting environment variables.")
|
144 |
+
|
145 |
+
# This does not appear to be possible using the python API.
|
146 |
+
env_set = subprocess.run(
|
147 |
+
f"source activate {env_path} && "
|
148 |
+
f"conda env config vars set {' '.join(env_spec.environment_variables)}",
|
149 |
+
shell=True,
|
150 |
+
capture_output=True,
|
151 |
+
check=False,
|
152 |
+
)
|
153 |
+
if env_set.returncode:
|
154 |
+
raise OSError(
|
155 |
+
"Failed to set environment variables:\n"
|
156 |
+
f" stdout: {env_set.stdout.decode('utf-8')}\n"
|
157 |
+
f" stderr: {env_set.stderr.decode('utf-8')}"
|
158 |
+
)
|
159 |
+
|
160 |
+
# Check that they were actually set correctly.
|
161 |
+
actual_env_vars = subprocess.run(
|
162 |
+
f"source activate {env_path} && env",
|
163 |
+
shell=True,
|
164 |
+
capture_output=True,
|
165 |
+
check=True,
|
166 |
+
).stdout.decode("utf-8").strip().splitlines()
|
167 |
+
for e in env_spec.environment_variables:
|
168 |
+
assert e in actual_env_vars, f"{e} not in envs"
|
169 |
+
|
170 |
+
print(f"Building PyTorch for env: `{env_name}`")
|
171 |
+
# We have to re-run during each build to pick up the new
|
172 |
+
# build config settings.
|
173 |
+
build_run = subprocess.run(
|
174 |
+
f"source activate {env_path} && "
|
175 |
+
f"cd {git_root} && "
|
176 |
+
"python setup.py install --cmake",
|
177 |
+
shell=True,
|
178 |
+
capture_output=True,
|
179 |
+
check=True,
|
180 |
+
)
|
181 |
+
|
182 |
+
print("Checking configuration:")
|
183 |
+
check_run = subprocess.run(
|
184 |
+
# Shameless abuse of `python -c ...`
|
185 |
+
f"source activate {env_path} && "
|
186 |
+
"python -c \""
|
187 |
+
"import torch;"
|
188 |
+
"from torch.utils.benchmark import Timer;"
|
189 |
+
"print(torch.__config__.show());"
|
190 |
+
"setup = 'x=torch.ones((128, 128));y=torch.ones((128, 128))';"
|
191 |
+
"counts = Timer('torch.mm(x, y)', setup).collect_callgrind(collect_baseline=False);"
|
192 |
+
"stats = counts.as_standardized().stats(inclusive=True);"
|
193 |
+
"print(stats.filter(lambda l: 'blas' in l.lower()))\"",
|
194 |
+
shell=True,
|
195 |
+
capture_output=True,
|
196 |
+
check=False,
|
197 |
+
)
|
198 |
+
if check_run.returncode:
|
199 |
+
raise OSError(
|
200 |
+
"Failed to set environment variables:\n"
|
201 |
+
f" stdout: {check_run.stdout.decode('utf-8')}\n"
|
202 |
+
f" stderr: {check_run.stderr.decode('utf-8')}"
|
203 |
+
)
|
204 |
+
check_run_stdout = check_run.stdout.decode('utf-8')
|
205 |
+
print(check_run_stdout)
|
206 |
+
|
207 |
+
for e in env_spec.environment_variables:
|
208 |
+
if "BLAS" in e:
|
209 |
+
assert e in check_run_stdout, f"PyTorch build did not respect `BLAS=...`: {e}"
|
210 |
+
|
211 |
+
for s in env_spec.expected_blas_symbols:
|
212 |
+
assert s in check_run_stdout
|
213 |
+
|
214 |
+
if env_spec.expected_mkl_version is not None:
|
215 |
+
assert f"- Intel(R) Math Kernel Library Version {env_spec.expected_mkl_version}" in check_run_stdout
|
216 |
+
|
217 |
+
print(f"Build complete: {env_name}")
|
218 |
+
|
219 |
+
|
220 |
+
if __name__ == "__main__":
|
221 |
+
main()
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/compare.py
ADDED
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Example of Timer and Compare APIs:
|
2 |
+
|
3 |
+
$ python -m examples.compare
|
4 |
+
"""
|
5 |
+
|
6 |
+
import pickle
|
7 |
+
import sys
|
8 |
+
import time
|
9 |
+
|
10 |
+
import torch
|
11 |
+
|
12 |
+
import torch.utils.benchmark as benchmark_utils
|
13 |
+
|
14 |
+
|
15 |
+
class FauxTorch:
|
16 |
+
"""Emulate different versions of pytorch.
|
17 |
+
|
18 |
+
In normal circumstances this would be done with multiple processes
|
19 |
+
writing serialized measurements, but this simplifies that model to
|
20 |
+
make the example clearer.
|
21 |
+
"""
|
22 |
+
def __init__(self, real_torch, extra_ns_per_element):
|
23 |
+
self._real_torch = real_torch
|
24 |
+
self._extra_ns_per_element = extra_ns_per_element
|
25 |
+
|
26 |
+
def extra_overhead(self, result):
|
27 |
+
# time.sleep has a ~65 us overhead, so only fake a
|
28 |
+
# per-element overhead if numel is large enough.
|
29 |
+
numel = int(result.numel())
|
30 |
+
if numel > 5000:
|
31 |
+
time.sleep(numel * self._extra_ns_per_element * 1e-9)
|
32 |
+
return result
|
33 |
+
|
34 |
+
def add(self, *args, **kwargs):
|
35 |
+
return self.extra_overhead(self._real_torch.add(*args, **kwargs))
|
36 |
+
|
37 |
+
def mul(self, *args, **kwargs):
|
38 |
+
return self.extra_overhead(self._real_torch.mul(*args, **kwargs))
|
39 |
+
|
40 |
+
def cat(self, *args, **kwargs):
|
41 |
+
return self.extra_overhead(self._real_torch.cat(*args, **kwargs))
|
42 |
+
|
43 |
+
def matmul(self, *args, **kwargs):
|
44 |
+
return self.extra_overhead(self._real_torch.matmul(*args, **kwargs))
|
45 |
+
|
46 |
+
|
47 |
+
def main():
|
48 |
+
tasks = [
|
49 |
+
("add", "add", "torch.add(x, y)"),
|
50 |
+
("add", "add (extra +0)", "torch.add(x, y + zero)"),
|
51 |
+
]
|
52 |
+
|
53 |
+
serialized_results = []
|
54 |
+
repeats = 2
|
55 |
+
timers = [
|
56 |
+
benchmark_utils.Timer(
|
57 |
+
stmt=stmt,
|
58 |
+
globals={
|
59 |
+
"torch": torch if branch == "master" else FauxTorch(torch, overhead_ns),
|
60 |
+
"x": torch.ones((size, 4)),
|
61 |
+
"y": torch.ones((1, 4)),
|
62 |
+
"zero": torch.zeros(()),
|
63 |
+
},
|
64 |
+
label=label,
|
65 |
+
sub_label=sub_label,
|
66 |
+
description=f"size: {size}",
|
67 |
+
env=branch,
|
68 |
+
num_threads=num_threads,
|
69 |
+
)
|
70 |
+
for branch, overhead_ns in [("master", None), ("my_branch", 1), ("severe_regression", 5)]
|
71 |
+
for label, sub_label, stmt in tasks
|
72 |
+
for size in [1, 10, 100, 1000, 10000, 50000]
|
73 |
+
for num_threads in [1, 4]
|
74 |
+
]
|
75 |
+
|
76 |
+
for i, timer in enumerate(timers * repeats):
|
77 |
+
serialized_results.append(pickle.dumps(
|
78 |
+
timer.blocked_autorange(min_run_time=0.05)
|
79 |
+
))
|
80 |
+
print(f"\r{i + 1} / {len(timers) * repeats}", end="")
|
81 |
+
sys.stdout.flush()
|
82 |
+
print()
|
83 |
+
|
84 |
+
comparison = benchmark_utils.Compare([
|
85 |
+
pickle.loads(i) for i in serialized_results
|
86 |
+
])
|
87 |
+
|
88 |
+
print("== Unformatted " + "=" * 80 + "\n" + "/" * 95 + "\n")
|
89 |
+
comparison.print()
|
90 |
+
|
91 |
+
print("== Formatted " + "=" * 80 + "\n" + "/" * 93 + "\n")
|
92 |
+
comparison.trim_significant_figures()
|
93 |
+
comparison.colorize()
|
94 |
+
comparison.print()
|
95 |
+
|
96 |
+
|
97 |
+
if __name__ == "__main__":
|
98 |
+
main()
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/fuzzer.py
ADDED
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Example of the Timer and Fuzzer APIs:
|
2 |
+
|
3 |
+
$ python -m examples.fuzzer
|
4 |
+
"""
|
5 |
+
|
6 |
+
import sys
|
7 |
+
|
8 |
+
import torch.utils.benchmark as benchmark_utils
|
9 |
+
|
10 |
+
|
11 |
+
def main():
    """Fuzz random tensor shapes/layouts and benchmark elementwise add.

    Draws 250 (x, y) tensor pairs from a Fuzzer, times ``x + y`` for each,
    then prints the 15 fastest and 15 slowest configs (ns per element).
    """
    fuzzer = benchmark_utils.Fuzzer(
        parameters=[
            # Three log-uniform size parameters k0..k2; "d" picks 2-D vs 3-D.
            [
                benchmark_utils.FuzzedParameter(
                    name=f"k{axis}",
                    minval=16,
                    maxval=16 * 1024,
                    distribution="loguniform",
                )
                for axis in range(3)
            ],
            benchmark_utils.FuzzedParameter(
                name="d",
                distribution={2: 0.6, 3: 0.4},
            ),
        ],
        tensors=[
            # x and y share the same size spec so the add never broadcasts.
            [
                benchmark_utils.FuzzedTensor(
                    name=tensor_name,
                    size=("k0", "k1", "k2"),
                    dim_parameter="d",
                    probability_contiguous=0.75,
                    min_elements=64 * 1024,
                    max_elements=128 * 1024,
                )
                for tensor_name in ("x", "y")
            ],
        ],
        seed=0,
    )

    num_samples = 250
    results = []
    for sample_idx, (tensors, props, _) in enumerate(fuzzer.take(n=num_samples)):
        x = tensors["x"]
        y = tensors["y"]
        x_order = str(props["x"]["order"])
        y_order = str(props["y"]["order"])
        shape = ", ".join(tuple(f'{dim:>4}' for dim in x.shape))

        # Human-readable row describing this config; stored on the Timer so
        # it shows up next to the timing numbers in the final report.
        description = "".join([
            f"{x.numel():>7} | {shape:<16} | ",
            f"{'contiguous' if x.is_contiguous() else x_order:<12} | ",
            f"{'contiguous' if y.is_contiguous() else y_order:<12} | ",
        ])

        measurement = benchmark_utils.Timer(
            stmt="x + y",
            globals=tensors,
            description=description,
        ).blocked_autorange(min_run_time=0.1)
        measurement.metadata = {"numel": x.numel()}
        results.append(measurement)

        # Single-line progress indicator.
        print(f"\r{sample_idx + 1} / {num_samples}", end="")
        sys.stdout.flush()
    print()

    # More string munging to make pretty output.
    print(f"Average attempts per valid config: {1. / (1. - fuzzer.rejection_rate):.1f}")

    def ns_per_element(m):
        # Normalize by element count so differently sized configs compare fairly.
        return m.median / m.metadata["numel"]

    results.sort(key=ns_per_element)

    template = f"{{:>6}}{' ' * 19}Size Shape{' ' * 13}X order Y order\n{'-' * 80}"
    print(template.format("Best:"))
    for m in results[:15]:
        print(f"{ns_per_element(m) * 1e9:>4.1f} ns / element {m.description}")

    print("\n" + template.format("Worst:"))
    for m in results[-15:]:
        print(f"{ns_per_element(m) * 1e9:>4.1f} ns / element {m.description}")


if __name__ == "__main__":
    main()
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/op_benchmark.py
ADDED
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Example use of Timer and op fuzzers to measure kernel performance.
|
2 |
+
|
3 |
+
$ python -m examples.op_benchmark
|
4 |
+
"""
|
5 |
+
|
6 |
+
import numpy as np
|
7 |
+
import torch
|
8 |
+
|
9 |
+
from torch.utils.benchmark import Timer
|
10 |
+
from torch.utils.benchmark.op_fuzzers.binary import BinaryOpFuzzer
|
11 |
+
from torch.utils.benchmark.op_fuzzers.unary import UnaryOpFuzzer
|
12 |
+
|
13 |
+
|
14 |
+
# Minimum runtime (seconds) passed to Timer.blocked_autorange for each measurement.
_MEASURE_TIME = 1.0
|
15 |
+
|
16 |
+
|
17 |
+
def assert_dicts_equal(dict_0, dict_1):
    """Assert that two dicts have equal keys and (elementwise) equal values.

    Builtin dict comparison will not compare numpy arrays.
    e.g.
        x = {"a": np.ones((2, 1))}
        x == x  # Raises ValueError

    Raises:
        AssertionError: if the key sets differ, or any non-"dtype" value differs.
    """
    # Fix: the original compared dict_0's keys against themselves
    # (dict_0 vs. dict_0), so a key mismatch was never detected.
    assert set(dict_0.keys()) == set(dict_1.keys())
    # "dtype" is excluded: the float and int fuzzers are expected to agree on
    # everything (sizes, strides) except the element dtype.
    assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype")
|
25 |
+
|
26 |
+
|
27 |
+
def run(n, stmt, fuzzer_cls):
    """Benchmark `stmt` on n fuzzed configs in float32 and int32, then report.

    Args:
        n: number of fuzzed tensor configurations to measure.
        stmt: Python statement to time (e.g. "x + y").
        fuzzer_cls: op fuzzer class; instantiated twice with the same seed but
            different dtypes so both dtype runs see identical shapes/strides.

    Prints a table of the 10 configs where float and int timings agree most
    and the 10 where they differ most (sorted by relative difference).
    """
    float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
    int_iter = fuzzer_cls(seed=0, dtype=torch.int32).take(n)
    raw_results = []
    for i, (float_values, int_values) in enumerate(zip(float_iter, int_iter)):
        float_tensors, float_tensor_params, float_params = float_values
        int_tensors, int_tensor_params, int_params = int_values

        # This benchmark assumes that the two fuzzers generate identically
        # sized and strided Tensors, since the same seed is used.
        assert_dicts_equal(float_params, int_params)
        assert_dicts_equal(float_tensor_params["x"], int_tensor_params["x"])

        # Time the same statement once per dtype via a generator expression.
        float_measurement, int_measurement = (
            Timer(
                stmt,
                globals=tensors,
            ).blocked_autorange(min_run_time=_MEASURE_TIME)
            for tensors in (float_tensors, int_tensors)
        )

        # Build a (name, shape, order, steps) description per input tensor.
        descriptions = []
        for name in float_tensors:
            # Render power-of-two dims as "2 ** k" for readability.
            shape_str = "(" + ", ".join([
                f"2 ** {int(np.log2(i))}"
                if 2 ** int(np.log2(i)) == i and i > 1
                else str(i)
                for i in float_tensors[name].shape
            ]) + ")"
            order = float_tensor_params[name]["order"]
            # Omit the order string when it is the trivial (0, 1, ...) order.
            order_str = ("" if all(order == np.arange(len(order))) else str(tuple(order)))
            steps = float_tensor_params[name]["steps"]
            # Omit steps when every step is 1 (i.e. no striding was applied).
            steps_str = str(steps) if sum(steps) > len(steps) else ""
            descriptions.append((name, shape_str, order_str, steps_str))
        raw_results.append((float_measurement, int_measurement, descriptions))

        # Single-line progress indicator.
        print(f"\r{i + 1} / {n}", end="")
    print()

    # Second pass: convert to microseconds, compute relative float/int
    # difference, and track column widths for aligned printing.
    parsed_results, name_len, shape_len, order_len, steps_len = [], 0, 0, 0, 0
    for float_measurement, int_measurement, descriptions in raw_results:
        t_float = float_measurement.median * 1e6
        t_int = int_measurement.median * 1e6
        rel_diff = abs(t_float - t_int) / (t_float + t_int) * 2
        parsed_results.append((t_float, t_int, rel_diff, descriptions))
        for name, shape, order, steps in descriptions:
            name_len = max(name_len, len(name))
            shape_len = max(shape_len, len(shape))
            order_len = max(order_len, len(order))
            steps_len = max(steps_len, len(steps))

    # Sort by relative difference so the extremes land at each end.
    parsed_results.sort(key=lambda x: x[2])

    print(f"stmt: {stmt}")
    print(f" diff faster{'':>17}{' ' * name_len} ", end="")
    print(f"{'shape'.ljust(shape_len)}{'':>16}{'order'.ljust(order_len)}", end="")
    print(f" steps\n{'-' * 100}")
    # Print the 10 most-similar configs, a "..." spacer, then the 10 most-different.
    for results, spacer in [(parsed_results[:10], "..."), (parsed_results[-10:], "")]:
        for t_float, t_int, rel_diff, descriptions in results:
            # Timing summary only on the first line; blank padding for the rest.
            time_str = [f"{rel_diff * 100:>4.1f}% {'int' if t_int < t_float else 'float':<20}"]
            time_str.extend(["".ljust(len(time_str[0])) for _ in descriptions[:-1]])
            for t_str, (name, shape, order, steps) in zip(time_str, descriptions):
                name = f"{name}:".ljust(name_len + 1)
                shape = shape.ljust(shape_len + 10)
                order = order.ljust(order_len)
                print(f"{t_str} {name} {shape}| {order} | {steps}")
            print(spacer)
|
94 |
+
|
95 |
+
|
96 |
+
def main():
    """Run the example unary and binary op benchmarks."""
    configs = (
        ("torch.median(x, dim=0)", UnaryOpFuzzer),
        ("torch.square(x)", UnaryOpFuzzer),
        ("x + y", BinaryOpFuzzer),
    )
    for stmt, fuzzer_cls in configs:
        run(n=100, stmt=stmt, fuzzer_cls=fuzzer_cls)


if __name__ == "__main__":
    main()
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/simple_timeit.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Trivial use of Timer API:
|
2 |
+
|
3 |
+
$ python -m examples.simple_timeit
|
4 |
+
"""
|
5 |
+
|
6 |
+
import torch
|
7 |
+
|
8 |
+
import torch.utils.benchmark as benchmark_utils
|
9 |
+
|
10 |
+
|
11 |
+
def main():
    """Time a small broadcasting add three times with both Timer APIs."""
    bench_timer = benchmark_utils.Timer(
        stmt="x + y",
        globals={"x": torch.ones((4, 8)), "y": torch.ones((1, 8))},
        label="Broadcasting add (4x8)",
    )

    # Repeat to show run-to-run variation of both measurement styles.
    for run_idx in range(3):
        print(f"Run: {run_idx}\n{'-' * 40}")
        print(f"timeit:\n{bench_timer.timeit(10000)}\n")
        print(f"autorange:\n{bench_timer.blocked_autorange()}\n\n")


if __name__ == "__main__":
    main()
|
venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/spectral_ops_fuzz_test.py
ADDED
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Microbenchmarks for the torch.fft module"""
|
2 |
+
from argparse import ArgumentParser
|
3 |
+
from collections import namedtuple
|
4 |
+
from collections.abc import Iterable
|
5 |
+
|
6 |
+
import torch
|
7 |
+
import torch.fft
|
8 |
+
from torch.utils import benchmark
|
9 |
+
from torch.utils.benchmark.op_fuzzers.spectral import SpectralOpFuzzer
|
10 |
+
|
11 |
+
|
12 |
+
def _dim_options(ndim):
|
13 |
+
if ndim == 1:
|
14 |
+
return [None]
|
15 |
+
elif ndim == 2:
|
16 |
+
return [0, 1, None]
|
17 |
+
elif ndim == 3:
|
18 |
+
return [0, 1, 2, (0, 1), (0, 2), None]
|
19 |
+
raise ValueError(f"Expected ndim in range 1-3, got {ndim}")
|
20 |
+
|
21 |
+
|
22 |
+
def run_benchmark(name: str, function: object, dtype: torch.dtype, seed: int, device: str, samples: int,
                  probability_regular: float):
    """Benchmark one spectral op over fuzzed input shapes.

    Args:
        name: benchmark name (used in labels and result metadata).
        function: the spectral op to time, called as ``function(x, dim=dim)``.
        dtype: input tensor dtype for the fuzzer.
        seed: RNG seed for the fuzzer.
        device: 'cpu' or 'cuda'.
        samples: number of fuzzed configurations to draw.
        probability_regular: fuzzer probability of FFT-friendly sizes.

    Returns:
        A list of Measurement objects, each with a `.metadata` dict attached
        describing the config (name, device, dim, shape, plus tensor params).
    """
    cuda = device == 'cuda'
    spectral_fuzzer = SpectralOpFuzzer(seed=seed, dtype=dtype, cuda=cuda,
                                       probability_regular=probability_regular)
    results = []
    for tensors, tensor_params, params in spectral_fuzzer.take(samples):
        # Only the first `ndim` size parameters are meaningful for this sample.
        shape = [params['k0'], params['k1'], params['k2']][:params['ndim']]
        str_shape = ' x '.join([f"{s:<4}" for s in shape])
        sub_label = f"{str_shape} {'' if tensor_params['x']['is_contiguous'] else '(discontiguous)'}"
        for dim in _dim_options(params['ndim']):
            # Thread sweep only makes sense on CPU; CUDA runs single-config.
            for nthreads in (1, 4, 16) if not cuda else (1,):
                measurement = benchmark.Timer(
                    stmt='func(x, dim=dim)',
                    globals={'func': function, 'x': tensors['x'], 'dim': dim},
                    label=f"{name}_{device}",
                    sub_label=sub_label,
                    description=f"dim={dim}",
                    num_threads=nthreads,
                ).blocked_autorange(min_run_time=1)
                # Attach config metadata so _output_csv can reconstruct rows.
                measurement.metadata = {
                    'name': name,
                    'device': device,
                    'dim': dim,
                    'shape': shape,
                }
                measurement.metadata.update(tensor_params['x'])
                results.append(measurement)
    return results
|
51 |
+
|
52 |
+
|
53 |
+
# (name, callable, input dtype) describing one spectral-op benchmark.
Benchmark = namedtuple('Benchmark', ['name', 'function', 'dtype'])
# The full set of ops this script can benchmark.
BENCHMARKS = [
    Benchmark('fft_real', torch.fft.fftn, torch.float32),
    Benchmark('fft_complex', torch.fft.fftn, torch.complex64),
    Benchmark('ifft', torch.fft.ifftn, torch.complex64),
    Benchmark('rfft', torch.fft.rfftn, torch.float32),
    Benchmark('irfft', torch.fft.irfftn, torch.complex64),
]
# Lookup table and choice lists consumed by the CLI argument parser.
BENCHMARK_MAP = {b.name: b for b in BENCHMARKS}
BENCHMARK_NAMES = [b.name for b in BENCHMARKS]
DEVICE_NAMES = ['cpu', 'cuda']
|
64 |
+
|
65 |
+
def _output_csv(file, results):
|
66 |
+
file.write('benchmark,device,num_threads,numel,shape,contiguous,dim,mean (us),median (us),iqr (us)\n')
|
67 |
+
for measurement in results:
|
68 |
+
metadata = measurement.metadata
|
69 |
+
device, dim, shape, name, numel, contiguous = (
|
70 |
+
metadata['device'], metadata['dim'], metadata['shape'],
|
71 |
+
metadata['name'], metadata['numel'], metadata['is_contiguous'])
|
72 |
+
|
73 |
+
if isinstance(dim, Iterable):
|
74 |
+
dim_str = '-'.join(str(d) for d in dim)
|
75 |
+
else:
|
76 |
+
dim_str = str(dim)
|
77 |
+
shape_str = 'x'.join(str(s) for s in shape)
|
78 |
+
|
79 |
+
print(name, device, measurement.task_spec.num_threads, numel, shape_str, contiguous, dim_str, # type: ignore[possibly-undefined]
|
80 |
+
measurement.mean * 1e6, measurement.median * 1e6, measurement.iqr * 1e6,
|
81 |
+
sep=',', file=file)
|
82 |
+
|
83 |
+
|
84 |
+
if __name__ == '__main__':
    # CLI: choose devices/benchmarks, sample count, seed, and optional CSV output.
    parser = ArgumentParser(description=__doc__)
    parser.add_argument('--device', type=str, choices=DEVICE_NAMES, nargs='+', default=DEVICE_NAMES)
    parser.add_argument('--bench', type=str, choices=BENCHMARK_NAMES, nargs='+', default=BENCHMARK_NAMES)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--samples', type=int, default=10)
    parser.add_argument('--probability-regular', '--probability_regular', type=float, default=1.0)
    parser.add_argument('-o', '--output', type=str)
    args = parser.parse_args()

    # Run every (device, benchmark) combination, accumulating measurements.
    num_benchmarks = len(args.device) * len(args.bench)
    i = 0
    results = []
    for device in args.device:
        for bench in (BENCHMARK_MAP[b] for b in args.bench):
            results += run_benchmark(
                name=bench.name, function=bench.function, dtype=bench.dtype,
                seed=args.seed, device=device, samples=args.samples,
                probability_regular=args.probability_regular)
            i += 1
            print(f'Completed {bench.name} benchmark on {device} ({i} of {num_benchmarks})')

    # Optionally dump raw numbers as CSV before printing the comparison table.
    if args.output is not None:
        with open(args.output, 'w') as f:
            _output_csv(f, results)

    compare = benchmark.Compare(results)
    compare.trim_significant_figures()
    compare.colorize()
    compare.print()
|