Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/gen_example.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/logging.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__init__.py +52 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/autograd_function.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nonlocal_variables.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_value_example.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_contains.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/model_attr_mutation.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/optional_input.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/scalar_output.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/torch_sym_min.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/assume_constant_result.py +24 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_class_method.py +46 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_value_example.py +30 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_constructor.py +19 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_slicing.py +20 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_view.py +22 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/fn_with_kwargs.py +32 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py +21 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py +25 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py +29 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/static_for_loop.py +22 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/torch_sym_min.py +17 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/node_metadata.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/proxy_value.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/node_metadata.py +32 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/proxy_value.py +41 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__init__.py +1 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constants_pass.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_set_grad_with_hop_pass.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_sym_size_ops_pass.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py +231 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/collect_tracepoints_pass.py +66 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/functionalize_side_effectful_ops_pass.py +94 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/lift_constants_pass.py +248 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py +26 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_set_grad_with_hop_pass.py +141 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_sym_size_ops_pass.py +18 -0
- llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_view_ops_with_view_copy_ops_pass.py +71 -0
- llmeval-env/lib/python3.10/site-packages/torch/_functorch/__init__.py +5 -0
llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/__init__.cpython-310.pyc ADDED (binary, 189 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc ADDED (binary, 5.46 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/gen_example.cpython-310.pyc ADDED (binary, 838 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/_export/db/__pycache__/logging.cpython-310.pyc ADDED (binary, 326 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__init__.py ADDED
@@ -0,0 +1,52 @@
+import glob
+import importlib
+from os.path import basename, dirname, isfile, join
+
+import torch
+from torch._export.db.case import (
+    _EXAMPLE_CASES,
+    _EXAMPLE_CONFLICT_CASES,
+    _EXAMPLE_REWRITE_CASES,
+    SupportLevel,
+)
+
+
+modules = glob.glob(join(dirname(__file__), "*.py"))
+__all__ = [
+    basename(f)[:-3] for f in modules if isfile(f) and not f.endswith("__init__.py")
+]
+
+# Import all modules in the current directory.
+from . import *  # noqa: F403
+
+
+def all_examples():
+    return _EXAMPLE_CASES
+
+
+if len(_EXAMPLE_CONFLICT_CASES) > 0:
+
+    def get_name(case):
+        model = case.model
+        if isinstance(model, torch.nn.Module):
+            model = type(model)
+        return model.__name__
+
+    msg = "Error on conflict export case name.\n"
+    for case_name, cases in _EXAMPLE_CONFLICT_CASES.items():
+        msg += f"Case name {case_name} is associated with multiple cases:\n "
+        msg += f"[{','.join(map(get_name, cases))}]\n"
+
+    raise RuntimeError(msg)
+
+
+def filter_examples_by_support_level(support_level: SupportLevel):
+    return {
+        key: val
+        for key, val in all_examples().items()
+        if val.support_level == support_level
+    }
+
+
+def get_rewrite_cases(case):
+    return _EXAMPLE_REWRITE_CASES.get(case.name, [])
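As a quick sketch of how this registry might be consumed (importing the package triggers case registration; the `SupportLevel` members used below are the ones that appear elsewhere in this diff, and the exact shape of the case objects is otherwise an assumption):

from torch._export.db.case import SupportLevel
from torch._export.db.examples import all_examples, filter_examples_by_support_level

cases = all_examples()  # dict mapping case name -> registered export case
todo = filter_examples_by_support_level(SupportLevel.NOT_SUPPORTED_YET)
print(f"{len(todo)}/{len(cases)} cases are not supported yet")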
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/autograd_function.cpython-310.pyc ADDED (binary, 1.31 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nonlocal_variables.cpython-310.pyc ADDED (binary, 2.64 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_value_example.cpython-310.pyc ADDED (binary, 1.34 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_contains.cpython-310.pyc ADDED (binary, 1.11 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/model_attr_mutation.cpython-310.pyc ADDED (binary, 1.26 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/optional_input.cpython-310.pyc ADDED (binary, 873 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/scalar_output.cpython-310.pyc ADDED (binary, 1.14 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/torch_sym_min.cpython-310.pyc ADDED (binary, 859 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/assume_constant_result.py ADDED
@@ -0,0 +1,24 @@
+import torch
+import torch._dynamo as torchdynamo
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2), torch.tensor(4)),
+    tags={"torch.escape-hatch"},
+)
+class AssumeConstantResult(torch.nn.Module):
+    """
+    Applying the `assume_constant_result` decorator to burn the result of non-traceable code in as a constant.
+    """
+
+    def __init__(self):
+        super().__init__()
+
+    @torchdynamo.assume_constant_result
+    def get_item(self, y):
+        return y.int().item()
+
+    def forward(self, x, y):
+        return x[: self.get_item(y)]
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_class_method.py ADDED
@@ -0,0 +1,46 @@
+import torch
+
+from torch._export.db.case import export_case
+from functorch.experimental.control_flow import cond
+
+
+class MySubModule(torch.nn.Module):
+    def foo(self, x):
+        return x.cos()
+
+    def forward(self, x):
+        return self.foo(x)
+
+
+@export_case(
+    example_inputs=(torch.ones(3),),
+    tags={
+        "torch.cond",
+        "torch.dynamic-shape",
+    },
+)
+class CondBranchClassMethod(torch.nn.Module):
+    """
+    The branch functions (`true_fn` and `false_fn`) passed to cond() must follow these rules:
+    - both branches must take the same args, which must also match the branch args passed to cond.
+    - both branches must return a single tensor
+    - the returned tensor must have the same tensor metadata, e.g. shape and dtype
+    - a branch function can be a free function, a nested function, a lambda, or a class method
+    - a branch function cannot have closure variables
+    - no in-place mutations on inputs or global variables
+
+    This example demonstrates using a class method in cond().
+
+    NOTE: If `pred` is a test on a dim with batch size < 2, it will be specialized.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.subm = MySubModule()
+
+    def bar(self, x):
+        return x.sin()
+
+    def forward(self, x):
+        return cond(x.shape[0] <= 2, self.subm.forward, self.bar, [x])
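A minimal sketch of exercising this case, assuming the `torch.export.export` entry point of the PyTorch version vendored here; the exact printed graph is not guaranteed:

import torch

m = CondBranchClassMethod()
x = torch.ones(3)           # shape[0] > 2, so eager execution takes the bar() branch
print(m(x))                 # tensor of sin(1.0) values
ep = torch.export.export(m, (x,))
print(ep.graph)             # both branches appear as subgraphs invoked via torch.cond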
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_value_example.py ADDED
@@ -0,0 +1,30 @@
+import torch
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=(torch.tensor(4), torch.randn(5, 5)),
+    tags={
+        "torch.dynamic-value",
+        "torch.escape-hatch",
+    },
+)
+class ConstrainAsValueExample(torch.nn.Module):
+    """
+    If the value is not known at tracing time, you can provide a hint so that we
+    can trace further. Please look at the constrain_as_value and constrain_as_size APIs.
+    constrain_as_value is used for values that don't need to be used for constructing
+    a tensor.
+    """
+
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x, y):
+        a = x.item()
+        torch._constrain_as_value(a, min=0, max=5)
+
+        if a < 6:
+            return y.sin()
+        return y.cos()
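A sketch of the intended runtime behavior, assuming the `torch._constrain_as_value` escape hatch of this PyTorch vintage behaves as documented:

m = ConstrainAsValueExample()
print(m(torch.tensor(4), torch.randn(5, 5)))   # 4 is within [0, 5]: returns y.sin()
# After export, the range becomes a runtime assert, so calling the exported
# module with torch.tensor(7) would fail the [0, 5] constraint check.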
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_constructor.py ADDED
@@ -0,0 +1,19 @@
+import torch
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2),),
+    tags={"torch.dynamic-shape"},
+)
+class DynamicShapeConstructor(torch.nn.Module):
+    """
+    Tensor constructors should be captured with dynamic shape inputs rather
+    than being baked in with static shape.
+    """
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        return torch.ones(x.shape[0] * 2)
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_slicing.py ADDED
@@ -0,0 +1,20 @@
+import torch
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2),),
+    tags={"torch.dynamic-shape"},
+)
+class DynamicShapeSlicing(torch.nn.Module):
+    """
+    Slices with dynamic shape arguments should be captured into the graph
+    rather than being baked in.
+    """
+
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        return x[: x.shape[0] - 2, x.shape[1] - 1 :: 2]
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_view.py ADDED
@@ -0,0 +1,22 @@
+import torch
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=(torch.ones(10, 10),),
+    tags={"torch.dynamic-shape"},
+)
+class DynamicShapeView(torch.nn.Module):
+    """
+    Dynamic shapes should be propagated to view arguments instead of being
+    baked into the exported graph.
+    """
+
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        new_x_shape = x.size()[:-1] + (2, 5)
+        x = x.view(*new_x_shape)
+        return x.permute(0, 2, 1)
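To make the shape arithmetic concrete (an eager-mode check, not part of the diff):

x = torch.ones(10, 10)
out = DynamicShapeView()(x)
# (10, 10) -> view to (10, 2, 5) -> permute(0, 2, 1) -> (10, 5, 2)
assert out.shape == (10, 5, 2)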
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/fn_with_kwargs.py ADDED
@@ -0,0 +1,32 @@
+import torch
+
+from torch._export.db.case import export_case, ExportArgs, SupportLevel
+
+
+@export_case(
+    example_inputs=ExportArgs(
+        torch.randn(4),
+        (torch.randn(4), torch.randn(4)),
+        *[torch.randn(4), torch.randn(4)],
+        mykw0=torch.randn(4),
+        input0=torch.randn(4), input1=torch.randn(4)
+    ),
+    tags={"python.data-structure"},
+    support_level=SupportLevel.SUPPORTED,
+)
+class FnWithKwargs(torch.nn.Module):
+    """
+    Keyword arguments are not supported at the moment.
+    """
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, pos0, tuple0, *myargs, mykw0, **mykwargs):
+        out = pos0
+        for arg in tuple0:
+            out = out * arg
+        for arg in myargs:
+            out = out * arg
+        out = out * mykw0
+        out = out * mykwargs["input0"] * mykwargs["input1"]
+        return out
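For reference, an eager call that mirrors the registered example inputs; the output is the elementwise product of all eight 4-element tensors:

m = FnWithKwargs()
out = m(
    torch.randn(4),                     # pos0
    (torch.randn(4), torch.randn(4)),   # tuple0
    torch.randn(4),                     # *myargs
    torch.randn(4),
    mykw0=torch.randn(4),
    input0=torch.randn(4),              # **mykwargs
    input1=torch.randn(4),
)
assert out.shape == (4,)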
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py ADDED
@@ -0,0 +1,21 @@
+import torch
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2),),
+    tags={"torch.dynamic-shape", "python.data-structure", "python.assert"},
+)
+class ListContains(torch.nn.Module):
+    """
+    List containment relation can be checked on a dynamic shape or constants.
+    """
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        assert x.size(-1) in [6, 2]
+        assert x.size(0) not in [4, 5, 6]
+        assert "monkey" not in ["cow", "pig"]
+        return x + x
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py ADDED
@@ -0,0 +1,25 @@
+import torch
+
+from torch._export.db.case import export_case, SupportLevel
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2),),
+    tags={"python.object-model"},
+    support_level=SupportLevel.NOT_SUPPORTED_YET,
+)
+class ModelAttrMutation(torch.nn.Module):
+    """
+    Attribute mutation is not supported.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.attr_list = [torch.ones(3, 2), torch.ones(3, 2)]
+
+    def recreate_list(self):
+        return [torch.zeros(3, 2), torch.zeros(3, 2)]
+
+    def forward(self, x):
+        self.attr_list = self.recreate_list()
+        return x.sum() + self.attr_list[0].sum()
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py ADDED
@@ -0,0 +1,29 @@
+from enum import Enum
+
+import torch
+
+from torch._export.db.case import export_case
+
+
+class Animal(Enum):
+    COW = "moo"
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2),),
+)
+class SpecializedAttribute(torch.nn.Module):
+    """
+    Model attributes are specialized.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.a = "moo"
+        self.b = 4
+
+    def forward(self, x):
+        if self.a == Animal.COW.value:
+            return x * x + self.b
+        else:
+            raise ValueError("bad")
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/static_for_loop.py ADDED
@@ -0,0 +1,22 @@
+import torch
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2),),
+    tags={"python.control-flow"},
+)
+class StaticForLoop(torch.nn.Module):
+    """
+    A for loop with constant number of iterations should be unrolled in the exported graph.
+    """
+
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        ret = []
+        for i in range(10):  # constant
+            ret.append(i + x)
+        return ret
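An eager check of the unrolling claim (in the exported graph, the loop becomes ten separate add nodes rather than a loop construct):

out = StaticForLoop()(torch.ones(3, 2))
assert len(out) == 10
assert torch.equal(out[0], torch.ones(3, 2))   # iteration i=0 computes 0 + x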
llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/torch_sym_min.py ADDED
@@ -0,0 +1,17 @@
+import torch
+
+from torch._export.db.case import export_case, SupportLevel
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2),),
+    tags={"torch.operator"},
+    support_level=SupportLevel.NOT_SUPPORTED_YET,
+)
+class TorchSymMin(torch.nn.Module):
+    """
+    torch.sym_min operator is not supported in export.
+    """
+
+    def forward(self, x):
+        return x.sum() + torch.sym_min(x.size(0), 100)
llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__init__.py ADDED (file without changes)
llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/__init__.cpython-310.pyc ADDED (binary, 197 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/node_metadata.cpython-310.pyc ADDED (binary, 1.49 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/proxy_value.cpython-310.pyc ADDED (binary, 1.75 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/node_metadata.py ADDED
@@ -0,0 +1,32 @@
+from typing import Any, Dict, Set
+
+
+NodeMetadataValue = Any
+
+
+PROTECTED_KEYS: Set[str] = {
+    "val",
+    "stack_trace",
+    "nn_module_stack",
+    "debug_handle",
+    "tensor_meta",
+}
+
+
+class NodeMetadata:
+    def __init__(self, data: Dict[str, Any]) -> None:
+        self.data: Dict[str, Any] = data.copy()
+
+    def __getitem__(self, key: str) -> NodeMetadataValue:
+        return self.data[key]
+
+    def __setitem__(self, key: str, value: NodeMetadataValue) -> NodeMetadataValue:
+        if key in PROTECTED_KEYS:
+            raise RuntimeError(f"Could not override node key: {key}")
+        self.data[key] = value
+
+    def __contains__(self, key: str) -> bool:
+        return key in self.data
+
+    def copy(self) -> "NodeMetadata":
+        return NodeMetadata(self.data.copy())
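A small sketch of the guarantee this class provides; the non-protected key name below is made up:

meta = NodeMetadata({"val": None, "my_key": 1})
meta["my_key"] = 2                  # arbitrary keys are writable
try:
    meta["val"] = object()          # protected keys cannot be overridden
except RuntimeError as e:
    print(e)                        # "Could not override node key: val"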
llmeval-env/lib/python3.10/site-packages/torch/_export/pass_infra/proxy_value.py ADDED
@@ -0,0 +1,41 @@
+# pyre-strict
+from typing import Union
+
+import torch
+
+
+class ProxyValue:
+    # pyre-ignore
+    def __init__(self, data, proxy: Union[torch.fx.Proxy, torch.fx.Node]):
+        # pyre-ignore
+        self.data = data
+        self.proxy_or_node = proxy
+
+    @property
+    def node(self) -> torch.fx.Node:
+        if isinstance(self.proxy_or_node, torch.fx.Node):
+            return self.proxy_or_node
+        assert isinstance(self.proxy_or_node, torch.fx.Proxy)
+        return self.proxy_or_node.node
+
+    @property
+    def proxy(self) -> torch.fx.Proxy:
+        if not isinstance(self.proxy_or_node, torch.fx.Proxy):
+            raise RuntimeError(
+                f"ProxyValue doesn't have attached Proxy object. Node: {self.proxy_or_node.format_node()}"
+            )
+        return self.proxy_or_node
+
+    def to_tensor(self) -> torch.Tensor:
+        assert isinstance(self.data, torch.Tensor)
+        return self.data
+
+    def is_tensor(self) -> bool:
+        return isinstance(self.data, torch.Tensor)
+
+    # pyre-ignore
+    def __iter__(self):
+        yield from self.data
+
+    def __bool__(self) -> bool:
+        return bool(self.data)
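A sketch of the Node-backed state of a ProxyValue; the traced lambda is hypothetical:

import torch
import torch.fx

gm = torch.fx.symbolic_trace(lambda x: x + 1)
node = next(n for n in gm.graph.nodes if n.op == "placeholder")
pv = ProxyValue(torch.ones(2), node)   # concrete data paired with its graph node
assert pv.is_tensor() and pv.node is node
# pv.proxy would raise RuntimeError here: only a Node, not a Proxy, is attached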
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__init__.py ADDED
@@ -0,0 +1 @@
+from .replace_view_ops_with_view_copy_ops_pass import ReplaceViewOpsWithViewCopyOpsPass
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc ADDED (binary, 291 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc ADDED (binary, 6.13 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc ADDED (binary, 2.32 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc ADDED (binary, 3.42 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constants_pass.cpython-310.pyc ADDED (binary, 6.94 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc ADDED (binary, 1.08 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_set_grad_with_hop_pass.cpython-310.pyc ADDED (binary, 4 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_sym_size_ops_pass.cpython-310.pyc ADDED (binary, 794 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc ADDED (binary, 2.47 kB)
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py ADDED
@@ -0,0 +1,231 @@
+import math
+import operator
+import traceback
+from functools import partial
+from typing import Callable, Dict, List, NamedTuple, Set
+
+import sympy
+
+import torch
+import torch.fx
+from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse, ProxyValue, PassResult
+from torch.utils._sympy.value_ranges import ValueRanges
+from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
+
+
+__all__ = ["InputDim"]
+
+
+class InputDim(NamedTuple):
+    input_name: str
+    dim: int
+
+
+def _convert_to_int(val):
+    # Convert simple sympy Integers into concrete int
+    if val == sympy.oo:
+        return math.inf
+    if val == -sympy.oo:
+        return -math.inf
+    if isinstance(val, sympy.Integer):
+        return int(val)
+    raise RuntimeError(
+        "Export constraints cannot be non-integer expressions"
+    )
+
+
+def _convert_range_to_int(range: ValueRanges):
+    assert isinstance(range, ValueRanges)
+    min_val = _convert_to_int(range.lower)
+    max_val = _convert_to_int(range.upper)
+    return min_val, max_val
+
+
+class _AddRuntimeAssertionsForInlineConstraintsPass(_ExportPassBaseDeprecatedDoNotUse):
+    def __init__(
+        self,
+        range_constraints: Dict[sympy.Symbol, ValueRanges],
+    ):
+        super().__init__()
+        self.range_constraints: Dict[sympy.Symbol, ValueRanges] = range_constraints
+        self._asserts_generated_unbacked_symbols: Set[sympy.Symbol] = set()
+        self.counter = 0
+
+    def _assert_range_constraint(self, proxy, lower, upper, assert_msg):
+        if lower > -math.inf:
+            self._insert_assert_async(operator.ge, proxy, lower, assert_msg)
+
+        if upper < math.inf:
+            self._insert_assert_async(operator.le, proxy, upper, assert_msg)
+
+    def _insert_assert_async(self, operator, lower, upper, assert_msg):
+        """
+        Inserts assert_async call_function nodes in the graph. This function is
+        called **during** the interpreter-based pass.
+        """
+        self.counter += 1
+        cmp = super().call_operator(operator, (lower, upper), {}, self._create_dummy_node_metadata())
+        cmp_tensor = super().call_operator(torch.ops.aten.scalar_tensor.default, (cmp,), {}, self._create_dummy_node_metadata())
+        super().call_operator(
+            torch.ops.aten._assert_async.msg,
+            (cmp_tensor, assert_msg),
+            {},
+            self._create_dummy_node_metadata(),
+        )
+
+    def call_operator(self, op, args, kwargs, meta) -> ProxyValue:
+        ret = super().call_operator(op, args, kwargs, meta)
+        if "val" not in meta:
+            return ret
+
+        val = meta["val"]
+
+        # In general, we may have to deal with cases such as: ret[1].shape[0].
+        # We first need to find out which symbols require assertion, then follow the path
+        # from ret to the symbol, construct the proxies along the way and construct the messages
+        # piece-wise at the same time.
+        #
+        # We use post-order traversal to collect all the proxy callbacks needed, construct
+        # the error message callbacks, and at the top level of the traversal we execute all the callbacks.
+        # We need the callbacks because, in order to call the function to create a proxy for shape[0], we
+        # need the proxy for shape, which further requires the proxy for ret[1], etc.
+        def add_assertions(val):
+            call_backs: List[Callable] = []
+            messages: List[str] = []
+            if isinstance(val, (torch.SymInt, torch.SymFloat, torch.SymBool)):
+                symbol = val.node.expr
+                if symbol in self.existing_inline_assertions:
+                    return call_backs, messages
+                if isinstance(symbol, sympy.Symbol) and free_unbacked_symbols(symbol):
+                    if symbol in self._asserts_generated_unbacked_symbols:
+                        return call_backs, messages
+                    # We only care about unbacked symints for these inline
+                    # constraints, which are prefixed with 'u'
+                    constraint = self.range_constraints[symbol]
+                    min_val, max_val = _convert_range_to_int(constraint)
+                    assert_msg = f" is outside of inline constraint [{min_val}, {max_val}]."
+                    call_backs.append(
+                        partial(self._assert_range_constraint, lower=min_val, upper=max_val)
+                    )
+                    messages.append(assert_msg)
+                    self._asserts_generated_unbacked_symbols.add(symbol)
+
+            elif isinstance(val, torch.Tensor):
+                for i, sym in enumerate(val.shape):
+                    cbs, msgs = add_assertions(sym)
+                    for cb, msg in zip(cbs, msgs):
+                        def sym_size_cb(proxy, assert_msg, dim):
+                            dim_proxy = super(
+                                _AddRuntimeAssertionsForInlineConstraintsPass,
+                                self
+                            ).call_operator(
+                                torch.ops.aten.sym_size.int,
+                                (proxy, dim),
+                                {},
+                                self._create_dummy_node_metadata(),
+                            )
+                            cb(proxy=dim_proxy, assert_msg=assert_msg)
+                        call_backs.append(partial(sym_size_cb, dim=i))
+                        messages.append(f".shape[{i}]" + msg)
+            return call_backs, messages
+
+        callbacks, messages = add_assertions(val)
+        for cb, msg in zip(callbacks, messages):
+            cb(proxy=ret, assert_msg=f"{ret.node}" + msg)
+        return ret
+
+    def call(self, graph_module):
+        self.existing_inline_assertions = _get_existing_inline_assertions(
+            graph_module, self.range_constraints
+        )
+
+        # Add runtime asserts for inline constraints
+        val = super().call(graph_module)
+
+        # Sometimes this pass would return a wrong graph where we have mismatched
+        # node names in signature. Before we fix it, let's just skip it.
+        if self.counter == 0 and type(self) is _AddRuntimeAssertionsForInlineConstraintsPass:
+            return PassResult(graph_module, False)
+
+        # Populate the stack trace with dummy vals to respect IR
+        for node in val.graph_module.graph.nodes:
+            if not node.meta.get("stack_trace", None):
+                node.meta["stack_trace"] = "".join(traceback.format_stack(limit=1))
+
+        return PassResult(val.graph_module, val.modified)
+
+
+def _get_existing_inline_assertions(
+    graph_module: torch.fx.GraphModule,
+    range_constraints: Dict[sympy.Symbol, ValueRanges],
+) -> Dict[sympy.Symbol, ValueRanges]:
+    existing_inline_assertions: Dict[sympy.Symbol, ValueRanges] = {}
+
+    for module in graph_module.modules():
+        if not isinstance(module, torch.fx.GraphModule):
+            continue
+
+        # Find all the existing inline assertions. They will look something like:
+        # %_local_scalar_dense = call_function[target=torch.ops.aten._local_scalar_dense.default](args = (%arg1_1,), kwargs = {})
+        # %ge = call_function[target=operator.ge](args = (%_local_scalar_dense, 0), kwargs = {})
+        # %scalar_tensor = call_function[target=torch.ops.aten.scalar_tensor.default](args = (%ge,), kwargs = {})
+        # %_assert_async = call_function[target=torch.ops.aten._assert_async.msg](args = (%scalar_tensor, "..."), kwargs = {})
+        for node in module.graph.nodes:
+            if node.target != torch.ops.aten._assert_async.msg:
+                continue
+
+            scalar_tensor_arg = node.args[0]
+            if not (
+                scalar_tensor_arg.op == "call_function" and
+                scalar_tensor_arg.target == torch.ops.aten.scalar_tensor.default
+            ):
+                continue
+
+            compare_arg = scalar_tensor_arg.args[0]
+            if not (
+                compare_arg.op == "call_function" and
+                compare_arg.target in (operator.le, operator.ge) and
+                len(compare_arg.args) == 2
+            ):
+                continue
+
+            compare_op = compare_arg.target
+            maybe_symint_arg, compare_int = compare_arg.args
+
+            # x >= 0 will sometimes be canonicalized to -x <= 0, so in some
+            # cases the operation before the comparison is to multiply by -1. We
+            # can undo the canonicalization here
+            if (
+                maybe_symint_arg.op == "call_function" and
+                maybe_symint_arg.target == operator.mul and
+                maybe_symint_arg.args[0] == -1
+            ):
+                maybe_symint_arg = maybe_symint_arg.args[1]
+                compare_op = operator.ge
+                compare_int = -1 * compare_int
+
+            if not (
+                "val" in maybe_symint_arg.meta and
+                isinstance(maybe_symint_arg.meta["val"], torch.SymInt)
+            ):
+                continue
+
+            symint = maybe_symint_arg.meta["val"].node.expr
+            if not isinstance(symint, sympy.Symbol):
+                continue
+
+            if symint not in range_constraints:
+                raise RuntimeError(f"Unable to find symint {symint} in {range_constraints}")
+
+            found_range = existing_inline_assertions.get(symint, ValueRanges(-math.inf, math.inf))
+
+            if compare_arg.target == operator.le:
+                existing_inline_assertions[symint] = ValueRanges(
+                    lower=found_range.lower, upper=compare_int
+                )
+            elif compare_arg.target == operator.ge:
+                existing_inline_assertions[symint] = ValueRanges(
+                    lower=compare_int, upper=found_range.upper
+                )
+
+    return existing_inline_assertions
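To illustrate the range conversion these helpers perform (a sketch; `_convert_range_to_int` is private to this file, and the `ValueRanges` constructor usage is an assumption based on how the file itself builds ranges):

import sympy
from torch.utils._sympy.value_ranges import ValueRanges

r = ValueRanges(sympy.Integer(0), sympy.oo)
# _convert_range_to_int(r) yields (0, math.inf); the pass then emits only the
# lower-bound assert, since an infinite upper bound generates no check.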
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/collect_tracepoints_pass.py ADDED
@@ -0,0 +1,66 @@
+import operator
+
+import torch
+
+from torch.export.exported_program import ConstantArgument, TensorArgument
+from torch.fx.passes.infra.pass_base import PassBase, PassResult
+
+__all__ = ["CollectTracepointsPass"]
+
+
+class CollectTracepointsPass(PassBase):
+    """
+    Collects module-call tracepoints (`_export_tracepoint` nodes) into the
+    module call signatures, then erases the tracepoint nodes from the graph.
+    """
+
+    def __init__(self, specs, sig) -> None:
+        super().__init__()
+        self.specs = specs
+        self.sig = sig
+
+    def call(self, gm):
+        def get_arg_spec(arg):
+            if isinstance(arg, torch.fx.Node):
+                if isinstance(arg.meta.get("val"), torch.Tensor):
+                    return TensorArgument(name=arg.name)
+                else:
+                    raise AssertionError(
+                        "Symint input is not implemented yet for submodule call signature."
+                    )
+            else:
+                return ConstantArgument(value=arg)
+
+        for module in gm.modules():
+            if not isinstance(module, torch.fx.GraphModule):
+                continue
+            for node in module.graph.nodes:
+                if node.op != "call_function":
+                    continue
+                if node.target == torch.ops.higher_order._export_tracepoint:
+                    for i, arg in enumerate(node.args):
+                        kind = node.kwargs["kind"]
+                        if kind == "module_call_inputs":
+                            self.specs[node.kwargs["path"]].inputs.append(
+                                get_arg_spec(arg)
+                            )
+                        elif kind == "module_call_outputs":
+                            self.specs[node.kwargs["path"]].outputs.append(
+                                get_arg_spec(arg)
+                            )
+                        else:
+                            raise AssertionError(f"Unknown tracepoint kind: {kind}")
+                        if isinstance(arg, torch.fx.Node):
+                            for user in node.users:
+                                assert user.op == "call_function"
+                                assert user.target == operator.getitem
+                                assert isinstance(user.args[1], int)
+                                if user.args[1] == i:
+                                    user.replace_all_uses_with(arg)
+                                    self.sig.replace_all_uses(user.name, arg.name)
+                                    break
+                    users = list(node.users)
+                    for user in users:
+                        assert len(user.users) == 0
+                        gm.graph.erase_node(user)
+                    gm.graph.erase_node(node)
+        return PassResult(gm, True)
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/functionalize_side_effectful_ops_pass.py ADDED
@@ -0,0 +1,94 @@
+import copy
+from typing import Dict, Optional, Tuple, List
+
+import torch
+from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse, PassResult, Argument
+from torch._export.pass_infra.node_metadata import NodeMetadata
+from torch._export.pass_infra.proxy_value import ProxyValue
+from torch._ops import OpOverload
+
+aten = torch.ops.aten
+
+_NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS: Dict[OpOverload, OpOverload] = {
+    aten.sym_constrain_range.default: aten._functional_sym_constrain_range,
+    aten._assert_async.msg: aten._functional_assert_async.msg,
+}
+
+
+class _FunctionalizeSideEffectfulOpsPass(_ExportPassBaseDeprecatedDoNotUse):
+    """
+    Functionalize ops with side effects in a graph module by replacing each
+    such op with its functional version. A new dependency token (`dep_token`)
+    will be created and propagated through the functional ops to the output.
+    For example:
+    ```
+    def f(x):
+        sym_constrain_range(x.shape[0], min=1, max=3)
+        return x.add(3)
+    ```
+    Will be transformed to:
+    ```
+    def f(x):
+        dep_token0 = _make_dep_token()
+        dep_token1 = _functional_sym_constrain_range(
+            x.shape[0], min=1, max=3, dep_token=dep_token0
+        )
+
+        return x.add(3), dep_token1
+    ```
+    """
+
+    def __init__(self) -> None:
+        super().__init__()
+        self._dep_token: Optional[ProxyValue] = None
+        self._next_dep_token_index: Optional[int] = None
+
+    def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
+        # Early return if no non-functional assertions.
+        if not any(
+            n.target in _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS
+            for n in graph_module.graph.nodes
+        ):
+            return PassResult(graph_module=graph_module, modified=False)
+
+        gm = copy.deepcopy(graph_module)
+        self._dep_token = None
+        self._next_dep_token_index = None
+        return super().call(gm)
+
+    def call_operator(
+        self,
+        op: OpOverload,
+        args: Tuple[Argument, ...],
+        kwargs: Dict[str, Argument],
+        meta: NodeMetadata,
+    ) -> ProxyValue:
+        if op not in _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS:
+            return super().call_operator(op, args, kwargs, meta)
+
+        if self._dep_token is None:
+            self._dep_token = super().call_operator(
+                aten._make_dep_token,
+                args=(),
+                kwargs={},
+                meta=self._create_dummy_node_metadata(),
+            )
+            self._dep_token.node.name = "dep_token0"
+            self._next_dep_token_index = 1
+
+        self._dep_token = super().call_operator(
+            _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS[op],
+            args=args,
+            kwargs={**kwargs, "dep_token": self._dep_token},
+            meta=meta,
+        )
+        assert self._next_dep_token_index is not None
+        self._dep_token.node.name = f"dep_token{self._next_dep_token_index}"
+        self._next_dep_token_index += 1
+
+        return self._dep_token
+
+    def output(self, results: List[Argument], meta: NodeMetadata) -> ProxyValue:
+        assert self._dep_token is not None
+
+        return super().output(results=(*results, self._dep_token), meta=meta)  # type: ignore[arg-type]
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/lift_constants_pass.py ADDED
@@ -0,0 +1,248 @@
+import collections
+from typing import Any, Dict, Union
+
+import torch
+from torch._export.verifier import SpecViolationError
+from torch._guards import detect_fake_mode
+from torch.export.exported_program import (
+    ArgumentSpec,
+    CustomObjArgument,
+    ExportGraphSignature,
+    InputKind,
+    InputSpec,
+    TensorArgument,
+)
+
+
+class ConstantAttrMap(collections.abc.MutableMapping):
+    """A mapping class that understands how to use module constants (tensors and
+    ScriptObjects) as keys. We store tensors normally, but ScriptObjects are
+    stored by hash, because different torch.ScriptObjects can point to the same
+    underlying value (but we guarantee that they will `hash()` to the same value
+    if that's the case).
+    """
+
+    def __init__(self):
+        # Underlying dict that we use to implement this mapping.
+        self._constant_attrs: Dict[Union[int, torch.Tensor], Any] = {}
+        # Map from the hash(ScriptObject) to the ScriptObject itself. Used for
+        # APIs like `__iter__` that should look like they're returning the
+        # original ScriptObjects.
+        self._script_object_map: Dict[int, torch.ScriptObject] = {}
+
+    def __getitem__(self, key: Union[torch.Tensor, torch.ScriptObject]) -> Any:
+        real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
+        assert isinstance(real_key, (int, torch.Tensor))
+        return self._constant_attrs[real_key]
+
+    def __setitem__(
+        self, key: Union[torch.Tensor, torch.ScriptObject], value: Any
+    ) -> None:
+        if isinstance(key, torch.ScriptObject):
+            self._constant_attrs[hash(key)] = value
+            self._script_object_map[hash(key)] = key
+        elif isinstance(key, torch.Tensor):
+            self._constant_attrs[key] = value
+        else:
+            raise TypeError(
+                f"Expected key to be a tensor or ScriptObject, got {type(key)}"
+            )
+
+    def __delitem__(self, key):
+        real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
+
+        del self._constant_attrs[real_key]
+
+    def __iter__(self):
+        for key in self._constant_attrs:
+            if isinstance(key, int):
+                yield self._script_object_map[key]
+            else:
+                yield key
+
+    def __len__(self):
+        return len(self._constant_attrs)
+
+    def __contains__(self, key: object) -> bool:
+        real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
+        return real_key in self._constant_attrs
+
+
+def get_constant_fqn(node: torch.fx.Node, constant_name: str) -> str:
+    # The FQN of the constant tensor in the state dict should
+    # correspond to the module where the constant tensor was
+    # originally used.
+    parent_fqn = list(node.meta["nn_module_stack"].values())[-1][0]
+    if len(parent_fqn) > 0:
+        return f"{parent_fqn}.{constant_name}"
+    else:
+        return constant_name
+
+
+def lift_constants_pass(
+    gm: torch.fx.GraphModule,
+    graph_signature: ExportGraphSignature,
+    constant_attrs: ConstantAttrMap,
+) -> Dict[str, Union[torch.Tensor, torch._C.ScriptObject]]:
+    """
+    Takes a graph module and graph signature and modifies them in place to lift any
+    constants (tensors or custom classes) as inputs to the graph. Returns a
+    dictionary of names to constants.
+
+    Arguments:
+        gm (torch.fx.GraphModule): The graph module containing the graph and constants to lift.
+        graph_signature (ExportGraphSignature): This graph signature will be
+            mutated to add additional CONSTANT_TENSOR and CUSTOM_OBJ inputs.
+        constant_attrs (ConstantAttrMap): A mapping from a constant value to its
+            fully-qualified path in `gm`. This is used to maintain consistent
+            location of constants between the original module and the exported
+            version.
+
+    Returns:
+        A dictionary of fqn => constant value.
+    """
+    all_constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] = {}
+
+    inputs = graph_signature.input_specs
+    num_custom_obj = sum(
+        input_specs.kind == InputKind.CUSTOM_OBJ for input_specs in inputs
+    )
+    num_tensor_constants = sum(
+        input_specs.kind == InputKind.CONSTANT_TENSOR for input_specs in inputs
+    )
+
+    fake_mode = detect_fake_mode(
+        tuple(node.meta["val"] for node in gm.graph.nodes if node.op == "placeholder")
+    )
+
+    first_user_input_loc, first_user_input = 0, None
+    for node in gm.graph.nodes:
+        if node.op == "placeholder" and node.name in graph_signature.user_inputs:
+            first_user_input = node
+            break
+        first_user_input_loc += 1
+
+    lifted_objs = ConstantAttrMap()
+    for node in gm.graph.nodes:
+        if node.op == "get_attr":
+            constant_val = getattr(gm, node.target)
+            if constant_val in lifted_objs:
+                # We already lifted this constant elsewhere. Just rewrite uses
+                # of this get_attr to point to the already-existing placeholder
+                # node.
+                const_placeholder_node = lifted_objs[constant_val]
+                node.replace_all_uses_with(const_placeholder_node)
+                gm.graph.erase_node(node)
+                continue
+
+            # For ScriptObject and Tensor constants:
+            # First check if the constant was an attribute on some module by
+            # consulting the `constant_attrs` map. If it is, use the fqn that keeps
+            # its location consistent with the eager module.
+            #
+            # If it's not in the `constant_attrs` map, that means it's an inline
+            # constant (e.g. x + torch.tensor(0)), and thus did not have a
+            # specific location in the eager module. In that case, just generate
+            # some name and attach it to the module in which it was used.
+            if isinstance(constant_val, torch.ScriptObject):
+                constant_kind = InputKind.CUSTOM_OBJ
+                constant_fqn = constant_attrs.get(constant_val)
+                if constant_fqn is not None:
+                    _, _, constant_name = constant_fqn.rpartition(".")
+                else:
+                    constant_name = f"_lifted_custom_obj{num_custom_obj}"
+                    constant_fqn = get_constant_fqn(node, constant_name)
+                    num_custom_obj += 1
+            elif isinstance(constant_val, torch.Tensor):
+                constant_kind = InputKind.CONSTANT_TENSOR
+                constant_fqn = constant_attrs.get(constant_val)
+                if constant_fqn is not None:
+                    _, _, constant_name = constant_fqn.rpartition(".")
+                else:
+                    constant_name = f"_lifted_tensor_constant{num_tensor_constants}"
+                    constant_fqn = get_constant_fqn(node, constant_name)
+                    num_tensor_constants += 1
+            elif isinstance(constant_val, torch.fx.GraphModule):
+                continue
+            elif "LoweredBackendModule" in type(constant_val).__name__:
+                continue
+            else:
+                raise SpecViolationError(
+                    f"getattr node {node} referencing unsupported type {type(constant_val)}"
+                )
+
+            with gm.graph.inserting_before(first_user_input):
+                # Insert the constant node before the first user input
+                const_placeholder_node = gm.graph.placeholder(constant_name)
+                # match target name with its node name in case there is name collision
+                # and a suffix is added to the node name in fx
+                const_placeholder_node.target = const_placeholder_node.name
+
+                for k, v in node.meta.items():
+                    const_placeholder_node.meta[k] = v
+
+                input_spec_arg: ArgumentSpec
+                if isinstance(constant_val, torch.Tensor):
+                    if fake_mode is not None:
+                        const_placeholder_node.meta["val"] = fake_mode.from_tensor(
+                            constant_val, static_shapes=True
+                        )
+                        const_placeholder_node.meta["val"].constant = constant_val
+                    else:
+                        const_placeholder_node.meta["val"] = constant_val
+                    input_spec_arg = TensorArgument(name=const_placeholder_node.name)
+                elif isinstance(constant_val, torch._C.ScriptObject):
+                    class_fqn = constant_val._type().qualified_name()  # type: ignore[attr-defined]
+                    const_placeholder_node.meta["val"] = CustomObjArgument(
+                        constant_fqn, class_fqn
+                    )
+                    input_spec_arg = CustomObjArgument(
+                        name=const_placeholder_node.name, class_fqn=class_fqn
+                    )
+                else:
+                    raise SpecViolationError(
+                        f"tried to lift unsupported type {type(constant_val)} from node {node.format_node()}"
+                    )
+
+                lifted_objs[constant_val] = const_placeholder_node
+                node.replace_all_uses_with(const_placeholder_node)
+                gm.graph.erase_node(node)
+
+                # Add the constant as a buffer to the graph signature
+                graph_signature.input_specs.insert(
+                    first_user_input_loc,
+                    InputSpec(
+                        kind=constant_kind,
+                        arg=input_spec_arg,
+                        target=constant_fqn,
+                    ),
+                )
+                all_constants[constant_fqn] = constant_val
+                first_user_input_loc += 1
+
+    return all_constants
+
+
+def rewrite_script_object_meta(
+    gm: torch.fx.GraphModule,
+) -> Dict[str, Union[torch.Tensor, torch.ScriptObject]]:
+    """When tracing, we produce a graph with an actual ScriptObject in the
+    meta["val"]. Eventually we want to change this behavior, when FakeMode infra
+    for ScriptObjects lands.
+
+    For now, we rewrite meta["val"] to be a placeholder CustomObjArgument
+    """
+    constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] = {}
+    for node in gm.graph.nodes:
+        if "val" not in node.meta or not isinstance(
+            node.meta["val"], torch.ScriptObject
+        ):
+            continue
+
+        old_meta = node.meta["val"]
+        class_fqn = old_meta._type().qualified_name()  # type: ignore[attr-defined]
+        new_meta = CustomObjArgument(node.name, class_fqn)
+        constants[node.name] = old_meta
+        node.meta["val"] = new_meta
+
+    return constants
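A sketch of the ConstantAttrMap key behavior described in its docstring (the fqn string is hypothetical); tensors are stored as ordinary keys, ScriptObjects by their hash:

import torch

t = torch.ones(2)
m = ConstantAttrMap()
m[t] = "submod.frozen_bias"
assert t in m and m[t] == "submod.frozen_bias"
assert next(iter(m)) is t   # iteration yields the original key objects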
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py ADDED
@@ -0,0 +1,26 @@
+import torch
+from torch.fx.passes.infra.pass_base import PassBase, PassResult
+
+
+class _RemoveRuntimeAssertionsPass(PassBase):
+    """
+    Remove runtime assertions inserted by the
+    _AddRuntimeAssertionsForInlineConstraintsPass.
+    """
+
+    def call(self, graph_module) -> PassResult:
+        modified = False
+        for module in graph_module.modules():
+            if not isinstance(module, torch.fx.GraphModule):
+                continue
+            for node in module.graph.nodes:
+                if node.target == torch.ops.aten._assert_async.msg:
+                    assert_async_node = node
+                    if len(assert_async_node.users) > 0:
+                        continue
+                    module.graph.erase_node(assert_async_node)
+                    # the upstream scalar_tensor <- {le, ge} <- sym_size
+                    # linear chain of nodes is removed by the
+                    # downstream dead code elimination
+                    modified = True
+        return PassResult(graph_module, modified)
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_set_grad_with_hop_pass.py
ADDED
@@ -0,0 +1,141 @@
import torch
from torch._higher_order_ops.wrap import wrap_with_set_grad_enabled

from ..utils import (
    node_inline_,
    node_replace_,
    nodes_filter,
    nodes_first,
    nodes_map,
    sequential_split,
)


def _is_set_grad_enabled_node(node: torch.fx.Node):
    return (
        node
        and node.op == "call_function"
        and node.target == torch._C._set_grad_enabled
    )


def _is_set_grad_enabled_sub_mod(node: torch.fx.Node, omit_if_same_with_ambient=False):
    if node.op == "call_module":
        assert isinstance(node.target, str)
        subgm = getattr(node.graph.owning_module, node.target)
        first_non_ph = nodes_first(
            subgm.graph.nodes, lambda node: node.op != "placeholder"
        )
        if (
            first_non_ph
            and first_non_ph.op == "call_function"
            and first_non_ph.target == torch._C._set_grad_enabled
        ):
            return (
                first_non_ph.args[0] != torch.is_grad_enabled()
                if omit_if_same_with_ambient
                else True
            )
    return False


def _replace_with_hop(node: torch.fx.Node):
    assert node.op == "call_module"
    graph: torch.fx.Graph = node.graph
    gm: torch.fx.GraphModule = graph.owning_module
    assert isinstance(node.target, str)
    sub_gm = getattr(gm, node.target)
    sub_graph = sub_gm.graph
    set_grad_nodes = nodes_filter(sub_graph.nodes, _is_set_grad_enabled_node)
    if len(set_grad_nodes) > 0:
        assert len(set_grad_nodes) == 1
        set_grad_node = set_grad_nodes[0]
        enable_grad_val = set_grad_node.args[0]
        with graph.inserting_before(node):
            get_attr_node = graph.get_attr(node.target)
            output_node = next(iter(reversed(sub_gm.graph.nodes)), None)
            if output_node is not None:
                assert len(output_node.args) == 1
                output_args = output_node.args[0]
                if isinstance(output_args, (tuple, list)):
                    call_func_node = graph.call_function(
                        wrap_with_set_grad_enabled,
                        (enable_grad_val, get_attr_node, *node.args),
                        {},
                    )
                    # Create the metadata
                    call_func_node.meta["val"] = tuple(
                        arg.meta["val"] for arg in output_args
                    )
                    node_replace_(node, call_func_node, delete_old=True)

                    # Rename getitem nodes to the actual names of their contents
                    # to pass the verifier and improve readability; also
                    # propagate metadata
                    for get_item_node in call_func_node.users.keys():
                        idx: int = get_item_node.args[1]
                        output_node = output_args[idx]
                        get_item_node._rename(output_node.name)
                        get_item_node.meta = output_node.meta

                elif isinstance(output_args, torch.fx.Node):
                    call_func_node = graph.create_node(
                        "call_function",
                        wrap_with_set_grad_enabled,
                        (enable_grad_val, get_attr_node, *node.args),
                        {},
                        output_args.name,
                    )
                    call_func_node.meta = output_args.meta
                    node_replace_(node, call_func_node, delete_old=True)
                else:
                    raise NotImplementedError(
                        f"replace_set_grad_with_hop_pass doesn't support output type {type(output_args)}"
                    )
            else:
                raise NotImplementedError(
                    "Cannot replace a call_module with a hop if it has no output. This module will get DCE'd."
                )
        sub_graph.erase_node(set_grad_node)


def _remove_set_grad_and_inline(node: torch.fx.Node):
    assert node.op == "call_module"
    graph: torch.fx.Graph = node.graph
    gm: torch.fx.GraphModule = graph.owning_module
    assert isinstance(node.target, str)
    sub_gm = getattr(gm, node.target)
    sub_graph = sub_gm.graph
    nodes_map(
        sub_graph.nodes,
        lambda n: sub_graph.erase_node(n) if _is_set_grad_enabled_node(n) else n,
    )
    node_inline_(node)


def replace_set_grad_with_hop_pass(gm: torch.fx.GraphModule):
    # If there is no set_grad_enabled node, return the original graph module
    need_replacing = False
    for node in gm.graph.nodes:
        if _is_set_grad_enabled_node(node):
            need_replacing = True

    if not need_replacing:
        return gm

    new_gm = sequential_split(gm, _is_set_grad_enabled_node)

    def _maybe_inline_or_replace_with_hop(node: torch.fx.Node):
        if _is_set_grad_enabled_sub_mod(node, omit_if_same_with_ambient=True):
            _replace_with_hop(node)
        else:
            _remove_set_grad_and_inline(node)

    nodes_map(
        list(new_gm.graph.nodes),
        lambda node: _maybe_inline_or_replace_with_hop(node)
        if node.op == "call_module"
        else node,
    )
    new_gm.graph.lint()
    return new_gm
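To make the intent of the pass concrete, here is a hedged, illustrative sketch (the module and the commented before/after shapes are assumptions for this illustration, not output captured from this diff):

# Hypothetical input: pre-dispatch export of a module like this records
# torch._C._set_grad_enabled(False/True) calls around the no_grad block
# as plain call_function nodes in the graph.
import torch

class M(torch.nn.Module):
    def forward(self, x):
        y = x.sin()
        with torch.no_grad():
            z = y.cos()
        return z + y

# After replace_set_grad_with_hop_pass, sequential_split has outlined the
# no_grad region into a submodule, and the region is invoked through the
# higher-order op instead of raw _set_grad_enabled mutations, roughly:
#
#   submod_1 = self.submod_1   # get_attr to the outlined region
#   z = wrap_with_set_grad_enabled(False, submod_1, y)
#
# Regions whose grad mode already matches the ambient mode are inlined back
# via _remove_set_grad_and_inline instead.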
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_sym_size_ops_pass.py
ADDED
@@ -0,0 +1,18 @@
from typing import Dict

import torch

replacements: Dict[torch._ops.OpOverloadPacket, torch._ops.OpOverload] = {
    torch.ops.aten.sym_size: torch.ops.aten.sym_size.int,
    torch.ops.aten.sym_stride: torch.ops.aten.sym_stride.int,
    torch.ops.aten.sym_numel: torch.ops.aten.sym_numel.default,
}


def _replace_sym_size_ops_pass(gm: torch.fx.GraphModule):
    for module in gm.modules():
        if not isinstance(module, torch.fx.GraphModule):
            continue
        for node in module.graph.nodes:
            if node.target in replacements:
                node.target = replacements[node.target]
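The reason this tiny pass exists: tracing can record the overload packet as a node target, while the export IR expects a concrete overload. A quick sketch of the distinction:

import torch

packet = torch.ops.aten.sym_size        # OpOverloadPacket, as a tracer may record it
overload = torch.ops.aten.sym_size.int  # OpOverload, what the export IR requires
print(type(packet).__name__)    # OpOverloadPacket
print(type(overload).__name__)  # OpOverload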
llmeval-env/lib/python3.10/site-packages/torch/_export/passes/replace_view_ops_with_view_copy_ops_pass.py
ADDED
@@ -0,0 +1,71 @@
from typing import Dict, Optional, Set

import torch
from torch._ops import OpOverload, OpOverloadPacket, HigherOrderOperator
from torch._export.error import InternalError
from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse


__all__ = ["ReplaceViewOpsWithViewCopyOpsPass"]


_NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS: Dict[OpOverload, OpOverload] = {
    torch.ops.aten._unsafe_view.default: torch.ops.aten.view_copy.default,
}

# TODO (tmanlaibaatar) remove this after https://github.com/pytorch/pytorch/pull/100749
_BLACK_LISTED_OPS: Set[OpOverloadPacket] = {
    torch.ops.aten.sym_size,
    torch.ops.aten.sym_stride,
    torch.ops.aten.sym_numel,
}


def is_view_op(schema: torch._C.FunctionSchema) -> bool:
    if len(schema.arguments) == 0:
        return False
    alias_info = schema.arguments[0].alias_info
    return (alias_info is not None) and (not alias_info.is_write)


def get_view_copy_of_view_op(schema: torch._C.FunctionSchema) -> Optional[OpOverload]:
    if is_view_op(schema) and schema.name.startswith("aten::"):
        view_op_name = schema.name.split("::")[1]
        view_op_overload = (
            schema.overload_name
            if schema.overload_name != ""
            else "default"
        )
        view_copy_op_name = view_op_name + "_copy"
        if not hasattr(torch.ops.aten, view_copy_op_name):
            raise InternalError(f"{schema.name} is missing a view_copy variant")

        view_copy_op_overload_packet = getattr(torch.ops.aten, view_copy_op_name)

        if not hasattr(view_copy_op_overload_packet, view_op_overload):
            raise InternalError(f"{schema.name} is missing a view_copy variant")

        return getattr(view_copy_op_overload_packet, view_op_overload)

    return None


class ReplaceViewOpsWithViewCopyOpsPass(_ExportPassBaseDeprecatedDoNotUse):
    """
    Our backend expects pure functional operators. For efficiency
    purposes, we keep view ops around while functionalizing the exported
    program. This pass replaces view ops with view_copy ops for backends that
    need AOT memory planning.
    """

    def call_operator(self, op, args, kwargs, meta):
        if op in _NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS:
            return super().call_operator(
                _NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS[op], args, kwargs, meta
            )

        if op in _BLACK_LISTED_OPS or isinstance(op, HigherOrderOperator):
            return super().call_operator(op, args, kwargs, meta)

        if view_copy_op := get_view_copy_of_view_op(op._schema):
            return super().call_operator(view_copy_op, args, kwargs, meta)

        return super().call_operator(op, args, kwargs, meta)
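A short sketch of the schema-driven lookup above, assuming the two helpers from this file are importable (for example, run inside the module itself): aten.view's first argument aliases the output without writing, so it is classified as a view op and mapped to aten.view_copy.default by name:

import torch
# is_view_op / get_view_copy_of_view_op are the helpers defined above.

schema = torch.ops.aten.view.default._schema
print(is_view_op(schema))                # True: arg 0 aliases output, no write
print(get_view_copy_of_view_op(schema))  # aten.view_copy.default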
llmeval-env/lib/python3.10/site-packages/torch/_functorch/__init__.py
ADDED
@@ -0,0 +1,5 @@
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.