Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step120/zero/9.attention.dense.weight/exp_avg_sq.pt +3 -0
- venv/lib/python3.10/site-packages/torch/_awaits/__init__.py +54 -0
- venv/lib/python3.10/site-packages/torch/_awaits/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_custom_op/__init__.py +0 -0
- venv/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_custom_op/autograd.py +274 -0
- venv/lib/python3.10/site-packages/torch/_custom_op/functional.py +187 -0
- venv/lib/python3.10/site-packages/torch/_custom_op/impl.py +976 -0
- venv/lib/python3.10/site-packages/torch/_prims_common/__init__.py +1985 -0
- venv/lib/python3.10/site-packages/torch/_prims_common/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_prims_common/__pycache__/wrappers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_prims_common/wrappers.py +401 -0
- venv/lib/python3.10/site-packages/torch/cpu/__init__.py +157 -0
- venv/lib/python3.10/site-packages/torch/cpu/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/cpu/amp/__init__.py +2 -0
- venv/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/autocast_mode.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/grad_scaler.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/cpu/amp/autocast_mode.py +43 -0
- venv/lib/python3.10/site-packages/torch/cpu/amp/grad_scaler.py +27 -0
- venv/lib/python3.10/site-packages/torch/fft/__init__.py +1360 -0
- venv/lib/python3.10/site-packages/torch/fft/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/linalg/__init__.py +0 -0
- venv/lib/python3.10/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/monitor/__init__.py +37 -0
- venv/lib/python3.10/site-packages/torch/monitor/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/_deprecation.py +64 -0
- venv/lib/python3.10/site-packages/torch/onnx/_onnx_supported_ops.py +97 -0
- venv/lib/python3.10/site-packages/torch/onnx/_type_utils.py +380 -0
- venv/lib/python3.10/site-packages/torch/onnx/errors.py +106 -0
- venv/lib/python3.10/site-packages/torch/onnx/operators.py +20 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_caffe2.py +359 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_helper.py +1823 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset11.py +1650 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset13.py +1156 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset15.py +82 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset16.py +187 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset17.py +211 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset18.py +70 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset7.py +66 -0
- venv/lib/python3.10/site-packages/torch/onnx/utils.py +2121 -0
- venv/lib/python3.10/site-packages/torch/onnx/verification.py +1884 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py +1 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py +1 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/9.attention.dense.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7c6f7149751d2588ad60ee61c2184dc41aa60842460a6ae1d2daf16ac1d0f1b
+size 16778411
venv/lib/python3.10/site-packages/torch/_awaits/__init__.py
ADDED
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+from typing import cast, Callable, Generic, Type, TypeVar
+
+import torch
+
+__all__ = ['Await']
+
+W = TypeVar("W")
+
+class _PyAwaitMeta(type(torch._C._Await), type(Generic)):  # type: ignore[misc, no-redef]
+    pass
+
+class _Await(torch._C._Await, Generic[W], metaclass=_PyAwaitMeta):
+    r"""
+    Wrapper around a ``torch._C.Await`` which encapsulates the delayed execution
+    of a callable. All manipulations happen with the functions ``torch.jit._awaitable``,
+    ``torch.jit._awaitable_wait``, and ``torch.jit._awaitable_nowait``.
+
+    Torch scriptable manipulations:
+    ``torch.jit._awaitable(func, *args)``
+        Creates an ``Await[W]`` object, where ``W`` is the return type of ``func``.
+
+    ``torch.jit._awaitable_wait(Await[W])``
+        Runs the function specified at ``_awaitable`` with the specified arguments.
+
+        Returns:
+            The result of type ``W`` of the function call. The result is owned by ``Await[W]``
+            and returned on all following ``_awaitable_wait`` calls.
+
+    ``torch.jit._awaitable_nowait(W)``
+        Returns:
+            A trivial ``Await[W]`` with the specified result.
+
+    Only in eager mode:
+    ``fn() -> Callable[Tuple[Any], W]``
+        Returns:
+            The Python function ``func`` specified at ``_awaitable``.
+
+    ``args() -> Tuple[Any]``
+        Returns:
+            The Python args specified at ``_awaitable``.
+
+    ``is_nowait() -> _bool``
+        Returns:
+            ``True`` if this object was created via an ``_awaitable_nowait`` call (trivial ``Await[W]``).
+
+    In eager mode an ``Await[W]`` can be used as a ``W``, i.e. attributes of ``W`` can be called
+    on ``Await[W]``; an ``_awaitable_wait()`` call will be added transparently.
+    """
+    pass
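The docstring above covers the eager-mode entry points. A minimal usage sketch, assuming a PyTorch build that exposes the internal `torch.jit._awaitable` helpers described there (they are private and may move):

import torch

def slow_add(a: int, b: int) -> int:
    # Stand-in for an expensive computation whose execution we want to delay.
    return a + b

# _awaitable wraps the call without running it; _awaitable_wait forces it
# and caches the result on the Await object.
aw = torch.jit._awaitable(slow_add, 1, 2)
assert torch.jit._awaitable_wait(aw) == 3

# _awaitable_nowait builds a trivial, already-completed Await.
done = torch.jit._awaitable_nowait(42)
assert torch.jit._awaitable_wait(done) == 42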
venv/lib/python3.10/site-packages/torch/_awaits/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (2.11 kB).

venv/lib/python3.10/site-packages/torch/_custom_op/__init__.py
ADDED
File without changes

venv/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (184 Bytes).

venv/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc
ADDED
Binary file (8.89 kB).

venv/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc
ADDED
Binary file (5.97 kB).

venv/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc
ADDED
Binary file (33.6 kB).
venv/lib/python3.10/site-packages/torch/_custom_op/autograd.py
ADDED
@@ -0,0 +1,274 @@
+import torch
+import torch.utils._pytree as pytree
+from collections import namedtuple
+import functools
+
+
+# NOTE [CustomOp autograd kernel indirection]
+# We register `inner` as the autograd kernel for this custom_op.
+# `inner` either calls the autograd formula registered by the user,
+# or goes into an `autograd_not_implemented` kernel.
+#
+# The reason why this indirection exists is
+# so that we can swap out the autograd kernel (the PyTorch dispatcher
+# doesn't actually allow us to do this). By default, we want
+# the `autograd_not_implemented` behavior, but then the user may come
+# and register something that is actually a backward formula
+def autograd_kernel_indirection(custom_op):
+    autograd_fallback = autograd_not_implemented(custom_op)
+
+    def inner(*args, **kwargs):
+        if custom_op._has_impl('autograd'):
+            kernel = custom_op._get_impl('autograd').func
+            return kernel(*args, **kwargs)
+        # As explained in NOTE ["backward", "save_for_backward", and "autograd"],
+        # after the user gives us "backward" and "save_for_backward", we generate
+        # the "autograd" impl. If the user only provided one, then we tell
+        # the user they've done something wrong.
+        if custom_op._has_impl('save_for_backward') or custom_op._has_impl('backward'):
+            missing = (
+                'save_for_backward' if custom_op._has_impl('backward')
+                else 'backward'
+            )
+            found = 'save_for_backward' if missing == 'backward' else 'backward'
+            loc = custom_op._get_impl(found).location
+            raise RuntimeError(
+                f"We found a '{found}' registration for {custom_op} at "
+                f"{loc} but were unable to find a '{missing}' registration. "
+                f"To use the CustomOp API to register a backward formula, "
+                f"please provide us both a backward function and a "
+                f"'save for backward' function via `impl_backward` and "
+                f"`impl_save_for_backward` respectively.")
+        return autograd_fallback(*args, **kwargs)
+    return inner
+
+
+# TODO(#101191): Use the actual C++ autograd not implemented fallback,
+# or change the default autograd fallback to the autograd not implemented fallback.
+def autograd_not_implemented(custom_op):
+    def kernel(*args, **kwargs):
+        if torch.is_grad_enabled() and pytree.tree_any(
+            lambda x: isinstance(x, torch.Tensor) and x.requires_grad, (args, kwargs)
+        ):
+            raise RuntimeError("Autograd has not been implemented for operator")
+        with torch._C._AutoDispatchBelowAutograd():
+            return custom_op(*args, **kwargs)
+    return kernel
+
+
+def mark_non_differentiable(ctx, output, output_differentiability):
+    # Output types are restricted to be:
+    #   - Tensor
+    #   - Tensor[]
+    #   - int, bool, Scalar, float
+    # See _check_can_register_backward
+    if output_differentiability is not None:
+        if not isinstance(output, tuple):
+            tuple_output = (output,)
+        else:
+            tuple_output = output  # type: ignore[assignment]
+        assert len(output_differentiability) == len(tuple_output)
+        non_differentiable_tensors = []
+        for idx, (differentiable, out) in enumerate(zip(output_differentiability, tuple_output)):
+            if isinstance(out, torch.Tensor):
+                if not differentiable:
+                    non_differentiable_tensors.append(out)
+                continue
+            if isinstance(out, list):
+                if not differentiable:
+                    non_differentiable_tensors.extend(out)
+                continue
+            if differentiable:
+                raise RuntimeError(
+                    f"With output_differentiability={output_differentiability}. "
+                    f"At idx {idx}, we received an object of type {type(out)} that "
+                    f"is not a Tensor, so it cannot be marked as differentiable in "
+                    f"output_differentiability.")
+        if non_differentiable_tensors:
+            ctx.mark_non_differentiable(*non_differentiable_tensors)
+
+
+def construct_autograd_kernel(
+        schema,
+        output_differentiability,
+        custom_op,
+        op_overload,
+        save_for_backward_fn,
+        backward_fn):
+
+    def apply(*args):
+        flat_args, spec = pytree.tree_flatten(args)
+        out_spec = None
+
+        def forward(ctx, *flat_args):
+            ctx.set_materialize_grads(True)
+            args = pytree.tree_unflatten(list(flat_args), spec)
+            with torch._C._AutoDispatchBelowAutograd():
+                output = op_overload(*args)
+
+            # We use the info about args to give better error messages in backward
+            args_info = namedtuple_args(
+                schema, pytree.tree_map(type, args))
+
+            save_for_backward_fn_inputs = namedtuple_args(schema, args)
+            to_save = save_for_backward_fn(save_for_backward_fn_inputs, output)
+
+            save_pytree_for_backward(ctx, (to_save, args_info))
+            mark_non_differentiable(ctx, output, output_differentiability)
+
+            nonlocal out_spec
+            flat_output, out_spec = pytree.tree_flatten(output)
+            return tuple(flat_output)
+
+        def backward(ctx, *flat_grad_output):
+            assert out_spec is not None
+            grads = pytree.tree_unflatten(list(flat_grad_output), out_spec)
+            saved, args_info = unpack_saved(ctx)
+            # There is nothing on the ctx object for now, it is just there so
+            # that we can add additional things in the future.
+            inner_ctx = object()
+            if not isinstance(grads, tuple):
+                grads = (grads,)
+            grad_inputs_dict = backward_fn(inner_ctx, saved, *grads)
+
+            # Massage the grad_inputs_dict to a form acceptable by
+            # autograd.Function.
+            validate_grad_inputs_dict(grad_inputs_dict, custom_op, args_info)
+            return grad_inputs_dict_to_flat_tuple(grad_inputs_dict, args_info)
+
+        generated_cls = gen_autograd_function(
+            custom_op._opname + '_customop', forward, backward)
+
+        flat_output = generated_cls.apply(*flat_args)
+        assert out_spec is not None
+        return pytree.tree_unflatten(list(flat_output), out_spec)
+    return apply
+
+
+def gen_autograd_function(name, forward, backward):
+    generated_cls = type(
+        name,
+        (torch.autograd.Function,),
+        {
+            'forward': staticmethod(forward),
+            'backward': staticmethod(backward),
+        }
+    )
+    return generated_cls
+
+
+@functools.lru_cache
+def namedtuple_args_cls(schema):
+    attribs = [arg.name for arg in schema.arguments.flat_all]
+    name = str(schema.name) + "_args"
+    # mypy doesn't support dynamic namedtuple names
+    tuple_cls = namedtuple(name, attribs)  # type: ignore[misc]
+    return tuple_cls
+
+
+def namedtuple_args(schema, args):
+    assert isinstance(args, tuple)
+    tuple_cls = namedtuple_args_cls(schema)
+    return tuple_cls(*args)
+
+
+def validate_grad_inputs_dict(grad_inputs_dict, forward_op, args_info):
+    def error(what):
+        backward = forward_op._get_impl('backward')
+        raise RuntimeError(
+            f"In the backward function defined for {forward_op} at "
+            f"{backward.location} using the CustomOp API, {what}")
+
+    if not isinstance(grad_inputs_dict, dict):
+        error(f"expected the output of the backward function to be a dict but "
+              f"got {type(grad_inputs_dict)}")
+
+    expected_keys = {arg.name for arg in forward_op._schema.arguments.flat_all
+                     if arg.type.is_tensor_like()}
+    actual_keys = grad_inputs_dict.keys()
+    if expected_keys != actual_keys:
+        error(f"expected the returned grad_input dict to have keys "
+              f"{expected_keys} but got {actual_keys}. The backward "
+              f"function must return a gradient (can be None) for each arg "
+              f"to the CustomOp that may be a Tensor or Sequence[Tensor]. "
+              f"Args declared to be non-Tensor-like types should not appear "
+              f"in the grad_input dict")
+
+    for name, grad in grad_inputs_dict.items():
+        arg_info = getattr(args_info, name)
+
+        if isinstance(arg_info, list):
+            if not isinstance(grad, (tuple, list)):
+                error(f"for input '{name}' expected the grad_input dict to "
+                      f"hold a list of gradients but got object of type "
+                      f"{type(grad)}.")
+            if not len(grad) == len(arg_info):
+                error(f"for input '{name}' expected the grad_input dict to "
+                      f"hold a list of {len(arg_info)} gradients but got "
+                      f"{len(grad)}")
+            for idx, (g, info) in enumerate(zip(grad, arg_info)):
+                if g is None:
+                    continue
+                if not isinstance(g, torch.Tensor):
+                    error(f"for input '{name}' expected the grad_input dict to "
+                          f"hold a list of None or Tensor gradients but got "
+                          f"object of {type(g)} at index {idx}")
+                if not issubclass(info, torch.Tensor):
+                    error(f"for input '{name}', got a Tensor as the gradient "
+                          f"for the {idx}-th value but expected None because "
+                          f"the {idx}-th value was not a Tensor (it was "
+                          f"type {arg_info})")
+            continue
+
+        if grad is None:
+            continue
+        if not isinstance(grad, torch.Tensor):
+            error(f"got object of type {type(grad)} as the gradient for input "
+                  f"'{name}', "
+                  f"but expected the gradient to be either None or a Tensor")
+        if not issubclass(arg_info, torch.Tensor):
+            error(f"got a Tensor as the gradient for input '{name}' but "
+                  f"expected None as the gradient because input '{name}' "
+                  f"was not a Tensor (it was type {arg_info}).")
+
+
+def grad_inputs_dict_to_flat_tuple(grad_inputs_dict, args_info):
+    result = []
+    for name, arg_info in args_info._asdict().items():
+        if name not in grad_inputs_dict:
+            result.append(pytree.tree_map(lambda x: None, arg_info))
+            continue
+        result.append(grad_inputs_dict[name])
+    return tuple(pytree.tree_leaves(result))
+
+
+# Saves "stuff" (a pytree) onto the ctx object. Use unpack_saved to unpack it.
+# autograd.Function prefers that users use ctx.save_for_backward to
+# save Tensors (to avoid reference cycles) and for non-Tensors to go onto the
+# ctx object.
+def save_pytree_for_backward(ctx, stuff):
+    flat_stuff, spec = pytree.tree_flatten(stuff)
+    num_elts = len(flat_stuff)
+    tensor_idxs = [idx for idx, thing in enumerate(flat_stuff)
+                   if isinstance(thing, torch.Tensor)]
+    non_tensor_idxs = [idx for idx, thing in enumerate(flat_stuff)
+                       if not isinstance(thing, torch.Tensor)]
+    tensors = [thing for thing in flat_stuff if isinstance(thing, torch.Tensor)]
+    non_tensors = [thing for thing in flat_stuff if not isinstance(thing, torch.Tensor)]
+
+    ctx.spec = spec
+    ctx.num_elts = num_elts
+    ctx.save_for_backward(*tensors)
+    ctx.tensor_idxs = tensor_idxs
+    ctx.saved_non_tensors = non_tensors
+    ctx.non_tensor_idxs = non_tensor_idxs
+
+
+# Inverse operation to save_pytree_for_backward
+def unpack_saved(ctx):
+    flat_stuff = [None] * ctx.num_elts
+    for tensor, idx in zip(ctx.saved_tensors, ctx.tensor_idxs):
+        flat_stuff[idx] = tensor
+    for non_tensor, idx in zip(ctx.saved_non_tensors, ctx.non_tensor_idxs):
+        flat_stuff[idx] = non_tensor
+    stuff = pytree.tree_unflatten(flat_stuff, ctx.spec)
+    return stuff
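The save_pytree_for_backward/unpack_saved pair above is what lets an arbitrary pytree ride on an autograd.Function context. A small round-trip sketch; `FakeCtx` is a hypothetical stand-in that mimics only the two ctx features the helpers rely on (`save_for_backward`/`saved_tensors` plus plain attribute storage):

import torch
from torch._custom_op.autograd import save_pytree_for_backward, unpack_saved

class FakeCtx:
    # Tensors go through save_for_backward/saved_tensors; everything else
    # is stored as plain attributes, exactly as the helpers expect.
    def save_for_backward(self, *tensors):
        self.saved_tensors = tensors

ctx = FakeCtx()
stuff = {"w": torch.ones(2), "step": 3, "misc": [torch.zeros(1), "tag"]}

save_pytree_for_backward(ctx, stuff)  # splits Tensors from non-Tensors
restored = unpack_saved(ctx)          # reassembles the original pytree

assert restored["step"] == 3 and restored["misc"][1] == "tag"
assert torch.equal(restored["w"], stuff["w"])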
venv/lib/python3.10/site-packages/torch/_custom_op/functional.py
ADDED
@@ -0,0 +1,187 @@
+import weakref
+
+import torch
+import torch.utils._pytree as pytree
+from torch._C import _ExcludeDispatchKeyGuard, DispatchKey, DispatchKeySet
+from torch._ops import OpOverload
+from torch.library import Library
+from torchgen.model import (
+    BaseTy,
+    BaseType,
+    FunctionSchema,
+    OperatorName,
+    OptionalType,
+    SchemaKind,
+)
+
+from .autograd import autograd_not_implemented
+
+
+def register_functional_op(
+    lib: Library,
+    new_op_name: str,
+    mutable_op: OpOverload,
+) -> None:
+    """Given a mutable operator, registers the functional variant.
+
+    This API also correctly links the functional variant with the mutable
+    operator for the purposes of functionalization.
+
+    All of the new registrations are performed on the ``lib`` passed in.
+
+    Arguments:
+        lib (Library): Should be a torch.library.Library object that has
+            the same namespace as ``mutable_op``'s namespace.
+            lib will be used to register the new functional op as well
+            as a functionalization kernel for the ``mutable_op``.
+            If you don't have a library handy, use
+            ``torch.library.Library(ns, 'FRAGMENT')`` to construct one.
+        new_op_name (str): The name of the functional operator (without the
+            namespace). The new functional variant will be accessible under
+            ``torch.ops.{lib.ns}.new_op_name``.
+        mutable_op (OpOverload): The mutable custom operator. Note
+            that you may need to add a `.default` to it, like
+            `torch.ops.aten.abs_.default`.
+
+    """
+    validate(mutable_op)
+    schema = functional_schema(new_op_name, mutable_op)
+    lib.define(schema)
+
+    functional_impl = construct_functional_impl(mutable_op)
+    lib.impl(new_op_name, functional_impl, 'CompositeExplicitAutograd')
+
+    functional_op = getattr(getattr(torch.ops, lib.ns), new_op_name).default
+
+    # There's no easy way for us to generate the autograd kernel, so we
+    # use autograd_not_implemented. Also, this makes it so that the user
+    # is unable to register an autograd formula themselves. This shouldn't
+    # be a problem if the user doesn't use the functional op directly
+    # in their program, but we may need to revisit this in the future.
+    lib.impl(new_op_name, autograd_not_implemented(functional_op), 'Autograd')
+
+    f_kernel = construct_functionalization_kernel(weakref.proxy(mutable_op), functional_op)
+
+    lib.impl(mutable_op, f_kernel, 'Functionalize')
+
+
+def construct_functional_impl(mutable_op):
+    def functional_impl(*args):
+        # Strategy:
+        # - clone args that would have been mutated
+        # - run mutable_op
+        # - return the cloned args as additional outputs
+        new_args = []
+        extra_rets = []
+        for is_write, arg in zip(mutable_args(mutable_op), args):
+            if is_write:
+                cloned = arg.clone() if arg is not None else None
+                new_args.append(cloned)
+                extra_rets.append(cloned)
+            else:
+                new_args.append(arg)
+        result = mutable_op(*new_args)
+        if result is None:
+            return tuple(extra_rets)
+        if isinstance(result, tuple):
+            return (*result, *extra_rets)
+        return (result, *extra_rets)
+    return functional_impl
+
+
+def construct_functionalization_kernel(mutable_op, functional_op):
+    def kernel(*args):
+        # There's nothing to be functionalized!
+        # We can still end up here because DispatchKey::Functionalize is a mode key
+        if pytree.tree_all_only(torch.Tensor, lambda x: not torch._is_functional_tensor(x), args):
+            with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)):
+                return mutable_op(*args)
+
+        # NB: This differs from the codegen -- codegen handles cases where there
+        # are mixed FunctionalTensorWrapper and non-FunctionalTensorWrapper.
+        # This only really matters for XLA (mixed CPU-XLA tensors) and
+        # running functionalization without the PT2 stack (which guarantees to us that
+        # all tensors are FunctionalTensorWrapper).
+        if not pytree.tree_all_only(torch.Tensor, torch._is_functional_tensor, args):
+            raise RuntimeError(f"{mutable_op}: expected all args to be FunctionalTensorWrapper")
+
+        unwrapped_args = []
+        for arg in args:
+            if isinstance(arg, torch.Tensor) and torch._is_functional_tensor(arg):
+                torch._sync(arg)
+                unwrapped = torch._from_functional_tensor(arg)
+                unwrapped_args.append(unwrapped)
+            else:
+                unwrapped_args.append(arg)
+
+        with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)):
+            output = functional_op(*unwrapped_args)
+
+        num_actual_output = len(mutable_op._schema.returns)
+        actual_output = pytree.tree_map(
+            torch._to_functional_tensor, output[:num_actual_output])
+
+        new_values_to_propagate = output[num_actual_output:]
+        inputs_to_replace = [arg for is_write, arg in zip(mutable_args(mutable_op), args)
+                             if is_write]
+        assert len(new_values_to_propagate) == len(inputs_to_replace)
+        for new_value, arg in zip(new_values_to_propagate, inputs_to_replace):
+            if (arg is None and new_value is None) or (arg is not None and new_value is not None):
+                continue
+            torch._C._propagate_xla_data(arg, new_value)
+            torch._C._replace_(arg, new_value)
+            torch._C._commit_update(arg)
+            torch._sync(arg)
+
+        if len(actual_output) == 1:
+            return actual_output[0]
+        elif len(actual_output) == 0:
+            return None
+        return actual_output
+
+    return kernel
+
+
+def validate(mutable_op: OpOverload):
+    if not isinstance(mutable_op, OpOverload):
+        raise TypeError(
+            f"register_functional_op(mutable_op): expected mutable_op to be instance of "
+            f"OpOverload but got {type(mutable_op)}")
+
+    # There are generally three types of "in-place" or "mutable" ops.
+    # Each of them has their own conventions:
+    # - inplace (first input modified in-place and returned as only output)
+    # - out= (some args modified in-place and returned as outputs)
+    # - mutable (some args modified in-place but none of those returned as outputs)
+    # In theory we can support all three, but we'll just support the last
+    # option right now for simplicity.
+    schema = FunctionSchema.parse(str(mutable_op._schema))
+    if not schema.kind() == SchemaKind.mutable:
+        raise RuntimeError("Expected op to be mutable (as opposed to functional, inplace or out)")
+    for ret in schema.returns:
+        # construct_functionalization_kernel assumes this for simplicity
+        if ret.annotation is not None:
+            raise NotImplementedError(
+                "NYI: register_functional_op(op) where op returns a mutated or aliased value. "
+                "Please file an issue (and as a workaround, modify your operator to "
+                "not return the mutated value or aliases)")
+    for arg in schema.arguments.flat_all:
+        # construct_functionalization_kernel assumes this for simplicity
+        if arg.type.is_tensor_like() and (
+            arg.type != BaseType(BaseTy.Tensor)
+            and arg.type != OptionalType(BaseType(BaseTy.Tensor))
+        ):
+            raise NotImplementedError(
+                "NYI: register_functional_op(op) where op has a List[Tensor] input. "
+                "Please file an issue.")
+
+
+def functional_schema(new_op_name, op: OpOverload):
+    schema = FunctionSchema.parse(str(op._schema))
+    schema = schema.signature().with_name(OperatorName.parse(new_op_name))
+    return str(schema)
+
+
+def mutable_args(op: OpOverload):
+    return tuple(False if arg.alias_info is None else arg.alias_info.is_write
+                 for arg in op._schema.arguments)
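A hedged usage sketch for register_functional_op. The namespace `mylib`, the operator `my_mutate`, and its schema are invented for illustration; the schema is of the "mutable" kind the validator requires (an argument is written in place and nothing is returned):

import torch
from torch.library import Library
from torch._custom_op.functional import register_functional_op

lib = Library("mylib", "FRAGMENT")  # hypothetical namespace
# "mutable"-kind schema: `out` is mutated (Tensor(a!)) and not returned.
lib.define("my_mutate(Tensor(a!) out, float value) -> ()")

def my_mutate_impl(out, value):
    out.fill_(value)

lib.impl("my_mutate", my_mutate_impl, "CompositeExplicitAutograd")

# Defines mylib::my_mutate_functional (which returns the would-be-mutated
# tensor instead of writing in place) and registers a Functionalize kernel
# so the mutable op can be traced away, e.g. under torch.compile.
register_functional_op(
    lib, "my_mutate_functional", torch.ops.mylib.my_mutate.default)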
venv/lib/python3.10/site-packages/torch/_custom_op/impl.py
ADDED
@@ -0,0 +1,976 @@
+import dataclasses
+import functools
+import inspect
+import sys
+import typing
+import weakref
+
+from torchgen.model import FunctionSchema, OperatorName, SchemaKind, BaseType, ListType, BaseTy
+
+import torch
+import torch._C as _C
+import torch.library as library
+from torch._library.abstract_impl import AbstractImplCtx
+from torch.library import get_ctx
+
+from .autograd import autograd_kernel_indirection, construct_autograd_kernel
+
+"""
+For a detailed guide on custom ops, please see
+https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
+
+This file includes pieces of the implementation of our custom operator API.
+"""
+
+__all__ = ["custom_op", "CustomOp", "get_ctx", "AbstractImplCtx"]
+
+
+SUPPORTED_DEVICE_TYPE_TO_KEY = {
+    "cpu": "CPU",
+    "cuda": "CUDA",
+}
+
+# We will not let users register CustomOps with anything that could look like
+# PyTorch internals to avoid confusion.
+RESERVED_NS = {
+    "prim",
+    "prims",
+    "aten",
+    "at",
+    "torch",
+    "pytorch",
+}
+
+
+def custom_op(
+    qualname: str, manual_schema: typing.Optional[str] = None
+) -> typing.Callable:
+    r"""Creates a new CustomOp object.
+
+    WARNING: if you're a user, please do not use this directly
+    (instead use the torch._custom_ops APIs).
+    Also please see the following for a detailed guide on custom ops.
+    https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
+
+    In PyTorch, defining an op (short for "operator") is a two-step process:
+    - we need to define (create) the op
+    - we need to implement behavior for how the operator interacts with
+      various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc.
+
+    This entrypoint defines the CustomOp object (the first step);
+    you must then perform the second step by calling various methods on
+    the CustomOp object.
+
+    This API is used as a decorator (see examples).
+
+    Arguments:
+        qualname (str): Should be a string that looks like
+            "namespace::operator_name". Operators in PyTorch need a namespace to
+            avoid name collisions; a given operator may only be created once.
+            If you are writing a Python library, we recommend the namespace to
+            be the name of your top-level module. The operator_name must be
+            the same as the name of the function you pass to custom_op
+            (see examples).
+        manual_schema (Optional[str]): Each PyTorch operator needs a schema that
+            tells PyTorch the types of the inputs/outputs. If None (default),
+            we will infer the schema from the type annotations on the function
+            (see examples). Otherwise, if you don't want to use type annotations,
+            you may provide us the schema string.
+
+    Example::
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
+        >>> import numpy as np
+        >>> from torch import Tensor
+        >>>
+        >>> # Step 1: define the CustomOp.
+        >>> # We need to provide the decorator a "prototype function"
+        >>> # (a function with Python ellipses as the body).
+        >>> @custom_op("my_library::numpy_sin")
+        >>> def numpy_sin(x: Tensor) -> Tensor:
+        >>>     ...
+        >>>
+        >>> # numpy_sin is now an instance of class CustomOp
+        >>> print(type(numpy_sin))
+        >>>
+        >>> # Step 2: Register an implementation for various PyTorch subsystems
+        >>>
+        >>> # Register an implementation for CPU tensors
+        >>> @numpy_sin.impl('cpu')
+        >>> def numpy_sin_impl_cpu(x):
+        >>>     return torch.from_numpy(np.sin(x.numpy()))
+        >>>
+        >>> # Register an implementation for CUDA tensors
+        >>> @numpy_sin.impl('cuda')
+        >>> def numpy_sin_impl_cuda(x):
+        >>>     return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device)
+        >>>
+        >>> x = torch.randn(3)
+        >>> numpy_sin(x)  # calls numpy_sin_impl_cpu
+        >>>
+        >>> x_cuda = x.cuda()
+        >>> numpy_sin(x_cuda)  # calls numpy_sin_impl_cuda
+
+    """
+
+    def inner(func):
+        if not inspect.isfunction(func):
+            raise ValueError(
+                f"custom_op(...)(func): Expected `func` to be a Python "
+                f"function, got: {type(func)}"
+            )
+
+        ns, name = parse_qualname(qualname)
+        validate_namespace(ns)
+        if func.__name__ != name:
+            raise ValueError(
+                f"custom_op(qualname='{qualname}', ...)(func): expected `func` "
+                f"to have name '{name}' but got '{func.__name__}'. "
+                f"Please either change the name of `func` or the qualname that "
+                f"is passed to `custom_op`"
+            )
+
+        schema = infer_schema(func) if manual_schema is None else manual_schema
+        schema_str = f"{name}{schema}"
+        function_schema = FunctionSchema.parse(schema_str)
+        validate_schema(function_schema)
+        if manual_schema is not None:
+            validate_function_matches_schema(function_schema, func)
+
+        lib = library.Library(ns, "FRAGMENT")
+        lib.define(schema_str)
+        ophandle = find_ophandle_or_throw(ns, function_schema.name)
+        result = CustomOp(lib, ns, function_schema, name, ophandle, _private_access=True)
+
+        result.__name__ = func.__name__
+        result.__module__ = func.__module__
+        result.__doc__ = func.__doc__
+
+        library.impl(lib, result._opname, "Autograd")(
+            autograd_kernel_indirection(weakref.proxy(result))
+        )
+
+        torch._C._dispatch_set_report_error_callback(
+            ophandle, functools.partial(report_error_callback, weakref.proxy(result))
+        )
+
+        return result
+
+    return inner
+
+
+# Global dictionary holding references to all CustomOp objects
+# Yes, it keeps all CustomOps alive (see NOTE [CustomOp lifetime])
+# Used to query the CustomOp associated with a specific C++ dispatcher operator.
+# An example usage is FakeTensor: FakeTensor checks if a specific operator
+# has an implementation registered via the CustomOp API.
+# Indexed by qualname (e.g. aten::foo)
+global_registry: typing.Dict[str, "CustomOp"] = {}
+
+
+class CustomOp:
+    r"""Class for custom operators in PyTorch.
+
+    Use the CustomOp API to create user-defined custom operators that behave
+    just like regular PyTorch operators (e.g. torch.sin, torch.mm) when it
+    comes to various PyTorch subsystems (like torch.compile).
+
+    To construct a `CustomOp`, use `custom_op`.
+    """
+
+    def __init__(self, lib, cpp_ns, schema, operator_name, ophandle, *, _private_access=False):
+        super().__init__()
+        if not _private_access:
+            raise RuntimeError(
+                "The CustomOp constructor is private and we do not guarantee "
+                "BC for it. Please use custom_op(...) to create a CustomOp object"
+            )
+        name = f"{cpp_ns}::{operator_name}"
+        self._schema = schema
+        self._cpp_ns = cpp_ns
+        self._lib: library.Library = lib
+        self._ophandle: _C._DispatchOperatorHandle = ophandle
+        # Has the name of the op, e.g. "foo". We cache here for convenience.
+        self._opname: str = operator_name
+        # this is _opname but with namespace, e.g. "custom::foo"
+        self._qualname: str = name
+        self.__name__ = None  # mypy requires this
+        # NB: Some of these impls are registered as kernels to DispatchKeys.
+        # Modifying the _impls dict directly won't do anything in that case.
+        self._impls: typing.Dict[str, typing.Optional[FuncAndLocation]] = {}
+        # See NOTE [CustomOp autograd kernel indirection]
+        self._registered_autograd_kernel_indirection = False
+
+        global_registry[self._qualname] = self
+
+    def _register_autograd_kernel_indirection(self):
+        assert not self._registered_autograd_kernel_indirection
+        self._lib.impl(self._opname, autograd_kernel_indirection(weakref.proxy(self)), "Autograd")
+        self._registered_autograd_kernel_indirection = True
+
+    # Records the impl and the source location in self._impls.
+    # Note that this doesn't cause torch.library to use the impl; that
+    # needs to be done in a separate self._lib.impl call.
+    def _register_impl(self, kind, func, stacklevel=2):
+        if self._has_impl(kind):
+            func_and_location = self._impls[kind]
+            assert func_and_location is not None  # Pacify mypy
+            location = func_and_location.location
+            raise RuntimeError(
+                f"Attempting to register a {kind} impl for operator {self._qualname} "
+                f"that already has a {kind} impl registered from Python at "
+                f"{location}. This is not supported."
+            )
+        frame = inspect.getframeinfo(sys._getframe(stacklevel))
+        location = f"{frame.filename}:{frame.lineno}"
+        self._impls[kind] = FuncAndLocation(func, location)
+
+    def _get_impl(self, kind):
+        return self._impls[kind]
+
+    def _has_impl(self, kind):
+        return kind in self._impls
+
+    def _destroy(self):
+        # NOTE: [CustomOp lifetime]
+        # A CustomOp, once created, lives forever. The mechanism is that the
+        # global registry holds a reference to it. However, to make testing
+        # easier, we want to be able to destroy CustomOp objects.
+        # CustomOp._destroy does the job, though it leaves the CustomOp
+        # in a garbage state.
+        del self._lib
+
+        opnamespace = getattr(torch.ops, self._cpp_ns)
+        if hasattr(opnamespace, self._opname):
+            delattr(opnamespace, self._opname)
+
+        del global_registry[self._qualname]
+
+    def __repr__(self):
+        return f'<CustomOp(op="{self._qualname}")>'
+
+    def __call__(self, *args, **kwargs):
+        # Bypass torch.ops.* and directly do OperatorHandle::callBoxed.
+        # Using torch.ops.* is a bit of a pain (it can be slow and it has lifetime
+        # issues from caching operators that make testing CustomOp difficult).
+        result = _C._dispatch_call_boxed(self._ophandle, *args, **kwargs)
+        return result
+
+    def impl(
+        self, device_types: typing.Union[str, typing.Iterable[str]], _stacklevel=2,
+    ) -> typing.Callable:
+        r"""Register an implementation for a device type for this CustomOp object.
+
+        WARNING: if you're a user, please do not use this directly
+        (instead use the torch._custom_ops APIs).
+        Also please see the following for a detailed guide on custom ops.
+        https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
+
+        If the CustomOp is passed multiple Tensor inputs with different device
+        types, it will dispatch to the registered implementation for the highest
+        priority device type among those present.
+        The supported device types, in order of priority, are {'cuda', 'cpu'}.
+
+        This API is used as a decorator (see examples).
+
+        Arguments:
+            device_types (str or Iterable[str]): the device type(s) to register the function for.
+
+        Examples::
+            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
+            >>> import numpy as np
+            >>> from torch import Tensor
+            >>>
+            >>> @custom_op("my_library::numpy_cos")
+            >>> def numpy_cos(x: Tensor) -> Tensor:
+            >>>     ...
+            >>>
+            >>> # Register an implementation for CPU Tensors
+            >>> @numpy_cos.impl('cpu')
+            >>> def numpy_cos_impl_cpu(x):
+            >>>     return torch.from_numpy(np.cos(x.numpy()))
+            >>>
+            >>> # Register an implementation for CUDA Tensors
+            >>> @numpy_cos.impl('cuda')
+            >>> def numpy_cos_impl_cuda(x):
+            >>>     return torch.from_numpy(np.cos(x.cpu().numpy())).to(x.device)
+            >>>
+            >>> x = torch.randn(3)
+            >>> numpy_cos(x)  # calls numpy_cos_impl_cpu
+            >>>
+            >>> x_cuda = x.cuda()
+            >>> numpy_cos(x_cuda)  # calls numpy_cos_impl_cuda
+
+        """
+        if isinstance(device_types, str):
+            device_types = [device_types]
+        for device_type in device_types:
+            validate_device_type(device_type)
+
+        def inner(f):
+            for device_type in set(device_types):
+                self._check_doesnt_have_library_impl(device_type)
+                self._register_impl(device_type, f, stacklevel=_stacklevel)
+                dispatch_key = SUPPORTED_DEVICE_TYPE_TO_KEY[device_type]
+                library.impl(self._lib, self._opname, dispatch_key)(f)
+            return f
+
+        return inner
+
+    def _check_doesnt_have_library_impl(self, device_type):
+        if self._has_impl(device_type):
+            return
+        key = SUPPORTED_DEVICE_TYPE_TO_KEY[device_type]
+        if _C._dispatch_has_computed_kernel_for_dispatch_key(self._qualname, key):
+            raise RuntimeError(
+                f"impl(..., device_types={device_type}): the operator {self._qualname} "
+                f"already has an implementation for this device type via a "
+                f"pre-existing torch.library or TORCH_LIBRARY registration.")

+    def impl_factory(self) -> typing.Callable:
+        r"""Register an implementation for a factory function."""
+
+        def inner(f):
+            self._register_impl("factory", f)
+            library.impl(self._lib, self._opname, "BackendSelect")(f)
+            return f
+
+        return inner
+
+    def impl_abstract(self, _stacklevel=2) -> typing.Callable:
+        r"""Register an abstract implementation for this operator.
+
+        WARNING: please do not use this directly (and instead use the torch._custom_ops
+        APIs). Also please see the following for a detailed guide on custom ops.
+        https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
+
+        An "abstract implementation" specifies the behavior of this operator on
+        Tensors that carry no data. Given some input Tensors with certain properties
+        (sizes/strides/storage_offset/device), it specifies what the properties of
+        the output Tensors are.
+
+        The abstract implementation has the same signature as the operator.
+        It is run for both FakeTensors and meta tensors. To write an abstract
+        implementation, assume that all Tensor inputs to the operator are
+        regular CPU/CUDA/Meta tensors, but they do not have storage, and
+        you are trying to return regular CPU/CUDA/Meta tensor(s) as output.
+        The abstract implementation must consist of only PyTorch operations
+        (and may not directly access the storage or data of any input or
+        intermediate Tensors).
+
+        This API is used as a decorator (see examples).
+
+        Examples::
+            >>> import numpy as np
+            >>> from torch import Tensor
+            >>>
+            >>> # Example 1: an operator without data-dependent output shape
+            >>> @custom_op('my_library::custom_linear')
+            >>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor:
+            >>>     ...
+            >>>
+            >>> @custom_linear.impl_abstract()
+            >>> def custom_linear_abstract(x, weight, bias):
+            >>>     assert x.dim() == 2
+            >>>     assert weight.dim() == 2
+            >>>     assert bias.dim() == 1
+            >>>     assert x.shape[1] == weight.shape[1]
+            >>>     assert weight.shape[0] == bias.shape[0]
+            >>>     assert x.device == weight.device
+            >>>
+            >>>     return (x @ weight.t()) + bias
+            >>>
+            >>> # Example 2: an operator with data-dependent output shape
+            >>> @custom_op('my_library::custom_nonzero')
+            >>> def custom_nonzero(x: Tensor) -> Tensor:
+            >>>     ...
+            >>>
+            >>> @custom_nonzero.impl_abstract()
+            >>> def custom_nonzero_abstract(x):
+            >>>     # Number of nonzero-elements is data-dependent.
+            >>>     # Since we cannot peek at the data in an abstract impl,
+            >>>     # we use the ctx object to construct a new symint that
+            >>>     # represents the data-dependent size.
+            >>>     ctx = torch._custom_op.get_ctx()
+            >>>     nnz = ctx.create_unbacked_symint()
+            >>>     shape = [x.dim(), nnz]
+            >>>     result = x.new_empty(shape, dtype=torch.long)
+            >>>     return result
+            >>>
+            >>> @custom_nonzero.impl(['cpu', 'cuda'])
+            >>> def custom_nonzero_impl(x):
+            >>>     x_np = to_numpy(x)
+            >>>     res = np.stack(np.nonzero(x_np), axis=1)
+            >>>     # unbacked symbolic ints in PyTorch must be >= 2, so we
+            >>>     # constrain the range to at least 2
+            >>>     if res.shape[0] <= 1:
+            >>>         raise RuntimeError("not supported")
+            >>>     return torch.tensor(res, device=x.device)
+
+        """
+
+        def inner(f):
+            self._check_doesnt_have_library_meta_impl()
+            self._register_impl("abstract", f, stacklevel=_stacklevel)
+            location = self._get_impl("abstract").location
+
+            qualname = self._qualname
+
+            # Handle DispatchKey.Meta registration
+            @functools.wraps(f)
+            def f_with_ctx(*args, **kwargs):
+                def error_on_ctx():
+                    raise RuntimeError(
+                        f"Attempted to call get_ctx() for the meta implementation "
+                        f"for {qualname}. "
+                        f"You have presumably called get_ctx() because the operator "
+                        f"has a data-dependent output shape; if so, there is no "
+                        f"such meta implementation and this error is the correct "
+                        f"behavior. Otherwise, please remove the call to get_ctx() "
+                        f"in the implementation registered with impl_abstract "
+                        f"at {location}"
+                    )
+
+                with torch._library.abstract_impl.set_ctx_getter(error_on_ctx):
+                    return f(*args, **kwargs)
+
+            self._lib.impl(self._opname, f_with_ctx, "Meta")
+            return f
+
+        return inner
+
+    def _check_can_register_backward(self):
+        def error(detail):
+            raise RuntimeError(
+                f"Cannot use torch._custom_ops APIs to register backward "
+                f"formula for {detail}. Got operator "
+                f"{self._qualname} with schema: {schema}"
+            )
+
+        schema = self._schema
+        if schema.kind() != SchemaKind.functional:
+            error("non-functional operator")
+
+        rets = schema.returns
+        if not schema.returns:
+            error("operator with no returns")
+
+        assert len(rets) > 0
+        is_non_mutating_view = any(
+            r.annotation is not None and not r.annotation.is_write for r in rets
+        )
+        if is_non_mutating_view:
+            error("operator that returns views")
+
+        # We make assumptions about the schema's return types.
+        allowed_return_types = {
+            BaseType(BaseTy.int): "int",
+            BaseType(BaseTy.SymInt): "SymInt",
+            BaseType(BaseTy.bool): "bool",
+            BaseType(BaseTy.float): "float",
+            BaseType(BaseTy.Tensor): "Tensor",
+            ListType(BaseType(BaseTy.Tensor), None): "List[Tensor]",
+        }
+        for ret in schema.returns:
+            if ret.type in allowed_return_types:
+                continue
+            error(f"operator with return not in {list(allowed_return_types.values())} (got {ret.type})")
+
+    def _check_doesnt_have_library_autograd_impl(self):
+        if self._registered_autograd_kernel_indirection:
+            return
+
+        if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeImplicitAutograd"):
+            raise RuntimeError(
+                f"impl_backward/impl_save_for_backward: the operator {self._qualname} "
+                f"already has an implementation for this device type via a "
+                f"pre-existing registration to DispatchKey::CompositeImplicitAutograd. "
+                f"CompositeImplicitAutograd operators do not need an autograd formula; "
+                f"instead, the operator will decompose into its constituents and those "
+                f"can have autograd formulas defined on them.")
+
+        # We can improve this by adding "all Autograd<BACKEND> keys", but
+        # realistically people will just be using this API for CPU/CUDA for now.
+        for key in ["Autograd", "AutogradCPU", "AutogradCUDA"]:
+            if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, key):
+                raise RuntimeError(
+                    f"impl_backward/impl_save_for_backward: "
+                    f"the operator {self._qualname} already has an Autograd kernel "
+                    f"registered to DispatchKey::{key} via a pre-existing "
+                    f"torch.library or TORCH_LIBRARY registration. Please either "
+                    f"remove those registrations or don't use the torch._custom_ops APIs")
+
+    def _check_doesnt_have_library_meta_impl(self):
+        if self._has_impl("abstract"):
+            return
+
+        # If the user's operator is CompositeExplicitAutograd,
+        # allow them to impl_abstract. This is being pragmatic
+        # (existing custom ops may have CompositeExplicitAutograd
+        # registrations that don't work with Meta kernels, so this
+        # gives them an escape hatch).
+        if (
+            _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeExplicitAutograd")
+            and not _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "Meta")
+        ):
+            return
+
+        # Otherwise, if the user already has a Meta kernel or their
+        # op is CompositeImplicitAutograd or some other alias dispatch key,
+        # raise.
+
+        # Special case for CompositeImplicitAutograd
+        if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeImplicitAutograd"):
+            raise RuntimeError(
+                f"impl_abstract(...): the operator {self._qualname} "
+                f"already has an implementation for this device type via a "
+                f"pre-existing registration to DispatchKey::CompositeImplicitAutograd. "
+                f"CompositeImplicitAutograd operators do not need an abstract impl; "
+                f"instead, the operator will decompose into its constituents and those "
+                f"can have abstract impls defined on them.")
+
+        if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "Meta"):
+            raise RuntimeError(
+                f"impl_abstract(...): the operator {self._qualname} "
+                f"already has a DispatchKey::Meta implementation via a "
+                f"pre-existing torch.library or TORCH_LIBRARY registration. "
+                f"Please either remove that registration or don't call impl_abstract.")
+
+    # NOTE ["backward", "save_for_backward", and "autograd"]
+    # As a part of the explicit autograd API, a user must provide us
+    # a "save_for_backward" function and a "backward" function.
+    # When both of these have been provided, then we automatically
+    # construct the "autograd" kernel.
+    def _register_autograd_kernel(self):
+        assert self._has_impl("backward")
+        assert self._has_impl("save_for_backward")
+        kernel = construct_autograd_kernel(
+            self._schema,
+            self._output_differentiability,
+            self,
+            get_op(self._qualname),
+            self._get_impl("save_for_backward").func,
+            self._get_impl("backward").func)
+        self._register_impl("autograd", kernel)
+
+    def impl_save_for_backward(self, _stacklevel=2):
+        r"""Register a function that tells us what to save for backward.
+
+        Please see impl_backward for more details.
+        """
+        def inner(f):
+            self._check_can_register_backward()
+            self._check_doesnt_have_library_autograd_impl()
+            if not self._registered_autograd_kernel_indirection:
+                self._register_autograd_kernel_indirection()
+            self._register_impl("save_for_backward", f, stacklevel=_stacklevel)
+            if self._has_impl("backward"):
+                self._register_autograd_kernel()
+        return inner
+
+    def impl_backward(self, output_differentiability=None, _stacklevel=2):
+        r"""Registers a backward formula.
+
+        WARNING: if you're a user, please do not use this directly
+        (instead use the torch._custom_ops APIs).
+        Also please see the following for a detailed guide on custom ops.
+        https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
+
+        In order for the CustomOp to work with autograd, you need to register
+        a backward formula. There are two pieces to this:
+        1. You must give us a function to specify what to save for backward.
+           Call this the "save for backward" function.
+        2. You must give us a function that computes gradients. Call this the
+           "backward" function.
+
+        Use `impl_save_for_backward` to define a "save for backward" function
+        that specifies what gets saved for backward. The function should accept
+        two arguments ``(inputs, output)`` and return the quantities to be saved
|
588 |
+
for backward.
|
589 |
+
|
590 |
+
During runtime, when you call the CustomOp, PyTorch will invoke the
|
591 |
+
"save for backward" function with the inputs and output of the CustomOp.
|
592 |
+
|
593 |
+
Use `impl_backward` to define the "backward" function. The backward
|
594 |
+
function must accept ``(ctx, saved, *grads)``:
|
595 |
+
- ``ctx`` is a context object where we may provide information
|
596 |
+
- ``saved`` is exactly what gets returned from the "save for backward"
|
597 |
+
function
|
598 |
+
- ``grads`` is one or more gradients. The number of gradients matches
|
599 |
+
the number of outputs of the CustomOp.
|
600 |
+
|
601 |
+
The backward function must return a dict that maps the name of
|
602 |
+
an input to the CustomOp to its corresponding gradient. All inputs that
|
603 |
+
were declared to be Tensors in the CustomOp definition must be accounted
|
604 |
+
for in the dict. The gradient may be a Tensor or None.
|
605 |
+
|
606 |
+
"""
|
607 |
+
if output_differentiability is not None:
|
608 |
+
def yell():
|
609 |
+
raise RuntimeError(
|
610 |
+
f"impl_backward(output_differentiability): expected "
|
611 |
+
f"output_differentiability to be a list of bools with "
|
612 |
+
f"length equal to the number of outputs of this CustomOp "
|
613 |
+
f"got: {output_differentiability}")
|
614 |
+
|
615 |
+
if not isinstance(output_differentiability, list):
|
616 |
+
yell()
|
617 |
+
for diff in output_differentiability:
|
618 |
+
if not isinstance(diff, bool):
|
619 |
+
yell()
|
620 |
+
if len(self._schema.returns) != len(output_differentiability):
|
621 |
+
yell()
|
622 |
+
|
623 |
+
def inner(f):
|
624 |
+
self._check_can_register_backward()
|
625 |
+
self._check_doesnt_have_library_autograd_impl()
|
626 |
+
if not self._registered_autograd_kernel_indirection:
|
627 |
+
self._register_autograd_kernel_indirection()
|
628 |
+
self._register_impl("backward", f, stacklevel=_stacklevel)
|
629 |
+
self._output_differentiability = output_differentiability
|
630 |
+
if self._has_impl("save_for_backward"):
|
631 |
+
self._register_autograd_kernel()
|
632 |
+
return inner
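
# Example (editor's sketch, not part of the upstream file): the user-facing
# torch._custom_ops decorators funnel into impl_save_for_backward and
# impl_backward above. The op name "mylib::numpy_sin" and the function names
# are hypothetical. Note the documented contract: save_for_backward receives
# (inputs, output); backward receives (ctx, saved, *grads) and returns a dict
# keyed by input name.
def _example_backward_registration():  # hypothetical helper, never called here
    import numpy as np
    import torch
    import torch._custom_ops as custom_ops

    @custom_ops.custom_op("mylib::numpy_sin")
    def numpy_sin(x: torch.Tensor) -> torch.Tensor:
        ...

    @custom_ops.impl("mylib::numpy_sin")
    def numpy_sin_impl(x):
        return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device)

    @custom_ops.impl_save_for_backward("mylib::numpy_sin")
    def numpy_sin_save_for_backward(inputs, output):
        # `inputs` exposes the op's arguments by name
        return inputs.x

    @custom_ops.impl_backward("mylib::numpy_sin")
    def numpy_sin_backward(ctx, saved, grad):
        # d/dx sin(x) = cos(x); keys must match the op's Tensor input names
        return {"x": grad * saved.cos()}

    x = torch.randn(3, requires_grad=True)
    torch.ops.mylib.numpy_sin(x).sum().backward()
    return x.grad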


@dataclasses.dataclass
class FuncAndLocation:
    func: typing.Callable
    location: str


def find_ophandle_or_throw(cpp_ns: str, operator_name: OperatorName):
    overload_name = (
        "" if operator_name.overload_name is None else operator_name.overload_name
    )
    return _C._dispatch_find_schema_or_throw(
        f"{cpp_ns}::{str(operator_name.name)}", overload_name
    )


def validate_namespace(ns: str) -> None:
    if "." in ns:
        raise ValueError(
            f'custom_op(..., ns="{ns}"): expected ns to not contain any . (and be a '
            f"valid variable name)"
        )
    if ns in RESERVED_NS:
        raise ValueError(
            f"custom_op(..., ns='{ns}'): '{ns}' is a reserved namespace, "
            f"please choose something else."
        )


def validate_schema(schema: FunctionSchema) -> None:
    if not torch._library.utils.is_functional_schema(schema):
        raise ValueError(
            f"custom_op only supports functional operators "
            f"(ops that do not mutate any inputs, do not return "
            f"views of the inputs, and have at least one return). "
            f"Got the following non-functional schema: {schema}"
        )

    # For simplicity: don't allow self arguments
    if schema.arguments.self_arg is not None:
        raise ValueError(
            f"custom_op does not support arguments named 'self'. Please "
            f"rename your argument. Got: {schema}"
        )


def parse_qualname(qualname: str) -> typing.Tuple[str, str]:
    names = qualname.split("::", 1)
    if len(names) != 2:
        raise ValueError(f"Expected there to be a namespace in {qualname}, i.e. the "
                         f"operator name should look something like ns::foo")
    if '.' in names[1]:
        raise ValueError(f"The torch.custom_ops APIs do not handle overloads, "
                         f"i.e. operator names with '.' in them. "
                         f"Please name your operator something like ns::foo. "
                         f"Got: {qualname}")
    return names[0], names[1]


def validate_device_type(device_type: str) -> None:
    if device_type not in SUPPORTED_DEVICE_TYPE_TO_KEY:
        raise ValueError(
            f"CustomOp.impl(device_types=[{device_type}, ...]): we only support device_type "
            f"in {SUPPORTED_DEVICE_TYPE_TO_KEY.keys()}."
        )


def supported_param(param: inspect.Parameter) -> bool:
    return param.kind in (
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
        inspect.Parameter.KEYWORD_ONLY,
    )


def validate_function_matches_schema(
    schema: FunctionSchema, func: typing.Callable
) -> None:
    sig = inspect.signature(func)

    if not all(supported_param(p) for _, p in sig.parameters.items()):
        raise ValueError(
            f"custom_op(..., manual_schema)(func): positional-only args, "
            f"varargs, and kwargs are not supported. Please rewrite `func` "
            f"to not have them. Got `func` with signature: {sig}"
        )

    if (
        any(
            p.annotation is not inspect.Parameter.empty
            for _, p in sig.parameters.items()
        )
        or sig.return_annotation is not inspect.Signature.empty
    ):
        raise ValueError(
            f"custom_op(..., manual_schema)(func): When passing in a manual "
            f"schema, we expect `func` to have no type annotations to avoid "
            f"ambiguity. Got `func` with signature: {sig}"
        )

    positional = [
        (name, param)
        for name, param in sig.parameters.items()
        if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
    ]
    kwargonly = [
        (name, param)
        for name, param in sig.parameters.items()
        if param.kind == inspect.Parameter.KEYWORD_ONLY
    ]

    def error():
        raise ValueError(
            f"custom_op(..., manual_schema)(func): When passing in a manual "
            f"schema, we expect `func`'s signature to match `manual_schema` "
            f"(aside from type annotations). "
            f"func's signature: {sig}, manual_schema: {schema}"
        )

    def error_default_args():
        raise ValueError(
            f"custom_op(..., manual_schema)(func): "
            f"neither func nor manual_schema should have default "
            f"arguments. Got "
            f"func's signature: {sig}, manual_schema: {schema}"
        )

    def compare(sig_args, schema_args):
        if len(sig_args) != len(schema_args):
            error()
        for (name, param), arg in zip(sig_args, schema_args):
            if name != arg.name:
                error()
            if param.default is not inspect.Parameter.empty or arg.default is not None:
                error_default_args()

    compare(positional, schema.arguments.flat_positional)
    compare(kwargonly, schema.arguments.flat_kwarg_only)


def infer_schema(prototype_function: typing.Callable) -> str:
    sig = inspect.signature(prototype_function)

    def error_fn(what):
        raise ValueError(
            f"custom_op(...)(func): {what} " f"Got func with signature {sig}"
        )

    params = [
        parse_param(name, param, error_fn) for name, param in sig.parameters.items()
    ]
    ret = parse_return(sig.return_annotation, error_fn)
    return f"({', '.join(params)}) -> {ret}"


def parse_param(name, param, error_fn):
    if not supported_param(param):
        error_fn("We do not support positional-only args, varargs, or varkwargs.")

    if param.annotation is inspect.Parameter.empty:
        error_fn(f"Parameter {name} must have a type annotation.")

    if param.annotation not in SUPPORTED_PARAM_TYPES.keys():
        error_fn(
            f"Parameter {name} has unsupported type {param.annotation}. "
            f"The valid types are: {SUPPORTED_PARAM_TYPES.keys()}."
        )

    if param.default is not inspect.Parameter.empty:
        error_fn(
            f"Parameter {name} has a default value; this is not supported. "
            f"If you want to use default values then create a function with "
            f"default values that calls the CustomOp"
        )

    return f"{SUPPORTED_PARAM_TYPES[param.annotation]} {name}"


def derived_types(
    base_type, cpp_type, list_base, optional_base_list, optional_list_base
):
    result = [
        (base_type, cpp_type),
        (typing.Optional[base_type], f"{cpp_type}?"),
    ]
    if list_base:
        result.append((typing.Sequence[base_type], f"{cpp_type}[]"))  # type: ignore[valid-type]
    if optional_base_list:
        result.append((typing.Sequence[typing.Optional[base_type]], f"{cpp_type}?[]"))  # type: ignore[valid-type]
    if optional_list_base:
        result.append((typing.Optional[typing.Sequence[base_type]], f"{cpp_type}[]?"))  # type: ignore[valid-type]
    return result


def get_supported_param_types():
    data = [
        # (python type, schema type, type[] variant, type?[] variant, type[]? variant)
        (torch.Tensor, "Tensor", True, True, False),
        (int, "SymInt", True, False, True),
        (float, "float", True, False, True),
        (bool, "bool", True, False, True),
        (str, "str", False, False, False),
        (torch.types.Number, "Scalar", True, False, False),
        (torch.dtype, "ScalarType", False, False, False),
        (torch.device, "Device", False, False, False),
    ]
    result = []
    for line in data:
        result.extend(derived_types(*line))
    return dict(result)


SUPPORTED_RETURN_TYPES = {
    torch.Tensor: "Tensor",
    typing.List[torch.Tensor]: "Tensor[]",
    int: "SymInt",
    float: "float",
    bool: "bool",
    torch.types.Number: "Scalar",
}


def parse_return(annotation, error_fn):
    origin = typing.get_origin(annotation)
    if origin is not tuple:
        if annotation not in SUPPORTED_RETURN_TYPES.keys():
            error_fn(
                f"Return has unsupported type {annotation}. "
                f"The valid types are: {SUPPORTED_RETURN_TYPES}."
            )
        return SUPPORTED_RETURN_TYPES[annotation]

    args = typing.get_args(annotation)
    for arg in args:
        if arg not in SUPPORTED_RETURN_TYPES:
            error_fn(
                f"Return has unsupported type {annotation}. "
                f"The valid types are: {SUPPORTED_RETURN_TYPES}."
            )

    return "(" + ", ".join([SUPPORTED_RETURN_TYPES[arg] for arg in args]) + ")"


SUPPORTED_PARAM_TYPES = get_supported_param_types()
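
# Example (editor's sketch, not part of the upstream file): the
# (torch.Tensor, "Tensor", True, True, False) row above expands via
# derived_types into these annotation -> schema-type entries.
def _example_tensor_param_types():  # hypothetical helper, never called here
    assert SUPPORTED_PARAM_TYPES[torch.Tensor] == "Tensor"
    assert SUPPORTED_PARAM_TYPES[typing.Optional[torch.Tensor]] == "Tensor?"
    assert SUPPORTED_PARAM_TYPES[typing.Sequence[torch.Tensor]] == "Tensor[]"
    assert SUPPORTED_PARAM_TYPES[typing.Sequence[typing.Optional[torch.Tensor]]] == "Tensor?[]"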


def report_error_callback(custom_op: typing.Any, key: str) -> None:
    if key == "Undefined":
        raise NotImplementedError(
            f"{custom_op}: There were no Tensor inputs to this operator "
            f"(e.g. you passed an empty list of Tensors). If your operator is a "
            f"factory function (that is, it takes no Tensors and constructs "
            f"a new one), then please use CustomOp.impl_factory to register "
            f"an implementation for it"
        )
    if key == "Meta":
        raise NotImplementedError(
            f"{custom_op}: when running with device='Meta' tensors: there is no "
            f"abstract impl registered for this CustomOp. Please register one via "
            f"CustomOp.impl_abstract to get this CustomOp to work with Meta tensors"
        )
    if key in ("CPU", "CUDA"):
        device = key.lower()
        raise NotImplementedError(
            f"{custom_op}: when running with device='{device}' tensors: there is no "
            f"{device} impl registered for this CustomOp. Please register one via "
            f"CustomOp.impl(device_type='{device}')"
        )
    raise NotImplementedError(
        f"{custom_op}: No implementation for dispatch key {key}. It is likely "
        f"that we have not added this functionality yet, please either open an "
        f"issue or if you're feeling adventurous, use the low-level "
        f"torch.library API"
    )


def custom_op_from_existing(op):
    ns = op.namespace
    lib = torch.library.Library(ns, "FRAGMENT")
    name = op.name().split("::")[-1]
    schema_str = str(op._schema)
    # CustomOp expects the schema string without the namespace
    schema_str = schema_str.split("::")[-1]
    schema = FunctionSchema.parse(schema_str)
    return CustomOp(lib, ns, schema, name, op, _private_access=True)


def get_op(qualname):
    def error_not_found():
        raise ValueError(
            f"Could not find the operator {qualname}. Please make sure you have "
            f"already registered the operator and (if registered from C++) "
            f"loaded it via torch.ops.load_library.")

    ns, name = parse_qualname(qualname)
    if not hasattr(torch.ops, ns):
        error_not_found()
    opnamespace = getattr(torch.ops, ns)
    if not hasattr(opnamespace, name):
        error_not_found()
    packet = getattr(opnamespace, name)
    if not hasattr(packet, 'default'):
        error_not_found()
    return packet.default


def _find_custom_op(qualname, also_check_torch_library=False):
    if qualname in global_registry:
        return global_registry[qualname]
    if not also_check_torch_library:
        raise RuntimeError(
            f"Could not find custom op \"{qualname}\". Did you register it via "
            f"the torch._custom_ops API?")
    overload = get_op(qualname)
    result = custom_op_from_existing(overload)
    return result


def get_abstract_impl(qualname):
    if qualname not in torch._custom_op.impl.global_registry:
        return None
    custom_op = torch._custom_op.impl.global_registry[qualname]
    if custom_op is None:
        return None
    if not custom_op._has_impl("abstract"):
        return None
    return custom_op._get_impl("abstract").func


def _custom_op_with_schema(qualname, schema, needs_fixed_stride_order=True):
    ns, name = qualname.split("::")
    schema_str = f"{name}{schema}"
    function_schema = FunctionSchema.parse(schema_str)
    validate_schema(function_schema)
    tags = [torch._C.Tag.needs_fixed_stride_order] if needs_fixed_stride_order else []
    lib = library.Library(ns, "FRAGMENT")
    lib.define(schema_str, tags=tags)
    ophandle = find_ophandle_or_throw(ns, function_schema.name)
    result = CustomOp(lib, ns, function_schema, name, ophandle, _private_access=True)
    result._register_autograd_kernel_indirection()

    torch._C._dispatch_set_report_error_callback(
        ophandle, functools.partial(report_error_callback, weakref.proxy(result))
    )
    return get_op(qualname)
venv/lib/python3.10/site-packages/torch/_prims_common/__init__.py
ADDED
@@ -0,0 +1,1985 @@
from __future__ import annotations

import operator
import warnings
import weakref

from contextlib import nullcontext
from enum import Enum
from functools import cmp_to_key, reduce
from typing import (
    Any,
    Callable,
    cast,
    List,
    NamedTuple,
    Optional,
    overload,
    Sequence,
    Tuple,
    Type,
    TYPE_CHECKING,
    Union,
)

from typing_extensions import TypeAlias


if TYPE_CHECKING:
    # Import the following modules during type checking to enable code intelligence features,
    # such as auto-completion in tools like pylance, even when these modules are not explicitly
    # imported in user code.

    import sympy

import torch
from torch import sym_float, sym_int, sym_max


ShapeType: TypeAlias = Union[torch.Size, List[int], Tuple[int, ...]]
StrideType: TypeAlias = Union[List[int], Tuple[int, ...]]
DimsType: TypeAlias = Union[int, List[int], Tuple[int, ...]]
DimsSequenceType: TypeAlias = Union[List[int], Tuple[int, ...]]
# TODO: Type[torch.SymInt], Type[torch.SymFloat]
NumberTypeType: TypeAlias = Union[Type[bool], Type[int], Type[float], Type[complex]]
# TODO: This needs a lot more type annotations
# NumberType = Union[bool, int, float, complex, torch.SymInt, torch.SymFloat]
NumberType: TypeAlias = Union[bool, int, float, complex]
RealNumberType: TypeAlias = Union[bool, int, float]

Number = (bool, int, float, complex, torch.SymInt, torch.SymFloat)
# I don't call it Integral because numbers.Integral includes bool, but IntLike
# does not
Dim = int
IntLike = (int, torch.SymInt)
FloatLike = (float, torch.SymFloat)
IntWithoutSymInt = int
FloatWithoutSymFloat = float
DeviceLikeType: TypeAlias = Union[str, torch.device, int]
Tensor = torch.Tensor


torch_function_passthrough = {
    torch.device,
    torch.sym_not,
    torch.sym_float,
    torch.sym_int,
    torch.sym_max,
    torch.sym_min,
    torch._sym_sqrt,  # type: ignore[attr-defined]
    torch.sym_ite,
    torch.Tensor.dim,
    torch.Tensor.ndim.__get__,  # type: ignore[attr-defined]
    torch.Tensor.numel,
    torch.Tensor.size,
    torch.Tensor.storage_offset,
    torch.Tensor.stride,
    torch.Tensor.dtype.__get__,  # type: ignore[attr-defined]
    torch.Tensor.is_sparse.__get__,  # type: ignore[attr-defined]
    torch.Tensor.shape.__get__,  # type: ignore[attr-defined]
    torch.Tensor.device.__get__,  # type: ignore[attr-defined]
    torch.Tensor.requires_grad.__get__,  # type: ignore[attr-defined]
    torch.Tensor.layout.__get__,  # type: ignore[attr-defined]
    torch.Tensor.is_contiguous,
    # For TorchRefsMode only
    torch.Tensor.__format__,
    torch.Tensor.__repr__,
    torch.Tensor.requires_grad.__get__,  # type: ignore[attr-defined]
}


TensorLikeType = torch.Tensor
TensorLike = torch.Tensor
TensorSequenceType: TypeAlias = Union[List[TensorLikeType], Tuple[TensorLikeType, ...]]
TensorOrNumberLikeType: TypeAlias = Union[TensorLikeType, NumberType]

CustomOutParamAnnotation = "__custom_out_param__"

def same_shape(a: ShapeType, b: ShapeType, *, allow_rhs_unbacked=False) -> bool:
    from torch.fx.experimental.symbolic_shapes import guard_size_oblivious

    if len(a) != len(b):
        return False

    for x, y in zip(a, b):
        if allow_rhs_unbacked:
            # TODO: We should check that the symbols are consistent
            # with each other
            if isinstance(y, torch.SymInt):
                continue
        # NB: Naively, you would not expect to have to do an oblivious guard
        # here because there is seemingly no broadcasting here, but in fact we
        # use this in some situations to determine if we need to do an expand
        # on the tensor because they don't line up, so you can definitely end
        # up trying to prove u0 != 1 in this situation. See
        # python test/test_proxy_tensor.py -k test_cumsum_unbacked
        if guard_size_oblivious(x != y):
            return False

    return True


def _maybe_get_pytype(t):
    if t is torch.SymFloat:
        return float
    elif t is torch.SymInt:
        return int
    elif t is torch.SymBool:
        return bool
    else:
        return t


# TODO: look at using torch.testing.assert_close instead with an option
# to just compare metadata
def compare_tensor_meta(
    a: TensorLikeType,
    b: TensorLikeType,
    check_strides=False,
    *,
    allow_rhs_unbacked=False,
    check_conj=True,
):
    """
    Checks that two tensor likes have the same shape,
    dtype and device.

    In the future this will validate additional metadata, like
    strides.
    """
    assert isinstance(a, TensorLike)
    assert isinstance(b, TensorLike)

    if not same_shape(a.shape, b.shape, allow_rhs_unbacked=allow_rhs_unbacked):
        msg = f"Shapes {a.shape} and {b.shape} are not equal!"
        raise AssertionError(msg)

    if a.dtype != b.dtype:
        msg = f"Dtypes {a.dtype} and {b.dtype} are not equal!"
        raise AssertionError(msg)

    if a.device != b.device:
        # Handles special cuda:0 vs cuda case
        # TODO: we should review why this happens and see about fixing it
        if (str(a.device) == "cuda:0" or str(a.device) == "cuda") and (
            str(b.device) == "cuda:0" or str(b.device) == "cuda"
        ):
            pass
        else:
            msg = f"Devices {a.device} and {b.device} are not equal!"
            raise AssertionError(msg)

    # Stride checking is currently disabled, see https://github.com/pytorch/pytorch/issues/78050
    if check_strides:
        same_strides, idx = check_significant_strides(a, b)
        if not same_strides:
            msg = f"Stride mismatch! Strides are {a.stride()} and {b.stride()} (mismatched at {idx})!"
            raise RuntimeError(msg)

        if a.storage_offset() != b.storage_offset():
            msg = f"Storage offset mismatch! Storage offsets are {a.storage_offset()} and {b.storage_offset()}!"
            raise RuntimeError(msg)

    if check_conj:
        if a.is_conj() != b.is_conj():
            raise RuntimeError(
                f"Conj mismatch! is_conj is set to {a.is_conj()} and {b.is_conj()}"
            )

    if a.is_neg() != b.is_neg():
        raise RuntimeError(
            f"Neg mismatch! is_neg is set to {a.is_neg()} and {b.is_neg()}"
        )
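
# Example (editor's sketch, not part of the upstream file): compare_tensor_meta
# raises on any metadata mismatch; matching tensors pass silently regardless
# of their values.
def _example_compare_tensor_meta():  # hypothetical helper, never called here
    a = torch.randn(2, 3)
    b = torch.zeros(2, 3)
    compare_tensor_meta(a, b)  # same shape/dtype/device: no error
    try:
        compare_tensor_meta(a, b.to(torch.float64))
    except AssertionError as e:
        return e  # "Dtypes torch.float32 and torch.float64 are not equal!"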


def _check_strides_helper(
    a: TensorLikeType, b: TensorLikeType, *, only_cuda=True, significant_only=True
) -> Tuple[bool, Optional[int]]:
    # NOTE: only on CUDA because CPU elementwise strides are incorrect in PyTorch
    # See https://github.com/pytorch/pytorch/issues/77553
    # Only compares strides that are "meaningful" -- strides for dimensions with length > 1
    # and for tensors with more than one element
    if (
        not only_cuda or a.device.type == "cuda" or b.device.type == "cuda"
    ) and a.numel() > 0:
        for idx in range(a.ndim):
            check = not significant_only or a.shape[idx] > 1
            if a.stride()[idx] != b.stride()[idx] and check:
                return False, idx

    return True, None


def check_significant_strides(
    a: TensorLikeType, b: TensorLikeType, *, only_cuda=True
) -> Tuple[bool, Optional[int]]:
    return _check_strides_helper(a, b, only_cuda=only_cuda, significant_only=True)


def check_all_strides(
    a: TensorLikeType, b: TensorLikeType, *, only_cuda=True
) -> Tuple[bool, Optional[int]]:
    return _check_strides_helper(a, b, only_cuda=only_cuda, significant_only=False)


# This function is equivalent to compute_contiguous() from TensorImpl.cpp
def is_contiguous(a: TensorLikeType) -> bool:
    """
    Tests whether a tensor is contiguous or not.

    Tensors are contiguous when they have no elements,
    one element, or when they have "nested" strides.
    """
    from torch.fx.experimental.symbolic_shapes import guard_size_oblivious

    if guard_size_oblivious(a.numel() < 2):
        return True

    expected_stride = 1
    for x, y in reversed(tuple(zip(a.shape, a.stride()))):
        # Skips checking strides when a dimension has length 1
        if guard_size_oblivious(x == 1):
            continue

        if y != expected_stride:
            return False
        expected_stride = expected_stride * x

    return True
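
# Example (editor's sketch, not part of the upstream file): the "nested"
# stride rule above means each stride equals the product of the lengths of
# all dimensions to its right.
def _example_is_contiguous():  # hypothetical helper, never called here
    a = torch.empty(2, 3, 4)  # strides (12, 4, 1): 12 = 3*4, 4 = 4*1
    assert is_contiguous(a)
    assert not is_contiguous(a.transpose(0, 1))  # strides (4, 12, 1)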


# This function is equivalent to compute_channels_last_contiguous_2d() in TensorImpl.cpp
def is_channels_last_contiguous_2d(a: Tensor) -> bool:
    # NHWC or not channels last 2D contiguous
    if a.ndim != 4:
        return False

    expected_stride = 1
    for idx in (1, 3, 2, 0):
        length = a.shape[idx]
        if length == 1:
            continue

        stride = a.stride()[idx]
        if stride != expected_stride:
            return False

        expected_stride *= length

    return True


def is_channels_last_contiguous_3d(a: Tensor) -> bool:
    # NDHWC or not channels last 3D contiguous
    if a.ndim != 5:
        return False

    expected_stride = 1
    for idx in (1, 4, 3, 2, 0):
        length = a.shape[idx]
        if length == 1:
            continue

        stride = a.stride()[idx]
        if stride != expected_stride:
            return False

        expected_stride *= length

    return True


_memory_formats = {
    torch.contiguous_format,
    torch.preserve_format,
    torch.channels_last,
    torch.channels_last_3d,
}


def validate_memory_format(memory_format: torch.memory_format):
    torch._check(
        memory_format in _memory_formats,
        lambda: f"Received unknown memory format {memory_format}!",
    )


def is_contiguous_for_memory_format(  # type: ignore[return]
    a: Tensor, *, memory_format: torch.memory_format
) -> bool:
    validate_memory_format(memory_format)

    if memory_format == torch.contiguous_format:
        return is_contiguous(a)
    if memory_format == torch.channels_last:
        return is_channels_last_contiguous_2d(a)
    if memory_format == torch.channels_last_3d:
        return is_channels_last_contiguous_3d(a)

    torch._check(
        False,
        lambda: f"is_contiguous received unsupported memory format {memory_format}",
    )


# NOTE: it is currently unclear how tensors with no elements should be treated
# by the channels-last checks.
def is_channels_last_contiguous(a: Tensor) -> bool:
    """
    True when a tensor is channels-last contiguous.

    This requires that:

    - the tensor is conceptually either 4 (NHWC) or 5 (NDHWC) dimensions
    - if we name the tensor's dimensions NCHW or NCDHW, then the strides are such that the
      stride of the 'C' dimension (Cs) is 1 and the strides corresponding to
      each dimension (Xs) can be ordered Cs <= Ws <= Hs <= (Ds) <= Ns and are
      "nested" -- so Ws = Cs * Cl, where Cl is the length of the 'C' dimension,
      for example.
    """
    return is_channels_last_contiguous_2d(a) or is_channels_last_contiguous_3d(a)


def is_non_overlapping_and_dense(a: Tensor) -> bool:
    """
    True when a tensor is non-overlapping and dense.

    A tensor is non-overlapping and dense when there exists a permutation of
    its dimensions that is contiguous.
    """

    from torch.fx.experimental.symbolic_shapes import guard_size_oblivious

    if a.is_sparse:
        return False

    # Short-circuits if the tensor is already contiguous or channels-last contiguous
    if is_contiguous(a) or is_channels_last_contiguous(a):
        return True

    # The following is equivalent to compute_non_overlapping_and_dense in TensorImpl.cpp

    # Short-circuits for tensors of rank one, which are
    # non-overlapping and "dense" if their stride is one
    if a.ndim == 1:
        return a.stride()[0] == 1

    # Checks that there exists a permutation of the strides s.t. the tensor would be contiguous
    # Sorts (length, stride) pairs by stride
    #
    # This sort is done in a size-oblivious way, which helps if we do a
    # comparison like 2048*u0 > u0; we just want this to return True
    # (and not worry about what if u0 is zero).
    class K(NamedTuple):
        size: int
        stride: int

        def __lt__(self, other):
            return guard_size_oblivious(self.stride < other.stride)

        def __gt__(self, other):
            return guard_size_oblivious(self.stride > other.stride)

        def __le__(self, other):
            return guard_size_oblivious(self.stride <= other.stride)

        def __ge__(self, other):
            return guard_size_oblivious(self.stride >= other.stride)

        def __eq__(self, other):
            return guard_size_oblivious(self.stride == other.stride)

    lengths_and_strides = sorted(map(K, a.shape, a.stride()))

    expected_stride = 1
    for length, stride in lengths_and_strides:
        if guard_size_oblivious(length == 1):
            continue

        if stride != expected_stride:
            return False

        expected_stride *= length

    return True
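
# Example (editor's sketch, not part of the upstream file): a transposed
# contiguous tensor is not contiguous, but permuting its dimensions back
# recovers a contiguous layout, so it is still non-overlapping and dense;
# a strided slice is neither.
def _example_non_overlapping_and_dense():  # hypothetical helper, never called here
    t = torch.empty(2, 3).t()  # shape (3, 2), strides (1, 3)
    assert not is_contiguous(t)
    assert is_non_overlapping_and_dense(t)
    s = torch.empty(10)[::2]  # shape (5,), stride (2,): skips elements
    assert not is_non_overlapping_and_dense(s)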


# NOTE: Based on the implementation in TensorIterator.cpp, but note that
# the note [Computing output strides] is incorrect, because it
# says that strides will be preserved even if they are not
# "non overlapping and dense", but this is incorrect. The
# outputs of elementwise operations are always given
# non overlapping and dense strides.
# This is also INCORRECT because it does not model TensorIterator's
# short-circuit, which can cause different strides.
def compute_elementwise_output_logical_to_physical_perm(
    *tensors, _skip_checks=False
) -> List[int]:
    from torch.fx.experimental.symbolic_shapes import guard_size_oblivious

    if not _skip_checks and len(tensors) == 0:
        msg = "Can't compute elementwise output strides for zero tensors!"
        raise ValueError(msg)

    if not _skip_checks:
        check_same_shape(*tensors, allow_cpu_scalar_tensors=True)

    # Filters the tensors to actual tensors
    if not _skip_checks:
        tensors = tuple(
            a
            for a in tensors
            if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a)
        )

    # Short-circuits for CPU scalar case
    if len(tensors) == 0:
        return []

    # Short-circuits for shapes with zero or one dimensions
    # TODO: are these necessary?
    ndim = tensors[0].ndim
    if ndim == 0:
        return []
    if ndim == 1:
        return [0]

    # Short-circuits if contiguous, following the fake fast path.
    # This reduces the number of guards we end up making
    # TODO: do channels last too
    is_contiguous = True
    for t in tensors:
        is_contiguous = is_contiguous and t.is_contiguous(
            memory_format=torch.contiguous_format
        )

    if is_contiguous:
        return list(range(ndim))

    shape = tensors[0].shape

    def should_swap(idx_a, idx_b):
        for tensor in tensors:
            stride_a = tensor.stride()[idx_a]
            stride_b = tensor.stride()[idx_b]

            if guard_size_oblivious(stride_a == 0) or guard_size_oblivious(
                stride_b == 0
            ):
                continue

            if guard_size_oblivious(stride_a < stride_b):
                return -1

            if guard_size_oblivious(stride_a > stride_b):
                return 1

            # stride_a == stride_b
            if guard_size_oblivious(shape[idx_a] > shape[idx_b]):
                return 1

        # Note: this case is hit if all strides are zero,
        # or all strides are equal and all dimensions have the same length
        return 0

    # The "sort" order for the permutation is back-to-front, but
    # the natural order for permutations is front-to-back. Do the
    # sorting back-to-front and then reverse it on output.
    #
    # also, note this returns the logical to physical shape permutation
    perm = list(reversed(range(ndim)))

    # insertion sort with support for ambiguous comparisons
    for i in range(1, ndim):
        dim1 = i
        for dim0 in reversed(range(i)):
            comparison = should_swap(perm[dim0], perm[dim1])
            if comparison > 0:
                perm[dim0], perm[dim1] = perm[dim1], perm[dim0]
                dim1 = dim0
            elif comparison < 0:
                break

    return list(reversed(perm))


def compute_elementwise_output_strides(*tensors) -> Tuple[int, ...]:
    """
    Computes the output strides for elementwise operations.
    """
    if len(tensors) == 0:
        msg = "Can't compute elementwise output strides for zero tensors!"
        raise ValueError(msg)

    check_same_shape(*tensors, allow_cpu_scalar_tensors=True)

    # Filters the tensors to actual tensors
    tensors = tuple(
        a for a in tensors if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a)
    )

    # Short-circuits for CPU scalar case
    if len(tensors) == 0:
        return ()

    ndim = tensors[0].ndim
    shape = tensors[0].shape

    if ndim == 0:
        return ()
    if ndim == 1:
        return (1,)

    logical_to_physical_perm = compute_elementwise_output_logical_to_physical_perm(
        *tensors, _skip_checks=True
    )
    permuted_shape = apply_perm(shape, logical_to_physical_perm)  # to physical

    new_strides = make_contiguous_strides_for(permuted_shape)
    permuted_strides = apply_perm(
        new_strides, invert_perm(logical_to_physical_perm)
    )  # to logical

    return tuple(permuted_strides)
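
# Example (editor's sketch, not part of the upstream file): for a
# channels-last NHWC input, the computed permutation orders dimensions by
# increasing stride, so the output strides reproduce the channels-last
# layout rather than the default contiguous one.
def _example_output_strides():  # hypothetical helper, never called here
    a = torch.empty(2, 3, 4, 5).contiguous(memory_format=torch.channels_last)
    # a.stride() == (60, 1, 15, 3); the elementwise output preserves it
    assert compute_elementwise_output_strides(a) == a.stride()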


# Identity permutation is [0, 1, 2]
def apply_perm(inp, perm):
    ndim = len(inp)
    permuted_inp = [-1] * ndim
    for idx, x in enumerate(perm):
        permuted_inp[idx] = inp[x]
    return permuted_inp


def invert_perm(perm):
    ndim = len(perm)
    new_perm = [-1] * ndim
    for idx, x in enumerate(perm):
        new_perm[x] = idx
    return new_perm


#
# Common helper functions
#


def validate_dim_length(length: int):
    """
    Validates that an object represents a valid
    dimension length.
    """

    if isinstance(length, (int, torch.SymInt)):
        torch._check_is_size(length)
    else:
        # sometimes called with sympy expression by inductor
        assert length >= 0


def validate_shape(shape: ShapeType):
    """
    Validates that a sequence represents a valid shape.
    """

    assert isinstance(shape, Sequence), type(shape)
    for l in shape:
        validate_dim_length(l)


def validate_strides(strides: StrideType):
    """
    Verifies the object specifies valid strides.
    """

    assert isinstance(strides, Sequence)
    for stride in strides:
        assert stride >= 0


def validate_idx(rank: int, idx: int):
    """
    Validates that idx is a valid index for the given shape.
    Assumes the index is already canonicalized.
    """

    assert isinstance(idx, Dim)
    assert isinstance(rank, Dim)

    assert idx >= 0 and idx < rank or idx == 0


def validate_dimension_indices(rank: int, indices: DimsSequenceType):
    for idx in indices:
        validate_idx(rank, idx)


def validate_exclusive_idx(rank: int, ex_idx: int):
    """
    Validates that ex_idx is a valid exclusive index
    for the given shape.
    """

    assert isinstance(ex_idx, Dim)
    assert isinstance(rank, Dim)
    assert ex_idx > 0 and ex_idx <= rank


# "Wraps" a dim (up to one time) for the given rank, allowing dims to be
# specified using negative indices. If `wrap_scalar` is true then scalar
# tensors of rank 0 will allow dimensions in the range [-1, 0]. Otherwise,
# idx should be in the range [-rank, rank-1].
def canonicalize_dim(rank: int, idx: int, wrap_scalar: bool = True) -> int:
    if rank < 0:
        msg = f"Rank cannot be negative but got {rank}"
        raise IndexError(msg)

    if rank == 0:
        if not wrap_scalar:
            msg = f"Dimension specified as {idx} but tensor has no dimensions"
            raise IndexError(msg)
        rank = 1

    if idx >= 0 and idx < rank:
        return idx

    if idx < 0:
        _idx = idx + rank
    else:
        _idx = idx

    if _idx < 0 or _idx >= rank:
        # Same error message as in aten/src/ATen/WrapDimUtils.h:49
        msg = f"Dimension out of range (expected to be in range of [{-rank}, {rank - 1}], but got {idx})"
        raise IndexError(msg)

    return _idx
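
# Example (editor's sketch, not part of the upstream file): negative
# dimensions wrap once; rank-0 tensors are treated as rank 1 when
# wrap_scalar is True, so only -1 and 0 are accepted for them.
def _example_canonicalize_dim():  # hypothetical helper, never called here
    assert canonicalize_dim(4, -1) == 3
    assert canonicalize_dim(4, 2) == 2
    assert canonicalize_dim(0, -1) == 0  # scalar tensor, wrap_scalar=True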
|
658 |
+
|
659 |
+
|
660 |
+
# Takes a dimension or sequence of dimensions and "wraps" them,
|
661 |
+
# mapping negative offsets to positive ones
|
662 |
+
@overload
|
663 |
+
def canonicalize_dims(
|
664 |
+
rank: int, indices: Sequence[int], wrap_scalar: bool = True
|
665 |
+
) -> Tuple[int, ...]:
|
666 |
+
pass
|
667 |
+
|
668 |
+
|
669 |
+
@overload
|
670 |
+
def canonicalize_dims(rank: int, indices: int, wrap_scalar: bool = True) -> int:
|
671 |
+
pass
|
672 |
+
|
673 |
+
|
674 |
+
def canonicalize_dims(rank, indices, wrap_scalar=True):
|
675 |
+
if isinstance(indices, Dim):
|
676 |
+
return canonicalize_dim(rank, indices, wrap_scalar)
|
677 |
+
|
678 |
+
return tuple(canonicalize_dim(rank, x, wrap_scalar) for x in indices)
|
679 |
+
|
680 |
+
|
681 |
+
def is_valid_permutation(rank: int, perm: DimsSequenceType) -> bool:
|
682 |
+
"""
|
683 |
+
Validates that perm is a permutation of length rank.
|
684 |
+
"""
|
685 |
+
|
686 |
+
if not isinstance(perm, Sequence):
|
687 |
+
return False
|
688 |
+
|
689 |
+
if not (tuple(sorted(perm)) == tuple(range(0, rank))):
|
690 |
+
return False
|
691 |
+
|
692 |
+
return True
|
693 |
+
|
694 |
+
|
695 |
+
def is_same_shape(a: Sequence, b: Sequence) -> bool:
|
696 |
+
"""
|
697 |
+
Compares two shapes a and b, returning True if they are the same
|
698 |
+
(their ranks and corresponding lengths match) and False otherwise.
|
699 |
+
"""
|
700 |
+
|
701 |
+
return tuple(a) == tuple(b)
|
702 |
+
|
703 |
+
|
704 |
+
def is_cpu_scalar_tensor(a: Any) -> bool:
|
705 |
+
return isinstance(a, TensorLike) and a.ndim == 0 and a.device.type == "cpu"
|
706 |
+
|
707 |
+
|
708 |
+
def check_same_device(*args, allow_cpu_scalar_tensors):
|
709 |
+
"""
|
710 |
+
Checks that all Tensors in args have the same device.
|
711 |
+
|
712 |
+
Raises a RuntimeError when:
|
713 |
+
- args contains an object whose type is not Tensor or Number
|
714 |
+
- two Tensor objects in args have different devices, unless one is a CPU scalar tensor and allow_cpu_scalar_tensors is True
|
715 |
+
"""
|
716 |
+
# Short-circuits if all (one or fewer) arguments are trivially on the same device
|
717 |
+
if len(args) <= 1:
        return

    # Note: cannot initialize device to the first arg's device (it may not have one)
    device = None
    for arg in args:
        if isinstance(arg, Number):
            continue
        elif isinstance(arg, TensorLike):
            if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
                continue

            if device is None:
                device = arg.device

            if device != arg.device:
                msg = (
                    "Tensor on device "
                    + str(arg.device)
                    + " is not on the expected device "
                    + str(device)
                    + "!"
                )
                raise RuntimeError(msg)
        else:
            msg = (
                "Unexpected type when checking for same device, " + str(type(arg)) + "!"
            )
            raise RuntimeError(msg)


def canonicalize_device(device: DeviceLikeType) -> torch.device:
    if isinstance(device, torch.device):
        return device

    assert isinstance(device, str)
    return torch.device(device)


# Asserts if any of the following are true:
# - a non-scalar or non-Tensor is given
# - the shape of any tensors is distinct
def check_same_shape(*args, allow_cpu_scalar_tensors: bool):
    """
    Checks that all Tensors in args have the same shape.

    Raises a RuntimeError when:
    - args contains an object whose type is not Tensor or Number
    - two Tensor objects in args have different shapes
    """
    shape = None

    for arg in args:
        if isinstance(arg, Number):
            continue
        elif isinstance(arg, TensorLike):
            if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
                continue

            if shape is None:
                shape = arg.shape

            if not is_same_shape(shape, arg.shape):
                msg = f"Shape {arg.shape} is not the expected shape {shape}!"
                raise RuntimeError(msg)
        else:
            msg = (
                "Unexpected type when checking for same shape, " + str(type(arg)) + "!"
            )
            raise RuntimeError(msg)


# Acquires a common shape, if it exists, from one or more tensor arguments,
# filtering number arguments
def extract_shape(*args, allow_cpu_scalar_tensors: bool) -> Optional[ShapeType]:
    shape = None
    scalar_shape = None

    for arg in args:
        if isinstance(arg, Number):
            continue
        elif isinstance(arg, TensorLike):
            if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
                scalar_shape = arg.shape
                continue

            if shape is None:
                shape = arg.shape

            if not is_same_shape(shape, arg.shape):
                return None
        else:
            return None

    return shape if shape is not None else scalar_shape


# Extracts dimensions that might be passed either as a list/tuple or as varargs.
# A typical case is Tensor.permute .
def extract_dims_from_varargs(
    dims: Union[DimsSequenceType, Tuple[DimsSequenceType, ...]]
) -> DimsSequenceType:
    if dims and isinstance(dims[0], Sequence):
        assert len(dims) == 1
        dims = cast(Tuple[DimsSequenceType], dims)
        return dims[0]
    else:
        return cast(DimsSequenceType, dims)


def extract_shape_from_varargs(
    shape: Union[ShapeType, Tuple[ShapeType]],
    validate=True,
) -> Tuple[int, ...]:
    """
    Returns a shape from varargs.

    In PyTorch, operations that accept shapes often accept them as varargs, like
    foo(*shape). However a user can pass the shape either as individual integers,
    like this:

      foo(1, 2, 3)

    or as a single sequence of integers:

      foo((1, 2, 3))

    In the first case shape will be a tuple of integers, and in the second case
    it's a tuple containing a tuple of integers. This validates those inputs and
    canonicalizes them to a tuple of integers.
    """

    # Handles tuple unwrapping
    if len(shape) == 1 and isinstance(shape[0], Sequence):
        shape = shape[0]

    if validate:
        validate_shape(shape)  # type: ignore[arg-type]
    return shape  # type: ignore[return-value]


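# Illustrative sketch (not part of the upstream file): both vararg call styles
# canonicalize to the same tuple. Assumes torch._prims_common is importable.
#
# >>> from torch._prims_common import extract_shape_from_varargs
# >>> extract_shape_from_varargs((1, 2, 3))
# (1, 2, 3)
# >>> extract_shape_from_varargs(((1, 2, 3),))
# (1, 2, 3)

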
def infer_size_shapes(a: ShapeType, b: ShapeType) -> Tuple[int, ...]:
    ndim = max(len(a), len(b))
    expandedSizes = [0] * ndim

    for i in range(ndim - 1, -1, -1):
        offset = ndim - 1 - i
        dimA = len(a) - 1 - offset
        dimB = len(b) - 1 - offset
        sizeA = a[dimA] if dimA >= 0 else 1
        sizeB = b[dimB] if dimB >= 0 else 1

        torch._check(
            (sizeA == sizeB) or (sizeA == 1) or (sizeB == 1),
            lambda: (
                f"The size of tensor a ({sizeA}) must match the size of "
                f"tensor b ({sizeB}) at non-jagged dimension {i}"
            ),
        )

        # 1s map to the other size (even 0)
        expandedSizes[i] = sizeB if sizeA == 1 else sizeA

    return tuple(expandedSizes)


def infer_size(shape: ShapeType, numel: int) -> Tuple[int, ...]:
    """
    Infers the size of a dim with size -1, if it exists.
    Also checks that new shape is compatible with the number of elements.
    """
    dim = None
    newsize = 1
    for i, d in enumerate(shape):
        if d == -1:
            torch._check(dim is None, lambda: "only one dimension can be inferred")
            dim = i
        elif d >= 0:
            newsize *= d
        else:
            torch._check(False, lambda: f"invalid shape dimension {d}")
    if dim is None:
        torch._check(
            numel == newsize,
            lambda: f"shape '{list(shape)}' is invalid for input of size {numel}",
        )
    else:
        from torch.fx.experimental.symbolic_shapes import definitely_true

        torch._check(
            newsize != 0,
            lambda: (
                f"cannot reshape tensor of 0 elements into shape {list(shape)} because the "
                f"unspecified dimension size -1 can be any value and is ambiguous"
                if definitely_true(numel == 0)
                else f"shape '{list(shape)}' is invalid for input of size {numel}"
            ),
        )
        torch._check(
            numel % newsize == 0,
            lambda: f"shape '{list(shape)}' is invalid for input of size {numel}",
        )
        # Convert to list to produce a compatible error message with core
        # PyTorch, which prints sequences in square brackets.
        shape = list(shape)
        shape[dim] = numel // newsize
        # NB: This is pretty important when you have unbacked SymInts.
        # Suppose you have (i0, 12) resizing into (2, -1, 12). The old
        # range for i0 is typically [2, inf], which means if you divide
        # by two the new range should be [1, inf]. But this is bad news
        # if you have an unbacked SymInt: we need to reapply the unsound
        # assumption that the size is >= 2.
        torch._check_is_size(shape[dim])
    return tuple(shape)


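# Illustrative sketch (not part of the upstream file): broadcasting and -1
# inference shown as doctests.
#
# >>> from torch._prims_common import infer_size_shapes, infer_size
# >>> infer_size_shapes((3, 1), (1, 4))   # 1s map to the other size
# (3, 4)
# >>> infer_size((2, -1), 6)              # the -1 dim is inferred as 6 // 2
# (2, 3)

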
_integer_dtypes = (
    torch.uint8,
    torch.uint16,
    torch.uint32,
    torch.uint64,
    torch.int8,
    torch.int16,
    torch.int32,
    torch.int64,
)
_low_precision_dtypes = (torch.float16, torch.bfloat16, torch.complex32)
_complex_dtypes = (torch.complex32, torch.complex64, torch.complex128)


def is_boolean_dtype(dtype: torch.dtype) -> bool:
    assert isinstance(dtype, torch.dtype)
    return dtype is torch.bool


def is_integer_dtype(dtype: torch.dtype) -> bool:
    assert isinstance(dtype, torch.dtype)
    return dtype in _integer_dtypes


def is_low_precision_dtype(dtype: torch.dtype) -> bool:
    assert isinstance(dtype, torch.dtype)
    return dtype in _low_precision_dtypes


def is_float_dtype(dtype: torch.dtype) -> bool:
    assert isinstance(dtype, torch.dtype)
    return dtype.is_floating_point


def is_complex_dtype(dtype: torch.dtype) -> bool:
    assert isinstance(dtype, torch.dtype)
    return dtype in _complex_dtypes


def is_grad_dtype(dtype: torch.dtype) -> bool:
    """
    Checks if the dtype can require a gradient.
    """
    return dtype.is_floating_point or is_complex_dtype(dtype)


_complex_to_real_dtype_map = {
    torch.complex128: torch.float64,
    torch.complex64: torch.float32,
    torch.complex32: torch.float16,
}

_real_to_complex_dtype_map = {
    torch.float16: torch.complex32,
    torch.bfloat16: torch.complex64,
    torch.float32: torch.complex64,
    torch.float64: torch.complex128,
}


def corresponding_real_dtype(dtype: torch.dtype) -> torch.dtype:
    return _complex_to_real_dtype_map[dtype]


def corresponding_complex_dtype(dtype: torch.dtype) -> torch.dtype:
    return _real_to_complex_dtype_map[dtype]


def dtype_to_type(dtype: torch.dtype) -> type:
    """
    Computes the corresponding Python type (AKA "type kind") for the
    given dtype.
    """
    assert isinstance(dtype, torch.dtype)

    if dtype is torch.bool:
        return bool
    if dtype in _integer_dtypes:
        return int
    if dtype.is_floating_point:
        return float
    if dtype in _complex_dtypes:
        return complex

    raise ValueError("Invalid dtype!")


def dtype_to_type_ctor(dtype: torch.dtype) -> Callable[[NumberType], NumberType]:
    """
    Computes the corresponding Python type constructor for the
    given dtype.
    """
    assert isinstance(dtype, torch.dtype)

    if dtype is torch.bool:
        return lambda x: bool(x)
    if dtype in _integer_dtypes:
        return sym_int
    if dtype.is_floating_point:
        return sym_float
    if dtype in _complex_dtypes:
        # TODO: type error here is real, replace with sym_complex
        return lambda x: complex(x)  # type: ignore[arg-type]

    raise ValueError("Invalid dtype!")


def type_to_dtype(typ: type) -> torch.dtype:
    """
    Computes the corresponding dtype for a Number type.
    """

    assert isinstance(typ, type)

    if typ is bool:
        return torch.bool
    if typ in [int, torch.SymInt]:
        return torch.long
    if typ in [float, torch.SymFloat]:
        return torch.get_default_dtype()
    # TODO: sym_complex_float?
    if typ is complex:
        return corresponding_complex_dtype(torch.get_default_dtype())

    raise ValueError("Invalid type!")


def get_dtype(x: Union[torch.Tensor, NumberType]):
    if isinstance(x, torch.Tensor):
        return x.dtype
    else:
        return type_to_dtype(type(x))


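# Illustrative sketch (not part of the upstream file): dtype <-> Python type
# round trips, assuming the default dtype is float32.
#
# >>> from torch._prims_common import dtype_to_type, type_to_dtype
# >>> dtype_to_type(torch.bfloat16)
# <class 'float'>
# >>> type_to_dtype(float)  # maps to the default dtype
# torch.float32
# >>> type_to_dtype(int)
# torch.int64

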
_ordered_types = (bool, int, float, complex)


def check_fp_or_complex(
    dtype: torch.dtype, fn_name: str, allow_low_precision_dtypes: bool = True
):
    """
    Checks whether the input is floating point or complex.
    If allow_low_precision_dtypes is True, it allows having float16, bfloat16, and complex32
    """
    torch._check(
        is_float_dtype(dtype) or is_complex_dtype(dtype),
        lambda: f"{fn_name}: Expected a floating point or complex tensor as input. Got {dtype}",
    )
    torch._check(
        allow_low_precision_dtypes or not is_low_precision_dtype(dtype),
        lambda: f"{fn_name}: Half precision dtypes not supported. Got {dtype}",
    )


def check_is_matrix(A: TensorLikeType, f_name: str, arg_name: str = "A"):
    torch._check(
        len(A.shape) >= 2,
        lambda: f"{f_name}: The input tensor {arg_name} must have at least 2 dimensions.",
    )


def get_higher_type(a: type, b: type) -> type:
    """
    Returns the higher of the two given Number types.

    The types are ordered bool -> int -> float -> complex.
    """
    a, b = _maybe_get_pytype(a), _maybe_get_pytype(b)
    # Type checking
    if a not in _ordered_types or b not in _ordered_types:
        raise RuntimeError(f"Expected builtin numeric types, found {a}, {b}")

    if a is b:
        return a

    for typ in _ordered_types:
        if a is typ:
            return b
        if b is typ:
            return a

    raise ValueError("Unknown Python scalar type!")


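# Illustrative sketch (not part of the upstream file): bool is the "lowest"
# type, so pairing it with anything returns the other type.
#
# >>> from torch._prims_common import get_higher_type
# >>> get_higher_type(bool, int)
# <class 'int'>
# >>> get_higher_type(float, complex)
# <class 'complex'>

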
# Returns the higher of two torch datatypes a and b or, if the two
# are not ordered relative to each other, the next
# higher datatype
def get_higher_dtype(
    a: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
    b: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
) -> Optional[torch.dtype]:
    """
    Computes the "lowest" datatype that is weakly
    "higher" than both a and b.
    """

    # Type checking
    assert a is None or isinstance(a, (torch.dtype, TensorLike, Number))
    assert b is None or isinstance(b, (torch.dtype, TensorLike, Number))

    def _extract_dtype(
        x: Optional[Union[torch.dtype, TensorLikeType, NumberType]]
    ) -> Optional[torch.dtype]:
        if x is None:
            return None
        if isinstance(x, torch.dtype):
            return x
        if isinstance(x, TensorLike):
            return x.dtype
        if isinstance(x, Number):
            return type_to_dtype(type(x))

        raise RuntimeError("Unexpected type given to _extract_dtype!")

    a, b = _extract_dtype(a), _extract_dtype(b)

    if a is b:
        return a

    if a is None:
        return b

    if b is None:
        return a

    ordered_datatypes = (
        (torch.bool,),
        (torch.uint8, torch.int8),
        (torch.int16,),
        (torch.int32,),
        (torch.int64,),
        (torch.float16, torch.bfloat16),
        (torch.float32,),
        (torch.float64,),
        (torch.complex32,),
        (torch.complex64,),
        (torch.complex128,),
    )

    for idx, dtypes in enumerate(ordered_datatypes):
        if a in dtypes and b in dtypes:
            return ordered_datatypes[idx + 1][0]
        if a in dtypes:
            return b
        if b in dtypes:
            return a

    raise RuntimeError("Unexpected termination!")


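# Illustrative sketch (not part of the upstream file): uint8 and int8 are not
# ordered relative to each other, so their "higher" dtype is the next tier up.
#
# >>> from torch._prims_common import get_higher_dtype
# >>> get_higher_dtype(torch.uint8, torch.int8)
# torch.int16
# >>> get_higher_dtype(torch.float16, torch.float32)
# torch.float32

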
def check_pin_memory(pin_memory: bool):
    torch._check_not_implemented(
        not pin_memory, lambda: "PrimTorch does not support pinned memory"
    )


def check_layout(layout: torch.layout):
    torch._check_not_implemented(
        layout == torch.strided, lambda: f"PrimTorch doesn't support layout={layout}"
    )


# TODO: maybe unify with can_cast_to?
def is_weakly_lesser_type(a: type, b: type) -> bool:
    """
    Compares two types, a and b, returning True if a is weakly "less" than b.

    The comparison is determined by the following type ordering: bool, int, float, complex.
    """

    a, b = _maybe_get_pytype(a), _maybe_get_pytype(b)

    if a not in _ordered_types or b not in _ordered_types:
        raise RuntimeError(f"Expected builtin numeric types, found {a}, {b}")

    for typ in _ordered_types:
        if a == typ:
            return True
        if b == typ:
            return False

    raise RuntimeError("Unexpected termination!")


def can_safe_cast_to(*, cast_to: torch.dtype, cast_from: torch.dtype) -> bool:
    for fn in (is_complex_dtype, is_float_dtype, is_integer_dtype, is_boolean_dtype):
        if fn(cast_to):
            return True
        if fn(cast_from):
            return False

    raise ValueError(f"Received unknown dtypes {cast_to}, {cast_from}!")


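# Illustrative sketch (not part of the upstream file): a cast is "safe" when
# the destination's type kind is at least as high as the source's.
#
# >>> from torch._prims_common import can_safe_cast_to
# >>> can_safe_cast_to(cast_to=torch.float32, cast_from=torch.int64)
# True
# >>> can_safe_cast_to(cast_to=torch.int32, cast_from=torch.float16)
# False

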
def check_same_dtype(*args):
    """
    Checks that all Tensors in args have the same dtype and that all Numbers have the
    same corresponding Python type.

    Raises a RuntimeError when:
    - args contains an object whose type is not Tensor or Number
    - two Tensor objects in args have different dtypes
    - two Number objects in args have different types
    - there are Tensors and Numbers in args, and one of those Tensors' corresponding
      Python types is different from the type of one of those Numbers
    """
    full_dtype = None
    scalar_type = None

    for arg in args:
        if isinstance(arg, Number):
            # Scalar type checking is disabled (and may be removed in the future)
            continue
            # if scalar_type is None:
            #     scalar_type = type(arg)

            # if scalar_type is not type(arg):
            #     msg = (
            #         "Scalar of type "
            #         + str(type(arg))
            #         + " is not the expected type of "
            #         + str(scalar_type)
            #         + "!"
            #     )
            #     raise RuntimeError(msg)
        elif isinstance(arg, TensorLike):
            if full_dtype is None:
                full_dtype = arg.dtype
            if scalar_type is None:
                scalar_type = dtype_to_type(arg.dtype)

            if full_dtype is not arg.dtype:
                msg = (
                    "Tensor with dtype "
                    + str(arg.dtype)
                    + " is not the expected dtype of "
                    + str(full_dtype)
                    + "!"
                )
                raise RuntimeError(msg)

            arg_type = dtype_to_type(arg.dtype)
            if arg_type is not scalar_type:
                msg = (
                    "Tensor with corresponding Python type "
                    + str(arg_type)
                    + " is not the expected type of "
                    + str(scalar_type)
                    + "!"
                )
                raise RuntimeError(msg)
        else:
            msg = (
                "Unexpected type when checking for same dtype, " + str(type(arg)) + "!"
            )
            raise RuntimeError(msg)


# Maps datatypes to their computation types for elementwise operations
_computation_dtype_map = {
    torch.bfloat16: torch.float32,
    torch.float16: torch.float32,
    torch.complex32: torch.complex64,
}


def get_computation_dtype(dtype: torch.dtype) -> torch.dtype:
    return _computation_dtype_map.get(dtype, dtype)


_cpu_acc_type_map = {
    torch.bfloat16: torch.float64,
    torch.float16: torch.float64,
    torch.float32: torch.float64,
    torch.complex32: torch.complex128,
    torch.complex64: torch.complex128,
}


def get_acc_type(dtype: torch.dtype, device: torch.device) -> torch.dtype:
    # Equivalent to at::toAccumulateType, prefer computation_dtype where possible
    if device.type == "cpu":
        return _cpu_acc_type_map.get(dtype, dtype)
    else:
        return get_computation_dtype(dtype)


class ELEMENTWISE_TYPE_PROMOTION_KIND(Enum):
    DEFAULT = (0,)
    NO_OPMATH = (1,)
    INT_TO_FLOAT = (2,)
    ALWAYS_BOOL = (3,)
    COMPLEX_TO_FLOAT = (4,)
    BOOL_TO_LONG = (5,)


class REDUCTION_OUTPUT_TYPE_KIND(Enum):
    SAME = (0,)
    COMPLEX_TO_FLOAT = (1,)  # for complex types outputs corresponding real type
    KEEP_PROMOTED_TYPE = (2,)  # keep output in opmath type, needed for mean
    ALWAYS_BOOL = (3,)


# Describes the return type of the primitive:
#
# - NEW, a new tensor is created
# - VIEW, a view of an input tensor is returned
# - INPLACE, one or more input tensors is modified
#
# these descriptors are mutually exclusive and exhaustive.
class RETURN_TYPE(Enum):
    NEW = (0,)
    VIEW = (1,)
    INPLACE = (2,)


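# Illustrative sketch (not part of the upstream file): low precision dtypes are
# widened for computation, and CPU reductions accumulate in float64.
#
# >>> from torch._prims_common import get_computation_dtype, get_acc_type
# >>> get_computation_dtype(torch.bfloat16)
# torch.float32
# >>> get_acc_type(torch.float32, torch.device("cpu"))
# torch.float64

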
# TODO: when NumberType contains the sym types, can simplify this
def number_type(x: Union[NumberType, torch.SymInt, torch.SymFloat]) -> Type:
    if isinstance(x, torch.SymInt):
        return int
    elif isinstance(x, torch.SymFloat):
        return float
    else:
        return type(x)


def expr_type(x: sympy.Expr) -> Type:
    if x.is_integer:  # type: ignore[attr-defined]
        return int
    else:
        # NB: Not strictly correct, but we don't support SymPy complex or bool.
        return float


# TODO: document type promotion kinds
def elementwise_dtypes(
    *_args,
    type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND,
) -> Tuple[torch.dtype, torch.dtype]:
    """
    Computes the computation and result dtypes for elementwise type promotion
    on the given arguments and with the given elementwise type promotion kind.

    Note that not all inputs to an elementwise operation necessarily participate in type promotion.
    For example, the "alpha" parameter of torch.add does not participate in type promotion,
    although it may be cast to the Python type corresponding to the computation dtype that
    the type promotion algorithm determines.

    Default elementwise type promotion, which all other type promotion kinds tweak (see below),
    first decides which of four ordered types to use:

      bool -> integer -> floating point -> complex

    The selected type is the "lowest" type in the above list such that all number arguments
    have a weakly "lower" type and all tensor arguments have a weakly lower corresponding
    type for their dtype.

    Once the type is determined, the particular result dtype is found. The dtypes are
    partially ordered as follows:

      bool -> uint8, int8 -> int16 -> int32 -> int64 ->
        float16, bfloat16 -> float32 -> float64 -> complex32 -> complex64 -> complex128

    The result dtype is selected by:
      - if no tensor's dtype has the same corresponding type as the one selected,
        then the result dtype is the (default) dtype corresponding to the selected type
        (for example, 1.5 + an integer tensor has a result dtype of the default floating point dtype)
      - if the result type is complex then the dtype is:
        - the default complex dtype if there are no floating point or complex tensors
        - if there are floating point or complex tensors with one or more dimensions, then
          the complex dtype corresponding to the highest corresponding complex dtype among those tensors
          (for example, double + cfloat -> cdouble)
        - if there are only floating point or complex tensors with zero dimensions, then
          the complex dtype corresponding to the highest corresponding complex dtype among those tensors
      - if the first two cases do not apply, the result dtype is the highest dtype among
        all tensors with one or more dimensions of the output type, and if there are no such
        tensors then it's the highest dtype among all tensors with zero dimensions of the output type
        (for example, long + half -> half, even if the half tensor has zero dimensions)

    The "corresponding complex dtypes" are:
      float16    -> complex32
      bfloat16   -> complex64
      float32    -> complex64
      float64    -> complex128
      complex32  -> complex32
      complex64  -> complex64
      complex128 -> complex128

    The DEFAULT type promotion kind computes per above, and then uses the result dtype to pick a computation
    dtype by mapping low precision floating point and complex dtypes as follows:

      float16   -> float32
      bfloat16  -> float32
      complex32 -> complex64

    This is referred to as "op math", and the NO_OPMATH type promotion kind disables this mapping, making the
    computation dtype the same as the result dtype when it's selected. NO_OPMATH is appropriate for kernels
    which perform no mathematical operations on their tensors (see below for examples).

    The INT_TO_FLOAT type promotion kind maps boolean and integer result dtypes to the default floating point dtype,
    and computation dtypes to the appropriate op math dtype.

    The COMPLEX_TO_FLOAT type promotion kind maps complex result dtypes to the corresponding float dtype, following this
    mapping:

      complex32  -> float16
      complex64  -> float32
      complex128 -> float64

    Note that COMPLEX_TO_FLOAT derives the computation dtype as the DEFAULT setting does.

    The BOOL_TO_LONG type promotion kind maps boolean computation and result dtypes to long.

    The ALWAYS_BOOL type promotion kind always sets the result dtype to bool.

    Example operators for each type promotion option:
      DEFAULT          : add
      NO_OPMATH        : where, nextafter, cat
      INT_TO_FLOAT     : sin
      COMPLEX_TO_FLOAT : abs
      BOOL_TO_LONG     : pow
      ALWAYS_BOOL      : eq
    """

    args = tuple(x for x in _args if x is not None)

    highest_type: type = bool

    # Import sympy locally, as importing it eagerly at a module level is too slow
    # See https://dev-discuss.pytorch.org/t/delving-into-what-happens-when-you-import-torch/1589
    import sympy

    for x in args:
        if not isinstance(x, (Number, TensorLike, sympy.Expr)):
            msg = f"Unexpected type {str(type(x))} when computing elementwise type promotion!"
            raise ValueError(msg)

        if isinstance(x, Number):
            highest_type = get_higher_type(highest_type, number_type(x))
        elif isinstance(x, sympy.Expr):
            highest_type = get_higher_type(highest_type, expr_type(x))
        else:
            # x is a TensorLike
            highest_type = get_higher_type(highest_type, dtype_to_type(x.dtype))

    result_dtype = None

    def _find_highest_dtype_filtered(
        args, filter, *, float_as_complex=False
    ) -> Optional[torch.dtype]:
        zero_dim_tensor_dtype = None
        one_plus_dim_tensor_dtype = None
        for x in args:
            if isinstance(x, TensorLike) and filter(x.dtype):
                _dtype = x.dtype
                if float_as_complex and is_float_dtype(_dtype):
                    _dtype = corresponding_complex_dtype(_dtype)
                if x.ndim == 0:
                    zero_dim_tensor_dtype = get_higher_dtype(
                        zero_dim_tensor_dtype, _dtype
                    )
                else:
                    # x.ndim > 0
                    one_plus_dim_tensor_dtype = get_higher_dtype(
                        one_plus_dim_tensor_dtype, _dtype
                    )

        # Prefers dtype of tensors with one or more dimensions
        if one_plus_dim_tensor_dtype is not None:
            return one_plus_dim_tensor_dtype

        return zero_dim_tensor_dtype

    if highest_type is float:
        result_dtype = _find_highest_dtype_filtered(args, is_float_dtype)
        result_dtype = (
            torch.get_default_dtype() if result_dtype is None else result_dtype
        )
    elif highest_type is complex:
        result_dtype = _find_highest_dtype_filtered(
            args,
            lambda x: is_float_dtype(x) or is_complex_dtype(x),
            float_as_complex=True,
        )
        if result_dtype is None:
            result_dtype = corresponding_complex_dtype(torch.get_default_dtype())
    elif highest_type is int:
        result_dtype = _find_highest_dtype_filtered(args, is_integer_dtype)
        result_dtype = torch.long if result_dtype is None else result_dtype
    else:
        # highest_type is bool
        result_dtype = torch.bool

    if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT:
        return get_computation_dtype(result_dtype), result_dtype
    elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH:
        return result_dtype, result_dtype
    elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT:
        if is_integer_dtype(result_dtype) or is_boolean_dtype(result_dtype):
            result_dtype = torch.get_default_dtype()
        return get_computation_dtype(result_dtype), result_dtype
    elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT:
        # NOTE: computation can still occur in a complex dtype
        computation_dtype = get_computation_dtype(result_dtype)
        if is_complex_dtype(result_dtype):
            result_dtype = corresponding_real_dtype(result_dtype)
        return computation_dtype, result_dtype
    elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG:
        if is_boolean_dtype(result_dtype):
            return torch.long, torch.long
        return get_computation_dtype(result_dtype), result_dtype
    elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL:
        return get_computation_dtype(result_dtype), torch.bool
    else:
        raise ValueError(f"Unknown type promotion kind {str(type_promotion_kind)}")


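# Illustrative sketch (not part of the upstream file): a bfloat16 tensor plus a
# Python float computes in float32 but keeps a bfloat16 result under DEFAULT.
#
# >>> from torch._prims_common import (
# ...     elementwise_dtypes, ELEMENTWISE_TYPE_PROMOTION_KIND,
# ... )
# >>> t = torch.empty(3, dtype=torch.bfloat16)
# >>> elementwise_dtypes(
# ...     t, 1.5, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
# ... )
# (torch.float32, torch.bfloat16)

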
def reduction_dtypes(
    arg,
    output_dtype_kind: REDUCTION_OUTPUT_TYPE_KIND,
    dtype: Optional[torch.dtype] = None,
) -> Tuple[torch.dtype, Optional[torch.dtype]]:
    # even though some reductions, like amin or amax, don't strictly require type promotion,
    # all the math ops (including comparisons) are still defined only for a computation type,
    # so promotion will still happen. We are doing it explicitly here
    inp_dtype = dtype if dtype is not None else arg.dtype
    computation_dtype = get_computation_dtype(inp_dtype)
    if (
        output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.SAME
        or output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
    ):
        result_dtype = dtype if dtype else arg.dtype
        if (
            output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
            and is_complex_dtype(result_dtype)
        ):
            result_dtype = corresponding_real_dtype(result_dtype)
    elif output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.KEEP_PROMOTED_TYPE:
        result_dtype = None
    else:  # ALWAYS_BOOL
        result_dtype = torch.bool
    return computation_dtype, result_dtype


# This function's logic is borrowed from the following functions defined in C++:
# batched_matrix_contiguous_strides and contiguous_strides
def make_contiguous_strides_for(
    shape: ShapeType, row_major: bool = True
) -> Tuple[int, ...]:
    """
    Returns the strides of a contiguous tensor if row_major.
    If row_major=False, it returns the strides of a contiguous batch of
    Fortran-contiguous matrices.
    This is often used when calling external libraries like BLAS/LAPACK/cuSolver...
    """
    # contiguous_strides from c10/util/strides.h
    validate_shape(shape)
    if not shape:
        return ()

    from torch.fx.experimental.symbolic_shapes import is_nested_int

    multiplier = 1
    strides = []
    for l in reversed(shape):
        strides.append(multiplier)
        multiplier *= l if is_nested_int(l) else sym_max(l, 1)

    result = tuple(reversed(strides))

    # batched_matrix_contiguous_strides from aten/src/ATen/native/LinearAlgebraUtils.h
    if row_major:
        return result
    else:
        if len(shape) < 2:
            return result
        return result[:-2] + (1, max(shape[-2], 1))


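# Illustrative sketch (not part of the upstream file): C-contiguous strides vs.
# a batch of Fortran-contiguous matrices for the same shape.
#
# >>> from torch._prims_common import make_contiguous_strides_for
# >>> make_contiguous_strides_for((2, 3, 4))
# (12, 4, 1)
# >>> make_contiguous_strides_for((2, 3, 4), row_major=False)
# (12, 1, 3)

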
def make_channels_last_1d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
    torch._check(
        len(shape) == 3,
        lambda: "Only tensors of rank 3 can use the channels_last_1d memory format",
    )

    multiplier = 1
    strides = [0] * 3
    for idx in (1, -1, 0):
        # NOTE: intentional divergence from make_contiguous_strides_for
        # This is consistent with eager
        strides[idx] = multiplier
        multiplier *= shape[idx]

    return tuple(strides)


def make_channels_last_2d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
    # TODO: maybe inform the user of channels_last_3d if rank of the tensor is 5?
    torch._check(
        len(shape) == 4,
        lambda: "Only tensors of rank 4 can use the channels_last memory format",
    )

    multiplier = 1
    strides = [0] * 4
    for idx in (1, -1, -2, 0):
        # NOTE: intentional divergence from make_contiguous_strides_for
        # This is consistent with eager
        strides[idx] = multiplier
        multiplier *= shape[idx]

    return tuple(strides)


def make_channels_last_3d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
    torch._check(
        len(shape) == 5,
        lambda: "Only tensors of rank 5 can use the channels_last_3d memory format",
    )

    multiplier = 1
    strides = [0] * 5
    for idx in (1, -1, -2, -3, 0):
        # NOTE: intentional divergence from make_contiguous_strides_for
        # This is consistent with eager
        strides[idx] = multiplier
        multiplier *= shape[idx]

    return tuple(strides)


def make_channels_last_strides_for(shape: ShapeType) -> Tuple[int, ...]:
    ndim = len(shape) if isinstance(shape, Sequence) else 1
    if ndim == 3:
        return make_channels_last_1d_strides_for(shape)
    elif ndim == 4:
        return make_channels_last_2d_strides_for(shape)
    elif ndim == 5:
        return make_channels_last_3d_strides_for(shape)
    else:
        raise RuntimeError(
            f"no channels last format strides exist in {ndim} dimensions"
        )


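# Illustrative sketch (not part of the upstream file): for an NCHW shape the
# channel dimension gets the smallest stride.
#
# >>> from torch._prims_common import make_channels_last_strides_for
# >>> make_channels_last_strides_for((2, 3, 4, 5))  # N, C, H, W
# (60, 1, 15, 3)

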
def compute_reduction_output_shape(
    shape: ShapeType, dimensions: Sequence
) -> Tuple[int, ...]:
    for idx in dimensions:
        validate_idx(len(shape), idx)

    new_shape = []
    for idx in range(len(shape)):
        if idx in dimensions:
            continue

        new_shape.append(shape[idx])

    return tuple(new_shape)


def validate_no_repeating_dims(dims: Sequence):
    if len(dims) != len(set(dims)):
        raise RuntimeError("duplicate value in the list of dims")


def reduction_dims(shape: ShapeType, dims: Optional[Sequence]) -> Tuple[int, ...]:
    if dims is None:
        return tuple(range(len(shape)))
    dims = tuple(canonicalize_dim(len(shape), idx) for idx in dims)
    validate_no_repeating_dims(dims)
    return dims


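# Illustrative sketch (not part of the upstream file): None means "reduce over
# everything", and negative dims are canonicalized.
#
# >>> from torch._prims_common import reduction_dims
# >>> reduction_dims((2, 3, 4), None)
# (0, 1, 2)
# >>> reduction_dims((2, 3, 4), [-1, 0])
# (2, 0)

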
def set_correction(
    unbiased: Optional[bool] = None,
    correction: Optional[NumberType] = None,
) -> float:
    if correction is not None and unbiased is not None:
        raise RuntimeError("cannot specify both correction and unbiased arguments")
    elif correction is None and unbiased is None:
        correction = 1.0
    elif correction is None and unbiased is not None:
        correction = 0.0 if unbiased is False else 1.0
    # NB: we don't actually support symint here, but it's harmless to accept
    if not isinstance(correction, (IntLike, FloatLike)):
        raise ValueError("correction argument should be integer or float")
    if correction < 0:
        raise ValueError("correction argument should be non-negative")
    return sym_float(correction)


def compute_required_storage_length(
    shape: ShapeType, strides: StrideType, storage_offset: int
) -> int:
    """Computes the minimum storage size to hold the given tensor geometry.

    Example
    =======

    This is the size of a newly allocated tensor's storage, in units of elements

    >>> t = torch.empty((10, 20))
    >>> compute_required_storage_length(t.shape, t.stride(), t.storage_offset())
    200

    >>> # xdoctest: +SKIP(failing)
    >>> t2 = torch.empty_strided((1, 2, 3), (5, 7, 11))
    >>> size = compute_required_storage_length(t2.shape, t2.stride(), t2.storage_offset())
    >>> size == t2.storage().size()
    True

    A valid tensor may have a larger storage size, but never smaller

    >>> slice = torch.empty(100)[20:40]
    >>> slice.storage().size()
    100

    >>> compute_required_storage_length(slice.shape, slice.stride(), slice.storage_offset())
    40

    """
    from torch.fx.experimental.symbolic_shapes import guard_size_oblivious

    # Short-circuits if the shape has no elements
    if guard_size_oblivious(reduce(operator.mul, shape, 1) == 0):
        return 0

    max_offset = sum((x - 1) * y for x, y in zip(shape, strides))
    # +1 to account for the first element which offsets are taken from
    return 1 + storage_offset + max_offset


def check_in_bounds_for_storage(
    a: torch.TypedStorage, shape: ShapeType, strides: StrideType, storage_offset: int
):
    """
    Determines if the given shape, strides, and offset are valid for the given storage.
    """

    required_length = compute_required_storage_length(shape, strides, storage_offset)
    if a.size() < required_length:
        msg = (
            "Can't view a storage of size {} with an offset of {}, shape of {}, and strides of {}, "
            "which requires a storage of size {}".format(
                a.size(), storage_offset, str(shape), str(strides), required_length
            )
        )
        raise ValueError(msg)


# NOTE: This function should ideally be removed, but some Meta internal models
# packaged with `torch.package` are using it, so it will have to be removed
# at some point in the future when those models no longer use this function.
def check(
    b: bool, s: Callable[[], str], exc_type: Type[Exception] = RuntimeError
) -> None:
    """
    Helper function for raising an error_type (default: RuntimeError) if a boolean condition fails.
    Error message is a callable producing a string (to avoid wasting time
    string formatting in non-error case, and also to make it easier for torchdynamo
    to trace.)

    .. note:: This function is planned for removal in the future. Please use
        `torch._check*` functions instead.
    """
    warnings.warn(
        DeprecationWarning(
            "'torch._prims_common.check' will be removed in the future. Please use "
            "'torch._check*' functions instead"
        )
    )
    torch._check_with(exc_type, b, s)


# This combines is_channels_last_strides_2d and is_channels_last_strides_3d in
# c10/core/MemoryFormat.h into one function
def are_strides_like_channels_last(
    shape: Sequence[int], strides: Sequence[int]
) -> bool:
    ndim = len(shape)

    if ndim == 4:
        # Check for channels_last_2d
        dim_order = [1, 3, 2, 0]
    elif ndim == 5:
        # Check for channels_last_3d
        dim_order = [1, 4, 3, 2, 0]
    else:
        return False

    if strides[1] == 0:
        return False

    min = 0
    for d in dim_order:
        if shape[d] == 0:
            return False
        if strides[d] < min:
            return False
        if d == 0 and min == strides[1]:
            return False
        min = strides[d]
        if strides[d] > 1:
            min *= shape[d]
    return True


def suggest_memory_format(x: TensorLikeType) -> torch.memory_format:
    if x.layout != torch.strided:
        return torch.contiguous_format

    if are_strides_like_channels_last(x.shape, x.stride()):
        return torch.channels_last if x.ndim == 4 else torch.channels_last_3d

    return torch.contiguous_format


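# Illustrative sketch (not part of the upstream file): a tensor converted to
# channels_last is recognized by its strides alone.
#
# >>> from torch._prims_common import suggest_memory_format
# >>> t = torch.empty(2, 3, 4, 5).to(memory_format=torch.channels_last)
# >>> suggest_memory_format(t)
# torch.channels_last

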
def prod(xs: Sequence[NumberType]) -> NumberType:
    """Product of elements in input sequence. Returns 1 for empty sequence"""
    return reduce(operator.mul, xs, 1)


def is_expandable_to(shape: ShapeType, desired: ShapeType) -> bool:
    """Checks if a shape can be expanded to another shape.
    This is equivalent to checking if the two shapes are broadcastable.
    """
    # This is a Python implementation of
    # aten/src/ATen/ExpandUtils.h:is_expandable_to
    if len(shape) > len(desired):
        return False
    for i in range(len(shape)):
        if shape[-i - 1] != desired[-i - 1] and shape[-i - 1] != 1:
            return False
    return True


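# Illustrative sketch (not part of the upstream file): trailing dimensions are
# compared right-to-left, with 1s free to expand.
#
# >>> from torch._prims_common import is_expandable_to
# >>> is_expandable_to((3, 1), (2, 3, 4))
# True
# >>> is_expandable_to((3, 2), (3, 4))
# False

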
def mask_tensor(mask: TensorLikeType, t: TensorLikeType):
    """
    Similar to torch.where(mask, t, 0) but if t is boolean,
    result is also boolean and not promoted to int.
    """
    # torch.where(mask, t, False) is equivalent
    # but feels hacky and might break in the future
    if t.dtype is torch.bool:
        return mask.logical_and(t)
    else:
        return torch.where(mask, t, 0)


def get_aten_op(fn: Callable, name: str):
    """
    Given the __module__ of reference and its name, it returns
    (our best guess of) the ATen name of the associated operation

    Note: In ATen, the __name__ of a function within a module often
    starts with the module name. E.g. linalg_eigh, or special_zeta
    """
    module = fn.__module__
    prefix = "torch._refs"
    assert module.startswith(prefix)
    module = module[len(prefix) :]
    # We want to go from .special / .nn.functional
    # to special and special_ / nn_functional_
    if module:
        module = module[1:]
        module = module.replace(".", "_")
        module = module + "_"
    return getattr(torch._ops.ops.aten, f"{module}{name}")


def dtype_or_default(dtype: Optional[torch.dtype]) -> torch.dtype:
    return dtype if dtype is not None else torch.get_default_dtype()


def device_or_default(device: Optional[DeviceLikeType]) -> DeviceLikeType:
    return device if device is not None else torch.device("cpu")


def layout_or_default(layout: Optional[torch.layout]) -> torch.layout:
    return layout if layout is not None else torch.strided


def clone_preserve_strides(x):
    needed_size = compute_required_storage_length(
        x.size(), x.stride(), x.storage_offset()
    )
    # Our eager implementations for *_scatter ops are all primitives w.r.t autograd,
    # so these as_strided() calls are not seen by autograd.
    # We need to mimic this behavior in our ref/prim implementations.
    # TODO: a better way to handle this would be with a new op, "_unsafe_as_strided"
    # We should revisit this when we add a compositional as_strided op,
    # and also as part of https://github.com/pytorch/pytorch/issues/90507
    try:
        old = torch._C._dispatch_tls_is_dispatch_key_excluded(
            torch._C.DispatchKey.ADInplaceOrView
        )
        torch._C._dispatch_tls_set_dispatch_key_excluded(
            torch._C.DispatchKey.ADInplaceOrView, True
        )
        buffer = torch.as_strided(x, (needed_size,), (1,), 0).clone()
        return torch.as_strided(buffer, x.size(), x.stride(), x.storage_offset())
    finally:
        torch._C._dispatch_tls_set_dispatch_key_excluded(
            torch._C.DispatchKey.ADInplaceOrView, old
        )


def alert_not_deterministic(caller: str):
    if torch.are_deterministic_algorithms_enabled():
        if torch.is_deterministic_algorithms_warn_only_enabled():
            warnings.warn(
                f"{caller} does not have a deterministic implementation, but you set "
                f"'torch.use_deterministic_algorithms(True, warn_only=True)'. "
                f"You can file an issue at https://github.com/pytorch/pytorch/issues "
                f"to help us prioritize adding deterministic support for this operation."
            )
        else:
            torch._check(
                False,
                lambda: (
                    f"{caller} does not have a deterministic implementation, but you set "
                    f"'torch.use_deterministic_algorithms(True)'. You can turn off "
                    f"determinism just for this operation, or you can use the "
                    f"'warn_only=True' option, if that's acceptable for your application. "
                    f"You can also file an issue at https://github.com/pytorch/pytorch/issues "
                    f"to help us prioritize adding deterministic support for this operation."
                ),
            )


class CUDARngStateHelper:
    @staticmethod
    def get_torch_state_as_tuple(fake_mode=nullcontext()):
        if not torch.cuda.is_available():
            raise RuntimeError("CUDA not available")

        with fake_mode:
            seed = torch.tensor(torch.cuda.initial_seed())
            offset = torch.tensor(torch.cuda._get_rng_state_offset())
            return seed, offset

    @staticmethod
    def set_torch_state_tensor(seed, offset):
        # Rng state is [64-bit seed, 64-bit offset]
        seed_portion = seed.reshape([1]).view(torch.uint8)
        offset_portion = offset.reshape([1]).view(torch.uint8)
        new_state = torch.cat([seed_portion, offset_portion])
        torch.cuda.set_rng_state(new_state)

    @staticmethod
    def set_new_offset(relative_offset):
        torch.cuda._set_rng_state_offset(relative_offset.item())
venv/lib/python3.10/site-packages/torch/_prims_common/__pycache__/__init__.cpython-310.pyc
ADDED: Binary file (49.8 kB)

venv/lib/python3.10/site-packages/torch/_prims_common/__pycache__/wrappers.cpython-310.pyc
ADDED: Binary file (12.3 kB)

venv/lib/python3.10/site-packages/torch/_prims_common/wrappers.py
ADDED
@@ -0,0 +1,401 @@
import inspect
import warnings
from functools import wraps
from itertools import chain

from typing import Callable, NamedTuple, Optional, overload, Sequence, Tuple

import torch
import torch._prims_common as utils
from torch._prims_common import (
    CustomOutParamAnnotation,
    ELEMENTWISE_TYPE_PROMOTION_KIND,
    Number,
    NumberType,
    ShapeType,
    TensorLike,
    TensorLikeType,
)
from torch.utils import _pytree as pytree
from torch.utils._pytree import tree_flatten, tree_unflatten


@overload
def _maybe_convert_to_dtype(a: TensorLikeType, dtype: torch.dtype) -> TensorLikeType:
    pass


@overload
def _maybe_convert_to_dtype(a: NumberType, dtype: torch.dtype) -> NumberType:
    pass


@overload
def _maybe_convert_to_dtype(a: Sequence, dtype: torch.dtype) -> Sequence:
    pass


@overload
def _maybe_convert_to_dtype(a: None, dtype: torch.dtype) -> None:
    pass


# TODO: implement ref.cast with an option to enforce safe casting
def _maybe_convert_to_dtype(a, dtype):
    if isinstance(a, TensorLike):
        if a.dtype != dtype:
            return a.to(dtype)
        return a
    if isinstance(a, Number):
        return utils.dtype_to_type_ctor(dtype)(a)  # type: ignore[arg-type]
    if isinstance(a, Sequence):
        return tuple(_maybe_convert_to_dtype(x, dtype) for x in a)
    # Passthrough None because some functions wrapped with type promotion
    # wrapper might have optional args
    if a is None:
        return None

    raise ValueError(f"Received type {type(a)} that is neither a tensor nor a number!")


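# Illustrative sketch (not part of the upstream file): tensors are cast,
# numbers go through the dtype's Python type constructor, and sequences recurse.
#
# >>> from torch._prims_common.wrappers import _maybe_convert_to_dtype
# >>> _maybe_convert_to_dtype(torch.ones(2, dtype=torch.int64), torch.float32).dtype
# torch.float32
# >>> _maybe_convert_to_dtype(3, torch.float64)
# 3.0

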
def _maybe_convert_to_type(a: NumberType, typ: type) -> NumberType:
    if not isinstance(a, Number):
        msg = f"Found unknown type {type(a)} when trying to convert scalars!"
        raise ValueError(msg)
    if not utils.is_weakly_lesser_type(type(a), typ):
        msg = f"Scalar {a} of type {type(a)} cannot be safely cast to type {typ}!"
        raise ValueError(msg)

    return typ(a)


def _annotation_has_type(*, typ, annotation):
    if hasattr(annotation, "__args__"):
        for a in annotation.__args__:
            if _annotation_has_type(typ=typ, annotation=a):
                return True
        return False

    return typ is annotation


class elementwise_type_promotion_wrapper:
    """
    Adds elementwise type promotion to a Python reference implementation.

    Takes two kwargs, type_promoting_args and type_promotion_kind.

    type_promoting_args must be a string Sequence specifying the argument names of all
    arguments that participate in type promotion (and should be type promoted). If the
    arg specifies a Sequence-type then every element of the Sequence will participate in
    type promotion.

    type_promotion_kind must be one of the kinds specified by ELEMENTWISE_TYPE_PROMOTION_KIND.
    See its documentation for details.

    The return_dtype will be coerced to the wrapped function's dtype arg if it is available and
    not None.

    Other type promotion behavior, like validating the Python type of scalar arguments, must
    be handled separately.
    """

    def __init__(
        self,
        *,
        type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND,
        type_promoting_args: Optional[Sequence[str]] = None,
    ):
        self.type_promoting_arg_names = type_promoting_args
        self.type_promotion_kind = type_promotion_kind

    def __call__(self, fn: Callable) -> Callable:
        sig = inspect.signature(fn)

        @wraps(fn)
        def _fn(*args, **kwargs):
            bound = sig.bind(*args, **kwargs)
            type_promoting_args = tuple(
                bound.arguments[x]
                for x in self.type_promoting_arg_names  # type: ignore[union-attr]
                if x in bound.arguments.keys()
            )

            flattened_type_promoting_args = pytree.arg_tree_leaves(*type_promoting_args)
            compute_dtype, result_dtype = utils.elementwise_dtypes(
                *flattened_type_promoting_args,
                type_promotion_kind=self.type_promotion_kind,
            )

            promoted_args = {
                x: _maybe_convert_to_dtype(bound.arguments[x], compute_dtype)
                for x in self.type_promoting_arg_names  # type: ignore[union-attr]
                if x in bound.arguments.keys()
            }
            bound.arguments.update(promoted_args)

            result = fn(**bound.arguments)

            # Override the return_dtype if a dtype arg is present and not None
            if "dtype" in bound.arguments:
                maybe_dtype = bound.arguments["dtype"]
                if maybe_dtype:  # dtype cannot be None
                    result_dtype = maybe_dtype

            if isinstance(result, TensorLike):
                return _maybe_convert_to_dtype(result, result_dtype)
            if isinstance(result, Sequence):
                return tuple(_maybe_convert_to_dtype(x, result_dtype) for x in result)
            raise AssertionError(f"Unhandled result type: {type(result)}")

        _fn.__signature__ = sig  # type: ignore[attr-defined]
        return _fn


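# Illustrative sketch (not part of the upstream file): a toy reference wrapped
# so its inputs are promoted to a common computation dtype and its result is
# cast back to the promoted result dtype. `_my_add` is a hypothetical example,
# not an operator defined in this file.
#
# @elementwise_type_promotion_wrapper(
#     type_promoting_args=("a", "b"),
#     type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
# )
# def _my_add(a, b):
#     return torch.add(a, b)
#
# With a float16 tensor and an int64 tensor, `a` and `b` are both converted to
# the float32 computation dtype before the call, and the result comes back
# as float16.

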
# Returns True if resize is necessary
def _resize_output_check(out: TensorLikeType, shape: ShapeType):
    # If the shapes are correct there's nothing to do
    if utils.same_shape(out.shape, shape):
        return False
    if out.numel() != 0:
        msg = (
            f"An output with one or more elements was resized since it had shape {str(out.shape)} "
            f"which does not match the required output shape {str(shape)}. "
            "This behavior is deprecated, and in a future PyTorch release outputs will not "
            "be resized unless they have zero elements. "
            "You can explicitly reuse an out tensor t by resizing it, inplace, to zero elements with t.resize_(0)."
        )
        warnings.warn(msg)
    return True


# TODO: handle tuples of tensors
def _maybe_resize_out(out: TensorLikeType, shape: ShapeType):
    if _resize_output_check(out, shape):
        return out.resize_(shape)
    else:
        return out


def _safe_copy_out(
    *, copy_from: TensorLikeType, copy_to: TensorLikeType, exact_dtype: bool = False
):
    # Checks same device
    if copy_from.device != copy_to.device:
        msg = "Attempting to copy from device {} to device {}, but cross-device copies are not allowed!".format(
            copy_from.device, copy_to.device
        )
        raise RuntimeError(msg)

    # Checks safe cast
    if exact_dtype:
        torch._check(
            copy_from.dtype == copy_to.dtype,
            lambda: f"Expected out tensor to have dtype {copy_from.dtype} "
            f"but got {copy_to.dtype} instead",
        )
    else:
        torch._check(
            utils.can_safe_cast_to(cast_from=copy_from.dtype, cast_to=copy_to.dtype),
            lambda: f"Attempting to cast from {copy_from.dtype} to out tensor with dtype {copy_to.dtype}, "
            "but this can't be cast because it is not safe!",
        )

    return copy_to.copy_(copy_from)


def out_wrapper(*out_names: str, exact_dtype: bool = False, pass_is_out: bool = False):
    # The wrapped function needs to convert the output parameters to ensure
    # compatibility between the Python API (which always uses "out" as the
    # parameter name and may be a tuple) and the Aten API (which may have
    # multiple output parameters and use different parameter names such as
    # "grad_input", "indices" or "values".)

    default_out_names = ("out",)
    if len(out_names) == 0:
        # Use default in out name
        out_names = default_out_names

    is_tensor = len(out_names) == 1

    def _out_wrapper(fn: Callable) -> Callable:
        """
        Adds the out parameter to a Python reference.
        """
        out_type = (
            TensorLikeType
            if is_tensor
            else Tuple[tuple(TensorLikeType for _ in range(len(out_names)))]
        )
        return_type = (
            TensorLikeType
            if is_tensor
            else NamedTuple(
                f"return_types_{fn.__name__}", [(o, TensorLikeType) for o in out_names]
            )
        )

        sig = inspect.signature(fn)
        factory_kwargs = ("device", "dtype")
        is_factory_fn = all(p in sig.parameters for p in factory_kwargs)

        @wraps(fn)
        def _fn(*args, out=None, **kwargs):
            if is_factory_fn and out is not None:
                for k in factory_kwargs:
                    out_attr = getattr(out, k)
                    if k not in kwargs:
                        kwargs[k] = out_attr
            if pass_is_out:
                result = fn(*args, is_out=(out is not None), **kwargs)
            else:
                result = fn(*args, **kwargs)
            assert (
                isinstance(result, TensorLike)
                and is_tensor
                or isinstance(result, Tuple)  # type: ignore[arg-type]
                and len(result) == len(out_names)
            )
            if out is not None:
                # Naively you might expect this assert to be true, but
                # it's not:
                #
                #   assert type(out) == type(result)
                #
                # The reason is that functions under this wrapper can
                # get registered to the Meta dispatch key, and that
                # means they can be executed in a context where tensor
                # subclasses are disabled (with no_dispatch), which is a
                # handy way for an is-a tensor subclass (e.g.,
                # FakeTensor) to have the normal meta backend create a
                # meta tensor, to be wrapped once it gets returned.
                # In this situation, you will get a FakeTensor as
                # the output tensor, but not the result--which will
                # be a normal meta tensor, but this is perfectly
                # harmless.
                if is_tensor:
                    assert isinstance(out, TensorLike)
                    # These two operations are done in-place
                    _maybe_resize_out(out, result.shape)
                    _safe_copy_out(copy_from=result, copy_to=out, exact_dtype=exact_dtype)  # type: ignore[arg-type]
                else:
                    assert isinstance(out, Tuple)  # type: ignore[arg-type]
                    torch._check_type(
                        len(out) == len(result),
                        lambda: f"expected tuple of {len(result)} elements but got {len(out)}",
                    )
                    for r, o in zip(result, out):
                        # These two operations are done in-place
                        _maybe_resize_out(o, r.shape)
                        _safe_copy_out(copy_from=r, copy_to=o, exact_dtype=exact_dtype)  # type: ignore[arg-type]
            else:
                out = result
            # mypy does not see through the definition of out_type given that it's in a different scope
            return out if is_tensor else return_type(*out)  # type: ignore[operator]

        out_param = inspect.Parameter(
            "out",
            kind=inspect.Parameter.KEYWORD_ONLY,
            default=None,
            annotation=out_type,
        )
        # Mark that the function now returns a tuple
        assert isinstance(sig.return_annotation, str) or sig.return_annotation in (
            sig.empty,
            out_type,
        )
        params = chain(sig.parameters.values(), (out_param,))
        _fn.__signature__ = inspect.Signature(  # type: ignore[attr-defined]
            parameters=params, return_annotation=return_type  # type: ignore[arg-type]
        )
311 |
+
|
312 |
+
_fn.__annotations__ = fn.__annotations__
|
313 |
+
_fn.__annotations__["out"] = out_type
|
314 |
+
_fn.__annotations__["return"] = return_type
|
315 |
+
|
316 |
+
# In the special case of having a single tensor out parameter with a
|
317 |
+
# name other than out, add a special annotation to name the parameter
|
318 |
+
if is_tensor and out_names != default_out_names:
|
319 |
+
_fn.__annotations__[CustomOutParamAnnotation] = out_names[0]
|
320 |
+
|
321 |
+
# Add an indicator attribute that can be used in special cases
|
322 |
+
# where having a function wrapped by `out_wrapper` is not desirable e.g.
|
323 |
+
# jit
|
324 |
+
_fn._torch_decompositions_out_wrapper = f"This function is wrapped by {out_wrapper.__module__}.out_wrapper" # type: ignore[attr-defined]
|
325 |
+
|
326 |
+
return _fn
|
327 |
+
|
328 |
+
return _out_wrapper
|
329 |
+
|
330 |
+
|
331 |
+
def _maybe_remove_out_wrapper(fn: Callable):
|
332 |
+
return inspect.unwrap(
|
333 |
+
fn,
|
334 |
+
stop=lambda f: not hasattr(f, "_torch_decompositions_out_wrapper"),
|
335 |
+
)
|
336 |
+
|
337 |
+
|
338 |
+
def backwards_not_supported(prim):
|
339 |
+
def redispatch_prim(args, kwargs):
|
340 |
+
with torch._C._AutoDispatchBelowAutograd():
|
341 |
+
old = torch._C._dispatch_tls_is_dispatch_key_excluded(
|
342 |
+
torch._C.DispatchKey.ADInplaceOrView
|
343 |
+
)
|
344 |
+
return prim(*args, **kwargs)
|
345 |
+
|
346 |
+
class BackwardsNotSupported(torch.autograd.Function):
|
347 |
+
@staticmethod
|
348 |
+
def forward(ctx, args_spec, *flat_args):
|
349 |
+
args, kwargs = tree_unflatten(flat_args, args_spec) # type: ignore[arg-type]
|
350 |
+
return redispatch_prim(args, kwargs)
|
351 |
+
|
352 |
+
@staticmethod
|
353 |
+
def backward(ctx, *args):
|
354 |
+
raise RuntimeError("backwards not supported on prim")
|
355 |
+
|
356 |
+
@wraps(prim)
|
357 |
+
def _autograd_impl(*args, **kwargs):
|
358 |
+
flat_args, args_spec = tree_flatten((args, kwargs))
|
359 |
+
if torch.is_grad_enabled() and any(
|
360 |
+
a.requires_grad for a in flat_args if isinstance(a, torch.Tensor)
|
361 |
+
):
|
362 |
+
# TODO: There is a subtle bug here: prims like copy_to
|
363 |
+
# return their input argument after mutating it; and custom
|
364 |
+
# autograd function will incorrectly turn the result into
|
365 |
+
# a view which will fail test_python_ref_executor tests.
|
366 |
+
# At the moment, we sidestep this by observing that the
|
367 |
+
# unit tests don't ever try to run the executor with
|
368 |
+
# autograd, so we don't exercise the buggy case, but if
|
369 |
+
# you ever want to feed autograd through this, be aware
|
370 |
+
# of it! We need a way of properly implementing autograd
|
371 |
+
# for mutating operations in Python to do this.
|
372 |
+
return BackwardsNotSupported.apply(args_spec, *flat_args)
|
373 |
+
else:
|
374 |
+
return redispatch_prim(args, kwargs)
|
375 |
+
|
376 |
+
return _autograd_impl
|
377 |
+
|
378 |
+
|
379 |
+
# TODO: when tracing this will add torch tensors and not TensorMeta objects
|
380 |
+
# to the trace -- we should fix this by adding a tracing context and NumberMeta classes
|
381 |
+
# TODO: this wrapper is currently untested
|
382 |
+
def elementwise_unary_scalar_wrapper(fn: Callable) -> Callable:
|
383 |
+
"""
|
384 |
+
Allows unary operators that accept tensors to work with Python numbers.
|
385 |
+
"""
|
386 |
+
sig = inspect.signature(fn)
|
387 |
+
|
388 |
+
@wraps(fn)
|
389 |
+
def _fn(*args, **kwargs):
|
390 |
+
if len(args) > 0 and isinstance(args[0], Number):
|
391 |
+
dtype = utils.type_to_dtype(type(args[0]))
|
392 |
+
args_ = list(args)
|
393 |
+
args_[0] = torch.tensor(args[0], dtype=dtype)
|
394 |
+
result = fn(*args_, **kwargs)
|
395 |
+
assert isinstance(result, torch.Tensor)
|
396 |
+
return result.item()
|
397 |
+
|
398 |
+
return fn(*args, **kwargs)
|
399 |
+
|
400 |
+
_fn.__signature__ = sig # type: ignore[attr-defined]
|
401 |
+
return _fn
|
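A quick sketch of the ``out=`` contract that `_maybe_resize_out` and `_safe_copy_out` implement for Python references above. The native ops follow the same rules, so the behavior can be observed from the public `torch` API; the tensor names here are illustrative, not part of the file:

import torch

a = torch.ones(4)
b = torch.ones(4)

# A zero-element out tensor is resized silently (_maybe_resize_out);
# a non-empty tensor of the wrong shape is also resized, but with a
# deprecation warning.
out = torch.empty(0)
torch.add(a, b, out=out)
assert out.shape == (4,)

# _safe_copy_out rejects unsafe casts: a float result cannot be copied
# into an integer out tensor.
try:
    torch.add(a, b, out=torch.empty(4, dtype=torch.long))
except RuntimeError as e:
    print(e)  # e.g. "result type Float can't be cast to ... Long"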
venv/lib/python3.10/site-packages/torch/cpu/__init__.py
ADDED
@@ -0,0 +1,157 @@
r"""
This package implements abstractions found in ``torch.cuda``
to facilitate writing device-agnostic code.
"""

from contextlib import AbstractContextManager
from typing import Any, Optional, Union

import torch

from .. import device as _device
from . import amp

__all__ = [
    "is_available",
    "synchronize",
    "current_device",
    "current_stream",
    "stream",
    "set_device",
    "device_count",
    "Stream",
    "StreamContext",
    "Event",
]

_device_t = Union[_device, str, int, None]


def _is_cpu_support_vnni() -> bool:
    r"""Returns a bool indicating if CPU supports VNNI."""
    return torch._C._cpu._is_cpu_support_vnni()


def is_available() -> bool:
    r"""Returns a bool indicating if CPU is currently available.

    N.B. This function only exists to facilitate device-agnostic code
    """
    return True


def synchronize(device: _device_t = None) -> None:
    r"""Waits for all kernels in all streams on the CPU device to complete.

    Args:
        device (torch.device or int, optional): ignored, there's only one CPU device.

    N.B. This function only exists to facilitate device-agnostic code.
    """
    pass


class Stream:
    """
    N.B. This class only exists to facilitate device-agnostic code
    """

    def __init__(self, priority: int = -1):
        pass

    def wait_stream(self, stream) -> None:
        pass


class Event:
    def query(self) -> bool:
        return True

    def record(self, stream=None):
        pass

    def synchronize(self):
        pass

    def wait(self, stream=None):
        pass


_default_cpu_stream = Stream()
_current_stream = _default_cpu_stream


def current_stream(device: _device_t = None) -> Stream:
    r"""Returns the currently selected :class:`Stream` for a given device.

    Args:
        device (torch.device or int, optional): Ignored.

    N.B. This function only exists to facilitate device-agnostic code
    """
    return _current_stream


class StreamContext(AbstractContextManager):
    r"""Context-manager that selects a given stream.

    N.B. This class only exists to facilitate device-agnostic code
    """

    cur_stream: Optional[Stream]

    def __init__(self, stream):
        self.stream = stream
        self.prev_stream = _default_cpu_stream

    def __enter__(self):
        cur_stream = self.stream
        if cur_stream is None:
            return

        global _current_stream
        self.prev_stream = _current_stream
        _current_stream = cur_stream

    def __exit__(self, type: Any, value: Any, traceback: Any):
        cur_stream = self.stream
        if cur_stream is None:
            return

        global _current_stream
        _current_stream = self.prev_stream


def stream(stream: Stream) -> AbstractContextManager:
    r"""Wrapper around the Context-manager StreamContext that
    selects a given stream.

    N.B. This function only exists to facilitate device-agnostic code
    """
    return StreamContext(stream)


def device_count() -> int:
    r"""Returns number of CPU devices (not cores). Always 1.

    N.B. This function only exists to facilitate device-agnostic code
    """
    return 1


def set_device(device: _device_t) -> None:
    r"""Sets the current device, in CPU we do nothing.

    N.B. This function only exists to facilitate device-agnostic code
    """
    pass


def current_device() -> str:
    r"""Returns current device for cpu. Always 'cpu'.

    N.B. This function only exists to facilitate device-agnostic code
    """
    return "cpu"
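Because every function above is a no-op stub with the same surface as its ``torch.cuda`` counterpart, device-agnostic code can be parameterized over the module itself. A minimal sketch, not part of the file (``run_with_stream`` is a hypothetical name):

import torch

def run_with_stream(device_mod):
    # device_mod is either torch.cpu or torch.cuda; on CPU every call
    # below is a harmless stub, on CUDA it does real stream work.
    if device_mod.is_available():
        s = device_mod.Stream()
        with device_mod.stream(s):
            pass  # enqueue work here
        device_mod.synchronize()

run_with_stream(torch.cpu)  # always available; torch.cuda works the same way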
venv/lib/python3.10/site-packages/torch/cpu/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.83 kB).
venv/lib/python3.10/site-packages/torch/cpu/amp/__init__.py
ADDED
@@ -0,0 +1,2 @@
from .autocast_mode import autocast
from .grad_scaler import GradScaler
venv/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (273 Bytes).
venv/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/autocast_mode.cpython-310.pyc
ADDED
Binary file (1.6 kB).
venv/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/grad_scaler.cpython-310.pyc
ADDED
Binary file (1.03 kB).
venv/lib/python3.10/site-packages/torch/cpu/amp/autocast_mode.py
ADDED
@@ -0,0 +1,43 @@
from typing import Any

import torch

__all__ = ["autocast"]


class autocast(torch.amp.autocast_mode.autocast):
    r"""
    See :class:`torch.autocast`.
    ``torch.cpu.amp.autocast(args...)`` is equivalent to ``torch.autocast("cpu", args...)``
    """

    def __init__(
        self,
        enabled: bool = True,
        dtype: torch.dtype = torch.bfloat16,
        cache_enabled: bool = True,
    ):
        if torch._jit_internal.is_scripting():
            self._enabled = enabled
            self.device = "cpu"
            self.fast_dtype = dtype
            return
        super().__init__(
            "cpu", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled
        )

    def __enter__(self):
        if torch._jit_internal.is_scripting():
            return self
        return super().__enter__()

    # TODO: discuss a unified TorchScript-friendly API for autocast
    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any):  # type: ignore[override]
        if torch._jit_internal.is_scripting():
            return
        return super().__exit__(exc_type, exc_val, exc_tb)

    def __call__(self, func):
        if torch._jit_internal.is_scripting():
            return func
        return super().__call__(func)
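Usage sketch, not part of the file: on CPU the autocast region runs eligible ops such as ``torch.mm`` in the default ``torch.bfloat16``:

import torch

x = torch.randn(8, 8)
w = torch.randn(8, 8)
with torch.cpu.amp.autocast():
    y = torch.mm(x, w)  # matmul is autocast-eligible on CPU
print(y.dtype)          # torch.bfloat16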
venv/lib/python3.10/site-packages/torch/cpu/amp/grad_scaler.py
ADDED
@@ -0,0 +1,27 @@
import torch

__all__ = ["GradScaler"]


class GradScaler(torch.amp.GradScaler):
    r"""
    See :class:`torch.amp.GradScaler`.
    ``torch.cpu.amp.GradScaler(args...)`` is equivalent to ``torch.amp.GradScaler("cpu", args...)``
    """

    def __init__(
        self,
        init_scale: float = 2.0**16,
        growth_factor: float = 2.0,
        backoff_factor: float = 0.5,
        growth_interval: int = 2000,
        enabled: bool = True,
    ) -> None:
        super().__init__(
            "cpu",
            init_scale=init_scale,
            growth_factor=growth_factor,
            backoff_factor=backoff_factor,
            growth_interval=growth_interval,
            enabled=enabled,
        )
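Usage sketch, not part of the file: the training-loop API mirrors the CUDA scaler one-to-one. With bfloat16 autocast on CPU, loss scaling is rarely necessary, since bfloat16 keeps float32's exponent range, but the recipe is the same:

import torch

model = torch.nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cpu.amp.GradScaler()

x, target = torch.randn(2, 4), torch.randn(2, 1)
with torch.cpu.amp.autocast():
    loss = torch.nn.functional.mse_loss(model(x), target)

# scale -> backward -> step -> update, exactly as in the CUDA recipe
scaler.scale(loss).backward()
scaler.step(opt)
scaler.update()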
venv/lib/python3.10/site-packages/torch/fft/__init__.py
ADDED
@@ -0,0 +1,1360 @@
import sys

import torch
from torch._C import _add_docstr, _fft  # type: ignore[attr-defined]
from torch._torch_docs import factory_common_args, common_args

__all__ = ['fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn',
           'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn',
           'hfft', 'ihfft', 'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift',
           'Tensor']

Tensor = torch.Tensor

# Note: This not only adds the doc strings for the spectral ops, but
# connects the torch.fft Python namespace to the torch._C._fft builtins.

fft = _add_docstr(_fft.fft_fft, r"""
fft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor

Computes the one dimensional discrete Fourier transform of :attr:`input`.

Note:
    The Fourier domain representation of any real signal satisfies the
    Hermitian property: `X[i] = conj(X[-i])`. This function always returns both
    the positive and negative frequency terms even though, for real inputs, the
    negative frequencies are redundant. :func:`~torch.fft.rfft` returns the
    more compact one-sided representation where only the positive frequencies
    are returned.

Note:
    Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.

Args:
    input (Tensor): the input tensor
    n (int, optional): Signal length. If given, the input will either be zero-padded
        or trimmed to this length before computing the FFT.
    dim (int, optional): The dimension along which to take the one dimensional FFT.
    norm (str, optional): Normalization mode. For the forward transform
        (:func:`~torch.fft.fft`), these correspond to:

        * ``"forward"`` - normalize by ``1/n``
        * ``"backward"`` - no normalization
        * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)

        Calling the backward transform (:func:`~torch.fft.ifft`) with the same
        normalization mode will apply an overall normalization of ``1/n`` between
        the two transforms. This is required to make :func:`~torch.fft.ifft`
        the exact inverse.

        Default is ``"backward"`` (no normalization).

Keyword args:
    {out}

Example:

    >>> t = torch.arange(4)
    >>> t
    tensor([0, 1, 2, 3])
    >>> torch.fft.fft(t)
    tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j])

    >>> t = torch.tensor([0.+1.j, 2.+3.j, 4.+5.j, 6.+7.j])
    >>> torch.fft.fft(t)
    tensor([12.+16.j, -8.+0.j, -4.-4.j,  0.-8.j])
""".format(**common_args))

ifft = _add_docstr(_fft.fft_ifft, r"""
ifft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor

Computes the one dimensional inverse discrete Fourier transform of :attr:`input`.

Note:
    Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.

Args:
    input (Tensor): the input tensor
    n (int, optional): Signal length. If given, the input will either be zero-padded
        or trimmed to this length before computing the IFFT.
    dim (int, optional): The dimension along which to take the one dimensional IFFT.
    norm (str, optional): Normalization mode. For the backward transform
        (:func:`~torch.fft.ifft`), these correspond to:

        * ``"forward"`` - no normalization
        * ``"backward"`` - normalize by ``1/n``
        * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)

        Calling the forward transform (:func:`~torch.fft.fft`) with the same
        normalization mode will apply an overall normalization of ``1/n`` between
        the two transforms. This is required to make :func:`~torch.fft.ifft`
        the exact inverse.

        Default is ``"backward"`` (normalize by ``1/n``).

Keyword args:
    {out}

Example:

    >>> t = torch.tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j])
    >>> torch.fft.ifft(t)
    tensor([0.+0.j, 1.+0.j, 2.+0.j, 3.+0.j])
""".format(**common_args))

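The normalization contract spelled out in both docstrings above can be checked end-to-end; a small sketch, not part of the file:

import torch

t = torch.randn(128, dtype=torch.complex64)

# fft and ifft invert each other whenever the same norm mode is used on
# both sides; "ortho" additionally makes each direction unitary on its own.
for norm in ("forward", "backward", "ortho"):
    roundtrip = torch.fft.ifft(torch.fft.fft(t, norm=norm), norm=norm)
    torch.testing.assert_close(roundtrip, t)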
107 |
+
fft2 = _add_docstr(_fft.fft_fft2, r"""
|
108 |
+
fft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
|
109 |
+
|
110 |
+
Computes the 2 dimensional discrete Fourier transform of :attr:`input`.
|
111 |
+
Equivalent to :func:`~torch.fft.fftn` but FFTs only the last two dimensions by default.
|
112 |
+
|
113 |
+
Note:
|
114 |
+
The Fourier domain representation of any real signal satisfies the
|
115 |
+
Hermitian property: ``X[i, j] = conj(X[-i, -j])``. This
|
116 |
+
function always returns all positive and negative frequency terms even
|
117 |
+
though, for real inputs, half of these values are redundant.
|
118 |
+
:func:`~torch.fft.rfft2` returns the more compact one-sided representation
|
119 |
+
where only the positive frequencies of the last dimension are returned.
|
120 |
+
|
121 |
+
Note:
|
122 |
+
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
|
123 |
+
However it only supports powers of 2 signal length in every transformed dimensions.
|
124 |
+
|
125 |
+
Args:
|
126 |
+
input (Tensor): the input tensor
|
127 |
+
s (Tuple[int], optional): Signal size in the transformed dimensions.
|
128 |
+
If given, each dimension ``dim[i]`` will either be zero-padded or
|
129 |
+
trimmed to the length ``s[i]`` before computing the FFT.
|
130 |
+
If a length ``-1`` is specified, no padding is done in that dimension.
|
131 |
+
Default: ``s = [input.size(d) for d in dim]``
|
132 |
+
dim (Tuple[int], optional): Dimensions to be transformed.
|
133 |
+
Default: last two dimensions.
|
134 |
+
norm (str, optional): Normalization mode. For the forward transform
|
135 |
+
(:func:`~torch.fft.fft2`), these correspond to:
|
136 |
+
|
137 |
+
* ``"forward"`` - normalize by ``1/n``
|
138 |
+
* ``"backward"`` - no normalization
|
139 |
+
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
|
140 |
+
|
141 |
+
Where ``n = prod(s)`` is the logical FFT size.
|
142 |
+
Calling the backward transform (:func:`~torch.fft.ifft2`) with the same
|
143 |
+
normalization mode will apply an overall normalization of ``1/n``
|
144 |
+
between the two transforms. This is required to make
|
145 |
+
:func:`~torch.fft.ifft2` the exact inverse.
|
146 |
+
|
147 |
+
Default is ``"backward"`` (no normalization).
|
148 |
+
|
149 |
+
Keyword args:
|
150 |
+
{out}
|
151 |
+
|
152 |
+
Example:
|
153 |
+
|
154 |
+
>>> x = torch.rand(10, 10, dtype=torch.complex64)
|
155 |
+
>>> fft2 = torch.fft.fft2(x)
|
156 |
+
|
157 |
+
The discrete Fourier transform is separable, so :func:`~torch.fft.fft2`
|
158 |
+
here is equivalent to two one-dimensional :func:`~torch.fft.fft` calls:
|
159 |
+
|
160 |
+
>>> two_ffts = torch.fft.fft(torch.fft.fft(x, dim=0), dim=1)
|
161 |
+
>>> torch.testing.assert_close(fft2, two_ffts, check_stride=False)
|
162 |
+
|
163 |
+
""".format(**common_args))
|
164 |
+
|
165 |
+
ifft2 = _add_docstr(_fft.fft_ifft2, r"""
|
166 |
+
ifft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
|
167 |
+
|
168 |
+
Computes the 2 dimensional inverse discrete Fourier transform of :attr:`input`.
|
169 |
+
Equivalent to :func:`~torch.fft.ifftn` but IFFTs only the last two dimensions by default.
|
170 |
+
|
171 |
+
Note:
|
172 |
+
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
|
173 |
+
However it only supports powers of 2 signal length in every transformed dimensions.
|
174 |
+
|
175 |
+
Args:
|
176 |
+
input (Tensor): the input tensor
|
177 |
+
s (Tuple[int], optional): Signal size in the transformed dimensions.
|
178 |
+
If given, each dimension ``dim[i]`` will either be zero-padded or
|
179 |
+
trimmed to the length ``s[i]`` before computing the IFFT.
|
180 |
+
If a length ``-1`` is specified, no padding is done in that dimension.
|
181 |
+
Default: ``s = [input.size(d) for d in dim]``
|
182 |
+
dim (Tuple[int], optional): Dimensions to be transformed.
|
183 |
+
Default: last two dimensions.
|
184 |
+
norm (str, optional): Normalization mode. For the backward transform
|
185 |
+
(:func:`~torch.fft.ifft2`), these correspond to:
|
186 |
+
|
187 |
+
* ``"forward"`` - no normalization
|
188 |
+
* ``"backward"`` - normalize by ``1/n``
|
189 |
+
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)
|
190 |
+
|
191 |
+
Where ``n = prod(s)`` is the logical IFFT size.
|
192 |
+
Calling the forward transform (:func:`~torch.fft.fft2`) with the same
|
193 |
+
normalization mode will apply an overall normalization of ``1/n`` between
|
194 |
+
the two transforms. This is required to make :func:`~torch.fft.ifft2`
|
195 |
+
the exact inverse.
|
196 |
+
|
197 |
+
Default is ``"backward"`` (normalize by ``1/n``).
|
198 |
+
|
199 |
+
Keyword args:
|
200 |
+
{out}
|
201 |
+
|
202 |
+
Example:
|
203 |
+
|
204 |
+
>>> x = torch.rand(10, 10, dtype=torch.complex64)
|
205 |
+
>>> ifft2 = torch.fft.ifft2(x)
|
206 |
+
|
207 |
+
The discrete Fourier transform is separable, so :func:`~torch.fft.ifft2`
|
208 |
+
here is equivalent to two one-dimensional :func:`~torch.fft.ifft` calls:
|
209 |
+
|
210 |
+
>>> two_iffts = torch.fft.ifft(torch.fft.ifft(x, dim=0), dim=1)
|
211 |
+
>>> torch.testing.assert_close(ifft2, two_iffts, check_stride=False)
|
212 |
+
|
213 |
+
""".format(**common_args))
|
214 |
+
|
215 |
+
fftn = _add_docstr(_fft.fft_fftn, r"""
|
216 |
+
fftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
|
217 |
+
|
218 |
+
Computes the N dimensional discrete Fourier transform of :attr:`input`.
|
219 |
+
|
220 |
+
Note:
|
221 |
+
The Fourier domain representation of any real signal satisfies the
|
222 |
+
Hermitian property: ``X[i_1, ..., i_n] = conj(X[-i_1, ..., -i_n])``. This
|
223 |
+
function always returns all positive and negative frequency terms even
|
224 |
+
though, for real inputs, half of these values are redundant.
|
225 |
+
:func:`~torch.fft.rfftn` returns the more compact one-sided representation
|
226 |
+
where only the positive frequencies of the last dimension are returned.
|
227 |
+
|
228 |
+
Note:
|
229 |
+
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
|
230 |
+
However it only supports powers of 2 signal length in every transformed dimensions.
|
231 |
+
|
232 |
+
Args:
|
233 |
+
input (Tensor): the input tensor
|
234 |
+
s (Tuple[int], optional): Signal size in the transformed dimensions.
|
235 |
+
If given, each dimension ``dim[i]`` will either be zero-padded or
|
236 |
+
trimmed to the length ``s[i]`` before computing the FFT.
|
237 |
+
If a length ``-1`` is specified, no padding is done in that dimension.
|
238 |
+
Default: ``s = [input.size(d) for d in dim]``
|
239 |
+
dim (Tuple[int], optional): Dimensions to be transformed.
|
240 |
+
Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
|
241 |
+
norm (str, optional): Normalization mode. For the forward transform
|
242 |
+
(:func:`~torch.fft.fftn`), these correspond to:
|
243 |
+
|
244 |
+
* ``"forward"`` - normalize by ``1/n``
|
245 |
+
* ``"backward"`` - no normalization
|
246 |
+
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
|
247 |
+
|
248 |
+
Where ``n = prod(s)`` is the logical FFT size.
|
249 |
+
Calling the backward transform (:func:`~torch.fft.ifftn`) with the same
|
250 |
+
normalization mode will apply an overall normalization of ``1/n``
|
251 |
+
between the two transforms. This is required to make
|
252 |
+
:func:`~torch.fft.ifftn` the exact inverse.
|
253 |
+
|
254 |
+
Default is ``"backward"`` (no normalization).
|
255 |
+
|
256 |
+
Keyword args:
|
257 |
+
{out}
|
258 |
+
|
259 |
+
Example:
|
260 |
+
|
261 |
+
>>> x = torch.rand(10, 10, dtype=torch.complex64)
|
262 |
+
>>> fftn = torch.fft.fftn(x)
|
263 |
+
|
264 |
+
The discrete Fourier transform is separable, so :func:`~torch.fft.fftn`
|
265 |
+
here is equivalent to two one-dimensional :func:`~torch.fft.fft` calls:
|
266 |
+
|
267 |
+
>>> two_ffts = torch.fft.fft(torch.fft.fft(x, dim=0), dim=1)
|
268 |
+
>>> torch.testing.assert_close(fftn, two_ffts, check_stride=False)
|
269 |
+
|
270 |
+
""".format(**common_args))
|
271 |
+
|
272 |
+
ifftn = _add_docstr(_fft.fft_ifftn, r"""
|
273 |
+
ifftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
|
274 |
+
|
275 |
+
Computes the N dimensional inverse discrete Fourier transform of :attr:`input`.
|
276 |
+
|
277 |
+
Note:
|
278 |
+
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
|
279 |
+
However it only supports powers of 2 signal length in every transformed dimensions.
|
280 |
+
|
281 |
+
Args:
|
282 |
+
input (Tensor): the input tensor
|
283 |
+
s (Tuple[int], optional): Signal size in the transformed dimensions.
|
284 |
+
If given, each dimension ``dim[i]`` will either be zero-padded or
|
285 |
+
trimmed to the length ``s[i]`` before computing the IFFT.
|
286 |
+
If a length ``-1`` is specified, no padding is done in that dimension.
|
287 |
+
Default: ``s = [input.size(d) for d in dim]``
|
288 |
+
dim (Tuple[int], optional): Dimensions to be transformed.
|
289 |
+
Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
|
290 |
+
norm (str, optional): Normalization mode. For the backward transform
|
291 |
+
(:func:`~torch.fft.ifftn`), these correspond to:
|
292 |
+
|
293 |
+
* ``"forward"`` - no normalization
|
294 |
+
* ``"backward"`` - normalize by ``1/n``
|
295 |
+
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)
|
296 |
+
|
297 |
+
Where ``n = prod(s)`` is the logical IFFT size.
|
298 |
+
Calling the forward transform (:func:`~torch.fft.fftn`) with the same
|
299 |
+
normalization mode will apply an overall normalization of ``1/n`` between
|
300 |
+
the two transforms. This is required to make :func:`~torch.fft.ifftn`
|
301 |
+
the exact inverse.
|
302 |
+
|
303 |
+
Default is ``"backward"`` (normalize by ``1/n``).
|
304 |
+
|
305 |
+
Keyword args:
|
306 |
+
{out}
|
307 |
+
|
308 |
+
Example:
|
309 |
+
|
310 |
+
>>> x = torch.rand(10, 10, dtype=torch.complex64)
|
311 |
+
>>> ifftn = torch.fft.ifftn(x)
|
312 |
+
|
313 |
+
The discrete Fourier transform is separable, so :func:`~torch.fft.ifftn`
|
314 |
+
here is equivalent to two one-dimensional :func:`~torch.fft.ifft` calls:
|
315 |
+
|
316 |
+
>>> two_iffts = torch.fft.ifft(torch.fft.ifft(x, dim=0), dim=1)
|
317 |
+
>>> torch.testing.assert_close(ifftn, two_iffts, check_stride=False)
|
318 |
+
|
319 |
+
""".format(**common_args))
|
320 |
+
|
321 |
+
rfft = _add_docstr(_fft.fft_rfft, r"""
|
322 |
+
rfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
|
323 |
+
|
324 |
+
Computes the one dimensional Fourier transform of real-valued :attr:`input`.
|
325 |
+
|
326 |
+
The FFT of a real signal is Hermitian-symmetric, ``X[i] = conj(X[-i])`` so
|
327 |
+
the output contains only the positive frequencies below the Nyquist frequency.
|
328 |
+
To compute the full output, use :func:`~torch.fft.fft`
|
329 |
+
|
330 |
+
Note:
|
331 |
+
Supports torch.half on CUDA with GPU Architecture SM53 or greater.
|
332 |
+
However it only supports powers of 2 signal length in every transformed dimension.
|
333 |
+
|
334 |
+
Args:
|
335 |
+
input (Tensor): the real input tensor
|
336 |
+
n (int, optional): Signal length. If given, the input will either be zero-padded
|
337 |
+
or trimmed to this length before computing the real FFT.
|
338 |
+
dim (int, optional): The dimension along which to take the one dimensional real FFT.
|
339 |
+
norm (str, optional): Normalization mode. For the forward transform
|
340 |
+
(:func:`~torch.fft.rfft`), these correspond to:
|
341 |
+
|
342 |
+
* ``"forward"`` - normalize by ``1/n``
|
343 |
+
* ``"backward"`` - no normalization
|
344 |
+
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
|
345 |
+
|
346 |
+
Calling the backward transform (:func:`~torch.fft.irfft`) with the same
|
347 |
+
normalization mode will apply an overall normalization of ``1/n`` between
|
348 |
+
the two transforms. This is required to make :func:`~torch.fft.irfft`
|
349 |
+
the exact inverse.
|
350 |
+
|
351 |
+
Default is ``"backward"`` (no normalization).
|
352 |
+
|
353 |
+
Keyword args:
|
354 |
+
{out}
|
355 |
+
|
356 |
+
Example:
|
357 |
+
|
358 |
+
>>> t = torch.arange(4)
|
359 |
+
>>> t
|
360 |
+
tensor([0, 1, 2, 3])
|
361 |
+
>>> torch.fft.rfft(t)
|
362 |
+
tensor([ 6.+0.j, -2.+2.j, -2.+0.j])
|
363 |
+
|
364 |
+
Compare against the full output from :func:`~torch.fft.fft`:
|
365 |
+
|
366 |
+
>>> torch.fft.fft(t)
|
367 |
+
tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j])
|
368 |
+
|
369 |
+
Notice that the symmetric element ``T[-1] == T[1].conj()`` is omitted.
|
370 |
+
At the Nyquist frequency ``T[-2] == T[2]`` is it's own symmetric pair,
|
371 |
+
and therefore must always be real-valued.
|
372 |
+
""".format(**common_args))
|
373 |
+
|
374 |
+
irfft = _add_docstr(_fft.fft_irfft, r"""
|
375 |
+
irfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
|
376 |
+
|
377 |
+
Computes the inverse of :func:`~torch.fft.rfft`.
|
378 |
+
|
379 |
+
:attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier
|
380 |
+
domain, as produced by :func:`~torch.fft.rfft`. By the Hermitian property, the
|
381 |
+
output will be real-valued.
|
382 |
+
|
383 |
+
Note:
|
384 |
+
Some input frequencies must be real-valued to satisfy the Hermitian
|
385 |
+
property. In these cases the imaginary component will be ignored.
|
386 |
+
For example, any imaginary component in the zero-frequency term cannot
|
387 |
+
be represented in a real output and so will always be ignored.
|
388 |
+
|
389 |
+
Note:
|
390 |
+
The correct interpretation of the Hermitian input depends on the length of
|
391 |
+
the original data, as given by :attr:`n`. This is because each input shape
|
392 |
+
could correspond to either an odd or even length signal. By default, the
|
393 |
+
signal is assumed to be even length and odd signals will not round-trip
|
394 |
+
properly. So, it is recommended to always pass the signal length :attr:`n`.
|
395 |
+
|
396 |
+
Note:
|
397 |
+
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
|
398 |
+
However it only supports powers of 2 signal length in every transformed dimension.
|
399 |
+
With default arguments, size of the transformed dimension should be (2^n + 1) as argument
|
400 |
+
`n` defaults to even output size = 2 * (transformed_dim_size - 1)
|
401 |
+
|
402 |
+
Args:
|
403 |
+
input (Tensor): the input tensor representing a half-Hermitian signal
|
404 |
+
n (int, optional): Output signal length. This determines the length of the
|
405 |
+
output signal. If given, the input will either be zero-padded or trimmed to this
|
406 |
+
length before computing the real IFFT.
|
407 |
+
Defaults to even output: ``n=2*(input.size(dim) - 1)``.
|
408 |
+
dim (int, optional): The dimension along which to take the one dimensional real IFFT.
|
409 |
+
norm (str, optional): Normalization mode. For the backward transform
|
410 |
+
(:func:`~torch.fft.irfft`), these correspond to:
|
411 |
+
|
412 |
+
* ``"forward"`` - no normalization
|
413 |
+
* ``"backward"`` - normalize by ``1/n``
|
414 |
+
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal)
|
415 |
+
|
416 |
+
Calling the forward transform (:func:`~torch.fft.rfft`) with the same
|
417 |
+
normalization mode will apply an overall normalization of ``1/n`` between
|
418 |
+
the two transforms. This is required to make :func:`~torch.fft.irfft`
|
419 |
+
the exact inverse.
|
420 |
+
|
421 |
+
Default is ``"backward"`` (normalize by ``1/n``).
|
422 |
+
|
423 |
+
Keyword args:
|
424 |
+
{out}
|
425 |
+
|
426 |
+
Example:
|
427 |
+
|
428 |
+
>>> t = torch.linspace(0, 1, 5)
|
429 |
+
>>> t
|
430 |
+
tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
|
431 |
+
>>> T = torch.fft.rfft(t)
|
432 |
+
>>> T
|
433 |
+
tensor([ 2.5000+0.0000j, -0.6250+0.8602j, -0.6250+0.2031j])
|
434 |
+
|
435 |
+
Without specifying the output length to :func:`~torch.fft.irfft`, the output
|
436 |
+
will not round-trip properly because the input is odd-length:
|
437 |
+
|
438 |
+
>>> torch.fft.irfft(T)
|
439 |
+
tensor([0.1562, 0.3511, 0.7812, 1.2114])
|
440 |
+
|
441 |
+
So, it is recommended to always pass the signal length :attr:`n`:
|
442 |
+
|
443 |
+
>>> roundtrip = torch.fft.irfft(T, t.numel())
|
444 |
+
>>> torch.testing.assert_close(roundtrip, t, check_stride=False)
|
445 |
+
|
446 |
+
""".format(**common_args))
|
447 |
+
|
448 |
+
rfft2 = _add_docstr(_fft.fft_rfft2, r"""
|
449 |
+
rfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
|
450 |
+
|
451 |
+
Computes the 2-dimensional discrete Fourier transform of real :attr:`input`.
|
452 |
+
Equivalent to :func:`~torch.fft.rfftn` but FFTs only the last two dimensions by default.
|
453 |
+
|
454 |
+
The FFT of a real signal is Hermitian-symmetric, ``X[i, j] = conj(X[-i, -j])``,
|
455 |
+
so the full :func:`~torch.fft.fft2` output contains redundant information.
|
456 |
+
:func:`~torch.fft.rfft2` instead omits the negative frequencies in the last
|
457 |
+
dimension.
|
458 |
+
|
459 |
+
Note:
|
460 |
+
Supports torch.half on CUDA with GPU Architecture SM53 or greater.
|
461 |
+
However it only supports powers of 2 signal length in every transformed dimensions.
|
462 |
+
|
463 |
+
Args:
|
464 |
+
input (Tensor): the input tensor
|
465 |
+
s (Tuple[int], optional): Signal size in the transformed dimensions.
|
466 |
+
If given, each dimension ``dim[i]`` will either be zero-padded or
|
467 |
+
trimmed to the length ``s[i]`` before computing the real FFT.
|
468 |
+
If a length ``-1`` is specified, no padding is done in that dimension.
|
469 |
+
Default: ``s = [input.size(d) for d in dim]``
|
470 |
+
dim (Tuple[int], optional): Dimensions to be transformed.
|
471 |
+
Default: last two dimensions.
|
472 |
+
norm (str, optional): Normalization mode. For the forward transform
|
473 |
+
(:func:`~torch.fft.rfft2`), these correspond to:
|
474 |
+
|
475 |
+
* ``"forward"`` - normalize by ``1/n``
|
476 |
+
* ``"backward"`` - no normalization
|
477 |
+
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real FFT orthonormal)
|
478 |
+
|
479 |
+
Where ``n = prod(s)`` is the logical FFT size.
|
480 |
+
Calling the backward transform (:func:`~torch.fft.irfft2`) with the same
|
481 |
+
normalization mode will apply an overall normalization of ``1/n`` between
|
482 |
+
the two transforms. This is required to make :func:`~torch.fft.irfft2`
|
483 |
+
the exact inverse.
|
484 |
+
|
485 |
+
Default is ``"backward"`` (no normalization).
|
486 |
+
|
487 |
+
Keyword args:
|
488 |
+
{out}
|
489 |
+
|
490 |
+
Example:
|
491 |
+
|
492 |
+
>>> t = torch.rand(10, 10)
|
493 |
+
>>> rfft2 = torch.fft.rfft2(t)
|
494 |
+
>>> rfft2.size()
|
495 |
+
torch.Size([10, 6])
|
496 |
+
|
497 |
+
Compared against the full output from :func:`~torch.fft.fft2`, we have all
|
498 |
+
elements up to the Nyquist frequency.
|
499 |
+
|
500 |
+
>>> fft2 = torch.fft.fft2(t)
|
501 |
+
>>> torch.testing.assert_close(fft2[..., :6], rfft2, check_stride=False)
|
502 |
+
|
503 |
+
The discrete Fourier transform is separable, so :func:`~torch.fft.rfft2`
|
504 |
+
here is equivalent to a combination of :func:`~torch.fft.fft` and
|
505 |
+
:func:`~torch.fft.rfft`:
|
506 |
+
|
507 |
+
>>> two_ffts = torch.fft.fft(torch.fft.rfft(t, dim=1), dim=0)
|
508 |
+
>>> torch.testing.assert_close(rfft2, two_ffts, check_stride=False)
|
509 |
+
|
510 |
+
""".format(**common_args))
|
511 |
+
|
512 |
+
irfft2 = _add_docstr(_fft.fft_irfft2, r"""
|
513 |
+
irfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
|
514 |
+
|
515 |
+
Computes the inverse of :func:`~torch.fft.rfft2`.
|
516 |
+
Equivalent to :func:`~torch.fft.irfftn` but IFFTs only the last two dimensions by default.
|
517 |
+
|
518 |
+
:attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier
|
519 |
+
domain, as produced by :func:`~torch.fft.rfft2`. By the Hermitian property, the
|
520 |
+
output will be real-valued.
|
521 |
+
|
522 |
+
Note:
|
523 |
+
Some input frequencies must be real-valued to satisfy the Hermitian
|
524 |
+
property. In these cases the imaginary component will be ignored.
|
525 |
+
For example, any imaginary component in the zero-frequency term cannot
|
526 |
+
be represented in a real output and so will always be ignored.
|
527 |
+
|
528 |
+
Note:
|
529 |
+
The correct interpretation of the Hermitian input depends on the length of
|
530 |
+
the original data, as given by :attr:`s`. This is because each input shape
|
531 |
+
could correspond to either an odd or even length signal. By default, the
|
532 |
+
signal is assumed to be even length and odd signals will not round-trip
|
533 |
+
properly. So, it is recommended to always pass the signal shape :attr:`s`.
|
534 |
+
|
535 |
+
Note:
|
536 |
+
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
|
537 |
+
However it only supports powers of 2 signal length in every transformed dimensions.
|
538 |
+
With default arguments, the size of last dimension should be (2^n + 1) as argument
|
539 |
+
`s` defaults to even output size = 2 * (last_dim_size - 1)
|
540 |
+
|
541 |
+
Args:
|
542 |
+
input (Tensor): the input tensor
|
543 |
+
s (Tuple[int], optional): Signal size in the transformed dimensions.
|
544 |
+
If given, each dimension ``dim[i]`` will either be zero-padded or
|
545 |
+
trimmed to the length ``s[i]`` before computing the real FFT.
|
546 |
+
If a length ``-1`` is specified, no padding is done in that dimension.
|
547 |
+
Defaults to even output in the last dimension:
|
548 |
+
``s[-1] = 2*(input.size(dim[-1]) - 1)``.
|
549 |
+
dim (Tuple[int], optional): Dimensions to be transformed.
|
550 |
+
The last dimension must be the half-Hermitian compressed dimension.
|
551 |
+
Default: last two dimensions.
|
552 |
+
norm (str, optional): Normalization mode. For the backward transform
|
553 |
+
(:func:`~torch.fft.irfft2`), these correspond to:
|
554 |
+
|
555 |
+
* ``"forward"`` - no normalization
|
556 |
+
* ``"backward"`` - normalize by ``1/n``
|
557 |
+
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal)
|
558 |
+
|
559 |
+
Where ``n = prod(s)`` is the logical IFFT size.
|
560 |
+
Calling the forward transform (:func:`~torch.fft.rfft2`) with the same
|
561 |
+
normalization mode will apply an overall normalization of ``1/n`` between
|
562 |
+
the two transforms. This is required to make :func:`~torch.fft.irfft2`
|
563 |
+
the exact inverse.
|
564 |
+
|
565 |
+
Default is ``"backward"`` (normalize by ``1/n``).
|
566 |
+
|
567 |
+
Keyword args:
|
568 |
+
{out}
|
569 |
+
|
570 |
+
Example:
|
571 |
+
|
572 |
+
>>> t = torch.rand(10, 9)
|
573 |
+
>>> T = torch.fft.rfft2(t)
|
574 |
+
|
575 |
+
Without specifying the output length to :func:`~torch.fft.irfft2`, the output
|
576 |
+
will not round-trip properly because the input is odd-length in the last
|
577 |
+
dimension:
|
578 |
+
|
579 |
+
>>> torch.fft.irfft2(T).size()
|
580 |
+
torch.Size([10, 8])
|
581 |
+
|
582 |
+
So, it is recommended to always pass the signal shape :attr:`s`.
|
583 |
+
|
584 |
+
>>> roundtrip = torch.fft.irfft2(T, t.size())
|
585 |
+
>>> roundtrip.size()
|
586 |
+
torch.Size([10, 9])
|
587 |
+
>>> torch.testing.assert_close(roundtrip, t, check_stride=False)
|
588 |
+
|
589 |
+
""".format(**common_args))
|
590 |
+
|
591 |
+
rfftn = _add_docstr(_fft.fft_rfftn, r"""
|
592 |
+
rfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
|
593 |
+
|
594 |
+
Computes the N-dimensional discrete Fourier transform of real :attr:`input`.
|
595 |
+
|
596 |
+
The FFT of a real signal is Hermitian-symmetric,
|
597 |
+
``X[i_1, ..., i_n] = conj(X[-i_1, ..., -i_n])`` so the full
|
598 |
+
:func:`~torch.fft.fftn` output contains redundant information.
|
599 |
+
:func:`~torch.fft.rfftn` instead omits the negative frequencies in the
|
600 |
+
last dimension.
|
601 |
+
|
602 |
+
Note:
|
603 |
+
Supports torch.half on CUDA with GPU Architecture SM53 or greater.
|
604 |
+
However it only supports powers of 2 signal length in every transformed dimensions.
|
605 |
+
|
606 |
+
Args:
|
607 |
+
input (Tensor): the input tensor
|
608 |
+
s (Tuple[int], optional): Signal size in the transformed dimensions.
|
609 |
+
If given, each dimension ``dim[i]`` will either be zero-padded or
|
610 |
+
trimmed to the length ``s[i]`` before computing the real FFT.
|
611 |
+
If a length ``-1`` is specified, no padding is done in that dimension.
|
612 |
+
Default: ``s = [input.size(d) for d in dim]``
|
613 |
+
dim (Tuple[int], optional): Dimensions to be transformed.
|
614 |
+
Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
|
615 |
+
norm (str, optional): Normalization mode. For the forward transform
|
616 |
+
(:func:`~torch.fft.rfftn`), these correspond to:
|
617 |
+
|
618 |
+
* ``"forward"`` - normalize by ``1/n``
|
619 |
+
* ``"backward"`` - no normalization
|
620 |
+
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real FFT orthonormal)
|
621 |
+
|
622 |
+
Where ``n = prod(s)`` is the logical FFT size.
|
623 |
+
Calling the backward transform (:func:`~torch.fft.irfftn`) with the same
|
624 |
+
normalization mode will apply an overall normalization of ``1/n`` between
|
625 |
+
the two transforms. This is required to make :func:`~torch.fft.irfftn`
|
626 |
+
the exact inverse.
|
627 |
+
|
628 |
+
Default is ``"backward"`` (no normalization).
|
629 |
+
|
630 |
+
Keyword args:
|
631 |
+
{out}
|
632 |
+
|
633 |
+
Example:
|
634 |
+
|
635 |
+
>>> t = torch.rand(10, 10)
|
636 |
+
>>> rfftn = torch.fft.rfftn(t)
|
637 |
+
>>> rfftn.size()
|
638 |
+
torch.Size([10, 6])
|
639 |
+
|
640 |
+
Compared against the full output from :func:`~torch.fft.fftn`, we have all
|
641 |
+
elements up to the Nyquist frequency.
|
642 |
+
|
643 |
+
>>> fftn = torch.fft.fftn(t)
|
644 |
+
>>> torch.testing.assert_close(fftn[..., :6], rfftn, check_stride=False)
|
645 |
+
|
646 |
+
The discrete Fourier transform is separable, so :func:`~torch.fft.rfftn`
|
647 |
+
here is equivalent to a combination of :func:`~torch.fft.fft` and
|
648 |
+
:func:`~torch.fft.rfft`:
|
649 |
+
|
650 |
+
>>> two_ffts = torch.fft.fft(torch.fft.rfft(t, dim=1), dim=0)
|
651 |
+
>>> torch.testing.assert_close(rfftn, two_ffts, check_stride=False)
|
652 |
+
|
653 |
+
""".format(**common_args))
|
654 |
+
|
655 |
+
irfftn = _add_docstr(_fft.fft_irfftn, r"""
|
656 |
+
irfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
|
657 |
+
|
658 |
+
Computes the inverse of :func:`~torch.fft.rfftn`.
|
659 |
+
|
660 |
+
:attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier
|
661 |
+
domain, as produced by :func:`~torch.fft.rfftn`. By the Hermitian property, the
|
662 |
+
output will be real-valued.
|
663 |
+
|
664 |
+
Note:
|
665 |
+
Some input frequencies must be real-valued to satisfy the Hermitian
|
666 |
+
property. In these cases the imaginary component will be ignored.
|
667 |
+
For example, any imaginary component in the zero-frequency term cannot
|
668 |
+
be represented in a real output and so will always be ignored.
|
669 |
+
|
670 |
+
Note:
|
671 |
+
The correct interpretation of the Hermitian input depends on the length of
|
672 |
+
the original data, as given by :attr:`s`. This is because each input shape
|
673 |
+
could correspond to either an odd or even length signal. By default, the
|
674 |
+
signal is assumed to be even length and odd signals will not round-trip
|
675 |
+
properly. So, it is recommended to always pass the signal shape :attr:`s`.
|
676 |
+
|
677 |
+
Note:
|
678 |
+
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
|
679 |
+
However it only supports powers of 2 signal length in every transformed dimensions.
|
680 |
+
With default arguments, the size of last dimension should be (2^n + 1) as argument
|
681 |
+
`s` defaults to even output size = 2 * (last_dim_size - 1)
|
682 |
+
|
683 |
+
Args:
|
684 |
+
input (Tensor): the input tensor
|
685 |
+
s (Tuple[int], optional): Signal size in the transformed dimensions.
|
686 |
+
If given, each dimension ``dim[i]`` will either be zero-padded or
|
687 |
+
trimmed to the length ``s[i]`` before computing the real FFT.
|
688 |
+
If a length ``-1`` is specified, no padding is done in that dimension.
|
689 |
+
Defaults to even output in the last dimension:
|
690 |
+
``s[-1] = 2*(input.size(dim[-1]) - 1)``.
|
691 |
+
dim (Tuple[int], optional): Dimensions to be transformed.
|
692 |
+
The last dimension must be the half-Hermitian compressed dimension.
|
693 |
+
Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
|
694 |
+
norm (str, optional): Normalization mode. For the backward transform
|
695 |
+
(:func:`~torch.fft.irfftn`), these correspond to:
|
696 |
+
|
697 |
+
* ``"forward"`` - no normalization
|
698 |
+
* ``"backward"`` - normalize by ``1/n``
|
699 |
+
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal)
|
700 |
+
|
701 |
+
Where ``n = prod(s)`` is the logical IFFT size.
|
702 |
+
Calling the forward transform (:func:`~torch.fft.rfftn`) with the same
|
703 |
+
normalization mode will apply an overall normalization of ``1/n`` between
|
704 |
+
the two transforms. This is required to make :func:`~torch.fft.irfftn`
|
705 |
+
the exact inverse.
|
706 |
+
|
707 |
+
Default is ``"backward"`` (normalize by ``1/n``).
|
708 |
+
|
709 |
+
Keyword args:
|
710 |
+
{out}
|
711 |
+
|
712 |
+
Example:
|
713 |
+
|
714 |
+
>>> t = torch.rand(10, 9)
|
715 |
+
>>> T = torch.fft.rfftn(t)
|
716 |
+
|
717 |
+
Without specifying the output length to :func:`~torch.fft.irfft`, the output
|
718 |
+
will not round-trip properly because the input is odd-length in the last
|
719 |
+
dimension:
|
720 |
+
|
721 |
+
>>> torch.fft.irfftn(T).size()
|
722 |
+
torch.Size([10, 8])
|
723 |
+
|
724 |
+
So, it is recommended to always pass the signal shape :attr:`s`.
|
725 |
+
|
726 |
+
>>> roundtrip = torch.fft.irfftn(T, t.size())
|
727 |
+
>>> roundtrip.size()
|
728 |
+
torch.Size([10, 9])
|
729 |
+
>>> torch.testing.assert_close(roundtrip, t, check_stride=False)
|
730 |
+
|
731 |
+
""".format(**common_args))

hfft = _add_docstr(_fft.fft_hfft, r"""
hfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor

Computes the one dimensional discrete Fourier transform of a Hermitian
symmetric :attr:`input` signal.

Note:
    :func:`~torch.fft.hfft`/:func:`~torch.fft.ihfft` are analogous to
    :func:`~torch.fft.rfft`/:func:`~torch.fft.irfft`. The real FFT expects
    a real signal in the time-domain and gives Hermitian symmetry in the
    frequency-domain. The Hermitian FFT is the opposite; Hermitian symmetric in
    the time-domain and real-valued in the frequency-domain. For this reason,
    special care needs to be taken with the length argument :attr:`n`, in the
    same way as with :func:`~torch.fft.irfft`.

Note:
    Because the signal is Hermitian in the time-domain, the result will be
    real in the frequency domain. Note that some input frequencies must be
    real-valued to satisfy the Hermitian property. In these cases the imaginary
    component will be ignored. For example, any imaginary component in
    ``input[0]`` would result in one or more complex frequency terms which
    cannot be represented in a real output and so will always be ignored.

Note:
    The correct interpretation of the Hermitian input depends on the length of
    the original data, as given by :attr:`n`. This is because each input shape
    could correspond to either an odd or even length signal. By default, the
    signal is assumed to be even length and odd signals will not round-trip
    properly. So, it is recommended to always pass the signal length :attr:`n`.

Note:
    Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
    With default arguments, the size of the transformed dimension should be (2^n + 1), since
    the argument :attr:`n` defaults to an even output size of ``2 * (transformed_dim_size - 1)``.

Args:
    input (Tensor): the input tensor representing a half-Hermitian signal
    n (int, optional): Output signal length. This determines the length of the
        real output. If given, the input will either be zero-padded or trimmed to this
        length before computing the Hermitian FFT.
        Defaults to even output: ``n=2*(input.size(dim) - 1)``.
    dim (int, optional): The dimension along which to take the one dimensional Hermitian FFT.
    norm (str, optional): Normalization mode. For the forward transform
        (:func:`~torch.fft.hfft`), these correspond to:

        * ``"forward"`` - normalize by ``1/n``
        * ``"backward"`` - no normalization
        * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal)

        Calling the backward transform (:func:`~torch.fft.ihfft`) with the same
        normalization mode will apply an overall normalization of ``1/n`` between
        the two transforms. This is required to make :func:`~torch.fft.ihfft`
        the exact inverse.

        Default is ``"backward"`` (no normalization).

Keyword args:
    {out}

Example:

    Taking a real-valued frequency signal and bringing it into the time domain
    gives Hermitian symmetric output:

    >>> t = torch.linspace(0, 1, 5)
    >>> t
    tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
    >>> T = torch.fft.ifft(t)
    >>> T
    tensor([ 0.5000-0.0000j, -0.1250-0.1720j, -0.1250-0.0406j, -0.1250+0.0406j,
            -0.1250+0.1720j])

    Note that ``T[1] == T[-1].conj()`` and ``T[2] == T[-2].conj()``, so the
    negative frequencies are redundant. We can thus compute the forward
    transform without considering them:

    >>> torch.fft.hfft(T[:3], n=5)
    tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])

    Like with :func:`~torch.fft.irfft`, the output length must be given in order
    to recover an odd length output; otherwise the default even-length output
    will not round-trip:

    >>> torch.fft.hfft(T[:3])
    tensor([0.1250, 0.2809, 0.6250, 0.9691])
""".format(**common_args))

ihfft = _add_docstr(_fft.fft_ihfft, r"""
ihfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor

Computes the inverse of :func:`~torch.fft.hfft`.

:attr:`input` must be a real-valued signal, interpreted in the Fourier domain.
The IFFT of a real signal is Hermitian-symmetric, ``X[i] = conj(X[-i])``.
:func:`~torch.fft.ihfft` represents this in the one-sided form where only the
positive frequencies below the Nyquist frequency are included. To compute the
full output, use :func:`~torch.fft.ifft`.

Note:
    Supports torch.half on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.

Args:
    input (Tensor): the real input tensor
    n (int, optional): Signal length. If given, the input will either be zero-padded
        or trimmed to this length before computing the Hermitian IFFT.
    dim (int, optional): The dimension along which to take the one dimensional Hermitian IFFT.
    norm (str, optional): Normalization mode. For the backward transform
        (:func:`~torch.fft.ihfft`), these correspond to:

        * ``"forward"`` - no normalization
        * ``"backward"`` - normalize by ``1/n``
        * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)

        Calling the forward transform (:func:`~torch.fft.hfft`) with the same
        normalization mode will apply an overall normalization of ``1/n`` between
        the two transforms. This is required to make :func:`~torch.fft.ihfft`
        the exact inverse.

        Default is ``"backward"`` (normalize by ``1/n``).

Keyword args:
    {out}

Example:

    >>> t = torch.arange(5)
    >>> t
    tensor([0, 1, 2, 3, 4])
    >>> torch.fft.ihfft(t)
    tensor([ 2.0000-0.0000j, -0.5000-0.6882j, -0.5000-0.1625j])

    Compare against the full output from :func:`~torch.fft.ifft`:

    >>> torch.fft.ifft(t)
    tensor([ 2.0000-0.0000j, -0.5000-0.6882j, -0.5000-0.1625j, -0.5000+0.1625j,
            -0.5000+0.6882j])
""".format(**common_args))

hfft2 = _add_docstr(_fft.fft_hfft2, r"""
hfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor

Computes the 2-dimensional discrete Fourier transform of a Hermitian symmetric
:attr:`input` signal. Equivalent to :func:`~torch.fft.hfftn` but only
transforms the last two dimensions by default.

:attr:`input` is interpreted as a one-sided Hermitian signal in the time
domain. By the Hermitian property, the Fourier transform will be real-valued.

Note:
    Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
    With default arguments, the size of the last dimension should be (2^n + 1), since
    the argument :attr:`s` defaults to an even output size of ``2 * (last_dim_size - 1)``.

Args:
    input (Tensor): the input tensor
    s (Tuple[int], optional): Signal size in the transformed dimensions.
        If given, each dimension ``dim[i]`` will either be zero-padded or
        trimmed to the length ``s[i]`` before computing the Hermitian FFT.
        If a length ``-1`` is specified, no padding is done in that dimension.
        Defaults to even output in the last dimension:
        ``s[-1] = 2*(input.size(dim[-1]) - 1)``.
    dim (Tuple[int], optional): Dimensions to be transformed.
        The last dimension must be the half-Hermitian compressed dimension.
        Default: last two dimensions.
    norm (str, optional): Normalization mode. For the forward transform
        (:func:`~torch.fft.hfft2`), these correspond to:

        * ``"forward"`` - normalize by ``1/n``
        * ``"backward"`` - no normalization
        * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal)

        Where ``n = prod(s)`` is the logical FFT size.
        Calling the backward transform (:func:`~torch.fft.ihfft2`) with the same
        normalization mode will apply an overall normalization of ``1/n`` between
        the two transforms. This is required to make :func:`~torch.fft.ihfft2`
        the exact inverse.

        Default is ``"backward"`` (no normalization).

Keyword args:
    {out}

Example:

    Starting from a real frequency-space signal, we can generate a
    Hermitian-symmetric time-domain signal:

    >>> T = torch.rand(10, 9)
    >>> t = torch.fft.ihfft2(T)

    Without specifying the output length to :func:`~torch.fft.hfft2`, the
    output will not round-trip properly because the input is odd-length in the
    last dimension:

    >>> torch.fft.hfft2(t).size()
    torch.Size([10, 10])

    So, it is recommended to always pass the signal shape :attr:`s`.

    >>> roundtrip = torch.fft.hfft2(t, T.size())
    >>> roundtrip.size()
    torch.Size([10, 9])
    >>> torch.allclose(roundtrip, T)
    True

""".format(**common_args))

ihfft2 = _add_docstr(_fft.fft_ihfft2, r"""
ihfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor

Computes the 2-dimensional inverse discrete Fourier transform of real
:attr:`input`. Equivalent to :func:`~torch.fft.ihfftn` but transforms only the
two last dimensions by default.

Note:
    Supports torch.half on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.

Args:
    input (Tensor): the input tensor
    s (Tuple[int], optional): Signal size in the transformed dimensions.
        If given, each dimension ``dim[i]`` will either be zero-padded or
        trimmed to the length ``s[i]`` before computing the Hermitian IFFT.
        If a length ``-1`` is specified, no padding is done in that dimension.
        Default: ``s = [input.size(d) for d in dim]``
    dim (Tuple[int], optional): Dimensions to be transformed.
        Default: last two dimensions.
    norm (str, optional): Normalization mode. For the backward transform
        (:func:`~torch.fft.ihfft2`), these correspond to:

        * ``"forward"`` - no normalization
        * ``"backward"`` - normalize by ``1/n``
        * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian IFFT orthonormal)

        Where ``n = prod(s)`` is the logical IFFT size.
        Calling the forward transform (:func:`~torch.fft.hfft2`) with the same
        normalization mode will apply an overall normalization of ``1/n`` between
        the two transforms. This is required to make :func:`~torch.fft.ihfft2`
        the exact inverse.

        Default is ``"backward"`` (normalize by ``1/n``).

Keyword args:
    {out}

Example:

    >>> T = torch.rand(10, 10)
    >>> t = torch.fft.ihfft2(T)
    >>> t.size()
    torch.Size([10, 6])

    Compared against the full output from :func:`~torch.fft.ifft2`, the
    Hermitian time-space signal takes up only half the space.

    >>> ifft2 = torch.fft.ifft2(T)
    >>> torch.allclose(ifft2[..., :6], t)
    True

    The discrete Fourier transform is separable, so :func:`~torch.fft.ihfft2`
    here is equivalent to a combination of :func:`~torch.fft.ifft` and
    :func:`~torch.fft.ihfft`:

    >>> two_ffts = torch.fft.ifft(torch.fft.ihfft(T, dim=1), dim=0)
    >>> torch.allclose(t, two_ffts)
    True

""".format(**common_args))

hfftn = _add_docstr(_fft.fft_hfftn, r"""
hfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor

Computes the n-dimensional discrete Fourier transform of a Hermitian symmetric
:attr:`input` signal.

:attr:`input` is interpreted as a one-sided Hermitian signal in the time
domain. By the Hermitian property, the Fourier transform will be real-valued.

Note:
    :func:`~torch.fft.hfftn`/:func:`~torch.fft.ihfftn` are analogous to
    :func:`~torch.fft.rfftn`/:func:`~torch.fft.irfftn`. The real FFT expects
    a real signal in the time-domain and gives Hermitian symmetry in the
    frequency-domain. The Hermitian FFT is the opposite; Hermitian symmetric in
    the time-domain and real-valued in the frequency-domain. For this reason,
    special care needs to be taken with the shape argument :attr:`s`, in the
    same way as with :func:`~torch.fft.irfftn`.

Note:
    Some input frequencies must be real-valued to satisfy the Hermitian
    property. In these cases the imaginary component will be ignored.
    For example, any imaginary component in the zero-frequency term cannot
    be represented in a real output and so will always be ignored.

Note:
    The correct interpretation of the Hermitian input depends on the length of
    the original data, as given by :attr:`s`. This is because each input shape
    could correspond to either an odd or even length signal. By default, the
    signal is assumed to be even length and odd signals will not round-trip
    properly. It is recommended to always pass the signal shape :attr:`s`.

Note:
    Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
    With default arguments, the size of the last dimension should be (2^n + 1), since
    the argument :attr:`s` defaults to an even output size of ``2 * (last_dim_size - 1)``.

Args:
    input (Tensor): the input tensor
    s (Tuple[int], optional): Signal size in the transformed dimensions.
        If given, each dimension ``dim[i]`` will either be zero-padded or
        trimmed to the length ``s[i]`` before computing the Hermitian FFT.
        If a length ``-1`` is specified, no padding is done in that dimension.
        Defaults to even output in the last dimension:
        ``s[-1] = 2*(input.size(dim[-1]) - 1)``.
    dim (Tuple[int], optional): Dimensions to be transformed.
        The last dimension must be the half-Hermitian compressed dimension.
        Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
    norm (str, optional): Normalization mode. For the forward transform
        (:func:`~torch.fft.hfftn`), these correspond to:

        * ``"forward"`` - normalize by ``1/n``
        * ``"backward"`` - no normalization
        * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal)

        Where ``n = prod(s)`` is the logical FFT size.
        Calling the backward transform (:func:`~torch.fft.ihfftn`) with the same
        normalization mode will apply an overall normalization of ``1/n`` between
        the two transforms. This is required to make :func:`~torch.fft.ihfftn`
        the exact inverse.

        Default is ``"backward"`` (no normalization).

Keyword args:
    {out}

Example:

    Starting from a real frequency-space signal, we can generate a
    Hermitian-symmetric time-domain signal:

    >>> T = torch.rand(10, 9)
    >>> t = torch.fft.ihfftn(T)

    Without specifying the output length to :func:`~torch.fft.hfftn`, the
    output will not round-trip properly because the input is odd-length in the
    last dimension:

    >>> torch.fft.hfftn(t).size()
    torch.Size([10, 10])

    So, it is recommended to always pass the signal shape :attr:`s`.

    >>> roundtrip = torch.fft.hfftn(t, T.size())
    >>> roundtrip.size()
    torch.Size([10, 9])
    >>> torch.allclose(roundtrip, T)
    True

""".format(**common_args))

ihfftn = _add_docstr(_fft.fft_ihfftn, r"""
ihfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor

Computes the N-dimensional inverse discrete Fourier transform of real :attr:`input`.

:attr:`input` must be a real-valued signal, interpreted in the Fourier domain.
The n-dimensional IFFT of a real signal is Hermitian-symmetric,
``X[i, j, ...] = conj(X[-i, -j, ...])``. :func:`~torch.fft.ihfftn` represents
this in the one-sided form where only the positive frequencies below the
Nyquist frequency are included in the last signal dimension. To compute the
full output, use :func:`~torch.fft.ifftn`.

Note:
    Supports torch.half on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.

Args:
    input (Tensor): the input tensor
    s (Tuple[int], optional): Signal size in the transformed dimensions.
        If given, each dimension ``dim[i]`` will either be zero-padded or
        trimmed to the length ``s[i]`` before computing the Hermitian IFFT.
        If a length ``-1`` is specified, no padding is done in that dimension.
        Default: ``s = [input.size(d) for d in dim]``
    dim (Tuple[int], optional): Dimensions to be transformed.
        Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
    norm (str, optional): Normalization mode. For the backward transform
        (:func:`~torch.fft.ihfftn`), these correspond to:

        * ``"forward"`` - no normalization
        * ``"backward"`` - normalize by ``1/n``
        * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian IFFT orthonormal)

        Where ``n = prod(s)`` is the logical IFFT size.
        Calling the forward transform (:func:`~torch.fft.hfftn`) with the same
        normalization mode will apply an overall normalization of ``1/n`` between
        the two transforms. This is required to make :func:`~torch.fft.ihfftn`
        the exact inverse.

        Default is ``"backward"`` (normalize by ``1/n``).

Keyword args:
    {out}

Example:

    >>> T = torch.rand(10, 10)
    >>> ihfftn = torch.fft.ihfftn(T)
    >>> ihfftn.size()
    torch.Size([10, 6])

    Compared against the full output from :func:`~torch.fft.ifftn`, we have all
    elements up to the Nyquist frequency.

    >>> ifftn = torch.fft.ifftn(T)
    >>> torch.allclose(ifftn[..., :6], ihfftn)
    True

    The discrete Fourier transform is separable, so :func:`~torch.fft.ihfftn`
    here is equivalent to a combination of :func:`~torch.fft.ihfft` and
    :func:`~torch.fft.ifft`:

    >>> two_iffts = torch.fft.ifft(torch.fft.ihfft(T, dim=1), dim=0)
    >>> torch.allclose(ihfftn, two_iffts)
    True

""".format(**common_args))

fftfreq = _add_docstr(_fft.fft_fftfreq, r"""
fftfreq(n, d=1.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Computes the discrete Fourier Transform sample frequencies for a signal of size :attr:`n`.

Note:
    By convention, :func:`~torch.fft.fft` returns positive frequency terms
    first, followed by the negative frequencies in reverse order, so that
    ``f[-i]`` for all :math:`0 < i \leq n/2` in Python gives the negative
    frequency terms. For an FFT of length :attr:`n` and with inputs spaced in
    length unit :attr:`d`, the frequencies are::

        f = [0, 1, ..., (n - 1) // 2, -(n // 2), ..., -1] / (d * n)

Note:
    For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as
    either negative or positive. :func:`~torch.fft.fftfreq` follows NumPy's
    convention of taking it to be negative.

Args:
    n (int): the FFT length
    d (float, optional): The sampling length scale.
        The spacing between individual samples of the FFT input.
        The default assumes unit spacing; dividing that result by the actual
        spacing gives the result in physical frequency units.

Keyword Args:
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}

Example:

    >>> torch.fft.fftfreq(5)
    tensor([ 0.0000,  0.2000,  0.4000, -0.4000, -0.2000])

    For even input, we can see the Nyquist frequency at ``f[2]`` is given as
    negative:

    >>> torch.fft.fftfreq(4)
    tensor([ 0.0000,  0.2500, -0.5000, -0.2500])

""".format(**factory_common_args))

rfftfreq = _add_docstr(_fft.fft_rfftfreq, r"""
rfftfreq(n, d=1.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Computes the sample frequencies for :func:`~torch.fft.rfft` with a signal of size :attr:`n`.

Note:
    :func:`~torch.fft.rfft` returns Hermitian one-sided output, so only the
    positive frequency terms are returned. For a real FFT of length :attr:`n`
    and with inputs spaced in length unit :attr:`d`, the frequencies are::

        f = torch.arange(n // 2 + 1) / (d * n)

Note:
    For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as
    either negative or positive. Unlike :func:`~torch.fft.fftfreq`,
    :func:`~torch.fft.rfftfreq` always returns it as positive.

Args:
    n (int): the real FFT length
    d (float, optional): The sampling length scale.
        The spacing between individual samples of the FFT input.
        The default assumes unit spacing; dividing that result by the actual
        spacing gives the result in physical frequency units.

Keyword Args:
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}

Example:

    >>> torch.fft.rfftfreq(5)
    tensor([0.0000, 0.2000, 0.4000])

    >>> torch.fft.rfftfreq(4)
    tensor([0.0000, 0.2500, 0.5000])

    Compared to the output from :func:`~torch.fft.fftfreq`, we see that the
    Nyquist frequency at ``f[2]`` has changed sign:

    >>> torch.fft.fftfreq(4)
    tensor([ 0.0000,  0.2500, -0.5000, -0.2500])

""".format(**factory_common_args))

fftshift = _add_docstr(_fft.fft_fftshift, r"""
fftshift(input, dim=None) -> Tensor

Reorders n-dimensional FFT data, as provided by :func:`~torch.fft.fftn`, to have
negative frequency terms first.

This performs a periodic shift of n-dimensional data such that the origin
``(0, ..., 0)`` is moved to the center of the tensor. Specifically, to
``input.shape[dim] // 2`` in each selected dimension.

Note:
    By convention, the FFT returns positive frequency terms first, followed by
    the negative frequencies in reverse order, so that ``f[-i]`` for all
    :math:`0 < i \leq n/2` in Python gives the negative frequency terms.
    :func:`~torch.fft.fftshift` rearranges all frequencies into ascending order
    from negative to positive with the zero-frequency term in the center.

Note:
    For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as
    either negative or positive. :func:`~torch.fft.fftshift` always puts the
    Nyquist term at the 0-index. This is the same convention used by
    :func:`~torch.fft.fftfreq`.

Args:
    input (Tensor): the tensor in FFT order
    dim (int, Tuple[int], optional): The dimensions to rearrange.
        Only dimensions specified here will be rearranged, any other dimensions
        will be left in their original order.
        Default: All dimensions of :attr:`input`.

Example:

    >>> f = torch.fft.fftfreq(4)
    >>> f
    tensor([ 0.0000,  0.2500, -0.5000, -0.2500])

    >>> torch.fft.fftshift(f)
    tensor([-0.5000, -0.2500,  0.0000,  0.2500])

    Also notice that the Nyquist frequency term at ``f[2]`` was moved to the
    beginning of the tensor.

    This also works for multi-dimensional transforms:

    >>> x = torch.fft.fftfreq(5, d=1/5) + 0.1 * torch.fft.fftfreq(5, d=1/5).unsqueeze(1)
    >>> x
    tensor([[ 0.0000,  1.0000,  2.0000, -2.0000, -1.0000],
            [ 0.1000,  1.1000,  2.1000, -1.9000, -0.9000],
            [ 0.2000,  1.2000,  2.2000, -1.8000, -0.8000],
            [-0.2000,  0.8000,  1.8000, -2.2000, -1.2000],
            [-0.1000,  0.9000,  1.9000, -2.1000, -1.1000]])

    >>> torch.fft.fftshift(x)
    tensor([[-2.2000, -1.2000, -0.2000,  0.8000,  1.8000],
            [-2.1000, -1.1000, -0.1000,  0.9000,  1.9000],
            [-2.0000, -1.0000,  0.0000,  1.0000,  2.0000],
            [-1.9000, -0.9000,  0.1000,  1.1000,  2.1000],
            [-1.8000, -0.8000,  0.2000,  1.2000,  2.2000]])

    :func:`~torch.fft.fftshift` can also be useful for spatial data. If our
    data is defined on a centered grid (``[-(N//2), (N-1)//2]``) then we can
    use the standard FFT defined on an uncentered grid (``[0, N)``) by first
    applying an :func:`~torch.fft.ifftshift`.

    >>> x_centered = torch.arange(-5, 5)
    >>> x_uncentered = torch.fft.ifftshift(x_centered)
    >>> fft_uncentered = torch.fft.fft(x_uncentered)

    Similarly, we can convert the frequency domain components to centered
    convention by applying :func:`~torch.fft.fftshift`.

    >>> fft_centered = torch.fft.fftshift(fft_uncentered)

    The inverse transform, from centered Fourier space back to centered spatial
    data, can be performed by applying the inverse shifts in reverse order:

    >>> x_centered_2 = torch.fft.fftshift(torch.fft.ifft(torch.fft.ifftshift(fft_centered)))
    >>> torch.testing.assert_close(x_centered.to(torch.complex64), x_centered_2, check_stride=False)

""")

ifftshift = _add_docstr(_fft.fft_ifftshift, r"""
ifftshift(input, dim=None) -> Tensor

Inverse of :func:`~torch.fft.fftshift`.

Args:
    input (Tensor): the tensor in FFT order
    dim (int, Tuple[int], optional): The dimensions to rearrange.
        Only dimensions specified here will be rearranged, any other dimensions
        will be left in their original order.
        Default: All dimensions of :attr:`input`.

Example:

    >>> f = torch.fft.fftfreq(5)
    >>> f
    tensor([ 0.0000,  0.2000,  0.4000, -0.4000, -0.2000])

    A round-trip through :func:`~torch.fft.fftshift` and
    :func:`~torch.fft.ifftshift` gives the same result:

    >>> shifted = torch.fft.fftshift(f)
    >>> torch.fft.ifftshift(shifted)
    tensor([ 0.0000,  0.2000,  0.4000, -0.4000, -0.2000])

""")

venv/lib/python3.10/site-packages/torch/fft/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (54.8 kB).

venv/lib/python3.10/site-packages/torch/linalg/__init__.py
ADDED
The diff for this file is too large to render.

venv/lib/python3.10/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (113 kB).

venv/lib/python3.10/site-packages/torch/monitor/__init__.py
ADDED
@@ -0,0 +1,37 @@
from torch._C._monitor import *  # noqa: F403

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from torch.utils.tensorboard import SummaryWriter


STAT_EVENT = "torch.monitor.Stat"


class TensorboardEventHandler:
    """
    TensorboardEventHandler is an event handler that will write known events to
    the provided SummaryWriter.

    This currently only supports ``torch.monitor.Stat`` events which are logged
    as scalars.

    Example:
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_MONITOR)
        >>> # xdoctest: +REQUIRES(module:tensorboard)
        >>> from torch.utils.tensorboard import SummaryWriter
        >>> from torch.monitor import TensorboardEventHandler, register_event_handler
        >>> writer = SummaryWriter("log_dir")
        >>> register_event_handler(TensorboardEventHandler(writer))
    """

    def __init__(self, writer: "SummaryWriter") -> None:
        """
        Constructs the ``TensorboardEventHandler``.
        """
        self._writer = writer

    def __call__(self, event: Event) -> None:
        if event.name == STAT_EVENT:
            for k, v in event.data.items():
                self._writer.add_scalar(k, v, walltime=event.timestamp.timestamp())
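
Since any callable taking an ``Event`` can serve as a handler, a minimal custom handler mirroring ``TensorboardEventHandler`` might look like this (sketch; ``print_handler`` is a hypothetical name, and the ``Event``/``register_event_handler`` symbols come from the ``torch._C._monitor`` star import above):

from torch.monitor import Event, register_event_handler

def print_handler(event: Event) -> None:
    # Log every monitor event to stdout instead of TensorBoard.
    print(event.name, event.timestamp, event.data)

handle = register_event_handler(print_handler)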

venv/lib/python3.10/site-packages/torch/monitor/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.71 kB).

venv/lib/python3.10/site-packages/torch/onnx/_deprecation.py
ADDED
@@ -0,0 +1,64 @@
"""Utility for deprecating functions."""
|
2 |
+
|
3 |
+
import functools
|
4 |
+
import textwrap
|
5 |
+
import warnings
|
6 |
+
|
7 |
+
|
8 |
+
def deprecated(since: str, removed_in: str, instructions: str):
|
9 |
+
"""Marks functions as deprecated.
|
10 |
+
|
11 |
+
It will result in a warning when the function is called and a note in the
|
12 |
+
docstring.
|
13 |
+
|
14 |
+
Args:
|
15 |
+
since: The version when the function was first deprecated.
|
16 |
+
removed_in: The version when the function will be removed.
|
17 |
+
instructions: The action users should take.
|
18 |
+
"""
|
19 |
+
|
20 |
+
def decorator(function):
|
21 |
+
@functools.wraps(function)
|
22 |
+
def wrapper(*args, **kwargs):
|
23 |
+
warnings.warn(
|
24 |
+
f"'{function.__module__}.{function.__name__}' "
|
25 |
+
f"is deprecated in version {since} and will be "
|
26 |
+
f"removed in {removed_in}. Please {instructions}.",
|
27 |
+
category=FutureWarning,
|
28 |
+
stacklevel=2,
|
29 |
+
)
|
30 |
+
return function(*args, **kwargs)
|
31 |
+
|
32 |
+
# Add a deprecation note to the docstring.
|
33 |
+
docstring = function.__doc__ or ""
|
34 |
+
|
35 |
+
# Add a note to the docstring.
|
36 |
+
deprecation_note = textwrap.dedent(
|
37 |
+
f"""\
|
38 |
+
.. deprecated:: {since}
|
39 |
+
Deprecated and will be removed in version {removed_in}.
|
40 |
+
Please {instructions}.
|
41 |
+
"""
|
42 |
+
)
|
43 |
+
|
44 |
+
# Split docstring at first occurrence of newline
|
45 |
+
summary_and_body = docstring.split("\n\n", 1)
|
46 |
+
|
47 |
+
if len(summary_and_body) > 1:
|
48 |
+
summary, body = summary_and_body
|
49 |
+
|
50 |
+
# Dedent the body. We cannot do this with the presence of the summary because
|
51 |
+
# the body contains leading whitespaces when the summary does not.
|
52 |
+
body = textwrap.dedent(body)
|
53 |
+
|
54 |
+
new_docstring_parts = [deprecation_note, "\n\n", summary, body]
|
55 |
+
else:
|
56 |
+
summary = summary_and_body[0]
|
57 |
+
|
58 |
+
new_docstring_parts = [deprecation_note, "\n\n", summary]
|
59 |
+
|
60 |
+
wrapper.__doc__ = "".join(new_docstring_parts)
|
61 |
+
|
62 |
+
return wrapper
|
63 |
+
|
64 |
+
return decorator
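
Applying the decorator defined above to a function (sketch; ``old_export`` and the version strings are hypothetical):

@deprecated(since="1.13", removed_in="2.0", instructions="use the new exporter API instead")
def old_export(model):
    """Export a model."""
    ...

# Calling old_export(...) now emits a FutureWarning, and its docstring
# carries the generated ".. deprecated::" note.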

venv/lib/python3.10/site-packages/torch/onnx/_onnx_supported_ops.py
ADDED
@@ -0,0 +1,97 @@
import inspect
from typing import Dict, List, Union

from torch import _C
from torch.onnx import _constants
from torch.onnx._internal import registration


class _TorchSchema:
    def __init__(self, schema: Union[_C.FunctionSchema, str]) -> None:
        if isinstance(schema, _C.FunctionSchema):
            self.name: str = schema.name
            self.overload_name: str = schema.overload_name
            self.arguments: List[str] = [arg.name for arg in schema.arguments]
            self.optional_arguments: List[str] = []
            self.returns: List[str] = [ret.name for ret in schema.returns]
            self.opsets: List[int] = []
        else:
            self.name = schema
            self.overload_name = ""
            self.arguments = []
            self.optional_arguments = []
            self.returns = []
            self.opsets = []

    def __str__(self) -> str:
        s = (
            f"{self.name}.{self.overload_name}("
            + ", ".join(self.arguments)
            + ") -> ("
            + ", ".join(self.returns)
            + ")"
            + " in opsets "
            + ", ".join(str(opset) for opset in self.opsets)
        )
        return s

    def __hash__(self):
        # TODO(thiagocrepaldi): handle overload_name?
        return hash(self.name)

    def __eq__(self, other) -> bool:
        if not isinstance(other, _TorchSchema):
            return False
        # TODO(thiagocrepaldi): handle overload_name?
        return self.name == other.name

    def is_aten(self) -> bool:
        return self.name.startswith("aten::")

    def is_backward(self) -> bool:
        return "backward" in self.name


def _symbolic_argument_count(func):
    params = []
    signature = inspect.signature(func)
    optional_params = []
    for name, parameter in signature.parameters.items():
        if name in {"_outputs", "g"}:
            continue
        if parameter.default is parameter.empty:
            optional_params.append(parameter)
        else:
            params.append(str(parameter))
    return params


def all_forward_schemas() -> Dict[str, _TorchSchema]:
    """Returns schemas for all TorchScript forward ops."""
    torch_schemas = [_TorchSchema(s) for s in _C._jit_get_all_schemas()]
    return {schema.name: schema for schema in torch_schemas if not schema.is_backward()}


def all_symbolics_schemas() -> Dict[str, _TorchSchema]:
    """Returns schemas for all onnx supported ops."""
    symbolics_schemas = {}

    for name in registration.registry.all_functions():
        func_group = registration.registry.get_function_group(name)
        assert func_group is not None
        symbolics_schema = _TorchSchema(name)
        func = func_group.get(_constants.ONNX_MAX_OPSET)
        if func is not None:
            symbolics_schema.arguments = _symbolic_argument_count(func)
            symbolics_schema.opsets = list(
                range(func_group.get_min_supported(), _constants.ONNX_MAX_OPSET + 1)
            )
        else:
            # Only support opset < 9
            func = func_group.get(7)
            symbolics_schema.arguments = _symbolic_argument_count(func)
            symbolics_schema.opsets = list(range(7, _constants.ONNX_BASE_OPSET))

        symbolics_schemas[name] = symbolics_schema

    return symbolics_schemas
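
A sketch of how the two helpers above might be combined to survey ONNX operator coverage:

supported = all_symbolics_schemas()
aten_ops = sorted(s.name for s in supported.values() if s.is_aten())
# forward schemas with no registered symbolic function
missing = set(all_forward_schemas()) - set(supported)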

venv/lib/python3.10/site-packages/torch/onnx/_type_utils.py
ADDED
@@ -0,0 +1,380 @@
"""Utilities for converting and operating on ONNX, JIT and torch types."""
|
2 |
+
from __future__ import annotations
|
3 |
+
|
4 |
+
import enum
|
5 |
+
import typing
|
6 |
+
from typing import Dict, Literal, Optional, Union
|
7 |
+
|
8 |
+
import torch
|
9 |
+
from torch._C import _onnx as _C_onnx
|
10 |
+
from torch.onnx import errors
|
11 |
+
from torch.onnx._internal import _beartype
|
12 |
+
|
13 |
+
if typing.TYPE_CHECKING:
|
14 |
+
# Hack to help mypy to recognize torch._C.Value
|
15 |
+
from torch import _C # noqa: F401
|
16 |
+
|
17 |
+
ScalarName = Literal[
|
18 |
+
"Byte",
|
19 |
+
"Char",
|
20 |
+
"Double",
|
21 |
+
"Float",
|
22 |
+
"Half",
|
23 |
+
"Int",
|
24 |
+
"Long",
|
25 |
+
"Short",
|
26 |
+
"Bool",
|
27 |
+
"ComplexHalf",
|
28 |
+
"ComplexFloat",
|
29 |
+
"ComplexDouble",
|
30 |
+
"QInt8",
|
31 |
+
"QUInt8",
|
32 |
+
"QInt32",
|
33 |
+
"BFloat16",
|
34 |
+
"Float8E5M2",
|
35 |
+
"Float8E4M3FN",
|
36 |
+
"Float8E5M2FNUZ",
|
37 |
+
"Float8E4M3FNUZ",
|
38 |
+
"Undefined",
|
39 |
+
]
|
40 |
+
|
41 |
+
TorchName = Literal[
|
42 |
+
"bool",
|
43 |
+
"uint8_t",
|
44 |
+
"int8_t",
|
45 |
+
"double",
|
46 |
+
"float",
|
47 |
+
"half",
|
48 |
+
"int",
|
49 |
+
"int64_t",
|
50 |
+
"int16_t",
|
51 |
+
"complex32",
|
52 |
+
"complex64",
|
53 |
+
"complex128",
|
54 |
+
"qint8",
|
55 |
+
"quint8",
|
56 |
+
"qint32",
|
57 |
+
"bfloat16",
|
58 |
+
"float8_e5m2",
|
59 |
+
"float8_e4m3fn",
|
60 |
+
"float8_e5m2fnuz",
|
61 |
+
"float8_e4m3fnuz",
|
62 |
+
]
|
63 |
+
|
64 |
+
|
65 |
+
class JitScalarType(enum.IntEnum):
|
66 |
+
"""Scalar types defined in torch.
|
67 |
+
|
68 |
+
Use ``JitScalarType`` to convert from torch and JIT scalar types to ONNX scalar types.
|
69 |
+
|
70 |
+
Examples:
|
71 |
+
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX)
|
72 |
+
>>> # xdoctest: +IGNORE_WANT("win32 has different output")
|
73 |
+
>>> JitScalarType.from_value(torch.ones(1, 2)).onnx_type()
|
74 |
+
TensorProtoDataType.FLOAT
|
75 |
+
|
76 |
+
>>> JitScalarType.from_value(torch_c_value_with_type_float).onnx_type()
|
77 |
+
TensorProtoDataType.FLOAT
|
78 |
+
|
79 |
+
>>> JitScalarType.from_dtype(torch.get_default_dtype).onnx_type()
|
80 |
+
TensorProtoDataType.FLOAT
|
81 |
+
|
82 |
+
"""
|
83 |
+
|
84 |
+
# Order defined in https://github.com/pytorch/pytorch/blob/344defc9733a45fee8d0c4d3f5530f631e823196/c10/core/ScalarType.h
|
85 |
+
UINT8 = 0
|
86 |
+
INT8 = enum.auto() # 1
|
87 |
+
INT16 = enum.auto() # 2
|
88 |
+
INT = enum.auto() # 3
|
89 |
+
INT64 = enum.auto() # 4
|
90 |
+
HALF = enum.auto() # 5
|
91 |
+
FLOAT = enum.auto() # 6
|
92 |
+
DOUBLE = enum.auto() # 7
|
93 |
+
COMPLEX32 = enum.auto() # 8
|
94 |
+
COMPLEX64 = enum.auto() # 9
|
95 |
+
COMPLEX128 = enum.auto() # 10
|
96 |
+
BOOL = enum.auto() # 11
|
97 |
+
QINT8 = enum.auto() # 12
|
98 |
+
QUINT8 = enum.auto() # 13
|
99 |
+
QINT32 = enum.auto() # 14
|
100 |
+
BFLOAT16 = enum.auto() # 15
|
101 |
+
FLOAT8E5M2 = enum.auto() # 16
|
102 |
+
FLOAT8E4M3FN = enum.auto() # 17
|
103 |
+
FLOAT8E5M2FNUZ = enum.auto() # 18
|
104 |
+
FLOAT8E4M3FNUZ = enum.auto() # 19
|
105 |
+
UNDEFINED = enum.auto() # 20
|
106 |
+
|
107 |
+
@classmethod
|
108 |
+
@_beartype.beartype
|
109 |
+
def _from_name(
|
110 |
+
cls, name: Union[ScalarName, TorchName, Optional[str]]
|
111 |
+
) -> JitScalarType:
|
112 |
+
"""Convert a JIT scalar type or torch type name to ScalarType.
|
113 |
+
|
114 |
+
Note: DO NOT USE this API when `name` comes from a `torch._C.Value.type()` calls.
|
115 |
+
A "RuntimeError: INTERNAL ASSERT FAILED at "../aten/src/ATen/core/jit_type_base.h" can
|
116 |
+
be raised in several scenarios where shape info is not present.
|
117 |
+
Instead use `from_value` API which is safer.
|
118 |
+
|
119 |
+
Args:
|
120 |
+
name: JIT scalar type name (Byte) or torch type name (uint8_t).
|
121 |
+
|
122 |
+
Returns:
|
123 |
+
JitScalarType
|
124 |
+
|
125 |
+
Raises:
|
126 |
+
OnnxExporterError: if name is not a valid scalar type name or if it is None.
|
127 |
+
"""
|
128 |
+
if name is None:
|
129 |
+
raise errors.OnnxExporterError("Scalar type name cannot be None")
|
130 |
+
if valid_scalar_name(name):
|
131 |
+
return _SCALAR_NAME_TO_TYPE[name] # type: ignore[index]
|
132 |
+
if valid_torch_name(name):
|
133 |
+
return _TORCH_NAME_TO_SCALAR_TYPE[name] # type: ignore[index]
|
134 |
+
|
135 |
+
raise errors.OnnxExporterError(f"Unknown torch or scalar type: '{name}'")
|
136 |
+
|
137 |
+
@classmethod
|
138 |
+
@_beartype.beartype
|
139 |
+
def from_dtype(cls, dtype: Optional[torch.dtype]) -> JitScalarType:
|
140 |
+
"""Convert a torch dtype to JitScalarType.
|
141 |
+
|
142 |
+
Note: DO NOT USE this API when `dtype` comes from a `torch._C.Value.type()` calls.
|
143 |
+
A "RuntimeError: INTERNAL ASSERT FAILED at "../aten/src/ATen/core/jit_type_base.h" can
|
144 |
+
be raised in several scenarios where shape info is not present.
|
145 |
+
Instead use `from_value` API which is safer.
|
146 |
+
|
147 |
+
Args:
|
148 |
+
dtype: A torch.dtype to create a JitScalarType from
|
149 |
+
|
150 |
+
Returns:
|
151 |
+
JitScalarType
|
152 |
+
|
153 |
+
Raises:
|
154 |
+
OnnxExporterError: if dtype is not a valid torch.dtype or if it is None.
|
155 |
+
"""
|
156 |
+
if dtype not in _DTYPE_TO_SCALAR_TYPE:
|
157 |
+
raise errors.OnnxExporterError(f"Unknown dtype: {dtype}")
|
158 |
+
return _DTYPE_TO_SCALAR_TYPE[dtype]
|
159 |
+
|
160 |
+
@classmethod
|
161 |
+
@_beartype.beartype
|
162 |
+
def from_value(
|
163 |
+
cls, value: Union[None, torch._C.Value, torch.Tensor], default=None
|
164 |
+
) -> JitScalarType:
|
165 |
+
"""Create a JitScalarType from an value's scalar type.
|
166 |
+
|
167 |
+
Args:
|
168 |
+
value: An object to fetch scalar type from.
|
169 |
+
default: The JitScalarType to return if a valid scalar cannot be fetched from value
|
170 |
+
|
171 |
+
Returns:
|
172 |
+
JitScalarType.
|
173 |
+
|
174 |
+
Raises:
|
175 |
+
OnnxExporterError: if value does not have a valid scalar type and default is None.
|
176 |
+
SymbolicValueError: when value.type()'s info are empty and default is None
|
177 |
+
"""
|
178 |
+
|
179 |
+
if not isinstance(value, (torch._C.Value, torch.Tensor)) or (
|
180 |
+
isinstance(value, torch._C.Value) and value.node().mustBeNone()
|
181 |
+
):
|
182 |
+
# default value of type JitScalarType is returned when value is not valid
|
183 |
+
if default is None:
|
184 |
+
raise errors.OnnxExporterError(
|
185 |
+
"value must be either torch._C.Value or torch.Tensor objects."
|
186 |
+
)
|
187 |
+
elif not isinstance(default, JitScalarType):
|
188 |
+
raise errors.OnnxExporterError(
|
189 |
+
"default value must be a JitScalarType object."
|
190 |
+
)
|
191 |
+
return default
|
192 |
+
|
193 |
+
# Each value type has their own way of storing scalar type
|
194 |
+
if isinstance(value, torch.Tensor):
|
195 |
+
return cls.from_dtype(value.dtype)
|
196 |
+
if isinstance(value.type(), torch.ListType):
|
197 |
+
try:
|
198 |
+
return cls.from_dtype(value.type().getElementType().dtype())
|
199 |
+
except RuntimeError:
|
200 |
+
return cls._from_name(str(value.type().getElementType()))
|
201 |
+
if isinstance(value.type(), torch._C.OptionalType):
|
202 |
+
if value.type().getElementType().dtype() is None:
|
203 |
+
if isinstance(default, JitScalarType):
|
204 |
+
return default
|
205 |
+
raise errors.OnnxExporterError(
|
206 |
+
"default value must be a JitScalarType object."
|
207 |
+
)
|
208 |
+
return cls.from_dtype(value.type().getElementType().dtype())
|
209 |
+
|
210 |
+
scalar_type = None
|
211 |
+
if value.node().kind() != "prim::Constant" or not isinstance(
|
212 |
+
value.type(), torch._C.NoneType
|
213 |
+
):
|
214 |
+
# value must be a non-list torch._C.Value scalar
|
215 |
+
scalar_type = value.type().scalarType()
|
216 |
+
|
217 |
+
if scalar_type is not None:
|
218 |
+
return cls._from_name(scalar_type)
|
219 |
+
|
220 |
+
# When everything fails... try to default
|
221 |
+
if default is not None:
|
222 |
+
return default
|
223 |
+
raise errors.SymbolicValueError(
|
224 |
+
f"Cannot determine scalar type for this '{type(value.type())}' instance and "
|
225 |
+
"a default value was not provided.",
|
226 |
+
value,
|
227 |
+
)
|
228 |
+
|
229 |
+
@_beartype.beartype
|
230 |
+
def scalar_name(self) -> ScalarName:
|
231 |
+
"""Convert a JitScalarType to a JIT scalar type name."""
|
232 |
+
return _SCALAR_TYPE_TO_NAME[self]
|
233 |
+
|
234 |
+
@_beartype.beartype
|
235 |
+
def torch_name(self) -> TorchName:
|
236 |
+
"""Convert a JitScalarType to a torch type name."""
|
237 |
+
return _SCALAR_TYPE_TO_TORCH_NAME[self]
|
238 |
+
|
239 |
+
@_beartype.beartype
|
240 |
+
def dtype(self) -> torch.dtype:
|
241 |
+
"""Convert a JitScalarType to a torch dtype."""
|
242 |
+
return _SCALAR_TYPE_TO_DTYPE[self]
|
243 |
+
|
244 |
+
@_beartype.beartype
|
245 |
+
def onnx_type(self) -> _C_onnx.TensorProtoDataType:
|
246 |
+
"""Convert a JitScalarType to an ONNX data type."""
|
247 |
+
if self not in _SCALAR_TYPE_TO_ONNX:
|
248 |
+
raise errors.OnnxExporterError(
|
249 |
+
f"Scalar type {self} cannot be converted to ONNX"
|
250 |
+
)
|
251 |
+
return _SCALAR_TYPE_TO_ONNX[self]
|
252 |
+
|
253 |
+
@_beartype.beartype
|
254 |
+
def onnx_compatible(self) -> bool:
|
255 |
+
"""Return whether this JitScalarType is compatible with ONNX."""
|
256 |
+
return (
|
257 |
+
self in _SCALAR_TYPE_TO_ONNX
|
258 |
+
and self != JitScalarType.UNDEFINED
|
259 |
+
and self != JitScalarType.COMPLEX32
|
260 |
+
)
|
261 |
+
|
262 |
+
|
263 |
+
@_beartype.beartype
|
264 |
+
def valid_scalar_name(scalar_name: Union[ScalarName, str]) -> bool:
|
265 |
+
"""Return whether the given scalar name is a valid JIT scalar type name."""
|
266 |
+
return scalar_name in _SCALAR_NAME_TO_TYPE
|
267 |
+
|
268 |
+
|
269 |
+
@_beartype.beartype
|
270 |
+
def valid_torch_name(torch_name: Union[TorchName, str]) -> bool:
|
271 |
+
"""Return whether the given torch name is a valid torch type name."""
|
272 |
+
return torch_name in _TORCH_NAME_TO_SCALAR_TYPE
|
273 |
+
|
274 |
+
|
275 |
+
# https://github.com/pytorch/pytorch/blob/344defc9733a45fee8d0c4d3f5530f631e823196/c10/core/ScalarType.h
|
276 |
+
_SCALAR_TYPE_TO_NAME: Dict[JitScalarType, ScalarName] = {
|
277 |
+
JitScalarType.BOOL: "Bool",
|
278 |
+
JitScalarType.UINT8: "Byte",
|
279 |
+
JitScalarType.INT8: "Char",
|
280 |
+
JitScalarType.INT16: "Short",
|
281 |
+
JitScalarType.INT: "Int",
|
282 |
+
JitScalarType.INT64: "Long",
|
283 |
+
JitScalarType.HALF: "Half",
|
284 |
+
JitScalarType.FLOAT: "Float",
|
285 |
+
JitScalarType.DOUBLE: "Double",
|
286 |
+
JitScalarType.COMPLEX32: "ComplexHalf",
|
287 |
+
JitScalarType.COMPLEX64: "ComplexFloat",
|
288 |
+
JitScalarType.COMPLEX128: "ComplexDouble",
|
289 |
+
JitScalarType.QINT8: "QInt8",
|
290 |
+
JitScalarType.QUINT8: "QUInt8",
|
291 |
+
JitScalarType.QINT32: "QInt32",
|
292 |
+
JitScalarType.BFLOAT16: "BFloat16",
|
293 |
+
JitScalarType.FLOAT8E5M2: "Float8E5M2",
|
294 |
+
JitScalarType.FLOAT8E4M3FN: "Float8E4M3FN",
|
295 |
+
JitScalarType.FLOAT8E5M2FNUZ: "Float8E5M2FNUZ",
|
296 |
+
JitScalarType.FLOAT8E4M3FNUZ: "Float8E4M3FNUZ",
|
297 |
+
JitScalarType.UNDEFINED: "Undefined",
|
298 |
+
}
|
299 |
+
|
300 |
+
_SCALAR_NAME_TO_TYPE: Dict[ScalarName, JitScalarType] = {
|
301 |
+
v: k for k, v in _SCALAR_TYPE_TO_NAME.items()
|
302 |
+
}
|
303 |
+
|
304 |
+
_SCALAR_TYPE_TO_TORCH_NAME: Dict[JitScalarType, TorchName] = {
|
305 |
+
JitScalarType.BOOL: "bool",
|
306 |
+
JitScalarType.UINT8: "uint8_t",
|
307 |
+
JitScalarType.INT8: "int8_t",
|
308 |
+
JitScalarType.INT16: "int16_t",
|
309 |
+
JitScalarType.INT: "int",
|
310 |
+
JitScalarType.INT64: "int64_t",
|
311 |
+
JitScalarType.HALF: "half",
|
312 |
+
JitScalarType.FLOAT: "float",
|
313 |
+
JitScalarType.DOUBLE: "double",
|
314 |
+
JitScalarType.COMPLEX32: "complex32",
|
315 |
+
JitScalarType.COMPLEX64: "complex64",
|
316 |
+
JitScalarType.COMPLEX128: "complex128",
|
317 |
+
JitScalarType.QINT8: "qint8",
|
318 |
+
JitScalarType.QUINT8: "quint8",
|
319 |
+
JitScalarType.QINT32: "qint32",
|
320 |
+
JitScalarType.BFLOAT16: "bfloat16",
|
321 |
+
JitScalarType.FLOAT8E5M2: "float8_e5m2",
|
322 |
+
JitScalarType.FLOAT8E4M3FN: "float8_e4m3fn",
|
323 |
+
JitScalarType.FLOAT8E5M2FNUZ: "float8_e5m2fnuz",
|
324 |
+
JitScalarType.FLOAT8E4M3FNUZ: "float8_e4m3fnuz",
|
325 |
+
}
|
326 |
+
|
327 |
+
_TORCH_NAME_TO_SCALAR_TYPE: Dict[TorchName, JitScalarType] = {
|
328 |
+
v: k for k, v in _SCALAR_TYPE_TO_TORCH_NAME.items()
|
329 |
+
}
|
330 |
+
|
331 |
+
_SCALAR_TYPE_TO_ONNX = {
|
332 |
+
JitScalarType.BOOL: _C_onnx.TensorProtoDataType.BOOL,
|
333 |
+
JitScalarType.UINT8: _C_onnx.TensorProtoDataType.UINT8,
|
334 |
+
JitScalarType.INT8: _C_onnx.TensorProtoDataType.INT8,
|
335 |
+
JitScalarType.INT16: _C_onnx.TensorProtoDataType.INT16,
|
336 |
+
JitScalarType.INT: _C_onnx.TensorProtoDataType.INT32,
|
337 |
+
JitScalarType.INT64: _C_onnx.TensorProtoDataType.INT64,
|
338 |
+
JitScalarType.HALF: _C_onnx.TensorProtoDataType.FLOAT16,
|
339 |
+
JitScalarType.FLOAT: _C_onnx.TensorProtoDataType.FLOAT,
|
340 |
+
JitScalarType.DOUBLE: _C_onnx.TensorProtoDataType.DOUBLE,
|
341 |
+
JitScalarType.COMPLEX64: _C_onnx.TensorProtoDataType.COMPLEX64,
|
342 |
+
JitScalarType.COMPLEX128: _C_onnx.TensorProtoDataType.COMPLEX128,
|
343 |
+
JitScalarType.BFLOAT16: _C_onnx.TensorProtoDataType.BFLOAT16,
|
344 |
+
JitScalarType.UNDEFINED: _C_onnx.TensorProtoDataType.UNDEFINED,
|
345 |
+
JitScalarType.COMPLEX32: _C_onnx.TensorProtoDataType.UNDEFINED,
|
346 |
+
JitScalarType.QINT8: _C_onnx.TensorProtoDataType.INT8,
|
347 |
+
JitScalarType.QUINT8: _C_onnx.TensorProtoDataType.UINT8,
|
348 |
+
JitScalarType.QINT32: _C_onnx.TensorProtoDataType.INT32,
|
349 |
+
JitScalarType.FLOAT8E5M2: _C_onnx.TensorProtoDataType.FLOAT8E5M2,
|
350 |
+
JitScalarType.FLOAT8E4M3FN: _C_onnx.TensorProtoDataType.FLOAT8E4M3FN,
|
351 |
+
JitScalarType.FLOAT8E5M2FNUZ: _C_onnx.TensorProtoDataType.FLOAT8E5M2FNUZ,
|
352 |
+
JitScalarType.FLOAT8E4M3FNUZ: _C_onnx.TensorProtoDataType.FLOAT8E4M3FNUZ,
|
353 |
+
}
|
354 |
+
|
355 |
+
# source of truth is
|
356 |
+
# https://github.com/pytorch/pytorch/blob/master/torch/csrc/utils/tensor_dtypes.cpp
|
357 |
+
_SCALAR_TYPE_TO_DTYPE = {
|
358 |
+
JitScalarType.BOOL: torch.bool,
|
359 |
+
JitScalarType.UINT8: torch.uint8,
|
360 |
+
JitScalarType.INT8: torch.int8,
|
361 |
+
JitScalarType.INT16: torch.short,
|
362 |
+
JitScalarType.INT: torch.int,
|
363 |
+
JitScalarType.INT64: torch.int64,
|
364 |
+
JitScalarType.HALF: torch.half,
|
365 |
+
JitScalarType.FLOAT: torch.float,
|
366 |
+
JitScalarType.DOUBLE: torch.double,
|
367 |
+
JitScalarType.COMPLEX32: torch.complex32,
|
368 |
+
JitScalarType.COMPLEX64: torch.complex64,
|
369 |
+
JitScalarType.COMPLEX128: torch.complex128,
|
370 |
+
JitScalarType.QINT8: torch.qint8,
|
371 |
+
JitScalarType.QUINT8: torch.quint8,
|
372 |
+
JitScalarType.QINT32: torch.qint32,
|
373 |
+
JitScalarType.BFLOAT16: torch.bfloat16,
|
374 |
+
JitScalarType.FLOAT8E5M2: torch.float8_e5m2,
|
375 |
+
JitScalarType.FLOAT8E4M3FN: torch.float8_e4m3fn,
|
376 |
+
JitScalarType.FLOAT8E5M2FNUZ: torch.float8_e5m2fnuz,
|
377 |
+
JitScalarType.FLOAT8E4M3FNUZ: torch.float8_e4m3fnuz,
|
378 |
+
}
|
379 |
+
|
380 |
+
_DTYPE_TO_SCALAR_TYPE = {v: k for k, v in _SCALAR_TYPE_TO_DTYPE.items()}
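
Tying the pieces above together, the typical lookup path goes tensor dtype -> JitScalarType -> ONNX type (sketch, using the names defined in this module):

import torch

scalar_type = JitScalarType.from_value(torch.ones(2, 2, dtype=torch.float16))
assert scalar_type == JitScalarType.HALF
assert scalar_type.onnx_type() == _C_onnx.TensorProtoDataType.FLOAT16
assert scalar_type.dtype() == torch.float16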

venv/lib/python3.10/site-packages/torch/onnx/errors.py
ADDED
@@ -0,0 +1,106 @@
"""ONNX exporter exceptions."""
|
2 |
+
from __future__ import annotations
|
3 |
+
|
4 |
+
import textwrap
|
5 |
+
from typing import Optional
|
6 |
+
|
7 |
+
from torch import _C
|
8 |
+
from torch.onnx import _constants
|
9 |
+
from torch.onnx._internal import diagnostics
|
10 |
+
|
11 |
+
__all__ = [
|
12 |
+
"OnnxExporterError",
|
13 |
+
"OnnxExporterWarning",
|
14 |
+
"CheckerError",
|
15 |
+
"SymbolicValueError",
|
16 |
+
"UnsupportedOperatorError",
|
17 |
+
]
|
18 |
+
|
19 |
+
|
20 |
+
class OnnxExporterWarning(UserWarning):
|
21 |
+
"""Base class for all warnings in the ONNX exporter."""
|
22 |
+
|
23 |
+
pass
|
24 |
+
|
25 |
+
|
26 |
+
class OnnxExporterError(RuntimeError):
|
27 |
+
"""Errors raised by the ONNX exporter."""
|
28 |
+
|
29 |
+
pass
|
30 |
+
|
31 |
+
|
32 |
+
class CheckerError(OnnxExporterError):
|
33 |
+
"""Raised when ONNX checker detects an invalid model."""
|
34 |
+
|
35 |
+
pass
|
36 |
+
|
37 |
+
|
38 |
+
class UnsupportedOperatorError(OnnxExporterError):
|
39 |
+
"""Raised when an operator is unsupported by the exporter."""
|
40 |
+
|
41 |
+
def __init__(self, name: str, version: int, supported_version: Optional[int]):
|
42 |
+
if supported_version is not None:
|
43 |
+
diagnostic_rule: diagnostics.infra.Rule = (
|
44 |
+
diagnostics.rules.operator_supported_in_newer_opset_version
|
45 |
+
)
|
46 |
+
msg = diagnostic_rule.format_message(name, version, supported_version)
|
47 |
+
diagnostics.diagnose(diagnostic_rule, diagnostics.levels.ERROR, msg)
|
48 |
+
else:
|
49 |
+
if name.startswith(("aten::", "prim::", "quantized::")):
|
50 |
+
diagnostic_rule = diagnostics.rules.missing_standard_symbolic_function
|
51 |
+
msg = diagnostic_rule.format_message(
|
52 |
+
name, version, _constants.PYTORCH_GITHUB_ISSUES_URL
|
53 |
+
)
|
54 |
+
diagnostics.diagnose(diagnostic_rule, diagnostics.levels.ERROR, msg)
|
55 |
+
else:
|
56 |
+
diagnostic_rule = diagnostics.rules.missing_custom_symbolic_function
|
57 |
+
msg = diagnostic_rule.format_message(name)
|
58 |
+
diagnostics.diagnose(diagnostic_rule, diagnostics.levels.ERROR, msg)
|
59 |
+
super().__init__(msg)
|
60 |
+
|
61 |
+
|
62 |
+
class SymbolicValueError(OnnxExporterError):
|
63 |
+
"""Errors around TorchScript values and nodes."""
|
64 |
+
|
65 |
+
def __init__(self, msg: str, value: _C.Value):
|
66 |
+
message = (
|
67 |
+
f"{msg} [Caused by the value '{value}' (type '{value.type()}') in the "
|
68 |
+
f"TorchScript graph. The containing node has kind '{value.node().kind()}'.] "
|
69 |
+
)
|
70 |
+
|
71 |
+
code_location = value.node().sourceRange()
|
72 |
+
if code_location:
|
73 |
+
message += f"\n (node defined in {code_location})"
|
74 |
+
|
75 |
+
try:
|
76 |
+
# Add its input and output to the message.
|
77 |
+
message += "\n\n"
|
78 |
+
message += textwrap.indent(
|
79 |
+
(
|
80 |
+
"Inputs:\n"
|
81 |
+
+ (
|
82 |
+
"\n".join(
|
83 |
+
f" #{i}: {input_} (type '{input_.type()}')"
|
84 |
+
for i, input_ in enumerate(value.node().inputs())
|
85 |
+
)
|
86 |
+
or " Empty"
|
87 |
+
)
|
88 |
+
+ "\n"
|
89 |
+
+ "Outputs:\n"
|
90 |
+
+ (
|
91 |
+
"\n".join(
|
92 |
+
f" #{i}: {output} (type '{output.type()}')"
|
93 |
+
for i, output in enumerate(value.node().outputs())
|
94 |
+
)
|
95 |
+
or " Empty"
|
96 |
+
)
|
97 |
+
),
|
98 |
+
" ",
|
99 |
+
)
|
100 |
+
except AttributeError:
|
101 |
+
message += (
|
102 |
+
" Failed to obtain its input and output for debugging. "
|
103 |
+
"Please refer to the TorchScript graph for debugging information."
|
104 |
+
)
|
105 |
+
|
106 |
+
super().__init__(message)
|
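Since CheckerError, SymbolicValueError, and UnsupportedOperatorError all derive from OnnxExporterError, callers can catch everything the exporter raises through the base class. A hedged sketch (the model and export call are illustrative only):

    import torch
    from torch.onnx import errors

    model = torch.nn.Linear(4, 2)  # illustrative model
    try:
        torch.onnx.export(model, torch.randn(1, 4), "model.onnx", opset_version=13)
    except errors.UnsupportedOperatorError as e:
        print("missing symbolic function:", e)
    except errors.OnnxExporterError as e:  # also covers CheckerError, SymbolicValueError
        print("export failed:", e)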
venv/lib/python3.10/site-packages/torch/onnx/operators.py
ADDED
@@ -0,0 +1,20 @@
+r"""This file provides a location for operators that help exporting models via onnx.
+
+E.g. `shape_as_tensor` and `reshape_from_tensor_shape`
+are to make all dynamic sizes operations traceable.
+
+NOTE: at one point these functions were implemented differently.
+Since then we have implemented these directly in ATen, so this
+file is kept purely for backward-compatibility.
+"""
+
+import torch
+import torch.onnx
+
+
+def shape_as_tensor(x):
+    return torch._shape_as_tensor(x)
+
+
+def reshape_from_tensor_shape(x, shape):
+    return torch._reshape_from_tensor(x, shape)
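These wrappers matter under torch.jit.trace, where `x.shape` is burned into the graph as constants while `shape_as_tensor` keeps the size computation as a graph op. A small sketch (the `flatten_outer` function is illustrative, not part of this module):

    import torch
    from torch.onnx import operators

    def flatten_outer(x):
        shape = operators.shape_as_tensor(x)  # 1-D int64 tensor, stays in the graph
        new_shape = torch.cat((shape[0:1], torch.tensor([-1])), dim=0)
        return operators.reshape_from_tensor_shape(x, new_shape)

    # The traced graph now reshapes based on the runtime batch size.
    traced = torch.jit.trace(flatten_outer, torch.randn(2, 3, 4))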
venv/lib/python3.10/site-packages/torch/onnx/symbolic_caffe2.py
ADDED
@@ -0,0 +1,359 @@
+import importlib
+import inspect
+
+from torch.onnx import symbolic_helper, symbolic_opset9 as opset9
+from torch.onnx._internal import jit_utils, registration
+
+
+def register_quantized_ops(domain: str, version: int):
+    # Register all quantized ops
+    module = importlib.import_module("torch.onnx.symbolic_caffe2")
+    quant_version_ops = inspect.getmembers(module)
+    aten_q_ops = {
+        "relu",
+        "_empty_affine_quantized",
+        "dequantize",
+        "quantize_per_tensor",
+        "upsample_nearest2d",
+        "avg_pool2d",
+        "reshape",
+        "slice",
+        "cat",
+        "max_pool2d",
+        "sigmoid",
+    }
+    for op, func in quant_version_ops:
+        name = f"{domain}::{op}"
+        if inspect.isfunction(func) and not registration.registry.is_registered_op(
+            name, version
+        ):
+            if op in aten_q_ops:
+                # Override the builtin aten ops
+                registration.registry.register(
+                    f"aten::{op}", version, func, custom=True
+                )
+            registration.registry.register(name, version, func)
+
+
+def _permute_helper(g: jit_utils.GraphContext, input, axes):
+    quant_args = {
+        "axes_i": axes,
+        "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
+        "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
+    }
+    output = g.op("_caffe2::Int8Transpose", input, **quant_args)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+def nchw2nhwc(g: jit_utils.GraphContext, input):
+    axes = [0, 2, 3, 1]
+    return _permute_helper(g, input, axes)
+
+
+def nhwc2nchw(g: jit_utils.GraphContext, input):
+    axes = [0, 3, 1, 2]
+    return _permute_helper(g, input, axes)
+
+
+def linear_prepack(g: jit_utils.GraphContext, weight, bias):
+    # Mapping to a dummy caffe2 prepack node.
+    # During the onnx -> c2 conversion we can look up original weight and bias
+    # from this node
+    output = g.op("_caffe2::WeightPrepack", weight, bias)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+@symbolic_helper.parse_args("v", "v", "v", "f", "i")
+def linear(g: jit_utils.GraphContext, input, weight, bias, scale, zero_point):
+    kwargs = {
+        "Y_scale_f": scale,
+        "Y_zero_point_i": zero_point,
+    }
+    output = g.op("_caffe2::Int8FC", input, weight, bias, **kwargs)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+def conv_prepack(
+    g: jit_utils.GraphContext, input, weight, bias, stride, padding, dilation, groups
+):
+    # Mapping to a dummy caffe2 prepack node.
+    # During the onnx -> c2 conversion we can look up original weight and bias
+    # from this node
+    output = g.op("_caffe2::WeightPrepack", input, weight, bias)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "f", "i")
+def conv2d(
+    g: jit_utils.GraphContext,
+    input,
+    weight,
+    bias,
+    stride,
+    padding,
+    dilation,
+    groups,
+    scale,
+    zero_point,
+):
+    kernel_size = weight.node()["shape"][1:3]
+    kwargs = {
+        "strides_i": stride,
+        "pads_i": padding + padding,
+        "dilations_i": dilation,
+        "group_i": groups,
+        "kernels_i": kernel_size,
+        "order_s": "NHWC",
+        "Y_scale_f": scale,
+        "Y_zero_point_i": zero_point,
+    }
+    output = g.op("_caffe2::Int8Conv", input, weight, bias, **kwargs)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "f", "i")
+def conv2d_relu(
+    g: jit_utils.GraphContext,
+    input,
+    weight,
+    bias,
+    stride,
+    padding,
+    dilation,
+    groups,
+    scale,
+    zero_point,
+):
+    kernel_size = weight.node()["shape"][1:3]
+    kwargs = {
+        "strides_i": stride,
+        "pads_i": padding + padding,
+        "dilations_i": dilation,
+        "group_i": groups,
+        "kernels_i": kernel_size,
+        "order_s": "NHWC",
+        "Y_scale_f": scale,
+        "Y_zero_point_i": zero_point,
+    }
+    output = g.op("_caffe2::Int8ConvRelu", input, weight, bias, **kwargs)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+@symbolic_helper.parse_args("v", "v", "f", "i")
+def add(g: jit_utils.GraphContext, input_a, input_b, scale, zero_point):
+    kwargs = {
+        "Y_scale_f": scale,
+        "Y_zero_point_i": zero_point,
+    }
+    output = g.op("_caffe2::Int8Add", input_a, input_b, **kwargs)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+@symbolic_helper.parse_args("v")
+def relu(g: jit_utils.GraphContext, input):
+    if input not in symbolic_helper._quantized_ops:
+        return opset9.relu(g, input)
+    kwargs = {
+        "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
+        "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
+    }
+    output = g.op("_caffe2::Int8Relu", input, **kwargs)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+@symbolic_helper.parse_args("v", "f", "i", "t")
+def quantize_per_tensor(g: jit_utils.GraphContext, input, scale, zero_point, dtype):
+    kwargs = {
+        "Y_scale_f": scale,
+        "Y_zero_point_i": zero_point,
+    }
+    output = g.op("_caffe2::Int8Quantize", input, **kwargs)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+@symbolic_helper.parse_args("v")
+def dequantize(g: jit_utils.GraphContext, input):
+    return g.op("_caffe2::Int8Dequantize", input)
+
+
+@symbolic_helper.parse_args("v", "t", "t", "t", "t", "t", "t", "t")
+def _empty_affine_quantized(
+    g: jit_utils.GraphContext,
+    input,
+    shape,
+    scale,
+    zero_point,
+    dtype,
+    pin_memory,
+    memory_format,
+    layout,
+):
+    return input
+
+
+def upsample_nearest2d(
+    g: jit_utils.GraphContext,
+    input,
+    output_size,
+    align_corners=None,
+    scales_h=None,
+    scales_w=None,
+):
+    if input not in symbolic_helper._quantized_ops:
+        return opset9.upsample_nearest2d(g, input, output_size, align_corners)  # type: ignore[attr-defined]
+
+    output_size = symbolic_helper._parse_arg(output_size, "is")
+    kwargs = {
+        "output_size_i": output_size,
+        "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
+        "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
+    }
+    input = nchw2nhwc(g, input)
+    output = g.op("_caffe2::Int8ResizeNearest", input, **kwargs)
+    output = nhwc2nchw(g, output)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+@symbolic_helper.parse_args("v", "is", "is", "is", "is", "i")
+def max_pool2d(
+    g: jit_utils.GraphContext,
+    input,
+    kernel_size,
+    stride,
+    padding,
+    dilation,
+    ceil_mode,
+):
+    if input not in symbolic_helper._quantized_ops:
+        return opset9.max_pool2d(  # type: ignore[attr-defined]
+            g, input, kernel_size, stride, padding, dilation, ceil_mode
+        )
+    kwargs = {
+        "strides_i": stride,
+        "pads_i": padding + padding,
+        "kernel_i": kernel_size[0],
+        "order_s": "NHWC",
+        "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
+        "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
+    }
+    input = nchw2nhwc(g, input)
+    output = g.op("_caffe2::Int8MaxPool", input, **kwargs)
+    output = nhwc2nchw(g, output)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+@symbolic_helper.parse_args("v", "is", "is", "is", "i", "i", "none")
+def avg_pool2d(
+    g: jit_utils.GraphContext,
+    input,
+    kernel_size,
+    stride,
+    padding,
+    ceil_mode,
+    count_include_pad,
+    divisor_override=None,
+):
+    if input not in symbolic_helper._quantized_ops:
+        return opset9.avg_pool2d(  # type: ignore[attr-defined]
+            g,
+            input,
+            kernel_size,
+            stride,
+            padding,
+            ceil_mode,
+            count_include_pad,
+            divisor_override,
+        )
+    kwargs = {
+        "strides_i": stride,
+        "pads_i": padding + padding,
+        "kernel_i": kernel_size[0],
+        "order_s": "NHWC",
+        "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
+        "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
+    }
+    input = nchw2nhwc(g, input)
+    output = g.op("_caffe2::Int8AveragePool", input, **kwargs)
+    output = nhwc2nchw(g, output)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+def reshape(g: jit_utils.GraphContext, input, shape):
+    if input not in symbolic_helper._quantized_ops:
+        return opset9.reshape(g, input, shape)
+
+    kwargs = {
+        "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
+        "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
+    }
+    output = g.op("_caffe2::Int8Reshape", input, shape, **kwargs)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+@symbolic_helper.parse_args("v", "v", "v", "v", "i")
+def slice(g: jit_utils.GraphContext, input, dim, start, end, step):
+    if input not in symbolic_helper._quantized_ops:
+        return opset9.slice(g, input, dim, start, end, step)
+
+    if step != 1:
+        raise RuntimeError("ONNX quantized slice export only works for step 1.")
+    start = symbolic_helper._parse_arg(start, "i")
+    end = symbolic_helper._parse_arg(end, "i")
+    dim = symbolic_helper._parse_arg(dim, "i")
+
+    kwargs = {
+        "start_idx_i": start,
+        "end_idx_i": end,
+        "dim_i": dim,
+        "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
+        "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
+    }
+    output = g.op("_caffe2::Int8Slice", input, **kwargs)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+def cat(g: jit_utils.GraphContext, tensor_list, dim, scale=None, zero_point=None):
+    tensors = symbolic_helper._unpack_list(tensor_list)
+    input = tensors[0]
+    if input not in symbolic_helper._quantized_ops:
+        return opset9.cat(g, tensor_list, dim)
+
+    dim = symbolic_helper._parse_arg(dim, "i")
+    kwargs = {
+        "Y_scale_f": tensors[0].node()["Y_scale"],
+        "Y_zero_point_i": tensors[0].node()["Y_zero_point"],
+    }
+    output = g.op("_caffe2::Int8Concat", *tensors, axis_i=dim, **kwargs)
+    symbolic_helper._quantized_ops.add(output)
+    return output
+
+
+@symbolic_helper.parse_args("v")
+def sigmoid(g: jit_utils.GraphContext, input):
+    if input not in symbolic_helper._quantized_ops:
+        return opset9.sigmoid(g, input)
+    # Caffe2 expects the output scale to be 1/2^8
+    # and output zero_point to be 0 (quint8 type)
+    out_scale = 1.0 / 256
+    zero_point = 0
+    kwargs = {
+        "Y_scale_f": out_scale,
+        "Y_zero_point_i": zero_point,
+    }
+    output = g.op("_caffe2::Int8Sigmoid", input, **kwargs)
+    symbolic_helper._quantized_ops.add(output)
+    return output
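A hedged sketch of how this legacy registry hook was wired up: one call to register_quantized_ops per opset registers every function in this module under the given domain and, for the ops listed in aten_q_ops, overrides the builtin aten symbolics. The domain string and opset below are illustrative, and the whole Caffe2 path assumes a Caffe2-enabled build:

    from torch.onnx import symbolic_caffe2

    # Registers e.g. "caffe2::relu" and overrides "aten::relu" for opset 9.
    symbolic_caffe2.register_quantized_ops("caffe2", 9)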
venv/lib/python3.10/site-packages/torch/onnx/symbolic_helper.py
ADDED
@@ -0,0 +1,1823 @@
+from __future__ import annotations
+
+import functools
+import inspect
+import sys
+import typing
+import warnings
+from typing import (
+    Any,
+    Callable,
+    List,
+    Literal,
+    NoReturn,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    Union,
+)
+
+import torch
+import torch._C._onnx as _C_onnx
+from torch import _C
+
+# Monkey-patch graph manipulation methods on Graph, used for the ONNX symbolics
+from torch.onnx import _constants, _type_utils, errors
+from torch.onnx._globals import GLOBALS
+from torch.onnx._internal import _beartype, jit_utils
+from torch.types import Number
+
+__all__ = [
+    "args_have_same_dtype",
+    "cast_pytorch_to_onnx",
+    "check_training_mode",
+    "dequantize_helper",
+    "is_caffe2_aten_fallback",
+    "is_complex_value",
+    "parse_args",
+    "pytorch_name_to_type",
+    "quantize_helper",
+    "quantized_args",
+    "requantize_bias_helper",
+    "scalar_name_to_pytorch",
+    "scalar_type_to_onnx",
+    "scalar_type_to_pytorch_type",
+]
+
+# ---------------------------------------------------------------------------------
+# Helper functions
+# ---------------------------------------------------------------------------------
+
+_ValueDescriptor = Literal[
+    "v",
+    "i",
+    "is",
+    "f",
+    "fs",
+    "b",
+    "s",
+    "t",
+    "none",
+]
+
+
+@_beartype.beartype
+def _parse_arg(
+    value,
+    desc: _ValueDescriptor,
+    arg_name: Optional[str] = None,
+    node_name: Optional[str] = None,
+):
+    if desc == "none":
+        return value
+    if desc == "v" or not _is_value(value):
+        return value
+
+    node = value.node()
+    if node.mustBeNone():
+        return None
+    if node.kind() == "onnx::Constant":
+        node_val = _node_get(node, "value")
+        if desc == "i":
+            return int(node_val)
+        elif desc == "f":
+            return float(node_val)
+        elif desc == "b":
+            return bool(node_val)
+        elif desc == "s":
+            return str(node_val)
+        elif desc == "t":
+            return node_val
+        elif desc == "is":
+            return [int(v) for v in node_val]
+        elif desc == "fs":
+            return [float(v) for v in node_val]
+        else:
+            raise errors.SymbolicValueError(
+                f"ONNX symbolic does not understand the Constant node '{node}' "
+                f"specified with descriptor '{desc}'.",
+                value,
+            )
+    elif node.kind() == "prim::ListConstruct":
+        if desc == "is":
+            for v in node.inputs():
+                element_node = v.node()
+                if element_node.kind() != "onnx::Constant":
+                    raise errors.SymbolicValueError(
+                        f"Failed to export a node '{element_node}' "
+                        f"(in list node {node}) "
+                        f"because it is not constant. "
+                        f"Please try to make things (e.g. kernel sizes) static if possible.",
+                        value,
+                    )
+            return [int(_node_get(v.node(), "value")) for v in value.node().inputs()]
+        else:
+            raise errors.SymbolicValueError(
+                f"ONNX symbolic does not know how to unpack the ListConstruct node that "
+                f"is not a list of integers: '{node}'",
+                value,
+            )
+
+    if arg_name is None or node_name is None:
+        raise errors.SymbolicValueError(
+            f"Expected node type 'onnx::Constant', got '{node.kind()}'.",
+            value,
+        )
+
+    raise errors.SymbolicValueError(
+        "Expected node type 'onnx::Constant' "
+        f"for argument '{arg_name}' of node '{node_name}', got '{node.kind()}'.",
+        value,
+    )
+
+
+@_beartype.beartype
+def _node_get(node: _C.Node, key: str):
+    """Gets attributes of a node which is polymorphic over return type."""
+    assert isinstance(node, _C.Node)
+    sel = node.kindOf(key)
+    return getattr(node, sel)(key)
+
+
+@_beartype.beartype
+def _is_onnx_constant(value: _C.Value):
+    """Whether a Value is an ONNX constant."""
+    return value.node().kind() == "onnx::Constant"
+
+
+@_beartype.beartype
+def _maybe_get_const(
+    value: Optional[Union[_C.Value, torch.Tensor, Number, Sequence]],
+    descriptor: _ValueDescriptor,
+):
+    # NOTE: prim::Constant at this stage usually means something not compatible in ONNX,
+    # otherwise it'd be converted to onnx::Constant
+    # TODO(justinchuby): Replace isinstance with _is_value once we figure out mypy
+    if isinstance(value, _C.Value) and _is_onnx_constant(value):
+        return _parse_arg(value, descriptor)
+    return value
+
+
+@_beartype.beartype
+def _maybe_get_scalar(value):
+    value_t = _maybe_get_const(value, "t")
+    if isinstance(value_t, torch.Tensor) and value_t.shape == ():
+        return value_t
+    return value
+
+
+@_beartype.beartype
+def _get_const(value, desc, arg_name):
+    if not _is_constant(value):
+        raise errors.SymbolicValueError(
+            f"ONNX symbolic expected a constant value of the '{arg_name}' argument, "
+            f"got '{value}'",
+            value,
+        )
+    return _parse_arg(value, desc)
+
+
+@_beartype.beartype
+def _unpack_list(list_value: _C.Value) -> List[_C.Value]:
+    list_node = list_value.node()
+    if list_node.kind() != "prim::ListConstruct":
+        raise errors.SymbolicValueError(
+            f"ONNX symbolic expected node type prim::ListConstruct, "
+            f"got '{list_node}'.",
+            list_value,
+        )
+    return list(list_node.inputs())
+
+
+@_beartype.beartype
+def _unpack_tuple(tuple_value: _C.Value) -> Tuple[_C.Value, ...]:
+    tuple_node = tuple_value.node()
+    if not _is_tuple_construct(tuple_value):
+        raise errors.SymbolicValueError(
+            f"ONNX symbolic expected node type 'prim::TupleConstruct', "
+            f"got '{tuple_node.kind()}'.",
+            tuple_value,
+        )
+    return tuple(tuple_node.inputs())
+
+
+@_beartype.beartype
+def _unpack_quantized_tensor(tuple_value: _C.Value) -> Tuple[_C.Value, ...]:
+    """Unpacks a quantized tensor into a tuple of tensor and scale/zero_point.
+    Args:
+        tuple_value: A tuple of tensor, scale, zero_point, and optionally axis.
+    Returns:
+        A tuple of tensor, scale, zero_point, and optionally axis.
+    """
+    tuple_node = tuple_value.node()
+    # A quantized tensor is represented as tuple of the form (tensor, scale, zero_point, <axis>)
+    if not _is_tuple_construct(tuple_value):
+        raise errors.SymbolicValueError(
+            f"ONNX symbolic expected the output of `{tuple_node}` to be a quantized "
+            f"tensor. This is likely due to missing support for quantized "
+            f"`{tuple_node.kind()}`. Please create an issue on {_constants.PYTORCH_GITHUB_ISSUES_URL}",
+            tuple_value,
+        )
+    unpacked = tuple(tuple_node.inputs())
+    assert len(unpacked) == 3 or len(unpacked) == 4
+    return unpacked
+
+
+# Check if list_value is output from prim::ListConstruct
+# This is usually called before _unpack_list to ensure the list can be unpacked.
+@_beartype.beartype
+def _is_packed_list(list_value: Any) -> bool:
+    return _is_value(list_value) and list_value.node().kind() == "prim::ListConstruct"
+
+
+@_beartype.beartype
+def parse_args(*arg_descriptors: _ValueDescriptor):
+    """A decorator which converts args from torch._C.Value to built-in types.
+
+    For example:
+
+    ```
+    @parse_args('v', 'i', 'fs')
+    def foo(g, a, b, c):
+        assert isinstance(a, torch._C.Value)
+        assert isinstance(b, int)
+        assert isinstance(c, list)
+        assert isinstance(c[0], float)
+    ```
+
+    Args:
+        arg_descriptors: list of str, where each element is
+            a string that specifies the type to convert to. Valid descriptors:
+            "v": no conversion, keep torch._C.Value.
+            "i": int
+            "is": list of int
+            "f": float
+            "fs": list of float
+            "b": bool
+            "s": str
+            "t": torch.Tensor
+            "none": the variable is unused
+    """
+
+    def decorator(fn):
+        fn._arg_descriptors = arg_descriptors
+
+        @functools.wraps(fn)
+        def wrapper(g, *args, **kwargs):
+            # some args may be optional, so the length may be smaller
+            FILE_BUG_MSG = (
+                "If you believe this is not due to custom symbolic implementation within your code or "
+                "an external library, please file an issue at "
+                "https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml to report this bug."
+            )
+            assert len(arg_descriptors) >= len(args), (
+                f"A mismatch between the number of arguments ({len(args)}) and "
+                f"their descriptors ({len(arg_descriptors)}) was found at symbolic function '{fn.__name__}'. "
+                f"{FILE_BUG_MSG}"
+            )
+
+            try:
+                sig = inspect.signature(fn)
+                arg_names = list(sig.parameters.keys())[1:]
+                fn_name = fn.__name__
+            except Exception:
+                # FIXME(justinchuby): Avoid catching Exception.
+                # Catch a more specific exception instead.
+                arg_names = [None] * len(args)  # type: ignore[list-item]
+                fn_name = None
+            args = [
+                _parse_arg(arg, arg_desc, arg_name, fn_name)  # type: ignore[method-assign]
+                for arg, arg_desc, arg_name in zip(args, arg_descriptors, arg_names)
+            ]
+            # only support _outputs in kwargs
+            assert len(kwargs) <= 1, (
+                f"Symbolic function {fn.__name__}'s '**kwargs' can contain a single "
+                f"key/value entry. "
+                f"{FILE_BUG_MSG}"
+            )
+
+            if len(kwargs) == 1:
+                assert "_outputs" in kwargs, (
+                    f"Symbolic function {fn.__name__}'s '**kwargs' can only contain "
+                    f"'_outputs' key at '**kwargs'. "
+                    f"{FILE_BUG_MSG}"
+                )
+            return fn(g, *args, **kwargs)
+
+        return wrapper
+
+    return decorator
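# A minimal, hedged sketch of how the decorator above is applied in practice.
# `my_symbolic` and the "Flatten" op choice are illustrative only; real symbolic
# functions are looked up and invoked by the exporter, not called directly.
#
#     from torch.onnx import symbolic_helper
#
#     @symbolic_helper.parse_args("v", "i")
#     def my_symbolic(g, input, dim):
#         return g.op("Flatten", input, axis_i=dim)
#
#     # The descriptors are recorded on the wrapper for the exporter's bookkeeping.
#     assert my_symbolic._arg_descriptors == ("v", "i")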
311 |
+
|
312 |
+
|
313 |
+
@_beartype.beartype
|
314 |
+
def quantized_args(
|
315 |
+
*arg_q_descriptors: bool,
|
316 |
+
scale: Optional[float] = None,
|
317 |
+
zero_point: Optional[int] = None,
|
318 |
+
quantize_output: bool = True,
|
319 |
+
):
|
320 |
+
"""A decorator which extends support for quantized version of the base operator.
|
321 |
+
|
322 |
+
Quantization is detected by examining the arguments that are annotated by
|
323 |
+
`arg_q_descriptors`.
|
324 |
+
|
325 |
+
If quantization is detected, the base operator symbolic function will be wrapped with
|
326 |
+
argument de-quantization and output quantization.
|
327 |
+
|
328 |
+
Otherwise, only the base symbolic function will be invoked.
|
329 |
+
|
330 |
+
For example:
|
331 |
+
|
332 |
+
```
|
333 |
+
@quantized_args(True, False)
|
334 |
+
def foo(g, x, y):
|
335 |
+
return x + y
|
336 |
+
```
|
337 |
+
|
338 |
+
is equivalent to
|
339 |
+
|
340 |
+
```
|
341 |
+
def q_foo(g, x, y):
|
342 |
+
if is_quantized_tensor(x):
|
343 |
+
x = dequantize(x)
|
344 |
+
out = foo(g, x, y)
|
345 |
+
return quantize(out)
|
346 |
+
else:
|
347 |
+
return foo(g, x, y)
|
348 |
+
```
|
349 |
+
|
350 |
+
Args:
|
351 |
+
arg_q_descriptors: A sequence of bool, where each element represents if the
|
352 |
+
argument is QTensor for quantized version of this operator. It defaults
|
353 |
+
to False for unspecified (variable length) arguments.
|
354 |
+
scale: Quantized output scale. If None, derive from
|
355 |
+
the first quantized input scale.
|
356 |
+
zero_point: Quantized output zero point. If None,
|
357 |
+
derive from the first quantized input zero point.
|
358 |
+
quantize_output: If True, quantize the output of the base operator. Default is True
|
359 |
+
"""
|
360 |
+
|
361 |
+
def decorator(fn):
|
362 |
+
@functools.wraps(fn)
|
363 |
+
def wrapper(g, *args, **kwargs):
|
364 |
+
nonlocal scale
|
365 |
+
nonlocal zero_point
|
366 |
+
if scale is not None:
|
367 |
+
_scale = g.op("Constant", value_t=torch.tensor(scale))
|
368 |
+
else:
|
369 |
+
_scale = None
|
370 |
+
if zero_point is not None:
|
371 |
+
_zero_point = g.op("Constant", value_t=torch.tensor(zero_point))
|
372 |
+
else:
|
373 |
+
_zero_point = None
|
374 |
+
|
375 |
+
# Support variable length arguments by marking unspecified ones as non-quantized
|
376 |
+
arg_q_descriptors_extended = arg_q_descriptors + (False,) * (
|
377 |
+
len(args) - len(arg_q_descriptors)
|
378 |
+
)
|
379 |
+
descriptor_args = tuple(zip(arg_q_descriptors_extended, args))
|
380 |
+
|
381 |
+
def _is_arg_quantized(descriptor, arg):
|
382 |
+
return descriptor and _is_value(arg) and _is_tuple_construct(arg)
|
383 |
+
|
384 |
+
# Run regular symbolic function if none of the argument is QTensor.
|
385 |
+
is_quantized = list()
|
386 |
+
for descriptor, arg in descriptor_args:
|
387 |
+
# ListConstruct
|
388 |
+
if _is_packed_list(arg):
|
389 |
+
for arg_input in arg.node().inputs():
|
390 |
+
is_quantized.append(_is_arg_quantized(descriptor, arg_input))
|
391 |
+
else:
|
392 |
+
is_quantized.append(_is_arg_quantized(descriptor, arg))
|
393 |
+
|
394 |
+
if not any(is_quantized):
|
395 |
+
return fn(g, *args, **kwargs)
|
396 |
+
|
397 |
+
# Dequantize arguments that are quantized
|
398 |
+
non_quantized_args = []
|
399 |
+
for descriptor, arg in descriptor_args:
|
400 |
+
if _is_arg_quantized(descriptor, arg):
|
401 |
+
# Quantized arg is a tuple of (value, scale, zero_point)
|
402 |
+
dequantized_arg, arg_scale, arg_zero_point, _ = dequantize_helper(
|
403 |
+
g, arg
|
404 |
+
)
|
405 |
+
non_quantized_args.append(dequantized_arg)
|
406 |
+
# Set scale and zero_point to the first quantized input if not already set
|
407 |
+
if _scale is None:
|
408 |
+
_scale = arg_scale
|
409 |
+
if _zero_point is None:
|
410 |
+
_zero_point = arg_zero_point
|
411 |
+
# ListConstruct
|
412 |
+
elif _is_packed_list(arg):
|
413 |
+
for arg_input in arg.node().inputs():
|
414 |
+
if _is_arg_quantized(descriptor, arg_input):
|
415 |
+
# Quantized arg is a tuple of (value, scale, zero_point)
|
416 |
+
(
|
417 |
+
dequantized_arg,
|
418 |
+
arg_scale,
|
419 |
+
arg_zero_point,
|
420 |
+
_,
|
421 |
+
) = dequantize_helper(g, arg_input)
|
422 |
+
# Set scale and zero_point to the first quantized input if not already set
|
423 |
+
if _scale is None:
|
424 |
+
_scale = arg_scale
|
425 |
+
if _zero_point is None:
|
426 |
+
_zero_point = arg_zero_point
|
427 |
+
arg_input.replaceAllUsesWith(dequantized_arg)
|
428 |
+
non_quantized_args.append(arg)
|
429 |
+
else:
|
430 |
+
# Non-quantized arg
|
431 |
+
non_quantized_args.append(arg)
|
432 |
+
# TODO(justinchuby): Only single output is supported for now. We may want to
|
433 |
+
# support multiple outputs in the future.
|
434 |
+
output = fn(g, *non_quantized_args, **kwargs)
|
435 |
+
|
436 |
+
assert _scale is not None, "Bug: Scale must be set for quantized operator"
|
437 |
+
assert (
|
438 |
+
_zero_point is not None
|
439 |
+
), "Bug: Zero point must be set for quantized operator"
|
440 |
+
|
441 |
+
if quantize_output:
|
442 |
+
return quantize_helper(g, output, _scale, _zero_point)
|
443 |
+
return output
|
444 |
+
|
445 |
+
return wrapper
|
446 |
+
|
447 |
+
return decorator
|
448 |
+
|
449 |
+
|
450 |
+
@_beartype.beartype
|
451 |
+
def _scalar(x: Any) -> Optional[Number]:
|
452 |
+
"""Convert a scalar tensor into a Python value."""
|
453 |
+
if isinstance(x, torch.Tensor) and x.shape == ():
|
454 |
+
return x.item()
|
455 |
+
return None
|
456 |
+
|
457 |
+
|
458 |
+
@_beartype.beartype
|
459 |
+
def _if_scalar_type_as(self, tensor):
|
460 |
+
"""
|
461 |
+
Convert self into the same type of tensor, as necessary.
|
462 |
+
We only support implicit casting for scalars, so we never
|
463 |
+
actually need to insert an ONNX cast operator here; just
|
464 |
+
fix up the scalar.
|
465 |
+
"""
|
466 |
+
if isinstance(self, _C.Value):
|
467 |
+
return self
|
468 |
+
|
469 |
+
scalar_type = _type_utils.JitScalarType.from_value(
|
470 |
+
tensor, _type_utils.JitScalarType.UNDEFINED
|
471 |
+
)
|
472 |
+
if scalar_type != _type_utils.JitScalarType.UNDEFINED:
|
473 |
+
ty = scalar_type.scalar_name().lower()
|
474 |
+
return getattr(self, ty)()
|
475 |
+
return self
|
476 |
+
|
477 |
+
|
478 |
+
@_beartype.beartype
|
479 |
+
def _is_none(x: Any) -> bool:
|
480 |
+
return x is None or (x.node().mustBeNone() if isinstance(x, _C.Value) else False)
|
481 |
+
|
482 |
+
|
483 |
+
@_beartype.beartype
|
484 |
+
def _is_value(x: Any) -> bool:
|
485 |
+
return isinstance(x, _C.Value)
|
486 |
+
|
487 |
+
|
488 |
+
@_beartype.beartype
|
489 |
+
def _is_constant(value: Any) -> bool:
|
490 |
+
return not _is_value(value) or value.node().kind() in {
|
491 |
+
"onnx::Constant",
|
492 |
+
"prim::Constant",
|
493 |
+
}
|
494 |
+
|
495 |
+
|
496 |
+
@_beartype.beartype
|
497 |
+
def _is_tensor(x: _C.Value) -> bool:
|
498 |
+
return x.type().isSubtypeOf(_C.TensorType.get())
|
499 |
+
|
500 |
+
|
501 |
+
# Note: _C.JitType is not exposed to Python and cannot be checked in runtime.
|
502 |
+
def _as_list_type(jit_type: _C.JitType) -> Optional[_C.ListType]:
|
503 |
+
if isinstance(jit_type, _C.ListType):
|
504 |
+
return jit_type
|
505 |
+
return None
|
506 |
+
|
507 |
+
|
508 |
+
@_beartype.beartype
|
509 |
+
def _is_list(x: _C.Value) -> bool:
|
510 |
+
return _as_list_type(x.type()) is not None
|
511 |
+
|
512 |
+
|
513 |
+
@_beartype.beartype
|
514 |
+
def _is_tensor_list(x: _C.Value) -> bool:
|
515 |
+
x_type = _as_list_type(x.type())
|
516 |
+
if x_type is None:
|
517 |
+
return False
|
518 |
+
return isinstance(x_type.getElementType(), _C.TensorType)
|
519 |
+
|
520 |
+
|
521 |
+
@_beartype.beartype
|
522 |
+
def _is_scalar_list(x: _C.Value) -> bool:
|
523 |
+
"""Checks if x is a scalar list, for example: List[float], List[int].
|
524 |
+
|
525 |
+
Besides checking the type is ListType, we also check if the data type is
|
526 |
+
a valid ONNX data type.
|
527 |
+
"""
|
528 |
+
x_type = _as_list_type(x.type())
|
529 |
+
if x_type is None:
|
530 |
+
return False
|
531 |
+
scalar_type = _type_utils.JitScalarType.from_value(x)
|
532 |
+
return scalar_type.onnx_compatible()
|
533 |
+
|
534 |
+
|
535 |
+
@_beartype.beartype
|
536 |
+
def _is_tuple_construct(x: _C.Value) -> bool:
|
537 |
+
return x.node().kind() == "prim::TupleConstruct"
|
538 |
+
|
539 |
+
|
540 |
+
@_beartype.beartype
|
541 |
+
def is_complex_value(x: _C.Value) -> bool:
|
542 |
+
assert _is_value(x)
|
543 |
+
return _type_utils.JitScalarType.from_value(
|
544 |
+
x, _type_utils.JitScalarType.UNDEFINED
|
545 |
+
) in {
|
546 |
+
_type_utils.JitScalarType.COMPLEX32,
|
547 |
+
_type_utils.JitScalarType.COMPLEX64,
|
548 |
+
_type_utils.JitScalarType.COMPLEX128,
|
549 |
+
}
|
550 |
+
|
551 |
+
|
552 |
+
@_beartype.beartype
|
553 |
+
def is_caffe2_aten_fallback() -> bool:
|
554 |
+
return (
|
555 |
+
GLOBALS.operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
|
556 |
+
and _C_onnx._CAFFE2_ATEN_FALLBACK
|
557 |
+
)
|
558 |
+
|
559 |
+
|
560 |
+
@_beartype.beartype
|
561 |
+
def _get_tensor_rank(x: _C.Value) -> Optional[int]:
|
562 |
+
if not _is_tensor(x) or x.type() is None:
|
563 |
+
return None
|
564 |
+
x_type = x.type()
|
565 |
+
x_type = typing.cast(_C.TensorType, x_type)
|
566 |
+
return x_type.dim()
|
567 |
+
|
568 |
+
|
569 |
+
@_beartype.beartype
|
570 |
+
def _get_tensor_sizes(x: _C.Value, allow_nonstatic: bool = True):
|
571 |
+
if not _is_tensor(x) or x.type() is None:
|
572 |
+
return None
|
573 |
+
x_type = x.type()
|
574 |
+
x_type = typing.cast(_C.TensorType, x_type)
|
575 |
+
if allow_nonstatic:
|
576 |
+
# Each individual symbol is returned as None.
|
577 |
+
# e.g. [1, "a", "b"] -> [1, None, None]
|
578 |
+
return x_type.varyingSizes()
|
579 |
+
# returns None, if exists any symbol in sizes.
|
580 |
+
# e.g. [1, "a", "b"] -> None
|
581 |
+
return x_type.sizes()
|
582 |
+
|
583 |
+
|
584 |
+
@_beartype.beartype
|
585 |
+
def _get_tensor_dim_size(x: _C.Value, dim: int) -> Optional[int]:
|
586 |
+
sizes = _get_tensor_sizes(x)
|
587 |
+
return sizes[dim] if sizes else None
|
588 |
+
|
589 |
+
|
590 |
+
@_beartype.beartype
|
591 |
+
def _get_dim_for_cross(x: _C.Value, dim: Optional[int]):
|
592 |
+
if dim == -1:
|
593 |
+
tensor_rank = _get_tensor_rank(x)
|
594 |
+
assert tensor_rank is not None
|
595 |
+
return dim + tensor_rank
|
596 |
+
# If dim is not given, it defaults to the first dimension found with the size 3
|
597 |
+
if dim is None:
|
598 |
+
sizes = _get_tensor_sizes(x)
|
599 |
+
assert sizes is not None
|
600 |
+
for index, size in enumerate(sizes):
|
601 |
+
if size is not None and size == 3:
|
602 |
+
return index
|
603 |
+
return dim
|
604 |
+
|
605 |
+
|
606 |
+
@_beartype.beartype
|
607 |
+
def _unimplemented(op: str, msg: str, value: Optional[_C.Value] = None) -> None:
|
608 |
+
# For BC reasons, the behavior for Caffe2 does not raise exception for unimplemented operators
|
609 |
+
if _C_onnx._CAFFE2_ATEN_FALLBACK:
|
610 |
+
warnings.warn(f"ONNX export failed on {op} because {msg} not supported")
|
611 |
+
elif GLOBALS.operator_export_type == _C_onnx.OperatorExportTypes.ONNX:
|
612 |
+
_onnx_unsupported(f"{op}, {msg}", value)
|
613 |
+
|
614 |
+
|
615 |
+
@_beartype.beartype
|
616 |
+
def _onnx_unsupported(op_name: str, value: Optional[_C.Value] = None) -> NoReturn:
|
617 |
+
message = (
|
618 |
+
f"Unsupported: ONNX export of operator {op_name}. "
|
619 |
+
f"Please feel free to request support or submit a pull request "
|
620 |
+
f"on PyTorch GitHub: {_constants.PYTORCH_GITHUB_ISSUES_URL}"
|
621 |
+
)
|
622 |
+
if isinstance(value, _C.Value):
|
623 |
+
raise errors.SymbolicValueError(
|
624 |
+
message,
|
625 |
+
value,
|
626 |
+
)
|
627 |
+
raise errors.OnnxExporterError(message)
|
628 |
+
|
629 |
+
|
630 |
+
@_beartype.beartype
|
631 |
+
def _onnx_opset_unsupported(
|
632 |
+
op_name: str,
|
633 |
+
current_opset: int,
|
634 |
+
supported_opset: int,
|
635 |
+
value: Optional[_C.Value] = None,
|
636 |
+
) -> NoReturn:
|
637 |
+
message = (
|
638 |
+
f"Unsupported: ONNX export of {op_name} in opset {current_opset}. "
|
639 |
+
f"Please try opset version {supported_opset}."
|
640 |
+
)
|
641 |
+
if isinstance(value, _C.Value):
|
642 |
+
raise errors.SymbolicValueError(
|
643 |
+
message,
|
644 |
+
value,
|
645 |
+
)
|
646 |
+
raise errors.OnnxExporterError(message)
|
647 |
+
|
648 |
+
|
649 |
+
@_beartype.beartype
|
650 |
+
def _onnx_opset_unsupported_detailed(
|
651 |
+
op_name: str,
|
652 |
+
current_opset: int,
|
653 |
+
supported_opset: int,
|
654 |
+
reason: str,
|
655 |
+
value: Optional[_C.Value] = None,
|
656 |
+
) -> NoReturn:
|
657 |
+
message = (
|
658 |
+
f"Unsupported: ONNX export of {op_name} in "
|
659 |
+
f"opset {current_opset}. {reason}. Please try opset version {supported_opset}."
|
660 |
+
)
|
661 |
+
if isinstance(value, _C.Value):
|
662 |
+
raise errors.SymbolicValueError(
|
663 |
+
message,
|
664 |
+
value,
|
665 |
+
)
|
666 |
+
raise errors.OnnxExporterError(message)
|
667 |
+
|
668 |
+
|
669 |
+
@_beartype.beartype
|
670 |
+
def _block_list_in_opset(name: str):
|
671 |
+
def symbolic_fn(*args, **kwargs):
|
672 |
+
raise errors.OnnxExporterError(
|
673 |
+
f"ONNX export failed on {name}, which is not implemented for opset "
|
674 |
+
f"{GLOBALS.export_onnx_opset_version}. "
|
675 |
+
"Try exporting with other opset versions."
|
676 |
+
)
|
677 |
+
|
678 |
+
return symbolic_fn
|
679 |
+
|
680 |
+
|
681 |
+
@_beartype.beartype
|
682 |
+
def _try_get_scalar_type(*args) -> Optional[_type_utils.JitScalarType]:
|
683 |
+
for arg in args:
|
684 |
+
scalar_type = _type_utils.JitScalarType.from_value(
|
685 |
+
arg, _type_utils.JitScalarType.UNDEFINED
|
686 |
+
)
|
687 |
+
if scalar_type != _type_utils.JitScalarType.UNDEFINED:
|
688 |
+
return scalar_type
|
689 |
+
return None
|
690 |
+
|
691 |
+
|
692 |
+
@_beartype.beartype
|
693 |
+
def _select_helper(g: jit_utils.GraphContext, self, dim, index, apply_reshape=True):
|
694 |
+
index_const = _maybe_get_scalar(index)
|
695 |
+
index_dim = _get_tensor_rank(index)
|
696 |
+
if not _is_value(index_const):
|
697 |
+
# Index is a constant scalar. Make it a size 1 constant tensor.
|
698 |
+
index = g.op("Constant", value_t=torch.LongTensor([index_const]))
|
699 |
+
elif index_dim is not None and apply_reshape:
|
700 |
+
if index_dim == 0:
|
701 |
+
# Index is a scalar. Reshape it to a size 1 tensor.
|
702 |
+
index = _reshape_helper(
|
703 |
+
g, index, g.op("Constant", value_t=torch.LongTensor([1]))
|
704 |
+
)
|
705 |
+
|
706 |
+
index_scalar_type = _type_utils.JitScalarType.from_value(
|
707 |
+
index, _type_utils.JitScalarType.UNDEFINED
|
708 |
+
)
|
709 |
+
if index_scalar_type not in {
|
710 |
+
_type_utils.JitScalarType.INT64,
|
711 |
+
_type_utils.JitScalarType.INT,
|
712 |
+
}:
|
713 |
+
index = g.op("Cast", index, to_i=_C_onnx.TensorProtoDataType.INT64)
|
714 |
+
return g.op("Gather", self, index, axis_i=dim)
|
715 |
+
|
716 |
+
|
717 |
+
@_beartype.beartype
|
718 |
+
def _slice_helper(
|
719 |
+
g: jit_utils.GraphContext,
|
720 |
+
input,
|
721 |
+
axes,
|
722 |
+
starts,
|
723 |
+
ends,
|
724 |
+
steps=None,
|
725 |
+
):
|
726 |
+
if g.opset <= 9:
|
727 |
+
from torch.onnx.symbolic_opset9 import _slice as _slice9
|
728 |
+
|
729 |
+
return _slice9(g, input, axes, starts, ends)
|
730 |
+
else:
|
731 |
+
from torch.onnx.symbolic_opset10 import _slice as _slice10
|
732 |
+
|
733 |
+
return _slice10(g, input, axes, starts, ends, steps)
|
734 |
+
|
735 |
+
|
736 |
+
@_beartype.beartype
|
737 |
+
def _is_fp(value) -> bool:
|
738 |
+
return _type_utils.JitScalarType.from_value(
|
739 |
+
value, _type_utils.JitScalarType.UNDEFINED
|
740 |
+
) in {
|
741 |
+
_type_utils.JitScalarType.FLOAT,
|
742 |
+
_type_utils.JitScalarType.DOUBLE,
|
743 |
+
_type_utils.JitScalarType.HALF,
|
744 |
+
_type_utils.JitScalarType.BFLOAT16,
|
745 |
+
}
|
746 |
+
|
747 |
+
|
748 |
+
@_beartype.beartype
|
749 |
+
def _is_bool(value) -> bool:
|
750 |
+
return _type_utils.JitScalarType.from_value(
|
751 |
+
value, _type_utils.JitScalarType.UNDEFINED
|
752 |
+
) in {_type_utils.JitScalarType.BOOL}
|
753 |
+
|
754 |
+
|
755 |
+
@_beartype.beartype
|
756 |
+
def _generate_wrapped_number(g: jit_utils.GraphContext, scalar):
|
757 |
+
"""Creates a wrapped number based on https://github.com/pytorch/pytorch/issues/9515.
|
758 |
+
|
759 |
+
A Tensor is a considered a "wrapped number" if it is
|
760 |
+
auto-wrapped from a C++ or Python number type. Integer types are
|
761 |
+
wrapped as 0-dim int64 tensors and floating-point types are
|
762 |
+
wrapped as 0-dim double tensors.
|
763 |
+
|
764 |
+
The input to this function is constant value. If the data type
|
765 |
+
is a floating point type, it is converted to a 0-dim double
|
766 |
+
tensor, else it is converted to a 0-dim tensor of its original type
|
767 |
+
"""
|
768 |
+
assert not isinstance(scalar, torch.Tensor)
|
769 |
+
if isinstance(scalar, float):
|
770 |
+
return g.op("Constant", value_t=torch.tensor(scalar, dtype=torch.double))
|
771 |
+
return g.op("Constant", value_t=torch.tensor(scalar))
|
772 |
+
|
773 |
+
|
774 |
+
@_beartype.beartype
|
775 |
+
def _sort_helper(g: jit_utils.GraphContext, input, dim, decending=True, out=None):
|
776 |
+
if out is not None:
|
777 |
+
_unimplemented("Sort", "Out parameter is not supported")
|
778 |
+
shape_ = g.op("Shape", input)
|
779 |
+
dim_size_ = g.op(
|
780 |
+
"Gather",
|
781 |
+
shape_,
|
782 |
+
g.op("Constant", value_t=torch.tensor([dim], dtype=torch.int64)),
|
783 |
+
)
|
784 |
+
if g.opset <= 10:
|
785 |
+
if not decending:
|
786 |
+
_unimplemented("Sort", "Ascending is not supported")
|
787 |
+
return g.op("TopK", input, dim_size_, axis_i=dim, outputs=2)
|
788 |
+
else:
|
789 |
+
return g.op(
|
790 |
+
"TopK", input, dim_size_, axis_i=dim, largest_i=decending, outputs=2
|
791 |
+
)
|
792 |
+
|
793 |
+
|
794 |
+
@_beartype.beartype
|
795 |
+
def _topk_helper(
|
796 |
+
g: jit_utils.GraphContext, input, k, dim, largest=True, sorted=False, out=None
|
797 |
+
):
|
798 |
+
if out is not None:
|
799 |
+
_unimplemented("TopK", "Out parameter is not supported")
|
800 |
+
if not _is_value(k):
|
801 |
+
k = g.op("Constant", value_t=torch.tensor([k], dtype=torch.int64))
|
802 |
+
else:
|
803 |
+
k = _reshape_helper(g, k, g.op("Constant", value_t=torch.tensor([1])))
|
804 |
+
if _try_get_scalar_type(k) != _type_utils.JitScalarType.INT64:
|
805 |
+
k = g.op("Cast", k, to_i=_C_onnx.TensorProtoDataType.INT64)
|
806 |
+
if g.opset <= 10:
|
807 |
+
if not largest:
|
808 |
+
_unimplemented("TopK", "Ascending is not supported")
|
809 |
+
return g.op("TopK", input, k, axis_i=dim, outputs=2)
|
810 |
+
else:
|
811 |
+
return g.op(
|
812 |
+
"TopK", input, k, axis_i=dim, largest_i=largest, sorted_i=sorted, outputs=2
|
813 |
+
)
|
814 |
+
|
815 |
+
|
816 |
+
@_beartype.beartype
|
817 |
+
def _lt_helper(g: jit_utils.GraphContext, input, other):
|
818 |
+
if g.opset <= 8:
|
819 |
+
from torch.onnx.symbolic_opset8 import lt as _lt8
|
820 |
+
|
821 |
+
return _lt8(g, input, other)
|
822 |
+
else:
|
823 |
+
from torch.onnx.symbolic_opset9 import lt as _lt9
|
824 |
+
|
825 |
+
return _lt9(g, input, other)
|
826 |
+
|
827 |
+
|
828 |
+
@_beartype.beartype
|
829 |
+
def _interpolate_warning(interpolate_mode):
|
830 |
+
onnx_op = (
|
831 |
+
"onnx:Resize" if GLOBALS.export_onnx_opset_version >= 10 else "onnx:Upsample"
|
832 |
+
)
|
833 |
+
warnings.warn(
|
834 |
+
"You are trying to export the model with "
|
835 |
+
+ onnx_op
|
836 |
+
+ " for ONNX opset version "
|
837 |
+
"" + str(GLOBALS.export_onnx_opset_version) + ". "
|
838 |
+
"This operator might cause results to not match the expected results by PyTorch.\n"
|
839 |
+
"ONNX's Upsample/Resize operator did not match Pytorch's Interpolation until opset 11. "
|
840 |
+
"Attributes to determine how to transform the input were added in onnx:Resize in opset 11 "
|
841 |
+
"to support Pytorch's behavior (like coordinate_transformation_mode and nearest_mode).\n"
|
842 |
+
"We recommend using opset 11 and above for models using this operator."
|
843 |
+
)
|
844 |
+
|
845 |
+
|
846 |
+
@_beartype.beartype
|
847 |
+
def _unsqueeze_helper(g: jit_utils.GraphContext, input, axes_i):
|
848 |
+
if _is_constant(axes_i[0]):
|
849 |
+
if g.opset >= 13:
|
850 |
+
axes = g.op("Constant", value_t=torch.tensor(axes_i, dtype=torch.long))
|
851 |
+
return g.op("Unsqueeze", input, axes)
|
852 |
+
return g.op("Unsqueeze", input, axes_i=axes_i)
|
853 |
+
# Tensor type
|
854 |
+
if g.opset < 13:
|
855 |
+
raise errors.SymbolicValueError(
|
856 |
+
"Opset version must be >= 13 for Unsqueeze with dynamic axes.", input
|
857 |
+
)
|
858 |
+
return g.op("Unsqueeze", input, axes_i[0])
|
859 |
+
|
860 |
+
|
861 |
+
@_beartype.beartype
|
862 |
+
def _squeeze_helper(g: jit_utils.GraphContext, input, axes_i):
|
863 |
+
if _is_constant(axes_i[0]):
|
864 |
+
if g.opset >= 13:
|
865 |
+
axes = g.op("Constant", value_t=torch.tensor(axes_i, dtype=torch.long))
|
866 |
+
return g.op("Squeeze", input, axes)
|
867 |
+
return g.op("Squeeze", input, axes_i=axes_i)
|
868 |
+
# Tensor type
|
869 |
+
if g.opset < 13:
|
870 |
+
raise errors.SymbolicValueError(
|
871 |
+
"Opset version must be >= 13 for Squeeze with dynamic axes.", input
|
872 |
+
)
|
873 |
+
axes_t = axes_i[0]
|
874 |
+
axes_rank = _get_tensor_rank(axes_t)
|
875 |
+
assert axes_rank is not None
|
876 |
+
if axes_rank > 1:
|
877 |
+
raise errors.SymbolicValueError(
|
878 |
+
"For Squeeze axses as input, the axes rank must be one in ONNX spec.", input
|
879 |
+
)
|
880 |
+
elif axes_rank == 0:
|
881 |
+
# The axes is a scalar. Unsqueeze it to a rank 1 tensor.
|
882 |
+
axes_t = _unsqueeze_helper(g, axes_t, [0])
|
883 |
+
return g.op("Squeeze", input, axes_t)
|
884 |
+
return g.op("Squeeze", input, axes_t)
|
885 |
+
|
886 |
+
|
887 |
+
@_beartype.beartype
def _reducesum_helper(
    g: jit_utils.GraphContext,
    input,
    axes_i=None,
    keepdims_i=1,
    noop_with_empty_axes_i=0,
):
    keepdims_i = _maybe_get_const(keepdims_i, "i")
    if g.opset >= 13:
        if axes_i:
            if not _is_value(axes_i):
                axes_i = g.op(
                    "Constant", value_t=torch.tensor(axes_i, dtype=torch.long)
                )
            return g.op(
                "ReduceSum",
                input,
                axes_i,
                keepdims_i=keepdims_i,
                noop_with_empty_axes_i=noop_with_empty_axes_i,
            )
        return g.op(
            "ReduceSum",
            input,
            keepdims_i=keepdims_i,
            noop_with_empty_axes_i=noop_with_empty_axes_i,
        )
    else:
        return g.op("ReduceSum", input, axes_i=axes_i, keepdims_i=keepdims_i)


@_beartype.beartype
def _interpolate_size_to_scales(g: jit_utils.GraphContext, input, output_size, dim):
    output_size = _maybe_get_const(output_size, "is")
    if _is_value(output_size):
        offset = 2
        offsets = g.op("Constant", value_t=torch.ones(offset, dtype=torch.float32))
        dividend = g.op("Cast", output_size, to_i=_C_onnx.TensorProtoDataType.FLOAT)
        divisor = _slice_helper(
            g, g.op("Shape", input), axes=[0], ends=[sys.maxsize], starts=[offset]
        )
        divisor = g.op("Cast", divisor, to_i=_C_onnx.TensorProtoDataType.FLOAT)
        scale_dims = g.op("Div", dividend, divisor)
        scales = g.op("Concat", offsets, scale_dims, axis_i=0)
    else:
        scales_constant = [
            1.0
            if i < 2
            else float(output_size[-(dim - i)])
            / float(input.type().sizes()[-(dim - i)])
            for i in range(0, dim)
        ]
        scales = g.op(
            "Constant", value_t=torch.tensor(scales_constant, dtype=torch.float32)
        )
    return scales


@_beartype.beartype
def _interpolate_get_scales_if_available(g: jit_utils.GraphContext, scales):
    available_scales = _maybe_get_const(scales[0], "fs") != -1 and not _is_none(
        scales[0]
    )

    if not available_scales:
        return None

    offsets = g.op("Constant", value_t=torch.ones(2, dtype=torch.float32))
    scales_list = g.op(
        "Constant", value_t=torch.tensor(_maybe_get_const(scales[0], "fs"))
    )
    scales = g.op("Concat", offsets, scales_list, axis_i=0)
    return scales


@_beartype.beartype
def _get_interpolate_attributes(g: jit_utils.GraphContext, mode, args):
    if mode == "nearest":
        align_corners = None
        scales = args[0:]
    else:
        align_corners = args[0]
        scales = args[1:]
    scales = _interpolate_get_scales_if_available(g, scales)
    return scales, align_corners


@_beartype.beartype
def _interpolate_get_scales(g: jit_utils.GraphContext, scale_factor, dim):
    offsets = g.op("Constant", value_t=torch.ones(2, dtype=torch.float32))
    scale_factor_rank = _get_tensor_rank(scale_factor)
    if isinstance(scale_factor.type(), _C.ListType) or (
        scale_factor_rank is not None and scale_factor_rank > 0
    ):
        return g.op("Concat", offsets, scale_factor, axis_i=0)
    else:
        scale_factor = _unsqueeze_helper(g, scale_factor, [0])
        scale_factor = g.op(
            "Cast", scale_factor, to_i=_C_onnx.TensorProtoDataType.FLOAT
        )
        scales = [scale_factor for i in range(dim - 2)]
    scale_factor = g.op("Concat", offsets, *scales, axis_i=0)
    return scale_factor


@_beartype.beartype
def _interpolate_get_scales_and_mode(
    g: jit_utils.GraphContext, input, size, scale_factor, mode, align_corners
):
    mode = _maybe_get_const(mode, "s")
    if "linear" in mode:
        mode = "linear"
    if "cubic" in mode:
        mode = "cubic"
    _interpolate_warning(mode)

    align_corners = _maybe_get_const(align_corners, "b")
    if isinstance(align_corners, bool) and align_corners:
        return _unimplemented("interpolate", "align_corners == True")

    if not input.type().dim():
        return _unimplemented("interpolate", "missing input shape")
    dim = input.type().dim()

    if not _is_none(scale_factor):
        scale_factor = _interpolate_get_scales(g, scale_factor, dim)
    elif not _is_none(size):
        if not _is_packed_list(size):
            is_scalar = _maybe_get_const(size, "t").dim() == 0
            if is_scalar:
                size = _unsqueeze_helper(g, size, [0])
                size = [size for i in range(dim - 2)]
                size = g.op("Concat", *size, axis_i=0)
        scale_factor = _interpolate_size_to_scales(g, input, size, dim)
    else:
        return _unimplemented(
            "interpolate", "Both size and scales are None in __interpolate"
        )
    return scale_factor, mode
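# --- Illustrative sketch (not part of the file above): the arithmetic that
# _interpolate_size_to_scales performs. For an NCHW input, the first two scale
# entries are pinned to 1.0 and each spatial scale is output_size / input_size.
# The concrete numbers here are hypothetical:
#
# input_sizes = [1, 3, 16, 16]   # N, C, H, W
# output_size = [32, 48]         # requested H, W
# scales = [1.0, 1.0] + [
#     float(o) / float(i) for o, i in zip(output_size, input_sizes[2:])
# ]
# assert scales == [1.0, 1.0, 2.0, 3.0]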
@_beartype.beartype
def _argmin_argmax_helper(
    g: jit_utils.GraphContext,
    input: torch._C.Value,
    dim: torch._C.Value,
    keepdim: bool,
    op_name: str,
):
    def op_wrapper(input, axis_i, keepdims_i):
        if g.opset >= 12:
            return g.op(
                op_name,
                input,
                axis_i=axis_i,
                keepdims_i=keepdims_i,
                select_last_index_i=False,
            )
        return g.op(op_name, input, axis_i=axis_i, keepdims_i=keepdims_i)

    if _is_none(dim):
        flattened = _reshape_helper(
            g, input, g.op("Constant", value_t=torch.tensor([-1]))
        )
        output = op_wrapper(flattened, axis_i=0, keepdims_i=False)
        if keepdim:
            input_shape = g.op("Shape", input)
            input_shape_shape = g.op("Shape", input_shape)
            new_shape = g.op(
                "ConstantOfShape",
                input_shape_shape,
                value_t=torch.tensor([1], dtype=torch.int64),
            )
            output = g.op("Reshape", output, new_shape)
        return output

    dim = _parse_arg(dim, "i")
    return op_wrapper(input, axis_i=dim, keepdims_i=keepdim)
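# --- Illustrative sketch (not part of the file above): what the dim-is-None
# branch of _argmin_argmax_helper encodes. PyTorch's argmax/argmin with no dim
# reduce over the flattened tensor, which the helper mirrors with a Reshape to
# [-1] followed by ArgMax/ArgMin over axis 0 (plus, for keepdim, a reshape to
# an all-ones shape of the input's rank):
#
# import torch
#
# x = torch.tensor([[1, 9], [4, 2]])
# assert torch.argmax(x) == torch.argmax(x.reshape(-1), dim=0)  # flat index 1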
@_beartype.beartype
def _interpolate_helper(name, dim, interpolate_mode):
    @quantized_args(True, False, False)
    def symbolic_fn(g, input, output_size, *args):
        scales, align_corners = _get_interpolate_attributes(g, interpolate_mode, args)
        align_corners = _maybe_get_scalar(align_corners)
        coordinate_transformation_mode = (
            "asymmetric"
            if interpolate_mode == "nearest"
            else "align_corners"
            if align_corners
            else "half_pixel"
        )

        if scales is None:
            input_size = g.op("Shape", input)
            input_size_beg = _slice_helper(
                g, input_size, axes=[0], ends=[2], starts=[0]
            )
            output_size = g.op(
                "Cast", output_size, to_i=_C_onnx.TensorProtoDataType.INT64
            )
            output_size = g.op("Concat", input_size_beg, output_size, axis_i=0)

            if g.opset >= 13:
                empty_roi = _optional_input_placeholder_tensor(g)
                empty_scales = _optional_input_placeholder_tensor(g)
            else:
                empty_roi = g.op(
                    "Constant", value_t=torch.tensor([], dtype=torch.float32)
                )
                empty_scales = g.op(
                    "Constant", value_t=torch.tensor([], dtype=torch.float32)
                )

            return g.op(
                "Resize",
                input,
                empty_roi,
                empty_scales,
                output_size,
                coordinate_transformation_mode_s=coordinate_transformation_mode,
                cubic_coeff_a_f=-0.75,  # only valid when mode="cubic"
                mode_s=interpolate_mode,  # nearest, linear, or cubic
                nearest_mode_s="floor",  # only valid when mode="nearest"
            )
        else:
            if g.opset >= 13:
                empty_roi = _optional_input_placeholder_tensor(g)
            else:
                empty_roi = g.op(
                    "Constant", value_t=torch.tensor([], dtype=torch.float32)
                )

            return g.op(
                "Resize",
                input,
                empty_roi,
                scales,
                coordinate_transformation_mode_s=coordinate_transformation_mode,
                cubic_coeff_a_f=-0.75,  # only valid when mode="cubic"
                mode_s=interpolate_mode,  # nearest, linear, or cubic
                nearest_mode_s="floor",  # only valid when mode="nearest"
            )

    return symbolic_fn


@_beartype.beartype
def __interpolate_helper(
    g: jit_utils.GraphContext,
    input,
    size,
    scale_factor,
    mode,
    align_corners,
    recompute_scale_factor,
):
    mode = _maybe_get_const(mode, "s")
    if "linear" in mode:
        mode = "linear"
    if "cubic" in mode:
        mode = "cubic"
    align_corners = _maybe_get_const(align_corners, "b")
    align_corners = False if not isinstance(align_corners, bool) else align_corners
    coordinate_transformation_mode = (
        "asymmetric"
        if mode == "nearest"
        else "align_corners"
        if align_corners
        else "half_pixel"
    )

    if not _is_none(size):
        input_size = g.op("Shape", input)
        input_size = _slice_helper(g, input_size, axes=[0], ends=[2], starts=[0])
        # In some cases size is not a packed list but a scalar. We would need to
        # also verify that (_maybe_get_const(size, "t").dim() == 0), but that
        # information is not always available. Try to get the dim, and if that
        # fails, assume that size is not a scalar.
        try:
            is_scalar = not _is_packed_list(size) and (
                _maybe_get_const(size, "t").dim() == 0
            )
        except AttributeError:
            is_scalar = not _is_packed_list(size)
            if not is_scalar:
                warnings.warn(
                    "Cannot verify if the output_size is a scalar "
                    "while exporting interpolate. Assuming that it is not a scalar."
                )

        if is_scalar:
            rank = _get_tensor_rank(input)
            if rank is None:
                return _unimplemented(
                    "interpolate (with a scalar output_size)",
                    "missing input shape (try giving an array of output_size values)",
                )
            size = _unsqueeze_helper(g, size, [0])
            size = [size for i in range(rank - 2)]
            size = g.op("Concat", *size, axis_i=0)
        size = g.op("Cast", size, to_i=_C_onnx.TensorProtoDataType.INT64)
        size = g.op("Concat", input_size, size, axis_i=0)

        if g.opset >= 13:
            empty_roi = _optional_input_placeholder_tensor(g)
            empty_scales = _optional_input_placeholder_tensor(g)
        else:
            empty_roi = g.op("Constant", value_t=torch.tensor([], dtype=torch.float32))
            empty_scales = g.op(
                "Constant", value_t=torch.tensor([], dtype=torch.float32)
            )

        return g.op(
            "Resize",
            input,
            empty_roi,
            empty_scales,
            size,
            coordinate_transformation_mode_s=coordinate_transformation_mode,
            cubic_coeff_a_f=-0.75,  # only valid when mode="cubic"
            mode_s=mode,  # nearest, linear, or cubic
            nearest_mode_s="floor",
        )
    else:  # if not _is_none(scales)
        rank = _get_tensor_rank(input)
        if rank is None:
            return _unimplemented("interpolate (with scales)", "missing input shape")

        if g.opset >= 13:
            empty_roi = _optional_input_placeholder_tensor(g)
        else:
            empty_roi = g.op("Constant", value_t=torch.tensor([], dtype=torch.float32))

        scales = _interpolate_get_scales(g, scale_factor, rank)
        return g.op(
            "Resize",
            input,
            empty_roi,
            scales,
            coordinate_transformation_mode_s=coordinate_transformation_mode,
            cubic_coeff_a_f=-0.75,  # only valid when mode="cubic"
            mode_s=mode,  # nearest, linear, or cubic
            nearest_mode_s="floor",  # only valid when mode="nearest"
        )
@_beartype.beartype
def _unbind_helper(g: jit_utils.GraphContext, self, dim, _outputs):
    if g.opset < 11:
        from torch.onnx.symbolic_opset9 import unbind
    elif g.opset <= 12:
        from torch.onnx.symbolic_opset11 import unbind  # type: ignore[no-redef]
    else:
        from torch.onnx.symbolic_opset13 import unbind  # type: ignore[no-redef]
    return unbind(g, self, dim, _outputs)


@_beartype.beartype
def _scatter_helper(g: jit_utils.GraphContext, self, dim, index, src):
    if g.opset <= 10:
        from torch.onnx.symbolic_opset9 import scatter
    else:
        # for mypy, scatter was imported two lines above
        from torch.onnx.symbolic_opset11 import scatter  # type: ignore[no-redef]
    return scatter(g, self, dim, index, src)


@_beartype.beartype
def _repeat_interleave_split_helper(g: jit_utils.GraphContext, self, reps, dim):
    if g.opset <= 12:
        split_out = g.op("Split", self, split_i=[1] * reps, axis_i=dim, outputs=reps)
    else:
        from torch.onnx.symbolic_opset13 import split

        repeats = g.op("Constant", value_t=torch.tensor([1] * reps))
        split_out = split(g, self, repeats, dim, _outputs=reps)
    return split_out if reps > 1 else [split_out]
@_beartype.beartype
def _repeat_interleave_single_value_repeat_helper(
    g: jit_utils.GraphContext, self, repeats, dim
):
    from torch.onnx.symbolic_opset9 import flatten, unsqueeze

    if not _is_tensor(repeats):
        repeats = g.op("Constant", value_t=torch.LongTensor(repeats))

    const_repeats: bool = _is_constant(repeats)
    reps = _maybe_get_const(repeats, "t")

    # Convert 'repeats' to 1-d if it is 0-d.
    if _get_tensor_rank(repeats) == 0:
        repeats = g.op("Reshape", repeats, g.op("Constant", value_t=torch.tensor([1])))

    # Create a new dim of size 1, then expand it to be 'repeats' long, and finally collapse it.
    unsqueezed = unsqueeze(g, self, dim + 1)

    # repeats_per_dim is 1 for all dims except for the new unsqueezed dim, where it has value 'repeats'.
    if const_repeats:
        # 'Repeats' is a constant, so 'repeats_per_dim' can be a constant.
        onehot = torch.ones(_get_tensor_rank(unsqueezed), dtype=torch.int64)
        onehot[dim + 1] = reps
        repeats_per_dim = g.op("Constant", value_t=onehot)
    else:
        # 'Repeats' is a variable, so 'repeats_per_dim' cannot be a constant.
        onehot = g.op(
            "OneHot",
            unsqueeze(g, dim + 1, 0),  # indices, must be >= 1-dimensional
            g.op(
                "Constant", value_t=torch.tensor(_get_tensor_rank(unsqueezed))
            ),  # depth
            g.op(
                "Concat", g.op("Constant", value_t=torch.tensor([1])), repeats, axis_i=0
            ),  # on/off values
        )
        repeats_per_dim = flatten(g, onehot, 0, 1)

    tiled = g.op("Tile", unsqueezed, repeats_per_dim)
    return flatten(g, tiled, dim, dim + 1)
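# --- Illustrative sketch (not part of the file above): the unsqueeze/Tile/
# flatten trick used by _repeat_interleave_single_value_repeat_helper, replayed
# in eager mode. Inserting a size-1 dim after `dim`, expanding it to `repeats`,
# and collapsing the pair reproduces repeat_interleave with a scalar repeat
# count:
#
# import torch
#
# x = torch.tensor([1, 2, 3])
# repeats, dim = 2, 0
# manual = x.unsqueeze(dim + 1).expand(-1, repeats).flatten(dim, dim + 1)
# assert torch.equal(manual, torch.repeat_interleave(x, repeats, dim))  # [1,1,2,2,3,3]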
@_beartype.beartype
def _arange_cast_helper(
    g: jit_utils.GraphContext, end, start=None, step=None, dtype=None
) -> Tuple[
    _type_utils.JitScalarType,
    Optional[_C.Value],
    Optional[_C.Value],
    Optional[_C.Value],
]:
    def _is_all_integral(scalars):
        for scalar in scalars:
            scalar_type = _type_utils.JitScalarType.from_value(
                scalar, _type_utils.JitScalarType.UNDEFINED
            )
            if (
                scalar_type != _type_utils.JitScalarType.INT64
                and scalar_type != _type_utils.JitScalarType.UNDEFINED
            ):
                return False
        return True

    # This logic is based on the torch.arange docs. If "dtype" is provided,
    # infer the input types from dtype. If not, then check if any of start,
    # stop, or step are floating point, and infer the type from
    # torch.get_default_dtype(). Otherwise, the dtype is inferred to be
    # torch.int64.
    if dtype is None or (_is_value(dtype) and _is_none(dtype)):
        if _is_all_integral([start, end, step]):
            scalar_type = _type_utils.JitScalarType.INT64
        else:
            scalar_type = _type_utils.JitScalarType.from_dtype(
                torch.get_default_dtype()
            )
    else:
        assert isinstance(dtype, int)
        # TODO(justinchuby): Check if dtype is indeed an int.
        scalar_type = _type_utils.JitScalarType(dtype)

    start = g.op("Cast", start, to_i=scalar_type.onnx_type()) if start else None
    end = g.op("Cast", end, to_i=scalar_type.onnx_type()) if end else None
    step = g.op("Cast", step, to_i=scalar_type.onnx_type()) if step else None
    return scalar_type, end, start, step
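# --- Illustrative sketch (not part of the file above): the torch.arange dtype
# rules that _arange_cast_helper mirrors. With no explicit dtype, all-integral
# arguments produce int64, while any floating-point argument promotes the
# result to the default dtype (usually float32):
#
# import torch
#
# assert torch.arange(0, 5, 1).dtype == torch.int64
# assert torch.arange(0, 5, 0.5).dtype == torch.get_default_dtype()
# assert torch.arange(5, dtype=torch.float64).dtype == torch.float64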
@_beartype.beartype
def _arange_helper(g: jit_utils.GraphContext, *args):
    if g.opset <= 10:
        from torch.onnx.symbolic_opset9 import arange
    else:
        from torch.onnx.symbolic_opset11 import arange  # type: ignore[no-redef]
    return arange(g, *args)


@_beartype.beartype
def _size_helper(g: jit_utils.GraphContext, self, dim):
    full_shape = g.op("Shape", self)
    from torch.onnx.symbolic_opset9 import select

    return select(g, full_shape, g.op("Constant", value_t=torch.tensor([0])), dim)


@_beartype.beartype
def _index_fill_reshape_helper(g: jit_utils.GraphContext, self, dim, index):
    # 1. reshape index => [1, ..., 1, dim, 1, ..., 1]
    # 2. expand index => [..., dim, ...], same shape as self except for dim.
    # 3. expand value as well.
    # 4. apply onnx::scatter.

    from torch.onnx.symbolic_opset9 import expand

    if g.opset <= 10:
        from torch.onnx.symbolic_opset9 import scatter
    else:
        # for mypy, scatter was imported two lines above
        from torch.onnx.symbolic_opset11 import scatter  # type: ignore[no-redef]

    if self.type().dim() is None:
        return _unimplemented("index_fill", "input rank not accessible")
    self_dim = self.type().dim()
    dim_value = _parse_arg(dim, "i")
    if dim_value < 0:
        dim_value += self_dim
    unsqueezed_index = _unsqueeze_helper(
        g, index, [i for i in range(self_dim) if i != dim_value]
    )
    expanded_index_shape = scatter(
        g, g.op("Shape", self), 0, _unsqueeze_helper(g, dim, [0]), g.op("Shape", index)
    )
    expanded_index = expand(g, unsqueezed_index, expanded_index_shape, None)
    return expanded_index_shape, expanded_index
# By default, when any value in the 'shape' input is equal to zero,
# the corresponding dimension value is copied from the input tensor dynamically.
# allowzero=1 indicates that if any value in the 'shape' input is set to zero,
# the zero value is honored, similar to NumPy.
# allowzero=1 is only supported for opset version >= 14.
@_beartype.beartype
def _reshape_helper(g: jit_utils.GraphContext, input, shape, allowzero=0):
    shape = _maybe_get_const(shape, "is")
    if not _is_value(shape):
        shape = g.op("Constant", value_t=torch.LongTensor(shape))
    if g.opset <= 13:
        if allowzero == 1:
            _onnx_opset_unsupported(
                "Reshape with allowzero=1", GLOBALS.export_onnx_opset_version, 14, input
            )
        return g.op("Reshape", input, shape)
    else:
        return g.op("Reshape", input, shape, allowzero_i=allowzero)
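# --- Illustrative sketch (not part of the file above): the two meanings of a
# zero in the Reshape 'shape' input that the allowzero comment describes. With
# the default allowzero=0, a shape entry of 0 means "copy that dim from the
# input", so [0, 12] applied to a (2, 3, 4) tensor yields (2, 12). With
# allowzero=1 the zero is taken literally, matching NumPy, where a zero-sized
# dimension really is empty:
#
# import numpy as np
#
# x = np.zeros((0, 3, 4))               # genuinely empty first dim
# assert np.reshape(x, (0, 12)).shape == (0, 12)  # NumPy / allowzero=1 semantics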
@_beartype.beartype
def _batchnorm_helper(
    g: jit_utils.GraphContext, input, weight, bias, running_mean, running_var
):
    from torch.onnx.symbolic_opset9 import _var_mean

    batch_size = _get_tensor_dim_size(input, 0)
    channel_size = _get_tensor_dim_size(input, 1)

    if weight is None or _is_none(weight):
        if channel_size is None:
            raise errors.SymbolicValueError(
                "Unsupported: ONNX export of batch_norm for unknown channel size.",
                input,
            )
        weight_value = torch.tensor(
            [1.0] * channel_size,
            dtype=_type_utils.JitScalarType.from_value(input).dtype(),
        )
        weight = g.op("Constant", value_t=weight_value)
    if bias is None or _is_none(bias):
        if channel_size is None:
            raise errors.SymbolicValueError(
                "Unsupported: ONNX export of batch_norm for unknown channel size.",
                input,
            )
        bias_value = torch.tensor(
            [0.0] * channel_size,
            dtype=_type_utils.JitScalarType.from_value(input).dtype(),
        )
        bias = g.op("Constant", value_t=bias_value)
    # If track_running_stats is set to False, batch statistics are instead used during evaluation time
    if (
        running_mean is None
        or _is_none(running_mean)
        or running_var is None
        or _is_none(running_var)
    ):
        assert batch_size is not None and channel_size is not None
        reshape_in = _reshape_helper(
            g,
            input,
            g.op(
                "Constant",
                value_t=torch.tensor([batch_size, channel_size, -1], dtype=torch.int64),
            ),
        )
        trans_in = g.op("Transpose", reshape_in, perm_i=[0, 2, 1])
        running_var, running_mean = _var_mean(
            g,
            trans_in,
            g.op("Constant", value_t=torch.tensor([0, 1], dtype=torch.int64)),
            False,
            False,
        )
    return weight, bias, running_mean, running_var


@_beartype.beartype
def _avgpool_helper(
    tuple_fn: Callable[[Any], Sequence[int]],
    padding: Union[int, Sequence[int]],
    kernel_size,
    stride,
    divisor_override,
    name,
) -> Tuple[int, ...]:
    if divisor_override and divisor_override.node().kind() != "prim::Constant":
        _unimplemented(name, "divisor_override")
    return tuple(tuple_fn(padding))


@_beartype.beartype
def check_training_mode(op_train_mode: int, op_name: str) -> None:
    """Warns the user if the model's training mode and the export mode do not agree."""
    if GLOBALS.training_mode == _C_onnx.TrainingMode.PRESERVE:
        return

    if op_train_mode:
        op_mode_enum = _C_onnx.TrainingMode.TRAINING
    else:
        op_mode_enum = _C_onnx.TrainingMode.EVAL
    if op_mode_enum == GLOBALS.training_mode:
        # The modes agree. Do nothing.
        return

    op_mode_text = f"train={bool(op_train_mode)}"
    # Setting the model mode could result in op_mode != GLOBALS.training_mode
    # if the model is a FuncModule. In this case we warn the user of
    # the state and export depending on op_mode.
    # This is to support use cases of fixing certain layer weights
    # in training.
    warnings.warn(
        f"ONNX export mode is set to {GLOBALS.training_mode}, but operator '{op_name}' "
        f"is set to {op_mode_text}. Exporting with {op_mode_text}."
    )


@_beartype.beartype
def _flatten_helper(g: jit_utils.GraphContext, input, start_dim, end_dim, dim):
    input_size = g.op("Shape", input)
    slice1 = _slice_helper(g, input_size, axes=[0], starts=[0], ends=[start_dim])
    slices = [slice1, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long))]
    if end_dim < dim - 1:
        slice3 = _slice_helper(
            g, input_size, axes=[0], starts=[end_dim + 1], ends=[dim]
        )
        slices = [
            slice1,
            g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)),
            slice3,
        ]

    final_shape = g.op("Concat", *slices, axis_i=0)
    from torch.onnx.symbolic_opset9 import _reshape_from_tensor

    return _reshape_from_tensor(g, input, final_shape)


@_beartype.beartype
def _is_split_static(split_size_or_sizes, _outputs):
    if _outputs is None:
        return False
    if (
        _is_value(split_size_or_sizes)
        and split_size_or_sizes.node().kind() != "onnx::Constant"
    ):
        return False
    return True


@_beartype.beartype
def _optional_input_placeholder_tensor(g):
    n = g.op("prim::Constant")
    n.setType(_C.OptionalType.ofTensor())
    return n


@_beartype.beartype
def _handle_reduce_dim_none(g: jit_utils.GraphContext, self, op_name):
    rank = _get_tensor_rank(self)
    if rank is not None and any(
        _get_tensor_dim_size(self, i) == 0 for i in range(rank)
    ):
        # If the input tensor is empty, according to the ONNX ReduceSum definition,
        # set keepdims=1 so that the resulting tensor has the same rank as the input.
        return g.op(op_name, self, keepdims_i=1)
    return g.op(op_name, self, keepdims_i=0)
@_beartype.beartype
def dequantize_helper(
    g: jit_utils.GraphContext,
    qtensor: _C.Value,
    qdtype: Optional[_C_onnx.TensorProtoDataType] = None,
) -> Tuple[_C.Value, _C.Value, _C.Value, Optional[_C.Value]]:
    """Appends to graph `g` ONNX nodes that dequantize `qtensor` into `tensor`.

    Args:
        g: Graph, the ONNX IR graph that is under construction.
        qtensor: torch._C.Value, either a tuple of (quantized_tensor, scale, zero_point)
            for per tensor quantization, or
            (quantized_tensor, scale, zero_point, axis) for per channel quantization,
            representing the quantized tensor.
        qdtype: torch.onnx.TensorProtoDataType default None, if not None, represents the
            data type of the quantized tensor. It must be either
            torch.onnx.TensorProtoDataType.UINT8 or torch.onnx.TensorProtoDataType.INT8.
    """
    unpacked_qtensors = _unpack_quantized_tensor(qtensor)
    tensor, scale, zero_point = unpacked_qtensors[:3]
    axis = unpacked_qtensors[3] if len(unpacked_qtensors) >= 4 else None
    axis_i = _get_const(axis, "i", "axis")
    input_qdtype = _type_utils.JitScalarType.from_value(tensor)
    if qdtype is None:
        if input_qdtype is not None:
            qdtype = input_qdtype.onnx_type()
        else:
            qdtype = _C_onnx.TensorProtoDataType.UINT8
    value = g.op("Cast", tensor, to_i=qdtype)
    scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT)
    zero_point = g.op("Cast", zero_point, to_i=qdtype)

    if axis_i is not None and GLOBALS.export_onnx_opset_version < 13:
        _onnx_opset_unsupported_detailed(
            "DequantizeLinear",
            GLOBALS.export_onnx_opset_version,
            13,
            "Attribute axis is not supported.",
            qtensor,
        )

    return (
        g.op("DequantizeLinear", value, scale, zero_point, axis_i=axis_i),
        scale,
        zero_point,
        axis,
    )
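# --- Illustrative sketch (not part of the file above): the arithmetic the
# emitted DequantizeLinear node performs, y = (x - zero_point) * scale,
# replayed with plain tensors. The values here are hypothetical:
#
# import torch
#
# q = torch.tensor([0, 128, 255], dtype=torch.uint8)
# scale, zero_point = 0.1, 128
# dequantized = (q.to(torch.float32) - zero_point) * scale
# assert torch.allclose(dequantized, torch.tensor([-12.8, 0.0, 12.7]))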
@_beartype.beartype
def quantize_helper(
    g: jit_utils.GraphContext,
    tensor: _C.Value,
    scale: _C.Value,
    zero_point: _C.Value,
    axis: Optional[_C.Value] = None,
) -> _C.Value:
    """Appends to graph `g` ONNX nodes that quantize `tensor` based on `scale`, `zero_point` and `axis`.

    Args:
        g: Graph, the ONNX IR graph that is under construction.
        tensor: torch._C.Value, representing the tensor to be quantized.
        scale: torch._C.Value, quantized scale.
        zero_point: torch._C.Value, quantized zero point.
        axis: Optional[torch._C.Value] default None, if None, represents per tensor quantization.
            Otherwise, represents per channel quantization, along the given axis.

    Returns:
        A TupleConstruct storing information of the quantized tensor.
    """
    if (
        axis is not None
        and not _is_none(axis)
        and GLOBALS.export_onnx_opset_version < 13
    ):
        _onnx_opset_unsupported_detailed(
            "QuantizeLinear",
            GLOBALS.export_onnx_opset_version,
            13,
            "Attribute axis is not supported.",
            tensor,
        )

    assert scale is not None
    if (
        _type_utils.JitScalarType.from_value(scale, _type_utils.JitScalarType.UNDEFINED)
        != _type_utils.JitScalarType.FLOAT
    ):
        scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT)

    assert zero_point is not None
    if _type_utils.JitScalarType.from_value(
        zero_point, _type_utils.JitScalarType.UNDEFINED
    ) not in {
        _type_utils.JitScalarType.UINT8,
        _type_utils.JitScalarType.INT8,
    }:
        zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8)
    output = g.op(
        "QuantizeLinear",
        tensor,
        scale,
        zero_point,
        axis_i=_get_const(axis, "i", "axis"),
    )
    args = [output, scale, zero_point]
    if axis is not None and not _is_none(axis):
        args.append(axis)
    return g.op("prim::TupleConstruct", *args)
@_beartype.beartype
def requantize_bias_helper(
    g: jit_utils.GraphContext, bias, input_scale, weight_scale, axis=None
):
    """In PyTorch, bias is float and is quantized to int32 implicitly inside the quantized ATen op kernel.
    In ONNX we need to make the quantization explicit because operators expect all of their inputs to be quantized.
    Since int32 is not a supported output type of the ONNX operator `QuantizeLinear`, quantization is exported using
    regular operators.
    """
    bias_scale = g.op("Mul", weight_scale, input_scale)
    bias_scale_shape = g.op("Shape", bias_scale)
    bias_zero_point = g.op(
        "ConstantOfShape", bias_scale_shape, value_t=torch.tensor([0], dtype=torch.int)
    )
    q_bias = g.op(
        "Cast", g.op("Div", bias, bias_scale), to_i=_C_onnx.TensorProtoDataType.INT32
    )
    axis_args = []
    if axis is not None and not _is_none(axis):
        axis_args.append(axis)
    return g.op("prim::TupleConstruct", q_bias, bias_scale, bias_zero_point, *axis_args)
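# --- Illustrative sketch (not part of the file above): the bias requantization
# above in plain arithmetic. The bias scale is the product of the input and
# weight scales, the zero point is 0, and the float bias is divided by that
# scale and cast to int32 (values here are hypothetical, chosen to be exact in
# binary floating point):
#
# import torch
#
# bias = torch.tensor([0.25, -0.5])
# input_scale, weight_scale = 0.5, 0.25
# bias_scale = input_scale * weight_scale        # 0.125
# q_bias = (bias / bias_scale).to(torch.int32)   # Cast truncates, like the graph
# assert q_bias.tolist() == [2, -4]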
@_beartype.beartype
def args_have_same_dtype(args):
    assert args
    base_dtype = _type_utils.JitScalarType.from_value(args[0])
    has_same_dtype = all(
        _type_utils.JitScalarType.from_value(elem) == base_dtype for elem in args
    )
    return has_same_dtype


# Deprecated. Internally use _type_utils.ScalarType
# TODO: remove these once we support Type's in the JIT IR and we can once again
# use the unified toType operator
cast_pytorch_to_onnx = {
    "Byte": _C_onnx.TensorProtoDataType.UINT8,
    "Char": _C_onnx.TensorProtoDataType.INT8,
    "Double": _C_onnx.TensorProtoDataType.DOUBLE,
    "Float": _C_onnx.TensorProtoDataType.FLOAT,
    "Half": _C_onnx.TensorProtoDataType.FLOAT16,
    "Int": _C_onnx.TensorProtoDataType.INT32,
    "Long": _C_onnx.TensorProtoDataType.INT64,
    "Short": _C_onnx.TensorProtoDataType.INT16,
    "Bool": _C_onnx.TensorProtoDataType.BOOL,
    "ComplexFloat": _C_onnx.TensorProtoDataType.COMPLEX64,
    "ComplexDouble": _C_onnx.TensorProtoDataType.COMPLEX128,
    "BFloat16": _C_onnx.TensorProtoDataType.BFLOAT16,
    "Undefined": _C_onnx.TensorProtoDataType.UNDEFINED,
}

# Deprecated. Internally use _type_utils.ScalarType
scalar_name_to_pytorch = {
    "uint8_t": "Byte",
    "int8_t": "Char",
    "double": "Double",
    "float": "Float",
    "half": "Half",
    "int": "Int",
    "int64_t": "Long",
    "int16_t": "Short",
    "bool": "Bool",
    "complex64": "ComplexFloat",
    "complex128": "ComplexDouble",
    "qint8": "QInt8",
    "quint8": "QUInt8",
    "qint32": "QInt32",
    "bfloat16": "BFloat16",
}


# Deprecated. Internally use _type_utils.ScalarType
# This indicates each scalar type's corresponding
# torch type. Related source:
# https://github.com/pytorch/pytorch/blob/344defc9733a45fee8d0c4d3f5530f631e823196/c10/core/ScalarType.h
scalar_type_to_pytorch_type = [
    torch.uint8,  # 0
    torch.int8,  # 1
    torch.short,  # 2
    torch.int,  # 3
    torch.int64,  # 4
    torch.half,  # 5
    torch.float,  # 6
    torch.double,  # 7
    torch.complex32,  # 8
    torch.complex64,  # 9
    torch.complex128,  # 10
    torch.bool,  # 11
    torch.qint8,  # 12
    torch.quint8,  # 13
    torch.qint32,  # 14
    torch.bfloat16,  # 15
]

# Deprecated. Internally use _type_utils.ScalarType
# source of truth is
# https://github.com/pytorch/pytorch/blob/master/torch/csrc/utils/tensor_dtypes.cpp
pytorch_name_to_type = {
    "Byte": torch.uint8,
    "Char": torch.int8,
    "Double": torch.double,
    "Float": torch.float,
    "Half": torch.half,
    "Int": torch.int,
    "Long": torch.int64,
    "Short": torch.short,
    "Bool": torch.bool,
    "ComplexFloat": torch.complex64,
    "ComplexDouble": torch.complex128,
    "QInt8": torch.qint8,
    "QUInt8": torch.quint8,
    "QInt32": torch.qint32,
    "BFloat16": torch.bfloat16,
}


# Deprecated. Internally use _type_utils.ScalarType
scalar_type_to_onnx = [
    cast_pytorch_to_onnx["Byte"],  # 0
    cast_pytorch_to_onnx["Char"],  # 1
    cast_pytorch_to_onnx["Short"],  # 2
    cast_pytorch_to_onnx["Int"],  # 3
    cast_pytorch_to_onnx["Long"],  # 4
    cast_pytorch_to_onnx["Half"],  # 5
    cast_pytorch_to_onnx["Float"],  # 6
    cast_pytorch_to_onnx["Double"],  # 7
    cast_pytorch_to_onnx["Undefined"],  # 8
    cast_pytorch_to_onnx["ComplexFloat"],  # 9
    cast_pytorch_to_onnx["ComplexDouble"],  # 10
    cast_pytorch_to_onnx["Bool"],  # 11
    cast_pytorch_to_onnx["Char"],  # 12
    cast_pytorch_to_onnx["Byte"],  # 13
    cast_pytorch_to_onnx["Int"],  # 14
    cast_pytorch_to_onnx["BFloat16"],  # 15
]

# Global set to store the list of quantized operators in the network.
# This is currently only used in the conversion of quantized ops from PT -> C2 via ONNX.
_quantized_ops: Set[int] = set()
venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset11.py
ADDED
@@ -0,0 +1,1650 @@
"""This file exports ONNX ops for opset 11."""
|
2 |
+
from __future__ import annotations
|
3 |
+
|
4 |
+
import functools
|
5 |
+
import sys
|
6 |
+
import warnings
|
7 |
+
from typing import Optional, Sequence
|
8 |
+
|
9 |
+
import torch
|
10 |
+
from torch import _C
|
11 |
+
from torch._C import _onnx as _C_onnx
|
12 |
+
from torch.onnx import (
|
13 |
+
_type_utils,
|
14 |
+
errors,
|
15 |
+
symbolic_helper,
|
16 |
+
symbolic_opset10 as opset10,
|
17 |
+
symbolic_opset9 as opset9,
|
18 |
+
utils,
|
19 |
+
)
|
20 |
+
from torch.onnx._globals import GLOBALS
|
21 |
+
from torch.onnx._internal import _beartype, jit_utils, registration
|
22 |
+
|
23 |
+
# EDITING THIS FILE? READ THIS FIRST!
|
24 |
+
# see Note [Edit Symbolic Files] in README.md
|
25 |
+
|
26 |
+
__all__ = [
|
27 |
+
"add",
|
28 |
+
"append",
|
29 |
+
"arange",
|
30 |
+
"argsort",
|
31 |
+
"atleast_1d",
|
32 |
+
"atleast_2d",
|
33 |
+
"atleast_3d",
|
34 |
+
"cat",
|
35 |
+
"chunk",
|
36 |
+
"clamp_max",
|
37 |
+
"clamp_min",
|
38 |
+
"clamp",
|
39 |
+
"constant_pad_nd",
|
40 |
+
"cumsum",
|
41 |
+
"Delete",
|
42 |
+
"embedding_bag",
|
43 |
+
"embedding_renorm",
|
44 |
+
"flatten",
|
45 |
+
"gather",
|
46 |
+
"hardtanh",
|
47 |
+
"hstack",
|
48 |
+
"im2col",
|
49 |
+
"index_fill",
|
50 |
+
"index",
|
51 |
+
"index_copy",
|
52 |
+
"index_put",
|
53 |
+
"insert",
|
54 |
+
"linalg_det",
|
55 |
+
"linalg_vector_norm",
|
56 |
+
"logdet",
|
57 |
+
"masked_scatter",
|
58 |
+
"masked_select",
|
59 |
+
"mm",
|
60 |
+
"narrow",
|
61 |
+
"normal",
|
62 |
+
"pad",
|
63 |
+
"pixel_shuffle",
|
64 |
+
"pop",
|
65 |
+
"prim_constant_chunk",
|
66 |
+
"reflection_pad",
|
67 |
+
"relu6",
|
68 |
+
"remainder",
|
69 |
+
"replication_pad",
|
70 |
+
"round",
|
71 |
+
"scatter",
|
72 |
+
"select",
|
73 |
+
"size",
|
74 |
+
"sort",
|
75 |
+
"split_with_sizes",
|
76 |
+
"split",
|
77 |
+
"squeeze",
|
78 |
+
"stack",
|
79 |
+
"topk",
|
80 |
+
"unbind",
|
81 |
+
"unique_dim",
|
82 |
+
"unsqueeze",
|
83 |
+
"vstack",
|
84 |
+
]
|
85 |
+
|
86 |
+
_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=11)
|
87 |
+
|
88 |
+
|
89 |
+
def _apply_params(*args, **kwargs):
|
90 |
+
"""Returns a decorator that calls the decorated (higher-order) function with the given parameters."""
|
91 |
+
|
92 |
+
def _apply(fn):
|
93 |
+
return fn(*args, **kwargs)
|
94 |
+
|
95 |
+
return _apply
|
96 |
+
|
97 |
+
|
98 |
+
@_onnx_symbolic("aten::hardtanh")
@symbolic_helper.quantized_args(True)
@symbolic_helper.parse_args("v", "f", "f")
@_beartype.beartype
def hardtanh(g: jit_utils.GraphContext, self: _C.Value, min_val: float, max_val: float):
    scalar_type = _type_utils.JitScalarType.from_value(
        self, _type_utils.JitScalarType.FLOAT
    )
    min_val = g.op(
        "Constant",
        value_t=torch.tensor(min_val, dtype=scalar_type.dtype()),
    )
    max_val = g.op(
        "Constant",
        value_t=torch.tensor(max_val, dtype=scalar_type.dtype()),
    )
    return opset9._op_with_optional_float_cast(
        g, "Clip", self, min_val, max_val, opset_before=12
    )


@_onnx_symbolic("aten::clamp")
@_beartype.beartype
def clamp(g: jit_utils.GraphContext, self, min, max):
    @_beartype.beartype
    def _cast_if_not_none(tensor, dtype):
        if tensor is not None and not symbolic_helper._is_none(tensor):
            return g.op(
                "Cast",
                tensor,
                to_i=dtype.onnx_type(),
            )
        else:
            return tensor

    scalar_type = _type_utils.JitScalarType.from_value(
        self, _type_utils.JitScalarType.UNDEFINED
    )
    if scalar_type != _type_utils.JitScalarType.UNDEFINED:
        min = _cast_if_not_none(min, scalar_type)
        max = _cast_if_not_none(max, scalar_type)

    if symbolic_helper._is_none(min):
        return clamp_max(g, self, max)
    elif symbolic_helper._is_none(max):
        return clamp_min(g, self, min)
    else:
        if (
            symbolic_helper._get_tensor_rank(min) == 0
            and symbolic_helper._get_tensor_rank(max) == 0
        ):
            return opset9._op_with_optional_float_cast(
                g, "Clip", self, min, max, opset_before=12
            )
        else:
            return clamp_max(g, clamp_min(g, self, min), max)


@_onnx_symbolic("aten::clamp_min")
@symbolic_helper.parse_args("v", "v")
@_beartype.beartype
def clamp_min(g: jit_utils.GraphContext, self, min):
    min = g.op("Cast", min, to_i=_type_utils.JitScalarType.from_value(self).onnx_type())
    if symbolic_helper._get_tensor_rank(min) == 0:
        max = opset9.unused(g)
        return opset9._op_with_optional_float_cast(
            g, "Clip", self, min, max, opset_before=12
        )
    else:
        return opset9._op_with_optional_float_cast(g, "Max", self, min, opset_before=12)


@_onnx_symbolic("aten::clamp_max")
@symbolic_helper.parse_args("v", "v")
@_beartype.beartype
def clamp_max(g: jit_utils.GraphContext, self, max):
    max = g.op("Cast", max, to_i=_type_utils.JitScalarType.from_value(self).onnx_type())
    if symbolic_helper._get_tensor_rank(max) == 0:
        min = opset9.unused(g)
        return opset9._op_with_optional_float_cast(
            g, "Clip", self, min, max, opset_before=12
        )
    else:
        return opset9._op_with_optional_float_cast(g, "Min", self, max, opset_before=12)
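# --- Illustrative sketch (not part of the file above): why clamp() above
# decomposes into a Min/Max chain. ONNX Clip only accepts scalar (rank-0)
# bounds, so elementwise tensor bounds are expressed as max(min(x, hi), lo)
# instead, which is what clamp_max(clamp_min(...)) builds:
#
# import torch
#
# x = torch.tensor([-2.0, 0.0, 5.0])
# lo = torch.tensor([-1.0, -1.0, -1.0])
# hi = torch.tensor([1.0, 2.0, 3.0])
# assert torch.equal(
#     torch.minimum(torch.maximum(x, lo), hi), torch.clamp(x, lo, hi)
# )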
@_onnx_symbolic("aten::relu6")
@_beartype.beartype
def relu6(g: jit_utils.GraphContext, input):
    scalar_type = _type_utils.JitScalarType.from_value(
        input, _type_utils.JitScalarType.FLOAT
    )
    min_val = g.op(
        "Constant",
        value_t=torch.tensor(0, dtype=scalar_type.dtype()),
    )
    max_val = g.op(
        "Constant",
        value_t=torch.tensor(6, dtype=scalar_type.dtype()),
    )
    return clamp(g, input, min_val, max_val)


@_onnx_symbolic("aten::select")
# Opset 11 gather accepts negative indices
@symbolic_helper.quantized_args(True)
@symbolic_helper.parse_args("v", "i", "v")
@_beartype.beartype
def select(g: jit_utils.GraphContext, self, dim, index):
    return g.op("Gather", self, index, axis_i=dim)
@_onnx_symbolic("aten::index_put")
@_beartype.beartype
def index_put(
    g: jit_utils.GraphContext, self, indices_list_value, values, accumulate=False
):
    if symbolic_helper._is_packed_list(indices_list_value):
        indices_list = symbolic_helper._unpack_list(indices_list_value)
    else:
        indices_list = [indices_list_value]
    if symbolic_helper.is_caffe2_aten_fallback():
        args = [self] + indices_list + [values, accumulate]
        return g.at("index_put", *args)

    accumulate = symbolic_helper._parse_arg(accumulate, "b")

    if len(indices_list) == 0:
        return values

    if len(indices_list) > 1:
        for idx_ in range(len(indices_list)):
            if symbolic_helper._is_bool(indices_list[idx_]):
                indices_list[idx_] = g.op("NonZero", indices_list[idx_])
        index = indices_list[0]

        for ind in indices_list[1:]:
            index = opset9.add(g, index, ind)
        broadcast_index_shape = g.op("Shape", index)
        indices_list = [
            symbolic_helper._unsqueeze_helper(
                g, opset9.expand(g, ind, broadcast_index_shape, None), [-1]
            )
            for ind in indices_list
        ]
        index = g.op("Concat", *indices_list, axis_i=-1)
    else:
        # Replace the index_put node with masked_scatter or masked_fill
        # when the inputs to the index_put node contain a single boolean input.
        #
        # index_put -> masked_fill
        #   * input index contains a single tensor of Bool type (e.g.: %24 <- %23).
        #   * input value contains a single element (e.g.: %18).
        #
        # Torch IR
        #   %mask : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = aten::clone(%0, %6)
        #   %16 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) =
        #               aten::to(%8, %26, %27, %11, %12, %28, %29, %15)
        #   %18 : Float(requires_grad=0, device=cpu) = prim::Constant[value={1}]()
        #   %23 : Bool(8, strides=[1], device=cpu) = aten::view(%16, %22)
        #   %24 : Tensor?[] = prim::ListConstruct(%23)
        #   %25 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) =
        #                aten::index_put(%mask, %24, %18, %30)
        #   return (%25)
        #
        # index_put -> masked_scatter
        #   * input index contains a single tensor of Bool type (e.g.: %32 <- %31).
        #   * input value contains multiple elements (e.g.: %28).
        #
        # Torch IR
        #   %mask : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = aten::clone(%0, %6)
        #   %28 : Float(8, strides=[1], requires_grad=0, device=cpu)
        #               = prim::Constant[value= 1 1 1 1 1 1 1 1 [ CPUFloatType{8} ]]()
        #   %15 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
        #               = aten::ne(%mask, %some_const)
        #   %23 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
        #               = aten::to(%15, %34, %35, %18, %19, %36, %37, %22)
        #   %38 : Long(requires_grad=0, device=cpu) = prim::Constant[value={0}]()
        #   %30 : int[] = prim::Constant[value=[-1]]()
        #   %31 : Bool(8, strides=[1], device=cpu) = aten::view(%23, %30)
        #   %32 : Tensor?[] = prim::ListConstruct(%31)
        #   %33 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
        #               = aten::index_put(%mask, %32, %28, %38)
        #   return (%33)
        index = indices_list[0]
        bool_inp = index
        if symbolic_helper._is_bool(bool_inp):
            rank = symbolic_helper._get_tensor_rank(values)
            if rank is not None and rank == 0:
                return opset9.masked_fill(g, self, bool_inp, values)
            mask_rank = symbolic_helper._get_tensor_rank(bool_inp)
            self_rank = symbolic_helper._get_tensor_rank(self)
            if (
                mask_rank is not None
                and self_rank is not None
                and self_rank > mask_rank
            ):
                # Unsqueeze 'bool_inp' to be broadcastable to shape of 'self'.
                bool_inp = symbolic_helper._unsqueeze_helper(
                    g, bool_inp, list(range(mask_rank, self_rank))
                )
            return masked_scatter(g, self, bool_inp, values)
        broadcast_index_shape = g.op("Shape", index)
        index = symbolic_helper._unsqueeze_helper(g, index, [-1])
    sub_data_shape = symbolic_helper._slice_helper(
        g, g.op("Shape", self), axes=[0], starts=[len(indices_list)], ends=[sys.maxsize]
    )
    values_shape = g.op("Concat", broadcast_index_shape, sub_data_shape, axis_i=0)
    # Check if values is a singular value and expand accordingly.
    rank = symbolic_helper._get_tensor_rank(values)
    if rank is not None and rank == 0:
        values = opset9.expand(g, values, values_shape, None)
    values = symbolic_helper._reshape_helper(g, values, values_shape)

    self_scalar_type = _type_utils.JitScalarType.from_value(
        self, _type_utils.JitScalarType.UNDEFINED
    )
    if self_scalar_type != _type_utils.JitScalarType.UNDEFINED:
        values_scalar_type = _type_utils.JitScalarType.from_value(
            values, _type_utils.JitScalarType.UNDEFINED
        )
        if self_scalar_type != values_scalar_type:
            values = g.op("Cast", values, to_i=self_scalar_type.onnx_type())
    elif accumulate:
        raise errors.SymbolicValueError("self does not have a valid scalar type.", self)

    if accumulate:
        zeros = g.op(
            "ConstantOfShape",
            g.op("Shape", self),
            value_t=torch.tensor([0], dtype=self_scalar_type.dtype()),
        )
        result = g.op("ScatterND", zeros, index, values)
        result = add(g, self, result)
    else:
        result = g.op("ScatterND", self, index, values)

    return result
@_onnx_symbolic("aten::pixel_shuffle")
|
340 |
+
@symbolic_helper.parse_args("v", "i")
|
341 |
+
@_beartype.beartype
|
342 |
+
def pixel_shuffle(g: jit_utils.GraphContext, self, upscale_factor):
|
343 |
+
rank = symbolic_helper._get_tensor_rank(self)
|
344 |
+
if rank is not None and rank != 4:
|
345 |
+
return symbolic_helper._unimplemented("pixel_shuffle", "only support 4d input")
|
346 |
+
return g.op("DepthToSpace", self, blocksize_i=upscale_factor, mode_s="CRD")


@_onnx_symbolic(
    "aten::upsample_nearest1d",
    decorate=[_apply_params("upsample_nearest1d", 3, "nearest")],
)
@_onnx_symbolic(
    "aten::upsample_nearest2d",
    decorate=[_apply_params("upsample_nearest2d", 4, "nearest")],
)
@_onnx_symbolic(
    "aten::upsample_nearest3d",
    decorate=[_apply_params("upsample_nearest3d", 5, "nearest")],
)
@_onnx_symbolic(
    "aten::upsample_linear1d",
    decorate=[_apply_params("upsample_linear1d", 3, "linear")],
)
@_onnx_symbolic(
    "aten::upsample_bilinear2d",
    decorate=[_apply_params("upsample_bilinear2d", 4, "linear")],
)
@_onnx_symbolic(
    "aten::upsample_trilinear3d",
    decorate=[_apply_params("upsample_trilinear3d", 5, "linear")],
)
@_onnx_symbolic(
    "aten::upsample_bicubic2d",
    decorate=[_apply_params("upsample_bicubic2d", 4, "cubic")],
)
@_beartype.beartype
def _interpolate(name: str, dim: int, interpolate_mode: str):
    return symbolic_helper._interpolate_helper(name, dim, interpolate_mode)


@_onnx_symbolic("aten::__interpolate")
@symbolic_helper.quantized_args(True, False, False, False, False, False, False)
@_beartype.beartype
def __interpolate(
    g: jit_utils.GraphContext,
    input,
    size,
    scale_factor,
    mode,
    align_corners,
    recompute_scale_factor,
    antialias,
):
    return symbolic_helper.__interpolate_helper(
        g, input, size, scale_factor, mode, align_corners, recompute_scale_factor
    )


@_onnx_symbolic("aten::gather")
@symbolic_helper.parse_args("v", "i", "v", "v")
@_beartype.beartype
def gather(g: jit_utils.GraphContext, self, dim, index, sparse_grad=False):
    if symbolic_helper._maybe_get_const(sparse_grad, "i"):
        return symbolic_helper._unimplemented("gather", "sparse_grad == True")
    if symbolic_helper.is_caffe2_aten_fallback():
        return g.at("gather", self, dim, index, sparse_grad)
    return g.op("GatherElements", self, index, axis_i=dim)


@_onnx_symbolic("aten::scatter")
@symbolic_helper.parse_args("v", "i", "v", "v")
@_beartype.beartype
def scatter(g: jit_utils.GraphContext, self, dim, index, src):
    if symbolic_helper.is_caffe2_aten_fallback():
        return g.at("scatter", self, dim, index, src, overload_name="src")
    src_type = _type_utils.JitScalarType.from_value(src)
    src = symbolic_helper._maybe_get_scalar(src)
    if symbolic_helper._is_value(src):
        return g.op("ScatterElements", self, index, src, axis_i=dim)
    else:
        # Check if scalar "src" has same type as self (PyTorch allows different
        # type for scalar src (but not when src is tensor)). If not, insert Cast node.
        if _type_utils.JitScalarType.from_value(self) != src_type:
            src = g.op(
                "Cast",
                src,
                to_i=_type_utils.JitScalarType.from_value(self).onnx_type(),
            )
        return g.op(
            "ScatterElements", self, index, opset9.expand_as(g, src, index), axis_i=dim
        )


@_onnx_symbolic("aten::cumsum")
@symbolic_helper.parse_args("v", "i", "none")
@_beartype.beartype
def cumsum(g: jit_utils.GraphContext, self, dim, dtype=None):
    dim_tensor = g.op("Constant", value_t=torch.tensor(dim, dtype=torch.int))
    if dtype and dtype.node().kind() != "prim::Constant":
        parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype")
        cast = g.op(
            "Cast", self, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type()
        )
    else:
        cast = self
    csum = g.op("CumSum", cast, dim_tensor)
    return csum


@_onnx_symbolic("aten::masked_select")
@_beartype.beartype
def masked_select(g: jit_utils.GraphContext, self, mask):
    index = opset9.nonzero(g, opset9.expand_as(g, mask, self))
    return g.op("GatherND", self, index)


@_onnx_symbolic("aten::masked_scatter")
@_beartype.beartype
def masked_scatter(g: jit_utils.GraphContext, self, mask, source):
    index = opset9.nonzero(g, opset9.expand_as(g, mask, self))
    # NOTE: source can have more elements than needed.
    # It could also have arbitrary shape.
    # This is not supported by ONNX::ScatterND, so we need to flatten and slice source tensor.
    source = symbolic_helper._reshape_helper(g, source, torch.LongTensor([-1]))
    source = symbolic_helper._slice_helper(
        g,
        source,
        axes=torch.LongTensor([0]),
        starts=torch.LongTensor([0]),
        ends=opset9.size(g, index, torch.LongTensor([0])),
    )
    return g.op("ScatterND", self, index, source)


@_onnx_symbolic("aten::len")
@_beartype.beartype
def _len(g: jit_utils.GraphContext, self):
    if (
        symbolic_helper._is_tensor_list(self)
        or self.node().kind() == "onnx::SplitToSequence"
    ):
        return g.op("SequenceLength", self)
    sz_0 = size(g, self, g.op("Constant", value_t=torch.LongTensor([0])))
    return symbolic_helper._squeeze_helper(g, sz_0, [0])


@_onnx_symbolic("aten::__getitem_")
@_beartype.beartype
def __getitem_(g: jit_utils.GraphContext, self, i):
    if symbolic_helper._is_tensor_list(self):
        # SequenceAt requires that the input be a List of Tensors
        return g.op("SequenceAt", self, i)
    else:
        from torch.onnx.symbolic_opset9 import __getitem_ as getitem

        return getitem(g, self, i)


@_onnx_symbolic("aten::_set_item")
@_beartype.beartype
def _set_item(g: jit_utils.GraphContext, tensor_list, i, v):
    tensor_list = g.op("SequenceErase", tensor_list, i)
    return g.op("SequenceInsert", tensor_list, v, i)


@_onnx_symbolic("aten::append")
@_beartype.beartype
def append(g: jit_utils.GraphContext, self, tensor):
    return g.op("SequenceInsert", self, tensor)


@_onnx_symbolic("aten::add")
@_beartype.beartype
def add(g: jit_utils.GraphContext, self, other, alpha=None):
    if symbolic_helper._is_value(self) and symbolic_helper._is_tensor_list(self):
        tensor_list_node = other.node()
        if tensor_list_node.kind() != "prim::ListConstruct":
            return symbolic_helper._unimplemented(
                "add", "does not support adding dynamic tensor list to another"
            )
        tensors = symbolic_helper._unpack_list(other)
        l = self
        for t in tensors:
            l = g.op("SequenceInsert", l, t)
        return l

    return opset9.add(g, self, other, alpha)


@_onnx_symbolic("aten::insert")
@_beartype.beartype
def insert(g: jit_utils.GraphContext, self, pos, tensor):
    return g.op("SequenceInsert", self, tensor, pos)


@_onnx_symbolic("aten::pop")
@_beartype.beartype
def pop(g: jit_utils.GraphContext, tensor_list, dim):
    return g.op("SequenceErase", tensor_list, dim)


@_onnx_symbolic("aten::Delete")
@_beartype.beartype
def Delete(g: jit_utils.GraphContext, tensor_list, dim):
    return g.op("SequenceErase", tensor_list, dim)


@_onnx_symbolic("aten::cat")
@symbolic_helper.quantized_args(True)
@_beartype.beartype
def cat(g: jit_utils.GraphContext, tensor_list, dim):
    if symbolic_helper._is_packed_list(tensor_list):
        return opset9.cat(g, tensor_list, dim)
    else:
        dim = symbolic_helper._get_const(dim, "i", "dim")
        return g.op("ConcatFromSequence", tensor_list, axis_i=dim)


@_onnx_symbolic("aten::stack")
@_beartype.beartype
def stack(g: jit_utils.GraphContext, tensor_list, dim):
    if symbolic_helper._is_packed_list(tensor_list):
        return opset9.stack(g, tensor_list, dim)
    else:
        dim = symbolic_helper._get_const(dim, "i", "dim")
        return g.op("ConcatFromSequence", tensor_list, axis_i=dim, new_axis_i=1)


@_onnx_symbolic("aten::_unique2")
@symbolic_helper.parse_args("v", "i", "i", "i")
@_beartype.beartype
def _unique2(g: jit_utils.GraphContext, self, sorted, return_inverse, return_counts):
    u, indices, inverse_indices, counts = g.op(
        "Unique", self, sorted_i=sorted, outputs=4
    )
    return u, inverse_indices, counts


@_onnx_symbolic("aten::unique_dim")
@symbolic_helper.parse_args("v", "i", "i", "i", "i")
@_beartype.beartype
def unique_dim(
    g: jit_utils.GraphContext, self, dim, sorted, return_inverse, return_counts
):
    u, indices, inverse_indices, counts = g.op(
        "Unique", self, axis_i=dim, sorted_i=sorted, outputs=4
    )
    return u, inverse_indices, counts


@_onnx_symbolic("aten::topk")
@symbolic_helper.parse_args("v", "v", "i", "i", "i", "none")
@_beartype.beartype
def topk(g: jit_utils.GraphContext, self, k, dim, largest, sorted, out=None):
    return symbolic_helper._topk_helper(
        g, self, k, dim, largest=largest, sorted=sorted, out=out
    )


@_onnx_symbolic("aten::sort")
@symbolic_helper.parse_args("v", "i", "i", "none")
@_beartype.beartype
def sort(g: jit_utils.GraphContext, self, dim, decending, out=None):
    return symbolic_helper._sort_helper(g, self, dim, decending=decending, out=out)


@_onnx_symbolic("aten::argsort")
@symbolic_helper.parse_args("v", "i", "i", "none")
@_beartype.beartype
def argsort(g: jit_utils.GraphContext, self, dim, decending, out=None):
    _, indices = symbolic_helper._sort_helper(
        g, self, dim, decending=decending, out=out
    )
    return indices


@_onnx_symbolic("aten::round")
@symbolic_helper.parse_args("v", "i")
@_beartype.beartype
def round(g: jit_utils.GraphContext, self, decimals=0):
    if not symbolic_helper._is_fp(self):
        return self
    if decimals == 0:
        return g.op("Round", self)
    mul = g.op("Mul", self, g.op("Constant", value_t=torch.tensor(pow(10, decimals))))
    round = g.op("Round", mul)
    return g.op(
        "Mul", round, g.op("Constant", value_t=torch.tensor(pow(10, -1 * decimals)))
    )
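
# Hedged sketch of the decimals != 0 path above (assuming standard PyTorch
# semantics; not part of the original file): rounding to d decimals is lowered
# as Round(x * 10**d) * 10**-d, since ONNX Round has no decimals attribute.
#
#   >>> torch.round(torch.tensor([1.2345]), decimals=2)
#   tensor([1.2300])    # round(123.45) / 100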


@_onnx_symbolic("aten::remainder")
@_beartype.beartype
def remainder(g: jit_utils.GraphContext, input, other):
    if symbolic_helper._is_fp(input) or symbolic_helper._is_fp(other):
        return opset9.remainder(g, input, other)
    return g.op("Mod", input, other, fmod_i=0)


@_onnx_symbolic("aten::split")
@symbolic_helper.parse_args("v", "v", "i", "i")
@_beartype.beartype
def split(g: jit_utils.GraphContext, self, split_size_or_sizes, dim, _outputs=None):
    if not symbolic_helper._is_split_static(split_size_or_sizes, _outputs):
        split_out = g.op("SplitToSequence", self, split_size_or_sizes, axis_i=dim)
        if _outputs is None:
            return split_out
        # Convert to multiple slice nodes iff number of splits and number of outputs are statically known.
        if (
            symbolic_helper._is_packed_list(split_size_or_sizes)
            and len(symbolic_helper._unpack_list(split_size_or_sizes)) == _outputs
        ):
            split_sizes = [
                symbolic_helper._unsqueeze_helper(g, v, [0])
                for v in symbolic_helper._unpack_list(split_size_or_sizes)
            ]
            start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
            axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long))
            res = []
            for i in range(_outputs):
                end = g.op(
                    "Add", start, split_sizes[i]
                )  # split_sizes is a list of the same length as _outputs
                res.append(g.op("Slice", self, start, end, axis))
                start = end
            return res
        return [
            g.op(
                "SequenceAt",
                split_out,
                g.op("Constant", value_t=torch.tensor([i], dtype=torch.long)),
            )
            for i in range(_outputs)
        ]
    else:
        return opset9.split(g, self, split_size_or_sizes, dim, _outputs)
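
# Hedged note (not part of the original file): for dynamic split sizes with a
# statically known _outputs count, the branch above unrolls into _outputs Slice
# nodes by accumulating a running start offset. For example, sizes [2, 3] over
# dim 0 become Slice(0, 2) and Slice(2, 5), matching torch.split:
#
#   >>> x = torch.arange(5)
#   >>> [t.tolist() for t in torch.split(x, [2, 3])]
#   [[0, 1], [2, 3, 4]]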


@_onnx_symbolic("aten::split_with_sizes")
@symbolic_helper.parse_args("v", "v", "i", "i")
@_beartype.beartype
def split_with_sizes(g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None):
    return split(g, self, split_sizes, dim, _outputs)


@_onnx_symbolic("aten::unbind")
@symbolic_helper.parse_args("v", "i", "i")
@_beartype.beartype
def unbind(g: jit_utils.GraphContext, self, dim=0, _outputs=None):
    if _outputs is None:
        return g.op(
            "SplitToSequence",
            self,
            g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)),
            axis_i=dim,
            keepdims_i=0,
        )
    else:
        return opset9.unbind(g, self, dim, _outputs)


@_beartype.beartype
def _prepare_onnx_paddings(g: jit_utils.GraphContext, input, pad):
    """Generate paddings in ONNX order based on pad in pytorch.

    Args:
        input: the input tensor.
        pad: the paddings in pytorch.
            The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ...,
            dim_m_begin, dim_m_end, where m is in range [0, n].
    """
    if (
        not symbolic_helper._is_packed_list(pad)
        and symbolic_helper._is_list(pad)
        and symbolic_helper._is_scalar_list(pad)
    ):
        pad = g.op("ConcatFromSequence", pad, axis_i=0, new_axis_i=1)
    # The desired order of paddings is
    # dim_0_begin, dim_1_begin, ..., dim_0_end, ..., dim_n_end,
    # where n is the dimension of input.
    # The leading dimensions are assumed to need no padding, so extend the
    # "pad" sequence with zeros until it covers every dimension.
    pad_len = opset9.size(g, pad, g.op("Constant", value_t=torch.tensor([0])))
    # Set extension = [0] * (dim * 2 - len(pad))
    rank = symbolic_helper._get_tensor_rank(input)
    if rank is None:
        rank = g.op("Size", g.op("Shape", input))
    else:
        rank = g.op("Constant", value_t=torch.tensor(rank, dtype=torch.int64))
    extension = g.op(
        "Sub",
        g.op("Mul", rank, g.op("Constant", value_t=torch.tensor(2, dtype=torch.int64))),
        pad_len,
    )
    # Concat pad with extension: paddings = [dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, 0, 0, ...]
    # Currently ONNX only supports int64 type for Pad
    pad = g.op("Cast", pad, to_i=_C_onnx.TensorProtoDataType.INT64)
    paddings = g.op(
        "Concat",
        pad,
        g.op(
            "ConstantOfShape", extension, value_t=torch.tensor([0], dtype=torch.int64)
        ),
        axis_i=0,
    )
    # Reshape, reverse the order, and collate first all the beginnings and then all the ends:
    # paddings = [[..., 0, dim_n-1_begin, dim_n_begin],
    #             [..., 0, dim_n-1_end, dim_n_end]]
    # Reshape back to 1-D:
    # paddings = [..., 0, dim_n-1_begin, dim_n_begin, ..., 0, dim_n-1_end, dim_n_end]
    paddings = symbolic_helper._reshape_helper(
        g, paddings, g.op("Constant", value_t=torch.tensor([-1, 2]))
    )
    paddings = g.op("Transpose", opset10.flip(g, paddings, [0]), perm_i=[1, 0])
    paddings = symbolic_helper._reshape_helper(
        g, paddings, g.op("Constant", value_t=torch.tensor([-1]))
    )
    padding_c = g.op("Cast", paddings, to_i=_C_onnx.TensorProtoDataType.INT64)
    return padding_c
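
# Hedged worked example of the reordering above (assuming standard F.pad
# semantics; not part of the original file). For a rank-3 input and
# pad = [1, 2, 3, 4] (last dim: begin=1, end=2; second-to-last: begin=3, end=4):
#
#   pad + extension  -> [1, 2, 3, 4, 0, 0]
#   reshape(-1, 2)   -> [[1, 2], [3, 4], [0, 0]]
#   flip + transpose -> [[0, 3, 1], [0, 4, 2]]
#   reshape(-1)      -> [0, 3, 1, 0, 4, 2]   # ONNX: begins per dim, then ends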


@_onnx_symbolic("aten::constant_pad_nd")
@_beartype.beartype
def constant_pad_nd(g: jit_utils.GraphContext, input, padding, value=None):
    mode = "constant"
    value = symbolic_helper._maybe_get_scalar(value)
    value = symbolic_helper._if_scalar_type_as(value, input)
    pad = _prepare_onnx_paddings(g, input, padding)
    return g.op("Pad", input, pad, value, mode_s=mode)


@_onnx_symbolic("aten::reflection_pad1d")
@_onnx_symbolic("aten::reflection_pad2d")
@_onnx_symbolic("aten::reflection_pad3d")
@_beartype.beartype
def reflection_pad(g: jit_utils.GraphContext, input, padding):
    mode = "reflect"
    paddings = _prepare_onnx_paddings(g, input, padding)
    return g.op("Pad", input, paddings, mode_s=mode)


@_onnx_symbolic("aten::replication_pad1d")
@_onnx_symbolic("aten::replication_pad2d")
@_onnx_symbolic("aten::replication_pad3d")
@_beartype.beartype
def replication_pad(g: jit_utils.GraphContext, input, padding):
    mode = "edge"
    paddings = _prepare_onnx_paddings(g, input, padding)
    return g.op("Pad", input, paddings, mode_s=mode)


@_onnx_symbolic("aten::pad")
@_beartype.beartype
def pad(
    g: jit_utils.GraphContext,
    input: _C.Value,
    pad: _C.Value,
    mode: _C.Value,
    value: _C.Value,
):
    mode = symbolic_helper._parse_arg(mode, "s")
    if mode == "replicate":
        return replication_pad(g, input, pad)
    elif mode == "reflect":
        return reflection_pad(g, input, pad)
    elif mode == "constant":
        return constant_pad_nd(g, input, pad, value)
    elif mode == "circular":
        return opset9._pad_circular(g, input, pad)
    else:
        raise errors.SymbolicValueError(f"Unrecognized padding mode {mode}", input)


@_onnx_symbolic("aten::linalg_det")
@_beartype.beartype
def linalg_det(g: jit_utils.GraphContext, self):
    return g.op("Det", self)


@_onnx_symbolic("aten::logdet")
@_beartype.beartype
def logdet(g: jit_utils.GraphContext, input):
    return opset9.log(g, linalg_det(g, input))


@_onnx_symbolic("aten::arange")
@_beartype.beartype
def arange(g: jit_utils.GraphContext, *args):
    def _get_arange_dtype(dtype):
        dtype = symbolic_helper._maybe_get_const(dtype, "i")
        return dtype

    if len(args) == 2 and all(isinstance(val, int) for val in args):
        # aten::arange(Scalar start, Scalar end)
        dtype = torch.int64
        # Start index.
        start = g.op(
            "Constant",
            value_t=torch.tensor(args[0], dtype=dtype),
        )
        # End (exclusive) index.
        end = g.op(
            "Constant",
            value_t=torch.tensor(args[1], dtype=dtype),
        )
        # Step size from start to end indexes.
        delta_default = g.op(
            "Constant",
            value_t=torch.tensor(1, dtype=dtype),
        )
        return g.op("Range", start, end, delta_default)
    elif len(args) == 2 or len(args) == 5:
        if len(args) == 2:
            # aten::arange(Scalar end, Tensor out)
            dtype = None
        else:
            # aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)
            dtype = _get_arange_dtype(args[1])
        type_, end, start, step = symbolic_helper._arange_cast_helper(
            g, end=args[0], dtype=dtype
        )
        start_default = g.op(
            "Constant",
            value_t=torch.tensor(0, dtype=type_.dtype()),
        )
        delta_default = g.op(
            "Constant",
            value_t=torch.tensor(1, dtype=type_.dtype()),
        )
        return g.op("Range", start_default, end, delta_default)
    elif len(args) == 4 or len(args) == 7:
        if len(args) == 4:
            # aten::arange(Scalar start, Scalar end, Scalar step, Tensor out)
            dtype = None
        else:
            # aten::arange(Scalar start, Scalar end, Scalar step, ScalarType dtype, Layout, Device, bool pin_memory)
            dtype = _get_arange_dtype(args[3])
        _, end, start, step = symbolic_helper._arange_cast_helper(
            g, start=args[0], end=args[1], step=args[2], dtype=dtype
        )
        return g.op("Range", start, end, step)
    elif len(args) == 6:
        # aten::arange(Scalar start, Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)
        dtype = _get_arange_dtype(args[2])
        type_, end, start, step = symbolic_helper._arange_cast_helper(
            g, start=args[0], end=args[1], dtype=dtype
        )
        delta_default = g.op(
            "Constant",
            value_t=torch.tensor(1, dtype=type_.dtype()),
        )
        return g.op("Range", start, end, delta_default)
    else:
        return symbolic_helper._unimplemented(
            "aten::arange", f"with {len(args)} arguments"
        )


@_onnx_symbolic("aten::_dim_arange")
@symbolic_helper.parse_args("v", "i")
@_beartype.beartype
def _dim_arange(g: jit_utils.GraphContext, like, dim):
    like_shape = g.op("Shape", like)
    stop = g.op(
        "Gather", like_shape, g.op("Constant", value_t=torch.tensor(dim)), axis_i=0
    )
    if symbolic_helper.is_caffe2_aten_fallback():
        return g.op("_caffe2::Range", stop)
    return arange(g, stop, 4, None, None, None)


@_onnx_symbolic("aten::size")
@symbolic_helper.quantized_args(True, quantize_output=False)
@_beartype.beartype
def size(g: jit_utils.GraphContext, self, dim=None):
    if dim is None:
        return g.op("Shape", self)
    return symbolic_helper._size_helper(g, self, dim)


@_onnx_symbolic("aten::squeeze")
@_beartype.beartype
def squeeze(g: jit_utils.GraphContext, self, dim=None):
    if dim is None:
        return g.op("Squeeze", self)

    # dim as a tensor
    if not symbolic_helper._is_constant(dim):
        return symbolic_helper._squeeze_helper(g, self, [dim])

    dim = symbolic_helper._get_const(dim, "i", "dim")

    input_rank = symbolic_helper._get_tensor_rank(self)
    adjusted_dim = dim
    if input_rank is not None and dim < 0:
        adjusted_dim += input_rank
    dim_size = symbolic_helper._get_tensor_dim_size(self, adjusted_dim)
    if (dim < 0 and input_rank is None) or dim_size is None:
        # If onnx shape inference is not on, export always as dynamic,
        # because we cannot tell if the observed static shape is also static at runtime.
        # Create "cond" node (condition is shape[i] == 1).
        dim_constant = g.op("Constant", value_t=torch.tensor([dim]))
        size = symbolic_helper._size_helper(g, self, dim_constant)
        const_one = g.op("Constant", value_t=torch.ones(1, dtype=torch.int64))
        cond = g.op("Equal", size, const_one)
        # Create the "If" node and add the "then" and "else" blocks to it.
        if_op, (if_context, else_context), _ = jit_utils.add_op_with_blocks(
            g, "If", cond, n_blocks=2
        )
        squeeze_ = symbolic_helper._squeeze_helper(if_context, self, [dim])
        utils._add_output_to_block(if_context.block, squeeze_)
        identity_ = else_context.op("Identity", self)
        utils._add_output_to_block(else_context.block, identity_)
        return if_op

    # For static input shape
    dim = adjusted_dim
    if dim_size > 1:
        warnings.warn(
            "This model contains a squeeze operation on dimension "
            + str(dim)
            + ". The size of "
            + "this dimension in the given input is "
            + str(dim_size)
            + ". The model will "
            + "be exported without the squeeze node. If the model is intended to be used with dynamic "
            + "input shapes, please export with dynamic_axes argument."
        )
        return self
    return symbolic_helper._squeeze_helper(g, self, [dim])


@_onnx_symbolic("aten::unsqueeze")
@_beartype.beartype
def unsqueeze(g: jit_utils.GraphContext, self, dim):
    if symbolic_helper._is_constant(dim):
        dim = symbolic_helper._get_const(dim, "i", "dim")

    return symbolic_helper._unsqueeze_helper(g, self, [dim])


@_onnx_symbolic("aten::mm")
@_beartype.beartype
def mm(g: jit_utils.GraphContext, self, other):
    return g.op("Gemm", self, other, beta_f=0.0, alpha_f=1.0)


@_onnx_symbolic("aten::index")
@_beartype.beartype
def index(g: jit_utils.GraphContext, self, index):
    if symbolic_helper.is_caffe2_aten_fallback():
        return g.at("index", self, index, overload_name="Tensor")

    if symbolic_helper._is_packed_list(index):
        indices = symbolic_helper._unpack_list(index)
    else:
        indices = [index]

    # Handle single mask index.
    if len(indices) == 1:
        index = indices[0]
        if not symbolic_helper._is_none(index) and (
            symbolic_helper._is_bool(index)
            or _type_utils.JitScalarType.from_value(index)
            == _type_utils.JitScalarType.UINT8
        ):
            index = opset9.nonzero(g, index)
            return g.op("GatherND", self, index)
    return opset9.index(g, self, index)


@_onnx_symbolic("aten::index_fill")
@_beartype.beartype
def index_fill(g: jit_utils.GraphContext, self, dim, index, value):
    dim_value = symbolic_helper._parse_arg(dim, "i")
    if symbolic_helper.is_caffe2_aten_fallback():
        return g.at(
            "index_fill",
            self,
            index,
            value,
            overload_name="int_Scalar",
            dim_i=dim_value,
        )

    expanded_index_shape, expanded_index = symbolic_helper._index_fill_reshape_helper(
        g, self, dim, index
    )
    value = symbolic_helper._maybe_get_scalar(value)
    value = symbolic_helper._if_scalar_type_as(value, self)
    expanded_value = opset9.expand(g, value, expanded_index_shape, None)
    return scatter(g, self, dim, expanded_index, expanded_value)


@_onnx_symbolic("aten::index_copy")
@_beartype.beartype
def index_copy(g: jit_utils.GraphContext, self, dim, index, source):
    dim_value = symbolic_helper._parse_arg(dim, "i")
    if symbolic_helper.is_caffe2_aten_fallback():
        return g.at("index_copy", self, index, source, dim_i=dim_value)
    expanded_index_shape, expanded_index = symbolic_helper._index_fill_reshape_helper(
        g, self, dim, index
    )
    return scatter(g, self, dim, expanded_index, source)


@_onnx_symbolic("aten::__rshift_")
@_beartype.beartype
def __rshift_(g: jit_utils.GraphContext, self, other):
    # Make sure to cast other to self's type
    # (when self is long, make sure that other is not float).
    if _type_utils.JitScalarType.from_value(
        other, _type_utils.JitScalarType.UNDEFINED
    ) != _type_utils.JitScalarType.from_value(self):
        other = g.op(
            "Cast",
            other,
            to_i=_type_utils.JitScalarType.from_value(self).onnx_type(),
        )

    if (
        _type_utils.JitScalarType.from_value(self, _type_utils.JitScalarType.UNDEFINED)
        == _type_utils.JitScalarType.UINT8
    ):
        return g.op("BitShift", self, other, direction_s="RIGHT")

    two = g.op("Constant", value_t=torch.tensor(2, dtype=torch.float32))
    # The exponent (same type as self) has to be float or double in onnx::Pow.
    if not symbolic_helper._is_fp(self):
        other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.FLOAT)
    two_pow = g.op("Pow", two, other)
    two_pow = g.op(
        "Cast",
        two_pow,
        to_i=_type_utils.JitScalarType.from_value(self).onnx_type(),
    )
    rshift = g.op("Div", self, two_pow)
    return rshift
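
# Hedged equivalence used above (not part of the original file): for
# non-uint8 integer types with nonnegative inputs, x >> n equals x // 2**n,
# so the shift is lowered to Pow followed by Div.
#
#   >>> torch.tensor([8, 9]) >> 1
#   tensor([4, 4])            # == [8 // 2, 9 // 2]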


@_onnx_symbolic("aten::__lshift_")
@_beartype.beartype
def __lshift_(g: jit_utils.GraphContext, self, other):
    # Make sure to cast other to self's type
    # (when self is long, make sure that other is not float).
    if _type_utils.JitScalarType.from_value(
        other, _type_utils.JitScalarType.UNDEFINED
    ) != _type_utils.JitScalarType.from_value(self):
        other = g.op(
            "Cast",
            other,
            to_i=_type_utils.JitScalarType.from_value(self).onnx_type(),
        )

    if (
        _type_utils.JitScalarType.from_value(self, _type_utils.JitScalarType.UNDEFINED)
        == _type_utils.JitScalarType.UINT8
    ):
        return g.op("BitShift", self, other, direction_s="LEFT")

    two = g.op("Constant", value_t=torch.tensor(2, dtype=torch.float32))
    # The exponent (same type as self) has to be float or double in onnx::Pow.
    if not symbolic_helper._is_fp(self):
        other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.FLOAT)
    two_pow = g.op("Pow", two, other)
    two_pow = g.op(
        "Cast",
        two_pow,
        to_i=_type_utils.JitScalarType.from_value(self).onnx_type(),
    )
    lshift = g.op("Mul", self, two_pow)
    return lshift


@_beartype.beartype
def _get_im2col_indices_along_dim(
    g: jit_utils.GraphContext, input_d, kernel_size_d, dilation_d, padding_d, stride_d
):
    # Input is always 4-D (N, C, H, W).
    # Calculate the indices of the sliding blocks along a spatial dimension:
    # slide the kernel over the input; each dimension d ranges from 0 to
    # input[d] + 2 * padding[d] - dilation[d] * (kernel_size[d] - 1),
    # with steps of size stride.

    blocks_d = g.op(
        "Add", input_d, g.op("Constant", value_t=torch.tensor(padding_d * 2))
    )
    blocks_d = g.op(
        "Sub",
        blocks_d,
        g.op("Constant", value_t=torch.tensor(dilation_d * (kernel_size_d - 1))),
    )

    # Stride kernel over input and find starting indices along dim d
    blocks_d_indices = g.op(
        "Range",
        g.op("Constant", value_t=torch.tensor(0)),
        blocks_d,
        g.op("Constant", value_t=torch.tensor(stride_d)),
    )

    # Apply dilation on kernel and find its indices along dim d
    kernel_grid = torch.arange(0, kernel_size_d * dilation_d, dilation_d)
    kernel_grid = g.op("Constant", value_t=kernel_grid.unsqueeze(0))

    # Broadcast and add kernel starting positions (indices) with
    # kernel_grid along dim d, to get block indices along dim d
    blocks_d_indices = symbolic_helper._unsqueeze_helper(
        g, blocks_d_indices, [0]
    )  # Reshape to [1, -1]
    kernel_mask = symbolic_helper._reshape_helper(
        g, kernel_grid, g.op("Constant", value_t=torch.tensor([-1, 1]))
    )
    block_mask = g.op("Add", blocks_d_indices, kernel_mask)

    return block_mask
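
# Hedged worked example for the helper above (not part of the original file),
# consistent with the im2col walk-through further below: input_d=3,
# kernel_size_d=2, dilation_d=1, padding_d=0, stride_d=1 gives
#
#   blocks_d         = 3 + 0 - 1 = 2
#   blocks_d_indices = [[0, 1]]          # Range(0, 2, 1), unsqueezed
#   kernel_mask      = [[0], [1]]        # kernel grid reshaped to a column
#   block_mask       = [[0, 1], [1, 2]]  # broadcast Add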


@_beartype.beartype
def _get_im2col_padded_input(g: jit_utils.GraphContext, input, padding_h, padding_w):
    # Input is always 4-D tensor (N, C, H, W).
    # Padding tensor has the following format: (padding_h, padding_w).
    # Reshape the padding to follow ONNX format: (dim1_begin, dim2_begin, ..., dim1_end, dim2_end, ...)
    pad = g.op("Constant", value_t=torch.LongTensor([0, 0, padding_h, padding_w] * 2))
    return g.op("Pad", input, pad)


@_beartype.beartype
def _get_im2col_output_shape(g: jit_utils.GraphContext, input, kernel_h, kernel_w):
    batch_dim = size(g, input, g.op("Constant", value_t=torch.tensor(0)))
    channel_dim = size(g, input, g.op("Constant", value_t=torch.tensor(1)))
    channel_unfolded = g.op(
        "Mul", channel_dim, g.op("Constant", value_t=torch.tensor(kernel_h * kernel_w))
    )

    return g.op(
        "Concat",
        symbolic_helper._unsqueeze_helper(g, batch_dim, [0]),
        symbolic_helper._unsqueeze_helper(g, channel_unfolded, [0]),
        g.op("Constant", value_t=torch.tensor([-1])),
        axis_i=0,
    )


@_onnx_symbolic("aten::im2col")
@symbolic_helper.parse_args("v", "is", "is", "is", "is")
@_beartype.beartype
def im2col(g: jit_utils.GraphContext, input, kernel_size, dilation, padding, stride):
    # Input is always 4-D tensor (N, C, H, W).
    # All other args are int[2].

    input_h = size(g, input, g.op("Constant", value_t=torch.tensor(2)))
    input_w = size(g, input, g.op("Constant", value_t=torch.tensor(3)))

    stride_h, stride_w = stride[0], stride[1]
    padding_h, padding_w = padding[0], padding[1]
    dilation_h, dilation_w = dilation[0], dilation[1]
    kernel_h, kernel_w = kernel_size[0], kernel_size[1]

    blocks_row_indices = _get_im2col_indices_along_dim(
        g, input_h, kernel_h, dilation_h, padding_h, stride_h
    )
    blocks_col_indices = _get_im2col_indices_along_dim(
        g, input_w, kernel_w, dilation_w, padding_w, stride_w
    )

    output_shape = _get_im2col_output_shape(g, input, kernel_h, kernel_w)
    padded_input = _get_im2col_padded_input(g, input, padding_h, padding_w)

    # For a 4-D matrix of size (1, 1, 3, 3) as below with kernel_size=2, stride=1, and dilation=1
    # [[[[1., 2., 3.,],
    #    [4., 5., 6.,],
    #    [7., 8., 9.,]]]]
    # First gather indices along rows (dim=2) with blocks_row_indices = [[0,1], [1,2]] to get:
    # [[[[[1., 2., 3.],
    #     [4., 5., 6.]],
    #    [[4., 5., 6.],
    #     [7., 8., 9.]]]]]
    # And then gather along cols (dim=4) with blocks_col_indices = [[0,1], [1,2]] to get:
    # [[[[[[1., 2.],
    #      [4., 5.]],
    #     [[2., 3.],
    #      [5., 6.]]],
    #    [[[4., 5.],
    #      [7., 8.]],
    #     [[5., 6.],
    #      [8., 9.]]]]]]
    # Transpose dims 3 (depth) and 4 (rows), and then reshape to output shape (1, 1, 4, 4) to get:
    # [[[1., 2., 4., 5.],
    #   [2., 3., 5., 6.],
    #   [4., 5., 7., 8.],
    #   [5., 6., 8., 9.]]]
    output = g.op("Gather", padded_input, blocks_row_indices, axis_i=2)
    output = g.op("Gather", output, blocks_col_indices, axis_i=4)
    output = g.op("Transpose", output, perm_i=[0, 1, 2, 4, 3, 5])
    return symbolic_helper._reshape_helper(g, output, output_shape)


@_onnx_symbolic("aten::narrow")
@_beartype.beartype
def narrow(g: jit_utils.GraphContext, input, dim, start, length):
    end = g.op("Add", start, length)
    return symbolic_helper._slice_helper(g, input, axes=dim, starts=start, ends=end)


@_onnx_symbolic("aten::flatten")
@symbolic_helper.quantized_args(True, False, False)
@symbolic_helper.parse_args("v", "i", "i")
@_beartype.beartype
def flatten(g: jit_utils.GraphContext, input, start_dim, end_dim):
    dim = symbolic_helper._get_tensor_rank(input)
    if dim == 1:
        return input
    # Use ONNX's Flatten operator for cases where the output shape is 2-D.
    if start_dim == 1:
        if end_dim == -1 or (dim is not None and end_dim == dim - 1):
            return g.op("Flatten", input, axis_i=start_dim)
    elif start_dim == 0:
        if end_dim == -2 or (dim is not None and end_dim == dim - 2):
            return g.op("Flatten", input, axis_i=end_dim + 1)
    if dim is None:
        return symbolic_helper._unimplemented(
            "dim",
            "ONNX and PyTorch use different strategies to split the input. "
            "Input rank must be known at export time.",
        )
    # If end_dim is negative, add dim.
    if end_dim < 0:
        end_dim = dim + end_dim

    return symbolic_helper._flatten_helper(g, input, start_dim, end_dim, dim)


@_onnx_symbolic("aten::linalg_vector_norm")
@symbolic_helper.parse_args("v", "f", "is", "b", "v")
@_beartype.beartype
def linalg_vector_norm(
    g: jit_utils.GraphContext,
    self,
    ord,
    dim: Optional[Sequence[int]],
    keepdim: bool,
    dtype,
):
    if ord == 0:
        if dim is None:
            self = symbolic_helper._reshape_helper(
                g, self, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64))
            )
            keepdim = False

        cond_op = g.op(
            "Not", g.op("Equal", self, g.op("Constant", value_t=torch.LongTensor([0])))
        )
        cond_op = g.op(
            "Cast",
            cond_op,
            to_i=_type_utils.JitScalarType.from_value(self).onnx_type(),
        )
        return symbolic_helper._reducesum_helper(
            g, cond_op, axes_i=dim, keepdims_i=keepdim
        )
    else:
        return opset9.linalg_vector_norm(g, self, ord, dim, keepdim, dtype)


@_onnx_symbolic("aten::embedding_bag")
@symbolic_helper.parse_args("v", "v", "v", "i", "i", "i", "v", "i", "i")
@_beartype.beartype
def embedding_bag(
    g: jit_utils.GraphContext,
    embedding_matrix,
    indices,
    offsets,
    scale_grad_by_freq,
    mode,
    sparse,
    per_sample_weights,
    include_last_offset,
    padding_idx,
):
    if scale_grad_by_freq and GLOBALS.export_training:
        return symbolic_helper._onnx_unsupported(
            "embedding_bag with scale_grad_by_freq for training mode"
        )
    if padding_idx is not None and padding_idx >= 0:
        raise RuntimeError("embedding_bag with padding_idx")

    loop_condition = g.op("Constant", value_t=torch.tensor(1))
    loop_condition = g.op("Cast", loop_condition, to_i=_C_onnx.TensorProtoDataType.BOOL)
    zero = g.op("Constant", value_t=torch.tensor([0]))

    indices_len = symbolic_helper._unsqueeze_helper(
        g,
        symbolic_helper._size_helper(
            g, indices, g.op("Constant", value_t=torch.tensor(0))
        ),
        [0],
    )
    if not include_last_offset:
        offsets = [offsets, indices_len]
        offsets = g.op("Concat", *offsets, axis_i=0)

    # Offsets holds the starting index position of each bag. So we create a list of the indices slices (determined by
    # offsets) and gather those indices in indices_row. Then we use this subset of indices to gather from embeddings.
    # The embeddings output is a loop scan output, so we can avoid creating a sequence and inserting elements in.
    offsets_starts = symbolic_helper._slice_helper(
        g, offsets, axes=[0], starts=[0], ends=[sys.maxsize], steps=[1]
    )
    offsets_ends = symbolic_helper._slice_helper(
        g, offsets, axes=[0], starts=[1], ends=[sys.maxsize], steps=[1]
    )

    loop_len = symbolic_helper._size_helper(
        g, offsets_ends, g.op("Constant", value_t=torch.tensor(0))
    )

    loop, (loop_context,), _ = jit_utils.add_op_with_blocks(
        g, "Loop", loop_len, loop_condition, n_blocks=1
    )
    loop_block = loop_context.block

    # FIXME(justinchuby): We need to handle what happens when we call b.op on a node return
    block_input_iter = utils._add_input_to_block(loop_block)
    cond = utils._add_input_to_block(loop_block)

    indices_start = loop_context.op(
        "Gather", offsets_starts, block_input_iter, axis_i=0
    )
    indices_end = loop_context.op("Gather", offsets_ends, block_input_iter, axis_i=0)
    indices_start = symbolic_helper._unsqueeze_helper(loop_context, indices_start, [0])
    indices_end = symbolic_helper._unsqueeze_helper(loop_context, indices_end, [0])

    indices_row = loop_context.op("Slice", indices, indices_start, indices_end, zero)
    embeddings = loop_context.op("Gather", embedding_matrix, indices_row, axis_i=0)
    if not symbolic_helper._is_none(per_sample_weights):
        per_sample_weights_row = loop_context.op(
            "Slice", per_sample_weights, indices_start, indices_end, zero
        )
        per_sample_weights_row = symbolic_helper._unsqueeze_helper(
            loop_context, per_sample_weights_row, [1]
        )
        embeddings = loop_context.op("Mul", embeddings, per_sample_weights_row)
    if mode == 0:
        embeddings = symbolic_helper._reducesum_helper(
            loop_context, embeddings, axes_i=[0], keepdims_i=0
        )
    elif mode == 1:
        embeddings = loop_context.op("ReduceMean", embeddings, axes_i=[0], keepdims_i=0)
    else:
        embeddings = loop_context.op("ReduceMax", embeddings, axes_i=[0], keepdims_i=0)

    cond_out = loop_context.op(
        "Cast", loop_condition, to_i=_C_onnx.TensorProtoDataType.BOOL
    )
    utils._add_output_to_block(loop_block, cond_out)
    utils._add_output_to_block(loop_block, embeddings)

    # aten::embedding_bag returns a tuple of 4 elements: output, offset2bag, bag_size, max_indices.
    # But the last three outputs are not used in torch.nn.EmbeddingBag or torch.nn.functional.embedding_bag.
    return loop.node().output(), None, None, None
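
# Hedged summary of the lowering above (not part of the original file): each
# Loop iteration i slices indices[offsets[i]:offsets[i+1]], gathers those rows
# from embedding_matrix, optionally weights them, and reduces with Sum / Mean /
# Max according to mode (0 / 1 / 2). The per-bag reductions accumulate as the
# Loop's scan output, so no Sequence ops are needed.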


@_onnx_symbolic("aten::embedding_renorm")
@symbolic_helper.parse_args("v", "v", "f", "f")
@_beartype.beartype
def embedding_renorm(g: jit_utils.GraphContext, weight, indices, max_norm, norm_type):
    unique_indices = g.op("Unique", indices)
    partial_weight = g.op("Gather", weight, unique_indices)
    norm_i = int(norm_type)
    if norm_i == 1:
        norm_type = "ReduceL1"
    elif norm_i == 2:
        norm_type = "ReduceL2"
    else:
        raise errors.SymbolicValueError(
            f"Unsupported: ONNX export of embedding_renorm with norm: {norm_i}. "
            "Only 1. and 2. are supported.",
            weight,
        )
    partial_weight_norm = g.op(norm_type, partial_weight, axes_i=[1], keepdims_i=1)
    # https://github.com/pytorch/pytorch/blob/0a07488ed2c47765e337e290bd138c0e6e459cbd/aten/src/ATen/native/Embedding.cpp#L177
    # Add 1e-7 to prevent division by zero.
    partial_weight_norm_ = g.op(
        "Add", partial_weight_norm, g.op("Constant", value_t=torch.tensor(1e-7))
    )
    max_norm = torch.tensor(max_norm)
    scales = g.op("Div", max_norm, partial_weight_norm_)
    partial_weight_renorm = g.op("Mul", partial_weight, scales)
    partial_weight_renorm = g.op(
        "Where",
        g.op("Greater", partial_weight_norm, max_norm),
        partial_weight_renorm,
        partial_weight,
    )
    return g.op(
        "ScatterND",
        weight,
        symbolic_helper._unsqueeze_helper(g, unique_indices, [1]),
        partial_weight_renorm,
    )


@_onnx_symbolic("aten::chunk")
@_beartype.beartype
def chunk(g: jit_utils.GraphContext, self, chunks, dim):
    # Calculate chunk size for dynamic chunk
    dim_size = g.op("Gather", g.op("Shape", self), dim, axis_i=0)
    chunk_size_s = g.op(
        "Sub", chunks, g.op("Constant", value_t=torch.tensor([1], dtype=torch.long))
    )
    chunk_size = g.op("Div", g.op("Add", dim_size, chunk_size_s), chunks)
    # Create splits vector
    chunk_vec = [
        opset9.expand(g, chunk_size, chunk_size_s, None),
        g.op("Sub", dim_size, g.op("Mul", chunk_size, chunk_size_s)),
    ]
    chunk_vec = g.op("Concat", *chunk_vec, axis_i=0)
    return split(g, self, chunk_vec, dim)
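
# Hedged arithmetic check for the dynamic chunking above (not part of the
# original file): chunk_size = ceil(dim_size / chunks) is computed as
# (dim_size + chunks - 1) // chunks. For dim_size=10 and chunks=3 this gives
# chunk_size=4 and splits [4, 4, 10 - 4*2] == [4, 4, 2], matching torch.chunk:
#
#   >>> [t.numel() for t in torch.chunk(torch.arange(10), 3)]
#   [4, 4, 2]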


@_onnx_symbolic("aten::normal")
@_beartype.beartype
def normal(
    g: jit_utils.GraphContext,
    mean,
    std,
    sizes=None,
    generator=None,
    dtype=None,
    layout=None,
    device=None,
    pin_memory=None,
):
    # If you can sample from a given distribution with mean 0 and variance 1,
    # then you can easily sample from a scale-location transformation of that
    # distribution, which has mean μ and variance σ². If x is a sample from a
    # mean 0 and variance 1 distribution, then σx + μ is a sample with mean μ
    # and variance σ².
    if sizes is not None and not symbolic_helper._is_none(sizes):
        mean = opset9.expand(g, mean, sizes, None)
    result = opset9.mul(g, std, g.op("RandomNormalLike", mean))
    return add(g, result, mean)


@_onnx_symbolic("aten::atleast_1d")
@_beartype.beartype
def atleast_1d(g: jit_utils.GraphContext, self: torch._C.Value):
    # NOTE: If it's 0D, reshape to 1D

    # NOTE: self could be a packed list or a tensor
    if symbolic_helper._is_value(self) and symbolic_helper._is_packed_list(self):
        tensor_list = symbolic_helper._unpack_list(self)
        new_tensor_list = []
        for tensor in tensor_list:
            new_tensor = tensor
            tensor_rank = symbolic_helper._get_tensor_rank(tensor)
            if tensor_rank == 0:
                new_tensor = symbolic_helper._reshape_helper(
                    g, new_tensor, g.op("Constant", value_t=torch.tensor([1]))
                )
            new_tensor_list.append(new_tensor)
        return g.op("SequenceConstruct", *new_tensor_list)

    tensor_rank = symbolic_helper._get_tensor_rank(self)
    if tensor_rank == 0:
        self = symbolic_helper._reshape_helper(
            g, self, g.op("Constant", value_t=torch.tensor([1]))
        )
    return self


@_onnx_symbolic("aten::atleast_2d")
@_beartype.beartype
def atleast_2d(g: jit_utils.GraphContext, self: torch._C.Value):
    # NOTE: If it's 0D, reshape to 2D
    #       If it's 1D, unsqueeze to 2D

    # NOTE: self could be a packed list or a tensor
    if symbolic_helper._is_value(self) and symbolic_helper._is_packed_list(self):
        tensor_list = symbolic_helper._unpack_list(self)
        new_tensor_list = []
        for tensor in tensor_list:
            new_tensor = tensor
            tensor_rank = symbolic_helper._get_tensor_rank(tensor)
            if tensor_rank == 0:
                new_tensor = symbolic_helper._reshape_helper(
                    g, new_tensor, g.op("Constant", value_t=torch.tensor([1, 1]))
                )
            elif tensor_rank == 1:
                new_tensor = symbolic_helper._unsqueeze_helper(
                    g, new_tensor, axes_i=[0]
                )
            new_tensor_list.append(new_tensor)
        return g.op("SequenceConstruct", *new_tensor_list)

    tensor_rank = symbolic_helper._get_tensor_rank(self)
    if tensor_rank == 0:
        self = symbolic_helper._reshape_helper(
            g, self, g.op("Constant", value_t=torch.tensor([1, 1]))
        )
    elif tensor_rank == 1:
        self = symbolic_helper._unsqueeze_helper(g, self, axes_i=[0])
    return self


@_onnx_symbolic("aten::atleast_3d")
@_beartype.beartype
def atleast_3d(g: jit_utils.GraphContext, self: torch._C.Value):
    # NOTE: If it's 0D, reshape to 3D
    #       If it's 1D, unsqueeze to 3D
    #       If it's 2D, unsqueeze to 3D

    # NOTE: self could be a packed list or a tensor
    if symbolic_helper._is_value(self) and symbolic_helper._is_packed_list(self):
        tensor_list = symbolic_helper._unpack_list(self)
        new_tensor_list = []
        for tensor in tensor_list:
            new_tensor = tensor
            tensor_rank = symbolic_helper._get_tensor_rank(tensor)
            if tensor_rank == 0:
                new_tensor = symbolic_helper._reshape_helper(
                    g, new_tensor, g.op("Constant", value_t=torch.tensor([1, 1, 1]))
                )
            elif tensor_rank == 1:
                new_tensor = symbolic_helper._unsqueeze_helper(
                    g, new_tensor, axes_i=[0]
                )
                new_tensor = symbolic_helper._unsqueeze_helper(
                    g, new_tensor, axes_i=[-1]
                )
            elif tensor_rank == 2:
                new_tensor = symbolic_helper._unsqueeze_helper(
                    g, new_tensor, axes_i=[-1]
                )
            new_tensor_list.append(new_tensor)
        return g.op("SequenceConstruct", *new_tensor_list)

    tensor_rank = symbolic_helper._get_tensor_rank(self)
    if tensor_rank == 0:
        self = symbolic_helper._reshape_helper(
            g, self, g.op("Constant", value_t=torch.tensor([1, 1, 1]))
        )
    elif tensor_rank == 1:
        self = symbolic_helper._unsqueeze_helper(g, self, axes_i=[0])
        self = symbolic_helper._unsqueeze_helper(g, self, axes_i=[-1])
    elif tensor_rank == 2:
        self = symbolic_helper._unsqueeze_helper(g, self, axes_i=[-1])
    return self


@_onnx_symbolic("prim::ConstantChunk")
@_beartype.beartype
def prim_constant_chunk(g: jit_utils.GraphContext, self, chunks, dim):
    input_shape = g.op("Shape", self)
    axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long))
    input_shape_dim = g.op("Gather", input_shape, axis, axis_i=0)
    start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
    chunk_size = g.op("Constant", value_t=torch.tensor([chunks], dtype=torch.long))
    chunk_size_minus_1 = g.op(
        "Constant", value_t=torch.tensor([chunks - 1], dtype=torch.long)
    )
    input_shape_dim_shift = g.op("Add", input_shape_dim, chunk_size_minus_1)
    chunk_dim = g.op("Div", input_shape_dim_shift, chunk_size)
    res = []
    for i in range(chunks):
        index = g.op("Constant", value_t=torch.tensor([i + 1], dtype=torch.long))
        end = g.op("Mul", chunk_dim, index)
        res.append(g.op("Slice", self, start, end, axis))
        start = end
    return res


@_onnx_symbolic("aten::hstack")
@_beartype.beartype
def hstack(g: jit_utils.GraphContext, tensor_list: _C.Value):
    tensor_list = atleast_1d(g, tensor_list)
    first_tensor = g.op(
        "SequenceAt",
        tensor_list,
        g.op("Constant", value_t=torch.tensor(0, dtype=torch.long)),
    )
    first_tensor_shape = g.op("Shape", first_tensor)
    first_tensor_dim = g.op("Size", first_tensor_shape)

    const_one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.long))
    equal_to_one = g.op("Equal", first_tensor_dim, const_one)

    (
        if_op_greater,
        (if_context_equal, else_context_equal),
        _,
    ) = jit_utils.add_op_with_blocks(g, "If", equal_to_one, n_blocks=2, outputs=1)
    result_if = if_context_equal.op(
        "ConcatFromSequence", tensor_list, axis_i=0, new_axis_i=0
    )
    utils._add_output_to_block(if_context_equal.block, result_if)
    result_else = else_context_equal.op(
        "ConcatFromSequence", tensor_list, axis_i=1, new_axis_i=0
    )
    utils._add_output_to_block(else_context_equal.block, result_else)
    result = if_op_greater.node().output()

    return result
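
# Hedged note on the If node above (not part of the original file):
# torch.hstack concatenates along dim 0 for 1-D inputs and along dim 1
# otherwise, so the branch is selected at runtime from the rank of the
# first tensor in the sequence.
#
#   >>> torch.hstack([torch.tensor([1, 2]), torch.tensor([3])]).tolist()
#   [1, 2, 3]                 # 1-D inputs: concat on dim 0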


@_onnx_symbolic("aten::vstack")
@_beartype.beartype
def vstack(g: jit_utils.GraphContext, tensor_list: _C.Value):
    tensor_list = atleast_2d(g, tensor_list)
    return g.op("ConcatFromSequence", tensor_list, axis_i=0, new_axis_i=0)
venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset13.py
ADDED
@@ -0,0 +1,1156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# EDITING THIS FILE? READ THIS FIRST!
|
2 |
+
# see Note [Edit Symbolic Files] in README.md
|
3 |
+
|
4 |
+
# This file exports ONNX ops for opset 13
|
5 |
+
import functools
|
6 |
+
|
7 |
+
import torch
|
8 |
+
import torch._C._onnx as _C_onnx
|
9 |
+
from torch.onnx import (
|
10 |
+
_constants,
|
11 |
+
_type_utils,
|
12 |
+
errors,
|
13 |
+
symbolic_helper,
|
14 |
+
symbolic_opset11 as opset11,
|
15 |
+
symbolic_opset9 as opset9,
|
16 |
+
utils,
|
17 |
+
)
|
18 |
+
from torch.onnx._internal import _beartype, jit_utils, registration
|
19 |
+
|
20 |
+
|
21 |
+
_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=13)
|
22 |
+
|
23 |
+
|
24 |
+
def _apply_params(*args, **kwargs):
|
25 |
+
"""Returns a decorator that calls the decorated (higher-order) function with the given parameters."""
|
26 |
+
|
27 |
+
def _apply(fn):
|
28 |
+
return fn(*args, **kwargs)
|
29 |
+
|
30 |
+
return _apply
|
31 |
+
|
32 |
+
|
33 |
+
@_onnx_symbolic("aten::softmax")
|
34 |
+
@symbolic_helper.parse_args("v", "i", "none")
|
35 |
+
@_beartype.beartype
|
36 |
+
def softmax(g: jit_utils.GraphContext, input, dim, dtype=None):
|
37 |
+
softmax = g.op("Softmax", input, axis_i=dim)
|
38 |
+
if dtype and dtype.node().kind() != "prim::Constant":
|
39 |
+
parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype")
|
40 |
+
softmax = g.op(
|
41 |
+
"Cast", softmax, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type()
|
42 |
+
)
|
43 |
+
|
44 |
+
return softmax
|
45 |
+
|
46 |
+
|
47 |
+
@_onnx_symbolic("aten::log_softmax")
|
48 |
+
@symbolic_helper.parse_args("v", "i", "none")
|
49 |
+
@_beartype.beartype
|
50 |
+
def log_softmax(g: jit_utils.GraphContext, input, dim, dtype=None):
|
51 |
+
return_op = g.op("LogSoftmax", input, axis_i=dim)
|
52 |
+
if dtype and dtype.node().kind() != "prim::Constant":
|
53 |
+
parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype")
|
54 |
+
return_op = g.op(
|
55 |
+
"Cast", return_op, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type()
|
56 |
+
)
|
57 |
+
return return_op
|
58 |
+
|
59 |
+
|
60 |
+
@_onnx_symbolic("aten::frobenius_norm")
|
61 |
+
@symbolic_helper.parse_args("v", "v", "i")
|
62 |
+
@_beartype.beartype
|
63 |
+
def frobenius_norm(g: jit_utils.GraphContext, self, dim=None, keepdim=False):
|
64 |
+
dim_val = symbolic_helper._maybe_get_const(dim, "is")
|
65 |
+
if not symbolic_helper._is_value(dim_val) and len(dim_val) == 0:
|
66 |
+
return g.op("ReduceL2", self, keepdims_i=0)
|
67 |
+
sqr = g.op("Mul", self, self)
|
68 |
+
sumsqr = symbolic_helper._reducesum_helper(g, sqr, dim, keepdims_i=keepdim)
|
69 |
+
return g.op("Sqrt", sumsqr)
|
70 |
+
|
71 |
+
|
72 |
+
@_onnx_symbolic("aten::split")
|
73 |
+
@symbolic_helper.parse_args("v", "v", "i", "i")
|
74 |
+
@_beartype.beartype
|
75 |
+
def split(g: jit_utils.GraphContext, self, split_size_or_sizes, dim, _outputs=None):
|
76 |
+
if not symbolic_helper._is_split_static(split_size_or_sizes, _outputs):
|
77 |
+
split_out = g.op("SplitToSequence", self, split_size_or_sizes, axis_i=dim)
|
78 |
+
if _outputs is None:
|
79 |
+
return split_out
|
80 |
+
# Convert to multiple slice nodes iff number of splits and number of outputs are statically known.
|
81 |
+
if (
|
82 |
+
symbolic_helper._is_packed_list(split_size_or_sizes)
|
83 |
+
and len(symbolic_helper._unpack_list(split_size_or_sizes)) == _outputs
|
84 |
+
):
|
85 |
+
split_sizes = [
|
86 |
+
symbolic_helper._unsqueeze_helper(g, v, [0])
|
87 |
+
for v in symbolic_helper._unpack_list(split_size_or_sizes)
|
88 |
+
]
|
89 |
+
|
90 |
+
start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
|
91 |
+
axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long))
|
92 |
+
res = []
|
93 |
+
for i in range(_outputs):
|
94 |
+
end = g.op(
|
95 |
+
"Add", start, split_sizes[i]
|
96 |
+
) # split_sizes is a list of same length as _outputs
|
97 |
+
res.append(g.op("Slice", self, start, end, axis))
|
98 |
+
start = end
|
99 |
+
return res
|
100 |
+
return [
|
101 |
+
g.op(
|
102 |
+
"SequenceAt",
|
103 |
+
split_out,
|
104 |
+
g.op("Constant", value_t=torch.tensor([i], dtype=torch.long)),
|
105 |
+
)
|
106 |
+
for i in range(_outputs)
|
107 |
+
]
|
108 |
+
|
109 |
+
split_val = symbolic_helper._node_get(split_size_or_sizes.node(), "value")
|
110 |
+
if split_val.dim() > 0:
|
111 |
+
return g.op("Split", self, split_size_or_sizes, axis_i=dim, outputs=_outputs)
|
112 |
+
split_size = symbolic_helper._get_const(split_size_or_sizes, "i", "split_size")
|
113 |
+
|
114 |
+
size = symbolic_helper._get_tensor_dim_size(self, dim)
|
115 |
+
if size is None:
|
116 |
+
if _outputs is not None:
|
117 |
+
size = split_size * _outputs
|
118 |
+
else:
|
119 |
+
raise errors.SymbolicValueError(
|
120 |
+
"Unknown dimension size not supported", self
|
121 |
+
)
|
122 |
+
splits = [split_size] * (size // split_size)
|
123 |
+
leftover = size % split_size
|
124 |
+
if leftover:
|
125 |
+
splits.append(leftover)
|
126 |
+
splits = g.op("Constant", value_t=torch.tensor(splits))
|
127 |
+
return g.op("Split", self, splits, axis_i=dim, outputs=_outputs)
|
128 |
+
|
129 |
+
|
130 |
+
@_onnx_symbolic("aten::split_with_sizes")
|
131 |
+
@_beartype.beartype
|
132 |
+
def split_with_sizes(g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None):
|
133 |
+
return split(g, self, split_sizes, dim, _outputs)
|
134 |
+
|
135 |
+
|
136 |
+
@_onnx_symbolic("aten::unsafe_split")
|
137 |
+
@_beartype.beartype
|
138 |
+
def unsafe_split(
|
139 |
+
g: jit_utils.GraphContext, self, split_size_or_sizes, dim, _outputs=None
|
140 |
+
):
|
141 |
+
return split(g, self, split_size_or_sizes, dim, _outputs)
|
142 |
+
|
143 |
+
|
144 |
+
@_onnx_symbolic("aten::unsafe_split_with_sizes")
|
145 |
+
@_beartype.beartype
|
146 |
+
def unsafe_split_with_sizes(
|
147 |
+
g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None
|
148 |
+
):
|
149 |
+
return split_with_sizes(g, self, split_sizes, dim, _outputs)
|
150 |
+
|
151 |
+
|
152 |
+
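
# A minimal usage sketch (hypothetical, not part of the opset-13 registry): the
# split symbolics above are reached by exporting a module that calls
# torch.split. A static size list takes the ONNX "Split" branch; data-dependent
# sizes fall back to "SplitToSequence". Demo names and file name are
# illustrative only.
def _split_export_demo():
    import torch

    class _SplitDemo(torch.nn.Module):
        def forward(self, x):
            a, b = torch.split(x, [2, 3], dim=1)  # static sizes -> ONNX Split
            return a, b

    torch.onnx.export(
        _SplitDemo(), torch.randn(4, 5), "split_demo.onnx", opset_version=13
    )
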
@_onnx_symbolic("aten::tensor_split")
|
153 |
+
@symbolic_helper.parse_args("v", "v", "i", "i")
|
154 |
+
@_beartype.beartype
|
155 |
+
def tensor_split(
|
156 |
+
g: jit_utils.GraphContext, self, indices_or_sections, dim, _outputs=None
|
157 |
+
):
|
158 |
+
axis = g.op("Constant", value_t=torch.tensor(dim, dtype=torch.long))
|
159 |
+
axis = opset11.unsqueeze(g, axis, 0)
|
160 |
+
const_1 = g.op("Constant", value_t=torch.tensor(1, dtype=torch.long))
|
161 |
+
|
162 |
+
if symbolic_helper._is_split_static(indices_or_sections, _outputs):
|
163 |
+
split_val = symbolic_helper._node_get(indices_or_sections.node(), "value")
|
164 |
+
|
165 |
+
if split_val.dim() > 0:
|
166 |
+
start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
|
167 |
+
res = []
|
168 |
+
assert _outputs is not None
|
169 |
+
for i in range(_outputs - 1):
|
170 |
+
end = g.op(
|
171 |
+
"Gather",
|
172 |
+
indices_or_sections,
|
173 |
+
g.op("Constant", value_t=torch.tensor([i], dtype=torch.long)),
|
174 |
+
axis_i=0,
|
175 |
+
)
|
176 |
+
res.append(g.op("Slice", self, start, end, axis))
|
177 |
+
start = end
|
178 |
+
|
179 |
+
end = symbolic_helper._size_helper(g, self, axis)
|
180 |
+
res.append(g.op("Slice", self, start, end, axis))
|
181 |
+
return res
|
182 |
+
|
183 |
+
split_size = symbolic_helper._get_const(
|
184 |
+
indices_or_sections, "i", "indices_or_sections"
|
185 |
+
)
|
186 |
+
|
187 |
+
size = symbolic_helper._get_tensor_dim_size(self, dim)
|
188 |
+
if size is None:
|
189 |
+
if _outputs is not None:
|
190 |
+
size = split_size * _outputs
|
191 |
+
else:
|
192 |
+
raise errors.SymbolicValueError(
|
193 |
+
"Unknown dimension size not supported", self
|
194 |
+
)
|
195 |
+
|
196 |
+
min_split_size = size // split_size
|
197 |
+
num_splits_one_extra = size % split_size
|
198 |
+
|
199 |
+
splits = num_splits_one_extra * [min_split_size + 1]
|
200 |
+
leftover = (split_size - num_splits_one_extra) * [min_split_size]
|
201 |
+
|
202 |
+
splits = g.op(
|
203 |
+
"Constant", value_t=torch.tensor(splits + leftover, dtype=torch.long)
|
204 |
+
)
|
205 |
+
return g.op("Split", self, splits, axis_i=dim, outputs=_outputs)
|
206 |
+
|
207 |
+
if (
|
208 |
+
symbolic_helper._is_tensor(indices_or_sections)
|
209 |
+
and symbolic_helper._get_tensor_rank(indices_or_sections) == 1
|
210 |
+
):
|
211 |
+
loop_len = symbolic_helper._size_helper(
|
212 |
+
g, indices_or_sections, g.op("Constant", value_t=torch.tensor(0))
|
213 |
+
)
|
214 |
+
loop_len = opset11.unsqueeze(g, loop_len, 0)
|
215 |
+
loop_condition = g.op("Cast", const_1, to_i=_C_onnx.TensorProtoDataType.BOOL)
|
216 |
+
|
217 |
+
# To make the first slice in the below loop work,
|
218 |
+
# we pad a zero to the first position so that it will be the initial start of slice.
|
219 |
+
padding_0 = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
|
220 |
+
indices_or_sections = g.op("Concat", padding_0, indices_or_sections, axis_i=0)
|
221 |
+
|
222 |
+
final_splits = g.op("SequenceEmpty")
|
223 |
+
# Loop inputs
|
224 |
+
loop, (loop_context,), _ = jit_utils.add_op_with_blocks(
|
225 |
+
g, "Loop", loop_len, loop_condition, final_splits, outputs=1, n_blocks=1
|
226 |
+
)
|
227 |
+
|
228 |
+
loop_block = loop_context.block
|
229 |
+
block_input_iter = utils._add_input_to_block(loop_block)
|
230 |
+
cond = utils._add_input_to_block(loop_block)
|
231 |
+
final_splits = utils._add_input_to_block(loop_block)
|
232 |
+
|
233 |
+
start = loop_context.op(
|
234 |
+
"Gather", indices_or_sections, block_input_iter, axis_i=0
|
235 |
+
)
|
236 |
+
end = loop_context.op(
|
237 |
+
"Gather",
|
238 |
+
indices_or_sections,
|
239 |
+
loop_context.op("Add", block_input_iter, const_1),
|
240 |
+
axis_i=0,
|
241 |
+
)
|
242 |
+
|
243 |
+
slice = loop_context.op("Slice", self, start, end, axis)
|
244 |
+
final_splits = loop_context.op("SequenceInsert", final_splits, slice)
|
245 |
+
|
246 |
+
# Loop outputs
|
247 |
+
cond_out = loop_context.op("Identity", loop_condition)
|
248 |
+
utils._add_output_to_block(loop_block, cond_out)
|
249 |
+
utils._add_output_to_block(loop_block, final_splits)
|
250 |
+
|
251 |
+
loop_out = loop.node().output()
|
252 |
+
start = g.op(
|
253 |
+
"Gather",
|
254 |
+
indices_or_sections,
|
255 |
+
g.op("Constant", value_t=torch.tensor(-1, dtype=torch.long)),
|
256 |
+
axis_i=0,
|
257 |
+
)
|
258 |
+
start = opset11.unsqueeze(g, start, 0)
|
259 |
+
end = symbolic_helper._size_helper(g, self, axis)
|
260 |
+
|
261 |
+
last_slice = g.op("Slice", self, start, end, axis)
|
262 |
+
|
263 |
+
return g.op("SequenceInsert", loop_out, last_slice)
|
264 |
+
|
265 |
+
else: # scalar tensor
|
266 |
+
dim_size = symbolic_helper._size_helper(g, self, axis)
|
267 |
+
min_split_size = g.op("Div", dim_size, indices_or_sections)
|
268 |
+
min_split_size_plus_1 = g.op(
|
269 |
+
"Add",
|
270 |
+
min_split_size,
|
271 |
+
const_1,
|
272 |
+
)
|
273 |
+
num_splits_one_extra = g.op("Mod", dim_size, indices_or_sections)
|
274 |
+
splits = g.op("Tile", min_split_size_plus_1, num_splits_one_extra)
|
275 |
+
leftover = g.op(
|
276 |
+
"Tile",
|
277 |
+
min_split_size,
|
278 |
+
g.op(
|
279 |
+
"Sub",
|
280 |
+
opset11.unsqueeze(g, indices_or_sections, 0),
|
281 |
+
num_splits_one_extra,
|
282 |
+
),
|
283 |
+
)
|
284 |
+
|
285 |
+
splits = g.op("Concat", splits, leftover, axis_i=0)
|
286 |
+
if _outputs is None:
|
287 |
+
return g.op("SplitToSequence", self, splits, axis_i=dim)
|
288 |
+
return g.op("Split", self, splits, axis_i=dim, outputs=_outputs)
|
289 |
+
|
290 |
+
|
291 |
+
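
# A minimal usage sketch (hypothetical): an integer section count on a
# statically-sized dim takes the static Split branch of tensor_split above,
# while a 1-D index tensor takes the Loop branch. File name is illustrative.
def _tensor_split_export_demo():
    import torch

    class _TSplit(torch.nn.Module):
        def forward(self, x):
            return torch.tensor_split(x, 3, dim=0)  # 7 rows -> chunks of 3, 2, 2

    torch.onnx.export(
        _TSplit(), torch.randn(7, 2), "tensor_split_demo.onnx", opset_version=13
    )
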
@_onnx_symbolic("aten::unbind")
|
292 |
+
@symbolic_helper.parse_args("v", "i", "i")
|
293 |
+
@_beartype.beartype
|
294 |
+
def unbind(g: jit_utils.GraphContext, self, dim=0, _outputs=None):
|
295 |
+
if _outputs is None:
|
296 |
+
return g.op(
|
297 |
+
"SplitToSequence",
|
298 |
+
self,
|
299 |
+
g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)),
|
300 |
+
axis_i=dim,
|
301 |
+
keepdims_i=0,
|
302 |
+
)
|
303 |
+
|
304 |
+
splits = g.op("Constant", value_t=torch.tensor([1] * _outputs))
|
305 |
+
outputs = g.op("Split", self, splits, axis_i=dim, outputs=_outputs)
|
306 |
+
outputs = [outputs] if _outputs == 1 else outputs
|
307 |
+
squeezed_outputs = [
|
308 |
+
g.op("Squeeze", out, g.op("Constant", value_t=torch.tensor([dim])))
|
309 |
+
for out in outputs
|
310 |
+
]
|
311 |
+
return squeezed_outputs
|
312 |
+
|
313 |
+
|
314 |
+
@_onnx_symbolic("aten::nonzero_numpy")
|
315 |
+
# Emitted from `torch.nonzero(x, as_tuple=True)`
|
316 |
+
@_beartype.beartype
|
317 |
+
def nonzero_numpy(g: jit_utils.GraphContext, input, _outputs=None):
|
318 |
+
return unbind(g, opset9.nonzero(g, input), 1, _outputs=_outputs)
|
319 |
+
|
320 |
+
|
321 |
+
@_onnx_symbolic("aten::where")
|
322 |
+
@symbolic_helper.parse_args("v", "v", "v", "i")
|
323 |
+
@_beartype.beartype
|
324 |
+
def where(g: jit_utils.GraphContext, condition, self=None, other=None, _outputs=None):
|
325 |
+
# Assumes that torch.where's first argument takes only Bool and Byte tensors.
|
326 |
+
if not symbolic_helper._is_bool(condition):
|
327 |
+
condition = g.op("Cast", condition, to_i=_C_onnx.TensorProtoDataType.BOOL)
|
328 |
+
if self is None:
|
329 |
+
condition = opset9.nonzero(g, condition)
|
330 |
+
return symbolic_helper._unbind_helper(
|
331 |
+
g, condition, g.op("Constant", value_t=torch.tensor(1)), _outputs
|
332 |
+
)
|
333 |
+
return g.op("Where", condition, self, other)
|
334 |
+
|
335 |
+
|
336 |
+
@_onnx_symbolic("aten::fake_quantize_per_channel_affine")
|
337 |
+
@symbolic_helper.parse_args("v", "v", "v", "i", "i", "i")
|
338 |
+
@_beartype.beartype
|
339 |
+
def fake_quantize_per_channel_affine(
|
340 |
+
g: jit_utils.GraphContext,
|
341 |
+
inputs,
|
342 |
+
scale,
|
343 |
+
zero_point,
|
344 |
+
axis,
|
345 |
+
quant_min=-128,
|
346 |
+
quant_max=127,
|
347 |
+
):
|
348 |
+
# NOTE: (0, 127) is allowed as special case. PyTorch restricts activations to be in the range (0, 127).
|
349 |
+
# https://github.com/pytorch/pytorch/blob/b34b192d6b97325c9f78e5995c48c8498ede34bd/torch/ao/quantization/observer.py#L1422
|
350 |
+
if (quant_min, quant_max) not in [(0, 255), (-128, 127), (0, 127)]:
|
351 |
+
raise errors.SymbolicValueError(
|
352 |
+
"For (quant_min, quant_max), ONNX allows only (0, 127), (0, 255) and (-128, 127). "
|
353 |
+
f"Got ({quant_min}, {quant_max})",
|
354 |
+
inputs,
|
355 |
+
)
|
356 |
+
# ONNX defines zero_point to be int8 or uint8
|
357 |
+
if quant_min == 0:
|
358 |
+
zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8)
|
359 |
+
else:
|
360 |
+
zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.INT8)
|
361 |
+
quantized = g.op("QuantizeLinear", inputs, scale, zero_point, axis_i=axis)
|
362 |
+
if (quant_min, quant_max) == (0, 127):
|
363 |
+
quantized = g.op(
|
364 |
+
"Clip",
|
365 |
+
quantized,
|
366 |
+
opset9.unused(g),
|
367 |
+
g.op("Constant", value_t=torch.tensor(127, dtype=torch.uint8)),
|
368 |
+
)
|
369 |
+
return g.op("DequantizeLinear", quantized, scale, zero_point, axis_i=axis)
|
370 |
+
|
371 |
+
|
372 |
+
@_onnx_symbolic("aten::fake_quantize_per_tensor_affine")
|
373 |
+
@symbolic_helper.parse_args("v", "v", "v", "i", "i")
|
374 |
+
@_beartype.beartype
|
375 |
+
def fake_quantize_per_tensor_affine(
|
376 |
+
g: jit_utils.GraphContext,
|
377 |
+
inputs,
|
378 |
+
scale,
|
379 |
+
zero_point,
|
380 |
+
quant_min=-128,
|
381 |
+
quant_max=127,
|
382 |
+
):
|
383 |
+
# NOTE: (0, 127) is allowed as special case. PyTorch restricts activations to be in the range (0, 127).
|
384 |
+
# https://github.com/pytorch/pytorch/blob/b34b192d6b97325c9f78e5995c48c8498ede34bd/torch/ao/quantization/observer.py#L1422
|
385 |
+
if (quant_min, quant_max) not in [(0, 255), (-128, 127), (0, 127)]:
|
386 |
+
raise errors.SymbolicValueError(
|
387 |
+
"For (quant_min, quant_max), ONNX allows only (0, 127), (0, 255) and (-128, 127). "
|
388 |
+
f"Got ({quant_min}, {quant_max})",
|
389 |
+
inputs,
|
390 |
+
)
|
391 |
+
if quant_min == 0:
|
392 |
+
zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8)
|
393 |
+
else:
|
394 |
+
zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.INT8)
|
395 |
+
if (
|
396 |
+
_type_utils.JitScalarType.from_value(scale, _type_utils.JitScalarType.UNDEFINED)
|
397 |
+
!= _type_utils.JitScalarType.FLOAT
|
398 |
+
):
|
399 |
+
scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT)
|
400 |
+
quantized = g.op("QuantizeLinear", inputs, scale, zero_point)
|
401 |
+
if (quant_min, quant_max) == (0, 127):
|
402 |
+
quantized = g.op(
|
403 |
+
"Clip",
|
404 |
+
quantized,
|
405 |
+
opset9.unused(g),
|
406 |
+
g.op("Constant", value_t=torch.tensor(127, dtype=torch.uint8)),
|
407 |
+
)
|
408 |
+
return g.op("DequantizeLinear", quantized, scale, zero_point)
|
409 |
+
|
410 |
+
|
411 |
+
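
# A minimal usage sketch (hypothetical): exporting fake-quantize hits the
# symbolic above. (quant_min, quant_max) = (-128, 127) takes the int8
# zero-point branch; scale/zero-point values and the file name are illustrative.
def _fake_quantize_export_demo():
    import torch

    class _FQ(torch.nn.Module):
        def forward(self, x):
            return torch.fake_quantize_per_tensor_affine(x, 0.1, 0, -128, 127)

    torch.onnx.export(
        _FQ(), torch.randn(2, 3), "fake_quant_demo.onnx", opset_version=13
    )
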
@_beartype.beartype
def _reduce_op_symbolic(onnx_op_name):
    @_beartype.beartype
    def symbolic(g, self, dim=None, keepdim=None):
        self = opset9._maybe_cast_reduce_op_input(g, self)
        if dim is None:
            # all-reduce path
            return symbolic_helper._handle_reduce_dim_none(g, self, onnx_op_name)
        else:
            keepdim = symbolic_helper._get_const(keepdim, "i", "keepdim")
            return g.op(onnx_op_name, self, dim, keepdims_i=keepdim)

    return symbolic


@_onnx_symbolic(
    "aten::sum",
    decorate=[_apply_params("ReduceSum", "sum")],
)
@_beartype.beartype
def _reduce_with_dtype(onnx_op, name):
    symbolic = _reduce_op_symbolic(onnx_op)

    @opset9.overload_by_arg_count
    @_beartype.beartype
    def reduce(g, *args, **kwargs):
        @symbolic_helper.parse_args("v", "none")
        @_beartype.beartype
        def reduce_nodim(g, self, dtype):
            dtype_onnx = None
            if dtype.node().kind() == "onnx::Constant":
                dtype = symbolic_helper._get_const(dtype, "i", "dtype")
                dtype_onnx = _type_utils.JitScalarType(dtype).onnx_type()
                self = g.op("Cast", self, to_i=dtype_onnx)
            elif dtype.node().kind() != "prim::Constant":
                return symbolic_helper._unimplemented(name, "dtype", dtype)
            result = symbolic(g, self)
            if dtype_onnx is not None:
                result_dtype_onnx = _type_utils.JitScalarType.from_value(
                    result
                ).onnx_type()
                if result_dtype_onnx != dtype_onnx:
                    result = g.op("Cast", result, to_i=dtype_onnx)
            return result

        @symbolic_helper.parse_args("v", "v", "i", "none")
        @_beartype.beartype
        def reduce_dim(g, self, dim, keepdim, dtype):
            dtype_onnx = None
            if dtype.node().kind() == "onnx::Constant":
                dtype = symbolic_helper._get_const(dtype, "i", "dtype")
                dtype_onnx = _type_utils.JitScalarType(dtype).onnx_type()
                self = g.op("Cast", self, to_i=dtype_onnx)
            elif dtype.node().kind() != "prim::Constant":
                return symbolic_helper._unimplemented(name, "dtype", dtype)
            result = symbolic(g, self, dim, keepdim)
            if dtype_onnx is not None:
                result_dtype_onnx = _type_utils.JitScalarType.from_value(
                    result
                ).onnx_type()
                if result_dtype_onnx != dtype_onnx:
                    result = g.op("Cast", result, to_i=dtype_onnx)
            return result

        return reduce_nodim, reduce_dim

    return reduce

# Ported from
# https://github.com/microsoft/onnxscript/blob/6b1b81700b4523f31d8c6d3321e5d8ef5d42b764/onnxscript/function_libs/torch_aten/ops/core.py#L6097
# NOTE: Supporting aten::unflatten before opset13 needs helper function to adjust ONNX op changes in Concat, Slice, ...
@_onnx_symbolic("aten::unflatten")
@_beartype.beartype
def unflatten(g: jit_utils.GraphContext, input, dim, unflattened_size):
    input_dim = symbolic_helper._get_tensor_rank(input)
    if input_dim is None:
        return symbolic_helper._unimplemented(
            "dim",
            "ONNX and PyTorch use different strategies to split the input. "
            "Input rank must be known at export time.",
        )

    # dim could be negative
    input_dim = g.op("Constant", value_t=torch.tensor([input_dim], dtype=torch.int64))
    dim = g.op("Add", input_dim, dim)
    dim = g.op("Mod", dim, input_dim)

    input_size = g.op("Shape", input)

    head_start_idx = g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64))
    head_end_idx = g.op(
        "Reshape", dim, g.op("Constant", value_t=torch.tensor([1], dtype=torch.int64))
    )
    head_part_rank = g.op("Slice", input_size, head_start_idx, head_end_idx)

    dim_plus_one = g.op(
        "Add", dim, g.op("Constant", value_t=torch.tensor([1], dtype=torch.int64))
    )
    tail_start_idx = g.op(
        "Reshape",
        dim_plus_one,
        g.op("Constant", value_t=torch.tensor([1], dtype=torch.int64)),
    )
    tail_end_idx = g.op(
        "Constant", value_t=torch.tensor([_constants.INT64_MAX], dtype=torch.int64)
    )
    tail_part_rank = g.op("Slice", input_size, tail_start_idx, tail_end_idx)

    final_shape = g.op(
        "Concat", head_part_rank, unflattened_size, tail_part_rank, axis_i=0
    )

    return symbolic_helper._reshape_helper(g, input, final_shape)

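
# A minimal usage sketch (hypothetical), assuming a torch version that provides
# torch.unflatten. The input rank must be known at export time, per the check
# at the top of `unflatten`; shapes and the file name are illustrative.
def _unflatten_export_demo():
    import torch

    class _Unflatten(torch.nn.Module):
        def forward(self, x):
            return torch.unflatten(x, 1, (2, 3))  # dim 1 (size 6) -> (2, 3)

    torch.onnx.export(
        _Unflatten(), torch.randn(4, 6), "unflatten_demo.onnx", opset_version=13
    )
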
@_onnx_symbolic("aten::unsafe_chunk")
|
528 |
+
@symbolic_helper.parse_args("v", "i", "i", "i")
|
529 |
+
@_beartype.beartype
|
530 |
+
def unsafe_chunk(g: jit_utils.GraphContext, self, chunks, dim, _outputs=None):
|
531 |
+
if _outputs is None:
|
532 |
+
return g.op(
|
533 |
+
"SplitToSequence",
|
534 |
+
self,
|
535 |
+
g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)),
|
536 |
+
axis_i=dim,
|
537 |
+
keepdims_i=0,
|
538 |
+
)
|
539 |
+
|
540 |
+
size = symbolic_helper._get_tensor_dim_size(self, dim)
|
541 |
+
if size is None:
|
542 |
+
return symbolic_helper._unimplemented("unsafe_chunk", "unknown dimension size")
|
543 |
+
split_size = (size + chunks - 1) // chunks
|
544 |
+
splits = [split_size] * (size // split_size)
|
545 |
+
leftover = size % split_size
|
546 |
+
if leftover:
|
547 |
+
splits.append(leftover)
|
548 |
+
|
549 |
+
# TODO: So far we don"t have a module using this method. We"ll keep
|
550 |
+
# this as a constant unless we see a request of dynamics in any
|
551 |
+
# user's modules.
|
552 |
+
splits = g.op("Constant", value_t=torch.tensor(splits, dtype=torch.long))
|
553 |
+
return g.op("Split", self, splits, axis_i=dim, outputs=_outputs)
|
554 |
+
|
555 |
+
|
556 |
+
@_onnx_symbolic("aten::tile")
|
557 |
+
@_beartype.beartype
|
558 |
+
def tile(g: jit_utils.GraphContext, self, dims):
|
559 |
+
self_shape = g.op("Shape", self)
|
560 |
+
self_rank = g.op("Size", self_shape)
|
561 |
+
dims_rank = g.op("Size", dims)
|
562 |
+
diff = g.op("Sub", self_rank, dims_rank)
|
563 |
+
const_zero = g.op("Constant", value_t=torch.tensor([0]))
|
564 |
+
|
565 |
+
# 1. If dims is shorter than self.shape pad dims with 1
|
566 |
+
dims_shorter_than_self_shape = g.op("Greater", diff, const_zero)
|
567 |
+
(
|
568 |
+
if_op_greater,
|
569 |
+
(if_context_greater, else_context_greater),
|
570 |
+
_,
|
571 |
+
) = jit_utils.add_op_with_blocks(
|
572 |
+
g, "If", dims_shorter_than_self_shape, n_blocks=2, outputs=1
|
573 |
+
)
|
574 |
+
const_one = if_context_greater.op("Constant", value_t=torch.LongTensor([1]))
|
575 |
+
diff_1d_greater = if_context_greater.op("Reshape", diff, const_one)
|
576 |
+
exapnd_ones_greater = if_context_greater.op("Expand", const_one, diff_1d_greater)
|
577 |
+
dims_ = if_context_greater.op("Concat", exapnd_ones_greater, dims, axis_i=0)
|
578 |
+
utils._add_output_to_block(if_context_greater.block, dims_)
|
579 |
+
identity_dim = else_context_greater.op("Identity", dims)
|
580 |
+
utils._add_output_to_block(else_context_greater.block, identity_dim)
|
581 |
+
dims_final = if_op_greater.node().output()
|
582 |
+
|
583 |
+
# 2. If dims is longer than self.shape pad self.shape with 1
|
584 |
+
dims_longer_than_self_shape = g.op("Less", diff, const_zero)
|
585 |
+
(
|
586 |
+
if_op_less,
|
587 |
+
(if_context_less, else_context_less),
|
588 |
+
_,
|
589 |
+
) = jit_utils.add_op_with_blocks(
|
590 |
+
g, "If", dims_longer_than_self_shape, n_blocks=2, outputs=1
|
591 |
+
)
|
592 |
+
const_one = if_context_less.op("Constant", value_t=torch.LongTensor([1]))
|
593 |
+
diff_1d_less = if_context_less.op(
|
594 |
+
"Reshape",
|
595 |
+
if_context_less.op("Abs", diff),
|
596 |
+
const_one,
|
597 |
+
)
|
598 |
+
exapnd_ones_less = if_context_less.op("Expand", const_one, diff_1d_less)
|
599 |
+
self_final_shape = if_context_less.op(
|
600 |
+
"Concat", exapnd_ones_less, self_shape, axis_i=0
|
601 |
+
)
|
602 |
+
self_ = if_context_less.op("Reshape", self, self_final_shape)
|
603 |
+
utils._add_output_to_block(if_context_less.block, self_)
|
604 |
+
identity_self = else_context_less.op("Identity", self)
|
605 |
+
utils._add_output_to_block(else_context_less.block, identity_self)
|
606 |
+
self_final = if_op_less.node().output()
|
607 |
+
|
608 |
+
dims_final = g.op("Cast", dims_final, to_i=_C_onnx.TensorProtoDataType.INT64)
|
609 |
+
return g.op("Tile", self_final, dims_final)
|
610 |
+
|
611 |
+
|
612 |
+
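
# A minimal usage sketch (hypothetical): with dims shorter than the input rank,
# the first If-block above left-pads dims with 1s, matching torch.tile's
# broadcasting. File name is illustrative.
def _tile_export_demo():
    import torch

    class _Tile(torch.nn.Module):
        def forward(self, x):
            return torch.tile(x, (2,))  # (3, 4) -> (3, 8)

    torch.onnx.export(_Tile(), torch.randn(3, 4), "tile_demo.onnx", opset_version=13)
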
@_onnx_symbolic("aten::repeat_interleave")
|
613 |
+
@_beartype.beartype
|
614 |
+
def repeat_interleave(
|
615 |
+
g: jit_utils.GraphContext, self, repeats, dim=None, output_size=None
|
616 |
+
):
|
617 |
+
repeats_dim = symbolic_helper._get_tensor_rank(repeats)
|
618 |
+
repeats_sizes = symbolic_helper._get_tensor_sizes(repeats)
|
619 |
+
input_sizes = symbolic_helper._get_tensor_sizes(self)
|
620 |
+
if repeats_dim is None:
|
621 |
+
raise errors.SymbolicValueError(
|
622 |
+
"Unsupported: ONNX export of repeat_interleave for unknown repeats rank.",
|
623 |
+
self,
|
624 |
+
)
|
625 |
+
if repeats_sizes is None:
|
626 |
+
raise errors.SymbolicValueError(
|
627 |
+
"Unsupported: ONNX export of repeat_interleave for unknown repeats size.",
|
628 |
+
self,
|
629 |
+
)
|
630 |
+
if input_sizes is None:
|
631 |
+
raise errors.SymbolicValueError(
|
632 |
+
"Unsupported: ONNX export of repeat_interleave for unknown input size.",
|
633 |
+
self,
|
634 |
+
)
|
635 |
+
|
636 |
+
final_dim = dim
|
637 |
+
# if dim is None flatten
|
638 |
+
# By default, use the flattened input array, and return a flat output array
|
639 |
+
if symbolic_helper._is_none(dim):
|
640 |
+
self = symbolic_helper._reshape_helper(
|
641 |
+
g, self, g.op("Constant", value_t=torch.tensor([-1]))
|
642 |
+
)
|
643 |
+
dim = torch.tensor(0, dtype=torch.int64)
|
644 |
+
else:
|
645 |
+
dim = symbolic_helper._maybe_get_scalar(dim)
|
646 |
+
|
647 |
+
# Handle cases where dim is negative
|
648 |
+
if dim < 0:
|
649 |
+
dim += len(input_sizes)
|
650 |
+
|
651 |
+
output_sizes = input_sizes.copy()
|
652 |
+
for idx, input_size in enumerate(input_sizes):
|
653 |
+
if input_size is None:
|
654 |
+
output_sizes[idx], input_sizes[idx] = 0, -1
|
655 |
+
|
656 |
+
# Check if all indices should be repeated the same number of times.
|
657 |
+
if repeats_dim == 0 or (repeats_dim == 1 and repeats_sizes[0] == 1):
|
658 |
+
return symbolic_helper._repeat_interleave_single_value_repeat_helper(
|
659 |
+
g, self, repeats, dim
|
660 |
+
)
|
661 |
+
|
662 |
+
cond_dynamic_repeats = repeats_dim == 1 and repeats_sizes[0] is None
|
663 |
+
# If input size is dynamic or repeats vector is dynamic
|
664 |
+
if output_sizes[dim] == 0 or cond_dynamic_repeats:
|
665 |
+
reps = symbolic_helper._size_helper(g, self, dim)
|
666 |
+
reps = opset11.unsqueeze(g, reps, 0)
|
667 |
+
|
668 |
+
# Check if repeats is dynamic
|
669 |
+
# As repeats is dynamic, we use a where node as a substitute for the if statement
|
670 |
+
# If repests_dim = 1, expand repeats otherwise use original tensor
|
671 |
+
if cond_dynamic_repeats:
|
672 |
+
repeat_dim = symbolic_helper._size_helper(
|
673 |
+
g, repeats, g.op("Constant", value_t=torch.LongTensor([0]))
|
674 |
+
)
|
675 |
+
repeat_cond = g.op(
|
676 |
+
"Equal", repeat_dim, g.op("Constant", value_t=torch.LongTensor([1]))
|
677 |
+
)
|
678 |
+
repeats = where(g, repeat_cond, g.op("Expand", repeats, reps), repeats)
|
679 |
+
# There are cases when the repeats are 1-d tensor with multiple repeats, but dim
|
680 |
+
# provided along one of the dynamic axes provided. A simple example would be
|
681 |
+
# input.shape -> [1, 1, *] where * represents the dynamic axes, and dim = 2
|
682 |
+
# Now, repeat interleaving can be performed in pytorch when the value of * matches
|
683 |
+
# with the number of elements in repeat, for example if * -> 2, number of repeats
|
684 |
+
# should be 2 as well.
|
685 |
+
else:
|
686 |
+
return opset9.repeat_interleave(g, self, repeats, final_dim)
|
687 |
+
|
688 |
+
reps_like = g.op(
|
689 |
+
"ConstantOfShape",
|
690 |
+
g.op("Shape", repeats),
|
691 |
+
value_t=torch.tensor([1], dtype=torch.long),
|
692 |
+
)
|
693 |
+
r_splits = split(g, repeats, reps_like, 0)
|
694 |
+
i_splits = split(g, self, reps_like, dim)
|
695 |
+
|
696 |
+
output_sizes[dim], input_sizes[dim] = -1, 1
|
697 |
+
|
698 |
+
# Create a loop to iterate over each value along the dimension
|
699 |
+
# and perform individual interleaving using the repeats tensor
|
700 |
+
# Loop is of the following pattern
|
701 |
+
# input (trip_count, cond)
|
702 |
+
# int trip_count = ...;
|
703 |
+
# bool cond = ...;
|
704 |
+
# for (int i=0; i < trip_count && cond; ++i) {
|
705 |
+
# cond = ...;
|
706 |
+
# }
|
707 |
+
|
708 |
+
# Loop conditions
|
709 |
+
loop_condition = g.op("Constant", value_t=torch.tensor(1))
|
710 |
+
loop_condition = g.op("Cast", loop_condition, to_i=_C_onnx.TensorProtoDataType.BOOL)
|
711 |
+
loop_len = reps
|
712 |
+
|
713 |
+
# Create an empty sequence to store final expansions
|
714 |
+
final_splits = g.op("SequenceEmpty")
|
715 |
+
|
716 |
+
# Loop inputs
|
717 |
+
loop, (loop_context,), _ = jit_utils.add_op_with_blocks(
|
718 |
+
g, "Loop", loop_len, loop_condition, final_splits, n_blocks=1
|
719 |
+
)
|
720 |
+
|
721 |
+
loop_block = loop_context.block
|
722 |
+
block_input_iter = utils._add_input_to_block(loop_block)
|
723 |
+
cond = utils._add_input_to_block(loop_block)
|
724 |
+
final_splits = utils._add_input_to_block(loop_block)
|
725 |
+
|
726 |
+
r_split = loop_context.op("SequenceAt", r_splits, block_input_iter)
|
727 |
+
i_split = loop_context.op("SequenceAt", i_splits, block_input_iter)
|
728 |
+
|
729 |
+
i_split = opset11.unsqueeze(loop_context, i_split, dim + 1)
|
730 |
+
r_concat = [
|
731 |
+
loop_context.op("Constant", value_t=torch.LongTensor(input_sizes[: dim + 1])),
|
732 |
+
r_split,
|
733 |
+
loop_context.op("Constant", value_t=torch.LongTensor(input_sizes[dim + 1 :])),
|
734 |
+
]
|
735 |
+
r_concat = loop_context.op("Concat", *r_concat, axis_i=0)
|
736 |
+
i_split = opset9.expand(loop_context, i_split, r_concat, None)
|
737 |
+
i_split = symbolic_helper._reshape_helper(
|
738 |
+
loop_context, i_split, g.op("Constant", value_t=torch.LongTensor(output_sizes))
|
739 |
+
)
|
740 |
+
final_splits = loop_context.op("SequenceInsert", final_splits, i_split)
|
741 |
+
|
742 |
+
# Loop outputs
|
743 |
+
cond_out = loop_context.op(
|
744 |
+
"Cast", loop_condition, to_i=_C_onnx.TensorProtoDataType.BOOL
|
745 |
+
)
|
746 |
+
utils._add_output_to_block(loop_block, cond_out)
|
747 |
+
utils._add_output_to_block(loop_block, final_splits)
|
748 |
+
|
749 |
+
loop_out = loop.node().output()
|
750 |
+
loop_out = g.op("ConcatFromSequence", loop_out, axis_i=dim)
|
751 |
+
return loop_out
|
752 |
+
|
753 |
+
|
754 |
+
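
# A minimal usage sketch (hypothetical): a per-element repeats vector on a
# statically-sized dim takes the opset-9 fallback above, while dynamic shapes
# take the Loop path. File name is illustrative.
def _repeat_interleave_export_demo():
    import torch

    class _RepInt(torch.nn.Module):
        def forward(self, x):
            return torch.repeat_interleave(x, torch.tensor([1, 2, 3]), dim=0)

    torch.onnx.export(
        _RepInt(), torch.randn(3, 2), "repeat_interleave_demo.onnx", opset_version=13
    )
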
@_onnx_symbolic("aten::diagonal")
|
755 |
+
@symbolic_helper.parse_args("v", "i", "i", "i")
|
756 |
+
@_beartype.beartype
|
757 |
+
def diagonal(g: jit_utils.GraphContext, self, offset, dim1, dim2):
|
758 |
+
rank = symbolic_helper._get_tensor_rank(self)
|
759 |
+
# Replace negative indexing when rank is known
|
760 |
+
if rank is not None:
|
761 |
+
dim1 = dim1 if dim1 >= 0 else dim1 + rank
|
762 |
+
dim2 = dim2 if dim2 >= 0 else dim2 + rank
|
763 |
+
|
764 |
+
dim1_size = opset9.size(
|
765 |
+
g, self, dim=g.op("Constant", value_t=torch.LongTensor([dim1]))
|
766 |
+
)
|
767 |
+
dim2_size = opset9.size(
|
768 |
+
g, self, dim=g.op("Constant", value_t=torch.LongTensor([dim2]))
|
769 |
+
)
|
770 |
+
# Create appropriate mask
|
771 |
+
mask_shape = g.op("Concat", dim1_size, dim2_size, axis_i=0)
|
772 |
+
mask = opset9.zeros(g, mask_shape, None, None, None)
|
773 |
+
mask = g.op("EyeLike", mask, k_i=offset)
|
774 |
+
# dim1 and dim2 appended as a dimension at the end of the shape
|
775 |
+
|
776 |
+
if rank is not None:
|
777 |
+
axes = list(range(rank))
|
778 |
+
axes.remove(dim1)
|
779 |
+
axes.remove(dim2)
|
780 |
+
self = g.op("Transpose", self, perm_i=axes + [dim1, dim2])
|
781 |
+
else:
|
782 |
+
return symbolic_helper._unimplemented("diagonal", "unknown input rank")
|
783 |
+
|
784 |
+
# Multiply input and mask to calculate values along diagonal
|
785 |
+
# The mask consists of one values where diagonal values are to be calculated
|
786 |
+
# For example:
|
787 |
+
# [[1.1, 1.2, 1.3], * [[1, 0, 0] = [[1.1, 0, 0],
|
788 |
+
# [2.1, 2.2, 2.3], [0, 1, 0] [0, 2.2, 0],
|
789 |
+
# [3.1, 3.2, 3.3]] [0, 0, 1]] [0, 0, 3.3]]
|
790 |
+
result = g.op("Mul", self, mask)
|
791 |
+
result = symbolic_helper._reducesum_helper(g, result, axes_i=[-1], keepdims_i=0)
|
792 |
+
|
793 |
+
# Calculate gather indices based on offset and dims
|
794 |
+
# If offset is greater than zero, set offset to zero as this aids in
|
795 |
+
# calculation of selection window
|
796 |
+
offset_op = g.op("Constant", value_t=torch.LongTensor([offset]))
|
797 |
+
if offset >= 0:
|
798 |
+
diag_size = g.op(
|
799 |
+
"Max",
|
800 |
+
g.op("Min", dim1_size, g.op("Sub", dim2_size, offset_op)),
|
801 |
+
g.op("Constant", value_t=torch.LongTensor([0])),
|
802 |
+
)
|
803 |
+
offset = 0
|
804 |
+
else:
|
805 |
+
diag_size = g.op(
|
806 |
+
"Max",
|
807 |
+
g.op("Min", g.op("Add", dim1_size, offset_op), dim2_size),
|
808 |
+
g.op("Constant", value_t=torch.LongTensor([0])),
|
809 |
+
)
|
810 |
+
diag_size = g.op("Concat", diag_size, axis_i=0)
|
811 |
+
|
812 |
+
# Calculate which diagonal values to select
|
813 |
+
# For example, in cases with offsets:
|
814 |
+
# [[0, 1.1, 0]
|
815 |
+
# [0, 0, 2.2]]
|
816 |
+
# we need to select the last two columns, so we create a tensor
|
817 |
+
# with all columns that are to be selected
|
818 |
+
# So in this example, it is [1, 2]
|
819 |
+
select_window_ones_fill = opset9.ones(g, diag_size, 4, None, None)
|
820 |
+
select_window = g.op(
|
821 |
+
"CumSum",
|
822 |
+
select_window_ones_fill,
|
823 |
+
g.op("Constant", value_t=torch.LongTensor([0])),
|
824 |
+
)
|
825 |
+
select_window = g.op(
|
826 |
+
"Add",
|
827 |
+
select_window,
|
828 |
+
g.op("Constant", value_t=torch.LongTensor([abs(offset) - 1])),
|
829 |
+
)
|
830 |
+
|
831 |
+
gather_shape = [
|
832 |
+
opset9.size(g, result, dim=g.op("Constant", value_t=torch.LongTensor([axis])))
|
833 |
+
for axis in list(range(rank))[:-2]
|
834 |
+
]
|
835 |
+
gather_shape.append(diag_size)
|
836 |
+
gather_shape = g.op("Concat", *gather_shape, axis_i=0)
|
837 |
+
gather_indices = opset9.zeros(g, gather_shape, 4, None, None)
|
838 |
+
|
839 |
+
# There might be cases where offset value is greater than number of rows/columns
|
840 |
+
# and might cause the diagonal to overrun and as a result of this, diag_size would be zero.
|
841 |
+
# For example, if
|
842 |
+
# offset = 9, dim1_size = 2 (columns), dim2_size = 4 (rows)
|
843 |
+
# diag_size = max(min(2, (4-9)), 0) = 0, based on calculation above
|
844 |
+
# Cases with diagonal overrun always result in diag_size = max(0, -ve value) = 0
|
845 |
+
# In cases without diagonal overrun, we select the appropriate rows/columns along which we
|
846 |
+
# are calculating diagonal values. In cases with diagonal overrun, we return a tensor which has
|
847 |
+
# the dimension of the row/column where overrun occurred as 0-dim, as we are essentially
|
848 |
+
# returning an empty tensor
|
849 |
+
overrun_cond = g.op(
|
850 |
+
"Not",
|
851 |
+
g.op(
|
852 |
+
"Equal",
|
853 |
+
diag_size,
|
854 |
+
g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64)),
|
855 |
+
),
|
856 |
+
)
|
857 |
+
|
858 |
+
if_op, (if_context, else_context), _ = jit_utils.add_op_with_blocks(
|
859 |
+
g, "If", overrun_cond, n_blocks=2
|
860 |
+
)
|
861 |
+
|
862 |
+
gather_indices_if_block = if_context.op("Add", gather_indices, select_window)
|
863 |
+
gather_indices_if_block = symbolic_helper._unsqueeze_helper(
|
864 |
+
if_context, gather_indices_if_block, [rank - 1]
|
865 |
+
)
|
866 |
+
final_non_overrun = if_context.op(
|
867 |
+
"GatherND", result, gather_indices_if_block, batch_dims_i=rank - 2
|
868 |
+
)
|
869 |
+
final_overrun = opset9.zeros(else_context, gather_shape, 6, None, None)
|
870 |
+
utils._add_output_to_block(if_context.block, final_non_overrun)
|
871 |
+
utils._add_output_to_block(else_context.block, final_overrun)
|
872 |
+
return if_op
|
873 |
+
|
874 |
+
|
875 |
+
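
# A minimal usage sketch (hypothetical): an offset diagonal exercises the
# EyeLike mask and the overrun If-branch above. File name is illustrative.
def _diagonal_export_demo():
    import torch

    class _Diag(torch.nn.Module):
        def forward(self, x):
            return torch.diagonal(x, offset=1, dim1=0, dim2=1)

    torch.onnx.export(
        _Diag(), torch.randn(3, 4), "diagonal_demo.onnx", opset_version=13
    )
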
# Quantized ops


@_onnx_symbolic("quantized::linear")
@_beartype.beartype
def quantized_linear(
    g: jit_utils.GraphContext, q_input, q_weight, bias, op_scale, op_zero_point
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(
        g, bias, input_scale, weight_scale, axis
    )
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.linear(g, input, weight, bias)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::linear_relu")
@_beartype.beartype
def quantized_linear_relu(
    g: jit_utils.GraphContext, q_input, q_weight, bias, op_scale, op_zero_point
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(
        g, bias, input_scale, weight_scale, axis
    )
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.linear(g, input, weight, bias)
    output = opset9.relu(g, output)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv1d_relu")
@_beartype.beartype
def quantized_conv1d_relu(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(
        g, bias, input_scale, weight_scale, axis
    )
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv1d(g, input, weight, bias, stride, padding, dilation, groups)
    output = opset9.relu(g, output)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv2d_relu")
@_beartype.beartype
def quantized_conv2d_relu(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(
        g, bias, input_scale, weight_scale, axis
    )
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv2d(g, input, weight, bias, stride, padding, dilation, groups)
    output = opset9.relu(g, output)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv3d_relu")
@_beartype.beartype
def quantized_conv3d_relu(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(
        g, bias, input_scale, weight_scale, axis
    )
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv3d(g, input, weight, bias, stride, padding, dilation, groups)
    output = opset9.relu(g, output)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv1d")
@_beartype.beartype
def quantized_conv1d(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(
        g, bias, input_scale, weight_scale, axis
    )
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv1d(g, input, weight, bias, stride, padding, dilation, groups)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv2d")
@_beartype.beartype
def quantized_conv2d(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(
        g, bias, input_scale, weight_scale, axis
    )
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv2d(g, input, weight, bias, stride, padding, dilation, groups)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv3d")
@_beartype.beartype
def quantized_conv3d(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(
        g, bias, input_scale, weight_scale, axis
    )
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv3d(g, input, weight, bias, stride, padding, dilation, groups)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv_transpose1d")
@_beartype.beartype
def quantized_conv_transpose1d(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    output_padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(
        g, bias, input_scale, weight_scale, axis
    )
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv_transpose1d(
        g, input, weight, bias, stride, padding, output_padding, groups, dilation
    )

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv_transpose2d")
@_beartype.beartype
def quantized_conv_transpose2d(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    output_padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(
        g, bias, input_scale, weight_scale, axis
    )
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv_transpose2d(
        g, input, weight, bias, stride, padding, output_padding, groups, dilation
    )

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv_transpose3d")
@_beartype.beartype
def quantized_conv_transpose3d(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    output_padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(
        g, bias, input_scale, weight_scale, axis
    )
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv_transpose3d(
        g, input, weight, bias, stride, padding, output_padding, groups, dilation
    )

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
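
# The quantized symbolics above all share one shape: dequantize the inputs, run
# the float opset-9 symbolic, then requantize the result. A minimal eager-mode
# analogue of that pattern, with illustrative scales and zero-points:
def _quantized_pattern_demo():
    import torch

    x = torch.quantize_per_tensor(
        torch.randn(1, 3, 8, 8), scale=0.1, zero_point=0, dtype=torch.quint8
    )
    w = torch.randn(4, 3, 3, 3)
    y_float = torch.nn.functional.conv2d(x.dequantize(), w)  # float compute
    return torch.quantize_per_tensor(y_float, 0.05, 0, torch.quint8)  # requantize
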
venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset15.py
ADDED
@@ -0,0 +1,82 @@
"""This file exports ONNX ops for opset 15.

Note [ONNX operators that are added/updated in opset 15]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
https://github.com/onnx/onnx/blob/master/docs/Changelog.md#version-15-of-the-default-onnx-operator-set
New operators:
    Bernoulli
    CastLike
    Optional
    OptionalGetElement
    OptionalHasElement

Updated operators:
    BatchNormalization https://github.com/onnx/onnx/pull/3545
        Backwards compatible
        TODO: test coverage for mixed types inputs.
    Pow https://github.com/onnx/onnx/pull/3412
        Backwards compatible
        TODO: bfloat16 support.
    Shape https://github.com/onnx/onnx/pull/3580
        Backwards compatible
        TODO: optional start/end attribute.
"""

# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in README.md

import functools

import torch
from torch import _C
from torch.onnx import symbolic_helper, symbolic_opset9 as opset9
from torch.onnx._internal import _beartype, jit_utils, registration

_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=15)


@_onnx_symbolic("aten::__is_")
@_beartype.beartype
def aten__is_(g: jit_utils.GraphContext, self, other):
    if symbolic_helper._is_none(other):
        if isinstance(self.type(), _C.OptionalType):
            none = g.op("OptionalHasElement", self)
            return g.op("Not", none)
        else:
            return g.op("Constant", value_t=torch.BoolTensor([0]))
    return opset9.eq(g, self, other)


@_onnx_symbolic("aten::__isnot_")
@opset9.wrap_logical_op_with_negation  # type: ignore[has-type]
@_beartype.beartype
def aten__isnot_(g: jit_utils.GraphContext, self, other):
    return aten__is_(g, self, other)


@_onnx_symbolic("aten::bernoulli")
@_beartype.beartype
def bernoulli(g: jit_utils.GraphContext, input, p=None, generator=None, out=None):
    if out is not None and not symbolic_helper._is_none(out):
        symbolic_helper._unimplemented(
            "Bernoulli", "out parameter is not supported for bernoulli", input
        )
    if generator is not None and not symbolic_helper._is_none(generator):
        symbolic_helper._unimplemented(
            "Bernoulli", "generator is not supported for bernoulli", input
        )
    if p is None or symbolic_helper._is_none(p):
        return g.op("Bernoulli", input)
    return opset9.bernoulli(g, input, p, generator, out)


@_onnx_symbolic("prim::unchecked_cast")
@_beartype.beartype
def prim_unchecked_cast(g: jit_utils.GraphContext, self):
    # exists to refine the type of the Value
    # if x is Optional[Tensor], unchecked_cast will cast
    # x to Tensor, so the rest of the graph knows that x is a Tensor.
    if isinstance(self.type(), _C.OptionalType):
        return g.op("OptionalGetElement", self)

    return self
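
# A minimal usage sketch (hypothetical) of the opset-15 Bernoulli path above:
# with p left unset, the input tensor itself supplies the probabilities.
# File name is illustrative.
def _bernoulli_export_demo():
    import torch

    class _Bernoulli(torch.nn.Module):
        def forward(self, probs):
            return torch.bernoulli(probs)

    torch.onnx.export(
        _Bernoulli(), torch.rand(2, 3), "bernoulli_demo.onnx", opset_version=15
    )
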
venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset16.py
ADDED
@@ -0,0 +1,187 @@
"""This file exports ONNX ops for opset 16.

Note [ONNX Operators that are added/updated in opset 16]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
https://github.com/onnx/onnx/blob/main/docs/Changelog.md#version-16-of-the-default-onnx-operator-set
New operators:
    GridSample https://github.com/onnx/onnx/pull/3557

Updated operators:
    Identity
    If
    LeakyRelu
    Loop
    PRelu
    RoiAlign
    Scan
    ScatterElements
    ScatterND
    Where
    GreaterOrEqual
    LessOrEqual
"""

# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in README.md

import functools

import torch
from torch.nn.functional import (
    GRID_SAMPLE_INTERPOLATION_MODES,
    GRID_SAMPLE_PADDING_MODES,
)
from torch.onnx import _type_utils, errors, symbolic_helper, utils
from torch.onnx._internal import _beartype, jit_utils, registration

_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=16)


# note (mkozuki): Why `grid_sampler` instead of `grid_sample`?
# Because `torch.nn.functional.grid_sample` calls `torch.grid_sampler`.
@_onnx_symbolic("aten::grid_sampler")
@symbolic_helper.parse_args("v", "v", "i", "i", "b")
@_beartype.beartype
def grid_sampler(
    g: jit_utils.GraphContext,
    input,
    grid,
    mode_enum,
    padding_mode_enum,
    align_corners,
):
    # Check the input and grid tensor rank beforehand.
    if symbolic_helper._get_tensor_rank(input) == 5:
        return symbolic_helper._onnx_unsupported("GridSample with 5D volumetric input")
    mode_s = {v: k for k, v in GRID_SAMPLE_INTERPOLATION_MODES.items()}[mode_enum]  # type: ignore[call-arg]
    padding_mode_s = {v: k for k, v in GRID_SAMPLE_PADDING_MODES.items()}[padding_mode_enum]  # type: ignore[call-arg]
    return g.op(
        "GridSample",
        input,
        grid,
        align_corners_i=int(align_corners),
        mode_s=mode_s,
        padding_mode_s=padding_mode_s,
    )


@_onnx_symbolic("aten::scatter_add")
@symbolic_helper.parse_args("v", "i", "v", "v")
@_beartype.beartype
def scatter_add(g: jit_utils.GraphContext, self, dim, index, src):
    if symbolic_helper.is_caffe2_aten_fallback():
        return g.at("scatter", self, dim, index, src, overload_name="src")

    src_type = _type_utils.JitScalarType.from_value(
        src, _type_utils.JitScalarType.UNDEFINED
    )
    src_sizes = symbolic_helper._get_tensor_sizes(src)
    index_sizes = symbolic_helper._get_tensor_sizes(index)

    if len(src_sizes) != len(index_sizes):
        return symbolic_helper._unimplemented(
            "scatter_add",
            f"`index` ({index_sizes}) should have the same dimensionality as `src` ({src_sizes})",
        )

    # PyTorch only allows index shape <= src shape, so we can only consider
    # taking index as subset size to src, like PyTorch does. When sizes for src
    # and index are not matched or there are dynamic axes, we take index shape to
    # slice src to accommodate.
    if src_sizes != index_sizes or None in index_sizes:
        adjusted_shape = g.op("Shape", index)
        starts = g.op("Constant", value_t=torch.tensor([0] * len(index_sizes)))
        src = g.op("Slice", src, starts, adjusted_shape)

    src = symbolic_helper._maybe_get_scalar(src)
    if symbolic_helper._is_value(src):
        return g.op("ScatterElements", self, index, src, axis_i=dim, reduction_s="add")
    else:
        # Check if scalar "src" has same type as self (PyTorch allows different
        # type for scalar src (but not when src is tensor)). If not, insert Cast node.
        if _type_utils.JitScalarType.from_value(self) != src_type:
            src = g.op(
                "Cast",
                src,
                to_i=_type_utils.JitScalarType.from_value(self).onnx_type(),
            )

        return g.op(
            "ScatterElements",
            self,
            index,
            src,
            axis_i=dim,
            reduction_s="add",
        )


@_onnx_symbolic("aten::scatter_reduce")
@symbolic_helper.parse_args("v", "i", "v", "v", "s", "b")
@_beartype.beartype
def scatter_reduce(
    g: jit_utils.GraphContext,
    self: torch._C.Value,
    dim: int,
    index: torch._C.Value,
    src: torch._C.Value,
    reduce: str,
    include_self: bool,
):
    if reduce == "mean":
        raise errors.OnnxExporterError(
            "ONNX does not support mean reduction for scatter_reduce"
        )
    if not include_self:
        raise errors.OnnxExporterError(
            "ONNX does not support include_self=False for scatter_reduce"
        )

    reduce_mode = {  # convert torch string name to onnx string name
        "mean": "none",  # 'mean' is not supported in the ONNX 1.14 definition
        "sum": "add",
        "prod": "mul",
        "amin": "min",
        "amax": "max",
    }
    onnx_reduce = reduce_mode[reduce]

    self_rank = g.op("Size", g.op("Shape", self))

    # if self_rank == 0:  # assert (index_rank == 0 and rank_src == 0)
    self_rank_is_zero = g.op(
        "Equal", self_rank, g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64))
    )
    if_op, (if_context, else_context), _ = jit_utils.add_op_with_blocks(
        g, "If", self_rank_is_zero, n_blocks=2, outputs=3
    )
    neg_1 = if_context.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64))

    self_reshape = if_context.op("Reshape", self, neg_1)
    utils._add_output_to_block(if_context.block, self_reshape)
    index_reshape = if_context.op("Reshape", index, neg_1)
    utils._add_output_to_block(if_context.block, index_reshape)
    src_reshape = if_context.op("Reshape", src, neg_1)
    utils._add_output_to_block(if_context.block, src_reshape)

    self_identity = else_context.op("Identity", self)
    utils._add_output_to_block(else_context.block, self_identity)
    index_identity = else_context.op("Identity", index)
    utils._add_output_to_block(else_context.block, index_identity)
    src_identity = else_context.op("Identity", src)
    utils._add_output_to_block(else_context.block, src_identity)

    result = g.op("ScatterElements", *if_op, axis_i=dim, reduction_s=onnx_reduce)

    # if self_rank == 0:
    if_op, (if_context, else_context), _ = jit_utils.add_op_with_blocks(
        g, "If", self_rank_is_zero, n_blocks=2, outputs=1
    )
    result_squeezed = if_context.op("Squeeze", result)
    utils._add_output_to_block(if_context.block, result_squeezed)
    result_identity = else_context.op("Identity", result)
    utils._add_output_to_block(else_context.block, result_identity)
    result_final = if_op.node().output()

    return result_final
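A hedged usage sketch for the `grid_sampler` symbolic above (names are illustrative; only the public `torch.onnx.export` API is assumed): 4-D inputs lower to the ONNX `GridSample` op, while 5-D volumetric inputs are rejected by the rank check.

    import io

    import torch
    import torch.nn.functional as F

    class Warp(torch.nn.Module):
        def forward(self, image, grid):
            return F.grid_sample(
                image, grid, mode="bilinear", padding_mode="zeros", align_corners=False
            )

    image = torch.rand(1, 3, 8, 8)         # NCHW; a 5-D input would hit _onnx_unsupported
    grid = torch.rand(1, 8, 8, 2) * 2 - 1  # sampling locations in [-1, 1]
    torch.onnx.export(Warp(), (image, grid), io.BytesIO(), opset_version=16)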
venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset17.py
ADDED
@@ -0,0 +1,211 @@
"""This file exports ONNX ops for opset 17.

Note [ONNX Operators that are added/updated in opset 17]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
https://github.com/onnx/onnx/blob/main/docs/Changelog.md#version-17-of-the-default-onnx-operator-set
New operators:
    BlackmanWindow
    DFT
    HammingWindow
    HannWindow
    LayerNormalization
    MelWeightMatrix
    STFT
    SequenceMap
"""

import functools
from typing import Optional, Sequence

import torch
from torch import _C
from torch.onnx import _type_utils, errors, symbolic_helper
from torch.onnx._internal import _beartype, jit_utils, registration

# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in README.md

__all__ = ["layer_norm", "stft"]

_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=17)


@_onnx_symbolic("aten::layer_norm")
@symbolic_helper.parse_args("v", "is", "v", "v", "f", "none")
def layer_norm(
    g: jit_utils.GraphContext,
    input: _C.Value,
    normalized_shape: Sequence[int],
    weight: _C.Value,
    bias: _C.Value,
    eps: float,
    cudnn_enable: bool,
):
    # normalized_shape: input shape from an expected input of size
    # axis: The first normalization dimension.
    # layer_norm normalizes on the last D dimensions,
    # where D is the size of normalized_shape
    axis = -len(normalized_shape)
    scalar_type = _type_utils.JitScalarType.from_value(
        input, _type_utils.JitScalarType.FLOAT
    )
    dtype = scalar_type.dtype()
    if symbolic_helper._is_none(weight):
        weight_value = torch.ones(normalized_shape, dtype=dtype)
        weight = g.op("Constant", value_t=weight_value)
    if symbolic_helper._is_none(bias):
        bias_value = torch.zeros(normalized_shape, dtype=dtype)
        bias = g.op("Constant", value_t=bias_value)
    return g.op(
        "LayerNormalization",
        input,
        weight,
        bias,
        epsilon_f=eps,
        axis_i=axis,
    )


def _compute_edge_sizes(n_fft, window_size):
    """Helper function to compute the sizes of the edges (left and right)
    of a given window centered within an FFT size."""
    left = (n_fft - window_size) // 2
    right = n_fft - left - window_size
    return left, right


@_onnx_symbolic("aten::stft")
@symbolic_helper.parse_args("v", "i", "i", "i", "v", "b", "b", "b")
@_beartype.beartype
def stft(
    g: jit_utils.GraphContext,
    input: _C.Value,
    n_fft: int,
    hop_length: Optional[int] = None,
    win_length: Optional[int] = None,
    window: Optional[_C.Value] = None,
    normalized: bool = False,
    onesided: Optional[bool] = True,
    return_complex: Optional[bool] = False,
) -> _C.Value:
    """Associates `torch.stft` with the `STFT` ONNX operator.
    Note that torch.stft calls _VF.stft, without centering or padding options.
    Hence, this function does not contain these two arguments.
    See torch.stft source code for more info.

    Args:
        g: Graph to write the ONNX representation into
        input: Input tensor for the transformation
        n_fft: FFT size
        hop_length: Size of the hop. Defaults to `n_fft // 4`
        win_length: Size of the analysis window. Defaults to `n_fft`
        window: Analysis window. Defaults to a window of all ones
        normalized: Whether to return a normalized STFT
        onesided: Whether to return only half (+1) of the results, given the
            symmetry of the STFT
        return_complex: Whether to return the complex value (Note: Must be
            `False` or `None`)

    Returns:
        op: Operator for torch.stft associated with STFT (ONNX)
    """
    # Checks
    if return_complex:
        raise errors.SymbolicValueError(
            msg="STFT does not currently support complex types", value=input
        )

    # Get STFT sizes
    frame_step_value = hop_length if hop_length is not None else n_fft // 4
    frame_step_const = g.op(
        "Constant", value_t=torch.tensor(frame_step_value, dtype=torch.int64)
    )
    frame_length_const = g.op(
        "Constant", value_t=torch.tensor(n_fft, dtype=torch.int64)
    )

    # Pre-process input if needed
    signal = input
    signal_rank = symbolic_helper._get_tensor_rank(signal)
    if signal_rank == 1:
        # Add batch dimension
        signal = g.op(
            "Unsqueeze",
            signal,
            g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)),
        )
    elif signal_rank > 2:
        raise errors.SymbolicValueError(
            msg="STFT can only take inputs of 1 [signal] or 2 [batch, signal] dimensions. "
            f"Current rank of signal is {signal_rank}, please reduce it.",
            value=input,
        )

    # Get window and make sure it's the same size as `win_length` or `n_fft`
    n_win = symbolic_helper._get_tensor_dim_size(window, dim=0)
    if n_win is not None:
        win_length_default = win_length if win_length else n_fft
        assert n_win == win_length_default, (
            "Analysis window size must equal `win_length` or `n_fft`. "
            f"Please, set `win_length` or `n_fft` to match `window` size ({n_win})"
        )

        # Center window around zeros if needed (required by ONNX's STFT)
        if n_win < n_fft:
            left, right = _compute_edge_sizes(n_fft, n_win)
            left_win = g.op("Constant", value_t=torch.zeros(left))
            right_win = g.op("Constant", value_t=torch.zeros(right))
            window = g.op("Concat", left_win, window, right_win, axis_i=0)

    # Create window, if needed
    if symbolic_helper._is_none(window):
        if win_length:
            if win_length > n_fft:
                raise errors.SymbolicValueError(
                    msg="The analysis window can't be longer than the size of the FFT. "
                    f"Please set `win_length` ({win_length}) to `n_fft` ({n_fft}) or less.",
                    value=input,
                )

            # Center window, if needed
            left, right = _compute_edge_sizes(n_fft, win_length)
            torch_window = torch.hstack(
                (torch.zeros(left), torch.ones(win_length), torch.zeros(right))
            )
        else:
            # Rectangle window
            torch_window = torch.ones(n_fft)
        assert torch_window.shape[0] == n_fft
        window = g.op("Constant", value_t=torch_window)
    window = g.op(
        "Cast", window, to_i=_type_utils.JitScalarType.from_value(signal).onnx_type()
    )

    # Run STFT
    result = g.op(
        "STFT",
        signal,
        frame_step_const,
        window,
        frame_length_const,
        onesided_i=1 if onesided is None or onesided else 0,
    )

    # Transpose to mimic torch.stft's behavior
    result = g.op("Transpose", result, perm_i=[0, 2, 1, 3])

    # Remove batch dimension, if needed
    if signal_rank == 1:
        result = g.op(
            "Squeeze",
            result,
            g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)),
        )

    # Normalize, if needed
    if normalized:
        sqrt_nfft = torch.sqrt(torch.tensor(n_fft, dtype=signal.type().dtype()))
        result = g.op("Div", result, g.op("Constant", value_t=sqrt_nfft))

    return result
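A sketch of driving the `stft` symbolic above through `torch.stft` (values are illustrative; `return_complex=False` is required because the symbolic raises on complex output, and `center=False` matches the uncentered `_VF.stft` path the docstring describes):

    import io

    import torch

    class Spectrogram(torch.nn.Module):
        def forward(self, signal):
            window = torch.hann_window(64)  # becomes a Constant at trace time
            return torch.stft(
                signal, n_fft=64, hop_length=16, window=window,
                center=False, return_complex=False,
            )

    torch.onnx.export(Spectrogram(), (torch.randn(1, 1024),), io.BytesIO(), opset_version=17)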
venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset18.py
ADDED
@@ -0,0 +1,70 @@
"""This file exports ONNX ops for opset 18.

Note [ONNX Operators that are added/updated in opset 18]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
https://github.com/onnx/onnx/blob/main/docs/Changelog.md#version-18-of-the-default-onnx-operator-set
New operators:
    CenterCropPad
    Col2Im
    Mish
    OptionalGetElement
    OptionalHasElement
    Pad
    Resize
    ScatterElements
    ScatterND
"""

import functools
from typing import Sequence

from torch import _C
from torch.onnx import symbolic_helper
from torch.onnx._internal import _beartype, registration

# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py

__all__ = ["col2im"]

_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=18)


@_onnx_symbolic("aten::col2im")
@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is")
@_beartype.beartype
def col2im(
    g,
    input: _C.Value,
    output_size: _C.Value,
    kernel_size: _C.Value,
    dilation: Sequence[int],
    padding: Sequence[int],
    stride: Sequence[int],
):
    # convert [i0, i1, ..., in] into [i0, i0, i1, i1, ..., in, in]
    adjusted_padding = []
    for pad in padding:
        for _ in range(2):
            adjusted_padding.append(pad)

    num_dimensional_axis = symbolic_helper._get_tensor_sizes(output_size)[0]
    if not adjusted_padding:
        adjusted_padding = [0, 0] * num_dimensional_axis

    if not dilation:
        dilation = [1] * num_dimensional_axis

    if not stride:
        stride = [1] * num_dimensional_axis

    return g.op(
        "Col2Im",
        input,
        output_size,
        kernel_size,
        dilations_i=dilation,
        pads_i=adjusted_padding,
        strides_i=stride,
    )
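A sketch for `col2im`: `torch.nn.Fold` lowers to `aten::col2im`, which the symbolic above maps onto the `Col2Im` operator. This assumes the installed exporter accepts `opset_version=18`; shapes are illustrative.

    import io

    import torch

    fold = torch.nn.Fold(output_size=(4, 4), kernel_size=(2, 2))
    blocks = torch.randn(1, 3 * 2 * 2, 9)  # (N, C * prod(kernel_size), L) with L = 3 * 3 positions
    torch.onnx.export(fold, (blocks,), io.BytesIO(), opset_version=18)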
venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset7.py
ADDED
@@ -0,0 +1,66 @@
"""
Note [ONNX operators that are added/updated from opset 7 to opset 8]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
New operators:
    Expand

Updated operators:
    Min, Max, Sum, Mean: supports multidirectional broadcasting.
    MaxPool: added optional indices output.
    Scan
"""

import functools
import warnings

from torch.onnx import symbolic_helper, symbolic_opset9 as opset9
from torch.onnx._internal import jit_utils, registration


_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=7)

block_listed_operators = (
    "scan",
    "expand",
    "expand_as",
    "meshgrid",
    "adaptive_max_pool1d",
    "adaptive_max_pool2d",
    "adaptive_max_pool3d",
    "max_pool1d_with_indices",
    "max_pool2d_with_indices",
    "max_pool3d_with_indices",
)


# NOTE: max, min, sum, mean: broadcasting is not supported in opset 7.
# torch.max (same for torch.min) actually has two interfaces smashed together:
# torch.max(x, dim, keepdim) and torch.max(x, y)
@_onnx_symbolic("aten::max")
def max(g: jit_utils.GraphContext, self, dim_or_y=None, keepdim=None):
    # torch.max(input, other)
    if keepdim is None and dim_or_y is not None:
        warnings.warn(
            "Multidirectional broadcasting is not supported in opset 7. "
            "This might cause the onnx model to be incorrect, if inputs to max operators "
            "have different shapes"
        )
    return opset9.max(g, self, dim_or_y, keepdim)


@_onnx_symbolic("aten::min")
def min(g: jit_utils.GraphContext, self, dim_or_y=None, keepdim=None):
    # torch.min(input, other)
    if keepdim is None and dim_or_y is not None:
        warnings.warn(
            "Multidirectional broadcasting is not supported in opset 7. "
            "This might cause the onnx model to be incorrect, if inputs to min operators "
            "have different shapes"
        )
    return opset9.min(g, self, dim_or_y, keepdim)


for block_listed_op in block_listed_operators:
    _onnx_symbolic(f"aten::{block_listed_op}")(
        symbolic_helper._block_list_in_opset(block_listed_op)
    )
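A sketch of the opset-7 `max` path above: the two-tensor form binds `dim_or_y` to a tensor with `keepdim` left as None, so the broadcasting warning fires before falling through to the opset-9 implementation (module name illustrative):

    import io

    import torch

    class PairwiseMax(torch.nn.Module):
        def forward(self, x, y):
            return torch.max(x, y)  # two-input form triggers the warning branch

    torch.onnx.export(
        PairwiseMax(), (torch.randn(2, 3), torch.randn(2, 3)), io.BytesIO(), opset_version=7
    )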
venv/lib/python3.10/site-packages/torch/onnx/utils.py
ADDED
@@ -0,0 +1,2121 @@
"""Functions to export models into the ONNX IR format.

These models can be loaded with the ONNX library and then
converted to models which run on other deep learning frameworks.
"""
from __future__ import annotations

import contextlib
import copy
import inspect
import io
import re
import textwrap
import typing
import warnings
from typing import (
    Any,
    Callable,
    cast,
    Collection,
    Dict,
    List,
    Mapping,
    Optional,
    Sequence,
    Set,
    Tuple,
    Type,
    Union,
)

import torch
import torch._C._onnx as _C_onnx
import torch.jit._trace
import torch.serialization
from torch import _C
from torch.onnx import (  # noqa: F401
    _constants,
    _exporter_states,
    errors,
    symbolic_caffe2,
    symbolic_helper,
)
from torch.onnx._globals import GLOBALS
from torch.onnx._internal import (
    _beartype,
    diagnostics,
    jit_utils,
    onnx_proto_utils,
    registration,
)

__all__ = [
    "is_in_onnx_export",
    "select_model_mode_for_export",
    "disable_apex_o2_state_dict_hook",
    "setup_onnx_logging",
    "exporter_context",
    "export",
    "model_signature",
    "warn_on_static_input_change",
    "unpack_quantized_tensor",
    "export_to_pretty_string",
    "unconvertible_ops",
    "register_custom_op_symbolic",
    "unregister_custom_op_symbolic",
]


def is_in_onnx_export() -> bool:
    """Returns whether it is in the middle of ONNX export."""
    return GLOBALS.in_onnx_export


# TODO(justinchuby): Remove dependency to this global variable from constant_fold.cpp
# Skip check due to cannot import IValue from torch._C
_params_dict = {}  # type: ignore[var-annotated]


@contextlib.contextmanager
@_beartype.beartype
def select_model_mode_for_export(model, mode: _C_onnx.TrainingMode):
    r"""A context manager to temporarily set the training mode of ``model``
    to ``mode``, resetting it when we exit the with-block.

    Args:
        model: Same type and meaning as ``model`` arg to :func:`export`.
        mode: Same type and meaning as ``training`` arg to :func:`export`.
    """
    if not isinstance(mode, _C_onnx.TrainingMode):
        raise TypeError(
            f"'mode' should be a torch.onnx.TrainingMode enum, but got '{type(mode)}'."
        )
    originally_training: bool = False

    if hasattr(model, "training"):
        originally_training = model.training

    # ONNX opset 12 has better support for training-amenable models, with updated
    # versions of the dropout and batch_norm operators
    if mode == _C_onnx.TrainingMode.TRAINING or (
        mode == _C_onnx.TrainingMode.PRESERVE and originally_training
    ):
        GLOBALS.export_training = True
        if GLOBALS.export_onnx_opset_version < 12:
            warnings.warn(
                "You are exporting the model in training mode with onnx opset "
                f"version {GLOBALS.export_onnx_opset_version}. "
                "Opset versions lower than opset 12 will not be able to export "
                "nodes such as Dropout and BatchNorm correctly."
            )
    else:
        GLOBALS.export_training = False

    GLOBALS.training_mode = mode
    if mode == _C_onnx.TrainingMode.TRAINING:
        model.train(True)
    elif mode == _C_onnx.TrainingMode.EVAL:
        model.train(False)
    # else mode == _C_onnx.TrainingMode.PRESERVE, do nothing

    try:
        yield
    finally:
        if hasattr(model, "training") and not mode == _C_onnx.TrainingMode.PRESERVE:
            model.train(originally_training)


@contextlib.contextmanager
@_beartype.beartype
def disable_apex_o2_state_dict_hook(
    model: Union[torch.nn.Module, torch.jit.ScriptFunction]
):
    # Apex O2 hooks state_dict to return fp16 weights as fp32.
    # Exporter cannot identify them as the same tensors.
    # Since this hook is only used by the optimizer, it is safe to
    # remove this hook while exporting.
    if not isinstance(model, torch.jit.ScriptFunction):
        model_hooks = {}  # type: ignore[var-annotated]
        for module in model.modules():
            for key, hook in module._state_dict_hooks.items():
                if type(hook).__name__ == "O2StateDictHook":
                    if module not in model_hooks:
                        model_hooks[module] = {}
                    model_hooks[module][key] = hook
            if module in model_hooks:
                for key in model_hooks[module]:
                    module._state_dict_hooks.pop(key)
        try:
            yield
        finally:
            # Add the hooks back
            for module, m_map in model_hooks.items():
                for key, hook in m_map.items():
                    module._state_dict_hooks[key] = hook
    else:
        try:
            yield
        finally:
            pass


@contextlib.contextmanager
@_beartype.beartype
def setup_onnx_logging(verbose: bool):
    is_originally_enabled = torch.onnx.is_onnx_log_enabled()
    if is_originally_enabled or verbose:
        torch.onnx.enable_log()
    try:
        yield
    finally:
        if not is_originally_enabled:
            torch.onnx.disable_log()


@contextlib.contextmanager
@_beartype.beartype
def exporter_context(model, mode: _C_onnx.TrainingMode, verbose: bool):
    with select_model_mode_for_export(
        model, mode
    ) as mode_ctx, disable_apex_o2_state_dict_hook(
        model
    ) as apex_ctx, setup_onnx_logging(
        verbose
    ) as log_ctx, diagnostics.create_export_diagnostic_context() as diagnostic_ctx:
        yield (mode_ctx, apex_ctx, log_ctx, diagnostic_ctx)

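
# A minimal sketch of how the context manager above behaves (the demo guard and
# names below are an editorial assumption for illustration, not part of the
# exporter): dropout is disabled inside the block and the original training
# mode is restored on exit.
if __name__ == "__main__":
    _demo_model = torch.nn.Dropout(p=0.5)
    _demo_model.train()
    with select_model_mode_for_export(_demo_model, _C_onnx.TrainingMode.EVAL):
        assert not _demo_model.training  # eval mode forced inside the block
    assert _demo_model.training  # original mode restored on exit
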
189 |
+
@_beartype.beartype
|
190 |
+
def export(
|
191 |
+
model: Union[torch.nn.Module, torch.jit.ScriptModule, torch.jit.ScriptFunction],
|
192 |
+
args: Union[Tuple[Any, ...], torch.Tensor],
|
193 |
+
f: Union[str, io.BytesIO],
|
194 |
+
export_params: bool = True,
|
195 |
+
verbose: bool = False,
|
196 |
+
training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL,
|
197 |
+
input_names: Optional[Sequence[str]] = None,
|
198 |
+
output_names: Optional[Sequence[str]] = None,
|
199 |
+
operator_export_type: _C_onnx.OperatorExportTypes = _C_onnx.OperatorExportTypes.ONNX,
|
200 |
+
opset_version: Optional[int] = None,
|
201 |
+
do_constant_folding: bool = True,
|
202 |
+
dynamic_axes: Optional[
|
203 |
+
Union[Mapping[str, Mapping[int, str]], Mapping[str, Sequence[int]]]
|
204 |
+
] = None,
|
205 |
+
keep_initializers_as_inputs: Optional[bool] = None,
|
206 |
+
custom_opsets: Optional[Mapping[str, int]] = None,
|
207 |
+
export_modules_as_functions: Union[bool, Collection[Type[torch.nn.Module]]] = False,
|
208 |
+
autograd_inlining: Optional[bool] = True,
|
209 |
+
) -> None:
|
210 |
+
r"""Exports a model into ONNX format.
|
211 |
+
|
212 |
+
If ``model`` is not a :class:`torch.jit.ScriptModule` nor a
|
213 |
+
:class:`torch.jit.ScriptFunction`, this runs
|
214 |
+
``model`` once in order to convert it to a TorchScript graph to be exported
|
215 |
+
(the equivalent of :func:`torch.jit.trace`). Thus this has the same limited support
|
216 |
+
for dynamic control flow as :func:`torch.jit.trace`.
|
217 |
+
|
218 |
+
Args:
|
219 |
+
model (:class:`torch.nn.Module`, :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`):
|
220 |
+
the model to be exported.
|
221 |
+
args (tuple or torch.Tensor):
|
222 |
+
|
223 |
+
args can be structured either as:
|
224 |
+
|
225 |
+
1. ONLY A TUPLE OF ARGUMENTS::
|
226 |
+
|
227 |
+
args = (x, y, z)
|
228 |
+
|
229 |
+
The tuple should contain model inputs such that ``model(*args)`` is a valid
|
230 |
+
invocation of the model. Any non-Tensor arguments will be hard-coded into the
|
231 |
+
exported model; any Tensor arguments will become inputs of the exported model,
|
232 |
+
in the order they occur in the tuple.
|
233 |
+
|
234 |
+
2. A TENSOR::
|
235 |
+
|
236 |
+
args = torch.Tensor([1])
|
237 |
+
|
238 |
+
This is equivalent to a 1-ary tuple of that Tensor.
|
239 |
+
|
240 |
+
3. A TUPLE OF ARGUMENTS ENDING WITH A DICTIONARY OF NAMED ARGUMENTS::
|
241 |
+
|
242 |
+
args = (
|
243 |
+
x,
|
244 |
+
{
|
245 |
+
"y": input_y,
|
246 |
+
"z": input_z
|
247 |
+
}
|
248 |
+
)
|
249 |
+
|
250 |
+
All but the last element of the tuple will be passed as non-keyword arguments,
|
251 |
+
and named arguments will be set from the last element. If a named argument is
|
252 |
+
not present in the dictionary, it is assigned the default value, or None if a
|
253 |
+
default value is not provided.
|
254 |
+
|
255 |
+
.. note::
|
256 |
+
If a dictionary is the last element of the args tuple, it will be
|
257 |
+
interpreted as containing named arguments. In order to pass a dict as the
|
258 |
+
last non-keyword arg, provide an empty dict as the last element of the args
|
259 |
+
tuple. For example, instead of::
|
260 |
+
|
261 |
+
torch.onnx.export(
|
262 |
+
model,
|
263 |
+
(
|
264 |
+
x,
|
265 |
+
# WRONG: will be interpreted as named arguments
|
266 |
+
{y: z}
|
267 |
+
),
|
268 |
+
"test.onnx.pb"
|
269 |
+
)
|
270 |
+
|
271 |
+
Write::
|
272 |
+
|
273 |
+
torch.onnx.export(
|
274 |
+
model,
|
275 |
+
(
|
276 |
+
x,
|
277 |
+
{y: z},
|
278 |
+
{}
|
279 |
+
),
|
280 |
+
"test.onnx.pb"
|
281 |
+
)
|
282 |
+
|
283 |
+
f: a file-like object (such that ``f.fileno()`` returns a file descriptor)
|
284 |
+
or a string containing a file name. A binary protocol buffer will be written
|
285 |
+
to this file.
|
286 |
+
export_params (bool, default True): if True, all parameters will
|
287 |
+
be exported. Set this to False if you want to export an untrained model.
|
288 |
+
In this case, the exported model will first take all of its parameters
|
289 |
+
as arguments, with the ordering as specified by ``model.state_dict().values()``
|
290 |
+
verbose (bool, default False): if True, prints a description of the
|
291 |
+
model being exported to stdout. In addition, the final ONNX graph will include the
|
292 |
+
field ``doc_string``` from the exported model which mentions the source code locations
|
293 |
+
for ``model``. If True, ONNX exporter logging will be turned on.
|
294 |
+
training (enum, default TrainingMode.EVAL):
|
295 |
+
* ``TrainingMode.EVAL``: export the model in inference mode.
|
296 |
+
* ``TrainingMode.PRESERVE``: export the model in inference mode if model.training is
|
297 |
+
False and in training mode if model.training is True.
|
298 |
+
* ``TrainingMode.TRAINING``: export the model in training mode. Disables optimizations
|
299 |
+
which might interfere with training.
|
300 |
+
input_names (list of str, default empty list): names to assign to the
|
301 |
+
input nodes of the graph, in order.
|
302 |
+
output_names (list of str, default empty list): names to assign to the
|
303 |
+
output nodes of the graph, in order.
|
304 |
+
operator_export_type (enum, default OperatorExportTypes.ONNX):
|
305 |
+
|
306 |
+
* ``OperatorExportTypes.ONNX``: Export all ops as regular ONNX ops
|
307 |
+
(in the default opset domain).
|
308 |
+
* ``OperatorExportTypes.ONNX_FALLTHROUGH``: Try to convert all ops
|
309 |
+
to standard ONNX ops in the default opset domain. If unable to do so
|
310 |
+
(e.g. because support has not been added to convert a particular torch op to ONNX),
|
311 |
+
fall back to exporting the op into a custom opset domain without conversion. Applies
|
312 |
+
to `custom ops <https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html>`_
|
313 |
+
as well as ATen ops. For the exported model to be usable, the runtime must support
|
314 |
+
these non-standard ops.
|
315 |
+
* ``OperatorExportTypes.ONNX_ATEN``: All ATen ops (in the TorchScript namespace "aten")
|
316 |
+
are exported as ATen ops (in opset domain "org.pytorch.aten").
|
317 |
+
`ATen <https://pytorch.org/cppdocs/#aten>`_ is PyTorch's built-in tensor library, so
|
318 |
+
this instructs the runtime to use PyTorch's implementation of these ops.
|
319 |
+
|
320 |
+
.. warning::
|
321 |
+
|
322 |
+
Models exported this way are probably runnable only by Caffe2.
|
323 |
+
|
324 |
+
This may be useful if the numeric differences in implementations of operators are
|
325 |
+
causing large differences in behavior between PyTorch and Caffe2 (which is more
|
326 |
+
common on untrained models).
|
327 |
+
|
328 |
+
* ``OperatorExportTypes.ONNX_ATEN_FALLBACK``: Try to export each ATen op
|
329 |
+
(in the TorchScript namespace "aten") as a regular ONNX op. If we are unable to do so
|
330 |
+
(e.g. because support has not been added to convert a particular torch op to ONNX),
|
331 |
+
fall back to exporting an ATen op. See documentation on OperatorExportTypes.ONNX_ATEN for
|
332 |
+
context.
|
333 |
+
For example::
|
334 |
+
|
335 |
+
graph(%0 : Float):
|
336 |
+
%3 : int = prim::Constant[value=0]()
|
337 |
+
# conversion unsupported
|
338 |
+
%4 : Float = aten::triu(%0, %3)
|
339 |
+
# conversion supported
|
340 |
+
%5 : Float = aten::mul(%4, %0)
|
341 |
+
return (%5)
|
342 |
+
|
343 |
+
Assuming ``aten::triu`` is not supported in ONNX, this will be exported as::
|
344 |
+
|
345 |
+
graph(%0 : Float):
|
346 |
+
%1 : Long() = onnx::Constant[value={0}]()
|
347 |
+
# not converted
|
348 |
+
%2 : Float = aten::ATen[operator="triu"](%0, %1)
|
349 |
+
# converted
|
350 |
+
%3 : Float = onnx::Mul(%2, %0)
|
351 |
+
return (%3)
|
352 |
+
|
353 |
+
If PyTorch was built with Caffe2 (i.e. with ``BUILD_CAFFE2=1``), then
|
354 |
+
Caffe2-specific behavior will be enabled, including special support
|
355 |
+
for ops are produced by the modules described in
|
356 |
+
`Quantization <https://pytorch.org/docs/stable/quantization.html>`_.
|
357 |
+
|
358 |
+
.. warning::
|
359 |
+
|
360 |
+
Models exported this way are probably runnable only by Caffe2.
|
361 |
+
|
362 |
+
opset_version (int, default 17): The version of the
|
363 |
+
`default (ai.onnx) opset <https://github.com/onnx/onnx/blob/master/docs/Operators.md>`_
|
364 |
+
to target. Must be >= 7 and <= 17.
|
365 |
+
do_constant_folding (bool, default True): Apply the constant-folding optimization.
|
366 |
+
Constant-folding will replace some of the ops that have all constant inputs
|
367 |
+
with pre-computed constant nodes.
|
368 |
+
dynamic_axes (dict[string, dict[int, string]] or dict[string, list(int)], default empty dict):
|
369 |
+
|
370 |
+
By default the exported model will have the shapes of all input and output tensors
|
371 |
+
set to exactly match those given in ``args``. To specify axes of tensors as
|
372 |
+
dynamic (i.e. known only at run-time), set ``dynamic_axes`` to a dict with schema:
|
373 |
+
|
374 |
+
* KEY (str): an input or output name. Each name must also be provided in ``input_names`` or
|
375 |
+
``output_names``.
|
376 |
+
* VALUE (dict or list): If a dict, keys are axis indices and values are axis names. If a
|
377 |
+
list, each element is an axis index.
|
378 |
+
|
379 |
+
For example::
|
380 |
+
|
381 |
+
class SumModule(torch.nn.Module):
|
382 |
+
def forward(self, x):
|
383 |
+
return torch.sum(x, dim=1)
|
384 |
+
|
385 |
+
torch.onnx.export(
|
386 |
+
SumModule(),
|
387 |
+
(torch.ones(2, 2),),
|
388 |
+
"onnx.pb",
|
389 |
+
input_names=["x"],
|
390 |
+
output_names=["sum"]
|
391 |
+
)
|
392 |
+
|
393 |
+
Produces::
|
394 |
+
|
395 |
+
input {
|
396 |
+
name: "x"
|
397 |
+
...
|
398 |
+
shape {
|
399 |
+
dim {
|
400 |
+
dim_value: 2 # axis 0
|
401 |
+
}
|
402 |
+
dim {
|
403 |
+
dim_value: 2 # axis 1
|
404 |
+
...
|
405 |
+
output {
|
406 |
+
name: "sum"
|
407 |
+
...
|
408 |
+
shape {
|
409 |
+
dim {
|
410 |
+
dim_value: 2 # axis 0
|
411 |
+
...
|
412 |
+
|
413 |
+
While::
|
414 |
+
|
415 |
+
torch.onnx.export(
|
416 |
+
SumModule(),
|
417 |
+
(torch.ones(2, 2),),
|
418 |
+
"onnx.pb",
|
419 |
+
input_names=["x"],
|
420 |
+
output_names=["sum"],
|
421 |
+
dynamic_axes={
|
422 |
+
# dict value: manually named axes
|
423 |
+
"x": {0: "my_custom_axis_name"},
|
424 |
+
# list value: automatic names
|
425 |
+
"sum": [0],
|
426 |
+
}
|
427 |
+
)
|
428 |
+
|
429 |
+
Produces::
|
430 |
+
|
431 |
+
input {
|
432 |
+
name: "x"
|
433 |
+
...
|
434 |
+
shape {
|
435 |
+
dim {
|
436 |
+
dim_param: "my_custom_axis_name" # axis 0
|
437 |
+
}
|
438 |
+
dim {
|
439 |
+
dim_value: 2 # axis 1
|
440 |
+
...
|
441 |
+
output {
|
442 |
+
name: "sum"
|
443 |
+
...
|
444 |
+
shape {
|
445 |
+
dim {
|
446 |
+
dim_param: "sum_dynamic_axes_1" # axis 0
|
447 |
+
...
|
448 |
+
|
449 |
+
keep_initializers_as_inputs (bool, default None): If True, all the
|
450 |
+
initializers (typically corresponding to parameters) in the
|
451 |
+
exported graph will also be added as inputs to the graph. If False,
|
452 |
+
then initializers are not added as inputs to the graph, and only
|
453 |
+
the non-parameter inputs are added as inputs.
|
454 |
+
This may allow for better optimizations (e.g. constant folding) by
|
455 |
+
backends/runtimes.
|
456 |
+
|
457 |
+
If True, `deduplicate_initializers` pass will not be executed. This means
|
458 |
+
initializers with duplicated values will not be deduplicated and
|
459 |
+
will be treated as distinct inputs to the graph. This allows different
|
460 |
+
input initializers to be supplied at the runtime following export.
|
461 |
+
|
462 |
+
If ``opset_version < 9``, initializers MUST be part of graph
|
463 |
+
inputs and this argument will be ignored and the behavior will be
|
464 |
+
equivalent to setting this argument to True.
|
465 |
+
|
466 |
+
If None, then the behavior is chosen automatically as follows:
|
467 |
+
|
468 |
+
* If ``operator_export_type=OperatorExportTypes.ONNX``, the behavior is equivalent
|
469 |
+
to setting this argument to False.
|
470 |
+
* Else, the behavior is equivalent to setting this argument to True.
|
471 |
+
|
472 |
+
custom_opsets (dict[str, int], default empty dict): A dict with schema:
|
473 |
+
|
474 |
+
* KEY (str): opset domain name
|
475 |
+
* VALUE (int): opset version
|
476 |
+
|
477 |
+
If a custom opset is referenced by ``model`` but not mentioned in this dictionary,
|
478 |
+
the opset version is set to 1. Only custom opset domain name and version should be
|
479 |
+
indicated through this argument.
|
480 |
+
|
481 |
+
export_modules_as_functions (bool or set of type of nn.Module, default False): Flag to enable
|
482 |
+
exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the
|
483 |
+
particular types of modules to export as local functions in ONNX.
|
484 |
+
This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because
|
485 |
+
``opset_version`` < 15 implies IR version < 8, which means no local function support.
|
486 |
+
Module variables will be exported as function attributes. There are two categories of function
|
487 |
+
attributes.
|
488 |
+
|
489 |
+
1. Annotated attributes: class variables that have type annotations via
|
490 |
+
`PEP 526-style <https://www.python.org/dev/peps/pep-0526/#class-and-instance-variable-annotations>`_
|
491 |
+
will be exported as attributes.
|
492 |
+
Annotated attributes are not used inside the subgraph of ONNX local function because
|
493 |
+
they are not created by PyTorch JIT tracing, but they may be used by consumers
|
494 |
+
to determine whether or not to replace the function with a particular fused kernel.
|
495 |
+
|
496 |
+
2. Inferred attributes: variables that are used by operators inside the module. Attribute names
|
497 |
+
will have prefix "inferred::". This is to differentiate from predefined attributes retrieved from
|
498 |
+
python module annotations. Inferred attributes are used inside the subgraph of ONNX local function.
|
499 |
+
|
500 |
+
* ``False`` (default): export ``nn.Module`` forward calls as fine grained nodes.
|
501 |
+
* ``True``: export all ``nn.Module`` forward calls as local function nodes.
|
502 |
+
* Set of type of nn.Module: export ``nn.Module`` forward calls as local function nodes,
|
503 |
+
only if the type of the ``nn.Module`` is found in the set.
|
504 |
+
|
505 |
+
autograd_inlining (bool, default True): Flag used to control whether to inline autograd functions.
|
506 |
+
Refer to https://github.com/pytorch/pytorch/pull/74765 for more details.
|
507 |
+
|
508 |
+
Raises:
|
509 |
+
:class:`torch.onnx.errors.CheckerError`: If the ONNX checker detects an invalid ONNX graph.
|
510 |
+
:class:`torch.onnx.errors.UnsupportedOperatorError`: If the ONNX graph cannot be exported because it
|
511 |
+
uses an operator that is not supported by the exporter.
|
512 |
+
:class:`torch.onnx.errors.OnnxExporterError`: Other errors that can occur during export.
|
513 |
+
All errors are subclasses of :class:`errors.OnnxExporterError`.
|
514 |
+
"""
|
515 |
+
|
516 |
+
_export(
|
517 |
+
model,
|
518 |
+
args,
|
519 |
+
f,
|
520 |
+
export_params,
|
521 |
+
verbose,
|
522 |
+
training,
|
523 |
+
input_names,
|
524 |
+
output_names,
|
525 |
+
operator_export_type=operator_export_type,
|
526 |
+
opset_version=opset_version,
|
527 |
+
do_constant_folding=do_constant_folding,
|
528 |
+
dynamic_axes=dynamic_axes,
|
529 |
+
keep_initializers_as_inputs=keep_initializers_as_inputs,
|
530 |
+
custom_opsets=custom_opsets,
|
531 |
+
export_modules_as_functions=export_modules_as_functions,
|
532 |
+
autograd_inlining=autograd_inlining,
|
533 |
+
)
|
534 |
+
|
535 |
+
|
536 |
+
@_beartype.beartype
|
537 |
+
def _is_constant_tensor_list(node):
|
538 |
+
if node.kind() != "prim::Constant":
|
539 |
+
return False
|
540 |
+
output_type = node.output().type()
|
541 |
+
if output_type.isSubtypeOf(_C.ListType.ofTensors()):
|
542 |
+
return True
|
543 |
+
if output_type.isSubtypeOf(_C.ListType(_C.OptionalType.ofTensor())):
|
544 |
+
return True
|
545 |
+
|
546 |
+
|
547 |
+
# ONNX can't handle constants that are lists of tensors, which can
|
548 |
+
# get generated in constant prop. So we split them back into prim::ListConstructs
|
549 |
+
|
550 |
+
|
551 |
+
@_beartype.beartype
|
552 |
+
def _split_tensor_list_constants(g, block):
|
553 |
+
for node in block.nodes():
|
554 |
+
for subblock in node.blocks():
|
555 |
+
_split_tensor_list_constants(g, subblock)
|
556 |
+
if _is_constant_tensor_list(node):
|
557 |
+
inputs = []
|
558 |
+
for val in node.output().toIValue():
|
559 |
+
input = g.insertConstant(val)
|
560 |
+
input.node().moveBefore(node)
|
561 |
+
input.node().copyMetadata(node)
|
562 |
+
inputs.append(input)
|
563 |
+
|
564 |
+
lc = (
|
565 |
+
g.create("prim::ListConstruct", inputs)
|
566 |
+
.insertBefore(node)
|
567 |
+
.output()
|
568 |
+
.setType(_C.ListType.ofTensors())
|
569 |
+
)
|
570 |
+
lc.node().copyMetadata(node)
|
571 |
+
node.output().replaceAllUsesWith(lc)
|
572 |
+
|
573 |
+
|
574 |
+
@_beartype.beartype
|
575 |
+
def _optimize_graph(
|
576 |
+
graph: _C.Graph,
|
577 |
+
operator_export_type: _C_onnx.OperatorExportTypes,
|
578 |
+
_disable_torch_constant_prop: bool = False,
|
579 |
+
fixed_batch_size: bool = False,
|
580 |
+
params_dict=None,
|
581 |
+
dynamic_axes=None,
|
582 |
+
input_names=None,
|
583 |
+
module=None,
|
584 |
+
):
|
585 |
+
if params_dict is None:
|
586 |
+
params_dict = {}
|
587 |
+
|
588 |
+
# Inline everything
|
589 |
+
_C._jit_pass_inline(graph)
|
590 |
+
|
591 |
+
# Remove fork/wait nodes
|
592 |
+
_C._jit_pass_inline_fork_wait(graph)
|
593 |
+
_C._jit_pass_lint(graph)
|
594 |
+
    if GLOBALS.autograd_inlining:
        _C._jit_pass_onnx_autograd_function_process(graph)
    _C._jit_pass_lower_all_tuples(graph)

    # we now record some ops like ones/zeros
    # into a trace where we previously recorded constants.
    # use constant prop to maintain our current level of onnx support
    # without implementing symbolics for all of them
    if _disable_torch_constant_prop is False:
        _C._jit_pass_constant_propagation(graph)

    _split_tensor_list_constants(graph, graph)
    # run dce to eliminate dead parts of the graph that might have been
    # left behind by things like symbolic_override
    _C._jit_pass_dce(graph)
    _C._jit_pass_lint(graph)

    # CSE should improve perf when Autocast is used with disabled cache
    # Autocast is disabled due to a limitation on tracer as described at https://github.com/pytorch/pytorch/issues/84092
    # Must run before _C._jit_pass_erase_number_types to prevent type substitution
    if _C._jit_pass_cse(graph):
        _C._jit_pass_onnx_lint(graph)

    _C._jit_pass_canonicalize_graph_fuser_ops(graph)
    _C._jit_pass_lint(graph)
    _C._jit_pass_peephole(graph, True)
    _C._jit_pass_fuse_addmm(graph)
    _C._jit_pass_lint(graph)

    _C._jit_pass_peephole(graph, True)
    _C._jit_pass_lower_all_tuples(graph)
    # in _jit_pass_onnx, symbolic functions are called for each node for conversion.
    # However, there are nodes that cannot be converted without additional context.
    # For example, the number of outputs from split (and whether it is static or dynamic) is unknown
    # until the point where it is unpacked by listUnpack node.
    # This pass does a preprocess, and prepares the nodes such that enough context can be received
    # by the symbolic function.
    _C._jit_pass_onnx_remove_inplace_ops_for_onnx(graph, module)
    _C._jit_pass_onnx_preprocess(graph)

    # onnx does not support tuples, so try to remove them
    _C._jit_pass_lint(graph)

    # onnx only supports tensors, but 1 / 2 = 0.5 and tensor(1) / tensor(2) = 0
    _C._jit_pass_prepare_division_for_onnx(graph)

    _C._jit_pass_onnx_remove_print(graph)
    _C._jit_pass_onnx_preprocess_caffe2(graph)

    symbolic_helper._quantized_ops.clear()
    # Unpack quantized weights for conv and linear ops and insert into graph.
    _C._jit_pass_onnx_unpack_quantized_weights(
        graph, params_dict, symbolic_helper.is_caffe2_aten_fallback()
    )
    if symbolic_helper.is_caffe2_aten_fallback():
        # Insert permutes before and after each conv op to ensure correct order.
        _C._jit_pass_onnx_quantization_insert_permutes(graph, params_dict)

        # Find consecutive permutes that are no-ops and remove them.
        _C._jit_pass_custom_pattern_based_rewrite_graph(
            textwrap.dedent(
                """\
                graph(%Pi):
                    %Pq = quantized::nhwc2nchw(%Pi)
                    %Pr = quantized::nchw2nhwc(%Pq)
                    return (%Pr)"""
            ),
            textwrap.dedent(
                """\
                graph(%Ri):
                    return (%Ri)"""
            ),
            graph,
        )

    # onnx only supports tensors, so we turn all number types into tensors
    _C._jit_pass_erase_number_types(graph)
    if GLOBALS.onnx_shape_inference:
        input_names = [] if input_names is None else input_names
        dynamic_axes = {} if dynamic_axes is None else dynamic_axes
        _C._jit_pass_onnx_set_dynamic_input_shape(graph, dynamic_axes, input_names)
    _C._jit_pass_onnx_lint(graph)

    graph = _C._jit_pass_onnx(graph, operator_export_type)
    _C._jit_pass_onnx_lint(graph)
    _C._jit_pass_lint(graph)

    _C._jit_pass_onnx_scalar_type_analysis(
        graph, True, GLOBALS.export_onnx_opset_version
    )
    _C._jit_pass_lint(graph)

    _C._jit_pass_onnx_peephole(
        graph, GLOBALS.export_onnx_opset_version, fixed_batch_size
    )
    _C._jit_pass_lint(graph)

    # graph is not a valid jit graph anymore because types have been replaced
    # (e.g. int with Tensor), so it now contains operators that don't actually
    # exist. We can't run normal dead code elimination because it'd fail trying
    # to look up if an operator has side effects, but we can run a dead code
    # elimination variant that doesn't need to look up if an op has side effects.
    _C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
    _C._jit_pass_lint(graph)
    graph = _C._jit_pass_canonicalize(graph)
    _C._jit_pass_lint(graph)
    if GLOBALS.onnx_shape_inference:
        try:
            _C._jit_pass_onnx_graph_shape_type_inference(
                graph, params_dict, GLOBALS.export_onnx_opset_version
            )
        except RuntimeError as exc:
            if (
                _C_onnx._CAFFE2_ATEN_FALLBACK
                and exc.args[0]
                == "ScalarType UNKNOWN_SCALAR is an unexpected tensor scalar type!"
            ):
                # Caffe2 builds can have UNKNOWN_SCALAR for some tensors
                pass

    return graph


@_beartype.beartype
def warn_on_static_input_change(input_states):
    """Warns that changes to input dictionaries and strings won't take effect in the traced ONNX graph.

    We accept dictionaries and strings as ONNX inputs, but they should only be used for
    configuration. We detect here if these inputs are modified, and if so we warn
    the user that the changes won't take effect in the traced ONNX graph.
    """
    for input, traced_input in zip(input_states[0], input_states[1]):
        if isinstance(input, dict):
            if list(input.keys()) != list(traced_input.keys()):
                warning = (
                    "We detected that you are modifying a dictionary that is an input to your "
                    "model. "
                    "Note that dictionaries are allowed as inputs in ONNX but they should be "
                    "handled with care. "
                    "Usage of dictionaries is not recommended, and should not be used except "
                    "for configuration purposes. "
                    "Also note that the order and values of the keys must remain the same. "
                )
                warnings.warn(warning)
        elif isinstance(input, str):
            if input != traced_input:
                warning = (
                    "The model seems to have string inputs/outputs. "
                    "Note that strings will not appear as inputs/outputs of the ONNX graph. "
                )
                warnings.warn(warning)
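

# Illustrative usage sketch (added in this edit, not part of the upstream
# module): the names below are hypothetical. A dict input whose key set changed
# between the original and traced input states triggers the warning above.
def _example_warn_on_static_input_change():
    original_inputs = [{"mode": "fast"}]
    traced_inputs = [{"mode": "fast", "extra": True}]  # key set changed during tracing
    # Emits the "modifying a dictionary" warning defined above.
    warn_on_static_input_change((original_inputs, traced_inputs))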


@_beartype.beartype
def _resolve_args_by_export_type(arg_name, arg_value, operator_export_type):
    """Resolves the arguments that are ignored when export_type != operator_export_type.ONNX."""
    if (
        operator_export_type is not operator_export_type.ONNX
        and _C_onnx._CAFFE2_ATEN_FALLBACK
    ):
        if arg_value is True:
            warnings.warn(
                f"'{arg_name}' can be set to True only when 'operator_export_type' is "
                "`ONNX`. Since 'operator_export_type' is not set to 'ONNX', "
                f"'{arg_name}' argument will be ignored."
            )
        arg_value = False
    return arg_value
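

# Illustrative usage sketch (added in this edit, not part of the upstream
# module): on a Caffe2-enabled build with a non-ONNX export type, a True flag
# is downgraded to False with a warning; on other builds it passes through.
def _example_resolve_args_by_export_type():
    fallback = _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
    resolved = _resolve_args_by_export_type("add_node_names", True, fallback)
    # resolved is False (plus a warning) on Caffe2 builds, True otherwise.
    return resolved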


@_beartype.beartype
def _decide_keep_init_as_input(
    keep_initializers_as_inputs: Optional[bool],
    operator_export_type: _C_onnx.OperatorExportTypes,
    opset_version: int,
):
    """Decides whether the initializers in the graph should be listed as ONNX graph inputs.

    This method encapsulates the logic to decide whether the initializers in the graph
    should be listed as ONNX graph inputs (i.e., whether to choose ONNX IR v3 or v4).
    If keep_initializers_as_inputs is not specified (None), then we decide whether to keep
    initializers as graph inputs (val_keep_init_as_ip) based on export type. If export type
    is ONNX, then do not keep initializers as input (val_keep_init_as_ip=False). For all other
    export types keep initializers as input (val_keep_init_as_ip=True).
    If keep_initializers_as_inputs is specified, then respect it. Unless opset version <= 8,
    in which case it must be ignored because for opset version <= 8, all initializers MUST be
    part of graph input (only ONNX IR v3 is allowed), i.e. val_keep_init_as_ip=True.

    Special handling is needed for opset version 8 or lower, because irrespective
    of user input for keep_initializers_as_inputs, the graph must follow ONNX IR v3
    semantics, i.e. all initializers must be listed as ONNX graph input.
    """

    if opset_version < 9:
        if keep_initializers_as_inputs is False:
            warnings.warn(
                "Setting 'keep_initializers_as_inputs=False' for opset version "
                "8 or lower would lead to an invalid ONNX graph. Therefore, "
                "'keep_initializers_as_inputs=False' is ignored during export. "
                "Exported model will have initializers as graph inputs (compliant "
                "to ONNX IR v3)."
            )
        return True  # i.e. True == initializers are part of graph input (ONNX IR v3)
    val_keep_init_as_ip = (
        True if keep_initializers_as_inputs is None else keep_initializers_as_inputs
    )
    if (
        keep_initializers_as_inputs is None
        and operator_export_type is _C_onnx.OperatorExportTypes.ONNX
    ):
        val_keep_init_as_ip = False
    return val_keep_init_as_ip
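

# Illustrative decision-table sketch (added in this edit, not part of the
# upstream module), exercising the three branches documented above.
def _example_decide_keep_init_as_input():
    onnx = _C_onnx.OperatorExportTypes.ONNX
    assert _decide_keep_init_as_input(None, onnx, 13) is False  # ONNX export -> IR v4
    assert _decide_keep_init_as_input(True, onnx, 13) is True  # explicit user choice wins
    assert _decide_keep_init_as_input(False, onnx, 8) is True  # opset <= 8 forces IR v3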


@_beartype.beartype
def _decide_add_node_names(add_node_names, operator_export_type):
    return _resolve_args_by_export_type(
        "add_node_names", add_node_names, operator_export_type
    )


@_beartype.beartype
def _decide_constant_folding(do_constant_folding, operator_export_type, training):
    do_constant_folding = _resolve_args_by_export_type(
        "do_constant_folding", do_constant_folding, operator_export_type
    )
    if do_constant_folding and (
        training is not None and training is not _C_onnx.TrainingMode.EVAL
    ):
        warnings.warn(
            "It is recommended that constant folding be turned off ('do_constant_folding=False') "
            "when exporting the model in training-amenable mode, i.e. with 'training=TrainingMode.TRAIN' "
            "or 'training=TrainingMode.PRESERVE' (when model is in training mode). Otherwise, some "
            "learnable model parameters may not translate correctly in the exported ONNX model "
            "because constant folding mutates model parameters. Please consider "
            "turning off constant folding or setting training=TrainingMode.EVAL."
        )
    return do_constant_folding
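

# Illustrative usage sketch (added in this edit, not part of the upstream
# module): constant folding stays enabled in a TRAINING-mode export, but a
# warning is emitted because folding can bake learnable parameters into constants.
def _example_decide_constant_folding():
    folded = _decide_constant_folding(
        True, _C_onnx.OperatorExportTypes.ONNX, _C_onnx.TrainingMode.TRAINING
    )
    assert folded is True  # still enabled; the warning above was emitted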


@_beartype.beartype
def _signature(model) -> inspect.Signature:
    should_be_callable = getattr(model, "forward", model)
    if callable(should_be_callable):
        return inspect.signature(should_be_callable)
    raise ValueError("model has no forward method and is not callable")


@_beartype.beartype
def _decide_input_format(model, args):
    try:
        sig = _signature(model)
    except ValueError as e:
        warnings.warn(f"{e}, skipping _decide_input_format")
        return args
    try:
        ordered_list_keys = list(sig.parameters.keys())
        if ordered_list_keys[0] == "self":
            ordered_list_keys = ordered_list_keys[1:]
        args_dict: Dict = {}
        if isinstance(args, list):
            args_list = args
        elif isinstance(args, tuple):
            args_list = list(args)
        else:
            args_list = [args]
        if isinstance(args_list[-1], dict):
            args_dict = args_list[-1]
            args_list = args_list[:-1]
        n_nonkeyword = len(args_list)
        for optional_arg in ordered_list_keys[n_nonkeyword:]:
            if optional_arg in args_dict:
                args_list.append(args_dict[optional_arg])
            # Check if this arg has a default value
            else:
                param = sig.parameters[optional_arg]
                if param.default != param.empty:
                    args_list.append(param.default)
        args = args_list if isinstance(args, list) else tuple(args_list)
    # Cases of models with no input args
    except IndexError:
        warnings.warn("No input args, skipping _decide_input_format")
    except Exception as e:
        warnings.warn(f"Skipping _decide_input_format\n {e.args[0]}")

    return args
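

# Illustrative usage sketch (added in this edit, not part of the upstream
# module): a trailing kwargs dict is folded into positional order and missing
# optional arguments are filled from their defaults. The model is hypothetical.
def _example_decide_input_format():
    class _Model(torch.nn.Module):
        def forward(self, x, y=None, scale=1.0):
            return x * scale if y is None else (x + y) * scale

    x = torch.ones(2)
    # "y" is absent from the kwargs dict, so its default (None) is filled in,
    # yielding (x, None, 2.0).
    return _decide_input_format(_Model(), (x, {"scale": 2.0}))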


@_beartype.beartype
def _trace(func, args, operator_export_type, return_outs=False):
    # Special case for common case of passing a single Tensor
    if isinstance(args, torch.Tensor):
        args = (args,)

    trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph(
        func,
        args,
        strict=False,
        _force_outplace=False,
        _return_inputs_states=True,
    )
    warn_on_static_input_change(inputs_states)

    trace_graph = _optimize_graph(trace_graph, operator_export_type, params_dict={})
    if return_outs:
        return trace_graph, torch_out
    return trace_graph


@_beartype.beartype
def _trace_and_get_graph_from_model(model, args):
    # A basic sanity check: make sure the state_dict keys are the same
    # before and after running the model. Fail fast!
    orig_state_dict_keys = torch.jit._unique_state_dict(model).keys()

    # Disable Autocast cache because it replaces kernel's weight and bias
    # by (undesired) constants.
    # No perf impact when there are reused weights since https://github.com/pytorch/pytorch/pull/85665
    prev_autocast_cache_enabled = torch.is_autocast_cache_enabled()
    torch.set_autocast_cache_enabled(False)
    trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph(
        model,
        args,
        strict=False,
        _force_outplace=False,
        _return_inputs_states=True,
    )
    torch.set_autocast_cache_enabled(prev_autocast_cache_enabled)

    warn_on_static_input_change(inputs_states)

    if orig_state_dict_keys != torch.jit._unique_state_dict(model).keys():
        raise RuntimeError(
            "state_dict changed after running the tracer; "
            "something weird is happening in your model!"
        )

    return trace_graph, torch_out


@_beartype.beartype
def _get_param_count_list(method_graph, args_params):
    param_count_list = []
    for input_, arg_params_ in zip(method_graph.inputs(), args_params):
        if "PackedParams" in str(input_.type()):
            in_vars, _ = torch.jit._flatten(arg_params_)
            param_count_list.append(len(in_vars))
        else:
            param_count_list.append(arg_params_ is not None)

    return param_count_list


@_beartype.beartype
def _check_flatten_did_not_remove(original, jit_flattened):
    """torch.jit._flatten removes None. Check if it did so in this case."""

    @_beartype.beartype
    def flatten(x):
        if isinstance(x, (list, tuple)):
            for inner in x:
                yield from flatten(inner)
        elif isinstance(x, dict):
            for inner in x.values():
                yield from flatten(inner)
        else:
            yield x

    flattened_with_none = list(flatten(original))
    num_none = len(flattened_with_none) - len(jit_flattened)
    assert num_none >= 0
    if num_none:
        raise ValueError(
            f"args contained {num_none} None's after flattening. "
            "When exporting a ScriptModule or ScriptFunction, no args may "
            "be None because that breaks type propagation."
        )
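

# Illustrative usage sketch (added in this edit, not part of the upstream
# module): torch.jit._flatten silently drops None entries, which this check
# turns into a loud ValueError for script exports.
def _example_check_flatten_did_not_remove():
    original = (torch.ones(1), None)
    jit_flattened, _ = torch.jit._flatten(original)  # the None is dropped here
    try:
        _check_flatten_did_not_remove(original, jit_flattened)
    except ValueError:
        pass  # raised because one None was removed during flattening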


def _create_jit_graph(
    model: Union[torch.nn.Module, torch.jit.ScriptFunction], args: Sequence[Any]
) -> Tuple[_C.Graph, List[_C.IValue], Optional[Any], Optional[_C.ScriptModule]]:
    if isinstance(model, (torch.jit.ScriptFunction, torch.jit.ScriptModule)):
        flattened_args = tuple(torch.jit._flatten(tuple(args))[0])
        _check_flatten_did_not_remove(args, flattened_args)
        torch_out = None

        if isinstance(model, torch.jit.ScriptModule):
            try:
                graph = model.forward.graph  # type: ignore[attr-defined]
            except AttributeError as e:
                raise RuntimeError("'forward' method must be a script method") from e
            _C._jit_pass_onnx_function_substitution(graph)
            freezed_module = _C._freeze_module(
                cast(_C.ScriptModule, model._c), preserveParameters=True
            )
            module, params = _C._jit_onnx_list_model_parameters(freezed_module)
            method_graph = module._get_method("forward").graph
            args_params = tuple(args) + tuple(params)
            param_count_list = _get_param_count_list(method_graph, args_params)
            in_vars, _ = torch.jit._flatten(args_params)
            graph = _C._propagate_and_assign_input_shapes(
                method_graph, tuple(in_vars), param_count_list, False, False
            )
            return graph, params, torch_out, module

        # torch.jit.ScriptFunction
        params = []
        graph = model.graph
        _C._jit_pass_onnx_function_substitution(graph)
        param_count_list = _get_param_count_list(graph, args)
        graph = _C._propagate_and_assign_input_shapes(
            graph, flattened_args, param_count_list, False, False
        )
        return graph, params, torch_out, None

    graph, torch_out = _trace_and_get_graph_from_model(model, args)
    _C._jit_pass_onnx_lint(graph)
    state_dict = torch.jit._unique_state_dict(model)
    params = list(state_dict.values())
    graph_inputs = list(graph.inputs())
    user_input_num = len(graph_inputs) - len(state_dict)
    param_names = list(state_dict.keys())
    for i, inp in enumerate(graph_inputs):
        if i >= user_input_num:
            inp.setDebugName(param_names[i - user_input_num])
    _C._jit_pass_onnx_function_substitution(graph)
    return graph, params, torch_out, None


@_beartype.beartype
def _get_named_param_dict(graph, params):
    input_and_param_names = [val.debugName() for val in graph.inputs()]
    param_names = input_and_param_names[len(input_and_param_names) - len(params) :]
    _params_dict = dict(zip(param_names, params))
    return _params_dict


@_beartype.beartype
def _get_example_outputs(model, args):
    input_args = copy.deepcopy(args)
    input_kwargs = {}
    if input_args and isinstance(input_args[-1], dict):
        input_kwargs = input_args[-1]
        input_args = input_args[:-1]

    example_outputs = model(*input_args, **input_kwargs)
    if isinstance(example_outputs, list):
        example_outputs = [example_outputs]
    elif not isinstance(example_outputs, tuple):
        example_outputs = (example_outputs,)

    return example_outputs
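

# Illustrative usage sketch (added in this edit, not part of the upstream
# module): a bare tensor output is normalized to a 1-tuple, while a list
# output is wrapped (not flattened) so its structure survives.
def _example_get_example_outputs():
    outs = _get_example_outputs(torch.nn.Identity(), (torch.ones(1),))
    assert isinstance(outs, tuple) and len(outs) == 1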


_qtype_vtype_map = {
    torch.quint8: torch.uint8,
    torch.qint8: torch.int8,
    torch.qint32: torch.int32,
    torch.quint4x2: torch.int8,
}


@_beartype.beartype
def unpack_quantized_tensor(value, cast_onnx_accepted=True):
    if isinstance(value, torch.Tensor) and value.dtype in _qtype_vtype_map:
        q_value_dequantize = value.dequantize()
        q_scale = (
            torch.tensor(value.q_scale(), dtype=torch.double)
            if cast_onnx_accepted
            else torch.tensor(value.q_scale(), dtype=torch.float32)
        )
        q_zero_point = (
            torch.tensor(value.q_zero_point(), dtype=torch.int64)
            if cast_onnx_accepted
            else torch.tensor(value.q_zero_point(), dtype=_qtype_vtype_map[value.dtype])
        )
        q_value = q_value_dequantize / q_scale + q_zero_point
        q_value = q_value.to(dtype=_qtype_vtype_map[value.dtype])
        return q_value, q_scale, q_zero_point
    else:
        return (value,)
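

# Illustrative usage sketch (added in this edit, not part of the upstream
# module): a quantized tensor is unpacked into ONNX-friendly pieces, i.e. the
# integer representation (dequantized / scale + zero_point), a double scale,
# and an int64 zero point.
def _example_unpack_quantized_tensor():
    q = torch.quantize_per_tensor(
        torch.tensor([0.5, 1.0]), scale=0.5, zero_point=0, dtype=torch.quint8
    )
    q_value, q_scale, q_zero_point = unpack_quantized_tensor(q)
    assert q_value.dtype == torch.uint8
    assert q_scale.dtype == torch.double and q_zero_point.dtype == torch.int64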


@_beartype.beartype
def _pre_trace_quant_model(model, args):
    r"""Returns `torch.jit.trace(model, args)` if model is quantized. Otherwise do nothing and return
    original model.

    This is due to https://github.com/pytorch/pytorch/issues/75761.
    """
    if any(
        hasattr(m, "_packed_params") for m in getattr(model, "modules", list)()
    ) or any(getattr(arg, "is_quantized", False) for arg in args):
        return torch.jit.trace(model, args)
    return model


@_beartype.beartype
def _model_to_graph(
    model,
    args,
    verbose=False,
    input_names=None,
    output_names=None,
    operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
    do_constant_folding=True,
    _disable_torch_constant_prop=False,
    fixed_batch_size=False,
    training=_C_onnx.TrainingMode.EVAL,
    dynamic_axes=None,
) -> Tuple[
    _C.Graph,
    Dict[str, torch.Tensor],
    Optional[
        Union[
            torch.Tensor,
            Tuple[torch.Tensor, ...],
            List[torch.Tensor],
            Dict[str, torch.Tensor],
            Any,  # Can be nested tuples etc.
        ]
    ],
]:
    """Converts model into an ONNX graph.

    Returns:
        graph: A TorchScript IR Graph with ONNX nodes.
        params_dict: Dict from input param name to param value.
        torch_out: The output tensors resulting from the trace of ``model``.
            If ``model`` is a :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`,
            this will be None, since we are not doing any tracing.
    """
    # TODO: can we simplify this to always return a tuple of Tensor or None?

    # Special case for common case of passing a single Tensor
    if isinstance(args, (torch.Tensor, int, float, bool)):
        args = (args,)

    model = _pre_trace_quant_model(model, args)
    graph, params, torch_out, module = _create_jit_graph(model, args)
    params_dict = _get_named_param_dict(graph, params)

    try:
        graph = _optimize_graph(
            graph,
            operator_export_type,
            _disable_torch_constant_prop=_disable_torch_constant_prop,
            fixed_batch_size=fixed_batch_size,
            params_dict=params_dict,
            dynamic_axes=dynamic_axes,
            input_names=input_names,
            module=module,
        )
    except Exception:
        torch.onnx.log("Torch IR graph at exception: ", graph)
        raise

    is_script = isinstance(model, (torch.jit.ScriptFunction, torch.jit.ScriptModule))
    if is_script:
        example_outputs = _get_example_outputs(model, args)
        example_outputs_final = ()
        for example_output in example_outputs:
            example_outputs_final += unpack_quantized_tensor(example_output)
        out_vars, desc = torch.jit._flatten(example_outputs_final)
        _C._jit_pass_onnx_assign_output_shape(
            graph,
            out_vars,
            desc,
            GLOBALS.onnx_shape_inference,
            is_script,
            GLOBALS.export_onnx_opset_version,
        )

    # NB: ONNX requires complete information about output types, which might be
    # erased by some optimizations, so we need to set it explicitly again.
    else:
        if not isinstance(torch_out, (list, tuple)):
            output_wrapped = [torch_out]
        else:
            output_wrapped = torch_out  # type: ignore[assignment]

        output_tensors, out_desc = torch.jit._flatten(tuple(output_wrapped))
        # assign_output_shape pass is not compatible with quantized outputs.
        # Quantized outputs are flattened to 3 values in ONNX, while packed as
        # single value in PyTorch.
        if not any(getattr(out, "is_quantized", False) for out in output_tensors):
            _C._jit_pass_onnx_assign_output_shape(
                graph,
                output_tensors,
                out_desc,
                GLOBALS.onnx_shape_inference,
                is_script,
                GLOBALS.export_onnx_opset_version,
            )

    _set_input_and_output_names(graph, input_names, output_names)
    params_dict = _get_named_param_dict(graph, params)

    if (
        do_constant_folding
        and GLOBALS.export_onnx_opset_version
        >= _constants.ONNX_CONSTANT_FOLDING_MIN_OPSET
    ):
        if training is None or training == _C_onnx.TrainingMode.EVAL:
            params_dict = _C._jit_pass_onnx_eval_peephole(graph, params_dict)

        params_dict = _C._jit_pass_onnx_constant_fold(
            graph, params_dict, GLOBALS.export_onnx_opset_version
        )
        _C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)

    if GLOBALS.onnx_shape_inference:
        try:
            _C._jit_pass_onnx_graph_shape_type_inference(
                graph, params_dict, GLOBALS.export_onnx_opset_version
            )
        except RuntimeError as exc:
            if (
                _C_onnx._CAFFE2_ATEN_FALLBACK
                and exc.args[0]
                == "ScalarType UNKNOWN_SCALAR is an unexpected tensor scalar type!"
            ):
                # Caffe2 builds can have UNKNOWN_SCALAR for some tensors
                pass

    params_dict = _C._jit_pass_onnx_eliminate_unused_items(graph, params_dict)

    # For ONNX opset < 9, constants only have three data types: float16, float, double.
    # In this pass transform constants of other data types to float/double + cast operator.
    if GLOBALS.export_onnx_opset_version < 9:
        _C._jit_pass_onnx_cast_all_constant_to_floating(graph)

    params_dict = _C._jit_pass_filter_non_tensor_arguments(params_dict)
    _C._jit_decay_packed_param_input_types(graph)

    # If outputs lack a proper name and are identified only by their unique id,
    # give them a legible name for debugging purposes.
    _apply_friendly_debug_names(graph, params_dict)

    return graph, params_dict, torch_out


@_beartype.beartype
@torch._disable_dynamo
def export_to_pretty_string(
    model,
    args,
    export_params=True,
    verbose=False,
    training=_C_onnx.TrainingMode.EVAL,
    input_names=None,
    output_names=None,
    operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
    export_type=None,
    google_printer=False,
    opset_version=None,
    keep_initializers_as_inputs=None,
    custom_opsets=None,
    add_node_names=True,
    do_constant_folding=True,
    dynamic_axes=None,
):
    r"""
    Similar to :func:`export`, but returns a text representation of the ONNX
    model. Only the args listed below differ; all other args are the same
    as :func:`export`.

    Args:
        add_node_names (bool, default True): Whether or not to set
            NodeProto.name. This makes no difference unless
            ``google_printer=True``.
        google_printer (bool, default False): If False, will return a custom,
            compact representation of the model. If True will return the
            protobuf's `Message::DebugString()`, which is more verbose.

    Returns:
        A UTF-8 str containing a human-readable representation of the ONNX model.
    """
    if opset_version is None:
        opset_version = _constants.ONNX_DEFAULT_OPSET
    if custom_opsets is None:
        custom_opsets = {}
    GLOBALS.export_onnx_opset_version = opset_version
    GLOBALS.operator_export_type = operator_export_type

    with exporter_context(model, training, verbose):
        val_keep_init_as_ip = _decide_keep_init_as_input(
            keep_initializers_as_inputs, operator_export_type, opset_version
        )
        val_add_node_names = _decide_add_node_names(
            add_node_names, operator_export_type
        )
        val_do_constant_folding = _decide_constant_folding(
            do_constant_folding, operator_export_type, training
        )
        args = _decide_input_format(model, args)
        graph, params_dict, torch_out = _model_to_graph(
            model,
            args,
            verbose,
            input_names,
            output_names,
            operator_export_type,
            val_do_constant_folding,
            training=training,
            dynamic_axes=dynamic_axes,
        )

        return graph._pretty_print_onnx(  # type: ignore[attr-defined]
            params_dict,
            opset_version,
            False,
            operator_export_type,
            google_printer,
            val_keep_init_as_ip,
            custom_opsets,
            val_add_node_names,
        )
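

# Illustrative usage sketch (added in this edit, not part of the upstream
# module): pretty-printing a small model instead of serializing it to a file.
def _example_export_to_pretty_string():
    text = export_to_pretty_string(torch.nn.Linear(3, 2), (torch.randn(1, 3),))
    assert isinstance(text, str)  # human-readable ONNX text, not a protobuf file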


@_beartype.beartype
def unconvertible_ops(
    model,
    args,
    training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL,
    opset_version: Optional[int] = None,
) -> Tuple[_C.Graph, List[str]]:
    """Returns an approximated list of all ops that are not yet supported by :mod:`torch.onnx`.

    The list is approximated because some ops may be removed during the conversion
    process and don't need to be converted. Some other ops may have partial support
    that will fail conversion with particular inputs. Please open a GitHub issue
    for op support requests.

    Args:
        model: Same as the `model` parameter in :func:`torch.onnx.export`.
        args: Same as the `args` parameter in :func:`torch.onnx.export`.
        training: Same as the `training` parameter in :func:`torch.onnx.export`.
        opset_version: Same as the `opset_version` parameter in :func:`torch.onnx.export`.

    Returns:
        The JIT graph and a list of unconvertible ops in the format of "domain::op".
    """

    opset_version = opset_version or _constants.ONNX_DEFAULT_OPSET
    GLOBALS.export_onnx_opset_version = opset_version

    try:
        with exporter_context(model, training, verbose=False):
            # Create a mostly clean JIT graph that contains the plain aten and
            # other ops we can check with the symbolic registry.
            # NOTE: We don't want to actually convert any ops to ONNX or run any
            # symbolic functions because there is a higher chance that a pass
            # fails or an unconvertible op messes up the graph during ONNX conversion.
            # This way we can always generate a list just by looking at the names
            # of the ops in the graph.
            args = _decide_input_format(model, args)
            model = _pre_trace_quant_model(model, args)
            graph, _, _, module = _create_jit_graph(model, args)
            _C._jit_pass_inline(graph)
            _C._jit_pass_onnx_remove_inplace_ops_for_onnx(graph, module)
            _C._jit_pass_erase_number_types(graph)
            _C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
    except Exception as e:
        raise errors.OnnxExporterError(
            "Failed to discover unconvertible ops because of errors during the JIT graph "
            "generation process."
        ) from e

    unsupported_ops = []
    for node in graph.nodes():
        domain_op = node.kind()
        if domain_op.startswith(("onnx::", "prim::")):
            # We consider onnx and prim ops as supported ops, even though some "prim"
            # ops are not implemented as symbolic functions, because they may be
            # eliminated in the conversion passes. Users may still see errors caused
            # by prim ops even though they don't show up in the list.
            continue
        if not registration.registry.is_registered_op(
            domain_op.rstrip("_"), opset_version
        ):
            # We consider all registered ops supported, even though some of them are
            # only partially supported, because there is not yet a good way to check
            # if an op is fully supported.
            # TODO(justinchuby): Create a way to check if an op is fully supported.
            unsupported_ops.append(domain_op)
    return graph, unsupported_ops
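

# Illustrative usage sketch (added in this edit, not part of the upstream
# module): probing a model for unsupported ops before attempting a real export.
def _example_unconvertible_ops():
    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
    _graph, unsupported = unconvertible_ops(
        model, (torch.randn(2, 4),), opset_version=13
    )
    assert unsupported == []  # Linear and ReLU both have symbolic support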


@_beartype.beartype
def _setup_trace_module_map(
    model: Union[torch.nn.Module, torch.jit.ScriptModule],
    export_modules_as_functions: Union[bool, Collection[Type[torch.nn.Module]]],
) -> Set[str]:
    def __register_attribute_hook():
        attr_name = "_onnx_attrs"

        def _track_module_attributes_forward_pre_hook(module, input):
            setattr(module, attr_name, _get_module_attributes(module))

        def _track_module_attributes_forward_hook(module, input, output):
            tracing_state = _C._get_tracing_state()
            if not tracing_state:
                return

            graph = tracing_state.graph()
            onnx_attrs = {}
            if hasattr(module, attr_name):
                onnx_attrs = getattr(module, attr_name)
                delattr(module, attr_name)

            _C._jit_pass_onnx_track_scope_attributes(graph, onnx_attrs)

        for m in model.modules():
            m.register_forward_hook(_track_module_attributes_forward_hook)
            m.register_forward_pre_hook(_track_module_attributes_forward_pre_hook)

    def _unqualified_variable_name(qualified_name: str) -> str:
        """
        Parse qualified variable name and return the unqualified version.

        Pure numeric atoms are considered inadequate, so this function will look past them,
        and start from the first non-numeric atom.

        Example:
            >>> _unqualified_variable_name('__main__.Foo.bar')
            'bar'
            >>> _unqualified_variable_name('__main__.Foo.bar.0')
            'bar.0'
        """
        name_atoms = qualified_name.split(".")
        for i, atom in reversed(list(enumerate(name_atoms))):
            if not atom.isnumeric():
                return ".".join(name_atoms[i:])
        return qualified_name

    trace_module_map = {
        _m: torch._C._jit_onnx_create_full_scope_name(
            torch.typename(type(_m)), _unqualified_variable_name(_n)
        )
        for _n, _m in model.named_modules()
    }
    torch.jit._trace._trace_module_map = trace_module_map
    if isinstance(export_modules_as_functions, bool) and export_modules_as_functions:
        module_typenames = {torch.typename(type(module)) for module in trace_module_map}
    elif isinstance(export_modules_as_functions, set) and export_modules_as_functions:

        def _find_typename(v):
            if isinstance(v, type):
                return torch.typename(v)
            else:
                raise RuntimeError(
                    "Only type of the `nn.Module` should be "
                    "passed in the set for argument `export_modules_as_functions`. "
                    f"Got `{type(v).__name__}`."
                )

        module_typenames = {_find_typename(v) for v in export_modules_as_functions}
    else:
        module_typenames = set()

    if module_typenames:
        __register_attribute_hook()

    return module_typenames


@_beartype.beartype
def _reset_trace_module_map():
    torch.jit._trace._trace_module_map = None
    _C._jit_pass_onnx_clear_scope_records()


@_beartype.beartype
def _get_module_attributes(module):
    annotations = typing.get_type_hints(type(module))
    base_m_annotations = typing.get_type_hints(torch.nn.Module)
    [annotations.pop(k, None) for k in base_m_annotations]
    # Check whether module attributes can be accessed. Some classes
    # define attributes but don't provide access to them in their
    # constructor.
    #
    # For example, torch.nn.Embedding has the `freeze` variable and its
    # type specified in the class but the attribute is not created in the
    # constructor. In other words, there is no `self.freeze = <True | False>`
    # in the constructor.
    #
    # Reference: https://github.com/pytorch/pytorch/blob/92de1d322223fb5584e384971b32c46b93bc2f4b/torch/nn/modules/sparse.py#L120
    attrs = {}
    for k in annotations:
        try:
            attrs[k] = getattr(module, k)
        except AttributeError:
            torch.onnx.log(f"Skipping module attribute '{k}'")
            continue
    return attrs


@_beartype.beartype
def _export(
    model,
    args,
    f,
    export_params=True,
    verbose=False,
    training=_C_onnx.TrainingMode.EVAL,
    input_names=None,
    output_names=None,
    operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
    export_type=None,
    opset_version=None,
    do_constant_folding=True,
    dynamic_axes=None,
    keep_initializers_as_inputs=None,
    fixed_batch_size=False,
    custom_opsets=None,
    add_node_names=True,
    onnx_shape_inference=True,
    export_modules_as_functions=False,
    autograd_inlining=True,
):
    assert GLOBALS.in_onnx_export is False

    if export_type is None:
        export_type = _exporter_states.ExportTypes.PROTOBUF_FILE

    # Discussed deprecation with Nikita Shulga and Sergii Dymchenko from Meta
    if _C_onnx._CAFFE2_ATEN_FALLBACK:
        warnings.warn(
            "Caffe2 ONNX exporter is deprecated in version 2.0 and will be "
            "removed in 2.2. Please use PyTorch 2.1 or older for this capability.",
            category=FutureWarning,
            stacklevel=2,
        )

    if isinstance(model, torch.nn.DataParallel):
        raise ValueError(
            "torch.nn.DataParallel is not supported by ONNX "
            "exporter, please use 'attribute' module to "
            "unwrap model from torch.nn.DataParallel. Try "
            "torch.onnx.export(model.module, ...)"
        )

    GLOBALS.onnx_shape_inference = onnx_shape_inference

    if opset_version is None:
        opset_version = _constants.ONNX_DEFAULT_OPSET

    # torch.onnx.export does not support opset versions >= 18
    if opset_version > _constants.ONNX_TORCHSCRIPT_EXPORTER_MAX_OPSET:
        # We do not want to fail because we should still allow users to create
        # custom symbolic functions for opset > 17
        warnings.warn(
            f"Exporting to ONNX opset version {opset_version} is not supported "
            f"by 'torch.onnx.export()'. "
            f"The highest opset version supported is {_constants.ONNX_TORCHSCRIPT_EXPORTER_MAX_OPSET}. "
            f"To use a newer opset version, consider 'torch.onnx.dynamo_export()'. "
            f"Note that dynamo_export() is in preview. Please report errors with "
            f"dynamo_export() as Github issues to https://github.com/pytorch/pytorch/issues.",
            category=errors.OnnxExporterWarning,
        )

    if export_modules_as_functions and opset_version < 15:
        raise ValueError(
            "`export_modules_as_functions` is not supported for `opset_version` < 15. "
            "This is because `opset_version` < 15 implies IR version < 8, which means "
            "no local function support. "
        )
    if not operator_export_type:
        if _C_onnx._CAFFE2_ATEN_FALLBACK:
            operator_export_type = _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
        else:
            operator_export_type = _C_onnx.OperatorExportTypes.ONNX

    # By default, training=TrainingMode.EVAL,
    # which is good because running a model in training mode could result in
    # internal buffers getting updated, dropout getting applied, etc.
    # If you really know what you're doing, you can set
    # training=TrainingMode.TRAINING or training=TrainingMode.PRESERVE
    # (to preserve whatever the original training mode was).
    GLOBALS.export_onnx_opset_version = opset_version
    GLOBALS.operator_export_type = operator_export_type

    try:
        GLOBALS.in_onnx_export = True
        _autograd_inlining_previous = GLOBALS.autograd_inlining
        GLOBALS.autograd_inlining = autograd_inlining

        module_typenames_to_export_as_functions: Set[str] = set()
        if isinstance(model, (torch.nn.Module, torch.jit.ScriptModule)):
            module_typenames_to_export_as_functions = _setup_trace_module_map(
                model, export_modules_as_functions
            )

        with exporter_context(model, training, verbose):
            val_keep_init_as_ip = _decide_keep_init_as_input(
                keep_initializers_as_inputs,
                operator_export_type,
                opset_version,
            )
            val_add_node_names = _decide_add_node_names(
                add_node_names, operator_export_type
            )
            val_do_constant_folding = _decide_constant_folding(
                do_constant_folding, operator_export_type, training
            )
            # Normally f can be a file-like object, but for large models, the external data format requires a
            # valid `model_file_location`. Code in export.cpp will enforce this.
            if isinstance(f, str):
                model_file_location = f
            else:
                model_file_location = ""
            args = _decide_input_format(model, args)
            if dynamic_axes is None:
                dynamic_axes = {}
            _validate_dynamic_axes(dynamic_axes, model, input_names, output_names)

            graph, params_dict, torch_out = _model_to_graph(
                model,
                args,
                verbose,
                input_names,
                output_names,
                operator_export_type,
                val_do_constant_folding,
                fixed_batch_size=fixed_batch_size,
                training=training,
                dynamic_axes=dynamic_axes,
            )

            # TODO: Don't allocate an in-memory string for the protobuf
            defer_weight_export = (
                export_type is not _exporter_states.ExportTypes.PROTOBUF_FILE
            )
            if custom_opsets is None:
                custom_opsets = {}

            _C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
            node_attr_to_name = {}  # type: ignore[var-annotated]
            if module_typenames_to_export_as_functions:
                # NOTE: cannot call DCE after this pass. DCE will remove function definition nodes.
                node_attr_to_name = _C._jit_pass_onnx_function_extraction(
                    graph,
                    module_typenames_to_export_as_functions,
                    list(params_dict.keys()),
                )

            if keep_initializers_as_inputs is not True:
                params_dict = _C._jit_pass_onnx_deduplicate_initializers(  # type: ignore[assignment]
                    graph, params_dict, getattr(model, "training", False)  # type: ignore[arg-type]
                )
            _C._jit_pass_onnx_assign_scoped_names_for_node_and_value(graph)
            if export_params:
                (
                    proto,
                    export_map,
                    val_use_external_data_format,
                    node_names,
                ) = graph._export_onnx(  # type: ignore[attr-defined]
                    params_dict,
                    opset_version,
                    dynamic_axes,
                    defer_weight_export,
                    operator_export_type,
                    not verbose,
                    val_keep_init_as_ip,
                    custom_opsets,
                    val_add_node_names,
                    model_file_location,
                    node_attr_to_name,
                )
            else:
                (
                    proto,
                    export_map,
                    val_use_external_data_format,
                    node_names,
                ) = graph._export_onnx(  # type: ignore[attr-defined]
                    {},
                    opset_version,
                    dynamic_axes,
                    False,
                    operator_export_type,
                    not verbose,
                    val_keep_init_as_ip,
                    custom_opsets,
                    val_add_node_names,
                    model_file_location,
                    node_attr_to_name,
                )
            # Insert function_proto into model_proto.
            proto = onnx_proto_utils._add_onnxscript_fn(
                proto,
                custom_opsets,
            )
            if verbose:
                torch.onnx.log("Exported graph: ", graph)
            onnx_proto_utils._export_file(proto, f, export_type, export_map)
            # The ONNX checker only works for ONNX graphs. So if the operator_export_type is not ONNX,
            # we can skip this check.
            # If large model format export is enabled, proto will only contain data location instead of
            # raw data and _check_onnx_proto() will fail because it can only handle the raw ONNX proto
            # string in memory.
            if (operator_export_type is _C_onnx.OperatorExportTypes.ONNX) and (
                not val_use_external_data_format
            ):
                try:
                    _C._check_onnx_proto(proto)
                except RuntimeError as e:
                    raise errors.CheckerError(e) from e
    finally:
        assert GLOBALS.in_onnx_export
        GLOBALS.in_onnx_export = False
        GLOBALS.autograd_inlining = _autograd_inlining_previous
        _reset_trace_module_map()

    return torch_out


@_beartype.beartype
def _apply_friendly_debug_names(graph, params):
    for n in graph.nodes():
        for v in n.inputs():
            old_name = v.debugName()
            if old_name != str(v.unique()):
                continue
            new_name = f"{n.kind()}_{v.unique()}"
            v.setDebugName(new_name)
            if old_name in params:
                params[new_name] = params.pop(old_name)


@_beartype.beartype
def _set_input_and_output_names(graph, input_names, output_names):
    @_beartype.beartype
    def set_names(node_list, name_list, descriptor):
        if name_list is None:
            return
        if len(name_list) > len(node_list):
            raise RuntimeError(
                "number of %s names provided (%d) exceeded number of %ss (%d)"
                % (descriptor, len(name_list), descriptor, len(node_list))
            )

        # Mark if the output node DebugName is set before.
        output_node_set = set()
        for i, (name, node) in enumerate(zip(name_list, node_list)):
            # Duplicated output node, insert onnx::Identity to avoid setting the same DebugName after setDebugName().
            if descriptor == "output":
                if node in output_node_set:
                    identity_node = graph.create("onnx::Identity")
                    identity_node.insertAfter(node.node())
                    identity_node.addInput(node)
                    identity_node.output().setType(node.type())
                    graph.return_node().replaceInput(i, identity_node.output())
                    node = identity_node.output()
                output_node_set.add(node)

            if node.debugName() != name:
                node.setDebugName(name)

    set_names(list(graph.inputs()), input_names, "input")
    set_names(list(graph.outputs()), output_names, "output")


@_beartype.beartype
def _run_symbolic_method(g, op_name, symbolic_fn, args):
    r"""
    This trampoline function gets invoked for every symbolic method
    call from C++.
    """
    try:
        graph_context = jit_utils.GraphContext(
            graph=g,
            block=g.block(),
            opset=GLOBALS.export_onnx_opset_version,
            original_node=None,  # type: ignore[arg-type]
            params_dict=_params_dict,
            env={},
        )
        return symbolic_fn(graph_context, *args)
    except TypeError as e:
        # Handle the specific case where we didn't successfully dispatch
        # to symbolic_fn. Otherwise, the backtrace will have the clues
        # you need.
        e.args = (f"{e.args[0]} (occurred when translating {op_name})",)
        raise


@_beartype.beartype
def _add_block(node: _C.Node) -> _C.Block:
    return node.addBlock()


@_beartype.beartype
def _add_input_to_block(block: _C.Block):
    return block.addInputToBlock()  # type: ignore[attr-defined]


@_beartype.beartype
def _add_output_to_block(block: _C.Block, value: _C.Value) -> int:
    return block.registerOutput(value)


@_beartype.beartype
def _should_aten_fallback(
    name: str, opset_version: int, operator_export_type: _C_onnx.OperatorExportTypes
):
    # For BUILD_CAFFE2=0 builds, if domain=="aten" and operator_export_type==ONNX_ATEN,
    # an aten::ATen operator is created regardless of symbolics existence.
    # For BUILD_CAFFE2=1, the same applies only if there is no symbolic available.

    is_exportable_aten_op = registration.registry.is_registered_op(name, opset_version)
    is_onnx_aten_export = operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN
    is_aten_fallback_export = (
        operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
    )
    is_caffe2_build = _C_onnx._CAFFE2_ATEN_FALLBACK

    if not name.startswith("aten::"):
        return False

    if is_caffe2_build:
        if (
            is_onnx_aten_export or is_aten_fallback_export
        ) and not is_exportable_aten_op:
            return True
    else:
        if is_onnx_aten_export or (
            is_aten_fallback_export and not is_exportable_aten_op
        ):
            return True

    return False
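

# Illustrative usage sketch (added in this edit, not part of the upstream
# module): non-aten kinds never fall back, and on non-Caffe2 builds ONNX_ATEN
# forces fallback even for ops that do have a symbolic.
def _example_should_aten_fallback():
    aten_mode = _C_onnx.OperatorExportTypes.ONNX_ATEN
    assert not _should_aten_fallback("prim::Constant", 13, aten_mode)
    if not _C_onnx._CAFFE2_ATEN_FALLBACK:
        assert _should_aten_fallback("aten::relu", 13, aten_mode)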


@_beartype.beartype
def _need_symbolic_context(symbolic_fn: Callable) -> bool:
    """Checks if the first argument to symbolic_fn is annotated as type `torch.onnx.SymbolicContext`."""
    params = tuple(inspect.signature(symbolic_fn).parameters.values())
    # When annotation evaluation is postponed (PEP 563), the annotation is a string
    # and not a type. We need to use get_type_hints to get the real type.
    if not params:
        return False
    first_param_name = params[0].name
    type_hints = typing.get_type_hints(symbolic_fn)
    if first_param_name not in type_hints:
        return False
    param_type = type_hints[first_param_name]
    return issubclass(param_type, _exporter_states.SymbolicContext)


@_beartype.beartype
def _symbolic_context_handler(symbolic_fn: Callable) -> Callable:
    """Decorator that provides the symbolic context to the symbolic function if needed."""
    if _need_symbolic_context(symbolic_fn):
        # TODO(justinchuby): Update the module name of GraphContext when it is public.
        warnings.warn(
            "The first argument to symbolic functions is deprecated in 1.13 and will be "
            "removed in the future. Please treat the first argument (g) as GraphContext "
            "and use context information from the object instead.",
            category=FutureWarning,
        )

        def wrapper(graph_context: jit_utils.GraphContext, *args, **kwargs):
            symbolic_context = _exporter_states.SymbolicContext(
                params_dict=graph_context.params_dict,
                env=graph_context.env,
                cur_node=graph_context.original_node,
                onnx_block=graph_context.block,
            )
            return symbolic_fn(symbolic_context, graph_context, *args, **kwargs)

        return wrapper
    return symbolic_fn


@_beartype.beartype
def _get_aten_op_overload_name(n: _C.Node) -> str:
    # Returns the `overload_name` attribute of ATen ops on non-Caffe2 builds
    schema = n.schema()
    if not schema.startswith("aten::") or symbolic_helper.is_caffe2_aten_fallback():
        return ""
    return _C.parse_schema(schema).overload_name


@_beartype.beartype
def _run_symbolic_function(
    graph: _C.Graph,
    block: _C.Block,
    node: _C.Node,
    inputs: Any,
    env: Dict[_C.Value, _C.Value],
    operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
) -> Optional[Union[_C.Value, Sequence[Optional[_C.Value]]]]:
    """Runs a symbolic function.

    The function is used in C++ to export the node to ONNX.

    Returns:
        A single or a tuple of Values.
        None when the node gets cloned as is into the new graph.
    """

    opset_version = GLOBALS.export_onnx_opset_version

    # See Note [Export inplace]
    node_kind = node.kind()
    if node_kind.endswith("_"):
        # Treat relu_ -> relu; add_ -> add etc.
        ns_op_name = node_kind[:-1]
    else:
        ns_op_name = node_kind

    namespace, op_name = jit_utils.parse_node_kind(ns_op_name)

    graph_context = jit_utils.GraphContext(
        graph=graph,
        block=block,
        opset=opset_version,
        original_node=node,
        params_dict=_params_dict,
        env=env,
    )

    # Direct ATen export requested
    if _should_aten_fallback(ns_op_name, opset_version, operator_export_type):
        attrs = {
            k + "_" + node.kindOf(k)[0]: symbolic_helper._node_get(node, k)
            for k in node.attributeNames()
        }
        outputs = node.outputsSize()
        attrs["outputs"] = outputs
        return graph_context.aten_op(
            op_name,
            *inputs,
            overload_name=_get_aten_op_overload_name(node),
            **attrs,
        )

    try:
        # Caffe2-specific: Quantized op symbolics are registered for opset 9 only.
        if symbolic_helper.is_caffe2_aten_fallback() and opset_version == 9:
            symbolic_caffe2.register_quantized_ops("caffe2", opset_version)

        if namespace == "quantized" and symbolic_helper.is_caffe2_aten_fallback():
            domain = "caffe2"
        else:
            domain = namespace
        symbolic_function_name = f"{domain}::{op_name}"

        symbolic_function_group = registration.registry.get_function_group(
            symbolic_function_name
        )
        if symbolic_function_group is not None:
            symbolic_fn = symbolic_function_group.get(opset_version)
            if symbolic_fn is not None:
                # TODO Wrap almost identical attrs assignment or comment the difference.
                attrs = {
                    k: symbolic_helper._node_get(node, k) for k in node.attributeNames()
                }
                return symbolic_fn(graph_context, *inputs, **attrs)

        attrs = {
            k + "_" + node.kindOf(k)[0]: symbolic_helper._node_get(node, k)
            for k in node.attributeNames()
        }
        if namespace == "onnx":
            # Clone node to trigger ONNX shape inference
            return graph_context.op(op_name, *inputs, **attrs, outputs=node.outputsSize())  # type: ignore[attr-defined]

        raise errors.UnsupportedOperatorError(
            symbolic_function_name,
            opset_version,
            symbolic_function_group.get_min_supported()
            if symbolic_function_group
            else None,
        )

    except RuntimeError:
        if operator_export_type == _C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH:
            return None
        elif (
            operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
            and not symbolic_helper.is_caffe2_aten_fallback()
        ):
            # Emit ATen op for non-Caffe2 builds when `operator_export_type==ONNX_ATEN_FALLBACK`
            attrs = {
                k + "_" + node.kindOf(k)[0]: symbolic_helper._node_get(node, k)
                for k in node.attributeNames()
            }
            return graph_context.aten_op(
                op_name,
                *inputs,
                overload_name=_get_aten_op_overload_name(node),
                **attrs,
            )
        raise
    except TypeError as e:
        # Handle the specific case where we didn't successfully dispatch.
        # Otherwise, the backtrace will have the clues you need.
        e.args = (f"{e.args[0]} \n(Occurred when translating {op_name}).",)
        raise


@_beartype.beartype
def _verify_custom_op_name(symbolic_name: str):
    if not re.match(r"^[a-zA-Z0-9-_]+::[a-zA-Z-_]+[a-zA-Z0-9-_]*$", symbolic_name):
        raise errors.OnnxExporterError(
            f"Failed to register operator {symbolic_name}. "
            "The symbolic name must match the format domain::name, "
            "and should start with a letter and contain only "
            "alphanumerical characters"
        )

    ns, _ = jit_utils.parse_node_kind(symbolic_name)
    if ns == "onnx":
        raise ValueError(
            f"Failed to register operator {symbolic_name}. {ns} domain cannot be modified."
        )


@_beartype.beartype
def register_custom_op_symbolic(
    symbolic_name: str,
    symbolic_fn: Callable,
    opset_version: int,
):
    """Registers a symbolic function for a custom operator.

    When the user registers a symbolic function for custom/contrib ops,
    it is highly recommended to add shape inference for that operator via the setType API,
    otherwise the exported graph may have incorrect shape inference in some extreme cases.
    An example of setType is `test_aten_embedding_2` in `test_operators.py`.

    See "Custom Operators" in the module documentation for an example usage.

    Args:
        symbolic_name (str): The name of the custom operator in "<domain>::<op>"
            format.
        symbolic_fn (Callable): A function that takes in the ONNX graph and
            the input arguments to the current operator, and returns new
            operator nodes to add to the graph.
        opset_version (int): The ONNX opset version in which to register.
    """
    if symbolic_name.startswith("::"):
        symbolic_name = f"aten{symbolic_name}"

    _verify_custom_op_name(symbolic_name)

    registration.custom_onnx_symbolic(
        symbolic_name,
        opset_version,
        decorate=[
            _symbolic_context_handler,
        ],
    )(symbolic_fn)
@_beartype.beartype
def unregister_custom_op_symbolic(symbolic_name: str, opset_version: int):
    """Unregisters ``symbolic_name``.

    See "Custom Operators" in the module documentation for an example usage.

    Args:
        symbolic_name (str): The name of the custom operator in "<domain>::<op>"
            format.
        opset_version (int): The ONNX opset version in which to unregister.
    """
    if symbolic_name.startswith("::"):
        symbolic_name = f"aten{symbolic_name}"

    _verify_custom_op_name(symbolic_name)

    registration.registry.unregister(symbolic_name, opset_version)

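# Illustrative usage (not part of the original file): a minimal sketch of
# registering and then removing a symbolic function for a hypothetical custom
# op "mydomain::my_relu". The domain, op name, and the lowering to a single
# standard ONNX Relu below are assumptions for demonstration only.
def _example_my_relu_symbolic(g, input):
    # Lower the hypothetical custom op to the standard ONNX Relu op.
    return g.op("Relu", input)


register_custom_op_symbolic("mydomain::my_relu", _example_my_relu_symbolic, 13)
unregister_custom_op_symbolic("mydomain::my_relu", 13)
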
@_beartype.beartype
def _validate_dynamic_axes(dynamic_axes, model, input_names, output_names):
    """Ensures that the dynamic axes argument follows the expected format."""
    if len(dynamic_axes) == 0:
        return

    if hasattr(model, "graph"):
        # Extracting set of valid input/output names that shall be used for dynamic_axes
        if (input_names is None) or len(input_names) == 0:
            input_names = [x.debugName() for x in model.graph.inputs()]
        if (output_names is None) or len(output_names) == 0:
            output_names = [y.debugName() for y in model.graph.outputs()]

    valid_names = set((input_names or []) + (output_names or []))

    # If dynamic axes are provided as a list rather than a dictionary, they should
    # first be converted to a dictionary in the expected format. If desired axis names
    # are not provided for dynamic axes, automatic names shall be generated for
    # provided dynamic axes of specified input/output
    for key, value in dynamic_axes.items():
        if key not in valid_names:
            warnings.warn(
                f"Provided key {key} for dynamic axes is not a valid input/output name"
            )
        if isinstance(value, list):
            warnings.warn(
                "No names were found for specified dynamic axes of provided input. "
                f"Automatically generated names will be applied to each dynamic axis of input {key}"
            )

            value_dict = {}
            for i, x in enumerate(value):
                if not isinstance(x, int):
                    raise ValueError(
                        "The type of axis index is expected to be an integer"
                    )
                if x in value_dict:
                    warnings.warn(
                        f"Duplicate dynamic axis index {x} was provided for input {key}."
                    )
                else:
                    value_dict[x] = str(key) + "_dynamic_axes_" + str(i + 1)
            dynamic_axes[key] = value_dict

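# Illustrative sketch (not part of the original file): how `_validate_dynamic_axes`
# normalizes the list form of `dynamic_axes` in place, generating the names
# "<key>_dynamic_axes_<i + 1>" for each listed axis index. The input/output
# names below are arbitrary.
import warnings as _warnings

_example_axes = {"x": [0, 2]}
with _warnings.catch_warnings():
    _warnings.simplefilter("ignore")  # the list form emits an informational warning
    _validate_dynamic_axes(_example_axes, object(), ["x"], ["y"])
assert _example_axes == {"x": {0: "x_dynamic_axes_1", 2: "x_dynamic_axes_2"}}
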
def model_signature(model: Union[torch.nn.Module, Callable]) -> inspect.Signature:
    return inspect.signature(
        model.forward if isinstance(model, torch.nn.Module) else model
    )
venv/lib/python3.10/site-packages/torch/onnx/verification.py
ADDED
@@ -0,0 +1,1884 @@
"""Functions to verify exported ONNX model is functionally equivalent to original PyTorch model.

ONNX Runtime is required, and is used as the ONNX backend for export verification.
"""

from __future__ import annotations

import contextlib
import copy
import dataclasses
import datetime
import difflib
import enum
import functools
import io
import itertools
import os
import tempfile
import warnings
from typing import (
    Any,
    Callable,
    Collection,
    Dict,
    FrozenSet,
    List,
    Mapping,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
)

import numpy as np

import torch
import torch._C._onnx as _C_onnx
from torch import _C
from torch.onnx import _constants, _experimental, _exporter_states, utils
from torch.onnx._globals import GLOBALS
from torch.onnx._internal import _beartype, onnx_proto_utils
from torch.types import Number

_ORT_PROVIDERS = ("CPUExecutionProvider",)

_NumericType = Union[Number, torch.Tensor, np.ndarray]
_ModelType = Union[torch.nn.Module, torch.jit.ScriptModule]
_InputArgsType = Union[torch.Tensor, Tuple[Any, ...]]
_InputKwargsType = Mapping[str, Any]
_OutputsType = Union[Sequence[_NumericType], Sequence]

class OnnxBackend(enum.Enum):
    """Enum class for ONNX backend used for export verification."""

    REFERENCE = "ONNXReferenceEvaluator"
    ONNX_RUNTIME_CPU = "CPUExecutionProvider"
    ONNX_RUNTIME_CUDA = "CUDAExecutionProvider"

@dataclasses.dataclass
class VerificationOptions:
    """Options for ONNX export verification.

    Attributes:
        flatten: If True, unpack nested list/tuple/dict inputs into a flattened list of
            Tensors for ONNX. Set this to False if nested structures are to be preserved
            for ONNX, which is usually the case with exporting ScriptModules. Defaults to True.
        ignore_none: Whether to ignore None type in torch output, which is usually the
            case with tracing. Set this to False if torch output should keep None type,
            which is usually the case with exporting ScriptModules. Defaults to True.
        check_shape: Whether to check that the shapes of PyTorch and ONNX Runtime outputs
            are exactly the same. Set this to False to allow output shape broadcasting.
            Defaults to True.
        check_dtype: Whether to check that the dtypes of PyTorch and ONNX Runtime outputs
            are consistent. Defaults to True.
        backend: ONNX backend for verification. Defaults to OnnxBackend.ONNX_RUNTIME_CPU.
        rtol: relative tolerance in comparison between ONNX and PyTorch outputs.
        atol: absolute tolerance in comparison between ONNX and PyTorch outputs.
        remained_onnx_input_idx: If provided, only the specified inputs will be passed
            to the ONNX model. Supply a list when there are unused inputs in the model.
            Since unused inputs will be removed in the exported ONNX model, supplying
            all inputs will cause an error on unexpected inputs. This parameter tells
            the verifier which inputs to pass into the ONNX model.
        acceptable_error_percentage: acceptable percentage of element mismatches in comparison.
            It should be a float of value between 0.0 and 1.0.
    """

    flatten: bool = True
    ignore_none: bool = True
    check_shape: bool = True
    check_dtype: bool = True
    backend: OnnxBackend = OnnxBackend.ONNX_RUNTIME_CPU
    rtol: float = 1e-3
    atol: float = 1e-7
    remained_onnx_input_idx: Optional[Sequence[int]] = None
    acceptable_error_percentage: Optional[float] = None

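# Illustrative sketch (not part of the original module): constructing options
# that tolerate small numeric drift. The tolerance values are arbitrary choices
# for demonstration, not recommended defaults.
_loose_options = VerificationOptions(
    check_shape=False,  # allow broadcastable, not byte-identical, output shapes
    rtol=1e-2,
    atol=1e-4,
    acceptable_error_percentage=0.01,  # tolerate up to 1% mismatched elements
)
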
@_beartype.beartype
def _flatten_tuples(elem):
    flattened = []
    for t in elem:
        if isinstance(t, tuple):
            flattened.extend(_flatten_tuples(t))
        else:
            flattened.append(t)
    return flattened


# TODO(justinchuby): Add type checking by narrowing down the return type when input is None
def _to_numpy(elem) -> Union[list, np.ndarray]:
    if isinstance(elem, torch.Tensor):
        if elem.requires_grad:
            return elem.detach().cpu().numpy()
        else:
            return elem.cpu().numpy()
    elif isinstance(elem, (list, tuple)):
        return [_to_numpy(inp) for inp in elem]
    elif isinstance(elem, (bool, int, float)):
        return np.array(elem)
    elif isinstance(elem, dict):
        flattened = []
        for k in elem:
            flattened.extend([_to_numpy(k), _to_numpy(elem[k])])
        return flattened
    return elem

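# Illustrative sketch (not part of the original module): `_to_numpy` detaches
# tensors that require grad before conversion, and flattens dicts into an
# alternating [key, value, ...] list; non-tensor keys pass through unchanged.
assert isinstance(_to_numpy(torch.ones(2, requires_grad=True)), np.ndarray)
assert _to_numpy({"k": torch.zeros(1)})[0] == "k"
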
@_beartype.beartype
def _inline_flatten_list(inputs, res_list) -> list:
    for i in inputs:
        res_list.append(i) if not isinstance(
            i, (list, tuple)
        ) else _inline_flatten_list(i, res_list)
    return res_list


@_beartype.beartype
def _unpack_to_numpy(values, cast_onnx_accepted=True) -> list:
    value_unpacked = []
    for value in values:
        value_unpacked.extend(
            utils.unpack_quantized_tensor(value, cast_onnx_accepted=cast_onnx_accepted)
        )
    return [_to_numpy(v) for v in value_unpacked]

@_beartype.beartype
def _run_onnx(onnx_session, inputs) -> _OutputsType:
    kw_inputs = {}
    if inputs and isinstance(inputs[-1], dict):
        kw_inputs = inputs[-1]
        inputs = inputs[:-1]
    inputs = _unpack_to_numpy(_flatten_tuples(inputs))
    ort_inputs = {}
    for input_name, input in kw_inputs.items():
        ort_inputs[input_name] = _to_numpy(input)
    inputs = _to_numpy(inputs)
    if hasattr(onnx_session, "get_inputs"):
        # onnxruntime.InferenceSession
        input_names = [i.name for i in onnx_session.get_inputs()]
    elif hasattr(onnx_session, "input_names"):
        # onnx.reference.ReferenceEvaluator
        input_names = onnx_session.input_names
    else:
        raise ValueError(f"Unknown ONNX backend type: {type(onnx_session)}.")

    for i, input in enumerate(inputs):
        if i == len(input_names) or input_names[i] in ort_inputs:
            raise ValueError(
                f"got too many positional inputs. inputs: {inputs}. kw_inputs: {kw_inputs}. "
                f"input names: {input_names}."
            )
        ort_inputs[input_names[i]] = input
    onnx_outs = onnx_session.run(None, ort_inputs)
    return onnx_outs

@_beartype.beartype
def _ort_session(
    model: Union[str, io.BytesIO], ort_providers: Sequence[str] = _ORT_PROVIDERS
):
    try:
        import onnxruntime  # type: ignore[import]
    except ImportError as e:
        raise ImportError("onnxruntime is required for export verification.") from e

    if ort_providers is None:
        ort_providers = _ORT_PROVIDERS

    session_options = onnxruntime.SessionOptions()
    # Suppress ort warnings.
    # 0: Verbose, 1: Info, 2: Warning, 3: Error, 4: Fatal. Default is 2.
    session_options.log_severity_level = 3
    ort_session = onnxruntime.InferenceSession(
        model if isinstance(model, str) else model.getvalue(),
        session_options,
        providers=ort_providers,
    )
    return ort_session

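# Illustrative sketch (not part of the original module): exporting a tiny model
# to an in-memory buffer and running it through the helpers above. This assumes
# the optional `onnxruntime` dependency is installed; the model and input
# shapes are arbitrary.
_buffer = io.BytesIO()
torch.onnx.export(torch.nn.Linear(4, 2), (torch.randn(1, 4),), _buffer)
_session = _ort_session(_buffer)  # CPUExecutionProvider by default
_outs = _run_onnx(_session, (torch.randn(1, 4),))
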
@_beartype.beartype
def _onnx_reference_evaluator_session(model: Union[str, io.BytesIO]):
    try:
        import onnx
        from onnx import reference as onnx_reference  # type: ignore[attr-defined]
    except ImportError as exc:
        raise ImportError("onnx >= 1.13 is required for reference evaluator.") from exc

    proto = (
        onnx.load(model)  # type: ignore[attr-defined]
        if isinstance(model, str)
        else onnx.load_model_from_string(model.getvalue())  # type: ignore[attr-defined]
    )
    onnx_session = onnx_reference.ReferenceEvaluator(proto)
    return onnx_session

@_beartype.beartype
def _onnx_backend_session(model: Union[str, io.BytesIO], backend: OnnxBackend):
    if backend == OnnxBackend.REFERENCE:
        onnx_session = _onnx_reference_evaluator_session(model)
    elif backend in {OnnxBackend.ONNX_RUNTIME_CPU, OnnxBackend.ONNX_RUNTIME_CUDA}:
        onnx_session = _ort_session(model, (backend.value,))
    else:
        raise ValueError(f"Unsupported backend: {backend}")
    return onnx_session

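# Illustrative sketch (not part of the original module): the in-memory model
# from the `_ort_session` sketch above could instead be checked against the
# pure-Python ONNX reference evaluator, assuming `onnx >= 1.13` is installed:
#
#     _ref_session = _onnx_backend_session(_buffer, OnnxBackend.REFERENCE)
#     _ref_outs = _run_onnx(_ref_session, (torch.randn(1, 4),))
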
@_beartype.beartype
def _compare_onnx_pytorch_outputs_in_np(
    onnx_outs: _OutputsType,
    pt_outs: _OutputsType,
    options: VerificationOptions,
):
    assert len(onnx_outs) == len(
        pt_outs
    ), f"Number of outputs differ ONNX runtime: ({len(onnx_outs)}) PyTorch: ({len(pt_outs)})"
    acceptable_error_percentage = options.acceptable_error_percentage
    if acceptable_error_percentage and (
        acceptable_error_percentage > 1.0 or acceptable_error_percentage < 0.0
    ):
        raise ValueError(
            "If set, acceptable_error_percentage should be between 0.0 and 1.0"
        )

    for ort_out, pt_out in zip(onnx_outs, pt_outs):
        try:
            # TODO: Remove `check_shape` option once every shape inconsistent issue is addressed.
            if not options.check_shape:
                # Allow different but broadcastable output shapes.
                ort_out, pt_out = np.broadcast_arrays(ort_out, pt_out)
            torch.testing.assert_close(
                ort_out,
                pt_out,
                rtol=options.rtol,
                atol=options.atol,
                check_dtype=options.check_dtype,
                equal_nan=True,
            )
        except AssertionError as e:
            if acceptable_error_percentage:
                error_percentage = 1 - np.sum(
                    np.isclose(ort_out, pt_out, rtol=options.rtol, atol=options.atol)
                ) / np.prod(ort_out.shape)
                if error_percentage <= acceptable_error_percentage:
                    warnings.warn(
                        f"Suppressed AssertionError:\n{e}.\n"
                        f"Error percentage {error_percentage} "
                        f"within acceptable range {acceptable_error_percentage}."
                    )
                    continue
            if ort_out.dtype == np.uint8 or ort_out.dtype == np.int8:
                warnings.warn("ONNX output is quantized")
            if pt_out.dtype == np.uint8 or pt_out.dtype == np.int8:
                warnings.warn("PyTorch output is quantized")
            raise

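# Worked example (not part of the original module) of the mismatch arithmetic
# above: 8 mismatches out of 1000 elements give an error percentage of
# 1 - 992/1000 = 0.008, which a 1% `acceptable_error_percentage` suppresses
# while a 0.5% threshold would re-raise the AssertionError.
_a = np.zeros(1000)
_b = np.zeros(1000)
_b[:8] = 1.0  # 8 of 1000 elements fall outside the default isclose tolerances
_err = 1 - np.sum(np.isclose(_a, _b)) / np.prod(_a.shape)
assert abs(_err - 0.008) < 1e-9
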
@_beartype.beartype
def _compare_onnx_pytorch_outputs(
    onnx_outs: _OutputsType,
    pt_outs: Any,
    options: VerificationOptions,
):
    """
    Compare ONNX and PyTorch outputs.

    Args:
        onnx_outs: outputs from ONNX backend.
        pt_outs: outputs from PyTorch.
        options: options for verification.

    Raises:
        AssertionError: if outputs from ONNX model and PyTorch model are not
            equal up to specified precision.
        ValueError: if arguments provided are invalid.
    """
    if options.ignore_none:
        # torch.jit._flatten filters None type
        pt_outs, _ = torch.jit._flatten(pt_outs)
    else:
        pt_outs = _inline_flatten_list([pt_outs], [])
    pt_outs_np = _unpack_to_numpy(pt_outs, cast_onnx_accepted=False)
    onnx_outs = _inline_flatten_list(onnx_outs, [])
    _compare_onnx_pytorch_outputs_in_np(onnx_outs, pt_outs_np, options)

@_beartype.beartype
def _prepare_input_for_pytorch(args, kwargs):
    """Prepare input for PyTorch model execution.

    Any future changes/formatting to the input before dispatching to the PyTorch
    model should be made in this function.

    Args:
        args: positional arguments for PyTorch model forward method.
        kwargs: keyword arguments for PyTorch model forward method.

    Returns:
        args: positional arguments for PyTorch model forward method.
        kwargs: keyword arguments for PyTorch model forward method.
    """
    if isinstance(args, (torch.Tensor, dict)):
        args = (args,)
    # In-place operators will update input tensor data as well.
    # Thus inputs are replicated before every forward call.
    args = copy.deepcopy(args)
    if kwargs:
        kwargs = copy.deepcopy(kwargs)
    else:
        kwargs = {}
    return args, kwargs

@_beartype.beartype
def _prepare_input_for_export(args, kwargs):
    """Prepare input for ONNX model export.

    Any future changes/formatting to the input before dispatching to the
    :func:`torch.onnx.export` api should be made in this function.

    Args:
        args: positional arguments for PyTorch model forward method.
        kwargs: keyword arguments for PyTorch model forward method.

    Returns:
        onnx_inputs: positional arguments for ONNX model export, as `args` in
            :func:`torch.onnx.export`.
    """
    args, kwargs = _prepare_input_for_pytorch(args, kwargs)
    if not kwargs and len(args) > 0 and isinstance(args[-1], dict):
        onnx_inputs = args + ({},)
    elif kwargs:
        onnx_inputs = args + (kwargs,)
    else:
        onnx_inputs = args
    return onnx_inputs

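# Illustrative sketch (not part of the original module): when the last
# positional argument is itself a dict and no kwargs are given, an empty dict
# is appended so :func:`torch.onnx.export` does not mistake it for keyword
# arguments. The argument values below are arbitrary.
_example_args = (torch.randn(2), {"scale": torch.tensor(1.0)})
_example_onnx_inputs = _prepare_input_for_export(_example_args, {})
assert _example_onnx_inputs[-1] == {}
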
@_beartype.beartype
def _prepare_input_for_onnx(
    args, kwargs, remained_onnx_input_idx: Optional[Sequence[int]], flatten: bool
):
    """Prepare input for ONNX model execution in ONNX backend.

    Any future changes/formatting to the input before dispatching to the ONNX backend
    run should be made in this function.

    Args:
        args: positional arguments for PyTorch model forward method.
        kwargs: keyword arguments for PyTorch model forward method.
        remained_onnx_input_idx: indices of inputs to be used for ONNX model execution.
        flatten: whether to flatten the input before dispatching to the ONNX model execution.

    Returns:
        onnx_inputs: positional arguments for ONNX model execution in ONNX backend.
    """
    onnx_inputs = _prepare_input_for_export(args, kwargs)
    if flatten:
        onnx_inputs, _ = torch.jit._flatten(onnx_inputs)
    elif onnx_inputs and onnx_inputs[-1] == {}:
        # Handle empty kwargs (normally removed by flatten).
        onnx_inputs = onnx_inputs[:-1]
    if remained_onnx_input_idx is not None:
        return [onnx_inputs[i] for i in remained_onnx_input_idx]
    else:
        return onnx_inputs

@_beartype.beartype
def _try_clone_model(model):
    """Used for preserving original model in case forward mutates model states."""
    try:
        return copy.deepcopy(model)
    except Exception:
        warnings.warn(
            "Failed to clone model. Model state might be mutated during verification."
        )
        return model

@_beartype.beartype
def _compare_onnx_pytorch_model(
    pt_model: _ModelType,
    onnx_model_f: Union[str, io.BytesIO],
    input_args: _InputArgsType,
    input_kwargs: Optional[_InputKwargsType],
    additional_test_inputs: Optional[Sequence[_InputArgsType]],
    options: VerificationOptions,
):
    """Compare outputs from ONNX model runs with outputs from PyTorch model runs.

    Args:
        pt_model: PyTorch model.
        onnx_model_f: ONNX model file path or file-like object.
        input_args: positional arguments for PyTorch model forward method.
        input_kwargs: keyword arguments for PyTorch model forward method.
        additional_test_inputs: additional positional arguments for PyTorch model
            forward method.
        options: options for verification.

    Raises:
        AssertionError: if outputs from ONNX model and PyTorch model are not
            equal up to specified precision.
    """
    onnx_session = _onnx_backend_session(onnx_model_f, options.backend)

    @_beartype.beartype
    def compare_onnx_pytorch_model_with_input(input_args, input_kwargs):
        pt_args, pt_kwargs = _prepare_input_for_pytorch(input_args, input_kwargs)
        # TODO: remove this and treat mutating model separately. See #77679
        pt_model_copy = _try_clone_model(pt_model)
        pt_outs = pt_model_copy(*pt_args, **pt_kwargs)

        onnx_inputs = _prepare_input_for_onnx(
            input_args, input_kwargs, options.remained_onnx_input_idx, options.flatten
        )

        onnx_outs = _run_onnx(onnx_session, onnx_inputs)

        _compare_onnx_pytorch_outputs(
            onnx_outs=onnx_outs,
            pt_outs=pt_outs,
            options=options,
        )

    compare_onnx_pytorch_model_with_input(input_args, input_kwargs)

    if additional_test_inputs:
        for test_input_args in additional_test_inputs:
            compare_onnx_pytorch_model_with_input(test_input_args, {})

class _GraphDiff:
    """A class to represent the difference between two graphs."""

    @_beartype.beartype
    def __init__(self, graph_a: _C.Graph, graph_b: _C.Graph):
        """Construct a _GraphDiff object.

        Args:
            graph_a (_C.Graph): First graph to compare.
            graph_b (_C.Graph): Second graph to compare.
        """
        self.graph_a = graph_a
        self.graph_b = graph_b

    @_beartype.beartype
    def __str__(self):
        """See function :func:`diff_report`."""
        return self.diff_report()

    @_beartype.beartype
    def _indent(self, lines: str) -> str:
        return "\n".join(["\t" + line for line in lines.splitlines()])

    @_beartype.beartype
    def diff_report(self) -> str:
        """Return a string representation of the graph difference.

        The report shows the first pair of nodes that diverges. It also shows the source
        location of the pair of nodes.

        Returns:
            graph_diff_report (str): A string representation of the graph difference.
        """
        graph_a = self.graph_a
        graph_b = self.graph_b

        graph_a_str = str(graph_a)
        graph_b_str = str(graph_b)

        if graph_a_str == graph_b_str:
            return ""

        graph_diff = difflib.ndiff(
            graph_a_str.splitlines(True), graph_b_str.splitlines(True)
        )
        graph_diff_report = ["Graph diff:", self._indent("".join(graph_diff))]

        for node_a, node_b in itertools.zip_longest(graph_a.nodes(), graph_b.nodes()):
            if str(node_a) != str(node_b):
                graph_diff_report.append("First diverging operator:")
                node_diff = difflib.ndiff(
                    str(node_a).splitlines(True), str(node_b).splitlines(True)
                )
                source_printout = ["node diff:", self._indent("".join(node_diff))]

                stack_a = node_a.sourceRange() if node_a else None
                if stack_a:
                    source_printout.extend(
                        ["Former source location:", self._indent(str(stack_a))]
                    )
                stack_b = node_b.sourceRange() if node_b else None
                if stack_b:
                    source_printout.extend(
                        ["Latter source location:", self._indent(str(stack_b))]
                    )

                graph_diff_report.extend(source_printout)

                break

        return "\n".join(graph_diff_report)

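# Illustrative sketch (not part of the original module): diffing the traced
# graphs of two functions that differ in a single op. The example functions are
# arbitrary; the report starts with "Graph diff:" whenever the graphs differ.
_graph_add = torch.jit.trace(lambda x: x + 1, torch.ones(1)).graph
_graph_mul = torch.jit.trace(lambda x: x * 2, torch.ones(1)).graph
_report = _GraphDiff(_graph_add, _graph_mul).diff_report()
assert _report.startswith("Graph diff:")
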
@_beartype.beartype
def _check_graph_diff(
    model: Union[torch.nn.Module, torch.jit.ScriptModule],
    test_input_groups: Sequence[Tuple[Tuple[Any, ...], Mapping[str, Any]]],
    export_options: _experimental.ExportOptions,
    model_to_graph_func: Callable[
        [
            torch.nn.Module,
            Tuple[Any, ...],
            Mapping[str, Any],
            _experimental.ExportOptions,
        ],
        _C.Graph,
    ],
) -> str:
    """Check if graph produced by `model_to_graph_func` is the same across `test_input_groups`.

    Args:
        model: See :func:`check_export_model_diff`.
        test_input_groups: See :func:`check_export_model_diff`.
        export_options: See :func:`check_export_model_diff`.
        model_to_graph_func: A function to convert a PyTorch model to a JIT IR graph.

    Returns:
        graph_diff_report (str): A string representation of the graph difference.
    """
    if len(test_input_groups) < 2:
        raise ValueError("Need at least two groups of test inputs to compare.")

    ref_jit_graph = None
    for args, kwargs in test_input_groups:
        jit_graph = model_to_graph_func(model, args, kwargs, export_options)
        if ref_jit_graph is None:
            ref_jit_graph = jit_graph
            continue

        graph_diff_report = _GraphDiff(ref_jit_graph, jit_graph).diff_report()
        if graph_diff_report:
            return graph_diff_report
    return ""

@_beartype.beartype
def _traced_graph_from_model(
    model: Union[torch.nn.Module, torch.jit.ScriptModule],
    args: Tuple[Any, ...],
    kwargs: Mapping[str, Any],
    export_options: _experimental.ExportOptions,
) -> _C.Graph:
    """As part of the ONNX export steps, create a traced JIT graph from a PyTorch model.

    Args:
        model: See :func:`check_export_model_diff`.
        args: See :func:`check_export_model_diff`.
        kwargs: See :func:`check_export_model_diff`.
        export_options: See :func:`check_export_model_diff`.

    Returns:
        jit_graph (_C.Graph): A traced JIT graph.
    """
    training = export_options.training
    verbose = export_options.verbose

    with utils.exporter_context(model, training, verbose):
        export_inputs = _prepare_input_for_export(args, kwargs)
        model = utils._pre_trace_quant_model(model, export_inputs)
        jit_graph, _, _, _ = utils._create_jit_graph(model, export_inputs)
        return jit_graph

@_beartype.beartype
def _onnx_graph_from_model(
    model: Union[torch.nn.Module, torch.jit.ScriptModule],
    args: Tuple[Any, ...],
    kwargs: Mapping[str, Any],
    export_options: _experimental.ExportOptions,
) -> _C.Graph:
    """As part of the ONNX export steps, export an ONNX JIT graph from a PyTorch model.

    Args:
        model: See :func:`check_export_model_diff`.
        args: See :func:`check_export_model_diff`.
        kwargs: See :func:`check_export_model_diff`.
        export_options: See :func:`check_export_model_diff`.

    Returns:
        onnx_graph (_C.Graph): An ONNX JIT graph.
    """
    # TODO: refactor utils.py to remove duplicated code of context setup. See #78834
    opset_version = export_options.opset_version
    operator_export_type = export_options.operator_export_type
    export_modules_as_functions = export_options.export_modules_as_functions
    training = export_options.training
    verbose = export_options.verbose
    dynamic_axes = export_options.dynamic_axes
    input_names = export_options.input_names
    output_names = export_options.output_names

    if opset_version is None:
        opset_version = _constants.ONNX_DEFAULT_OPSET

    utils._setup_trace_module_map(model, export_modules_as_functions)

    if not operator_export_type:
        if _C_onnx._CAFFE2_ATEN_FALLBACK:
            operator_export_type = _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
        else:
            operator_export_type = _C_onnx.OperatorExportTypes.ONNX

    GLOBALS.export_onnx_opset_version = opset_version
    GLOBALS.operator_export_type = operator_export_type

    with utils.exporter_context(model, training, verbose):
        do_constant_folding = utils._decide_constant_folding(
            export_options.do_constant_folding, operator_export_type, training
        )

        if dynamic_axes is None:
            dynamic_axes = {}
        utils._validate_dynamic_axes(dynamic_axes, model, input_names, output_names)

        export_inputs = _prepare_input_for_export(args, kwargs)
        export_inputs = utils._decide_input_format(model, export_inputs)
        onnx_graph, _, _ = utils._model_to_graph(
            model,
            export_inputs,
            verbose,
            input_names,
            output_names,
            operator_export_type,
            do_constant_folding,
            training=training,
            dynamic_axes=dynamic_axes,
        )

        return onnx_graph

@_beartype.beartype
def _onnx_graph_from_aten_graph(
    graph: torch.Graph,
    export_options: _experimental.ExportOptions,
    params_dict: Optional[Dict[str, Any]] = None,
) -> Tuple[torch.Graph, Dict[str, Any]]:
    if params_dict is None:
        params_dict = {}
    operator_export_type = export_options.operator_export_type
    dynamic_axes = export_options.dynamic_axes or {}
    input_names = export_options.input_names
    training = export_options.training
    do_constant_folding = export_options.do_constant_folding
    opset_version = export_options.opset_version or _constants.ONNX_DEFAULT_OPSET

    GLOBALS.export_onnx_opset_version = opset_version
    GLOBALS.operator_export_type = operator_export_type

    do_constant_folding = utils._decide_constant_folding(
        do_constant_folding, operator_export_type, training
    )

    # TODO: Below is doing aten graph to onnx. It should be abstracted as a
    # function in torch/onnx/utils.py.
    graph = graph.copy()
    graph = utils._optimize_graph(
        graph,
        operator_export_type,
        params_dict=params_dict,
        dynamic_axes=dynamic_axes,
        input_names=input_names,
    )

    if training is None or training == _C_onnx.TrainingMode.EVAL:
        params_dict = torch._C._jit_pass_onnx_eval_peephole(graph, params_dict)

    if (
        do_constant_folding
        and opset_version >= _constants.ONNX_CONSTANT_FOLDING_MIN_OPSET
    ):
        params_dict = _C._jit_pass_onnx_constant_fold(graph, params_dict, opset_version)
        _C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)

    if GLOBALS.onnx_shape_inference:
        _C._jit_pass_onnx_graph_shape_type_inference(graph, params_dict, opset_version)

    params_dict = _C._jit_pass_onnx_eliminate_unused_items(graph, params_dict)

    # For ONNX opset < 9, constants only have three data types: float16, float, double.
    # In this pass, transform constants of other data types to float/double + cast operator.
    if opset_version < 9:
        _C._jit_pass_onnx_cast_all_constant_to_floating(graph)

    params_dict = _C._jit_pass_filter_non_tensor_arguments(params_dict)
    _C._jit_decay_packed_param_input_types(graph)

    _C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)

    if export_options.verbose:
        print("ONNX graph: ", graph)

    return graph, params_dict

@_beartype.beartype
def _onnx_proto_from_onnx_graph(
    onnx_graph: torch.Graph,
    export_options: _experimental.ExportOptions,
    params_dict: Dict[str, Any],
) -> Tuple[bytes, Mapping[str, bytes]]:
    opset_version = export_options.opset_version or _constants.ONNX_DEFAULT_OPSET
    dynamic_axes = export_options.dynamic_axes or {}
    operator_export_type = export_options.operator_export_type
    val_keep_init_as_ip = utils._decide_keep_init_as_input(
        export_options.keep_initializers_as_inputs,
        operator_export_type,
        opset_version,
    )
    val_add_node_names = utils._decide_add_node_names(True, operator_export_type)
    custom_opsets = export_options.custom_opsets or {}

    proto, export_map, _, _ = onnx_graph._export_onnx(  # type: ignore[attr-defined]
        params_dict,
        opset_version,
        dynamic_axes,
        False,
        operator_export_type,
        not export_options.verbose,
        val_keep_init_as_ip,
        custom_opsets,
        val_add_node_names,
        "",
        {},
    )

    return proto, export_map

@_beartype.beartype
def check_export_model_diff(
    model: Union[torch.nn.Module, torch.jit.ScriptModule],
    test_input_groups: Sequence[Tuple[Tuple[Any, ...], Mapping[str, Any]]],
    export_options: Optional[_experimental.ExportOptions] = None,
) -> str:
    """Verify exported model discrepancy between different groups of inputs.

    A graph is exported for each group of inputs. The exported graphs are then compared
    to each other, and discrepancies of the first pair of nodes are reported. This function
    first checks the jit graph. If no discrepancies were found, it then checks the onnx
    graph.

    Unless otherwise specified, the jit/ONNX graph is expected to be the same, regardless
    of the inputs used for exporting. A discrepancy implies the exported graph is
    not accurate when run on other groups of inputs, which typically results in
    runtime errors or mismatching output.

    Args:
        model (torch.nn.Module or torch.jit.ScriptModule): The model to be exported.
        test_input_groups (Sequence[Tuple[Tuple[Any, ...], Mapping[str, Any]]]): A sequence
            of input groups to be used to export the model. Each input group is a pair of
            (args, kwargs).
        export_options (_experimental.ExportOptions, optional): An _experimental.ExportOptions
            object that controls the export behavior.

    Returns:
        str: A string containing the diff of the exported models.
    """
    export_options = (
        _experimental.ExportOptions() if export_options is None else export_options
    )

    jit_diff_report = _check_graph_diff(
        model, test_input_groups, export_options, _traced_graph_from_model
    )
    if jit_diff_report:
        return jit_diff_report

    return _check_graph_diff(
        model, test_input_groups, export_options, _onnx_graph_from_model
    )

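# Illustrative usage (not part of the original module): comparing the export of
# a tiny model across two input groups of the same shape; an empty report means
# no discrepancy was found. Traced graphs record input shapes, so groups with
# different shapes may legitimately produce a non-empty report.
_diff_report = check_export_model_diff(
    torch.nn.Linear(3, 1),
    [((torch.randn(2, 3),), {}), ((torch.randn(2, 3),), {})],
)
assert _diff_report == ""
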
@_beartype.beartype
def verify(
    model: _ModelType,
    input_args: _InputArgsType,
    input_kwargs: Optional[_InputKwargsType] = None,
    do_constant_folding: bool = True,
    dynamic_axes: Optional[
        Mapping[str, Union[Mapping[int, str], Mapping[str, Sequence[int]]]]
    ] = None,
    input_names: Optional[Sequence[str]] = None,
    output_names: Optional[Sequence[str]] = None,
    training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL,
    opset_version: Optional[int] = None,
    keep_initializers_as_inputs: bool = True,
    verbose: bool = False,
    fixed_batch_size: bool = False,
    use_external_data: bool = False,
    additional_test_inputs: Optional[Sequence[_InputArgsType]] = None,
    options: Optional[VerificationOptions] = None,
):
    """Verify model export to ONNX against original PyTorch model.

    Args:
        model (torch.nn.Module or torch.jit.ScriptModule): See :func:`torch.onnx.export`.
        input_args (tuple): See :func:`torch.onnx.export`.
        input_kwargs (dict): See :func:`torch.onnx.export`.
        do_constant_folding (bool, optional): See :func:`torch.onnx.export`.
        dynamic_axes (dict, optional): See :func:`torch.onnx.export`.
        input_names (list, optional): See :func:`torch.onnx.export`.
        output_names (list, optional): See :func:`torch.onnx.export`.
        training (torch.onnx.TrainingMode): See :func:`torch.onnx.export`.
        opset_version (int, optional): See :func:`torch.onnx.export`.
        keep_initializers_as_inputs (bool, optional): See :func:`torch.onnx.export`.
        verbose (bool, optional): See :func:`torch.onnx.export`.
        fixed_batch_size (bool, optional): Legacy argument, used only by rnn test cases.
        use_external_data (bool, optional): Explicitly specify whether to export the
            model with external data.
        additional_test_inputs (list, optional): List of tuples. Each tuple is a group of
            input arguments to test. Currently only *args are supported.
        options (VerificationOptions, optional): A VerificationOptions object that
            controls the verification behavior.

    Raises:
        AssertionError: if outputs from ONNX model and PyTorch model are not
            equal up to specified precision.
        ValueError: if arguments provided are invalid.
    """
    if options is None:
        options = VerificationOptions()

    if training == torch.onnx.TrainingMode.TRAINING:
        model.train()
    elif training == torch.onnx.TrainingMode.EVAL:
        model.eval()
    with torch.no_grad(), contextlib.ExitStack() as stack:
        model_f: Union[str, io.BytesIO] = io.BytesIO()
        if use_external_data:
            tmpdir_path = stack.enter_context(tempfile.TemporaryDirectory())
            model_f = os.path.join(tmpdir_path, "model.onnx")

        inputs_for_export = _prepare_input_for_export(input_args, input_kwargs)

        # TODO(#77679): remove this and treat mutating model separately.
        model_copy = _try_clone_model(model)
        utils._export(
            model,
            inputs_for_export,
            model_f,
            opset_version=opset_version,
            do_constant_folding=do_constant_folding,
            keep_initializers_as_inputs=keep_initializers_as_inputs,
            dynamic_axes=dynamic_axes,
            input_names=input_names,
            output_names=output_names,
            fixed_batch_size=fixed_batch_size,
            training=training,
            verbose=verbose,
        )

        _compare_onnx_pytorch_model(
            pt_model=model_copy,
            onnx_model_f=model_f,
            input_args=input_args,
            input_kwargs=input_kwargs,
            additional_test_inputs=additional_test_inputs,
            options=options,
        )

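# Illustrative usage (not part of the original module): a round-trip check of a
# tiny model against ONNX Runtime. Raises AssertionError on numeric mismatch;
# the model, input shape, opset, and tolerances below are arbitrary choices.
verify(
    torch.nn.Linear(4, 2),
    (torch.randn(1, 4),),
    opset_version=14,
    options=VerificationOptions(rtol=1e-3, atol=1e-6),
)
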
@_beartype.beartype
def verify_aten_graph(
    graph: torch.Graph,
    input_args: Tuple[Any, ...],
    export_options: _experimental.ExportOptions,
    params_dict: Optional[Dict[str, Any]] = None,
    verification_options: Optional[VerificationOptions] = None,
) -> Tuple[Optional[AssertionError], torch.Graph, _OutputsType, _OutputsType]:
    if verification_options is None:
        verification_options = VerificationOptions()
    if params_dict is None:
        params_dict = {}

    original_jit_graph = graph
    graph = graph.copy()

    # Execute aten graph and get reference torch jit outputs.
    graph_inputs = list(graph.inputs())
    jit_inputs = tuple([arg for arg in input_args if arg is not None])
    weights = [params_dict[v.debugName()] for v in graph_inputs[len(jit_inputs) :]]
    assert all(w is not None for w in weights)
    # TODO: Only copy the argument if mutation is detected in Graph.
    jit_inputs = copy.deepcopy(jit_inputs)
    jit_input_and_parameters = jit_inputs + tuple(weights)
    jit_outs = torch._C._jit_interpret_graph(graph, jit_input_and_parameters)  # type: ignore[attr-defined]
    if not isinstance(jit_outs, (list, tuple)):
        jit_outs = [jit_outs]

    # Convert aten graph to onnx graph.
    graph, onnx_params_dict = _onnx_graph_from_aten_graph(
        graph, export_options, params_dict
    )

    proto, export_map = _onnx_proto_from_onnx_graph(
        graph, export_options, onnx_params_dict
    )
    model_f: Union[str, io.BytesIO] = io.BytesIO()
    export_type = _exporter_states.ExportTypes.PROTOBUF_FILE
    onnx_proto_utils._export_file(proto, model_f, export_type, export_map)

    # NOTE: Verification is unstable. Try catch to emit information for debugging.
    try:
        # NOTE: Input might be dce'ed, so we need to remove those from the input args.
        new_input_names = {v.debugName() for v in graph.inputs()}
        new_input_args = []
        for v, arg in zip(original_jit_graph.inputs(), input_args):
            if v.debugName() in new_input_names:
                new_input_args.append(arg)
        input_args = tuple(new_input_args)

        onnx_inputs = _prepare_input_for_onnx(
            input_args,
            {},
            verification_options.remained_onnx_input_idx,
            verification_options.flatten,
        )

        onnx_session = _onnx_backend_session(model_f, verification_options.backend)
        onnx_outs = _run_onnx(onnx_session, onnx_inputs)
        del onnx_session  # To free device memory

        try:
            _compare_onnx_pytorch_outputs(
                onnx_outs=onnx_outs,
                pt_outs=jit_outs,
                options=verification_options,
            )
        except AssertionError as e:
            return e, graph, jit_outs, onnx_outs

        return None, graph, jit_outs, onnx_outs

    except Exception as e:
        print("Unexpected error during verification.")
        print("jit graph: ", original_jit_graph)
        print("onnx graph: ", graph)
        raise e

class GraphInfoPrettyPrinter:
    graph_info: Optional[GraphInfo]
    upper_printer: Optional[GraphInfoPrettyPrinter]
    lower_printer: Optional[GraphInfoPrettyPrinter]

    graph_str_lambdas: Mapping[int, str]
    connector_str_lambdas: Mapping[int, str]
    children_str_lambdas: Mapping[int, str]

    def __init__(self, graph_info: Optional[GraphInfo]):
        self.graph_info = graph_info
        if (
            graph_info is not None
            and graph_info.upper_graph_info is not None
            and graph_info.lower_graph_info is not None
        ):
            self.upper_printer = GraphInfoPrettyPrinter(graph_info.upper_graph_info)
            self.lower_printer = GraphInfoPrettyPrinter(graph_info.lower_graph_info)
        else:
            self.upper_printer = None
            self.lower_printer = None

    @_beartype.beartype
    def _total_rows(self) -> int:
        if self.graph_info is None:
            return 1
        if self.upper_printer and self.lower_printer:
            return (
                self.upper_printer._total_rows() + self.lower_printer._total_rows() + 1
            )
        return 2  # Two lines: node count + id.

    @_beartype.beartype
    def _node_count_segment_str(self) -> str:
        if self.graph_info is None:
            return "..."
        node_count = self.graph_info.essential_node_count()
        has_mismatch = self.graph_info.has_mismatch()
        error_node_kind = (
            f"({self.graph_info.essential_node_kinds().pop()})"
            if node_count == 1 and has_mismatch
            else ""
        )

        return f"{node_count} {'X' if has_mismatch else '✓'} {error_node_kind}"

    @_beartype.beartype
    def _graph_id_segment_str(self) -> str:
        if self.graph_info is None:
            return ""
        return f"id: {self.graph_info.id}"

    @_beartype.beartype
    def _max_segment_columns(self) -> int:
        return max(
            map(len, (self._node_count_segment_str(), self._graph_id_segment_str()))
        )

    @_beartype.beartype
    def _graph_segment_str_at_line(self, line: int) -> str:
        """Get the string representation of the graph segment at the given line."""
        if line == 0:
            result_str = self._node_count_segment_str()
            result_str += " " * (self._max_segment_columns() - len(result_str))
            return result_str
        if line == 1:
            result_str = self._graph_id_segment_str()
            result_str += " " * (self._max_segment_columns() - len(result_str))
            return result_str
        if 0 <= line < self._total_rows():
            return " " * self._max_segment_columns()
        return ""

    @_beartype.beartype
    def _connector_segment_str_at_line(self, line: int) -> str:
        """Get the connector segment string at the given line."""
        if self.upper_printer is None and self.lower_printer is None:
            return ""
        upper_total_rows = self.upper_printer._total_rows() if self.upper_printer else 1
        lower_total_rows = self.lower_printer._total_rows() if self.lower_printer else 1
        if line == 0:
            return "  __"
        elif line < upper_total_rows + 1:
            return " |  "
        elif line == upper_total_rows + 1:
            return " |__"
        elif line < upper_total_rows + lower_total_rows + 1:
            return "    "
        return ""

    @_beartype.beartype
    def _children_str_at_line(self, line: int) -> str:
        """Get the string representation of the children at the given line.

        Recursively calls `_str_at_line` on children nodes.
        """
        if self.upper_printer is None and self.lower_printer is None:
            return ""
        upper_total_rows = self.upper_printer._total_rows() if self.upper_printer else 1
        lower_total_rows = self.lower_printer._total_rows() if self.lower_printer else 1
        if 0 <= line < upper_total_rows:
            return (
                self.upper_printer._str_at_line(line) if self.upper_printer else "..."
            )
        elif upper_total_rows < line < upper_total_rows + lower_total_rows + 1:
            return (
                self.lower_printer._str_at_line(line - upper_total_rows - 1)
                if self.lower_printer
                else "..."
            )
        return ""

    @_beartype.beartype
    def _str_at_line(self, line: int) -> str:
        """Get the string representation of the graph at the given line."""
        return (
            self._graph_segment_str_at_line(line)
            + self._connector_segment_str_at_line(line)
            + self._children_str_at_line(line)
        )

    def pretty_print(self):
        if self.graph_info is None:
            print(None)
            return
        # Print tree.
        print(" Tree: ".center(80, "="))
        total_rows = self._total_rows()
        for line in range(total_rows):
            print(self._str_at_line(line).rstrip())
        if self.graph_info.has_mismatch():
            # Summarize leaf subgraphs with mismatch.
            print(" Mismatch leaf subgraphs: ".center(80, "="))
            print(
                [
                    graph_info.id
                    for graph_info in self.graph_info.all_mismatch_leaf_graph_info()
                ]
            )
            # Summarize node kinds with mismatch.
            mismatch_node_kinds: Dict[str, int] = {}
            for graph_info in self.graph_info.all_mismatch_leaf_graph_info():
                node_kinds = graph_info.essential_node_kinds()
                if len(node_kinds) == 1:
                    node_kind = node_kinds.pop()
                    mismatch_node_kinds[node_kind] = (
                        mismatch_node_kinds.get(node_kind, 0) + 1
                    )
            print(" Mismatch node kinds: ".center(80, "="))
            print(mismatch_node_kinds)
        else:
            print(" No mismatch found. ".center(80, "="))

class OnnxTestCaseRepro:
|
1134 |
+
def __init__(self, repro_dir):
|
1135 |
+
self.repro_dir = repro_dir
|
1136 |
+
self.proto, self.inputs, self.outputs = onnx_proto_utils.load_test_case(
|
1137 |
+
repro_dir
|
1138 |
+
)
|
1139 |
+
|
1140 |
+
@classmethod
|
1141 |
+
@_beartype.beartype
|
1142 |
+
def create_test_case_repro(
|
1143 |
+
cls, proto: bytes, inputs, outputs, dir: str, name: Optional[str] = None
|
1144 |
+
):
|
1145 |
+
"""Create a repro under "{dir}/test_{name}" for an ONNX test case.
|
1146 |
+
|
1147 |
+
The test case contains the model and the inputs/outputs data. The directory
|
1148 |
+
structure is as follows:
|
1149 |
+
|
1150 |
+
dir
|
1151 |
+
├── test_<name>
|
1152 |
+
│ ├── model.onnx
|
1153 |
+
│ └── test_data_set_0
|
1154 |
+
│ ├── input_0.pb
|
1155 |
+
│ ├── input_1.pb
|
1156 |
+
│ ├── output_0.pb
|
1157 |
+
│ └── output_1.pb
|
1158 |
+
|
1159 |
+
Args:
|
1160 |
+
proto: ONNX model proto.
|
1161 |
+
inputs: Inputs to the model.
|
1162 |
+
outputs: Outputs of the model.
|
1163 |
+
dir: Directory to save the repro.
|
1164 |
+
name: Name of the test case. If not specified, a name based on current time
|
1165 |
+
will be generated.
|
1166 |
+
Returns:
|
1167 |
+
Path to the repro.
|
1168 |
+
"""
|
1169 |
+
if name is None:
|
1170 |
+
name = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
|
1171 |
+
return onnx_proto_utils.export_as_test_case(
|
1172 |
+
proto,
|
1173 |
+
_to_numpy(inputs),
|
1174 |
+
_to_numpy(outputs),
|
1175 |
+
name,
|
1176 |
+
dir,
|
1177 |
+
)
|
1178 |
+
|
1179 |
+
@_beartype.beartype
|
1180 |
+
def validate(self, options: VerificationOptions):
|
1181 |
+
"""Run the ONNX test case with options.backend, and compare with the expected outputs.
|
1182 |
+
|
1183 |
+
Args:
|
1184 |
+
options: Options for validation.
|
1185 |
+
|
1186 |
+
Raise:
|
1187 |
+
AssertionError: if outputs from options.backend and expected outputs are not
|
1188 |
+
equal up to specified precision.
|
1189 |
+
"""
|
1190 |
+
onnx_session = _onnx_backend_session(io.BytesIO(self.proto), options.backend)
|
1191 |
+
run_outputs = onnx_session.run(None, self.inputs)
|
1192 |
+
if hasattr(onnx_session, "get_outputs"):
|
1193 |
+
output_names = [o.name for o in onnx_session.get_outputs()]
|
1194 |
+
elif hasattr(onnx_session, "output_names"):
|
1195 |
+
output_names = onnx_session.output_names
|
1196 |
+
else:
|
1197 |
+
raise ValueError(f"Unknown onnx session type: {type(onnx_session)}")
|
1198 |
+
expected_outs = [self.outputs[name] for name in output_names]
|
1199 |
+
_compare_onnx_pytorch_outputs_in_np(run_outputs, expected_outs, options)
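
    # A hedged usage sketch (the directory name is illustrative, not from the
    # source): a repro saved earlier can be reloaded and re-checked against a
    # backend, e.g.
    #
    #     repro = OnnxTestCaseRepro("onnx_debug/test_2024_01_01_00_00_00_000000")
    #     repro.validate(VerificationOptions())
    #
    # `validate` raises an AssertionError if the backend outputs drift from the
    # recorded expected outputs.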


@dataclasses.dataclass
class GraphInfo:
    """GraphInfo contains validation information of a TorchScript graph and its converted ONNX graph."""

    graph: torch.Graph
    input_args: Tuple[Any, ...]
    params_dict: Dict[str, Any]
    export_options: _experimental.ExportOptions = dataclasses.field(
        default_factory=_experimental.ExportOptions
    )
    mismatch_error: Optional[AssertionError] = dataclasses.field(
        default=None, init=False
    )
    pt_outs: Optional[Sequence[_NumericType]] = dataclasses.field(
        default=None, init=False
    )
    upper_graph_info: Optional[GraphInfo] = dataclasses.field(default=None, init=False)
    lower_graph_info: Optional[GraphInfo] = dataclasses.field(default=None, init=False)
    id: str = dataclasses.field(default="")
    _onnx_graph: Optional[torch.Graph] = dataclasses.field(init=False, default=None)

    _EXCLUDED_NODE_KINDS: FrozenSet[str] = frozenset(
        {"prim::Constant", "prim::ListConstruct", "aten::ScalarImplicit"}
    )

    def clear(self):
        """Clear states and results of previous verification."""
        self.mismatch_error = None
        self.pt_outs = None
        self._onnx_graph = None
        self.upper_graph_info = None
        self.lower_graph_info = None

    def pretty_print_tree(self):
        """Pretty print `GraphInfo` tree.

        Each node represents a subgraph, showing the number of nodes in the subgraph and
        a check mark if the subgraph has output mismatch between torch and ONNX.

        The id of the subgraph is shown under the node. The `GraphInfo` object for any
        subgraph can be retrieved by calling `graph_info.find_partition(id)`.

        Example::

            ==================================== Tree: =====================================
            5 X   __2 X    __1 ✓
            id:  |  id: 0 |  id: 00
                 |        |
                 |        |__1 X (aten::relu)
                 |           id: 01
                 |
                 |__3 X    __1 ✓
                    id: 1 |  id: 10
                          |
                          |__2 X     __1 X (aten::relu)
                             id: 11 |  id: 110
                                    |
                                    |__1 ✓
                                       id: 111
            =========================== Mismatch leaf subgraphs: ===========================
            ['01', '110']
            ============================= Mismatch node kinds: =============================
            {'aten::relu': 2}

        """
        GraphInfoPrettyPrinter(self).pretty_print()

    def pretty_print_mismatch(self, graph: bool = False):
        """Pretty print details of the mismatch between torch and ONNX.

        Args:
            graph: If True, print the ATen JIT graph and ONNX graph.
        """
        print(f" Mismatch info for graph partition {self.id}: ".center(80, "="))
        if graph:
            print(" ATen JIT graph ".center(80, "="))
            # TODO: A more compact graph printer.
            #   * Drop stride, grad, device information.
            #   * Show source location on a separate line.
            print(self.graph)
            if self._onnx_graph is not None:
                print(" ONNX graph ".center(80, "="))
                print(self._onnx_graph)
        if self.has_mismatch():
            print(" Mismatch error ".center(80, "="))
            print(self.mismatch_error)
        else:
            print(" No mismatch ".center(80, "="))

    @_beartype.beartype
    def has_mismatch(self) -> bool:
        """Return True if the subgraph has output mismatch between torch and ONNX."""
        return self.mismatch_error is not None

    @_beartype.beartype
    def essential_node_count(self) -> int:
        """Return the number of nodes in the subgraph excluding those in `_EXCLUDED_NODE_KINDS`."""
        return sum(
            1 for n in self.graph.nodes() if n.kind() not in self._EXCLUDED_NODE_KINDS
        )

    @_beartype.beartype
    def essential_node_kinds(self) -> Set[str]:
        """Return the set of node kinds in the subgraph excluding those in `_EXCLUDED_NODE_KINDS`."""
        return {
            n.kind()
            for n in self.graph.nodes()
            if n.kind() not in self._EXCLUDED_NODE_KINDS
        }

    @_beartype.beartype
    def all_mismatch_leaf_graph_info(self) -> List["GraphInfo"]:
        """Return a list of all leaf `GraphInfo` objects that have mismatch."""
        if not self.has_mismatch():
            return []

        no_mismatch_children = (
            self.upper_graph_info is None or not self.upper_graph_info.has_mismatch()
        ) and (
            self.lower_graph_info is None or not self.lower_graph_info.has_mismatch()
        )

        if no_mismatch_children:
            return [self]

        results = []
        if self.upper_graph_info is not None:
            results += self.upper_graph_info.all_mismatch_leaf_graph_info()
        if self.lower_graph_info is not None:
            results += self.lower_graph_info.all_mismatch_leaf_graph_info()

        return results

    @_beartype.beartype
    def find_partition(self, id: str) -> Optional["GraphInfo"]:
        """Find the `GraphInfo` object with the given id."""
        if id == self.id:
            return self
        current_length = len(self.id)
        if len(id) > current_length:
            if id[current_length] == "0" and self.upper_graph_info is not None:
                return self.upper_graph_info.find_partition(id)
            elif id[current_length] == "1" and self.lower_graph_info is not None:
                return self.lower_graph_info.find_partition(id)
        return None
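
    # Lookup sketch: partition ids are binary strings built root-to-leaf, where
    # "0" selects the upper partition and "1" the lower one (see `find_mismatch`
    # below, which appends "0"/"1" when it recurses). Assuming a partition "01"
    # exists in the tree:
    #
    #     leaf = graph_info.find_partition("01")
    #     if leaf is not None and leaf.has_mismatch():
    #         leaf.pretty_print_mismatch(graph=True)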

    @_beartype.beartype
    def export_repro(
        self, repro_dir: Optional[str] = None, name: Optional[str] = None
    ) -> str:
        """Export the subgraph to ONNX along with the input/output data for repro.

        The repro directory will contain the following files::

            dir
            ├── test_<name>
            │   ├── model.onnx
            │   └── test_data_set_0
            │       ├── input_0.pb
            │       ├── input_1.pb
            │       ├── output_0.pb
            │       └── output_1.pb

        Args:
            repro_dir: The directory to export the repro files to. Defaults to current
                working directory if None.
            name: An optional name for the test case folder: "test_{name}".

        Returns:
            The path to the exported repro directory.
        """

        if repro_dir is None:
            repro_dir = os.getcwd()
        repro_dir = os.path.join(repro_dir, "onnx_debug")

        onnx_graph, onnx_params_dict = _onnx_graph_from_aten_graph(
            self.graph, self.export_options, self.params_dict
        )

        proto, _ = _onnx_proto_from_onnx_graph(
            onnx_graph, self.export_options, onnx_params_dict
        )
        return OnnxTestCaseRepro.create_test_case_repro(
            proto, self.input_args, self.pt_outs, repro_dir, name
        )

    @_beartype.beartype
    def _graph_partition_pivot(self) -> int:
        """Find the pivot index to partition the graph.

        The pivot is the node that splits the graph into two parts. Each part should
        have a similar number of nodes, excluding the non-essential ops defined in
        `_EXCLUDED_NODE_KINDS`, such as `prim::Constant`.
        If the graph has an odd number of nodes, the upper part will have one more node.
        If the graph does not have any node that can be partitioned, return -1.

        Returns:
            The index of the pivot node.
        """
        included_node_indices = [
            i
            for i, n in enumerate(self.graph.nodes())
            if n.kind() not in self._EXCLUDED_NODE_KINDS
        ]
        half_idx = len(included_node_indices) // 2 - 1
        if half_idx >= 0 and len(included_node_indices) > half_idx:
            return included_node_indices[half_idx] + 1
        return -1
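
    # Worked example (illustrative node kinds, not from the source): for a graph
    # whose node kinds are [prim::Constant, aten::linear, aten::relu,
    # aten::linear, aten::relu], the essential node indices are [1, 2, 3, 4].
    # Then half_idx = 4 // 2 - 1 = 1 and the pivot is
    # included_node_indices[1] + 1 = 3, so nodes [0..2] form the upper partition
    # and nodes [3..] the lower one.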

    @_beartype.beartype
    def _partition_upper_graph(self) -> torch.Graph:
        pivot = self._graph_partition_pivot()
        if pivot == -1:
            return torch.Graph()
        graph = self.graph.copy()  # Copy to not mutate parent graph.
        original_outputs = list(graph.outputs())

        def _process_bridge_value_for_upper(
            new_outputs: List[torch.Value], bridge_value: torch.Value
        ) -> torch.Value:
            # Add bridge values as upper graph outputs.
            new_outputs.append(bridge_value)
            return bridge_value

        new_outputs: List[torch.Value] = []
        process_bridge_value_for_upper = functools.partial(
            _process_bridge_value_for_upper, new_outputs
        )
        _, dropped_nodes, complete_upper_nodes_set, _ = self._partition_nodes(
            graph, pivot, process_bridge_value_for_upper
        )

        for _ in enumerate(original_outputs):
            graph.eraseOutput(0)
        for output in new_outputs:
            graph.registerOutput(output)

        for node in reversed(dropped_nodes):
            node.destroy()

        for i, input in reversed(list(enumerate(list(graph.inputs())))):
            if (
                not _has_uses_by_nodes(input, complete_upper_nodes_set)
                and input not in new_outputs
            ):
                try:
                    graph.eraseInput(i)
                except RuntimeError as e:
                    print(input, graph)
                    raise e

        return graph

    @_beartype.beartype
    def _partition_lower_graph(self) -> torch.Graph:
        pivot = self._graph_partition_pivot()
        if pivot == -1:
            return torch.Graph()
        graph = self.graph.copy()  # Copy to not mutate parent graph.
        original_outputs = list(graph.outputs())
        original_inputs = list(graph.inputs())

        new_outputs = []

        def _process_bridge_value_for_lower(
            graph: torch.Graph, bridge_value: torch.Value
        ) -> torch.Value:
            # Add bridge values as lower graph inputs.
            new_input = graph.addInput()
            bridge_value.replaceAllUsesWith(new_input)
            new_input.copyMetadata(bridge_value)
            return new_input

        process_bridge_value_for_lower = functools.partial(
            _process_bridge_value_for_lower, graph
        )

        upper_nodes, lower_nodes, _, complete_lower_nodes_set = self._partition_nodes(
            graph, pivot, process_bridge_value_for_lower
        )

        for output in original_outputs:
            if _produced_by(output, lower_nodes):
                new_outputs.append(output)
        for _ in enumerate(original_outputs):
            graph.eraseOutput(0)
        for output in new_outputs:
            graph.registerOutput(output)

        for input in original_inputs:
            if _has_uses_by_nodes(input, complete_lower_nodes_set):
                new_input = graph.addInput()
                input.replaceAllUsesWith(new_input)
                new_input.copyMetadata(input)

        for node in reversed(upper_nodes):
            if node not in complete_lower_nodes_set:
                try:
                    node.destroy()
                except RuntimeError as e:
                    print(node, graph)
                    raise e

        for _ in original_inputs:
            graph.eraseInput(0)

        return graph

    @_beartype.beartype
    def _partition_node(
        self,
        node: torch.Node,
        complete_upper_nodes_set: Set[torch.Node],
        complete_lower_nodes_set: Set[torch.Node],
        original_graph_outputs: Set[torch.Value],
        covered_bridge_values: Set[torch.Value],
        process_bridge_value: Callable[[torch.Value], torch.Value],
    ):
        if node in complete_lower_nodes_set:
            return

        if (
            _node_has_uses_by(node, complete_lower_nodes_set)
            and node.kind() in self._EXCLUDED_NODE_KINDS
        ):
            complete_lower_nodes_set.update(_all_nodes([node]))
            for input in node.inputs():
                if input in covered_bridge_values:
                    continue
                self._partition_node(
                    input.node(),
                    complete_upper_nodes_set,
                    complete_lower_nodes_set,
                    original_graph_outputs,
                    covered_bridge_values,
                    process_bridge_value,
                )
        else:
            for output in node.outputs():
                if output in covered_bridge_values:
                    continue
                if (
                    _has_uses_by_nodes(output, complete_lower_nodes_set)
                    or output in original_graph_outputs
                ):
                    covered_bridge_values.add(process_bridge_value(output))

    @_beartype.beartype
    def _partition_nodes(
        self,
        graph: torch.Graph,
        pivot: int,
        process_bridge_value: Callable[[torch.Value], torch.Value],
    ) -> Tuple[List[torch.Node], List[torch.Node], Set[torch.Node], Set[torch.Node]]:
        nodes = list(graph.nodes())
        upper_nodes = nodes[:pivot]
        lower_nodes = nodes[pivot:]
        # `upper_nodes` and `complete_upper_nodes_set` differ in that the latter
        # recursively contains the nodes in the subblocks of `upper_nodes`.
        # The same applies to `lower_nodes` and `complete_lower_nodes_set`, with
        # the addition that `complete_lower_nodes_set` also includes nodes that
        # are determined to be copied from `upper_nodes` to `lower_nodes`.
        complete_upper_nodes_set = _all_nodes(upper_nodes)
        complete_lower_nodes_set = _all_nodes(lower_nodes)
        original_graph_outputs = set(graph.outputs())
        # Bridge values are values produced by the upper graph and consumed by
        # the lower graph. These values need to become upper graph outputs and
        # lower graph inputs, to bridge the interaction.
        # Start with all graph inputs marked as covered. If any graph input is
        # needed by the lower graph, it is simply kept as a lower graph input later.
        covered_bridge_values = set(graph.inputs())
        for node in upper_nodes:
            self._partition_node(
                node,
                complete_upper_nodes_set,
                complete_lower_nodes_set,
                original_graph_outputs,
                covered_bridge_values,
                process_bridge_value,
            )
        return (
            upper_nodes,
            lower_nodes,
            complete_upper_nodes_set,
            complete_lower_nodes_set,
        )

    @_beartype.beartype
    def _bridge_kwargs(self):
        pt_outs = self.pt_outs
        graph_outputs = list(self.graph.outputs())
        assert pt_outs is not None
        assert len(graph_outputs) == len(
            pt_outs
        ), f"{len(graph_outputs)} vs {len(pt_outs)}\nGraph: {self.graph}"
        return {v.debugName(): o for v, o in zip(graph_outputs, pt_outs)}
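
    # Illustrative note on the bridge flow (the debug name "relu_1" below is
    # hypothetical): after the upper partition is verified, `_bridge_kwargs`
    # maps each upper-graph output's debug name to the torch output recorded
    # for it, e.g. {"relu_1": tensor(...)}. Those entries are then matched by
    # name against the lower graph's input debug names in
    # `_args_and_params_for_partition_graph` below.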

    @_beartype.beartype
    def _args_and_params_for_partition_graph(
        self,
        graph: torch.Graph,
        bridge_kwargs: Mapping[str, Union[_NumericType, Sequence[_NumericType]]],
        full_kwargs: Mapping[str, torch.Tensor],
        full_params: Mapping[str, torch.Tensor],
    ):
        input_names = [input.debugName() for input in graph.inputs()]
        args = tuple(bridge_kwargs[k] for k in input_names if k in bridge_kwargs)
        args += tuple(full_kwargs[k] for k in input_names if k in full_kwargs)
        params = {k: full_params[k] for k in input_names if k in full_params}
        assert len(args) + len(params) == len(
            input_names
        ), f"{len(args)} + {len(params)} vs {len(input_names)}: {input_names}"
        return args, params

    @_beartype.beartype
    def verify_export(
        self, options: VerificationOptions
    ) -> Tuple[Optional[AssertionError], torch.Graph, _OutputsType, _OutputsType]:
        """
        Verify the export from TorchScript IR graph to ONNX.

        Export the TorchScript IR graph to ONNX, with the inputs, parameters and export
        options recorded in this object. Then verify the exported ONNX graph against
        the original TorchScript IR graph under the provided verification options.

        Args:
            options: The verification options.

        Returns:
            error: The AssertionError raised during the verification. Returns None if no
                error is raised.
            onnx_graph: The exported ONNX graph in TorchScript IR format.
            onnx_outs: The outputs from running the exported ONNX model under the onnx
                backend in `options`.
            pt_outs: The outputs from running the TorchScript IR graph.
        """
        return verify_aten_graph(
            self.graph,
            input_args=self.input_args,
            params_dict=self.params_dict,
            export_options=self.export_options,
            verification_options=options,
        )

    @_beartype.beartype
    def find_mismatch(
        self,
        options: Optional[VerificationOptions] = None,
    ):
        """
        Find all mismatches between the TorchScript IR graph and the exported ONNX model.

        Binary searches the model graph to find the minimal subgraph that exhibits the
        mismatch. A `GraphInfo` object is created for each subgraph, recording the test
        inputs and export options, as well as the validation results.

        Args:
            options: The verification options.
        """
        self.clear()

        if options is None:
            options = VerificationOptions()

        if self.export_options.verbose:
            print(self.graph)

        if len(list(self.graph.outputs())) == 0:
            return

        assert len(self.input_args) + len(self.params_dict) == len(
            list(self.graph.inputs())
        ), (
            f"Number of graph inputs ({len(list(self.graph.inputs()))}) does not match "
            f"the provided tensor arguments ({len(self.input_args)} + {len(self.params_dict)})."
        )

        self.mismatch_error, self._onnx_graph, self.pt_outs, _ = self.verify_export(
            options
        )

        if self.mismatch_error is None:
            # No mismatch found in graph.
            return

        if self.essential_node_count() <= 1:
            # Reached leaf node, no more partitioning.
            return

        full_kwargs = {
            k.debugName(): v for k, v in zip(self.graph.inputs(), self.input_args)
        }
        full_params = self.params_dict

        upper_graph = self._partition_upper_graph()
        upper_args, upper_params = self._args_and_params_for_partition_graph(
            upper_graph, {}, full_kwargs, full_params
        )
        self.upper_graph_info = GraphInfo(
            upper_graph,
            upper_args,
            upper_params,
            self.export_options,
            id=self.id + "0",
        )

        self.upper_graph_info.find_mismatch(options)

        bridge_kwargs = self.upper_graph_info._bridge_kwargs()
        lower_graph = self._partition_lower_graph()
        lower_args, lower_params = self._args_and_params_for_partition_graph(
            lower_graph, bridge_kwargs, full_kwargs, full_params
        )
        self.lower_graph_info = GraphInfo(
            lower_graph,
            lower_args,
            lower_params,
            self.export_options,
            id=self.id + "1",
        )

        self.lower_graph_info.find_mismatch(options)


@_beartype.beartype
def _all_nodes(nodes: Collection[torch.Node]) -> Set[torch.Node]:
    all_nodes = set(nodes)
    for n in nodes:
        for b in n.blocks():
            all_nodes.update(_all_nodes(list(b.nodes())))
    return all_nodes


@_beartype.beartype
def _has_uses_by_nodes(value: torch.Value, nodes: Collection[torch.Node]) -> bool:
    return any(use.user in nodes for use in value.uses())


@_beartype.beartype
def _node_has_uses_by(node: torch.Node, nodes: Collection[torch.Node]) -> bool:
    for output in node.outputs():
        if _has_uses_by_nodes(output, nodes):
            return True
    return False


@_beartype.beartype
def _produced_by(value: torch.Value, nodes: Collection[torch.Node]) -> bool:
    return value.node() in nodes


@_beartype.beartype
def find_mismatch(
    model: Union[torch.nn.Module, torch.jit.ScriptModule],
    input_args: Tuple[Any, ...],
    do_constant_folding: bool = True,
    training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL,
    opset_version: Optional[int] = None,
    keep_initializers_as_inputs: bool = True,
    verbose: bool = False,
    options: Optional[VerificationOptions] = None,
) -> GraphInfo:
    r"""Find all mismatches between the original model and the exported model.

    Experimental. The API is subject to change.

    This tool helps debug the mismatch between the original PyTorch model and exported
    ONNX model. It binary searches the model graph to find the minimal subgraph that
    exhibits the mismatch.

    Args:
        model: The model to be exported.
        input_args: The input arguments to the model.
        do_constant_folding: Same as `do_constant_folding` in :func:`torch.onnx.export`.
        training: Same as `training` in :func:`torch.onnx.export`.
        opset_version: Same as `opset_version` in :func:`torch.onnx.export`.
        keep_initializers_as_inputs: Same as `keep_initializers_as_inputs` in :func:`torch.onnx.export`.
        verbose: Same as `verbose` in :func:`torch.onnx.export`.
        options: The options for the mismatch verification.

    Returns:
        A GraphInfo object that contains the mismatch information.

    Example::

        >>> import torch
        >>> import torch.onnx.verification
        >>> torch.manual_seed(0)
        >>> opset_version = 15
        >>> # Define a custom symbolic function for aten::relu.
        >>> # The custom symbolic function is incorrect, which will result in mismatches.
        >>> def incorrect_relu_symbolic_function(g, self):
        ...     return self
        >>> torch.onnx.register_custom_op_symbolic(
        ...     "aten::relu",
        ...     incorrect_relu_symbolic_function,
        ...     opset_version=opset_version,
        ... )
        >>> class Model(torch.nn.Module):
        ...     def __init__(self):
        ...         super().__init__()
        ...         self.layers = torch.nn.Sequential(
        ...             torch.nn.Linear(3, 4),
        ...             torch.nn.ReLU(),
        ...             torch.nn.Linear(4, 5),
        ...             torch.nn.ReLU(),
        ...             torch.nn.Linear(5, 6),
        ...         )
        ...     def forward(self, x):
        ...         return self.layers(x)
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX)
        >>> graph_info = torch.onnx.verification.find_mismatch(
        ...     Model(),
        ...     (torch.randn(2, 3),),
        ...     opset_version=opset_version,
        ... )
        ===================== Mismatch info for graph partition : ======================
        ================================ Mismatch error ================================
        Tensor-likes are not close!
        Mismatched elements: 12 / 12 (100.0%)
        Greatest absolute difference: 0.2328854203224182 at index (1, 2) (up to 1e-07 allowed)
        Greatest relative difference: 0.699536174352349 at index (1, 3) (up to 0.001 allowed)
        ==================================== Tree: =====================================
        5 X   __2 X    __1 ✓
        id:  |  id: 0 |  id: 00
             |        |
             |        |__1 X (aten::relu)
             |           id: 01
             |
             |__3 X    __1 ✓
                id: 1 |  id: 10
                      |
                      |__2 X     __1 X (aten::relu)
                         id: 11 |  id: 110
                                |
                                |__1 ✓
                                   id: 111
        =========================== Mismatch leaf subgraphs: ===========================
        ['01', '110']
        ============================= Mismatch node kinds: =============================
        {'aten::relu': 2}

    """
    if options is None:
        options = VerificationOptions()
    if opset_version is None:
        opset_version = _constants.ONNX_DEFAULT_OPSET
    # From the ATen graph, do binary search on graph partitions to find operator
    # export discrepancies.
    # TODO: Copied from utils.py `export` until `_optimize_graph`.
    if training == torch.onnx.TrainingMode.TRAINING:
        model.train()
    elif training == torch.onnx.TrainingMode.EVAL:
        model.eval()
    with torch.no_grad():
        inputs_for_export = _prepare_input_for_export(input_args, {})
        args = utils._decide_input_format(model, inputs_for_export)

        model = utils._pre_trace_quant_model(model, args)
        graph, params, torch_out, module = utils._create_jit_graph(model, args)
        params_dict = utils._get_named_param_dict(graph, params)

        utils._apply_friendly_debug_names(graph, params_dict)

        graph_info = GraphInfo(
            graph,
            input_args,
            params_dict,
            _experimental.ExportOptions(
                do_constant_folding=do_constant_folding,
                training=training,
                opset_version=opset_version,
                keep_initializers_as_inputs=keep_initializers_as_inputs,
                verbose=verbose,
            ),
        )
        graph_info.find_mismatch(options)
        graph_info.pretty_print_mismatch()
        graph_info.pretty_print_tree()

        return graph_info
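
# A hedged follow-up sketch (not part of the source file): once `find_mismatch`
# returns, the leaf partitions flagged in the printed tree can be retrieved and
# exported for standalone debugging with the APIs defined above, e.g.:
#
#     graph_info = find_mismatch(Model(), (torch.randn(2, 3),))
#     leaf = graph_info.find_partition("01")  # id read off the printed tree
#     if leaf is not None:
#         leaf.pretty_print_mismatch(graph=True)
#         repro_path = leaf.export_repro()  # writes model.onnx + input/output .pb files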

venv/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py ADDED
@@ -0,0 +1 @@
+# mypy: ignore-errors

venv/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (199 Bytes).

venv/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py ADDED
@@ -0,0 +1 @@
+# mypy: ignore-errors

venv/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes).

venv/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc ADDED
Binary file (597 Bytes).