Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step120/zero/17.attention.dense.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/17.attention.dense.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/torch/onnx/__init__.py +177 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_constants.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_deprecation.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_experimental.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_exporter_states.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_globals.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_onnx_supported_ops.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/errors.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/operators.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_caffe2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_helper.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset10.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset11.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset12.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset13.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset14.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset15.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset16.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset17.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset18.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset7.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset8.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/__pycache__/verification.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/onnx/_constants.py +25 -0
- venv/lib/python3.10/site-packages/torch/onnx/_experimental.py +28 -0
- venv/lib/python3.10/site-packages/torch/onnx/_exporter_states.py +39 -0
- venv/lib/python3.10/site-packages/torch/onnx/_globals.py +85 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset10.py +1233 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset12.py +485 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset14.py +289 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset8.py +470 -0
- venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset9.py +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_test.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/pipe_with_ddp_test.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/17.attention.dense.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:521cf52a07bcfd9e7ad9bb21d552e93734867eb516c719b6b1e27a510b6f3515
+size 16778396
ckpts/universal/global_step120/zero/17.attention.dense.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b20df606e3efb85cfb30786194df403fd4110bbfd108ef919c5446d8bed158bc
+size 16778411
ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b79740a9833f95550810cd432c64519f335049e04225c97eb6ecb50ab61d9969
+size 33555533
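The three checkpoint entries above are Git LFS pointer files: the repository stores only the version/oid/size stanza, and `git lfs pull` fetches the actual tensor payloads (about 16 MB and 33 MB here). A minimal sketch of inspecting one of the optimizer-state tensors once the LFS objects are present locally; the path comes from the listing above, and loading assumes the .pt file is a plain serialized tensor:

import torch

# Assumes `git lfs pull` has replaced the pointer file with the real payload.
path = "ckpts/universal/global_step120/zero/17.attention.dense.weight/exp_avg.pt"
exp_avg = torch.load(path, map_location="cpu")  # Adam-style first-moment estimate
print(type(exp_avg), getattr(exp_avg, "shape", None))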
venv/lib/python3.10/site-packages/torch/onnx/__init__.py
ADDED
@@ -0,0 +1,177 @@
+from torch import _C
+from torch._C import _onnx as _C_onnx
+from torch._C._onnx import (
+    _CAFFE2_ATEN_FALLBACK,
+    OperatorExportTypes,
+    TensorProtoDataType,
+    TrainingMode,
+)
+
+from . import (  # usort:skip. Keep the order instead of sorting lexicographically
+    _deprecation,
+    errors,
+    symbolic_caffe2,
+    symbolic_helper,
+    symbolic_opset7,
+    symbolic_opset8,
+    symbolic_opset9,
+    symbolic_opset10,
+    symbolic_opset11,
+    symbolic_opset12,
+    symbolic_opset13,
+    symbolic_opset14,
+    symbolic_opset15,
+    symbolic_opset16,
+    symbolic_opset17,
+    symbolic_opset18,
+    utils,
+)
+
+# TODO(After 1.13 release): Remove the deprecated SymbolicContext
+from ._exporter_states import ExportTypes, SymbolicContext
+from ._type_utils import JitScalarType
+from .errors import CheckerError  # Backwards compatibility
+from .utils import (
+    _optimize_graph,
+    _run_symbolic_function,
+    _run_symbolic_method,
+    export,
+    export_to_pretty_string,
+    is_in_onnx_export,
+    register_custom_op_symbolic,
+    select_model_mode_for_export,
+    unregister_custom_op_symbolic,
+)
+
+from ._internal.exporter import (  # usort:skip. needs to be last to avoid circular import
+    DiagnosticOptions,
+    ExportOptions,
+    ONNXProgram,
+    ONNXProgramSerializer,
+    ONNXRuntimeOptions,
+    InvalidExportOptionsError,
+    OnnxExporterError,
+    OnnxRegistry,
+    dynamo_export,
+    enable_fake_mode,
+)
+
+from ._internal.onnxruntime import (
+    is_onnxrt_backend_supported,
+    OrtBackend as _OrtBackend,
+    OrtBackendOptions as _OrtBackendOptions,
+    OrtExecutionProvider as _OrtExecutionProvider,
+)
+
+__all__ = [
+    # Modules
+    "symbolic_helper",
+    "utils",
+    "errors",
+    # All opsets
+    "symbolic_caffe2",
+    "symbolic_opset7",
+    "symbolic_opset8",
+    "symbolic_opset9",
+    "symbolic_opset10",
+    "symbolic_opset11",
+    "symbolic_opset12",
+    "symbolic_opset13",
+    "symbolic_opset14",
+    "symbolic_opset15",
+    "symbolic_opset16",
+    "symbolic_opset17",
+    "symbolic_opset18",
+    # Enums
+    "ExportTypes",
+    "OperatorExportTypes",
+    "TrainingMode",
+    "TensorProtoDataType",
+    "JitScalarType",
+    # Public functions
+    "export",
+    "export_to_pretty_string",
+    "is_in_onnx_export",
+    "select_model_mode_for_export",
+    "register_custom_op_symbolic",
+    "unregister_custom_op_symbolic",
+    "disable_log",
+    "enable_log",
+    # Errors
+    "CheckerError",  # Backwards compatibility
+    # Dynamo Exporter
+    "DiagnosticOptions",
+    "ExportOptions",
+    "ONNXProgram",
+    "ONNXProgramSerializer",
+    "ONNXRuntimeOptions",
+    "InvalidExportOptionsError",
+    "OnnxExporterError",
+    "OnnxRegistry",
+    "dynamo_export",
+    "enable_fake_mode",
+    # DORT / torch.compile
+    "is_onnxrt_backend_supported",
+]
+
+# Set namespace for exposed private names
+ExportTypes.__module__ = "torch.onnx"
+JitScalarType.__module__ = "torch.onnx"
+ExportOptions.__module__ = "torch.onnx"
+ONNXProgram.__module__ = "torch.onnx"
+ONNXProgramSerializer.__module__ = "torch.onnx"
+ONNXRuntimeOptions.__module__ = "torch.onnx"
+dynamo_export.__module__ = "torch.onnx"
+InvalidExportOptionsError.__module__ = "torch.onnx"
+OnnxExporterError.__module__ = "torch.onnx"
+enable_fake_mode.__module__ = "torch.onnx"
+OnnxRegistry.__module__ = "torch.onnx"
+DiagnosticOptions.__module__ = "torch.onnx"
+is_onnxrt_backend_supported.__module__ = "torch.onnx"
+_OrtExecutionProvider.__module__ = "torch.onnx"
+_OrtBackendOptions.__module__ = "torch.onnx"
+_OrtBackend.__module__ = "torch.onnx"
+
+producer_name = "pytorch"
+producer_version = _C_onnx.PRODUCER_VERSION
+
+
+@_deprecation.deprecated(
+    since="1.12.0", removed_in="2.0", instructions="use `torch.onnx.export` instead"
+)
+def _export(*args, **kwargs):
+    return utils._export(*args, **kwargs)
+
+
+# TODO(justinchuby): Deprecate these logging functions in favor of the new diagnostic module.
+
+# Returns True iff ONNX logging is turned on.
+is_onnx_log_enabled = _C._jit_is_onnx_log_enabled
+
+
+def enable_log() -> None:
+    r"""Enables ONNX logging."""
+    _C._jit_set_onnx_log_enabled(True)
+
+
+def disable_log() -> None:
+    r"""Disables ONNX logging."""
+    _C._jit_set_onnx_log_enabled(False)
+
+
+"""Sets output stream for ONNX logging.
+
+Args:
+    stream_name (str, default "stdout"): Only 'stdout' and 'stderr' are supported
+        as ``stream_name``.
+"""
+set_log_stream = _C._jit_set_onnx_log_output_stream
+
+
+"""A simple logging facility for ONNX exporter.
+
+Args:
+    args: Arguments are converted to string, concatenated together with a newline
+        character appended to the end, and flushed to output stream.
+"""
+log = _C._jit_onnx_log
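The file above assembles the public `torch.onnx` namespace: the TorchScript-based `export` re-exported from `.utils` and the Dynamo-based `dynamo_export` from `._internal.exporter`. A minimal usage sketch of both entry points, with a toy model for illustration:

import torch

model = torch.nn.Linear(4, 2)
x = torch.randn(1, 4)

# TorchScript-based exporter
torch.onnx.export(model, (x,), "linear.onnx", opset_version=17)

# Dynamo-based exporter; returns an ONNXProgram that can be saved
onnx_program = torch.onnx.dynamo_export(model, x)
onnx_program.save("linear_dynamo.onnx")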
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (2.98 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_constants.cpython-310.pyc
ADDED
Binary file (782 Bytes).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_deprecation.cpython-310.pyc
ADDED
Binary file (1.82 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_experimental.cpython-310.pyc
ADDED
Binary file (1.46 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_exporter_states.cpython-310.pyc
ADDED
Binary file (1.69 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_globals.cpython-310.pyc
ADDED
Binary file (3.04 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_onnx_supported_ops.cpython-310.pyc
ADDED
Binary file (3.79 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/errors.cpython-310.pyc
ADDED
Binary file (3.41 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/operators.cpython-310.pyc
ADDED
Binary file (903 Bytes).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_caffe2.cpython-310.pyc
ADDED
Binary file (7.23 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_helper.cpython-310.pyc
ADDED
Binary file (42.7 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset10.cpython-310.pyc
ADDED
Binary file (22.3 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset11.cpython-310.pyc
ADDED
Binary file (34 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset12.cpython-310.pyc
ADDED
Binary file (10.3 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset13.cpython-310.pyc
ADDED
Binary file (21.1 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset14.cpython-310.pyc
ADDED
Binary file (6.52 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset15.cpython-310.pyc
ADDED
Binary file (2.71 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset16.cpython-310.pyc
ADDED
Binary file (4.45 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset17.cpython-310.pyc
ADDED
Binary file (5.19 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset18.cpython-310.pyc
ADDED
Binary file (1.58 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset7.cpython-310.pyc
ADDED
Binary file (1.77 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset8.cpython-310.pyc
ADDED
Binary file (10.8 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc
ADDED
Binary file (143 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (57.5 kB).
venv/lib/python3.10/site-packages/torch/onnx/__pycache__/verification.cpython-310.pyc
ADDED
Binary file (55.6 kB).
venv/lib/python3.10/site-packages/torch/onnx/_constants.py
ADDED
@@ -0,0 +1,25 @@
+"""Constant values used in ONNX."""
+
+ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
+
+ONNX_BASE_OPSET = 9
+ONNX_MIN_OPSET = 7
+ONNX_MAX_OPSET = 19
+ONNX_TORCHSCRIPT_EXPORTER_MAX_OPSET = 17
+# ONNX_DEFAULT_OPSET generated by tools/onnx/update_default_opset_version.py
+ONNX_DEFAULT_OPSET = 17
+ONNX_CONSTANT_FOLDING_MIN_OPSET = 9
+
+PYTORCH_GITHUB_ISSUES_URL = "https://github.com/pytorch/pytorch/issues"
+
+INT64_MAX = 9223372036854775807
+INT32_MAX = 2147483647
+INT16_MAX = 32767
+INT8_MAX = 127
+UINT8_MAX = 255
+
+INT64_MIN = -9223372036854775808
+INT32_MIN = -2147483648
+INT16_MIN = -32768
+INT8_MIN = -128
+UINT8_MIN = 0
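These constants bound the opset versions the exporter accepts; `_globals.py` later in this diff validates assignments against `ONNX_MIN_OPSET`..`ONNX_MAX_OPSET`, and `_slice` in `symbolic_opset10.py` uses `INT64_MAX` as the "slice to the end" sentinel. A small sketch of the same range check:

from torch.onnx import _constants

def check_opset(value: int) -> None:
    # Mirrors the validation in torch/onnx/_globals.py
    if value not in range(_constants.ONNX_MIN_OPSET, _constants.ONNX_MAX_OPSET + 1):
        raise ValueError(f"Unsupported ONNX opset version: {value}")

check_opset(17)  # within [7, 19], so no error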
venv/lib/python3.10/site-packages/torch/onnx/_experimental.py
ADDED
@@ -0,0 +1,28 @@
+"""Experimental classes and functions used by ONNX export."""
+
+import dataclasses
+from typing import Mapping, Optional, Sequence, Set, Type, Union
+
+import torch
+import torch._C._onnx as _C_onnx
+
+
+@dataclasses.dataclass
+class ExportOptions:
+    """Arguments used by :func:`torch.onnx.export`.
+
+    TODO: Adopt this in `torch.onnx.export` api to replace keyword arguments.
+    """
+
+    export_params: bool = True
+    verbose: bool = False
+    training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL
+    input_names: Optional[Sequence[str]] = None
+    output_names: Optional[Sequence[str]] = None
+    operator_export_type: _C_onnx.OperatorExportTypes = _C_onnx.OperatorExportTypes.ONNX
+    opset_version: Optional[int] = None
+    do_constant_folding: bool = True
+    dynamic_axes: Optional[Mapping[str, Union[Mapping[int, str], Sequence[int]]]] = None
+    keep_initializers_as_inputs: Optional[bool] = None
+    custom_opsets: Optional[Mapping[str, int]] = None
+    export_modules_as_functions: Union[bool, Set[Type[torch.nn.Module]]] = False
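`ExportOptions` here is a plain dataclass mirroring the keyword arguments of `torch.onnx.export`; a short construction sketch (internal/experimental API, shown for illustration only):

from torch.onnx import _experimental

options = _experimental.ExportOptions(
    opset_version=17,
    input_names=["x"],
    output_names=["y"],
    dynamic_axes={"x": {0: "batch"}},
)
print(options.do_constant_folding)  # True by default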
venv/lib/python3.10/site-packages/torch/onnx/_exporter_states.py
ADDED
@@ -0,0 +1,39 @@
+from __future__ import annotations
+
+from typing import Dict
+
+from torch import _C
+
+
+class ExportTypes:
+    r"""Specifies how the ONNX model is stored."""
+
+    PROTOBUF_FILE = "Saves model in the specified protobuf file."
+    ZIP_ARCHIVE = "Saves model in the specified ZIP file (uncompressed)."
+    COMPRESSED_ZIP_ARCHIVE = "Saves model in the specified ZIP file (compressed)."
+    DIRECTORY = "Saves model in the specified folder."
+
+
+class SymbolicContext:
+    """Extra context for symbolic functions.
+
+    Args:
+        params_dict (Dict[str, _C.IValue]): Mapping from graph initializer name to IValue.
+        env (Dict[_C.Value, _C.Value]): Mapping from Torch domain graph Value to ONNX domain graph Value.
+        cur_node (_C.Node): Current node being converted to ONNX domain.
+        onnx_block (_C.Block): Current ONNX block that converted nodes are being appended to.
+    """
+
+    def __init__(
+        self,
+        params_dict: Dict[str, _C.IValue],
+        env: dict,
+        cur_node: _C.Node,
+        onnx_block: _C.Block,
+    ):
+        self.params_dict: Dict[str, _C.IValue] = params_dict
+        self.env: Dict[_C.Value, _C.Value] = env
+        # Current node that is being converted.
+        self.cur_node: _C.Node = cur_node
+        # Current onnx block that converted nodes are being appended to.
+        self.onnx_block: _C.Block = onnx_block
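`ExportTypes` stores human-readable descriptions as its class-attribute values, and `SymbolicContext` is kept only for backward compatibility (see the TODO in `__init__.py` above). A trivial sketch:

from torch.onnx import ExportTypes

print(ExportTypes.PROTOBUF_FILE)  # "Saves model in the specified protobuf file."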
venv/lib/python3.10/site-packages/torch/onnx/_globals.py
ADDED
@@ -0,0 +1,85 @@
+"""Globals used internally by the ONNX exporter.
+
+Do not use this module outside of `torch.onnx` and its tests.
+
+Be very judicious when adding any new global variables. Do not create new global
+variables unless they are absolutely necessary.
+"""
+import torch._C._onnx as _C_onnx
+
+# This module should only depend on _constants and nothing else in torch.onnx to keep
+# dependency direction clean.
+from torch.onnx import _constants
+
+
+class _InternalGlobals:
+    """Globals used internally by ONNX exporter.
+
+    NOTE: Be very judicious when adding any new variables. Do not create new
+    global variables unless they are absolutely necessary.
+    """
+
+    def __init__(self):
+        self._export_onnx_opset_version = _constants.ONNX_DEFAULT_OPSET
+        self._training_mode: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL
+        self._in_onnx_export: bool = False
+        # Whether the user's model is training during export
+        self.export_training: bool = False
+        self.operator_export_type: _C_onnx.OperatorExportTypes = (
+            _C_onnx.OperatorExportTypes.ONNX
+        )
+        self.onnx_shape_inference: bool = True
+        self._autograd_inlining: bool = True
+
+    @property
+    def training_mode(self):
+        """The training mode for the exporter."""
+        return self._training_mode
+
+    @training_mode.setter
+    def training_mode(self, training_mode: _C_onnx.TrainingMode):
+        if not isinstance(training_mode, _C_onnx.TrainingMode):
+            raise TypeError(
+                "training_mode must be of type 'torch.onnx.TrainingMode'. This is "
+                "likely a bug in torch.onnx."
+            )
+        self._training_mode = training_mode
+
+    @property
+    def export_onnx_opset_version(self) -> int:
+        """Opset version used during export."""
+        return self._export_onnx_opset_version
+
+    @export_onnx_opset_version.setter
+    def export_onnx_opset_version(self, value: int):
+        supported_versions = range(
+            _constants.ONNX_MIN_OPSET, _constants.ONNX_MAX_OPSET + 1
+        )
+        if value not in supported_versions:
+            raise ValueError(f"Unsupported ONNX opset version: {value}")
+        self._export_onnx_opset_version = value
+
+    @property
+    def in_onnx_export(self) -> bool:
+        """Whether it is in the middle of ONNX export."""
+        return self._in_onnx_export
+
+    @in_onnx_export.setter
+    def in_onnx_export(self, value: bool):
+        if type(value) is not bool:
+            raise TypeError("in_onnx_export must be a boolean")
+        self._in_onnx_export = value
+
+    @property
+    def autograd_inlining(self) -> bool:
+        """Whether Autograd must be inlined."""
+        return self._autograd_inlining
+
+    @autograd_inlining.setter
+    def autograd_inlining(self, value: bool):
+        if type(value) is not bool:
+            raise TypeError("autograd_inlining must be a boolean")
+        self._autograd_inlining = value
+
+
+GLOBALS = _InternalGlobals()
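The property setters above validate assignments, so misuse of the singleton fails loudly; a short sketch exercising `GLOBALS` (internal API, for illustration):

from torch.onnx._globals import GLOBALS

GLOBALS.export_onnx_opset_version = 15  # accepted: within [ONNX_MIN_OPSET, ONNX_MAX_OPSET]
try:
    GLOBALS.export_onnx_opset_version = 99
except ValueError as err:
    print(err)  # Unsupported ONNX opset version: 99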
venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset10.py
ADDED
@@ -0,0 +1,1233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import functools
|
4 |
+
import sys
|
5 |
+
import warnings
|
6 |
+
from typing import List, Optional, Sequence, Tuple, Union
|
7 |
+
|
8 |
+
import torch
|
9 |
+
import torch._C._onnx as _C_onnx
|
10 |
+
import torch.onnx
|
11 |
+
from torch import _C
|
12 |
+
|
13 |
+
# Monkey-patch graph manipulation methods on Graph, used for the ONNX symbolics
|
14 |
+
from torch.onnx import (
|
15 |
+
_constants,
|
16 |
+
_type_utils,
|
17 |
+
errors,
|
18 |
+
symbolic_helper,
|
19 |
+
symbolic_opset9 as opset9,
|
20 |
+
)
|
21 |
+
from torch.onnx._globals import GLOBALS
|
22 |
+
from torch.onnx._internal import _beartype, jit_utils, registration
|
23 |
+
|
24 |
+
# EDITING THIS FILE? READ THIS FIRST!
|
25 |
+
# see Note [Edit Symbolic Files] in README.md
|
26 |
+
|
27 |
+
# This file exports ONNX ops for opset 10
|
28 |
+
# Opset 10 is supported by ONNX release 1.5.0
|
29 |
+
# release on 04/24/19
|
30 |
+
|
31 |
+
|
32 |
+
__all__ = [
|
33 |
+
"dequantize",
|
34 |
+
"div",
|
35 |
+
"embedding_bag",
|
36 |
+
"fake_quantize_per_tensor_affine",
|
37 |
+
"flip",
|
38 |
+
"fmod",
|
39 |
+
"isfinite",
|
40 |
+
"isinf",
|
41 |
+
"nan_to_num",
|
42 |
+
"quantize_per_tensor",
|
43 |
+
"quantized_add_relu",
|
44 |
+
"quantized_add",
|
45 |
+
"quantized_cat",
|
46 |
+
"quantized_conv1d_relu",
|
47 |
+
"quantized_conv2d_relu",
|
48 |
+
"quantized_conv3d_relu",
|
49 |
+
"quantized_conv1d",
|
50 |
+
"quantized_conv2d",
|
51 |
+
"quantized_conv3d",
|
52 |
+
"quantized_conv_transpose1d",
|
53 |
+
"quantized_conv_transpose2d",
|
54 |
+
"quantized_conv_transpose3d",
|
55 |
+
"quantized_group_norm",
|
56 |
+
"quantized_hardswish",
|
57 |
+
"quantized_instance_norm",
|
58 |
+
"quantized_layer_norm",
|
59 |
+
"quantized_leaky_relu",
|
60 |
+
"quantized_linear",
|
61 |
+
"quantized_linear_relu",
|
62 |
+
"quantized_mul",
|
63 |
+
"quantized_sigmoid",
|
64 |
+
"slice",
|
65 |
+
"sort",
|
66 |
+
"topk",
|
67 |
+
]
|
68 |
+
|
69 |
+
|
70 |
+
_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=10)
|
71 |
+
|
72 |
+
|
73 |
+
def _apply_params(*args, **kwargs):
|
74 |
+
"""Returns a decorator that calls the decorated (higher-order) function with the given parameters."""
|
75 |
+
|
76 |
+
def _apply(fn):
|
77 |
+
return fn(*args, **kwargs)
|
78 |
+
|
79 |
+
return _apply
|
80 |
+
|
81 |
+
|
82 |
+
@_onnx_symbolic("aten::div")
|
83 |
+
@_beartype.beartype
|
84 |
+
def div(g: jit_utils.GraphContext, self, other, *args):
|
85 |
+
if len(args) == 0:
|
86 |
+
return opset9.true_divide(g, self, other)
|
87 |
+
else:
|
88 |
+
return _div_rounding_mode(g, self, other, *args)
|
89 |
+
|
90 |
+
|
91 |
+
@symbolic_helper.parse_args("v", "v", "s")
|
92 |
+
@_beartype.beartype
|
93 |
+
def _div_rounding_mode(g: jit_utils.GraphContext, self, other, rounding_mode):
|
94 |
+
if rounding_mode == "floor":
|
95 |
+
return _floor_divide(g, self, other)
|
96 |
+
else:
|
97 |
+
return opset9._div_rounding_mode(g, self, other, rounding_mode)
|
98 |
+
|
99 |
+
|
100 |
+
@_onnx_symbolic("aten::_floor_divide")
|
101 |
+
@_beartype.beartype
|
102 |
+
def _floor_divide(g: jit_utils.GraphContext, self, other):
|
103 |
+
if symbolic_helper._is_fp(self) or symbolic_helper._is_fp(other):
|
104 |
+
out = opset9.true_divide(g, self, other)
|
105 |
+
return g.op("Floor", out)
|
106 |
+
else:
|
107 |
+
# Integer division does trunction rounding
|
108 |
+
div = g.op("Div", self, other)
|
109 |
+
# Division is negative if: self < 0 != other < 0
|
110 |
+
zero = g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64))
|
111 |
+
negative = g.op("Xor", g.op("Less", self, zero), g.op("Less", other, zero))
|
112 |
+
|
113 |
+
# For negative numbers with self % other != 0, subtract 1 to round down instead of up
|
114 |
+
mod = g.op("Mod", self, other, fmod_i=0)
|
115 |
+
fixup_mask = g.op("And", negative, g.op("Not", g.op("Equal", mod, zero)))
|
116 |
+
|
117 |
+
one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64))
|
118 |
+
fixup = g.op("Sub", div, one)
|
119 |
+
return g.op("Where", fixup_mask, fixup, div)
|
120 |
+
|
121 |
+
|
122 |
+
@_onnx_symbolic("aten::sort")
|
123 |
+
@symbolic_helper.parse_args("v", "i", "i", "none")
|
124 |
+
@_beartype.beartype
|
125 |
+
def sort(g: jit_utils.GraphContext, self, dim, decending, out=None):
|
126 |
+
return symbolic_helper._sort_helper(g, self, dim, decending=decending, out=out)
|
127 |
+
|
128 |
+
|
129 |
+
@_onnx_symbolic("aten::topk")
|
130 |
+
@symbolic_helper.parse_args("v", "v", "i", "i", "i", "none")
|
131 |
+
@_beartype.beartype
|
132 |
+
def topk(g: jit_utils.GraphContext, self, k, dim, largest, sorted, out=None):
|
133 |
+
return symbolic_helper._topk_helper(
|
134 |
+
g, self, k, dim, largest=largest, sorted=sorted, out=out
|
135 |
+
)
|
136 |
+
|
137 |
+
|
138 |
+
def _aten_max_pool_onnx(
|
139 |
+
g: jit_utils.GraphContext,
|
140 |
+
self: _C.Value,
|
141 |
+
kernel_shape: Sequence[int],
|
142 |
+
strides: Sequence[int],
|
143 |
+
pads: Sequence[int],
|
144 |
+
dilations: Sequence[int],
|
145 |
+
ceil_mode: bool,
|
146 |
+
unbatched_rank: int,
|
147 |
+
) -> _C.Value:
|
148 |
+
self_rank = g.op("Size", g.op("Shape", self))
|
149 |
+
if self_rank == unbatched_rank: # C,H,W -> N,C,H,W and N=1
|
150 |
+
self = g.op(
|
151 |
+
"Unsqueeze",
|
152 |
+
self,
|
153 |
+
g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)),
|
154 |
+
)
|
155 |
+
|
156 |
+
pool_result, _ = g.op(
|
157 |
+
"MaxPool",
|
158 |
+
self,
|
159 |
+
outputs=2,
|
160 |
+
ceil_mode_i=ceil_mode,
|
161 |
+
dilations_i=dilations,
|
162 |
+
kernel_shape_i=kernel_shape,
|
163 |
+
pads_i=pads,
|
164 |
+
strides_i=strides,
|
165 |
+
)
|
166 |
+
|
167 |
+
if self_rank == unbatched_rank:
|
168 |
+
pool_result = g.op(
|
169 |
+
"Squeeze",
|
170 |
+
pool_result,
|
171 |
+
g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)),
|
172 |
+
)
|
173 |
+
|
174 |
+
return pool_result
|
175 |
+
|
176 |
+
|
177 |
+
# For MaxPool
|
178 |
+
def _adjust_attributes_of_max_pool(
|
179 |
+
expand_size: int,
|
180 |
+
kernel_size: Union[Sequence[int], int],
|
181 |
+
stride: Union[Sequence[int], int],
|
182 |
+
padding: Union[Sequence[int], int],
|
183 |
+
dilation: Union[Sequence[int], int],
|
184 |
+
) -> Tuple[Sequence[int], Sequence[int], Sequence[int], Sequence[int]]:
|
185 |
+
"""Adjust attributes of avg_pool to match ONNX specification."""
|
186 |
+
|
187 |
+
if isinstance(dilation, int):
|
188 |
+
dilation = [dilation] * expand_size
|
189 |
+
|
190 |
+
if isinstance(kernel_size, int):
|
191 |
+
kernel_shape = [kernel_size] * expand_size
|
192 |
+
else:
|
193 |
+
kernel_shape = kernel_size # type: ignore[assignment]
|
194 |
+
|
195 |
+
if isinstance(padding, int):
|
196 |
+
pads = [padding] * expand_size * 2 # type: ignore[operator, assignment]
|
197 |
+
elif len(padding) == 1:
|
198 |
+
pads = padding * expand_size * 2 # type: ignore[operator, assignment]
|
199 |
+
elif len(padding) == 2:
|
200 |
+
# 2D padding
|
201 |
+
pads = padding * 2 # type: ignore[operator, assignment]
|
202 |
+
elif len(padding) == 3:
|
203 |
+
# 3D padding
|
204 |
+
pads = padding * 2 # type: ignore[operator, assignment]
|
205 |
+
else:
|
206 |
+
# When padding is already done for all dimensions,
|
207 |
+
# we don't need to double it
|
208 |
+
# eg: (1, 1, 1, 1, 1, 1)
|
209 |
+
pads = padding # type: ignore[assignment]
|
210 |
+
|
211 |
+
if isinstance(stride, int):
|
212 |
+
strides = [stride] * expand_size
|
213 |
+
elif not stride:
|
214 |
+
strides = kernel_shape
|
215 |
+
else:
|
216 |
+
strides = stride # type: ignore[assignment]
|
217 |
+
|
218 |
+
return (kernel_shape, strides, pads, dilation)
|
219 |
+
|
220 |
+
|
221 |
+
def _aten_max_pool_with_indices_onnx(
|
222 |
+
g: jit_utils.GraphContext,
|
223 |
+
self: _C.Value,
|
224 |
+
kernel_shape: Sequence[int],
|
225 |
+
strides: Sequence[int],
|
226 |
+
pads: Sequence[int],
|
227 |
+
dilations: Sequence[int],
|
228 |
+
ceil_mode: bool,
|
229 |
+
unbatched_rank: int,
|
230 |
+
n_dims_one: Sequence[int],
|
231 |
+
n_dims_zero: Sequence[int],
|
232 |
+
n_dims_axes: Sequence[int],
|
233 |
+
) -> Tuple[_C.Value, Sequence[int]]:
|
234 |
+
self_rank = g.op("Size", g.op("Shape", self))
|
235 |
+
if self_rank == unbatched_rank: # C,H,W -> N,C,H,W and N=1
|
236 |
+
self = g.op(
|
237 |
+
"Unsqueeze",
|
238 |
+
self,
|
239 |
+
g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)),
|
240 |
+
)
|
241 |
+
|
242 |
+
pool_result, indices = g.op(
|
243 |
+
"MaxPool",
|
244 |
+
self,
|
245 |
+
outputs=2,
|
246 |
+
ceil_mode_i=ceil_mode,
|
247 |
+
dilations_i=dilations,
|
248 |
+
kernel_shape_i=kernel_shape,
|
249 |
+
pads_i=pads,
|
250 |
+
strides_i=strides,
|
251 |
+
)
|
252 |
+
_, flatten_indices = g.op(
|
253 |
+
"MaxPool",
|
254 |
+
self,
|
255 |
+
outputs=2,
|
256 |
+
dilations_i=dilations,
|
257 |
+
kernel_shape_i=n_dims_one,
|
258 |
+
strides_i=n_dims_one,
|
259 |
+
)
|
260 |
+
|
261 |
+
ends = g.op("Constant", value_t=torch.tensor(n_dims_one))
|
262 |
+
starts = g.op("Constant", value_t=torch.tensor(n_dims_zero))
|
263 |
+
axes = g.op("Constant", value_t=torch.tensor(n_dims_axes))
|
264 |
+
|
265 |
+
delta = g.op("Slice", flatten_indices, starts, ends, axes)
|
266 |
+
indices = g.op("Sub", indices, delta)
|
267 |
+
|
268 |
+
if self_rank == unbatched_rank:
|
269 |
+
pool_result = g.op(
|
270 |
+
"Squeeze", pool_result, value_t=torch.tensor([0], dtype=torch.int64)
|
271 |
+
)
|
272 |
+
indices = g.op("Squeeze", indices, value_t=torch.tensor([0], dtype=torch.int64))
|
273 |
+
|
274 |
+
return (pool_result, indices)
|
275 |
+
|
276 |
+
|
277 |
+
@_onnx_symbolic(
|
278 |
+
"aten::max_pool1d",
|
279 |
+
decorate=[_apply_params("max_pool1d", 1, return_indices=False)],
|
280 |
+
)
|
281 |
+
@_onnx_symbolic(
|
282 |
+
"aten::max_pool2d",
|
283 |
+
decorate=[_apply_params("max_pool2d", 2, return_indices=False)],
|
284 |
+
)
|
285 |
+
@_onnx_symbolic(
|
286 |
+
"aten::max_pool3d",
|
287 |
+
decorate=[_apply_params("max_pool3d", 3, return_indices=False)],
|
288 |
+
)
|
289 |
+
@_onnx_symbolic(
|
290 |
+
"aten::max_pool1d_with_indices",
|
291 |
+
decorate=[
|
292 |
+
_apply_params(
|
293 |
+
"max_pool1d_with_indices",
|
294 |
+
1,
|
295 |
+
return_indices=True,
|
296 |
+
)
|
297 |
+
],
|
298 |
+
)
|
299 |
+
@_onnx_symbolic(
|
300 |
+
"aten::max_pool2d_with_indices",
|
301 |
+
decorate=[
|
302 |
+
_apply_params(
|
303 |
+
"max_pool2d_with_indices",
|
304 |
+
2,
|
305 |
+
return_indices=True,
|
306 |
+
)
|
307 |
+
],
|
308 |
+
)
|
309 |
+
@_onnx_symbolic(
|
310 |
+
"aten::max_pool3d_with_indices",
|
311 |
+
decorate=[
|
312 |
+
_apply_params(
|
313 |
+
"max_pool3d_with_indices",
|
314 |
+
3,
|
315 |
+
return_indices=True,
|
316 |
+
)
|
317 |
+
],
|
318 |
+
)
|
319 |
+
@_beartype.beartype
|
320 |
+
def _max_pool(name: str, expand_size: int, return_indices: bool):
|
321 |
+
@symbolic_helper.quantized_args(True, False, False, False, False, False)
|
322 |
+
@symbolic_helper.parse_args("v", "is", "is", "is", "is", "i")
|
323 |
+
def symbolic_fn(
|
324 |
+
g: jit_utils.GraphContext,
|
325 |
+
input: _C.Value,
|
326 |
+
kernel_size: Sequence[int],
|
327 |
+
stride: Sequence[int],
|
328 |
+
padding: Union[int, Sequence[int]],
|
329 |
+
dilation: Sequence[int],
|
330 |
+
ceil_mode: bool,
|
331 |
+
):
|
332 |
+
kernel_shape, strides, pads, dilations = _adjust_attributes_of_max_pool(
|
333 |
+
expand_size, kernel_size, stride, padding, dilation
|
334 |
+
)
|
335 |
+
|
336 |
+
if return_indices:
|
337 |
+
return _aten_max_pool_with_indices_onnx(
|
338 |
+
g,
|
339 |
+
input,
|
340 |
+
kernel_shape,
|
341 |
+
strides,
|
342 |
+
pads,
|
343 |
+
dilations,
|
344 |
+
ceil_mode,
|
345 |
+
expand_size + 1,
|
346 |
+
([1] * expand_size),
|
347 |
+
([0] * expand_size),
|
348 |
+
([2 + i for i in range(expand_size)]),
|
349 |
+
)
|
350 |
+
else:
|
351 |
+
return _aten_max_pool_onnx(
|
352 |
+
g,
|
353 |
+
input,
|
354 |
+
kernel_shape,
|
355 |
+
strides,
|
356 |
+
pads,
|
357 |
+
dilations,
|
358 |
+
ceil_mode,
|
359 |
+
expand_size + 1,
|
360 |
+
)
|
361 |
+
|
362 |
+
return symbolic_fn
|
363 |
+
|
364 |
+
|
365 |
+
# For AvgPool
|
366 |
+
def _adjust_attributes_of_avg_pool(
|
367 |
+
expand_size: int,
|
368 |
+
kernel_size: Union[Sequence[int], int],
|
369 |
+
stride: Union[Sequence[int], int],
|
370 |
+
padding: Union[Sequence[int], int],
|
371 |
+
) -> Tuple[Sequence[int], Sequence[int], Sequence[int]]:
|
372 |
+
"""Adjust attributes of avg_pool to match ONNX specification."""
|
373 |
+
|
374 |
+
if isinstance(kernel_size, int):
|
375 |
+
kernel_shape = [kernel_size] * expand_size
|
376 |
+
else:
|
377 |
+
kernel_shape = kernel_size # type: ignore[assignment]
|
378 |
+
|
379 |
+
if isinstance(padding, int):
|
380 |
+
pads = [padding] * expand_size * 2
|
381 |
+
elif len(padding) == 1:
|
382 |
+
pads = padding * expand_size * 2 # type: ignore[operator, assignment]
|
383 |
+
elif len(padding) == 2:
|
384 |
+
pads = padding * expand_size # type: ignore[operator, assignment]
|
385 |
+
else:
|
386 |
+
pads = padding * 2 # type: ignore[operator, assignment]
|
387 |
+
|
388 |
+
if isinstance(stride, int):
|
389 |
+
strides = [stride] * expand_size
|
390 |
+
elif not stride:
|
391 |
+
strides = kernel_shape
|
392 |
+
else:
|
393 |
+
strides = stride # type: ignore[assignment]
|
394 |
+
|
395 |
+
return (kernel_shape, strides, pads)
|
396 |
+
|
397 |
+
|
398 |
+
@_onnx_symbolic(
|
399 |
+
"aten::avg_pool1d",
|
400 |
+
decorate=[_apply_params("avg_pool1d", 1)],
|
401 |
+
)
|
402 |
+
@_onnx_symbolic(
|
403 |
+
"aten::avg_pool2d",
|
404 |
+
decorate=[_apply_params("avg_pool2d", 2)],
|
405 |
+
)
|
406 |
+
@_onnx_symbolic(
|
407 |
+
"aten::avg_pool3d",
|
408 |
+
decorate=[_apply_params("avg_pool3d", 3)],
|
409 |
+
)
|
410 |
+
@_beartype.beartype
|
411 |
+
def _avg_pool(name, expand_size):
|
412 |
+
@symbolic_helper.quantized_args(True, False, False, False, False, False, False)
|
413 |
+
@symbolic_helper.parse_args("v", "is", "is", "is", "i", "i", "none")
|
414 |
+
@_beartype.beartype
|
415 |
+
def symbolic_fn(
|
416 |
+
g,
|
417 |
+
input: _C.Value,
|
418 |
+
kernel_size: Sequence[int],
|
419 |
+
stride: Sequence[int],
|
420 |
+
padding: Union[int, Sequence[int]],
|
421 |
+
ceil_mode: int,
|
422 |
+
count_include_pad: int,
|
423 |
+
divisor_override=None,
|
424 |
+
):
|
425 |
+
kernel_shape, strides, pads = _adjust_attributes_of_avg_pool(
|
426 |
+
expand_size, kernel_size, stride, padding
|
427 |
+
)
|
428 |
+
|
429 |
+
result = g.op(
|
430 |
+
"AveragePool",
|
431 |
+
input,
|
432 |
+
ceil_mode_i=ceil_mode,
|
433 |
+
count_include_pad_i=count_include_pad,
|
434 |
+
kernel_shape_i=kernel_shape,
|
435 |
+
pads_i=pads,
|
436 |
+
strides_i=strides,
|
437 |
+
)
|
438 |
+
|
439 |
+
return result
|
440 |
+
|
441 |
+
return symbolic_fn
|
442 |
+
|
443 |
+
|
444 |
+
@_onnx_symbolic(
|
445 |
+
"aten::upsample_nearest1d",
|
446 |
+
decorate=[_apply_params("upsample_nearest1d", 3, "nearest")],
|
447 |
+
)
|
448 |
+
@_onnx_symbolic(
|
449 |
+
"aten::upsample_nearest2d",
|
450 |
+
decorate=[_apply_params("upsample_nearest2d", 4, "nearest")],
|
451 |
+
)
|
452 |
+
@_onnx_symbolic(
|
453 |
+
"aten::upsample_nearest3d",
|
454 |
+
decorate=[_apply_params("upsample_nearest3d", 5, "nearest")],
|
455 |
+
)
|
456 |
+
@_onnx_symbolic(
|
457 |
+
"aten::upsample_linear1d",
|
458 |
+
decorate=[_apply_params("upsample_linear1d", 3, "linear")],
|
459 |
+
)
|
460 |
+
@_onnx_symbolic(
|
461 |
+
"aten::upsample_bilinear2d",
|
462 |
+
decorate=[_apply_params("upsample_bilinear2d", 4, "linear")],
|
463 |
+
)
|
464 |
+
@_onnx_symbolic(
|
465 |
+
"aten::upsample_trilinear3d",
|
466 |
+
decorate=[_apply_params("upsample_trilinear3d", 5, "linear")],
|
467 |
+
)
|
468 |
+
@_beartype.beartype
|
469 |
+
def _interpolate(name, dim, interpolate_mode):
|
470 |
+
@symbolic_helper.quantized_args(True, False, False)
|
471 |
+
@_beartype.beartype
|
472 |
+
def symbolic_fn(g, input, output_size, *args):
|
473 |
+
scales, align_corners = symbolic_helper._get_interpolate_attributes(
|
474 |
+
g, interpolate_mode, args
|
475 |
+
)
|
476 |
+
symbolic_helper._interpolate_warning(interpolate_mode)
|
477 |
+
align_corners = symbolic_helper._maybe_get_scalar(align_corners)
|
478 |
+
if align_corners:
|
479 |
+
return symbolic_helper._unimplemented(name, "align_corners == True", input)
|
480 |
+
if scales is None:
|
481 |
+
scales = symbolic_helper._interpolate_size_to_scales(
|
482 |
+
g, input, output_size, dim
|
483 |
+
)
|
484 |
+
return g.op("Resize", input, scales, mode_s=interpolate_mode)
|
485 |
+
|
486 |
+
return symbolic_fn
|
487 |
+
|
488 |
+
|
489 |
+
@_onnx_symbolic("aten::__interpolate")
|
490 |
+
@_beartype.beartype
|
491 |
+
def __interpolate(
|
492 |
+
g: jit_utils.GraphContext,
|
493 |
+
input,
|
494 |
+
size,
|
495 |
+
scale_factor,
|
496 |
+
mode,
|
497 |
+
align_corners,
|
498 |
+
recompute_scale_factor,
|
499 |
+
antialias,
|
500 |
+
):
|
501 |
+
scales, mode = symbolic_helper._interpolate_get_scales_and_mode(
|
502 |
+
g, input, size, scale_factor, mode, align_corners
|
503 |
+
)
|
504 |
+
return g.op("Resize", input, scales, mode_s=mode)
|
505 |
+
|
506 |
+
|
507 |
+
@_beartype.beartype
|
508 |
+
def _slice(
|
509 |
+
g: jit_utils.GraphContext,
|
510 |
+
input: torch._C.Value,
|
511 |
+
axes: Union[List, torch.Tensor, torch._C.Value],
|
512 |
+
starts: Union[List, torch.Tensor, torch._C.Value],
|
513 |
+
ends: Union[List, torch.Tensor, torch._C.Value],
|
514 |
+
steps: Optional[Union[List, torch.Tensor, torch._C.Value]] = None,
|
515 |
+
):
|
516 |
+
def is_none_value(value):
|
517 |
+
if value is None:
|
518 |
+
return True
|
519 |
+
return (
|
520 |
+
isinstance(value, torch._C.Value)
|
521 |
+
and value.node().kind() == "prim::Constant"
|
522 |
+
and isinstance(value.type(), _C.NoneType)
|
523 |
+
)
|
524 |
+
|
525 |
+
def to_slice_input(list_or_value, default_value=None):
|
526 |
+
# Convert input param into a 1D torch.Value.
|
527 |
+
if is_none_value(list_or_value) and default_value is not None:
|
528 |
+
list_or_value = [default_value]
|
529 |
+
|
530 |
+
if isinstance(list_or_value, (list, torch.Tensor)):
|
531 |
+
return g.op("Constant", value_t=torch.tensor(list_or_value))
|
532 |
+
|
533 |
+
rank = symbolic_helper._get_tensor_rank(list_or_value)
|
534 |
+
if rank == 0:
|
535 |
+
return symbolic_helper._unsqueeze_helper(g, list_or_value, [0])
|
536 |
+
if rank == 1:
|
537 |
+
return list_or_value
|
538 |
+
raise errors.SymbolicValueError(
|
539 |
+
f"Rank must be 0 or 1, not {rank}", list_or_value
|
540 |
+
)
|
541 |
+
|
542 |
+
def get_const_value(list_or_value):
|
543 |
+
if isinstance(list_or_value, (list, torch.Tensor)):
|
544 |
+
if len(list_or_value) == 1:
|
545 |
+
return list_or_value[0]
|
546 |
+
return None
|
547 |
+
return symbolic_helper._maybe_get_const(list_or_value, "i")
|
548 |
+
|
549 |
+
# Check if slice is a no-op
|
550 |
+
if (
|
551 |
+
get_const_value(starts) == 0
|
552 |
+
and get_const_value(ends) == _constants.INT64_MAX
|
553 |
+
and (steps is None or get_const_value(steps) == 1)
|
554 |
+
):
|
555 |
+
return input
|
556 |
+
|
557 |
+
axes = to_slice_input(axes)
|
558 |
+
starts = to_slice_input(starts, default_value=0)
|
559 |
+
ends = to_slice_input(ends, default_value=_constants.INT64_MAX)
|
560 |
+
if steps is None:
|
561 |
+
return g.op("Slice", input, starts, ends, axes)
|
562 |
+
steps = to_slice_input(steps, default_value=1)
|
563 |
+
return g.op("Slice", input, starts, ends, axes, steps)
|
564 |
+
|
565 |
+
|
566 |
+
@_onnx_symbolic("aten::slice")
|
567 |
+
@_beartype.beartype
|
568 |
+
def slice(g: jit_utils.GraphContext, self, *args):
|
569 |
+
if len(args) == 4:
|
570 |
+
# aten::slice(Tensor self, int dim, int? start=None, int? end=None, int step=1) -> Tensor
|
571 |
+
dims, start, end, step = args
|
572 |
+
elif len(args) == 3:
|
573 |
+
# aten::slice(t[] l, int? start=None, int? end=None, int step=1) -> t[]
|
574 |
+
start, end, step = args
|
575 |
+
dims = [0]
|
576 |
+
else:
|
577 |
+
raise errors.SymbolicValueError("Unknown aten::slice signature", self)
|
578 |
+
|
579 |
+
return symbolic_helper._slice_helper(
|
580 |
+
g,
|
581 |
+
self,
|
582 |
+
axes=dims,
|
583 |
+
starts=start,
|
584 |
+
ends=end,
|
585 |
+
steps=step,
|
586 |
+
)
|
587 |
+
|
588 |
+
|
589 |
+
@_onnx_symbolic("aten::flip")
|
590 |
+
@symbolic_helper.parse_args("v", "is")
|
591 |
+
@_beartype.beartype
|
592 |
+
def flip(g: jit_utils.GraphContext, input, dims):
|
593 |
+
return symbolic_helper._slice_helper(
|
594 |
+
g,
|
595 |
+
input,
|
596 |
+
axes=dims,
|
597 |
+
starts=[-1] * len(dims),
|
598 |
+
ends=[-_constants.INT64_MAX] * len(dims),
|
599 |
+
steps=[-1] * len(dims),
|
600 |
+
)
|
601 |
+
|
602 |
+
|
603 |
+
@_onnx_symbolic("aten::fmod")
|
604 |
+
@_beartype.beartype
|
605 |
+
def fmod(g: jit_utils.GraphContext, input, other):
|
606 |
+
return g.op("Mod", input, other, fmod_i=1)
|
607 |
+
|
608 |
+
|
609 |
+
@_onnx_symbolic("aten::embedding_bag")
|
610 |
+
@symbolic_helper.parse_args("v", "v", "v", "i", "i", "i", "v", "i", "i")
|
611 |
+
@_beartype.beartype
|
612 |
+
def embedding_bag(
|
613 |
+
g: jit_utils.GraphContext,
|
614 |
+
embedding_matrix,
|
615 |
+
indices,
|
616 |
+
offsets,
|
617 |
+
scale_grad_by_freq,
|
618 |
+
mode,
|
619 |
+
sparse,
|
620 |
+
per_sample_weights,
|
621 |
+
include_last_offset,
|
622 |
+
padding_idx,
|
623 |
+
):
|
624 |
+
if scale_grad_by_freq and GLOBALS.export_training:
|
625 |
+
return symbolic_helper._onnx_unsupported(
|
626 |
+
"embedding_bag with scale_grad_by_freq for training mode"
|
627 |
+
)
|
628 |
+
if padding_idx is not None and padding_idx >= 0:
|
629 |
+
raise RuntimeError("embedding_bag with padding_idx")
|
630 |
+
|
631 |
+
warnings.warn(
|
632 |
+
"Export of embedding_bag with dynamic input/offsets shape is not supported in opset 10. "
|
633 |
+
"Please use opset 11 or higher to export model for dynamic input shape.'"
|
634 |
+
)
|
635 |
+
offsets_dim_0 = symbolic_helper._get_tensor_dim_size(offsets, 0)
|
636 |
+
if offsets_dim_0 is not None:
|
637 |
+
if include_last_offset:
|
638 |
+
offset_len = offsets_dim_0 - 1
|
639 |
+
offsets_extended = offsets
|
640 |
+
else:
|
641 |
+
offset_len = offsets_dim_0
|
642 |
+
offsets_extended = [
|
643 |
+
offsets,
|
644 |
+
g.op("Constant", value_t=torch.tensor([sys.maxsize])),
|
645 |
+
]
|
646 |
+
offsets_extended = g.op("Concat", *offsets_extended, axis_i=0)
|
647 |
+
list_ = []
|
648 |
+
for i in range(offset_len):
|
649 |
+
start_ = symbolic_helper._unsqueeze_helper(
|
650 |
+
g,
|
651 |
+
opset9.select(g, offsets_extended, torch.tensor(0), torch.tensor(i)),
|
652 |
+
[0],
|
653 |
+
)
|
654 |
+
end_ = symbolic_helper._unsqueeze_helper(
|
655 |
+
g,
|
656 |
+
opset9.select(
|
657 |
+
g, offsets_extended, torch.tensor(0), torch.tensor(i + 1)
|
658 |
+
),
|
659 |
+
[0],
|
660 |
+
)
|
661 |
+
axes_ = g.op("Constant", value_t=torch.tensor([0]))
|
662 |
+
indices_row = g.op("Slice", indices, start_, end_, axes_)
|
663 |
+
|
664 |
+
embeddings = g.op("Gather", embedding_matrix, indices_row)
|
665 |
+
if not symbolic_helper._is_none(per_sample_weights):
|
666 |
+
per_sample_weights_row = g.op(
|
667 |
+
"Slice", per_sample_weights, start_, end_, axes_
|
668 |
+
)
|
669 |
+
per_sample_weights_row = symbolic_helper._unsqueeze_helper(
|
670 |
+
g, per_sample_weights_row, [1]
|
671 |
+
)
|
672 |
+
embeddings = g.op("Mul", embeddings, per_sample_weights_row)
|
673 |
+
if mode == 0:
|
674 |
+
embeddings = symbolic_helper._reducesum_helper(
|
675 |
+
g, embeddings, axes_i=[0], keepdims_i=0
|
676 |
+
)
|
677 |
+
elif mode == 1:
|
678 |
+
embeddings = g.op("ReduceMean", embeddings, axes_i=[0], keepdims_i=0)
|
679 |
+
else:
|
680 |
+
embeddings = g.op("ReduceMax", embeddings, axes_i=[0], keepdims_i=0)
|
681 |
+
|
682 |
+
embeddings = symbolic_helper._unsqueeze_helper(g, embeddings, [0])
|
683 |
+
list_.append(embeddings)
|
684 |
+
|
685 |
+
output = g.op("Concat", *list_, axis_i=0)
|
686 |
+
# aten::embedding_bag returns a tuple of 4 elements: output, offset2bag, bag_size, max_indices.
|
687 |
+
# But the last three outputs are not used in torch.nn.EmbeddingBag or torch.nn.functional.embedding_bag.
|
688 |
+
return output, None, None, None
|
689 |
+
else:
|
690 |
+
return symbolic_helper._onnx_unsupported(
|
691 |
+
"embedding_bag with unknown shape of offsets for opset 10 is not supported. "
|
692 |
+
"please use opset 11 or higher."
|
693 |
+
)
|
694 |
+
|
695 |
+
|
696 |
+
@_onnx_symbolic("aten::fake_quantize_per_tensor_affine")
|
697 |
+
@symbolic_helper.parse_args("v", "v", "v", "i", "i")
|
698 |
+
@_beartype.beartype
|
699 |
+
def fake_quantize_per_tensor_affine(
|
700 |
+
g: jit_utils.GraphContext,
|
701 |
+
inputs,
|
702 |
+
scale,
|
703 |
+
zero_point,
|
704 |
+
quant_min=-128,
|
705 |
+
quant_max=127,
|
706 |
+
):
|
707 |
+
# NOTE: (0, 127) is a special case. PyTorch restricts activations to be in the range (0, 127).
|
708 |
+
# https://github.com/pytorch/pytorch/blob/b34b192d6b97325c9f78e5995c48c8498ede34bd/torch/ao/quantization/observer.py#L1422
|
709 |
+
if (quant_min, quant_max) == (0, 127):
|
710 |
+
symbolic_helper._onnx_opset_unsupported_detailed(
|
711 |
+
"fake_quantize_per_tensor_affine",
|
712 |
+
10,
|
713 |
+
13,
|
714 |
+
"Quantize range (0, 127) not supported, requires opset 13 Clip",
|
715 |
+
inputs,
|
716 |
+
)
|
717 |
+
if (quant_min, quant_max) not in [(0, 255), (-128, 127)]:
|
718 |
+
raise errors.SymbolicValueError(
|
719 |
+
f"For (quant_min, quant_max), ONNX allows only (0, 255) and (-128, 127). "
|
720 |
+
f"Got ({quant_min}, {quant_max})",
|
721 |
+
inputs,
|
722 |
+
)
|
723 |
+
scale = symbolic_helper._maybe_get_scalar(scale)
|
724 |
+
if scale is None:
|
725 |
+
symbolic_helper._onnx_opset_unsupported_detailed(
|
726 |
+
"fake_quantize_per_tensor_affine",
|
727 |
+
10,
|
728 |
+
13,
|
729 |
+
"Non-constant scale not supported",
|
730 |
+
inputs,
|
731 |
+
)
|
732 |
+
scale = scale.float().data # Avoid exporter generating double type
|
733 |
+
if quant_min == 0:
|
734 |
+
zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8)
|
735 |
+
else:
|
736 |
+
zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.INT8)
|
737 |
+
return g.op(
|
738 |
+
"DequantizeLinear",
|
739 |
+
g.op("QuantizeLinear", inputs, scale, zero_point),
|
740 |
+
scale,
|
741 |
+
zero_point,
|
742 |
+
)
|
743 |
+
|
744 |
+
|
745 |
+
@_onnx_symbolic("aten::isinf")
|
746 |
+
@_beartype.beartype
|
747 |
+
def isinf(g: jit_utils.GraphContext, input):
|
748 |
+
return g.op("IsInf", g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.DOUBLE))
|
749 |
+
|
750 |
+
|
751 |
+
@_onnx_symbolic("aten::isfinite")
|
752 |
+
@_beartype.beartype
|
753 |
+
def isfinite(g: jit_utils.GraphContext, input):
|
754 |
+
inf_node = isinf(g, input)
|
755 |
+
nan_node = opset9.isnan(g, input)
|
756 |
+
return opset9.__not_(g, opset9.__or_(g, inf_node, nan_node))
|
757 |
+
|
758 |
+
|
759 |
+
@_onnx_symbolic("aten::quantize_per_tensor")
|
760 |
+
@_beartype.beartype
|
761 |
+
def quantize_per_tensor(g: jit_utils.GraphContext, input, scale, zero_point, dtype):
|
762 |
+
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
|
763 |
+
# TODO(justinchuby): Extract all the cast ops into a helper function.
|
764 |
+
zero_point = g.op(
|
765 |
+
"Cast", zero_point, to_i=_type_utils.JitScalarType(dtype).onnx_type()
|
766 |
+
)
|
767 |
+
scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT)
|
768 |
+
return symbolic_helper.quantize_helper(g, input, scale, zero_point)
|
769 |
+
|
770 |
+
|
771 |
+
@_onnx_symbolic("aten::dequantize")
|
772 |
+
@_beartype.beartype
|
773 |
+
def dequantize(g: jit_utils.GraphContext, input):
|
774 |
+
return symbolic_helper.dequantize_helper(g, input)[0]
|
775 |
+
|
776 |
+
|
@_onnx_symbolic("aten::nan_to_num")
@symbolic_helper.parse_args("v", "f", "f", "f")
@_beartype.beartype
def nan_to_num(g: jit_utils.GraphContext, input, nan, posinf, neginf):
    # Cannot create an int-type tensor with inf/nan values, so we simply
    # return the original tensor
    if not symbolic_helper._is_fp(input):
        return input
    input_dtype = _type_utils.JitScalarType.from_value(input).dtype()
    if nan is None:
        nan = 0.0
    nan_cond = opset9.isnan(g, input)
    nan_result = g.op(
        "Where",
        nan_cond,
        g.op("Constant", value_t=torch.tensor([nan], dtype=input_dtype)),
        input,
    )

    # For None values of posinf, neginf we use the greatest/lowest finite
    # value representable by input's dtype.
    finfo = torch.finfo(input_dtype)
    if posinf is None:
        posinf = finfo.max
    posinf_cond = opset9.logical_and(
        g,
        isinf(g, nan_result),
        opset9.gt(g, nan_result, g.op("Constant", value_t=torch.LongTensor([0]))),
    )
    nan_posinf_result = g.op(
        "Where",
        posinf_cond,
        g.op("Constant", value_t=torch.tensor([posinf], dtype=input_dtype)),
        nan_result,
    )

    if neginf is None:
        neginf = finfo.min
    neginf_cond = opset9.logical_and(
        g,
        isinf(g, nan_posinf_result),
        opset9.lt(
            g, nan_posinf_result, g.op("Constant", value_t=torch.LongTensor([0]))
        ),
    )
    return g.op(
        "Where",
        neginf_cond,
        g.op("Constant", value_t=torch.tensor([neginf], dtype=input_dtype)),
        nan_posinf_result,
    )

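# For reference (illustrative): the graph above mirrors eager-mode
# torch.nan_to_num with default arguments; for a float32 input,
#     torch.nan_to_num(torch.tensor([float("nan"), float("inf"), -float("inf")]))
#     -> tensor([ 0.0000e+00,  3.4028e+38, -3.4028e+38])
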
# Quantized symbolics ---------------------------------------------------------
# https://github.com/pytorch/pytorch/wiki/PyTorch-ONNX-exporter#quantized-model-export
# Support starts from opset 10 because `DequantizeLinear` and `QuantizeLinear` were
# introduced in opset version 10.
@_onnx_symbolic("quantized::linear")
@_beartype.beartype
def quantized_linear(
    g: jit_utils.GraphContext, q_input, q_weight, bias, op_scale, op_zero_point
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale)
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.linear(g, input, weight, bias)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)

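# Reading aid (a sketch of the shared pattern): every `quantized::*` symbolic
# below follows the same three steps as `quantized_linear` above:
#   1. DequantizeLinear the packed integer inputs back to float,
#   2. run the float-space opset 9 op (linear, add, conv, ...),
#   3. QuantizeLinear the result with the op's output scale and zero point.
# `requantize_bias_helper` re-quantizes the bias with
# scale = input_scale * weight_scale, matching quantized backend conventions.
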
@_onnx_symbolic("quantized::linear_relu")
@_beartype.beartype
def quantized_linear_relu(
    g: jit_utils.GraphContext, q_input, q_weight, bias, op_scale, op_zero_point
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale)
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.linear(g, input, weight, bias)
    output = opset9.relu(g, output)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::add")
@_beartype.beartype
def quantized_add(g: jit_utils.GraphContext, x, y, op_scale, op_zero_point):
    x, _, _, _ = symbolic_helper.dequantize_helper(g, x)
    y, _, _, _ = symbolic_helper.dequantize_helper(g, y)

    output = opset9.add(g, x, y)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::add_relu")
@_beartype.beartype
def quantized_add_relu(g: jit_utils.GraphContext, x, y, op_scale, op_zero_point):
    x, _, _, _ = symbolic_helper.dequantize_helper(g, x)
    y, _, _, _ = symbolic_helper.dequantize_helper(g, y)

    output = opset9.add(g, x, y)
    output = opset9.relu(g, output)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::mul")
@_beartype.beartype
def quantized_mul(g: jit_utils.GraphContext, x, y, op_scale, op_zero_point):
    x, _, _, _ = symbolic_helper.dequantize_helper(g, x)
    y, _, _, _ = symbolic_helper.dequantize_helper(g, y)

    output = opset9.mul(g, x, y)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::hardswish")
@_beartype.beartype
def quantized_hardswish(g: jit_utils.GraphContext, x, op_scale, op_zero_point):
    x, _, _, _ = symbolic_helper.dequantize_helper(g, x)

    output = opset9.hardswish(g, x)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::sigmoid")
@_beartype.beartype
def quantized_sigmoid(g: jit_utils.GraphContext, x, op_scale, op_zero_point):
    x, _, _, _ = symbolic_helper.dequantize_helper(g, x)

    output = opset9.sigmoid(g, x)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::leaky_relu")
@_beartype.beartype
def quantized_leaky_relu(
    g: jit_utils.GraphContext, x, negative_slope, inplace, op_scale, op_zero_point
):
    x, _, _, _ = symbolic_helper.dequantize_helper(g, x)

    output = opset9.leaky_relu(g, x, negative_slope, inplace)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)

@_onnx_symbolic("quantized::layer_norm")
@_beartype.beartype
def quantized_layer_norm(
    g: jit_utils.GraphContext,
    x,
    normalized_shape,
    weight,
    bias,
    eps,
    op_scale,
    op_zero_point,
):
    x, _, _, _ = symbolic_helper.dequantize_helper(g, x)

    output = opset9.layer_norm(g, x, normalized_shape, weight, bias, eps, False)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::group_norm")
@_beartype.beartype
def quantized_group_norm(
    g: jit_utils.GraphContext,
    x,
    num_groups,
    weight,
    bias,
    eps,
    op_scale,
    op_zero_point,
):
    x, _, _, _ = symbolic_helper.dequantize_helper(g, x)

    output = opset9.group_norm(g, x, num_groups, weight, bias, eps, False)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::instance_norm")
@symbolic_helper.parse_args("v", "v", "v", "f", "v", "v")
@_beartype.beartype
def quantized_instance_norm(
    g: jit_utils.GraphContext,
    q_input,
    weight,
    bias,
    eps,
    op_scale,
    op_zero_point,
):
    input, _, _, _ = symbolic_helper.dequantize_helper(g, q_input)

    output = opset9.instance_norm(
        g, input, weight, bias, None, None, False, 0.0, eps, False
    )

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)

@_onnx_symbolic("quantized::conv1d_relu")
@_beartype.beartype
def quantized_conv1d_relu(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale)
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv1d(g, input, weight, bias, stride, padding, dilation, groups)
    output = opset9.relu(g, output)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv2d_relu")
@_beartype.beartype
def quantized_conv2d_relu(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale)
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv2d(g, input, weight, bias, stride, padding, dilation, groups)
    output = opset9.relu(g, output)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv3d_relu")
@_beartype.beartype
def quantized_conv3d_relu(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale)
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv3d(g, input, weight, bias, stride, padding, dilation, groups)
    output = opset9.relu(g, output)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)

@_onnx_symbolic("quantized::conv1d")
@_beartype.beartype
def quantized_conv1d(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale)
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv1d(g, input, weight, bias, stride, padding, dilation, groups)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv2d")
@_beartype.beartype
def quantized_conv2d(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale)
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv2d(g, input, weight, bias, stride, padding, dilation, groups)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv3d")
@_beartype.beartype
def quantized_conv3d(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale)
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv3d(g, input, weight, bias, stride, padding, dilation, groups)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)

@_onnx_symbolic("quantized::conv_transpose1d")
@_beartype.beartype
def quantized_conv_transpose1d(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    output_padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale)
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv_transpose1d(
        g, input, weight, bias, stride, padding, output_padding, groups, dilation
    )

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv_transpose2d")
@_beartype.beartype
def quantized_conv_transpose2d(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    output_padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale)
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv_transpose2d(
        g, input, weight, bias, stride, padding, output_padding, groups, dilation
    )

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


@_onnx_symbolic("quantized::conv_transpose3d")
@_beartype.beartype
def quantized_conv_transpose3d(
    g: jit_utils.GraphContext,
    q_input,
    q_weight,
    bias,
    stride,
    padding,
    output_padding,
    dilation,
    groups,
    op_scale,
    op_zero_point,
):
    input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
    weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
    q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale)
    bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

    output = opset9.conv_transpose3d(
        g, input, weight, bias, stride, padding, output_padding, groups, dilation
    )

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)

@_onnx_symbolic("quantized::cat")
@symbolic_helper.parse_args("v", "i", "v", "v")
@_beartype.beartype
def quantized_cat(
    g: jit_utils.GraphContext,
    q_inputs: _C.Value,
    dim: int,
    op_scale: _C.Value,
    op_zero_point: _C.Value,
) -> _C.Value:
    unpacked_inputs = symbolic_helper._unpack_list(q_inputs)
    dequantized = [
        symbolic_helper.dequantize_helper(g, input)[0] for input in unpacked_inputs
    ]
    concatenated = g.op("Concat", *dequantized, axis_i=dim)
    return symbolic_helper.quantize_helper(g, concatenated, op_scale, op_zero_point)
venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset12.py
ADDED
@@ -0,0 +1,485 @@
from __future__ import annotations

import functools
import sys
from typing import Optional, Tuple

import torch
from torch._C import _onnx as _C_onnx
from torch.onnx import (
    _type_utils,
    errors,
    symbolic_helper,
    symbolic_opset9 as opset9,
    utils,
)
from torch.onnx._internal import _beartype, jit_utils, registration


# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in README.md

# This file exports ONNX ops for opset 12

__all__ = [
    "argmax",
    "argmin",
    "binary_cross_entropy_with_logits",
    "celu",
    "cross_entropy_loss",
    "dropout",
    "einsum",
    "ge",
    "le",
    "native_dropout",
    "nll_loss",
    "nll_loss2d",
    "nll_loss_nd",
    "outer",
    "pow",
    "tensordot",
    "unfold",
]

_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=12)

@_beartype.beartype
def _einsum_helper(g: jit_utils.GraphContext, equation, tensors):
    if not tensors:
        raise RuntimeError("Einsum inputs are empty.")
    # ONNX does not support bool for Einsum inputs.
    if symbolic_helper._is_bool(tensors[0]):
        tensors = [
            g.op("Cast", tensor, to_i=_C_onnx.TensorProtoDataType.INT64)
            for tensor in tensors
        ]
        return g.op(
            "Cast",
            g.op("Einsum", *tensors, equation_s=equation),
            to_i=_C_onnx.TensorProtoDataType.BOOL,
        )
    else:
        return g.op("Einsum", *tensors, equation_s=equation)


@_onnx_symbolic("aten::einsum")
@symbolic_helper.parse_args("s", "v", "is")
@_beartype.beartype
def einsum(g: jit_utils.GraphContext, equation, tensor_list, path=None):
    tensors = symbolic_helper._unpack_list(tensor_list)
    return _einsum_helper(g, equation, tensors)


@_onnx_symbolic("aten::outer")
@symbolic_helper.parse_args("v", "v")
@_beartype.beartype
def outer(g: jit_utils.GraphContext, input, other):
    # make sure to cast other to self's type
    if _type_utils.JitScalarType.from_value(
        other, _type_utils.JitScalarType.UNDEFINED
    ) != _type_utils.JitScalarType.from_value(input):
        other = g.op(
            "Cast",
            other,
            to_i=_type_utils.JitScalarType.from_value(input).onnx_type(),
        )
    return _einsum_helper(g, "i,j->ij", [input, other])

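# Example (illustrative): `torch.einsum("i,j->ij", a, b)` exports to a single
# ONNX Einsum node; boolean inputs are first cast to INT64 in _einsum_helper
# (and the result cast back to BOOL) because ONNX Einsum does not accept bool
# tensors. `outer` reuses the same helper with the fixed equation "i,j->ij".
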
@_beartype.beartype
def _dropout_returns_masked_input_and_mask(
    g: jit_utils.GraphContext, input: torch._C.Value, p: float, train: bool
) -> Tuple[torch._C.Value, Optional[torch._C.Value]]:
    symbolic_helper.check_training_mode(train, "dropout")
    # In eval mode, dropout is a no-op. That is, if the node's
    # train param is set to False, dropout just returns its input.
    if not train:
        return input, None
    p = g.op("Constant", value_t=torch.tensor(p))
    t = g.op("Constant", value_t=torch.tensor(train, dtype=torch.bool))
    r, mask = g.op("Dropout", input, p, t, outputs=2)
    return r, mask


@_onnx_symbolic("aten::dropout")
@symbolic_helper.parse_args("v", "f", "b")
@_beartype.beartype
def dropout(g: jit_utils.GraphContext, input, p, train):
    masked, _ = _dropout_returns_masked_input_and_mask(g, input, p, train)
    return masked


@_onnx_symbolic("aten::native_dropout")
@symbolic_helper.parse_args("v", "f", "b")
@_beartype.beartype
def native_dropout(g: jit_utils.GraphContext, input, p, train):
    return _dropout_returns_masked_input_and_mask(g, input, p, train)

|
120 |
+
@_onnx_symbolic("aten::nll_loss")
|
121 |
+
@_beartype.beartype
|
122 |
+
def nll_loss(g: jit_utils.GraphContext, self, target, weight, reduction, ignore_index):
|
123 |
+
# none reduction : onnx::Constant[value={0}]
|
124 |
+
# mean reduction : onnx::Constant[value={1}]
|
125 |
+
# sum reduction : onnx::Constant[value={2}]
|
126 |
+
reduction = symbolic_helper._maybe_get_const(reduction, "i")
|
127 |
+
reduction_vals = ["none", "mean", "sum"]
|
128 |
+
reduction = reduction_vals[reduction]
|
129 |
+
|
130 |
+
# in onnx NegativeLogLikelihoodLoss specification, ignore_index is optional without default value.
|
131 |
+
# therefore we need to set ignore_index attribute even if it is not specified (e.g. ignore_index=-100).
|
132 |
+
ignore_index = symbolic_helper._maybe_get_const(ignore_index, "i")
|
133 |
+
if weight.node().mustBeNone():
|
134 |
+
nllloss = g.op(
|
135 |
+
"NegativeLogLikelihoodLoss",
|
136 |
+
self,
|
137 |
+
target,
|
138 |
+
reduction_s=reduction,
|
139 |
+
ignore_index_i=ignore_index,
|
140 |
+
)
|
141 |
+
else:
|
142 |
+
nllloss = g.op(
|
143 |
+
"NegativeLogLikelihoodLoss",
|
144 |
+
self,
|
145 |
+
target,
|
146 |
+
weight,
|
147 |
+
reduction_s=reduction,
|
148 |
+
ignore_index_i=ignore_index,
|
149 |
+
)
|
150 |
+
|
151 |
+
return nllloss
|
152 |
+
|
153 |
+
|
154 |
+
@_onnx_symbolic("aten::nll_loss2d")
|
155 |
+
@_beartype.beartype
|
156 |
+
def nll_loss2d(
|
157 |
+
g: jit_utils.GraphContext, self, target, weight, reduction, ignore_index
|
158 |
+
):
|
159 |
+
return nll_loss(g, self, target, weight, reduction, ignore_index)
|
160 |
+
|
161 |
+
|
162 |
+
@_onnx_symbolic("aten::nll_loss_nd")
|
163 |
+
@_beartype.beartype
|
164 |
+
def nll_loss_nd(
|
165 |
+
g: jit_utils.GraphContext, self, target, weight, reduction, ignore_index
|
166 |
+
):
|
167 |
+
return nll_loss(g, self, target, weight, reduction, ignore_index)
|
168 |
+
|
169 |
+
|
@_onnx_symbolic("aten::cross_entropy_loss")
@_beartype.beartype
def cross_entropy_loss(
    g: jit_utils.GraphContext,
    self,
    target,
    weight,
    reduction,
    ignore_index,
    label_smoothing,
):
    # none reduction : onnx::Constant[value={0}]
    # mean reduction : onnx::Constant[value={1}]
    # sum reduction : onnx::Constant[value={2}]
    reduction = symbolic_helper._maybe_get_const(reduction, "i")
    reduction_vals = ["none", "mean", "sum"]
    reduction = reduction_vals[reduction]

    label_smoothing = symbolic_helper._maybe_get_const(label_smoothing, "f")
    if label_smoothing is not None and label_smoothing > 0.0:
        raise errors.SymbolicValueError(
            "Unsupported: ONNX does not support label_smoothing", self
        )

    # in onnx SoftmaxCrossEntropyLoss specification, ignore_index is optional without default value.
    # therefore we need to set ignore_index attribute even if it is not specified (e.g. ignore_index=-100).
    ignore_index = symbolic_helper._maybe_get_const(ignore_index, "i")
    if weight.node().mustBeNone():
        celoss = g.op(
            "SoftmaxCrossEntropyLoss",
            self,
            target,
            reduction_s=reduction,
            ignore_index_i=ignore_index,
        )
    else:
        celoss = g.op(
            "SoftmaxCrossEntropyLoss",
            self,
            target,
            weight,
            reduction_s=reduction,
            ignore_index_i=ignore_index,
        )

    return celoss

@_onnx_symbolic("aten::binary_cross_entropy_with_logits")
@symbolic_helper.parse_args("v", "v", "v", "v", "i")
@_beartype.beartype
def binary_cross_entropy_with_logits(
    g: jit_utils.GraphContext, input, target, weight, pos_weight, reduction
):
    p = g.op("Constant", value_t=torch.tensor([1]))
    sig_x = opset9.sigmoid(g, input)
    log_sig_x = opset9.log(g, sig_x)
    sub_1_x = opset9.sub(g, p, sig_x)
    sub_1_y = opset9.sub(g, p, target)
    log_1_x = opset9.log(g, sub_1_x)
    if pos_weight is None or symbolic_helper._is_none(pos_weight):
        output = opset9.neg(
            g,
            opset9.add(
                g, opset9.mul(g, target, log_sig_x), opset9.mul(g, sub_1_y, log_1_x)
            ),
        )
    else:
        output = opset9.neg(
            g,
            opset9.add(
                g,
                opset9.mul(g, opset9.mul(g, target, log_sig_x), pos_weight),
                opset9.mul(g, sub_1_y, log_1_x),
            ),
        )

    if weight is not None and not symbolic_helper._is_none(weight):
        output = opset9.mul(g, weight, output)

    reduction = symbolic_helper._maybe_get_const(reduction, "i")
    if reduction == 0:
        return output
    elif reduction == 1:
        return g.op("ReduceMean", output, keepdims_i=0)
    elif reduction == 2:
        return g.op("ReduceSum", output, keepdims_i=0)
    else:
        return symbolic_helper._onnx_unsupported(
            "binary_cross_entropy_with_logits with reduction other than none, mean, or sum",
            input,
        )

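# For reference, a sketch of the math built above:
#     loss = -(pos_weight * y * log(sigmoid(x)) + (1 - y) * log(1 - sigmoid(x)))
# optionally multiplied elementwise by `weight`, then reduced with
# ReduceMean/ReduceSum according to `reduction`.
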
@_onnx_symbolic("aten::celu")
@_beartype.beartype
def celu(g: jit_utils.GraphContext, self, alpha):
    alpha = symbolic_helper._maybe_get_const(alpha, "f")
    # If the input is of type double, cast it to float (ONNX Celu only supports float)
    if (
        _type_utils.JitScalarType.from_value(self, _type_utils.JitScalarType.UNDEFINED)
        == _type_utils.JitScalarType.DOUBLE
    ):
        self = g.op("Cast", self, to_i=_C_onnx.TensorProtoDataType.FLOAT)
        out = g.op("Celu", self, alpha_f=alpha)
        return g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.DOUBLE)

    return g.op("Celu", self, alpha_f=alpha)

@_onnx_symbolic("aten::argmax")
@symbolic_helper.parse_args("v", "v", "b")
@_beartype.beartype
def argmax(
    g: jit_utils.GraphContext,
    input: torch._C.Value,
    dim: torch._C.Value,
    keepdim: bool,
):
    return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMax")


@_onnx_symbolic("aten::argmin")
@symbolic_helper.parse_args("v", "v", "b")
@_beartype.beartype
def argmin(
    g: jit_utils.GraphContext,
    input: torch._C.Value,
    dim: torch._C.Value,
    keepdim: bool,
):
    return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMin")

@_onnx_symbolic("aten::pow")
@_beartype.beartype
def pow(g: jit_utils.GraphContext, self, exponent):
    return g.op("Pow", self, exponent)


@_onnx_symbolic("aten::ge")
@_beartype.beartype
def ge(g: jit_utils.GraphContext, input, other):
    return g.op("GreaterOrEqual", input, other)


@_onnx_symbolic("aten::le")
@_beartype.beartype
def le(g: jit_utils.GraphContext, input, other):
    return g.op("LessOrEqual", input, other)

@_onnx_symbolic("aten::unfold")
@symbolic_helper.parse_args("v", "i", "v", "v")
@_beartype.beartype
def unfold(g: jit_utils.GraphContext, input, dimension, size, step):
    const_size = symbolic_helper._maybe_get_const(size, "i")
    const_step = symbolic_helper._maybe_get_const(step, "i")
    if not symbolic_helper._is_value(const_size) and not symbolic_helper._is_value(
        const_step
    ):
        return opset9.unfold(g, input, dimension, const_size, const_step)
    if symbolic_helper.is_caffe2_aten_fallback():
        return g.at("unfold", input, dimension_i=dimension, size_i=size, step_i=step)

    sizedim = symbolic_helper._get_tensor_dim_size(input, dimension)
    if sizedim is not None:
        low_start = g.op("Constant", value_t=torch.tensor(0))
        low_end = g.op("Constant", value_t=torch.tensor(sizedim))
        hi_end = g.op("Constant", value_t=torch.tensor(sizedim + 1))
        low_indices = g.op("Range", low_start, low_end, step)
        hi_indices = g.op("Range", size, hi_end, step)

        low_size = symbolic_helper._size_helper(
            g, low_indices, g.op("Constant", value_t=torch.tensor(0))
        )
        hi_size = symbolic_helper._size_helper(
            g, hi_indices, g.op("Constant", value_t=torch.tensor(0))
        )

        ndim = symbolic_helper._get_tensor_rank(input)
        assert ndim is not None
        perm = list(range(0, ndim))
        perm.append(perm.pop(dimension))

        unsqueeze_list = []
        loop_condition = g.op("Constant", value_t=torch.tensor(1))
        loop_condition = g.op(
            "Cast", loop_condition, to_i=_C_onnx.TensorProtoDataType.BOOL
        )
        loop_len = g.op("Min", low_size, hi_size)

        loop, (loop_context,), _ = jit_utils.add_op_with_blocks(
            g, "Loop", loop_len, loop_condition, n_blocks=1
        )

        loop_block = loop_context.block
        block_input_iter = utils._add_input_to_block(loop_block)
        # FIXME(justinchuby): cond is unused?
        cond = utils._add_input_to_block(loop_block)

        starts = loop_context.op("Gather", low_indices, block_input_iter)
        ends = loop_context.op("Gather", hi_indices, block_input_iter)
        axes = loop_context.op("Constant", value_t=torch.tensor([2]))
        starts = symbolic_helper._unsqueeze_helper(loop_context, starts, [0])
        ends = symbolic_helper._unsqueeze_helper(loop_context, ends, [0])
        stack = loop_context.op("Slice", input, starts, ends, axes)

        unsqueeze = symbolic_helper._unsqueeze_helper(
            loop_context, loop_context.op("Transpose", stack, perm_i=perm), [dimension]
        )
        unsqueeze_list.append(unsqueeze)
        concat = loop_context.op("Concat", *unsqueeze_list, axis_i=0)

        cond_out = loop_context.op(
            "Cast", loop_condition, to_i=_C_onnx.TensorProtoDataType.BOOL
        )
        utils._add_output_to_block(loop_block, cond_out)
        utils._add_output_to_block(loop_block, concat)

        loop_output = loop.node().output()
        perm = [0, 1, 2, 3, 4]
        perm[0], perm[dimension + 1] = perm[dimension + 1], perm[0]
        transpose = g.op("Transpose", loop_output, perm_i=perm)
        squeeze = symbolic_helper._squeeze_helper(g, transpose, [0])

        return squeeze

    return symbolic_helper._unimplemented("Unfold", "input size not accessible")

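# Example (illustrative) of the semantics implemented above:
#     torch.arange(5).unfold(0, 2, 1)
#     -> tensor([[0, 1], [1, 2], [2, 3], [3, 4]])
# The Loop body gathers one such sliding window per iteration via Slice.
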
@_onnx_symbolic("aten::tensordot")
@symbolic_helper.parse_args("v", "v", "is", "is", "v")
@_beartype.beartype
def tensordot(g: jit_utils.GraphContext, input_a, input_b, dims_a, dims_b, out=None):
    if out is not None:
        symbolic_helper._unimplemented(
            "Tensordot", "Out parameter is not supported for tensordot."
        )

    dim_count_a = symbolic_helper._get_tensor_rank(input_a)
    if dim_count_a is None:
        raise errors.SymbolicValueError(
            "Unsupported: ONNX export of tensordot for tensor(input_a) of unknown rank.",
            input_a,
        )

    dim_count_b = symbolic_helper._get_tensor_rank(input_b)
    if dim_count_b is None:
        raise errors.SymbolicValueError(
            "Unsupported: ONNX export of tensordot for tensor(input_b) of unknown rank.",
            input_b,
        )

    dims_a = [
        (dims_a[i] + dim_count_a) if (dims_a[i] < 0) else dims_a[i]
        for i in range(len(dims_a))
    ]
    dims_b = [
        (dims_b[i] + dim_count_b) if (dims_b[i] < 0) else dims_b[i]
        for i in range(len(dims_b))
    ]

    left_dims_a = [i for i in range(dim_count_a) if (i not in dims_a)]
    left_dims_b = [i for i in range(dim_count_b) if (i not in dims_b)]

    new_input_a = opset9.permute(g, input_a, left_dims_a + dims_a)
    new_input_b = opset9.permute(g, input_b, dims_b + left_dims_b)

    input_shape = g.op("Shape", new_input_a)
    left_sizes_a = symbolic_helper._slice_helper(
        g, input_shape, axes=[0], starts=[0], ends=[len(left_dims_a)]
    )
    shape_sizes = [
        left_sizes_a,
        g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)),
    ]
    output_a = opset9._reshape_from_tensor(g, new_input_a, shape_sizes)

    input_shape = g.op("Shape", output_a)
    slices = symbolic_helper._slice_helper(
        g, input_shape, axes=[0], starts=[-1], ends=[sys.maxsize]
    )
    shape_sizes = [
        g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)),
        slices,
    ]
    output_a = opset9._reshape_from_tensor(g, new_input_a, shape_sizes)

    input_shape = g.op("Shape", new_input_b)
    left_sizes_b = symbolic_helper._slice_helper(
        g, input_shape, axes=[0], starts=[len(dims_b)], ends=[sys.maxsize]
    )
    slices = symbolic_helper._slice_helper(
        g, input_shape, axes=[0], starts=[0], ends=[len(dims_b)]
    )
    shape_sizes = [
        slices,
        g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)),
    ]
    output_b = opset9._reshape_from_tensor(g, new_input_b, shape_sizes)

    input_shape = g.op("Shape", output_b)
    slices = symbolic_helper._slice_helper(
        g, input_shape, axes=[0], starts=[-1], ends=[sys.maxsize]
    )
    shape_sizes = [
        g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)),
        slices,
    ]
    output_b = opset9._reshape_from_tensor(g, new_input_b, shape_sizes)

    output = einsum(g, "ij,jk->ik", g.op("prim::ListConstruct", *[output_a, output_b]))

    shape_sizes = [left_sizes_a, left_sizes_b]
    return opset9._reshape_from_tensor(g, output, shape_sizes)
venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset14.py
ADDED
@@ -0,0 +1,289 @@
"""This file exports ONNX ops for opset 14.

Note [ONNX operators that are added/updated in opset 14]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
New operators:
    HardSwish, Trilu

Updated operators:
    Reshape
    Add, Sub, Mul, Div
    GRU, LSTM, RNN
    BatchNorm, Cumsum, Relu
"""

# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in README.md
from __future__ import annotations

import functools
from typing import Optional

import torch
from torch.onnx import _constants, _type_utils, symbolic_helper
from torch.onnx._globals import GLOBALS
from torch.onnx._internal import _beartype, jit_utils, registration

__all__ = [
    "hardswish",
    "tril",
    "triu",
    "reshape",
    "batch_norm",
    "quantized_hardswish",
    "scaled_dot_product_attention",
]

_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=14)

@_onnx_symbolic("aten::hardswish")
@symbolic_helper.parse_args("v")
@_beartype.beartype
def hardswish(g: jit_utils.GraphContext, self):
    return g.op("HardSwish", self)


@_onnx_symbolic("aten::tril")
@_beartype.beartype
def tril(g: jit_utils.GraphContext, self, diagonal, out=None):
    return g.op("Trilu", self, diagonal, upper_i=0)


@_onnx_symbolic("aten::triu")
@_beartype.beartype
def triu(g: jit_utils.GraphContext, self, diagonal, out=None):
    return g.op("Trilu", self, diagonal, upper_i=1)

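# Example (illustrative): both symbolics map directly onto ONNX Trilu;
#     torch.ones(3, 3).tril(0) keeps the lower triangle (upper_i=0),
#     torch.ones(3, 3).triu(0) keeps the upper triangle (upper_i=1),
# with `diagonal` passed through as Trilu's optional `k` input.
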
@_onnx_symbolic("aten::reshape")
@symbolic_helper.quantized_args(True)
@symbolic_helper.parse_args("v", "v")
@_beartype.beartype
def reshape(g: jit_utils.GraphContext, self, shape):
    # NOTE: Due to bug in ORT https://github.com/microsoft/onnxruntime/issues/10664
    #       Reshape export cannot utilize the new allowzero attribute introduced in opset 14.
    return symbolic_helper._reshape_helper(g, self, shape, allowzero=0)

@_onnx_symbolic("aten::batch_norm")
@symbolic_helper.parse_args("v", "v", "v", "v", "v", "i", "f", "f", "i")
@_beartype.beartype
def batch_norm(
    g: jit_utils.GraphContext,
    input,
    weight,
    bias,
    running_mean,
    running_var,
    training,
    momentum,
    eps,
    cudnn_enabled,
):
    if (
        torch.is_autocast_enabled()
        and not symbolic_helper.args_have_same_dtype(
            [input, weight, bias, running_mean, running_var]
        )
        and GLOBALS.export_onnx_opset_version < 15
    ):
        return symbolic_helper._onnx_opset_unsupported_detailed(
            "BatchNormalization",
            14,
            15,
            "All input tensors must have the same `dtype`."
            " Turn off Autocast or export using opset version 15.",
            input,
        )

    symbolic_helper.check_training_mode(training, "batch_norm")
    weight, bias, running_mean, running_var = symbolic_helper._batchnorm_helper(
        g, input, weight, bias, running_mean, running_var
    )
    out = g.op(
        "BatchNormalization",
        input,
        weight,
        bias,
        running_mean,
        running_var,
        epsilon_f=eps,
        momentum_f=1 - momentum,
        training_mode_i=0 if not training else 1,
        outputs=1 if not training else 3,
    )
    if not training:
        return out
    else:
        res, new_running_mean, new_running_var = out
        new_running_mean.setType(running_mean.type())
        new_running_var.setType(running_var.type())
        return res


@_onnx_symbolic("quantized::hardswish")
@_beartype.beartype
def quantized_hardswish(g: jit_utils.GraphContext, x, op_scale, op_zero_point):
    x, _, _, _ = symbolic_helper.dequantize_helper(g, x)

    output = hardswish(g, x)

    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)

|
135 |
+
# Ported from
|
136 |
+
# https://github.com/microsoft/onnxscript/blob/6b1b81700b4523f31d8c6d3321e5d8ef5d42b764/onnxscript/function_libs/torch_aten/ops/nn.py#L1504
|
137 |
+
# aten_scaled_dot_product_attention
|
138 |
+
# NOTE: Need op.Trilu
|
139 |
+
@_onnx_symbolic("aten::scaled_dot_product_attention")
|
140 |
+
@symbolic_helper.parse_args("v", "v", "v", "v", "f", "b", "v")
|
141 |
+
@_beartype.beartype
|
142 |
+
def scaled_dot_product_attention(
|
143 |
+
g: jit_utils.GraphContext,
|
144 |
+
query: torch._C.Value,
|
145 |
+
key: torch._C.Value,
|
146 |
+
value: torch._C.Value,
|
147 |
+
attn_mask: Optional[torch._C.Value] = None,
|
148 |
+
dropout_p: float = 0.0,
|
149 |
+
is_causal: bool = False,
|
150 |
+
scale: Optional[torch._C.Value] = None,
|
151 |
+
):
|
152 |
+
assert (not is_causal) or (
|
153 |
+
is_causal and symbolic_helper._is_none(attn_mask)
|
154 |
+
), "is_causal and attn_mask cannot be set at the same time"
|
155 |
+
|
156 |
+
scale = symbolic_helper._maybe_get_const(scale, "f")
|
157 |
+
if symbolic_helper._is_none(scale):
|
158 |
+
scale = _attention_scale(g, query)
|
159 |
+
|
160 |
+
if is_causal:
|
161 |
+
attn_mask = _causal_attention_mask(g, query, key)
|
162 |
+
|
163 |
+
# Swap the last two axes of key
|
164 |
+
# NOTE: onnx-script has different logic here, because the attribute perms in
|
165 |
+
# transpose needs list of ints
|
166 |
+
key_shape_builtin = symbolic_helper._get_tensor_rank(key)
|
167 |
+
key_transposed_axes = list(range(key_shape_builtin))
|
168 |
+
key_transposed_axes[-1], key_transposed_axes[-2] = (
|
169 |
+
key_transposed_axes[-2],
|
170 |
+
key_transposed_axes[-1],
|
171 |
+
)
|
172 |
+
key_transposed = g.op("Transpose", key, perm_i=key_transposed_axes)
|
173 |
+
|
174 |
+
# https://github.com/pytorch/pytorch/blob/12da0c70378b5be9135c6fda62a9863bce4a4818/aten/src/ATen/native/transformers/attention.cpp#L653
|
175 |
+
# Scale q, k before matmul for stability see https://tinyurl.com/sudb9s96 for math
|
176 |
+
query_scaled = g.op("Mul", query, g.op("Sqrt", scale))
|
177 |
+
key_transposed_scaled = g.op("Mul", key_transposed, g.op("Sqrt", scale))
|
178 |
+
mul_qk = g.op("MatMul", query_scaled, key_transposed_scaled)
|
179 |
+
|
180 |
+
if symbolic_helper._is_none(attn_mask):
|
181 |
+
mul_qk_add = mul_qk
|
182 |
+
elif (
|
183 |
+
_type_utils.JitScalarType.from_value(attn_mask)
|
184 |
+
== _type_utils.JitScalarType.BOOL
|
185 |
+
):
|
186 |
+
# Turn the Boolean mask to float: attn_mask.masked_fill(not attn_mask, -float('inf'))
|
187 |
+
const_zero = g.op("Constant", value_t=torch.tensor([0.0]))
|
188 |
+
const_neg_inf = g.op("Constant", value_t=torch.tensor([-float("inf")]))
|
189 |
+
attn_mask = g.op("Where", attn_mask, const_zero, const_neg_inf)
|
190 |
+
mul_qk_add = g.op("Add", mul_qk, attn_mask)
|
191 |
+
elif _type_utils.JitScalarType.from_value(attn_mask) in (
|
192 |
+
_type_utils.JitScalarType.FLOAT,
|
193 |
+
_type_utils.JitScalarType.HALF,
|
194 |
+
_type_utils.JitScalarType.BFLOAT16,
|
195 |
+
):
|
196 |
+
mul_qk_add = g.op("Add", mul_qk, attn_mask)
|
197 |
+
else:
|
198 |
+
raise ValueError(
|
199 |
+
f"Unsupported type for attn_mask: {_type_utils.JitScalarType.from_value(attn_mask)}"
|
200 |
+
)
|
201 |
+
|
202 |
+
attn_weight = g.op("Softmax", mul_qk_add, axis_i=-1)
|
203 |
+
|
204 |
+
if dropout_p != 0:
|
205 |
+
attn_weight = g.op(
|
206 |
+
"Dropout",
|
207 |
+
attn_weight,
|
208 |
+
g.op("Constant", value_t=torch.tensor(dropout_p, dtype=torch.float)),
|
209 |
+
)
|
210 |
+
|
211 |
+
return g.op("MatMul", attn_weight, value)
|
212 |
+
|
213 |
+
|
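# Sketch of the math implemented above (for orientation, not normative):
#     attn = softmax((Q * sqrt(scale)) @ (K^T * sqrt(scale)) + mask) @ V
# which equals softmax(scale * Q @ K^T + mask) @ V; sqrt(scale) is folded into
# both operands before the MatMul for numerical stability, as the linked
# attention.cpp comment describes.
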
@_beartype.beartype
def _attention_scale(
    g: jit_utils.GraphContext, query: torch._C.Value
) -> torch._C.Value:
    """Calculate the scale factor for the attention result.

    Args:
        query: Tensor of shape [..., L, E]

    Returns:
        Scalar scale factor := 1 / math.sqrt(query.size(-1))
    """
    query_shape = g.op("Shape", query)
    query_shape_last = g.op(
        "Slice",
        query_shape,
        g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)),
        g.op(
            "Constant", value_t=torch.tensor([_constants.INT64_MAX], dtype=torch.int64)
        ),
    )
    embedding_size = g.op(
        "Cast",
        query_shape_last,
        to_i=_type_utils.JitScalarType.from_value(query).onnx_type(),
    )
    const_one = g.op("Constant", value_t=torch.tensor([1.0], dtype=torch.float))
    scale = g.op("Div", const_one, g.op("Sqrt", embedding_size))
    # Add a Cast to convert the scale back to original type
    scale = g.op(
        "Cast",
        scale,
        to_i=_type_utils.JitScalarType.from_value(query).onnx_type(),
    )
    return scale

@_beartype.beartype
def _causal_attention_mask(
    g: jit_utils.GraphContext, query: torch._C.Value, key: torch._C.Value
) -> torch._C.Value:
    """Create a causal mask for the given query and key tensors.

    Equivalent to::
        mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0)
        attn_mask = torch.zeros(L, S, dtype=torch.float)
        attn_mask = attn_mask.masked_fill(not mask, -float('inf'))

    Args:
        query: Tensor of shape [..., L, E]
        key: Tensor of shape [..., S, E]

    Returns:
        Tensor of shape [L, S]
    """

    query_shape = g.op("Shape", query)
    key_shape = g.op("Shape", key)

    last_idx = g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64))
    second_last_idx = g.op("Constant", value_t=torch.tensor([-2], dtype=torch.int64))
    target_length = g.op("Slice", query_shape, second_last_idx, last_idx)
    source_length = g.op("Slice", key_shape, second_last_idx, last_idx)
    # attn_mask = torch.ones(L, S) := {
    size = g.op("Concat", target_length, source_length, axis_i=0)
    const_one = g.op("Constant", value_t=torch.tensor([1.0]))
    attn_mask = g.op("Expand", const_one, size)
    # }
    attn_mask = g.op("Trilu", attn_mask, upper_i=0)
    # The causal mask has 0s in the lower triangle and -inf in the upper triangle.
    const_zero = g.op("Constant", value_t=torch.tensor([0.0]))
    const_neg_inf = g.op("Constant", value_t=torch.tensor([-float("inf")]))
    attn_mask = g.op(
        "Where", g.op("Equal", attn_mask, const_zero), const_neg_inf, const_zero
    )
    return attn_mask
venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset8.py
ADDED
@@ -0,0 +1,470 @@
"""
Note [ONNX operators that are added/updated from opset 8 to opset 9]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
New operators:
    Compress
    ConstantOfShape
    EyeLike
    MaxUnpool
    OneHot
    Sinh
    Cosh
    Asinh
    Acosh
    Atanh
    Shrink
    IsNaN
    Sign
    Erf
    Scatter
    Where
    NonZero
    TfIdfVectorizer
    MeanVarianceNormalization

Updated operators:
    BatchNormalization: removed spatial attribute.
    Greater, Less, Constant, MatMul, PRelu, Gemm, Flatten: more data types{integers} supported.
    Cast: more data types{string} supported.
    Upsample: moved scales from attribute to input.
    Scan
"""

import functools
import warnings

import torch
from torch._C import _onnx as _C_onnx
from torch.onnx import _type_utils, errors, symbolic_helper, symbolic_opset9 as opset9
from torch.onnx._internal import jit_utils, registration

_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=8)

block_listed_operators = (
    "nonzero",
    "where",
    "scatter",
    "scatter_add",
    "erf",
    "sign",
    "isnan",
    "gather",
    "arange",
    "masked_fill",
    "index_fill",
    "index_copy",
    "repeat_interleave",
    "any",
    "all",
)

for block_listed_op in block_listed_operators:
    _onnx_symbolic(f"aten::{block_listed_op}")(
        symbolic_helper._block_list_in_opset(block_listed_op)
    )

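# Example (illustrative): after this loop, exporting a graph that contains
# e.g. aten::nonzero under opset 8 fails with an explicit "unsupported in
# opset 8" error from symbolic_helper._block_list_in_opset rather than
# silently producing an invalid model.
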
def _apply_params(*args, **kwargs):
    """Returns a decorator that calls the decorated (higher-order) function with the given parameters."""

    def _apply(fn):
        return fn(*args, **kwargs)

    return _apply

@_onnx_symbolic(
    "aten::upsample_nearest1d",
    decorate=[_apply_params("upsample_nearest1d", 3, "nearest")],
)
@_onnx_symbolic(
    "aten::upsample_nearest2d",
    decorate=[_apply_params("upsample_nearest2d", 4, "nearest")],
)
@_onnx_symbolic(
    "aten::upsample_nearest3d",
    decorate=[_apply_params("upsample_nearest3d", 5, "nearest")],
)
@_onnx_symbolic(
    "aten::upsample_linear1d",
    decorate=[_apply_params("upsample_linear1d", 3, "linear")],
)
@_onnx_symbolic(
    "aten::upsample_bilinear2d",
    decorate=[_apply_params("upsample_bilinear2d", 4, "linear")],
)
@_onnx_symbolic(
    "aten::upsample_trilinear3d",
    decorate=[_apply_params("upsample_trilinear3d", 5, "linear")],
)
def _interpolate(name, dim, interpolate_mode):
    def symbolic_fn(g, input, output_size, *args):
        scales, align_corners = symbolic_helper._get_interpolate_attributes(
            g, interpolate_mode, args
        )
        symbolic_helper._interpolate_warning(interpolate_mode)
        align_corners = symbolic_helper._maybe_get_scalar(align_corners)
        if align_corners:
            return symbolic_helper._unimplemented(name, "align_corners == True", input)
        output_size = symbolic_helper._maybe_get_const(output_size, "is")
        if symbolic_helper._is_value(output_size):
            return symbolic_helper._unimplemented(
                name, "torch._C.Value (output_size) indexing"
            )
        if scales is None:
            scales = [
                1.0
                if i < 2
                else float(output_size[-(dim - i)])
                / float(input.type().sizes()[-(dim - i)])
                for i in range(0, dim)
            ]
        return g.op("Upsample", input, mode_s=interpolate_mode, scales_f=scales)

    return symbolic_fn

+
@_onnx_symbolic("aten::__interpolate")
|
128 |
+
def __interpolate(
|
129 |
+
g: jit_utils.GraphContext,
|
130 |
+
input,
|
131 |
+
size,
|
132 |
+
scale_factor,
|
133 |
+
mode,
|
134 |
+
align_corners,
|
135 |
+
recompute_scale_factor,
|
136 |
+
antialias,
|
137 |
+
):
|
138 |
+
align_corners = symbolic_helper._maybe_get_const(align_corners, "b")
|
139 |
+
if not symbolic_helper._is_none(align_corners) and align_corners:
|
140 |
+
return symbolic_helper._unimplemented("interpolate", "align_corners == True")
|
141 |
+
|
142 |
+
if not symbolic_helper._is_none(scale_factor) and symbolic_helper._is_value(
|
143 |
+
scale_factor
|
144 |
+
):
|
145 |
+
return symbolic_helper._unimplemented(
|
146 |
+
"interpolate", "dynamic scales in opset 8"
|
147 |
+
)
|
148 |
+
|
149 |
+
if not symbolic_helper._is_none(size) and symbolic_helper._is_value(size):
|
150 |
+
return symbolic_helper._unimplemented("interpolate", "dynamic size in opset 8")
|
151 |
+
|
152 |
+
scales, mode = symbolic_helper._interpolate_get_scales_and_mode(
|
153 |
+
g, input, size, scale_factor, mode, align_corners
|
154 |
+
)
|
155 |
+
return g.op("Upsample", input, mode_s=mode, scales_f=scales)
|
156 |
+
|
157 |
+
|
158 |
+


# NOTE: We should create a wrapper for this kind of operation, after resolving the shape/type propagation
# issue for "cast" operators. Some symbolic functions depend on shape information of the input tensor,
# which is lost after casting.
def _try_cast_integer_to_float(g: jit_utils.GraphContext, *args):
    floating_scalar_types = {
        _type_utils.JitScalarType.HALF,
        _type_utils.JitScalarType.FLOAT,
        _type_utils.JitScalarType.DOUBLE,
    }
    old_type = None
    # Cast the input tensor to Float if its scalarType is known and is not a floating type.
    # If casting is performed, return the old scalarType; otherwise return None.
    arg0_type = _type_utils.JitScalarType.from_value(
        args[0], _type_utils.JitScalarType.UNDEFINED
    )
    if arg0_type != _type_utils.JitScalarType.UNDEFINED:
        old_type = arg0_type
        if old_type not in floating_scalar_types:
            old_type = old_type.scalar_name()
            args = tuple(
                g.op("Cast", arg, to_i=_C_onnx.TensorProtoDataType.FLOAT)
                for arg in args
            )
        else:
            return (None,) + args
    else:
        warnings.warn(
            "Only floating datatype is supported for these operators: "
            "{Greater, Less, MatMul, PRelu, Gemm, Flatten}. This might cause "
            "the onnx model to be incorrect, if inputs have integer datatypes."
        )
    return (old_type,) + args


def _cast_to_type(g: jit_utils.GraphContext, input, to_type):
    if to_type is None:
        return input
    return getattr(opset9, f"_cast_{to_type}")(g, input, False)


def _comparison_operator(g: jit_utils.GraphContext, input, other, op_name):
    other = symbolic_helper._maybe_get_scalar(other)
    other = symbolic_helper._if_scalar_type_as(other, input)
    _, input, other = _try_cast_integer_to_float(g, input, other)
    return g.op(op_name, input, other)
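

# --- Editorial example: the cast -> compute -> cast-back pattern above, shown
# in eager PyTorch (illustrative helper; `torch` is already imported here).
def _demo_matmul_with_float_fallback(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Opset 8's MatMul cannot take integer inputs, so compute in float and
    # restore the original integer dtype afterwards.
    if not a.dtype.is_floating_point:
        return (a.float() @ b.float()).to(a.dtype)
    return a @ b

# _demo_matmul_with_float_fallback(torch.ones(2, 3, dtype=torch.int32),
#                                  torch.ones(3, 2, dtype=torch.int32)).dtype
# -> torch.int32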


# NOTE: For symbolics {gt, lt, bmm, matmul, prelu, mm, addmm, view, flatten},
# integer input types are not supported in opset 8. Cast to float if possible.
@_onnx_symbolic("aten::gt")
def gt(g: jit_utils.GraphContext, input, other):
    return _comparison_operator(g, input, other, "Greater")


@_onnx_symbolic("aten::lt")
def lt(g: jit_utils.GraphContext, input, other):
    return _comparison_operator(g, input, other, "Less")
@_onnx_symbolic("aten::bmm")
|
218 |
+
def bmm(g: jit_utils.GraphContext, self, other):
|
219 |
+
if symbolic_helper._try_get_scalar_type(self):
|
220 |
+
old_type, self, other = _try_cast_integer_to_float(g, self, other)
|
221 |
+
return _cast_to_type(g, g.op("MatMul", self, other), old_type)
|
222 |
+
else:
|
223 |
+
return g.op("MatMul", self, other)
|
224 |
+
|
225 |
+
|
226 |
+
@_onnx_symbolic("aten::matmul")
|
227 |
+
def matmul(g: jit_utils.GraphContext, self, other):
|
228 |
+
return bmm(g, self, other)
|
229 |
+
|
230 |
+
|
231 |
+
@_onnx_symbolic("aten::prelu")
|
232 |
+
def prelu(g: jit_utils.GraphContext, self, weight):
|
233 |
+
self_rank = symbolic_helper._get_tensor_rank(self)
|
234 |
+
weight_sizes = symbolic_helper._get_tensor_sizes(weight)
|
235 |
+
if self_rank is not None and self_rank > 2:
|
236 |
+
weight = g.op("Unsqueeze", weight, axes_i=list(range(1, self_rank - 1)))
|
237 |
+
elif self_rank == 0 and weight_sizes == [1]:
|
238 |
+
# self and weight are both scalar but weight has rank == 1, squeeze weight.
|
239 |
+
weight = symbolic_helper._squeeze_helper(g, weight, [0])
|
240 |
+
if symbolic_helper._try_get_scalar_type(self):
|
241 |
+
old_type, self, weight = _try_cast_integer_to_float(g, self, weight)
|
242 |
+
return _cast_to_type(g, g.op("PRelu", self, weight), old_type)
|
243 |
+
else:
|
244 |
+
return g.op("PRelu", self, weight)
|
245 |
+
|
246 |
+
|
247 |
+
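

# --- Editorial example: the weight unsqueeze above, checked with eager shapes.
# For a rank-4 input, axes [1, 2] turn a per-channel slope (C,) into (C, 1, 1),
# which broadcasts against (N, C, H, W). Demo values are illustrative.
_demo_w = torch.randn(3)          # per-channel PRelu slope, shape (C,)
for _demo_ax in range(1, 4 - 1):  # self_rank = 4 -> axes [1, 2]
    _demo_w = _demo_w.unsqueeze(_demo_ax)
# _demo_w.shape -> torch.Size([3, 1, 1])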
@_onnx_symbolic("aten::mm")
|
248 |
+
def mm(g: jit_utils.GraphContext, self, other):
|
249 |
+
# Create a dummy C tensor. Only needed for API purposes, the value is
|
250 |
+
# since beta = 0
|
251 |
+
scalar_type = symbolic_helper._try_get_scalar_type(self, other)
|
252 |
+
if scalar_type is None:
|
253 |
+
raise errors.SymbolicValueError(
|
254 |
+
"mm can only operate on tensors with known types", self
|
255 |
+
)
|
256 |
+
zero_constant = g.op(
|
257 |
+
"Constant",
|
258 |
+
value_t=torch.tensor([0], dtype=scalar_type.dtype()),
|
259 |
+
)
|
260 |
+
|
261 |
+
if symbolic_helper._try_get_scalar_type(self):
|
262 |
+
old_type, self, other, zero_constant = _try_cast_integer_to_float(
|
263 |
+
g, self, other, zero_constant
|
264 |
+
)
|
265 |
+
return _cast_to_type(
|
266 |
+
g,
|
267 |
+
g.op("Gemm", self, other, zero_constant, beta_f=0.0, alpha_f=1.0),
|
268 |
+
old_type,
|
269 |
+
)
|
270 |
+
return g.op("Gemm", self, other, zero_constant, beta_f=0.0, alpha_f=1.0)
|
271 |
+
|
272 |
+
|
273 |
+
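

# --- Editorial example: why the dummy C tensor is safe. With beta = 0, Gemm
# computes alpha * A @ B and ignores C; torch.addmm mirrors those semantics.
_demo_A, _demo_B = torch.randn(2, 3), torch.randn(3, 4)
_demo_C = torch.zeros(1)  # dummy bias; value irrelevant because beta = 0
assert torch.allclose(
    torch.addmm(_demo_C, _demo_A, _demo_B, beta=0.0, alpha=1.0),
    _demo_A @ _demo_B,
)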
@_onnx_symbolic("aten::addmm")
|
274 |
+
@symbolic_helper.parse_args("v", "v", "v", "t", "t")
|
275 |
+
def addmm(g: jit_utils.GraphContext, self, mat1, mat2, beta, alpha):
|
276 |
+
if symbolic_helper._try_get_scalar_type(self):
|
277 |
+
old_type, self, mat1, mat2 = _try_cast_integer_to_float(g, self, mat1, mat2)
|
278 |
+
return _cast_to_type(
|
279 |
+
g,
|
280 |
+
g.op(
|
281 |
+
"Gemm",
|
282 |
+
mat1,
|
283 |
+
mat2,
|
284 |
+
self,
|
285 |
+
beta_f=symbolic_helper._scalar(beta),
|
286 |
+
alpha_f=symbolic_helper._scalar(alpha),
|
287 |
+
),
|
288 |
+
old_type,
|
289 |
+
)
|
290 |
+
else:
|
291 |
+
return g.op(
|
292 |
+
"Gemm",
|
293 |
+
mat1,
|
294 |
+
mat2,
|
295 |
+
self,
|
296 |
+
beta_f=symbolic_helper._scalar(beta),
|
297 |
+
alpha_f=symbolic_helper._scalar(alpha),
|
298 |
+
)
|
299 |
+
|
300 |
+
|
301 |
+
@_onnx_symbolic("aten::flatten")
|
302 |
+
def flatten(g: jit_utils.GraphContext, input, start_dim, end_dim):
|
303 |
+
start_dim_i = symbolic_helper._get_const(start_dim, "i", "start_dim")
|
304 |
+
end_dim_i = symbolic_helper._get_const(end_dim, "i", "end_dim")
|
305 |
+
|
306 |
+
dim = input.type().dim()
|
307 |
+
if end_dim_i < 0:
|
308 |
+
end_dim_i = dim + end_dim_i
|
309 |
+
# use ONNX's Flatten operator for cases where the output shape is 2D
|
310 |
+
if start_dim_i == 1 and end_dim_i == dim - 1:
|
311 |
+
if symbolic_helper._try_get_scalar_type(input):
|
312 |
+
old_type, input = _try_cast_integer_to_float(g, input)
|
313 |
+
return _cast_to_type(
|
314 |
+
g, g.op("Flatten", input, axis_i=start_dim_i), old_type
|
315 |
+
)
|
316 |
+
else:
|
317 |
+
return g.op("Flatten", input, axis_i=start_dim_i)
|
318 |
+
if start_dim_i == 0 and end_dim_i == dim - 2:
|
319 |
+
if symbolic_helper._try_get_scalar_type(input):
|
320 |
+
old_type, input = _try_cast_integer_to_float(g, input)
|
321 |
+
return _cast_to_type(
|
322 |
+
g, g.op("Flatten", input, axis_i=end_dim_i + 1), old_type
|
323 |
+
)
|
324 |
+
else:
|
325 |
+
return g.op("Flatten", input, axis_i=end_dim_i + 1)
|
326 |
+
|
327 |
+
return opset9.flatten(g, input, start_dim, end_dim)
|
328 |
+
|
329 |
+
|
330 |
+
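

# --- Editorial example: the two 2-D special cases above, in eager terms.
# Flattening dims 1..dim-1 or 0..dim-2 of a rank-4 tensor gives a 2-D result,
# matching ONNX Flatten with axis=1 and axis=dim-1 respectively.
_demo_x = torch.randn(2, 3, 4, 5)
# torch.flatten(_demo_x, 1, -1).shape -> torch.Size([2, 60])  (Flatten axis=1)
# torch.flatten(_demo_x, 0, 2).shape  -> torch.Size([24, 5])  (Flatten axis=3)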


def _constant_fill(g: jit_utils.GraphContext, sizes, dtype: int, const_value):
    if dtype is None:
        scalar_type = _type_utils.JitScalarType.FLOAT
    else:
        scalar_type = _type_utils.JitScalarType(dtype)
    if not scalar_type.dtype().is_floating_point:
        result = g.op(
            "ConstantFill",
            sizes,
            dtype_i=_type_utils.JitScalarType.FLOAT.onnx_type(),
            input_as_shape_i=1,
            value_f=const_value,
        )
        return g.op("Cast", result, to_i=scalar_type.onnx_type())
    else:
        return g.op(
            "ConstantFill",
            sizes,
            dtype_i=scalar_type.onnx_type(),
            input_as_shape_i=1,
            value_f=const_value,
        )
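

# --- Editorial note as code: non-floating fills above are produced as FLOAT and
# then Cast back, presumably because ConstantFill's value_f attribute is a float.
# Illustrative helper showing the routing decision only:
def _demo_needs_cast_back(dtype: torch.dtype) -> bool:
    return not dtype.is_floating_point

# _demo_needs_cast_back(torch.int64)   -> True  (fill as float, then Cast)
# _demo_needs_cast_back(torch.float32) -> False (fill directly)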
@_onnx_symbolic("aten::empty")
|
355 |
+
@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v")
|
356 |
+
def empty(
|
357 |
+
g: jit_utils.GraphContext,
|
358 |
+
sizes,
|
359 |
+
dtype,
|
360 |
+
layout,
|
361 |
+
device,
|
362 |
+
pin_memory=False,
|
363 |
+
memory_format=None,
|
364 |
+
):
|
365 |
+
return zeros(g, sizes, dtype, layout, device, pin_memory)
|
366 |
+
|
367 |
+
|
368 |
+
@_onnx_symbolic("aten::empty_like")
|
369 |
+
@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v")
|
370 |
+
def empty_like(
|
371 |
+
g: jit_utils.GraphContext,
|
372 |
+
input,
|
373 |
+
dtype,
|
374 |
+
layout,
|
375 |
+
device,
|
376 |
+
pin_memory=False,
|
377 |
+
memory_format=None,
|
378 |
+
):
|
379 |
+
return zeros_like(g, input, dtype, layout, device, pin_memory)
|
380 |
+
|
381 |
+
|
382 |
+
@_onnx_symbolic("aten::zeros")
|
383 |
+
@symbolic_helper.parse_args("v", "i", "v", "v", "v")
|
384 |
+
def zeros(g: jit_utils.GraphContext, sizes, dtype, layout, device, pin_memory=False):
|
385 |
+
# NOTE: no way to set device and layout in ONNX, so we ignore it
|
386 |
+
return _constant_fill(g, sizes, dtype, 0)
|
387 |
+
|
388 |
+
|
389 |
+
@_onnx_symbolic("aten::zeros_like")
|
390 |
+
@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v")
|
391 |
+
def zeros_like(
|
392 |
+
g: jit_utils.GraphContext,
|
393 |
+
input,
|
394 |
+
dtype,
|
395 |
+
layout,
|
396 |
+
device,
|
397 |
+
pin_memory=False,
|
398 |
+
memory_format=None,
|
399 |
+
):
|
400 |
+
shape = g.op("Shape", input)
|
401 |
+
return _constant_fill(g, shape, dtype, 0)
|
402 |
+
|
403 |
+
|
404 |
+
@_onnx_symbolic("aten::ones")
|
405 |
+
@symbolic_helper.parse_args("v", "i", "v", "v", "v")
|
406 |
+
def ones(g: jit_utils.GraphContext, sizes, dtype, layout, device, pin_memory=False):
|
407 |
+
return _constant_fill(g, sizes, dtype, 1)
|
408 |
+
|
409 |
+
|
410 |
+
@_onnx_symbolic("aten::ones_like")
|
411 |
+
@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v")
|
412 |
+
def ones_like(
|
413 |
+
g: jit_utils.GraphContext,
|
414 |
+
input,
|
415 |
+
dtype,
|
416 |
+
layout,
|
417 |
+
device,
|
418 |
+
pin_memory=False,
|
419 |
+
memory_format=None,
|
420 |
+
):
|
421 |
+
shape = g.op("Shape", input)
|
422 |
+
return _constant_fill(g, shape, dtype, 1)
|
423 |
+
|
424 |
+
|
425 |
+
@_onnx_symbolic("aten::full")
|
426 |
+
def full(
|
427 |
+
g: jit_utils.GraphContext, sizes, value, dtype, layout, device, pin_memory=False
|
428 |
+
):
|
429 |
+
const_value = symbolic_helper._maybe_get_const(value, "t")
|
430 |
+
if symbolic_helper._is_value(const_value):
|
431 |
+
tmp = zeros(g, sizes, dtype, layout, device)
|
432 |
+
return opset9.add(g, tmp, value, g.op("Constant", value_t=torch.tensor(1)))
|
433 |
+
else:
|
434 |
+
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
|
435 |
+
return _constant_fill(g, sizes, dtype, const_value)
|
436 |
+
|
437 |
+
|
438 |
+
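

# --- Editorial example: the fallback branch above in eager terms. When the fill
# value is only known as a traced value, zeros are built and the value is added,
# which equals a direct full(). Demo values are illustrative.
_demo_value = torch.tensor(7.0)
assert torch.equal(torch.zeros(2, 3) + _demo_value, torch.full((2, 3), 7.0))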
@_onnx_symbolic("aten::full_like")
|
439 |
+
@symbolic_helper.parse_args("v", "f", "i", "v", "v", "v", "v")
|
440 |
+
def full_like(
|
441 |
+
g: jit_utils.GraphContext,
|
442 |
+
input,
|
443 |
+
fill_value,
|
444 |
+
dtype,
|
445 |
+
layout,
|
446 |
+
device,
|
447 |
+
pin_memory=False,
|
448 |
+
memory_format=None,
|
449 |
+
):
|
450 |
+
shape = g.op("Shape", input)
|
451 |
+
return _constant_fill(g, shape, dtype, fill_value)
|
452 |
+
|
453 |
+
|
454 |
+
@_onnx_symbolic("aten::repeat")
|
455 |
+
def repeat(g: jit_utils.GraphContext, self, repeats):
|
456 |
+
if not symbolic_helper._is_value(repeats):
|
457 |
+
repeats = g.op("Constant", value_t=torch.LongTensor(repeats))
|
458 |
+
if symbolic_helper._is_packed_list(repeats):
|
459 |
+
repeat_size_len = len(symbolic_helper._unpack_list(repeats))
|
460 |
+
else:
|
461 |
+
const_repeats = symbolic_helper._maybe_get_const(repeats, "is")
|
462 |
+
repeat_size_len = len(const_repeats)
|
463 |
+
if self.isCompleteTensor():
|
464 |
+
sizes = self.type().sizes()
|
465 |
+
diff_dims = repeat_size_len - len(sizes)
|
466 |
+
if diff_dims > 0:
|
467 |
+
self = opset9.view(
|
468 |
+
g, self, g.op("Constant", value_t=torch.tensor([1] * diff_dims + sizes))
|
469 |
+
)
|
470 |
+
return g.op("Tile", self, repeats)
|
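

# --- Editorial example: the rank padding above, seen through eager repeat().
# ONNX Tile needs len(repeats) == rank(input), so a (3, 4) tensor repeated with
# three factors is first viewed as (1, 3, 4):
# torch.randn(3, 4).repeat(2, 1, 1).shape -> torch.Size([2, 3, 4])
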
venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset9.py
ADDED
The diff for this file is too large to render.
See raw diff

venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (203 Bytes). View file

venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc
ADDED
Binary file (1.46 kB). View file

venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc
ADDED
Binary file (3.68 kB). View file

venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc
ADDED
Binary file (19.5 kB). View file

venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_test.cpython-310.pyc
ADDED
Binary file (253 kB). View file

venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc
ADDED
Binary file (2.48 kB). View file

venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc
ADDED
Binary file (1.41 kB). View file

venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc
ADDED
Binary file (16.3 kB). View file

venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/pipe_with_ddp_test.cpython-310.pyc
ADDED
Binary file (4.78 kB). View file

venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc
ADDED
Binary file (4.97 kB). View file

venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (210 Bytes). View file

venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc
ADDED
Binary file (1.84 kB). View file