applied-ai-018 committed
Commit ff792d3 · verified · 1 parent: b9e633b

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. ckpts/universal/global_step120/zero/17.input_layernorm.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step120/zero/17.input_layernorm.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step120/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step120/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step120/zero/5.attention.query_key_value.weight/fp32.pt +3 -0
  6. venv/lib/python3.10/site-packages/torch/ao/nn/__init__.py +19 -0
  7. venv/lib/python3.10/site-packages/torch/ao/nn/qat/__init__.py +1 -0
  8. venv/lib/python3.10/site-packages/torch/ao/nn/qat/__pycache__/__init__.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/__init__.py +1 -0
  10. venv/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__init__.py +3 -0
  12. venv/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/linear.py +25 -0
  15. venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__init__.py +14 -0
  16. venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/conv.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/embedding_ops.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/linear.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/conv.py +270 -0
  21. venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/embedding_ops.py +143 -0
  22. venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/linear.py +81 -0
  23. venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/__init__.py +1 -0
  24. venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/__pycache__/__init__.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__init__.py +9 -0
  26. venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/activation.py +465 -0
  30. venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/rnn.py +411 -0
  31. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/__init__.py +38 -0
  32. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/__pycache__/functional.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/functional.py +644 -0
  35. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__init__.py +131 -0
  36. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/activation.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/batchnorm.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/conv.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/dropout.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/embedding_ops.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/functional_modules.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/linear.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/normalization.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/utils.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/activation.py +302 -0
  48. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/batchnorm.py +106 -0
  49. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/conv.py +945 -0
  50. venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/dropout.py +27 -0
ckpts/universal/global_step120/zero/17.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:948c39a2c96bce2cb781383f91ff3236cf3845429c695be8909682a1919d3125
+ size 9387
ckpts/universal/global_step120/zero/17.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8da328f1ab9e1b0b76e1ec0db6c3e5738402f923a1e5442c0ee5418143cf4086
+ size 9293
ckpts/universal/global_step120/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa570de0a3a2d41a83493af9c6c07ffa043df0d57a26a34edf7e2b5e949f30c6
+ size 33555612
ckpts/universal/global_step120/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01e2bcb4fb228f10a26f82abe36abf3bc072b8382940b7faf5fe0e75cc786803
+ size 33555627
ckpts/universal/global_step120/zero/5.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee95dbee1f6af25871f57f8ebc301001184176a83245c07d54ec3fa04c88188f
+ size 50332749
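Note: the five checkpoint entries above are Git LFS pointer files; only the version/oid/size triplet lives in the repository, while the tensor data is stored in LFS. A minimal verification sketch (not part of this commit), assuming the shard has already been downloaded to a local path of your choosing:

    import hashlib
    import os

    def verify_lfs_pointer(local_path, expected_oid, expected_size):
        # Compare the downloaded file's size and SHA-256 digest against the pointer metadata.
        assert os.path.getsize(local_path) == expected_size
        h = hashlib.sha256()
        with open(local_path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        assert h.hexdigest() == expected_oid

    # Values taken from the first pointer above; the local path is illustrative.
    verify_lfs_pointer("exp_avg_sq.pt",
                       "948c39a2c96bce2cb781383f91ff3236cf3845429c695be8909682a1919d3125",
                       9387)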
venv/lib/python3.10/site-packages/torch/ao/nn/__init__.py ADDED
@@ -0,0 +1,19 @@
+ # We are exposing all subpackages to the end-user.
+ # Because of possible inter-dependency, we want to avoid
+ # the cyclic imports, thus implementing lazy version
+ # as per https://peps.python.org/pep-0562/
+
+ import importlib
+
+ __all__ = [
+     "intrinsic",
+     "qat",
+     "quantizable",
+     "quantized",
+     "sparse",
+ ]
+
+ def __getattr__(name):
+     if name in __all__:
+         return importlib.import_module("." + name, __name__)
+     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
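This __init__.py defers importing its subpackages until they are first accessed (PEP 562 module-level __getattr__), which avoids cyclic imports at package load time. A short sketch of the behaviour (not part of this commit):

    import torch.ao.nn as ao_nn

    # First attribute access triggers importlib.import_module for the subpackage.
    print(ao_nn.quantized.FloatFunctional)

    # Names outside __all__ fall through to the AttributeError branch above.
    try:
        ao_nn.does_not_exist
    except AttributeError as exc:
        print(exc)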
venv/lib/python3.10/site-packages/torch/ao/nn/qat/__init__.py ADDED
@@ -0,0 +1 @@
+ from .modules import *  # noqa: F403
venv/lib/python3.10/site-packages/torch/ao/nn/qat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (207 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/__init__.py ADDED
@@ -0,0 +1 @@
+ from .modules import *  # noqa: F403
venv/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (215 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .linear import Linear
+
+ __all__ = ["Linear"]
venv/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (258 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (1.28 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/linear.py ADDED
@@ -0,0 +1,25 @@
+ import torch
+
+ __all__ = ["Linear"]
+
+ class Linear(torch.ao.nn.qat.Linear):
+     r"""
+     A linear module attached with FakeQuantize modules for weight,
+     used for dynamic quantization aware training.
+
+     We adopt the same interface as `torch.nn.Linear`, please see
+     https://pytorch.org/docs/stable/nn.html#torch.nn.Linear
+     for documentation.
+
+     Similar to `torch.nn.Linear`, with FakeQuantize modules initialized to
+     default.
+     """
+
+     def __init__(self, in_features, out_features, bias=True,
+                  qconfig=None, device=None, dtype=None) -> None:
+         super().__init__(in_features, out_features, bias, qconfig, device, dtype)
+         if not torch.ao.quantization.qconfig._activation_is_memoryless(qconfig):
+             raise ValueError(
+                 "Dynamic QAT requires a memoryless observer." +
+                 "This means a MovingAverage observer with averaging constant equal to 1"
+             )
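The constructor above rejects any qconfig whose activation observer keeps running statistics, since dynamic quantization recomputes activation qparams on every forward pass. A minimal usage sketch (not part of this commit), assuming torch.ao.quantization.qconfig exposes default_dynamic_qat_qconfig, whose activation fake-quant uses an averaging constant of 1 and therefore passes the memoryless check:

    import torch
    from torch.ao.nn.qat.dynamic import Linear as DynamicQATLinear

    # Assumed qconfig name; any qconfig with a memoryless activation observer works.
    qconfig = torch.ao.quantization.qconfig.default_dynamic_qat_qconfig
    m = DynamicQATLinear(16, 8, qconfig=qconfig)
    out = m(torch.randn(4, 16))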
venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__init__.py ADDED
@@ -0,0 +1,14 @@
+ from .linear import Linear
+ from .conv import Conv1d
+ from .conv import Conv2d
+ from .conv import Conv3d
+ from .embedding_ops import EmbeddingBag, Embedding
+
+ __all__ = [
+     "Linear",
+     "Conv1d",
+     "Conv2d",
+     "Conv3d",
+     "Embedding",
+     "EmbeddingBag",
+ ]
venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (442 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/conv.cpython-310.pyc ADDED
Binary file (6.91 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/embedding_ops.cpython-310.pyc ADDED
Binary file (5.29 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (2.84 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/conv.py ADDED
@@ -0,0 +1,270 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from torch.nn.modules.utils import _single, _pair, _triple
4
+ from torch.ao.nn.intrinsic import _FusedModule
5
+ from typing import Tuple, TypeVar, Union
6
+ from torch.nn.common_types import _size_1_t, _size_2_t, _size_3_t
7
+
8
+ __all__ = [
9
+ "Conv1d",
10
+ "Conv2d",
11
+ "Conv3d"
12
+ ]
13
+
14
+ MOD = TypeVar('MOD', bound=nn.modules.conv._ConvNd)
15
+
16
+ class _ConvNd(nn.modules.conv._ConvNd):
17
+
18
+ _FLOAT_MODULE = MOD
19
+
20
+ def __init__(self,
21
+ in_channels: int,
22
+ out_channels: int,
23
+ kernel_size: Tuple[int, ...],
24
+ stride: Tuple[int, ...],
25
+ padding: Tuple[int, ...],
26
+ dilation: Tuple[int, ...],
27
+ transposed: bool,
28
+ output_padding: Tuple[int, ...],
29
+ groups: int,
30
+ bias: bool,
31
+ padding_mode: str,
32
+ qconfig=None,
33
+ device=None,
34
+ dtype=None) -> None:
35
+ factory_kwargs = {"device": device, "dtype": dtype}
36
+ nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels, kernel_size,
37
+ stride, padding, dilation, transposed,
38
+ output_padding, groups, bias, padding_mode, **factory_kwargs)
39
+ assert qconfig, 'qconfig must be provided for QAT module'
40
+ self.qconfig = qconfig
41
+ self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
42
+
43
+ def forward(self, input):
44
+ return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
45
+
46
+ @staticmethod
47
+ def from_float(cls, mod):
48
+ r"""Create a qat module from a float module
49
+
50
+ Args:
51
+ `mod`: a float module, either produced by torch.ao.quantization utilities
52
+ or directly from user
53
+ """
54
+ assert type(mod) == cls._FLOAT_MODULE, (
55
+ "qat."
56
+ + cls.__name__
57
+ + ".from_float only works for "
58
+ + cls._FLOAT_MODULE.__name__ # type: ignore[attr-defined]
59
+ )
60
+ assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
61
+ assert mod.qconfig, 'Input float module must have a valid qconfig'
62
+ if issubclass(type(mod), _FusedModule):
63
+ mod = mod[0] # type: ignore[index]
64
+ qconfig = mod.qconfig
65
+ qat_conv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,
66
+ stride=mod.stride, padding=mod.padding, dilation=mod.dilation,
67
+ groups=mod.groups, bias=mod.bias is not None,
68
+ padding_mode=mod.padding_mode, qconfig=qconfig)
69
+ qat_conv.weight = mod.weight
70
+ qat_conv.bias = mod.bias
71
+ return qat_conv
72
+
73
+ def to_float(self):
74
+ """ This works for both single qat conv, and the qat conv - relu modules
75
+ to convert the qat module to a floating point module
76
+ """
77
+ cls = type(self)
78
+ conv = cls._FLOAT_CONV_MODULE( # type: ignore[attr-defined, operator]
79
+ self.in_channels,
80
+ self.out_channels,
81
+ self.kernel_size, # type: ignore[arg-type]
82
+ self.stride, # type: ignore[arg-type]
83
+ self.padding, # type: ignore[arg-type]
84
+ self.dilation, # type: ignore[arg-type]
85
+ self.groups,
86
+ self.bias is not None,
87
+ self.padding_mode)
88
+ conv.weight = torch.nn.Parameter(self.weight.detach())
89
+ if self.bias is not None:
90
+ conv.bias = torch.nn.Parameter(self.bias.detach())
91
+ # conv relu
92
+ if issubclass(cls, _FusedModule):
93
+ modules = [conv]
94
+ assert hasattr(cls, "_FLOAT_RELU_MODULE")
95
+ relu = cls._FLOAT_RELU_MODULE() # type: ignore[attr-defined]
96
+ modules.append(relu)
97
+ fused = cls._FLOAT_MODULE(*modules) # type: ignore[arg-type, attr-defined, operator]
98
+ fused.train(self.training)
99
+ return fused
100
+ else:
101
+ return conv
102
+
103
+ class Conv1d(_ConvNd, nn.Conv1d):
104
+ r"""
105
+ A Conv1d module attached with FakeQuantize modules for weight,
106
+ used for quantization aware training.
107
+
108
+ We adopt the same interface as :class:`~torch.nn.Conv1d`
109
+
110
+ Similar to :class:`~torch.nn.Conv2d`, with FakeQuantize modules initialized to
111
+ default.
112
+
113
+ Attributes:
114
+ weight_fake_quant: fake quant module for weight
115
+ """
116
+ _FLOAT_MODULE = nn.Conv1d
117
+ _FLOAT_CONV_MODULE = nn.Conv1d
118
+
119
+ def __init__(self,
120
+ in_channels: int,
121
+ out_channels: int,
122
+ kernel_size: _size_1_t,
123
+ stride: _size_1_t = 1,
124
+ padding: Union[str, _size_1_t] = 0,
125
+ dilation: _size_1_t = 1,
126
+ groups: int = 1,
127
+ bias: bool = True,
128
+ padding_mode: str = 'zeros',
129
+ qconfig=None,
130
+ device=None,
131
+ dtype=None) -> None:
132
+ kernel_size_ = _single(kernel_size)
133
+ stride_ = _single(stride)
134
+ padding_ = padding if isinstance(padding, str) else _single(padding)
135
+ dilation_ = _single(dilation)
136
+ super().__init__(
137
+ in_channels,
138
+ out_channels,
139
+ kernel_size_,
140
+ stride=stride_,
141
+ padding=padding_,
142
+ dilation=dilation_,
143
+ transposed=False,
144
+ output_padding=_single(0),
145
+ groups=groups,
146
+ bias=bias,
147
+ padding_mode=padding_mode,
148
+ qconfig=qconfig,
149
+ device=device,
150
+ dtype=dtype)
151
+
152
+ @classmethod
153
+ def from_float(cls, mod):
154
+ return super().from_float(cls, mod)
155
+
156
+ class Conv2d(_ConvNd, nn.Conv2d):
157
+ r"""
158
+ A Conv2d module attached with FakeQuantize modules for weight,
159
+ used for quantization aware training.
160
+
161
+ We adopt the same interface as `torch.nn.Conv2d`, please see
162
+ https://pytorch.org/docs/stable/nn.html?highlight=conv2d#torch.nn.Conv2d
163
+ for documentation.
164
+
165
+ Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
166
+ default.
167
+
168
+ Attributes:
169
+ weight_fake_quant: fake quant module for weight
170
+ """
171
+ _FLOAT_MODULE = nn.Conv2d
172
+ _FLOAT_CONV_MODULE = nn.Conv2d
173
+
174
+ def __init__(self,
175
+ in_channels: int,
176
+ out_channels: int,
177
+ kernel_size: _size_2_t,
178
+ stride: _size_2_t = 1,
179
+ padding: Union[str, _size_2_t] = 0,
180
+ dilation: _size_2_t = 1,
181
+ groups: int = 1,
182
+ bias: bool = True,
183
+ padding_mode: str = 'zeros',
184
+ qconfig=None,
185
+ device=None,
186
+ dtype=None) -> None:
187
+ kernel_size_ = _pair(kernel_size)
188
+ stride_ = _pair(stride)
189
+ padding_ = padding if isinstance(padding, str) else _pair(padding)
190
+ dilation_ = _pair(dilation)
191
+ super().__init__(
192
+ in_channels,
193
+ out_channels,
194
+ kernel_size_,
195
+ stride=stride_,
196
+ padding=padding_,
197
+ dilation=dilation_,
198
+ transposed=False,
199
+ output_padding=_pair(0),
200
+ groups=groups,
201
+ bias=bias,
202
+ padding_mode=padding_mode,
203
+ qconfig=qconfig,
204
+ device=device,
205
+ dtype=dtype)
206
+
207
+ def forward(self, input):
208
+ return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
209
+
210
+ @classmethod
211
+ def from_float(cls, mod):
212
+ return super().from_float(cls, mod)
213
+
214
+ class Conv3d(_ConvNd, nn.Conv3d):
215
+ r"""
216
+ A Conv3d module attached with FakeQuantize modules for weight,
217
+ used for quantization aware training.
218
+
219
+ We adopt the same interface as `torch.nn.Conv3d`, please see
220
+ https://pytorch.org/docs/stable/nn.html?highlight=conv3d#torch.nn.Conv3d
221
+ for documentation.
222
+
223
+ Similar to `torch.nn.Conv3d`, with FakeQuantize modules initialized to
224
+ default.
225
+
226
+ Attributes:
227
+ weight_fake_quant: fake quant module for weight
228
+ """
229
+ _FLOAT_MODULE = nn.Conv3d
230
+ _FLOAT_CONV_MODULE = nn.Conv3d
231
+
232
+ def __init__(self,
233
+ in_channels: int,
234
+ out_channels: int,
235
+ kernel_size: _size_3_t,
236
+ stride: _size_3_t = 1,
237
+ padding: Union[str, _size_3_t] = 0,
238
+ dilation: _size_3_t = 1,
239
+ groups: int = 1,
240
+ bias: bool = True,
241
+ padding_mode: str = 'zeros',
242
+ qconfig=None,
243
+ device=None,
244
+ dtype=None) -> None:
245
+ kernel_size_ = _triple(kernel_size)
246
+ stride_ = _triple(stride)
247
+ padding_ = padding if isinstance(padding, str) else _triple(padding)
248
+ dilation_ = _triple(dilation)
249
+ super().__init__(
250
+ in_channels,
251
+ out_channels,
252
+ kernel_size_,
253
+ stride=stride_,
254
+ padding=padding_,
255
+ dilation=dilation_,
256
+ transposed=False,
257
+ output_padding=_triple(0),
258
+ groups=groups,
259
+ bias=bias,
260
+ padding_mode=padding_mode,
261
+ qconfig=qconfig,
262
+ device=device,
263
+ dtype=dtype)
264
+
265
+ def forward(self, input):
266
+ return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
267
+
268
+ @classmethod
269
+ def from_float(cls, mod):
270
+ return super().from_float(cls, mod)
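The QAT conv classes above swap in for their float counterparts via from_float, reusing the float weight and bias and attaching a weight_fake_quant module that is applied in forward. A minimal sketch of that swap (not part of this commit, and the same swap is what prepare_qat performs automatically):

    import torch
    import torch.nn as nn
    import torch.ao.nn.qat as nnqat

    float_conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
    float_conv.qconfig = torch.ao.quantization.get_default_qat_qconfig("fbgemm")

    qat_conv = nnqat.Conv2d.from_float(float_conv)   # weight is fake-quantized in forward
    y = qat_conv(torch.randn(1, 3, 32, 32))
    restored = qat_conv.to_float()                    # back to a plain nn.Conv2d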
venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/embedding_ops.py ADDED
@@ -0,0 +1,143 @@
1
+ import torch
2
+ from torch import Tensor
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+ __all__ = ['Embedding', 'EmbeddingBag']
7
+
8
+ class Embedding(nn.Embedding):
9
+ r"""
10
+ An embedding bag module attached with FakeQuantize modules for weight,
11
+ used for quantization aware training.
12
+
13
+ We adopt the same interface as `torch.nn.Embedding`, please see
14
+ https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html#torch.nn.Embedding
15
+ for documentation.
16
+
17
+ Similar to `torch.nn.Embedding`, with FakeQuantize modules initialized to
18
+ default.
19
+
20
+ Attributes:
21
+ weight: fake quant module for weight
22
+ """
23
+ _FLOAT_MODULE = nn.Embedding
24
+
25
+ def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
26
+ max_norm=None, norm_type=2.0, scale_grad_by_freq=False,
27
+ sparse=False, _weight=None, device=None, dtype=None, qconfig=None) -> None:
28
+ factory_kwargs = {'device': device, 'dtype': dtype}
29
+ super().__init__(num_embeddings, embedding_dim, padding_idx, max_norm,
30
+ norm_type, scale_grad_by_freq, sparse, _weight,
31
+ **factory_kwargs)
32
+ assert qconfig, 'qconfig must be provided for QAT module'
33
+ assert qconfig.weight().qscheme == torch.per_channel_affine_float_qparams, \
34
+ 'Embedding weights requires a qscheme of torch.per_channel_affine_float_qparams Got ' + \
35
+ str(qconfig.weight().qscheme)
36
+ self.qconfig = qconfig
37
+ self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
38
+
39
+ def forward(self, input) -> Tensor:
40
+ return F.embedding(input, self.weight_fake_quant(self.weight), self.padding_idx,
41
+ self.max_norm, self.norm_type, self.scale_grad_by_freq,
42
+ self.sparse)
43
+
44
+ @classmethod
45
+ def from_float(cls, mod):
46
+ r"""Create a qat module from a float module
47
+
48
+ Args: `mod` a float module, either produced by torch.ao.quantization utilities
49
+ or directly from user
50
+ """
51
+ assert type(mod) == cls._FLOAT_MODULE, ' qat.' + cls.__name__ + '.from_float only works for ' + \
52
+ cls._FLOAT_MODULE.__name__
53
+ assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
54
+ assert mod.qconfig, 'Input float module must have a valid qconfig'
55
+ weight_qscheme = mod.qconfig.weight().qscheme # type: ignore[union-attr, operator]
56
+ assert weight_qscheme == torch.per_channel_affine_float_qparams, \
57
+ 'Embedding weights requires a qscheme of torch.per_channel_affine_float_qparams Got ' + \
58
+ str(weight_qscheme)
59
+
60
+ qconfig = mod.qconfig
61
+ qat_embedding_bag = cls(mod.num_embeddings, mod.embedding_dim, mod.padding_idx,
62
+ mod.max_norm, mod.norm_type, mod.scale_grad_by_freq,
63
+ mod.sparse, mod.weight, qconfig=qconfig)
64
+
65
+ return qat_embedding_bag
66
+
67
+ def to_float(self):
68
+ embedding_bag = torch.nn.Embedding(self.num_embeddings, self.embedding_dim, self.padding_idx,
69
+ self.max_norm, self.norm_type, self.scale_grad_by_freq,
70
+ self.sparse, None)
71
+ embedding_bag.weight = torch.nn.Parameter(self.weight.detach())
72
+ embedding_bag.train(self.training)
73
+ return embedding_bag
74
+
75
+ class EmbeddingBag(nn.EmbeddingBag):
76
+ r"""
77
+ An embedding bag module attached with FakeQuantize modules for weight,
78
+ used for quantization aware training.
79
+
80
+ We adopt the same interface as `torch.nn.EmbeddingBag`, please see
81
+ https://pytorch.org/docs/stable/generated/torch.nn.EmbeddingBag.html#torch.nn.EmbeddingBag
82
+ for documentation.
83
+
84
+ Similar to `torch.nn.EmbeddingBag`, with FakeQuantize modules initialized to
85
+ default.
86
+
87
+ Attributes:
88
+ weight: fake quant module for weight
89
+ """
90
+ _FLOAT_MODULE = nn.EmbeddingBag
91
+
92
+ def __init__(self, num_embeddings, embedding_dim, max_norm=None,
93
+ norm_type=2.0, scale_grad_by_freq=False, mode='mean',
94
+ sparse=False, _weight=None, include_last_offset=False,
95
+ padding_idx=None, qconfig=None, device=None, dtype=None) -> None:
96
+ factory_kwargs = {'device': device, 'dtype': dtype}
97
+ super().__init__(num_embeddings, embedding_dim, max_norm, norm_type,
98
+ scale_grad_by_freq, mode, sparse, _weight,
99
+ include_last_offset, padding_idx, **factory_kwargs)
100
+ assert qconfig, 'qconfig must be provided for QAT module'
101
+ assert qconfig.weight().qscheme == torch.per_channel_affine_float_qparams, \
102
+ 'Embedding Bag weights requires a qscheme of torch.per_channel_affine_float_qparams Got ' + \
103
+ str(qconfig.weight().qscheme)
104
+ self.qconfig = qconfig
105
+ self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
106
+
107
+ def forward(self, input, offsets=None, per_sample_weights=None) -> Tensor:
108
+ return F.embedding_bag(input, self.weight_fake_quant(self.weight), offsets,
109
+ self.max_norm, self.norm_type,
110
+ self.scale_grad_by_freq, self.mode, self.sparse,
111
+ per_sample_weights, self.include_last_offset,
112
+ self.padding_idx)
113
+
114
+ @classmethod
115
+ def from_float(cls, mod):
116
+ r"""Create a qat module from a float module
117
+
118
+ Args: `mod` a float module, either produced by torch.ao.quantization utilities
119
+ or directly from user
120
+ """
121
+ assert type(mod) == cls._FLOAT_MODULE, ' qat.' + cls.__name__ + '.from_float only works for ' + \
122
+ cls._FLOAT_MODULE.__name__
123
+ assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
124
+ assert mod.qconfig, 'Input float module must have a valid qconfig'
125
+ weight_qscheme = mod.qconfig.weight().qscheme # type: ignore[union-attr, operator]
126
+ assert weight_qscheme == torch.per_channel_affine_float_qparams, \
127
+ 'Embedding Bag weights requires a qscheme of torch.per_channel_affine_float_qparams Got ' + \
128
+ str(weight_qscheme)
129
+
130
+ qconfig = mod.qconfig
131
+ qat_embedding_bag = cls(mod.num_embeddings, mod.embedding_dim, mod.max_norm, mod.norm_type,
132
+ mod.scale_grad_by_freq, mod.mode, mod.sparse, mod.weight,
133
+ mod.include_last_offset, mod.padding_idx, qconfig=qconfig)
134
+
135
+ return qat_embedding_bag
136
+
137
+ def to_float(self):
138
+ embedding_bag = torch.nn.EmbeddingBag(self.num_embeddings, self.embedding_dim, self.max_norm,
139
+ self.norm_type, self.scale_grad_by_freq, self.mode, self.sparse,
140
+ None, self.include_last_offset, self.padding_idx)
141
+ embedding_bag.weight = torch.nn.Parameter(self.weight.detach())
142
+ embedding_bag.train(self.training)
143
+ return embedding_bag
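Both QAT embedding modules above assert that the weight fake-quant uses the torch.per_channel_affine_float_qparams qscheme. A usage sketch (not part of this commit), assuming torch.ao.quantization.qconfig provides default_embedding_qat_qconfig with that qscheme; any qconfig meeting the assertion would do:

    import torch
    import torch.nn as nn
    from torch.ao.nn.qat.modules.embedding_ops import Embedding as QATEmbedding

    emb = nn.Embedding(1000, 64)
    # Assumed qconfig name; its weight fake-quant must use per_channel_affine_float_qparams.
    emb.qconfig = torch.ao.quantization.qconfig.default_embedding_qat_qconfig

    qat_emb = QATEmbedding.from_float(emb)
    out = qat_emb(torch.tensor([[1, 2, 3]]))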
venv/lib/python3.10/site-packages/torch/ao/nn/qat/modules/linear.py ADDED
@@ -0,0 +1,81 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torch.ao.nn.intrinsic import LinearReLU
+ from torch.nn.utils.parametrize import (
+     is_parametrized,
+     type_before_parametrizations,
+     transfer_parametrizations_and_params,
+ )
+
+ __all__ = [
+     "Linear"
+ ]
+
+ class Linear(nn.Linear):
+     r"""
+     A linear module attached with FakeQuantize modules for weight,
+     used for quantization aware training.
+
+     We adopt the same interface as `torch.nn.Linear`, please see
+     https://pytorch.org/docs/stable/nn.html#torch.nn.Linear
+     for documentation.
+
+     Similar to `torch.nn.Linear`, with FakeQuantize modules initialized to
+     default.
+
+     Attributes:
+         weight: fake quant module for weight
+     """
+     _FLOAT_MODULE = nn.Linear
+
+     def __init__(self, in_features, out_features, bias=True,
+                  qconfig=None, device=None, dtype=None) -> None:
+         factory_kwargs = {'device': device, 'dtype': dtype}
+         super().__init__(in_features, out_features, bias, **factory_kwargs)
+         assert qconfig, 'qconfig must be provided for QAT module'
+         self.qconfig = qconfig
+         self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
+
+     def forward(self, input):
+         return F.linear(input, self.weight_fake_quant(self.weight), self.bias)
+
+     @classmethod
+     def from_float(cls, mod):
+         r"""Create a qat module from a float module or qparams_dict
+         Args: `mod` a float module, either produced by torch.ao.quantization utilities
+         or directly from user
+         """
+         assert type_before_parametrizations(mod) == cls._FLOAT_MODULE, (
+             " qat."
+             + cls.__name__
+             + ".from_float only works for "
+             + cls._FLOAT_MODULE.__name__
+         )
+         assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
+         assert mod.qconfig, "Input float module must have a valid qconfig"
+         if type_before_parametrizations(mod) == LinearReLU:
+             mod = mod[0]
+
+         qconfig = mod.qconfig
+         qat_linear = cls(mod.in_features, mod.out_features, bias=mod.bias is not None, qconfig=qconfig)
+
+         if is_parametrized(mod, "weight"):
+             transfer_parametrizations_and_params(mod, qat_linear, "weight")
+         else:
+             qat_linear.weight = mod.weight
+
+         if is_parametrized(mod, "bias"):
+             transfer_parametrizations_and_params(mod, qat_linear, "bias")
+         else:
+             qat_linear.bias = mod.bias
+
+         return qat_linear
+
+     def to_float(self):
+         linear = torch.nn.Linear(self.in_features, self.out_features, self.bias is not None)
+         linear.weight = torch.nn.Parameter(self.weight.detach())
+         if self.bias is not None:
+             linear.bias = torch.nn.Parameter(self.bias.detach())
+         linear.train(self.training)
+         return linear
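Unlike the conv variant, this from_float is parametrization-aware: if the float weight or bias is parametrized, the parametrization is transferred to the QAT module instead of the raw tensor. A sketch of that path (not part of this commit), using the orthogonal parametrization as an example:

    import torch
    import torch.nn as nn
    import torch.ao.nn.qat as nnqat
    from torch.nn.utils import parametrizations

    fl = nn.Linear(32, 32)
    parametrizations.orthogonal(fl, "weight")        # weight becomes a parametrized tensor
    fl.qconfig = torch.ao.quantization.get_default_qat_qconfig("fbgemm")

    # from_float detects the parametrization and copies it over rather than the raw weight.
    qat_fl = nnqat.Linear.from_float(fl)
    _ = qat_fl(torch.randn(2, 32))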
venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/__init__.py ADDED
@@ -0,0 +1 @@
+ from .modules import *  # noqa: F403
venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (215 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__init__.py ADDED
@@ -0,0 +1,9 @@
+ from .activation import MultiheadAttention
+ from .rnn import LSTM
+ from .rnn import LSTMCell
+
+ __all__ = [
+     'LSTM',
+     'LSTMCell',
+     'MultiheadAttention',
+ ]
venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (351 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc ADDED
Binary file (12 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (12.5 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/activation.py ADDED
@@ -0,0 +1,465 @@
1
+ import torch
2
+ import torch.jit # this is needed to avoid a circular import
3
+ from torch import nn
4
+ import torch.nn.functional as nnF
5
+
6
+ from torch import Tensor
7
+ from typing import Optional, Tuple
8
+
9
+ import warnings
10
+
11
+ __all__ = [
12
+ "MultiheadAttention"
13
+ ]
14
+
15
+ class MultiheadAttention(nn.MultiheadAttention):
16
+ _FLOAT_MODULE = nn.MultiheadAttention
17
+
18
+ r"""Quantizable implementation of the MultiheadAttention.
19
+
20
+ Note::
21
+ Please, refer to :class:`~torch.nn.MultiheadAttention` for more
22
+ information
23
+
24
+ Allows the model to jointly attend to information from different
25
+ representation subspaces.
26
+ See reference: Attention Is All You Need
27
+
28
+ The original MHA module is not quantizable.
29
+ This reimplements it by explicitly instantiating the linear layers.
30
+
31
+ .. math::
32
+ \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
33
+ \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
34
+
35
+ Args:
36
+ embed_dim: total dimension of the model.
37
+ num_heads: parallel attention heads.
38
+ dropout: a Dropout layer on attn_output_weights. Default: 0.0.
39
+ bias: add bias as module parameter. Default: True.
40
+ add_bias_kv: add bias to the key and value sequences at dim=0.
41
+ add_zero_attn: add a new batch of zeros to the key and
42
+ value sequences at dim=1.
43
+ kdim: total number of features in key. Default: None.
44
+ vdim: total number of features in value. Default: None.
45
+ batch_first: If ``True``, then the input and output tensors are provided
46
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
47
+
48
+ Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
49
+ to :attr:`embed_dim` such that query, key, and value have the same
50
+ number of features.
51
+
52
+ Examples::
53
+
54
+ >>> import torch.ao.nn.quantizable as nnqa
55
+ >>> multihead_attn = nnqa.MultiheadAttention(embed_dim, num_heads)
56
+ >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
57
+
58
+ Note::
59
+ Please, follow the quantization flow to convert the quantizable MHA.
60
+ """
61
+ __constants__ = ['batch_first']
62
+
63
+ def __init__(self, embed_dim: int, num_heads: int,
64
+ dropout: float = 0., bias: bool = True,
65
+ add_bias_kv: bool = False, add_zero_attn: bool = False,
66
+ kdim: Optional[int] = None, vdim: Optional[int] = None, batch_first: bool = False,
67
+ device=None, dtype=None) -> None:
68
+ factory_kwargs = {'device': device, 'dtype': dtype}
69
+ super().__init__(embed_dim, num_heads, dropout,
70
+ bias, add_bias_kv,
71
+ add_zero_attn, kdim, vdim, batch_first,
72
+ **factory_kwargs)
73
+ self.linear_Q = nn.Linear(self.embed_dim, self.embed_dim, bias=bias, **factory_kwargs)
74
+ self.linear_K = nn.Linear(self.kdim, self.embed_dim, bias=bias, **factory_kwargs)
75
+ self.linear_V = nn.Linear(self.vdim, self.embed_dim, bias=bias, **factory_kwargs)
76
+ # for the type: ignore, see https://github.com/pytorch/pytorch/issues/58969
77
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=bias, **factory_kwargs) # type: ignore[assignment]
78
+
79
+ # Functionals
80
+ self.q_scaling_product = torch.ao.nn.quantized.FloatFunctional()
81
+ # note: importing torch.ao.nn.quantized at top creates a circular import
82
+
83
+ # Quant/Dequant
84
+ self.quant_attn_output = torch.ao.quantization.QuantStub()
85
+ self.quant_attn_output_weights = torch.ao.quantization.QuantStub()
86
+ self.dequant_q = torch.ao.quantization.DeQuantStub()
87
+ self.dequant_k = torch.ao.quantization.DeQuantStub()
88
+ self.dequant_v = torch.ao.quantization.DeQuantStub()
89
+
90
+ def _get_name(self):
91
+ return 'QuantizableMultiheadAttention'
92
+
93
+ @classmethod
94
+ def from_float(cls, other):
95
+ assert type(other) == cls._FLOAT_MODULE
96
+ assert hasattr(other, 'qconfig'), "The float module must have 'qconfig'"
97
+ # Setting the dropout to 0.0!
98
+ observed = cls(other.embed_dim, other.num_heads, other.dropout,
99
+ (other.in_proj_bias is not None),
100
+ (other.bias_k is not None),
101
+ other.add_zero_attn, other.kdim, other.vdim,
102
+ other.batch_first)
103
+ observed.bias_k = other.bias_k
104
+ observed.bias_v = other.bias_v
105
+ observed.qconfig = other.qconfig
106
+
107
+ # Set the linear weights
108
+ # for the type: ignores, see https://github.com/pytorch/pytorch/issues/58969
109
+ observed.out_proj.weight = other.out_proj.weight # type: ignore[has-type]
110
+ observed.out_proj.bias = other.out_proj.bias # type: ignore[has-type]
111
+ if other._qkv_same_embed_dim:
112
+ # Use separate params
113
+ bias = other.in_proj_bias
114
+ _start = 0
115
+ _end = _start + other.embed_dim
116
+ weight = other.in_proj_weight[_start:_end, :]
117
+ if bias is not None:
118
+ bias = torch.nn.Parameter(bias[_start:_end], bias.requires_grad)
119
+ observed.linear_Q.weight = torch.nn.Parameter(weight,
120
+ weight.requires_grad)
121
+ observed.linear_Q.bias = bias
122
+
123
+ bias = other.in_proj_bias
124
+ _start = _end
125
+ _end = _start + other.embed_dim
126
+ weight = other.in_proj_weight[_start:_end, :]
127
+ if bias is not None:
128
+ bias = torch.nn.Parameter(bias[_start:_end], bias.requires_grad)
129
+ observed.linear_K.weight = torch.nn.Parameter(weight,
130
+ weight.requires_grad)
131
+ observed.linear_K.bias = bias
132
+
133
+ bias = other.in_proj_bias
134
+ _start = _end
135
+ weight = other.in_proj_weight[_start:, :]
136
+ if bias is not None:
137
+ bias = torch.nn.Parameter(bias[_start:], bias.requires_grad)
138
+ observed.linear_V.weight = torch.nn.Parameter(weight,
139
+ weight.requires_grad)
140
+ observed.linear_V.bias = bias
141
+ else:
142
+ observed.linear_Q.weight = nn.Parameter(other.q_proj_weight)
143
+ observed.linear_K.weight = nn.Parameter(other.k_proj_weight)
144
+ observed.linear_V.weight = nn.Parameter(other.v_proj_weight)
145
+ if other.in_proj_bias is None:
146
+ observed.linear_Q.bias = None # type: ignore[assignment]
147
+ observed.linear_K.bias = None # type: ignore[assignment]
148
+ observed.linear_V.bias = None # type: ignore[assignment]
149
+ else:
150
+ observed.linear_Q.bias = nn.Parameter(other.in_proj_bias[0:other.embed_dim])
151
+ observed.linear_K.bias = nn.Parameter(other.in_proj_bias[other.embed_dim:(other.embed_dim * 2)])
152
+ observed.linear_V.bias = nn.Parameter(other.in_proj_bias[(other.embed_dim * 2):])
153
+ observed.eval()
154
+ # Explicit prepare
155
+ observed = torch.ao.quantization.prepare(observed, inplace=True)
156
+ return observed
157
+
158
+ @torch.jit.unused
159
+ def dequantize(self):
160
+ r"""Utility to convert the quantized MHA back to float.
161
+
162
+ The motivation for this is that it is not trivial to conver the weights
163
+ from the format that is used in the quantized version back to the
164
+ float.
165
+ """
166
+ fp = self._FLOAT_MODULE(self.embed_dim, self.num_heads, self.dropout,
167
+ (self.linear_Q._weight_bias()[1] is not None),
168
+ (self.bias_k is not None),
169
+ self.add_zero_attn, self.kdim, self.vdim, self.batch_first)
170
+ assert fp._qkv_same_embed_dim == self._qkv_same_embed_dim
171
+ if self.bias_k is not None:
172
+ fp.bias_k = nn.Parameter(self.bias_k.dequantize())
173
+ if self.bias_v is not None:
174
+ fp.bias_v = nn.Parameter(self.bias_v.dequantize())
175
+
176
+ # Set the linear weights
177
+ # Note: Because the linear layers are quantized, mypy does not nkow how
178
+ # to deal with them -- might need to ignore the typing checks.
179
+ # for the type: ignore[has-type], see https://github.com/pytorch/pytorch/issues/58969
180
+ w, b = self.out_proj._weight_bias() # type: ignore[operator, has-type]
181
+ fp.out_proj.weight = nn.Parameter(w.dequantize())
182
+ if b is not None:
183
+ fp.out_proj.bias = nn.Parameter(b)
184
+
185
+ wQ, bQ = self.linear_Q._weight_bias() # type: ignore[operator]
186
+ wQ = wQ.dequantize()
187
+ wK, bK = self.linear_K._weight_bias() # type: ignore[operator]
188
+ wK = wK.dequantize()
189
+ wV, bV = self.linear_V._weight_bias() # type: ignore[operator]
190
+ wV = wV.dequantize()
191
+ if fp._qkv_same_embed_dim:
192
+ # Use separate params
193
+ _start = 0
194
+ _end = _start + fp.embed_dim
195
+ fp.in_proj_weight[_start:_end, :] = wQ
196
+ if fp.in_proj_bias is not None:
197
+ assert all(bQ == 0)
198
+ fp.in_proj_bias[_start:_end] = bQ
199
+
200
+ _start = _end
201
+ _end = _start + fp.embed_dim
202
+ fp.in_proj_weight[_start:_end, :] = wK
203
+ if fp.in_proj_bias is not None:
204
+ assert all(bK == 0)
205
+ fp.in_proj_bias[_start:_end] = bK
206
+
207
+ _start = _end
208
+ fp.in_proj_weight[_start:, :] = wV
209
+ if fp.in_proj_bias is not None:
210
+ assert all(bV == 0)
211
+ fp.in_proj_bias[_start:] = bV
212
+ else:
213
+ fp.q_proj_weight = nn.Parameter(wQ)
214
+ fp.k_proj_weight = nn.Parameter(wK)
215
+ fp.v_proj_weight = nn.Parameter(wV)
216
+ if fp.in_proj_bias is None:
217
+ self.linear_Q.bias = None
218
+ self.linear_K.bias = None
219
+ self.linear_V.bias = None
220
+ else:
221
+ fp.in_proj_bias[0:fp.embed_dim] = bQ
222
+ fp.in_proj_bias[fp.embed_dim:(fp.embed_dim * 2)] = bK
223
+ fp.in_proj_bias[(fp.embed_dim * 2):] = bV
224
+
225
+ return fp
226
+
227
+
228
+ @classmethod
229
+ def from_observed(cls, other):
230
+ # The whole flow is float -> observed -> quantized
231
+ # This class does float -> observed only
232
+ # See nn.quantized.MultiheadAttention
233
+ raise NotImplementedError("It looks like you are trying to prepare an "
234
+ "MHA module. Please, see "
235
+ "the examples on quantizable MHAs.")
236
+
237
+ def forward(self,
238
+ query: Tensor,
239
+ key: Tensor,
240
+ value: Tensor,
241
+ key_padding_mask: Optional[Tensor] = None,
242
+ need_weights: bool = True,
243
+ attn_mask: Optional[Tensor] = None,
244
+ average_attn_weights: bool = True,
245
+ is_causal: bool = False) -> Tuple[Tensor, Optional[Tensor]]:
246
+ r"""
247
+ Note::
248
+ Please, refer to :func:`~torch.nn.MultiheadAttention.forward` for more
249
+ information
250
+
251
+ Args:
252
+ query, key, value: map a query and a set of key-value pairs to an output.
253
+ See "Attention Is All You Need" for more details.
254
+ key_padding_mask: if provided, specified padding elements in the key will
255
+ be ignored by the attention. When given a binary mask and a value is True,
256
+ the corresponding value on the attention layer will be ignored.
257
+ need_weights: output attn_output_weights.
258
+ attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
259
+ the batches while a 3D mask allows to specify a different mask for the entries of each batch.
260
+
261
+ Shape:
262
+ - Inputs:
263
+ - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
264
+ the embedding dimension. :math:`(N, L, E)` if ``batch_first`` is ``True``.
265
+ - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
266
+ the embedding dimension. :math:`(N, S, E)` if ``batch_first`` is ``True``.
267
+ - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
268
+ the embedding dimension. :math:`(N, S, E)` if ``batch_first`` is ``True``.
269
+ - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
270
+ If a BoolTensor is provided, the positions with the
271
+ value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
272
+ - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
273
+ 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
274
+ S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked
275
+ positions. If a BoolTensor is provided, positions with ``True``
276
+ is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
277
+ is provided, it will be added to the attention weight.
278
+ - is_causal: If specified, applies a causal mask as attention mask. Mutually exclusive with providing attn_mask.
279
+ Default: ``False``.
280
+ - average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
281
+ heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
282
+ effect when ``need_weights=True.``. Default: True (i.e. average weights across heads)
283
+
284
+ - Outputs:
285
+ - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
286
+ E is the embedding dimension. :math:`(N, L, E)` if ``batch_first`` is ``True``.
287
+ - attn_output_weights: If ``average_attn_weights=True``, returns attention weights averaged
288
+ across heads of shape :math:`(N, L, S)`, where N is the batch size, L is the target sequence length,
289
+ S is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
290
+ head of shape :math:`(N, num_heads, L, S)`.
291
+ """
292
+ return self._forward_impl(query, key, value, key_padding_mask,
293
+ need_weights, attn_mask, average_attn_weights,
294
+ is_causal)
295
+
296
+ def _forward_impl(self,
297
+ query: Tensor,
298
+ key: Tensor,
299
+ value: Tensor,
300
+ key_padding_mask: Optional[Tensor] = None,
301
+ need_weights: bool = True,
302
+ attn_mask: Optional[Tensor] = None,
303
+ average_attn_weights: bool = True,
304
+ is_causal: bool = False) -> Tuple[Tensor, Optional[Tensor]]:
305
+ # This version will not deal with the static key/value pairs.
306
+ # Keeping it here for future changes.
307
+ #
308
+ # TODO: This method has some duplicate lines with the
309
+ # `torch.nn.functional.multi_head_attention`. Will need to refactor.
310
+ static_k = None
311
+ static_v = None
312
+
313
+ if attn_mask is not None and is_causal:
314
+ raise AssertionError("Only allow causal mask or attn_mask")
315
+
316
+ if is_causal:
317
+ raise AssertionError("causal mask not supported by AO MHA module")
318
+
319
+ if self.batch_first:
320
+ query, key, value = (x.transpose(0, 1) for x in (query, key, value))
321
+
322
+ tgt_len, bsz, embed_dim_to_check = query.size()
323
+ assert self.embed_dim == embed_dim_to_check
324
+ # allow MHA to have different sizes for the feature dimension
325
+ assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
326
+
327
+ head_dim = self.embed_dim // self.num_heads
328
+ assert head_dim * self.num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
329
+ scaling = float(head_dim) ** -0.5
330
+
331
+ q = self.linear_Q(query)
332
+ k = self.linear_K(key)
333
+ v = self.linear_V(value)
334
+
335
+ q = self.q_scaling_product.mul_scalar(q, scaling)
336
+
337
+ if attn_mask is not None:
338
+ if attn_mask.dtype == torch.uint8:
339
+ warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
340
+ attn_mask = attn_mask.to(torch.bool)
341
+ assert attn_mask.is_floating_point() or attn_mask.dtype == torch.bool, \
342
+ f'Only float and bool types are supported for attn_mask, not {attn_mask.dtype}'
343
+
344
+ if attn_mask.dim() == 2:
345
+ attn_mask = attn_mask.unsqueeze(0)
346
+ if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
347
+ raise RuntimeError('The size of the 2D attn_mask is not correct.')
348
+ elif attn_mask.dim() == 3:
349
+ if list(attn_mask.size()) != [bsz * self.num_heads, query.size(0), key.size(0)]:
350
+ raise RuntimeError('The size of the 3D attn_mask is not correct.')
351
+ else:
352
+ raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
353
+ # attn_mask's dim is 3 now.
354
+
355
+ # convert ByteTensor key_padding_mask to bool
356
+ if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
357
+ warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
358
+ key_padding_mask = key_padding_mask.to(torch.bool)
359
+ if self.bias_k is not None and self.bias_v is not None:
360
+ if static_k is None and static_v is None:
361
+
362
+ # Explicitly assert that bias_k and bias_v are not None
363
+ # in a way that TorchScript can understand.
364
+ bias_k = self.bias_k
365
+ assert bias_k is not None
366
+ bias_v = self.bias_v
367
+ assert bias_v is not None
368
+
369
+ k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
370
+ v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
371
+ if attn_mask is not None:
372
+ attn_mask = nnF.pad(attn_mask, (0, 1))
373
+ if key_padding_mask is not None:
374
+ key_padding_mask = nnF.pad(key_padding_mask, (0, 1))
375
+ else:
376
+ assert static_k is None, "bias cannot be added to static key."
377
+ assert static_v is None, "bias cannot be added to static value."
378
+ else:
379
+ assert self.bias_k is None
380
+ assert self.bias_v is None
381
+
382
+ q = q.contiguous().view(tgt_len, bsz * self.num_heads, head_dim).transpose(0, 1)
383
+ if k is not None:
384
+ k = k.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(0, 1)
385
+ if v is not None:
386
+ v = v.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(0, 1)
387
+
388
+ if static_k is not None:
389
+ assert static_k.size(0) == bsz * self.num_heads
390
+ assert static_k.size(2) == head_dim
391
+ k = static_k
392
+
393
+ if static_v is not None:
394
+ assert static_v.size(0) == bsz * self.num_heads
395
+ assert static_v.size(2) == head_dim
396
+ v = static_v
397
+
398
+ src_len = k.size(1)
399
+
400
+ if key_padding_mask is not None:
401
+ assert key_padding_mask.size(0) == bsz
402
+ assert key_padding_mask.size(1) == src_len
403
+
404
+ if self.add_zero_attn:
405
+ src_len += 1
406
+ k_zeros = torch.zeros((k.size(0), 1) + k.size()[2:])
407
+ if k.is_quantized:
408
+ k_zeros = torch.quantize_per_tensor(k_zeros, k.q_scale(), k.q_zero_point(), k.dtype)
409
+ k = torch.cat([k, k_zeros], dim=1)
410
+ v_zeros = torch.zeros((v.size(0), 1) + k.size()[2:])
411
+ if v.is_quantized:
412
+ v_zeros = torch.quantize_per_tensor(v_zeros, v.q_scale(), v.q_zero_point(), v.dtype)
413
+ v = torch.cat([v, v_zeros], dim=1)
414
+
415
+ if attn_mask is not None:
416
+ attn_mask = nnF.pad(attn_mask, (0, 1))
417
+ if key_padding_mask is not None:
418
+ key_padding_mask = nnF.pad(key_padding_mask, (0, 1))
419
+
420
+ # Leaving the quantized zone here
421
+ q = self.dequant_q(q)
422
+ k = self.dequant_k(k)
423
+ v = self.dequant_v(v)
424
+ attn_output_weights = torch.bmm(q, k.transpose(1, 2))
425
+ assert list(attn_output_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
426
+
427
+ if attn_mask is not None:
428
+ if attn_mask.dtype == torch.bool:
429
+ attn_output_weights.masked_fill_(attn_mask, float('-inf'))
430
+ else:
431
+ attn_output_weights += attn_mask
432
+
433
+ if key_padding_mask is not None:
434
+ attn_output_weights = attn_output_weights.view(bsz, self.num_heads, tgt_len, src_len)
435
+ attn_output_weights = attn_output_weights.masked_fill(
436
+ key_padding_mask.unsqueeze(1).unsqueeze(2),
437
+ float('-inf'),
438
+ )
439
+ attn_output_weights = attn_output_weights.view(bsz * self.num_heads, tgt_len, src_len)
440
+
441
+ attn_output_weights = nnF.softmax(
442
+ attn_output_weights, dim=-1)
443
+ attn_output_weights = nnF.dropout(attn_output_weights, p=self.dropout, training=self.training)
444
+
445
+ attn_output = torch.bmm(attn_output_weights, v)
446
+ assert list(attn_output.size()) == [bsz * self.num_heads, tgt_len, head_dim]
447
+ if self.batch_first:
448
+ attn_output = attn_output.view(bsz, tgt_len, self.embed_dim)
449
+ else:
450
+ attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim)
451
+
452
+ # Reentering the quantized zone
453
+ attn_output = self.quant_attn_output(attn_output)
454
+ # for the type: ignore[has-type], see https://github.com/pytorch/pytorch/issues/58969
455
+ attn_output = self.out_proj(attn_output) # type: ignore[has-type]
456
+ attn_output_weights = self.quant_attn_output_weights(attn_output_weights)
457
+
458
+ if need_weights:
459
+ # average attention weights over heads
460
+ attn_output_weights = attn_output_weights.view(bsz, self.num_heads, tgt_len, src_len)
461
+ if average_attn_weights:
462
+ attn_output_weights = attn_output_weights.mean(dim=1)
463
+ return attn_output, attn_output_weights
464
+ else:
465
+ return attn_output, None
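As the from_float method above shows, the quantizable MultiheadAttention copies the float module's projection weights into explicit Linear layers and calls torch.ao.quantization.prepare on the result, so the returned module is already "observed". A direct-call sketch of that step (not part of this commit; the eager-mode custom-module flow reaches the same method through its config mappings):

    import torch
    import torch.nn as nn
    import torch.ao.nn.quantizable as nnqa

    float_mha = nn.MultiheadAttention(embed_dim=64, num_heads=4)
    float_mha.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")

    observed = nnqa.MultiheadAttention.from_float(float_mha)
    q = k = v = torch.randn(5, 2, 64)        # (seq, batch, embed) since batch_first=False
    attn_out, attn_weights = observed(q, k, v)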
venv/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/rnn.py ADDED
@@ -0,0 +1,411 @@
1
+ import numbers
2
+ from typing import Optional, Tuple
3
+ import warnings
4
+
5
+ import torch
6
+ from torch import Tensor
7
+
8
+ """
9
+ We will recreate all the RNN modules as we require the modules to be decomposed
10
+ into its building blocks to be able to observe.
11
+ """
12
+
13
+ __all__ = [
14
+ "LSTMCell",
15
+ "LSTM"
16
+ ]
17
+
18
+ class LSTMCell(torch.nn.Module):
19
+ r"""A quantizable long short-term memory (LSTM) cell.
20
+
21
+ For the description and the argument types, please, refer to :class:`~torch.nn.LSTMCell`
22
+
23
+ Examples::
24
+
25
+ >>> import torch.ao.nn.quantizable as nnqa
26
+ >>> rnn = nnqa.LSTMCell(10, 20)
27
+ >>> input = torch.randn(6, 10)
28
+ >>> hx = torch.randn(3, 20)
29
+ >>> cx = torch.randn(3, 20)
30
+ >>> output = []
31
+ >>> for i in range(6):
32
+ ... hx, cx = rnn(input[i], (hx, cx))
33
+ ... output.append(hx)
34
+ """
35
+    _FLOAT_MODULE = torch.nn.LSTMCell
+
+    def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
+                 device=None, dtype=None) -> None:
+        factory_kwargs = {'device': device, 'dtype': dtype}
+        super().__init__()
+        self.input_size = input_dim
+        self.hidden_size = hidden_dim
+        self.bias = bias
+
+        self.igates = torch.nn.Linear(input_dim, 4 * hidden_dim, bias=bias, **factory_kwargs)
+        self.hgates = torch.nn.Linear(hidden_dim, 4 * hidden_dim, bias=bias, **factory_kwargs)
+        self.gates = torch.ao.nn.quantized.FloatFunctional()
+
+        self.input_gate = torch.nn.Sigmoid()
+        self.forget_gate = torch.nn.Sigmoid()
+        self.cell_gate = torch.nn.Tanh()
+        self.output_gate = torch.nn.Sigmoid()
+
+        self.fgate_cx = torch.ao.nn.quantized.FloatFunctional()
+        self.igate_cgate = torch.ao.nn.quantized.FloatFunctional()
+        self.fgate_cx_igate_cgate = torch.ao.nn.quantized.FloatFunctional()
+
+        self.ogate_cy = torch.ao.nn.quantized.FloatFunctional()
+
+        self.initial_hidden_state_qparams: Tuple[float, int] = (1.0, 0)
+        self.initial_cell_state_qparams: Tuple[float, int] = (1.0, 0)
+        self.hidden_state_dtype: torch.dtype = torch.quint8
+        self.cell_state_dtype: torch.dtype = torch.quint8
+
+    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
+        if hidden is None or hidden[0] is None or hidden[1] is None:
+            hidden = self.initialize_hidden(x.shape[0], x.is_quantized)
+        hx, cx = hidden
+
+        igates = self.igates(x)
+        hgates = self.hgates(hx)
+        gates = self.gates.add(igates, hgates)
+
+        input_gate, forget_gate, cell_gate, out_gate = gates.chunk(4, 1)
+
+        input_gate = self.input_gate(input_gate)
+        forget_gate = self.forget_gate(forget_gate)
+        cell_gate = self.cell_gate(cell_gate)
+        out_gate = self.output_gate(out_gate)
+
+        fgate_cx = self.fgate_cx.mul(forget_gate, cx)
+        igate_cgate = self.igate_cgate.mul(input_gate, cell_gate)
+        fgate_cx_igate_cgate = self.fgate_cx_igate_cgate.add(fgate_cx, igate_cgate)
+        cy = fgate_cx_igate_cgate
+
+        # TODO: make this tanh a member of the module so its qparams can be configured
+        tanh_cy = torch.tanh(cy)
+        hy = self.ogate_cy.mul(out_gate, tanh_cy)
+        return hy, cy
+
+    def initialize_hidden(self, batch_size: int, is_quantized: bool = False) -> Tuple[Tensor, Tensor]:
+        h, c = torch.zeros((batch_size, self.hidden_size)), torch.zeros((batch_size, self.hidden_size))
+        if is_quantized:
+            (h_scale, h_zp) = self.initial_hidden_state_qparams
+            (c_scale, c_zp) = self.initial_cell_state_qparams
+            h = torch.quantize_per_tensor(h, scale=h_scale, zero_point=h_zp, dtype=self.hidden_state_dtype)
+            c = torch.quantize_per_tensor(c, scale=c_scale, zero_point=c_zp, dtype=self.cell_state_dtype)
+        return h, c
+
+    def _get_name(self):
+        return 'QuantizableLSTMCell'
+
+    @classmethod
+    def from_params(cls, wi, wh, bi=None, bh=None):
+        """Uses the weights and biases to create a new LSTM cell.
+
+        Args:
+            wi, wh: Weights for the input and hidden layers
+            bi, bh: Biases for the input and hidden layers
+        """
+        assert (bi is None) == (bh is None)  # Either both None or both have values
+        input_size = wi.shape[1]
+        hidden_size = wh.shape[1]
+        cell = cls(input_dim=input_size, hidden_dim=hidden_size,
+                   bias=(bi is not None))
+        cell.igates.weight = torch.nn.Parameter(wi)
+        if bi is not None:
+            cell.igates.bias = torch.nn.Parameter(bi)
+        cell.hgates.weight = torch.nn.Parameter(wh)
+        if bh is not None:
+            cell.hgates.bias = torch.nn.Parameter(bh)
+        return cell
+
+    @classmethod
+    def from_float(cls, other):
+        assert type(other) == cls._FLOAT_MODULE
+        assert hasattr(other, 'qconfig'), "The float module must have 'qconfig'"
+        observed = cls.from_params(other.weight_ih, other.weight_hh,
+                                   other.bias_ih, other.bias_hh)
+        observed.qconfig = other.qconfig
+        observed.igates.qconfig = other.qconfig
+        observed.hgates.qconfig = other.qconfig
+        return observed
+
+
+class _LSTMSingleLayer(torch.nn.Module):
+    r"""A single one-directional LSTM layer.
+
+    The difference between a layer and a cell is that the layer can process a
+    sequence, while the cell only expects an instantaneous value.
+    """
+    def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
+                 device=None, dtype=None) -> None:
+        factory_kwargs = {'device': device, 'dtype': dtype}
+        super().__init__()
+        self.cell = LSTMCell(input_dim, hidden_dim, bias=bias, **factory_kwargs)
+
+    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
+        result = []
+        seq_len = x.shape[0]
+        for i in range(seq_len):
+            hidden = self.cell(x[i], hidden)
+            result.append(hidden[0])  # type: ignore[index]
+        result_tensor = torch.stack(result, 0)
+        return result_tensor, hidden
+
+    @classmethod
+    def from_params(cls, *args, **kwargs):
+        cell = LSTMCell.from_params(*args, **kwargs)
+        layer = cls(cell.input_size, cell.hidden_size, cell.bias)
+        layer.cell = cell
+        return layer
+
+
+class _LSTMLayer(torch.nn.Module):
+    r"""A single bi-directional LSTM layer."""
+    def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
+                 batch_first: bool = False, bidirectional: bool = False,
+                 device=None, dtype=None) -> None:
+        factory_kwargs = {'device': device, 'dtype': dtype}
+        super().__init__()
+        self.batch_first = batch_first
+        self.bidirectional = bidirectional
+        self.layer_fw = _LSTMSingleLayer(input_dim, hidden_dim, bias=bias, **factory_kwargs)
+        if self.bidirectional:
+            self.layer_bw = _LSTMSingleLayer(input_dim, hidden_dim, bias=bias, **factory_kwargs)
+
+    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
+        if self.batch_first:
+            x = x.transpose(0, 1)
+        if hidden is None:
+            hx_fw, cx_fw = (None, None)
+        else:
+            hx_fw, cx_fw = hidden
+        hidden_bw: Optional[Tuple[Tensor, Tensor]] = None
+        if self.bidirectional:
+            if hx_fw is None:
+                hx_bw = None
+            else:
+                hx_bw = hx_fw[1]
+                hx_fw = hx_fw[0]
+            if cx_fw is None:
+                cx_bw = None
+            else:
+                cx_bw = cx_fw[1]
+                cx_fw = cx_fw[0]
+            if hx_bw is not None and cx_bw is not None:
+                hidden_bw = hx_bw, cx_bw
+        if hx_fw is None and cx_fw is None:
+            hidden_fw = None
+        else:
+            hidden_fw = torch.jit._unwrap_optional(hx_fw), torch.jit._unwrap_optional(cx_fw)
+        result_fw, hidden_fw = self.layer_fw(x, hidden_fw)
+
+        if hasattr(self, 'layer_bw') and self.bidirectional:
+            x_reversed = x.flip(0)
+            result_bw, hidden_bw = self.layer_bw(x_reversed, hidden_bw)
+            result_bw = result_bw.flip(0)
+
+            result = torch.cat([result_fw, result_bw], result_fw.dim() - 1)
+            if hidden_fw is None and hidden_bw is None:
+                h = None
+                c = None
+            elif hidden_fw is None:
+                (h, c) = torch.jit._unwrap_optional(hidden_bw)
+            elif hidden_bw is None:
+                (h, c) = torch.jit._unwrap_optional(hidden_fw)
+            else:
+                h = torch.stack([hidden_fw[0], hidden_bw[0]], 0)  # type: ignore[list-item]
+                c = torch.stack([hidden_fw[1], hidden_bw[1]], 0)  # type: ignore[list-item]
+        else:
+            result = result_fw
+            h, c = torch.jit._unwrap_optional(hidden_fw)  # type: ignore[assignment]
+
+        if self.batch_first:
+            result.transpose_(0, 1)
+
+        return result, (h, c)
+
+    @classmethod
+    def from_float(cls, other, layer_idx=0, qconfig=None, **kwargs):
+        r"""
+        There is no FP equivalent of this class. This function is here just to
+        mimic the behavior of the `prepare` within the `torch.ao.quantization`
+        flow.
+        """
+        assert hasattr(other, 'qconfig') or (qconfig is not None)
+
+        input_size = kwargs.get('input_size', other.input_size)
+        hidden_size = kwargs.get('hidden_size', other.hidden_size)
+        bias = kwargs.get('bias', other.bias)
+        batch_first = kwargs.get('batch_first', other.batch_first)
+        bidirectional = kwargs.get('bidirectional', other.bidirectional)
+
+        layer = cls(input_size, hidden_size, bias, batch_first, bidirectional)
+        layer.qconfig = getattr(other, 'qconfig', qconfig)
+        wi = getattr(other, f'weight_ih_l{layer_idx}')
+        wh = getattr(other, f'weight_hh_l{layer_idx}')
+        bi = getattr(other, f'bias_ih_l{layer_idx}', None)
+        bh = getattr(other, f'bias_hh_l{layer_idx}', None)
+
+        layer.layer_fw = _LSTMSingleLayer.from_params(wi, wh, bi, bh)
+
+        if other.bidirectional:
+            wi = getattr(other, f'weight_ih_l{layer_idx}_reverse')
+            wh = getattr(other, f'weight_hh_l{layer_idx}_reverse')
+            bi = getattr(other, f'bias_ih_l{layer_idx}_reverse', None)
+            bh = getattr(other, f'bias_hh_l{layer_idx}_reverse', None)
+            layer.layer_bw = _LSTMSingleLayer.from_params(wi, wh, bi, bh)
+        return layer
+
+
+class LSTM(torch.nn.Module):
+    r"""A quantizable long short-term memory (LSTM).
+
+    For the description and the argument types, please, refer to :class:`~torch.nn.LSTM`
+
+    Attributes:
+        layers : instances of the `_LSTMLayer`
+
+    .. note::
+        To access the weights and biases, you need to access them per layer.
+        See examples below.
+
+    Examples::
+
+        >>> import torch.ao.nn.quantizable as nnqa
+        >>> rnn = nnqa.LSTM(10, 20, 2)
+        >>> input = torch.randn(5, 3, 10)
+        >>> h0 = torch.randn(2, 3, 20)
+        >>> c0 = torch.randn(2, 3, 20)
+        >>> output, (hn, cn) = rnn(input, (h0, c0))
+        >>> # To get the weights:
+        >>> # xdoctest: +SKIP
+        >>> print(rnn.layers[0].weight_ih)
+        tensor([[...]])
+        >>> print(rnn.layers[0].weight_hh)
+        AssertionError: There is no reverse path in the non-bidirectional layer
+    """
+    _FLOAT_MODULE = torch.nn.LSTM
+
+    def __init__(self, input_size: int, hidden_size: int,
+                 num_layers: int = 1, bias: bool = True,
+                 batch_first: bool = False, dropout: float = 0.,
+                 bidirectional: bool = False,
+                 device=None, dtype=None) -> None:
+        factory_kwargs = {'device': device, 'dtype': dtype}
+        super().__init__()
+        self.input_size = input_size
+        self.hidden_size = hidden_size
+        self.num_layers = num_layers
+        self.bias = bias
+        self.batch_first = batch_first
+        self.dropout = float(dropout)
+        self.bidirectional = bidirectional
+        self.training = False  # Default to eval mode. If we want to train, we will explicitly set to training.
+        num_directions = 2 if bidirectional else 1
+
+        if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or \
+                isinstance(dropout, bool):
+            raise ValueError("dropout should be a number in range [0, 1] "
+                             "representing the probability of an element being "
+                             "zeroed")
+        if dropout > 0:
+            warnings.warn("dropout option for quantizable LSTM is ignored. "
+                          "If you are training, please, use nn.LSTM version "
+                          "followed by `prepare` step.")
+            if num_layers == 1:
+                warnings.warn("dropout option adds dropout after all but last "
+                              "recurrent layer, so non-zero dropout expects "
+                              f"num_layers greater than 1, but got dropout={dropout} "
+                              f"and num_layers={num_layers}")
+
+        layers = [_LSTMLayer(self.input_size, self.hidden_size,
+                             self.bias, batch_first=False,
+                             bidirectional=self.bidirectional, **factory_kwargs)]
+        for layer in range(1, num_layers):
+            layers.append(_LSTMLayer(self.hidden_size, self.hidden_size,
+                                     self.bias, batch_first=False,
+                                     bidirectional=self.bidirectional,
+                                     **factory_kwargs))
+        self.layers = torch.nn.ModuleList(layers)
+
+    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
+        if self.batch_first:
+            x = x.transpose(0, 1)
+
+        max_batch_size = x.size(1)
+        num_directions = 2 if self.bidirectional else 1
+        if hidden is None:
+            zeros = torch.zeros(num_directions, max_batch_size,
+                                self.hidden_size, dtype=torch.float,
+                                device=x.device)
+            zeros.squeeze_(0)
+            if x.is_quantized:
+                zeros = torch.quantize_per_tensor(zeros, scale=1.0,
+                                                  zero_point=0, dtype=x.dtype)
+            hxcx = [(zeros, zeros) for _ in range(self.num_layers)]
+        else:
+            hidden_non_opt = torch.jit._unwrap_optional(hidden)
+            if isinstance(hidden_non_opt[0], Tensor):
+                hx = hidden_non_opt[0].reshape(self.num_layers, num_directions,
+                                               max_batch_size,
+                                               self.hidden_size)
+                cx = hidden_non_opt[1].reshape(self.num_layers, num_directions,
+                                               max_batch_size,
+                                               self.hidden_size)
+                hxcx = [(hx[idx].squeeze(0), cx[idx].squeeze(0)) for idx in range(self.num_layers)]
+            else:
+                hxcx = hidden_non_opt
+
+        hx_list = []
+        cx_list = []
+        for idx, layer in enumerate(self.layers):
+            x, (h, c) = layer(x, hxcx[idx])
+            hx_list.append(torch.jit._unwrap_optional(h))
+            cx_list.append(torch.jit._unwrap_optional(c))
+        hx_tensor = torch.stack(hx_list)
+        cx_tensor = torch.stack(cx_list)
+
+        # We are creating another dimension for bidirectional case
+        # need to collapse it
+        hx_tensor = hx_tensor.reshape(-1, hx_tensor.shape[-2], hx_tensor.shape[-1])
+        cx_tensor = cx_tensor.reshape(-1, cx_tensor.shape[-2], cx_tensor.shape[-1])
+
+        if self.batch_first:
+            x = x.transpose(0, 1)
+
+        return x, (hx_tensor, cx_tensor)
+
+    def _get_name(self):
+        return 'QuantizableLSTM'
+
+    @classmethod
+    def from_float(cls, other, qconfig=None):
+        assert isinstance(other, cls._FLOAT_MODULE)
+        assert (hasattr(other, 'qconfig') or qconfig)
+        observed = cls(other.input_size, other.hidden_size, other.num_layers,
+                       other.bias, other.batch_first, other.dropout,
+                       other.bidirectional)
+        observed.qconfig = getattr(other, 'qconfig', qconfig)
+        for idx in range(other.num_layers):
+            observed.layers[idx] = _LSTMLayer.from_float(other, idx, qconfig,
+                                                         batch_first=False)
+
+        # Prepare the model
+        if other.training:
+            observed.train()
+            observed = torch.ao.quantization.prepare_qat(observed, inplace=True)
+        else:
+            observed.eval()
+            observed = torch.ao.quantization.prepare(observed, inplace=True)
+        return observed
+
+    @classmethod
+    def from_observed(cls, other):
+        # The whole flow is float -> observed -> quantized
+        # This class does float -> observed only
+        raise NotImplementedError("It looks like you are trying to convert a "
+                                  "non-quantizable LSTM module. Please, see "
+                                  "the examples on quantizable LSTMs.")
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/__init__.py ADDED
@@ -0,0 +1,38 @@
+from . import functional
+from .modules import *  # noqa: F403
+from .modules import MaxPool2d
+
+__all__ = [
+    'BatchNorm2d',
+    'BatchNorm3d',
+    'Conv1d',
+    'Conv2d',
+    'Conv3d',
+    'ConvTranspose1d',
+    'ConvTranspose2d',
+    'ConvTranspose3d',
+    'DeQuantize',
+    'ELU',
+    'Embedding',
+    'EmbeddingBag',
+    'GroupNorm',
+    'Hardswish',
+    'InstanceNorm1d',
+    'InstanceNorm2d',
+    'InstanceNorm3d',
+    'LayerNorm',
+    'LeakyReLU',
+    'Linear',
+    'LSTM',
+    'MultiheadAttention',
+    'Quantize',
+    'ReLU6',
+    'Sigmoid',
+    'Softmax',
+    'Dropout',
+    'PReLU',
+    # Wrapper modules
+    'FloatFunctional',
+    'FXFloatFunctional',
+    'QFunctional',
+]
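Most entries in `__all__` are drop-in quantized replacements for their `torch.nn` counterparts; the wrapper modules at the end exist so that elementwise arithmetic can carry its own quantization parameters. A small illustrative sketch of that pattern (the `AddReLU` wrapper below is hypothetical, not part of this package):

    import torch

    class AddReLU(torch.nn.Module):
        """Replaces a functional `relu(a + b)` so the sum's qparams can be observed."""
        def __init__(self):
            super().__init__()
            self.ff = torch.ao.nn.quantized.FloatFunctional()

        def forward(self, a, b):
            # During prepare() an observer is attached to self.ff; after
            # convert() the FloatFunctional is replaced by a QFunctional that
            # runs the fused quantized add_relu kernel.
            return self.ff.add_relu(a, b)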
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (670 Bytes)
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/__pycache__/functional.cpython-310.pyc ADDED
Binary file (26 kB)
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/functional.py ADDED
@@ -0,0 +1,644 @@
1
+ r""" Functional interface (quantized)."""
2
+ from typing import List, Optional
3
+ import warnings
4
+
5
+ import torch
6
+ from torch import Tensor
7
+ from torch.nn.modules.utils import _pair, _triple
8
+ from torch.jit.annotations import BroadcastingList2
9
+
10
+ from .modules.utils import _pair_from_first
11
+
12
+ # Although some of the functions and docstrings are mirrored from the torch.nn,
13
+ # we want to have them here for future changes.
14
+
15
+ __all__ = [
16
+ "avg_pool2d",
17
+ "avg_pool3d",
18
+ "adaptive_avg_pool2d",
19
+ "adaptive_avg_pool3d",
20
+ "conv1d",
21
+ "conv2d",
22
+ "conv3d",
23
+ "interpolate",
24
+ "linear",
25
+ "max_pool1d",
26
+ "max_pool2d",
27
+ "celu",
28
+ "leaky_relu",
29
+ "hardtanh",
30
+ "hardswish",
31
+ "threshold",
32
+ "elu",
33
+ "hardsigmoid",
34
+ "clamp",
35
+ "upsample",
36
+ "upsample_bilinear",
37
+ "upsample_nearest",
38
+ ]
39
+
40
+ def avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False,
41
+ count_include_pad=True, divisor_override=None):
42
+ r"""
43
+ Applies 2D average-pooling operation in :math:`kH \times kW` regions by step size
44
+ :math:`sH \times sW` steps. The number of output features is equal to the number of
45
+ input planes.
46
+
47
+ .. note:: The input quantization parameters propagate to the output.
48
+
49
+ See :class:`~torch.ao.nn.quantized.AvgPool2d` for details and output shape.
50
+
51
+ Args:
52
+ input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
53
+ kernel_size: size of the pooling region. Can be a single number or a
54
+ tuple `(kH, kW)`
55
+ stride: stride of the pooling operation. Can be a single number or a
56
+ tuple `(sH, sW)`. Default: :attr:`kernel_size`
57
+ padding: implicit zero paddings on both sides of the input. Can be a
58
+ single number or a tuple `(padH, padW)`. Default: 0
59
+ ceil_mode: when True, will use `ceil` instead of `floor` in the formula
60
+ to compute the output shape. Default: ``False``
61
+ count_include_pad: when True, will include the zero-padding in the
62
+ averaging calculation. Default: ``True``
63
+ divisor_override: if specified, it will be used as divisor, otherwise
64
+ size of the pooling region will be used. Default: None
65
+ """
66
+ if not input.is_quantized:
67
+ raise ValueError("Input to 'quantized.avg_pool2d' must be quantized!")
68
+ return torch.nn.functional.avg_pool2d(input, kernel_size, stride, padding,
69
+ ceil_mode, count_include_pad,
70
+ divisor_override)
71
+
72
+ def avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False,
73
+ count_include_pad=True, divisor_override=None):
74
+ r"""
75
+ Applies 3D average-pooling operation in :math:`kD \times kH \times kW` regions by step size
76
+ :math:`sD \times sH \times sW` steps. The number of output features is equal to the number of
77
+ input planes.
78
+
79
+ .. note:: The input quantization parameters propagate to the output.
80
+
81
+ Args:
82
+ input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iD , iH , iW)`
83
+ kernel_size: size of the pooling region. Can be a single number or a
84
+ tuple `(kD, kH, kW)`
85
+ stride: stride of the pooling operation. Can be a single number or a
86
+ tuple `(sD, sH, sW)`. Default: :attr:`kernel_size`
87
+ padding: implicit zero paddings on both sides of the input. Can be a
88
+ single number or a tuple `(padD, padH, padW)`. Default: 0
89
+ ceil_mode: when True, will use `ceil` instead of `floor` in the formula
90
+ to compute the output shape. Default: ``False``
91
+ count_include_pad: when True, will include the zero-padding in the
92
+ averaging calculation. Default: ``True``
93
+ divisor_override: if specified, it will be used as divisor, otherwise
94
+ size of the pooling region will be used. Default: None
95
+ """
96
+ if not input.is_quantized:
97
+ raise ValueError("Input to 'quantized.avg_pool3d' must be quantized!")
98
+ return torch.nn.functional.avg_pool3d(input, kernel_size, stride, padding,
99
+ ceil_mode, count_include_pad,
100
+ divisor_override)
101
+
102
+ def adaptive_avg_pool2d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
103
+ r"""
104
+ Applies a 2D adaptive average pooling over a quantized input signal composed
105
+ of several quantized input planes.
106
+
107
+ .. note:: The input quantization parameters propagate to the output.
108
+
109
+ See :class:`~torch.ao.nn.quantized.AdaptiveAvgPool2d` for details and output shape.
110
+
111
+ Args:
112
+ output_size: the target output size (single integer or
113
+ double-integer tuple)
114
+ """
115
+ if not input.is_quantized:
116
+ raise ValueError("Input to 'quantized.functional.adaptive_avg_pool2d' must be quantized!")
117
+ return torch.nn.functional.adaptive_avg_pool2d(input, output_size)
118
+
119
+ def adaptive_avg_pool3d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
120
+ r"""
121
+ Applies a 3D adaptive average pooling over a quantized input signal composed
122
+ of several quantized input planes.
123
+
124
+ .. note:: The input quantization parameters propagate to the output.
125
+
126
+ See :class:`~torch.ao.nn.quantized.AdaptiveAvgPool3d` for details and output shape.
127
+
128
+ Args:
129
+ output_size: the target output size (single integer or
130
+ double-integer tuple)
131
+ """
132
+ if not input.is_quantized:
133
+ raise ValueError(
134
+ "Input to 'quantized.functional.adaptive_avg_pool3d' must be quantized!")
135
+ return torch.nn.functional.adaptive_avg_pool3d(input, output_size)
136
+
137
+ def conv1d(input, weight, bias,
138
+ stride=1, padding=0, dilation=1, groups=1,
139
+ padding_mode='zeros',
140
+ scale=1.0, zero_point=0,
141
+ dtype=torch.quint8):
142
+ r"""
143
+ Applies a 1D convolution over a quantized 1D input composed of several input
144
+ planes.
145
+
146
+ See :class:`~torch.ao.nn.quantized.Conv1d` for details and output shape.
147
+
148
+ Args:
149
+ input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
150
+ weight: quantized filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , iW)`
151
+ bias: **non-quantized** bias tensor of shape :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
152
+ stride: the stride of the convolving kernel. Can be a single number or a
153
+ tuple `(sW,)`. Default: 1
154
+ padding: implicit paddings on both sides of the input. Can be a
155
+ single number or a tuple `(padW,)`. Default: 0
156
+ dilation: the spacing between kernel elements. Can be a single number or
157
+ a tuple `(dW,)`. Default: 1
158
+ groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
159
+ number of groups. Default: 1
160
+ padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
161
+ scale: quantization scale for the output. Default: 1.0
162
+ zero_point: quantization zero_point for the output. Default: 0
163
+ dtype: quantization data type to use. Default: ``torch.quint8``
164
+
165
+ Examples::
166
+
167
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
168
+ >>> from torch.ao.nn.quantized import functional as qF
169
+ >>> filters = torch.randn(33, 16, 3, dtype=torch.float)
170
+ >>> inputs = torch.randn(20, 16, 50, dtype=torch.float)
171
+ >>> bias = torch.randn(33, dtype=torch.float)
172
+ >>>
173
+ >>> scale, zero_point = 1.0, 0
174
+ >>> dtype_inputs = torch.quint8
175
+ >>> dtype_filters = torch.qint8
176
+ >>>
177
+ >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
178
+ >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
179
+ >>> qF.conv1d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
180
+ """ # noqa: E501
181
+ if padding_mode != 'zeros':
182
+ raise NotImplementedError("Only zero-padding is supported!")
183
+ if input.dtype != torch.quint8:
184
+ raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
185
+ if weight.dtype != torch.qint8:
186
+ raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
187
+ if input.ndim != 3:
188
+ raise ValueError("Input shape must be `(N, C, L)`!")
189
+ stride = _pair_from_first(stride)
190
+ padding = _pair_from_first(padding)
191
+ dilation = _pair_from_first(dilation)
192
+
193
+ packed_params = torch.ops.quantized.conv1d_prepack(
194
+ weight, bias, stride, padding, dilation, groups)
195
+ return torch.ops.quantized.conv1d(input, packed_params, scale, zero_point)
196
+
197
+ def conv2d(input, weight, bias,
198
+ stride=1, padding=0, dilation=1, groups=1,
199
+ padding_mode='zeros',
200
+ scale=1.0, zero_point=0,
201
+ dtype=torch.quint8):
202
+ r"""
203
+ Applies a 2D convolution over a quantized 2D input composed of several input
204
+ planes.
205
+
206
+ See :class:`~torch.ao.nn.quantized.Conv2d` for details and output shape.
207
+
208
+ Args:
209
+ input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
210
+ weight: quantized filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)`
211
+ bias: **non-quantized** bias tensor of shape :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
212
+ stride: the stride of the convolving kernel. Can be a single number or a
213
+ tuple `(sH, sW)`. Default: 1
214
+ padding: implicit paddings on both sides of the input. Can be a
215
+ single number or a tuple `(padH, padW)`. Default: 0
216
+ dilation: the spacing between kernel elements. Can be a single number or
217
+ a tuple `(dH, dW)`. Default: 1
218
+ groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
219
+ number of groups. Default: 1
220
+ padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
221
+ scale: quantization scale for the output. Default: 1.0
222
+ zero_point: quantization zero_point for the output. Default: 0
223
+ dtype: quantization data type to use. Default: ``torch.quint8``
224
+
225
+ Examples::
226
+
227
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
228
+ >>> from torch.ao.nn.quantized import functional as qF
229
+ >>> filters = torch.randn(8, 4, 3, 3, dtype=torch.float)
230
+ >>> inputs = torch.randn(1, 4, 5, 5, dtype=torch.float)
231
+ >>> bias = torch.randn(8, dtype=torch.float)
232
+ >>>
233
+ >>> scale, zero_point = 1.0, 0
234
+ >>> dtype_inputs = torch.quint8
235
+ >>> dtype_filters = torch.qint8
236
+ >>>
237
+ >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
238
+ >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
239
+ >>> qF.conv2d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
240
+ """ # noqa: E501
241
+ if padding_mode != 'zeros':
242
+ raise NotImplementedError("Only zero-padding is supported!")
243
+ if input.dtype != torch.quint8:
244
+ raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
245
+ if weight.dtype != torch.qint8:
246
+ raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
247
+ if input.ndim != 4:
248
+ raise ValueError("Input shape must be `(N, C, H, W)`!")
249
+ stride = _pair(stride)
250
+ padding = _pair(padding)
251
+ dilation = _pair(dilation)
252
+
253
+ packed_params = torch.ops.quantized.conv2d_prepack(
254
+ weight, bias, stride, padding, dilation, groups)
255
+ return torch.ops.quantized.conv2d(input, packed_params, scale, zero_point)
256
+
257
+ def conv3d(input, weight, bias, stride=1, padding=0, dilation=1, groups=1,
258
+ padding_mode='zeros', scale=1.0, zero_point=0, dtype=torch.quint8):
259
+ r"""
260
+ Applies a 3D convolution over a quantized 3D input composed of several input
261
+ planes.
262
+
263
+ See :class:`~torch.ao.nn.quantized.Conv3d` for details and output shape.
264
+
265
+ Args:
266
+ input: quantized input tensor of shape
267
+ :math:`(\text{minibatch} , \text{in\_channels} , iD , iH , iW)`
268
+ weight: quantized filters of shape
269
+ :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kD , kH , kW)`
270
+ bias: **non-quantized** bias tensor of shape
271
+ :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
272
+ stride: the stride of the convolving kernel. Can be a single number or a
273
+ tuple `(sD, sH, sW)`. Default: 1
274
+ padding: implicit paddings on both sides of the input. Can be a
275
+ single number or a tuple `(padD, padH, padW)`. Default: 0
276
+ dilation: the spacing between kernel elements. Can be a single number or
277
+ a tuple `(dD, dH, dW)`. Default: 1
278
+ groups: split input into groups, :math:`\text{in\_channels}` should be
279
+ divisible by the number of groups. Default: 1
280
+ padding_mode: the padding mode to use. Only "zeros" is supported for
281
+ quantized convolution at the moment. Default: "zeros"
282
+ scale: quantization scale for the output. Default: 1.0
283
+ zero_point: quantization zero_point for the output. Default: 0
284
+ dtype: quantization data type to use. Default: ``torch.quint8``
285
+
286
+ Examples::
287
+
288
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
289
+ >>> from torch.ao.nn.quantized import functional as qF
290
+ >>> filters = torch.randn(8, 4, 3, 3, 3, dtype=torch.float)
291
+ >>> inputs = torch.randn(1, 4, 5, 5, 5, dtype=torch.float)
292
+ >>> bias = torch.randn(8, dtype=torch.float)
293
+ >>>
294
+ >>> scale, zero_point = 1.0, 0
295
+ >>> dtype_inputs = torch.quint8
296
+ >>> dtype_filters = torch.qint8
297
+ >>>
298
+ >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
299
+ >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
300
+ >>> qF.conv3d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
301
+ """ # noqa: E501
302
+ if padding_mode != 'zeros':
303
+ raise NotImplementedError("Only zero-padding is supported!")
304
+ if input.dtype != torch.quint8:
305
+ raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
306
+ if weight.dtype != torch.qint8:
307
+ raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
308
+ if input.ndim != 5:
309
+ raise ValueError("Input shape must be `(N, C, D, H, W)`!")
310
+ stride = _triple(stride)
311
+ padding = _triple(padding)
312
+ dilation = _triple(dilation)
313
+
314
+ packed_params = torch.ops.quantized.conv3d_prepack(
315
+ weight, bias, stride, padding, dilation, groups)
316
+ return torch.ops.quantized.conv3d(input, packed_params, scale, zero_point)
317
+
318
+ def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
319
+ r"""Down/up samples the input to either the given :attr:`size` or the given
320
+ :attr:`scale_factor`
321
+
322
+ See :func:`torch.nn.functional.interpolate` for implementation details.
323
+
324
+ The input dimensions are interpreted in the form:
325
+ `mini-batch x channels x [optional depth] x [optional height] x width`.
326
+
327
+ .. note:: The input quantization parameters propagate to the output.
328
+
329
+ .. note:: Only 2D/3D input is supported for quantized inputs
330
+
331
+ .. note:: Only the following modes are supported for the quantized inputs:
332
+
333
+ - `bilinear`
334
+ - `nearest`
335
+
336
+ Args:
337
+ input (Tensor): the input tensor
338
+ size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
339
+ output spatial size.
340
+ scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match input size if it is a tuple.
341
+ mode (str): algorithm used for upsampling:
342
+ ``'nearest'`` | ``'bilinear'``
343
+ align_corners (bool, optional): Geometrically, we consider the pixels of the
344
+ input and output as squares rather than points.
345
+ If set to ``True``, the input and output tensors are aligned by the
346
+ center points of their corner pixels, preserving the values at the corner pixels.
347
+ If set to ``False``, the input and output tensors are aligned by the corner
348
+ points of their corner pixels, and the interpolation uses edge value padding
349
+ for out-of-boundary values, making this operation *independent* of input size
350
+ when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
351
+ is ``'bilinear'``.
352
+ Default: ``False``
353
+ """
354
+ if not input.is_quantized:
355
+ raise ValueError("Input to 'quantized.interpolate' must be quantized!")
356
+ return torch.nn.functional.interpolate(input, size, scale_factor, mode,
357
+ align_corners)
358
+
359
+ def linear(
360
+ input: Tensor, weight: Tensor, bias: Optional[Tensor] = None,
361
+ scale: Optional[float] = None, zero_point: Optional[int] = None
362
+ ) -> Tensor:
363
+ r"""
364
+ Applies a linear transformation to the incoming quantized data:
365
+ :math:`y = xA^T + b`.
366
+ See :class:`~torch.ao.nn.quantized.Linear`
367
+
368
+ .. note::
369
+
370
+ Current implementation packs weights on every call, which has penalty on performance.
371
+ If you want to avoid the overhead, use :class:`~torch.ao.nn.quantized.Linear`.
372
+
373
+ Args:
374
+ input (Tensor): Quantized input of type `torch.quint8`
375
+ weight (Tensor): Quantized weight of type `torch.qint8`
376
+ bias (Tensor): None or fp32 bias of type `torch.float`
377
+ scale (double): output scale. If None, derived from the input scale
378
+ zero_point (long): output zero point. If None, derived from the input zero_point
379
+
380
+ Shape:
381
+ - Input: :math:`(N, *, in\_features)` where `*` means any number of
382
+ additional dimensions
383
+ - Weight: :math:`(out\_features, in\_features)`
384
+ - Bias: :math:`(out\_features)`
385
+ - Output: :math:`(N, *, out\_features)`
386
+ """
387
+ if scale is None:
388
+ scale = input.q_scale()
389
+ if zero_point is None:
390
+ zero_point = input.q_zero_point()
391
+ _packed_params = torch.ops.quantized.linear_prepack(weight, bias)
392
+ return torch.ops.quantized.linear(input, _packed_params, scale, zero_point)
393
+
394
+ def max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1,
395
+ ceil_mode=False, return_indices=False):
396
+ r"""Applies a 1D max pooling over a quantized input signal composed of
397
+ several quantized input planes.
398
+
399
+ .. note:: The input quantization parameters are propagated to the output.
400
+
401
+ See :class:`~torch.ao.nn.quantized.MaxPool1d` for details.
402
+ """
403
+ if return_indices:
404
+ raise NotImplementedError("return_indices is not yet implemented!")
405
+ if stride is None:
406
+ stride = torch.jit.annotate(List[int], [])
407
+ return torch.nn.functional.max_pool1d(input, kernel_size, stride, padding,
408
+ dilation, ceil_mode=ceil_mode, return_indices=return_indices)
409
+
410
+ def max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1,
411
+ ceil_mode=False, return_indices=False):
412
+ r"""Applies a 2D max pooling over a quantized input signal composed of
413
+ several quantized input planes.
414
+
415
+ .. note:: The input quantization parameters are propagated to the output.
416
+
417
+ See :class:`~torch.ao.nn.quantized.MaxPool2d` for details.
418
+ """
419
+ if return_indices:
420
+ raise NotImplementedError("return_indices is not yet implemented!")
421
+ if stride is None:
422
+ stride = torch.jit.annotate(List[int], [])
423
+ return torch.nn.functional.max_pool2d(input, kernel_size, stride, padding,
424
+ dilation, ceil_mode=ceil_mode, return_indices=return_indices)
425
+
426
+ def celu(input: Tensor, scale: float, zero_point: int, alpha: float = 1.) -> Tensor:
427
+ r"""celu(input, scale, zero_point, alpha=1.) -> Tensor
428
+
429
+ Applies the quantized CELU function element-wise.
430
+
431
+ .. math::
432
+ \text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x / \alpha) - 1))
433
+
434
+ Args:
435
+ input: quantized input
436
+ alpha: the :math:`\alpha` value for the CELU formulation. Default: 1.0
437
+ """
438
+ if not input.is_quantized:
439
+ raise ValueError("Input to 'quantized.celu' must be quantized!")
440
+ return torch.ops.quantized.celu(input, scale, zero_point, alpha)
441
+
442
+
443
+ def leaky_relu(input: Tensor, negative_slope: float = 0.01, inplace: bool = False,
444
+ scale: Optional[float] = None, zero_point: Optional[int] = None):
445
+ r"""
446
+ Quantized version of :func:`~torch.nn.functional.leaky_relu`.
447
+ leaky_relu(input, negative_slope=0.01, inplace=False, scale, zero_point) -> Tensor
448
+
449
+ Applies element-wise,
450
+ :math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)`
451
+
452
+ Args:
453
+ input: Quantized input
454
+ negative_slope: The slope of the negative input
455
+ inplace: Inplace modification of the input tensor
456
+ scale, zero_point: Scale and zero point of the output tensor.
457
+
458
+ See :class:`~torch.nn.LeakyReLU` for more details.
459
+ """
460
+ if scale is not None and zero_point is not None:
461
+ assert not inplace, "Cannot rescale with `inplace`"
462
+ output = torch._empty_affine_quantized(
463
+ input.shape, scale=scale, zero_point=int(zero_point), dtype=input.dtype)
464
+ torch._C._nn.leaky_relu(input, negative_slope, out=output)
465
+ return output
466
+ if inplace:
467
+ result = torch._C._nn.leaky_relu_(input, negative_slope)
468
+ else:
469
+ result = torch._C._nn.leaky_relu(input, negative_slope)
470
+ return result
471
+
472
+ def hardtanh(input: Tensor, min_val: float = -1., max_val: float = 1., inplace: bool = False) -> Tensor:
473
+ r"""This is the quantized version of :func:`~torch.nn.functional.hardtanh`.
474
+ """
475
+ if not input.is_quantized:
476
+ raise ValueError("Input to 'quantized.hardtanh' must be quantized!")
477
+ if inplace:
478
+ return torch._C._nn.hardtanh_(input, min_val, max_val)
479
+ return torch._C._nn.hardtanh(input, min_val, max_val)
480
+
481
+ def hardswish(input: Tensor, scale: float, zero_point: int) -> Tensor:
482
+ r"""This is the quantized version of :func:`~torch.nn.functional.hardswish`.
483
+
484
+ Args:
485
+ input: quantized input
486
+ scale: quantization scale of the output tensor
487
+ zero_point: quantization zero point of the output tensor
488
+ """
489
+ if not input.is_quantized:
490
+ raise ValueError("Input to 'quantized.hardswish' must be quantized!")
491
+ return torch._ops.ops.quantized.hardswish(input, scale, zero_point)
492
+
493
+ def threshold(input: Tensor, threshold: float, value: float) -> Tensor:
494
+ r"""Applies the quantized version of the threshold function element-wise:
495
+
496
+ .. math::
497
+ x = \begin{cases}
498
+ x & \text{if~} x > \text{threshold} \\
499
+ \text{value} & \text{otherwise}
500
+ \end{cases}
501
+
502
+ See :class:`~torch.nn.Threshold` for more details.
503
+ """
504
+ if not input.is_quantized:
505
+ raise ValueError("Input to 'quantized.threshold' must be quantized!")
506
+ if threshold is None:
507
+ raise ValueError("Input to 'threshold' must be specified!")
508
+ if value is None:
509
+ raise ValueError("Input to 'value' must be specified!")
510
+ return torch._ops.ops.quantized.threshold(input, threshold, value)
511
+
512
+ def elu(input: Tensor, scale: float, zero_point: int, alpha: float = 1.) -> Tensor:
513
+ r"""This is the quantized version of :func:`~torch.nn.functional.elu`.
514
+
515
+ Args:
516
+ input: quantized input
517
+ scale: quantization scale of the output tensor
518
+ zero_point: quantization zero point of the output tensor
519
+ alpha: the alpha constant
520
+ """
521
+ if not input.is_quantized:
522
+ raise ValueError("Input to 'quantized.elu' must be quantized!")
523
+ return torch.ops.quantized.elu(input, scale, zero_point, alpha)
524
+
525
+ def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor:
526
+ r"""This is the quantized version of :func:`~torch.nn.functional.hardsigmoid`.
527
+ """
528
+ if not input.is_quantized:
529
+ raise ValueError("Input to 'quantized.hardsigmoid' must be quantized!")
530
+ if inplace:
531
+ return torch._C._nn.hardsigmoid_(input) # type: ignore[attr-defined]
532
+ return torch._C._nn.hardsigmoid(input)
533
+
534
+ def clamp(input: Tensor, min_: float, max_: float) -> Tensor:
535
+ r"""float(input, min\_, max\_) -> Tensor
536
+
537
+ Applies the clamp function element-wise.
538
+ See :class:`~torch.ao.nn.quantized.clamp` for more details.
539
+
540
+ Args:
541
+ input: quantized input
542
+ min_: minimum value for clamping
543
+ max_: maximum value for clamping
544
+ """
545
+ if not input.is_quantized:
546
+ raise ValueError("Input to 'quantized.clamp' must be quantized!")
547
+ return torch.clamp(input, min_, max_)
548
+
549
+ def upsample(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
550
+ r"""Upsamples the input to either the given :attr:`size` or the given
551
+ :attr:`scale_factor`
552
+
553
+ .. warning::
554
+ This function is deprecated in favor of
555
+ :func:`torch.ao.nn.quantized.functional.interpolate`.
556
+ This is equivalent with ``nn.quantized.functional.interpolate(...)``.
557
+
558
+ See :func:`torch.nn.functional.interpolate` for implementation details.
559
+
560
+ The input dimensions are interpreted in the form:
561
+ `mini-batch x channels x [optional depth] x [optional height] x width`.
562
+
563
+ .. note:: The input quantization parameters propagate to the output.
564
+
565
+ .. note:: Only 2D input is supported for quantized inputs
566
+
567
+ .. note:: Only the following modes are supported for the quantized inputs:
568
+
569
+ - `bilinear`
570
+ - `nearest`
571
+
572
+ Args:
573
+ input (Tensor): quantized input tensor
574
+ size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
575
+ output spatial size.
576
+ scale_factor (float or Tuple[float]): multiplier for spatial size. Has to be an integer.
577
+ mode (str): algorithm used for upsampling:
578
+ ``'nearest'`` | ``'bilinear'``
579
+ align_corners (bool, optional): Geometrically, we consider the pixels of the
580
+ input and output as squares rather than points.
581
+ If set to ``True``, the input and output tensors are aligned by the
582
+ center points of their corner pixels, preserving the values at the corner pixels.
583
+ If set to ``False``, the input and output tensors are aligned by the corner
584
+ points of their corner pixels, and the interpolation uses edge value padding
585
+ for out-of-boundary values, making this operation *independent* of input size
586
+ when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
587
+ is ``'bilinear'``.
588
+ Default: ``False``
589
+
590
+ .. warning::
591
+ With ``align_corners = True``, the linearly interpolating modes
592
+ (`bilinear`) don't proportionally align the
593
+ output and input pixels, and thus the output values can depend on the
594
+ input size. This was the default behavior for these modes up to version
595
+ 0.3.1. Since then, the default behavior is ``align_corners = False``.
596
+ See :class:`~torch.nn.Upsample` for concrete examples on how this
597
+ affects the outputs.
598
+ """
599
+ warnings.warn("nn.quantized.functional.upsample is deprecated. Use nn.quantized.functional.interpolate instead.")
600
+ return interpolate(input, size, scale_factor, mode, align_corners)
601
+
602
+ def upsample_bilinear(input, size=None, scale_factor=None):
603
+ r"""Upsamples the input, using bilinear upsampling.
604
+
605
+ .. warning::
606
+ This function is deprecated in favor of
607
+ :func:`torch.ao.nn.quantized.functional.interpolate`.
608
+ This is equivalent with
609
+ ``nn.quantized.functional.interpolate(..., mode='bilinear', align_corners=True)``.
610
+
611
+ .. note:: The input quantization parameters propagate to the output.
612
+
613
+ .. note:: Only 2D inputs are supported
614
+
615
+ Args:
616
+ input (Tensor): quantized input
617
+ size (int or Tuple[int, int]): output spatial size.
618
+ scale_factor (int or Tuple[int, int]): multiplier for spatial size
619
+ """
620
+ # DeprecationWarning is ignored by default
621
+ warnings.warn("nn.quantized.functional.upsample_bilinear is deprecated. Use nn.quantized.functional.interpolate instead.")
622
+ return interpolate(input, size, scale_factor, mode='bilinear', align_corners=True)
623
+
624
+ def upsample_nearest(input, size=None, scale_factor=None):
625
+ r"""Upsamples the input, using nearest neighbours' pixel values.
626
+
627
+ .. warning::
628
+ This function is deprecated in favor of
629
+ :func:`torch.ao.nn.quantized.functional.interpolate`.
630
+ This is equivalent with ``nn.quantized.functional.interpolate(..., mode='nearest')``.
631
+
632
+ .. note:: The input quantization parameters propagate to the output.
633
+
634
+ .. note:: Only 2D inputs are supported
635
+
636
+ Args:
637
+ input (Tensor): quantized input
638
+ size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial
639
+ size.
640
+ scale_factor (int): multiplier for spatial size. Has to be an integer.
641
+ """
642
+ # DeprecationWarning is ignored by default
643
+ warnings.warn("nn.quantized.functional.upsample_nearest is deprecated. Use nn.quantized.functional.interpolate instead.")
644
+ return interpolate(input, size, scale_factor, mode='nearest')
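A short usage sketch of the ops defined above (illustrative; the shapes and quantization parameters are arbitrary): ops such as `interpolate` propagate the input's quantization parameters, while ops such as `hardswish` requantize to the explicitly supplied scale and zero_point.

    import torch
    from torch.ao.nn.quantized import functional as qF

    x = torch.quantize_per_tensor(torch.randn(1, 3, 8, 8),
                                  scale=0.1, zero_point=128, dtype=torch.quint8)
    y = qF.interpolate(x, scale_factor=2, mode='nearest')  # inherits x's scale/zero_point
    z = qF.hardswish(x, scale=0.05, zero_point=64)         # requantized to the given qparams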
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__init__.py ADDED
@@ -0,0 +1,131 @@
+import torch
+
+# The quantized modules use `torch.nn` and `torch.ao.nn.quantizable`
+# packages. However, the `quantizable` package uses "lazy imports"
+# to avoid circular dependency.
+# Hence we need to include it here to make sure it is resolved before
+# they are used in the modules.
+import torch.ao.nn.quantizable
+
+from torch.nn.modules.pooling import MaxPool2d
+
+from .activation import ReLU6, Hardswish, ELU, LeakyReLU, Sigmoid, Softmax, MultiheadAttention, PReLU
+from .dropout import Dropout
+from .batchnorm import BatchNorm2d, BatchNorm3d
+from .normalization import LayerNorm, GroupNorm, InstanceNorm1d, \
+    InstanceNorm2d, InstanceNorm3d
+from .conv import Conv1d, Conv2d, Conv3d
+from .conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
+from .linear import Linear
+from .embedding_ops import Embedding, EmbeddingBag
+from .rnn import LSTM
+
+from .functional_modules import FloatFunctional, FXFloatFunctional, QFunctional
+
+__all__ = [
+    'BatchNorm2d',
+    'BatchNorm3d',
+    'Conv1d',
+    'Conv2d',
+    'Conv3d',
+    'ConvTranspose1d',
+    'ConvTranspose2d',
+    'ConvTranspose3d',
+    'DeQuantize',
+    'ELU',
+    'Embedding',
+    'EmbeddingBag',
+    'GroupNorm',
+    'Hardswish',
+    'InstanceNorm1d',
+    'InstanceNorm2d',
+    'InstanceNorm3d',
+    'LayerNorm',
+    'LeakyReLU',
+    'Linear',
+    'LSTM',
+    'MultiheadAttention',
+    'Quantize',
+    'ReLU6',
+    'Sigmoid',
+    'Softmax',
+    'Dropout',
+    'PReLU',
+    # Wrapper modules
+    'FloatFunctional',
+    'FXFloatFunctional',
+    'QFunctional',
+]
+
+class Quantize(torch.nn.Module):
+    r"""Quantizes an incoming tensor
+
+    Args:
+        `scale`: scale of the output Quantized Tensor
+        `zero_point`: zero_point of output Quantized Tensor
+        `dtype`: data type of output Quantized Tensor
+        `factory_kwargs`: Dictionary of kwargs used for configuring initialization
+            of internal buffers. Currently, `device` and `dtype` are supported.
+            Example: `factory_kwargs={'device': 'cuda', 'dtype': torch.float64}`
+            will initialize internal buffers as type `torch.float64` on the current CUDA device.
+            Note that `dtype` only applies to floating-point buffers.
+
+    Examples::
+        >>> t = torch.tensor([[1., -1.], [1., -1.]])
+        >>> scale, zero_point, dtype = 1.0, 2, torch.qint8
+        >>> qm = Quantize(scale, zero_point, dtype)
+        >>> # xdoctest: +SKIP
+        >>> qt = qm(t)
+        >>> print(qt)
+        tensor([[ 1., -1.],
+                [ 1., -1.]], size=(2, 2), dtype=torch.qint8, scale=1.0, zero_point=2)
+    """
+
+    scale: torch.Tensor
+    zero_point: torch.Tensor
+
+    def __init__(self, scale, zero_point, dtype, factory_kwargs=None):
+        factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
+        super().__init__()
+        self.register_buffer('scale', torch.tensor([scale], **factory_kwargs))
+        self.register_buffer('zero_point',
+                             torch.tensor([zero_point], dtype=torch.long,
+                                          **{k: v for k, v in factory_kwargs.items() if k != 'dtype'}))
+        self.dtype = dtype
+
+    def forward(self, X):
+        return torch.quantize_per_tensor(X, float(self.scale),
+                                         int(self.zero_point), self.dtype)
+
+    @staticmethod
+    def from_float(mod):
+        assert hasattr(mod, 'activation_post_process')
+        scale, zero_point = mod.activation_post_process.calculate_qparams()
+        return Quantize(scale.float().item(), zero_point.long().item(), mod.activation_post_process.dtype)
+
+    def extra_repr(self):
+        return f'scale={self.scale}, zero_point={self.zero_point}, dtype={self.dtype}'
+
+
+class DeQuantize(torch.nn.Module):
+    r"""Dequantizes an incoming tensor
+
+    Examples::
+        >>> input = torch.tensor([[1., -1.], [1., -1.]])
+        >>> scale, zero_point, dtype = 1.0, 2, torch.qint8
+        >>> qm = Quantize(scale, zero_point, dtype)
+        >>> # xdoctest: +SKIP
+        >>> quantized_input = qm(input)
+        >>> dqm = DeQuantize()
+        >>> dequantized = dqm(quantized_input)
+        >>> print(dequantized)
+        tensor([[ 1., -1.],
+                [ 1., -1.]], dtype=torch.float32)
+    """
+
+    def forward(self, Xq):
+        return Xq.dequantize()
+
+    @staticmethod
+    def from_float(mod):
+        return DeQuantize()
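`Quantize` and `DeQuantize` act as the explicit boundaries between float and quantized tensors in an eager-mode model. A minimal illustrative composition (the `nnq.Linear` below is constructed directly with default qparams purely for the sketch; in practice it is produced by `convert()`):

    import torch
    import torch.ao.nn.quantized as nnq

    model = torch.nn.Sequential(
        nnq.Quantize(scale=0.1, zero_point=128, dtype=torch.quint8),  # float -> quint8
        nnq.Linear(4, 4),    # runs on the quantized representation
        nnq.DeQuantize(),    # quint8 -> float
    )
    out = model(torch.randn(2, 4))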
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.7 kB)
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/activation.cpython-310.pyc ADDED
Binary file (11.6 kB)
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/batchnorm.cpython-310.pyc ADDED
Binary file (3.96 kB)
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/conv.cpython-310.pyc ADDED
Binary file (31.3 kB)
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/dropout.cpython-310.pyc ADDED
Binary file (1.35 kB)
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/embedding_ops.cpython-310.pyc ADDED
Binary file (11.1 kB)
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/functional_modules.cpython-310.pyc ADDED
Binary file (8.51 kB)
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (9.41 kB)
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/normalization.cpython-310.pyc ADDED
Binary file (6.8 kB)
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (2.07 kB)
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.71 kB)
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/activation.py ADDED
@@ -0,0 +1,302 @@
1
+ import torch
2
+ from warnings import warn
3
+ __all__ = [
4
+ "ReLU6",
5
+ "Hardswish",
6
+ "ELU",
7
+ "LeakyReLU",
8
+ "Sigmoid",
9
+ "Softmax",
10
+ "MultiheadAttention",
11
+ "PReLU"
12
+ ]
13
+
14
+ class ReLU6(torch.nn.ReLU):
15
+ r"""Applies the element-wise function:
16
+
17
+ :math:`\text{ReLU6}(x) = \min(\max(x_0, x), q(6))`, where :math:`x_0` is the
18
+ zero_point, and :math:`q(6)` is the quantized representation of number 6.
19
+
20
+ Args:
21
+ inplace: can optionally do the operation in-place. Default: ``False``
22
+
23
+ Shape:
24
+ - Input: :math:`(N, *)` where `*` means, any number of additional
25
+ dimensions
26
+ - Output: :math:`(N, *)`, same shape as the input
27
+
28
+ .. image:: ../scripts/activation_images/ReLU6.png
29
+
30
+ Examples::
31
+
32
+ >>> m = nn.quantized.ReLU6()
33
+ >>> input = torch.randn(2)
34
+ >>> # xdoctest: +SKIP
35
+ >>> input = torch.quantize_per_tensor(input, 1.0, 0, dtype=torch.qint32)
36
+ >>> output = m(input)
37
+ """
38
+ def __init__(self, inplace=False):
39
+ super().__init__(inplace)
40
+ self.inplace = inplace
41
+
42
+ def forward(self, input):
43
+ return torch.ops.quantized.relu6(input, self.inplace)
44
+
45
+ def _get_name(self):
46
+ return 'QuantizedReLU6'
47
+
48
+ @staticmethod
49
+ def from_float(mod):
50
+ return ReLU6(mod.inplace)
51
+
52
+ class Hardswish(torch.nn.Hardswish):
53
+ r"""This is the quantized version of :class:`~torch.nn.Hardswish`.
54
+
55
+ Args:
56
+ scale: quantization scale of the output tensor
57
+ zero_point: quantization zero point of the output tensor
58
+ """
59
+ def __init__(self, scale, zero_point, device=None, dtype=None):
60
+ factory_kwargs = {'device': device, 'dtype': dtype}
61
+ super().__init__()
62
+ self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
63
+ self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
64
+
65
+ def forward(self, input):
66
+ return torch.ops.quantized.hardswish(input, self.scale, self.zero_point)
67
+
68
+ def _get_name(self):
69
+ return 'QuantizedHardswish'
70
+
71
+ @staticmethod
72
+ def from_float(mod):
73
+ scale, zero_point = mod.activation_post_process.calculate_qparams()
74
+ return Hardswish(float(scale), int(zero_point))
75
+
76
+ @classmethod
77
+ def from_reference(cls, mod, scale, zero_point):
78
+ return cls(float(scale), int(zero_point))
79
+
80
+ class ELU(torch.nn.ELU):
81
+ r"""This is the quantized equivalent of :class:`~torch.nn.ELU`.
82
+
83
+ Args:
84
+ scale: quantization scale of the output tensor
85
+ zero_point: quantization zero point of the output tensor
86
+ alpha: the alpha constant
87
+ """
88
+ def __init__(self, scale, zero_point, alpha=1.):
89
+ super().__init__(alpha)
90
+ self.scale = scale
91
+ self.zero_point = zero_point
92
+
93
+ def forward(self, input):
94
+ return torch.ao.nn.quantized.functional.elu(
95
+ input, self.scale, self.zero_point, self.alpha)
96
+
97
+ def _get_name(self):
98
+ return 'QuantizedELU'
99
+
100
+ @staticmethod
101
+ def from_float(mod):
102
+ scale, zero_point = mod.activation_post_process.calculate_qparams()
103
+ return ELU(float(scale), int(zero_point), mod.alpha)
104
+
105
+ @classmethod
106
+ def from_reference(cls, mod, scale, zero_point):
107
+ return cls(float(scale), int(zero_point), mod.alpha)
108
+
109
+ class LeakyReLU(torch.nn.LeakyReLU):
110
+ r"""This is the quantized equivalent of :class:`~torch.nn.LeakyReLU`.
111
+
112
+ Args:
113
+ scale: quantization scale of the output tensor
114
+ zero_point: quantization zero point of the output tensor
115
+ negative_slope: Controls the angle of the negative slope. Default: 1e-2
116
+ """
117
+ def __init__(self, scale: float, zero_point: int, negative_slope: float = 1e-2,
118
+ inplace: bool = False, device=None, dtype=None) -> None:
119
+ factory_kwargs = {'device': device, 'dtype': dtype}
120
+ super().__init__(negative_slope, inplace)
121
+ self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
122
+ self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
123
+
124
+ def forward(self, input):
125
+ return torch.ops.quantized.leaky_relu(
126
+ input, self.negative_slope, self.inplace, self.scale, self.zero_point)
127
+
128
+ def _get_name(self):
129
+ return 'QuantizedLeakyReLU'
130
+
131
+ @classmethod
132
+ def from_float(cls, mod):
133
+ scale, zero_point = mod.activation_post_process.calculate_qparams()
134
+ return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)
135
+
136
+ @classmethod
137
+ def from_reference(cls, mod, scale, zero_point):
138
+ return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)
139
+
140
+ class Sigmoid(torch.nn.Sigmoid):
141
+ r"""This is the quantized equivalent of :class:`~torch.nn.Sigmoid`.
142
+
143
+ Args:
144
+ scale: quantization scale of the output tensor
145
+ zero_point: quantization zero point of the output tensor
146
+ """
147
+
148
+ def __init__(self, output_scale: float, output_zero_point: int):
149
+ super().__init__()
150
+ self.output_scale = output_scale
151
+ self.output_zero_point = output_zero_point
152
+
153
+ def forward(self, input):
154
+ return torch.ops.quantized.sigmoid(input, self.output_scale, self.output_zero_point)
155
+
156
+ @classmethod
157
+ def from_float(cls, mod):
158
+ output_scale, output_zero_point = mod.activation_post_process.calculate_qparams()
159
+ return cls(float(output_scale), int(output_zero_point))
160
+
161
+ class Softmax(torch.nn.Softmax):
162
+ r"""This is the quantized version of :class:`~torch.nn.Softmax`.
163
+
164
+ Args:
165
+ dim: A dimension along which Softmax will be computed (so every slice along dim will sum to 1).
166
+ scale: quantization scale of the output tensor
167
+ zero_point: quantization zero point of the output tensor
168
+ """
169
+ def __init__(self, dim=None, scale=1.0, zero_point=0):
170
+ super().__init__()
171
+ self.dim = dim
172
+ self.scale = scale
173
+ self.zero_point = zero_point
174
+
175
+ def forward(self, input):
176
+ dim = self.dim
177
+ if dim is None:
178
+ stacklevel = 3
179
+ # Note: adding the mypy ignore on _get_softmax_dim seems less bad
180
+ # than making `_get_softmax_dim` an official API.
181
+ dim = torch.nn.functional._get_softmax_dim( # type: ignore[attr-defined]
182
+ "softmax", input.dim(), stacklevel)
183
+ return torch.ops.quantized.softmax(
184
+ input, dim, self.scale, self.zero_point)
185
+
186
+ def _get_name(self):
187
+ return 'QuantizedSoftmax'
188
+
189
+ @staticmethod
190
+ def from_float(mod):
191
+ scale, zero_point = mod.activation_post_process.calculate_qparams()
192
+ return Softmax(mod.dim, float(scale), int(zero_point))
193
+
194
+ @classmethod
195
+ def from_reference(cls, mod, scale, zero_point):
196
+ return cls(mod.dim, float(scale), int(zero_point))
197
+
198
+
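A short sketch for Softmax as well (illustrative values): when `dim` is left as None, the forward above infers it from the input rank via `_get_softmax_dim`, mirroring the float module; the output range is [0, 1], so scale 1/256 and zero_point 0 are again a natural choice:

    import torch
    from torch.ao.nn.quantized import Softmax

    m = Softmax(dim=1, scale=1.0 / 256, zero_point=0)
    qx = torch.quantize_per_tensor(torch.randn(3, 5), scale=0.1, zero_point=128, dtype=torch.quint8)
    probs = m(qx)  # each row dequantizes to values summing to roughly 1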
199
+ class MultiheadAttention(torch.ao.nn.quantizable.MultiheadAttention):
200
+ _FLOAT_MODULE = torch.ao.nn.quantizable.MultiheadAttention
201
+
202
+ def _get_name(self):
203
+ return "QuantizedMultiheadAttention"
204
+
205
+ @classmethod
206
+ def from_float(cls, other):
207
+ # The whole flow is float -> observed -> quantized
208
+ # This class does observed -> quantized only
209
+ raise NotImplementedError("It looks like you are trying to convert a "
210
+ "non-observed MHA module. Please, see "
211
+ "the examples on quantizable MHAs.")
212
+
213
+ @classmethod
214
+ def from_observed(cls, other):
215
+ converted = torch.ao.quantization.convert(other, mapping=None,
216
+ inplace=False,
217
+ remove_qconfig=True,
218
+ convert_custom_config_dict=None)
219
+ converted.__class__ = cls
220
+ # Remove the parameters for the bias_k and bias_v to quantize them
221
+ # TODO: This is a potential source of accuracy drop.
222
+ # quantized cat takes the scale and zp of the first
223
+ # element, which might lose the precision in the bias_k
224
+ # and the bias_v (which are cat'ed with k/v being first).
225
+ if converted.bias_k is not None:
226
+ bias_k = converted._parameters.pop('bias_k')
227
+ sc, zp = torch._choose_qparams_per_tensor(bias_k,
228
+ reduce_range=False)
229
+ bias_k = torch.quantize_per_tensor(bias_k, sc, zp, torch.quint8)
230
+ setattr(converted, 'bias_k', bias_k) # noqa: B010
231
+
232
+ if converted.bias_v is not None:
233
+ bias_v = converted._parameters.pop('bias_v')
234
+ sc, zp = torch._choose_qparams_per_tensor(bias_v,
235
+ reduce_range=False)
236
+ bias_v = torch.quantize_per_tensor(bias_v, sc, zp, torch.quint8)
237
+ setattr(converted, 'bias_v', bias_v) # noqa: B010
238
+
239
+ del converted.in_proj_weight
240
+ del converted.in_proj_bias
241
+
242
+ return converted
243
+
244
+ class PReLU(torch.nn.Module):
245
+ r"""This is the quantized equivalent of :class:`~torch.nn.PReLU`.
246
+
247
+ Args:
248
+ output_scale: quantization scale of the output tensor
+ output_zero_point: quantization zero point of the output tensor
250
+ num_parameters: number of parameters: 1, or the number of channels at input. Default: 1
251
+ """
252
+ def __init__(self, output_scale: float, output_zero_point: int,
253
+ num_parameters: int = 1) -> None:
254
+ super().__init__()
255
+ self.num_parameters = num_parameters
256
+ self.scale = output_scale
257
+ self.zero_point = output_zero_point
258
+ w = torch.randn(num_parameters, dtype=torch.float)
259
+ qw = torch.quantize_per_tensor(w, scale=1.0, zero_point=0, dtype=torch.quint8)
260
+ self.set_weight(qw)
261
+
262
+ def set_weight(self, w: torch.Tensor) -> None:
263
+ self.weight = w
264
+
265
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
266
+ return torch.ops.quantized.prelu(input, self.weight, self.scale, self.zero_point)
267
+
268
+ def _get_name(self):
269
+ return 'QuantizedPReLU'
270
+
271
+ @classmethod
272
+ def from_float(cls, mod):
273
+ scale, zero_point = mod.activation_post_process.calculate_qparams()
274
+ qprelu = cls(float(scale), int(zero_point), mod.num_parameters)
275
+ float_wt = mod.weight.float()
276
+ observer = mod.qconfig.weight()
277
+ observer(float_wt)
278
+ if observer.dtype != torch.quint8:
279
+ warn(
280
+ f"PReLU's weight observer should have dtype quint8 but got {observer.dtype}"
281
+ )
282
+ wt_scale, wt_zp = observer.calculate_qparams()
283
+ qweight = torch.quantize_per_tensor(
284
+ float_wt, float(wt_scale), int(wt_zp), torch.quint8)
285
+ qprelu.set_weight(qweight)
286
+ return qprelu
287
+
288
+ @classmethod
289
+ def from_reference(cls, mod, scale, zero_point):
290
+ qprelu = cls(float(scale), int(zero_point), mod.num_parameters)
291
+ float_wt = mod.weight.float()
292
+ observer = mod.qconfig.weight()
293
+ observer(float_wt)
294
+ if observer.dtype != torch.quint8:
295
+ warn(
296
+ f"PReLU's weight observer should have dtype quint8 but got {observer.dtype}"
297
+ )
298
+ wt_scale, wt_zp = observer.calculate_qparams()
299
+ qweight = torch.quantize_per_tensor(
300
+ float_wt, float(wt_scale), int(wt_zp), torch.quint8)
301
+ qprelu.set_weight(qweight)
302
+ return qprelu
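To make the PReLU weight handling above concrete, a hedged sketch (placeholder numbers): the learnable slope is stored as a quint8 tensor installed through `set_weight`; `from_float`/`from_reference` perform the same step automatically from the observed float weight:

    import torch
    from torch.ao.nn.quantized import PReLU

    m = PReLU(output_scale=0.1, output_zero_point=0, num_parameters=1)
    slope = torch.quantize_per_tensor(torch.tensor([0.25]), scale=0.01, zero_point=0, dtype=torch.quint8)
    m.set_weight(slope)  # same step from_float performs with observer-derived qparams
    qx = torch.quantize_per_tensor(torch.randn(2, 3), scale=0.1, zero_point=64, dtype=torch.quint8)
    y = m(qx)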
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/batchnorm.py ADDED
@@ -0,0 +1,106 @@
1
+ import torch
2
+ import torch.ao.nn.intrinsic as nni
3
+
4
+ __all__ = [
5
+ "BatchNorm2d",
6
+ "BatchNorm3d"
7
+ ]
8
+
9
+ class _BatchNorm(torch.nn.modules.batchnorm._BatchNorm):
10
+ def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None) -> None:
11
+ factory_kwargs = {'device': device, 'dtype': dtype}
12
+ super().__init__(num_features, eps, momentum, True, True, **factory_kwargs)
13
+ self.register_buffer('scale', torch.tensor(1.0, **factory_kwargs))
14
+ self.register_buffer('zero_point', torch.tensor(0, **factory_kwargs))
15
+
16
+ @staticmethod
17
+ def from_float(cls, mod):
18
+ activation_post_process = mod.activation_post_process
19
+ if type(mod) == cls._NNI_BN_RELU_MODULE:
20
+ mod = mod[0]
21
+ scale, zero_point = activation_post_process.calculate_qparams()
22
+ new_mod = cls(mod.num_features, mod.eps)
23
+ new_mod.weight = mod.weight
24
+ new_mod.bias = mod.bias
25
+ new_mod.running_mean = mod.running_mean
26
+ new_mod.running_var = mod.running_var
27
+ new_mod.scale = scale
28
+ new_mod.zero_point = zero_point
29
+ return new_mod
30
+
31
+ @classmethod
32
+ def from_reference(cls, bn, output_scale, output_zero_point):
33
+ qbn = cls(
34
+ bn.num_features,
35
+ bn.eps,
36
+ bn.momentum,
37
+ device=bn.weight.device,
38
+ dtype=bn.weight.dtype
39
+ )
40
+ qbn.weight = bn.weight
41
+ qbn.bias = bn.bias
42
+ qbn.running_mean = bn.running_mean
43
+ qbn.running_var = bn.running_var
44
+ qbn.scale = output_scale
45
+ qbn.zero_point = output_zero_point
46
+ return qbn
47
+
48
+ class BatchNorm2d(_BatchNorm):
49
+ r"""This is the quantized version of :class:`~torch.nn.BatchNorm2d`.
50
+ """
51
+
52
+ _NNI_BN_RELU_MODULE = nni.BNReLU2d
53
+
54
+ def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None) -> None:
55
+ factory_kwargs = {'device': device, 'dtype': dtype}
56
+ super().__init__(num_features, eps, momentum, **factory_kwargs)
57
+
58
+ def _get_name(self):
59
+ return 'QuantizedBatchNorm2d'
60
+
61
+ def _check_input_dim(self, input):
62
+ # Temporarily using len(shape) instead of ndim due to JIT issue
63
+ # https://github.com/pytorch/pytorch/issues/23890
64
+ if len(input.shape) != 4:
65
+ raise ValueError("Input shape must be `(N, C, H, W)`!")
66
+
67
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
68
+ # disabling this since this is not symbolically traceable
69
+ # self._check_input_dim(input)
70
+ return torch.ops.quantized.batch_norm2d(
71
+ input, self.weight, self.bias, self.running_mean,
72
+ self.running_var, self.eps, self.scale, self.zero_point)
73
+
74
+ @classmethod
75
+ def from_float(cls, mod):
76
+ return _BatchNorm.from_float(cls, mod)
77
+
78
+ class BatchNorm3d(_BatchNorm):
79
+ r"""This is the quantized version of :class:`~torch.nn.BatchNorm3d`.
80
+ """
81
+
82
+ _NNI_BN_RELU_MODULE = nni.BNReLU3d
83
+
84
+ def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
85
+ factory_kwargs = {'device': device, 'dtype': dtype}
86
+ super().__init__(num_features, eps, momentum, **factory_kwargs)
87
+
88
+ def _get_name(self):
89
+ return 'QuantizedBatchNorm3d'
90
+
91
+ def _check_input_dim(self, input):
92
+ # Temporarily using len(shape) instead of ndim due to JIT issue
93
+ # https://github.com/pytorch/pytorch/issues/23890
94
+ if len(input.shape) != 5:
95
+ raise ValueError("Input shape must be `(N, C, H, W)`!")
96
+
97
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
98
+ # disabling this since this is not symbolically traceable
99
+ # self._check_input_dim(input)
100
+ return torch.ops.quantized.batch_norm3d(
101
+ input, self.weight, self.bias, self.running_mean,
102
+ self.running_var, self.eps, self.scale, self.zero_point)
103
+
104
+ @classmethod
105
+ def from_float(cls, mod):
106
+ return _BatchNorm.from_float(cls, mod)
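A minimal construction sketch for the batch norm modules above (illustrative only); in practice they are produced by `from_float`/`from_reference`, which copy the affine parameters and running statistics and set calibrated output qparams instead of the 1.0/0 buffer defaults:

    import torch
    from torch.ao.nn.quantized import BatchNorm2d

    bn = BatchNorm2d(num_features=4)  # scale/zero_point buffers default to 1.0 and 0
    qx = torch.quantize_per_tensor(torch.randn(2, 4, 8, 8), scale=0.1, zero_point=64, dtype=torch.quint8)
    y = bn(qx)  # dispatches to torch.ops.quantized.batch_norm2d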
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/conv.py ADDED
@@ -0,0 +1,945 @@
1
+ r"""Quantized convolution modules."""
2
+
3
+ from typing import Optional, List, TypeVar
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ import torch.ao.nn.intrinsic as nni
9
+ import torch.ao.nn.intrinsic.qat as nniqat
10
+
11
+ from torch._ops import ops
12
+ from torch.nn.common_types import _size_1_t
13
+ from torch.nn.modules.utils import _single, _pair, _triple
14
+ from torch.nn.utils import fuse_conv_bn_weights
15
+
16
+ from .utils import _quantize_weight, WeightedQuantizedModule
17
+
18
+ __all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d']
19
+
20
+ _SUPPORTED_PADDING = {
21
+ 'zeros',
22
+ 'reflect'
23
+ }
24
+
25
+
26
+ def _reverse_repeat_padding(padding: List[int]) -> List[int]:
27
+ _reversed_padding_repeated_twice: List[int] = []
28
+ N = len(padding)
29
+ for idx in range(N):
30
+ for _ in range(2):
31
+ _reversed_padding_repeated_twice.append(padding[N - idx - 1])
32
+ return _reversed_padding_repeated_twice
33
+
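For reference, what the helper above produces: each padding value is repeated for both sides and the dimension order is reversed, which is the layout torch.nn.functional.pad expects (last dimension first):

    # e.g. for a Conv2d padding of (1, 2) over (H, W):
    _reverse_repeat_padding([1, 2])  # -> [2, 2, 1, 1], i.e. (W_left, W_right, H_top, H_bottom)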
34
+
35
+ class _ConvNd(WeightedQuantizedModule):
36
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
37
+ padding=0, dilation=1, groups=1, bias=True,
38
+ padding_mode='zeros', device=None, dtype=None):
39
+ # All subclasses have this signature - See PR #49702
40
+ raise NotImplementedError
41
+
42
+ def _init(self, in_channels, out_channels, kernel_size, stride,
43
+ padding, dilation,
44
+ transposed, output_padding,
45
+ groups, bias,
46
+ padding_mode='zeros',
47
+ device=None,
48
+ dtype=None) -> None:
49
+ factory_kwargs = {'device': device, 'dtype': dtype}
50
+ super().__init__()
51
+
52
+ if in_channels % groups != 0:
53
+ raise ValueError('in_channels must be divisible by groups')
54
+ if out_channels % groups != 0:
55
+ raise ValueError('out_channels must be divisible by groups')
56
+ self.in_channels = in_channels
57
+ self.out_channels = out_channels
58
+ self.kernel_size = kernel_size
59
+ self.stride = stride
60
+ self.padding = padding
61
+ self.dilation = dilation
62
+ self.transposed = transposed
63
+ self.output_padding = output_padding
64
+ self.groups = groups
65
+ if padding_mode not in _SUPPORTED_PADDING:
66
+ raise ValueError(f"'padding_mode' {padding_mode} is not supported by quantized convolution")
67
+ self.padding_mode = padding_mode
68
+ # Initialize as NCHW. set_weight will internally transpose to NHWC.
69
+ if self.transposed:
70
+ weight_shape = [in_channels, out_channels // self.groups]
71
+ else:
72
+ weight_shape = [out_channels, in_channels // self.groups]
73
+ qweight = torch._empty_affine_quantized(
74
+ weight_shape + list(kernel_size),
75
+ scale=1, zero_point=0, dtype=torch.qint8,
76
+ **{k: v for k, v in factory_kwargs.items() if k != 'dtype'})
77
+ bias_float = (
78
+ torch.zeros(out_channels, dtype=torch.float,
79
+ **{k: v for k, v in factory_kwargs.items() if k != 'dtype'}) if bias else None)
80
+
81
+ self.set_weight_bias(qweight, bias_float)
82
+ self.scale = 1.0
83
+ self.zero_point = 0
84
+
85
+ def set_weight_bias(self, qweight, bias_float):
86
+ raise NotImplementedError
87
+
88
+ def bias(self):
89
+ raise NotImplementedError
90
+
91
+ def _weight_bias(self):
92
+ raise NotImplementedError
93
+
94
+ def extra_repr(self):
95
+ s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
96
+ ', stride={stride}, scale={scale}, zero_point={zero_point}')
97
+ if self.padding != (0,) * len(self.padding):
98
+ s += ', padding={padding}'
99
+ if self.dilation != (1,) * len(self.dilation):
100
+ s += ', dilation={dilation}'
101
+ if self.output_padding != (0,) * len(self.output_padding):
102
+ s += ', output_padding={output_padding}'
103
+ if self.groups != 1:
104
+ s += ', groups={groups}'
105
+ if self.bias() is None:
106
+ s += ', bias=False'
107
+ return s.format(**self.__dict__)
108
+
109
+ # ===== Serialization methods =====
110
+ # The special consideration here is that we have to unpack the weights into
111
+ # their regular QTensor form for serialization. Packed weights should not
112
+ # live outside the process in which they were created; rather, they should be
113
+ # derived from the QTensor weight.
114
+ # self
115
+ # |--- weight : Tensor
116
+ # |--- bias : Tensor
117
+ #
118
+ # TODO: maybe change to this when https://github.com/pytorch/pytorch/pull/32958 is landed
119
+ # self
120
+ # |--- _packed_params : Conv2dPackedParamsBase or Conv3dPackedParamsBase
121
+ def _save_to_state_dict(self, destination, prefix, keep_vars):
122
+ super()._save_to_state_dict(destination, prefix, keep_vars)
123
+ (w, b) = self._weight_bias()
124
+ destination[prefix + 'weight'] = w
125
+ destination[prefix + 'bias'] = b
126
+ destination[prefix + 'scale'] = torch.tensor(self.scale)
127
+ destination[prefix + 'zero_point'] = torch.tensor(self.zero_point)
128
+
129
+ @torch.jit.export
130
+ def __getstate__(self):
131
+ (w, b) = self._weight_bias()
132
+ return (
133
+ self.in_channels,
134
+ self.out_channels,
135
+ self.kernel_size,
136
+ self.stride,
137
+ self.padding,
138
+ self.dilation,
139
+ self.transposed,
140
+ self.output_padding,
141
+ self.groups,
142
+ self.padding_mode,
143
+ w,
144
+ b,
145
+ self.scale,
146
+ self.zero_point,
147
+ self.training
148
+ )
149
+
150
+ # ===== Deserialization methods =====
151
+ # Counterpart to the serialization methods, we must pack the serialized
152
+ # QTensor weight into its packed format for use by the FBGEMM ops.
153
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
154
+ missing_keys, unexpected_keys, error_msgs):
155
+ self.set_weight_bias(
156
+ state_dict[prefix + 'weight'], state_dict[prefix + 'bias'])
157
+ state_dict.pop(prefix + 'weight')
158
+ state_dict.pop(prefix + 'bias')
159
+ self.scale = float(state_dict[prefix + 'scale'])
160
+ state_dict.pop(prefix + 'scale')
161
+ self.zero_point = int(state_dict[prefix + 'zero_point'])
162
+ state_dict.pop(prefix + 'zero_point')
163
+ super()._load_from_state_dict(
164
+ state_dict, prefix, local_metadata, False, missing_keys,
165
+ unexpected_keys, error_msgs)
166
+
167
+ @torch.jit.export
168
+ def __setstate__(self, state):
169
+ self.in_channels = state[0]
170
+ self.out_channels = state[1]
171
+ self.kernel_size = state[2]
172
+ self.stride = state[3]
173
+ self.padding = state[4]
174
+ self.dilation = state[5]
175
+ self.transposed = state[6]
176
+ self.output_padding = state[7]
177
+ self.groups = state[8]
178
+ self.padding_mode = state[9]
179
+ self.set_weight_bias(state[10], state[11])
180
+ self.scale = state[12]
181
+ self.zero_point = state[13]
182
+ self.training = state[14]
183
+
184
+ def __deepcopy__(self, memo):
185
+ new_instance = type(self).__new__(type(self))
186
+ torch.nn.Module.__init__(new_instance)
187
+ state = self.__getstate__()
188
+ new_instance.__setstate__(state)
189
+ return new_instance
190
+
191
+ def __copy__(self):
192
+ return self.__deepcopy__({})
193
+
194
+ @classmethod
195
+ def get_qconv(cls, mod, activation_post_process, weight_post_process=None):
196
+ r"""Creates a qconv object and returns it.
197
+ """
198
+ if weight_post_process is None:
199
+ weight_post_process = mod.qconfig.weight()
200
+ weight_post_process(mod.weight)
201
+ assert weight_post_process.dtype == torch.qint8, \
202
+ 'Weight observer must have a dtype of qint8'
203
+ qweight = _quantize_weight(mod.weight.float(), weight_post_process)
204
+ # the __init__ call used is the one from derived classes and not the one from _ConvNd
205
+ qconv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,
206
+ mod.stride, mod.padding, mod.dilation, mod.groups,
207
+ mod.bias is not None, mod.padding_mode)
208
+ qconv.set_weight_bias(qweight, mod.bias)
209
+ if activation_post_process is None or activation_post_process.dtype == torch.float:
210
+ return qconv # dynamic quantization doesn't need scale/zero_point
211
+ else:
212
+ act_scale, act_zp = activation_post_process.calculate_qparams()
213
+ qconv.scale = float(act_scale)
214
+ qconv.zero_point = int(act_zp)
215
+ return qconv
216
+
217
+ @staticmethod
218
+ def from_float(cls, mod):
219
+ if hasattr(mod, "weight_fake_quant"):
220
+ # assert type(mod) == cls.__QAT_MODULE, " nnq." + cls.__name__ + \
221
+ # ".from_float only works for " + cls.__QAT_MODULE.__name__
222
+ if type(mod) == cls._NNIQAT_CONV_BN_MODULE:
223
+ mod.weight, mod.bias = fuse_conv_bn_weights(
224
+ mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
225
+ mod.bn.eps, mod.bn.weight, mod.bn.bias)
226
+ assert hasattr(mod, "activation_post_process"), \
227
+ "Input QAT module must have observer attached"
228
+ weight_post_process = mod.weight_fake_quant
229
+ activation_post_process = mod.activation_post_process
230
+ else:
231
+ assert type(mod) == cls._FLOAT_MODULE, \
232
+ " nnq." + cls.__name__ + ".from_float only works for " + \
233
+ cls._FLOAT_MODULE.__name__ + " but got:" + str(type(mod))
234
+ assert hasattr(mod, "qconfig"), \
235
+ "Input float module must have qconfig defined."
236
+ activation_post_process = None if not hasattr(
237
+ mod, "activation_post_process") else mod.activation_post_process
238
+ if type(mod) in [cls._NNI_CONV_RELU_MODULE, cls._NNI_CONV_ADD_MODULE, cls._NNI_CONV_ADD_RELU_MODULE]:
239
+ mod = mod[0]
240
+ weight_post_process = mod.qconfig.weight()
241
+ return cls.get_qconv(mod, activation_post_process, weight_post_process)
242
+
243
+ @classmethod
244
+ def from_reference(cls, ref_qconv, output_scale, output_zero_point):
245
+ r"""Create a (fbgemm/qnnpack) quantized module from a reference quantized module
246
+ Args:
247
+ ref_qconv (Module): a reference quantized module, either produced by torch.ao.quantization
248
+ utilities or provided by the user
249
+ output_scale (float): scale for output Tensor
250
+ output_zero_point (int): zero point for output Tensor
251
+ """
252
+ qconv = cls(
253
+ ref_qconv.in_channels,
254
+ ref_qconv.out_channels,
255
+ ref_qconv.kernel_size, # type: ignore[arg-type]
256
+ ref_qconv.stride, # type: ignore[arg-type]
257
+ ref_qconv.padding, # type: ignore[arg-type]
258
+ ref_qconv.dilation, # type: ignore[arg-type]
259
+ ref_qconv.groups,
260
+ ref_qconv.bias is not None, # type: ignore[arg-type]
261
+ ref_qconv.padding_mode,
262
+ device=ref_qconv.weight.device,
263
+ dtype=ref_qconv.weight.dtype)
264
+ qweight = ref_qconv.get_quantized_weight()
265
+ qconv.set_weight_bias(qweight, ref_qconv.bias)
266
+ qconv.scale = float(output_scale)
267
+ qconv.zero_point = int(output_zero_point)
268
+ return qconv
269
+
270
+
271
+ class Conv1d(_ConvNd):
272
+ r"""Applies a 1D convolution over a quantized input signal composed of
273
+ several quantized input planes.
274
+
275
+ For details on input arguments, parameters, and implementation see
276
+ :class:`~torch.nn.Conv1d`.
277
+
278
+ .. note::
279
+ Only `zeros` is supported for the :attr:`padding_mode` argument.
280
+
281
+ .. note::
282
+ Only `torch.quint8` is supported for the input data type.
283
+
284
+
285
+ Attributes:
286
+ weight (Tensor): packed tensor derived from the learnable weight
287
+ parameter.
288
+ scale (Tensor): scalar for the output scale
289
+ zero_point (Tensor): scalar for the output zero point
290
+
291
+ See :class:`~torch.nn.Conv1d` for other attributes.
292
+
293
+ Examples::
294
+
295
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
296
+ >>> m = nn.quantized.Conv1d(16, 33, 3, stride=2)
297
+ >>> input = torch.randn(20, 16, 100)
298
+ >>> # quantize input to quint8
299
+ >>> # xdoctest: +SKIP
300
+ >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0,
301
+ ... dtype=torch.quint8)
302
+ >>> output = m(q_input)
303
+
304
+ """
305
+
306
+ _FLOAT_MODULE = nn.Conv1d
307
+ _NNIQAT_CONV_BN_MODULE = nniqat.ConvBn1d
308
+ _NNI_CONV_RELU_MODULE = nni.ConvReLU1d
309
+ _NNI_CONV_ADD_MODULE: None = None
310
+ _NNI_CONV_ADD_RELU_MODULE: None = None
311
+
312
+ def __init__(self,
313
+ in_channels: int,
314
+ out_channels: int,
315
+ kernel_size: _size_1_t,
316
+ stride: _size_1_t = 1,
317
+ padding: _size_1_t = 0,
318
+ dilation: _size_1_t = 1,
319
+ groups: int = 1,
320
+ bias: bool = True,
321
+ padding_mode: str = 'zeros',
322
+ device=None,
323
+ dtype=None):
324
+ factory_kwargs = {'device': device, 'dtype': dtype}
325
+ kernel_size = _single(kernel_size)
326
+ stride = _single(stride)
327
+ padding = padding if isinstance(padding, str) else _single(padding)
328
+ dilation = _single(dilation)
329
+
330
+ # Subclasses of _ConvNd need to call _init rather than __init__. See
331
+ # discussion on PR #49702
332
+ super()._init(
333
+ in_channels, out_channels, kernel_size, stride, padding, dilation,
334
+ False, _single(0), groups, bias, padding_mode, **factory_kwargs)
335
+
336
+ def _get_name(self):
337
+ return 'QuantizedConv1d'
338
+
339
+ def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
340
+ if self.padding_mode == 'zeros':
341
+ self._packed_params = torch.ops.quantized.conv1d_prepack(
342
+ w, b, self.stride, self.padding, self.dilation, self.groups)
343
+ else:
344
+ self._packed_params = torch.ops.quantized.conv1d_prepack(
345
+ w, b, self.stride, _pair(0), self.dilation,
346
+ self.groups)
347
+
348
+ def _weight_bias(self):
349
+ w, b = torch.ops.quantized.conv1d_unpack(self._packed_params)
350
+ return w, b
351
+
352
+ def weight(self):
353
+ return self._weight_bias()[0]
354
+
355
+ def bias(self):
356
+ return self._weight_bias()[1]
357
+
358
+ def forward(self, input):
359
+ # Temporarily using len(shape) instead of ndim due to JIT issue
360
+ # https://github.com/pytorch/pytorch/issues/23890
361
+ if len(input.shape) != 3:
362
+ raise ValueError("Input shape must be `(N, C, L)`!")
363
+ if self.padding_mode != 'zeros':
364
+ # Padding in Conv1d is stored as (p, p), need to get (p,)
365
+ _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding[:1])
366
+ input = F.pad(input, _reversed_padding_repeated_twice,
367
+ mode=self.padding_mode)
368
+ return ops.quantized.conv1d(input, self._packed_params, self.scale, self.zero_point)
369
+
370
+ @classmethod
371
+ def from_float(cls, mod):
372
+ r"""Creates a quantized module from a float module or qparams_dict.
373
+
374
+ Args:
375
+ mod (Module): a float module, either produced by torch.ao.quantization
376
+ utilities or provided by the user
377
+ """
378
+ return _ConvNd.from_float(cls, mod)
379
+
380
+
381
+ class Conv2d(_ConvNd):
382
+ r"""Applies a 2D convolution over a quantized input signal composed of
383
+ several quantized input planes.
384
+
385
+ For details on input arguments, parameters, and implementation see
386
+ :class:`~torch.nn.Conv2d`.
387
+
388
+ .. note::
389
+ Only `zeros` is supported for the :attr:`padding_mode` argument.
390
+
391
+ .. note::
392
+ Only `torch.quint8` is supported for the input data type.
393
+
394
+
395
+ Attributes:
396
+ weight (Tensor): packed tensor derived from the learnable weight
397
+ parameter.
398
+ scale (Tensor): scalar for the output scale
399
+ zero_point (Tensor): scalar for the output zero point
400
+
401
+ See :class:`~torch.nn.Conv2d` for other attributes.
402
+
403
+ Examples::
404
+
405
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
406
+ >>> # With square kernels and equal stride
407
+ >>> m = nn.quantized.Conv2d(16, 33, 3, stride=2)
408
+ >>> # non-square kernels and unequal stride and with padding
409
+ >>> m = nn.quantized.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
410
+ >>> # non-square kernels and unequal stride and with padding and dilation
411
+ >>> m = nn.quantized.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
412
+ >>> input = torch.randn(20, 16, 50, 100)
413
+ >>> # quantize input to quint8
414
+ >>> # xdoctest: +SKIP
415
+ >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
416
+ >>> output = m(q_input)
417
+
418
+ """
419
+ _FLOAT_MODULE = nn.Conv2d
420
+ _NNIQAT_CONV_BN_MODULE = nniqat.ConvBn2d
421
+ _NNI_CONV_RELU_MODULE = nni.ConvReLU2d
422
+ _NNI_CONV_ADD_MODULE = nni.ConvAdd2d
423
+ _NNI_CONV_ADD_RELU_MODULE = nni.ConvAddReLU2d
424
+
425
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
426
+ padding=0, dilation=1, groups=1, bias=True,
427
+ padding_mode='zeros', device=None, dtype=None):
428
+ factory_kwargs = {'device': device, 'dtype': dtype}
429
+ kernel_size = _pair(kernel_size)
430
+ stride = _pair(stride)
431
+ padding = _pair(padding)
432
+ dilation = _pair(dilation)
433
+ # Subclasses of _ConvNd need to call _init rather than __init__. See
434
+ # discussion on PR #49702
435
+ super()._init(
436
+ in_channels, out_channels, kernel_size, stride, padding, dilation,
437
+ False, _pair(0), groups, bias, padding_mode, **factory_kwargs)
438
+
439
+ def _get_name(self):
440
+ return 'QuantizedConv2d'
441
+
442
+ def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
443
+ if self.padding_mode == 'zeros':
444
+ self._packed_params = torch.ops.quantized.conv2d_prepack(
445
+ w, b, self.stride, self.padding, self.dilation, self.groups)
446
+ else:
447
+ self._packed_params = torch.ops.quantized.conv2d_prepack(
448
+ w, b, self.stride, _pair(0), self.dilation, self.groups)
449
+
450
+ def _weight_bias(self):
451
+ return self._packed_params.unpack()
452
+
453
+ def weight(self):
454
+ return self._weight_bias()[0]
455
+
456
+ def bias(self):
457
+ return self._weight_bias()[1]
458
+
459
+ def forward(self, input):
460
+ # Temporarily using len(shape) instead of ndim due to JIT issue
461
+ # https://github.com/pytorch/pytorch/issues/23890
462
+ if len(input.shape) != 4:
463
+ raise ValueError("Input shape must be `(N, C, H, W)`!")
464
+ if self.padding_mode != 'zeros':
465
+ _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
466
+ input = F.pad(input, _reversed_padding_repeated_twice,
467
+ mode=self.padding_mode)
468
+ return ops.quantized.conv2d(
469
+ input, self._packed_params, self.scale, self.zero_point)
470
+
471
+ @classmethod
472
+ def from_float(cls, mod):
473
+ r"""Creates a quantized module from a float module or qparams_dict.
474
+
475
+ Args:
476
+ mod (Module): a float module, either produced by torch.ao.quantization
477
+ utilities or provided by the user
478
+ """
479
+ return _ConvNd.from_float(cls, mod)
480
+
481
+
482
+ class Conv3d(_ConvNd):
483
+ r"""Applies a 3D convolution over a quantized input signal composed of
484
+ several quantized input planes.
485
+
486
+ For details on input arguments, parameters, and implementation see
487
+ :class:`~torch.nn.Conv3d`.
488
+
489
+ .. note::
490
+ Only `zeros` is supported for the :attr:`padding_mode` argument.
491
+
492
+ .. note::
493
+ Only `torch.quint8` is supported for the input data type.
494
+
495
+
496
+ Attributes:
497
+ weight (Tensor): packed tensor derived from the learnable weight
498
+ parameter.
499
+ scale (Tensor): scalar for the output scale
500
+ zero_point (Tensor): scalar for the output zero point
501
+
502
+ See :class:`~torch.nn.Conv3d` for other attributes.
503
+
504
+ Examples::
505
+
506
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
507
+ >>> # With square kernels and equal stride
508
+ >>> m = nn.quantized.Conv3d(16, 33, 3, stride=2)
509
+ >>> # non-square kernels and unequal stride and with padding
510
+ >>> m = nn.quantized.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2))
511
+ >>> # non-square kernels and unequal stride and with padding and dilation
512
+ >>> m = nn.quantized.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), dilation=(1, 2, 2))
513
+ >>> input = torch.randn(20, 16, 56, 56, 56)
514
+ >>> # quantize input to quint8
515
+ >>> # xdoctest: +SKIP
516
+ >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
517
+ >>> output = m(q_input)
518
+
519
+ """
520
+ _FLOAT_MODULE = nn.Conv3d
521
+ _NNIQAT_CONV_BN_MODULE = nniqat.ConvBn3d
522
+ _NNI_CONV_RELU_MODULE = nni.ConvReLU3d
523
+ _NNI_CONV_ADD_MODULE: None = None
524
+ _NNI_CONV_ADD_RELU_MODULE: None = None
525
+
526
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
527
+ padding=0, dilation=1, groups=1, bias=True,
528
+ padding_mode='zeros', device=None, dtype=None):
529
+ assert padding_mode != 'reflect', "Conv3d does not support reflection padding"
530
+ factory_kwargs = {'device': device, 'dtype': dtype}
531
+ kernel_size = _triple(kernel_size)
532
+ stride = _triple(stride)
533
+ padding = _triple(padding)
534
+ dilation = _triple(dilation)
535
+ # Subclasses of _ConvNd need to call _init rather than __init__. See
536
+ # discussion on PR #49702
537
+ super()._init(
538
+ in_channels, out_channels, kernel_size, stride, padding, dilation,
539
+ False, _triple(0), groups, bias, padding_mode, **factory_kwargs)
540
+
541
+ def _get_name(self):
542
+ return 'QuantizedConv3d'
543
+
544
+ def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
545
+ if self.padding_mode == 'zeros':
546
+ self._packed_params = torch.ops.quantized.conv3d_prepack(
547
+ w, b, self.stride, self.padding, self.dilation, self.groups)
548
+ else:
549
+ self._packed_params = torch.ops.quantized.conv3d_prepack(
550
+ w, b, self.stride, _triple(0), self.dilation, self.groups)
551
+
552
+ def _weight_bias(self):
553
+ return self._packed_params.unpack()
554
+
555
+ def weight(self):
556
+ return self._weight_bias()[0]
557
+
558
+ def bias(self):
559
+ return self._weight_bias()[1]
560
+
561
+ def forward(self, input):
562
+ # Temporarily using len(shape) instead of ndim due to JIT issue
563
+ # https://github.com/pytorch/pytorch/issues/23890
564
+ if len(input.shape) != 5:
565
+ raise ValueError("Input shape must be `(N, C, D, H, W)`!")
566
+ if self.padding_mode != 'zeros':
567
+ _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
568
+ input = F.pad(input, _reversed_padding_repeated_twice,
569
+ mode=self.padding_mode)
570
+ return ops.quantized.conv3d(
571
+ input, self._packed_params, self.scale, self.zero_point)
572
+
573
+ @classmethod
574
+ def from_float(cls, mod):
575
+ r"""Creates a quantized module from a float module or qparams_dict.
576
+
577
+ Args:
578
+ mod (Module): a float module, either produced by torch.ao.quantization
579
+ utilities or provided by the user
580
+ """
581
+ return _ConvNd.from_float(cls, mod)
582
+
583
+ # === Transposed Convolutions ===
584
+ MOD = TypeVar('MOD', bound=nn.modules.conv._ConvNd)
585
+
586
+
587
+ class _ConvTransposeNd(_ConvNd):
588
+
589
+ _FLOAT_MODULE = MOD
590
+
591
+ def __init__(self, in_channels, out_channels, kernel_size, stride,
592
+ padding, dilation, transposed, output_padding,
593
+ groups, bias, padding_mode, device=None, dtype=None):
594
+ if padding_mode != 'zeros':
595
+ raise ValueError(f'Only "zeros" padding mode is supported for {self.__class__.__name__}')
596
+ factory_kwargs = {'device': device, 'dtype': dtype}
597
+ # Subclasses of _ConvNd need to call _init rather than __init__. See
598
+ # discussion on PR #49702
599
+ super()._init(
600
+ in_channels, out_channels, kernel_size, stride,
601
+ padding, dilation, transposed, output_padding,
602
+ groups, bias, padding_mode, **factory_kwargs)
603
+
604
+ def _input_padding(self, kernel_size: List[int], dilation: List[int], padding: List[int]) -> List[int]:
605
+ res = torch.jit.annotate(List[int], [])
606
+ for kdx in range(len(kernel_size)):
607
+ pad = (dilation[kdx] * (kernel_size[kdx] - 1) - padding[kdx])
608
+ res.append(pad)
609
+ return res
610
+
611
+ @classmethod
612
+ def from_float(cls, mod):
613
+ r"""Creates a quantized module from a float module or qparams_dict.
614
+ Args:
615
+ mod (Module): a float module, either produced by torch.ao.quantization
616
+ utilities or provided by the user
617
+ """
618
+ # derived classes override cls._FLOAT_MODULE attribute
619
+ msg = ' nnq.' + cls.__name__ + '.from_float only works for ' + \
620
+ cls._FLOAT_MODULE.__name__ # type: ignore[attr-defined]
621
+ assert type(mod) == cls._FLOAT_MODULE, msg
622
+ assert hasattr(mod, 'qconfig'), \
623
+ 'Input float module must have qconfig defined.'
624
+ weight_post_process = mod.qconfig.weight()
625
+ weight_post_process(mod.weight)
626
+ assert weight_post_process.dtype == torch.qint8, \
627
+ 'Weight observer must have a dtype of qint8'
628
+ qweight = _quantize_weight(mod.weight.float(), weight_post_process)
629
+ # the __init__ call used is the one from derived classes and not the one from _ConvTransposeNd
630
+ qconv = cls(mod.in_channels, mod.out_channels, mod.kernel_size, # type: ignore[call-arg]
631
+ mod.stride, mod.padding, mod.output_padding, mod.groups,
632
+ mod.bias is not None, mod.dilation, mod.padding_mode)
633
+ qconv.set_weight_bias(qweight, mod.bias)
634
+ if not hasattr(mod, "activation_post_process") or mod.activation_post_process.dtype == torch.float:
635
+ return qconv # dynamic quantization doesn't need scale/zero_point
636
+ else:
637
+ act_scale, act_zp = mod.activation_post_process.calculate_qparams()
638
+ qconv.scale = float(act_scale)
639
+ qconv.zero_point = int(act_zp)
640
+ return qconv
641
+
642
+ @staticmethod
643
+ def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
644
+ r"""Create a (fbgemm/qnnpack) quantized module from a reference quantized module
645
+ Args:
646
+ ref_qconvt (Module): a reference quantized module, either produced by torch.ao.quantization
647
+ utilities or provided by the user
648
+ output_scale (float): scale for output Tensor
649
+ output_zero_point (int): zero point for output Tensor
650
+ """
651
+ qconv = cls(
652
+ ref_qconvt.in_channels,
653
+ ref_qconvt.out_channels,
654
+ ref_qconvt.kernel_size, # type: ignore[arg-type]
655
+ ref_qconvt.stride, # type: ignore[arg-type]
656
+ ref_qconvt.padding, # type: ignore[arg-type]
657
+ ref_qconvt.output_padding, # type: ignore[arg-type]
658
+ ref_qconvt.groups,
659
+ ref_qconvt.bias is not None, # type: ignore[arg-type]
660
+ ref_qconvt.dilation, # type: ignore[arg-type]
661
+ ref_qconvt.padding_mode,
662
+ device=ref_qconvt.weight.device,
663
+ dtype=ref_qconvt.weight.dtype)
664
+ qweight = ref_qconvt.get_quantized_weight()
665
+ qconv.set_weight_bias(qweight, ref_qconvt.bias)
666
+ qconv.scale = float(output_scale)
667
+ qconv.zero_point = int(output_zero_point)
668
+ return qconv
669
+
670
+
671
+ class ConvTranspose1d(_ConvTransposeNd):
672
+ r"""Applies a 1D transposed convolution operator over an input image
673
+ composed of several input planes.
674
+ For details on input arguments, parameters, and implementation see
675
+ :class:`~torch.nn.ConvTranspose1d`.
676
+
677
+ .. note:: Currently only the QNNPACK engine is implemented.
678
+ Please set `torch.backends.quantized.engine = 'qnnpack'`.
679
+
680
+ For special notes, please see :class:`~torch.ao.nn.quantized.Conv1d`
681
+
682
+ Attributes:
683
+ weight (Tensor): packed tensor derived from the learnable weight
684
+ parameter.
685
+ scale (Tensor): scalar for the output scale
686
+ zero_point (Tensor): scalar for the output zero point
687
+ See :class:`~torch.nn.ConvTranspose1d` for other attributes.
688
+
689
+ Examples::
690
+
691
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
692
+ >>> torch.backends.quantized.engine = 'qnnpack'
693
+ >>> from torch.ao.nn import quantized as nnq
694
+ >>> # With square kernels and equal stride
695
+ >>> m = nnq.ConvTranspose1d(16, 33, 3, stride=2)
696
+ >>> # explicit stride and padding
+ >>> m = nnq.ConvTranspose1d(16, 33, 3, stride=2, padding=4)
698
+ >>> input = torch.randn(20, 16, 50)
699
+ >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
700
+ >>> output = m(q_input)
701
+ >>> # exact output size can be also specified as an argument
702
+ >>> input = torch.randn(1, 16, 12)
703
+ >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
704
+ >>> downsample = nnq.Conv1d(16, 16, 3, stride=2, padding=1)
705
+ >>> upsample = nnq.ConvTranspose1d(16, 16, 3, stride=2, padding=1)
706
+ >>> h = downsample(q_input)
707
+ >>> h.size()
708
+ torch.Size([1, 16, 6])
709
+ >>> # xdoctest: +SKIP("FIXME: output_size is not a parameter)
710
+ >>> output = upsample(h, output_size=input.size())
711
+ >>> output.size()
712
+ torch.Size([1, 16, 12])
713
+ """
714
+
715
+ _FLOAT_MODULE = nn.ConvTranspose1d
716
+
717
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
718
+ padding=0, output_padding=0, groups=1, bias=True,
719
+ dilation=1, padding_mode='zeros', device=None, dtype=None):
720
+ factory_kwargs = {'device': device, 'dtype': dtype}
721
+ kernel_size = _single(kernel_size)
722
+ stride = _single(stride)
723
+ padding = _single(padding)
724
+ dilation = _single(dilation)
725
+ output_padding = _single(output_padding)
726
+
727
+ super().__init__(
728
+ in_channels, out_channels, kernel_size, stride, padding, dilation,
729
+ True, output_padding, groups, bias, padding_mode, **factory_kwargs)
730
+
731
+ def _get_name(self):
732
+ return 'QuantizedConvTranspose1d'
733
+
734
+ def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
735
+ self._packed_params = torch.ops.quantized.conv_transpose1d_prepack(
736
+ w, b, self.stride, self.padding, self.output_padding, self.dilation,
737
+ self.groups)
738
+
739
+ def _weight_bias(self):
740
+ w, b = torch.ops.quantized.conv_transpose1d_unpack(self._packed_params)
741
+ return w, b
742
+
743
+ def weight(self):
744
+ (w, _) = self._weight_bias()
745
+ return w
746
+
747
+ def bias(self):
748
+ (_, b) = self._weight_bias()
749
+ return b
750
+
751
+ def forward(self, input):
752
+ # Temporarily using len(shape) instead of ndim due to JIT issue
753
+ # https://github.com/pytorch/pytorch/issues/23890
754
+ if len(input.shape) != 3:
755
+ raise ValueError("Input shape must be `(N, C, L)`!")
756
+ return torch.ops.quantized.conv_transpose1d(
757
+ input, self._packed_params, self.scale, self.zero_point)
758
+
759
+ @classmethod
760
+ def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
761
+ return _ConvTransposeNd.from_reference(cls, ref_qconvt, output_scale, output_zero_point)
762
+
763
+
764
+ class ConvTranspose2d(_ConvTransposeNd):
765
+ r"""Applies a 2D transposed convolution operator over an input image
766
+ composed of several input planes.
767
+ For details on input arguments, parameters, and implementation see
768
+ :class:`~torch.nn.ConvTranspose2d`.
769
+
770
+ For special notes, please see :class:`~torch.ao.nn.quantized.Conv2d`
771
+
772
+ Attributes:
773
+ weight (Tensor): packed tensor derived from the learnable weight
774
+ parameter.
775
+ scale (Tensor): scalar for the output scale
776
+ zero_point (Tensor): scalar for the output zero point
777
+ See :class:`~torch.nn.ConvTranspose2d` for other attributes.
778
+
779
+ Examples::
780
+
781
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
782
+ >>> # QNNPACK or FBGEMM as backend
783
+ >>> torch.backends.quantized.engine = 'qnnpack'
784
+ >>> # With square kernels and equal stride
785
+ >>> import torch.ao.nn.quantized as nnq
786
+ >>> m = nnq.ConvTranspose2d(16, 33, 3, stride=2)
787
+ >>> # non-square kernels and unequal stride and with padding
788
+ >>> m = nnq.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
789
+ >>> input = torch.randn(20, 16, 50, 100)
790
+ >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
791
+ >>> output = m(q_input)
792
+ >>> # exact output size can be also specified as an argument
793
+ >>> input = torch.randn(1, 16, 12, 12)
794
+ >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
795
+ >>> downsample = nnq.Conv2d(16, 16, 3, stride=2, padding=1)
796
+ >>> upsample = nnq.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
797
+ >>> h = downsample(q_input)
798
+ >>> h.size()
799
+ torch.Size([1, 16, 6, 6])
800
+ >>> # xdoctest: +SKIP("FIXME: output_size is not a parameter)
801
+ >>> output = upsample(h, output_size=input.size())
802
+ >>> output.size()
803
+ torch.Size([1, 16, 12, 12])
804
+ """
805
+
806
+ _FLOAT_MODULE = nn.ConvTranspose2d
807
+
808
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
809
+ padding=0, output_padding=0, groups=1, bias=True,
810
+ dilation=1, padding_mode='zeros', device=None, dtype=None):
811
+ factory_kwargs = {'device': device, 'dtype': dtype}
812
+ kernel_size = _pair(kernel_size)
813
+ stride = _pair(stride)
814
+ padding = _pair(padding)
815
+ dilation = _pair(dilation)
816
+ output_padding = _pair(output_padding)
817
+
818
+ super().__init__(
819
+ in_channels, out_channels, kernel_size, stride, padding, dilation,
820
+ True, output_padding, groups, bias, padding_mode, **factory_kwargs)
821
+
822
+ def _get_name(self):
823
+ return 'QuantizedConvTranspose2d'
824
+
825
+ def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
826
+ self._packed_params = torch.ops.quantized.conv_transpose2d_prepack(
827
+ w, b, self.stride, self.padding, self.output_padding, self.dilation,
828
+ self.groups)
829
+
830
+ def _weight_bias(self):
831
+ w, b = torch.ops.quantized.conv2d_unpack(self._packed_params)
832
+ return w, b
833
+
834
+ def weight(self):
835
+ (w, _) = self._weight_bias()
836
+ return w
837
+
838
+ def bias(self):
839
+ (_, b) = self._weight_bias()
840
+ return b
841
+
842
+ def forward(self, input):
843
+ # Temporarily using len(shape) instead of ndim due to JIT issue
844
+ # https://github.com/pytorch/pytorch/issues/23890
845
+ if len(input.shape) != 4:
846
+ raise ValueError("Input shape must be `(N, C, H, W)`!")
847
+ return ops.quantized.conv_transpose2d(
848
+ input, self._packed_params, self.scale, self.zero_point)
849
+
850
+ @classmethod
851
+ def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
852
+ return _ConvTransposeNd.from_reference(cls, ref_qconvt, output_scale, output_zero_point)
853
+
854
+
855
+ class ConvTranspose3d(_ConvTransposeNd):
856
+ r"""Applies a 3D transposed convolution operator over an input image
857
+ composed of several input planes.
858
+ For details on input arguments, parameters, and implementation see
859
+ :class:`~torch.nn.ConvTranspose3d`.
860
+
861
+ .. note:: Currently only the FBGEMM engine is implemented.
862
+ Please set `torch.backends.quantized.engine = 'fbgemm'`.
863
+
864
+ For special notes, please see :class:`~torch.ao.nn.quantized.Conv3d`
865
+
866
+ Attributes:
867
+ weight (Tensor): packed tensor derived from the learnable weight
868
+ parameter.
869
+ scale (Tensor): scalar for the output scale
870
+ zero_point (Tensor): scalar for the output zero point
871
+ See :class:`~torch.nn.ConvTranspose3d` for other attributes.
872
+
873
+ Examples::
874
+
875
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
876
+ >>> torch.backends.quantized.engine = 'fbgemm'
877
+ >>> from torch.ao.nn import quantized as nnq
878
+ >>> # With cubic kernels and equal stride
879
+ >>> m = nnq.ConvTranspose3d(16, 33, 3, stride=2)
880
+ >>> # non-cubic kernels and unequal stride and with padding
881
+ >>> m = nnq.ConvTranspose3d(16, 33, (3, 3, 5), stride=(2, 1, 1), padding=(4, 2, 2))
882
+ >>> input = torch.randn(20, 16, 50, 100, 100)
883
+ >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
884
+ >>> output = m(q_input)
885
+ >>> # exact output size can be also specified as an argument
886
+ >>> input = torch.randn(1, 16, 12, 12, 12)
887
+ >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
888
+ >>> downsample = nnq.Conv3d(16, 16, 3, stride=2, padding=1)
889
+ >>> upsample = nnq.ConvTranspose3d(16, 16, 3, stride=2, padding=1)
890
+ >>> h = downsample(q_input)
891
+ >>> h.size()
892
+ torch.Size([1, 16, 6, 6, 6])
893
+ >>> # xdoctest: +SKIP("FIXME: output_size is not a parameter)
894
+ >>> output = upsample(h, output_size=input.size())
895
+ >>> output.size()
896
+ torch.Size([1, 16, 12, 12, 12])
897
+ """
898
+
899
+ _FLOAT_MODULE = nn.ConvTranspose3d
900
+
901
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
902
+ padding=0, output_padding=0, groups=1, bias=True,
903
+ dilation=1, padding_mode='zeros', device=None, dtype=None):
904
+ factory_kwargs = {'device': device, 'dtype': dtype}
905
+ kernel_size = _triple(kernel_size)
906
+ stride = _triple(stride)
907
+ padding = _triple(padding)
908
+ dilation = _triple(dilation)
909
+ output_padding = _triple(output_padding)
910
+
911
+ super().__init__(
912
+ in_channels, out_channels, kernel_size, stride, padding, dilation,
913
+ True, output_padding, groups, bias, padding_mode, **factory_kwargs)
914
+
915
+ def _get_name(self):
916
+ return 'QuantizedConvTranspose3d'
917
+
918
+ def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
919
+ self._packed_params = torch.ops.quantized.conv_transpose3d_prepack(
920
+ w, b, self.stride, self.padding, self.output_padding, self.dilation,
921
+ self.groups)
922
+
923
+ def _weight_bias(self):
924
+ w, b = torch.ops.quantized.conv3d_unpack(self._packed_params)
925
+ return w, b
926
+
927
+ def weight(self):
928
+ (w, _) = self._weight_bias()
929
+ return w
930
+
931
+ def bias(self):
932
+ (_, b) = self._weight_bias()
933
+ return b
934
+
935
+ def forward(self, input):
936
+ # Temporarily using len(shape) instead of ndim due to JIT issue
937
+ # https://github.com/pytorch/pytorch/issues/23890
938
+ if len(input.shape) != 5:
939
+ raise ValueError("Input shape must be `(N, C, T, H, W)`!")
940
+ return ops.quantized.conv_transpose3d(
941
+ input, self._packed_params, self.scale, self.zero_point)
942
+
943
+ @classmethod
944
+ def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
945
+ return _ConvTransposeNd.from_reference(cls, ref_qconvt, output_scale, output_zero_point)
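To show where the quantized conv modules in this file come from in practice, here is a hedged eager-mode post-training quantization sketch using the standard torch.ao.quantization helpers (the model, shapes, and qconfig choice are illustrative; an fbgemm/qnnpack backend is assumed):

    import torch
    import torch.nn as nn
    import torch.ao.quantization as tq

    class M(nn.Module):
        def __init__(self):
            super().__init__()
            self.quant = tq.QuantStub()
            self.conv = nn.Conv2d(3, 8, 3)
            self.dequant = tq.DeQuantStub()

        def forward(self, x):
            return self.dequant(self.conv(self.quant(x)))

    m = M().eval()
    m.qconfig = tq.get_default_qconfig('fbgemm')
    tq.prepare(m, inplace=True)          # attaches observers
    m(torch.randn(1, 3, 16, 16))         # calibration pass
    tq.convert(m, inplace=True)          # m.conv is now a QuantizedConv2d from this file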
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/dropout.py ADDED
@@ -0,0 +1,27 @@
1
+ import torch
2
+
3
+ __all__ = ['Dropout']
4
+
5
+ class Dropout(torch.nn.Dropout):
6
+ r"""This is the quantized equivalent of :class:`~torch.nn.Dropout`.
7
+ And this is a placeholder to enable models where fp32 tensors
8
+ had dropout to work with quantized tensors in train and eval mode.
9
+
10
+ Args:
11
+ p: probability of an element to be zeroed
12
+ inplace: can optionally do the operation in-place. Default: ``False``
13
+ """
14
+
15
+ def forward(self, input):
16
+ return input
17
+
18
+ def _get_name(self):
19
+ return 'QuantizedDropout'
20
+
21
+ @classmethod
22
+ def from_float(cls, mod):
23
+ return cls(mod.p, mod.inplace)
24
+
25
+ @classmethod
26
+ def from_reference(cls, mod, scale, zero_point):
27
+ return cls(mod.p, mod.inplace)
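A short illustrative check of the pass-through behaviour documented above: the quantized Dropout simply returns its input, so it is safe to keep in both train and eval graphs:

    import torch
    from torch.ao.nn.quantized import Dropout

    d = Dropout(p=0.5)
    qx = torch.quantize_per_tensor(torch.randn(4), scale=0.1, zero_point=0, dtype=torch.quint8)
    assert d(qx) is qx  # forward is the identity on quantized tensors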