Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
- ckpts/universal/global_step120/zero/14.attention.query_key_value.weight/fp32.pt +3 -0
- ckpts/universal/global_step120/zero/5.attention.query_key_value.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/5.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
- venv/lib/python3.10/site-packages/aiosignal/__init__.pyi +12 -0
- venv/lib/python3.10/site-packages/aiosignal/py.typed +0 -0
- venv/lib/python3.10/site-packages/async_timeout/__init__.py +239 -0
- venv/lib/python3.10/site-packages/async_timeout/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/async_timeout/py.typed +1 -0
- venv/lib/python3.10/site-packages/torch/ao/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__init__.py +36 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__init__.py +38 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/fused.py +160 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__init__.py +1 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__init__.py +31 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/conv_fused.py +825 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_fused.py +171 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_relu.py +48 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__init__.py +14 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__init__.py +1 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__init__.py +6 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/linear_relu.py +55 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__init__.py +17 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_add.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/bn_relu.py +82 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_add.py +93 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_relu.py +175 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/linear_relu.py +177 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__init__.py +1 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__init__.py +19 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/linear.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/14.attention.query_key_value.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd6688987b67a17f3b22fbe54d21537ec5956b9fbcf000f0dcb3b443928a62ec
+size 50332749
ckpts/universal/global_step120/zero/5.attention.query_key_value.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0cf9ec8cb077965574f8a0c74a776c4e7dfad04731e3775cb96a763422e4ffc
+size 50332828
ckpts/universal/global_step120/zero/5.attention.query_key_value.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af4d7613c97ad363690eda7b2c856485ae16911360834eba6761ee4b8eeb11fa
+size 50332843
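
Note: the three checkpoint entries above are Git LFS pointer files, not the tensors themselves; the actual .pt blobs are fetched by LFS at checkout time. As a hedged illustration (the helper below is hypothetical and not part of this repository), a pointer can be split into its version/oid/size fields like so:

# Hypothetical helper: parse a Git LFS pointer file such as fp32.pt above.
# Assumes the standard three-line "version / oid / size" pointer format.
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields  # e.g. {"version": ..., "oid": "sha256:...", "size": "50332749"}

# Example (assumes the pointer file is checked out locally):
# info = parse_lfs_pointer("ckpts/universal/global_step120/zero/14.attention.query_key_value.weight/fp32.pt")
# print(info["oid"], info["size"])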
venv/lib/python3.10/site-packages/aiosignal/__init__.pyi
ADDED
@@ -0,0 +1,12 @@
+from typing import Any, Generic, TypeVar
+
+from frozenlist import FrozenList
+
+__all__ = ("Signal",)
+
+_T = TypeVar("_T")
+
+class Signal(FrozenList[_T], Generic[_T]):
+    def __init__(self, owner: Any) -> None: ...
+    def __repr__(self) -> str: ...
+    async def send(self, *args: Any, **kwargs: Any) -> None: ...
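
The stub above only declares the Signal API. As a rough usage sketch (based on aiosignal's documented behavior, not on anything else in this diff), a Signal collects async callbacks, is frozen once configuration is done, and is then awaited:

# Minimal aiosignal usage sketch; the owner object and callback names are illustrative.
import asyncio
from aiosignal import Signal

async def on_startup(app) -> None:
    print("starting", app)

async def main() -> None:
    app = object()                 # placeholder "owner"
    startup = Signal(app)          # Signal(owner)
    startup.append(on_startup)     # register callbacks while still mutable
    startup.freeze()               # no further changes allowed after freeze()
    await startup.send(app)        # awaits every registered callback in order

asyncio.run(main())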
venv/lib/python3.10/site-packages/aiosignal/py.typed
ADDED
File without changes
venv/lib/python3.10/site-packages/async_timeout/__init__.py
ADDED
@@ -0,0 +1,239 @@
+import asyncio
+import enum
+import sys
+import warnings
+from types import TracebackType
+from typing import Optional, Type
+
+
+if sys.version_info >= (3, 8):
+    from typing import final
+else:
+    from typing_extensions import final
+
+
+if sys.version_info >= (3, 11):
+
+    def _uncancel_task(task: "asyncio.Task[object]") -> None:
+        task.uncancel()
+
+else:
+
+    def _uncancel_task(task: "asyncio.Task[object]") -> None:
+        pass
+
+
+__version__ = "4.0.3"
+
+
+__all__ = ("timeout", "timeout_at", "Timeout")
+
+
+def timeout(delay: Optional[float]) -> "Timeout":
+    """timeout context manager.
+
+    Useful in cases when you want to apply timeout logic around block
+    of code or in cases when asyncio.wait_for is not suitable. For example:
+
+    >>> async with timeout(0.001):
+    ...     async with aiohttp.get('https://github.com') as r:
+    ...         await r.text()
+
+
+    delay - value in seconds or None to disable timeout logic
+    """
+    loop = asyncio.get_running_loop()
+    if delay is not None:
+        deadline = loop.time() + delay  # type: Optional[float]
+    else:
+        deadline = None
+    return Timeout(deadline, loop)
+
+
+def timeout_at(deadline: Optional[float]) -> "Timeout":
+    """Schedule the timeout at absolute time.
+
+    deadline argument points on the time in the same clock system
+    as loop.time().
+
+    Please note: it is not POSIX time but a time with
+    undefined starting base, e.g. the time of the system power on.
+
+    >>> async with timeout_at(loop.time() + 10):
+    ...     async with aiohttp.get('https://github.com') as r:
+    ...         await r.text()
+
+
+    """
+    loop = asyncio.get_running_loop()
+    return Timeout(deadline, loop)
+
+
+class _State(enum.Enum):
+    INIT = "INIT"
+    ENTER = "ENTER"
+    TIMEOUT = "TIMEOUT"
+    EXIT = "EXIT"
+
+
+@final
+class Timeout:
+    # Internal class, please don't instantiate it directly
+    # Use timeout() and timeout_at() public factories instead.
+    #
+    # Implementation note: `async with timeout()` is preferred
+    # over `with timeout()`.
+    # While technically the Timeout class implementation
+    # doesn't need to be async at all,
+    # the `async with` statement explicitly points that
+    # the context manager should be used from async function context.
+    #
+    # This design allows to avoid many silly misusages.
+    #
+    # TimeoutError is raised immediately when scheduled
+    # if the deadline is passed.
+    # The purpose is to time out as soon as possible
+    # without waiting for the next await expression.
+
+    __slots__ = ("_deadline", "_loop", "_state", "_timeout_handler", "_task")
+
+    def __init__(
+        self, deadline: Optional[float], loop: asyncio.AbstractEventLoop
+    ) -> None:
+        self._loop = loop
+        self._state = _State.INIT
+
+        self._task: Optional["asyncio.Task[object]"] = None
+        self._timeout_handler = None  # type: Optional[asyncio.Handle]
+        if deadline is None:
+            self._deadline = None  # type: Optional[float]
+        else:
+            self.update(deadline)
+
+    def __enter__(self) -> "Timeout":
+        warnings.warn(
+            "with timeout() is deprecated, use async with timeout() instead",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        self._do_enter()
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> Optional[bool]:
+        self._do_exit(exc_type)
+        return None
+
+    async def __aenter__(self) -> "Timeout":
+        self._do_enter()
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> Optional[bool]:
+        self._do_exit(exc_type)
+        return None
+
+    @property
+    def expired(self) -> bool:
+        """Is timeout expired during execution?"""
+        return self._state == _State.TIMEOUT
+
+    @property
+    def deadline(self) -> Optional[float]:
+        return self._deadline
+
+    def reject(self) -> None:
+        """Reject scheduled timeout if any."""
+        # cancel is maybe better name but
+        # task.cancel() raises CancelledError in asyncio world.
+        if self._state not in (_State.INIT, _State.ENTER):
+            raise RuntimeError(f"invalid state {self._state.value}")
+        self._reject()
+
+    def _reject(self) -> None:
+        self._task = None
+        if self._timeout_handler is not None:
+            self._timeout_handler.cancel()
+            self._timeout_handler = None
+
+    def shift(self, delay: float) -> None:
+        """Advance timeout on delay seconds.
+
+        The delay can be negative.
+
+        Raise RuntimeError if shift is called when deadline is not scheduled
+        """
+        deadline = self._deadline
+        if deadline is None:
+            raise RuntimeError("cannot shift timeout if deadline is not scheduled")
+        self.update(deadline + delay)
+
+    def update(self, deadline: float) -> None:
+        """Set deadline to absolute value.
+
+        deadline argument points on the time in the same clock system
+        as loop.time().
+
+        If new deadline is in the past the timeout is raised immediately.
+
+        Please note: it is not POSIX time but a time with
+        undefined starting base, e.g. the time of the system power on.
+        """
+        if self._state == _State.EXIT:
+            raise RuntimeError("cannot reschedule after exit from context manager")
+        if self._state == _State.TIMEOUT:
+            raise RuntimeError("cannot reschedule expired timeout")
+        if self._timeout_handler is not None:
+            self._timeout_handler.cancel()
+        self._deadline = deadline
+        if self._state != _State.INIT:
+            self._reschedule()
+
+    def _reschedule(self) -> None:
+        assert self._state == _State.ENTER
+        deadline = self._deadline
+        if deadline is None:
+            return
+
+        now = self._loop.time()
+        if self._timeout_handler is not None:
+            self._timeout_handler.cancel()
+
+        self._task = asyncio.current_task()
+        if deadline <= now:
+            self._timeout_handler = self._loop.call_soon(self._on_timeout)
+        else:
+            self._timeout_handler = self._loop.call_at(deadline, self._on_timeout)
+
+    def _do_enter(self) -> None:
+        if self._state != _State.INIT:
+            raise RuntimeError(f"invalid state {self._state.value}")
+        self._state = _State.ENTER
+        self._reschedule()
+
+    def _do_exit(self, exc_type: Optional[Type[BaseException]]) -> None:
+        if exc_type is asyncio.CancelledError and self._state == _State.TIMEOUT:
+            assert self._task is not None
+            _uncancel_task(self._task)
+            self._timeout_handler = None
+            self._task = None
+            raise asyncio.TimeoutError
+        # timeout has not expired
+        self._state = _State.EXIT
+        self._reject()
+        return None
+
+    def _on_timeout(self) -> None:
+        assert self._task is not None
+        self._task.cancel()
+        self._state = _State.TIMEOUT
+        # drop the reference early
+        self._timeout_handler = None
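
For reference, the context manager added above is used as in the sketch below (a minimal example relying only on the public timeout()/Timeout API shown in this file):

# Minimal async_timeout usage sketch based on the API defined above.
import asyncio
from async_timeout import timeout

async def main() -> None:
    try:
        async with timeout(0.5) as cm:   # schedules cancellation 0.5 s from now
            await asyncio.sleep(10)      # cancelled before it completes
    except asyncio.TimeoutError:
        print("timed out, expired =", cm.expired)  # expired is True after a timeout

asyncio.run(main())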
venv/lib/python3.10/site-packages/async_timeout/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (6.57 kB).
venv/lib/python3.10/site-packages/async_timeout/py.typed
ADDED
@@ -0,0 +1 @@
+Placeholder
venv/lib/python3.10/site-packages/torch/ao/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (482 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (502 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__init__.py
ADDED
@@ -0,0 +1,36 @@
+from .modules import *  # noqa: F403
+from .modules.fused import _FusedModule  # noqa: F403
+
+# # Subpackages
+# from . import qat  # noqa: F403
+# from . import quantized  # noqa: F403
+
+__all__ = [
+    'ConvBn1d',
+    'ConvBn2d',
+    'ConvBn3d',
+    'ConvBnReLU1d',
+    'ConvBnReLU2d',
+    'ConvBnReLU3d',
+    'ConvReLU1d',
+    'ConvReLU2d',
+    'ConvReLU3d',
+    'LinearReLU',
+    'BNReLU2d',
+    'BNReLU3d',
+    'LinearBn1d',
+    'LinearLeakyReLU',
+    'LinearTanh',
+    'ConvAdd2d',
+    'ConvAddReLU2d',
+]
+
+# We are exposing all subpackages to the end-user.
+# Because of possible inter-dependency, we want to avoid
+# the cyclic imports, thus implementing lazy version
+# as per https://peps.python.org/pep-0562/
+def __getattr__(name):
+    if name in __all__:
+        import importlib
+        return importlib.import_module("." + name, __name__)
+    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (750 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__init__.py
ADDED
@@ -0,0 +1,38 @@
+from .fused import _FusedModule  # noqa: F401
+from .fused import ConvBn1d
+from .fused import ConvBn2d
+from .fused import ConvBn3d
+from .fused import ConvBnReLU1d
+from .fused import ConvBnReLU2d
+from .fused import ConvBnReLU3d
+from .fused import ConvReLU1d
+from .fused import ConvReLU2d
+from .fused import ConvReLU3d
+from .fused import LinearReLU
+from .fused import BNReLU2d
+from .fused import BNReLU3d
+from .fused import LinearBn1d
+from .fused import LinearLeakyReLU
+from .fused import LinearTanh
+from .fused import ConvAdd2d
+from .fused import ConvAddReLU2d
+
+__all__ = [
+    'ConvBn1d',
+    'ConvBn2d',
+    'ConvBn3d',
+    'ConvBnReLU1d',
+    'ConvBnReLU2d',
+    'ConvBnReLU3d',
+    'ConvReLU1d',
+    'ConvReLU2d',
+    'ConvReLU3d',
+    'LinearReLU',
+    'BNReLU2d',
+    'BNReLU3d',
+    'LinearBn1d',
+    'LinearLeakyReLU',
+    'LinearTanh',
+    'ConvAdd2d',
+    'ConvAddReLU2d',
+]
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (909 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc
ADDED
Binary file (8.55 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/fused.py
ADDED
@@ -0,0 +1,160 @@
+import torch
+from torch.nn import Conv1d, Conv2d, Conv3d, ReLU, Linear, BatchNorm1d, BatchNorm2d, BatchNorm3d
+from torch.nn.utils.parametrize import type_before_parametrizations
+
+__all__ = ['ConvReLU1d', 'ConvReLU2d', 'ConvReLU3d', 'LinearReLU', 'ConvBn1d', 'ConvBn2d',
+           'ConvBnReLU1d', 'ConvBnReLU2d', 'ConvBn3d', 'ConvBnReLU3d', 'BNReLU2d', 'BNReLU3d',
+           'LinearBn1d', 'LinearLeakyReLU', 'LinearTanh', 'ConvAdd2d', 'ConvAddReLU2d']
+
+# Used for identifying intrinsic modules used in quantization
+class _FusedModule(torch.nn.Sequential):
+    pass
+
+class ConvReLU1d(_FusedModule):
+    r"""This is a sequential container which calls the Conv1d and ReLU modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, conv, relu):
+        assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(relu) == ReLU, \
+            f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}'
+        super().__init__(conv, relu)
+
+class ConvReLU2d(_FusedModule):
+    r"""This is a sequential container which calls the Conv2d and ReLU modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, conv, relu):
+        assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(relu) == ReLU, \
+            f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}'
+        super().__init__(conv, relu)
+
+class ConvReLU3d(_FusedModule):
+    r"""This is a sequential container which calls the Conv3d and ReLU modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, conv, relu):
+        assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(relu) == ReLU, \
+            f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}'
+        super().__init__(conv, relu)
+
+class LinearReLU(_FusedModule):
+    r"""This is a sequential container which calls the Linear and ReLU modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, linear, relu):
+        assert type_before_parametrizations(linear) == Linear and type_before_parametrizations(relu) == ReLU, \
+            'Incorrect types for input modules{}{}'.format(
+                type_before_parametrizations(linear), type_before_parametrizations(relu))
+        super().__init__(linear, relu)
+
+class ConvBn1d(_FusedModule):
+    r"""This is a sequential container which calls the Conv 1d and Batch Norm 1d modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, conv, bn):
+        assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(bn) == BatchNorm1d, \
+            f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}'
+        super().__init__(conv, bn)
+
+class ConvBn2d(_FusedModule):
+    r"""This is a sequential container which calls the Conv 2d and Batch Norm 2d modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, conv, bn):
+        assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(bn) == BatchNorm2d, \
+            f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}'
+        super().__init__(conv, bn)
+
+class ConvBnReLU1d(_FusedModule):
+    r"""This is a sequential container which calls the Conv 1d, Batch Norm 1d, and ReLU modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, conv, bn, relu):
+        assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(bn) == BatchNorm1d and \
+            type_before_parametrizations(relu) == ReLU, 'Incorrect types for input modules{}{}{}' \
+            .format(type_before_parametrizations(conv), type_before_parametrizations(bn), type_before_parametrizations(relu))
+        super().__init__(conv, bn, relu)
+
+class ConvBnReLU2d(_FusedModule):
+    r"""This is a sequential container which calls the Conv 2d, Batch Norm 2d, and ReLU modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, conv, bn, relu):
+        assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(bn) == BatchNorm2d and \
+            type_before_parametrizations(relu) == ReLU, 'Incorrect types for input modules{}{}{}' \
+            .format(type_before_parametrizations(conv), type_before_parametrizations(bn), type_before_parametrizations(relu))
+        super().__init__(conv, bn, relu)
+
+class ConvBn3d(_FusedModule):
+    r"""This is a sequential container which calls the Conv 3d and Batch Norm 3d modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, conv, bn):
+        assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(bn) == BatchNorm3d, \
+            f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}'
+        super().__init__(conv, bn)
+
+class ConvBnReLU3d(_FusedModule):
+    r"""This is a sequential container which calls the Conv 3d, Batch Norm 3d, and ReLU modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, conv, bn, relu):
+        assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(bn) == BatchNorm3d and \
+            type_before_parametrizations(relu) == ReLU, 'Incorrect types for input modules{}{}{}' \
+            .format(type_before_parametrizations(conv), type_before_parametrizations(bn), type_before_parametrizations(relu))
+        super().__init__(conv, bn, relu)
+
+
+class BNReLU2d(_FusedModule):
+    r"""This is a sequential container which calls the BatchNorm 2d and ReLU modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, batch_norm, relu):
+        assert type_before_parametrizations(batch_norm) == BatchNorm2d and type_before_parametrizations(relu) == ReLU, \
+            'Incorrect types for input modules{}{}'.format(
+                type_before_parametrizations(batch_norm), type_before_parametrizations(relu))
+        super().__init__(batch_norm, relu)
+
+class BNReLU3d(_FusedModule):
+    r"""This is a sequential container which calls the BatchNorm 3d and ReLU modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, batch_norm, relu):
+        assert type_before_parametrizations(batch_norm) == BatchNorm3d and type_before_parametrizations(relu) == ReLU, \
+            'Incorrect types for input modules{}{}'.format(
+                type_before_parametrizations(batch_norm), type_before_parametrizations(relu))
+        super().__init__(batch_norm, relu)
+
+
+class LinearBn1d(_FusedModule):
+    r"""This is a sequential container which calls the Linear and BatchNorm1d modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, linear, bn):
+        assert type_before_parametrizations(linear) == Linear and type_before_parametrizations(bn) == BatchNorm1d, \
+            f'Incorrect types for input modules{type_before_parametrizations(linear)}{type_before_parametrizations(bn)}'
+        super().__init__(linear, bn)
+
+class LinearLeakyReLU(_FusedModule):
+    r"""This is a sequential container which calls the Linear and LeakyReLU modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, linear, leaky_relu):
+        assert type(linear) == Linear and type(leaky_relu) == torch.nn.LeakyReLU, \
+            f'Incorrect types for input modules{type(linear)}{type(leaky_relu)}'
+        super().__init__(linear, leaky_relu)
+
+class LinearTanh(_FusedModule):
+    r"""This is a sequential container which calls the Linear and Tanh modules.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, linear, tanh):
+        assert type(linear) == Linear and type(tanh) == torch.nn.Tanh, \
+            f'Incorrect types for input modules{type(linear)}{type(tanh)}'
+        super().__init__(linear, tanh)
+
+class ConvAdd2d(_FusedModule):
+    r"""This is a sequential container which calls the Conv2d modules with extra Add.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, conv, add):
+        super().__init__(conv)
+        self.add = add
+
+    def forward(self, x1, x2):
+        return self.add(self[0](x1), x2)
+
+class ConvAddReLU2d(_FusedModule):
+    r"""This is a sequential container which calls the Conv2d, add, Relu.
+    During quantization this will be replaced with the corresponding fused module."""
+    def __init__(self, conv, add, relu):
+        super().__init__(conv)
+        self.add = add
+        self.relu = relu
+
+    def forward(self, x1, x2):
+        return self.relu(self.add(self[0](x1), x2))
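
The containers defined above are normally produced by the fusion utilities rather than built by hand. A hedged sketch of both routes (direct construction, and torch.ao.quantization.fuse_modules on a small eval-mode model) follows:

# Sketch: obtaining the intrinsic fused containers defined above.
import torch.nn as nn
import torch.ao.nn.intrinsic as nni
from torch.ao.quantization import fuse_modules

# Route 1: construct the container directly from compatible float modules.
conv_relu = nni.ConvReLU2d(nn.Conv2d(3, 8, 3), nn.ReLU())

# Route 2: let the fusion pass rewrite named submodules of an eval-mode model.
# In eval mode the BatchNorm is folded into the conv, so submodule "0" becomes
# a ConvReLU2d and "1"/"2" become Identity.
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU()).eval()
fused = fuse_modules(model, [["0", "1", "2"]])
print(type(fused[0]))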
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__init__.py
ADDED
@@ -0,0 +1 @@
+from .modules import *  # noqa: F403
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (217 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__init__.py
ADDED
@@ -0,0 +1,31 @@
+from .linear_relu import LinearReLU
+from .linear_fused import LinearBn1d
+from .conv_fused import (
+    ConvBn1d,
+    ConvBn2d,
+    ConvBn3d,
+    ConvBnReLU1d,
+    ConvBnReLU2d,
+    ConvBnReLU3d,
+    ConvReLU1d,
+    ConvReLU2d,
+    ConvReLU3d,
+    update_bn_stats,
+    freeze_bn_stats,
+)
+
+__all__ = [
+    "LinearReLU",
+    "LinearBn1d",
+    "ConvReLU1d",
+    "ConvReLU2d",
+    "ConvReLU3d",
+    "ConvBn1d",
+    "ConvBn2d",
+    "ConvBn3d",
+    "ConvBnReLU1d",
+    "ConvBnReLU2d",
+    "ConvBnReLU3d",
+    "update_bn_stats",
+    "freeze_bn_stats",
+]
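
These QAT modules are usually swapped in by the prepare step rather than instantiated directly. A minimal sketch of that flow (assuming the standard eager-mode torch.ao.quantization workflow, nothing specific to this commit) is:

# Sketch: how the QAT fused modules listed above typically enter a model.
import torch.nn as nn
from torch.ao.quantization import get_default_qat_qconfig, prepare_qat, fuse_modules_qat

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU()).train()
model.qconfig = get_default_qat_qconfig("fbgemm")
model = fuse_modules_qat(model, [["0", "1", "2"]])  # -> intrinsic ConvBnReLU2d container
model = prepare_qat(model)                          # -> qat ConvBnReLU2d with fake-quant attached
print(type(model[0]))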
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (645 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc
ADDED
Binary file (19.2 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc
ADDED
Binary file (4.94 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc
ADDED
Binary file (2.18 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/conv_fused.py
ADDED
@@ -0,0 +1,825 @@
+import math
+import torch
+import torch.nn as nn
+import torch.ao.nn.intrinsic as nni
+import torch.ao.nn.qat as nnqat
+import torch.nn.functional as F
+from torch.nn import init
+from torch.nn.utils import fuse_conv_bn_weights
+from torch.nn.modules.utils import _single, _pair, _triple
+from torch.nn.parameter import Parameter
+from typing import TypeVar
+
+__all__ = ['ConvBn1d', 'ConvBnReLU1d', 'ConvReLU1d', 'ConvBn2d', 'ConvBnReLU2d', 'ConvReLU2d', 'ConvBn3d',
+           'ConvBnReLU3d', 'ConvReLU3d', 'update_bn_stats', 'freeze_bn_stats']
+_BN_CLASS_MAP = {
+    1: nn.BatchNorm1d,
+    2: nn.BatchNorm2d,
+    3: nn.BatchNorm3d,
+}
+
+
+MOD = TypeVar('MOD', bound=nn.modules.conv._ConvNd)
+
+
+class _ConvBnNd(nn.modules.conv._ConvNd, nni._FusedModule):
+
+    _version = 2
+    _FLOAT_MODULE = MOD
+
+    def __init__(self,
+                 # ConvNd args
+                 in_channels, out_channels, kernel_size, stride,
+                 padding, dilation, transposed, output_padding,
+                 groups,
+                 bias,
+                 padding_mode,
+                 # BatchNormNd args
+                 # num_features: out_channels
+                 eps=1e-05, momentum=0.1,
+                 # affine: True
+                 # track_running_stats: True
+                 # Args for this module
+                 freeze_bn=False,
+                 qconfig=None,
+                 dim=2):
+        nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels, kernel_size,
+                                         stride, padding, dilation, transposed,
+                                         output_padding, groups, False, padding_mode)
+        assert qconfig, 'qconfig must be provided for QAT module'
+        self.qconfig = qconfig
+        self.freeze_bn = freeze_bn if self.training else True
+        self.bn = _BN_CLASS_MAP[dim](out_channels, eps, momentum, True, True)
+        self.weight_fake_quant = self.qconfig.weight()
+        if bias:
+            self.bias = Parameter(torch.empty(out_channels))
+        else:
+            self.register_parameter('bias', None)
+        self.reset_bn_parameters()
+
+        # this needs to be called after reset_bn_parameters,
+        # as they modify the same state
+        if self.training:
+            if freeze_bn:
+                self.freeze_bn_stats()
+            else:
+                self.update_bn_stats()
+        else:
+            self.freeze_bn_stats()
+
+        self._enable_slow_path_for_better_numerical_stability = False
+
+    def reset_running_stats(self):
+        self.bn.reset_running_stats()
+
+    def reset_bn_parameters(self):
+        self.bn.reset_running_stats()
+        init.uniform_(self.bn.weight)
+        init.zeros_(self.bn.bias)
+        # note: below is actually for conv, not BN
+        if self.bias is not None:
+            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
+            bound = 1 / math.sqrt(fan_in)
+            init.uniform_(self.bias, -bound, bound)
+
+    def reset_parameters(self):
+        super().reset_parameters()
+
+    def update_bn_stats(self):
+        self.freeze_bn = False
+        self.bn.training = True
+        return self
+
+    def freeze_bn_stats(self):
+        self.freeze_bn = True
+        self.bn.training = False
+        return self
+
+    def _forward(self, input):
+        if self._enable_slow_path_for_better_numerical_stability:
+            return self._forward_slow(input)
+        return self._forward_approximate(input)
+
+    def _forward_approximate(self, input):
+        """Approximated method to fuse conv and bn. It requires only one forward pass.
+        conv_orig = conv / scale_factor where scale_factor = bn.weight / running_std
+        """
+        assert self.bn.running_var is not None
+        running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
+        scale_factor = self.bn.weight / running_std
+        weight_shape = [1] * len(self.weight.shape)
+        weight_shape[0] = -1
+        bias_shape = [1] * len(self.weight.shape)
+        bias_shape[1] = -1
+        scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape(weight_shape))
+        # using zero bias here since the bias for original conv
+        # will be added later
+        if self.bias is not None:
+            zero_bias = torch.zeros_like(self.bias, dtype=input.dtype)
+        else:
+            zero_bias = torch.zeros(self.out_channels, device=scaled_weight.device, dtype=input.dtype)
+        conv = self._conv_forward(input, scaled_weight, zero_bias)
+        conv_orig = conv / scale_factor.reshape(bias_shape)
+        if self.bias is not None:
+            conv_orig = conv_orig + self.bias.reshape(bias_shape)
+        conv = self.bn(conv_orig)
+        return conv
+
+    def _forward_slow(self, input):
+        """
+        A more accurate but slow method to compute conv bn fusion, following https://arxiv.org/pdf/1806.08342.pdf
+        It requires two forward passes but handles the case bn.weight == 0
+
+        Conv: Y = WX + B_c
+        Conv without bias: Y0 = WX = Y - B_c, Y = Y0 + B_c
+
+        Batch statistics:
+          mean_Y = Y.mean()
+                 = Y0.mean() + B_c
+          var_Y = (Y - mean_Y)^2.mean()
+                = (Y0 - Y0.mean())^2.mean()
+        BN (r: bn.weight, beta: bn.bias):
+          Z = r * (Y - mean_Y) / sqrt(var_Y + eps) + beta
+            = r * (Y0 - Y0.mean()) / sqrt(var_Y + eps) + beta
+
+        Fused Conv BN training (std_Y = sqrt(var_Y + eps)):
+          Z = (r * W / std_Y) * X + r * (B_c - mean_Y) / std_Y + beta
+            = (r * W / std_Y) * X - r * Y0.mean() / std_Y + beta
+
+        Fused Conv BN inference (running_std = sqrt(running_var + eps)):
+          Z = (r * W / running_std) * X - r * (running_mean - B_c) / running_std + beta
+
+        QAT with fused conv bn:
+          Z_train = fake_quant(r * W / running_std) * X * (running_std / std_Y) - r * Y0.mean() / std_Y + beta
+                  = conv(X, fake_quant(r * W / running_std)) * (running_std / std_Y) - r * Y0.mean() / std_Y + beta
+          Z_inference = conv(X, fake_quant(r * W / running_std)) - r * (running_mean - B_c) / running_std + beta
+        """
+
+        assert self.bn.running_var is not None
+        assert self.bn.running_mean is not None
+
+        # using zero bias here since the bias for original conv
+        # will be added later
+        zero_bias = torch.zeros(self.out_channels, device=self.weight.device, dtype=input.dtype)
+
+        weight_shape = [1] * len(self.weight.shape)
+        weight_shape[0] = -1
+        bias_shape = [1] * len(self.weight.shape)
+        bias_shape[1] = -1
+
+        if self.bn.training:
+            # needed to compute batch mean/std
+            conv_out = self._conv_forward(input, self.weight, zero_bias)
+            # update bn statistics
+            with torch.no_grad():
+                conv_out_bias = (
+                    conv_out if self.bias is None else conv_out + self.bias.reshape(bias_shape)
+                )
+                self.bn(conv_out_bias)
+
+        # fused conv + bn without bias using bn running statistics
+        running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
+        scale_factor = self.bn.weight / running_std
+        scaled_weight = self.weight_fake_quant(
+            self.weight * scale_factor.reshape(weight_shape)
+        )
+        # fused conv without bias for inference: (r * W / running_std) * X
+        conv_bn = self._conv_forward(input, scaled_weight, zero_bias)
+
+        if self.bn.training:
+            avg_dims = [0] + list(range(2, len(self.weight.shape)))
+            batch_mean = conv_out.mean(avg_dims)  # type: ignore[possibly-undefined]
+            batch_var = torch.square(conv_out - batch_mean.reshape(bias_shape)).mean(
+                avg_dims
+            )
+            batch_std = torch.sqrt(batch_var + self.bn.eps)
+
+            # scale to use batch std in training mode
+            # conv(X, r * W / std_Y) = conv(X, r * W / running_std) * (running_std / std_Y)
+            unscale_factor = running_std / batch_std
+            conv_bn *= unscale_factor.reshape(bias_shape)
+
+            fused_mean = batch_mean
+            fused_std = batch_std
+        else:
+            fused_mean = self.bn.running_mean - (self.bias if self.bias is not None else 0)
+            fused_std = running_std
+
+        # fused bias = beta - r * mean / std
+        fused_bias = self.bn.bias - self.bn.weight * fused_mean / fused_std
+        conv_bn += fused_bias.reshape(bias_shape)
+
+        # HACK to let conv bias participate in loss to avoid DDP error (parameters
+        #   were not used in producing loss)
+        if self.bias is not None:
+            conv_bn += (self.bias - self.bias).reshape(bias_shape)
+
+        return conv_bn
+
+    def extra_repr(self):
+        # TODO(jerryzh): extend
+        return super().extra_repr()
+
+    def forward(self, input):
+        return self._forward(input)
+
+    def train(self, mode=True):
+        """
+        Batchnorm's training behavior is using the self.training flag. Prevent
+        changing it if BN is frozen. This makes sure that calling `model.train()`
+        on a model with a frozen BN will behave properly.
+        """
+        self.training = mode
+        if not self.freeze_bn:
+            for module in self.children():
+                module.train(mode)
+        return self
+
+    # ===== Serialization version history =====
+    #
+    # Version 1/None
+    #   self
+    #   |--- weight : Tensor
+    #   |--- bias : Tensor
+    #   |--- gamma : Tensor
+    #   |--- beta : Tensor
+    #   |--- running_mean : Tensor
+    #   |--- running_var : Tensor
+    #   |--- num_batches_tracked : Tensor
+    #
+    # Version 2
+    #   self
+    #   |--- weight : Tensor
+    #   |--- bias : Tensor
+    #   |--- bn : Module
+    #        |--- weight : Tensor (moved from v1.self.gamma)
+    #        |--- bias : Tensor (moved from v1.self.beta)
+    #        |--- running_mean : Tensor (moved from v1.self.running_mean)
+    #        |--- running_var : Tensor (moved from v1.self.running_var)
+    #        |--- num_batches_tracked : Tensor (moved from v1.self.num_batches_tracked)
+    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
+        version = local_metadata.get('version', None)
+        if version is None or version == 1:
+            # BN related parameters and buffers were moved into the BN module for v2
+            v2_to_v1_names = {
+                'bn.weight': 'gamma',
+                'bn.bias': 'beta',
+                'bn.running_mean': 'running_mean',
+                'bn.running_var': 'running_var',
+                'bn.num_batches_tracked': 'num_batches_tracked',
+            }
+            for v2_name, v1_name in v2_to_v1_names.items():
+                if prefix + v1_name in state_dict:
+                    state_dict[prefix + v2_name] = state_dict[prefix + v1_name]
+                    state_dict.pop(prefix + v1_name)
+                elif prefix + v2_name in state_dict:
+                    # there was a brief period where forward compatibility
+                    # for this module was broken (between
+                    # https://github.com/pytorch/pytorch/pull/38478
+                    # and https://github.com/pytorch/pytorch/pull/38820)
+                    # and modules emitted the v2 state_dict format while
+                    # specifying that version == 1. This patches the forward
+                    # compatibility issue by allowing the v2 style entries to
+                    # be used.
+                    pass
+                elif strict:
+                    missing_keys.append(prefix + v2_name)
+
+        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict,
+                                      missing_keys, unexpected_keys, error_msgs)
+
+    @classmethod
+    def from_float(cls, mod):
+        r"""Create a qat module from a float module or qparams_dict
+
+            Args: `mod` a float module, either produced by torch.ao.quantization utilities
+            or directly from user
+        """
+        # The ignore is because _FLOAT_MODULE is a TypeVar here where the bound
+        # has no __name__ (code is fine though)
+        assert type(mod) == cls._FLOAT_MODULE, 'qat.' + cls.__name__ + '.from_float only works for ' + \
+            cls._FLOAT_MODULE.__name__  # type: ignore[attr-defined]
+        assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
+        assert mod.qconfig, 'Input float module must have a valid qconfig'
+        qconfig = mod.qconfig
+        conv, bn = mod[0], mod[1]
+        qat_convbn = cls(conv.in_channels, conv.out_channels, conv.kernel_size,
+                         conv.stride, conv.padding, conv.dilation,
+                         conv.groups, conv.bias is not None,
+                         conv.padding_mode,
+                         bn.eps, bn.momentum,
+                         False,
+                         qconfig)
+        qat_convbn.weight = conv.weight
+        qat_convbn.bias = conv.bias
+        qat_convbn.bn.weight = bn.weight
+        qat_convbn.bn.bias = bn.bias
+        qat_convbn.bn.running_mean = bn.running_mean
+        qat_convbn.bn.running_var = bn.running_var
+        # mypy error: Cannot determine type of 'num_batches_tracked'
+        qat_convbn.bn.num_batches_tracked = bn.num_batches_tracked  # type: ignore[has-type]
+        return qat_convbn
+
+    def to_float(self):
+        cls = type(self)
+        conv = cls._FLOAT_CONV_MODULE(  # type: ignore[attr-defined]
+            self.in_channels,
+            self.out_channels,
+            self.kernel_size,
+            self.stride,
+            self.padding,
+            self.dilation,
+            self.groups,
+            self.bias is not None,
+            self.padding_mode)
+        conv.weight = torch.nn.Parameter(self.weight.detach())
+        if self.bias is not None:
+            conv.bias = torch.nn.Parameter(self.bias.detach())
+
+        if cls._FLOAT_BN_MODULE:  # type: ignore[attr-defined]
+            # fuse bn into conv
+            assert self.bn.running_var is not None and self.bn.running_mean is not None
+            conv.weight, conv.bias = fuse_conv_bn_weights(
+                conv.weight,
+                conv.bias,
+                self.bn.running_mean,
+                self.bn.running_var,
+                self.bn.eps,
+                self.bn.weight,
+                self.bn.bias
+            )
+
+        if cls._FLOAT_RELU_MODULE:  # type: ignore[attr-defined]
+            modules = []
+            modules.append(conv)
+            relu = cls._FLOAT_RELU_MODULE()  # type: ignore[attr-defined]
+            modules.append(relu)
+            conv_relu = cls._FUSED_FLOAT_MODULE(*modules)  # type: ignore[attr-defined]
+            conv_relu.train(self.training)
+            return conv_relu
+        else:
+            conv.train(self.training)
+            return conv
+
+class ConvBn1d(_ConvBnNd, nn.Conv1d):
+    r"""
+    A ConvBn1d module is a module fused from Conv1d and BatchNorm1d,
+    attached with FakeQuantize modules for weight,
+    used in quantization aware training.
+
+    We combined the interface of :class:`torch.nn.Conv1d` and
+    :class:`torch.nn.BatchNorm1d`.
+
+    Similar to :class:`torch.nn.Conv1d`, with FakeQuantize modules initialized
+    to default.
+
+    Attributes:
+        freeze_bn:
+        weight_fake_quant: fake quant module for weight
+
+    """
+    _FLOAT_BN_MODULE = nn.BatchNorm1d
+    _FLOAT_RELU_MODULE: None = None
+    _FLOAT_MODULE = nni.ConvBn1d
+    _FLOAT_CONV_MODULE = nn.Conv1d
+
+    def __init__(self,
+                 # Conv1d args
+                 in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, dilation=1, groups=1,
+                 bias=None,
+                 padding_mode='zeros',
+                 # BatchNorm1d args
+                 # num_features: out_channels
+                 eps=1e-05, momentum=0.1,
+                 # affine: True
+                 # track_running_stats: True
+                 # Args for this module
+                 freeze_bn=False,
+                 qconfig=None):
+        kernel_size = _single(kernel_size)
+        stride = _single(stride)
+        padding = _single(padding)
+        dilation = _single(dilation)
+        _ConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride,
+                           padding, dilation, False, _single(0), groups, bias, padding_mode,
+                           eps, momentum, freeze_bn, qconfig, dim=1)
+
+class ConvBnReLU1d(ConvBn1d):
+    r"""
+    A ConvBnReLU1d module is a module fused from Conv1d, BatchNorm1d and ReLU,
+    attached with FakeQuantize modules for weight,
+    used in quantization aware training.
+
+    We combined the interface of :class:`torch.nn.Conv1d` and
+    :class:`torch.nn.BatchNorm1d` and :class:`torch.nn.ReLU`.
+
+    Similar to `torch.nn.Conv1d`, with FakeQuantize modules initialized to
+    default.
+
+    Attributes:
+        weight_fake_quant: fake quant module for weight
+
+    """
+    # base class defines _FLOAT_MODULE as "ConvBn1d"
+    _FLOAT_MODULE = nni.ConvBnReLU1d  # type: ignore[assignment]
+    _FLOAT_CONV_MODULE = nn.Conv1d
+    _FLOAT_BN_MODULE = nn.BatchNorm1d
+    _FLOAT_RELU_MODULE = nn.ReLU  # type: ignore[assignment]
+    # module class after fusing bn into conv
+    _FUSED_FLOAT_MODULE = nni.ConvReLU1d
+
+    def __init__(self,
+                 # Conv1d args
+                 in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, dilation=1, groups=1,
+                 bias=None,
+                 padding_mode='zeros',
+                 # BatchNorm1d args
+                 # num_features: out_channels
+                 eps=1e-05, momentum=0.1,
+                 # affine: True
+                 # track_running_stats: True
+                 # Args for this module
+                 freeze_bn=False,
+                 qconfig=None):
+        super().__init__(in_channels, out_channels, kernel_size, stride,
+                         padding, dilation, groups, bias,
+                         padding_mode, eps, momentum,
+                         freeze_bn,
+                         qconfig)
+
+    def forward(self, input):
+        return F.relu(ConvBn1d._forward(self, input))
+
+    @classmethod
+    def from_float(cls, mod):
+        return super().from_float(mod)
+
+class ConvReLU1d(nnqat.Conv1d, nni._FusedModule):
+    r"""A ConvReLU1d module is a fused module of Conv1d and ReLU, attached with
+    FakeQuantize modules for weight for
+    quantization aware training.
+
+    We combined the interface of :class:`~torch.nn.Conv1d` and
+    :class:`~torch.nn.BatchNorm1d`.
+
+    Attributes:
+        weight_fake_quant: fake quant module for weight
+
+    """
+    _FLOAT_MODULE = nni.ConvReLU1d  # type: ignore[assignment]
+    _FLOAT_CONV_MODULE = nn.Conv1d
+    _FLOAT_BN_MODULE: None = None
+    _FLOAT_RELU_MODULE = nn.ReLU
+
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, dilation=1, groups=1,
+                 bias=True, padding_mode='zeros',
+                 qconfig=None):
+        super().__init__(in_channels, out_channels, kernel_size,
+                         stride=stride, padding=padding, dilation=dilation,
+                         groups=groups, bias=bias, padding_mode=padding_mode,
+                         qconfig=qconfig)
+        assert qconfig, 'qconfig must be provided for QAT module'
+        self.qconfig = qconfig
+        self.weight_fake_quant = self.qconfig.weight()
+
+    def forward(self, input):
+        return F.relu(
+            self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias))
+
+    @classmethod
+    def from_float(cls, mod):
+        return super().from_float(mod)
+
+class ConvBn2d(_ConvBnNd, nn.Conv2d):
+    r"""
+    A ConvBn2d module is a module fused from Conv2d and BatchNorm2d,
+    attached with FakeQuantize modules for weight,
+    used in quantization aware training.
+
+    We combined the interface of :class:`torch.nn.Conv2d` and
+    :class:`torch.nn.BatchNorm2d`.
+
+    Similar to :class:`torch.nn.Conv2d`, with FakeQuantize modules initialized
+    to default.
+
+    Attributes:
+        freeze_bn:
+        weight_fake_quant: fake quant module for weight
+
+    """
+    _FLOAT_MODULE = nni.ConvBn2d
+    _FLOAT_CONV_MODULE = nn.Conv2d
+    _FLOAT_BN_MODULE = nn.BatchNorm2d
+    _FLOAT_RELU_MODULE: None = None
+
+    def __init__(self,
+                 # ConvNd args
+                 in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, dilation=1, groups=1,
+                 bias=None,
+                 padding_mode='zeros',
+                 # BatchNorm2d args
+                 # num_features: out_channels
+                 eps=1e-05, momentum=0.1,
+                 # affine: True
+                 # track_running_stats: True
+                 # Args for this module
+                 freeze_bn=False,
+                 qconfig=None):
+        kernel_size = _pair(kernel_size)
+        stride = _pair(stride)
+        padding = _pair(padding)
+        dilation = _pair(dilation)
+        _ConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride,
+                           padding, dilation, False, _pair(0), groups, bias, padding_mode,
+                           eps, momentum, freeze_bn, qconfig, dim=2)
+
+class ConvBnReLU2d(ConvBn2d):
+    r"""
+    A ConvBnReLU2d module is a module fused from Conv2d, BatchNorm2d and ReLU,
+    attached with FakeQuantize modules for weight,
+    used in quantization aware training.
+
+    We combined the interface of :class:`torch.nn.Conv2d` and
+    :class:`torch.nn.BatchNorm2d` and :class:`torch.nn.ReLU`.
+
+    Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
+    default.
+
+    Attributes:
+        weight_fake_quant: fake quant module for weight
+
+    """
+    # base class defines _FLOAT_MODULE as "ConvBn2d"
+    _FLOAT_MODULE = nni.ConvBnReLU2d  # type: ignore[assignment]
+    _FLOAT_CONV_MODULE = nn.Conv2d
+    _FLOAT_BN_MODULE = nn.BatchNorm2d
+    _FLOAT_RELU_MODULE = nn.ReLU  # type: ignore[assignment]
+    # module class after fusing bn into conv
+    _FUSED_FLOAT_MODULE = nni.ConvReLU2d
+
+    def __init__(self,
+                 # Conv2d args
+                 in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, dilation=1, groups=1,
+                 bias=None,
+                 padding_mode='zeros',
+                 # BatchNorm2d args
+                 # num_features: out_channels
+                 eps=1e-05, momentum=0.1,
+                 # affine: True
+                 # track_running_stats: True
+                 # Args for this module
+                 freeze_bn=False,
+                 qconfig=None):
+        super().__init__(in_channels, out_channels, kernel_size, stride,
+                         padding, dilation, groups, bias,
+                         padding_mode, eps, momentum,
+                         freeze_bn,
+                         qconfig)
+
+    def forward(self, input):
+        return F.relu(ConvBn2d._forward(self, input))
+
+    @classmethod
+    def from_float(cls, mod):
+        return super().from_float(mod)
+
+class ConvReLU2d(nnqat.Conv2d, nni._FusedModule):
+    r"""A ConvReLU2d module is a fused module of Conv2d and ReLU, attached with
+    FakeQuantize modules for weight for
+    quantization aware training.
+
+    We combined the interface of :class:`~torch.nn.Conv2d` and
+    :class:`~torch.nn.BatchNorm2d`.
+
+    Attributes:
+        weight_fake_quant: fake quant module for weight
+
+    """
+    _FLOAT_MODULE = nni.ConvReLU2d  # type: ignore[assignment]
+    _FLOAT_CONV_MODULE = nn.Conv2d
+    _FLOAT_BN_MODULE: None = None
+    _FLOAT_RELU_MODULE = nn.ReLU
+
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, dilation=1, groups=1,
+                 bias=True, padding_mode='zeros',
+                 qconfig=None):
+        super().__init__(in_channels, out_channels, kernel_size,
+                         stride=stride, padding=padding, dilation=dilation,
+                         groups=groups, bias=bias, padding_mode=padding_mode,
+                         qconfig=qconfig)
+        assert qconfig, 'qconfig must be provided for QAT module'
+        self.qconfig = qconfig
+        self.weight_fake_quant = self.qconfig.weight()
+
+    def forward(self, input):
+        return F.relu(
+            self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias))
|
623 |
+
|
624 |
+
@classmethod
|
625 |
+
def from_float(cls, mod):
|
626 |
+
return super().from_float(mod)
|
627 |
+
|
628 |
+
class ConvBn3d(_ConvBnNd, nn.Conv3d):
|
629 |
+
r"""
|
630 |
+
A ConvBn3d module is a module fused from Conv3d and BatchNorm3d,
|
631 |
+
attached with FakeQuantize modules for weight,
|
632 |
+
used in quantization aware training.
|
633 |
+
|
634 |
+
We combined the interface of :class:`torch.nn.Conv3d` and
|
635 |
+
:class:`torch.nn.BatchNorm3d`.
|
636 |
+
|
637 |
+
Similar to :class:`torch.nn.Conv3d`, with FakeQuantize modules initialized
|
638 |
+
to default.
|
639 |
+
|
640 |
+
Attributes:
|
641 |
+
freeze_bn:
|
642 |
+
weight_fake_quant: fake quant module for weight
|
643 |
+
|
644 |
+
"""
|
645 |
+
_FLOAT_MODULE = nni.ConvBn3d
|
646 |
+
_FLOAT_CONV_MODULE = nn.Conv3d
|
647 |
+
_FLOAT_BN_MODULE = nn.BatchNorm3d
|
648 |
+
_FLOAT_RELU_MODULE: None = None
|
649 |
+
|
650 |
+
def __init__(
|
651 |
+
self,
|
652 |
+
# ConvNd args
|
653 |
+
in_channels,
|
654 |
+
out_channels,
|
655 |
+
kernel_size,
|
656 |
+
stride=1,
|
657 |
+
padding=0,
|
658 |
+
dilation=1,
|
659 |
+
groups=1,
|
660 |
+
bias=None,
|
661 |
+
padding_mode="zeros",
|
662 |
+
# BatchNorm3d args
|
663 |
+
# num_features: out_channels
|
664 |
+
eps=1e-05,
|
665 |
+
momentum=0.1,
|
666 |
+
# affine: True
|
667 |
+
# track_running_stats: True
|
668 |
+
# Args for this module
|
669 |
+
freeze_bn=False,
|
670 |
+
qconfig=None,
|
671 |
+
):
|
672 |
+
kernel_size = _triple(kernel_size)
|
673 |
+
stride = _triple(stride)
|
674 |
+
padding = _triple(padding)
|
675 |
+
dilation = _triple(dilation)
|
676 |
+
_ConvBnNd.__init__(
|
677 |
+
self,
|
678 |
+
in_channels,
|
679 |
+
out_channels,
|
680 |
+
kernel_size,
|
681 |
+
stride,
|
682 |
+
padding,
|
683 |
+
dilation,
|
684 |
+
False,
|
685 |
+
_triple(0),
|
686 |
+
groups,
|
687 |
+
bias,
|
688 |
+
padding_mode,
|
689 |
+
eps,
|
690 |
+
momentum,
|
691 |
+
freeze_bn,
|
692 |
+
qconfig,
|
693 |
+
dim=3,
|
694 |
+
)
|
695 |
+
|
696 |
+
class ConvBnReLU3d(ConvBn3d):
|
697 |
+
r"""
|
698 |
+
A ConvBnReLU3d module is a module fused from Conv3d, BatchNorm3d and ReLU,
|
699 |
+
attached with FakeQuantize modules for weight,
|
700 |
+
used in quantization aware training.
|
701 |
+
|
702 |
+
We combined the interface of :class:`torch.nn.Conv3d` and
|
703 |
+
:class:`torch.nn.BatchNorm3d` and :class:`torch.nn.ReLU`.
|
704 |
+
|
705 |
+
Similar to `torch.nn.Conv3d`, with FakeQuantize modules initialized to
|
706 |
+
default.
|
707 |
+
|
708 |
+
Attributes:
|
709 |
+
weight_fake_quant: fake quant module for weight
|
710 |
+
|
711 |
+
"""
|
712 |
+
_FLOAT_MODULE = nni.ConvBnReLU3d # type: ignore[assignment]
|
713 |
+
_FLOAT_CONV_MODULE = nn.Conv3d
|
714 |
+
_FLOAT_BN_MODULE = nn.BatchNorm3d
|
715 |
+
_FLOAT_RELU_MODULE = nn.ReLU # type: ignore[assignment]
|
716 |
+
# module class after fusing bn into conv
|
717 |
+
_FUSED_FLOAT_MODULE = nni.ConvReLU3d
|
718 |
+
|
719 |
+
def __init__(
|
720 |
+
self,
|
721 |
+
# Conv3d args
|
722 |
+
in_channels,
|
723 |
+
out_channels,
|
724 |
+
kernel_size,
|
725 |
+
stride=1,
|
726 |
+
padding=0,
|
727 |
+
dilation=1,
|
728 |
+
groups=1,
|
729 |
+
bias=None,
|
730 |
+
padding_mode="zeros",
|
731 |
+
# BatchNorm3d args
|
732 |
+
# num_features: out_channels
|
733 |
+
eps=1e-05,
|
734 |
+
momentum=0.1,
|
735 |
+
# affine: True
|
736 |
+
# track_running_stats: True
|
737 |
+
# Args for this module
|
738 |
+
freeze_bn=False,
|
739 |
+
qconfig=None,
|
740 |
+
):
|
741 |
+
super().__init__(
|
742 |
+
in_channels,
|
743 |
+
out_channels,
|
744 |
+
kernel_size,
|
745 |
+
stride,
|
746 |
+
padding,
|
747 |
+
dilation,
|
748 |
+
groups,
|
749 |
+
bias,
|
750 |
+
padding_mode,
|
751 |
+
eps,
|
752 |
+
momentum,
|
753 |
+
freeze_bn,
|
754 |
+
qconfig,
|
755 |
+
)
|
756 |
+
|
757 |
+
def forward(self, input):
|
758 |
+
return F.relu(ConvBn3d._forward(self, input))
|
759 |
+
|
760 |
+
@classmethod
|
761 |
+
def from_float(cls, mod):
|
762 |
+
return super().from_float(mod)
|
763 |
+
|
764 |
+
class ConvReLU3d(nnqat.Conv3d, nni._FusedModule):
|
765 |
+
r"""A ConvReLU3d module is a fused module of Conv3d and ReLU, attached with
|
766 |
+
FakeQuantize modules for weight for
|
767 |
+
quantization aware training.
|
768 |
+
|
769 |
+
We combined the interface of :class:`~torch.nn.Conv3d` and
|
770 |
+
:class:`~torch.nn.BatchNorm3d`.
|
771 |
+
|
772 |
+
Attributes:
|
773 |
+
weight_fake_quant: fake quant module for weight
|
774 |
+
|
775 |
+
"""
|
776 |
+
_FLOAT_MODULE = nni.ConvReLU3d # type: ignore[assignment]
|
777 |
+
_FLOAT_CONV_MODULE = nn.Conv3d
|
778 |
+
_FLOAT_BN_MODULE: None = None
|
779 |
+
_FLOAT_RELU_MODULE = nn.ReLU
|
780 |
+
|
781 |
+
def __init__(
|
782 |
+
self,
|
783 |
+
in_channels,
|
784 |
+
out_channels,
|
785 |
+
kernel_size,
|
786 |
+
stride=1,
|
787 |
+
padding=0,
|
788 |
+
dilation=1,
|
789 |
+
groups=1,
|
790 |
+
bias=True,
|
791 |
+
padding_mode="zeros",
|
792 |
+
qconfig=None,
|
793 |
+
):
|
794 |
+
super().__init__(
|
795 |
+
in_channels,
|
796 |
+
out_channels,
|
797 |
+
kernel_size,
|
798 |
+
stride=stride,
|
799 |
+
padding=padding,
|
800 |
+
dilation=dilation,
|
801 |
+
groups=groups,
|
802 |
+
bias=bias,
|
803 |
+
padding_mode=padding_mode,
|
804 |
+
qconfig=qconfig,
|
805 |
+
)
|
806 |
+
assert qconfig, "qconfig must be provided for QAT module"
|
807 |
+
self.qconfig = qconfig
|
808 |
+
self.weight_fake_quant = self.qconfig.weight()
|
809 |
+
|
810 |
+
def forward(self, input):
|
811 |
+
return F.relu(
|
812 |
+
self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
|
813 |
+
)
|
814 |
+
|
815 |
+
@classmethod
|
816 |
+
def from_float(cls, mod):
|
817 |
+
return super().from_float(mod)
|
818 |
+
|
819 |
+
def update_bn_stats(mod):
|
820 |
+
if type(mod) in {ConvBnReLU1d, ConvBnReLU2d, ConvBnReLU3d, ConvBn1d, ConvBn2d, ConvBn3d}:
|
821 |
+
mod.update_bn_stats()
|
822 |
+
|
823 |
+
def freeze_bn_stats(mod):
|
824 |
+
if type(mod) in {ConvBnReLU1d, ConvBnReLU2d, ConvBnReLU3d, ConvBn1d, ConvBn2d, ConvBn3d}:
|
825 |
+
mod.freeze_bn_stats()
|
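The QAT fused modules in the file above are normally created by the eager-mode prepare step rather than instantiated by hand. Below is a minimal sketch, assuming the standard eager-mode QAT workflow in `torch.ao.quantization`; the `SmallNet` model and its layer names are invented here purely for illustration and are not part of the diff.

# Minimal sketch (not part of the diff): eager-mode QAT prep that is expected
# to yield torch.ao.nn.intrinsic.qat.ConvBnReLU2d for the conv/bn/relu chain.
import torch
import torch.nn as nn
import torch.ao.quantization as tq

class SmallNet(nn.Module):          # hypothetical toy model
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, 3)
        self.bn = nn.BatchNorm2d(8)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))

model = SmallNet().train()
model.qconfig = tq.get_default_qat_qconfig("fbgemm")
# Fuse conv+bn+relu into an intrinsic module, then swap in the QAT version.
fused = tq.fuse_modules_qat(model, [["conv", "bn", "relu"]])
prepared = tq.prepare_qat(fused)
print(type(prepared.conv))           # expected: ConvBnReLU2d from the file above
out = prepared(torch.randn(2, 3, 8, 8))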
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_fused.py
ADDED
@@ -0,0 +1,171 @@
import torch
import torch.nn as nn
import torch.ao.nn.intrinsic as nni
import torch.nn.functional as F
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.nn.utils.fusion import fuse_linear_bn_weights

__all__ = [
    "LinearBn1d",
]

class LinearBn1d(nn.modules.linear.Linear, nni._FusedModule):
    r"""
    A LinearBn1d module is a module fused from Linear and BatchNorm1d, attached
    with FakeQuantize modules for weight, used in quantization aware training.

    We combined the interface of :class:`torch.nn.Linear` and
    :class:torch.nn.BatchNorm1d`.

    Similar to :class:`torch.nn.Linear`, with FakeQuantize modules initialized
    to default.

    Attributes:
        freeze_bn:
        weight_fake_quant: fake quant module for weight

    """
    def __init__(self,
                 # Linear args
                 in_features, out_features, bias=True,
                 # BatchNorm1d args
                 # num_features: out_features
                 eps=1e-05, momentum=0.1,
                 # affine: True
                 # track_running_stats: True
                 # Args for this module
                 freeze_bn=False,
                 qconfig=None):
        nn.modules.linear.Linear.__init__(self, in_features, out_features, bias)
        assert qconfig, 'qconfig must be provided for QAT module'
        self.qconfig = qconfig
        self.freeze_bn = freeze_bn if self.training else True
        self.bn = nn.BatchNorm1d(out_features, eps, momentum, True, True)
        self.weight_fake_quant = self.qconfig.weight()
        if bias:
            self.bias = Parameter(torch.empty(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_bn_parameters()

        # this needs to be called after reset_bn_parameters,
        # as they modify the same state
        if self.training:
            if freeze_bn:
                self.freeze_bn_stats()
            else:
                self.update_bn_stats()
        else:
            self.freeze_bn_stats()

    def reset_running_stats(self):
        self.bn.reset_running_stats()

    def reset_bn_parameters(self):
        self.bn.reset_running_stats()
        init.uniform_(self.bn.weight)
        init.zeros_(self.bn.bias)

    def reset_parameters(self):
        super().reset_parameters()

    def update_bn_stats(self):
        self.freeze_bn = False
        self.bn.training = True
        return self

    def freeze_bn_stats(self):
        self.freeze_bn = True
        self.bn.training = False
        return self

    def forward(self, input):
        assert self.bn.running_var is not None

        # Scale the linear weights by BN's running statistics to reduce
        # weight jitter, see https://arxiv.org/pdf/1806.08342.pdf, page 18
        # for motivation.
        #
        # Instead of
        #
        #   x1 = F.linear(x0, fq(w), b)
        #   x2 = self.bn(x1)
        #
        # We have
        #
        #   # scale the weight by previous batch's running statistics
        #   scale_factor = bn.w / bn.running_std_from_prev_batch
        #   # do the linear transformation without bias
        #   x1_scaled = F.linear(x0, fq(w * scale_factor), 0)
        #   # reverse the scaling and add original bias
        #   x1_orig = x1_scaled / scale_factor + b
        #   x2 = self.bn(x1_orig)

        running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
        scale_factor = self.bn.weight / running_std
        weight_shape = [1] * len(self.weight.shape)
        weight_shape[0] = -1
        bias_shape = [1] * len(self.weight.shape)
        bias_shape[1] = -1
        scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape(weight_shape))
        if self.bias is not None:
            zero_bias = torch.zeros_like(self.bias)
        else:
            zero_bias = torch.zeros(self.out_features, device=scaled_weight.device)
        linear_out = F.linear(input, scaled_weight, zero_bias)
        linear_out_orig = linear_out / scale_factor.reshape(bias_shape)
        if self.bias is not None:
            linear_out_orig = linear_out_orig + self.bias.reshape(bias_shape)
        bn_out = self.bn(linear_out_orig)
        return bn_out

    def train(self, mode=True):
        """
        Batchnorm's training behavior is using the self.training flag. Prevent
        changing it if BN is frozen. This makes sure that calling `model.train()`
        on a model with a frozen BN will behave properly.
        """
        self.training = mode
        if not self.freeze_bn:
            for module in self.children():
                module.train(mode)
        return self

    @classmethod
    def from_float(cls, mod):
        r"""Create a qat module from a float module or qparams_dict

        Args: `mod' a float module, either produced by torch.ao.quantization
        utilities or directly from user
        """
        assert type(mod) == nni.LinearBn1d, 'qat.' + cls.__name__ + \
            '.from_float only works for ' + nni.LinearBn1d.__name__
        assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
        assert mod.qconfig, 'Input float module must have a valid config'
        qconfig = mod.qconfig
        linear, bn = mod[0], mod[1]
        qat_linearbn = cls(linear.in_features, linear.out_features, linear.bias is not None,
                           bn.eps, bn.momentum,
                           False, qconfig)
        qat_linearbn.weight = linear.weight
        qat_linearbn.bias = linear.bias
        qat_linearbn.bn.weight = bn.weight
        qat_linearbn.bn.bias = bn.bias
        qat_linearbn.bn.running_mean = bn.running_mean
        qat_linearbn.bn.running_var = bn.running_var
        qat_linearbn.bn.num_batches_tracked = bn.num_batches_tracked
        return qat_linearbn

    def to_float(self):
        linear = torch.nn.Linear(self.in_features, self.out_features)
        assert self.bn.running_var is not None and self.bn.running_mean is not None
        linear.weight, linear.bias = fuse_linear_bn_weights(
            self.weight,
            self.bias,
            self.bn.running_mean,
            self.bn.running_var,
            self.bn.eps,
            self.bn.weight,
            self.bn.bias)
        return linear
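The weight-scaling identity described in the `forward` comment of `LinearBn1d` above can be checked numerically. The following is a small standalone sketch, with arbitrary shapes and stand-in tensors chosen for illustration only: folding the per-channel factor `bn.weight / running_std` into the weight, running the linear without bias, then unscaling and re-adding the bias reproduces the plain `F.linear` output.

# Standalone check of the scale/unscale identity; the numbers are arbitrary.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.randn(4, 5)                     # batch of 4, in_features = 5
w = torch.randn(3, 5)                     # out_features = 3
b = torch.randn(3)
bn_weight = torch.rand(3) + 0.5           # stands in for bn.weight
running_std = torch.rand(3) + 0.5         # stands in for sqrt(running_var + eps)

scale = bn_weight / running_std
y_scaled = F.linear(x, w * scale.reshape(-1, 1))        # scaled weight, no bias
y = y_scaled / scale.reshape(1, -1) + b                 # undo scaling, add bias
print(torch.allclose(y, F.linear(x, w, b), atol=1e-6))  # True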
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_relu.py
ADDED
@@ -0,0 +1,48 @@
import torch
import torch.ao.nn.qat as nnqat
import torch.ao.nn.intrinsic as nni
import torch.nn.functional as F

class LinearReLU(nnqat.Linear, nni._FusedModule):
    r"""
    A LinearReLU module fused from Linear and ReLU modules, attached with
    FakeQuantize modules for weight, used in
    quantization aware training.

    We adopt the same interface as :class:`torch.nn.Linear`.

    Similar to `torch.ao.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to
    default.

    Attributes:
        weight: fake quant module for weight

    Examples::

        >>> # xdoctest: +SKIP
        >>> m = nn.qat.LinearReLU(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    """
    _FLOAT_MODULE = nni.LinearReLU  # type: ignore[assignment]

    def __init__(self, in_features, out_features, bias=True,
                 qconfig=None):
        super().__init__(in_features, out_features, bias, qconfig)

    def forward(self, input):
        return F.relu(F.linear(input, self.weight_fake_quant(self.weight), self.bias))

    @classmethod
    def from_float(cls, mod):
        return super().from_float(mod)

    def to_float(self):
        linear = torch.nn.Linear(self.in_features, self.out_features, self.bias is not None)
        linear.weight = torch.nn.Parameter(self.weight.detach())
        if self.bias is not None:
            linear.bias = torch.nn.Parameter(self.bias.detach())
        relu = torch.nn.ReLU()
        return torch.ao.nn.intrinsic.LinearReLU(linear, relu)
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__init__.py
ADDED
@@ -0,0 +1,14 @@
from .modules import *  # noqa: F403

__all__ = [
    'BNReLU2d',
    'BNReLU3d',
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
    'LinearReLU',
    'LinearLeakyReLU',
    'LinearTanh',
    'ConvAdd2d',
    'ConvAddReLU2d',
]
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (367 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__init__.py
ADDED
@@ -0,0 +1 @@
from .modules import *  # noqa: F403
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (231 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__init__.py
ADDED
@@ -0,0 +1,6 @@
import torch
from .linear_relu import LinearReLU

__all__ = [
    'LinearReLU',
]
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (307 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc
ADDED
Binary file (2.39 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/linear_relu.py
ADDED
@@ -0,0 +1,55 @@
import torch
import torch.ao.nn.quantized.dynamic as nnqd
import torch.ao.nn.intrinsic as nni

__all__ = [
    "LinearReLU"
]

class LinearReLU(nnqd.Linear):
    r"""
    A LinearReLU module fused from Linear and ReLU modules that can be used
    for dynamic quantization.
    Supports both, FP16 and INT8 quantization.

    We adopt the same interface as :class:`torch.ao.nn.quantized.dynamic.Linear`.

    Attributes:
        Same as torch.ao.nn.quantized.dynamic.Linear

    Examples::

        >>> # xdoctest: +SKIP
        >>> m = nn.intrinsic.quantized.dynamic.LinearReLU(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    """
    _FLOAT_MODULE = nni.LinearReLU  # type: ignore[assignment]

    def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):
        super().__init__(in_features, out_features, bias, dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self._packed_params.dtype == torch.qint8:
            # TODO check if we should set reduce_rage = True by default here
            Y = torch.ops.quantized.linear_relu_dynamic(
                x, self._packed_params._packed_params, reduce_range=True)
        elif self._packed_params.dtype == torch.float16:
            Y = torch.ops.quantized.linear_relu_dynamic_fp16(
                x, self._packed_params._packed_params)
        else:
            raise RuntimeError('Unsupported dtype on dynamic quantized linear relu!')
        return Y.to(x.dtype)

    def _get_name(self):
        return 'DynamicQuantizedLinearReLU'

    @classmethod
    def from_float(cls, mod):
        return super().from_float(mod)

    @classmethod
    def from_reference(cls, ref_qlinear_relu):
        return super().from_reference(ref_qlinear_relu[0])
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__init__.py
ADDED
@@ -0,0 +1,17 @@
from .linear_relu import LinearReLU, LinearLeakyReLU, LinearTanh
from .conv_relu import ConvReLU1d, ConvReLU2d, ConvReLU3d
from .bn_relu import BNReLU2d, BNReLU3d
from .conv_add import ConvAdd2d, ConvAddReLU2d

__all__ = [
    'LinearReLU',
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
    'BNReLU2d',
    'BNReLU3d',
    'LinearLeakyReLU',
    'LinearTanh',
    'ConvAdd2d',
    'ConvAddReLU2d',
]
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (582 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc
ADDED
Binary file (3.07 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_add.cpython-310.pyc
ADDED
Binary file (3.34 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc
ADDED
Binary file (5.59 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc
ADDED
Binary file (6.36 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/bn_relu.py
ADDED
@@ -0,0 +1,82 @@
import torch
import torch.ao.nn.intrinsic
import torch.ao.nn.intrinsic.qat
import torch.ao.nn.quantized as nnq

__all__ = [
    "BNReLU2d",
    "BNReLU3d"
]

class BNReLU2d(nnq.BatchNorm2d):
    r"""
    A BNReLU2d module is a fused module of BatchNorm2d and ReLU

    We adopt the same interface as :class:`torch.ao.nn.quantized.BatchNorm2d`.

    Attributes:
        Same as torch.ao.nn.quantized.BatchNorm2d

    """
    _FLOAT_MODULE = torch.ao.nn.intrinsic.BNReLU2d

    def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
        super().__init__(num_features, eps=eps, momentum=momentum, device=device, dtype=dtype)

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        return torch.ops.quantized.batch_norm2d_relu(
            input, self.weight, self.bias, self.running_mean,
            self.running_var, self.eps, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedBNReLU2d'

    @classmethod
    def from_float(cls, mod):
        # TODO: Add qat support for BNReLU2d
        return super().from_float(mod)

    @classmethod
    def from_reference(cls, bn_relu, output_scale, output_zero_point):
        return super().from_reference(bn_relu[0], output_scale, output_zero_point)

class BNReLU3d(nnq.BatchNorm3d):
    r"""
    A BNReLU3d module is a fused module of BatchNorm3d and ReLU

    We adopt the same interface as :class:`torch.ao.nn.quantized.BatchNorm3d`.

    Attributes:
        Same as torch.ao.nn.quantized.BatchNorm3d

    """
    _FLOAT_MODULE = torch.ao.nn.intrinsic.BNReLU3d

    def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
        super().__init__(num_features, eps=eps, momentum=momentum, device=device, dtype=dtype)

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 5:
            raise ValueError("Input shape must be `(N, C, D, H, W)`!")
        return torch.ops.quantized.batch_norm3d_relu(
            input, self.weight, self.bias, self.running_mean,
            self.running_var, self.eps, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedBNReLU3d'

    @classmethod
    def from_float(cls, mod):
        # TODO: Add qat support for BNReLU3d
        return super().from_float(mod)

    @classmethod
    def from_reference(cls, bn_relu, output_scale, output_zero_point):
        return super().from_reference(bn_relu[0], output_scale, output_zero_point)
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_add.py
ADDED
@@ -0,0 +1,93 @@
import torch
import torch.ao.nn.intrinsic
import torch.ao.nn.intrinsic.qat
import torch.nn.functional as F
import torch.ao.nn.quantized as nnq

_reverse_repeat_padding = nnq.modules.conv._reverse_repeat_padding

class ConvAdd2d(nnq.Conv2d):
    r"""
    A ConvAdd2d module is a fused module of Conv2d and Add

    We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`.

    Attributes:
        Same as torch.ao.nn.quantized.Conv2d

    """
    _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvAdd2d  # type: ignore[assignment]

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None):
        super().__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,
            padding_mode=padding_mode, device=device, dtype=dtype)

    def forward(self, input, extra_input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        if self.padding_mode != 'zeros':
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return torch.ops.quantized.conv2d_add(
            input, extra_input, self._packed_params, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedConvAdd2d'

    @classmethod
    def from_float(cls, mod):
        return super().from_float(mod)

    @classmethod
    def from_reference(cls, ref_qconv, output_scale, output_zero_point):
        return super().from_reference(ref_qconv[0], output_scale, output_zero_point)

class ConvAddReLU2d(nnq.Conv2d):
    r"""
    A ConvAddReLU2d module is a fused module of Conv2d, Add and Relu

    We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`.

    Attributes:
        Same as torch.ao.nn.quantized.Conv2d

    """
    _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvAddReLU2d  # type: ignore[assignment]

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None):
        super().__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,
            padding_mode=padding_mode, device=device, dtype=dtype)

    def forward(self, input, extra_input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        if self.padding_mode != 'zeros':
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return torch.ops.quantized.conv2d_add_relu(
            input, extra_input, self._packed_params, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedConvAddReLU2d'

    @classmethod
    def from_float(cls, mod):
        return super().from_float(mod)

    @classmethod
    def from_reference(cls, ref_qconv, output_scale, output_zero_point):
        return super().from_reference(ref_qconv[0], output_scale, output_zero_point)
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_relu.py
ADDED
@@ -0,0 +1,175 @@
import torch
import torch.ao.nn.intrinsic
import torch.ao.nn.intrinsic.qat
import torch.nn.functional as F
import torch.ao.nn.quantized as nnq

from torch.nn.utils import fuse_conv_bn_weights

__all__ = [
    "ConvReLU1d",
    "ConvReLU2d",
    "ConvReLU3d",
]

_reverse_repeat_padding = nnq.modules.conv._reverse_repeat_padding

# TODO: factor out the common parts to ConvNd
class ConvReLU1d(nnq.Conv1d):
    r"""
    A ConvReLU1d module is a fused module of Conv1d and ReLU

    We adopt the same interface as :class:`torch.ao.nn.quantized.Conv1d`.

    Attributes:
        Same as torch.ao.nn.quantized.Conv1d

    """
    _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvReLU1d  # type: ignore[assignment]

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None):
        super().__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,
            padding_mode=padding_mode, device=device, dtype=dtype)

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 3:
            raise ValueError("Input shape must be `(N, C, L)`!")
        if self.padding_mode != 'zeros':
            # Padding in Conv1d is stored as (p, p), need to get (p,)
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding[:1])
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return torch.ops.quantized.conv1d_relu(
            input, self._packed_params, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedConvReLU1d'

    @classmethod
    def from_float(cls, mod):
        if type(mod) == torch.ao.nn.intrinsic.qat.ConvBnReLU1d:
            assert mod.bn.running_var is not None and mod.bn.running_mean is not None
            mod.weight, mod.bias = fuse_conv_bn_weights(
                mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
                mod.bn.eps, mod.bn.weight, mod.bn.bias)
        return super().from_float(mod)

    @classmethod
    def from_reference(cls, ref_qconv, output_scale, output_zero_point):
        assert type(ref_qconv) != torch.ao.nn.intrinsic.ConvBnReLU1d, \
            "BatchNorm1d should be fused into Conv1d before converting to reference module"
        return super().from_reference(ref_qconv[0], output_scale, output_zero_point)

class ConvReLU2d(nnq.Conv2d):
    r"""
    A ConvReLU2d module is a fused module of Conv2d and ReLU

    We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`.

    Attributes:
        Same as torch.ao.nn.quantized.Conv2d

    """
    _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvReLU2d  # type: ignore[assignment]

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None):
        super().__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,
            padding_mode=padding_mode, device=device, dtype=dtype)

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        if self.padding_mode != 'zeros':
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return torch.ops.quantized.conv2d_relu(
            input, self._packed_params, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedConvReLU2d'

    @classmethod
    def from_float(cls, mod):
        if type(mod) == torch.ao.nn.intrinsic.qat.ConvBnReLU2d:
            assert mod.bn.running_var is not None and mod.bn.running_mean is not None
            mod.weight, mod.bias = fuse_conv_bn_weights(
                mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
                mod.bn.eps, mod.bn.weight, mod.bn.bias)
        return super().from_float(mod)

    @classmethod
    def from_reference(cls, ref_qconv, output_scale, output_zero_point):
        assert type(ref_qconv) != torch.ao.nn.intrinsic.ConvBnReLU2d, \
            "BatchNorm2d should be fused into Conv2d before converting to reference module"
        return super().from_reference(ref_qconv[0], output_scale, output_zero_point)


class ConvReLU3d(nnq.Conv3d):
    r"""
    A ConvReLU3d module is a fused module of Conv3d and ReLU

    We adopt the same interface as :class:`torch.ao.nn.quantized.Conv3d`.

    Attributes: Same as torch.ao.nn.quantized.Conv3d

    """
    _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvReLU3d  # type: ignore[assignment]

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None):
        assert padding_mode != 'reflect', "Conv3d does not support reflection padding"
        super().__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,
            padding_mode=padding_mode, device=device, dtype=dtype)

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 5:
            raise ValueError("Input shape must be `(N, C, D, H, W)`!")
        if self.padding_mode != 'zeros':
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return torch.ops.quantized.conv3d_relu(
            input, self._packed_params, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedConvReLU3d'

    @classmethod
    def from_float(cls, mod):
        if type(mod) == torch.ao.nn.intrinsic.qat.ConvBnReLU3d:
            assert mod.bn.running_var is not None and mod.bn.running_mean is not None
            mod.weight, mod.bias = fuse_conv_bn_weights(
                mod.weight,
                mod.bias,
                mod.bn.running_mean,
                mod.bn.running_var,
                mod.bn.eps,
                mod.bn.weight,
                mod.bn.bias,
            )
        return super().from_float(mod)

    @classmethod
    def from_reference(cls, ref_qconv, output_scale, output_zero_point):
        assert type(ref_qconv) != torch.ao.nn.intrinsic.ConvBnReLU3d, \
            "BatchNorm3d should be fused into Conv3d before converting to reference module"
        return super().from_reference(ref_qconv[0], output_scale, output_zero_point)
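The `from_float` methods above are what the eager-mode convert step calls when it swaps a fused float (or QAT) conv+relu for its quantized counterpart. Below is a minimal post-training static quantization sketch; the `TinyConvNet` model, its layer names, and the calibration data are invented for illustration, and the standard `torch.ao.quantization` eager workflow with the `fbgemm` backend is assumed.

# Minimal sketch (separate from the diff above): eager post-training static
# quantization that is expected to end with the QuantizedConvReLU2d defined here.
import torch
import torch.nn as nn
import torch.ao.quantization as tq

class TinyConvNet(nn.Module):                # hypothetical toy model
    def __init__(self):
        super().__init__()
        self.quant = tq.QuantStub()
        self.conv = nn.Conv2d(3, 8, 3)
        self.relu = nn.ReLU()
        self.dequant = tq.DeQuantStub()

    def forward(self, x):
        return self.dequant(self.relu(self.conv(self.quant(x))))

m = TinyConvNet().eval()
m.qconfig = tq.get_default_qconfig("fbgemm")
m = tq.fuse_modules(m, [["conv", "relu"]])   # -> torch.ao.nn.intrinsic.ConvReLU2d
m = tq.prepare(m)
m(torch.randn(4, 3, 16, 16))                 # one calibration pass for the observers
m = tq.convert(m)                            # -> QuantizedConvReLU2d via from_float
print(m.conv)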
venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/linear_relu.py
ADDED
@@ -0,0 +1,177 @@
import torch
import torch.ao.nn.quantized as nnq
import torch.ao.nn.intrinsic as nni
from torch.ao.nn.quantized.modules.utils import _quantize_weight

__all__ = [
    "LinearReLU",
    "LinearLeakyReLU",
    "LinearTanh",
]

class LinearReLU(nnq.Linear):
    r"""
    A LinearReLU module fused from Linear and ReLU modules

    We adopt the same interface as :class:`torch.ao.nn.quantized.Linear`.

    Attributes:
        Same as torch.ao.nn.quantized.Linear

    Examples::

        >>> # xdoctest: +SKIP
        >>> m = nn.intrinsic.LinearReLU(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    """
    _FLOAT_MODULE = nni.LinearReLU  # type: ignore[assignment]

    def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):
        super().__init__(in_features, out_features, bias, dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.ops.quantized.linear_relu(
            x, self._packed_params._packed_params, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedLinearReLU'

    @classmethod
    def from_float(cls, mod):
        return super().from_float(mod)

    @classmethod
    def from_reference(cls, ref_linear_relu, output_scale, output_zero_point):
        return super().from_reference(ref_linear_relu[0], output_scale, output_zero_point)

class LinearLeakyReLU(nnq.Linear):
    r"""
    For onednn backend only
    A LinearLeakyReLU module fused from Linear and LeakyReLU modules
    We adopt the same interface as :class:`torch.ao.nn.quantized.Linear`.
    Attributes:
        Same as torch.ao.nn.quantized.Linear
        + negative_slope
    Examples::
        >>> # xdoctest: +SKIP
        >>> m = nn.intrinsic.LinearLeakyReLU(20, 30, 0.01)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    """
    _FLOAT_MODULE = nni.LinearLeakyReLU  # type: ignore[assignment]

    def __init__(self, in_features, out_features, negative_slope, bias=True, dtype=torch.qint8):
        super().__init__(in_features, out_features, bias, dtype)
        self.negative_slope = negative_slope

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.ops.quantized.linear_leaky_relu(
            x, self._packed_params._packed_params, self.scale, self.zero_point, self.negative_slope)

    def _get_name(self):
        return 'QuantizedLinearLeakyReLU'

    @classmethod
    def from_float(cls, mod):
        assert type(mod) == nni.LinearLeakyReLU, 'Input float module should be LinearLeakyReLU'
        assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
        activation_post_process = mod.activation_post_process
        leaky_relu = mod[1]
        mod = mod[0]
        weight_post_process = mod.qconfig.weight()
        weight_post_process(mod.weight)
        dtype = weight_post_process.dtype
        act_scale, act_zp = activation_post_process.calculate_qparams()  # type: ignore[union-attr,operator]
        assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8'
        qweight = _quantize_weight(mod.weight.float(), weight_post_process)
        qlinear_leaky_relu = cls(
            mod.in_features,
            mod.out_features,
            leaky_relu.negative_slope,
            dtype=dtype)
        qlinear_leaky_relu.set_weight_bias(qweight, mod.bias)
        qlinear_leaky_relu.scale = float(act_scale)
        qlinear_leaky_relu.zero_point = int(act_zp)
        return qlinear_leaky_relu

    @classmethod
    def from_reference(cls, ref_mod, output_scale, output_zero_point):
        linear = ref_mod[0]
        leaky_relu = ref_mod[1]
        qlinear_leaky_relu = cls(
            linear.in_features,
            linear.out_features,
            leaky_relu.negative_slope)
        qweight = linear.get_quantized_weight()
        qlinear_leaky_relu.set_weight_bias(qweight, linear.bias)
        qlinear_leaky_relu.scale = float(output_scale)
        qlinear_leaky_relu.zero_point = int(output_zero_point)
        return qlinear_leaky_relu

class LinearTanh(nnq.Linear):
    r"""
    A LinearTanh module fused from Linear and Tanh modules

    We adopt the same interface as :class:`torch.ao.nn.quantized.Linear`.

    Attributes:
        Same as torch.ao.nn.quantized.Linear

    Examples::

        >>> # xdoctest: +SKIP
        >>> m = nn.intrinsic.LinearTanh(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    """
    _FLOAT_MODULE = nni.LinearTanh  # type: ignore[assignment]

    def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):
        super().__init__(in_features, out_features, bias, dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.ops.quantized.linear_tanh(
            x, self._packed_params._packed_params, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedLinearTanh'

    @classmethod
    def from_float(cls, mod):
        assert type(mod) == nni.LinearTanh, 'Input float module should be LinearTanh'
        assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
        activation_post_process = mod.activation_post_process
        mod = mod[0]
        weight_post_process = mod.qconfig.weight()
        weight_post_process(mod.weight)
        dtype = weight_post_process.dtype
        act_scale, act_zp = activation_post_process.calculate_qparams()  # type: ignore[union-attr,operator]
        assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8'
        qweight = _quantize_weight(mod.weight.float(), weight_post_process)
        qlinear_tanh = cls(
            mod.in_features,
            mod.out_features,
            dtype=dtype)
        qlinear_tanh.set_weight_bias(qweight, mod.bias)
        qlinear_tanh.scale = float(act_scale)
        qlinear_tanh.zero_point = int(act_zp)
        return qlinear_tanh

    @classmethod
    def from_reference(cls, ref_mod, output_scale, output_zero_point):
        linear = ref_mod[0]
        qlinear_tanh = cls(
            linear.in_features,
            linear.out_features)
        qweight = linear.get_quantized_weight()
        qlinear_tanh.set_weight_bias(qweight, linear.bias)
        qlinear_tanh.scale = float(output_scale)
        qlinear_tanh.zero_point = int(output_zero_point)
        return qlinear_tanh
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__init__.py
ADDED
@@ -0,0 +1 @@
from .modules import *  # noqa: F403
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (221 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__init__.py
ADDED
@@ -0,0 +1,19 @@
from .linear import Linear
from .rnn import LSTM, GRU, LSTMCell, RNNCell, GRUCell
from .conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d

__all__ = [
    'Linear',
    'LSTM',
    'GRU',
    'LSTMCell',
    'RNNCell',
    'GRUCell',
    'Conv1d',
    'Conv2d',
    'Conv3d',
    'ConvTranspose1d',
    'ConvTranspose2d',
    'ConvTranspose3d',
]
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (571 Bytes).
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc
ADDED
Binary file (14 kB).
venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/linear.cpython-310.pyc
ADDED
Binary file (5.22 kB).