diff --git a/ckpts/universal/global_step120/zero/14.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step120/zero/14.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..d3d93cd37247a55ea3d0e9aa387e1e346cfb9715 --- /dev/null +++ b/ckpts/universal/global_step120/zero/14.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd6688987b67a17f3b22fbe54d21537ec5956b9fbcf000f0dcb3b443928a62ec +size 50332749 diff --git a/ckpts/universal/global_step120/zero/5.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/5.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..1f4c484e74dc6dcdf9f773338a100e88d8062bba --- /dev/null +++ b/ckpts/universal/global_step120/zero/5.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0cf9ec8cb077965574f8a0c74a776c4e7dfad04731e3775cb96a763422e4ffc +size 50332828 diff --git a/ckpts/universal/global_step120/zero/5.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/5.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..af75edb9acb3cc9099f6be98b5c5f2607d01d91f --- /dev/null +++ b/ckpts/universal/global_step120/zero/5.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af4d7613c97ad363690eda7b2c856485ae16911360834eba6761ee4b8eeb11fa +size 50332843 diff --git a/venv/lib/python3.10/site-packages/aiosignal/__init__.pyi b/venv/lib/python3.10/site-packages/aiosignal/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d4e3416d72246058259061578a82697e2bc0706e --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiosignal/__init__.pyi @@ -0,0 +1,12 @@ +from typing import Any, Generic, TypeVar + +from frozenlist import FrozenList + +__all__ = ("Signal",) + +_T = TypeVar("_T") + +class Signal(FrozenList[_T], Generic[_T]): + def __init__(self, owner: Any) -> None: ... + def __repr__(self) -> str: ... + async def send(self, *args: Any, **kwargs: Any) -> None: ... diff --git a/venv/lib/python3.10/site-packages/aiosignal/py.typed b/venv/lib/python3.10/site-packages/aiosignal/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/async_timeout/__init__.py b/venv/lib/python3.10/site-packages/async_timeout/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1ffb069fce9b2b9a03515404155a7e5cc439484a --- /dev/null +++ b/venv/lib/python3.10/site-packages/async_timeout/__init__.py @@ -0,0 +1,239 @@ +import asyncio +import enum +import sys +import warnings +from types import TracebackType +from typing import Optional, Type + + +if sys.version_info >= (3, 8): + from typing import final +else: + from typing_extensions import final + + +if sys.version_info >= (3, 11): + + def _uncancel_task(task: "asyncio.Task[object]") -> None: + task.uncancel() + +else: + + def _uncancel_task(task: "asyncio.Task[object]") -> None: + pass + + +__version__ = "4.0.3" + + +__all__ = ("timeout", "timeout_at", "Timeout") + + +def timeout(delay: Optional[float]) -> "Timeout": + """timeout context manager. 
+ + Useful in cases when you want to apply timeout logic around block + of code or in cases when asyncio.wait_for is not suitable. For example: + + >>> async with timeout(0.001): + ... async with aiohttp.get('https://github.com') as r: + ... await r.text() + + + delay - value in seconds or None to disable timeout logic + """ + loop = asyncio.get_running_loop() + if delay is not None: + deadline = loop.time() + delay # type: Optional[float] + else: + deadline = None + return Timeout(deadline, loop) + + +def timeout_at(deadline: Optional[float]) -> "Timeout": + """Schedule the timeout at absolute time. + + deadline argument points on the time in the same clock system + as loop.time(). + + Please note: it is not POSIX time but a time with + undefined starting base, e.g. the time of the system power on. + + >>> async with timeout_at(loop.time() + 10): + ... async with aiohttp.get('https://github.com') as r: + ... await r.text() + + + """ + loop = asyncio.get_running_loop() + return Timeout(deadline, loop) + + +class _State(enum.Enum): + INIT = "INIT" + ENTER = "ENTER" + TIMEOUT = "TIMEOUT" + EXIT = "EXIT" + + +@final +class Timeout: + # Internal class, please don't instantiate it directly + # Use timeout() and timeout_at() public factories instead. + # + # Implementation note: `async with timeout()` is preferred + # over `with timeout()`. + # While technically the Timeout class implementation + # doesn't need to be async at all, + # the `async with` statement explicitly points that + # the context manager should be used from async function context. + # + # This design allows to avoid many silly misusages. + # + # TimeoutError is raised immediately when scheduled + # if the deadline is passed. + # The purpose is to time out as soon as possible + # without waiting for the next await expression. + + __slots__ = ("_deadline", "_loop", "_state", "_timeout_handler", "_task") + + def __init__( + self, deadline: Optional[float], loop: asyncio.AbstractEventLoop + ) -> None: + self._loop = loop + self._state = _State.INIT + + self._task: Optional["asyncio.Task[object]"] = None + self._timeout_handler = None # type: Optional[asyncio.Handle] + if deadline is None: + self._deadline = None # type: Optional[float] + else: + self.update(deadline) + + def __enter__(self) -> "Timeout": + warnings.warn( + "with timeout() is deprecated, use async with timeout() instead", + DeprecationWarning, + stacklevel=2, + ) + self._do_enter() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + self._do_exit(exc_type) + return None + + async def __aenter__(self) -> "Timeout": + self._do_enter() + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + self._do_exit(exc_type) + return None + + @property + def expired(self) -> bool: + """Is timeout expired during execution?""" + return self._state == _State.TIMEOUT + + @property + def deadline(self) -> Optional[float]: + return self._deadline + + def reject(self) -> None: + """Reject scheduled timeout if any.""" + # cancel is maybe better name but + # task.cancel() raises CancelledError in asyncio world. 
+ if self._state not in (_State.INIT, _State.ENTER): + raise RuntimeError(f"invalid state {self._state.value}") + self._reject() + + def _reject(self) -> None: + self._task = None + if self._timeout_handler is not None: + self._timeout_handler.cancel() + self._timeout_handler = None + + def shift(self, delay: float) -> None: + """Advance timeout on delay seconds. + + The delay can be negative. + + Raise RuntimeError if shift is called when deadline is not scheduled + """ + deadline = self._deadline + if deadline is None: + raise RuntimeError("cannot shift timeout if deadline is not scheduled") + self.update(deadline + delay) + + def update(self, deadline: float) -> None: + """Set deadline to absolute value. + + deadline argument points on the time in the same clock system + as loop.time(). + + If new deadline is in the past the timeout is raised immediately. + + Please note: it is not POSIX time but a time with + undefined starting base, e.g. the time of the system power on. + """ + if self._state == _State.EXIT: + raise RuntimeError("cannot reschedule after exit from context manager") + if self._state == _State.TIMEOUT: + raise RuntimeError("cannot reschedule expired timeout") + if self._timeout_handler is not None: + self._timeout_handler.cancel() + self._deadline = deadline + if self._state != _State.INIT: + self._reschedule() + + def _reschedule(self) -> None: + assert self._state == _State.ENTER + deadline = self._deadline + if deadline is None: + return + + now = self._loop.time() + if self._timeout_handler is not None: + self._timeout_handler.cancel() + + self._task = asyncio.current_task() + if deadline <= now: + self._timeout_handler = self._loop.call_soon(self._on_timeout) + else: + self._timeout_handler = self._loop.call_at(deadline, self._on_timeout) + + def _do_enter(self) -> None: + if self._state != _State.INIT: + raise RuntimeError(f"invalid state {self._state.value}") + self._state = _State.ENTER + self._reschedule() + + def _do_exit(self, exc_type: Optional[Type[BaseException]]) -> None: + if exc_type is asyncio.CancelledError and self._state == _State.TIMEOUT: + assert self._task is not None + _uncancel_task(self._task) + self._timeout_handler = None + self._task = None + raise asyncio.TimeoutError + # timeout has not expired + self._state = _State.EXIT + self._reject() + return None + + def _on_timeout(self) -> None: + assert self._task is not None + self._task.cancel() + self._state = _State.TIMEOUT + # drop the reference early + self._timeout_handler = None diff --git a/venv/lib/python3.10/site-packages/async_timeout/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/async_timeout/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b71282628fe83dc702c9a8075f355a126b52b720 Binary files /dev/null and b/venv/lib/python3.10/site-packages/async_timeout/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/async_timeout/py.typed b/venv/lib/python3.10/site-packages/async_timeout/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..3b94f915737aba1f12a0f067fdba3726bfe02df5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/async_timeout/py.typed @@ -0,0 +1 @@ +Placeholder diff --git a/venv/lib/python3.10/site-packages/torch/ao/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..3c9c36c9dd539861eb5d433c69a51115c622d508 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e131b0f4292c3256a5a9c10db29da3793570b4f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__init__.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a18bae3eaa38266c1a73468f3a7619ced076f461 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__init__.py @@ -0,0 +1,36 @@ +from .modules import * # noqa: F403 +from .modules.fused import _FusedModule # noqa: F403 + +# # Subpackages +# from . import qat # noqa: F403 +# from . import quantized # noqa: F403 + +__all__ = [ + 'ConvBn1d', + 'ConvBn2d', + 'ConvBn3d', + 'ConvBnReLU1d', + 'ConvBnReLU2d', + 'ConvBnReLU3d', + 'ConvReLU1d', + 'ConvReLU2d', + 'ConvReLU3d', + 'LinearReLU', + 'BNReLU2d', + 'BNReLU3d', + 'LinearBn1d', + 'LinearLeakyReLU', + 'LinearTanh', + 'ConvAdd2d', + 'ConvAddReLU2d', +] + +# We are exposing all subpackages to the end-user. +# Because of possible inter-dependency, we want to avoid +# the cyclic imports, thus implementing lazy version +# as per https://peps.python.org/pep-0562/ +def __getattr__(name): + if name in __all__: + import importlib + return importlib.import_module("." + name, __name__) + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ca9040b6e57bb528983c217329014853a3b755a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__init__.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..afc6c63f5f0c8a0ced7c5f004975ee3cabad91ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__init__.py @@ -0,0 +1,38 @@ +from .fused import _FusedModule # noqa: F401 +from .fused import ConvBn1d +from .fused import ConvBn2d +from .fused import ConvBn3d +from .fused import ConvBnReLU1d +from .fused import ConvBnReLU2d +from .fused import ConvBnReLU3d +from .fused import ConvReLU1d +from .fused import ConvReLU2d +from .fused import ConvReLU3d +from .fused import LinearReLU +from .fused import BNReLU2d +from .fused import BNReLU3d +from .fused import LinearBn1d +from .fused import LinearLeakyReLU +from .fused import LinearTanh +from .fused import ConvAdd2d +from .fused import ConvAddReLU2d + +__all__ = [ + 'ConvBn1d', + 'ConvBn2d', + 'ConvBn3d', + 'ConvBnReLU1d', + 'ConvBnReLU2d', + 'ConvBnReLU3d', + 'ConvReLU1d', + 'ConvReLU2d', + 'ConvReLU3d', + 'LinearReLU', + 'BNReLU2d', + 'BNReLU3d', + 'LinearBn1d', + 'LinearLeakyReLU', + 'LinearTanh', + 'ConvAdd2d', + 'ConvAddReLU2d', +] 
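(Illustrative sketch, not part of the patched files: the snippet below assumes only a stock PyTorch install plus the torch.ao.nn.intrinsic package added above. It shows what the fused "intrinsic" containers re-exported by this __init__.py actually are -- thin nn.Sequential wrappers that record which float modules belong together so that a later quantization pass can replace the pair with a truly fused implementation, as the docstrings in fused.py describe.)

import torch
import torch.nn as nn
import torch.ao.nn.intrinsic as nni

# Build ordinary float modules, then wrap them in the intrinsic containers
# defined in fused.py.  The constructors only type-check their arguments and
# call nn.Sequential.__init__, so the forward pass is identical to running
# the wrapped modules in order.
conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
relu = nn.ReLU()
conv_relu = nni.ConvReLU2d(conv, relu)

linear_relu = nni.LinearReLU(nn.Linear(8, 4), nn.ReLU())

x = torch.randn(2, 3, 16, 16)
assert torch.allclose(conv_relu(x), relu(conv(x)))   # same result as the unfused pair
assert isinstance(conv_relu, nn.Sequential)          # _FusedModule subclasses nn.Sequential
y = linear_relu(torch.randn(2, 8))                   # likewise just Linear followed by ReLU

The container adds no numerics of its own; its role is to act as a marker type (via _FusedModule) that quantization workflows such as prepare/convert recognize and swap for a fused quantized or QAT module.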
diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cb47964f41629b19fe7129da886075e99393dd8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ceb903a4f8b59f2a8fc84ffb1bbc24994e94ae45 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/fused.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/fused.py new file mode 100644 index 0000000000000000000000000000000000000000..2b4c6f489e99aa3cf1b31c340bea4a03f589d4ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/fused.py @@ -0,0 +1,160 @@ +import torch +from torch.nn import Conv1d, Conv2d, Conv3d, ReLU, Linear, BatchNorm1d, BatchNorm2d, BatchNorm3d +from torch.nn.utils.parametrize import type_before_parametrizations + +__all__ = ['ConvReLU1d', 'ConvReLU2d', 'ConvReLU3d', 'LinearReLU', 'ConvBn1d', 'ConvBn2d', + 'ConvBnReLU1d', 'ConvBnReLU2d', 'ConvBn3d', 'ConvBnReLU3d', 'BNReLU2d', 'BNReLU3d', + 'LinearBn1d', 'LinearLeakyReLU', 'LinearTanh', 'ConvAdd2d', 'ConvAddReLU2d'] + +# Used for identifying intrinsic modules used in quantization +class _FusedModule(torch.nn.Sequential): + pass + +class ConvReLU1d(_FusedModule): + r"""This is a sequential container which calls the Conv1d and ReLU modules. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, conv, relu): + assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(relu) == ReLU, \ + f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}' + super().__init__(conv, relu) + +class ConvReLU2d(_FusedModule): + r"""This is a sequential container which calls the Conv2d and ReLU modules. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, conv, relu): + assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(relu) == ReLU, \ + f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}' + super().__init__(conv, relu) + +class ConvReLU3d(_FusedModule): + r"""This is a sequential container which calls the Conv3d and ReLU modules. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, conv, relu): + assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(relu) == ReLU, \ + f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}' + super().__init__(conv, relu) + +class LinearReLU(_FusedModule): + r"""This is a sequential container which calls the Linear and ReLU modules. 
+ During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, linear, relu): + assert type_before_parametrizations(linear) == Linear and type_before_parametrizations(relu) == ReLU, \ + 'Incorrect types for input modules{}{}'.format( + type_before_parametrizations(linear), type_before_parametrizations(relu)) + super().__init__(linear, relu) + +class ConvBn1d(_FusedModule): + r"""This is a sequential container which calls the Conv 1d and Batch Norm 1d modules. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, conv, bn): + assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(bn) == BatchNorm1d, \ + f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}' + super().__init__(conv, bn) + +class ConvBn2d(_FusedModule): + r"""This is a sequential container which calls the Conv 2d and Batch Norm 2d modules. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, conv, bn): + assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(bn) == BatchNorm2d, \ + f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}' + super().__init__(conv, bn) + +class ConvBnReLU1d(_FusedModule): + r"""This is a sequential container which calls the Conv 1d, Batch Norm 1d, and ReLU modules. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, conv, bn, relu): + assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(bn) == BatchNorm1d and \ + type_before_parametrizations(relu) == ReLU, 'Incorrect types for input modules{}{}{}' \ + .format(type_before_parametrizations(conv), type_before_parametrizations(bn), type_before_parametrizations(relu)) + super().__init__(conv, bn, relu) + +class ConvBnReLU2d(_FusedModule): + r"""This is a sequential container which calls the Conv 2d, Batch Norm 2d, and ReLU modules. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, conv, bn, relu): + assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(bn) == BatchNorm2d and \ + type_before_parametrizations(relu) == ReLU, 'Incorrect types for input modules{}{}{}' \ + .format(type_before_parametrizations(conv), type_before_parametrizations(bn), type_before_parametrizations(relu)) + super().__init__(conv, bn, relu) + +class ConvBn3d(_FusedModule): + r"""This is a sequential container which calls the Conv 3d and Batch Norm 3d modules. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, conv, bn): + assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(bn) == BatchNorm3d, \ + f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}' + super().__init__(conv, bn) + +class ConvBnReLU3d(_FusedModule): + r"""This is a sequential container which calls the Conv 3d, Batch Norm 3d, and ReLU modules. 
+ During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, conv, bn, relu): + assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(bn) == BatchNorm3d and \ + type_before_parametrizations(relu) == ReLU, 'Incorrect types for input modules{}{}{}' \ + .format(type_before_parametrizations(conv), type_before_parametrizations(bn), type_before_parametrizations(relu)) + super().__init__(conv, bn, relu) + + +class BNReLU2d(_FusedModule): + r"""This is a sequential container which calls the BatchNorm 2d and ReLU modules. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, batch_norm, relu): + assert type_before_parametrizations(batch_norm) == BatchNorm2d and type_before_parametrizations(relu) == ReLU, \ + 'Incorrect types for input modules{}{}'.format( + type_before_parametrizations(batch_norm), type_before_parametrizations(relu)) + super().__init__(batch_norm, relu) + +class BNReLU3d(_FusedModule): + r"""This is a sequential container which calls the BatchNorm 3d and ReLU modules. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, batch_norm, relu): + assert type_before_parametrizations(batch_norm) == BatchNorm3d and type_before_parametrizations(relu) == ReLU, \ + 'Incorrect types for input modules{}{}'.format( + type_before_parametrizations(batch_norm), type_before_parametrizations(relu)) + super().__init__(batch_norm, relu) + + +class LinearBn1d(_FusedModule): + r"""This is a sequential container which calls the Linear and BatchNorm1d modules. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, linear, bn): + assert type_before_parametrizations(linear) == Linear and type_before_parametrizations(bn) == BatchNorm1d, \ + f'Incorrect types for input modules{type_before_parametrizations(linear)}{type_before_parametrizations(bn)}' + super().__init__(linear, bn) + +class LinearLeakyReLU(_FusedModule): + r"""This is a sequential container which calls the Linear and LeakyReLU modules. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, linear, leaky_relu): + assert type(linear) == Linear and type(leaky_relu) == torch.nn.LeakyReLU, \ + f'Incorrect types for input modules{type(linear)}{type(leaky_relu)}' + super().__init__(linear, leaky_relu) + +class LinearTanh(_FusedModule): + r"""This is a sequential container which calls the Linear and Tanh modules. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, linear, tanh): + assert type(linear) == Linear and type(tanh) == torch.nn.Tanh, \ + f'Incorrect types for input modules{type(linear)}{type(tanh)}' + super().__init__(linear, tanh) + +class ConvAdd2d(_FusedModule): + r"""This is a sequential container which calls the Conv2d modules with extra Add. + During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, conv, add): + super().__init__(conv) + self.add = add + + def forward(self, x1, x2): + return self.add(self[0](x1), x2) + +class ConvAddReLU2d(_FusedModule): + r"""This is a sequential container which calls the Conv2d, add, Relu. 
+ During quantization this will be replaced with the corresponding fused module.""" + def __init__(self, conv, add, relu): + super().__init__(conv) + self.add = add + self.relu = relu + + def forward(self, x1, x2): + return self.relu(self.add(self[0](x1), x2)) diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__init__.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d79bdbfe83209f18b17cc8c7b245f322871d6c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__init__.py @@ -0,0 +1 @@ +from .modules import * # noqa: F403 diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..793c09c7e77864a987cd883eed0cc6658edbe957 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__init__.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f44820c637e86c82c0f5d02919fa1c66803f21ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__init__.py @@ -0,0 +1,31 @@ +from .linear_relu import LinearReLU +from .linear_fused import LinearBn1d +from .conv_fused import ( + ConvBn1d, + ConvBn2d, + ConvBn3d, + ConvBnReLU1d, + ConvBnReLU2d, + ConvBnReLU3d, + ConvReLU1d, + ConvReLU2d, + ConvReLU3d, + update_bn_stats, + freeze_bn_stats, +) + +__all__ = [ + "LinearReLU", + "LinearBn1d", + "ConvReLU1d", + "ConvReLU2d", + "ConvReLU3d", + "ConvBn1d", + "ConvBn2d", + "ConvBn3d", + "ConvBnReLU1d", + "ConvBnReLU2d", + "ConvBnReLU3d", + "update_bn_stats", + "freeze_bn_stats", +] diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac98314027974a977448502553563bf3df67b3d8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0acdd3fd55f7e351366f0eb53f397a993c1e5cdb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9af1644b9ad1c949f16c7869908e8ddba6175b6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..591ffba330c009472cd4b6b6c63749ca4f21524b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/conv_fused.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/conv_fused.py new file mode 100644 index 0000000000000000000000000000000000000000..906206e18e64fb8d7e3af60bfd72068fbce79e54 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/conv_fused.py @@ -0,0 +1,825 @@ +import math +import torch +import torch.nn as nn +import torch.ao.nn.intrinsic as nni +import torch.ao.nn.qat as nnqat +import torch.nn.functional as F +from torch.nn import init +from torch.nn.utils import fuse_conv_bn_weights +from torch.nn.modules.utils import _single, _pair, _triple +from torch.nn.parameter import Parameter +from typing import TypeVar + +__all__ = ['ConvBn1d', 'ConvBnReLU1d', 'ConvReLU1d', 'ConvBn2d', 'ConvBnReLU2d', 'ConvReLU2d', 'ConvBn3d', + 'ConvBnReLU3d', 'ConvReLU3d', 'update_bn_stats', 'freeze_bn_stats'] +_BN_CLASS_MAP = { + 1: nn.BatchNorm1d, + 2: nn.BatchNorm2d, + 3: nn.BatchNorm3d, +} + + +MOD = TypeVar('MOD', bound=nn.modules.conv._ConvNd) + + +class _ConvBnNd(nn.modules.conv._ConvNd, nni._FusedModule): + + _version = 2 + _FLOAT_MODULE = MOD + + def __init__(self, + # ConvNd args + in_channels, out_channels, kernel_size, stride, + padding, dilation, transposed, output_padding, + groups, + bias, + padding_mode, + # BatchNormNd args + # num_features: out_channels + eps=1e-05, momentum=0.1, + # affine: True + # track_running_stats: True + # Args for this module + freeze_bn=False, + qconfig=None, + dim=2): + nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels, kernel_size, + stride, padding, dilation, transposed, + output_padding, groups, False, padding_mode) + assert qconfig, 'qconfig must be provided for QAT module' + self.qconfig = qconfig + self.freeze_bn = freeze_bn if self.training else True + self.bn = _BN_CLASS_MAP[dim](out_channels, eps, momentum, True, True) + self.weight_fake_quant = self.qconfig.weight() + if bias: + self.bias = Parameter(torch.empty(out_channels)) + else: + self.register_parameter('bias', None) + self.reset_bn_parameters() + + # this needs to be called after reset_bn_parameters, + # as they modify the same state + if self.training: + if freeze_bn: + self.freeze_bn_stats() + else: + self.update_bn_stats() + else: + self.freeze_bn_stats() + + self._enable_slow_path_for_better_numerical_stability = False + + def reset_running_stats(self): + self.bn.reset_running_stats() + + def reset_bn_parameters(self): + self.bn.reset_running_stats() + init.uniform_(self.bn.weight) + init.zeros_(self.bn.bias) + # note: below is actually for conv, not BN + if self.bias is not None: + fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) + bound = 1 / math.sqrt(fan_in) + init.uniform_(self.bias, -bound, bound) + + def reset_parameters(self): + super().reset_parameters() + + def update_bn_stats(self): + self.freeze_bn = False + self.bn.training = True + return self + + def freeze_bn_stats(self): + self.freeze_bn = True + self.bn.training = False + return self + + def 
_forward(self, input): + if self._enable_slow_path_for_better_numerical_stability: + return self._forward_slow(input) + return self._forward_approximate(input) + + def _forward_approximate(self, input): + """Approximated method to fuse conv and bn. It requires only one forward pass. + conv_orig = conv / scale_factor where scale_factor = bn.weight / running_std + """ + assert self.bn.running_var is not None + running_std = torch.sqrt(self.bn.running_var + self.bn.eps) + scale_factor = self.bn.weight / running_std + weight_shape = [1] * len(self.weight.shape) + weight_shape[0] = -1 + bias_shape = [1] * len(self.weight.shape) + bias_shape[1] = -1 + scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape(weight_shape)) + # using zero bias here since the bias for original conv + # will be added later + if self.bias is not None: + zero_bias = torch.zeros_like(self.bias, dtype=input.dtype) + else: + zero_bias = torch.zeros(self.out_channels, device=scaled_weight.device, dtype=input.dtype) + conv = self._conv_forward(input, scaled_weight, zero_bias) + conv_orig = conv / scale_factor.reshape(bias_shape) + if self.bias is not None: + conv_orig = conv_orig + self.bias.reshape(bias_shape) + conv = self.bn(conv_orig) + return conv + + def _forward_slow(self, input): + """ + A more accurate but slow method to compute conv bn fusion, following https://arxiv.org/pdf/1806.08342.pdf + It requires two forward passes but handles the case bn.weight == 0 + + Conv: Y = WX + B_c + Conv without bias: Y0 = WX = Y - B_c, Y = Y0 + B_c + + Batch statistics: + mean_Y = Y.mean() + = Y0.mean() + B_c + var_Y = (Y - mean_Y)^2.mean() + = (Y0 - Y0.mean())^2.mean() + BN (r: bn.weight, beta: bn.bias): + Z = r * (Y - mean_Y) / sqrt(var_Y + eps) + beta + = r * (Y0 - Y0.mean()) / sqrt(var_Y + eps) + beta + + Fused Conv BN training (std_Y = sqrt(var_Y + eps)): + Z = (r * W / std_Y) * X + r * (B_c - mean_Y) / std_Y + beta + = (r * W / std_Y) * X - r * Y0.mean() / std_Y + beta + + Fused Conv BN inference (running_std = sqrt(running_var + eps)): + Z = (r * W / running_std) * X - r * (running_mean - B_c) / running_std + beta + + QAT with fused conv bn: + Z_train = fake_quant(r * W / running_std) * X * (running_std / std_Y) - r * Y0.mean() / std_Y + beta + = conv(X, fake_quant(r * W / running_std)) * (running_std / std_Y) - r * Y0.mean() / std_Y + beta + Z_inference = conv(X, fake_quant(r * W / running_std)) - r * (running_mean - B_c) / running_std + beta + """ + + assert self.bn.running_var is not None + assert self.bn.running_mean is not None + + # using zero bias here since the bias for original conv + # will be added later + zero_bias = torch.zeros(self.out_channels, device=self.weight.device, dtype=input.dtype) + + weight_shape = [1] * len(self.weight.shape) + weight_shape[0] = -1 + bias_shape = [1] * len(self.weight.shape) + bias_shape[1] = -1 + + if self.bn.training: + # needed to compute batch mean/std + conv_out = self._conv_forward(input, self.weight, zero_bias) + # update bn statistics + with torch.no_grad(): + conv_out_bias = ( + conv_out if self.bias is None else conv_out + self.bias.reshape(bias_shape) + ) + self.bn(conv_out_bias) + + # fused conv + bn without bias using bn running statistics + running_std = torch.sqrt(self.bn.running_var + self.bn.eps) + scale_factor = self.bn.weight / running_std + scaled_weight = self.weight_fake_quant( + self.weight * scale_factor.reshape(weight_shape) + ) + # fused conv without bias for inference: (r * W / running_std) * X + conv_bn = self._conv_forward(input, 
scaled_weight, zero_bias) + + if self.bn.training: + avg_dims = [0] + list(range(2, len(self.weight.shape))) + batch_mean = conv_out.mean(avg_dims) # type: ignore[possibly-undefined] + batch_var = torch.square(conv_out - batch_mean.reshape(bias_shape)).mean( + avg_dims + ) + batch_std = torch.sqrt(batch_var + self.bn.eps) + + # scale to use batch std in training mode + # conv(X, r * W / std_Y) = conv(X, r * W / running_std) * (running_std / std_Y) + unscale_factor = running_std / batch_std + conv_bn *= unscale_factor.reshape(bias_shape) + + fused_mean = batch_mean + fused_std = batch_std + else: + fused_mean = self.bn.running_mean - (self.bias if self.bias is not None else 0) + fused_std = running_std + + # fused bias = beta - r * mean / std + fused_bias = self.bn.bias - self.bn.weight * fused_mean / fused_std + conv_bn += fused_bias.reshape(bias_shape) + + # HACK to let conv bias participate in loss to avoid DDP error (parameters + # were not used in producing loss) + if self.bias is not None: + conv_bn += (self.bias - self.bias).reshape(bias_shape) + + return conv_bn + + def extra_repr(self): + # TODO(jerryzh): extend + return super().extra_repr() + + def forward(self, input): + return self._forward(input) + + def train(self, mode=True): + """ + Batchnorm's training behavior is using the self.training flag. Prevent + changing it if BN is frozen. This makes sure that calling `model.train()` + on a model with a frozen BN will behave properly. + """ + self.training = mode + if not self.freeze_bn: + for module in self.children(): + module.train(mode) + return self + + # ===== Serialization version history ===== + # + # Version 1/None + # self + # |--- weight : Tensor + # |--- bias : Tensor + # |--- gamma : Tensor + # |--- beta : Tensor + # |--- running_mean : Tensor + # |--- running_var : Tensor + # |--- num_batches_tracked : Tensor + # + # Version 2 + # self + # |--- weight : Tensor + # |--- bias : Tensor + # |--- bn : Module + # |--- weight : Tensor (moved from v1.self.gamma) + # |--- bias : Tensor (moved from v1.self.beta) + # |--- running_mean : Tensor (moved from v1.self.running_mean) + # |--- running_var : Tensor (moved from v1.self.running_var) + # |--- num_batches_tracked : Tensor (moved from v1.self.num_batches_tracked) + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): + version = local_metadata.get('version', None) + if version is None or version == 1: + # BN related parameters and buffers were moved into the BN module for v2 + v2_to_v1_names = { + 'bn.weight': 'gamma', + 'bn.bias': 'beta', + 'bn.running_mean': 'running_mean', + 'bn.running_var': 'running_var', + 'bn.num_batches_tracked': 'num_batches_tracked', + } + for v2_name, v1_name in v2_to_v1_names.items(): + if prefix + v1_name in state_dict: + state_dict[prefix + v2_name] = state_dict[prefix + v1_name] + state_dict.pop(prefix + v1_name) + elif prefix + v2_name in state_dict: + # there was a brief period where forward compatibility + # for this module was broken (between + # https://github.com/pytorch/pytorch/pull/38478 + # and https://github.com/pytorch/pytorch/pull/38820) + # and modules emitted the v2 state_dict format while + # specifying that version == 1. This patches the forward + # compatibility issue by allowing the v2 style entries to + # be used. 
+ pass + elif strict: + missing_keys.append(prefix + v2_name) + + super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs) + + @classmethod + def from_float(cls, mod): + r"""Create a qat module from a float module or qparams_dict + + Args: `mod` a float module, either produced by torch.ao.quantization utilities + or directly from user + """ + # The ignore is because _FLOAT_MODULE is a TypeVar here where the bound + # has no __name__ (code is fine though) + assert type(mod) == cls._FLOAT_MODULE, 'qat.' + cls.__name__ + '.from_float only works for ' + \ + cls._FLOAT_MODULE.__name__ # type: ignore[attr-defined] + assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined' + assert mod.qconfig, 'Input float module must have a valid qconfig' + qconfig = mod.qconfig + conv, bn = mod[0], mod[1] + qat_convbn = cls(conv.in_channels, conv.out_channels, conv.kernel_size, + conv.stride, conv.padding, conv.dilation, + conv.groups, conv.bias is not None, + conv.padding_mode, + bn.eps, bn.momentum, + False, + qconfig) + qat_convbn.weight = conv.weight + qat_convbn.bias = conv.bias + qat_convbn.bn.weight = bn.weight + qat_convbn.bn.bias = bn.bias + qat_convbn.bn.running_mean = bn.running_mean + qat_convbn.bn.running_var = bn.running_var + # mypy error: Cannot determine type of 'num_batches_tracked' + qat_convbn.bn.num_batches_tracked = bn.num_batches_tracked # type: ignore[has-type] + return qat_convbn + + def to_float(self): + cls = type(self) + conv = cls._FLOAT_CONV_MODULE( # type: ignore[attr-defined] + self.in_channels, + self.out_channels, + self.kernel_size, + self.stride, + self.padding, + self.dilation, + self.groups, + self.bias is not None, + self.padding_mode) + conv.weight = torch.nn.Parameter(self.weight.detach()) + if self.bias is not None: + conv.bias = torch.nn.Parameter(self.bias.detach()) + + if cls._FLOAT_BN_MODULE: # type: ignore[attr-defined] + # fuse bn into conv + assert self.bn.running_var is not None and self.bn.running_mean is not None + conv.weight, conv.bias = fuse_conv_bn_weights( + conv.weight, + conv.bias, + self.bn.running_mean, + self.bn.running_var, + self.bn.eps, + self.bn.weight, + self.bn.bias + ) + + if cls._FLOAT_RELU_MODULE: # type: ignore[attr-defined] + modules = [] + modules.append(conv) + relu = cls._FLOAT_RELU_MODULE() # type: ignore[attr-defined] + modules.append(relu) + conv_relu = cls._FUSED_FLOAT_MODULE(*modules) # type: ignore[attr-defined] + conv_relu.train(self.training) + return conv_relu + else: + conv.train(self.training) + return conv + +class ConvBn1d(_ConvBnNd, nn.Conv1d): + r""" + A ConvBn1d module is a module fused from Conv1d and BatchNorm1d, + attached with FakeQuantize modules for weight, + used in quantization aware training. + + We combined the interface of :class:`torch.nn.Conv1d` and + :class:`torch.nn.BatchNorm1d`. + + Similar to :class:`torch.nn.Conv1d`, with FakeQuantize modules initialized + to default. 
+ + Attributes: + freeze_bn: + weight_fake_quant: fake quant module for weight + + """ + _FLOAT_BN_MODULE = nn.BatchNorm1d + _FLOAT_RELU_MODULE: None = None + _FLOAT_MODULE = nni.ConvBn1d + _FLOAT_CONV_MODULE = nn.Conv1d + + def __init__(self, + # Conv1d args + in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, + bias=None, + padding_mode='zeros', + # BatchNorm1d args + # num_features: out_channels + eps=1e-05, momentum=0.1, + # affine: True + # track_running_stats: True + # Args for this module + freeze_bn=False, + qconfig=None): + kernel_size = _single(kernel_size) + stride = _single(stride) + padding = _single(padding) + dilation = _single(dilation) + _ConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride, + padding, dilation, False, _single(0), groups, bias, padding_mode, + eps, momentum, freeze_bn, qconfig, dim=1) + +class ConvBnReLU1d(ConvBn1d): + r""" + A ConvBnReLU1d module is a module fused from Conv1d, BatchNorm1d and ReLU, + attached with FakeQuantize modules for weight, + used in quantization aware training. + + We combined the interface of :class:`torch.nn.Conv1d` and + :class:`torch.nn.BatchNorm1d` and :class:`torch.nn.ReLU`. + + Similar to `torch.nn.Conv1d`, with FakeQuantize modules initialized to + default. + + Attributes: + weight_fake_quant: fake quant module for weight + + """ + # base class defines _FLOAT_MODULE as "ConvBn1d" + _FLOAT_MODULE = nni.ConvBnReLU1d # type: ignore[assignment] + _FLOAT_CONV_MODULE = nn.Conv1d + _FLOAT_BN_MODULE = nn.BatchNorm1d + _FLOAT_RELU_MODULE = nn.ReLU # type: ignore[assignment] + # module class after fusing bn into conv + _FUSED_FLOAT_MODULE = nni.ConvReLU1d + + def __init__(self, + # Conv1d args + in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, + bias=None, + padding_mode='zeros', + # BatchNorm1d args + # num_features: out_channels + eps=1e-05, momentum=0.1, + # affine: True + # track_running_stats: True + # Args for this module + freeze_bn=False, + qconfig=None): + super().__init__(in_channels, out_channels, kernel_size, stride, + padding, dilation, groups, bias, + padding_mode, eps, momentum, + freeze_bn, + qconfig) + + def forward(self, input): + return F.relu(ConvBn1d._forward(self, input)) + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + +class ConvReLU1d(nnqat.Conv1d, nni._FusedModule): + r"""A ConvReLU1d module is a fused module of Conv1d and ReLU, attached with + FakeQuantize modules for weight for + quantization aware training. + + We combined the interface of :class:`~torch.nn.Conv1d` and + :class:`~torch.nn.BatchNorm1d`. 
+ + Attributes: + weight_fake_quant: fake quant module for weight + + """ + _FLOAT_MODULE = nni.ConvReLU1d # type: ignore[assignment] + _FLOAT_CONV_MODULE = nn.Conv1d + _FLOAT_BN_MODULE: None = None + _FLOAT_RELU_MODULE = nn.ReLU + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, + bias=True, padding_mode='zeros', + qconfig=None): + super().__init__(in_channels, out_channels, kernel_size, + stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias, padding_mode=padding_mode, + qconfig=qconfig) + assert qconfig, 'qconfig must be provided for QAT module' + self.qconfig = qconfig + self.weight_fake_quant = self.qconfig.weight() + + def forward(self, input): + return F.relu( + self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)) + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + +class ConvBn2d(_ConvBnNd, nn.Conv2d): + r""" + A ConvBn2d module is a module fused from Conv2d and BatchNorm2d, + attached with FakeQuantize modules for weight, + used in quantization aware training. + + We combined the interface of :class:`torch.nn.Conv2d` and + :class:`torch.nn.BatchNorm2d`. + + Similar to :class:`torch.nn.Conv2d`, with FakeQuantize modules initialized + to default. + + Attributes: + freeze_bn: + weight_fake_quant: fake quant module for weight + + """ + _FLOAT_MODULE = nni.ConvBn2d + _FLOAT_CONV_MODULE = nn.Conv2d + _FLOAT_BN_MODULE = nn.BatchNorm2d + _FLOAT_RELU_MODULE: None = None + + def __init__(self, + # ConvNd args + in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, + bias=None, + padding_mode='zeros', + # BatchNorm2d args + # num_features: out_channels + eps=1e-05, momentum=0.1, + # affine: True + # track_running_stats: True + # Args for this module + freeze_bn=False, + qconfig=None): + kernel_size = _pair(kernel_size) + stride = _pair(stride) + padding = _pair(padding) + dilation = _pair(dilation) + _ConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride, + padding, dilation, False, _pair(0), groups, bias, padding_mode, + eps, momentum, freeze_bn, qconfig, dim=2) + +class ConvBnReLU2d(ConvBn2d): + r""" + A ConvBnReLU2d module is a module fused from Conv2d, BatchNorm2d and ReLU, + attached with FakeQuantize modules for weight, + used in quantization aware training. + + We combined the interface of :class:`torch.nn.Conv2d` and + :class:`torch.nn.BatchNorm2d` and :class:`torch.nn.ReLU`. + + Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to + default. 
+ + Attributes: + weight_fake_quant: fake quant module for weight + + """ + # base class defines _FLOAT_MODULE as "ConvBn2d" + _FLOAT_MODULE = nni.ConvBnReLU2d # type: ignore[assignment] + _FLOAT_CONV_MODULE = nn.Conv2d + _FLOAT_BN_MODULE = nn.BatchNorm2d + _FLOAT_RELU_MODULE = nn.ReLU # type: ignore[assignment] + # module class after fusing bn into conv + _FUSED_FLOAT_MODULE = nni.ConvReLU2d + + def __init__(self, + # Conv2d args + in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, + bias=None, + padding_mode='zeros', + # BatchNorm2d args + # num_features: out_channels + eps=1e-05, momentum=0.1, + # affine: True + # track_running_stats: True + # Args for this module + freeze_bn=False, + qconfig=None): + super().__init__(in_channels, out_channels, kernel_size, stride, + padding, dilation, groups, bias, + padding_mode, eps, momentum, + freeze_bn, + qconfig) + + def forward(self, input): + return F.relu(ConvBn2d._forward(self, input)) + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + +class ConvReLU2d(nnqat.Conv2d, nni._FusedModule): + r"""A ConvReLU2d module is a fused module of Conv2d and ReLU, attached with + FakeQuantize modules for weight for + quantization aware training. + + We combined the interface of :class:`~torch.nn.Conv2d` and + :class:`~torch.nn.BatchNorm2d`. + + Attributes: + weight_fake_quant: fake quant module for weight + + """ + _FLOAT_MODULE = nni.ConvReLU2d # type: ignore[assignment] + _FLOAT_CONV_MODULE = nn.Conv2d + _FLOAT_BN_MODULE: None = None + _FLOAT_RELU_MODULE = nn.ReLU + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, + bias=True, padding_mode='zeros', + qconfig=None): + super().__init__(in_channels, out_channels, kernel_size, + stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias, padding_mode=padding_mode, + qconfig=qconfig) + assert qconfig, 'qconfig must be provided for QAT module' + self.qconfig = qconfig + self.weight_fake_quant = self.qconfig.weight() + + def forward(self, input): + return F.relu( + self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)) + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + +class ConvBn3d(_ConvBnNd, nn.Conv3d): + r""" + A ConvBn3d module is a module fused from Conv3d and BatchNorm3d, + attached with FakeQuantize modules for weight, + used in quantization aware training. + + We combined the interface of :class:`torch.nn.Conv3d` and + :class:`torch.nn.BatchNorm3d`. + + Similar to :class:`torch.nn.Conv3d`, with FakeQuantize modules initialized + to default. 
+ + Attributes: + freeze_bn: + weight_fake_quant: fake quant module for weight + + """ + _FLOAT_MODULE = nni.ConvBn3d + _FLOAT_CONV_MODULE = nn.Conv3d + _FLOAT_BN_MODULE = nn.BatchNorm3d + _FLOAT_RELU_MODULE: None = None + + def __init__( + self, + # ConvNd args + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=None, + padding_mode="zeros", + # BatchNorm3d args + # num_features: out_channels + eps=1e-05, + momentum=0.1, + # affine: True + # track_running_stats: True + # Args for this module + freeze_bn=False, + qconfig=None, + ): + kernel_size = _triple(kernel_size) + stride = _triple(stride) + padding = _triple(padding) + dilation = _triple(dilation) + _ConvBnNd.__init__( + self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + False, + _triple(0), + groups, + bias, + padding_mode, + eps, + momentum, + freeze_bn, + qconfig, + dim=3, + ) + +class ConvBnReLU3d(ConvBn3d): + r""" + A ConvBnReLU3d module is a module fused from Conv3d, BatchNorm3d and ReLU, + attached with FakeQuantize modules for weight, + used in quantization aware training. + + We combined the interface of :class:`torch.nn.Conv3d` and + :class:`torch.nn.BatchNorm3d` and :class:`torch.nn.ReLU`. + + Similar to `torch.nn.Conv3d`, with FakeQuantize modules initialized to + default. + + Attributes: + weight_fake_quant: fake quant module for weight + + """ + _FLOAT_MODULE = nni.ConvBnReLU3d # type: ignore[assignment] + _FLOAT_CONV_MODULE = nn.Conv3d + _FLOAT_BN_MODULE = nn.BatchNorm3d + _FLOAT_RELU_MODULE = nn.ReLU # type: ignore[assignment] + # module class after fusing bn into conv + _FUSED_FLOAT_MODULE = nni.ConvReLU3d + + def __init__( + self, + # Conv3d args + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=None, + padding_mode="zeros", + # BatchNorm3d args + # num_features: out_channels + eps=1e-05, + momentum=0.1, + # affine: True + # track_running_stats: True + # Args for this module + freeze_bn=False, + qconfig=None, + ): + super().__init__( + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + groups, + bias, + padding_mode, + eps, + momentum, + freeze_bn, + qconfig, + ) + + def forward(self, input): + return F.relu(ConvBn3d._forward(self, input)) + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + +class ConvReLU3d(nnqat.Conv3d, nni._FusedModule): + r"""A ConvReLU3d module is a fused module of Conv3d and ReLU, attached with + FakeQuantize modules for weight for + quantization aware training. + + We combined the interface of :class:`~torch.nn.Conv3d` and + :class:`~torch.nn.BatchNorm3d`. 
+ + Attributes: + weight_fake_quant: fake quant module for weight + + """ + _FLOAT_MODULE = nni.ConvReLU3d # type: ignore[assignment] + _FLOAT_CONV_MODULE = nn.Conv3d + _FLOAT_BN_MODULE: None = None + _FLOAT_RELU_MODULE = nn.ReLU + + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + padding_mode="zeros", + qconfig=None, + ): + super().__init__( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + padding_mode=padding_mode, + qconfig=qconfig, + ) + assert qconfig, "qconfig must be provided for QAT module" + self.qconfig = qconfig + self.weight_fake_quant = self.qconfig.weight() + + def forward(self, input): + return F.relu( + self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias) + ) + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + +def update_bn_stats(mod): + if type(mod) in {ConvBnReLU1d, ConvBnReLU2d, ConvBnReLU3d, ConvBn1d, ConvBn2d, ConvBn3d}: + mod.update_bn_stats() + +def freeze_bn_stats(mod): + if type(mod) in {ConvBnReLU1d, ConvBnReLU2d, ConvBnReLU3d, ConvBn1d, ConvBn2d, ConvBn3d}: + mod.freeze_bn_stats() diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_fused.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_fused.py new file mode 100644 index 0000000000000000000000000000000000000000..5b67283dce4bb3bf7115d6ae7f9b49519f29f8fa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_fused.py @@ -0,0 +1,171 @@ +import torch +import torch.nn as nn +import torch.ao.nn.intrinsic as nni +import torch.nn.functional as F +from torch.nn import init +from torch.nn.parameter import Parameter +from torch.nn.utils.fusion import fuse_linear_bn_weights + +__all__ = [ + "LinearBn1d", +] + +class LinearBn1d(nn.modules.linear.Linear, nni._FusedModule): + r""" + A LinearBn1d module is a module fused from Linear and BatchNorm1d, attached + with FakeQuantize modules for weight, used in quantization aware training. + + We combined the interface of :class:`torch.nn.Linear` and + :class:torch.nn.BatchNorm1d`. + + Similar to :class:`torch.nn.Linear`, with FakeQuantize modules initialized + to default. 
+ + Attributes: + freeze_bn: + weight_fake_quant: fake quant module for weight + + """ + def __init__(self, + # Linear args + in_features, out_features, bias=True, + # BatchNorm1d args + # num_features: out_features + eps=1e-05, momentum=0.1, + # affine: True + # track_running_stats: True + # Args for this module + freeze_bn=False, + qconfig=None): + nn.modules.linear.Linear.__init__(self, in_features, out_features, bias) + assert qconfig, 'qconfig must be provided for QAT module' + self.qconfig = qconfig + self.freeze_bn = freeze_bn if self.training else True + self.bn = nn.BatchNorm1d(out_features, eps, momentum, True, True) + self.weight_fake_quant = self.qconfig.weight() + if bias: + self.bias = Parameter(torch.empty(out_features)) + else: + self.register_parameter('bias', None) + self.reset_bn_parameters() + + # this needs to be called after reset_bn_parameters, + # as they modify the same state + if self.training: + if freeze_bn: + self.freeze_bn_stats() + else: + self.update_bn_stats() + else: + self.freeze_bn_stats() + + def reset_running_stats(self): + self.bn.reset_running_stats() + + def reset_bn_parameters(self): + self.bn.reset_running_stats() + init.uniform_(self.bn.weight) + init.zeros_(self.bn.bias) + + def reset_parameters(self): + super().reset_parameters() + + def update_bn_stats(self): + self.freeze_bn = False + self.bn.training = True + return self + + def freeze_bn_stats(self): + self.freeze_bn = True + self.bn.training = False + return self + + def forward(self, input): + assert self.bn.running_var is not None + + # Scale the linear weights by BN's running statistics to reduce + # weight jitter, see https://arxiv.org/pdf/1806.08342.pdf, page 18 + # for motivation. + # + # Instead of + # + # x1 = F.linear(x0, fq(w), b) + # x2 = self.bn(x1) + # + # We have + # + # # scale the weight by previous batch's running statistics + # scale_factor = bn.w / bn.running_std_from_prev_batch + # # do the linear transformation without bias + # x1_scaled = F.linear(x0, fq(w * scale_factor), 0) + # # reverse the scaling and add original bias + # x1_orig = x1_scaled / scale_factor + b + # x2 = self.bn(x1_orig) + + running_std = torch.sqrt(self.bn.running_var + self.bn.eps) + scale_factor = self.bn.weight / running_std + weight_shape = [1] * len(self.weight.shape) + weight_shape[0] = -1 + bias_shape = [1] * len(self.weight.shape) + bias_shape[1] = -1 + scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape(weight_shape)) + if self.bias is not None: + zero_bias = torch.zeros_like(self.bias) + else: + zero_bias = torch.zeros(self.out_features, device=scaled_weight.device) + linear_out = F.linear(input, scaled_weight, zero_bias) + linear_out_orig = linear_out / scale_factor.reshape(bias_shape) + if self.bias is not None: + linear_out_orig = linear_out_orig + self.bias.reshape(bias_shape) + bn_out = self.bn(linear_out_orig) + return bn_out + + def train(self, mode=True): + """ + Batchnorm's training behavior is using the self.training flag. Prevent + changing it if BN is frozen. This makes sure that calling `model.train()` + on a model with a frozen BN will behave properly. + """ + self.training = mode + if not self.freeze_bn: + for module in self.children(): + module.train(mode) + return self + + @classmethod + def from_float(cls, mod): + r"""Create a qat module from a float module or qparams_dict + + Args: `mod' a float module, either produced by torch.ao.quantization + utilities or directly from user + """ + assert type(mod) == nni.LinearBn1d, 'qat.' 
+ cls.__name__ + \ + '.from_float only works for ' + nni.LinearBn1d.__name__ + assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined' + assert mod.qconfig, 'Input float module must have a valid config' + qconfig = mod.qconfig + linear, bn = mod[0], mod[1] + qat_linearbn = cls(linear.in_features, linear.out_features, linear.bias is not None, + bn.eps, bn.momentum, + False, qconfig) + qat_linearbn.weight = linear.weight + qat_linearbn.bias = linear.bias + qat_linearbn.bn.weight = bn.weight + qat_linearbn.bn.bias = bn.bias + qat_linearbn.bn.running_mean = bn.running_mean + qat_linearbn.bn.running_var = bn.running_var + qat_linearbn.bn.num_batches_tracked = bn.num_batches_tracked + return qat_linearbn + + def to_float(self): + linear = torch.nn.Linear(self.in_features, self.out_features) + assert self.bn.running_var is not None and self.bn.running_mean is not None + linear.weight, linear.bias = fuse_linear_bn_weights( + self.weight, + self.bias, + self.bn.running_mean, + self.bn.running_var, + self.bn.eps, + self.bn.weight, + self.bn.bias) + return linear diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_relu.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_relu.py new file mode 100644 index 0000000000000000000000000000000000000000..97f7a1dbc339633badc91e18445be00ffe11aed3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_relu.py @@ -0,0 +1,48 @@ +import torch +import torch.ao.nn.qat as nnqat +import torch.ao.nn.intrinsic as nni +import torch.nn.functional as F + +class LinearReLU(nnqat.Linear, nni._FusedModule): + r""" + A LinearReLU module fused from Linear and ReLU modules, attached with + FakeQuantize modules for weight, used in + quantization aware training. + + We adopt the same interface as :class:`torch.nn.Linear`. + + Similar to `torch.ao.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to + default. 
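As an illustrative aside, a minimal sketch of how this QAT `LinearReLU` is normally obtained: the quantization-aware-training prepare step fuses `Linear` + `ReLU` and swaps in the fused QAT module, rather than the module being constructed by hand. The FX workflow, the default QAT qconfig mapping, and the `fbgemm` backend string are assumptions here, not something this file prescribes:

```python
import torch
import torch.nn as nn
from torch.ao.quantization import get_default_qat_qconfig_mapping
from torch.ao.quantization.quantize_fx import prepare_qat_fx

class M(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(20, 30)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.linear(x))

model = M().train()
example_inputs = (torch.randn(128, 20),)
qconfig_mapping = get_default_qat_qconfig_mapping("fbgemm")
# with the default QAT qconfig, Linear followed by ReLU is fused and replaced
# by the QAT LinearReLU module, so fake quantization sees the fused pattern
prepared = prepare_qat_fx(model, qconfig_mapping, example_inputs)
out = prepared(*example_inputs)
```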
+ + Attributes: + weight: fake quant module for weight + + Examples:: + + >>> # xdoctest: +SKIP + >>> m = nn.qat.LinearReLU(20, 30) + >>> input = torch.randn(128, 20) + >>> output = m(input) + >>> print(output.size()) + torch.Size([128, 30]) + """ + _FLOAT_MODULE = nni.LinearReLU # type: ignore[assignment] + + def __init__(self, in_features, out_features, bias=True, + qconfig=None): + super().__init__(in_features, out_features, bias, qconfig) + + def forward(self, input): + return F.relu(F.linear(input, self.weight_fake_quant(self.weight), self.bias)) + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + + def to_float(self): + linear = torch.nn.Linear(self.in_features, self.out_features, self.bias is not None) + linear.weight = torch.nn.Parameter(self.weight.detach()) + if self.bias is not None: + linear.bias = torch.nn.Parameter(self.bias.detach()) + relu = torch.nn.ReLU() + return torch.ao.nn.intrinsic.LinearReLU(linear, relu) diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__init__.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..78c75f0c82b5605575e3abceafd41b3036cb9431 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__init__.py @@ -0,0 +1,14 @@ +from .modules import * # noqa: F403 + +__all__ = [ + 'BNReLU2d', + 'BNReLU3d', + 'ConvReLU1d', + 'ConvReLU2d', + 'ConvReLU3d', + 'LinearReLU', + 'LinearLeakyReLU', + 'LinearTanh', + 'ConvAdd2d', + 'ConvAddReLU2d', +] diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a5b5fa427aaa00d32683cddf4f32a227a6e40c5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__init__.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d79bdbfe83209f18b17cc8c7b245f322871d6c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__init__.py @@ -0,0 +1 @@ +from .modules import * # noqa: F403 diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56db4ad0a8189fb6631cfbe69c18eb35547f3943 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__init__.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ce571862b4275063d49d60a259014cb4efcffd4d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__init__.py @@ -0,0 +1,6 @@ +import torch +from .linear_relu import LinearReLU + +__all__ = [ + 'LinearReLU', +] diff --git 
a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd05fb635bb07e8cafc4a905a12a0a86be910e6a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4587f475c532861b6367fba1198065fe51bd832a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/linear_relu.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/linear_relu.py new file mode 100644 index 0000000000000000000000000000000000000000..a0bccdc0e3d3d49665cf8f7d2ae002e405cf98ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/linear_relu.py @@ -0,0 +1,55 @@ +import torch +import torch.ao.nn.quantized.dynamic as nnqd +import torch.ao.nn.intrinsic as nni + +__all__ = [ + "LinearReLU" +] + +class LinearReLU(nnqd.Linear): + r""" + A LinearReLU module fused from Linear and ReLU modules that can be used + for dynamic quantization. + Supports both, FP16 and INT8 quantization. + + We adopt the same interface as :class:`torch.ao.nn.quantized.dynamic.Linear`. 
+ + Attributes: + Same as torch.ao.nn.quantized.dynamic.Linear + + Examples:: + + >>> # xdoctest: +SKIP + >>> m = nn.intrinsic.quantized.dynamic.LinearReLU(20, 30) + >>> input = torch.randn(128, 20) + >>> output = m(input) + >>> print(output.size()) + torch.Size([128, 30]) + """ + _FLOAT_MODULE = nni.LinearReLU # type: ignore[assignment] + + def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8): + super().__init__(in_features, out_features, bias, dtype) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self._packed_params.dtype == torch.qint8: + # TODO check if we should set reduce_rage = True by default here + Y = torch.ops.quantized.linear_relu_dynamic( + x, self._packed_params._packed_params, reduce_range=True) + elif self._packed_params.dtype == torch.float16: + Y = torch.ops.quantized.linear_relu_dynamic_fp16( + x, self._packed_params._packed_params) + else: + raise RuntimeError('Unsupported dtype on dynamic quantized linear relu!') + return Y.to(x.dtype) + + def _get_name(self): + return 'DynamicQuantizedLinearReLU' + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + + @classmethod + def from_reference(cls, ref_qlinear_relu): + return super().from_reference(ref_qlinear_relu[0]) diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__init__.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..51149bff646cf2f729a958b608be4ca3c0639115 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__init__.py @@ -0,0 +1,17 @@ +from .linear_relu import LinearReLU, LinearLeakyReLU, LinearTanh +from .conv_relu import ConvReLU1d, ConvReLU2d, ConvReLU3d +from .bn_relu import BNReLU2d, BNReLU3d +from .conv_add import ConvAdd2d, ConvAddReLU2d + +__all__ = [ + 'LinearReLU', + 'ConvReLU1d', + 'ConvReLU2d', + 'ConvReLU3d', + 'BNReLU2d', + 'BNReLU3d', + 'LinearLeakyReLU', + 'LinearTanh', + 'ConvAdd2d', + 'ConvAddReLU2d', +] diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..374ffca43459ee3e13f68e98675ec6fa6c8345f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8811f7ab60a4f91e57687a4fa5d2ae9521046317 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_add.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_add.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47c5a07e487287326a664da40ac745920004f885 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_add.cpython-310.pyc 
differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f876c4452c46b379ee2c25bd9002e89223f0a627 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c948777c3c31ebc89ae6a3fdc251f56c3077d1cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/bn_relu.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/bn_relu.py new file mode 100644 index 0000000000000000000000000000000000000000..856fa43aac9941b54af5f74d7ea961bab364cb8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/bn_relu.py @@ -0,0 +1,82 @@ + +import torch +import torch.ao.nn.intrinsic +import torch.ao.nn.intrinsic.qat +import torch.ao.nn.quantized as nnq + +__all__ = [ + "BNReLU2d", + "BNReLU3d" +] + +class BNReLU2d(nnq.BatchNorm2d): + r""" + A BNReLU2d module is a fused module of BatchNorm2d and ReLU + + We adopt the same interface as :class:`torch.ao.nn.quantized.BatchNorm2d`. + + Attributes: + Same as torch.ao.nn.quantized.BatchNorm2d + + """ + _FLOAT_MODULE = torch.ao.nn.intrinsic.BNReLU2d + + def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None): + super().__init__(num_features, eps=eps, momentum=momentum, device=device, dtype=dtype) + + def forward(self, input): + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 4: + raise ValueError("Input shape must be `(N, C, H, W)`!") + return torch.ops.quantized.batch_norm2d_relu( + input, self.weight, self.bias, self.running_mean, + self.running_var, self.eps, self.scale, self.zero_point) + + def _get_name(self): + return 'QuantizedBNReLU2d' + + @classmethod + def from_float(cls, mod): + # TODO: Add qat support for BNReLU2d + return super().from_float(mod) + + @classmethod + def from_reference(cls, bn_relu, output_scale, output_zero_point): + return super().from_reference(bn_relu[0], output_scale, output_zero_point) + +class BNReLU3d(nnq.BatchNorm3d): + r""" + A BNReLU3d module is a fused module of BatchNorm3d and ReLU + + We adopt the same interface as :class:`torch.ao.nn.quantized.BatchNorm3d`. 
+ + Attributes: + Same as torch.ao.nn.quantized.BatchNorm3d + + """ + _FLOAT_MODULE = torch.ao.nn.intrinsic.BNReLU3d + + def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None): + super().__init__(num_features, eps=eps, momentum=momentum, device=device, dtype=dtype) + + def forward(self, input): + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 5: + raise ValueError("Input shape must be `(N, C, D, H, W)`!") + return torch.ops.quantized.batch_norm3d_relu( + input, self.weight, self.bias, self.running_mean, + self.running_var, self.eps, self.scale, self.zero_point) + + def _get_name(self): + return 'QuantizedBNReLU3d' + + @classmethod + def from_float(cls, mod): + # TODO: Add qat support for BNReLU3d + return super().from_float(mod) + + @classmethod + def from_reference(cls, bn_relu, output_scale, output_zero_point): + return super().from_reference(bn_relu[0], output_scale, output_zero_point) diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_add.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_add.py new file mode 100644 index 0000000000000000000000000000000000000000..6e46aa8915e4c3830c36f4d5cd9e8286ebaf3afd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_add.py @@ -0,0 +1,93 @@ +import torch +import torch.ao.nn.intrinsic +import torch.ao.nn.intrinsic.qat +import torch.nn.functional as F +import torch.ao.nn.quantized as nnq + +_reverse_repeat_padding = nnq.modules.conv._reverse_repeat_padding + +class ConvAdd2d(nnq.Conv2d): + r""" + A ConvAdd2d module is a fused module of Conv2d and Add + + We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`. + + Attributes: + Same as torch.ao.nn.quantized.Conv2d + + """ + _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvAdd2d # type: ignore[assignment] + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True, + padding_mode='zeros', device=None, dtype=None): + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias, + padding_mode=padding_mode, device=device, dtype=dtype) + + def forward(self, input, extra_input): + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 4: + raise ValueError("Input shape must be `(N, C, H, W)`!") + if self.padding_mode != 'zeros': + _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding) + input = F.pad(input, _reversed_padding_repeated_twice, + mode=self.padding_mode) + return torch.ops.quantized.conv2d_add( + input, extra_input, self._packed_params, self.scale, self.zero_point) + + def _get_name(self): + return 'QuantizedConvAdd2d' + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + + @classmethod + def from_reference(cls, ref_qconv, output_scale, output_zero_point): + return super().from_reference(ref_qconv[0], output_scale, output_zero_point) + +class ConvAddReLU2d(nnq.Conv2d): + r""" + A ConvAddReLU2d module is a fused module of Conv2d, Add and Relu + + We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`. 
+ + Attributes: + Same as torch.ao.nn.quantized.Conv2d + + """ + _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvAddReLU2d # type: ignore[assignment] + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True, + padding_mode='zeros', device=None, dtype=None): + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias, + padding_mode=padding_mode, device=device, dtype=dtype) + + def forward(self, input, extra_input): + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 4: + raise ValueError("Input shape must be `(N, C, H, W)`!") + if self.padding_mode != 'zeros': + _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding) + input = F.pad(input, _reversed_padding_repeated_twice, + mode=self.padding_mode) + return torch.ops.quantized.conv2d_add_relu( + input, extra_input, self._packed_params, self.scale, self.zero_point) + + def _get_name(self): + return 'QuantizedConvAddReLU2d' + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + + @classmethod + def from_reference(cls, ref_qconv, output_scale, output_zero_point): + return super().from_reference(ref_qconv[0], output_scale, output_zero_point) diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_relu.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_relu.py new file mode 100644 index 0000000000000000000000000000000000000000..5cdc9004c99c600fbeaad5cf6d2196614ca36810 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_relu.py @@ -0,0 +1,175 @@ + +import torch +import torch.ao.nn.intrinsic +import torch.ao.nn.intrinsic.qat +import torch.nn.functional as F +import torch.ao.nn.quantized as nnq + +from torch.nn.utils import fuse_conv_bn_weights + +__all__ = [ + "ConvReLU1d", + "ConvReLU2d", + "ConvReLU3d", +] + +_reverse_repeat_padding = nnq.modules.conv._reverse_repeat_padding + +# TODO: factor out the common parts to ConvNd +class ConvReLU1d(nnq.Conv1d): + r""" + A ConvReLU1d module is a fused module of Conv1d and ReLU + + We adopt the same interface as :class:`torch.ao.nn.quantized.Conv1d`. 
+ + Attributes: + Same as torch.ao.nn.quantized.Conv1d + + """ + _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvReLU1d # type: ignore[assignment] + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True, + padding_mode='zeros', device=None, dtype=None): + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias, + padding_mode=padding_mode, device=device, dtype=dtype) + + def forward(self, input): + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 3: + raise ValueError("Input shape must be `(N, C, L)`!") + if self.padding_mode != 'zeros': + # Padding in Conv1d is stored as (p, p), need to get (p,) + _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding[:1]) + input = F.pad(input, _reversed_padding_repeated_twice, + mode=self.padding_mode) + return torch.ops.quantized.conv1d_relu( + input, self._packed_params, self.scale, self.zero_point) + + def _get_name(self): + return 'QuantizedConvReLU1d' + + @classmethod + def from_float(cls, mod): + if type(mod) == torch.ao.nn.intrinsic.qat.ConvBnReLU1d: + assert mod.bn.running_var is not None and mod.bn.running_mean is not None + mod.weight, mod.bias = fuse_conv_bn_weights( + mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var, + mod.bn.eps, mod.bn.weight, mod.bn.bias) + return super().from_float(mod) + + @classmethod + def from_reference(cls, ref_qconv, output_scale, output_zero_point): + assert type(ref_qconv) != torch.ao.nn.intrinsic.ConvBnReLU1d, \ + "BatchNorm1d should be fused into Conv1d before converting to reference module" + return super().from_reference(ref_qconv[0], output_scale, output_zero_point) + +class ConvReLU2d(nnq.Conv2d): + r""" + A ConvReLU2d module is a fused module of Conv2d and ReLU + + We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`. 
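As an illustrative aside, these quantized ConvReLU modules are typically produced by converting a calibrated float model rather than instantiated directly. A rough post-training static quantization sketch using the FX workflow; the qconfig mapping, the `fbgemm` backend string, and the toy model are assumptions used only for demonstration:

```python
import torch
import torch.nn as nn
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx

float_model = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3), nn.ReLU()).eval()
example_inputs = (torch.randn(1, 3, 32, 32),)

prepared = prepare_fx(float_model, get_default_qconfig_mapping("fbgemm"), example_inputs)
prepared(*example_inputs)          # calibration pass (a single batch here for brevity)
quantized = convert_fx(prepared)   # Conv2d + ReLU is converted to the fused quantized ConvReLU2d
print(quantized)
```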
+ + Attributes: + Same as torch.ao.nn.quantized.Conv2d + + """ + _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvReLU2d # type: ignore[assignment] + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True, + padding_mode='zeros', device=None, dtype=None): + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias, + padding_mode=padding_mode, device=device, dtype=dtype) + + def forward(self, input): + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 4: + raise ValueError("Input shape must be `(N, C, H, W)`!") + if self.padding_mode != 'zeros': + _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding) + input = F.pad(input, _reversed_padding_repeated_twice, + mode=self.padding_mode) + return torch.ops.quantized.conv2d_relu( + input, self._packed_params, self.scale, self.zero_point) + + def _get_name(self): + return 'QuantizedConvReLU2d' + + @classmethod + def from_float(cls, mod): + if type(mod) == torch.ao.nn.intrinsic.qat.ConvBnReLU2d: + assert mod.bn.running_var is not None and mod.bn.running_mean is not None + mod.weight, mod.bias = fuse_conv_bn_weights( + mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var, + mod.bn.eps, mod.bn.weight, mod.bn.bias) + return super().from_float(mod) + + @classmethod + def from_reference(cls, ref_qconv, output_scale, output_zero_point): + assert type(ref_qconv) != torch.ao.nn.intrinsic.ConvBnReLU2d, \ + "BatchNorm2d should be fused into Conv2d before converting to reference module" + return super().from_reference(ref_qconv[0], output_scale, output_zero_point) + + +class ConvReLU3d(nnq.Conv3d): + r""" + A ConvReLU3d module is a fused module of Conv3d and ReLU + + We adopt the same interface as :class:`torch.ao.nn.quantized.Conv3d`. 
+ + Attributes: Same as torch.ao.nn.quantized.Conv3d + + """ + _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvReLU3d # type: ignore[assignment] + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True, + padding_mode='zeros', device=None, dtype=None): + assert padding_mode != 'reflect', "Conv3d does not support reflection padding" + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias, + padding_mode=padding_mode, device=device, dtype=dtype) + + def forward(self, input): + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 5: + raise ValueError("Input shape must be `(N, C, D, H, W)`!") + if self.padding_mode != 'zeros': + _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding) + input = F.pad(input, _reversed_padding_repeated_twice, + mode=self.padding_mode) + return torch.ops.quantized.conv3d_relu( + input, self._packed_params, self.scale, self.zero_point) + + def _get_name(self): + return 'QuantizedConvReLU3d' + + @classmethod + def from_float(cls, mod): + if type(mod) == torch.ao.nn.intrinsic.qat.ConvBnReLU3d: + assert mod.bn.running_var is not None and mod.bn.running_mean is not None + mod.weight, mod.bias = fuse_conv_bn_weights( + mod.weight, + mod.bias, + mod.bn.running_mean, + mod.bn.running_var, + mod.bn.eps, + mod.bn.weight, + mod.bn.bias, + ) + return super().from_float(mod) + + @classmethod + def from_reference(cls, ref_qconv, output_scale, output_zero_point): + assert type(ref_qconv) != torch.ao.nn.intrinsic.ConvBnReLU3d, \ + "BatchNorm3d should be fused into Conv3d before converting to reference module" + return super().from_reference(ref_qconv[0], output_scale, output_zero_point) diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/linear_relu.py b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/linear_relu.py new file mode 100644 index 0000000000000000000000000000000000000000..e774a72dc8229194328ef2af3054599506f00d3e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/linear_relu.py @@ -0,0 +1,177 @@ +import torch +import torch.ao.nn.quantized as nnq +import torch.ao.nn.intrinsic as nni +from torch.ao.nn.quantized.modules.utils import _quantize_weight + +__all__ = [ + "LinearReLU", + "LinearLeakyReLU", + "LinearTanh", +] + +class LinearReLU(nnq.Linear): + r""" + A LinearReLU module fused from Linear and ReLU modules + + We adopt the same interface as :class:`torch.ao.nn.quantized.Linear`. 
+ + Attributes: + Same as torch.ao.nn.quantized.Linear + + Examples:: + + >>> # xdoctest: +SKIP + >>> m = nn.intrinsic.LinearReLU(20, 30) + >>> input = torch.randn(128, 20) + >>> output = m(input) + >>> print(output.size()) + torch.Size([128, 30]) + """ + _FLOAT_MODULE = nni.LinearReLU # type: ignore[assignment] + + def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8): + super().__init__(in_features, out_features, bias, dtype) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch.ops.quantized.linear_relu( + x, self._packed_params._packed_params, self.scale, self.zero_point) + + def _get_name(self): + return 'QuantizedLinearReLU' + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + + @classmethod + def from_reference(cls, ref_linear_relu, output_scale, output_zero_point): + return super().from_reference(ref_linear_relu[0], output_scale, output_zero_point) + +class LinearLeakyReLU(nnq.Linear): + r""" + For onednn backend only + A LinearLeakyReLU module fused from Linear and LeakyReLU modules + We adopt the same interface as :class:`torch.ao.nn.quantized.Linear`. + Attributes: + Same as torch.ao.nn.quantized.Linear + + negative_slope + Examples:: + >>> # xdoctest: +SKIP + >>> m = nn.intrinsic.LinearLeakyReLU(20, 30, 0.01) + >>> input = torch.randn(128, 20) + >>> output = m(input) + >>> print(output.size()) + torch.Size([128, 30]) + """ + _FLOAT_MODULE = nni.LinearLeakyReLU # type: ignore[assignment] + + def __init__(self, in_features, out_features, negative_slope, bias=True, dtype=torch.qint8): + super().__init__(in_features, out_features, bias, dtype) + self.negative_slope = negative_slope + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch.ops.quantized.linear_leaky_relu( + x, self._packed_params._packed_params, self.scale, self.zero_point, self.negative_slope) + + def _get_name(self): + return 'QuantizedLinearLeakyReLU' + + @classmethod + def from_float(cls, mod): + assert type(mod) == nni.LinearLeakyReLU, 'Input float module should be LinearLeakyReLU' + assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined' + activation_post_process = mod.activation_post_process + leaky_relu = mod[1] + mod = mod[0] + weight_post_process = mod.qconfig.weight() + weight_post_process(mod.weight) + dtype = weight_post_process.dtype + act_scale, act_zp = activation_post_process.calculate_qparams() # type: ignore[union-attr,operator] + assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8' + qweight = _quantize_weight(mod.weight.float(), weight_post_process) + qlinear_leaky_relu = cls( + mod.in_features, + mod.out_features, + leaky_relu.negative_slope, + dtype=dtype) + qlinear_leaky_relu.set_weight_bias(qweight, mod.bias) + qlinear_leaky_relu.scale = float(act_scale) + qlinear_leaky_relu.zero_point = int(act_zp) + return qlinear_leaky_relu + + @classmethod + def from_reference(cls, ref_mod, output_scale, output_zero_point): + linear = ref_mod[0] + leaky_relu = ref_mod[1] + qlinear_leaky_relu = cls( + linear.in_features, + linear.out_features, + leaky_relu.negative_slope) + qweight = linear.get_quantized_weight() + qlinear_leaky_relu.set_weight_bias(qweight, linear.bias) + qlinear_leaky_relu.scale = float(output_scale) + qlinear_leaky_relu.zero_point = int(output_zero_point) + return qlinear_leaky_relu + +class LinearTanh(nnq.Linear): + r""" + A LinearTanh module fused from Linear and Tanh modules + + We adopt the same interface as :class:`torch.ao.nn.quantized.Linear`. 
+ + Attributes: + Same as torch.ao.nn.quantized.Linear + + Examples:: + + >>> # xdoctest: +SKIP + >>> m = nn.intrinsic.LinearTanh(20, 30) + >>> input = torch.randn(128, 20) + >>> output = m(input) + >>> print(output.size()) + torch.Size([128, 30]) + """ + _FLOAT_MODULE = nni.LinearTanh # type: ignore[assignment] + + def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8): + super().__init__(in_features, out_features, bias, dtype) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch.ops.quantized.linear_tanh( + x, self._packed_params._packed_params, self.scale, self.zero_point) + + def _get_name(self): + return 'QuantizedLinearTanh' + + @classmethod + def from_float(cls, mod): + assert type(mod) == nni.LinearTanh, 'Input float module should be LinearTanh' + assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined' + activation_post_process = mod.activation_post_process + mod = mod[0] + weight_post_process = mod.qconfig.weight() + weight_post_process(mod.weight) + dtype = weight_post_process.dtype + act_scale, act_zp = activation_post_process.calculate_qparams() # type: ignore[union-attr,operator] + assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8' + qweight = _quantize_weight(mod.weight.float(), weight_post_process) + qlinear_tanh = cls( + mod.in_features, + mod.out_features, + dtype=dtype) + qlinear_tanh.set_weight_bias(qweight, mod.bias) + qlinear_tanh.scale = float(act_scale) + qlinear_tanh.zero_point = int(act_zp) + return qlinear_tanh + + @classmethod + def from_reference(cls, ref_mod, output_scale, output_zero_point): + linear = ref_mod[0] + qlinear_tanh = cls( + linear.in_features, + linear.out_features) + qweight = linear.get_quantized_weight() + qlinear_tanh.set_weight_bias(qweight, linear.bias) + qlinear_tanh.scale = float(output_scale) + qlinear_tanh.zero_point = int(output_zero_point) + return qlinear_tanh diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__init__.py b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d79bdbfe83209f18b17cc8c7b245f322871d6c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__init__.py @@ -0,0 +1 @@ +from .modules import * # noqa: F403 diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00f31e71261e3cdb3f671f312c1967283efd74f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__init__.py b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a7a97e0a8da831d63d25d6f02edf77cb85b280a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__init__.py @@ -0,0 +1,19 @@ + +from .linear import Linear +from .rnn import LSTM, GRU, LSTMCell, RNNCell, GRUCell +from .conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d + +__all__ = [ + 'Linear', + 'LSTM', + 'GRU', + 'LSTMCell', + 'RNNCell', + 'GRUCell', + 'Conv1d', + 'Conv2d', + 'Conv3d', + 
'ConvTranspose1d', + 'ConvTranspose2d', + 'ConvTranspose3d', +] diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..678de85dcaf3efc4574e0fb3b3b2158a615f1144 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15b661d07d53b8032014fdd2e684aecb408cc259 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/linear.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cc08162e97aff665e28072b4ee9eaadcae0f0ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/linear.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/rnn.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/rnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11849887a6b6a163ed36e2aaaf182e221ae72828 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/rnn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/conv.py b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..f1af77964136556d209605665f77ed51c3a0d44e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/conv.py @@ -0,0 +1,399 @@ +r"""Dynamically quantized convolution modules.""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from torch import Tensor +from torch._ops import ops +from torch.nn.common_types import _size_1_t +from torch.nn.modules.utils import _single, _pair, _triple +from torch.ao.nn.quantized.modules.conv import _reverse_repeat_padding +import torch.ao.nn.quantized as nnq +import warnings + +__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d'] + + +class Conv1d(nnq.Conv1d): + r"""A dynamically quantized conv module with floating point tensors as inputs and outputs. + + For details on input arguments, parameters, and implementation see + :class:`~torch.nn.Conv1d` and :class:`~torch.ao.nn.quantized.dynamic.Conv1d` and + + Attributes: + weight (Tensor): packed tensor derived from the learnable weight + parameter. + scale (Tensor): scalar for the output scale + zero_point (Tensor): scalar for the output zero point + + See :class:`~torch.nn.Conv1d` for other attributes. 
+ + Examples:: + + >>> # xdoctest: +SKIP + >>> m = nn.quantized.dynamic.Conv1d(16, 33, 3, stride=2) + >>> input = torch.randn(20, 16, 100) + >>> output = m(input) + + """ + + _FLOAT_MODULE = nn.Conv1d + _NNIQAT_CONV_BN_MODULE = None # type: ignore[assignment] + _NNI_CONV_RELU_MODULE = None # type: ignore[assignment] + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: _size_1_t, + stride: _size_1_t = 1, + padding: _size_1_t = 0, + dilation: _size_1_t = 1, + groups: int = 1, + bias: bool = True, + padding_mode: str = 'zeros', + device=None, + dtype=None, + reduce_range=True): + warnings.warn( + "The current implementation of the {} module has poor numerical accuracy and its use is not recommended".format( + self._get_name() + ) + ) + factory_kwargs = {'device': device, 'dtype': dtype} + kernel_size = _single(kernel_size) + stride = _single(stride) + padding = padding if isinstance(padding, str) else _single(padding) + dilation = _single(dilation) + + super().__init__( + in_channels, out_channels, kernel_size, stride, padding, dilation, + groups, bias, padding_mode, **factory_kwargs) + + def _get_name(self): + return 'DynamicQuantizedConv1d' + + def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor: + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 3: + raise ValueError("Input shape must be `(N, C, L)`!") + if self.padding_mode != 'zeros': + # Padding in Conv1d is stored as (p, p), need to get (p,) + _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding[:1]) + input = F.pad(input, _reversed_padding_repeated_twice, + mode=self.padding_mode) + return ops.quantized.conv1d_dynamic(input, self._packed_params, reduce_range) + + +class Conv2d(nnq.Conv2d): + r"""A dynamically quantized conv module with floating point tensors as inputs and outputs. + + For details on input arguments, parameters, and implementation see + :class:`~torch.nn.Conv2d` and :class:`~torch.ao.nn.quantized.dynamic.Conv2d` and + + Attributes: + weight (Tensor): packed tensor derived from the learnable weight + parameter. + scale (Tensor): scalar for the output scale + zero_point (Tensor): scalar for the output zero point + + See :class:`~torch.nn.Conv2d` for other attributes. 
+ + Examples:: + + >>> # xdoctest: +SKIP + >>> # With square kernels and equal stride + >>> m = nn.quantized.dynamic.Conv2d(16, 33, 3, stride=2) + >>> # non-square kernels and unequal stride and with padding + >>> m = nn.quantized.dynamic.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2)) + >>> # non-square kernels and unequal stride and with padding and dilation + >>> m = nn.quantized.dynamic.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)) + >>> input = torch.randn(20, 16, 50, 100) + >>> output = m(input) + + """ + _FLOAT_MODULE = nn.Conv2d + _NNIQAT_CONV_BN_MODULE = None # type: ignore[assignment] + _NNI_CONV_RELU_MODULE = None # type: ignore[assignment] + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True, + padding_mode='zeros', device=None, dtype=None): + warnings.warn( + "The current implementation of the {} module has poor numerical accuracy and its use is not recommended".format( + self._get_name() + ) + ) + factory_kwargs = {'device': device, 'dtype': dtype} + kernel_size = _pair(kernel_size) + stride = _pair(stride) + padding = _pair(padding) + dilation = _pair(dilation) + + super().__init__( + in_channels, out_channels, kernel_size, stride, padding, dilation, + groups, bias, padding_mode, **factory_kwargs) + + def _get_name(self): + return 'DynamicQuantizedConv2d' + + def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor: + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 4: + raise ValueError("Input shape must be `(N, C, H, W)`!") + if self.padding_mode != 'zeros': + _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding) + input = F.pad(input, _reversed_padding_repeated_twice, + mode=self.padding_mode) + return ops.quantized.conv2d_dynamic( + input, self._packed_params, reduce_range) + + +class Conv3d(nnq.Conv3d): + r"""A dynamically quantized conv module with floating point tensors as inputs and outputs. + + For details on input arguments, parameters, and implementation see + :class:`~torch.nn.Conv3d` and :class:`~torch.ao.nn.quantized.dynamic.Conv3d` and + + Attributes: + weight (Tensor): packed tensor derived from the learnable weight + parameter. + scale (Tensor): scalar for the output scale + zero_point (Tensor): scalar for the output zero point + + See :class:`~torch.nn.Conv3d` for other attributes. 
+ + Examples:: + + >>> # xdoctest: +SKIP + >>> # With square kernels and equal stride + >>> m = nn.quantized.dynamic.Conv3d(16, 33, 3, stride=2) + >>> # non-square kernels and unequal stride and with padding + >>> m = nn.quantized.dynamic.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2)) + >>> # non-square kernels and unequal stride and with padding and dilation + >>> m = nn.quantized.dynamic.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), dilation=(1, 2, 2)) + >>> input = torch.randn(20, 16, 56, 56, 56) + >>> output = m(input) + + """ + _FLOAT_MODULE = nn.Conv3d + _NNIQAT_CONV_BN_MODULE = None # type: ignore[assignment] + _NNI_CONV_RELU_MODULE = None # type: ignore[assignment] + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True, + padding_mode='zeros', device=None, dtype=None): + warnings.warn( + "The current implementation of the {} module has poor numerical accuracy and its use is not recommended".format( + self._get_name() + ) + ) + assert padding_mode != 'reflect', "Conv3d does not support reflection padding" + factory_kwargs = {'device': device, 'dtype': dtype} + kernel_size = _triple(kernel_size) + stride = _triple(stride) + padding = _triple(padding) + dilation = _triple(dilation) + super()._init( + in_channels, out_channels, kernel_size, stride, padding, dilation, + False, _triple(0), groups, bias, padding_mode, **factory_kwargs) + + def _get_name(self): + return 'DynamicQuantizedConv3d' + + def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor: + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 5: + raise ValueError("Input shape must be `(N, C, D, H, W)`!") + if self.padding_mode != 'zeros': + _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding) + input = F.pad(input, _reversed_padding_repeated_twice, + mode=self.padding_mode) + return ops.quantized.conv3d_dynamic( + input, self._packed_params, reduce_range) + + +class ConvTranspose1d(nnq.ConvTranspose1d): + r"""A dynamically quantized transposed convolution module with floating point tensors as inputs and outputs. + + For details on input arguments, parameters, and implementation see + :class:`~torch.nn.ConvTranspose1d`. + + For special notes, please, see :class:`~torch.ao.nn.quantized.dynamic.Conv1d` + + Attributes: + weight (Tensor): packed tensor derived from the learnable weight + parameter. + scale (Tensor): scalar for the output scale + zero_point (Tensor): scalar for the output zero point + See :class:`~torch.nn.ConvTranspose1d` for other attributes. 
+ + Examples:: + + >>> # xdoctest: +SKIP + >>> # With square kernels and equal stride + >>> m = nndq.ConvTranspose1d(16, 33, 3, stride=2) + >>> # non-square kernels and unequal stride and with padding + >>> m = nndq.ConvTranspose1d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2)) + >>> output = m(input) + >>> # exact output size can be also specified as an argument + >>> downsample = nndq.Conv1d(16, 16, 3, stride=2, padding=1) + >>> upsample = nndq.ConvTranspose1d(16, 16, 3, stride=2, padding=1) + >>> h = downsample(input) + >>> h.size() + torch.Size([1, 16, 6]) + >>> output = upsample(h, output_size=input.size()) + >>> output.size() + torch.Size([1, 16, 12]) + """ + + _FLOAT_MODULE = nn.ConvTranspose1d + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, output_padding=0, groups=1, bias=True, + dilation=1, padding_mode='zeros', device=None, dtype=None): + warnings.warn( + "The current implementation of the {} module has poor numerical accuracy and its use is not recommended".format( + self._get_name() + ) + ) + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__( + in_channels, out_channels, kernel_size, stride, padding, output_padding, + groups, bias, dilation, padding_mode, **factory_kwargs) + + def _get_name(self): + return 'DynamicQuantizedConvTranspose1d' + + def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor: + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 3: + raise ValueError("Input shape must be `(N, C, L)`!") + return torch.ops.quantized.conv_transpose1d_dynamic( + input, self._packed_params, reduce_range) + + +class ConvTranspose2d(nnq.ConvTranspose2d): + r"""A dynamically quantized transposed convolution module with floating point tensors as inputs and outputs. + + For details on input arguments, parameters, and implementation see + :class:`~torch.nn.ConvTranspose2d`. + + For special notes, please, see :class:`~torch.ao.nn.quantized.dynamic.Conv2d` + + Attributes: + weight (Tensor): packed tensor derived from the learnable weight + parameter. + scale (Tensor): scalar for the output scale + zero_point (Tensor): scalar for the output zero point + See :class:`~torch.nn.ConvTranspose2d` for other attributes. 
+ + Examples:: + + >>> # xdoctest: +SKIP + >>> # With square kernels and equal stride + >>> m = nnq.ConvTranspose2d(16, 33, 3, stride=2) + >>> # non-square kernels and unequal stride and with padding + >>> m = nnq.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2)) + >>> output = m(input) + >>> # exact output size can be also specified as an argument + >>> downsample = nnq.Conv2d(16, 16, 3, stride=2, padding=1) + >>> upsample = nnq.ConvTranspose2d(16, 16, 3, stride=2, padding=1) + >>> h = downsample(input) + >>> h.size() + torch.Size([1, 16, 6, 6]) + >>> output = upsample(h, output_size=input.size()) + >>> output.size() + torch.Size([1, 16, 12, 12]) + """ + + _FLOAT_MODULE = nn.ConvTranspose2d + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, output_padding=0, groups=1, bias=True, + dilation=1, padding_mode='zeros', device=None, dtype=None): + warnings.warn( + "The current implementation of the {} module has poor numerical accuracy and its use is not recommended".format( + self._get_name() + ) + ) + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__( + in_channels, out_channels, kernel_size, stride, padding, output_padding, + groups, bias, dilation, padding_mode, **factory_kwargs) + + def _get_name(self): + return 'DynamicQuantizedConvTranspose2d' + + def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor: + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 4: + raise ValueError("Input shape must be `(N, C, H, W)`!") + return ops.quantized.conv_transpose2d_dynamic( + input, self._packed_params, reduce_range) + + +class ConvTranspose3d(nnq.ConvTranspose3d): + r"""A dynamically quantized transposed convolution module with floating point tensors as inputs and outputs. + + For details on input arguments, parameters, and implementation see + :class:`~torch.nn.ConvTranspose3d`. + + For special notes, please, see :class:`~torch.ao.nn.quantized.dynamic.Conv3d` + + Attributes: + weight (Tensor): packed tensor derived from the learnable weight + parameter. + scale (Tensor): scalar for the output scale + zero_point (Tensor): scalar for the output zero point + See :class:`~torch.nn.ConvTranspose3d` for other attributes. 
+ + Examples:: + + >>> # xdoctest: +SKIP + >>> # With cubic kernels and equal stride + >>> m = nnq.ConvTranspose3d(16, 33, 3, stride=2) + >>> # non-cubic kernels and unequal stride and with padding + >>> m = nnq.ConvTranspose3d(16, 33, (3, 3, 5), stride=(2, 1, 1), padding=(4, 2, 2)) + >>> output = m(input) + >>> # exact output size can be also specified as an argument + >>> downsample = nnq.Conv3d(16, 16, 3, stride=2, padding=1) + >>> upsample = nnq.ConvTranspose3d(16, 16, 3, stride=2, padding=1) + >>> h = downsample(input) + >>> h.size() + torch.Size([1, 16, 6, 6, 6]) + >>> output = upsample(h, output_size=input.size()) + >>> output.size() + torch.Size([1, 16, 12, 12, 12]) + """ + + _FLOAT_MODULE = nn.ConvTranspose3d + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, output_padding=0, groups=1, bias=True, + dilation=1, padding_mode='zeros', device=None, dtype=None): + warnings.warn( + "The current implementation of the {} module has poor numerical accuracy and its use is not recommended".format( + self._get_name() + ) + ) + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__( + in_channels, out_channels, kernel_size, stride, padding, output_padding, + groups, bias, dilation, padding_mode, **factory_kwargs) + + def _get_name(self): + return 'DynamicQuantizedConvTranspose3d' + + def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor: + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 5: + raise ValueError("Input shape must be `(N, C, T, H, W)`!") + return ops.quantized.conv_transpose3d_dynamic( + input, self._packed_params, reduce_range) diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/linear.py b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..a8a366e57f53cd022fe6e12e20b76f76afdd4726 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/linear.py @@ -0,0 +1,132 @@ +import torch +import torch.ao.nn.quantized as nnq +from torch.ao.nn.quantized.modules.utils import _quantize_weight +import torch.ao.nn.intrinsic as nni + +__all__ = [ + "Linear", +] + + +class Linear(nnq.Linear): + r""" + A dynamic quantized linear module with floating point tensor as inputs and outputs. + We adopt the same interface as `torch.nn.Linear`, please see + https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation. + + Similar to :class:`torch.nn.Linear`, attributes will be randomly + initialized at module creation time and will be overwritten later + + Attributes: + weight (Tensor): the non-learnable quantized weights of the module which are of + shape :math:`(\text{out\_features}, \text{in\_features})`. + bias (Tensor): the non-learnable floating point bias of the module of shape + :math:`(\text{out\_features})`. If :attr:`bias` is ``True``, + the values are initialized to zero. 
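As an illustrative aside, this dynamically quantized Linear is usually produced via `torch.ao.quantization.quantize_dynamic` rather than constructed directly. A minimal sketch; the toy model is only for demonstration:

```python
import torch
import torch.nn as nn
from torch.ao.quantization import quantize_dynamic

float_model = nn.Sequential(nn.Linear(20, 30), nn.ReLU(), nn.Linear(30, 5))
# weights are quantized to int8 ahead of time; activations stay in float and
# are quantized on the fly inside the op for each batch
quantized_model = quantize_dynamic(float_model, {nn.Linear}, dtype=torch.qint8)

out = quantized_model(torch.randn(128, 20))
print(quantized_model[0])  # repr shows DynamicQuantizedLinear(in_features=20, out_features=30, ...)
```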
+ + Examples:: + + >>> # xdoctest: +SKIP + >>> m = nn.quantized.dynamic.Linear(20, 30) + >>> input = torch.randn(128, 20) + >>> output = m(input) + >>> print(output.size()) + torch.Size([128, 30]) + """ + # version used in this class is different from the parent class nnq.Linear + _version = 4 + + def __init__(self, in_features, out_features, bias_=True, dtype=torch.qint8): + super().__init__(in_features, out_features, bias_, dtype=dtype) + # We don't muck around with buffers or attributes or anything here + # to keep the module simple. *everything* is simply a Python attribute. + # Serialization logic is explicitly handled in the below serialization and + # deserialization modules + self.version = 4 + + def forward(self, x): + # Note that we can handle self.bias == None case. + if self._packed_params.dtype == torch.qint8: + if self.version is None or self.version < 4: + Y = torch.ops.quantized.linear_dynamic( + x, self._packed_params._packed_params) + else: + Y = torch.ops.quantized.linear_dynamic( + x, self._packed_params._packed_params, reduce_range=True) + elif self._packed_params.dtype == torch.float16: + Y = torch.ops.quantized.linear_dynamic_fp16( + x, self._packed_params._packed_params) + else: + raise RuntimeError('Unsupported dtype on dynamic quantized linear!') + return Y.to(x.dtype) + + def _get_name(self): + return 'DynamicQuantizedLinear' + + def extra_repr(self): + extra_repr_str = 'in_features={}, out_features={}, dtype={}'.format( + self.in_features, self.out_features, self._packed_params.dtype + ) + if self._packed_params.dtype == torch.qint8: + extra_repr_str += f', qscheme={self.weight().qscheme()}' + return extra_repr_str + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + version = local_metadata.get('version', None) + self.version = version + super()._load_from_state_dict(state_dict, prefix, local_metadata, False, + missing_keys, unexpected_keys, error_msgs) + + @classmethod + def from_float(cls, mod): + r"""Create a dynamic quantized module from a float module or qparams_dict + + Args: + mod (Module): a float module, either produced by torch.ao.quantization + utilities or provided by the user + """ + float_modules = [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, + torch.ao.nn.intrinsic.modules.fused.LinearReLU, torch.ao.nn.qat.dynamic.Linear] + + assert type(mod) in float_modules, \ + 'nn.quantized.dynamic.Linear.from_float only works for one of' + \ + str([float_mod.__name__ for float_mod in float_modules]) + assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined' + if type(mod) == nni.LinearReLU: + mod = mod[0] + if mod.qconfig is not None and mod.qconfig.weight is not None: + weight_observer = mod.qconfig.weight() + else: + # We have the circular import issues if we import the qconfig in the beginning of this file: + # https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the + # import until we need it. 
+ from torch.ao.quantization.qconfig import default_dynamic_qconfig + weight_observer = default_dynamic_qconfig.weight() + dtype = weight_observer.dtype + assert dtype in [torch.qint8, torch.float16], "The only supported dtypes for " \ + f"dynamic quantized linear are qint8 and float16 got: {dtype}" + weight_observer(mod.weight) + if dtype == torch.qint8: + qweight = _quantize_weight(mod.weight.float(), weight_observer) + elif dtype == torch.float16: + qweight = mod.weight.float() + else: + raise RuntimeError('Unsupported dtype specified for dynamic quantized Linear!') + qlinear = cls(mod.in_features, mod.out_features, dtype=dtype) + qlinear.set_weight_bias(qweight, mod.bias) + return qlinear + + @classmethod + def from_reference(cls, ref_qlinear): + """ Create a (fbgemm/qnnpack) dynamic quantized module from a reference quantized + module + Args: + ref_qlinear (Module): a reference quantized module, either produced by + torch.ao.quantization functions or provided by the user + """ + qlinear = cls(ref_qlinear.in_features, ref_qlinear.out_features, dtype=ref_qlinear.weight_dtype) + qweight = ref_qlinear.get_quantized_weight() + bias = ref_qlinear.bias + qlinear.set_weight_bias(qweight, bias) + return qlinear diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/rnn.py b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..02badab6796401e317bb0e75741e501a5b3ca1eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/rnn.py @@ -0,0 +1,1096 @@ +import numbers +import warnings + +import torch +import torch.nn as nn +from torch import Tensor # noqa: F401 +from torch._jit_internal import Tuple, Optional, List, Union, Dict # noqa: F401 +from torch.nn.utils.rnn import PackedSequence +from torch.ao.nn.quantized.modules.utils import _quantize_weight + +__all__ = ['pack_weight_bias', 'PackedParameter', 'RNNBase', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell', 'LSTMCell', + 'GRUCell', "apply_permutation"] + + +def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor: + return tensor.index_select(dim, permutation) + + +def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor: + warnings.warn("apply_permutation is deprecated, please use tensor.index_select(dim, permutation) instead") + return _apply_permutation(tensor, permutation, dim) + + +def pack_weight_bias(qweight, bias, dtype): + + if dtype == torch.qint8: + # for each layer, for each direction we need to quantize and pack + # weights and pack parameters in this order: + # + # w_ih, w_hh + packed_weight = \ + torch.ops.quantized.linear_prepack(qweight, bias) + + return packed_weight + else: + # for each layer, for each direction we need to quantize and pack + # weights and pack parameters in this order: + # + # packed_ih, packed_hh, b_ih, b_hh + packed_weight = torch.ops.quantized.linear_prepack_fp16( + qweight, bias) + + return packed_weight + + +class PackedParameter(torch.nn.Module): + def __init__(self, param): + super().__init__() + self.param = param + + def _save_to_state_dict(self, destination, prefix, keep_vars): + super()._save_to_state_dict(destination, prefix, keep_vars) + destination[prefix + 'param'] = self.param + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + self.param = state_dict[prefix + 'param'] + super()._load_from_state_dict(state_dict, 
prefix, local_metadata, False, + missing_keys, unexpected_keys, error_msgs) + + +class RNNBase(torch.nn.Module): + + _FLOAT_MODULE = nn.RNNBase + + _version = 2 + + def __init__(self, mode, input_size, hidden_size, + num_layers=1, bias=True, batch_first=False, + dropout=0., bidirectional=False, dtype=torch.qint8): + super().__init__() + + self.mode = mode + self.input_size = input_size + self.hidden_size = hidden_size + self.num_layers = num_layers + self.bias = bias + self.batch_first = batch_first + self.dropout = float(dropout) + self.bidirectional = bidirectional + self.dtype = dtype + self.version = 2 + self.training = False + num_directions = 2 if bidirectional else 1 + + # "type: ignore" is required since ints and Numbers are not fully comparable + # https://github.com/python/mypy/issues/8566 + if not isinstance(dropout, numbers.Number) \ + or not 0 <= dropout <= 1 or isinstance(dropout, bool): # type: ignore[operator] + raise ValueError("dropout should be a number in range [0, 1] " + "representing the probability of an element being " + "zeroed") + if dropout > 0 and num_layers == 1: # type: ignore[operator] + warnings.warn("dropout option adds dropout after all but last " + "recurrent layer, so non-zero dropout expects " + f"num_layers greater than 1, but got dropout={dropout} and " + f"num_layers={num_layers}") + + if mode == 'LSTM': + gate_size = 4 * hidden_size + elif mode == 'GRU': + gate_size = 3 * hidden_size + else: + raise ValueError("Unrecognized RNN mode: " + mode) + + _all_weight_values = [] + for layer in range(num_layers): + for direction in range(num_directions): + layer_input_size = input_size if layer == 0 else hidden_size * num_directions + + w_ih = torch.randn(gate_size, layer_input_size).to(torch.float) + w_hh = torch.randn(gate_size, hidden_size).to(torch.float) + b_ih = torch.randn(gate_size).to(torch.float) + b_hh = torch.randn(gate_size).to(torch.float) + if dtype == torch.qint8: + w_ih = torch.quantize_per_tensor(w_ih, scale=0.1, zero_point=0, dtype=torch.qint8) + w_hh = torch.quantize_per_tensor(w_hh, scale=0.1, zero_point=0, dtype=torch.qint8) + packed_ih = \ + torch.ops.quantized.linear_prepack(w_ih, b_ih) + packed_hh = \ + torch.ops.quantized.linear_prepack(w_hh, b_hh) + if self.version is None or self.version < 2: + cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic( + packed_ih, packed_hh, b_ih, b_hh) + else: + cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic( + packed_ih, packed_hh, b_ih, b_hh, True) + else: + packed_ih = torch.ops.quantized.linear_prepack_fp16(w_ih, b_ih) + packed_hh = torch.ops.quantized.linear_prepack_fp16(w_hh, b_hh) + cell_params = torch.ops.quantized.make_quantized_cell_params_fp16( + packed_ih, packed_hh) + + _all_weight_values.append(PackedParameter(cell_params)) + self._all_weight_values = torch.nn.ModuleList(_all_weight_values) + + def _get_name(self): + return 'DynamicQuantizedRNN' + + def extra_repr(self): + s = '{input_size}, {hidden_size}' + if self.num_layers != 1: + s += ', num_layers={num_layers}' + if self.bias is not True: + s += ', bias={bias}' + if self.batch_first is not False: + s += ', batch_first={batch_first}' + if self.dropout != 0: + s += ', dropout={dropout}' + if self.bidirectional is not False: + s += ', bidirectional={bidirectional}' + return s.format(**self.__dict__) + + def __repr__(self): + # We don't want to show `ModuleList` children, hence custom + # `__repr__`. 
This is the same as nn.Module.__repr__, except the check + # for the `PackedParameter` and `nn.ModuleList`. + # You should still override `extra_repr` to add more info. + extra_lines = [] + extra_repr = self.extra_repr() + # empty string will be split into list [''] + if extra_repr: + extra_lines = extra_repr.split('\n') + child_lines = [] + for key, module in self._modules.items(): + if isinstance(module, (PackedParameter, nn.ModuleList)): + continue + mod_str = repr(module) + mod_str = nn.modules.module._addindent(mod_str, 2) + child_lines.append('(' + key + '): ' + mod_str) + lines = extra_lines + child_lines + + main_str = self._get_name() + '(' + if lines: + # simple one-liner info, which most builtin Modules will use + if len(extra_lines) == 1 and not child_lines: + main_str += extra_lines[0] + else: + main_str += '\n ' + '\n '.join(lines) + '\n' + + main_str += ')' + return main_str + + def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None: + expected_input_dim = 2 if batch_sizes is not None else 3 + if input.dim() != expected_input_dim: + raise RuntimeError( + f'input must have {expected_input_dim} dimensions, got {input.dim()}') + if self.input_size != input.size(-1): + raise RuntimeError( + f'input.size(-1) must be equal to input_size. Expected {self.input_size}, got {input.size(-1)}') + + def get_expected_hidden_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]: + if batch_sizes is not None: + mini_batch = int(batch_sizes[0]) + else: + mini_batch = input.size(0) if self.batch_first else input.size(1) + num_directions = 2 if self.bidirectional else 1 + expected_hidden_size = (self.num_layers * num_directions, + mini_batch, self.hidden_size) + return expected_hidden_size + + def check_hidden_size( + self, hx: Tensor, expected_hidden_size: Tuple[int, int, int], + msg: str = 'Expected hidden size {}, got {}' + ) -> None: + if hx.size() != expected_hidden_size: + raise RuntimeError(msg.format( + expected_hidden_size, list(hx.size()))) + + def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]) -> None: + self.check_input(input, batch_sizes) + expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes) + self.check_hidden_size(hidden, expected_hidden_size, + msg='Expected hidden size {}, got {}') + + def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]) -> Tensor: + if permutation is None: + return hx + return _apply_permutation(hx, permutation) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + version = local_metadata.get('version', None) + self.version = version + super()._load_from_state_dict(state_dict, prefix, local_metadata, False, + missing_keys, unexpected_keys, error_msgs) + + def set_weight_bias(self, weight_bias_dict): + + def weight_bias_name(ihhh, layer, suffix): + weight_name = f"weight_{ihhh}_l{layer}{suffix}" + bias_name = f"bias_{ihhh}_l{layer}{suffix}" + return weight_name, bias_name + + num_directions = 2 if self.bidirectional else 1 + # TODO: dedup with __init__ of RNNBase + _all_weight_values = [] + for layer in range(self.num_layers): + for direction in range(num_directions): + suffix = "_reverse" if direction == 1 else "" + w_ih_name, b_ih_name = weight_bias_name("ih", layer, suffix) + w_hh_name, b_hh_name = weight_bias_name("hh", layer, suffix) + w_ih = weight_bias_dict[w_ih_name] + b_ih = weight_bias_dict[b_ih_name] + w_hh = weight_bias_dict[w_hh_name] + b_hh = 
weight_bias_dict[b_hh_name] + if w_ih.dtype == torch.qint8: + packed_ih = torch.ops.quantized.linear_prepack(w_ih, b_ih) + packed_hh = torch.ops.quantized.linear_prepack(w_hh, b_hh) + if self.version is None or self.version < 2: + cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic( + packed_ih, packed_hh, b_ih, b_hh) + else: + cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic( + packed_ih, packed_hh, b_ih, b_hh, True) + else: + packed_ih = torch.ops.quantized.linear_prepack_fp16(w_ih, b_ih) + packed_hh = torch.ops.quantized.linear_prepack_fp16(w_hh, b_hh) + cell_params = torch.ops.quantized.make_quantized_cell_params_fp16( + packed_ih, packed_hh) + + _all_weight_values.append(PackedParameter(cell_params)) + self._all_weight_values = torch.nn.ModuleList(_all_weight_values) + + @classmethod + def from_float(cls, mod): + assert type(mod) in {torch.nn.LSTM, + torch.nn.GRU}, 'nn.quantized.dynamic.RNNBase.from_float only works for nn.LSTM and nn.GRU' + assert hasattr( + mod, + 'qconfig' + ), 'Input float module must have qconfig defined' + + if mod.qconfig is not None and mod.qconfig.weight is not None: + weight_observer_method = mod.qconfig.weight + else: + # We have the circular import issues if we import the qconfig in the beginning of this file: + # https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the + # import until we need it. + from torch.ao.quantization.qconfig import default_dynamic_qconfig + weight_observer_method = default_dynamic_qconfig.weight + + dtype = weight_observer_method().dtype + supported_scalar_types = [torch.qint8, torch.float16] + if dtype not in supported_scalar_types: + raise RuntimeError(f'Unsupported dtype for dynamic RNN quantization: {dtype}') + # RNNBase can be either LSTM or GRU + qRNNBase: Union[LSTM, GRU] + if mod.mode == 'LSTM': + qRNNBase = LSTM(mod.input_size, mod.hidden_size, mod.num_layers, + mod.bias, mod.batch_first, mod.dropout, mod.bidirectional, dtype) + elif mod.mode == 'GRU': + qRNNBase = GRU(mod.input_size, mod.hidden_size, mod.num_layers, + mod.bias, mod.batch_first, mod.dropout, mod.bidirectional, dtype) + else: + raise NotImplementedError('Only LSTM/GRU is supported for QuantizedRNN for now') + + num_directions = 2 if mod.bidirectional else 1 + + assert mod.bias + + _all_weight_values = [] + for layer in range(qRNNBase.num_layers): + for direction in range(num_directions): + suffix = '_reverse' if direction == 1 else '' + + def retrieve_weight_bias(ihhh): + weight_name = f'weight_{ihhh}_l{layer}{suffix}' + bias_name = f'bias_{ihhh}_l{layer}{suffix}' + weight = getattr(mod, weight_name) + bias = getattr(mod, bias_name) + return weight, bias + + weight_ih, bias_ih = retrieve_weight_bias('ih') + weight_hh, bias_hh = retrieve_weight_bias('hh') + + if dtype == torch.qint8: + def quantize_and_pack(w, b): + weight_observer = weight_observer_method() + weight_observer(w) + qweight = _quantize_weight(w.float(), weight_observer) + packed_weight = \ + torch.ops.quantized.linear_prepack(qweight, b) + return packed_weight + packed_ih = quantize_and_pack(weight_ih, bias_ih) + packed_hh = quantize_and_pack(weight_hh, bias_hh) + if qRNNBase.version is None or qRNNBase.version < 2: + cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic( + packed_ih, packed_hh, bias_ih, bias_hh) + else: + cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic( + packed_ih, packed_hh, bias_ih, bias_hh, True) + + elif dtype == torch.float16: + packed_ih = 
torch.ops.quantized.linear_prepack_fp16( + weight_ih.float(), bias_ih) + packed_hh = torch.ops.quantized.linear_prepack_fp16( + weight_hh.float(), bias_hh) + + cell_params = torch.ops.quantized.make_quantized_cell_params_fp16( + packed_ih, packed_hh) + else: + raise RuntimeError('Unsupported dtype specified for dynamic quantized LSTM!') + + _all_weight_values.append(PackedParameter(cell_params)) + qRNNBase._all_weight_values = torch.nn.ModuleList(_all_weight_values) + + return qRNNBase + + def _weight_bias(self): + # Returns a dict of weights and biases + weight_bias_dict: Dict[str, Dict] = {'weight' : {}, 'bias' : {}} + count = 0 + num_directions = 2 if self.bidirectional else 1 + for layer in range(self.num_layers): + for direction in range(num_directions): + suffix = '_reverse' if direction == 1 else '' + key_name1 = f'weight_ih_l{layer}{suffix}' + key_name2 = f'weight_hh_l{layer}{suffix}' + # packed weights are part of torchbind class, CellParamsSerializationType + # Within the packed weight class, the weight and bias are accessible as Tensors + packed_weight_bias = self._all_weight_values[count].param.__getstate__()[0][4] + weight_bias_dict['weight'][key_name1] = packed_weight_bias[0].__getstate__()[0][0] + weight_bias_dict['weight'][key_name2] = packed_weight_bias[1].__getstate__()[0][0] + key_name1 = f'bias_ih_l{layer}{suffix}' + key_name2 = f'bias_hh_l{layer}{suffix}' + weight_bias_dict['bias'][key_name1] = packed_weight_bias[0].__getstate__()[0][1] + weight_bias_dict['bias'][key_name2] = packed_weight_bias[1].__getstate__()[0][1] + count = count + 1 + return weight_bias_dict + + def get_weight(self): + return self._weight_bias()['weight'] + + def get_bias(self): + return self._weight_bias()['bias'] + + +class LSTM(RNNBase): + r""" + A dynamic quantized LSTM module with floating point tensor as inputs and outputs. + We adopt the same interface as `torch.nn.LSTM`, please see + https://pytorch.org/docs/stable/nn.html#torch.nn.LSTM for documentation. + + Examples:: + + >>> # xdoctest: +SKIP + >>> rnn = nn.LSTM(10, 20, 2) + >>> input = torch.randn(5, 3, 10) + >>> h0 = torch.randn(2, 3, 20) + >>> c0 = torch.randn(2, 3, 20) + >>> output, (hn, cn) = rnn(input, (h0, c0)) + """ + _FLOAT_MODULE = nn.LSTM + + __overloads__ = {'forward': ['forward_packed', 'forward_tensor']} + + def __init__(self, *args, **kwargs): + super().__init__('LSTM', *args, **kwargs) + + def _get_name(self): + return 'DynamicQuantizedLSTM' + + def forward_impl( + self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]], + batch_sizes: Optional[Tensor], max_batch_size: int, + sorted_indices: Optional[Tensor] + ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]: + if hx is None: + num_directions = 2 if self.bidirectional else 1 + zeros = torch.zeros(self.num_layers * num_directions, + max_batch_size, self.hidden_size, + dtype=input.dtype, device=input.device) + hx = (zeros, zeros) + else: + # Each batch of the hidden state should match the input sequence that + # the user believes he/she is passing in. 
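+            # For packed inputs the batch may have been sorted by sequence
+            # length; permute_hidden reorders (h_0, c_0) along the batch
+            # dimension so they line up with that sorted order.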
+ hx = self.permute_hidden(hx, sorted_indices) + + self.check_forward_args(input, hx, batch_sizes) + + _all_params = ([m.param for m in self._all_weight_values]) + if batch_sizes is None: + result = torch.quantized_lstm(input, hx, _all_params, self.bias, self.num_layers, + float(self.dropout), self.training, self.bidirectional, + self.batch_first, dtype=self.dtype, use_dynamic=True) + else: + result = torch.quantized_lstm(input, batch_sizes, hx, _all_params, self.bias, + self.num_layers, float(self.dropout), self.training, + self.bidirectional, dtype=self.dtype, use_dynamic=True) + output = result[0] + hidden = result[1:] + + return output, hidden + + @torch.jit.export + def forward_tensor( + self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None + ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]: + batch_sizes = None + max_batch_size = input.size(0) if self.batch_first else input.size(1) + sorted_indices = None + unsorted_indices = None + + output, hidden = self.forward_impl( + input, hx, batch_sizes, max_batch_size, sorted_indices) + + return output, self.permute_hidden(hidden, unsorted_indices) + + @torch.jit.export + def forward_packed( + self, input: PackedSequence, hx: Optional[Tuple[Tensor, Tensor]] = None + ) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]]: + input_, batch_sizes, sorted_indices, unsorted_indices = input + max_batch_size = int(batch_sizes[0]) + + output_, hidden = self.forward_impl( + input_, hx, batch_sizes, max_batch_size, sorted_indices + ) + + output = PackedSequence(output_, batch_sizes, + sorted_indices, unsorted_indices) + return output, self.permute_hidden(hidden, unsorted_indices) + + # "type: ignore" is required due to issue #43072 + def permute_hidden( # type: ignore[override] + self, hx: Tuple[Tensor, Tensor], permutation: Optional[Tensor] + ) -> Tuple[Tensor, Tensor]: + if permutation is None: + return hx + return _apply_permutation(hx[0], permutation), _apply_permutation(hx[1], permutation) + + # "type: ignore" is required due to issue #43072 + def check_forward_args( # type: ignore[override] + self, input: Tensor, hidden: Tuple[Tensor, Tensor], batch_sizes: Optional[Tensor] + ) -> None: + self.check_input(input, batch_sizes) + expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes) + + self.check_hidden_size(hidden[0], expected_hidden_size, + 'Expected hidden[0] size {}, got {}') + self.check_hidden_size(hidden[1], expected_hidden_size, + 'Expected hidden[1] size {}, got {}') + + @torch.jit.ignore + def forward(self, input, hx=None): + if isinstance(input, PackedSequence): + return self.forward_packed(input, hx) + else: + return self.forward_tensor(input, hx) + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + + @classmethod + def from_reference(cls, ref_mod): + assert hasattr(ref_mod, "weight_ih_l0_dtype"), "We are assuming weight_ih_l0 " + "exists in LSTM, may need to relax the assumption to support the use case" + qmod = cls( + ref_mod.input_size, + ref_mod.hidden_size, + ref_mod.num_layers, + ref_mod.bias, + ref_mod.batch_first, + ref_mod.dropout, + ref_mod.bidirectional, + # assuming there is layer 0, which should be OK + ref_mod.weight_ih_l0_dtype, + ) + qmod.set_weight_bias(ref_mod.get_quantized_weight_bias_dict()) + return qmod + + +class GRU(RNNBase): + r"""Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence. + + + For each element in the input sequence, each layer computes the following + function: + + .. 
math:: + \begin{array}{ll} + r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\ + z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\ + n_t = \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)}+ b_{hn})) \\ + h_t = (1 - z_t) \odot n_t + z_t \odot h_{(t-1)} + \end{array} + + where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input + at time `t`, :math:`h_{(t-1)}` is the hidden state of the layer + at time `t-1` or the initial hidden state at time `0`, and :math:`r_t`, + :math:`z_t`, :math:`n_t` are the reset, update, and new gates, respectively. + :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product. + + In a multilayer GRU, the input :math:`x^{(l)}_t` of the :math:`l` -th layer + (:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by + dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random + variable which is :math:`0` with probability :attr:`dropout`. + + Args: + input_size: The number of expected features in the input `x` + hidden_size: The number of features in the hidden state `h` + num_layers: Number of recurrent layers. E.g., setting ``num_layers=2`` + would mean stacking two GRUs together to form a `stacked GRU`, + with the second GRU taking in outputs of the first GRU and + computing the final results. Default: 1 + bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`. + Default: ``True`` + batch_first: If ``True``, then the input and output tensors are provided + as (batch, seq, feature). Default: ``False`` + dropout: If non-zero, introduces a `Dropout` layer on the outputs of each + GRU layer except the last layer, with dropout probability equal to + :attr:`dropout`. Default: 0 + bidirectional: If ``True``, becomes a bidirectional GRU. Default: ``False`` + + Inputs: input, h_0 + - **input** of shape `(seq_len, batch, input_size)`: tensor containing the features + of the input sequence. The input can also be a packed variable length + sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence` + for details. + - **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor + containing the initial hidden state for each element in the batch. + Defaults to zero if not provided. If the RNN is bidirectional, + num_directions should be 2, else it should be 1. + + Outputs: output, h_n + - **output** of shape `(seq_len, batch, num_directions * hidden_size)`: tensor + containing the output features h_t from the last layer of the GRU, + for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has been + given as the input, the output will also be a packed sequence. + For the unpacked case, the directions can be separated + using ``output.view(seq_len, batch, num_directions, hidden_size)``, + with forward and backward being direction `0` and `1` respectively. + + Similarly, the directions can be separated in the packed case. + - **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor + containing the hidden state for `t = seq_len` + + Like *output*, the layers can be separated using + ``h_n.view(num_layers, num_directions, batch, hidden_size)``. + + Shape: + - Input1: :math:`(L, N, H_{in})` tensor containing input features where + :math:`H_{in}=\text{input\_size}` and `L` represents a sequence length. + - Input2: :math:`(S, N, H_{out})` tensor + containing the initial hidden state for each element in the batch. 
+ :math:`H_{out}=\text{hidden\_size}` + Defaults to zero if not provided. where :math:`S=\text{num\_layers} * \text{num\_directions}` + If the RNN is bidirectional, num_directions should be 2, else it should be 1. + - Output1: :math:`(L, N, H_{all})` where :math:`H_{all}=\text{num\_directions} * \text{hidden\_size}` + - Output2: :math:`(S, N, H_{out})` tensor containing the next hidden state + for each element in the batch + + Attributes: + weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer + (W_ir|W_iz|W_in), of shape `(3*hidden_size, input_size)` for `k = 0`. + Otherwise, the shape is `(3*hidden_size, num_directions * hidden_size)` + weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer + (W_hr|W_hz|W_hn), of shape `(3*hidden_size, hidden_size)` + bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer + (b_ir|b_iz|b_in), of shape `(3*hidden_size)` + bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer + (b_hr|b_hz|b_hn), of shape `(3*hidden_size)` + + .. note:: + All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` + where :math:`k = \frac{1}{\text{hidden\_size}}` + + .. note:: + The calculation of new gate :math:`n_t` subtly differs from the original paper and other frameworks. + In the original implementation, the Hadamard product :math:`(\odot)` between :math:`r_t` and the + previous hidden state :math:`h_{(t-1)}` is done before the multiplication with the weight matrix + `W` and addition of bias: + + .. math:: + \begin{aligned} + n_t = \tanh(W_{in} x_t + b_{in} + W_{hn} ( r_t \odot h_{(t-1)} ) + b_{hn}) + \end{aligned} + + This is in contrast to PyTorch implementation, which is done after :math:`W_{hn} h_{(t-1)}` + + .. math:: + \begin{aligned} + n_t = \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)}+ b_{hn})) + \end{aligned} + + This implementation differs on purpose for efficiency. + + .. include:: ../cudnn_persistent_rnn.rst + + Examples:: + + >>> # xdoctest: +SKIP + >>> rnn = nn.GRU(10, 20, 2) + >>> input = torch.randn(5, 3, 10) + >>> h0 = torch.randn(2, 3, 20) + >>> output, hn = rnn(input, h0) + """ + _FLOAT_MODULE = nn.GRU + + __overloads__ = {'forward': ['forward_packed', 'forward_tensor']} + + def __init__(self, *args, **kwargs): + super().__init__('GRU', *args, **kwargs) + + def _get_name(self): + return 'DynamicQuantizedGRU' + + def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]) -> None: + self.check_input(input, batch_sizes) + expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes) + + self.check_hidden_size(hidden, expected_hidden_size, + 'Expected hidden size {}, got {}') + + def forward_impl( + self, input: Tensor, hx: Optional[Tensor], + batch_sizes: Optional[Tensor], max_batch_size: int, + sorted_indices: Optional[Tensor] + ) -> Tuple[Tensor, Tensor]: + if hx is None: + num_directions = 2 if self.bidirectional else 1 + zeros = torch.zeros(self.num_layers * num_directions, + max_batch_size, self.hidden_size, + dtype=input.dtype, device=input.device) + hx = zeros + else: + # Each batch of the hidden state should match the input sequence that + # the user believes he/she is passing in. 
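+            # As in the LSTM path above, reorder h_0 along the batch dimension
+            # to match the length-sorted order used by packed sequences.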
+ hx = self.permute_hidden(hx, sorted_indices) + + self.check_forward_args(input, hx, batch_sizes) + + _all_params = ([m.param for m in self._all_weight_values]) + if batch_sizes is None: + result = torch.quantized_gru(input, + hx, + _all_params, + self.bias, + self.num_layers, + self.dropout, + self.training, + self.bidirectional, + self.batch_first) + else: + result = torch.quantized_gru(input, + batch_sizes, + hx, + _all_params, + self.bias, + self.num_layers, + self.dropout, + self.training, + self.bidirectional) + output = result[0] + hidden = result[1] + + return output, hidden + + + @torch.jit.export + def forward_tensor( + self, input: Tensor, hx: Optional[Tensor] = None + ) -> Tuple[Tensor, Tensor]: + batch_sizes = None + max_batch_size = input.size(0) if self.batch_first else input.size(1) + sorted_indices = None + unsorted_indices = None + + output, hidden = self.forward_impl( + input, hx, batch_sizes, max_batch_size, sorted_indices) + + return output, self.permute_hidden(hidden, unsorted_indices) + + @torch.jit.export + def forward_packed( + self, input: PackedSequence, hx: Optional[Tensor] = None + ) -> Tuple[PackedSequence, Tensor]: + input_, batch_sizes, sorted_indices, unsorted_indices = input + max_batch_size = int(batch_sizes[0]) + output_, hidden = self.forward_impl( + input_, hx, batch_sizes, max_batch_size, sorted_indices + ) + + output = PackedSequence(output_, batch_sizes, + sorted_indices, unsorted_indices) + return output, self.permute_hidden(hidden, unsorted_indices) + + def permute_hidden( + self, hx: Tensor, permutation: Optional[Tensor] + ) -> Tensor: + if permutation is None: + return hx + return _apply_permutation(hx, permutation) + + @torch.jit.ignore + def forward(self, input, hx=None): + if isinstance(input, PackedSequence): + return self.forward_packed(input, hx) + else: + return self.forward_tensor(input, hx) + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + + @classmethod + def from_reference(cls, ref_mod): + assert hasattr(ref_mod, "weight_ih_l0_dtype"), "We are assuming weight_ih_l0 " + "exists in LSTM, may need to relax the assumption to support the use case" + qmod = cls( + ref_mod.input_size, + ref_mod.hidden_size, + ref_mod.num_layers, + ref_mod.bias, + ref_mod.batch_first, + ref_mod.dropout, + ref_mod.bidirectional, + # assuming there is layer 0, which should be OK + ref_mod.weight_ih_l0_dtype, + ) + qmod.set_weight_bias(ref_mod.get_quantized_weight_bias_dict()) + return qmod + +class RNNCellBase(torch.nn.Module): + # _FLOAT_MODULE = nn.CellRNNBase + __constants__ = ['input_size', 'hidden_size', 'bias'] + + def __init__(self, input_size, hidden_size, bias=True, num_chunks=4, dtype=torch.qint8): + super().__init__() + self.input_size = input_size + self.hidden_size = hidden_size + self.bias = bias + self.weight_dtype = dtype + if bias: + self.bias_ih = torch.randn(num_chunks * hidden_size).to(dtype=torch.float) + self.bias_hh = torch.randn(num_chunks * hidden_size).to(dtype=torch.float) + else: + self.register_parameter('bias_ih', None) + self.register_parameter('bias_hh', None) + + weight_ih = torch.randn(num_chunks * hidden_size, input_size).to(torch.float) + weight_hh = torch.randn(num_chunks * hidden_size, hidden_size).to(torch.float) + if dtype == torch.qint8: + weight_ih = torch.quantize_per_tensor(weight_ih, scale=1, zero_point=0, dtype=torch.qint8) + weight_hh = torch.quantize_per_tensor(weight_hh, scale=1, zero_point=0, dtype=torch.qint8) + + if dtype == torch.qint8: + # for each layer, for each direction we 
need to quantize and pack + # weights and pack parameters in this order: + # + # w_ih, w_hh + packed_weight_ih = \ + torch.ops.quantized.linear_prepack(weight_ih, self.bias_ih) + packed_weight_hh = \ + torch.ops.quantized.linear_prepack(weight_hh, self.bias_hh) + else: + # for each layer, for each direction we need to quantize and pack + # weights and pack parameters in this order: + # + # packed_ih, packed_hh, b_ih, b_hh + packed_weight_ih = torch.ops.quantized.linear_prepack_fp16( + weight_ih, self.bias_ih) + packed_weight_hh = torch.ops.quantized.linear_prepack_fp16( + weight_hh, self.bias_hh) + + self._packed_weight_ih = packed_weight_ih + self._packed_weight_hh = packed_weight_hh + + def _get_name(self): + return 'DynamicQuantizedRNNBase' + + def extra_repr(self): + s = '{input_size}, {hidden_size}' + if 'bias' in self.__dict__ and self.bias is not True: + s += ', bias={bias}' + if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh": + s += ', nonlinearity={nonlinearity}' + return s.format(**self.__dict__) + + def check_forward_input(self, input): + if input.size(1) != self.input_size: + raise RuntimeError( + f"input has inconsistent input_size: got {input.size(1)}, expected {self.input_size}") + + def check_forward_hidden(self, input: Tensor, hx: Tensor, hidden_label: str = '') -> None: + if input.size(0) != hx.size(0): + raise RuntimeError( + f"Input batch size {input.size(0)} doesn't match hidden{hidden_label} batch size {hx.size(0)}") + + if hx.size(1) != self.hidden_size: + raise RuntimeError( + f"hidden{hidden_label} has inconsistent hidden_size: got {hx.size(1)}, expected {self.hidden_size}") + + @classmethod + def from_float(cls, mod): + assert type(mod) in {torch.nn.LSTMCell, + torch.nn.GRUCell, + torch.nn.RNNCell}, 'nn.quantized.dynamic.RNNCellBase.from_float \ + only works for nn.LSTMCell, nn.GRUCell and nn.RNNCell' + assert hasattr( + mod, 'qconfig'), 'Input float module must have qconfig defined' + + if mod.qconfig is not None and mod.qconfig.weight is not None: + weight_observer_method = mod.qconfig.weight + else: + # We have the circular import issues if we import the qconfig in the beginning of this file: + # https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the + # import until we need it. 
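+            # Same fallback as in the dynamic Linear/RNN modules: use the
+            # default dynamic qconfig's weight observer.  Illustrative
+            # conversion (assuming a plain float cell with a qconfig set):
+            #   cell = torch.nn.LSTMCell(10, 20)
+            #   cell.qconfig = torch.ao.quantization.default_dynamic_qconfig
+            #   qcell = LSTMCell.from_float(cell)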
+ from torch.ao.quantization.qconfig import default_dynamic_qconfig + weight_observer_method = default_dynamic_qconfig.weight + + dtype = weight_observer_method().dtype + supported_scalar_types = [torch.qint8, torch.float16] + if dtype not in supported_scalar_types: + raise RuntimeError(f'Unsupported dtype for dynamic RNN quantization: {dtype}') + + qRNNCellBase: Union[LSTMCell, GRUCell, RNNCell] + + if type(mod) == torch.nn.LSTMCell: + qRNNCellBase = LSTMCell(mod.input_size, mod.hidden_size, bias=mod.bias, dtype=dtype) + elif type(mod) == torch.nn.GRUCell: + qRNNCellBase = GRUCell(mod.input_size, mod.hidden_size, bias=mod.bias, dtype=dtype) + elif type(mod) == torch.nn.RNNCell: + qRNNCellBase = RNNCell(mod.input_size, mod.hidden_size, bias=mod.bias, nonlinearity=mod.nonlinearity, dtype=dtype) + else: + raise NotImplementedError('Only LSTMCell, GRUCell and RNNCell \ + are supported for QuantizedRNN for now') + + assert mod.bias + + def _observe_and_quantize_weight(weight): + if dtype == torch.qint8: + weight_observer = weight_observer_method() + weight_observer(weight) + qweight = _quantize_weight(weight.float(), weight_observer) + return qweight + else: + return weight.float() + + qRNNCellBase._packed_weight_ih = pack_weight_bias(_observe_and_quantize_weight(mod.weight_ih), mod.bias_ih, dtype) + qRNNCellBase._packed_weight_hh = pack_weight_bias(_observe_and_quantize_weight(mod.weight_hh), mod.bias_hh, dtype) + return qRNNCellBase + + @classmethod + def from_reference(cls, ref_mod): + assert hasattr(ref_mod, "weight_ih_dtype"), "We are assuming weight_ih " + "exists in reference module, may need to relax the assumption to support the use case" + if hasattr(ref_mod, "nonlinearity"): + qmod = cls( + ref_mod.input_size, + ref_mod.hidden_size, + ref_mod.bias, + ref_mod.nonlinearity, + dtype=ref_mod.weight_ih_dtype + ) + else: + qmod = cls( + ref_mod.input_size, + ref_mod.hidden_size, + ref_mod.bias, + dtype=ref_mod.weight_ih_dtype + ) + weight_bias_dict = { + "weight": { + "weight_ih": ref_mod.get_quantized_weight_ih(), + "weight_hh": ref_mod.get_quantized_weight_hh(), + }, + "bias": { + "bias_ih": ref_mod.bias_ih, + "bias_hh": ref_mod.bias_hh, + } + } + qmod.set_weight_bias(weight_bias_dict) + return qmod + + def _weight_bias(self): + # Returns a dict of weights and biases + weight_bias_dict: Dict[str, Dict] = {'weight' : {}, 'bias' : {}} + w1, b1 = self._packed_weight_ih.__getstate__()[0] + w2, b2 = self._packed_weight_hh.__getstate__()[0] + # TODO: these can be simplified to one level? e.g. using weight_ih as key + # directly + weight_bias_dict['weight']['weight_ih'] = w1 + weight_bias_dict['weight']['weight_hh'] = w2 + weight_bias_dict['bias']['bias_ih'] = b1 + weight_bias_dict['bias']['bias_hh'] = b2 + return weight_bias_dict + + def get_weight(self): + return self._weight_bias()['weight'] + + def get_bias(self): + return self._weight_bias()['bias'] + + def set_weight_bias(self, weight_bias_dict): + # TODO: these can be simplified to one level? e.g. 
using weight_ih as key + # directly + self._packed_weight_ih = pack_weight_bias( + weight_bias_dict["weight"]["weight_ih"], + weight_bias_dict["bias"]["bias_ih"], + self.weight_dtype) + self._packed_weight_hh = pack_weight_bias( + weight_bias_dict["weight"]["weight_hh"], + weight_bias_dict["bias"]["bias_hh"], + self.weight_dtype) + + def _save_to_state_dict(self, destination, prefix, keep_vars): + super()._save_to_state_dict(destination, prefix, keep_vars) + destination[prefix + '_packed_weight_ih'] = self._packed_weight_ih + destination[prefix + '_packed_weight_hh'] = self._packed_weight_hh + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + self._packed_weight_ih = state_dict.pop(prefix + '_packed_weight_ih') + self._packed_weight_hh = state_dict.pop(prefix + '_packed_weight_hh') + super()._load_from_state_dict(state_dict, prefix, local_metadata, False, + missing_keys, unexpected_keys, error_msgs) + + +class RNNCell(RNNCellBase): + r"""An Elman RNN cell with tanh or ReLU non-linearity. + A dynamic quantized RNNCell module with floating point tensor as inputs and outputs. + Weights are quantized to 8 bits. We adopt the same interface as `torch.nn.RNNCell`, + please see https://pytorch.org/docs/stable/nn.html#torch.nn.RNNCell for documentation. + + Examples:: + + >>> # xdoctest: +SKIP + >>> rnn = nn.RNNCell(10, 20) + >>> input = torch.randn(6, 3, 10) + >>> hx = torch.randn(3, 20) + >>> output = [] + >>> for i in range(6): + ... hx = rnn(input[i], hx) + ... output.append(hx) + """ + __constants__ = ['input_size', 'hidden_size', 'bias', 'nonlinearity'] + + def __init__(self, input_size, hidden_size, bias=True, nonlinearity="tanh", dtype=torch.qint8): + super().__init__(input_size, hidden_size, bias, num_chunks=1, dtype=dtype) + self.nonlinearity = nonlinearity + + def _get_name(self): + return 'DynamicQuantizedRNNCell' + + def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor: + self.check_forward_input(input) + if hx is None: + hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + self.check_forward_hidden(input, hx, '') + if self.nonlinearity == "tanh": + ret = torch.ops.quantized.quantized_rnn_tanh_cell_dynamic( + input, hx, + self._packed_weight_ih, self._packed_weight_hh, + self.bias_ih, self.bias_hh) + elif self.nonlinearity == "relu": + ret = torch.ops.quantized.quantized_rnn_relu_cell_dynamic( + input, hx, + self._packed_weight_ih, self._packed_weight_hh, + self.bias_ih, self.bias_hh) + else: + ret = input # TODO: remove when jit supports exception flow + raise RuntimeError( + f"Unknown nonlinearity: {self.nonlinearity}") + return ret + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + + +class LSTMCell(RNNCellBase): + r"""A long short-term memory (LSTM) cell. + + A dynamic quantized LSTMCell module with floating point tensor as inputs and outputs. + Weights are quantized to 8 bits. We adopt the same interface as `torch.nn.LSTMCell`, + please see https://pytorch.org/docs/stable/nn.html#torch.nn.LSTMCell for documentation. + + Examples:: + + >>> # xdoctest: +SKIP + >>> rnn = nn.LSTMCell(10, 20) + >>> input = torch.randn(6, 3, 10) + >>> hx = torch.randn(3, 20) + >>> cx = torch.randn(3, 20) + >>> output = [] + >>> for i in range(6): + ... hx, cx = rnn(input[i], (hx, cx)) + ... 
output.append(hx) + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, num_chunks=4, **kwargs) # type: ignore[misc] + + def _get_name(self): + return 'DynamicQuantizedLSTMCell' + + def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]: + self.check_forward_input(input) + if hx is None: + zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + hx = (zeros, zeros) + self.check_forward_hidden(input, hx[0], '[0]') + self.check_forward_hidden(input, hx[1], '[1]') + return torch.ops.quantized.quantized_lstm_cell_dynamic( + input, hx, + self._packed_weight_ih, self._packed_weight_hh, + self.bias_ih, self.bias_hh) + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) + + +class GRUCell(RNNCellBase): + r"""A gated recurrent unit (GRU) cell + + A dynamic quantized GRUCell module with floating point tensor as inputs and outputs. + Weights are quantized to 8 bits. We adopt the same interface as `torch.nn.GRUCell`, + please see https://pytorch.org/docs/stable/nn.html#torch.nn.GRUCell for documentation. + + Examples:: + + >>> # xdoctest: +SKIP + >>> rnn = nn.GRUCell(10, 20) + >>> input = torch.randn(6, 3, 10) + >>> hx = torch.randn(3, 20) + >>> output = [] + >>> for i in range(6): + ... hx = rnn(input[i], hx) + ... output.append(hx) + """ + + def __init__(self, input_size, hidden_size, bias=True, dtype=torch.qint8): + super().__init__(input_size, hidden_size, bias, num_chunks=3, dtype=dtype) + + def _get_name(self): + return 'DynamicQuantizedGRUCell' + + def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor: + self.check_forward_input(input) + if hx is None: + hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + self.check_forward_hidden(input, hx, '') + return torch.ops.quantized.quantized_gru_cell_dynamic( + input, hx, + self._packed_weight_ih, self._packed_weight_hh, + self.bias_ih, self.bias_hh, + ) + + @classmethod + def from_float(cls, mod): + return super().from_float(mod) diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7fcf48ec51c0a7b6ce430c79dfee99cb04f4777 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..521952d30151ed34647dab42c2c2bed2acafe415 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/conv.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/conv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4de31729404e971add98add7cd94f358478e9df6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/conv.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/linear.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e3d6a185430ab29c7e65118747bd2951f6c0fb2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/linear.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/rnn.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/rnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..755606e4bb964cbd88dbca076d538d276e139deb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/rnn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/sparse.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c0acf176911894fd38f396447c4ef8cdf1f5386 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/sparse.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73fd3e5749e0676f7c2d1272af80cfe75245541c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/conv.py b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..910223056fba9ca12333136c538765cf8643bc54 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/conv.py @@ -0,0 +1,318 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Optional, Dict, Any, List +from torch.nn.common_types import _size_1_t +from .utils import ReferenceQuantizedModule + +__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d'] + +class _ConvNd(torch.nn.modules.conv._ConvNd, ReferenceQuantizedModule): + """ A reference version of nn.quantized.Conv2d + we will not pack the parameters in this module, since weight packing is an + optimization for quantized backends supported in PyTorch (fbgemm/qnnpack), + this is useful when user want to use this module in other backends like Glow. 
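+
+    A minimal illustrative conversion, assuming simple per-tensor affine
+    weight qparams::
+
+        >>> # xdoctest: +SKIP
+        >>> float_conv = torch.nn.Conv2d(3, 8, 3)
+        >>> weight_qparams = {"qscheme": torch.per_tensor_affine,
+        ...                   "dtype": torch.quint8,
+        ...                   "scale": 1.0,
+        ...                   "zero_point": 0}
+        >>> ref_conv = Conv2d.from_float(float_conv, weight_qparams)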
+ """ + __annotations__ = {"bias": Optional[torch.Tensor]} + _IS_REFERENCE = True + + @staticmethod + def from_float(cls, float_conv, weight_qparams): + qref_conv = cls( + float_conv.in_channels, + float_conv.out_channels, + float_conv.kernel_size, # type: ignore[arg-type] + float_conv.stride, # type: ignore[arg-type] + float_conv.padding, # type: ignore[arg-type] + float_conv.dilation, # type: ignore[arg-type] + float_conv.groups, + float_conv.bias is not None, # type: ignore[arg-type] + float_conv.padding_mode, + device=float_conv.weight.device, + dtype=float_conv.weight.dtype, + weight_qparams=weight_qparams) + qref_conv.weight = torch.nn.Parameter(float_conv.weight.detach()) + if float_conv.bias is not None: + qref_conv.bias = torch.nn.Parameter(float_conv.bias.detach()) + return qref_conv + +class Conv1d(_ConvNd, nn.Conv1d): + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: _size_1_t, + stride: _size_1_t = 1, + padding: _size_1_t = 0, + dilation: _size_1_t = 1, + groups: int = 1, + bias: bool = True, + padding_mode: str = "zeros", + device=None, + dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None): + nn.Conv1d.__init__( + self, in_channels, out_channels, kernel_size, stride, padding, dilation, + groups, bias, padding_mode, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + we have: + w(float) -- quant - dequant \ + x(float) ------------- F.conv1d --- + + In the full model, we will see + w(float) -- quant - *dequant \ + x -- quant --- *dequant -- *F.conv1d --- *quant - dequant + and the backend should be able to fuse the ops with `*` into a quantized conv1d + """ + weight_quant_dequant = self.get_weight() + result = F.conv1d( + x, weight_quant_dequant, self.bias, self.stride, + self.padding, self.dilation, self.groups) + return result + + def _get_name(self): + return "QuantizedConv1d(Reference)" + + @classmethod + def from_float(cls, float_conv, weight_qparams): + return _ConvNd.from_float(cls, float_conv, weight_qparams) + +class Conv2d(_ConvNd, nn.Conv2d): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True, + padding_mode='zeros', + device=None, + dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None): + nn.Conv2d.__init__( + self, in_channels, out_channels, kernel_size, stride, padding, dilation, + groups, bias, padding_mode, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + we have: + w(float) -- quant - dequant \ + x(float) ------------- F.conv2d --- + + In the full model, we will see + w(float) -- quant - *dequant \ + x -- quant --- *dequant -- *F.conv2d --- *quant - dequant + and the backend should be able to fuse the ops with `*` into a quantized conv2d + """ + weight_quant_dequant = self.get_weight() + result = F.conv2d( + x, weight_quant_dequant, self.bias, self.stride, + self.padding, self.dilation, self.groups) + return result + + def _get_name(self): + return "QuantizedConv2d(Reference)" + + @classmethod + def from_float(cls, float_conv, weight_qparams): + return _ConvNd.from_float(cls, float_conv, weight_qparams) + +class Conv3d(_ConvNd, nn.Conv3d): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True, + padding_mode="zeros", + device=None, + dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None): + nn.Conv3d.__init__( + self, 
in_channels, out_channels, kernel_size, stride, padding, dilation, + groups, bias, padding_mode, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + we have: + w(float) -- quant - dequant \ + x(float) ------------- F.conv3d --- + + In the full model, we will see + w(float) -- quant - *dequant \ + x -- quant --- *dequant -- *F.conv3d --- *quant - dequant + and the backend should be able to fuse the ops with `*` into a quantized conv3d + """ + weight_quant_dequant = self.get_weight() + result = F.conv3d( + x, weight_quant_dequant, self.bias, self.stride, + self.padding, self.dilation, self.groups) + return result + + def _get_name(self): + return "QuantizedConv3d(Reference)" + + @classmethod + def from_float(cls, float_conv, weight_qparams): + return _ConvNd.from_float(cls, float_conv, weight_qparams) + +class _ConvTransposeNd(_ConvNd, torch.nn.modules.conv._ConvTransposeNd): + """ A reference version of nn.quantized.ConvTranspose2d + we will not pack the parameters in this module, since weight packing is an + optimization for quantized backends supported in PyTorch (fbgemm/qnnpack), + this is useful when user want to use this module in other backends like Glow. + """ + @staticmethod + def from_float(cls, float_conv, weight_qparams): + qref_conv = cls( + float_conv.in_channels, + float_conv.out_channels, + float_conv.kernel_size, # type: ignore[arg-type] + float_conv.stride, # type: ignore[arg-type] + float_conv.padding, # type: ignore[arg-type] + float_conv.output_padding, # type: ignore[arg-type] + float_conv.groups, + float_conv.bias is not None, # type: ignore[arg-type] + float_conv.dilation, # type: ignore[arg-type] + float_conv.padding_mode, + device=float_conv.weight.device, + dtype=float_conv.weight.dtype, + weight_qparams=weight_qparams) + qref_conv.weight = torch.nn.Parameter(float_conv.weight.detach()) + if float_conv.bias is not None: + qref_conv.bias = torch.nn.Parameter(float_conv.bias.detach()) + return qref_conv + + +class ConvTranspose1d(_ConvTransposeNd, nn.ConvTranspose1d): + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: _size_1_t, + stride: _size_1_t = 1, + padding: _size_1_t = 0, + output_padding: _size_1_t = 0, + groups: int = 1, + bias: bool = True, + dilation: _size_1_t = 1, + padding_mode: str = "zeros", + device=None, + dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None): + nn.ConvTranspose1d.__init__( + self, in_channels, out_channels, kernel_size, stride, padding, output_padding, + groups, bias, dilation, padding_mode, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def forward(self, x: torch.Tensor, output_size: Optional[List[int]] = None) -> torch.Tensor: + """ + we have: + w(float) -- quant - dequant \ + x(float) ------------- F.convTranspose1d --- + In the full model, we will see + w(float) -- quant - *dequant \ + x -- quant --- *dequant -- *F.convTranspose1d --- *quant - dequant + and the backend should be able to fuse the ops with `*` into a quantized conv1d + """ + + assert isinstance(self.padding, tuple) + # One cannot replace List by Tuple or Sequence in "_output_padding" because + # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`. 
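+        # _output_padding resolves the extra one-sided padding needed so the
+        # transposed convolution matches the requested output_size, falling
+        # back to the configured output_padding when output_size is None.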
+ output_padding = self._output_padding( + input, output_size, self.stride, self.padding, self.kernel_size, self.dilation) # type: ignore[arg-type] + + weight_quant_dequant = self.get_weight() + result = F.conv_transpose1d( + x, weight_quant_dequant, self.bias, self.stride, + self.padding, output_padding, self.groups, self.dilation) + return result + + def _get_name(self): + return "QuantizedConvTranspose1d(Reference)" + + @classmethod + def from_float(cls, float_conv, weight_qparams): + return _ConvTransposeNd.from_float(cls, float_conv, weight_qparams) + +class ConvTranspose2d(_ConvTransposeNd, nn.ConvTranspose2d): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, output_padding=0, + groups=1, bias=True, dilation=1, + padding_mode='zeros', + device=None, + dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None): + + nn.ConvTranspose2d.__init__( + self, in_channels, out_channels, kernel_size, stride, padding, output_padding, + groups, bias, dilation, padding_mode, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def forward(self, x: torch.Tensor, output_size: Optional[List[int]] = None) -> torch.Tensor: + """ + we have: + w(float) -- quant - dequant \ + x(float) ------------- F.convTranspose2d --- + In the full model, we will see + w(float) -- quant - *dequant \ + x -- quant --- *dequant -- *F.convTranspose2d --- *quant - dequant + and the backend should be able to fuse the ops with `*` into a quantized conv2d + """ + assert isinstance(self.padding, tuple) + # One cannot replace List by Tuple or Sequence in "_output_padding" because + # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`. + + output_padding = self._output_padding( + input, output_size, self.stride, self.padding, self.kernel_size, self.dilation) # type: ignore[arg-type] + + weight_quant_dequant = self.get_weight() + result = F.conv_transpose2d( + x, weight_quant_dequant, self.bias, self.stride, + self.padding, output_padding, self.groups, self.dilation) + + return result + + def _get_name(self): + return "QuantizedConvTranspose2d(Reference)" + + @classmethod + def from_float(cls, float_conv, weight_qparams): + return _ConvTransposeNd.from_float(cls, float_conv, weight_qparams) + +class ConvTranspose3d(_ConvTransposeNd, nn.ConvTranspose3d): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, output_padding=0, + groups=1, bias=True, dilation=1, + padding_mode="zeros", + device=None, + dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None): + nn.ConvTranspose3d.__init__( + self, in_channels, out_channels, kernel_size, stride, padding, output_padding, + groups, bias, dilation, padding_mode, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def forward(self, x: torch.Tensor, output_size: Optional[List[int]] = None) -> torch.Tensor: + """ + we have: + w(float) -- quant - dequant \ + x(float) ------------- F.convTranspose3d --- + In the full model, we will see + w(float) -- quant - *dequant \ + x -- quant --- *dequant -- *F.convTranspose3d --- *quant - dequant + and the backend should be able to fuse the ops with `*` into a quantized conv3d + """ + + assert isinstance(self.padding, tuple) + # One cannot replace List by Tuple or Sequence in "_output_padding" because + # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`. 
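+        # Same resolution as in the 1d/2d variants above; the resulting
+        # padding is applied to one side of each spatial dimension.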
+ output_padding = self._output_padding( + input, output_size, self.stride, self.padding, self.kernel_size, self.dilation) # type: ignore[arg-type] + + weight_quant_dequant = self.get_weight() + result = F.conv_transpose3d( + x, weight_quant_dequant, self.bias, self.stride, + self.padding, output_padding, self.groups, self.dilation) + return result + + def _get_name(self): + return "QuantizedConvTranspose3d(Reference)" + + @classmethod + def from_float(cls, float_conv, weight_qparams): + return _ConvTransposeNd.from_float(cls, float_conv, weight_qparams) diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/linear.py b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..378fe0eb6eeeab9c69f06bef6cd71213f5b7fe34 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/linear.py @@ -0,0 +1,57 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Optional, Dict, Any +from .utils import ReferenceQuantizedModule + +__all__ = ['Linear'] + +class Linear(nn.Linear, ReferenceQuantizedModule): + """ A reference quantized linear module that fits into the FX + Graph Mode Quantization workflow + activation will be floating point Tensor, we will store floating + point weight as well in the module, but in forward we'll quantize + and dequantize the weight before running the floating point functional + linear operator. + """ + _IS_REFERENCE = True + + def __init__( + self, + in_features: int, + out_features: int, + bias_: bool = True, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + weight_qparams: Optional[Dict[str, Any]] = None): + super().__init__(in_features, out_features, bias_, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def _get_name(self): + return "QuantizedLinear(Reference)" + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + we have: + w(float) -- quant - dequant \ + x(float) ------------- F.linear --- + + In the full model, we will see + w(float) -- quant - *dequant \ + x -- quant --- *dequant -- *F.linear --- *quant - dequant + and the backend should be able to fuse the ops with `*` into a quantized linear + """ + weight_quant_dequant = self.get_weight() + result = F.linear(x, weight_quant_dequant, self.bias) + return result + + @classmethod + def from_float(cls, float_linear, weight_qparams): + qref_linear = Linear( + float_linear.in_features, float_linear.out_features, + float_linear.bias is not None, device=float_linear.weight.device, + dtype=float_linear.weight.dtype, weight_qparams=weight_qparams) + qref_linear.weight = torch.nn.Parameter(float_linear.weight.detach()) + if float_linear.bias is not None: + qref_linear.bias = torch.nn.Parameter(float_linear.bias.detach()) + return qref_linear diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/rnn.py b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..4120338ce271af197a83ba2de8a767ed5ffe3716 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/rnn.py @@ -0,0 +1,614 @@ +import torch +import torch.nn as nn +from torch import Tensor +from .utils import _quantize_and_dequantize_weight +from .utils import _quantize_weight +from typing import Optional, Dict, Any, Tuple +from torch import _VF +from 
torch.nn.utils.rnn import PackedSequence + +__all__ = ['RNNCellBase', 'RNNCell', 'LSTMCell', 'GRUCell', 'RNNBase', 'LSTM', 'GRU', 'get_quantized_weight'] + +def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor: + return tensor.index_select(dim, permutation) + +def _get_weight_and_quantization_params(module, wn): + weight = getattr(module, wn) + params = [weight] + for param_name in [wn + n for n in ["_qscheme", "_dtype", "_scale", "_zero_point", "_axis_int"]]: + if hasattr(module, param_name): + param = getattr(module, param_name) + else: + param = None + params.append(param) + return params + +def get_quantized_weight(module, wn): + if not hasattr(module, wn): + return None + params = _get_weight_and_quantization_params(module, wn) + weight = _quantize_weight(*params) + return weight + +def _get_quantize_and_dequantized_weight(module, wn): + if not hasattr(module, wn): + return None + params = _get_weight_and_quantization_params(module, wn) + weight = _quantize_and_dequantize_weight(*params) + return weight + +class RNNCellBase(nn.RNNCellBase): + def __init__(self, input_size: int, hidden_size: int, bias: bool, num_chunks: int, + device=None, dtype=None, weight_qparams_dict=None) -> None: + super().__init__(input_size, hidden_size, bias, num_chunks, device=device, dtype=dtype) + # TODO(jerryzh168): maybe make this arg a required arg + if weight_qparams_dict is None: + weight_qparams = { + "qscheme": torch.per_tensor_affine, + "dtype": torch.quint8, + "scale": 1.0, + "zero_point": 0 + } + weight_qparams_dict = { + "weight_ih": weight_qparams, + "weight_hh": weight_qparams, + "is_decomposed": False, + } + assert len(weight_qparams_dict) == 3, "Expected length for weight_qparams_dict to be 3 for QuantizedRNNCellBase(Reference)" + self._init_weight_qparams_dict(weight_qparams_dict, device) + + def _init_weight_qparams_dict(self, weight_qparams_dict, device): + assert weight_qparams_dict is not None + self.is_decomposed = weight_qparams_dict["is_decomposed"] + for key, weight_qparams in weight_qparams_dict.items(): + if key == "is_decomposed": + continue + # TODO: refactor the duplicated code to utils.py + weight_qscheme = weight_qparams["qscheme"] + weight_dtype = weight_qparams["dtype"] + setattr(self, key + "_qscheme", weight_qscheme) + setattr(self, key + "_dtype", weight_dtype) + assert weight_qscheme in [None, torch.per_tensor_affine, torch.per_channel_affine], \ + Exception(f"qscheme: {weight_qscheme} is not support in {self._get_name()}") + if weight_qscheme is not None: + scale = weight_qparams["scale"] + scale_tensor = scale.clone().detach() \ + if isinstance(scale, torch.Tensor) else \ + torch.tensor(scale, dtype=torch.float, device=device) + self.register_buffer(key + "_scale", scale_tensor) + zp = weight_qparams["zero_point"] + zp_tensor = zp.clone().detach() \ + if isinstance(zp, torch.Tensor) else \ + torch.tensor(zp, dtype=torch.int, device=device) + self.register_buffer(key + "_zero_point", zp_tensor) + if weight_qscheme == torch.per_channel_affine: + axis = weight_qparams["axis"] + axis_tensor = axis.clone().detach() \ + if isinstance(axis, torch.Tensor) else \ + torch.tensor(axis, dtype=torch.int, device=device) + self.register_buffer(key + "_axis", axis_tensor) + else: + # added for TorchScriptability, not used + self.register_buffer( + key + "_axis", torch.tensor(0, dtype=torch.int, device=device)) + setattr(self, key + "_axis_int", getattr(self, key + "_axis").item()) + + def _get_name(self): + return "QuantizedRNNCellBase(Reference)" + + 
def get_quantized_weight_ih(self): + return get_quantized_weight(self, "weight_ih") + + def get_quantized_weight_hh(self): + return get_quantized_weight(self, "weight_hh") + + def get_weight_ih(self): + return _get_quantize_and_dequantized_weight(self, "weight_ih") + + def get_weight_hh(self): + return _get_quantize_and_dequantized_weight(self, "weight_hh") + +class RNNCell(RNNCellBase): + """ + We'll store weight_qparams for all the weights (weight_ih and weight_hh), + we need to pass in a `weight_qparams_dict` that maps from weight name, + e.g. weight_ih, to the weight_qparams for that weight + """ + def __init__(self, input_size: int, hidden_size: int, bias: bool = True, nonlinearity: str = "tanh", + device=None, dtype=None, weight_qparams_dict: Optional[Dict[str, Any]] = None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype, 'weight_qparams_dict': weight_qparams_dict} + super().__init__(input_size, hidden_size, bias, num_chunks=1, **factory_kwargs) + self.nonlinearity = nonlinearity + + def _get_name(self): + return "QuantizedRNNCell(Reference)" + + # TODO: refactor nn.RNNCell to have a _forward that takes weight_ih and weight_hh as input + # and remove duplicated code, same for the other two Cell modules + def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor: + assert input.dim() in (1, 2), \ + f"RNNCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor" + is_batched = input.dim() == 2 + if not is_batched: + input = input.unsqueeze(0) + + if hx is None: + hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + else: + hx = hx.unsqueeze(0) if not is_batched else hx + + if self.nonlinearity == "tanh": + ret = _VF.rnn_tanh_cell( + input, hx, + self.get_weight_ih(), self.get_weight_hh(), + self.bias_ih, self.bias_hh, + ) + elif self.nonlinearity == "relu": + ret = _VF.rnn_relu_cell( + input, hx, + self.get_weight_ih(), self.get_weight_hh(), + self.bias_ih, self.bias_hh, + ) + else: + ret = input # TODO: remove when jit supports exception flow + raise RuntimeError( + f"Unknown nonlinearity: {self.nonlinearity}") + + if not is_batched: + ret = ret.squeeze(0) + + return ret + + @classmethod + def from_float(cls, mod, weight_qparams_dict): + ref_mod = cls( + mod.input_size, + mod.hidden_size, + mod.bias, + mod.nonlinearity, + mod.weight_ih.device, + mod.weight_ih.dtype, + weight_qparams_dict) + ref_mod.weight_ih = mod.weight_ih + ref_mod.weight_hh = mod.weight_hh + ref_mod.bias_ih = mod.bias_ih + ref_mod.bias_hh = mod.bias_hh + return ref_mod + +class LSTMCell(RNNCellBase): + """ + We'll store weight_qparams for all the weights (weight_ih and weight_hh), + we need to pass in a `weight_qparams_dict` that maps from weight name, + e.g. 
weight_ih, to the weight_qparams for that weight + """ + def __init__(self, input_size: int, hidden_size: int, bias: bool = True, + device=None, dtype=None, weight_qparams_dict: Optional[Dict[str, Any]] = None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype, 'weight_qparams_dict': weight_qparams_dict} + super().__init__(input_size, hidden_size, bias, num_chunks=4, **factory_kwargs) + + def _get_name(self): + return "QuantizedLSTMCell(Reference)" + + def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]: + assert input.dim() in (1, 2), \ + f"LSTMCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor" + is_batched = input.dim() == 2 + if not is_batched: + input = input.unsqueeze(0) + + if hx is None: + zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + hx = (zeros, zeros) + else: + hx = (hx[0].unsqueeze(0), hx[1].unsqueeze(0)) if not is_batched else hx + + ret = _VF.lstm_cell( + input, hx, + self.get_weight_ih(), self.get_weight_hh(), + self.bias_ih, self.bias_hh, + ) + + if not is_batched: + ret = (ret[0].squeeze(0), ret[1].squeeze(0)) + return ret + + @classmethod + def from_float(cls, mod, weight_qparams_dict): + ref_mod = cls( + mod.input_size, + mod.hidden_size, + mod.bias, + mod.weight_ih.device, + mod.weight_ih.dtype, + weight_qparams_dict) + ref_mod.weight_ih = mod.weight_ih + ref_mod.weight_hh = mod.weight_hh + ref_mod.bias_ih = mod.bias_ih + ref_mod.bias_hh = mod.bias_hh + return ref_mod + +class GRUCell(RNNCellBase): + """ + We'll store weight_qparams for all the weights (weight_ih and weight_hh), + we need to pass in a `weight_qparams_dict` that maps from weight name, + e.g. weight_ih, to the weight_qparams for that weight + """ + def __init__(self, input_size: int, hidden_size: int, bias: bool = True, + device=None, dtype=None, weight_qparams_dict: Optional[Dict[str, Any]] = None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype, 'weight_qparams_dict': weight_qparams_dict} + super().__init__(input_size, hidden_size, bias, num_chunks=3, **factory_kwargs) + + def _get_name(self): + return "QuantizedGRUCell(Reference)" + + def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor: + assert input.dim() in (1, 2), \ + f"GRUCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor" + is_batched = input.dim() == 2 + if not is_batched: + input = input.unsqueeze(0) + + if hx is None: + hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + else: + hx = hx.unsqueeze(0) if not is_batched else hx + + ret = _VF.gru_cell( + input, hx, + self.get_weight_ih(), self.get_weight_hh(), + self.bias_ih, self.bias_hh, + ) + + if not is_batched: + ret = ret.squeeze(0) + + return ret + + @classmethod + def from_float(cls, mod, weight_qparams_dict): + ref_mod = cls( + mod.input_size, + mod.hidden_size, + mod.bias, + mod.weight_ih.device, + mod.weight_ih.dtype, + weight_qparams_dict) + ref_mod.weight_ih = mod.weight_ih + ref_mod.weight_hh = mod.weight_hh + ref_mod.bias_ih = mod.bias_ih + ref_mod.bias_hh = mod.bias_hh + return ref_mod + +class RNNBase(nn.RNNBase): + def __init__(self, mode: str, input_size: int, hidden_size: int, + num_layers: int = 1, bias: bool = True, batch_first: bool = False, + dropout: float = 0., bidirectional: bool = False, proj_size: int = 0, + device=None, dtype=None, + weight_qparams_dict: Optional[Dict[str, Any]] = None) -> None: + super().__init__( + mode, input_size, 
hidden_size, num_layers, bias, batch_first, dropout, + bidirectional, proj_size, device, dtype + ) + # TODO(jerryzh168): maybe make this arg a required arg + if weight_qparams_dict is None: + weight_qparams = { + 'qscheme': torch.per_tensor_affine, + 'dtype': torch.quint8, + 'scale': 1.0, + 'zero_point': 0 + } + weight_qparams_dict = {"is_decomposed": False} # type: ignore[dict-item] + for wn in self._flat_weights_names: + if wn.startswith("weight"): + weight_qparams_dict[wn] = weight_qparams + self._init_weight_qparams_dict(weight_qparams_dict, device) + + def _init_weight_qparams_dict(self, weight_qparams_dict, device): + self.is_decomposed = weight_qparams_dict["is_decomposed"] + for key, weight_qparams in weight_qparams_dict.items(): + if key == "is_decomposed": + continue + weight_qscheme = weight_qparams["qscheme"] + weight_dtype = weight_qparams["dtype"] + setattr(self, key + "_qscheme", weight_qscheme) + setattr(self, key + "_dtype", weight_dtype) + assert weight_qscheme in [None, torch.per_tensor_affine, torch.per_channel_affine], \ + Exception(f"qscheme: {weight_qscheme} is not support in {self._get_name()}") + if weight_qscheme is not None: + self.register_buffer( + key + "_scale", + torch.tensor(weight_qparams["scale"], dtype=torch.float, device=device)) + self.register_buffer( + key + "_zero_point", + torch.tensor(weight_qparams["zero_point"], dtype=torch.int, device=device)) + if weight_qscheme == torch.per_channel_affine: + self.register_buffer( + key + "_axis", + torch.tensor(weight_qparams["axis"], dtype=torch.int, device=device)) + else: + # added for TorchScriptability, not used + self.register_buffer( + key + "_axis", torch.tensor(0, dtype=torch.int, device=device)) + setattr(self, key + "_axis_int", getattr(self, key + "_axis").item()) + +class LSTM(RNNBase): + """ Reference Quantized LSTM Module + We'll store weight_qparams for all the weights in _flat_weights, we need to pass in + a `weight_qparams_dict` that maps from weight name, e.g. weight_ih_l0, + to the weight_qparams for that weight + """ + def __init__(self, *args, **kwargs): + super().__init__('LSTM', *args, **kwargs) + + # Same as above, see torch/nn/modules/module.py::_forward_unimplemented + def permute_hidden(self, # type: ignore[override] + hx: Tuple[Tensor, Tensor], + permutation: Optional[Tensor] + ) -> Tuple[Tensor, Tensor]: + if permutation is None: + return hx + return _apply_permutation(hx[0], permutation), _apply_permutation(hx[1], permutation) + + def get_expected_cell_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]: + if batch_sizes is not None: + mini_batch = int(batch_sizes[0]) + else: + mini_batch = input.size(0) if self.batch_first else input.size(1) + num_directions = 2 if self.bidirectional else 1 + expected_hidden_size = (self.num_layers * num_directions, + mini_batch, self.hidden_size) + return expected_hidden_size + + # In the future, we should prevent mypy from applying contravariance rules here. 
+ # See torch/nn/modules/module.py::_forward_unimplemented + def check_forward_args(self, # type: ignore[override] + input: Tensor, + hidden: Tuple[Tensor, Tensor], + batch_sizes: Optional[Tensor], + ): + self.check_input(input, batch_sizes) + self.check_hidden_size(hidden[0], self.get_expected_hidden_size(input, batch_sizes), + 'Expected hidden[0] size {}, got {}') + self.check_hidden_size(hidden[1], self.get_expected_cell_size(input, batch_sizes), + 'Expected hidden[1] size {}, got {}') + + def get_quantized_weight_bias_dict(self): + """ dictionary from flat_weight_name to quantized weight or (unquantized) bias + e.g. + { + "weight_ih_l0": quantized_weight, + "bias_ih_l0": unquantized_bias, + ... + } + """ + quantized_weight_bias_dict = {} + for wn in self._flat_weights_names: + if hasattr(self, wn): + if wn.startswith("weight"): + weight_or_bias = get_quantized_weight(self, wn) + else: + weight_or_bias = getattr(self, wn) + else: + weight_or_bias = None + quantized_weight_bias_dict[wn] = weight_or_bias + return quantized_weight_bias_dict + + def get_flat_weights(self): + flat_weights = [] + for wn in self._flat_weights_names: + if hasattr(self, wn): + weight = getattr(self, wn) + if wn.startswith("weight"): + params = _get_weight_and_quantization_params(self, wn) + weight = _quantize_and_dequantize_weight(*params) + else: + weight = None + flat_weights.append(weight) + return flat_weights + + def forward(self, input, hx=None): # noqa: F811 + orig_input = input + # xxx: isinstance check needs to be in conditional for TorchScript to compile + batch_sizes = None + if isinstance(orig_input, PackedSequence): + input, batch_sizes, sorted_indices, unsorted_indices = input + max_batch_size = int(batch_sizes[0]) + else: + batch_sizes = None + is_batched = input.dim() == 3 + batch_dim = 0 if self.batch_first else 1 + if not is_batched: + input = input.unsqueeze(batch_dim) + max_batch_size = input.size(0) if self.batch_first else input.size(1) + sorted_indices = None + unsorted_indices = None + + if hx is None: + num_directions = 2 if self.bidirectional else 1 + real_hidden_size = self.proj_size if self.proj_size > 0 else self.hidden_size + h_zeros = torch.zeros(self.num_layers * num_directions, + max_batch_size, real_hidden_size, + dtype=input.dtype, device=input.device) + c_zeros = torch.zeros(self.num_layers * num_directions, + max_batch_size, self.hidden_size, + dtype=input.dtype, device=input.device) + hx = (h_zeros, c_zeros) + else: + if batch_sizes is None: # If not PackedSequence input. + if is_batched: # type: ignore[possibly-undefined] + if (hx[0].dim() != 3 or hx[1].dim() != 3): + msg = ("For batched 3-D input, hx and cx should " + f"also be 3-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors") + raise RuntimeError(msg) + else: + if hx[0].dim() != 2 or hx[1].dim() != 2: + msg = ("For unbatched 2-D input, hx and cx should " + f"also be 2-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors") + raise RuntimeError(msg) + hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1)) + + # Each batch of the hidden state should match the input sequence that + # the user believes he/she is passing in. 
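+            # sorted_indices is only set for PackedSequence inputs; for plain tensors it is
+            # None and permute_hidden simply returns hx unchanged.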
+ hx = self.permute_hidden(hx, sorted_indices) + + self.check_forward_args(input, hx, batch_sizes) + if batch_sizes is None: + result = _VF.lstm(input, hx, self.get_flat_weights(), self.bias, self.num_layers, + self.dropout, self.training, self.bidirectional, self.batch_first) + else: + result = _VF.lstm(input, batch_sizes, hx, self.get_flat_weights(), self.bias, + self.num_layers, self.dropout, self.training, self.bidirectional) + output = result[0] + hidden = result[1:] + # xxx: isinstance check needs to be in conditional for TorchScript to compile + if isinstance(orig_input, PackedSequence): + output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices) + return output_packed, self.permute_hidden(hidden, unsorted_indices) + else: + if not is_batched: # type: ignore[possibly-undefined] + output = output.squeeze(batch_dim) # type: ignore[possibly-undefined] + hidden = (hidden[0].squeeze(1), hidden[1].squeeze(1)) + return output, self.permute_hidden(hidden, unsorted_indices) + + def _get_name(self): + return "QuantizedLSTM(Reference)" + + @classmethod + def from_float(cls, mod, weight_qparams_dict): + ref_mod = cls( + mod.input_size, + mod.hidden_size, + mod.num_layers, + mod.bias, + mod.batch_first, + mod.dropout, + mod.bidirectional, + weight_qparams_dict=weight_qparams_dict) + for wn in mod._flat_weights_names: + setattr(ref_mod, wn, getattr(mod, wn)) + return ref_mod + +class GRU(RNNBase): + """ Reference Quantized GRU Module + We'll store weight_qparams for all the weights in _flat_weights, we need to pass in + a `weight_qparams_dict` that maps from weight name, e.g. weight_ih_l0, + to the weight_qparams for that weight + """ + def __init__(self, *args, **kwargs): + if 'proj_size' in kwargs: + raise ValueError("proj_size argument is only supported for LSTM, not RNN or GRU") + super().__init__('GRU', *args, **kwargs) + + def get_quantized_weight_bias_dict(self): + """ dictionary from flat_weight_name to quantized weight or (unquantized) bias + e.g. + { + "weight_ih_l0": quantized_weight, + "bias_ih_l0": unquantized_bias, + ... + } + """ + quantized_weight_bias_dict = {} + for wn in self._flat_weights_names: + if hasattr(self, wn): + if wn.startswith("weight"): + weight_or_bias = get_quantized_weight(self, wn) + else: + weight_or_bias = getattr(self, wn) + else: + weight_or_bias = None + quantized_weight_bias_dict[wn] = weight_or_bias + return quantized_weight_bias_dict + + def get_flat_weights(self): + flat_weights = [] + for wn in self._flat_weights_names: + if hasattr(self, wn): + weight = getattr(self, wn) + if wn.startswith("weight"): + params = _get_weight_and_quantization_params(self, wn) + weight = _quantize_and_dequantize_weight(*params) + else: + weight = None + flat_weights.append(weight) + return flat_weights + + def forward(self, input, hx=None): # noqa: F811 + # Note: this is copied from the forward of GRU in https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/rnn.py + # only changed self._flat_weights to self.get_flat_weights() + # TODO: maybe we can try inheriting from that class and define get_flat_weights + # as a @property? 
this might interfere with TorchScript, if we remove that + # requirement in the future we should be able to do this + orig_input = input + # xxx: isinstance check needs to be in conditional for TorchScript to compile + if isinstance(orig_input, PackedSequence): + input, batch_sizes, sorted_indices, unsorted_indices = input + max_batch_size = int(batch_sizes[0]) + else: + batch_sizes = None + assert (input.dim() in (2, 3)), f"GRU: Expected input to be 2-D or 3-D but received {input.dim()}-D tensor" + is_batched = input.dim() == 3 + batch_dim = 0 if self.batch_first else 1 + if not is_batched: + input = input.unsqueeze(batch_dim) + if hx is not None: + if hx.dim() != 2: + raise RuntimeError( + f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor") + hx = hx.unsqueeze(1) + else: + if hx is not None and hx.dim() != 3: + raise RuntimeError( + f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor") + max_batch_size = input.size(0) if self.batch_first else input.size(1) + sorted_indices = None + unsorted_indices = None + + if hx is None: + num_directions = 2 if self.bidirectional else 1 + hx = torch.zeros(self.num_layers * num_directions, + max_batch_size, self.hidden_size, + dtype=input.dtype, device=input.device) + else: + # Each batch of the hidden state should match the input sequence that + # the user believes he/she is passing in. + hx = self.permute_hidden(hx, sorted_indices) + + self.check_forward_args(input, hx, batch_sizes) + if batch_sizes is None: + result = _VF.gru(input, hx, self.get_flat_weights(), self.bias, self.num_layers, + self.dropout, self.training, self.bidirectional, self.batch_first) + else: + result = _VF.gru(input, batch_sizes, hx, self.get_flat_weights(), self.bias, + self.num_layers, self.dropout, self.training, self.bidirectional) + output = result[0] + hidden = result[1] + + # xxx: isinstance check needs to be in conditional for TorchScript to compile + if isinstance(orig_input, PackedSequence): + output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices) + return output_packed, self.permute_hidden(hidden, unsorted_indices) + else: + if not is_batched: # type: ignore[possibly-undefined] + output = output.squeeze(batch_dim) # type: ignore[possibly-undefined] + hidden = hidden.squeeze(1) + + return output, self.permute_hidden(hidden, unsorted_indices) + + def _get_name(self): + return "QuantizedGRU(Reference)" + + @classmethod + def from_float(cls, mod, weight_qparams_dict): + ref_mod = cls( + mod.input_size, + mod.hidden_size, + mod.num_layers, + mod.bias, + mod.batch_first, + mod.dropout, + mod.bidirectional, + weight_qparams_dict=weight_qparams_dict) + for wn in mod._flat_weights_names: + setattr(ref_mod, wn, getattr(mod, wn)) + return ref_mod diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/utils.py b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2c1f52cdf884f6b2c032469c86e85d566e4b216f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/utils.py @@ -0,0 +1,323 @@ +import torch +import typing + +__all__ = [ + "ReferenceQuantizedModule", +] + +class ReferenceQuantizedModule(torch.nn.Module): + def _init_weight_qparams(self, weight_qparams, device): + if weight_qparams is None: + weight_qparams = { + "qscheme": torch.per_tensor_affine, + "dtype": torch.quint8, + "scale": 1.0, + "zero_point": 0 + } + 
self.weight_qscheme: torch.qscheme = weight_qparams["qscheme"] + self.weight_dtype = weight_qparams["dtype"] + assert self.weight_qscheme in [ + None, torch.per_tensor_affine, torch.per_channel_affine, + torch.per_channel_affine_float_qparams], \ + Exception(f"qscheme: {self.weight_qscheme} is not support in reference quantized {self._get_name()}") + if self.weight_dtype in [torch.quint8, torch.qint8, torch.quint4x2, torch.qint32]: + zero_point_dtype = weight_qparams["zero_point"].dtype if \ + isinstance(weight_qparams["zero_point"], torch.Tensor) else \ + torch.int + w_scale = weight_qparams["scale"] + w_scale_tensor = w_scale.clone().detach() \ + if isinstance(w_scale, torch.Tensor) \ + else torch.tensor(w_scale, dtype=torch.float, device=device) + self.register_buffer("weight_scale", w_scale_tensor) + w_zp = weight_qparams["zero_point"] + w_zp_tensor = w_zp.clone().detach() \ + if isinstance(w_zp, torch.Tensor) \ + else torch.tensor(w_zp, dtype=zero_point_dtype, device=device) + self.register_buffer("weight_zero_point", w_zp_tensor) + if self.weight_qscheme in [torch.per_channel_affine, torch.per_channel_affine_float_qparams]: + w_axis = weight_qparams["axis"] + w_axis_tensor = w_axis.clone().detach() \ + if isinstance(w_axis, torch.Tensor) \ + else torch.tensor(w_axis, dtype=torch.int, device=device) + self.register_buffer("weight_axis", w_axis_tensor) + else: + # added for TorchScriptability, not used + self.register_buffer( + "weight_axis", torch.tensor(0, dtype=torch.int, device=device)) + else: + # added for TorchScriptability, and for torch.float + self.register_buffer("weight_scale", torch.tensor(1.0, dtype=torch.float, device=device)) + self.register_buffer("weight_zero_point", torch.tensor(0, dtype=torch.int, device=device)) + self.register_buffer( + "weight_axis", torch.tensor(0, dtype=torch.int, device=device)) + self.is_decomposed: bool = weight_qparams.get("is_decomposed", False) + # store weight_axis as weight_axis_int due to some constraints of torchdynamo.export + # for capturing `.item` operations + self.weight_axis_int: int = self.weight_axis.item() # type: ignore[operator, assignment] + self.weight_quant_min: typing.Optional[int] = weight_qparams.get("quant_min", None) + self.weight_quant_max: typing.Optional[int] = weight_qparams.get("quant_max", None) + + def get_weight(self): + """ + Fake quantize (quantize and dequantize) the weight with + the quantization parameters for weight, this is used to + simulate the numerics for the quantized weight in a quantized + model + """ + # suppress mypy warning + assert isinstance(self.weight_scale, torch.Tensor) + assert isinstance(self.weight_zero_point, torch.Tensor) + if self.is_decomposed: + return _quantize_and_dequantize_weight_decomposed( + self.weight, # type: ignore[arg-type] + self.weight_qscheme, + self.weight_dtype, + self.weight_scale, + self.weight_zero_point, + self.weight_axis_int, + self.weight_quant_min, + self.weight_quant_max) + else: + return _quantize_and_dequantize_weight( + self.weight, # type: ignore[arg-type] + self.weight_qscheme, + self.weight_dtype, + self.weight_scale, + self.weight_zero_point, + self.weight_axis_int) + + def get_quantized_weight(self): + # suppress mypy warning + assert isinstance(self.weight_scale, torch.Tensor) + assert isinstance(self.weight_zero_point, torch.Tensor) + # assert isinstance(self.weight_axis, torch.Tensor) + if self.is_decomposed: + return _quantize_weight_decomposed( + self.weight, # type: ignore[arg-type] + self.weight_qscheme, + self.weight_dtype, + 
self.weight_scale, + self.weight_zero_point, + self.weight_axis_int, + self.weight_quant_min, + self.weight_quant_max) + else: + return _quantize_weight( + self.weight, # type: ignore[arg-type] + self.weight_qscheme, + self.weight_dtype, + self.weight_scale, + self.weight_zero_point, + self.weight_axis_int) + + def _save_to_state_dict(self, destination, prefix, keep_vars): + super()._save_to_state_dict(destination, prefix, keep_vars) + _save_weight_qparams( + destination, prefix, self.weight_qscheme, self.weight_dtype, + self.weight_scale, self.weight_zero_point, self.weight_axis) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + for key in _get_weight_qparam_keys(state_dict, prefix): + setattr(self, key, state_dict[prefix + key]) + state_dict.pop(prefix + key) + + super()._load_from_state_dict( + state_dict, prefix, local_metadata, False, + missing_keys, unexpected_keys, error_msgs) + +def _quantize_weight_decomposed( + weight: torch.Tensor, + weight_qscheme: torch.qscheme, + weight_dtype: torch.dtype, + weight_scale: torch.Tensor, + weight_zero_point: torch.Tensor, + weight_axis: int, + weight_quant_min: typing.Optional[int], + weight_quant_max: typing.Optional[int], +) -> torch.Tensor: + _DTYPE_TO_QVALUE_BOUNDS = { + torch.uint8: (0, 255), + torch.int8: (-128, 127), + torch.int32: (-(2**31), 2**31 - 1), + } + # TODO: add an util function for converting qdtype to dtype + _QDTYPE_TO_UNDERLYING_INT_REPR_DTYPE = { + torch.quint8: torch.uint8, + torch.qint8: torch.int8, + torch.qint32: torch.int32, + } + if weight_qscheme == torch.per_tensor_affine: + if weight_dtype in [torch.quint8, torch.qint8, torch.qint32]: + weight_dtype_ = _QDTYPE_TO_UNDERLYING_INT_REPR_DTYPE[weight_dtype] + if weight_quant_min is None or weight_quant_max is None: + weight_quant_min, weight_quant_max = _DTYPE_TO_QVALUE_BOUNDS[weight_dtype_] + weight = torch.ops.quantized_decomposed.quantize_per_tensor( + weight, + weight_scale, + weight_zero_point, + weight_quant_min, + weight_quant_max, + weight_dtype_ + ) + return weight + elif weight_qscheme in [torch.per_channel_affine, torch.per_channel_affine_float_qparams]: + # TODO: torch.quint4x2 is not supported + if weight_dtype in [torch.quint8, torch.qint8, torch.qint32]: + weight_dtype_ = _QDTYPE_TO_UNDERLYING_INT_REPR_DTYPE[weight_dtype] + if weight_quant_min is None or weight_quant_max is None: + weight_quant_min, weight_quant_max = _DTYPE_TO_QVALUE_BOUNDS[weight_dtype_] + weight = torch.ops.quantized_decomposed.quantize_per_channel( + weight, + weight_scale, + weight_zero_point, + weight_axis, + weight_quant_min, + weight_quant_max, + weight_dtype_) # type: ignore[arg-type] + return weight + raise Exception(f"Unsupported dtype and qscheme: {weight_dtype}, {weight_qscheme}") + +def _dequantize_weight_decomposed( + weight: torch.Tensor, + weight_qscheme: torch.qscheme, + weight_dtype: torch.dtype, + weight_scale: torch.Tensor, + weight_zero_point: torch.Tensor, + weight_axis: int, + weight_quant_min: typing.Optional[int], + weight_quant_max: typing.Optional[int], +) -> torch.Tensor: + # TODO: get the quant_min and quant_max from activation_post_process + _DTYPE_TO_QVALUE_BOUNDS = { + torch.uint8: (0, 255), + torch.int8: (-128, 127), + torch.int32: (-(2**31), 2**31 - 1), + } + # TODO: add an util function for converting qdtype to dtype + _QDTYPE_TO_UNDERLYING_INT_REPR_DTYPE = { + torch.quint8: torch.uint8, + torch.qint8: torch.int8, + torch.qint32: torch.int32, + } + weight_dtype_ = 
_QDTYPE_TO_UNDERLYING_INT_REPR_DTYPE[weight_dtype] + if weight_quant_min is None or weight_quant_max is None: + weight_quant_min, weight_quant_max = _DTYPE_TO_QVALUE_BOUNDS[weight_dtype_] + if weight_qscheme == torch.per_tensor_affine: + if weight_dtype in [torch.quint8, torch.qint8, torch.qint32]: + weight = torch.ops.quantized_decomposed.dequantize_per_tensor( + weight, + weight_scale, + weight_zero_point, + weight_quant_min, + weight_quant_max, + weight_dtype_ + ) + return weight + elif weight_qscheme in [torch.per_channel_affine, torch.per_channel_affine_float_qparams]: + # TODO: torch.quint4x2 is not supported + if weight_dtype in [torch.quint8, torch.qint8, torch.qint32]: + weight = torch.ops.quantized_decomposed.dequantize_per_channel( + weight, + weight_scale, + weight_zero_point, + weight_axis, + weight_quant_min, + weight_quant_max, + weight_dtype_) # type: ignore[arg-type] + return weight + raise Exception(f"Unsupported dtype and qscheme: {weight_dtype}, {weight_qscheme}") + +def _quantize_weight( + weight: torch.Tensor, + weight_qscheme: torch.qscheme, + weight_dtype: torch.dtype, + weight_scale: torch.Tensor, + weight_zero_point: torch.Tensor, + weight_axis_int: int +) -> torch.Tensor: + if weight_dtype == torch.float16: + weight = weight.to(weight_dtype) + return weight + + if weight_qscheme == torch.per_tensor_affine: + if weight_dtype in [torch.quint8, torch.qint8, torch.qint32]: + weight = torch.quantize_per_tensor(weight, weight_scale, weight_zero_point, weight_dtype) + return weight + elif weight_qscheme in [torch.per_channel_affine, torch.per_channel_affine_float_qparams]: + if weight_dtype in [torch.quint8, torch.qint8, torch.quint4x2, torch.qint32]: + weight = torch.quantize_per_channel( + weight, weight_scale, + weight_zero_point, weight_axis_int, weight_dtype) # type: ignore[arg-type] + return weight + raise Exception(f"Unsupported dtype and qscheme: {weight_dtype}, {weight_qscheme}") + +def _quantize_and_dequantize_weight_decomposed( + weight: torch.Tensor, + weight_qscheme: torch.qscheme, + weight_dtype: torch.dtype, + weight_scale: torch.Tensor, + weight_zero_point: torch.Tensor, + weight_axis_int: int, + weight_quant_min: typing.Optional[int], + weight_quant_max: typing.Optional[int], +) -> torch.Tensor: + """ Quantize and then dequantize the weight based on + the quantization parameters + """ + if weight_qscheme in [ + torch.per_tensor_affine, + torch.per_channel_affine, + torch.per_channel_affine_float_qparams]: + weight_quant = _quantize_weight_decomposed( + weight, weight_qscheme, weight_dtype, weight_scale, weight_zero_point, weight_axis_int, + weight_quant_min, weight_quant_max) + weight_dequant = _dequantize_weight_decomposed( + weight_quant, weight_qscheme, weight_dtype, weight_scale, weight_zero_point, + weight_axis_int, weight_quant_min, weight_quant_max) + else: + weight_dequant = weight + return weight_dequant + +def _quantize_and_dequantize_weight( + weight: torch.Tensor, + weight_qscheme: torch.qscheme, + weight_dtype: torch.dtype, + weight_scale: torch.Tensor, + weight_zero_point: torch.Tensor, + weight_axis_int: int +) -> torch.Tensor: + """ Quantize and then dequantize the weight based on + the quantization parameters + """ + if weight_qscheme in [ + torch.per_tensor_affine, + torch.per_channel_affine, + torch.per_channel_affine_float_qparams]: + weight_quant = _quantize_weight( + weight, weight_qscheme, weight_dtype, weight_scale, weight_zero_point, weight_axis_int) + weight_dequant = weight_quant.dequantize() + else: + weight_dequant = weight 
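+    # weight_dequant is the fake-quantized weight for the affine qschemes handled above,
+    # or the original floating point weight when the qscheme is None / unsupported.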
+ return weight_dequant + +def _save_weight_qparams(destination, prefix, weight_qscheme, weight_dtype, weight_scale, weight_zero_point, weight_axis): + destination[prefix + "weight_qscheme"] = weight_qscheme + destination[prefix + "weight_dtype"] = weight_dtype + if weight_qscheme is not None: + destination[prefix + "weight_scale"] = weight_scale + destination[prefix + "weight_zero_point"] = weight_zero_point + if weight_qscheme == torch.per_channel_affine: + destination[prefix + "weight_axis"] = weight_axis + +def _get_weight_qparam_keys( + state_dict: typing.Dict[str, typing.Any], + prefix: str): + keys = ["weight_qscheme", "weight_dtype"] + weight_qscheme = state_dict[prefix + "weight_qscheme"] + if weight_qscheme is not None: + keys.append("weight_scale") + keys.append("weight_zero_point") + if weight_qscheme == torch.quantize_per_channel: + keys.append("weight_axis") + return keys diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/__init__.py b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0fda5a58f2984ee05b0d167297b458f62c37fc59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/__init__.py @@ -0,0 +1 @@ +from . import quantized diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d75882fba9610c1f3c731ee88b9eb39537872279 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__init__.py b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..86596ba18cf1f08e979a9e4c0ae0485627c44845 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__init__.py @@ -0,0 +1,10 @@ +from torch.ao.nn.sparse.quantized import dynamic + +from .linear import Linear +from .linear import LinearPackedParams + +__all__ = [ + "dynamic", + "Linear", + "LinearPackedParams", +] diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1088da5f781d7ac4893b362ae3f43052307ff64c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/linear.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7646a529ccf0c39840f0750f54d835de8e2b37dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/linear.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f21fac7059927a60ded332d80cef6fe7d9a2f1e7 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__init__.py b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..83a394f4df276171e7e5b2a1eb0cee843f9d4e99 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__init__.py @@ -0,0 +1,5 @@ +from .linear import Linear + +__all__ = [ + "Linear", +] diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..636e92bd593a0342ff00b922f16e33ef0fe71771 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__pycache__/linear.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__pycache__/linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c68b4158ee61d90e58bc6e8f512df3383f48be8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__pycache__/linear.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/linear.py b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..5347b682fb5a2bb430ab0b9a947f9ca0b830fb91 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/linear.py @@ -0,0 +1,139 @@ +from typing import Optional + +import torch +import torch.ao.nn.intrinsic as nni + +from torch.ao.nn.sparse.quantized import linear +from torch.ao.nn.sparse.quantized.utils import LinearBlockSparsePattern +from torch.ao.nn.quantized.modules.utils import _quantize_weight, _hide_packed_params_repr + +__all__ = ['Linear'] + +class Linear(torch.nn.Module): + r""" + A dynamically quantized sparse linear module with float tensor as inputs and outputs. 
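+    The weight is stored as a prepacked block-sparse qint8 tensor (see LinearPackedParams);
+    inputs are quantized on the fly inside the torch.ops.sparse.qlinear_dynamic op.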
+ """ + _version = 1 + _op_type = "sparse_dynamic" + _FLOAT_MODULE = torch.nn.Linear + + def __init__(self, in_features, out_features, row_block_size, col_block_size, bias=True, dtype=torch.qint8): + super().__init__() + + if dtype != torch.qint8: + raise NotImplementedError("Only QINT8 is supported for Sparse Quantized Linear Dynamic") + + self.in_features = in_features + self.out_features = out_features + + if bias: + bias = torch.zeros(self.out_features, dtype=torch.float) + else: + bias = None + + qweight = torch._empty_affine_quantized([out_features, in_features], + scale=1, zero_point=0, dtype=torch.qint8) + self._packed_params = linear.LinearPackedParams(row_block_size=row_block_size, + col_block_size=col_block_size, + dtype=dtype) + self._packed_params.set_weight_bias(qweight, bias, row_block_size, col_block_size) + + def _get_name(self): + return 'SparseQuantizedDynamicLinear' + + def extra_repr(self): + return f'in_features={self.in_features}, out_features={self.out_features}, qscheme={self.weight().qscheme()}' + + def __repr__(self): + return _hide_packed_params_repr(self, linear.LinearPackedParams) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch.ops.sparse.qlinear_dynamic(x, self._packed_params._packed_params) + + def _save_to_state_dict(self, destination, prefix, keep_vars): + super()._save_to_state_dict(destination, prefix, keep_vars) + destination[prefix + 'op_type'] = self._op_type + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + op_type = int(state_dict[prefix + 'op_type']) + assert op_type == 'sparse', \ + f"Cannot load from op_type [{op_type}], expecting [{self._op_type}]" + state_dict.pop(prefix + 'op_type') + + version = local_metadata.get('version', None) + assert version <= self._version + + # Is this code valid? In old quantization it seemed to be used to load + # older model + weight = state_dict.pop(prefix + 'weight') + bias = state_dict.pop(prefix + 'bias') + state_dict.update({prefix + '_packed_params.weight': weight, + prefix + '_packed_params.bias': bias}) + + super()._load_from_state_dict( + state_dict, prefix, local_metadata, False, + missing_keys, unexpected_keys, error_msgs) + + def _weight_bias(self): + return self._packed_params._weight_bias() + + def weight(self): + return self._weight_bias()[0] + + def bias(self): + return self._weight_bias()[1] + + def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor], + row_block_size: Optional[int], col_block_size: Optional[int]) -> None: + assert row_block_size is not None and col_block_size is not None + self.out_features = w.shape[0] + self.in_features = w.shape[1] + self._packed_params.set_weight_bias(w, b, row_block_size, col_block_size) + + @classmethod + def from_float(cls, mod): + r"""Create a quantized sparse dynamic module from a float module. + + We only care about the convert at this stage, no need for observers just yet. + """ + assert type(mod) == cls._FLOAT_MODULE, ' nnq.' + cls.__name__ + '.from_float only works for ' + \ + cls._FLOAT_MODULE.__name__ + # TODO: Need to add options to qconfig to avoid the calibration. 
+ # TODO: Add calibration for the sparsity + assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined' + if type(mod) == nni.LinearReLU: + mod = mod[0] + if mod.qconfig is not None and mod.qconfig.weight is not None: + weight_observer = mod.qconfig.weight() + else: + # We have the circular import issues if we import the qconfig in the beginning of this file: + # https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the + # import until we need it. + from torch.ao.quantization.qconfig import default_dynamic_qconfig + weight_observer = default_dynamic_qconfig.weight() + + # It is important to multiply by the mask BEFORE calling the `weight_observer` + # TODO (zaf): Mask might not be part of the qconfig (T83295194) + weight = mod.weight + if getattr(mod.qconfig, 'mask', False): + weight = mod.qconfig.mask * mod.weight + + weight_observer(weight) + dtype = weight_observer.dtype + assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8' + w_sc, w_zp = weight_observer.calculate_qparams() + if isinstance(w_zp, torch.Tensor): + assert not torch.any(w_zp.bool()), "All weight zero points must map to 0" + else: + assert w_zp == 0, 'Weight zero point must map to 0' + qweight = _quantize_weight(weight.float(), weight_observer) + + row_block_size, col_block_size = LinearBlockSparsePattern.block_size() + qlinear = cls(mod.in_features, + mod.out_features, + row_block_size, + col_block_size, + dtype=dtype) + qlinear.set_weight_bias(qweight, mod.bias, row_block_size, col_block_size) + return qlinear diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/linear.py b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..db48a825756b14c26b50b14cbc53d38c9545ac7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/linear.py @@ -0,0 +1,197 @@ +from typing import Optional + +import torch +from torch.ao.nn.quantized.modules.utils import _quantize_weight, _hide_packed_params_repr + +__all__ = ['LinearPackedParams', 'Linear'] + +# TODO (zaf): Inherit from `quantized.LinearPackedParams` (T83294430) +class LinearPackedParams(torch.nn.Module): + _version = 1 + + def __init__(self, row_block_size=1, col_block_size=4, dtype=torch.qint8): + super().__init__() + + if dtype != torch.qint8: + raise NotImplementedError("Linear prepacking only supports QINT8") + self.dtype = dtype + wq = torch._empty_affine_quantized([1, 1], scale=1.0, zero_point=0, dtype=torch.qint8) + self.set_weight_bias(wq, None, row_block_size, col_block_size) + + def _get_name(self): + return "SparseQuantizedLinearPackedParams" + + @torch.jit.export + def set_weight_bias(self, weight: torch.Tensor, bias: Optional[torch.Tensor], + row_block_size: Optional[int], col_block_size: Optional[int]) -> None: + assert row_block_size is not None and col_block_size is not None + self._packed_params = torch.ops.sparse.qlinear_prepack(weight, bias, row_block_size, col_block_size) + + @torch.jit.export + def _weight_bias(self): + (weight, bias, block_sizes) = torch.ops.sparse.qlinear_unpack(self._packed_params) + return (weight, bias, block_sizes[0], block_sizes[1]) + + def forward(self, x): + return x + + def _save_to_state_dict(self, destination, prefix, keep_vars): + super()._save_to_state_dict(destination, prefix, keep_vars) + destination[prefix + 'dtype'] = self.dtype + destination[prefix + '_packed_params'] = self._weight_bias() + + def 
_load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + version = local_metadata.get('version', None) + assert version <= self._version + + self.dtype = state_dict.pop(prefix + 'dtype') + weight, bias, row_block_size, col_block_size = state_dict.pop(prefix + '_packed_params') + self.set_weight_bias(weight, bias, row_block_size, col_block_size) + + super()._load_from_state_dict(state_dict, prefix, local_metadata, False, + missing_keys, unexpected_keys, error_msgs) + + @torch.jit.export + def __getstate__(self): + return self._packed_params, self.training, self.dtype + + @torch.jit.export + def __setstate__(self, state): + (self._packed_params, self.training, self.dtype) = state + + def __repr__(self): + return self._weight_bias().__repr__() + +# TODO (zaf): Inherit from `quantized.Linear` (T83294430) +class Linear(torch.nn.Module): + r""" + A quantized sparse linear module with quantized tensor as inputs and outputs. + """ + _version = 1 + _FLOAT_MODULE = torch.nn.Linear + + def __init__(self, in_features, out_features, row_block_size, col_block_size, bias=True, dtype=torch.qint8): + super().__init__() + + if dtype != torch.qint8: + raise NotImplementedError("Only QINT8 is supported for Sparse Quantized Linear") + + self.in_features = in_features + self.out_features = out_features + + if bias: + bias = torch.zeros(self.out_features, dtype=torch.float) + else: + bias = None + + qweight = torch._empty_affine_quantized([out_features, in_features], + scale=1, zero_point=0, dtype=torch.qint8) + self._packed_params = LinearPackedParams(row_block_size=row_block_size, + col_block_size=col_block_size, + dtype=dtype) + self._packed_params.set_weight_bias(qweight, bias, row_block_size, col_block_size) + self.scale = 1.0 + self.zero_point = 0 + + @classmethod + def _get_name(cls): + return 'SparseQuantizedLinear' + + def extra_repr(self): + return 'in_features={}, out_features={}, scale={}, zero_point={}, qscheme={}'.format( + self.in_features, self.out_features, self.scale, self.zero_point, self.weight().qscheme() + ) + + def __repr__(self): + return _hide_packed_params_repr(self, LinearPackedParams) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch.ops.sparse.qlinear(x, self._packed_params._packed_params, self.scale, self.zero_point) + + def _save_to_state_dict(self, destination, prefix, keep_vars): + super()._save_to_state_dict(destination, prefix, keep_vars) + destination[prefix + 'scale'] = torch.tensor(self.scale) + destination[prefix + 'zero_point'] = torch.tensor(self.zero_point) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + self.scale = float(state_dict[prefix + 'scale']) + state_dict.pop(prefix + 'scale') + + self.zero_point = int(state_dict[prefix + 'zero_point']) + state_dict.pop(prefix + 'zero_point') + + op_type = int(state_dict[prefix + 'op_type']) + state_dict.pop(prefix + 'op_type') + + version = local_metadata.get('version', None) + assert version <= self._version + + super()._load_from_state_dict( + state_dict, prefix, local_metadata, False, + missing_keys, unexpected_keys, error_msgs) + + def _weight_bias(self): + return self._packed_params._weight_bias() + + def weight(self): + return self._weight_bias()[0] + + def bias(self): + return self._weight_bias()[1] + + def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor], + row_block_size: Optional[int], col_block_size: Optional[int]) -> None: + assert 
row_block_size is not None and col_block_size is not None + self._packed_params.set_weight_bias(w, b, row_block_size, col_block_size) + + @classmethod + def from_float(cls, mod): + r"""Create a quantized sparse module from a float module. + + We only care about the convert at this stage, no need for observers just yet. + + TODO(zaf): Need to add the sparse params to the qconfig + """ + assert type(mod) == cls._FLOAT_MODULE, cls._get_name() + \ + '.from_float only works for ' + cls._FLOAT_MODULE.__name__ + assert hasattr(mod, 'sparse_params'), \ + ('Expecting the Linear to have `sparse_params`. Make sure you have provided arguments ' + 'in the `sparsifier.squash_mask(params_to_save=("sparse_block_shape",))` method.') + sparse_block_shape = mod.sparse_params.get('sparse_block_shape', None) # type: ignore[operator, union-attr] + assert isinstance(sparse_block_shape, (tuple, list)) + assert len(sparse_block_shape) == 2 + # TODO: Need to add options to qconfig to avoid the calibration. + # TODO: Add calibration for the sparsity + assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined' + activation_post_process = mod.activation_post_process + weight_post_process = mod.qconfig.weight() # type: ignore[operator, union-attr] + + # Assumption is that the weight is already sparsified by the + # `sparsifier.convert` + weight = mod.weight + + weight_post_process(weight) + dtype = weight_post_process.dtype + act_scale, act_zp = activation_post_process.calculate_qparams() # type: ignore[operator, union-attr] + assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8' + w_sc, w_zp = weight_post_process.calculate_qparams() + if isinstance(w_zp, torch.Tensor): + assert not torch.any(w_zp.bool()), "All weight zero points must map to 0" + else: + assert w_zp == 0, 'Weight zero point must map to 0' + qweight = _quantize_weight(weight.float(), weight_post_process) + + row_block_size = mod.sparse_params['sparse_block_shape'][0] # type: ignore[index] + col_block_size = mod.sparse_params['sparse_block_shape'][1] # type: ignore[index] + qlinear = cls(mod.in_features, + mod.out_features, + row_block_size, + col_block_size, + dtype=dtype) + qlinear.set_weight_bias(qweight, mod.bias, + row_block_size, col_block_size) # type: ignore[arg-type] + qlinear.scale = float(act_scale) + qlinear.zero_point = int(act_zp) + return qlinear diff --git a/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/utils.py b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3d934f57857436dd0ea7945e327cf3d0532c4c10 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/utils.py @@ -0,0 +1,42 @@ +import threading + +__all__ = [ + "LinearBlockSparsePattern" +] + +def _is_valid_linear_block_sparse_pattern(row_block_size, col_block_size): + return (row_block_size == 1 and col_block_size == 4) or \ + (row_block_size == 8 and col_block_size == 1) + +# This is a stop-gap measure as current flow does not allow module +# specific block sparse pattern. +# Infact there is no way to convey sparse pattern via module config +# of quantization flow. Thus using the global context to convey +# sparsity pattern. +# Once the flow supports it, this should be removed. 
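+# Typical usage: wrap the conversion step in
+#   with LinearBlockSparsePattern(row_block_size, col_block_size):
+#       ...
+# so that the sparse quantized Linear.from_float picks the pattern up via
+# LinearBlockSparsePattern.block_size().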
+class LinearBlockSparsePattern: + rlock = threading.RLock() + row_block_size = 1 + col_block_size = 4 + prev_row_block_size = 1 + prev_col_block_size = 4 + + def __init__(self, row_block_size=1, col_block_size=4): + assert _is_valid_linear_block_sparse_pattern(row_block_size, col_block_size) + LinearBlockSparsePattern.rlock.acquire() + LinearBlockSparsePattern.prev_row_block_size = LinearBlockSparsePattern.row_block_size + LinearBlockSparsePattern.prev_col_block_size = LinearBlockSparsePattern.col_block_size + LinearBlockSparsePattern.row_block_size = row_block_size + LinearBlockSparsePattern.col_block_size = col_block_size + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_value, backtrace): + LinearBlockSparsePattern.row_block_size = LinearBlockSparsePattern.prev_row_block_size + LinearBlockSparsePattern.col_block_size = LinearBlockSparsePattern.prev_col_block_size + LinearBlockSparsePattern.rlock.release() + + @staticmethod + def block_size(): + return LinearBlockSparsePattern.row_block_size, LinearBlockSparsePattern.col_block_size diff --git a/venv/lib/python3.10/site-packages/torch/bin/torch_shm_manager b/venv/lib/python3.10/site-packages/torch/bin/torch_shm_manager new file mode 100644 index 0000000000000000000000000000000000000000..f2697deaa9770f09c382252399d0640e0784e10d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/bin/torch_shm_manager differ
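Note: the snippet below is not part of the diff above; it is a minimal sketch of how the reference quantized Linear added in torch/ao/nn/quantized/reference/modules/linear.py could be exercised directly. The scale and zero_point values are illustrative placeholders; in the FX graph-mode workflow they would normally come from the weight observer.

import torch
from torch.ao.nn.quantized.reference.modules.linear import Linear as RefLinear

float_linear = torch.nn.Linear(16, 8)

# Per-tensor affine qparams for the weight; the numeric values are placeholders.
weight_qparams = {
    "qscheme": torch.per_tensor_affine,
    "dtype": torch.qint8,
    "scale": 0.02,
    "zero_point": 0,
}

ref_linear = RefLinear.from_float(float_linear, weight_qparams)
x = torch.randn(4, 16)
y = ref_linear(x)  # weight is quantized and dequantized before F.linear runs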