Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__pycache__/thnn.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/backends/thnn.py +4 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/__init__.py +35 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__init__.py +31 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/fused.py +30 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__init__.py +1 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/conv_fused.py +37 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_fused.py +15 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__init__.py +13 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__init__.py +12 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/bn_relu.py +7 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/linear_relu.py +5 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/_functions.py +288 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/flatten.py +144 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/fold.py +303 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/instancenorm.py +434 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/module.py +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/padding.py +801 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/pixelshuffle.py +113 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/transformer.py +975 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/upsampling.py +264 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/utils.py +79 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/qat/__init__.py +18 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/__init__.py +7 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__init__.py +3 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/linear.py +10 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/qat/modules/__init__.py +24 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/qat/modules/conv.py +12 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/functional_modules.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/activation.py +18 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/conv.py +21 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/dropout.py +13 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/embedding_ops.py +15 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/functional_modules.py +15 -0
llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__init__.py
ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (190 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__pycache__/thnn.cpython-310.pyc
ADDED
Binary file (302 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/backends/thnn.py
ADDED
@@ -0,0 +1,4 @@
# this is for historical pickle deserialization, it is not used otherwise

def _get_thnn_function_backend():
    pass
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/__init__.py
ADDED
@@ -0,0 +1,35 @@
from torch.ao.nn.intrinsic import ConvBn1d
from torch.ao.nn.intrinsic import ConvBn2d
from torch.ao.nn.intrinsic import ConvBn3d
from torch.ao.nn.intrinsic import ConvBnReLU1d
from torch.ao.nn.intrinsic import ConvBnReLU2d
from torch.ao.nn.intrinsic import ConvBnReLU3d
from torch.ao.nn.intrinsic import ConvReLU1d
from torch.ao.nn.intrinsic import ConvReLU2d
from torch.ao.nn.intrinsic import ConvReLU3d
from torch.ao.nn.intrinsic import LinearReLU
from torch.ao.nn.intrinsic import BNReLU2d
from torch.ao.nn.intrinsic import BNReLU3d
from torch.ao.nn.intrinsic import LinearBn1d
from torch.ao.nn.intrinsic.modules.fused import _FusedModule  # noqa: F401

# Include the subpackages in case user imports from it directly
from . import modules  # noqa: F401
from . import qat  # noqa: F401
from . import quantized  # noqa: F401

__all__ = [
    'ConvBn1d',
    'ConvBn2d',
    'ConvBn3d',
    'ConvBnReLU1d',
    'ConvBnReLU2d',
    'ConvBnReLU3d',
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
    'LinearReLU',
    'BNReLU2d',
    'BNReLU3d',
    'LinearBn1d',
]
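The file above is a thin compatibility shim that re-exports the fused module types from torch.ao.nn.intrinsic. As a rough illustration of where these types typically come from (added here for context, not part of the uploaded diff), eager-mode fusion produces them; a minimal sketch assuming a stock PyTorch install:

# Illustration only (not part of the uploaded diff): eager-mode fusion of
# Conv2d + BatchNorm2d + ReLU yields one of the intrinsic types re-exported above.
import torch.nn as nn
from torch.ao.quantization import fuse_modules

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
model.eval()  # eager fusion folds BN into the conv, so eval mode is required here
fused = fuse_modules(model, [['0', '1', '2']])
print(type(fused[0]))  # expected: a ConvReLU2d intrinsic module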
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (892 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__init__.py
ADDED
@@ -0,0 +1,31 @@
from .fused import _FusedModule  # noqa: F401
from .fused import BNReLU2d
from .fused import BNReLU3d
from .fused import ConvBn1d
from .fused import ConvBn2d
from .fused import ConvBn3d
from .fused import ConvBnReLU1d
from .fused import ConvBnReLU2d
from .fused import ConvBnReLU3d
from .fused import ConvReLU1d
from .fused import ConvReLU2d
from .fused import ConvReLU3d
from .fused import LinearBn1d
from .fused import LinearReLU


__all__ = [
    'BNReLU2d',
    'BNReLU3d',
    'ConvBn1d',
    'ConvBn2d',
    'ConvBn3d',
    'ConvBnReLU1d',
    'ConvBnReLU2d',
    'ConvBnReLU3d',
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
    'LinearBn1d',
    'LinearReLU',
]
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (752 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc
ADDED
Binary file (802 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/fused.py
ADDED
@@ -0,0 +1,30 @@
from torch.ao.nn.intrinsic import BNReLU2d
from torch.ao.nn.intrinsic import BNReLU3d
from torch.ao.nn.intrinsic import ConvBn1d
from torch.ao.nn.intrinsic import ConvBn2d
from torch.ao.nn.intrinsic import ConvBn3d
from torch.ao.nn.intrinsic import ConvBnReLU1d
from torch.ao.nn.intrinsic import ConvBnReLU2d
from torch.ao.nn.intrinsic import ConvBnReLU3d
from torch.ao.nn.intrinsic import ConvReLU1d
from torch.ao.nn.intrinsic import ConvReLU2d
from torch.ao.nn.intrinsic import ConvReLU3d
from torch.ao.nn.intrinsic import LinearBn1d
from torch.ao.nn.intrinsic import LinearReLU
from torch.ao.nn.intrinsic.modules.fused import _FusedModule  # noqa: F401

__all__ = [
    'BNReLU2d',
    'BNReLU3d',
    'ConvBn1d',
    'ConvBn2d',
    'ConvBn3d',
    'ConvBnReLU1d',
    'ConvBnReLU2d',
    'ConvBnReLU3d',
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
    'LinearBn1d',
    'LinearReLU',
]
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__init__.py
ADDED
@@ -0,0 +1 @@
from .modules import *  # noqa: F403
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (219 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (647 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc
ADDED
Binary file (1.04 kB)
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc
ADDED
Binary file (653 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc
ADDED
Binary file (652 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/conv_fused.py
ADDED
@@ -0,0 +1,37 @@
# flake8: noqa: F401
r"""Intrinsic QAT Modules.

This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
while adding an import statement here.
"""

__all__ = [
    # Modules
    'ConvBn1d',
    'ConvBnReLU1d',
    'ConvReLU1d',
    'ConvBn2d',
    'ConvBnReLU2d',
    'ConvReLU2d',
    'ConvBn3d',
    'ConvBnReLU3d',
    'ConvReLU3d',
    # Utilities
    'freeze_bn_stats',
    'update_bn_stats',
]

from torch.ao.nn.intrinsic.qat import ConvBn1d
from torch.ao.nn.intrinsic.qat import ConvBnReLU1d
from torch.ao.nn.intrinsic.qat import ConvReLU1d
from torch.ao.nn.intrinsic.qat import ConvBn2d
from torch.ao.nn.intrinsic.qat import ConvBnReLU2d
from torch.ao.nn.intrinsic.qat import ConvReLU2d
from torch.ao.nn.intrinsic.qat import ConvBn3d
from torch.ao.nn.intrinsic.qat import ConvBnReLU3d
from torch.ao.nn.intrinsic.qat import ConvReLU3d
from torch.ao.nn.intrinsic.qat import freeze_bn_stats
from torch.ao.nn.intrinsic.qat import update_bn_stats
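Since every legacy module in this folder only re-imports the torch.ao implementation, both import paths resolve to the same class objects. A minimal sketch (added for illustration, not part of the uploaded diff):

# Both the legacy torch.nn.intrinsic.qat path and the torch.ao path should
# point at the very same class object, because the legacy file re-exports it.
from torch.nn.intrinsic.qat import ConvBnReLU2d as legacy_ConvBnReLU2d
from torch.ao.nn.intrinsic.qat import ConvBnReLU2d as ao_ConvBnReLU2d

assert legacy_ConvBnReLU2d is ao_ConvBnReLU2d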
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_fused.py
ADDED
@@ -0,0 +1,15 @@
# flake8: noqa: F401
r"""Intrinsic QAT Modules.

This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
while adding an import statement here.
"""

__all__ = [
    'LinearBn1d',
]

from torch.ao.nn.intrinsic.qat import LinearBn1d
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__init__.py
ADDED
@@ -0,0 +1,13 @@
from .modules import *  # noqa: F403
# to ensure customers can use the module below
# without importing it directly
import torch.nn.intrinsic.quantized.dynamic

__all__ = [
    'BNReLU2d',
    'BNReLU3d',
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
    'LinearReLU',
]
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (287 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc
ADDED
Binary file (318 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__init__.py
ADDED
@@ -0,0 +1,12 @@
from .linear_relu import LinearReLU
from .conv_relu import ConvReLU1d, ConvReLU2d, ConvReLU3d
from .bn_relu import BNReLU2d, BNReLU3d

__all__ = [
    'LinearReLU',
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
    'BNReLU2d',
    'BNReLU3d',
]
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/bn_relu.py
ADDED
@@ -0,0 +1,7 @@
from torch.ao.nn.intrinsic.quantized import BNReLU2d
from torch.ao.nn.intrinsic.quantized import BNReLU3d

__all__ = [
    'BNReLU2d',
    'BNReLU3d',
]
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/linear_relu.py
ADDED
@@ -0,0 +1,5 @@
from torch.ao.nn.intrinsic.quantized import LinearReLU

__all__ = [
    'LinearReLU',
]
llmeval-env/lib/python3.10/site-packages/torch/nn/modules/_functions.py
ADDED
@@ -0,0 +1,288 @@
import torch
import torch.distributed as dist

from torch.autograd.function import Function

class SyncBatchNorm(Function):

    @staticmethod
    def forward(self, input, weight, bias, running_mean, running_var, eps, momentum, process_group, world_size):
        if not (
            input.is_contiguous(memory_format=torch.channels_last) or
            input.is_contiguous(memory_format=torch.channels_last_3d)
        ):
            input = input.contiguous()
        if weight is not None:
            weight = weight.contiguous()

        size = int(input.numel() // input.size(1))
        if size == 1 and world_size < 2:
            raise ValueError(f'Expected more than 1 value per channel when training, got input size {size}')

        num_channels = input.shape[1]
        if input.numel() > 0:
            # calculate mean/invstd for input.
            mean, invstd = torch.batch_norm_stats(input, eps)

            count = torch.full(
                (1,),
                input.numel() // input.size(1),
                dtype=mean.dtype,
                device=mean.device
            )

            # C, C, 1 -> (2C + 1)
            combined = torch.cat([mean, invstd, count], dim=0)
        else:
            # for empty input, set stats and the count to zero. The stats with
            # zero count will be filtered out later when computing global mean
            # & invstd, but they still needs to participate the all_gather
            # collective communication to unblock other peer processes.
            combined = torch.zeros(
                2 * num_channels + 1,
                dtype=input.dtype,
                device=input.device
            )

        # Use allgather instead of allreduce because count could be different across
        # ranks, simple all reduce op can not give correct results.
        # batch_norm_gather_stats_with_counts calculates global mean & invstd based on
        # all gathered mean, invstd and count.
        # for nccl backend, use the optimized version of all gather.
        # The Gloo backend does not support `all_gather_into_tensor`.
        if process_group._get_backend_name() != "gloo":
            # world_size * (2C + 1)
            combined_size = combined.numel()
            combined_flat = torch.empty(1,
                                        combined_size * world_size,
                                        dtype=combined.dtype,
                                        device=combined.device)
            dist.all_gather_into_tensor(combined_flat, combined, process_group, async_op=False)
            combined = torch.reshape(combined_flat, (world_size, combined_size))
            # world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
            mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
        else:
            # world_size * (2C + 1)
            combined_list = [
                torch.empty_like(combined) for _ in range(world_size)
            ]
            dist.all_gather(combined_list, combined, process_group, async_op=False)
            combined = torch.stack(combined_list, dim=0)
            # world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
            mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)

        if not (torch.cuda.is_available() and torch.cuda.is_current_stream_capturing()):
            # The lines below force a synchronization between CUDA and CPU, because
            # the shape of the result count_all depends on the values in mask tensor.
            # Such synchronizations break CUDA Graph capturing.
            # See https://github.com/pytorch/pytorch/issues/78549
            # FIXME: https://github.com/pytorch/pytorch/issues/78656 describes
            # a better longer-term solution.

            # remove stats from empty inputs
            mask = count_all.squeeze(-1) >= 1
            count_all = count_all[mask]
            mean_all = mean_all[mask]
            invstd_all = invstd_all[mask]

        # calculate global mean & invstd
        counts = count_all.view(-1)
        if running_mean is not None and counts.dtype != running_mean.dtype:
            counts = counts.to(running_mean.dtype)
        mean, invstd = torch.batch_norm_gather_stats_with_counts(
            input,
            mean_all,
            invstd_all,
            running_mean,
            running_var,
            momentum,
            eps,
            counts,
        )

        self.save_for_backward(input, weight, mean, invstd, count_all.to(torch.int32))
        self.process_group = process_group

        # apply element-wise normalization
        if input.numel() > 0:
            return torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
        else:
            return torch.empty_like(input)

    @staticmethod
    def backward(self, grad_output):
        if not (
            grad_output.is_contiguous(memory_format=torch.channels_last) or
            grad_output.is_contiguous(memory_format=torch.channels_last_3d)
        ):
            grad_output = grad_output.contiguous()
        saved_input, weight, mean, invstd, count_tensor = self.saved_tensors
        grad_input = grad_weight = grad_bias = None
        process_group = self.process_group

        if saved_input.numel() > 0:
            # calculate local stats as well as grad_weight / grad_bias
            sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(
                grad_output,
                saved_input,
                mean,
                invstd,
                weight,
                self.needs_input_grad[0],
                self.needs_input_grad[1],
                self.needs_input_grad[2]
            )

            if self.needs_input_grad[0]:
                # synchronizing stats used to calculate input gradient.
                num_channels = sum_dy.shape[0]
                combined = torch.cat([sum_dy, sum_dy_xmu], dim=0)
                torch.distributed.all_reduce(
                    combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False)
                sum_dy, sum_dy_xmu = torch.split(combined, num_channels)

                # backward pass for gradient calculation
                if weight is not None and weight.dtype != mean.dtype:
                    weight = weight.to(mean.dtype)
                grad_input = torch.batch_norm_backward_elemt(
                    grad_output,
                    saved_input,
                    mean,
                    invstd,
                    weight,
                    sum_dy,
                    sum_dy_xmu,
                    count_tensor
                )
            # synchronizing of grad_weight / grad_bias is not needed as distributed
            # training would handle all reduce.
            if weight is None or not self.needs_input_grad[1]:
                grad_weight = None

            if weight is None or not self.needs_input_grad[2]:
                grad_bias = None
        else:
            # This process got an empty input tensor in the forward pass.
            # Although this process can directly set grad_input as an empty
            # tensor of zeros, it still needs to participate in the collective
            # communication to unblock its peers, as other peer processes might
            # have received non-empty inputs.
            num_channels = saved_input.shape[1]
            if self.needs_input_grad[0]:
                # launch all_reduce to unblock other peer processes
                combined = torch.zeros(
                    2 * num_channels,
                    dtype=saved_input.dtype,
                    device=saved_input.device
                )
                torch.distributed.all_reduce(
                    combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False)

            # Leave grad_input, grad_weight and grad_bias as None, which will be
            # interpreted by the autograd engine as Tensors full of zeros.

        return grad_input, grad_weight, grad_bias, None, None, None, None, None, None

class CrossMapLRN2d(Function):

    @staticmethod
    def forward(ctx, input, size, alpha=1e-4, beta=0.75, k=1):
        ctx.size = size
        ctx.alpha = alpha
        ctx.beta = beta
        ctx.k = k
        ctx.scale = None

        if input.dim() != 4:
            raise ValueError(f"CrossMapLRN2d: Expected input to be 4D, got {input.dim()}D instead.")

        ctx.scale = ctx.scale or input.new()
        output = input.new()

        batch_size = input.size(0)
        channels = input.size(1)
        input_height = input.size(2)
        input_width = input.size(3)

        output.resize_as_(input)
        ctx.scale.resize_as_(input)

        # use output storage as temporary buffer
        input_square = output
        torch.pow(input, 2, out=input_square)

        pre_pad = int((ctx.size - 1) / 2 + 1)
        pre_pad_crop = min(pre_pad, channels)

        scale_first = ctx.scale.select(1, 0)
        scale_first.zero_()
        # compute first feature map normalization
        for c in range(pre_pad_crop):
            scale_first.add_(input_square.select(1, c))

        # reuse computations for next feature maps normalization
        # by adding the next feature map and removing the previous
        for c in range(1, channels):
            scale_previous = ctx.scale.select(1, c - 1)
            scale_current = ctx.scale.select(1, c)
            scale_current.copy_(scale_previous)
            if c < channels - pre_pad + 1:
                square_next = input_square.select(1, c + pre_pad - 1)
                scale_current.add_(square_next, alpha=1)

            if c > pre_pad:
                square_previous = input_square.select(1, c - pre_pad)
                scale_current.add_(square_previous, alpha=-1)

        ctx.scale.mul_(ctx.alpha / ctx.size).add_(ctx.k)

        torch.pow(ctx.scale, -ctx.beta, out=output)
        output.mul_(input)

        ctx.save_for_backward(input, output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input, output = ctx.saved_tensors
        grad_input = grad_output.new()

        batch_size = input.size(0)
        channels = input.size(1)
        input_height = input.size(2)
        input_width = input.size(3)

        paddded_ratio = input.new(channels + ctx.size - 1, input_height,
                                  input_width)
        accum_ratio = input.new(input_height, input_width)

        cache_ratio_value = 2 * ctx.alpha * ctx.beta / ctx.size
        inversePrePad = int(ctx.size - (ctx.size - 1) / 2)

        grad_input.resize_as_(input)
        torch.pow(ctx.scale, -ctx.beta, out=grad_input).mul_(grad_output)

        paddded_ratio.zero_()
        padded_ratio_center = paddded_ratio.narrow(0, inversePrePad,
                                                   channels)
        for n in range(batch_size):
            torch.mul(grad_output[n], output[n], out=padded_ratio_center)
            padded_ratio_center.div_(ctx.scale[n])
            torch.sum(
                paddded_ratio.narrow(0, 0, ctx.size - 1), 0, keepdim=False, out=accum_ratio)
            for c in range(channels):
                accum_ratio.add_(paddded_ratio[c + ctx.size - 1])
                grad_input[n][c].addcmul_(input[n][c], accum_ratio, value=-cache_ratio_value)
                accum_ratio.add_(paddded_ratio[c], alpha=-1)

        return grad_input, None, None, None, None

class BackwardHookFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, *args):
        ctx.mark_non_differentiable(*[arg for arg in args if not arg.requires_grad])
        return args

    @staticmethod
    def backward(ctx, *args):
        return args
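The SyncBatchNorm autograd Function above all-gathers per-rank (mean, invstd, count) tuples and reduces them with batch_norm_gather_stats_with_counts. Users normally reach it through torch.nn.SyncBatchNorm inside a distributed job; a hedged sketch (illustration only, not part of the diff; assumes a process group is initialized before any forward pass):

# Illustration only: converting BatchNorm layers so that the SyncBatchNorm
# Function above synchronizes statistics across ranks during training.
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
print(type(sync_model[1]))  # torch.nn.SyncBatchNorm; its forward needs an initialized process group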
llmeval-env/lib/python3.10/site-packages/torch/nn/modules/flatten.py
ADDED
@@ -0,0 +1,144 @@
from .module import Module

from typing import Tuple, Union
from torch import Tensor
from torch.types import _size

__all__ = ['Flatten', 'Unflatten']

class Flatten(Module):
    r"""
    Flattens a contiguous range of dims into a tensor.

    For use with :class:`~nn.Sequential`, see :meth:`torch.flatten` for details.

    Shape:
        - Input: :math:`(*, S_{\text{start}},..., S_{i}, ..., S_{\text{end}}, *)`,
          where :math:`S_{i}` is the size at dimension :math:`i` and :math:`*` means any
          number of dimensions including none.
        - Output: :math:`(*, \prod_{i=\text{start}}^{\text{end}} S_{i}, *)`.

    Args:
        start_dim: first dim to flatten (default = 1).
        end_dim: last dim to flatten (default = -1).

    Examples::
        >>> input = torch.randn(32, 1, 5, 5)
        >>> # With default parameters
        >>> m = nn.Flatten()
        >>> output = m(input)
        >>> output.size()
        torch.Size([32, 25])
        >>> # With non-default parameters
        >>> m = nn.Flatten(0, 2)
        >>> output = m(input)
        >>> output.size()
        torch.Size([160, 5])
    """

    __constants__ = ['start_dim', 'end_dim']
    start_dim: int
    end_dim: int

    def __init__(self, start_dim: int = 1, end_dim: int = -1) -> None:
        super().__init__()
        self.start_dim = start_dim
        self.end_dim = end_dim

    def forward(self, input: Tensor) -> Tensor:
        return input.flatten(self.start_dim, self.end_dim)

    def extra_repr(self) -> str:
        return f'start_dim={self.start_dim}, end_dim={self.end_dim}'


class Unflatten(Module):
    r"""
    Unflattens a tensor dim expanding it to a desired shape. For use with :class:`~nn.Sequential`.

    * :attr:`dim` specifies the dimension of the input tensor to be unflattened, and it can
      be either `int` or `str` when `Tensor` or `NamedTensor` is used, respectively.

    * :attr:`unflattened_size` is the new shape of the unflattened dimension of the tensor and it can be
      a `tuple` of ints or a `list` of ints or `torch.Size` for `Tensor` input; a `NamedShape`
      (tuple of `(name, size)` tuples) for `NamedTensor` input.

    Shape:
        - Input: :math:`(*, S_{\text{dim}}, *)`, where :math:`S_{\text{dim}}` is the size at
          dimension :attr:`dim` and :math:`*` means any number of dimensions including none.
        - Output: :math:`(*, U_1, ..., U_n, *)`, where :math:`U` = :attr:`unflattened_size` and
          :math:`\prod_{i=1}^n U_i = S_{\text{dim}}`.

    Args:
        dim (Union[int, str]): Dimension to be unflattened
        unflattened_size (Union[torch.Size, Tuple, List, NamedShape]): New shape of the unflattened dimension

    Examples:
        >>> input = torch.randn(2, 50)
        >>> # With tuple of ints
        >>> m = nn.Sequential(
        >>>     nn.Linear(50, 50),
        >>>     nn.Unflatten(1, (2, 5, 5))
        >>> )
        >>> output = m(input)
        >>> output.size()
        torch.Size([2, 2, 5, 5])
        >>> # With torch.Size
        >>> m = nn.Sequential(
        >>>     nn.Linear(50, 50),
        >>>     nn.Unflatten(1, torch.Size([2, 5, 5]))
        >>> )
        >>> output = m(input)
        >>> output.size()
        torch.Size([2, 2, 5, 5])
        >>> # With namedshape (tuple of tuples)
        >>> input = torch.randn(2, 50, names=('N', 'features'))
        >>> unflatten = nn.Unflatten('features', (('C', 2), ('H', 5), ('W', 5)))
        >>> output = unflatten(input)
        >>> output.size()
        torch.Size([2, 2, 5, 5])
    """

    NamedShape = Tuple[Tuple[str, int]]

    __constants__ = ['dim', 'unflattened_size']
    dim: Union[int, str]
    unflattened_size: Union[_size, NamedShape]

    def __init__(self, dim: Union[int, str], unflattened_size: Union[_size, NamedShape]) -> None:
        super().__init__()

        if isinstance(dim, int):
            self._require_tuple_int(unflattened_size)
        elif isinstance(dim, str):
            self._require_tuple_tuple(unflattened_size)
        else:
            raise TypeError("invalid argument type for dim parameter")

        self.dim = dim
        self.unflattened_size = unflattened_size

    def _require_tuple_tuple(self, input):
        if (isinstance(input, tuple)):
            for idx, elem in enumerate(input):
                if not isinstance(elem, tuple):
                    raise TypeError("unflattened_size must be tuple of tuples, " +
                                    f"but found element of type {type(elem).__name__} at pos {idx}")
            return
        raise TypeError("unflattened_size must be a tuple of tuples, " +
                        f"but found type {type(input).__name__}")

    def _require_tuple_int(self, input):
        if (isinstance(input, (tuple, list))):
            for idx, elem in enumerate(input):
                if not isinstance(elem, int):
                    raise TypeError("unflattened_size must be tuple of ints, " +
                                    f"but found element of type {type(elem).__name__} at pos {idx}")
            return
        raise TypeError(f"unflattened_size must be a tuple of ints, but found type {type(input).__name__}")

    def forward(self, input: Tensor) -> Tensor:
        return input.unflatten(self.dim, self.unflattened_size)

    def extra_repr(self) -> str:
        return f'dim={self.dim}, unflattened_size={self.unflattened_size}'
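A short round-trip sketch for the two modules above (illustration only, not part of the uploaded diff): Flatten collapses dims 1..-1 into one axis and Unflatten restores them.

# Flatten then Unflatten is shape-preserving when the target sizes match up.
import torch
import torch.nn as nn

x = torch.randn(4, 3, 8, 8)
flat = nn.Flatten(start_dim=1)(x)             # shape (4, 192)
restored = nn.Unflatten(1, (3, 8, 8))(flat)   # shape (4, 3, 8, 8)
assert torch.equal(x, restored)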
llmeval-env/lib/python3.10/site-packages/torch/nn/modules/fold.py
ADDED
@@ -0,0 +1,303 @@
from .module import Module
from .. import functional as F

from torch import Tensor
from ..common_types import _size_any_t

__all__ = ['Fold', 'Unfold']

class Fold(Module):
    r"""Combines an array of sliding local blocks into a large containing tensor.

    Consider a batched :attr:`input` tensor containing sliding local blocks,
    e.g., patches of images, of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`,
    where :math:`N` is batch dimension, :math:`C \times \prod(\text{kernel\_size})`
    is the number of values within a block (a block has :math:`\prod(\text{kernel\_size})`
    spatial locations each containing a :math:`C`-channeled vector), and
    :math:`L` is the total number of blocks. (This is exactly the
    same specification as the output shape of :class:`~torch.nn.Unfold`.) This
    operation combines these local blocks into the large :attr:`output` tensor
    of shape :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)`
    by summing the overlapping values. Similar to :class:`~torch.nn.Unfold`, the
    arguments must satisfy

    .. math::
        L = \prod_d \left\lfloor\frac{\text{output\_size}[d] + 2 \times \text{padding}[d] %
            - \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,

    where :math:`d` is over all spatial dimensions.

    * :attr:`output_size` describes the spatial shape of the large containing
      tensor of the sliding local blocks. It is useful to resolve the ambiguity
      when multiple input shapes map to same number of sliding blocks, e.g.,
      with ``stride > 0``.

    The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify
    how the sliding blocks are retrieved.

    * :attr:`stride` controls the stride for the sliding blocks.

    * :attr:`padding` controls the amount of implicit zero-paddings on both
      sides for :attr:`padding` number of points for each dimension before
      reshaping.

    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.

    Args:
        output_size (int or tuple): the shape of the spatial dimensions of the
                                    output (i.e., ``output.sizes()[2:]``)
        kernel_size (int or tuple): the size of the sliding blocks
        dilation (int or tuple, optional): a parameter that controls the
                                           stride of elements within the
                                           neighborhood. Default: 1
        padding (int or tuple, optional): implicit zero padding to be added on
                                          both sides of input. Default: 0
        stride (int or tuple): the stride of the sliding blocks in the input
                               spatial dimensions. Default: 1

    * If :attr:`output_size`, :attr:`kernel_size`, :attr:`dilation`,
      :attr:`padding` or :attr:`stride` is an int or a tuple of length 1 then
      their values will be replicated across all spatial dimensions.

    * For the case of two output spatial dimensions this operation is sometimes
      called ``col2im``.

    .. note::
        :class:`~torch.nn.Fold` calculates each combined value in the resulting
        large tensor by summing all values from all containing blocks.
        :class:`~torch.nn.Unfold` extracts the values in the local blocks by
        copying from the large tensor. So, if the blocks overlap, they are not
        inverses of each other.

        In general, folding and unfolding operations are related as
        follows. Consider :class:`~torch.nn.Fold` and
        :class:`~torch.nn.Unfold` instances created with the same
        parameters:

        >>> fold_params = dict(kernel_size=..., dilation=..., padding=..., stride=...)
        >>> fold = nn.Fold(output_size=..., **fold_params)
        >>> unfold = nn.Unfold(**fold_params)

        Then for any (supported) ``input`` tensor the following
        equality holds:

        ::

            fold(unfold(input)) == divisor * input

        where ``divisor`` is a tensor that depends only on the shape
        and dtype of the ``input``:

        >>> # xdoctest: +SKIP
        >>> input_ones = torch.ones(input.shape, dtype=input.dtype)
        >>> divisor = fold(unfold(input_ones))

        When the ``divisor`` tensor contains no zero elements, then
        ``fold`` and ``unfold`` operations are inverses of each
        other (up to constant divisor).

    .. warning::
        Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported.

    Shape:
        - Input: :math:`(N, C \times \prod(\text{kernel\_size}), L)` or :math:`(C \times \prod(\text{kernel\_size}), L)`
        - Output: :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)`
          or :math:`(C, \text{output\_size}[0], \text{output\_size}[1], \dots)` as described above

    Examples::

        >>> fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 2))
        >>> input = torch.randn(1, 3 * 2 * 2, 12)
        >>> output = fold(input)
        >>> output.size()
        torch.Size([1, 3, 4, 5])

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md

    """

    __constants__ = ['output_size', 'kernel_size', 'dilation', 'padding',
                     'stride']
    output_size: _size_any_t
    kernel_size: _size_any_t
    dilation: _size_any_t
    padding: _size_any_t
    stride: _size_any_t

    def __init__(
        self,
        output_size: _size_any_t,
        kernel_size: _size_any_t,
        dilation: _size_any_t = 1,
        padding: _size_any_t = 0,
        stride: _size_any_t = 1
    ) -> None:
        super().__init__()
        self.output_size = output_size
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.padding = padding
        self.stride = stride

    def forward(self, input: Tensor) -> Tensor:
        return F.fold(input, self.output_size, self.kernel_size, self.dilation,
                      self.padding, self.stride)

    def extra_repr(self) -> str:
        return 'output_size={output_size}, kernel_size={kernel_size}, ' \
            'dilation={dilation}, padding={padding}, stride={stride}'.format(
                **self.__dict__
            )


class Unfold(Module):
    r"""Extracts sliding local blocks from a batched input tensor.

    Consider a batched :attr:`input` tensor of shape :math:`(N, C, *)`,
    where :math:`N` is the batch dimension, :math:`C` is the channel dimension,
    and :math:`*` represent arbitrary spatial dimensions. This operation flattens
    each sliding :attr:`kernel_size`-sized block within the spatial dimensions
    of :attr:`input` into a column (i.e., last dimension) of a 3-D :attr:`output`
    tensor of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`, where
    :math:`C \times \prod(\text{kernel\_size})` is the total number of values
    within each block (a block has :math:`\prod(\text{kernel\_size})` spatial
    locations each containing a :math:`C`-channeled vector), and :math:`L` is
    the total number of such blocks:

    .. math::
        L = \prod_d \left\lfloor\frac{\text{spatial\_size}[d] + 2 \times \text{padding}[d] %
            - \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,

    where :math:`\text{spatial\_size}` is formed by the spatial dimensions
    of :attr:`input` (:math:`*` above), and :math:`d` is over all spatial
    dimensions.

    Therefore, indexing :attr:`output` at the last dimension (column dimension)
    gives all values within a certain block.

    The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify
    how the sliding blocks are retrieved.

    * :attr:`stride` controls the stride for the sliding blocks.

    * :attr:`padding` controls the amount of implicit zero-paddings on both
      sides for :attr:`padding` number of points for each dimension before
      reshaping.

    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.

    Args:
        kernel_size (int or tuple): the size of the sliding blocks
        dilation (int or tuple, optional): a parameter that controls the
                                           stride of elements within the
                                           neighborhood. Default: 1
        padding (int or tuple, optional): implicit zero padding to be added on
                                          both sides of input. Default: 0
        stride (int or tuple, optional): the stride of the sliding blocks in the input
                                         spatial dimensions. Default: 1

    * If :attr:`kernel_size`, :attr:`dilation`, :attr:`padding` or
      :attr:`stride` is an int or a tuple of length 1, their values will be
      replicated across all spatial dimensions.

    * For the case of two input spatial dimensions this operation is sometimes
      called ``im2col``.

    .. note::
        :class:`~torch.nn.Fold` calculates each combined value in the resulting
        large tensor by summing all values from all containing blocks.
        :class:`~torch.nn.Unfold` extracts the values in the local blocks by
        copying from the large tensor. So, if the blocks overlap, they are not
        inverses of each other.

        In general, folding and unfolding operations are related as
        follows. Consider :class:`~torch.nn.Fold` and
        :class:`~torch.nn.Unfold` instances created with the same
        parameters:

        >>> fold_params = dict(kernel_size=..., dilation=..., padding=..., stride=...)
        >>> fold = nn.Fold(output_size=..., **fold_params)
        >>> unfold = nn.Unfold(**fold_params)

        Then for any (supported) ``input`` tensor the following
        equality holds:

        ::

            fold(unfold(input)) == divisor * input

        where ``divisor`` is a tensor that depends only on the shape
        and dtype of the ``input``:

        >>> # xdoctest: +SKIP
        >>> input_ones = torch.ones(input.shape, dtype=input.dtype)
        >>> divisor = fold(unfold(input_ones))

        When the ``divisor`` tensor contains no zero elements, then
        ``fold`` and ``unfold`` operations are inverses of each
        other (up to constant divisor).

    .. warning::
        Currently, only 4-D input tensors (batched image-like tensors) are
        supported.

    Shape:
        - Input: :math:`(N, C, *)`
        - Output: :math:`(N, C \times \prod(\text{kernel\_size}), L)` as described above

    Examples::

        >>> unfold = nn.Unfold(kernel_size=(2, 3))
        >>> input = torch.randn(2, 5, 3, 4)
        >>> output = unfold(input)
        >>> # each patch contains 30 values (2x3=6 vectors, each of 5 channels)
        >>> # 4 blocks (2x3 kernels) in total in the 3x4 input
        >>> output.size()
        torch.Size([2, 30, 4])

        >>> # xdoctest: +IGNORE_WANT
        >>> # Convolution is equivalent with Unfold + Matrix Multiplication + Fold (or view to output shape)
        >>> inp = torch.randn(1, 3, 10, 12)
        >>> w = torch.randn(2, 3, 4, 5)
        >>> inp_unf = torch.nn.functional.unfold(inp, (4, 5))
        >>> out_unf = inp_unf.transpose(1, 2).matmul(w.view(w.size(0), -1).t()).transpose(1, 2)
        >>> out = torch.nn.functional.fold(out_unf, (7, 8), (1, 1))
        >>> # or equivalently (and avoiding a copy),
        >>> # out = out_unf.view(1, 2, 7, 8)
        >>> (torch.nn.functional.conv2d(inp, w) - out).abs().max()
        tensor(1.9073e-06)

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md

    """

    __constants__ = ['kernel_size', 'dilation', 'padding', 'stride']
    kernel_size: _size_any_t
    dilation: _size_any_t
    padding: _size_any_t
    stride: _size_any_t

    def __init__(
        self,
        kernel_size: _size_any_t,
        dilation: _size_any_t = 1,
        padding: _size_any_t = 0,
        stride: _size_any_t = 1
    ) -> None:
        super().__init__()
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.padding = padding
        self.stride = stride

    def forward(self, input: Tensor) -> Tensor:
        return F.unfold(input, self.kernel_size, self.dilation,
                        self.padding, self.stride)

    def extra_repr(self) -> str:
        return 'kernel_size={kernel_size}, dilation={dilation}, padding={padding},' \
            ' stride={stride}'.format(**self.__dict__)
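The docstrings above state that fold(unfold(input)) == divisor * input, where divisor counts how often each position is covered by a block. A small numeric check (illustration only, not part of the uploaded diff):

# Verify the fold/unfold divisor identity described in the docstrings above.
import torch
import torch.nn as nn

fold_params = dict(kernel_size=(2, 2), dilation=1, padding=0, stride=1)
fold = nn.Fold(output_size=(4, 4), **fold_params)
unfold = nn.Unfold(**fold_params)

x = torch.randn(1, 3, 4, 4)
divisor = fold(unfold(torch.ones_like(x)))  # how many blocks cover each position
assert torch.allclose(fold(unfold(x)), divisor * x)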
llmeval-env/lib/python3.10/site-packages/torch/nn/modules/instancenorm.py
ADDED
@@ -0,0 +1,434 @@

import warnings
from torch import Tensor

from .batchnorm import _LazyNormBase, _NormBase
from .. import functional as F

__all__ = ['InstanceNorm1d', 'InstanceNorm2d', 'InstanceNorm3d', 'LazyInstanceNorm1d',
           'LazyInstanceNorm2d', 'LazyInstanceNorm3d']

class _InstanceNorm(_NormBase):
    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
        momentum: float = 0.1,
        affine: bool = False,
        track_running_stats: bool = False,
        device=None,
        dtype=None
    ) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__(
            num_features, eps, momentum, affine, track_running_stats, **factory_kwargs)

    def _check_input_dim(self, input):
        raise NotImplementedError

    def _get_no_batch_dim(self):
        raise NotImplementedError

    def _handle_no_batch_input(self, input):
        return self._apply_instance_norm(input.unsqueeze(0)).squeeze(0)

    def _apply_instance_norm(self, input):
        return F.instance_norm(
            input, self.running_mean, self.running_var, self.weight, self.bias,
            self.training or not self.track_running_stats, self.momentum, self.eps)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        version = local_metadata.get('version', None)
        # at version 1: removed running_mean and running_var when
        # track_running_stats=False (default)
        if version is None and not self.track_running_stats:
            running_stats_keys = []
            for name in ('running_mean', 'running_var'):
                key = prefix + name
                if key in state_dict:
                    running_stats_keys.append(key)
            if len(running_stats_keys) > 0:
                error_msgs.append(
                    'Unexpected running stats buffer(s) {names} for {klass} '
                    'with track_running_stats=False. If state_dict is a '
                    'checkpoint saved before 0.4.0, this may be expected '
                    'because {klass} does not track running stats by default '
                    'since 0.4.0. Please remove these keys from state_dict. If '
                    'the running stats are actually needed, instead set '
                    'track_running_stats=True in {klass} to enable them. See '
                    'the documentation of {klass} for details.'
                    .format(names=" and ".join(f'"{k}"' for k in running_stats_keys),
                            klass=self.__class__.__name__))
                for key in running_stats_keys:
                    state_dict.pop(key)

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)

        feature_dim = input.dim() - self._get_no_batch_dim()
        if input.size(feature_dim) != self.num_features:
            if self.affine:
                raise ValueError(
                    f"expected input's size at dim={feature_dim} to match num_features"
                    f" ({self.num_features}), but got: {input.size(feature_dim)}.")
            else:
                warnings.warn(f"input's size at dim={feature_dim} does not match num_features. "
                              "You can silence this warning by not passing in num_features, "
                              "which is not used because affine=False")

        if input.dim() == self._get_no_batch_dim():
            return self._handle_no_batch_input(input)

        return self._apply_instance_norm(input)


class InstanceNorm1d(_InstanceNorm):
    r"""Applies Instance Normalization.

    This operation applies Instance Normalization
    over a 2D (unbatched) or 3D (batched) input as described in the paper
    `Instance Normalization: The Missing Ingredient for Fast Stylization
    <https://arxiv.org/abs/1607.08022>`__.

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size `C` (where `C` is the number of features or channels of the input) if :attr:`affine` is ``True``.
    The standard-deviation is calculated via the biased estimator, equivalent to
    `torch.var(input, unbiased=False)`.

    By default, this layer uses instance statistics computed from input data in
    both training and evaluation modes.

    If :attr:`track_running_stats` is set to ``True``, during training this
    layer keeps running estimates of its computed mean and variance, which are
    then used for normalization during evaluation. The running estimates are
    kept with a default :attr:`momentum` of 0.1.

    .. note::
        This :attr:`momentum` argument is different from one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    .. note::
        :class:`InstanceNorm1d` and :class:`LayerNorm` are very similar, but
        have some subtle differences. :class:`InstanceNorm1d` is applied
        on each channel of channeled data like multidimensional time series, but
        :class:`LayerNorm` is usually applied on entire sample and often in NLP
        tasks. Additionally, :class:`LayerNorm` applies elementwise affine
        transform, while :class:`InstanceNorm1d` usually don't apply affine
        transform.

    Args:
        num_features: number of features or channels :math:`C` of the input
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, L)` or :math:`(C, L)`
        - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)

    Examples::

        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm1d(100)
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm1d(100, affine=True)
        >>> input = torch.randn(20, 100, 40)
        >>> output = m(input)
    """

    def _get_no_batch_dim(self):
        return 2

    def _check_input_dim(self, input):
        if input.dim() not in (2, 3):
            raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)')


class LazyInstanceNorm1d(_LazyNormBase, _InstanceNorm):
    r"""A :class:`torch.nn.InstanceNorm1d` module with lazy initialization of the ``num_features`` argument.

    The ``num_features`` argument of the :class:`InstanceNorm1d` is inferred from the ``input.size(1)``.
    The attributes that will be lazily initialized are `weight`, `bias`, `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, L)` or :math:`(C, L)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
|
186 |
+
this module does not track such statistics and always uses batch
|
187 |
+
statistics in both training and eval modes. Default: ``False``
|
188 |
+
|
189 |
+
Shape:
|
190 |
+
- Input: :math:`(N, C, L)` or :math:`(C, L)`
|
191 |
+
- Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)
|
192 |
+
"""
|
193 |
+
|
194 |
+
cls_to_become = InstanceNorm1d # type: ignore[assignment]
|
195 |
+
|
196 |
+
def _get_no_batch_dim(self):
|
197 |
+
return 2
|
198 |
+
|
199 |
+
def _check_input_dim(self, input):
|
200 |
+
if input.dim() not in (2, 3):
|
201 |
+
raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)')
|
202 |
+
|
203 |
+
|
204 |
+
class InstanceNorm2d(_InstanceNorm):
|
205 |
+
r"""Applies Instance Normalization.
|
206 |
+
|
207 |
+
This operation applies Instance Normalization
|
208 |
+
over a 4D input (a mini-batch of 2D inputs
|
209 |
+
with additional channel dimension) as described in the paper
|
210 |
+
`Instance Normalization: The Missing Ingredient for Fast Stylization
|
211 |
+
<https://arxiv.org/abs/1607.08022>`__.
|
212 |
+
|
213 |
+
.. math::
|
214 |
+
|
215 |
+
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
|
216 |
+
|
217 |
+
The mean and standard-deviation are calculated per-dimension separately
|
218 |
+
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
|
219 |
+
of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
|
220 |
+
The standard-deviation is calculated via the biased estimator, equivalent to
|
221 |
+
`torch.var(input, unbiased=False)`.
|
222 |
+
|
223 |
+
By default, this layer uses instance statistics computed from input data in
|
224 |
+
both training and evaluation modes.
|
225 |
+
|
226 |
+
If :attr:`track_running_stats` is set to ``True``, during training this
|
227 |
+
layer keeps running estimates of its computed mean and variance, which are
|
228 |
+
then used for normalization during evaluation. The running estimates are
|
229 |
+
kept with a default :attr:`momentum` of 0.1.
|
230 |
+
|
231 |
+
.. note::
|
232 |
+
This :attr:`momentum` argument is different from one used in optimizer
|
233 |
+
classes and the conventional notion of momentum. Mathematically, the
|
234 |
+
update rule for running statistics here is
|
235 |
+
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
|
236 |
+
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
|
237 |
+
new observed value.
|
238 |
+
|
239 |
+
.. note::
|
240 |
+
:class:`InstanceNorm2d` and :class:`LayerNorm` are very similar, but
|
241 |
+
have some subtle differences. :class:`InstanceNorm2d` is applied
|
242 |
+
on each channel of channeled data like RGB images, but
|
243 |
+
:class:`LayerNorm` is usually applied on entire sample and often in NLP
|
244 |
+
tasks. Additionally, :class:`LayerNorm` applies elementwise affine
|
245 |
+
transform, while :class:`InstanceNorm2d` usually don't apply affine
|
246 |
+
transform.
|
247 |
+
|
248 |
+
Args:
|
249 |
+
num_features: :math:`C` from an expected input of size
|
250 |
+
:math:`(N, C, H, W)` or :math:`(C, H, W)`
|
251 |
+
eps: a value added to the denominator for numerical stability. Default: 1e-5
|
252 |
+
momentum: the value used for the running_mean and running_var computation. Default: 0.1
|
253 |
+
affine: a boolean value that when set to ``True``, this module has
|
254 |
+
learnable affine parameters, initialized the same way as done for batch normalization.
|
255 |
+
Default: ``False``.
|
256 |
+
track_running_stats: a boolean value that when set to ``True``, this
|
257 |
+
module tracks the running mean and variance, and when set to ``False``,
|
258 |
+
this module does not track such statistics and always uses batch
|
259 |
+
statistics in both training and eval modes. Default: ``False``
|
260 |
+
|
261 |
+
Shape:
|
262 |
+
- Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
|
263 |
+
- Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
|
264 |
+
|
265 |
+
Examples::
|
266 |
+
|
267 |
+
>>> # Without Learnable Parameters
|
268 |
+
>>> m = nn.InstanceNorm2d(100)
|
269 |
+
>>> # With Learnable Parameters
|
270 |
+
>>> m = nn.InstanceNorm2d(100, affine=True)
|
271 |
+
>>> input = torch.randn(20, 100, 35, 45)
|
272 |
+
>>> output = m(input)
|
273 |
+
"""
|
274 |
+
|
275 |
+
def _get_no_batch_dim(self):
|
276 |
+
return 3
|
277 |
+
|
278 |
+
def _check_input_dim(self, input):
|
279 |
+
if input.dim() not in (3, 4):
|
280 |
+
raise ValueError(f'expected 3D or 4D input (got {input.dim()}D input)')
|
281 |
+
|
282 |
+
|
283 |
+
class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm):
|
284 |
+
r"""A :class:`torch.nn.InstanceNorm2d` module with lazy initialization of the ``num_features`` argument.
|
285 |
+
|
286 |
+
The ``num_features`` argument of the :class:`InstanceNorm2d` is inferred from the ``input.size(1)``.
|
287 |
+
The attributes that will be lazily initialized are `weight`, `bias`,
|
288 |
+
`running_mean` and `running_var`.
|
289 |
+
|
290 |
+
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
|
291 |
+
on lazy modules and their limitations.
|
292 |
+
|
293 |
+
Args:
|
294 |
+
num_features: :math:`C` from an expected input of size
|
295 |
+
:math:`(N, C, H, W)` or :math:`(C, H, W)`
|
296 |
+
eps: a value added to the denominator for numerical stability. Default: 1e-5
|
297 |
+
momentum: the value used for the running_mean and running_var computation. Default: 0.1
|
298 |
+
affine: a boolean value that when set to ``True``, this module has
|
299 |
+
learnable affine parameters, initialized the same way as done for batch normalization.
|
300 |
+
Default: ``False``.
|
301 |
+
track_running_stats: a boolean value that when set to ``True``, this
|
302 |
+
module tracks the running mean and variance, and when set to ``False``,
|
303 |
+
this module does not track such statistics and always uses batch
|
304 |
+
statistics in both training and eval modes. Default: ``False``
|
305 |
+
|
306 |
+
Shape:
|
307 |
+
- Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
|
308 |
+
- Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
|
309 |
+
"""
|
310 |
+
|
311 |
+
cls_to_become = InstanceNorm2d # type: ignore[assignment]
|
312 |
+
|
313 |
+
def _get_no_batch_dim(self):
|
314 |
+
return 3
|
315 |
+
|
316 |
+
def _check_input_dim(self, input):
|
317 |
+
if input.dim() not in (3, 4):
|
318 |
+
raise ValueError(f'expected 3D or 4D input (got {input.dim()}D input)')
|
319 |
+
|
320 |
+
|
321 |
+
class InstanceNorm3d(_InstanceNorm):
|
322 |
+
r"""Applies Instance Normalization.
|
323 |
+
|
324 |
+
This operation applies Instance Normalization
|
325 |
+
over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper
|
326 |
+
`Instance Normalization: The Missing Ingredient for Fast Stylization
|
327 |
+
<https://arxiv.org/abs/1607.08022>`__.
|
328 |
+
|
329 |
+
.. math::
|
330 |
+
|
331 |
+
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
|
332 |
+
|
333 |
+
The mean and standard-deviation are calculated per-dimension separately
|
334 |
+
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
|
335 |
+
of size C (where C is the input size) if :attr:`affine` is ``True``.
|
336 |
+
The standard-deviation is calculated via the biased estimator, equivalent to
|
337 |
+
`torch.var(input, unbiased=False)`.
|
338 |
+
|
339 |
+
By default, this layer uses instance statistics computed from input data in
|
340 |
+
both training and evaluation modes.
|
341 |
+
|
342 |
+
If :attr:`track_running_stats` is set to ``True``, during training this
|
343 |
+
layer keeps running estimates of its computed mean and variance, which are
|
344 |
+
then used for normalization during evaluation. The running estimates are
|
345 |
+
kept with a default :attr:`momentum` of 0.1.
|
346 |
+
|
347 |
+
.. note::
|
348 |
+
This :attr:`momentum` argument is different from one used in optimizer
|
349 |
+
classes and the conventional notion of momentum. Mathematically, the
|
350 |
+
update rule for running statistics here is
|
351 |
+
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
|
352 |
+
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
|
353 |
+
new observed value.
|
354 |
+
|
355 |
+
.. note::
|
356 |
+
:class:`InstanceNorm3d` and :class:`LayerNorm` are very similar, but
|
357 |
+
have some subtle differences. :class:`InstanceNorm3d` is applied
|
358 |
+
on each channel of channeled data like 3D models with RGB color, but
|
359 |
+
:class:`LayerNorm` is usually applied on entire sample and often in NLP
|
360 |
+
tasks. Additionally, :class:`LayerNorm` applies elementwise affine
|
361 |
+
transform, while :class:`InstanceNorm3d` usually don't apply affine
|
362 |
+
transform.
|
363 |
+
|
364 |
+
Args:
|
365 |
+
num_features: :math:`C` from an expected input of size
|
366 |
+
:math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
|
367 |
+
eps: a value added to the denominator for numerical stability. Default: 1e-5
|
368 |
+
momentum: the value used for the running_mean and running_var computation. Default: 0.1
|
369 |
+
affine: a boolean value that when set to ``True``, this module has
|
370 |
+
learnable affine parameters, initialized the same way as done for batch normalization.
|
371 |
+
Default: ``False``.
|
372 |
+
track_running_stats: a boolean value that when set to ``True``, this
|
373 |
+
module tracks the running mean and variance, and when set to ``False``,
|
374 |
+
this module does not track such statistics and always uses batch
|
375 |
+
statistics in both training and eval modes. Default: ``False``
|
376 |
+
|
377 |
+
Shape:
|
378 |
+
- Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
|
379 |
+
- Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)
|
380 |
+
|
381 |
+
Examples::
|
382 |
+
|
383 |
+
>>> # Without Learnable Parameters
|
384 |
+
>>> m = nn.InstanceNorm3d(100)
|
385 |
+
>>> # With Learnable Parameters
|
386 |
+
>>> m = nn.InstanceNorm3d(100, affine=True)
|
387 |
+
>>> input = torch.randn(20, 100, 35, 45, 10)
|
388 |
+
>>> output = m(input)
|
389 |
+
"""
|
390 |
+
|
391 |
+
def _get_no_batch_dim(self):
|
392 |
+
return 4
|
393 |
+
|
394 |
+
def _check_input_dim(self, input):
|
395 |
+
if input.dim() not in (4, 5):
|
396 |
+
raise ValueError(f'expected 4D or 5D input (got {input.dim()}D input)')
|
397 |
+
|
398 |
+
|
399 |
+
class LazyInstanceNorm3d(_LazyNormBase, _InstanceNorm):
|
400 |
+
r"""A :class:`torch.nn.InstanceNorm3d` module with lazy initialization of the ``num_features`` argument.
|
401 |
+
|
402 |
+
The ``num_features`` argument of the :class:`InstanceNorm3d` is inferred from the ``input.size(1)``.
|
403 |
+
The attributes that will be lazily initialized are `weight`, `bias`,
|
404 |
+
`running_mean` and `running_var`.
|
405 |
+
|
406 |
+
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
|
407 |
+
on lazy modules and their limitations.
|
408 |
+
|
409 |
+
Args:
|
410 |
+
num_features: :math:`C` from an expected input of size
|
411 |
+
:math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
|
412 |
+
eps: a value added to the denominator for numerical stability. Default: 1e-5
|
413 |
+
momentum: the value used for the running_mean and running_var computation. Default: 0.1
|
414 |
+
affine: a boolean value that when set to ``True``, this module has
|
415 |
+
learnable affine parameters, initialized the same way as done for batch normalization.
|
416 |
+
Default: ``False``.
|
417 |
+
track_running_stats: a boolean value that when set to ``True``, this
|
418 |
+
module tracks the running mean and variance, and when set to ``False``,
|
419 |
+
this module does not track such statistics and always uses batch
|
420 |
+
statistics in both training and eval modes. Default: ``False``
|
421 |
+
|
422 |
+
Shape:
|
423 |
+
- Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
|
424 |
+
- Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)
|
425 |
+
"""
|
426 |
+
|
427 |
+
cls_to_become = InstanceNorm3d # type: ignore[assignment]
|
428 |
+
|
429 |
+
def _get_no_batch_dim(self):
|
430 |
+
return 4
|
431 |
+
|
432 |
+
def _check_input_dim(self, input):
|
433 |
+
if input.dim() not in (4, 5):
|
434 |
+
raise ValueError(f'expected 4D or 5D input (got {input.dim()}D input)')
|
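A minimal usage sketch for the instance-norm variants above (illustrative only, not part of the vendored file; the variable names and tensor shapes are made up, and a standard torch install is assumed):

    import torch
    import torch.nn as nn

    # InstanceNorm1d normalizes each channel of every sample independently.
    m = nn.InstanceNorm1d(4, affine=True, track_running_stats=True)
    batched = torch.randn(8, 4, 16)    # (N, C, L)
    unbatched = torch.randn(4, 16)     # (C, L) -- routed through _handle_no_batch_input
    print(m(batched).shape)            # torch.Size([8, 4, 16])
    print(m(unbatched).shape)          # torch.Size([4, 16])

    # LazyInstanceNorm2d infers num_features from input.size(1) on the first forward.
    lazy = nn.LazyInstanceNorm2d(affine=True)
    print(lazy(torch.randn(2, 3, 32, 32)).shape)   # torch.Size([2, 3, 32, 32])
    print(lazy.num_features)                       # 3 (module has become InstanceNorm2d)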
llmeval-env/lib/python3.10/site-packages/torch/nn/modules/module.py
ADDED
The diff for this file is too large to render.
See raw diff
llmeval-env/lib/python3.10/site-packages/torch/nn/modules/padding.py
ADDED
@@ -0,0 +1,801 @@
from .module import Module
from .utils import _pair, _quadruple, _ntuple
from .. import functional as F

from torch import Tensor
from ..common_types import _size_2_t, _size_4_t, _size_6_t
from typing import Sequence, Tuple


# TODO: grad_output size asserts in THNN

__all__ = ['CircularPad1d', 'CircularPad2d', 'CircularPad3d', 'ConstantPad1d', 'ConstantPad2d',
           'ConstantPad3d', 'ReflectionPad1d', 'ReflectionPad2d', 'ReflectionPad3d',
           'ReplicationPad1d', 'ReplicationPad2d', 'ReplicationPad3d', 'ZeroPad1d', 'ZeroPad2d', 'ZeroPad3d']


class _CircularPadNd(Module):
    __constants__ = ['padding']
    padding: Sequence[int]

    def _check_input_dim(self, input):
        raise NotImplementedError

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)
        return F.pad(input, self.padding, 'circular')

    def extra_repr(self) -> str:
        return f'{self.padding}'


class CircularPad1d(_CircularPadNd):
    r"""Pads the input tensor using circular padding of the input boundary.

    Tensor values at the beginning of the dimension are used to pad the end,
    and values at the end are used to pad the beginning. If negative padding is
    applied then the ends of the tensor get removed.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
        >>> m = nn.CircularPad1d(2)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> input
        tensor([[[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]])
        >>> m(input)
        tensor([[[2., 3., 0., 1., 2., 3., 0., 1.],
                 [6., 7., 4., 5., 6., 7., 4., 5.]]])
        >>> # using different paddings for different sides
        >>> m = nn.CircularPad1d((3, 1))
        >>> m(input)
        tensor([[[1., 2., 3., 0., 1., 2., 3., 0.],
                 [5., 6., 7., 4., 5., 6., 7., 4.]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        self.padding = _pair(padding)

    def _check_input_dim(self, input):
        if input.dim() != 2 and input.dim() != 3:
            raise ValueError(
                f"expected 2D or 3D input (got {input.dim()}D input)"
            )


class CircularPad2d(_CircularPadNd):
    r"""Pads the input tensor using circular padding of the input boundary.

    Tensor values at the beginning of the dimension are used to pad the end,
    and values at the end are used to pad the beginning. If negative padding is
    applied then the ends of the tensor get removed.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.CircularPad2d(2)
        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> input
        tensor([[[[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]]]])
        >>> m(input)
        tensor([[[[4., 5., 3., 4., 5., 3., 4.],
                  [7., 8., 6., 7., 8., 6., 7.],
                  [1., 2., 0., 1., 2., 0., 1.],
                  [4., 5., 3., 4., 5., 3., 4.],
                  [7., 8., 6., 7., 8., 6., 7.],
                  [1., 2., 0., 1., 2., 0., 1.],
                  [4., 5., 3., 4., 5., 3., 4.]]]])
        >>> # using different paddings for different sides
        >>> m = nn.CircularPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[5., 3., 4., 5., 3.],
                  [8., 6., 7., 8., 6.],
                  [2., 0., 1., 2., 0.],
                  [5., 3., 4., 5., 3.],
                  [8., 6., 7., 8., 6.]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        self.padding = _quadruple(padding)

    def _check_input_dim(self, input):
        if input.dim() != 3 and input.dim() != 4:
            raise ValueError(
                f"expected 3D or 4D input (got {input.dim()}D input)"
            )


class CircularPad3d(_CircularPadNd):
    r"""Pads the input tensor using circular padding of the input boundary.

    Tensor values at the beginning of the dimension are used to pad the end,
    and values at the end are used to pad the beginning. If negative padding is
    applied then the ends of the tensor get removed.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.CircularPad3d(3)
        >>> input = torch.randn(16, 3, 8, 320, 480)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.CircularPad3d((3, 3, 6, 6, 1, 1))
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        self.padding = _ntuple(6)(padding)

    def _check_input_dim(self, input):
        if input.dim() != 4 and input.dim() != 5:
            raise ValueError(
                f"expected 4D or 5D input (got {input.dim()}D input)"
            )


class _ConstantPadNd(Module):
    __constants__ = ['padding', 'value']
    value: float
    padding: Sequence[int]

    def __init__(self, value: float) -> None:
        super().__init__()
        self.value = value

    def forward(self, input: Tensor) -> Tensor:
        return F.pad(input, self.padding, 'constant', self.value)

    def extra_repr(self) -> str:
        return f'padding={self.padding}, value={self.value}'


class ConstantPad1d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in both boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ConstantPad1d(2, 3.5)
        >>> input = torch.randn(1, 2, 4)
        >>> input
        tensor([[[-1.0491, -0.7152, -0.0749, 0.8530],
                 [-1.3287, 1.8966, 0.1466, -0.2771]]])
        >>> m(input)
        tensor([[[ 3.5000, 3.5000, -1.0491, -0.7152, -0.0749, 0.8530, 3.5000,
                   3.5000],
                 [ 3.5000, 3.5000, -1.3287, 1.8966, 0.1466, -0.2771, 3.5000,
                   3.5000]]])
        >>> m = nn.ConstantPad1d(2, 3.5)
        >>> input = torch.randn(1, 2, 3)
        >>> input
        tensor([[[ 1.6616, 1.4523, -1.1255],
                 [-3.6372, 0.1182, -1.8652]]])
        >>> m(input)
        tensor([[[ 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000, 3.5000],
                 [ 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000, 3.5000]]])
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad1d((3, 1), 3.5)
        >>> m(input)
        tensor([[[ 3.5000, 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000],
                 [ 3.5000, 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t, value: float):
        super().__init__(value)
        self.padding = _pair(padding)


class ConstantPad2d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ConstantPad2d(2, 3.5)
        >>> input = torch.randn(1, 2, 2)
        >>> input
        tensor([[[ 1.6585, 0.4320],
                 [-0.8701, -0.4649]]])
        >>> m(input)
        tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
                 [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
                 [ 3.5000, 3.5000, 1.6585, 0.4320, 3.5000, 3.5000],
                 [ 3.5000, 3.5000, -0.8701, -0.4649, 3.5000, 3.5000],
                 [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
                 [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]])
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
        >>> m(input)
        tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
                 [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
                 [ 3.5000, 3.5000, 3.5000, 1.6585, 0.4320],
                 [ 3.5000, 3.5000, 3.5000, -0.8701, -0.4649],
                 [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]])
    """

    __constants__ = ['padding', 'value']
    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t, value: float) -> None:
        super().__init__(value)
        self.padding = _quadruple(padding)


class ConstantPad3d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ConstantPad3d(3, 3.5)
        >>> input = torch.randn(16, 3, 10, 20, 30)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5)
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t, value: float) -> None:
        super().__init__(value)
        self.padding = _ntuple(6)(padding)


class _ReflectionPadNd(Module):
    __constants__ = ['padding']
    padding: Sequence[int]

    def forward(self, input: Tensor) -> Tensor:
        return F.pad(input, self.padding, 'reflect')

    def extra_repr(self) -> str:
        return f'{self.padding}'


class ReflectionPad1d(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReflectionPad1d(2)
        >>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles")
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> input
        tensor([[[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]])
        >>> m(input)
        tensor([[[2., 1., 0., 1., 2., 3., 2., 1.],
                 [6., 5., 4., 5., 6., 7., 6., 5.]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReflectionPad1d((3, 1))
        >>> m(input)
        tensor([[[3., 2., 1., 0., 1., 2., 3., 2.],
                 [7., 6., 5., 4., 5., 6., 7., 6.]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        self.padding = _pair(padding)


class ReflectionPad2d(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
            Note that padding size should be less than the corresponding input dimension.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})` where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
        >>> m = nn.ReflectionPad2d(2)
        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> input
        tensor([[[[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]]]])
        >>> m(input)
        tensor([[[[8., 7., 6., 7., 8., 7., 6.],
                  [5., 4., 3., 4., 5., 4., 3.],
                  [2., 1., 0., 1., 2., 1., 0.],
                  [5., 4., 3., 4., 5., 4., 3.],
                  [8., 7., 6., 7., 8., 7., 6.],
                  [5., 4., 3., 4., 5., 4., 3.],
                  [2., 1., 0., 1., 2., 1., 0.]]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReflectionPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[7., 6., 7., 8., 7.],
                  [4., 3., 4., 5., 4.],
                  [1., 0., 1., 2., 1.],
                  [4., 3., 4., 5., 4.],
                  [7., 6., 7., 8., 7.]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        self.padding = _quadruple(padding)


class ReflectionPad3d(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
        >>> m = nn.ReflectionPad3d(1)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 1, 2, 2, 2)
        >>> m(input)
        tensor([[[[[7., 6., 7., 6.],
                   [5., 4., 5., 4.],
                   [7., 6., 7., 6.],
                   [5., 4., 5., 4.]],
                  [[3., 2., 3., 2.],
                   [1., 0., 1., 0.],
                   [3., 2., 3., 2.],
                   [1., 0., 1., 0.]],
                  [[7., 6., 7., 6.],
                   [5., 4., 5., 4.],
                   [7., 6., 7., 6.],
                   [5., 4., 5., 4.]],
                  [[3., 2., 3., 2.],
                   [1., 0., 1., 0.],
                   [3., 2., 3., 2.],
                   [1., 0., 1., 0.]]]]])
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        self.padding = _ntuple(6)(padding)


class _ReplicationPadNd(Module):
    __constants__ = ['padding']
    padding: Sequence[int]

    def forward(self, input: Tensor) -> Tensor:
        return F.pad(input, self.padding, 'replicate')

    def extra_repr(self) -> str:
        return f'{self.padding}'


class ReplicationPad1d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
        >>> m = nn.ReplicationPad1d(2)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> input
        tensor([[[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]])
        >>> m(input)
        tensor([[[0., 0., 0., 1., 2., 3., 3., 3.],
                 [4., 4., 4., 5., 6., 7., 7., 7.]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReplicationPad1d((3, 1))
        >>> m(input)
        tensor([[[0., 0., 0., 0., 1., 2., 3., 3.],
                 [4., 4., 4., 4., 5., 6., 7., 7.]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        self.padding = _pair(padding)


class ReplicationPad2d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReplicationPad2d(2)
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> input
        tensor([[[[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]]]])
        >>> m(input)
        tensor([[[[0., 0., 0., 1., 2., 2., 2.],
                  [0., 0., 0., 1., 2., 2., 2.],
                  [0., 0., 0., 1., 2., 2., 2.],
                  [3., 3., 3., 4., 5., 5., 5.],
                  [6., 6., 6., 7., 8., 8., 8.],
                  [6., 6., 6., 7., 8., 8., 8.],
                  [6., 6., 6., 7., 8., 8., 8.]]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReplicationPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[0., 0., 1., 2., 2.],
                  [0., 0., 1., 2., 2.],
                  [0., 0., 1., 2., 2.],
                  [3., 3., 4., 5., 5.],
                  [6., 6., 7., 8., 8.]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        self.padding = _quadruple(padding)


class ReplicationPad3d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ReplicationPad3d(3)
        >>> input = torch.randn(16, 3, 8, 320, 480)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1))
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        self.padding = _ntuple(6)(padding)


class ZeroPad1d(ConstantPad1d):
    r"""Pads the input tensor boundaries with zero.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in both boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ZeroPad1d(2)
        >>> input = torch.randn(1, 2, 4)
        >>> input
        tensor([[[-1.0491, -0.7152, -0.0749, 0.8530],
                 [-1.3287, 1.8966, 0.1466, -0.2771]]])
        >>> m(input)
        tensor([[[ 0.0000, 0.0000, -1.0491, -0.7152, -0.0749, 0.8530, 0.0000,
                   0.0000],
                 [ 0.0000, 0.0000, -1.3287, 1.8966, 0.1466, -0.2771, 0.0000,
                   0.0000]]])
        >>> m = nn.ZeroPad1d(2)
        >>> input = torch.randn(1, 2, 3)
        >>> input
        tensor([[[ 1.6616, 1.4523, -1.1255],
                 [-3.6372, 0.1182, -1.8652]]])
        >>> m(input)
        tensor([[[ 0.0000, 0.0000, 1.6616, 1.4523, -1.1255, 0.0000, 0.0000],
                 [ 0.0000, 0.0000, -3.6372, 0.1182, -1.8652, 0.0000, 0.0000]]])
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad1d((3, 1))
        >>> m(input)
        tensor([[[ 0.0000, 0.0000, 0.0000, 1.6616, 1.4523, -1.1255, 0.0000],
                 [ 0.0000, 0.0000, 0.0000, -3.6372, 0.1182, -1.8652, 0.0000]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__(padding, 0.)

    def extra_repr(self) -> str:
        return f'{self.padding}'

class ZeroPad2d(ConstantPad2d):
    r"""Pads the input tensor boundaries with zero.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ZeroPad2d(2)
        >>> input = torch.randn(1, 1, 3, 3)
        >>> input
        tensor([[[[-0.1678, -0.4418, 1.9466],
                  [ 0.9604, -0.4219, -0.5241],
                  [-0.9162, -0.5436, -0.6446]]]])
        >>> m(input)
        tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                  [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                  [ 0.0000, 0.0000, -0.1678, -0.4418, 1.9466, 0.0000, 0.0000],
                  [ 0.0000, 0.0000, 0.9604, -0.4219, -0.5241, 0.0000, 0.0000],
                  [ 0.0000, 0.0000, -0.9162, -0.5436, -0.6446, 0.0000, 0.0000],
                  [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                  [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                  [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                  [ 0.0000, -0.1678, -0.4418, 1.9466, 0.0000],
                  [ 0.0000, 0.9604, -0.4219, -0.5241, 0.0000],
                  [ 0.0000, -0.9162, -0.5436, -0.6446, 0.0000]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__(padding, 0.)

    def extra_repr(self) -> str:
        return f'{self.padding}'

class ZeroPad3d(ConstantPad3d):
    r"""Pads the input tensor boundaries with zero.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ZeroPad3d(3)
        >>> input = torch.randn(16, 3, 10, 20, 30)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad3d((3, 3, 6, 6, 0, 1))
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__(padding, 0.)

    def extra_repr(self) -> str:
        return f'{self.padding}'
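The padding modules above differ only in how they fill the new border values; the output size formula is the same for all of them. A minimal comparison sketch (illustrative, not part of the vendored file; it assumes these classes are exposed as torch.nn.* in this build):

    import torch
    import torch.nn as nn

    x = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
    pad = (1, 2, 0, 1)   # (left, right, top, bottom)

    # H_out = H_in + padding_top + padding_bottom, W_out = W_in + padding_left + padding_right
    for mod in (nn.ZeroPad2d(pad), nn.ReplicationPad2d(pad),
                nn.ReflectionPad2d(pad), nn.CircularPad2d(pad)):
        print(type(mod).__name__, tuple(mod(x).shape))   # each prints (1, 1, 4, 6)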
llmeval-env/lib/python3.10/site-packages/torch/nn/modules/pixelshuffle.py
ADDED
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from .module import Module
|
2 |
+
from .. import functional as F
|
3 |
+
|
4 |
+
from torch import Tensor
|
5 |
+
|
6 |
+
__all__ = ['PixelShuffle', 'PixelUnshuffle']
|
7 |
+
|
8 |
+
class PixelShuffle(Module):
|
9 |
+
r"""Rearrange elements in a tensor according to an upscaling factor.
|
10 |
+
|
11 |
+
Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
|
12 |
+
to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an upscale factor.
|
13 |
+
|
14 |
+
This is useful for implementing efficient sub-pixel convolution
|
15 |
+
with a stride of :math:`1/r`.
|
16 |
+
|
17 |
+
    See the paper:
    `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_
    by Shi et al. (2016) for more details.

    Args:
        upscale_factor (int): factor to increase spatial resolution by

    Shape:
        - Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions
        - Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where

    .. math::
        C_{out} = C_{in} \div \text{upscale\_factor}^2

    .. math::
        H_{out} = H_{in} \times \text{upscale\_factor}

    .. math::
        W_{out} = W_{in} \times \text{upscale\_factor}

    Examples::

        >>> pixel_shuffle = nn.PixelShuffle(3)
        >>> input = torch.randn(1, 9, 4, 4)
        >>> output = pixel_shuffle(input)
        >>> print(output.size())
        torch.Size([1, 1, 12, 12])

    .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network:
        https://arxiv.org/abs/1609.05158
    """

    __constants__ = ['upscale_factor']
    upscale_factor: int

    def __init__(self, upscale_factor: int) -> None:
        super().__init__()
        self.upscale_factor = upscale_factor

    def forward(self, input: Tensor) -> Tensor:
        return F.pixel_shuffle(input, self.upscale_factor)

    def extra_repr(self) -> str:
        return f'upscale_factor={self.upscale_factor}'


class PixelUnshuffle(Module):
    r"""Reverse the PixelShuffle operation.

    Reverses the :class:`~torch.nn.PixelShuffle` operation by rearranging elements
    in a tensor of shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape
    :math:`(*, C \times r^2, H, W)`, where r is a downscale factor.

    See the paper:
    `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_
    by Shi et al. (2016) for more details.

    Args:
        downscale_factor (int): factor to decrease spatial resolution by

    Shape:
        - Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions
        - Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where

    .. math::
        C_{out} = C_{in} \times \text{downscale\_factor}^2

    .. math::
        H_{out} = H_{in} \div \text{downscale\_factor}

    .. math::
        W_{out} = W_{in} \div \text{downscale\_factor}

    Examples::

        >>> pixel_unshuffle = nn.PixelUnshuffle(3)
        >>> input = torch.randn(1, 1, 12, 12)
        >>> output = pixel_unshuffle(input)
        >>> print(output.size())
        torch.Size([1, 9, 4, 4])

    .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network:
        https://arxiv.org/abs/1609.05158
    """

    __constants__ = ['downscale_factor']
    downscale_factor: int

    def __init__(self, downscale_factor: int) -> None:
        super().__init__()
        self.downscale_factor = downscale_factor

    def forward(self, input: Tensor) -> Tensor:
        return F.pixel_unshuffle(input, self.downscale_factor)

    def extra_repr(self) -> str:
        return f'downscale_factor={self.downscale_factor}'
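
# Illustrative usage sketch (added for exposition; ``_sketch_pixel_shuffle_round_trip``
# is not an upstream helper). It checks the shape relations documented above and that
# ``PixelUnshuffle`` exactly inverts ``PixelShuffle``.
def _sketch_pixel_shuffle_round_trip() -> None:
    import torch

    r = 3
    shuffle, unshuffle = PixelShuffle(r), PixelUnshuffle(r)
    x = torch.randn(1, 9, 4, 4)                    # C_in = 9 = 1 * r**2
    y = shuffle(x)                                 # shape (1, 1, 12, 12)
    assert y.shape == (1, 9 // r ** 2, 4 * r, 4 * r)
    assert torch.equal(unshuffle(y), x)            # pure rearrangement, exactly invertible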
llmeval-env/lib/python3.10/site-packages/torch/nn/modules/transformer.py
ADDED
@@ -0,0 +1,975 @@
1 |
+
import copy
|
2 |
+
from typing import Optional, Any, Union, Callable
|
3 |
+
|
4 |
+
import torch
|
5 |
+
import warnings
|
6 |
+
from torch import Tensor
|
7 |
+
from .. import functional as F
|
8 |
+
from .module import Module
|
9 |
+
from .activation import MultiheadAttention
|
10 |
+
from .container import ModuleList
|
11 |
+
from ..init import xavier_uniform_
|
12 |
+
from .dropout import Dropout
|
13 |
+
from .linear import Linear
|
14 |
+
from .normalization import LayerNorm
|
15 |
+
|
16 |
+
__all__ = ['Transformer', 'TransformerEncoder', 'TransformerDecoder', 'TransformerEncoderLayer', 'TransformerDecoderLayer']
|
17 |
+
|
18 |
+
def _generate_square_subsequent_mask(
|
19 |
+
sz: int,
|
20 |
+
device: Optional[torch.device] = None,
|
21 |
+
dtype: Optional[torch.dtype] = None,
|
22 |
+
) -> Tensor:
|
23 |
+
r"""Generate a square causal mask for the sequence.
|
24 |
+
|
25 |
+
The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0).
|
26 |
+
"""
|
27 |
+
if device is None:
|
28 |
+
device = torch.device('cpu')
|
29 |
+
if dtype is None:
|
30 |
+
dtype = torch.float32
|
31 |
+
return torch.triu(
|
32 |
+
torch.full((sz, sz), float('-inf'), dtype=dtype, device=device),
|
33 |
+
diagonal=1,
|
34 |
+
)
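
# Illustrative sketch (added for exposition; ``_sketch_causal_mask`` is not an upstream
# helper): for sz=4 the helper above returns the additive causal mask, i.e. -inf strictly
# above the diagonal and 0.0 elsewhere, so position i attends only to positions <= i.
def _sketch_causal_mask() -> None:
    mask = _generate_square_subsequent_mask(4)
    assert mask.shape == (4, 4)
    assert torch.isinf(mask).sum() == 6          # the 6 strictly upper-triangular entries
    assert (mask.tril() == 0).all()              # nothing on or below the diagonal is masked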
|
35 |
+
|
36 |
+
|
37 |
+
def _get_seq_len(
|
38 |
+
src: Tensor,
|
39 |
+
batch_first: bool
|
40 |
+
) -> Optional[int]:
|
41 |
+
|
42 |
+
if src.is_nested:
|
43 |
+
return None
|
44 |
+
else:
|
45 |
+
src_size = src.size()
|
46 |
+
if len(src_size) == 2:
|
47 |
+
# unbatched: S, E
|
48 |
+
return src_size[0]
|
49 |
+
else:
|
50 |
+
# batched: B, S, E if batch_first else S, B, E
|
51 |
+
seq_len_pos = 1 if batch_first else 0
|
52 |
+
return src_size[seq_len_pos]
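
# Illustrative sketch (added for exposition; ``_sketch_get_seq_len`` is not an upstream
# helper): how the helper above reads the sequence length for the supported layouts.
# Nested inputs return None, since their sequence lengths differ per sample.
def _sketch_get_seq_len() -> None:
    assert _get_seq_len(torch.randn(7, 512), batch_first=False) == 7       # unbatched (S, E)
    assert _get_seq_len(torch.randn(7, 32, 512), batch_first=False) == 7   # (S, B, E)
    assert _get_seq_len(torch.randn(32, 7, 512), batch_first=True) == 7    # (B, S, E)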
|
53 |
+
|
54 |
+
|
55 |
+
class Transformer(Module):
|
56 |
+
r"""A transformer model.
|
57 |
+
|
58 |
+
Users can modify the attributes as needed. The architecture
|
59 |
+
is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
|
60 |
+
Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
|
61 |
+
Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information
|
62 |
+
Processing Systems, pages 6000-6010.
|
63 |
+
|
64 |
+
Args:
|
65 |
+
d_model: the number of expected features in the encoder/decoder inputs (default=512).
|
66 |
+
nhead: the number of heads in the multiheadattention models (default=8).
|
67 |
+
num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
|
68 |
+
num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6).
|
69 |
+
dim_feedforward: the dimension of the feedforward network model (default=2048).
|
70 |
+
dropout: the dropout value (default=0.1).
|
71 |
+
activation: the activation function of encoder/decoder intermediate layer, can be a string
|
72 |
+
("relu" or "gelu") or a unary callable. Default: relu
|
73 |
+
custom_encoder: custom encoder (default=None).
|
74 |
+
custom_decoder: custom decoder (default=None).
|
75 |
+
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
|
76 |
+
batch_first: If ``True``, then the input and output tensors are provided
|
77 |
+
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
|
78 |
+
norm_first: if ``True``, encoder and decoder layers will perform LayerNorms before
|
79 |
+
other attention and feedforward operations, otherwise after. Default: ``False`` (after).
|
80 |
+
bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive
|
81 |
+
bias. Default: ``True``.
|
82 |
+
|
83 |
+
Examples::
|
84 |
+
>>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12)
|
85 |
+
>>> src = torch.rand((10, 32, 512))
|
86 |
+
>>> tgt = torch.rand((20, 32, 512))
|
87 |
+
>>> out = transformer_model(src, tgt)
|
88 |
+
|
89 |
+
Note: A full example to apply nn.Transformer module for the word language model is available in
|
90 |
+
https://github.com/pytorch/examples/tree/master/word_language_model
|
91 |
+
"""
|
92 |
+
|
93 |
+
def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,
|
94 |
+
num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,
|
95 |
+
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
|
96 |
+
custom_encoder: Optional[Any] = None, custom_decoder: Optional[Any] = None,
|
97 |
+
layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
|
98 |
+
bias: bool = True, device=None, dtype=None) -> None:
|
99 |
+
factory_kwargs = {'device': device, 'dtype': dtype}
|
100 |
+
super().__init__()
|
101 |
+
torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}")
|
102 |
+
|
103 |
+
if custom_encoder is not None:
|
104 |
+
self.encoder = custom_encoder
|
105 |
+
else:
|
106 |
+
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
|
107 |
+
activation, layer_norm_eps, batch_first, norm_first,
|
108 |
+
bias, **factory_kwargs)
|
109 |
+
encoder_norm = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
|
110 |
+
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
|
111 |
+
|
112 |
+
if custom_decoder is not None:
|
113 |
+
self.decoder = custom_decoder
|
114 |
+
else:
|
115 |
+
decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
|
116 |
+
activation, layer_norm_eps, batch_first, norm_first,
|
117 |
+
bias, **factory_kwargs)
|
118 |
+
decoder_norm = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
|
119 |
+
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
|
120 |
+
|
121 |
+
self._reset_parameters()
|
122 |
+
|
123 |
+
self.d_model = d_model
|
124 |
+
self.nhead = nhead
|
125 |
+
|
126 |
+
self.batch_first = batch_first
|
127 |
+
|
128 |
+
def forward(self, src: Tensor, tgt: Tensor, src_mask: Optional[Tensor] = None, tgt_mask: Optional[Tensor] = None,
|
129 |
+
memory_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None,
|
130 |
+
tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None,
|
131 |
+
src_is_causal: Optional[bool] = None, tgt_is_causal: Optional[bool] = None,
|
132 |
+
memory_is_causal: bool = False) -> Tensor:
|
133 |
+
r"""Take in and process masked source/target sequences.
|
134 |
+
|
135 |
+
.. note::
|
136 |
+
|
137 |
+
If a boolean tensor is provided for any of the [src/tgt/memory]_mask arguments, positions with a ``True`` value are
|
138 |
+
not allowed to participate in the attention,
|
139 |
+
which is the opposite of the definition for :attr:`attn_mask`
|
140 |
+
in :func:`torch.nn.functional.scaled_dot_product_attention`.
|
141 |
+
|
142 |
+
Args:
|
143 |
+
src: the sequence to the encoder (required).
|
144 |
+
tgt: the sequence to the decoder (required).
|
145 |
+
src_mask: the additive mask for the src sequence (optional).
|
146 |
+
tgt_mask: the additive mask for the tgt sequence (optional).
|
147 |
+
memory_mask: the additive mask for the encoder output (optional).
|
148 |
+
src_key_padding_mask: the Tensor mask for src keys per batch (optional).
|
149 |
+
tgt_key_padding_mask: the Tensor mask for tgt keys per batch (optional).
|
150 |
+
memory_key_padding_mask: the Tensor mask for memory keys per batch (optional).
|
151 |
+
src_is_causal: If specified, applies a causal mask as ``src_mask``.
|
152 |
+
Default: ``None``; try to detect a causal mask.
|
153 |
+
Warning:
|
154 |
+
``src_is_causal`` provides a hint that ``src_mask`` is
|
155 |
+
the causal mask. Providing incorrect hints can result in
|
156 |
+
incorrect execution, including forward and backward
|
157 |
+
compatibility.
|
158 |
+
tgt_is_causal: If specified, applies a causal mask as ``tgt_mask``.
|
159 |
+
Default: ``None``; try to detect a causal mask.
|
160 |
+
Warning:
|
161 |
+
``tgt_is_causal`` provides a hint that ``tgt_mask`` is
|
162 |
+
the causal mask. Providing incorrect hints can result in
|
163 |
+
incorrect execution, including forward and backward
|
164 |
+
compatibility.
|
165 |
+
memory_is_causal: If specified, applies a causal mask as
|
166 |
+
``memory_mask``.
|
167 |
+
Default: ``False``.
|
168 |
+
Warning:
|
169 |
+
``memory_is_causal`` provides a hint that
|
170 |
+
``memory_mask`` is the causal mask. Providing incorrect
|
171 |
+
hints can result in incorrect execution, including
|
172 |
+
forward and backward compatibility.
|
173 |
+
|
174 |
+
Shape:
|
175 |
+
- src: :math:`(S, E)` for unbatched input, :math:`(S, N, E)` if `batch_first=False` or
|
176 |
+
`(N, S, E)` if `batch_first=True`.
|
177 |
+
- tgt: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or
|
178 |
+
`(N, T, E)` if `batch_first=True`.
|
179 |
+
- src_mask: :math:`(S, S)` or :math:`(N\cdot\text{num\_heads}, S, S)`.
|
180 |
+
- tgt_mask: :math:`(T, T)` or :math:`(N\cdot\text{num\_heads}, T, T)`.
|
181 |
+
- memory_mask: :math:`(T, S)`.
|
182 |
+
- src_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`.
|
183 |
+
- tgt_key_padding_mask: :math:`(T)` for unbatched input otherwise :math:`(N, T)`.
|
184 |
+
- memory_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`.
|
185 |
+
|
186 |
+
Note: [src/tgt/memory]_mask ensures that position :math:`i` is allowed to attend to the unmasked
|
187 |
+
positions. If a BoolTensor is provided, positions with ``True``
|
188 |
+
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
|
189 |
+
is provided, it will be added to the attention weight.
|
190 |
+
[src/tgt/memory]_key_padding_mask marks specified elements in the key to be ignored by
|
191 |
+
the attention. If a BoolTensor is provided, the positions with the
|
192 |
+
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
|
193 |
+
|
194 |
+
- output: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or
|
195 |
+
`(N, T, E)` if `batch_first=True`.
|
196 |
+
|
197 |
+
Note: Due to the multi-head attention architecture in the transformer model,
|
198 |
+
the output sequence length of a transformer is the same as the input sequence
|
199 |
+
(i.e. target) length of the decoder.
|
200 |
+
|
201 |
+
where :math:`S` is the source sequence length, :math:`T` is the target sequence length, :math:`N` is the
|
202 |
+
batch size, :math:`E` is the feature number
|
203 |
+
|
204 |
+
Examples:
|
205 |
+
>>> # xdoctest: +SKIP
|
206 |
+
>>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
|
207 |
+
"""
|
208 |
+
is_batched = src.dim() == 3
|
209 |
+
if not self.batch_first and src.size(1) != tgt.size(1) and is_batched:
|
210 |
+
raise RuntimeError("the batch number of src and tgt must be equal")
|
211 |
+
elif self.batch_first and src.size(0) != tgt.size(0) and is_batched:
|
212 |
+
raise RuntimeError("the batch number of src and tgt must be equal")
|
213 |
+
|
214 |
+
if src.size(-1) != self.d_model or tgt.size(-1) != self.d_model:
|
215 |
+
raise RuntimeError("the feature number of src and tgt must be equal to d_model")
|
216 |
+
|
217 |
+
memory = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask,
|
218 |
+
is_causal=src_is_causal)
|
219 |
+
output = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask,
|
220 |
+
tgt_key_padding_mask=tgt_key_padding_mask,
|
221 |
+
memory_key_padding_mask=memory_key_padding_mask,
|
222 |
+
tgt_is_causal=tgt_is_causal, memory_is_causal=memory_is_causal)
|
223 |
+
return output
|
224 |
+
|
225 |
+
@staticmethod
|
226 |
+
def generate_square_subsequent_mask(
|
227 |
+
sz: int,
|
228 |
+
device: Optional[torch.device] = None,
|
229 |
+
dtype: Optional[torch.dtype] = None,
|
230 |
+
) -> Tensor:
|
231 |
+
r"""Generate a square causal mask for the sequence.
|
232 |
+
|
233 |
+
The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0).
|
234 |
+
"""
|
235 |
+
return _generate_square_subsequent_mask(sz, dtype=dtype, device=device)
|
236 |
+
|
237 |
+
def _reset_parameters(self):
|
238 |
+
r"""Initiate parameters in the transformer model."""
|
239 |
+
for p in self.parameters():
|
240 |
+
if p.dim() > 1:
|
241 |
+
xavier_uniform_(p)
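
# Illustrative usage sketch (added for exposition; ``_sketch_transformer_forward`` is
# not an upstream helper). Shapes assume the default ``batch_first=False`` layout.
def _sketch_transformer_forward() -> None:
    model = Transformer(d_model=512, nhead=8)
    src = torch.rand(10, 32, 512)                                  # (S, N, E)
    tgt = torch.rand(20, 32, 512)                                  # (T, N, E)
    tgt_mask = Transformer.generate_square_subsequent_mask(20)     # (T, T) causal mask
    out = model(src, tgt, tgt_mask=tgt_mask, tgt_is_causal=True)
    assert out.shape == (20, 32, 512)                              # output follows tgt length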
|
242 |
+
|
243 |
+
|
244 |
+
class TransformerEncoder(Module):
|
245 |
+
r"""TransformerEncoder is a stack of N encoder layers.
|
246 |
+
|
247 |
+
Users can build the BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters.
|
248 |
+
|
249 |
+
Args:
|
250 |
+
encoder_layer: an instance of the TransformerEncoderLayer() class (required).
|
251 |
+
num_layers: the number of sub-encoder-layers in the encoder (required).
|
252 |
+
norm: the layer normalization component (optional).
|
253 |
+
enable_nested_tensor: if True, input will automatically convert to nested tensor
|
254 |
+
(and convert back on output). This will improve the overall performance of
|
255 |
+
TransformerEncoder when padding rate is high. Default: ``True`` (enabled).
|
256 |
+
|
257 |
+
Examples::
|
258 |
+
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
|
259 |
+
>>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
|
260 |
+
>>> src = torch.rand(10, 32, 512)
|
261 |
+
>>> out = transformer_encoder(src)
|
262 |
+
"""
|
263 |
+
|
264 |
+
__constants__ = ['norm']
|
265 |
+
|
266 |
+
def __init__(
|
267 |
+
self,
|
268 |
+
encoder_layer: "TransformerEncoderLayer",
|
269 |
+
num_layers: int,
|
270 |
+
norm: Optional[Module] = None,
|
271 |
+
enable_nested_tensor: bool = True,
|
272 |
+
mask_check: bool = True
|
273 |
+
) -> None:
|
274 |
+
super().__init__()
|
275 |
+
torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}")
|
276 |
+
self.layers = _get_clones(encoder_layer, num_layers)
|
277 |
+
self.num_layers = num_layers
|
278 |
+
self.norm = norm
|
279 |
+
# this attribute saves the value provided at object construction
|
280 |
+
self.enable_nested_tensor = enable_nested_tensor
|
281 |
+
# this attribute controls whether nested tensors are used
|
282 |
+
self.use_nested_tensor = enable_nested_tensor
|
283 |
+
self.mask_check = mask_check
|
284 |
+
|
285 |
+
enc_layer = "encoder_layer"
|
286 |
+
why_not_sparsity_fast_path = ''
|
287 |
+
if not isinstance(encoder_layer, torch.nn.TransformerEncoderLayer):
|
288 |
+
why_not_sparsity_fast_path = f"{enc_layer} was not TransformerEncoderLayer"
|
289 |
+
elif encoder_layer.norm_first:
|
290 |
+
why_not_sparsity_fast_path = f"{enc_layer}.norm_first was True"
|
291 |
+
elif not encoder_layer.self_attn.batch_first:
|
292 |
+
why_not_sparsity_fast_path = (f"{enc_layer}.self_attn.batch_first was not True" +
|
293 |
+
"(use batch_first for better inference performance)")
|
294 |
+
elif not encoder_layer.self_attn._qkv_same_embed_dim:
|
295 |
+
why_not_sparsity_fast_path = f"{enc_layer}.self_attn._qkv_same_embed_dim was not True"
|
296 |
+
elif encoder_layer.self_attn.in_proj_bias is None:
|
297 |
+
why_not_sparsity_fast_path = f"{enc_layer}.self_attn was passed bias=False"
|
298 |
+
elif not encoder_layer.activation_relu_or_gelu:
|
299 |
+
why_not_sparsity_fast_path = f"{enc_layer}.activation_relu_or_gelu was not True"
|
300 |
+
elif not (encoder_layer.norm1.eps == encoder_layer.norm2.eps):
|
301 |
+
why_not_sparsity_fast_path = f"{enc_layer}.norm1.eps was not equal to {enc_layer}.norm2.eps"
|
302 |
+
elif encoder_layer.self_attn.num_heads % 2 == 1:
|
303 |
+
why_not_sparsity_fast_path = f"{enc_layer}.self_attn.num_heads is odd"
|
304 |
+
|
305 |
+
if enable_nested_tensor and why_not_sparsity_fast_path:
|
306 |
+
warnings.warn(f"enable_nested_tensor is True, but self.use_nested_tensor is False because {why_not_sparsity_fast_path}")
|
307 |
+
self.use_nested_tensor = False
|
308 |
+
|
309 |
+
|
310 |
+
def forward(
|
311 |
+
self,
|
312 |
+
src: Tensor,
|
313 |
+
mask: Optional[Tensor] = None,
|
314 |
+
src_key_padding_mask: Optional[Tensor] = None,
|
315 |
+
is_causal: Optional[bool] = None) -> Tensor:
|
316 |
+
r"""Pass the input through the encoder layers in turn.
|
317 |
+
|
318 |
+
Args:
|
319 |
+
src: the sequence to the encoder (required).
|
320 |
+
mask: the mask for the src sequence (optional).
|
321 |
+
src_key_padding_mask: the mask for the src keys per batch (optional).
|
322 |
+
is_causal: If specified, applies a causal mask as ``mask``.
|
323 |
+
Default: ``None``; try to detect a causal mask.
|
324 |
+
Warning:
|
325 |
+
``is_causal`` provides a hint that ``mask`` is the
|
326 |
+
causal mask. Providing incorrect hints can result in
|
327 |
+
incorrect execution, including forward and backward
|
328 |
+
compatibility.
|
329 |
+
|
330 |
+
Shape:
|
331 |
+
see the docs in :class:`~torch.nn.Transformer`.
|
332 |
+
"""
|
333 |
+
src_key_padding_mask = F._canonical_mask(
|
334 |
+
mask=src_key_padding_mask,
|
335 |
+
mask_name="src_key_padding_mask",
|
336 |
+
other_type=F._none_or_dtype(mask),
|
337 |
+
other_name="mask",
|
338 |
+
target_type=src.dtype
|
339 |
+
)
|
340 |
+
|
341 |
+
mask = F._canonical_mask(
|
342 |
+
mask=mask,
|
343 |
+
mask_name="mask",
|
344 |
+
other_type=None,
|
345 |
+
other_name="",
|
346 |
+
target_type=src.dtype,
|
347 |
+
check_other=False,
|
348 |
+
)
|
349 |
+
|
350 |
+
output = src
|
351 |
+
convert_to_nested = False
|
352 |
+
first_layer = self.layers[0]
|
353 |
+
src_key_padding_mask_for_layers = src_key_padding_mask
|
354 |
+
why_not_sparsity_fast_path = ''
|
355 |
+
str_first_layer = "self.layers[0]"
|
356 |
+
batch_first = first_layer.self_attn.batch_first
|
357 |
+
is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled()
|
358 |
+
|
359 |
+
if not is_fastpath_enabled:
|
360 |
+
why_not_sparsity_fast_path = "torch.backends.mha.get_fastpath_enabled() was not True"
|
361 |
+
elif not hasattr(self, "use_nested_tensor"):
|
362 |
+
why_not_sparsity_fast_path = "use_nested_tensor attribute not present"
|
363 |
+
elif not self.use_nested_tensor:
|
364 |
+
why_not_sparsity_fast_path = "self.use_nested_tensor (set in init) was not True"
|
365 |
+
elif first_layer.training:
|
366 |
+
why_not_sparsity_fast_path = f"{str_first_layer} was in training mode"
|
367 |
+
elif not src.dim() == 3:
|
368 |
+
why_not_sparsity_fast_path = f"input not batched; expected src.dim() of 3 but got {src.dim()}"
|
369 |
+
elif src_key_padding_mask is None:
|
370 |
+
why_not_sparsity_fast_path = "src_key_padding_mask was None"
|
371 |
+
elif (((not hasattr(self, "mask_check")) or self.mask_check)
|
372 |
+
and not torch._nested_tensor_from_mask_left_aligned(src, src_key_padding_mask.logical_not())):
|
373 |
+
why_not_sparsity_fast_path = "mask_check enabled, and src and src_key_padding_mask was not left aligned"
|
374 |
+
elif output.is_nested:
|
375 |
+
why_not_sparsity_fast_path = "NestedTensor input is not supported"
|
376 |
+
elif mask is not None:
|
377 |
+
why_not_sparsity_fast_path = "src_key_padding_mask and mask were both supplied"
|
378 |
+
elif torch.is_autocast_enabled():
|
379 |
+
why_not_sparsity_fast_path = "autocast is enabled"
|
380 |
+
|
381 |
+
if not why_not_sparsity_fast_path:
|
382 |
+
tensor_args = (
|
383 |
+
src,
|
384 |
+
first_layer.self_attn.in_proj_weight,
|
385 |
+
first_layer.self_attn.in_proj_bias,
|
386 |
+
first_layer.self_attn.out_proj.weight,
|
387 |
+
first_layer.self_attn.out_proj.bias,
|
388 |
+
first_layer.norm1.weight,
|
389 |
+
first_layer.norm1.bias,
|
390 |
+
first_layer.norm2.weight,
|
391 |
+
first_layer.norm2.bias,
|
392 |
+
first_layer.linear1.weight,
|
393 |
+
first_layer.linear1.bias,
|
394 |
+
first_layer.linear2.weight,
|
395 |
+
first_layer.linear2.bias,
|
396 |
+
)
|
397 |
+
_supported_device_type = ["cpu", "cuda", torch.utils.backend_registration._privateuse1_backend_name]
|
398 |
+
if torch.overrides.has_torch_function(tensor_args):
|
399 |
+
why_not_sparsity_fast_path = "some Tensor argument has_torch_function"
|
400 |
+
elif src.device.type not in _supported_device_type:
|
401 |
+
why_not_sparsity_fast_path = f"src device is neither one of {_supported_device_type}"
|
402 |
+
elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args):
|
403 |
+
why_not_sparsity_fast_path = ("grad is enabled and at least one of query or the "
|
404 |
+
"input/output projection weights or biases requires_grad")
|
405 |
+
|
406 |
+
if (not why_not_sparsity_fast_path) and (src_key_padding_mask is not None):
|
407 |
+
convert_to_nested = True
|
408 |
+
output = torch._nested_tensor_from_mask(output, src_key_padding_mask.logical_not(), mask_check=False)
|
409 |
+
src_key_padding_mask_for_layers = None
|
410 |
+
|
411 |
+
seq_len = _get_seq_len(src, batch_first)
|
412 |
+
is_causal = _detect_is_causal_mask(mask, is_causal, seq_len)
|
413 |
+
|
414 |
+
for mod in self.layers:
|
415 |
+
output = mod(output, src_mask=mask, is_causal=is_causal, src_key_padding_mask=src_key_padding_mask_for_layers)
|
416 |
+
|
417 |
+
if convert_to_nested:
|
418 |
+
output = output.to_padded_tensor(0., src.size())
|
419 |
+
|
420 |
+
if self.norm is not None:
|
421 |
+
output = self.norm(output)
|
422 |
+
|
423 |
+
return output
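
# Illustrative usage sketch (added for exposition; ``_sketch_encoder_padding_fast_path``
# is not an upstream helper). The nested-tensor fast path checked above only applies to
# inference-mode, batch_first layers given a left-aligned key padding mask.
def _sketch_encoder_padding_fast_path() -> None:
    layer = TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
    encoder = TransformerEncoder(layer, num_layers=6).eval()
    src = torch.rand(32, 10, 512)                        # (N, S, E)
    padding = torch.zeros(32, 10, dtype=torch.bool)      # True marks keys to ignore
    padding[:, 8:] = True                                # last two positions are padding
    with torch.no_grad():
        out = encoder(src, src_key_padding_mask=padding)
    assert out.shape == (32, 10, 512)                    # padded back to the input shape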
|
424 |
+
|
425 |
+
|
426 |
+
class TransformerDecoder(Module):
|
427 |
+
r"""TransformerDecoder is a stack of N decoder layers.
|
428 |
+
|
429 |
+
Args:
|
430 |
+
decoder_layer: an instance of the TransformerDecoderLayer() class (required).
|
431 |
+
num_layers: the number of sub-decoder-layers in the decoder (required).
|
432 |
+
norm: the layer normalization component (optional).
|
433 |
+
|
434 |
+
Examples::
|
435 |
+
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
|
436 |
+
>>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
|
437 |
+
>>> memory = torch.rand(10, 32, 512)
|
438 |
+
>>> tgt = torch.rand(20, 32, 512)
|
439 |
+
>>> out = transformer_decoder(tgt, memory)
|
440 |
+
"""
|
441 |
+
|
442 |
+
__constants__ = ['norm']
|
443 |
+
|
444 |
+
def __init__(
|
445 |
+
self,
|
446 |
+
decoder_layer: "TransformerDecoderLayer",
|
447 |
+
num_layers: int,
|
448 |
+
norm: Optional[Module] = None
|
449 |
+
) -> None:
|
450 |
+
super().__init__()
|
451 |
+
torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}")
|
452 |
+
self.layers = _get_clones(decoder_layer, num_layers)
|
453 |
+
self.num_layers = num_layers
|
454 |
+
self.norm = norm
|
455 |
+
|
456 |
+
def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None,
|
457 |
+
memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None,
|
458 |
+
memory_key_padding_mask: Optional[Tensor] = None, tgt_is_causal: Optional[bool] = None,
|
459 |
+
memory_is_causal: bool = False) -> Tensor:
|
460 |
+
r"""Pass the inputs (and mask) through the decoder layer in turn.
|
461 |
+
|
462 |
+
Args:
|
463 |
+
tgt: the sequence to the decoder (required).
|
464 |
+
memory: the sequence from the last layer of the encoder (required).
|
465 |
+
tgt_mask: the mask for the tgt sequence (optional).
|
466 |
+
memory_mask: the mask for the memory sequence (optional).
|
467 |
+
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
|
468 |
+
memory_key_padding_mask: the mask for the memory keys per batch (optional).
|
469 |
+
tgt_is_causal: If specified, applies a causal mask as ``tgt mask``.
|
470 |
+
Default: ``None``; try to detect a causal mask.
|
471 |
+
Warning:
|
472 |
+
``tgt_is_causal`` provides a hint that ``tgt_mask`` is
|
473 |
+
the causal mask. Providing incorrect hints can result in
|
474 |
+
incorrect execution, including forward and backward
|
475 |
+
compatibility.
|
476 |
+
memory_is_causal: If specified, applies a causal mask as
|
477 |
+
``memory mask``.
|
478 |
+
Default: ``False``.
|
479 |
+
Warning:
|
480 |
+
``memory_is_causal`` provides a hint that
|
481 |
+
``memory_mask`` is the causal mask. Providing incorrect
|
482 |
+
hints can result in incorrect execution, including
|
483 |
+
forward and backward compatibility.
|
484 |
+
|
485 |
+
Shape:
|
486 |
+
see the docs in :class:`~torch.nn.Transformer`.
|
487 |
+
"""
|
488 |
+
output = tgt
|
489 |
+
|
490 |
+
seq_len = _get_seq_len(tgt, self.layers[0].self_attn.batch_first)
|
491 |
+
tgt_is_causal = _detect_is_causal_mask(tgt_mask, tgt_is_causal, seq_len)
|
492 |
+
|
493 |
+
for mod in self.layers:
|
494 |
+
output = mod(output, memory, tgt_mask=tgt_mask,
|
495 |
+
memory_mask=memory_mask,
|
496 |
+
tgt_key_padding_mask=tgt_key_padding_mask,
|
497 |
+
memory_key_padding_mask=memory_key_padding_mask,
|
498 |
+
tgt_is_causal=tgt_is_causal,
|
499 |
+
memory_is_causal=memory_is_causal)
|
500 |
+
|
501 |
+
if self.norm is not None:
|
502 |
+
output = self.norm(output)
|
503 |
+
|
504 |
+
return output
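
# Illustrative usage sketch (added for exposition; ``_sketch_decoder_causal`` is not an
# upstream helper): decoding with an explicit causal target mask.
def _sketch_decoder_causal() -> None:
    layer = TransformerDecoderLayer(d_model=512, nhead=8)
    decoder = TransformerDecoder(layer, num_layers=6)
    memory = torch.rand(10, 32, 512)                               # encoder output, (S, N, E)
    tgt = torch.rand(20, 32, 512)                                  # (T, N, E)
    tgt_mask = Transformer.generate_square_subsequent_mask(20)     # (T, T)
    out = decoder(tgt, memory, tgt_mask=tgt_mask, tgt_is_causal=True)
    assert out.shape == tgt.shape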
|
505 |
+
|
506 |
+
class TransformerEncoderLayer(Module):
|
507 |
+
r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
|
508 |
+
|
509 |
+
This standard encoder layer is based on the paper "Attention Is All You Need".
|
510 |
+
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
|
511 |
+
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
|
512 |
+
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
|
513 |
+
in a different way during application.
|
514 |
+
|
515 |
+
TransformerEncoderLayer can handle either traditional torch.tensor inputs,
|
516 |
+
or Nested Tensor inputs. Derived classes are expected to similarly accept
|
517 |
+
both input formats. (Not all combinations of inputs are currently
|
518 |
+
supported by TransformerEncoderLayer while Nested Tensor is in prototype
|
519 |
+
state.)
|
520 |
+
|
521 |
+
If you are implementing a custom layer, you may derive it either from
|
522 |
+
the Module or TransformerEncoderLayer class. If your custom layer
|
523 |
+
supports both torch.Tensors and Nested Tensors inputs, make its
|
524 |
+
implementation a derived class of TransformerEncoderLayer. If your custom
|
525 |
+
Layer supports only torch.Tensor inputs, derive its implementation from
|
526 |
+
Module.
|
527 |
+
|
528 |
+
Args:
|
529 |
+
d_model: the number of expected features in the input (required).
|
530 |
+
nhead: the number of heads in the multiheadattention models (required).
|
531 |
+
dim_feedforward: the dimension of the feedforward network model (default=2048).
|
532 |
+
dropout: the dropout value (default=0.1).
|
533 |
+
activation: the activation function of the intermediate layer, can be a string
|
534 |
+
("relu" or "gelu") or a unary callable. Default: relu
|
535 |
+
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
|
536 |
+
batch_first: If ``True``, then the input and output tensors are provided
|
537 |
+
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
|
538 |
+
norm_first: if ``True``, layer norm is done prior to attention and feedforward
|
539 |
+
operations, respectively. Otherwise it's done after. Default: ``False`` (after).
|
540 |
+
bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive
|
541 |
+
bias. Default: ``True``.
|
542 |
+
|
543 |
+
Examples::
|
544 |
+
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
|
545 |
+
>>> src = torch.rand(10, 32, 512)
|
546 |
+
>>> out = encoder_layer(src)
|
547 |
+
|
548 |
+
Alternatively, when ``batch_first`` is ``True``:
|
549 |
+
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
|
550 |
+
>>> src = torch.rand(32, 10, 512)
|
551 |
+
>>> out = encoder_layer(src)
|
552 |
+
|
553 |
+
Fast path:
|
554 |
+
forward() will use a special optimized implementation described in
|
555 |
+
`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`_ if all of the following
|
556 |
+
conditions are met:
|
557 |
+
|
558 |
+
- Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor
|
559 |
+
argument ``requires_grad``
|
560 |
+
- training is disabled (using ``.eval()``)
|
561 |
+
- batch_first is ``True`` and the input is batched (i.e., ``src.dim() == 3``)
|
562 |
+
- activation is one of: ``"relu"``, ``"gelu"``, ``torch.functional.relu``, or ``torch.functional.gelu``
|
563 |
+
- at most one of ``src_mask`` and ``src_key_padding_mask`` is passed
|
564 |
+
- if src is a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_, neither ``src_mask``
|
565 |
+
nor ``src_key_padding_mask`` is passed
|
566 |
+
- the two ``LayerNorm`` instances have a consistent ``eps`` value (this will naturally be the case
|
567 |
+
unless the caller has manually modified one without modifying the other)
|
568 |
+
|
569 |
+
If the optimized implementation is in use, a
|
570 |
+
`NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be
|
571 |
+
passed for ``src`` to represent padding more efficiently than using a padding
|
572 |
+
mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ will be
|
573 |
+
returned, and an additional speedup proportional to the fraction of the input that
|
574 |
+
is padding can be expected.
|
575 |
+
|
576 |
+
.. _`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`:
|
577 |
+
https://arxiv.org/abs/2205.14135
|
578 |
+
|
579 |
+
"""
|
580 |
+
|
581 |
+
__constants__ = ['norm_first']
|
582 |
+
|
583 |
+
def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1,
|
584 |
+
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
|
585 |
+
layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
|
586 |
+
bias: bool = True, device=None, dtype=None) -> None:
|
587 |
+
factory_kwargs = {'device': device, 'dtype': dtype}
|
588 |
+
super().__init__()
|
589 |
+
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout,
|
590 |
+
bias=bias, batch_first=batch_first,
|
591 |
+
**factory_kwargs)
|
592 |
+
# Implementation of Feedforward model
|
593 |
+
self.linear1 = Linear(d_model, dim_feedforward, bias=bias, **factory_kwargs)
|
594 |
+
self.dropout = Dropout(dropout)
|
595 |
+
self.linear2 = Linear(dim_feedforward, d_model, bias=bias, **factory_kwargs)
|
596 |
+
|
597 |
+
self.norm_first = norm_first
|
598 |
+
self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
|
599 |
+
self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
|
600 |
+
self.dropout1 = Dropout(dropout)
|
601 |
+
self.dropout2 = Dropout(dropout)
|
602 |
+
|
603 |
+
# Legacy string support for activation function.
|
604 |
+
if isinstance(activation, str):
|
605 |
+
activation = _get_activation_fn(activation)
|
606 |
+
|
607 |
+
# We can't test self.activation in forward() in TorchScript,
|
608 |
+
# so stash some information about it instead.
|
609 |
+
if activation is F.relu or isinstance(activation, torch.nn.ReLU):
|
610 |
+
self.activation_relu_or_gelu = 1
|
611 |
+
elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
|
612 |
+
self.activation_relu_or_gelu = 2
|
613 |
+
else:
|
614 |
+
self.activation_relu_or_gelu = 0
|
615 |
+
self.activation = activation
|
616 |
+
|
617 |
+
def __setstate__(self, state):
|
618 |
+
super().__setstate__(state)
|
619 |
+
if not hasattr(self, 'activation'):
|
620 |
+
self.activation = F.relu
|
621 |
+
|
622 |
+
|
623 |
+
def forward(
|
624 |
+
self,
|
625 |
+
src: Tensor,
|
626 |
+
src_mask: Optional[Tensor] = None,
|
627 |
+
src_key_padding_mask: Optional[Tensor] = None,
|
628 |
+
is_causal: bool = False) -> Tensor:
|
629 |
+
r"""Pass the input through the encoder layer.
|
630 |
+
|
631 |
+
Args:
|
632 |
+
src: the sequence to the encoder layer (required).
|
633 |
+
src_mask: the mask for the src sequence (optional).
|
634 |
+
src_key_padding_mask: the mask for the src keys per batch (optional).
|
635 |
+
is_causal: If specified, applies a causal mask as ``src mask``.
|
636 |
+
Default: ``False``.
|
637 |
+
Warning:
|
638 |
+
``is_causal`` provides a hint that ``src_mask`` is the
|
639 |
+
causal mask. Providing incorrect hints can result in
|
640 |
+
incorrect execution, including forward and backward
|
641 |
+
compatibility.
|
642 |
+
|
643 |
+
Shape:
|
644 |
+
see the docs in :class:`~torch.nn.Transformer`.
|
645 |
+
"""
|
646 |
+
src_key_padding_mask = F._canonical_mask(
|
647 |
+
mask=src_key_padding_mask,
|
648 |
+
mask_name="src_key_padding_mask",
|
649 |
+
other_type=F._none_or_dtype(src_mask),
|
650 |
+
other_name="src_mask",
|
651 |
+
target_type=src.dtype
|
652 |
+
)
|
653 |
+
|
654 |
+
src_mask = F._canonical_mask(
|
655 |
+
mask=src_mask,
|
656 |
+
mask_name="src_mask",
|
657 |
+
other_type=None,
|
658 |
+
other_name="",
|
659 |
+
target_type=src.dtype,
|
660 |
+
check_other=False,
|
661 |
+
)
|
662 |
+
|
663 |
+
is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled()
|
664 |
+
|
665 |
+
# see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
|
666 |
+
why_not_sparsity_fast_path = ''
|
667 |
+
if not is_fastpath_enabled:
|
668 |
+
why_not_sparsity_fast_path = "torch.backends.mha.get_fastpath_enabled() was not True"
|
669 |
+
elif not src.dim() == 3:
|
670 |
+
why_not_sparsity_fast_path = f"input not batched; expected src.dim() of 3 but got {src.dim()}"
|
671 |
+
elif self.training:
|
672 |
+
why_not_sparsity_fast_path = "training is enabled"
|
673 |
+
elif not self.self_attn.batch_first:
|
674 |
+
why_not_sparsity_fast_path = "self_attn.batch_first was not True"
|
675 |
+
elif self.self_attn.in_proj_bias is None:
|
676 |
+
why_not_sparsity_fast_path = "self_attn was passed bias=False"
|
677 |
+
elif not self.self_attn._qkv_same_embed_dim:
|
678 |
+
why_not_sparsity_fast_path = "self_attn._qkv_same_embed_dim was not True"
|
679 |
+
elif not self.activation_relu_or_gelu:
|
680 |
+
why_not_sparsity_fast_path = "activation_relu_or_gelu was not True"
|
681 |
+
elif not (self.norm1.eps == self.norm2.eps):
|
682 |
+
why_not_sparsity_fast_path = "norm1.eps is not equal to norm2.eps"
|
683 |
+
elif src.is_nested and (src_key_padding_mask is not None or src_mask is not None):
|
684 |
+
why_not_sparsity_fast_path = "neither src_key_padding_mask nor src_mask are not supported with NestedTensor input"
|
685 |
+
elif self.self_attn.num_heads % 2 == 1:
|
686 |
+
why_not_sparsity_fast_path = "num_head is odd"
|
687 |
+
elif torch.is_autocast_enabled():
|
688 |
+
why_not_sparsity_fast_path = "autocast is enabled"
|
689 |
+
if not why_not_sparsity_fast_path:
|
690 |
+
tensor_args = (
|
691 |
+
src,
|
692 |
+
self.self_attn.in_proj_weight,
|
693 |
+
self.self_attn.in_proj_bias,
|
694 |
+
self.self_attn.out_proj.weight,
|
695 |
+
self.self_attn.out_proj.bias,
|
696 |
+
self.norm1.weight,
|
697 |
+
self.norm1.bias,
|
698 |
+
self.norm2.weight,
|
699 |
+
self.norm2.bias,
|
700 |
+
self.linear1.weight,
|
701 |
+
self.linear1.bias,
|
702 |
+
self.linear2.weight,
|
703 |
+
self.linear2.bias,
|
704 |
+
)
|
705 |
+
|
706 |
+
# We have to use list comprehensions below because TorchScript does not support
|
707 |
+
# generator expressions.
|
708 |
+
_supported_device_type = ["cpu", "cuda", torch.utils.backend_registration._privateuse1_backend_name]
|
709 |
+
if torch.overrides.has_torch_function(tensor_args):
|
710 |
+
why_not_sparsity_fast_path = "some Tensor argument has_torch_function"
|
711 |
+
elif not all((x.device.type in _supported_device_type) for x in tensor_args):
|
712 |
+
why_not_sparsity_fast_path = ("some Tensor argument's device is neither one of "
|
713 |
+
f"{_supported_device_type}")
|
714 |
+
elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args):
|
715 |
+
why_not_sparsity_fast_path = ("grad is enabled and at least one of query or the "
|
716 |
+
"input/output projection weights or biases requires_grad")
|
717 |
+
|
718 |
+
if not why_not_sparsity_fast_path:
|
719 |
+
merged_mask, mask_type = self.self_attn.merge_masks(src_mask, src_key_padding_mask, src)
|
720 |
+
return torch._transformer_encoder_layer_fwd(
|
721 |
+
src,
|
722 |
+
self.self_attn.embed_dim,
|
723 |
+
self.self_attn.num_heads,
|
724 |
+
self.self_attn.in_proj_weight,
|
725 |
+
self.self_attn.in_proj_bias,
|
726 |
+
self.self_attn.out_proj.weight,
|
727 |
+
self.self_attn.out_proj.bias,
|
728 |
+
self.activation_relu_or_gelu == 2,
|
729 |
+
self.norm_first,
|
730 |
+
self.norm1.eps,
|
731 |
+
self.norm1.weight,
|
732 |
+
self.norm1.bias,
|
733 |
+
self.norm2.weight,
|
734 |
+
self.norm2.bias,
|
735 |
+
self.linear1.weight,
|
736 |
+
self.linear1.bias,
|
737 |
+
self.linear2.weight,
|
738 |
+
self.linear2.bias,
|
739 |
+
merged_mask,
|
740 |
+
mask_type,
|
741 |
+
)
|
742 |
+
|
743 |
+
|
744 |
+
x = src
|
745 |
+
if self.norm_first:
|
746 |
+
x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask, is_causal=is_causal)
|
747 |
+
x = x + self._ff_block(self.norm2(x))
|
748 |
+
else:
|
749 |
+
x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask, is_causal=is_causal))
|
750 |
+
x = self.norm2(x + self._ff_block(x))
|
751 |
+
|
752 |
+
return x
|
753 |
+
|
754 |
+
# self-attention block
|
755 |
+
def _sa_block(self, x: Tensor,
|
756 |
+
attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False) -> Tensor:
|
757 |
+
x = self.self_attn(x, x, x,
|
758 |
+
attn_mask=attn_mask,
|
759 |
+
key_padding_mask=key_padding_mask,
|
760 |
+
need_weights=False, is_causal=is_causal)[0]
|
761 |
+
return self.dropout1(x)
|
762 |
+
|
763 |
+
# feed forward block
|
764 |
+
def _ff_block(self, x: Tensor) -> Tensor:
|
765 |
+
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
|
766 |
+
return self.dropout2(x)
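
# Illustrative usage sketch (added for exposition; ``_sketch_encoder_layer_norm_first``
# is not an upstream helper): the same layer in post-norm (default) and pre-norm form;
# only the placement of LayerNorm around the two residual branches changes.
def _sketch_encoder_layer_norm_first() -> None:
    src = torch.rand(10, 32, 512)                                  # (S, N, E)
    post_norm = TransformerEncoderLayer(d_model=512, nhead=8)
    pre_norm = TransformerEncoderLayer(d_model=512, nhead=8, norm_first=True)
    assert post_norm(src).shape == pre_norm(src).shape == src.shape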
|
767 |
+
|
768 |
+
|
769 |
+
class TransformerDecoderLayer(Module):
|
770 |
+
r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
|
771 |
+
|
772 |
+
This standard decoder layer is based on the paper "Attention Is All You Need".
|
773 |
+
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
|
774 |
+
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
|
775 |
+
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
|
776 |
+
in a different way during application.
|
777 |
+
|
778 |
+
Args:
|
779 |
+
d_model: the number of expected features in the input (required).
|
780 |
+
nhead: the number of heads in the multiheadattention models (required).
|
781 |
+
dim_feedforward: the dimension of the feedforward network model (default=2048).
|
782 |
+
dropout: the dropout value (default=0.1).
|
783 |
+
activation: the activation function of the intermediate layer, can be a string
|
784 |
+
("relu" or "gelu") or a unary callable. Default: relu
|
785 |
+
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
|
786 |
+
batch_first: If ``True``, then the input and output tensors are provided
|
787 |
+
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
|
788 |
+
norm_first: if ``True``, layer norm is done prior to self attention, multihead
|
789 |
+
attention and feedforward operations, respectively. Otherwise it's done after.
|
790 |
+
Default: ``False`` (after).
|
791 |
+
bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive
|
792 |
+
bias. Default: ``True``.
|
793 |
+
|
794 |
+
Examples::
|
795 |
+
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
|
796 |
+
>>> memory = torch.rand(10, 32, 512)
|
797 |
+
>>> tgt = torch.rand(20, 32, 512)
|
798 |
+
>>> out = decoder_layer(tgt, memory)
|
799 |
+
|
800 |
+
Alternatively, when ``batch_first`` is ``True``:
|
801 |
+
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=True)
|
802 |
+
>>> memory = torch.rand(32, 10, 512)
|
803 |
+
>>> tgt = torch.rand(32, 20, 512)
|
804 |
+
>>> out = decoder_layer(tgt, memory)
|
805 |
+
"""
|
806 |
+
|
807 |
+
__constants__ = ['norm_first']
|
808 |
+
|
809 |
+
def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1,
|
810 |
+
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
|
811 |
+
layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
|
812 |
+
bias: bool = True, device=None, dtype=None) -> None:
|
813 |
+
factory_kwargs = {'device': device, 'dtype': dtype}
|
814 |
+
super().__init__()
|
815 |
+
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
|
816 |
+
bias=bias, **factory_kwargs)
|
817 |
+
self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
|
818 |
+
bias=bias, **factory_kwargs)
|
819 |
+
# Implementation of Feedforward model
|
820 |
+
self.linear1 = Linear(d_model, dim_feedforward, bias=bias, **factory_kwargs)
|
821 |
+
self.dropout = Dropout(dropout)
|
822 |
+
self.linear2 = Linear(dim_feedforward, d_model, bias=bias, **factory_kwargs)
|
823 |
+
|
824 |
+
self.norm_first = norm_first
|
825 |
+
self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
|
826 |
+
self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
|
827 |
+
self.norm3 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
|
828 |
+
self.dropout1 = Dropout(dropout)
|
829 |
+
self.dropout2 = Dropout(dropout)
|
830 |
+
self.dropout3 = Dropout(dropout)
|
831 |
+
|
832 |
+
# Legacy string support for activation function.
|
833 |
+
if isinstance(activation, str):
|
834 |
+
self.activation = _get_activation_fn(activation)
|
835 |
+
else:
|
836 |
+
self.activation = activation
|
837 |
+
|
838 |
+
def __setstate__(self, state):
|
839 |
+
if 'activation' not in state:
|
840 |
+
state['activation'] = F.relu
|
841 |
+
super().__setstate__(state)
|
842 |
+
|
843 |
+
def forward(
|
844 |
+
self,
|
845 |
+
tgt: Tensor,
|
846 |
+
memory: Tensor,
|
847 |
+
tgt_mask: Optional[Tensor] = None,
|
848 |
+
memory_mask: Optional[Tensor] = None,
|
849 |
+
tgt_key_padding_mask: Optional[Tensor] = None,
|
850 |
+
memory_key_padding_mask: Optional[Tensor] = None,
|
851 |
+
tgt_is_causal: bool = False,
|
852 |
+
memory_is_causal: bool = False,
|
853 |
+
) -> Tensor:
|
854 |
+
r"""Pass the inputs (and mask) through the decoder layer.
|
855 |
+
|
856 |
+
Args:
|
857 |
+
tgt: the sequence to the decoder layer (required).
|
858 |
+
memory: the sequence from the last layer of the encoder (required).
|
859 |
+
tgt_mask: the mask for the tgt sequence (optional).
|
860 |
+
memory_mask: the mask for the memory sequence (optional).
|
861 |
+
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
|
862 |
+
memory_key_padding_mask: the mask for the memory keys per batch (optional).
|
863 |
+
tgt_is_causal: If specified, applies a causal mask as ``tgt mask``.
|
864 |
+
Default: ``False``.
|
865 |
+
Warning:
|
866 |
+
``tgt_is_causal`` provides a hint that ``tgt_mask`` is
|
867 |
+
the causal mask. Providing incorrect hints can result in
|
868 |
+
incorrect execution, including forward and backward
|
869 |
+
compatibility.
|
870 |
+
memory_is_causal: If specified, applies a causal mask as
|
871 |
+
``memory mask``.
|
872 |
+
Default: ``False``.
|
873 |
+
Warning:
|
874 |
+
``memory_is_causal`` provides a hint that
|
875 |
+
``memory_mask`` is the causal mask. Providing incorrect
|
876 |
+
hints can result in incorrect execution, including
|
877 |
+
forward and backward compatibility.
|
878 |
+
|
879 |
+
Shape:
|
880 |
+
see the docs in :class:`~torch.nn.Transformer`.
|
881 |
+
"""
|
882 |
+
# see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
|
883 |
+
|
884 |
+
x = tgt
|
885 |
+
if self.norm_first:
|
886 |
+
x = x + self._sa_block(self.norm1(x), tgt_mask, tgt_key_padding_mask, tgt_is_causal)
|
887 |
+
x = x + self._mha_block(self.norm2(x), memory, memory_mask, memory_key_padding_mask, memory_is_causal)
|
888 |
+
x = x + self._ff_block(self.norm3(x))
|
889 |
+
else:
|
890 |
+
x = self.norm1(x + self._sa_block(x, tgt_mask, tgt_key_padding_mask, tgt_is_causal))
|
891 |
+
x = self.norm2(x + self._mha_block(x, memory, memory_mask, memory_key_padding_mask, memory_is_causal))
|
892 |
+
x = self.norm3(x + self._ff_block(x))
|
893 |
+
|
894 |
+
return x
|
895 |
+
|
896 |
+
# self-attention block
|
897 |
+
def _sa_block(self, x: Tensor,
|
898 |
+
attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False) -> Tensor:
|
899 |
+
x = self.self_attn(x, x, x,
|
900 |
+
attn_mask=attn_mask,
|
901 |
+
key_padding_mask=key_padding_mask,
|
902 |
+
is_causal=is_causal,
|
903 |
+
need_weights=False)[0]
|
904 |
+
return self.dropout1(x)
|
905 |
+
|
906 |
+
# multihead attention block
|
907 |
+
def _mha_block(self, x: Tensor, mem: Tensor,
|
908 |
+
attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False) -> Tensor:
|
909 |
+
x = self.multihead_attn(x, mem, mem,
|
910 |
+
attn_mask=attn_mask,
|
911 |
+
key_padding_mask=key_padding_mask,
|
912 |
+
is_causal=is_causal,
|
913 |
+
need_weights=False)[0]
|
914 |
+
return self.dropout2(x)
|
915 |
+
|
916 |
+
# feed forward block
|
917 |
+
def _ff_block(self, x: Tensor) -> Tensor:
|
918 |
+
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
|
919 |
+
return self.dropout3(x)
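
# Illustrative usage sketch (added for exposition; ``_sketch_decoder_layer_masks`` is not
# an upstream helper): the three residual branches above (self-attention over tgt,
# cross-attention over memory, feed-forward) take masks of shape (T, T) and (T, S).
def _sketch_decoder_layer_masks() -> None:
    layer = TransformerDecoderLayer(d_model=512, nhead=8, batch_first=True)
    memory = torch.rand(32, 10, 512)                               # (N, S, E)
    tgt = torch.rand(32, 20, 512)                                  # (N, T, E)
    tgt_mask = torch.triu(torch.full((20, 20), float('-inf')), diagonal=1)   # causal (T, T)
    out = layer(tgt, memory, tgt_mask=tgt_mask)
    assert out.shape == tgt.shape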
|
920 |
+
|
921 |
+
|
922 |
+
def _get_clones(module, N):
|
923 |
+
# FIXME: copy.deepcopy() is not defined on nn.module
|
924 |
+
return ModuleList([copy.deepcopy(module) for i in range(N)])
|
925 |
+
|
926 |
+
|
927 |
+
def _get_activation_fn(activation: str) -> Callable[[Tensor], Tensor]:
|
928 |
+
if activation == "relu":
|
929 |
+
return F.relu
|
930 |
+
elif activation == "gelu":
|
931 |
+
return F.gelu
|
932 |
+
|
933 |
+
raise RuntimeError(f"activation should be relu/gelu, not {activation}")
|
934 |
+
|
935 |
+
|
936 |
+
def _detect_is_causal_mask(
|
937 |
+
mask: Optional[Tensor],
|
938 |
+
is_causal: Optional[bool] = None,
|
939 |
+
size: Optional[int] = None,
|
940 |
+
) -> bool:
|
941 |
+
"""Return whether the given attention mask is causal.
|
942 |
+
|
943 |
+
Warning:
|
944 |
+
If ``is_causal`` is not ``None``, its value will be returned as is. If a
|
945 |
+
user supplies an incorrect ``is_causal`` hint,
|
946 |
+
|
947 |
+
``is_causal=False`` when the mask is in fact a causal attention mask
|
948 |
+
may lead to reduced performance relative to what would be achievable
|
949 |
+
with ``is_causal=True``;
|
950 |
+
``is_causal=True`` when the mask is in fact not a causal attention mask
|
951 |
+
may lead to incorrect and unpredictable execution - in some scenarios,
|
952 |
+
a causal mask may be applied based on the hint, in other execution
|
953 |
+
scenarios the specified mask may be used. The choice may not appear
|
954 |
+
to be deterministic, in that a number of factors like alignment,
|
955 |
+
hardware SKU, etc influence the decision whether to use a mask or
|
956 |
+
rely on the hint.
|
957 |
+
If ``size`` is not ``None``, checks whether the mask is a causal mask of the provided size.
|
958 |
+
Otherwise, checks for any causal mask.
|
959 |
+
"""
|
960 |
+
# Prevent type refinement
|
961 |
+
make_causal = (is_causal is True)
|
962 |
+
|
963 |
+
if is_causal is None and mask is not None:
|
964 |
+
sz = size if size is not None else mask.size(-2)
|
965 |
+
causal_comparison = _generate_square_subsequent_mask(
|
966 |
+
sz, device=mask.device, dtype=mask.dtype)
|
967 |
+
|
968 |
+
# Do not use `torch.equal` so we handle batched masks by
|
969 |
+
# broadcasting the comparison.
|
970 |
+
if mask.size() == causal_comparison.size():
|
971 |
+
make_causal = bool((mask == causal_comparison).all())
|
972 |
+
else:
|
973 |
+
make_causal = False
|
974 |
+
|
975 |
+
return make_causal
|
llmeval-env/lib/python3.10/site-packages/torch/nn/modules/upsampling.py
ADDED
@@ -0,0 +1,264 @@
1 |
+
from .module import Module
|
2 |
+
from .. import functional as F
|
3 |
+
|
4 |
+
from torch import Tensor
|
5 |
+
from typing import Optional
|
6 |
+
from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t
|
7 |
+
|
8 |
+
__all__ = ['Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d']
|
9 |
+
|
10 |
+
|
11 |
+
class Upsample(Module):
|
12 |
+
r"""Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data.
|
13 |
+
|
14 |
+
The input data is assumed to be of the form
|
15 |
+
`minibatch x channels x [optional depth] x [optional height] x width`.
|
16 |
+
Hence, for spatial inputs, we expect a 4D Tensor and for volumetric inputs, we expect a 5D Tensor.
|
17 |
+
|
18 |
+
The algorithms available for upsampling are nearest neighbor and linear,
|
19 |
+
bilinear, bicubic and trilinear for 3D, 4D and 5D input Tensor,
|
20 |
+
respectively.
|
21 |
+
|
22 |
+
One can either give a :attr:`scale_factor` or the target output :attr:`size` to
|
23 |
+
calculate the output size. (You cannot give both, as it is ambiguous)
|
24 |
+
|
25 |
+
Args:
|
26 |
+
size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional):
|
27 |
+
output spatial sizes
|
28 |
+
scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional):
|
29 |
+
multiplier for spatial size. Has to match input size if it is a tuple.
|
30 |
+
mode (str, optional): the upsampling algorithm: one of ``'nearest'``,
|
31 |
+
``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``.
|
32 |
+
Default: ``'nearest'``
|
33 |
+
align_corners (bool, optional): if ``True``, the corner pixels of the input
|
34 |
+
and output tensors are aligned, thus preserving the values at
|
35 |
+
those pixels. This only has effect when :attr:`mode` is
|
36 |
+
``'linear'``, ``'bilinear'``, ``'bicubic'``, or ``'trilinear'``.
|
37 |
+
Default: ``False``
|
38 |
+
recompute_scale_factor (bool, optional): recompute the scale_factor for use in the
|
39 |
+
interpolation calculation. If `recompute_scale_factor` is ``True``, then
|
40 |
+
`scale_factor` must be passed in and `scale_factor` is used to compute the
|
41 |
+
output `size`. The computed output `size` will be used to infer new scales for
|
42 |
+
the interpolation. Note that when `scale_factor` is floating-point, it may differ
|
43 |
+
from the recomputed `scale_factor` due to rounding and precision issues.
|
44 |
+
If `recompute_scale_factor` is ``False``, then `size` or `scale_factor` will
|
45 |
+
be used directly for interpolation.
|
46 |
+
|
47 |
+
Shape:
|
48 |
+
- Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})` or :math:`(N, C, D_{in}, H_{in}, W_{in})`
|
49 |
+
- Output: :math:`(N, C, W_{out})`, :math:`(N, C, H_{out}, W_{out})`
|
50 |
+
or :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
|
51 |
+
|
52 |
+
.. math::
|
53 |
+
D_{out} = \left\lfloor D_{in} \times \text{scale\_factor} \right\rfloor
|
54 |
+
|
55 |
+
.. math::
|
56 |
+
H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor
|
57 |
+
|
58 |
+
.. math::
|
59 |
+
W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor
|
60 |
+
|
61 |
+
.. warning::
|
62 |
+
With ``align_corners = True``, the linearly interpolating modes
|
63 |
+
(`linear`, `bilinear`, `bicubic`, and `trilinear`) don't proportionally
|
64 |
+
align the output and input pixels, and thus the output values can depend
|
65 |
+
on the input size. This was the default behavior for these modes up to
|
66 |
+
version 0.3.1. Since then, the default behavior is
|
67 |
+
``align_corners = False``. See below for concrete examples on how this
|
68 |
+
affects the outputs.
|
69 |
+
|
70 |
+
.. note::
|
71 |
+
If you want downsampling/general resizing, you should use :func:`~nn.functional.interpolate`.
|
72 |
+
|
73 |
+
Examples::
|
74 |
+
|
75 |
+
>>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
|
76 |
+
>>> input
|
77 |
+
tensor([[[[1., 2.],
|
78 |
+
[3., 4.]]]])
|
79 |
+
|
80 |
+
>>> m = nn.Upsample(scale_factor=2, mode='nearest')
|
81 |
+
>>> m(input)
|
82 |
+
tensor([[[[1., 1., 2., 2.],
|
83 |
+
[1., 1., 2., 2.],
|
84 |
+
[3., 3., 4., 4.],
|
85 |
+
[3., 3., 4., 4.]]]])
|
86 |
+
|
87 |
+
>>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles")
|
88 |
+
>>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False
|
89 |
+
>>> m(input)
|
90 |
+
tensor([[[[1.0000, 1.2500, 1.7500, 2.0000],
|
91 |
+
[1.5000, 1.7500, 2.2500, 2.5000],
|
92 |
+
[2.5000, 2.7500, 3.2500, 3.5000],
|
93 |
+
[3.0000, 3.2500, 3.7500, 4.0000]]]])
|
94 |
+
|
95 |
+
>>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
|
96 |
+
>>> m(input)
|
97 |
+
tensor([[[[1.0000, 1.3333, 1.6667, 2.0000],
|
98 |
+
[1.6667, 2.0000, 2.3333, 2.6667],
|
99 |
+
[2.3333, 2.6667, 3.0000, 3.3333],
|
100 |
+
[3.0000, 3.3333, 3.6667, 4.0000]]]])
|
101 |
+
|
102 |
+
>>> # Try scaling the same data in a larger tensor
|
103 |
+
>>> input_3x3 = torch.zeros(3, 3).view(1, 1, 3, 3)
|
104 |
+
>>> input_3x3[:, :, :2, :2].copy_(input)
|
105 |
+
tensor([[[[1., 2.],
|
106 |
+
[3., 4.]]]])
|
107 |
+
>>> input_3x3
|
108 |
+
tensor([[[[1., 2., 0.],
|
109 |
+
[3., 4., 0.],
|
110 |
+
[0., 0., 0.]]]])
|
111 |
+
|
112 |
+
>>> # xdoctest: +IGNORE_WANT("seems to fail when other tests are run in the same session")
|
113 |
+
>>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False
|
114 |
+
>>> # Notice that values in top left corner are the same with the small input (except at boundary)
|
115 |
+
>>> m(input_3x3)
|
116 |
+
tensor([[[[1.0000, 1.2500, 1.7500, 1.5000, 0.5000, 0.0000],
|
117 |
+
[1.5000, 1.7500, 2.2500, 1.8750, 0.6250, 0.0000],
|
118 |
+
[2.5000, 2.7500, 3.2500, 2.6250, 0.8750, 0.0000],
|
119 |
+
[2.2500, 2.4375, 2.8125, 2.2500, 0.7500, 0.0000],
|
120 |
+
[0.7500, 0.8125, 0.9375, 0.7500, 0.2500, 0.0000],
|
121 |
+
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
|
122 |
+
|
123 |
+
>>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
|
124 |
+
>>> # Notice that values in top left corner are now changed
|
125 |
+
>>> m(input_3x3)
|
126 |
+
tensor([[[[1.0000, 1.4000, 1.8000, 1.6000, 0.8000, 0.0000],
|
127 |
+
[1.8000, 2.2000, 2.6000, 2.2400, 1.1200, 0.0000],
|
128 |
+
[2.6000, 3.0000, 3.4000, 2.8800, 1.4400, 0.0000],
|
129 |
+
[2.4000, 2.7200, 3.0400, 2.5600, 1.2800, 0.0000],
|
130 |
+
[1.2000, 1.3600, 1.5200, 1.2800, 0.6400, 0.0000],
|
131 |
+
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
|
132 |
+
"""
|
133 |
+
|
134 |
+
__constants__ = ['size', 'scale_factor', 'mode', 'align_corners', 'name', 'recompute_scale_factor']
|
135 |
+
name: str
|
136 |
+
size: Optional[_size_any_t]
|
137 |
+
scale_factor: Optional[_ratio_any_t]
|
138 |
+
mode: str
|
139 |
+
align_corners: Optional[bool]
|
140 |
+
recompute_scale_factor: Optional[bool]
|
141 |
+
|
142 |
+
def __init__(self, size: Optional[_size_any_t] = None, scale_factor: Optional[_ratio_any_t] = None,
|
143 |
+
mode: str = 'nearest', align_corners: Optional[bool] = None,
|
144 |
+
recompute_scale_factor: Optional[bool] = None) -> None:
|
145 |
+
super().__init__()
|
146 |
+
self.name = type(self).__name__
|
147 |
+
self.size = size
|
148 |
+
if isinstance(scale_factor, tuple):
|
149 |
+
self.scale_factor = tuple(float(factor) for factor in scale_factor)
|
150 |
+
else:
|
151 |
+
self.scale_factor = float(scale_factor) if scale_factor else None
|
152 |
+
self.mode = mode
|
153 |
+
self.align_corners = align_corners
|
154 |
+
self.recompute_scale_factor = recompute_scale_factor
|
155 |
+
|
156 |
+
def forward(self, input: Tensor) -> Tensor:
|
157 |
+
return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners,
|
158 |
+
recompute_scale_factor=self.recompute_scale_factor)
|
159 |
+
|
160 |
+
def __setstate__(self, state):
|
161 |
+
if 'recompute_scale_factor' not in state:
|
162 |
+
state['recompute_scale_factor'] = True
|
163 |
+
|
164 |
+
super().__setstate__(state)
|
165 |
+
|
166 |
+
def extra_repr(self) -> str:
|
167 |
+
if self.scale_factor is not None:
|
168 |
+
info = 'scale_factor=' + repr(self.scale_factor)
|
169 |
+
else:
|
170 |
+
info = 'size=' + repr(self.size)
|
171 |
+
info += ', mode=' + repr(self.mode)
|
172 |
+
return info
|
173 |
+
|
174 |
+
|
175 |
+
class UpsamplingNearest2d(Upsample):
|
176 |
+
r"""Applies a 2D nearest neighbor upsampling to an input signal composed of several input channels.
|
177 |
+
|
178 |
+
To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
|
179 |
+
as it's constructor argument.
|
180 |
+
|
181 |
+
When :attr:`size` is given, it is the output size of the image `(h, w)`.
|
182 |
+
|
183 |
+
Args:
|
184 |
+
size (int or Tuple[int, int], optional): output spatial sizes
|
185 |
+
scale_factor (float or Tuple[float, float], optional): multiplier for
|
186 |
+
spatial size.
|
187 |
+
|
188 |
+
.. warning::
|
189 |
+
This class is deprecated in favor of :func:`~nn.functional.interpolate`.
|
190 |
+
|
191 |
+
Shape:
|
192 |
+
- Input: :math:`(N, C, H_{in}, W_{in})`
|
193 |
+
- Output: :math:`(N, C, H_{out}, W_{out})` where
|
194 |
+
|
195 |
+
.. math::
|
196 |
+
H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor
|
197 |
+
|
198 |
+
.. math::
|
199 |
+
W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor
|
200 |
+
|
201 |
+
Examples::
|
202 |
+
|
203 |
+
>>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
|
204 |
+
>>> input
|
205 |
+
tensor([[[[1., 2.],
|
206 |
+
[3., 4.]]]])
|
207 |
+
|
208 |
+
>>> m = nn.UpsamplingNearest2d(scale_factor=2)
|
209 |
+
>>> m(input)
|
210 |
+
tensor([[[[1., 1., 2., 2.],
|
211 |
+
[1., 1., 2., 2.],
|
212 |
+
[3., 3., 4., 4.],
|
213 |
+
[3., 3., 4., 4.]]]])
|
214 |
+
"""
|
215 |
+
|
216 |
+
def __init__(self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None) -> None:
|
217 |
+
super().__init__(size, scale_factor, mode='nearest')
|
218 |
+
|
219 |
+
|
220 |
+
class UpsamplingBilinear2d(Upsample):
|
221 |
+
r"""Applies a 2D bilinear upsampling to an input signal composed of several input channels.
|
222 |
+
|
223 |
+
To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
|
224 |
+
as it's constructor argument.
|
225 |
+
|
226 |
+
When :attr:`size` is given, it is the output size of the image `(h, w)`.
|
227 |
+
|
228 |
+
Args:
|
229 |
+
size (int or Tuple[int, int], optional): output spatial sizes
|
230 |
+
scale_factor (float or Tuple[float, float], optional): multiplier for
|
231 |
+
spatial size.
|
232 |
+
|
233 |
+
.. warning::
|
234 |
+
This class is deprecated in favor of :func:`~nn.functional.interpolate`. It is
|
235 |
+
equivalent to ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``.
|
236 |
+
|
237 |
+
Shape:
|
238 |
+
- Input: :math:`(N, C, H_{in}, W_{in})`
|
239 |
+
- Output: :math:`(N, C, H_{out}, W_{out})` where
|
240 |
+
|
241 |
+
.. math::
|
242 |
+
H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor
|
243 |
+
|
244 |
+
.. math::
|
245 |
+
W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor
|
246 |
+
|
247 |
+
Examples::
|
248 |
+
|
249 |
+
>>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
|
250 |
+
>>> input
|
251 |
+
tensor([[[[1., 2.],
|
252 |
+
[3., 4.]]]])
|
253 |
+
|
254 |
+
>>> # xdoctest: +IGNORE_WANT("do other tests modify the global state?")
|
255 |
+
>>> m = nn.UpsamplingBilinear2d(scale_factor=2)
|
256 |
+
>>> m(input)
|
257 |
+
tensor([[[[1.0000, 1.3333, 1.6667, 2.0000],
|
258 |
+
[1.6667, 2.0000, 2.3333, 2.6667],
|
259 |
+
[2.3333, 2.6667, 3.0000, 3.3333],
|
260 |
+
[3.0000, 3.3333, 3.6667, 4.0000]]]])
|
261 |
+
"""
|
262 |
+
|
263 |
+
def __init__(self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None) -> None:
|
264 |
+
super().__init__(size, scale_factor, mode='bilinear', align_corners=True)
|
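
The docstring examples above all pass `scale_factor`; as a small supplementary sketch (assumed usage, not part of the file), the `size` argument fixes the output spatial size directly, and the same module handles 1D (temporal) data, given as a 3D tensor, with `mode='linear'`:

    >>> import torch
    >>> import torch.nn as nn
    >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
    >>> nn.Upsample(size=(3, 3), mode='nearest')(input).shape   # target size instead of a scale factor
    torch.Size([1, 1, 3, 3])
    >>> x1d = torch.arange(1, 4, dtype=torch.float32).view(1, 1, 3)
    >>> nn.Upsample(scale_factor=2, mode='linear')(x1d).shape   # 1D (temporal) data is a 3D tensor
    torch.Size([1, 1, 6])
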
llmeval-env/lib/python3.10/site-packages/torch/nn/modules/utils.py
ADDED
@@ -0,0 +1,79 @@
import collections
from itertools import repeat
from typing import List, Dict, Any

__all__ = ['consume_prefix_in_state_dict_if_present']


def _ntuple(n, name="parse"):
    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            return tuple(x)
        return tuple(repeat(x, n))

    parse.__name__ = name
    return parse


_single = _ntuple(1, "_single")
_pair = _ntuple(2, "_pair")
_triple = _ntuple(3, "_triple")
_quadruple = _ntuple(4, "_quadruple")


def _reverse_repeat_tuple(t, n):
    r"""Reverse the order of `t` and repeat each element `n` times.

    This can be used to translate the padding arg used by Conv and Pooling modules
    into the form used by `F.pad`.
    """
    return tuple(x for x in reversed(t) for _ in range(n))


def _list_with_default(out_size: List[int], defaults: List[int]) -> List[int]:
    import torch
    if isinstance(out_size, (int, torch.SymInt)):
        return out_size
    if len(defaults) <= len(out_size):
        raise ValueError(
            f"Input dimension should be at least {len(out_size) + 1}"
        )
    return [
        v if v is not None else d for v, d in zip(out_size, defaults[-len(out_size) :])
    ]


def consume_prefix_in_state_dict_if_present(
    state_dict: Dict[str, Any], prefix: str
) -> None:
    r"""Strip the prefix in state_dict in place, if any.

    .. note::
        Given a `state_dict` from a DP/DDP model, a local model can load it by applying
        `consume_prefix_in_state_dict_if_present(state_dict, "module.")` before calling
        :meth:`torch.nn.Module.load_state_dict`.

    Args:
        state_dict (OrderedDict): a state-dict to be loaded to the model.
        prefix (str): prefix.
    """
    keys = list(state_dict.keys())
    for key in keys:
        if key.startswith(prefix):
            newkey = key[len(prefix) :]
            state_dict[newkey] = state_dict.pop(key)

    # also strip the prefix in metadata, if any.
    if hasattr(state_dict, "_metadata"):
        keys = list(state_dict._metadata.keys())
        for key in keys:
            # for the metadata dict, the key can be:
            # '': for the DDP module, which we want to remove.
            # 'module': for the actual model.
            # 'module.xx.xx': for the rest.
            if len(key) == 0:
                continue
            # handle both the 'module' case and the 'module.' case
            if key == prefix.replace('.', '') or key.startswith(prefix):
                newkey = key[len(prefix) :]
                state_dict._metadata[newkey] = state_dict._metadata.pop(key)
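
The ``.. note::`` in `consume_prefix_in_state_dict_if_present` above describes its main use case; a minimal sketch of that workflow (the `Linear(4, 2)` model is just a hypothetical stand-in for the local model):

    import torch
    from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present

    model = torch.nn.Linear(4, 2)  # hypothetical local (unwrapped) model

    # Simulate a checkpoint saved from a DataParallel/DDP wrapper, whose keys carry "module."
    ddp_state = {"module." + k: v for k, v in model.state_dict().items()}

    consume_prefix_in_state_dict_if_present(ddp_state, "module.")  # strips the prefix in place
    model.load_state_dict(ddp_state)  # loads cleanly now that the keys match
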
llmeval-env/lib/python3.10/site-packages/torch/nn/qat/__init__.py
ADDED
@@ -0,0 +1,18 @@
# flake8: noqa: F401
r"""QAT Modules.

This package is in the process of being deprecated.
Please use `torch.ao.nn.qat` instead.
"""
from . import dynamic  # noqa: F403
from . import modules  # noqa: F403
from .modules import *  # noqa: F403

__all__ = [
    "Linear",
    "Conv1d",
    "Conv2d",
    "Conv3d",
    "Embedding",
    "EmbeddingBag",
]
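
Since this package only re-exports the `torch.ao.nn.qat` implementations, the deprecated and new import paths resolve to the same class objects; a quick check (assuming a PyTorch build that still ships both namespaces):

    >>> import torch.nn.qat as nnqat        # deprecated namespace (this package)
    >>> import torch.ao.nn.qat as ao_nnqat  # its new home
    >>> nnqat.Linear is ao_nnqat.Linear     # identical class object, only the import path differs
    True
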
llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/__init__.py
ADDED
@@ -0,0 +1,7 @@
# flake8: noqa: F401
r"""QAT Dynamic Modules.

This package is in the process of being deprecated.
Please use `torch.ao.nn.qat.dynamic` instead.
"""
from .modules import *  # noqa: F403
llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (355 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__init__.py
ADDED
@@ -0,0 +1,3 @@
from .linear import Linear

__all__ = ["Linear"]
llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (260 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc
ADDED
Binary file (616 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/linear.py
ADDED
@@ -0,0 +1,10 @@
# flake8: noqa: F401
r"""QAT Modules.

This file is in the process of migration to `torch/ao/nn/qat/dynamic`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please add it to the
appropriate file under `torch/ao/nn/qat/dynamic/modules`,
while adding an import statement here.
"""
from torch.ao.nn.qat.dynamic.modules.linear import Linear
llmeval-env/lib/python3.10/site-packages/torch/nn/qat/modules/__init__.py
ADDED
@@ -0,0 +1,24 @@
# flake8: noqa: F401
r"""QAT Modules.

This package is in the process of being deprecated.
Please use `torch.ao.nn.qat.modules` instead.
"""
from torch.ao.nn.qat.modules.linear import Linear
from torch.ao.nn.qat.modules.conv import Conv1d
from torch.ao.nn.qat.modules.conv import Conv2d
from torch.ao.nn.qat.modules.conv import Conv3d
from torch.ao.nn.qat.modules.embedding_ops import EmbeddingBag, Embedding

from . import conv
from . import embedding_ops
from . import linear

__all__ = [
    "Linear",
    "Conv1d",
    "Conv2d",
    "Conv3d",
    "Embedding",
    "EmbeddingBag",
]
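
For context, a brief sketch of what these QAT modules do (not part of the file): a QAT `Linear` behaves like `nn.Linear` but fake-quantizes its weight in the forward pass so that training sees quantization error; it must be constructed with a `qconfig`, taken here from the default QAT helper:

    import torch
    import torch.ao.nn.qat as nnqat
    from torch.ao.quantization import get_default_qat_qconfig

    qconfig = get_default_qat_qconfig("fbgemm")  # observer / fake-quant settings
    m = nnqat.Linear(4, 8, qconfig=qconfig)      # QAT Linear requires a qconfig

    x = torch.randn(2, 4)
    y = m(x)          # output stays float; the weight is fake-quantized inside forward
    print(y.shape)    # torch.Size([2, 8])
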
llmeval-env/lib/python3.10/site-packages/torch/nn/qat/modules/conv.py
ADDED
@@ -0,0 +1,12 @@
# flake8: noqa: F401
r"""QAT Modules.

This file is in the process of migration to `torch/ao/nn/qat`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please add it to the
appropriate file under `torch/ao/nn/qat/modules`,
while adding an import statement here.
"""
from torch.ao.nn.qat.modules.conv import Conv1d
from torch.ao.nn.qat.modules.conv import Conv2d
from torch.ao.nn.qat.modules.conv import Conv3d
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/functional_modules.cpython-310.pyc
ADDED
Binary file (757 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (795 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/activation.py
ADDED
@@ -0,0 +1,18 @@
# flake8: noqa: F401
r"""Quantized Modules.

This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please add it to the
appropriate file under `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""

from torch.ao.nn.quantized.modules.activation import ELU
from torch.ao.nn.quantized.modules.activation import Hardswish
from torch.ao.nn.quantized.modules.activation import LeakyReLU
from torch.ao.nn.quantized.modules.activation import MultiheadAttention
from torch.ao.nn.quantized.modules.activation import PReLU
from torch.ao.nn.quantized.modules.activation import ReLU6
from torch.ao.nn.quantized.modules.activation import Sigmoid
from torch.ao.nn.quantized.modules.activation import Softmax
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/conv.py
ADDED
@@ -0,0 +1,21 @@
# flake8: noqa: F401
r"""Quantized Modules.

This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please add it to the
appropriate file under `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""

__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d']

from torch.ao.nn.quantized.modules.conv import _reverse_repeat_padding

from torch.ao.nn.quantized.modules.conv import Conv1d
from torch.ao.nn.quantized.modules.conv import Conv2d
from torch.ao.nn.quantized.modules.conv import Conv3d

from torch.ao.nn.quantized.modules.conv import ConvTranspose1d
from torch.ao.nn.quantized.modules.conv import ConvTranspose2d
from torch.ao.nn.quantized.modules.conv import ConvTranspose3d
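
As a usage sketch for the re-exported quantized convolutions (mirroring the upstream `Conv2d` docstring example; the input scale and zero point below are arbitrary): quantized modules consume quantized tensors, so the float input is quantized first.

    import torch
    import torch.ao.nn.quantized as nnq

    m = nnq.Conv2d(16, 33, kernel_size=3, stride=2)   # int8 convolution with default output qparams
    x = torch.randn(20, 16, 50, 100)
    qx = torch.quantize_per_tensor(x, scale=1.0, zero_point=0, dtype=torch.quint8)
    out = m(qx)                                       # the output is again a quantized tensor
    print(out.shape, out.dtype)                       # torch.Size([20, 33, 24, 49]) torch.quint8
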
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/dropout.py
ADDED
@@ -0,0 +1,13 @@
# flake8: noqa: F401
r"""Quantized Modules.

This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please add it to the
appropriate file under `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""

__all__ = ['Dropout']

from torch.ao.nn.quantized.modules.dropout import Dropout
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/embedding_ops.py
ADDED
@@ -0,0 +1,15 @@
# flake8: noqa: F401
r"""Quantized Modules.

This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please add it to the
appropriate file under `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""

__all__ = ['EmbeddingPackedParams', 'Embedding', 'EmbeddingBag']

from torch.ao.nn.quantized.modules.embedding_ops import Embedding
from torch.ao.nn.quantized.modules.embedding_ops import EmbeddingBag
from torch.ao.nn.quantized.modules.embedding_ops import EmbeddingPackedParams
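
A short sketch of the re-exported quantized `Embedding` (following the upstream docstring example; the lookup table is stored as 8-bit values, while the gathered rows come back as float):

    import torch
    import torch.ao.nn.quantized as nnq

    m = nnq.Embedding(num_embeddings=10, embedding_dim=12)   # quint8 embedding table
    indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8])
    out = m(indices)
    print(out.shape)   # torch.Size([9, 12])
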
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/functional_modules.py
ADDED
@@ -0,0 +1,15 @@
# flake8: noqa: F401
r"""Quantized Modules.

This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please add it to the
appropriate file under `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""

__all__ = ['FloatFunctional', 'FXFloatFunctional', 'QFunctional']

from torch.ao.nn.quantized.modules.functional_modules import FloatFunctional
from torch.ao.nn.quantized.modules.functional_modules import FXFloatFunctional
from torch.ao.nn.quantized.modules.functional_modules import QFunctional
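
A short sketch of why `FloatFunctional` exists (assumed usage, not part of the file): it wraps ordinary tensor arithmetic in module form so that operations such as addition can be observed during calibration and later swapped for their quantized counterparts (`QFunctional`):

    import torch
    from torch.ao.nn.quantized import FloatFunctional

    ff = FloatFunctional()               # stateful, observable wrapper around tensor arithmetic
    a, b = torch.randn(3), torch.randn(3)

    out = ff.add(a, b)                   # same result as a + b, but recordable for quantization
    assert torch.equal(out, a + b)
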