applied-ai-018 committed on
Commit 46d9881 · verified · 1 Parent(s): d23304a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  5. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/_functions.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/activation.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/adaptive.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/batchnorm.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/channelshuffle.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/container.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/conv.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/distance.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/dropout.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/flatten.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/fold.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/instancenorm.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/lazy.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/linear.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/loss.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/module.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/normalization.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/padding.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/pixelshuffle.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/pooling.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/sparse.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/transformer.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/upsampling.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/utils.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/nn/modules/activation.py +1624 -0
  32. venv/lib/python3.10/site-packages/torch/nn/modules/adaptive.py +312 -0
  33. venv/lib/python3.10/site-packages/torch/nn/modules/conv.py +1602 -0
  34. venv/lib/python3.10/site-packages/torch/nn/modules/distance.py +89 -0
  35. venv/lib/python3.10/site-packages/torch/nn/modules/dropout.py +294 -0
  36. venv/lib/python3.10/site-packages/torch/nn/modules/fold.py +303 -0
  37. venv/lib/python3.10/site-packages/torch/nn/modules/lazy.py +265 -0
  38. venv/lib/python3.10/site-packages/torch/nn/modules/loss.py +1790 -0
  39. venv/lib/python3.10/site-packages/torch/nn/modules/normalization.py +297 -0
  40. venv/lib/python3.10/site-packages/torch/nn/modules/padding.py +801 -0
  41. venv/lib/python3.10/site-packages/torch/nn/modules/pixelshuffle.py +113 -0
  42. venv/lib/python3.10/site-packages/torch/nn/modules/sparse.py +455 -0
  43. venv/lib/python3.10/site-packages/torch/nn/modules/transformer.py +975 -0
  44. venv/lib/python3.10/site-packages/torch/nn/modules/utils.py +79 -0
  45. venv/lib/python3.10/site-packages/torch/nn/utils/__init__.py +32 -0
  46. venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_named_member_accessor.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/clip_grad.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/convert_parameters.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/fusion.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrizations.cpython-310.pyc +0 -0
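Note on the four ckpts/universal/global_step120 files above: "exp_avg" and "exp_avg_sq" are the key names PyTorch's Adam-family optimizers use for the per-parameter first- and second-moment estimates, so each file holds one moment tensor for one weight. A minimal sketch of where tensors with these names come from (file names below are illustrative, not the upload tool's output):

import torch

# After an Adam step, the optimizer keeps per-parameter moment estimates
# under the keys "exp_avg" and "exp_avg_sq"; saving them one tensor per
# file yields checkpoints shaped like the ones in this commit.
param = torch.nn.Parameter(torch.randn(4, 4))
opt = torch.optim.Adam([param])
param.sum().backward()
opt.step()

state = opt.state[param]
torch.save(state["exp_avg"], "exp_avg.pt")        # first moment
torch.save(state["exp_avg_sq"], "exp_avg_sq.pt")  # second moment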
ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:198ddbf890b43dd6e96048e31c1ed6abc4d3eb4b19d7d54155c04108fa73ffca
+size 33555612
ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be50aeea76da0dc1a922243fd0fddc96f71d32bd0caa18da742bd9d0881ec032
+size 33555627
ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e28c74cf785f8c67a68201aa14360678fc1eb2a5a48e82354e96c6178117886
+size 33555612
ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:025ae16894556c909f0cf895e4192489618f7715640e783f205d09ecc38b865b
+size 33555627
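Each diff above is a Git LFS pointer, not the tensor itself: the repository stores only the spec version, the object's sha256 (oid), and its byte size, while the ~33 MB payloads live in LFS storage. A hedged sketch of checking a downloaded payload against its pointer (the helper name below is hypothetical; the oid and size come from the first pointer above):

import hashlib

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    # Stream the file, comparing its sha256 digest and byte count
    # against the values recorded in the LFS pointer.
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# e.g. for 16.mlp.dense_h_to_4h.weight/exp_avg.pt:
# verify_lfs_object("exp_avg.pt",
#                   "198ddbf890b43dd6e96048e31c1ed6abc4d3eb4b19d7d54155c04108fa73ffca",
#                   33555612)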
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.1 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/_functions.cpython-310.pyc ADDED
Binary file (5.99 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/activation.cpython-310.pyc ADDED
Binary file (54.5 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/adaptive.cpython-310.pyc ADDED
Binary file (10.5 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/batchnorm.cpython-310.pyc ADDED
Binary file (32 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/channelshuffle.cpython-310.pyc ADDED
Binary file (2.08 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/container.cpython-310.pyc ADDED
Binary file (34.3 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/conv.cpython-310.pyc ADDED
Binary file (58.8 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/distance.cpython-310.pyc ADDED
Binary file (4.08 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/dropout.cpython-310.pyc ADDED
Binary file (12.3 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/flatten.cpython-310.pyc ADDED
Binary file (5.9 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/fold.cpython-310.pyc ADDED
Binary file (13.1 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/instancenorm.cpython-310.pyc ADDED
Binary file (20.5 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/lazy.cpython-310.pyc ADDED
Binary file (12 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (10.5 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/loss.cpython-310.pyc ADDED
Binary file (93.2 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/module.cpython-310.pyc ADDED
Binary file (89.7 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/normalization.cpython-310.pyc ADDED
Binary file (11.7 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/padding.cpython-310.pyc ADDED
Binary file (33.5 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/pixelshuffle.cpython-310.pyc ADDED
Binary file (4.33 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/pooling.cpython-310.pyc ADDED
Binary file (57 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (54.6 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/sparse.cpython-310.pyc ADDED
Binary file (21.3 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/transformer.cpython-310.pyc ADDED
Binary file (36.9 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/upsampling.cpython-310.pyc ADDED
Binary file (11.8 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/__pycache__/utils.cpython-310.pyc ADDED
Binary file (2.77 kB)
venv/lib/python3.10/site-packages/torch/nn/modules/activation.py ADDED
@@ -0,0 +1,1624 @@
1
+ import warnings
2
+ from typing import Optional, Tuple
3
+
4
+ import torch
5
+ from torch import Tensor
6
+ from .linear import NonDynamicallyQuantizableLinear
7
+ from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
8
+ from torch.nn.parameter import Parameter
9
+ from .module import Module
10
+ from .. import functional as F
11
+
12
+ __all__ = ['Threshold', 'ReLU', 'RReLU', 'Hardtanh', 'ReLU6', 'Sigmoid', 'Hardsigmoid', 'Tanh',
13
+ 'SiLU', 'Mish', 'Hardswish', 'ELU', 'CELU', 'SELU', 'GLU', 'GELU', 'Hardshrink', 'LeakyReLU',
14
+ 'LogSigmoid', 'Softplus', 'Softshrink', 'MultiheadAttention', 'PReLU', 'Softsign', 'Tanhshrink',
15
+ 'Softmin', 'Softmax', 'Softmax2d', 'LogSoftmax']
16
+
17
+
18
+ class Threshold(Module):
19
+ r"""Thresholds each element of the input Tensor.
20
+
21
+ Threshold is defined as:
22
+
23
+ .. math::
24
+ y =
25
+ \begin{cases}
26
+ x, &\text{ if } x > \text{threshold} \\
27
+ \text{value}, &\text{ otherwise }
28
+ \end{cases}
29
+
30
+ Args:
31
+ threshold: The value to threshold at
32
+ value: The value to replace with
33
+ inplace: can optionally do the operation in-place. Default: ``False``
34
+
35
+ Shape:
36
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
37
+ - Output: :math:`(*)`, same shape as the input.
38
+
39
+ Examples::
40
+
41
+ >>> m = nn.Threshold(0.1, 20)
42
+ >>> input = torch.randn(2)
43
+ >>> output = m(input)
44
+ """
45
+
46
+ __constants__ = ['threshold', 'value', 'inplace']
47
+
48
+ threshold: float
49
+ value: float
50
+ inplace: bool
51
+
52
+ def __init__(self, threshold: float, value: float, inplace: bool = False) -> None:
53
+ super().__init__()
54
+ self.threshold = threshold
55
+ self.value = value
56
+ self.inplace = inplace
57
+ # TODO: check in THNN (if inplace == True, then assert value <= threshold)
58
+
59
+ def forward(self, input: Tensor) -> Tensor:
60
+ return F.threshold(input, self.threshold, self.value, self.inplace)
61
+
62
+ def extra_repr(self):
63
+ inplace_str = ', inplace=True' if self.inplace else ''
64
+ return f'threshold={self.threshold}, value={self.value}{inplace_str}'
65
+
66
+
67
+ class ReLU(Module):
68
+ r"""Applies the rectified linear unit function element-wise.
69
+
70
+ :math:`\text{ReLU}(x) = (x)^+ = \max(0, x)`
71
+
72
+ Args:
73
+ inplace: can optionally do the operation in-place. Default: ``False``
74
+
75
+ Shape:
76
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
77
+ - Output: :math:`(*)`, same shape as the input.
78
+
79
+ .. image:: ../scripts/activation_images/ReLU.png
80
+
81
+ Examples::
82
+
83
+ >>> m = nn.ReLU()
84
+ >>> input = torch.randn(2)
85
+ >>> output = m(input)
86
+
87
+
88
+ An implementation of CReLU - https://arxiv.org/abs/1603.05201
89
+
90
+ >>> m = nn.ReLU()
91
+ >>> input = torch.randn(2).unsqueeze(0)
92
+ >>> output = torch.cat((m(input), m(-input)))
93
+ """
94
+
95
+ __constants__ = ['inplace']
96
+ inplace: bool
97
+
98
+ def __init__(self, inplace: bool = False):
99
+ super().__init__()
100
+ self.inplace = inplace
101
+
102
+ def forward(self, input: Tensor) -> Tensor:
103
+ return F.relu(input, inplace=self.inplace)
104
+
105
+ def extra_repr(self) -> str:
106
+ inplace_str = 'inplace=True' if self.inplace else ''
107
+ return inplace_str
108
+
109
+
110
+ class RReLU(Module):
111
+ r"""Applies the randomized leaky rectified linear unit function, element-wise.
112
+
113
+ Method described in the paper:
114
+ `Empirical Evaluation of Rectified Activations in Convolutional Network <https://arxiv.org/abs/1505.00853>`_.
115
+
116
+ The function is defined as:
117
+
118
+ .. math::
119
+ \text{RReLU}(x) =
120
+ \begin{cases}
121
+ x & \text{if } x \geq 0 \\
122
+ ax & \text{ otherwise }
123
+ \end{cases}
124
+
125
+ where :math:`a` is randomly sampled from uniform distribution
126
+ :math:`\mathcal{U}(\text{lower}, \text{upper})` during training while during
127
+ evaluation :math:`a` is fixed with :math:`a = \frac{\text{lower} + \text{upper}}{2}`.
128
+
129
+ Args:
130
+ lower: lower bound of the uniform distribution. Default: :math:`\frac{1}{8}`
131
+ upper: upper bound of the uniform distribution. Default: :math:`\frac{1}{3}`
132
+ inplace: can optionally do the operation in-place. Default: ``False``
133
+
134
+ Shape:
135
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
136
+ - Output: :math:`(*)`, same shape as the input.
137
+
138
+ .. image:: ../scripts/activation_images/RReLU.png
139
+
140
+ Examples::
141
+
142
+ >>> m = nn.RReLU(0.1, 0.3)
143
+ >>> input = torch.randn(2)
144
+ >>> output = m(input)
145
+
146
+ """
147
+
148
+ __constants__ = ['lower', 'upper', 'inplace']
149
+
150
+ lower: float
151
+ upper: float
152
+ inplace: bool
153
+
154
+ def __init__(
155
+ self,
156
+ lower: float = 1. / 8,
157
+ upper: float = 1. / 3,
158
+ inplace: bool = False
159
+ ):
160
+ super().__init__()
161
+ self.lower = lower
162
+ self.upper = upper
163
+ self.inplace = inplace
164
+
165
+ def forward(self, input: Tensor) -> Tensor:
166
+ return F.rrelu(input, self.lower, self.upper, self.training, self.inplace)
167
+
168
+ def extra_repr(self):
169
+ inplace_str = ', inplace=True' if self.inplace else ''
170
+ return f'lower={self.lower}, upper={self.upper}{inplace_str}'
171
+
172
+
173
+ class Hardtanh(Module):
174
+ r"""Applies the HardTanh function element-wise.
175
+
176
+ HardTanh is defined as:
177
+
178
+ .. math::
179
+ \text{HardTanh}(x) = \begin{cases}
180
+ \text{max\_val} & \text{ if } x > \text{ max\_val } \\
181
+ \text{min\_val} & \text{ if } x < \text{ min\_val } \\
182
+ x & \text{ otherwise } \\
183
+ \end{cases}
184
+
185
+ Args:
186
+ min_val: minimum value of the linear region range. Default: -1
187
+ max_val: maximum value of the linear region range. Default: 1
188
+ inplace: can optionally do the operation in-place. Default: ``False``
189
+
190
+ Keyword arguments :attr:`min_value` and :attr:`max_value`
191
+ have been deprecated in favor of :attr:`min_val` and :attr:`max_val`.
192
+
193
+ Shape:
194
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
195
+ - Output: :math:`(*)`, same shape as the input.
196
+
197
+ .. image:: ../scripts/activation_images/Hardtanh.png
198
+
199
+ Examples::
200
+
201
+ >>> m = nn.Hardtanh(-2, 2)
202
+ >>> input = torch.randn(2)
203
+ >>> output = m(input)
204
+ """
205
+
206
+ __constants__ = ['min_val', 'max_val', 'inplace']
207
+
208
+ min_val: float
209
+ max_val: float
210
+ inplace: bool
211
+
212
+ def __init__(
213
+ self,
214
+ min_val: float = -1.,
215
+ max_val: float = 1.,
216
+ inplace: bool = False,
217
+ min_value: Optional[float] = None,
218
+ max_value: Optional[float] = None
219
+ ) -> None:
220
+ super().__init__()
221
+ if min_value is not None:
222
+ warnings.warn("keyword argument min_value is deprecated and rename to min_val")
223
+ min_val = min_value
224
+ if max_value is not None:
225
+ warnings.warn("keyword argument max_value is deprecated and rename to max_val")
226
+ max_val = max_value
227
+
228
+ self.min_val = min_val
229
+ self.max_val = max_val
230
+ self.inplace = inplace
231
+ assert self.max_val > self.min_val
232
+
233
+ def forward(self, input: Tensor) -> Tensor:
234
+ return F.hardtanh(input, self.min_val, self.max_val, self.inplace)
235
+
236
+ def extra_repr(self) -> str:
237
+ inplace_str = ', inplace=True' if self.inplace else ''
238
+ return f'min_val={self.min_val}, max_val={self.max_val}{inplace_str}'
239
+
240
+
241
+ class ReLU6(Hardtanh):
242
+ r"""Applies the ReLU6 function element-wise.
243
+
244
+ .. math::
245
+ \text{ReLU6}(x) = \min(\max(0,x), 6)
246
+
247
+ Args:
248
+ inplace: can optionally do the operation in-place. Default: ``False``
249
+
250
+ Shape:
251
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
252
+ - Output: :math:`(*)`, same shape as the input.
253
+
254
+ .. image:: ../scripts/activation_images/ReLU6.png
255
+
256
+ Examples::
257
+
258
+ >>> m = nn.ReLU6()
259
+ >>> input = torch.randn(2)
260
+ >>> output = m(input)
261
+ """
262
+
263
+ def __init__(self, inplace: bool = False):
264
+ super().__init__(0., 6., inplace)
265
+
266
+ def extra_repr(self) -> str:
267
+ inplace_str = 'inplace=True' if self.inplace else ''
268
+ return inplace_str
269
+
270
+
271
+ class Sigmoid(Module):
272
+ r"""Applies the Sigmoid function element-wise.
273
+
274
+ .. math::
275
+ \text{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}
276
+
277
+
278
+ Shape:
279
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
280
+ - Output: :math:`(*)`, same shape as the input.
281
+
282
+ .. image:: ../scripts/activation_images/Sigmoid.png
283
+
284
+ Examples::
285
+
286
+ >>> m = nn.Sigmoid()
287
+ >>> input = torch.randn(2)
288
+ >>> output = m(input)
289
+ """
290
+
291
+ def forward(self, input: Tensor) -> Tensor:
292
+ return torch.sigmoid(input)
293
+
294
+
295
+ class Hardsigmoid(Module):
296
+ r"""Applies the Hardsigmoid function element-wise.
297
+
298
+ Hardsigmoid is defined as:
299
+
300
+ .. math::
301
+ \text{Hardsigmoid}(x) = \begin{cases}
302
+ 0 & \text{if~} x \le -3, \\
303
+ 1 & \text{if~} x \ge +3, \\
304
+ x / 6 + 1 / 2 & \text{otherwise}
305
+ \end{cases}
306
+
307
+ Args:
308
+ inplace: can optionally do the operation in-place. Default: ``False``
309
+
310
+ Shape:
311
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
312
+ - Output: :math:`(*)`, same shape as the input.
313
+
314
+ .. image:: ../scripts/activation_images/Hardsigmoid.png
315
+
316
+ Examples::
317
+
318
+ >>> m = nn.Hardsigmoid()
319
+ >>> input = torch.randn(2)
320
+ >>> output = m(input)
321
+ """
322
+
323
+ __constants__ = ['inplace']
324
+
325
+ inplace: bool
326
+
327
+ def __init__(self, inplace : bool = False) -> None:
328
+ super().__init__()
329
+ self.inplace = inplace
330
+
331
+ def forward(self, input: Tensor) -> Tensor:
332
+ return F.hardsigmoid(input, self.inplace)
333
+
334
+
335
+ class Tanh(Module):
336
+ r"""Applies the Hyperbolic Tangent (Tanh) function element-wise.
337
+
338
+ Tanh is defined as:
339
+
340
+ .. math::
341
+ \text{Tanh}(x) = \tanh(x) = \frac{\exp(x) - \exp(-x)} {\exp(x) + \exp(-x)}
342
+
343
+ Shape:
344
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
345
+ - Output: :math:`(*)`, same shape as the input.
346
+
347
+ .. image:: ../scripts/activation_images/Tanh.png
348
+
349
+ Examples::
350
+
351
+ >>> m = nn.Tanh()
352
+ >>> input = torch.randn(2)
353
+ >>> output = m(input)
354
+ """
355
+
356
+ def forward(self, input: Tensor) -> Tensor:
357
+ return torch.tanh(input)
358
+
359
+ class SiLU(Module):
360
+ r"""Applies the Sigmoid Linear Unit (SiLU) function, element-wise.
361
+
362
+ The SiLU function is also known as the swish function.
363
+
364
+ .. math::
365
+ \text{silu}(x) = x * \sigma(x), \text{where } \sigma(x) \text{ is the logistic sigmoid.}
366
+
367
+ .. note::
368
+ See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_
369
+ where the SiLU (Sigmoid Linear Unit) was originally coined, and see
370
+ `Sigmoid-Weighted Linear Units for Neural Network Function Approximation
371
+ in Reinforcement Learning <https://arxiv.org/abs/1702.03118>`_ and `Swish:
372
+ a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941v1>`_
373
+ where the SiLU was experimented with later.
374
+
375
+ Shape:
376
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
377
+ - Output: :math:`(*)`, same shape as the input.
378
+
379
+ .. image:: ../scripts/activation_images/SiLU.png
380
+
381
+ Examples::
382
+
383
+ >>> m = nn.SiLU()
384
+ >>> input = torch.randn(2)
385
+ >>> output = m(input)
386
+ """
387
+
388
+ __constants__ = ['inplace']
389
+ inplace: bool
390
+
391
+ def __init__(self, inplace: bool = False):
392
+ super().__init__()
393
+ self.inplace = inplace
394
+
395
+ def forward(self, input: Tensor) -> Tensor:
396
+ return F.silu(input, inplace=self.inplace)
397
+
398
+ def extra_repr(self) -> str:
399
+ inplace_str = 'inplace=True' if self.inplace else ''
400
+ return inplace_str
401
+
402
+ class Mish(Module):
403
+ r"""Applies the Mish function, element-wise.
404
+
405
+ Mish: A Self Regularized Non-Monotonic Neural Activation Function.
406
+
407
+ .. math::
408
+ \text{Mish}(x) = x * \text{Tanh}(\text{Softplus}(x))
409
+
410
+ .. note::
411
+ See `Mish: A Self Regularized Non-Monotonic Neural Activation Function <https://arxiv.org/abs/1908.08681>`_
412
+
413
+ Shape:
414
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
415
+ - Output: :math:`(*)`, same shape as the input.
416
+
417
+ .. image:: ../scripts/activation_images/Mish.png
418
+
419
+ Examples::
420
+
421
+ >>> m = nn.Mish()
422
+ >>> input = torch.randn(2)
423
+ >>> output = m(input)
424
+ """
425
+
426
+ __constants__ = ['inplace']
427
+ inplace: bool
428
+
429
+ def __init__(self, inplace: bool = False):
430
+ super().__init__()
431
+ self.inplace = inplace
432
+
433
+ def forward(self, input: Tensor) -> Tensor:
434
+ return F.mish(input, inplace=self.inplace)
435
+
436
+ def extra_repr(self) -> str:
437
+ inplace_str = 'inplace=True' if self.inplace else ''
438
+ return inplace_str
439
+
440
+ class Hardswish(Module):
441
+ r"""Applies the Hardswish function, element-wise.
442
+
443
+ Method described in the paper: `Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`_.
444
+
445
+ Hardswish is defined as:
446
+
447
+ .. math::
448
+ \text{Hardswish}(x) = \begin{cases}
449
+ 0 & \text{if~} x \le -3, \\
450
+ x & \text{if~} x \ge +3, \\
451
+ x \cdot (x + 3) /6 & \text{otherwise}
452
+ \end{cases}
453
+
454
+ Args:
455
+ inplace: can optionally do the operation in-place. Default: ``False``
456
+
457
+ Shape:
458
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
459
+ - Output: :math:`(*)`, same shape as the input.
460
+
461
+ .. image:: ../scripts/activation_images/Hardswish.png
462
+
463
+ Examples::
464
+
465
+ >>> m = nn.Hardswish()
466
+ >>> input = torch.randn(2)
467
+ >>> output = m(input)
468
+ """
469
+
470
+ __constants__ = ['inplace']
471
+
472
+ inplace: bool
473
+
474
+ def __init__(self, inplace : bool = False) -> None:
475
+ super().__init__()
476
+ self.inplace = inplace
477
+
478
+ def forward(self, input: Tensor) -> Tensor:
479
+ return F.hardswish(input, self.inplace)
480
+
481
+
482
+ class ELU(Module):
483
+ r"""Applies the Exponential Linear Unit (ELU) function, element-wise.
484
+
485
+ Method described in the paper: `Fast and Accurate Deep Network Learning by Exponential Linear
486
+ Units (ELUs) <https://arxiv.org/abs/1511.07289>`__.
487
+
488
+ ELU is defined as:
489
+
490
+ .. math::
491
+ \text{ELU}(x) = \begin{cases}
492
+ x, & \text{ if } x > 0\\
493
+ \alpha * (\exp(x) - 1), & \text{ if } x \leq 0
494
+ \end{cases}
495
+
496
+ Args:
497
+ alpha: the :math:`\alpha` value for the ELU formulation. Default: 1.0
498
+ inplace: can optionally do the operation in-place. Default: ``False``
499
+
500
+ Shape:
501
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
502
+ - Output: :math:`(*)`, same shape as the input.
503
+
504
+ .. image:: ../scripts/activation_images/ELU.png
505
+
506
+ Examples::
507
+
508
+ >>> m = nn.ELU()
509
+ >>> input = torch.randn(2)
510
+ >>> output = m(input)
511
+ """
512
+
513
+ __constants__ = ['alpha', 'inplace']
514
+ alpha: float
515
+ inplace: bool
516
+
517
+ def __init__(self, alpha: float = 1., inplace: bool = False) -> None:
518
+ super().__init__()
519
+ self.alpha = alpha
520
+ self.inplace = inplace
521
+
522
+ def forward(self, input: Tensor) -> Tensor:
523
+ return F.elu(input, self.alpha, self.inplace)
524
+
525
+ def extra_repr(self) -> str:
526
+ inplace_str = ', inplace=True' if self.inplace else ''
527
+ return f'alpha={self.alpha}{inplace_str}'
528
+
529
+
530
+ class CELU(Module):
531
+ r"""Applies the CELU function element-wise.
532
+
533
+ .. math::
534
+ \text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))
535
+
536
+ More details can be found in the paper `Continuously Differentiable Exponential Linear Units`_ .
537
+
538
+ Args:
539
+ alpha: the :math:`\alpha` value for the CELU formulation. Default: 1.0
540
+ inplace: can optionally do the operation in-place. Default: ``False``
541
+
542
+ Shape:
543
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
544
+ - Output: :math:`(*)`, same shape as the input.
545
+
546
+ .. image:: ../scripts/activation_images/CELU.png
547
+
548
+ Examples::
549
+
550
+ >>> m = nn.CELU()
551
+ >>> input = torch.randn(2)
552
+ >>> output = m(input)
553
+
554
+ .. _`Continuously Differentiable Exponential Linear Units`:
555
+ https://arxiv.org/abs/1704.07483
556
+ """
557
+
558
+ __constants__ = ['alpha', 'inplace']
559
+ alpha: float
560
+ inplace: bool
561
+
562
+ def __init__(self, alpha: float = 1., inplace: bool = False) -> None:
563
+ super().__init__()
564
+ self.alpha = alpha
565
+ self.inplace = inplace
566
+
567
+ def forward(self, input: Tensor) -> Tensor:
568
+ return F.celu(input, self.alpha, self.inplace)
569
+
570
+ def extra_repr(self) -> str:
571
+ inplace_str = ', inplace=True' if self.inplace else ''
572
+ return f'alpha={self.alpha}{inplace_str}'
573
+
574
+
575
+ class SELU(Module):
576
+ r"""Applies the SELU function element-wise.
577
+
578
+ .. math::
579
+ \text{SELU}(x) = \text{scale} * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))
580
+
581
+ with :math:`\alpha = 1.6732632423543772848170429916717` and
582
+ :math:`\text{scale} = 1.0507009873554804934193349852946`.
583
+
584
+ .. warning::
585
+ When using ``kaiming_normal`` or ``kaiming_normal_`` for initialisation,
586
+ ``nonlinearity='linear'`` should be used instead of ``nonlinearity='selu'``
587
+ in order to get `Self-Normalizing Neural Networks`_.
588
+ See :func:`torch.nn.init.calculate_gain` for more information.
589
+
590
+ More details can be found in the paper `Self-Normalizing Neural Networks`_ .
591
+
592
+ Args:
593
+ inplace (bool, optional): can optionally do the operation in-place. Default: ``False``
594
+
595
+ Shape:
596
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
597
+ - Output: :math:`(*)`, same shape as the input.
598
+
599
+ .. image:: ../scripts/activation_images/SELU.png
600
+
601
+ Examples::
602
+
603
+ >>> m = nn.SELU()
604
+ >>> input = torch.randn(2)
605
+ >>> output = m(input)
606
+
607
+ .. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
608
+ """
609
+
610
+ __constants__ = ['inplace']
611
+ inplace: bool
612
+
613
+ def __init__(self, inplace: bool = False) -> None:
614
+ super().__init__()
615
+ self.inplace = inplace
616
+
617
+ def forward(self, input: Tensor) -> Tensor:
618
+ return F.selu(input, self.inplace)
619
+
620
+ def extra_repr(self) -> str:
621
+ inplace_str = 'inplace=True' if self.inplace else ''
622
+ return inplace_str
623
+
624
+
625
+ class GLU(Module):
626
+ r"""Applies the gated linear unit function.
627
+
628
+ :math:`{GLU}(a, b)= a \otimes \sigma(b)` where :math:`a` is the first half
629
+ of the input matrices and :math:`b` is the second half.
630
+
631
+ Args:
632
+ dim (int): the dimension on which to split the input. Default: -1
633
+
634
+ Shape:
635
+ - Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional
636
+ dimensions
637
+ - Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`
638
+
639
+ Examples::
640
+
641
+ >>> m = nn.GLU()
642
+ >>> input = torch.randn(4, 2)
643
+ >>> output = m(input)
644
+ """
645
+
646
+ __constants__ = ['dim']
647
+ dim: int
648
+
649
+ def __init__(self, dim: int = -1) -> None:
650
+ super().__init__()
651
+ self.dim = dim
652
+
653
+ def forward(self, input: Tensor) -> Tensor:
654
+ return F.glu(input, self.dim)
655
+
656
+ def extra_repr(self) -> str:
657
+ return f'dim={self.dim}'
658
+
659
+
660
+ class GELU(Module):
661
+ r"""Applies the Gaussian Error Linear Units function.
662
+
663
+ .. math:: \text{GELU}(x) = x * \Phi(x)
664
+
665
+ where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.
666
+
667
+ When the approximate argument is 'tanh', Gelu is estimated with:
668
+
669
+ .. math:: \text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))
670
+
671
+ Args:
672
+ approximate (str, optional): the gelu approximation algorithm to use:
673
+ ``'none'`` | ``'tanh'``. Default: ``'none'``
674
+
675
+ Shape:
676
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
677
+ - Output: :math:`(*)`, same shape as the input.
678
+
679
+ .. image:: ../scripts/activation_images/GELU.png
680
+
681
+ Examples::
682
+
683
+ >>> m = nn.GELU()
684
+ >>> input = torch.randn(2)
685
+ >>> output = m(input)
686
+ """
687
+
688
+ __constants__ = ['approximate']
689
+ approximate: str
690
+
691
+ def __init__(self, approximate: str = 'none') -> None:
692
+ super().__init__()
693
+ self.approximate = approximate
694
+
695
+ def forward(self, input: Tensor) -> Tensor:
696
+ return F.gelu(input, approximate=self.approximate)
697
+
698
+ def extra_repr(self) -> str:
699
+ return f'approximate={repr(self.approximate)}'
700
+
701
+
702
+ class Hardshrink(Module):
703
+ r"""Applies the Hard Shrinkage (Hardshrink) function element-wise.
704
+
705
+ Hardshrink is defined as:
706
+
707
+ .. math::
708
+ \text{HardShrink}(x) =
709
+ \begin{cases}
710
+ x, & \text{ if } x > \lambda \\
711
+ x, & \text{ if } x < -\lambda \\
712
+ 0, & \text{ otherwise }
713
+ \end{cases}
714
+
715
+ Args:
716
+ lambd: the :math:`\lambda` value for the Hardshrink formulation. Default: 0.5
717
+
718
+ Shape:
719
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
720
+ - Output: :math:`(*)`, same shape as the input.
721
+
722
+ .. image:: ../scripts/activation_images/Hardshrink.png
723
+
724
+ Examples::
725
+
726
+ >>> m = nn.Hardshrink()
727
+ >>> input = torch.randn(2)
728
+ >>> output = m(input)
729
+ """
730
+
731
+ __constants__ = ['lambd']
732
+ lambd: float
733
+
734
+ def __init__(self, lambd: float = 0.5) -> None:
735
+ super().__init__()
736
+ self.lambd = lambd
737
+
738
+ def forward(self, input: Tensor) -> Tensor:
739
+ return F.hardshrink(input, self.lambd)
740
+
741
+ def extra_repr(self) -> str:
742
+ return f'{self.lambd}'
743
+
744
+
745
+ class LeakyReLU(Module):
746
+ r"""Applies the LeakyReLU function element-wise.
747
+
748
+ .. math::
749
+ \text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)
750
+
751
+
752
+ or
753
+
754
+ .. math::
755
+ \text{LeakyReLU}(x) =
756
+ \begin{cases}
757
+ x, & \text{ if } x \geq 0 \\
758
+ \text{negative\_slope} \times x, & \text{ otherwise }
759
+ \end{cases}
760
+
761
+ Args:
762
+ negative_slope: Controls the angle of the negative slope (which is used for
763
+ negative input values). Default: 1e-2
764
+ inplace: can optionally do the operation in-place. Default: ``False``
765
+
766
+ Shape:
767
+ - Input: :math:`(*)` where `*` means, any number of additional
768
+ dimensions
769
+ - Output: :math:`(*)`, same shape as the input
770
+
771
+ .. image:: ../scripts/activation_images/LeakyReLU.png
772
+
773
+ Examples::
774
+
775
+ >>> m = nn.LeakyReLU(0.1)
776
+ >>> input = torch.randn(2)
777
+ >>> output = m(input)
778
+ """
779
+
780
+ __constants__ = ['inplace', 'negative_slope']
781
+ inplace: bool
782
+ negative_slope: float
783
+
784
+ def __init__(self, negative_slope: float = 1e-2, inplace: bool = False) -> None:
785
+ super().__init__()
786
+ self.negative_slope = negative_slope
787
+ self.inplace = inplace
788
+
789
+ def forward(self, input: Tensor) -> Tensor:
790
+ return F.leaky_relu(input, self.negative_slope, self.inplace)
791
+
792
+ def extra_repr(self) -> str:
793
+ inplace_str = ', inplace=True' if self.inplace else ''
794
+ return f'negative_slope={self.negative_slope}{inplace_str}'
795
+
796
+
797
+ class LogSigmoid(Module):
798
+ r"""Applies the Logsigmoid function element-wise.
799
+
800
+ .. math::
801
+ \text{LogSigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right)
802
+
803
+ Shape:
804
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
805
+ - Output: :math:`(*)`, same shape as the input.
806
+
807
+ .. image:: ../scripts/activation_images/LogSigmoid.png
808
+
809
+ Examples::
810
+
811
+ >>> m = nn.LogSigmoid()
812
+ >>> input = torch.randn(2)
813
+ >>> output = m(input)
814
+ """
815
+
816
+ def forward(self, input: Tensor) -> Tensor:
817
+ return F.logsigmoid(input)
818
+
819
+
820
+ class Softplus(Module):
821
+ r"""Applies the Softplus function element-wise.
822
+
823
+ .. math::
824
+ \text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))
825
+
826
+ SoftPlus is a smooth approximation to the ReLU function and can be used
827
+ to constrain the output of a machine to always be positive.
828
+
829
+ For numerical stability the implementation reverts to the linear function
830
+ when :math:`input \times \beta > threshold`.
831
+
832
+ Args:
833
+ beta: the :math:`\beta` value for the Softplus formulation. Default: 1
834
+ threshold: values above this revert to a linear function. Default: 20
835
+
836
+ Shape:
837
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
838
+ - Output: :math:`(*)`, same shape as the input.
839
+
840
+ .. image:: ../scripts/activation_images/Softplus.png
841
+
842
+ Examples::
843
+
844
+ >>> m = nn.Softplus()
845
+ >>> input = torch.randn(2)
846
+ >>> output = m(input)
847
+ """
848
+
849
+ __constants__ = ['beta', 'threshold']
850
+ beta: float
851
+ threshold: float
852
+
853
+ def __init__(self, beta: float = 1.0, threshold: float = 20.0) -> None:
854
+ super().__init__()
855
+ self.beta = beta
856
+ self.threshold = threshold
857
+
858
+ def forward(self, input: Tensor) -> Tensor:
859
+ return F.softplus(input, self.beta, self.threshold)
860
+
861
+ def extra_repr(self) -> str:
862
+ return f'beta={self.beta}, threshold={self.threshold}'
863
+
864
+
865
+ class Softshrink(Module):
866
+ r"""Applies the soft shrinkage function element-wise.
867
+
868
+ .. math::
869
+ \text{SoftShrinkage}(x) =
870
+ \begin{cases}
871
+ x - \lambda, & \text{ if } x > \lambda \\
872
+ x + \lambda, & \text{ if } x < -\lambda \\
873
+ 0, & \text{ otherwise }
874
+ \end{cases}
875
+
876
+ Args:
877
+ lambd: the :math:`\lambda` (must be no less than zero) value for the Softshrink formulation. Default: 0.5
878
+
879
+ Shape:
880
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
881
+ - Output: :math:`(*)`, same shape as the input.
882
+
883
+ .. image:: ../scripts/activation_images/Softshrink.png
884
+
885
+ Examples::
886
+
887
+ >>> m = nn.Softshrink()
888
+ >>> input = torch.randn(2)
889
+ >>> output = m(input)
890
+ """
891
+
892
+ __constants__ = ['lambd']
893
+ lambd: float
894
+
895
+ def __init__(self, lambd: float = 0.5) -> None:
896
+ super().__init__()
897
+ self.lambd = lambd
898
+
899
+ def forward(self, input: Tensor) -> Tensor:
900
+ return F.softshrink(input, self.lambd)
901
+
902
+ def extra_repr(self) -> str:
903
+ return str(self.lambd)
904
+
905
+
906
+ def _check_arg_device(x: Optional[torch.Tensor]) -> bool:
907
+ if x is not None:
908
+ return x.device.type in ["cpu", "cuda", torch.utils.backend_registration._privateuse1_backend_name]
909
+ return True
910
+
911
+
912
+ def _arg_requires_grad(x: Optional[torch.Tensor]) -> bool:
913
+ if x is not None:
914
+ return x.requires_grad
915
+ return False
916
+
917
+
918
+ def _is_make_fx_tracing():
919
+ if not torch.jit.is_scripting():
920
+ torch_dispatch_mode_stack = torch.utils._python_dispatch._get_current_dispatch_mode_stack()
921
+ return any(type(x) == torch.fx.experimental.proxy_tensor.ProxyTorchDispatchMode for x in torch_dispatch_mode_stack)
922
+ else:
923
+ return False
924
+
925
+
926
+ class MultiheadAttention(Module):
927
+ r"""Allows the model to jointly attend to information from different representation subspaces.
928
+
929
+ Method described in the paper:
930
+ `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
931
+
932
+ Multi-Head Attention is defined as:
933
+
934
+ .. math::
935
+ \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
936
+
937
+ where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
938
+
939
+ ``nn.MultiHeadAttention`` will use the optimized implementations of
940
+ ``scaled_dot_product_attention()`` when possible.
941
+
942
+ In addition to support for the new ``scaled_dot_product_attention()``
943
+ function, for speeding up Inference, MHA will use
944
+ fastpath inference with support for Nested Tensors, iff:
945
+
946
+ - self attention is being computed (i.e., ``query``, ``key``, and ``value`` are the same tensor).
947
+ - inputs are batched (3D) with ``batch_first==True``
948
+ - Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor argument ``requires_grad``
949
+ - training is disabled (using ``.eval()``)
950
+ - ``add_bias_kv`` is ``False``
951
+ - ``add_zero_attn`` is ``False``
952
+ - ``kdim`` and ``vdim`` are equal to ``embed_dim``
953
+ - if a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ is passed, neither ``key_padding_mask``
954
+ nor ``attn_mask`` is passed
955
+ - autocast is disabled
956
+
957
+ If the optimized inference fastpath implementation is in use, a
958
+ `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be passed for
959
+ ``query``/``key``/``value`` to represent padding more efficiently than using a
960
+ padding mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_
961
+ will be returned, and an additional speedup proportional to the fraction of the input
962
+ that is padding can be expected.
963
+
964
+ Args:
965
+ embed_dim: Total dimension of the model.
966
+ num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
967
+ across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
968
+ dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
969
+ bias: If specified, adds bias to input / output projection layers. Default: ``True``.
970
+ add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
971
+ add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
972
+ Default: ``False``.
973
+ kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
974
+ vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
975
+ batch_first: If ``True``, then the input and output tensors are provided
976
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
977
+
978
+ Examples::
979
+
980
+ >>> # xdoctest: +SKIP
981
+ >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
982
+ >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
983
+
984
+ .. _`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`:
985
+ https://arxiv.org/abs/2205.14135
986
+
987
+ """
988
+
989
+ __constants__ = ['batch_first']
990
+ bias_k: Optional[torch.Tensor]
991
+ bias_v: Optional[torch.Tensor]
992
+
993
+ def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,
994
+ kdim=None, vdim=None, batch_first=False, device=None, dtype=None) -> None:
995
+ if embed_dim <= 0 or num_heads <= 0:
996
+ raise ValueError(
997
+ f"embed_dim and num_heads must be greater than 0,"
998
+ f" got embed_dim={embed_dim} and num_heads={num_heads} instead"
999
+ )
1000
+ factory_kwargs = {'device': device, 'dtype': dtype}
1001
+ super().__init__()
1002
+ self.embed_dim = embed_dim
1003
+ self.kdim = kdim if kdim is not None else embed_dim
1004
+ self.vdim = vdim if vdim is not None else embed_dim
1005
+ self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
1006
+
1007
+ self.num_heads = num_heads
1008
+ self.dropout = dropout
1009
+ self.batch_first = batch_first
1010
+ self.head_dim = embed_dim // num_heads
1011
+ assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
1012
+
1013
+ if not self._qkv_same_embed_dim:
1014
+ self.q_proj_weight = Parameter(torch.empty((embed_dim, embed_dim), **factory_kwargs))
1015
+ self.k_proj_weight = Parameter(torch.empty((embed_dim, self.kdim), **factory_kwargs))
1016
+ self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim), **factory_kwargs))
1017
+ self.register_parameter('in_proj_weight', None)
1018
+ else:
1019
+ self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim), **factory_kwargs))
1020
+ self.register_parameter('q_proj_weight', None)
1021
+ self.register_parameter('k_proj_weight', None)
1022
+ self.register_parameter('v_proj_weight', None)
1023
+
1024
+ if bias:
1025
+ self.in_proj_bias = Parameter(torch.empty(3 * embed_dim, **factory_kwargs))
1026
+ else:
1027
+ self.register_parameter('in_proj_bias', None)
1028
+ self.out_proj = NonDynamicallyQuantizableLinear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
1029
+
1030
+ if add_bias_kv:
1031
+ self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
1032
+ self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
1033
+ else:
1034
+ self.bias_k = self.bias_v = None
1035
+
1036
+ self.add_zero_attn = add_zero_attn
1037
+
1038
+ self._reset_parameters()
1039
+
1040
+ def _reset_parameters(self):
1041
+ if self._qkv_same_embed_dim:
1042
+ xavier_uniform_(self.in_proj_weight)
1043
+ else:
1044
+ xavier_uniform_(self.q_proj_weight)
1045
+ xavier_uniform_(self.k_proj_weight)
1046
+ xavier_uniform_(self.v_proj_weight)
1047
+
1048
+ if self.in_proj_bias is not None:
1049
+ constant_(self.in_proj_bias, 0.)
1050
+ constant_(self.out_proj.bias, 0.)
1051
+ if self.bias_k is not None:
1052
+ xavier_normal_(self.bias_k)
1053
+ if self.bias_v is not None:
1054
+ xavier_normal_(self.bias_v)
1055
+
1056
+ def __setstate__(self, state):
1057
+ # Support loading old MultiheadAttention checkpoints generated by v1.1.0
1058
+ if '_qkv_same_embed_dim' not in state:
1059
+ state['_qkv_same_embed_dim'] = True
1060
+
1061
+ super().__setstate__(state)
1062
+
1063
+ def forward(
1064
+ self,
1065
+ query: Tensor,
1066
+ key: Tensor,
1067
+ value: Tensor,
1068
+ key_padding_mask: Optional[Tensor] = None,
1069
+ need_weights: bool = True,
1070
+ attn_mask: Optional[Tensor] = None,
1071
+ average_attn_weights: bool = True,
1072
+ is_causal : bool = False) -> Tuple[Tensor, Optional[Tensor]]:
1073
+ r"""Compute attention outputs using query, key, and value embeddings.
1074
+
1075
+ Supports optional parameters for padding, masks and attention weights.
1076
+
1077
+ Args:
1078
+ query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
1079
+ or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
1080
+ :math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
1081
+ Queries are compared against key-value pairs to produce the output.
1082
+ See "Attention Is All You Need" for more details.
1083
+ key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
1084
+ or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
1085
+ :math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
1086
+ See "Attention Is All You Need" for more details.
1087
+ value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
1088
+ ``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
1089
+ sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
1090
+ See "Attention Is All You Need" for more details.
1091
+ key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
1092
+ to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
1093
+ Binary and float masks are supported.
1094
+ For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
1095
+ the purpose of attention. For a float mask, it will be directly added to the corresponding ``key`` value.
1096
+ need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
1097
+ Set ``need_weights=False`` to use the optimized ``scaled_dot_product_attention``
1098
+ and achieve the best performance for MHA.
1099
+ Default: ``True``.
1100
+ attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
1101
+ :math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
1102
+ :math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
1103
+ broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
1104
+ Binary and float masks are supported. For a binary mask, a ``True`` value indicates that the
1105
+ corresponding position is not allowed to attend. For a float mask, the mask values will be added to
1106
+ the attention weight.
1107
+ If both attn_mask and key_padding_mask are supplied, their types should match.
1108
+ average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
1109
+ heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
1110
+ effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads)
1111
+ is_causal: If specified, applies a causal mask as attention mask.
1112
+ Default: ``False``.
1113
+ Warning:
1114
+ ``is_causal`` provides a hint that ``attn_mask`` is the
1115
+ causal mask. Providing incorrect hints can result in
1116
+ incorrect execution, including forward and backward
1117
+ compatibility.
1118
+
1119
+ Outputs:
1120
+ - **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
1121
+ :math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
1122
+ where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
1123
+ embedding dimension ``embed_dim``.
1124
+ - **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
1125
+ returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
1126
+ :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
1127
+ :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
1128
+ head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.
1129
+
1130
+ .. note::
1131
+ `batch_first` argument is ignored for unbatched inputs.
1132
+ """
1133
+ why_not_fast_path = ''
1134
+ if ((attn_mask is not None and torch.is_floating_point(attn_mask))
1135
+ or (key_padding_mask is not None) and torch.is_floating_point(key_padding_mask)):
1136
+ why_not_fast_path = "floating-point masks are not supported for fast path."
1137
+
1138
+ is_batched = query.dim() == 3
1139
+
1140
+ key_padding_mask = F._canonical_mask(
1141
+ mask=key_padding_mask,
1142
+ mask_name="key_padding_mask",
1143
+ other_type=F._none_or_dtype(attn_mask),
1144
+ other_name="attn_mask",
1145
+ target_type=query.dtype
1146
+ )
1147
+
1148
+ attn_mask = F._canonical_mask(
1149
+ mask=attn_mask,
1150
+ mask_name="attn_mask",
1151
+ other_type=None,
1152
+ other_name="",
1153
+ target_type=query.dtype,
1154
+ check_other=False,
1155
+ )
1156
+
1157
+ is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled()
1158
+
1159
+ if not is_fastpath_enabled:
1160
+ why_not_fast_path = "torch.backends.mha.get_fastpath_enabled() was not True"
1161
+ elif not is_batched:
1162
+ why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
1163
+ elif query is not key or key is not value:
1164
+ # When lifting this restriction, don't forget to either
1165
+ # enforce that the dtypes all match or test cases where
1166
+ # they don't!
1167
+ why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
1168
+ elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
1169
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
1170
+ elif self.in_proj_weight is None:
1171
+ why_not_fast_path = "in_proj_weight was None"
1172
+ elif query.dtype != self.in_proj_weight.dtype:
1173
+ # this case will fail anyway, but at least they'll get a useful error message.
1174
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
1175
+ elif self.training:
1176
+ why_not_fast_path = "training is enabled"
1177
+ elif (self.num_heads % 2) != 0:
1178
+ why_not_fast_path = "self.num_heads is not even"
1179
+ elif not self.batch_first:
1180
+ why_not_fast_path = "batch_first was not True"
1181
+ elif self.bias_k is not None:
1182
+ why_not_fast_path = "self.bias_k was not None"
1183
+ elif self.bias_v is not None:
1184
+ why_not_fast_path = "self.bias_v was not None"
1185
+ elif self.add_zero_attn:
1186
+ why_not_fast_path = "add_zero_attn was enabled"
1187
+ elif not self._qkv_same_embed_dim:
1188
+ why_not_fast_path = "_qkv_same_embed_dim was not True"
1189
+ elif query.is_nested and (key_padding_mask is not None or attn_mask is not None):
1190
+ why_not_fast_path = "supplying both src_key_padding_mask and src_mask at the same time \
1191
+ is not supported with NestedTensor input"
1192
+ elif torch.is_autocast_enabled():
1193
+ why_not_fast_path = "autocast is enabled"
1194
+
1195
+ if not why_not_fast_path:
1196
+ tensor_args = (
1197
+ query,
1198
+ key,
1199
+ value,
1200
+ self.in_proj_weight,
1201
+ self.in_proj_bias,
1202
+ self.out_proj.weight,
1203
+ self.out_proj.bias,
1204
+ )
1205
+ # We have to use list comprehensions below because TorchScript does not support
1206
+ # generator expressions.
1207
+ if torch.overrides.has_torch_function(tensor_args):
1208
+ why_not_fast_path = "some Tensor argument has_torch_function"
1209
+ elif _is_make_fx_tracing():
1210
+ why_not_fast_path = "we are running make_fx tracing"
1211
+ elif not all(_check_arg_device(x) for x in tensor_args):
1212
+ why_not_fast_path = ("some Tensor argument's device is neither one of "
1213
+ f"cpu, cuda or {torch.utils.backend_registration._privateuse1_backend_name}")
1214
+ elif torch.is_grad_enabled() and any(_arg_requires_grad(x) for x in tensor_args):
1215
+ why_not_fast_path = ("grad is enabled and at least one of query or the "
1216
+ "input/output projection weights or biases requires_grad")
1217
+ if not why_not_fast_path:
1218
+ merged_mask, mask_type = self.merge_masks(attn_mask, key_padding_mask, query)
1219
+
1220
+ if self.in_proj_bias is not None and self.in_proj_weight is not None:
1221
+ return torch._native_multi_head_attention(
1222
+ query,
1223
+ key,
1224
+ value,
1225
+ self.embed_dim,
1226
+ self.num_heads,
1227
+ self.in_proj_weight,
1228
+ self.in_proj_bias,
1229
+ self.out_proj.weight,
1230
+ self.out_proj.bias,
1231
+ merged_mask,
1232
+ need_weights,
1233
+ average_attn_weights,
1234
+ mask_type)
1235
+
1236
+ any_nested = query.is_nested or key.is_nested or value.is_nested
1237
+ assert not any_nested, ("MultiheadAttention does not support NestedTensor outside of its fast path. " +
1238
+ f"The fast path was not hit because {why_not_fast_path}")
1239
+
1240
+ if self.batch_first and is_batched:
1241
+ # make sure that the transpose op does not affect the "is" property
1242
+ if key is value:
1243
+ if query is key:
1244
+ query = key = value = query.transpose(1, 0)
1245
+ else:
1246
+ query, key = (x.transpose(1, 0) for x in (query, key))
1247
+ value = key
1248
+ else:
1249
+ query, key, value = (x.transpose(1, 0) for x in (query, key, value))
1250
+
1251
+ if not self._qkv_same_embed_dim:
1252
+ attn_output, attn_output_weights = F.multi_head_attention_forward(
1253
+ query, key, value, self.embed_dim, self.num_heads,
1254
+ self.in_proj_weight, self.in_proj_bias,
1255
+ self.bias_k, self.bias_v, self.add_zero_attn,
1256
+ self.dropout, self.out_proj.weight, self.out_proj.bias,
1257
+ training=self.training,
1258
+ key_padding_mask=key_padding_mask, need_weights=need_weights,
1259
+ attn_mask=attn_mask,
1260
+ use_separate_proj_weight=True,
1261
+ q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
1262
+ v_proj_weight=self.v_proj_weight,
1263
+ average_attn_weights=average_attn_weights,
1264
+ is_causal=is_causal)
1265
+ else:
1266
+ attn_output, attn_output_weights = F.multi_head_attention_forward(
1267
+ query, key, value, self.embed_dim, self.num_heads,
1268
+ self.in_proj_weight, self.in_proj_bias,
1269
+ self.bias_k, self.bias_v, self.add_zero_attn,
1270
+ self.dropout, self.out_proj.weight, self.out_proj.bias,
1271
+ training=self.training,
1272
+ key_padding_mask=key_padding_mask,
1273
+ need_weights=need_weights,
1274
+ attn_mask=attn_mask,
1275
+ average_attn_weights=average_attn_weights,
1276
+ is_causal=is_causal)
1277
+ if self.batch_first and is_batched:
1278
+ return attn_output.transpose(1, 0), attn_output_weights
1279
+ else:
1280
+ return attn_output, attn_output_weights
1281
+
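Editor's note: the gating above means the fused fast path is only reachable in inference-style settings. A minimal sketch of a configuration that passes those checks (eval mode, ``batch_first=True``, no autograd, even ``num_heads``, no exotic options); the sizes are arbitrary::

    import torch
    import torch.nn as nn

    mha = nn.MultiheadAttention(embed_dim=64, num_heads=8, batch_first=True)
    mha.eval()                  # self.training disables the fast path

    x = torch.randn(2, 10, 64)  # (batch, seq, embed) because batch_first=True
    with torch.no_grad():       # requires_grad on inputs/weights also disables it
        out, weights = mha(x, x, x)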
1282
+ def merge_masks(self, attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor],
1283
+ query: Tensor) -> Tuple[Optional[Tensor], Optional[int]]:
1284
+ r"""Determine mask type and combine masks if necessary.
1285
+
1286
+ If only one mask is provided, that mask
1287
+ and the corresponding mask type will be returned. If both masks are provided, they will both be
1288
+ expanded to shape ``(batch_size, num_heads, seq_len, seq_len)``, combined with logical ``or``,
1289
+ and mask type 2 will be returned.
1290
+ Args:
1291
+ attn_mask: attention mask of shape ``(seq_len, seq_len)``, mask type 0
1292
+ key_padding_mask: padding mask of shape ``(batch_size, seq_len)``, mask type 1
1293
+ query: query embeddings of shape ``(batch_size, seq_len, embed_dim)``
1294
+ Returns:
1295
+ merged_mask: merged mask
1296
+ mask_type: merged mask type (0, 1, or 2)
1297
+ """
1298
+ mask_type: Optional[int] = None
1299
+ merged_mask: Optional[Tensor] = None
1300
+
1301
+ if key_padding_mask is not None:
1302
+ mask_type = 1
1303
+ merged_mask = key_padding_mask
1304
+
1305
+ if attn_mask is not None:
1306
+ # In this branch query can't be a nested tensor, so it has a shape
1307
+ batch_size, seq_len, _ = query.shape
1308
+ mask_type = 2
1309
+
1310
+ # Always expands attn_mask to 4D
1311
+ if attn_mask.dim() == 3:
1312
+ attn_mask_expanded = attn_mask.view(batch_size, -1, seq_len, seq_len)
1313
+ else: # attn_mask.dim() == 2:
1314
+ attn_mask_expanded = attn_mask.view(1, 1, seq_len, seq_len).expand(batch_size, self.num_heads, -1, -1)
1315
+ merged_mask = attn_mask_expanded
1316
+
1317
+ if key_padding_mask is not None:
1318
+ key_padding_mask_expanded = key_padding_mask.view(batch_size, 1, 1, seq_len).expand(-1, self.num_heads, -1, -1)
1319
+ merged_mask = attn_mask_expanded + key_padding_mask_expanded
1320
+
1321
+ # no attn_mask and no key_padding_mask, returns None, None
1322
+ return merged_mask, mask_type
1323
+
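Editor's note: as an illustration of the merging logic (values chosen arbitrarily), combining a 2D attention mask with a padding mask yields a 4D merged mask of type 2::

    import torch
    import torch.nn as nn

    mha = nn.MultiheadAttention(embed_dim=8, num_heads=2, batch_first=True)
    query = torch.randn(4, 5, 8)                            # (batch, seq, embed)
    attn_mask = torch.zeros(5, 5)                           # mask type 0
    key_padding_mask = torch.zeros(4, 5, dtype=torch.bool)  # mask type 1

    merged, mask_type = mha.merge_masks(attn_mask, key_padding_mask, query)
    print(merged.shape, mask_type)   # torch.Size([4, 2, 5, 5]) 2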
1324
+
1325
+ class PReLU(Module):
1326
+ r"""Applies the element-wise PReLU function.
1327
+
1328
+ .. math::
1329
+ \text{PReLU}(x) = \max(0,x) + a * \min(0,x)
1330
+
1331
+ or
1332
+
1333
+ .. math::
1334
+ \text{PReLU}(x) =
1335
+ \begin{cases}
1336
+ x, & \text{ if } x \geq 0 \\
1337
+ ax, & \text{ otherwise }
1338
+ \end{cases}
1339
+
1340
+ Here :math:`a` is a learnable parameter. When called without arguments, `nn.PReLU()` uses a single
1341
+ parameter :math:`a` across all input channels. If called with `nn.PReLU(nChannels)`,
1342
+ a separate :math:`a` is used for each input channel.
1343
+
1344
+
1345
+ .. note::
1346
+ For good performance, weight decay should not be used when learning :math:`a`.
1347
+
1348
+ .. note::
1349
+ The channel dim is the 2nd dim of the input. When the input has fewer than 2 dims, there is
1350
+ no channel dim and the number of channels = 1.
1351
+
1352
+ Args:
1353
+ num_parameters (int): number of :math:`a` to learn.
1354
+ Although it takes an int as input, only two values are legitimate:
1355
+ 1, or the number of channels of the input. Default: 1
1356
+ init (float): the initial value of :math:`a`. Default: 0.25
1357
+
1358
+ Shape:
1359
+ - Input: :math:`(*)` where `*` means any number of additional
1360
+ dimensions.
1361
+ - Output: :math:`(*)`, same shape as the input.
1362
+
1363
+ Attributes:
1364
+ weight (Tensor): the learnable weights of shape (:attr:`num_parameters`).
1365
+
1366
+ .. image:: ../scripts/activation_images/PReLU.png
1367
+
1368
+ Examples::
1369
+
1370
+ >>> m = nn.PReLU()
1371
+ >>> input = torch.randn(2)
1372
+ >>> output = m(input)
1373
+ """
1374
+
1375
+ __constants__ = ['num_parameters']
1376
+ num_parameters: int
1377
+
1378
+ def __init__(self, num_parameters: int = 1, init: float = 0.25,
1379
+ device=None, dtype=None) -> None:
1380
+ factory_kwargs = {'device': device, 'dtype': dtype}
1381
+ self.num_parameters = num_parameters
1382
+ super().__init__()
1383
+ self.init = init
1384
+ self.weight = Parameter(torch.empty(num_parameters, **factory_kwargs))
1385
+ self.reset_parameters()
1386
+
1387
+ def reset_parameters(self):
1388
+ torch.nn.init.constant_(self.weight, self.init)
1389
+
1390
+ def forward(self, input: Tensor) -> Tensor:
1391
+ return F.prelu(input, self.weight)
1392
+
1393
+ def extra_repr(self) -> str:
1394
+ return f'num_parameters={self.num_parameters}'
1395
+
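Editor's note: a per-channel variant of the docstring example, in which each of the 16 input channels gets its own learnable slope::

    import torch
    import torch.nn as nn

    m = nn.PReLU(num_parameters=16)   # one learnable a per input channel
    x = torch.randn(8, 16, 32, 32)    # channel dim is dim 1
    y = m(x)                          # same shape as x
    print(m.weight.shape)             # torch.Size([16]), all initialized to 0.25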
1396
+
1397
+ class Softsign(Module):
1398
+ r"""Applies the element-wise Softsign function.
1399
+
1400
+ .. math::
1401
+ \text{SoftSign}(x) = \frac{x}{ 1 + |x|}
1402
+
1403
+ Shape:
1404
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
1405
+ - Output: :math:`(*)`, same shape as the input.
1406
+
1407
+ .. image:: ../scripts/activation_images/Softsign.png
1408
+
1409
+ Examples::
1410
+
1411
+ >>> m = nn.Softsign()
1412
+ >>> input = torch.randn(2)
1413
+ >>> output = m(input)
1414
+ """
1415
+
1416
+ def forward(self, input: Tensor) -> Tensor:
1417
+ return F.softsign(input)
1418
+
1419
+
1420
+ class Tanhshrink(Module):
1421
+ r"""Applies the element-wise Tanhshrink function.
1422
+
1423
+ .. math::
1424
+ \text{Tanhshrink}(x) = x - \tanh(x)
1425
+
1426
+ Shape:
1427
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
1428
+ - Output: :math:`(*)`, same shape as the input.
1429
+
1430
+ .. image:: ../scripts/activation_images/Tanhshrink.png
1431
+
1432
+ Examples::
1433
+
1434
+ >>> m = nn.Tanhshrink()
1435
+ >>> input = torch.randn(2)
1436
+ >>> output = m(input)
1437
+ """
1438
+
1439
+ def forward(self, input: Tensor) -> Tensor:
1440
+ return F.tanhshrink(input)
1441
+
1442
+
1443
+ class Softmin(Module):
1444
+ r"""Applies the Softmin function to an n-dimensional input Tensor.
1445
+
1446
+ Rescales the input so that the elements of the n-dimensional output Tensor
1447
+ lie in the range `[0, 1]` and sum to 1.
1448
+
1449
+ Softmin is defined as:
1450
+
1451
+ .. math::
1452
+ \text{Softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_j \exp(-x_j)}
1453
+
1454
+ Shape:
1455
+ - Input: :math:`(*)` where `*` means any number of additional
1456
+ dimensions
1457
+ - Output: :math:`(*)`, same shape as the input
1458
+
1459
+ Args:
1460
+ dim (int): A dimension along which Softmin will be computed (so every slice
1461
+ along dim will sum to 1).
1462
+
1463
+ Returns:
1464
+ a Tensor of the same dimension and shape as the input, with
1465
+ values in the range [0, 1]
1466
+
1467
+ Examples::
1468
+
1469
+ >>> m = nn.Softmin(dim=1)
1470
+ >>> input = torch.randn(2, 3)
1471
+ >>> output = m(input)
1472
+ """
1473
+
1474
+ __constants__ = ['dim']
1475
+ dim: Optional[int]
1476
+
1477
+ def __init__(self, dim: Optional[int] = None) -> None:
1478
+ super().__init__()
1479
+ self.dim = dim
1480
+
1481
+ def __setstate__(self, state):
1482
+ super().__setstate__(state)
1483
+ if not hasattr(self, 'dim'):
1484
+ self.dim = None
1485
+
1486
+ def forward(self, input: Tensor) -> Tensor:
1487
+ return F.softmin(input, self.dim, _stacklevel=5)
1488
+
1489
+ def extra_repr(self):
1490
+ return f'dim={self.dim}'
1491
+
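Editor's note: from the definition above, Softmin over ``x`` equals Softmax over ``-x``; a quick check::

    import torch
    import torch.nn as nn

    x = torch.randn(2, 3)
    assert torch.allclose(nn.Softmin(dim=1)(x), nn.Softmax(dim=1)(-x))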
1492
+ class Softmax(Module):
1493
+ r"""Applies the Softmax function to an n-dimensional input Tensor.
1494
+
1495
+ Rescales the input so that the elements of the n-dimensional output Tensor
1496
+ lie in the range [0,1] and sum to 1.
1497
+
1498
+ Softmax is defined as:
1499
+
1500
+ .. math::
1501
+ \text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
1502
+
1503
+ When the input Tensor is a sparse tensor then the unspecified
1504
+ values are treated as ``-inf``.
1505
+
1506
+ Shape:
1507
+ - Input: :math:`(*)` where `*` means any number of additional
1508
+ dimensions
1509
+ - Output: :math:`(*)`, same shape as the input
1510
+
1511
+ Returns:
1512
+ a Tensor of the same dimension and shape as the input with
1513
+ values in the range [0, 1]
1514
+
1515
+ Args:
1516
+ dim (int): A dimension along which Softmax will be computed (so every slice
1517
+ along dim will sum to 1).
1518
+
1519
+ .. note::
1520
+ This module doesn't work directly with NLLLoss,
1521
+ which expects log-probabilities as its input.
1522
+ Use `LogSoftmax` instead (it's faster and has better numerical properties).
1523
+
1524
+ Examples::
1525
+
1526
+ >>> m = nn.Softmax(dim=1)
1527
+ >>> input = torch.randn(2, 3)
1528
+ >>> output = m(input)
1529
+
1530
+ """
1531
+
1532
+ __constants__ = ['dim']
1533
+ dim: Optional[int]
1534
+
1535
+ def __init__(self, dim: Optional[int] = None) -> None:
1536
+ super().__init__()
1537
+ self.dim = dim
1538
+
1539
+ def __setstate__(self, state):
1540
+ super().__setstate__(state)
1541
+ if not hasattr(self, 'dim'):
1542
+ self.dim = None
1543
+
1544
+ def forward(self, input: Tensor) -> Tensor:
1545
+ return F.softmax(input, self.dim, _stacklevel=5)
1546
+
1547
+ def extra_repr(self) -> str:
1548
+ return f'dim={self.dim}'
1549
+
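Editor's note: the note above is why classification pipelines pair ``LogSoftmax`` with ``NLLLoss``; together they match ``CrossEntropyLoss`` applied to raw logits::

    import torch
    import torch.nn as nn

    logits = torch.randn(4, 10)
    target = torch.randint(0, 10, (4,))

    log_probs = nn.LogSoftmax(dim=1)(logits)
    assert torch.allclose(nn.NLLLoss()(log_probs, target),
                          nn.CrossEntropyLoss()(logits, target))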
1550
+
1551
+ class Softmax2d(Module):
1552
+ r"""Applies SoftMax over features to each spatial location.
1553
+
1554
+ When given an image of ``Channels x Height x Width``, it will
1555
+ apply `Softmax` to each location :math:`(Channels, h_i, w_j)`
1556
+
1557
+ Shape:
1558
+ - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`.
1559
+ - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
1560
+
1561
+ Returns:
1562
+ a Tensor of the same dimension and shape as the input with
1563
+ values in the range [0, 1]
1564
+
1565
+ Examples::
1566
+
1567
+ >>> m = nn.Softmax2d()
1568
+ >>> # softmax is applied over the channel dimension
1569
+ >>> input = torch.randn(2, 3, 12, 13)
1570
+ >>> output = m(input)
1571
+ """
1572
+
1573
+ def forward(self, input: Tensor) -> Tensor:
1574
+ if input.dim() not in (3, 4):
1575
+ raise ValueError(
1576
+ f"Softmax2d: expected input to be 3D or 4D, got {input.dim()}D instead"
1577
+ )
1578
+ return F.softmax(input, -3, _stacklevel=5)
1579
+
1580
+
1581
+ class LogSoftmax(Module):
1582
+ r"""Applies the :math:`\log(\text{Softmax}(x))` function to an n-dimensional input Tensor.
1583
+
1584
+ The LogSoftmax formulation can be simplified as:
1585
+
1586
+ .. math::
1587
+ \text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
1588
+
1589
+ Shape:
1590
+ - Input: :math:`(*)` where `*` means any number of additional
1591
+ dimensions
1592
+ - Output: :math:`(*)`, same shape as the input
1593
+
1594
+ Args:
1595
+ dim (int): A dimension along which LogSoftmax will be computed.
1596
+
1597
+ Returns:
1598
+ a Tensor of the same dimension and shape as the input with
1599
+ values in the range [-inf, 0)
1600
+
1601
+ Examples::
1602
+
1603
+ >>> m = nn.LogSoftmax(dim=1)
1604
+ >>> input = torch.randn(2, 3)
1605
+ >>> output = m(input)
1606
+ """
1607
+
1608
+ __constants__ = ['dim']
1609
+ dim: Optional[int]
1610
+
1611
+ def __init__(self, dim: Optional[int] = None) -> None:
1612
+ super().__init__()
1613
+ self.dim = dim
1614
+
1615
+ def __setstate__(self, state):
1616
+ super().__setstate__(state)
1617
+ if not hasattr(self, 'dim'):
1618
+ self.dim = None
1619
+
1620
+ def forward(self, input: Tensor) -> Tensor:
1621
+ return F.log_softmax(input, self.dim, _stacklevel=5)
1622
+
1623
+ def extra_repr(self):
1624
+ return f'dim={self.dim}'
venv/lib/python3.10/site-packages/torch/nn/modules/adaptive.py ADDED
@@ -0,0 +1,312 @@
1
+
2
+ from collections import namedtuple
3
+
4
+ import torch
5
+
6
+ from torch import Tensor
7
+ from typing import List, Sequence
8
+
9
+ from . import Sequential, ModuleList, Linear
10
+ from .module import Module
11
+ from ..functional import log_softmax
12
+
13
+ __all__ = ['AdaptiveLogSoftmaxWithLoss']
14
+
15
+ _ASMoutput = namedtuple('_ASMoutput', ['output', 'loss'])
16
+
17
+
18
+ class AdaptiveLogSoftmaxWithLoss(Module):
19
+ r"""Efficient softmax approximation.
20
+
21
+ As described in
22
+ `Efficient softmax approximation for GPUs by Edouard Grave, Armand Joulin,
23
+ Moustapha Cissé, David Grangier, and Hervé Jégou
24
+ <https://arxiv.org/abs/1609.04309>`__.
25
+
26
+ Adaptive softmax is an approximate strategy for training models with large
27
+ output spaces. It is most effective when the label distribution is highly
28
+ imbalanced, for example in natural language modelling, where the word
29
+ frequency distribution approximately follows the `Zipf's law`_.
30
+
31
+ Adaptive softmax partitions the labels into several clusters, according to
32
+ their frequency. These clusters may contain different number of targets
33
+ each.
34
+ Additionally, clusters containing less frequent labels assign lower
35
+ dimensional embeddings to those labels, which speeds up the computation.
36
+ For each minibatch, only clusters for which at least one target is
37
+ present are evaluated.
38
+
39
+ The idea is that the clusters which are accessed frequently
40
+ (like the first one, containing most frequent labels), should also be cheap
41
+ to compute -- that is, contain a small number of assigned labels.
42
+
43
+ We highly recommend taking a look at the original paper for more details.
44
+
45
+ * :attr:`cutoffs` should be an ordered Sequence of integers sorted
46
+ in the increasing order.
47
+ It controls number of clusters and the partitioning of targets into
48
+ clusters. For example setting ``cutoffs = [10, 100, 1000]``
49
+ means that first `10` targets will be assigned
50
+ to the 'head' of the adaptive softmax, targets `11, 12, ..., 100` will be
51
+ assigned to the first cluster, and targets `101, 102, ..., 1000` will be
52
+ assigned to the second cluster, while targets
53
+ `1001, 1002, ..., n_classes - 1` will be assigned
54
+ to the last, third cluster.
55
+
56
+ * :attr:`div_value` is used to compute the size of each additional cluster,
57
+ which is given as
58
+ :math:`\left\lfloor\frac{\texttt{in\_features}}{\texttt{div\_value}^{idx}}\right\rfloor`,
59
+ where :math:`idx` is the cluster index (with clusters
60
+ for less frequent words having larger indices,
61
+ and indices starting from :math:`1`).
62
+
63
+ * :attr:`head_bias` if set to True, adds a bias term to the 'head' of the
64
+ adaptive softmax. See paper for details. Set to False in the official
65
+ implementation.
66
+
67
+ .. warning::
68
+ Labels passed as inputs to this module should be sorted according to
69
+ their frequency. This means that the most frequent label should be
70
+ represented by the index `0`, and the least frequent
71
+ label should be represented by the index `n_classes - 1`.
72
+
73
+ .. note::
74
+ This module returns a ``NamedTuple`` with ``output``
75
+ and ``loss`` fields. See further documentation for details.
76
+
77
+ .. note::
78
+ To compute log-probabilities for all classes, the ``log_prob``
79
+ method can be used.
80
+
81
+ Args:
82
+ in_features (int): Number of features in the input tensor
83
+ n_classes (int): Number of classes in the dataset
84
+ cutoffs (Sequence): Cutoffs used to assign targets to their buckets
85
+ div_value (float, optional): value used as an exponent to compute sizes
86
+ of the clusters. Default: 4.0
87
+ head_bias (bool, optional): If ``True``, adds a bias term to the 'head' of the
88
+ adaptive softmax. Default: ``False``
89
+
90
+ Returns:
91
+ ``NamedTuple`` with ``output`` and ``loss`` fields:
92
+ * **output** is a Tensor of size ``N`` containing computed target
93
+ log probabilities for each example
94
+ * **loss** is a Scalar representing the computed negative
95
+ log likelihood loss
96
+
97
+ Shape:
98
+ - input: :math:`(N, \texttt{in\_features})` or :math:`(\texttt{in\_features})`
99
+ - target: :math:`(N)` or :math:`()` where each value satisfies :math:`0 <= \texttt{target[i]} <= \texttt{n\_classes}`
100
+ - output1: :math:`(N)` or :math:`()`
101
+ - output2: ``Scalar``
102
+
103
+ .. _Zipf's law: https://en.wikipedia.org/wiki/Zipf%27s_law
104
+ """
105
+
106
+ in_features: int
107
+ n_classes: int
108
+ cutoffs: List[int]
109
+ div_value: float
110
+ head_bias: bool
111
+ head: Linear
112
+ tail: ModuleList
113
+
114
+ def __init__(
115
+ self,
116
+ in_features: int,
117
+ n_classes: int,
118
+ cutoffs: Sequence[int],
119
+ div_value: float = 4.,
120
+ head_bias: bool = False,
121
+ device=None,
122
+ dtype=None
123
+ ) -> None:
124
+ factory_kwargs = {'device': device, 'dtype': dtype}
125
+ super().__init__()
126
+
127
+ cutoffs = list(cutoffs)
128
+
129
+ if (len(cutoffs) == 0):
130
+ raise ValueError("cutoffs should be a sequence of length larger than 0")
131
+
132
+ if (cutoffs != sorted(cutoffs)) \
133
+ or (min(cutoffs) <= 0) \
134
+ or (max(cutoffs) > (n_classes - 1)) \
135
+ or (len(set(cutoffs)) != len(cutoffs)) \
136
+ or any(int(c) != c for c in cutoffs):
137
+
138
+ raise ValueError("cutoffs should be a sequence of unique, positive "
139
+ "integers sorted in an increasing order, where "
140
+ "each value is between 1 and n_classes-1")
141
+
142
+ self.in_features = in_features
143
+ self.n_classes = n_classes
144
+ self.cutoffs = cutoffs + [n_classes]
145
+ self.div_value = div_value
146
+ self.head_bias = head_bias
147
+
148
+ self.shortlist_size = self.cutoffs[0]
149
+ self.n_clusters = len(self.cutoffs) - 1
150
+ self.head_size = self.shortlist_size + self.n_clusters
151
+
152
+ self.head = Linear(self.in_features, self.head_size, bias=self.head_bias,
153
+ **factory_kwargs)
154
+ self.tail = ModuleList()
155
+
156
+ for i in range(self.n_clusters):
157
+
158
+ hsz = int(self.in_features // (self.div_value ** (i + 1)))
159
+ osz = self.cutoffs[i + 1] - self.cutoffs[i]
160
+
161
+ projection = Sequential(
162
+ Linear(self.in_features, hsz, bias=False, **factory_kwargs),
163
+ Linear(hsz, osz, bias=False, **factory_kwargs),
164
+ )
165
+
166
+ self.tail.append(projection)
167
+
168
+ def reset_parameters(self) -> None:
169
+ self.head.reset_parameters()
170
+ for i2h, h2o in self.tail:
171
+ i2h.reset_parameters()
172
+ h2o.reset_parameters()
173
+
174
+ def forward(self, input_: Tensor, target_: Tensor) -> _ASMoutput:
175
+ targ_dim = target_.dim()
176
+
177
+ if targ_dim == 1:
178
+ if input_.size(0) != target_.size(0):
179
+ raise RuntimeError('Input and target should have the same size '
180
+ 'in the batch dimension.')
181
+ if input_.dim() != 2:
182
+ raise RuntimeError('1D target tensor expects 2D input tensors, '
183
+ 'but found inputs with size', input_.size())
184
+ elif targ_dim == 0:
185
+ if input_.dim() != 1:
186
+ raise RuntimeError('0D target tensor expects 1D input tensors, '
187
+ 'but found inputs with size', input_.size())
188
+ else:
189
+ raise RuntimeError('0D or 1D target tensor expected, '
190
+ 'multi-target not supported')
191
+
192
+ is_batched = targ_dim > 0
193
+ input = input_ if is_batched else input_.unsqueeze(0)
194
+ target = target_ if is_batched else target_.unsqueeze(0)
195
+
196
+ used_rows = 0
197
+ batch_size = target.size(0)
198
+
199
+ output = input.new_zeros(batch_size)
200
+ gather_inds = target.new_empty(batch_size)
201
+
202
+ cutoff_values = [0] + self.cutoffs
203
+ for i in range(len(cutoff_values) - 1):
204
+
205
+ low_idx = cutoff_values[i]
206
+ high_idx = cutoff_values[i + 1]
207
+
208
+ target_mask = (target >= low_idx) & (target < high_idx)
209
+ row_indices = target_mask.nonzero().squeeze()
210
+
211
+ if row_indices.numel() == 0:
212
+ continue
213
+
214
+ if i == 0:
215
+ gather_inds.index_copy_(0, row_indices, target[target_mask])
216
+
217
+ else:
218
+ relative_target = target[target_mask] - low_idx
219
+ input_subset = input.index_select(0, row_indices)
220
+
221
+ cluster_output = self.tail[i - 1](input_subset)
222
+ cluster_index = self.shortlist_size + i - 1
223
+
224
+ gather_inds.index_fill_(0, row_indices, cluster_index)
225
+ cluster_logprob = log_softmax(cluster_output, dim=1)
226
+ local_logprob = cluster_logprob.gather(1, relative_target.unsqueeze(1))
227
+ output.index_copy_(0, row_indices, local_logprob.squeeze(1))
228
+
229
+ used_rows += row_indices.numel()
230
+
231
+ if used_rows != batch_size:
232
+ raise RuntimeError(f"Target values should be in [0, {self.n_classes - 1}], "
233
+ f"but values in range [{target.min().item()}, {target.max().item()}] "
234
+ "were found. ")
235
+
236
+ head_output = self.head(input)
237
+ head_logprob = log_softmax(head_output, dim=1)
238
+ output += head_logprob.gather(1, gather_inds.unsqueeze(1)).squeeze()
239
+ loss = (-output).mean()
240
+
241
+ if not is_batched:
242
+ output = output.squeeze(0)
243
+
244
+ return _ASMoutput(output, loss)
245
+
246
+ def _get_full_log_prob(self, input, head_output):
247
+ """Given input tensor, and output of ``self.head``, compute the log of the full distribution."""
248
+ out = input.new_empty((head_output.size(0), self.n_classes))
249
+ head_logprob = log_softmax(head_output, dim=1)
250
+
251
+ out[:, :self.shortlist_size] = head_logprob[:, :self.shortlist_size]
252
+
253
+ for i, (start_idx, stop_idx) in enumerate(zip(self.cutoffs, self.cutoffs[1:])):
254
+ cluster_output = self.tail[i](input)
255
+ cluster_logprob = log_softmax(cluster_output, dim=1)
256
+ output_logprob = cluster_logprob + head_logprob[:, self.shortlist_size + i].unsqueeze(1)
257
+
258
+ out[:, start_idx:stop_idx] = output_logprob
259
+
260
+ return out
261
+
262
+ def log_prob(self, input: Tensor) -> Tensor:
263
+ r"""Compute log probabilities for all :math:`\texttt{n\_classes}`.
264
+
265
+ Args:
266
+ input (Tensor): a minibatch of examples
267
+
268
+ Returns:
269
+ log-probabilities of for each class :math:`c`
270
+ in range :math:`0 <= c <= \texttt{n\_classes}`, where :math:`\texttt{n\_classes}` is a
271
+ parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
272
+
273
+ Shape:
274
+ - Input: :math:`(N, \texttt{in\_features})`
275
+ - Output: :math:`(N, \texttt{n\_classes})`
276
+
277
+ """
278
+ head_output = self.head(input)
279
+ return self._get_full_log_prob(input, head_output)
280
+
281
+ def predict(self, input: Tensor) -> Tensor:
282
+ r"""Return the class with the highest probability for each example in the input minibatch.
283
+
284
+ This is equivalent to ``self.log_prob(input).argmax(dim=1)``, but is more efficient in some cases.
285
+
286
+ Args:
287
+ input (Tensor): a minibatch of examples
288
+
289
+ Returns:
290
+ output (Tensor): a class with the highest probability for each example
291
+
292
+ Shape:
293
+ - Input: :math:`(N, \texttt{in\_features})`
294
+ - Output: :math:`(N)`
295
+ """
296
+ head_output = self.head(input)
297
+ output = torch.argmax(head_output, dim=1)
298
+ not_in_shortlist = (output >= self.shortlist_size)
299
+ all_in_shortlist = not (not_in_shortlist.any())
300
+
301
+ if all_in_shortlist:
302
+ return output
303
+
304
+ elif not_in_shortlist.all():
305
+ log_prob = self._get_full_log_prob(input, head_output)
306
+ return torch.argmax(log_prob, dim=1)
307
+
308
+ else:
309
+ log_prob = self._get_full_log_prob(input[not_in_shortlist],
310
+ head_output[not_in_shortlist])
311
+ output[not_in_shortlist] = torch.argmax(log_prob, dim=1)
312
+ return output
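
Editor's note: to make the cutoff and ``div_value`` mechanics above concrete, a small sketch with arbitrary sizes; each tail projection's bottleneck follows :math:`\lfloor 64 / 4^{idx} \rfloor`::

    import torch
    import torch.nn as nn

    asm = nn.AdaptiveLogSoftmaxWithLoss(in_features=64, n_classes=5000,
                                        cutoffs=[10, 100, 1000], div_value=4.0)

    x = torch.randn(32, 64)
    target = torch.randint(0, 5000, (32,))
    out, loss = asm(x, target)   # out: (32,) target log-probs, loss: scalar

    print(asm.head)              # Linear(64 -> 13): 10 shortlist + 3 cluster logits
    print([tail[0].out_features for tail in asm.tail])   # [16, 4, 1]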
venv/lib/python3.10/site-packages/torch/nn/modules/conv.py ADDED
@@ -0,0 +1,1602 @@
1
+ import math
2
+ import warnings
3
+
4
+ import torch
5
+ from torch import Tensor
6
+ from torch.nn.parameter import Parameter, UninitializedParameter
7
+ from .. import functional as F
8
+ from .. import init
9
+ from .lazy import LazyModuleMixin
10
+ from .module import Module
11
+ from .utils import _single, _pair, _triple, _reverse_repeat_tuple
12
+ from torch._torch_docs import reproducibility_notes
13
+
14
+ from ..common_types import _size_1_t, _size_2_t, _size_3_t
15
+ from typing import Optional, List, Tuple, Union
16
+
17
+ __all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d',
18
+ 'LazyConv1d', 'LazyConv2d', 'LazyConv3d', 'LazyConvTranspose1d', 'LazyConvTranspose2d',
19
+ 'LazyConvTranspose3d']
20
+
21
+ convolution_notes = \
22
+ {"groups_note": r"""* :attr:`groups` controls the connections between inputs and outputs.
23
+ :attr:`in_channels` and :attr:`out_channels` must both be divisible by
24
+ :attr:`groups`. For example,
25
+
26
+ * At groups=1, all inputs are convolved to all outputs.
27
+ * At groups=2, the operation becomes equivalent to having two conv
28
+ layers side by side, each seeing half the input channels
29
+ and producing half the output channels, and both subsequently
30
+ concatenated.
31
+ * At groups= :attr:`in_channels`, each input channel is convolved with
32
+ its own set of filters (of size
33
+ :math:`\frac{\text{out\_channels}}{\text{in\_channels}}`).""",
34
+
35
+ "depthwise_separable_note": r"""When `groups == in_channels` and `out_channels == K * in_channels`,
36
+ where `K` is a positive integer, this operation is also known as a "depthwise convolution".
37
+
38
+ In other words, for an input of size :math:`(N, C_{in}, L_{in})`,
39
+ a depthwise convolution with a depthwise multiplier `K` can be performed with the arguments
40
+ :math:`(C_\text{in}=C_\text{in}, C_\text{out}=C_\text{in} \times \text{K}, ..., \text{groups}=C_\text{in})`."""} # noqa: B950
41
+
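Editor's note: a concrete instance of the depthwise note above, with a depthwise multiplier ``K = 3`` and arbitrary sizes::

    import torch
    import torch.nn as nn

    conv = nn.Conv2d(in_channels=8, out_channels=24, kernel_size=3,
                     groups=8, padding=1)     # depthwise, K = 3
    x = torch.randn(1, 8, 16, 16)
    print(conv(x).shape)       # torch.Size([1, 24, 16, 16])
    print(conv.weight.shape)   # torch.Size([24, 1, 3, 3]); in_channels // groups == 1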
42
+
43
+
44
+
45
+
46
+ class _ConvNd(Module):
47
+
48
+ __constants__ = ['stride', 'padding', 'dilation', 'groups',
49
+ 'padding_mode', 'output_padding', 'in_channels',
50
+ 'out_channels', 'kernel_size']
51
+ __annotations__ = {'bias': Optional[torch.Tensor]}
52
+
53
+ def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: # type: ignore[empty-body]
54
+ ...
55
+
56
+ in_channels: int
57
+ _reversed_padding_repeated_twice: List[int]
58
+ out_channels: int
59
+ kernel_size: Tuple[int, ...]
60
+ stride: Tuple[int, ...]
61
+ padding: Union[str, Tuple[int, ...]]
62
+ dilation: Tuple[int, ...]
63
+ transposed: bool
64
+ output_padding: Tuple[int, ...]
65
+ groups: int
66
+ padding_mode: str
67
+ weight: Tensor
68
+ bias: Optional[Tensor]
69
+
70
+ def __init__(self,
71
+ in_channels: int,
72
+ out_channels: int,
73
+ kernel_size: Tuple[int, ...],
74
+ stride: Tuple[int, ...],
75
+ padding: Tuple[int, ...],
76
+ dilation: Tuple[int, ...],
77
+ transposed: bool,
78
+ output_padding: Tuple[int, ...],
79
+ groups: int,
80
+ bias: bool,
81
+ padding_mode: str,
82
+ device=None,
83
+ dtype=None) -> None:
84
+ factory_kwargs = {'device': device, 'dtype': dtype}
85
+ super().__init__()
86
+ if groups <= 0:
87
+ raise ValueError('groups must be a positive integer')
88
+ if in_channels % groups != 0:
89
+ raise ValueError('in_channels must be divisible by groups')
90
+ if out_channels % groups != 0:
91
+ raise ValueError('out_channels must be divisible by groups')
92
+ valid_padding_strings = {'same', 'valid'}
93
+ if isinstance(padding, str):
94
+ if padding not in valid_padding_strings:
95
+ raise ValueError(
96
+ f"Invalid padding string {padding!r}, should be one of {valid_padding_strings}")
97
+ if padding == 'same' and any(s != 1 for s in stride):
98
+ raise ValueError("padding='same' is not supported for strided convolutions")
99
+
100
+ valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
101
+ if padding_mode not in valid_padding_modes:
102
+ raise ValueError(f"padding_mode must be one of {valid_padding_modes}, but got padding_mode='{padding_mode}'")
103
+ self.in_channels = in_channels
104
+ self.out_channels = out_channels
105
+ self.kernel_size = kernel_size
106
+ self.stride = stride
107
+ self.padding = padding
108
+ self.dilation = dilation
109
+ self.transposed = transposed
110
+ self.output_padding = output_padding
111
+ self.groups = groups
112
+ self.padding_mode = padding_mode
113
+ # `_reversed_padding_repeated_twice` is the padding to be passed to
114
+ # `F.pad` if needed (e.g., for non-zero padding types that are
115
+ # implemented as two ops: padding + conv). `F.pad` accepts paddings in
116
+ # reverse order of the dimensions.
117
+ if isinstance(self.padding, str):
118
+ self._reversed_padding_repeated_twice = [0, 0] * len(kernel_size)
119
+ if padding == 'same':
120
+ for d, k, i in zip(dilation, kernel_size,
121
+ range(len(kernel_size) - 1, -1, -1)):
122
+ total_padding = d * (k - 1)
123
+ left_pad = total_padding // 2
124
+ self._reversed_padding_repeated_twice[2 * i] = left_pad
125
+ self._reversed_padding_repeated_twice[2 * i + 1] = (
126
+ total_padding - left_pad)
127
+ else:
128
+ self._reversed_padding_repeated_twice = _reverse_repeat_tuple(self.padding, 2)
129
+
130
+ if transposed:
131
+ self.weight = Parameter(torch.empty(
132
+ (in_channels, out_channels // groups, *kernel_size), **factory_kwargs))
133
+ else:
134
+ self.weight = Parameter(torch.empty(
135
+ (out_channels, in_channels // groups, *kernel_size), **factory_kwargs))
136
+ if bias:
137
+ self.bias = Parameter(torch.empty(out_channels, **factory_kwargs))
138
+ else:
139
+ self.register_parameter('bias', None)
140
+
141
+ self.reset_parameters()
142
+
143
+ def reset_parameters(self) -> None:
144
+ # Setting a=sqrt(5) in kaiming_uniform is the same as initializing with
145
+ # uniform(-1/sqrt(k), 1/sqrt(k)), where k = weight.size(1) * prod(*kernel_size)
146
+ # For more details see: https://github.com/pytorch/pytorch/issues/15314#issuecomment-477448573
147
+ init.kaiming_uniform_(self.weight, a=math.sqrt(5))
148
+ if self.bias is not None:
149
+ fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
150
+ if fan_in != 0:
151
+ bound = 1 / math.sqrt(fan_in)
152
+ init.uniform_(self.bias, -bound, bound)
153
+
154
+ def extra_repr(self):
155
+ s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
156
+ ', stride={stride}')
157
+ if self.padding != (0,) * len(self.padding):
158
+ s += ', padding={padding}'
159
+ if self.dilation != (1,) * len(self.dilation):
160
+ s += ', dilation={dilation}'
161
+ if self.output_padding != (0,) * len(self.output_padding):
162
+ s += ', output_padding={output_padding}'
163
+ if self.groups != 1:
164
+ s += ', groups={groups}'
165
+ if self.bias is None:
166
+ s += ', bias=False'
167
+ if self.padding_mode != 'zeros':
168
+ s += ', padding_mode={padding_mode}'
169
+ return s.format(**self.__dict__)
170
+
171
+ def __setstate__(self, state):
172
+ super().__setstate__(state)
173
+ if not hasattr(self, 'padding_mode'):
174
+ self.padding_mode = 'zeros'
175
+
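Editor's note: the ``'same'`` branch above splits ``dilation * (kernel_size - 1)`` cells of padding, putting the extra cell on the right when the total is odd. Inspecting the private attribute purely for illustration::

    import torch.nn as nn

    conv = nn.Conv1d(1, 1, kernel_size=4, padding='same')
    # total = 1 * (4 - 1) = 3, left = 3 // 2 = 1, right = 2
    print(conv._reversed_padding_repeated_twice)   # [1, 2]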
176
+
177
+ class Conv1d(_ConvNd):
178
+ __doc__ = r"""Applies a 1D convolution over an input signal composed of several input
179
+ planes.
180
+
181
+ In the simplest case, the output value of the layer with input size
182
+ :math:`(N, C_{\text{in}}, L)` and output :math:`(N, C_{\text{out}}, L_{\text{out}})` can be
183
+ precisely described as:
184
+
185
+ .. math::
186
+ \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
187
+ \sum_{k = 0}^{C_{in} - 1} \text{weight}(C_{\text{out}_j}, k)
188
+ \star \text{input}(N_i, k)
189
+
190
+ where :math:`\star` is the valid `cross-correlation`_ operator,
191
+ :math:`N` is the batch size, :math:`C` denotes the number of channels, and
192
+ :math:`L` is the length of the signal sequence.
193
+ """ + r"""
194
+
195
+ This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
196
+
197
+ On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
198
+
199
+ * :attr:`stride` controls the stride for the cross-correlation, a single
200
+ number or a one-element tuple.
201
+
202
+ * :attr:`padding` controls the amount of padding applied to the input. It
203
+ can be either a string {{'valid', 'same'}} or a tuple of ints giving the
204
+ amount of implicit padding applied on both sides.
205
+
206
+ * :attr:`dilation` controls the spacing between the kernel points; also
207
+ known as the à trous algorithm. It is harder to describe, but this `link`_
208
+ has a nice visualization of what :attr:`dilation` does.
209
+
210
+ {groups_note}
211
+
212
+ Note:
213
+ {depthwise_separable_note}
214
+ Note:
215
+ {cudnn_reproducibility_note}
216
+
217
+ Note:
218
+ ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
219
+ the input so the output has the same shape as the input. However, this mode
220
+ doesn't support any stride values other than 1.
221
+
222
+ Note:
223
+ This module supports complex data types i.e. ``complex32, complex64, complex128``.
224
+
225
+ Args:
226
+ in_channels (int): Number of channels in the input image
227
+ out_channels (int): Number of channels produced by the convolution
228
+ kernel_size (int or tuple): Size of the convolving kernel
229
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
230
+ padding (int, tuple or str, optional): Padding added to both sides of
231
+ the input. Default: 0
232
+ padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
233
+ ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
234
+ dilation (int or tuple, optional): Spacing between kernel
235
+ elements. Default: 1
236
+ groups (int, optional): Number of blocked connections from input
237
+ channels to output channels. Default: 1
238
+ bias (bool, optional): If ``True``, adds a learnable bias to the
239
+ output. Default: ``True``
240
+
241
+ """.format(**reproducibility_notes, **convolution_notes) + r"""
242
+
243
+ Shape:
244
+ - Input: :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`
245
+ - Output: :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`, where
246
+
247
+ .. math::
248
+ L_{out} = \left\lfloor\frac{L_{in} + 2 \times \text{padding} - \text{dilation}
249
+ \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
250
+
251
+ Attributes:
252
+ weight (Tensor): the learnable weights of the module of shape
253
+ :math:`(\text{out\_channels},
254
+ \frac{\text{in\_channels}}{\text{groups}}, \text{kernel\_size})`.
255
+ The values of these weights are sampled from
256
+ :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
257
+ :math:`k = \frac{groups}{C_\text{in} * \text{kernel\_size}}`
258
+ bias (Tensor): the learnable bias of the module of shape
259
+ (out_channels). If :attr:`bias` is ``True``, then the values of these weights are
260
+ sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
261
+ :math:`k = \frac{groups}{C_\text{in} * \text{kernel\_size}}`
262
+
263
+ Examples::
264
+
265
+ >>> m = nn.Conv1d(16, 33, 3, stride=2)
266
+ >>> input = torch.randn(20, 16, 50)
267
+ >>> output = m(input)
268
+
269
+ .. _cross-correlation:
270
+ https://en.wikipedia.org/wiki/Cross-correlation
271
+
272
+ .. _link:
273
+ https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
274
+ """
275
+
276
+ def __init__(
277
+ self,
278
+ in_channels: int,
279
+ out_channels: int,
280
+ kernel_size: _size_1_t,
281
+ stride: _size_1_t = 1,
282
+ padding: Union[str, _size_1_t] = 0,
283
+ dilation: _size_1_t = 1,
284
+ groups: int = 1,
285
+ bias: bool = True,
286
+ padding_mode: str = 'zeros', # TODO: refine this type
287
+ device=None,
288
+ dtype=None
289
+ ) -> None:
290
+ factory_kwargs = {'device': device, 'dtype': dtype}
291
+ # we create new variables below to make mypy happy since kernel_size has
292
+ # type Union[int, Tuple[int]] and kernel_size_ has type Tuple[int]
293
+ kernel_size_ = _single(kernel_size)
294
+ stride_ = _single(stride)
295
+ padding_ = padding if isinstance(padding, str) else _single(padding)
296
+ dilation_ = _single(dilation)
297
+ super().__init__(
298
+ in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
299
+ False, _single(0), groups, bias, padding_mode, **factory_kwargs)
300
+
301
+ def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
302
+ if self.padding_mode != 'zeros':
303
+ return F.conv1d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
304
+ weight, bias, self.stride,
305
+ _single(0), self.dilation, self.groups)
306
+ return F.conv1d(input, weight, bias, self.stride,
307
+ self.padding, self.dilation, self.groups)
308
+
309
+ def forward(self, input: Tensor) -> Tensor:
310
+ return self._conv_forward(input, self.weight, self.bias)
311
+
312
+
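Editor's note: plugging the docstring example into the :math:`L_{out}` formula above::

    import torch
    import torch.nn as nn

    m = nn.Conv1d(16, 33, kernel_size=3, stride=2)   # padding=0, dilation=1
    x = torch.randn(20, 16, 50)
    # L_out = floor((50 + 0 - 1 * (3 - 1) - 1) / 2 + 1) = floor(24.5) = 24
    print(m(x).shape)   # torch.Size([20, 33, 24])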
313
+ class Conv2d(_ConvNd):
314
+ __doc__ = r"""Applies a 2D convolution over an input signal composed of several input
315
+ planes.
316
+
317
+ In the simplest case, the output value of the layer with input size
318
+ :math:`(N, C_{\text{in}}, H, W)` and output :math:`(N, C_{\text{out}}, H_{\text{out}}, W_{\text{out}})`
319
+ can be precisely described as:
320
+
321
+ .. math::
322
+ \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
323
+ \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)
324
+
325
+
326
+ where :math:`\star` is the valid 2D `cross-correlation`_ operator,
327
+ :math:`N` is the batch size, :math:`C` denotes the number of channels,
328
+ :math:`H` is the height of the input planes in pixels, and :math:`W` is
329
+ the width in pixels.
330
+ """ + r"""
331
+
332
+ This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
333
+
334
+ On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
335
+
336
+ * :attr:`stride` controls the stride for the cross-correlation, a single
337
+ number or a tuple.
338
+
339
+ * :attr:`padding` controls the amount of padding applied to the input. It
340
+ can be either a string {{'valid', 'same'}} or an int / a tuple of ints giving the
341
+ amount of implicit padding applied on both sides.
342
+
343
+ * :attr:`dilation` controls the spacing between the kernel points; also
344
+ known as the à trous algorithm. It is harder to describe, but this `link`_
345
+ has a nice visualization of what :attr:`dilation` does.
346
+
347
+ {groups_note}
348
+
349
+ The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
350
+
351
+ - a single ``int`` -- in which case the same value is used for the height and width dimension
352
+ - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
353
+ and the second `int` for the width dimension
354
+
355
+ Note:
356
+ {depthwise_separable_note}
357
+
358
+ Note:
359
+ {cudnn_reproducibility_note}
360
+
361
+ Note:
362
+ ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
363
+ the input so the output has the same shape as the input. However, this mode
364
+ doesn't support any stride values other than 1.
365
+
366
+ Note:
367
+ This module supports complex data types i.e. ``complex32, complex64, complex128``.
368
+
369
+ Args:
370
+ in_channels (int): Number of channels in the input image
371
+ out_channels (int): Number of channels produced by the convolution
372
+ kernel_size (int or tuple): Size of the convolving kernel
373
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
374
+ padding (int, tuple or str, optional): Padding added to all four sides of
375
+ the input. Default: 0
376
+ padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
377
+ ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
378
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
379
+ groups (int, optional): Number of blocked connections from input
380
+ channels to output channels. Default: 1
381
+ bias (bool, optional): If ``True``, adds a learnable bias to the
382
+ output. Default: ``True``
383
+ """.format(**reproducibility_notes, **convolution_notes) + r"""
384
+
385
+ Shape:
386
+ - Input: :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`
387
+ - Output: :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`, where
388
+
389
+ .. math::
390
+ H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
391
+ \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
392
+
393
+ .. math::
394
+ W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
395
+ \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
396
+
397
+ Attributes:
398
+ weight (Tensor): the learnable weights of the module of shape
399
+ :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
400
+ :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
401
+ The values of these weights are sampled from
402
+ :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
403
+ :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
404
+ bias (Tensor): the learnable bias of the module of shape
405
+ (out_channels). If :attr:`bias` is ``True``,
406
+ then the values of these weights are
407
+ sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
408
+ :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
409
+
410
+ Examples::
411
+
412
+ >>> # With square kernels and equal stride
413
+ >>> m = nn.Conv2d(16, 33, 3, stride=2)
414
+ >>> # non-square kernels and unequal stride and with padding
415
+ >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
416
+ >>> # non-square kernels and unequal stride and with padding and dilation
417
+ >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
418
+ >>> input = torch.randn(20, 16, 50, 100)
419
+ >>> output = m(input)
420
+
421
+ .. _cross-correlation:
422
+ https://en.wikipedia.org/wiki/Cross-correlation
423
+
424
+ .. _link:
425
+ https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
426
+ """
427
+
428
+ def __init__(
429
+ self,
430
+ in_channels: int,
431
+ out_channels: int,
432
+ kernel_size: _size_2_t,
433
+ stride: _size_2_t = 1,
434
+ padding: Union[str, _size_2_t] = 0,
435
+ dilation: _size_2_t = 1,
436
+ groups: int = 1,
437
+ bias: bool = True,
438
+ padding_mode: str = 'zeros', # TODO: refine this type
439
+ device=None,
440
+ dtype=None
441
+ ) -> None:
442
+ factory_kwargs = {'device': device, 'dtype': dtype}
443
+ kernel_size_ = _pair(kernel_size)
444
+ stride_ = _pair(stride)
445
+ padding_ = padding if isinstance(padding, str) else _pair(padding)
446
+ dilation_ = _pair(dilation)
447
+ super().__init__(
448
+ in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
449
+ False, _pair(0), groups, bias, padding_mode, **factory_kwargs)
450
+
451
+ def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
452
+ if self.padding_mode != 'zeros':
453
+ return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
454
+ weight, bias, self.stride,
455
+ _pair(0), self.dilation, self.groups)
456
+ return F.conv2d(input, weight, bias, self.stride,
457
+ self.padding, self.dilation, self.groups)
458
+
459
+ def forward(self, input: Tensor) -> Tensor:
460
+ return self._conv_forward(input, self.weight, self.bias)
461
+
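Editor's note: following the note above, ``padding='same'`` preserves the spatial size (stride must stay 1); sizes here are arbitrary::

    import torch
    import torch.nn as nn

    conv = nn.Conv2d(3, 8, kernel_size=5, padding='same')
    x = torch.randn(1, 3, 28, 28)
    print(conv(x).shape)   # torch.Size([1, 8, 28, 28])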
462
+ class Conv3d(_ConvNd):
463
+ __doc__ = r"""Applies a 3D convolution over an input signal composed of several input
464
+ planes.
465
+
466
+ In the simplest case, the output value of the layer with input size :math:`(N, C_{in}, D, H, W)`
467
+ and output :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` can be precisely described as:
468
+
469
+ .. math::
470
+ out(N_i, C_{out_j}) = bias(C_{out_j}) +
471
+ \sum_{k = 0}^{C_{in} - 1} weight(C_{out_j}, k) \star input(N_i, k)
472
+
473
+ where :math:`\star` is the valid 3D `cross-correlation`_ operator
474
+ """ + r"""
475
+
476
+ This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
477
+
478
+ On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
479
+
480
+ * :attr:`stride` controls the stride for the cross-correlation.
481
+
482
+ * :attr:`padding` controls the amount of padding applied to the input. It
483
+ can be either a string {{'valid', 'same'}} or a tuple of ints giving the
484
+ amount of implicit padding applied on both sides.
485
+
486
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
487
+ It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
488
+
489
+ {groups_note}
490
+
491
+ The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
492
+
493
+ - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
494
+ - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
495
+ the second `int` for the height dimension and the third `int` for the width dimension
496
+
497
+ Note:
498
+ {depthwise_separable_note}
499
+
500
+ Note:
501
+ {cudnn_reproducibility_note}
502
+
503
+ Note:
504
+ ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
505
+ the input so the output has the same shape as the input. However, this mode
506
+ doesn't support any stride values other than 1.
507
+
508
+ Note:
509
+ This module supports complex data types i.e. ``complex32, complex64, complex128``.
510
+
511
+ Args:
512
+ in_channels (int): Number of channels in the input image
513
+ out_channels (int): Number of channels produced by the convolution
514
+ kernel_size (int or tuple): Size of the convolving kernel
515
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
516
+ padding (int, tuple or str, optional): Padding added to all six sides of
517
+ the input. Default: 0
518
+ padding_mode (str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
519
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
520
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
521
+ bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
522
+ """.format(**reproducibility_notes, **convolution_notes) + r"""
523
+
524
+ Shape:
525
+ - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`
526
+ - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` or :math:`(C_{out}, D_{out}, H_{out}, W_{out})`,
527
+ where
528
+
529
+ .. math::
530
+ D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
531
+ \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
532
+
533
+ .. math::
534
+ H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
535
+ \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
536
+
537
+ .. math::
538
+ W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2]
539
+ \times (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor
540
+
541
+ Attributes:
542
+ weight (Tensor): the learnable weights of the module of shape
543
+ :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
544
+ :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
545
+ The values of these weights are sampled from
546
+ :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
547
+ :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
548
+ bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
549
+ then the values of these weights are
550
+ sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
551
+ :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
552
+
553
+ Examples::
554
+
555
+ >>> # With square kernels and equal stride
556
+ >>> m = nn.Conv3d(16, 33, 3, stride=2)
557
+ >>> # non-square kernels and unequal stride and with padding
558
+ >>> m = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
559
+ >>> input = torch.randn(20, 16, 10, 50, 100)
560
+ >>> output = m(input)
561
+
562
+ .. _cross-correlation:
563
+ https://en.wikipedia.org/wiki/Cross-correlation
564
+
565
+ .. _link:
566
+ https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
567
+ """
568
+
569
+ def __init__(
570
+ self,
571
+ in_channels: int,
572
+ out_channels: int,
573
+ kernel_size: _size_3_t,
574
+ stride: _size_3_t = 1,
575
+ padding: Union[str, _size_3_t] = 0,
576
+ dilation: _size_3_t = 1,
577
+ groups: int = 1,
578
+ bias: bool = True,
579
+ padding_mode: str = 'zeros',
580
+ device=None,
581
+ dtype=None
582
+ ) -> None:
583
+ factory_kwargs = {'device': device, 'dtype': dtype}
584
+ kernel_size_ = _triple(kernel_size)
585
+ stride_ = _triple(stride)
586
+ padding_ = padding if isinstance(padding, str) else _triple(padding)
587
+ dilation_ = _triple(dilation)
588
+ super().__init__(
589
+ in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
590
+ False, _triple(0), groups, bias, padding_mode, **factory_kwargs)
591
+
592
+ def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
593
+ if self.padding_mode != "zeros":
594
+ return F.conv3d(
595
+ F.pad(
596
+ input, self._reversed_padding_repeated_twice, mode=self.padding_mode
597
+ ),
598
+ weight,
599
+ bias,
600
+ self.stride,
601
+ _triple(0),
602
+ self.dilation,
603
+ self.groups,
604
+ )
605
+ return F.conv3d(
606
+ input, weight, bias, self.stride, self.padding, self.dilation, self.groups
607
+ )
608
+
609
+ def forward(self, input: Tensor) -> Tensor:
610
+ return self._conv_forward(input, self.weight, self.bias)
611
+
612
+
613
+
614
+ class _ConvTransposeNd(_ConvNd):
615
+ def __init__(self, in_channels, out_channels, kernel_size, stride,
616
+ padding, dilation, transposed, output_padding,
617
+ groups, bias, padding_mode, device=None, dtype=None) -> None:
618
+ if padding_mode != 'zeros':
619
+ raise ValueError(f'Only "zeros" padding mode is supported for {self.__class__.__name__}')
620
+
621
+ factory_kwargs = {'device': device, 'dtype': dtype}
622
+ super().__init__(
623
+ in_channels, out_channels, kernel_size, stride,
624
+ padding, dilation, transposed, output_padding,
625
+ groups, bias, padding_mode, **factory_kwargs)
626
+
627
+ # dilation being an optional parameter is for backwards
628
+ # compatibility
629
+ def _output_padding(self, input: Tensor, output_size: Optional[List[int]],
630
+ stride: List[int], padding: List[int], kernel_size: List[int],
631
+ num_spatial_dims: int, dilation: Optional[List[int]] = None) -> List[int]:
632
+ if output_size is None:
633
+ ret = _single(self.output_padding) # converting to list if was not already
634
+ else:
635
+ has_batch_dim = input.dim() == num_spatial_dims + 2
636
+ num_non_spatial_dims = 2 if has_batch_dim else 1
637
+ if len(output_size) == num_non_spatial_dims + num_spatial_dims:
638
+ output_size = output_size[num_non_spatial_dims:]
639
+ if len(output_size) != num_spatial_dims:
640
+ raise ValueError(
641
+ "ConvTranspose{}D: for {}D input, output_size must have {} or {} elements (got {})"
642
+ .format(num_spatial_dims, input.dim(), num_spatial_dims,
643
+ num_non_spatial_dims + num_spatial_dims, len(output_size)))
644
+
645
+ min_sizes = torch.jit.annotate(List[int], [])
646
+ max_sizes = torch.jit.annotate(List[int], [])
647
+ for d in range(num_spatial_dims):
648
+ dim_size = ((input.size(d + num_non_spatial_dims) - 1) * stride[d] -
649
+ 2 * padding[d] +
650
+ (dilation[d] if dilation is not None else 1) * (kernel_size[d] - 1) + 1)
651
+ min_sizes.append(dim_size)
652
+ max_sizes.append(min_sizes[d] + stride[d] - 1)
653
+
654
+ for i in range(len(output_size)):
655
+ size = output_size[i]
656
+ min_size = min_sizes[i]
657
+ max_size = max_sizes[i]
658
+ if size < min_size or size > max_size:
659
+ raise ValueError(
660
+ f"requested an output size of {output_size}, but valid sizes range "
661
+ f"from {min_sizes} to {max_sizes} (for an input of {input.size()[2:]})")
662
+
663
+ res = torch.jit.annotate(List[int], [])
664
+ for d in range(num_spatial_dims):
665
+ res.append(output_size[d] - min_sizes[d])
666
+
667
+ ret = res
668
+ return ret
669
+
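Editor's note: ``_output_padding`` exists because a strided convolution maps several input sizes to the same output size; a sketch of the ambiguity it resolves, using ``output_size`` to pick the larger shape::

    import torch
    import torch.nn as nn

    conv = nn.Conv1d(1, 1, kernel_size=3, stride=2)
    print(conv(torch.randn(1, 1, 9)).shape)    # torch.Size([1, 1, 4])
    print(conv(torch.randn(1, 1, 10)).shape)   # torch.Size([1, 1, 4]) -- same output

    deconv = nn.ConvTranspose1d(1, 1, kernel_size=3, stride=2)
    y = torch.randn(1, 1, 4)
    print(deconv(y).shape)                     # torch.Size([1, 1, 9]) by default
    print(deconv(y, output_size=[10]).shape)   # torch.Size([1, 1, 10])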
670
+
671
+ class ConvTranspose1d(_ConvTransposeNd):
672
+ __doc__ = r"""Applies a 1D transposed convolution operator over an input image
673
+ composed of several input planes.
674
+
675
+ This module can be seen as the gradient of Conv1d with respect to its input.
676
+ It is also known as a fractionally-strided convolution or
677
+ a deconvolution (although it is not an actual deconvolution operation as it does
678
+ not compute a true inverse of convolution). For more information, see the visualizations
679
+ `here`_ and the `Deconvolutional Networks`_ paper.
680
+
681
+ This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
682
+
683
+ On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
684
+
685
+ * :attr:`stride` controls the stride for the cross-correlation.
686
+
687
+ * :attr:`padding` controls the amount of implicit zero padding on both
688
+ sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
689
+ below for details.
690
+
691
+ * :attr:`output_padding` controls the additional size added to one side
692
+ of the output shape. See note below for details.
693
+
694
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
695
+ It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
696
+
697
+ {groups_note}
698
+
699
+ Note:
700
+ The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
701
+        amount of zero padding to both sides of the input. This is set so that
702
+ when a :class:`~torch.nn.Conv1d` and a :class:`~torch.nn.ConvTranspose1d`
703
+        are initialized with the same parameters, they are inverses of each other in
704
+ regard to the input and output shapes. However, when ``stride > 1``,
705
+ :class:`~torch.nn.Conv1d` maps multiple input shapes to the same output
706
+ shape. :attr:`output_padding` is provided to resolve this ambiguity by
707
+ effectively increasing the calculated output shape on one side. Note
708
+ that :attr:`output_padding` is only used to find output shape, but does
709
+ not actually add zero-padding to output.
710
+
711
+ Note:
712
+ In some circumstances when using the CUDA backend with CuDNN, this operator
713
+ may select a nondeterministic algorithm to increase performance. If this is
714
+ undesirable, you can try to make the operation deterministic (potentially at
715
+ a performance cost) by setting ``torch.backends.cudnn.deterministic =
716
+ True``.
717
+ Please see the notes on :doc:`/notes/randomness` for background.
718
+
719
+
720
+ Args:
721
+ in_channels (int): Number of channels in the input image
722
+ out_channels (int): Number of channels produced by the convolution
723
+ kernel_size (int or tuple): Size of the convolving kernel
724
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
725
+ padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
726
+ will be added to both sides of the input. Default: 0
727
+ output_padding (int or tuple, optional): Additional size added to one side
728
+ of the output shape. Default: 0
729
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
730
+ bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
731
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
732
+ """.format(**reproducibility_notes, **convolution_notes) + r"""
733
+
734
+ Shape:
735
+ - Input: :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`
736
+ - Output: :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`, where
737
+
738
+ .. math::
739
+ L_{out} = (L_{in} - 1) \times \text{stride} - 2 \times \text{padding} + \text{dilation}
740
+ \times (\text{kernel\_size} - 1) + \text{output\_padding} + 1
741
+
742
+ Attributes:
743
+ weight (Tensor): the learnable weights of the module of shape
744
+ :math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
745
+ :math:`\text{kernel\_size})`.
746
+ The values of these weights are sampled from
747
+ :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
748
+ :math:`k = \frac{groups}{C_\text{out} * \text{kernel\_size}}`
749
+ bias (Tensor): the learnable bias of the module of shape (out_channels).
750
+ If :attr:`bias` is ``True``, then the values of these weights are
751
+ sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
752
+ :math:`k = \frac{groups}{C_\text{out} * \text{kernel\_size}}`
753
+
754
+ .. _`here`:
755
+ https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
756
+
757
+ .. _`Deconvolutional Networks`:
758
+ https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
759
+ """
760
+
761
+ def __init__(
762
+ self,
763
+ in_channels: int,
764
+ out_channels: int,
765
+ kernel_size: _size_1_t,
766
+ stride: _size_1_t = 1,
767
+ padding: _size_1_t = 0,
768
+ output_padding: _size_1_t = 0,
769
+ groups: int = 1,
770
+ bias: bool = True,
771
+ dilation: _size_1_t = 1,
772
+ padding_mode: str = 'zeros',
773
+ device=None,
774
+ dtype=None
775
+ ) -> None:
776
+ factory_kwargs = {'device': device, 'dtype': dtype}
777
+ kernel_size = _single(kernel_size)
778
+ stride = _single(stride)
779
+ padding = _single(padding)
780
+ dilation = _single(dilation)
781
+ output_padding = _single(output_padding)
782
+ super().__init__(
783
+ in_channels, out_channels, kernel_size, stride, padding, dilation,
784
+ True, output_padding, groups, bias, padding_mode, **factory_kwargs)
785
+
786
+ def forward(self, input: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
787
+ if self.padding_mode != 'zeros':
788
+ raise ValueError('Only `zeros` padding mode is supported for ConvTranspose1d')
789
+
790
+ assert isinstance(self.padding, tuple)
791
+ # One cannot replace List by Tuple or Sequence in "_output_padding" because
792
+ # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
793
+ num_spatial_dims = 1
794
+ output_padding = self._output_padding(
795
+ input, output_size, self.stride, self.padding, self.kernel_size, # type: ignore[arg-type]
796
+ num_spatial_dims, self.dilation) # type: ignore[arg-type]
797
+ return F.conv_transpose1d(
798
+ input, self.weight, self.bias, self.stride, self.padding,
799
+ output_padding, self.groups, self.dilation)
800
+
801
+
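One detail from the Attributes section above that is easy to miss: transposed convolutions store their weight as ``(in_channels, out_channels // groups, kernel_size)``, the reverse of the regular convolution layout. A quick sketch (standard `torch` assumed):

import torch
from torch import nn

t = nn.ConvTranspose1d(in_channels=8, out_channels=4, kernel_size=3, groups=2)
c = nn.Conv1d(in_channels=8, out_channels=4, kernel_size=3, groups=2)

print(t.weight.shape)  # torch.Size([8, 2, 3]) -> (in, out // groups, k)
print(c.weight.shape)  # torch.Size([4, 4, 3]) -> (out, in // groups, k)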
802
+ class ConvTranspose2d(_ConvTransposeNd):
803
+ __doc__ = r"""Applies a 2D transposed convolution operator over an input image
804
+ composed of several input planes.
805
+
806
+ This module can be seen as the gradient of Conv2d with respect to its input.
807
+ It is also known as a fractionally-strided convolution or
808
+ a deconvolution (although it is not an actual deconvolution operation as it does
809
+ not compute a true inverse of convolution). For more information, see the visualizations
810
+ `here`_ and the `Deconvolutional Networks`_ paper.
811
+
812
+ This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
813
+
814
+ On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
815
+
816
+ * :attr:`stride` controls the stride for the cross-correlation.
817
+
818
+ * :attr:`padding` controls the amount of implicit zero padding on both
819
+ sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
820
+ below for details.
821
+
822
+ * :attr:`output_padding` controls the additional size added to one side
823
+ of the output shape. See note below for details.
824
+
825
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
826
+ It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
827
+
828
+ {groups_note}
829
+
830
+ The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
831
+ can either be:
832
+
833
+ - a single ``int`` -- in which case the same value is used for the height and width dimensions
834
+ - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
835
+ and the second `int` for the width dimension
836
+
837
+ Note:
838
+ The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
839
+        amount of zero padding to both sides of the input. This is set so that
840
+ when a :class:`~torch.nn.Conv2d` and a :class:`~torch.nn.ConvTranspose2d`
841
+        are initialized with the same parameters, they are inverses of each other in
842
+ regard to the input and output shapes. However, when ``stride > 1``,
843
+ :class:`~torch.nn.Conv2d` maps multiple input shapes to the same output
844
+ shape. :attr:`output_padding` is provided to resolve this ambiguity by
845
+ effectively increasing the calculated output shape on one side. Note
846
+ that :attr:`output_padding` is only used to find output shape, but does
847
+ not actually add zero-padding to output.
848
+
849
+ Note:
850
+ {cudnn_reproducibility_note}
851
+
852
+ Args:
853
+ in_channels (int): Number of channels in the input image
854
+ out_channels (int): Number of channels produced by the convolution
855
+ kernel_size (int or tuple): Size of the convolving kernel
856
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
857
+ padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
858
+ will be added to both sides of each dimension in the input. Default: 0
859
+ output_padding (int or tuple, optional): Additional size added to one side
860
+ of each dimension in the output shape. Default: 0
861
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
862
+ bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
863
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
864
+ """.format(**reproducibility_notes, **convolution_notes) + r"""
865
+
866
+ Shape:
867
+ - Input: :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`
868
+ - Output: :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`, where
869
+
870
+ .. math::
871
+ H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
872
+ \times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
873
+ .. math::
874
+ W_{out} = (W_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
875
+ \times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1
876
+
877
+ Attributes:
878
+ weight (Tensor): the learnable weights of the module of shape
879
+ :math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
880
+ :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
881
+ The values of these weights are sampled from
882
+ :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
883
+ :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
884
+ bias (Tensor): the learnable bias of the module of shape (out_channels)
885
+ If :attr:`bias` is ``True``, then the values of these weights are
886
+ sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
887
+ :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
888
+
889
+ Examples::
890
+
891
+ >>> # With square kernels and equal stride
892
+ >>> m = nn.ConvTranspose2d(16, 33, 3, stride=2)
893
+ >>> # non-square kernels and unequal stride and with padding
894
+ >>> m = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
895
+ >>> input = torch.randn(20, 16, 50, 100)
896
+ >>> output = m(input)
897
+ >>> # exact output size can be also specified as an argument
898
+ >>> input = torch.randn(1, 16, 12, 12)
899
+ >>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
900
+ >>> upsample = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
901
+ >>> h = downsample(input)
902
+ >>> h.size()
903
+ torch.Size([1, 16, 6, 6])
904
+ >>> output = upsample(h, output_size=input.size())
905
+ >>> output.size()
906
+ torch.Size([1, 16, 12, 12])
907
+
908
+ .. _`here`:
909
+ https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
910
+
911
+ .. _`Deconvolutional Networks`:
912
+ https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
913
+ """
914
+
915
+ def __init__(
916
+ self,
917
+ in_channels: int,
918
+ out_channels: int,
919
+ kernel_size: _size_2_t,
920
+ stride: _size_2_t = 1,
921
+ padding: _size_2_t = 0,
922
+ output_padding: _size_2_t = 0,
923
+ groups: int = 1,
924
+ bias: bool = True,
925
+ dilation: _size_2_t = 1,
926
+ padding_mode: str = 'zeros',
927
+ device=None,
928
+ dtype=None
929
+ ) -> None:
930
+ factory_kwargs = {'device': device, 'dtype': dtype}
931
+ kernel_size = _pair(kernel_size)
932
+ stride = _pair(stride)
933
+ padding = _pair(padding)
934
+ dilation = _pair(dilation)
935
+ output_padding = _pair(output_padding)
936
+ super().__init__(
937
+ in_channels, out_channels, kernel_size, stride, padding, dilation,
938
+ True, output_padding, groups, bias, padding_mode, **factory_kwargs)
939
+
940
+ def forward(self, input: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
941
+ if self.padding_mode != 'zeros':
942
+ raise ValueError('Only `zeros` padding mode is supported for ConvTranspose2d')
943
+
944
+ assert isinstance(self.padding, tuple)
945
+ # One cannot replace List by Tuple or Sequence in "_output_padding" because
946
+ # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
947
+ num_spatial_dims = 2
948
+ output_padding = self._output_padding(
949
+ input, output_size, self.stride, self.padding, self.kernel_size, # type: ignore[arg-type]
950
+ num_spatial_dims, self.dilation) # type: ignore[arg-type]
951
+
952
+ return F.conv_transpose2d(
953
+ input, self.weight, self.bias, self.stride, self.padding,
954
+ output_padding, self.groups, self.dilation)
955
+
956
+
957
+ class ConvTranspose3d(_ConvTransposeNd):
958
+ __doc__ = r"""Applies a 3D transposed convolution operator over an input image composed of several input
959
+ planes.
960
+ The transposed convolution operator multiplies each input value element-wise by a learnable kernel,
961
+ and sums over the outputs from all input feature planes.
962
+
963
+ This module can be seen as the gradient of Conv3d with respect to its input.
964
+ It is also known as a fractionally-strided convolution or
965
+ a deconvolution (although it is not an actual deconvolution operation as it does
966
+ not compute a true inverse of convolution). For more information, see the visualizations
967
+ `here`_ and the `Deconvolutional Networks`_ paper.
968
+
969
+ This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
970
+
971
+ On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
972
+
973
+ * :attr:`stride` controls the stride for the cross-correlation.
974
+
975
+ * :attr:`padding` controls the amount of implicit zero padding on both
976
+ sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
977
+ below for details.
978
+
979
+ * :attr:`output_padding` controls the additional size added to one side
980
+ of the output shape. See note below for details.
981
+
982
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
983
+ It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
984
+
985
+ {groups_note}
986
+
987
+ The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
988
+ can either be:
989
+
990
+ - a single ``int`` -- in which case the same value is used for the depth, height and width dimensions
991
+ - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
992
+ the second `int` for the height dimension and the third `int` for the width dimension
993
+
994
+ Note:
995
+ The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
996
+        amount of zero padding to both sides of the input. This is set so that
997
+ when a :class:`~torch.nn.Conv3d` and a :class:`~torch.nn.ConvTranspose3d`
998
+        are initialized with the same parameters, they are inverses of each other in
999
+ regard to the input and output shapes. However, when ``stride > 1``,
1000
+ :class:`~torch.nn.Conv3d` maps multiple input shapes to the same output
1001
+ shape. :attr:`output_padding` is provided to resolve this ambiguity by
1002
+ effectively increasing the calculated output shape on one side. Note
1003
+ that :attr:`output_padding` is only used to find output shape, but does
1004
+ not actually add zero-padding to output.
1005
+
1006
+ Note:
1007
+ {cudnn_reproducibility_note}
1008
+
1009
+ Args:
1010
+ in_channels (int): Number of channels in the input image
1011
+ out_channels (int): Number of channels produced by the convolution
1012
+ kernel_size (int or tuple): Size of the convolving kernel
1013
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
1014
+ padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
1015
+ will be added to both sides of each dimension in the input. Default: 0
1016
+ output_padding (int or tuple, optional): Additional size added to one side
1017
+ of each dimension in the output shape. Default: 0
1018
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
1019
+ bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
1020
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
1021
+ """.format(**reproducibility_notes, **convolution_notes) + r"""
1022
+
1023
+ Shape:
1024
+ - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`
1025
+ - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` or
1026
+ :math:`(C_{out}, D_{out}, H_{out}, W_{out})`, where
1027
+
1028
+ .. math::
1029
+ D_{out} = (D_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
1030
+ \times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
1031
+ .. math::
1032
+ H_{out} = (H_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
1033
+ \times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1
1034
+ .. math::
1035
+ W_{out} = (W_{in} - 1) \times \text{stride}[2] - 2 \times \text{padding}[2] + \text{dilation}[2]
1036
+ \times (\text{kernel\_size}[2] - 1) + \text{output\_padding}[2] + 1
1037
+
1038
+
1039
+ Attributes:
1040
+ weight (Tensor): the learnable weights of the module of shape
1041
+ :math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
1042
+ :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
1043
+ The values of these weights are sampled from
1044
+ :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
1045
+ :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
1046
+ bias (Tensor): the learnable bias of the module of shape (out_channels)
1047
+ If :attr:`bias` is ``True``, then the values of these weights are
1048
+ sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
1049
+ :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
1050
+
1051
+ Examples::
1052
+
1053
+ >>> # With square kernels and equal stride
1054
+ >>> m = nn.ConvTranspose3d(16, 33, 3, stride=2)
1055
+ >>> # non-square kernels and unequal stride and with padding
1056
+ >>> m = nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(0, 4, 2))
1057
+ >>> input = torch.randn(20, 16, 10, 50, 100)
1058
+ >>> output = m(input)
1059
+
1060
+ .. _`here`:
1061
+ https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
1062
+
1063
+ .. _`Deconvolutional Networks`:
1064
+ https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
1065
+ """
1066
+
1067
+ def __init__(
1068
+ self,
1069
+ in_channels: int,
1070
+ out_channels: int,
1071
+ kernel_size: _size_3_t,
1072
+ stride: _size_3_t = 1,
1073
+ padding: _size_3_t = 0,
1074
+ output_padding: _size_3_t = 0,
1075
+ groups: int = 1,
1076
+ bias: bool = True,
1077
+ dilation: _size_3_t = 1,
1078
+ padding_mode: str = 'zeros',
1079
+ device=None,
1080
+ dtype=None
1081
+ ) -> None:
1082
+ factory_kwargs = {'device': device, 'dtype': dtype}
1083
+ kernel_size = _triple(kernel_size)
1084
+ stride = _triple(stride)
1085
+ padding = _triple(padding)
1086
+ dilation = _triple(dilation)
1087
+ output_padding = _triple(output_padding)
1088
+ super().__init__(
1089
+ in_channels, out_channels, kernel_size, stride, padding, dilation,
1090
+ True, output_padding, groups, bias, padding_mode, **factory_kwargs)
1091
+
1092
+ def forward(self, input: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
1093
+ if self.padding_mode != 'zeros':
1094
+ raise ValueError('Only `zeros` padding mode is supported for ConvTranspose3d')
1095
+
1096
+ assert isinstance(self.padding, tuple)
1097
+ # One cannot replace List by Tuple or Sequence in "_output_padding" because
1098
+ # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
1099
+ num_spatial_dims = 3
1100
+ output_padding = self._output_padding(
1101
+ input, output_size, self.stride, self.padding, self.kernel_size, # type: ignore[arg-type]
1102
+ num_spatial_dims, self.dilation) # type: ignore[arg-type]
1103
+
1104
+ return F.conv_transpose3d(
1105
+ input, self.weight, self.bias, self.stride, self.padding,
1106
+ output_padding, self.groups, self.dilation)
1107
+
1108
+
1109
+ # TODO: Deprecate and remove the following alias `_ConvTransposeMixin`.
1110
+ #
1111
+ # `_ConvTransposeMixin` was a mixin that has since been removed. It was meant to be used
1112
+ # with `_ConvNd` to construct actual module classes that implement conv
1113
+ # transpose ops:
1114
+ #
1115
+ # class MyConvTranspose(_ConvNd, _ConvTransposeMixin):
1116
+ # ...
1117
+ #
1118
+ # In PyTorch, it has been replaced by `_ConvTransposeNd`, which is a proper
1119
+ # subclass of `_ConvNd`. However, some user code in the wild still (incorrectly)
1120
+ # uses the internal class `_ConvTransposeMixin`. Hence, we provide this alias
1121
+ # for BC, because it is cheap and easy for us to do so, even though
1122
+ # `_ConvTransposeNd` is really not a mixin anymore (but multiple inheritance as
1123
+ # above would still work).
1124
+ class _ConvTransposeMixin(_ConvTransposeNd):
1125
+ def __init__(self, *args, **kwargs):
1126
+ warnings.warn(
1127
+ "_ConvTransposeMixin is a deprecated internal class. "
1128
+ "Please consider using public APIs.")
1129
+ super().__init__(*args, **kwargs)
1130
+
1131
+
1132
+ # TODO: Conv2dLocal
1133
+ # TODO: Conv2dMap
1134
+ # TODO: ConvTranspose2dMap
1135
+
1136
+
1137
+ class _LazyConvXdMixin(LazyModuleMixin):
1138
+ groups: int
1139
+ transposed: bool
1140
+ in_channels: int
1141
+ out_channels: int
1142
+ kernel_size: Tuple[int, ...]
1143
+ weight: UninitializedParameter
1144
+ bias: UninitializedParameter
1145
+
1146
+ def reset_parameters(self) -> None:
1147
+     # has_uninitialized_params is defined in the parent class and uses a protocol on self
1148
+ if not self.has_uninitialized_params() and self.in_channels != 0: # type: ignore[misc]
1149
+ # "type:ignore[..]" is required because mypy thinks that "reset_parameters" is undefined
1150
+         # in the superclass. It is actually defined in _ConvNd, which is inherited by any class
1151
+ # that also inherits _LazyConvXdMixin
1152
+ super().reset_parameters() # type: ignore[misc]
1153
+
1154
+ # Signature of "initialize_parameters" is incompatible with the definition in supertype LazyModuleMixin
1155
+ def initialize_parameters(self, input) -> None: # type: ignore[override]
1156
+ # defined by parent class but using a protocol
1157
+ if self.has_uninitialized_params(): # type: ignore[misc]
1158
+ self.in_channels = self._get_in_channels(input)
1159
+ if self.in_channels % self.groups != 0:
1160
+ raise ValueError('in_channels must be divisible by groups')
1161
+ assert isinstance(self.weight, UninitializedParameter)
1162
+ if self.transposed:
1163
+ self.weight.materialize((
1164
+ self.in_channels, self.out_channels // self.groups, *self.kernel_size))
1165
+ else:
1166
+ self.weight.materialize((
1167
+ self.out_channels, self.in_channels // self.groups, *self.kernel_size))
1168
+ if self.bias is not None:
1169
+ assert isinstance(self.bias, UninitializedParameter)
1170
+ self.bias.materialize((self.out_channels,))
1171
+ self.reset_parameters()
1172
+
1173
+     # Function to extract in_channels from the first input.
1174
+ def _get_in_channels(self, input: Tensor) -> int:
1175
+ num_spatial_dims = self._get_num_spatial_dims()
1176
+ num_dims_no_batch = num_spatial_dims + 1 # +1 for channels dim
1177
+ num_dims_batch = num_dims_no_batch + 1
1178
+ if input.dim() not in (num_dims_no_batch, num_dims_batch):
1179
+ raise RuntimeError("Expected {}D (unbatched) or {}D (batched) input to {}, but "
1180
+ "got input of size: {}".format(num_dims_no_batch, num_dims_batch,
1181
+ self.__class__.__name__, input.shape))
1182
+ return input.shape[1] if input.dim() == num_dims_batch else input.shape[0]
1183
+
1184
+ # Function to return the number of spatial dims expected for inputs to the module.
1185
+ # This is expected to be implemented by subclasses.
1186
+ def _get_num_spatial_dims(self) -> int:
1187
+ raise NotImplementedError()
1188
+
1189
+
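Putting the mixin's pieces together: `_get_in_channels` reads the channel count from dim 1 for batched input and dim 0 for unbatched input, and `initialize_parameters` materializes the weight on the first forward call. A small sketch using one of the concrete lazy modules defined below (standard `torch` assumed):

import torch
from torch import nn

# Batched input: channels are read from dim 1.
m = nn.LazyConv1d(out_channels=4, kernel_size=3)
m(torch.randn(2, 7, 20))   # first call infers in_channels and materializes
print(m.in_channels)       # 7
print(m.weight.shape)      # torch.Size([4, 7, 3])

# Unbatched input: channels are read from dim 0.
m2 = nn.LazyConv1d(out_channels=4, kernel_size=3)
m2(torch.randn(7, 20))
print(m2.in_channels)      # 7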
1190
+ # LazyConv1d defines weight as a Tensor, but the derived class defines it as UninitializedParameter
1191
+ class LazyConv1d(_LazyConvXdMixin, Conv1d): # type: ignore[misc]
1192
+ r"""A :class:`torch.nn.Conv1d` module with lazy initialization of the ``in_channels`` argument.
1193
+
1194
+ The ``in_channels`` argument of the :class:`Conv1d` is inferred from the ``input.size(1)``.
1195
+ The attributes that will be lazily initialized are `weight` and `bias`.
1196
+
1197
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
1198
+ on lazy modules and their limitations.
1199
+
1200
+ Args:
1201
+ out_channels (int): Number of channels produced by the convolution
1202
+ kernel_size (int or tuple): Size of the convolving kernel
1203
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
1204
+ padding (int or tuple, optional): Zero-padding added to both sides of
1205
+ the input. Default: 0
1206
+ padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
1207
+ ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
1208
+ dilation (int or tuple, optional): Spacing between kernel
1209
+ elements. Default: 1
1210
+ groups (int, optional): Number of blocked connections from input
1211
+ channels to output channels. Default: 1
1212
+ bias (bool, optional): If ``True``, adds a learnable bias to the
1213
+ output. Default: ``True``
1214
+
1215
+ .. seealso:: :class:`torch.nn.Conv1d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
1216
+ """
1217
+
1218
+     # The superclass defines this variable as None. "type: ignore[..]" is required
1219
+ # since we are redefining the variable.
1220
+ cls_to_become = Conv1d # type: ignore[assignment]
1221
+
1222
+ def __init__(
1223
+ self,
1224
+ out_channels: int,
1225
+ kernel_size: _size_1_t,
1226
+ stride: _size_1_t = 1,
1227
+ padding: _size_1_t = 0,
1228
+ dilation: _size_1_t = 1,
1229
+ groups: int = 1,
1230
+ bias: bool = True,
1231
+ padding_mode: str = 'zeros',
1232
+ device=None,
1233
+ dtype=None
1234
+ ) -> None:
1235
+ factory_kwargs = {'device': device, 'dtype': dtype}
1236
+ super().__init__(
1237
+ 0,
1238
+ 0,
1239
+ kernel_size,
1240
+ stride,
1241
+ padding,
1242
+ dilation,
1243
+ groups,
1244
+ # bias is hardcoded to False to avoid creating tensor
1245
+ # that will soon be overwritten.
1246
+ False,
1247
+ padding_mode,
1248
+ **factory_kwargs
1249
+ )
1250
+ self.weight = UninitializedParameter(**factory_kwargs)
1251
+ self.out_channels = out_channels
1252
+ if bias:
1253
+ self.bias = UninitializedParameter(**factory_kwargs)
1254
+
1255
+ def _get_num_spatial_dims(self) -> int:
1256
+ return 1
1257
+
1258
+
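Once the parameters are materialized, `cls_to_become` swaps the module's class over to the plain `Conv1d`, so downstream code sees an ordinary, fully initialized module. A sketch (standard `torch` assumed):

import torch
from torch import nn

m = nn.LazyConv1d(16, kernel_size=3)
print(isinstance(m, nn.LazyConv1d))  # True

m(torch.randn(1, 8, 50))             # infers in_channels = 8

# After the first forward, the module has become a plain Conv1d.
print(type(m) is nn.Conv1d)          # True
print(m.weight.shape)                # torch.Size([16, 8, 3])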
1259
+ # LazyConv2d defines weight as a Tensor, but the derived class defines it as UninitializedParameter
1260
+ class LazyConv2d(_LazyConvXdMixin, Conv2d): # type: ignore[misc]
1261
+ r"""A :class:`torch.nn.Conv2d` module with lazy initialization of the ``in_channels`` argument.
1262
+
1263
+     The ``in_channels`` argument of the :class:`Conv2d` is inferred from the ``input.size(1)``.
1264
+ The attributes that will be lazily initialized are `weight` and `bias`.
1265
+
1266
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
1267
+ on lazy modules and their limitations.
1268
+
1269
+ Args:
1270
+ out_channels (int): Number of channels produced by the convolution
1271
+ kernel_size (int or tuple): Size of the convolving kernel
1272
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
1273
+ padding (int or tuple, optional): Zero-padding added to both sides of
1274
+ the input. Default: 0
1275
+ padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
1276
+ ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
1277
+ dilation (int or tuple, optional): Spacing between kernel
1278
+ elements. Default: 1
1279
+ groups (int, optional): Number of blocked connections from input
1280
+ channels to output channels. Default: 1
1281
+ bias (bool, optional): If ``True``, adds a learnable bias to the
1282
+ output. Default: ``True``
1283
+
1284
+ .. seealso:: :class:`torch.nn.Conv2d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
1285
+ """
1286
+
1287
+     # The superclass defines this variable as None. "type: ignore[..]" is required
1288
+ # since we are redefining the variable.
1289
+ cls_to_become = Conv2d # type: ignore[assignment]
1290
+
1291
+ def __init__(
1292
+ self,
1293
+ out_channels: int,
1294
+ kernel_size: _size_2_t,
1295
+ stride: _size_2_t = 1,
1296
+ padding: _size_2_t = 0,
1297
+ dilation: _size_2_t = 1,
1298
+ groups: int = 1,
1299
+ bias: bool = True,
1300
+ padding_mode: str = 'zeros', # TODO: refine this type
1301
+ device=None,
1302
+ dtype=None
1303
+ ) -> None:
1304
+ factory_kwargs = {'device': device, 'dtype': dtype}
1305
+ super().__init__(
1306
+ 0,
1307
+ 0,
1308
+ kernel_size,
1309
+ stride,
1310
+ padding,
1311
+ dilation,
1312
+ groups,
1313
+ # bias is hardcoded to False to avoid creating tensor
1314
+ # that will soon be overwritten.
1315
+ False,
1316
+ padding_mode,
1317
+ **factory_kwargs
1318
+ )
1319
+ self.weight = UninitializedParameter(**factory_kwargs)
1320
+ self.out_channels = out_channels
1321
+ if bias:
1322
+ self.bias = UninitializedParameter(**factory_kwargs)
1323
+
1324
+ def _get_num_spatial_dims(self) -> int:
1325
+ return 2
1326
+
1327
+
1328
+ # LazyConv3d defines weight as a Tensor, but the derived class defines it as UninitializedParameter
1329
+ class LazyConv3d(_LazyConvXdMixin, Conv3d): # type: ignore[misc]
1330
+ r"""A :class:`torch.nn.Conv3d` module with lazy initialization of the ``in_channels`` argument.
1331
+
1332
+     The ``in_channels`` argument of the :class:`Conv3d` is inferred from
1333
+ the ``input.size(1)``.
1334
+ The attributes that will be lazily initialized are `weight` and `bias`.
1335
+
1336
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
1337
+ on lazy modules and their limitations.
1338
+
1339
+ Args:
1340
+ out_channels (int): Number of channels produced by the convolution
1341
+ kernel_size (int or tuple): Size of the convolving kernel
1342
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
1343
+ padding (int or tuple, optional): Zero-padding added to both sides of
1344
+ the input. Default: 0
1345
+ padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
1346
+ ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
1347
+ dilation (int or tuple, optional): Spacing between kernel
1348
+ elements. Default: 1
1349
+ groups (int, optional): Number of blocked connections from input
1350
+ channels to output channels. Default: 1
1351
+ bias (bool, optional): If ``True``, adds a learnable bias to the
1352
+ output. Default: ``True``
1353
+
1354
+ .. seealso:: :class:`torch.nn.Conv3d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
1355
+ """
1356
+
1357
+     # The superclass defines this variable as None. "type: ignore[..]" is required
1358
+ # since we are redefining the variable.
1359
+ cls_to_become = Conv3d # type: ignore[assignment]
1360
+
1361
+ def __init__(
1362
+ self,
1363
+ out_channels: int,
1364
+ kernel_size: _size_3_t,
1365
+ stride: _size_3_t = 1,
1366
+ padding: _size_3_t = 0,
1367
+ dilation: _size_3_t = 1,
1368
+ groups: int = 1,
1369
+ bias: bool = True,
1370
+ padding_mode: str = 'zeros',
1371
+ device=None,
1372
+ dtype=None
1373
+ ) -> None:
1374
+ factory_kwargs = {'device': device, 'dtype': dtype}
1375
+ super().__init__(
1376
+ 0,
1377
+ 0,
1378
+ kernel_size,
1379
+ stride,
1380
+ padding,
1381
+ dilation,
1382
+ groups,
1383
+ # bias is hardcoded to False to avoid creating tensor
1384
+ # that will soon be overwritten.
1385
+ False,
1386
+ padding_mode,
1387
+ **factory_kwargs
1388
+ )
1389
+ self.weight = UninitializedParameter(**factory_kwargs)
1390
+ self.out_channels = out_channels
1391
+ if bias:
1392
+ self.bias = UninitializedParameter(**factory_kwargs)
1393
+
1394
+ def _get_num_spatial_dims(self) -> int:
1395
+ return 3
1396
+
1397
+
1398
+ # LazyConvTranspose1d defines weight as a Tensor, but the derived class defines it as UninitializedParameter
1399
+ class LazyConvTranspose1d(_LazyConvXdMixin, ConvTranspose1d): # type: ignore[misc]
1400
+ r"""A :class:`torch.nn.ConvTranspose1d` module with lazy initialization of the ``in_channels`` argument.
1401
+
1402
+     The ``in_channels`` argument of the :class:`ConvTranspose1d` is inferred from
1403
+ the ``input.size(1)``.
1404
+ The attributes that will be lazily initialized are `weight` and `bias`.
1405
+
1406
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
1407
+ on lazy modules and their limitations.
1408
+
1409
+ Args:
1410
+ out_channels (int): Number of channels produced by the convolution
1411
+ kernel_size (int or tuple): Size of the convolving kernel
1412
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
1413
+ padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
1414
+ will be added to both sides of the input. Default: 0
1415
+ output_padding (int or tuple, optional): Additional size added to one side
1416
+ of the output shape. Default: 0
1417
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
1418
+ bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
1419
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
1420
+
1421
+ .. seealso:: :class:`torch.nn.ConvTranspose1d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
1422
+ """
1423
+
1424
+     # The superclass defines this variable as None. "type: ignore[..]" is required
1425
+ # since we are redefining the variable.
1426
+ cls_to_become = ConvTranspose1d # type: ignore[assignment]
1427
+
1428
+ def __init__(
1429
+ self,
1430
+ out_channels: int,
1431
+ kernel_size: _size_1_t,
1432
+ stride: _size_1_t = 1,
1433
+ padding: _size_1_t = 0,
1434
+ output_padding: _size_1_t = 0,
1435
+ groups: int = 1,
1436
+ bias: bool = True,
1437
+ dilation: _size_1_t = 1,
1438
+ padding_mode: str = 'zeros',
1439
+ device=None,
1440
+ dtype=None
1441
+ ) -> None:
1442
+ factory_kwargs = {'device': device, 'dtype': dtype}
1443
+ super().__init__(
1444
+ 0,
1445
+ 0,
1446
+ kernel_size,
1447
+ stride,
1448
+ padding,
1449
+ output_padding,
1450
+ groups,
1451
+ # bias is hardcoded to False to avoid creating tensor
1452
+ # that will soon be overwritten.
1453
+ False,
1454
+ dilation,
1455
+ padding_mode,
1456
+ **factory_kwargs
1457
+ )
1458
+ self.weight = UninitializedParameter(**factory_kwargs)
1459
+ self.out_channels = out_channels
1460
+ if bias:
1461
+ self.bias = UninitializedParameter(**factory_kwargs)
1462
+
1463
+ def _get_num_spatial_dims(self) -> int:
1464
+ return 1
1465
+
1466
+
1467
+ # LazyConvTranspose2d defines weight as a Tensor, but the derived class defines it as UninitializedParameter
1468
+ class LazyConvTranspose2d(_LazyConvXdMixin, ConvTranspose2d): # type: ignore[misc]
1469
+ r"""A :class:`torch.nn.ConvTranspose2d` module with lazy initialization of the ``in_channels`` argument.
1470
+
1471
+ The ``in_channels`` argument of the :class:`ConvTranspose2d` is inferred from
1472
+ the ``input.size(1)``.
1473
+ The attributes that will be lazily initialized are `weight` and `bias`.
1474
+
1475
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
1476
+ on lazy modules and their limitations.
1477
+
1478
+ Args:
1479
+ out_channels (int): Number of channels produced by the convolution
1480
+ kernel_size (int or tuple): Size of the convolving kernel
1481
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
1482
+ padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
1483
+ will be added to both sides of each dimension in the input. Default: 0
1484
+ output_padding (int or tuple, optional): Additional size added to one side
1485
+ of each dimension in the output shape. Default: 0
1486
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
1487
+ bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
1488
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
1489
+
1490
+ .. seealso:: :class:`torch.nn.ConvTranspose2d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
1491
+ """
1492
+
1493
+     # The superclass defines this variable as None. "type: ignore[..]" is required
1494
+ # since we are redefining the variable.
1495
+ cls_to_become = ConvTranspose2d # type: ignore[assignment]
1496
+
1497
+ def __init__(
1498
+ self,
1499
+ out_channels: int,
1500
+ kernel_size: _size_2_t,
1501
+ stride: _size_2_t = 1,
1502
+ padding: _size_2_t = 0,
1503
+ output_padding: _size_2_t = 0,
1504
+ groups: int = 1,
1505
+ bias: bool = True,
1506
+         dilation: _size_2_t = 1,
1507
+ padding_mode: str = 'zeros',
1508
+ device=None,
1509
+ dtype=None
1510
+ ) -> None:
1511
+ factory_kwargs = {'device': device, 'dtype': dtype}
1512
+ super().__init__(
1513
+ 0,
1514
+ 0,
1515
+ kernel_size,
1516
+ stride,
1517
+ padding,
1518
+ output_padding,
1519
+ groups,
1520
+ # bias is hardcoded to False to avoid creating tensor
1521
+ # that will soon be overwritten.
1522
+ False,
1523
+ dilation,
1524
+ padding_mode,
1525
+ **factory_kwargs
1526
+ )
1527
+ self.weight = UninitializedParameter(**factory_kwargs)
1528
+ self.out_channels = out_channels
1529
+ if bias:
1530
+ self.bias = UninitializedParameter(**factory_kwargs)
1531
+
1532
+ def _get_num_spatial_dims(self) -> int:
1533
+ return 2
1534
+
1535
+
1536
+ # LazyConvTranspose3d defines weight as a Tensor, but the derived class defines it as UninitializedParameter
1537
+ class LazyConvTranspose3d(_LazyConvXdMixin, ConvTranspose3d): # type: ignore[misc]
1538
+ r"""A :class:`torch.nn.ConvTranspose3d` module with lazy initialization of the ``in_channels`` argument.
1539
+
1540
+ The ``in_channels`` argument of the :class:`ConvTranspose3d` is inferred from
1541
+ the ``input.size(1)``.
1542
+ The attributes that will be lazily initialized are `weight` and `bias`.
1543
+
1544
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
1545
+ on lazy modules and their limitations.
1546
+
1547
+ Args:
1548
+ out_channels (int): Number of channels produced by the convolution
1549
+ kernel_size (int or tuple): Size of the convolving kernel
1550
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
1551
+ padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
1552
+ will be added to both sides of each dimension in the input. Default: 0
1553
+ output_padding (int or tuple, optional): Additional size added to one side
1554
+ of each dimension in the output shape. Default: 0
1555
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
1556
+ bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
1557
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
1558
+
1559
+ .. seealso:: :class:`torch.nn.ConvTranspose3d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
1560
+ """
1561
+
1562
+     # The superclass defines this variable as None. "type: ignore[..]" is required
1563
+ # since we are redefining the variable.
1564
+ cls_to_become = ConvTranspose3d # type: ignore[assignment]
1565
+
1566
+ def __init__(
1567
+ self,
1568
+ out_channels: int,
1569
+ kernel_size: _size_3_t,
1570
+ stride: _size_3_t = 1,
1571
+ padding: _size_3_t = 0,
1572
+ output_padding: _size_3_t = 0,
1573
+ groups: int = 1,
1574
+ bias: bool = True,
1575
+ dilation: _size_3_t = 1,
1576
+ padding_mode: str = 'zeros',
1577
+ device=None,
1578
+ dtype=None
1579
+ ) -> None:
1580
+ factory_kwargs = {'device': device, 'dtype': dtype}
1581
+ super().__init__(
1582
+ 0,
1583
+ 0,
1584
+ kernel_size,
1585
+ stride,
1586
+ padding,
1587
+ output_padding,
1588
+ groups,
1589
+ # bias is hardcoded to False to avoid creating tensor
1590
+ # that will soon be overwritten.
1591
+ False,
1592
+ dilation,
1593
+ padding_mode,
1594
+ **factory_kwargs
1595
+ )
1596
+ self.weight = UninitializedParameter(**factory_kwargs)
1597
+ self.out_channels = out_channels
1598
+ if bias:
1599
+ self.bias = UninitializedParameter(**factory_kwargs)
1600
+
1601
+ def _get_num_spatial_dims(self) -> int:
1602
+ return 3
venv/lib/python3.10/site-packages/torch/nn/modules/distance.py ADDED
@@ -0,0 +1,89 @@
1
+ from .module import Module
2
+ from .. import functional as F
3
+
4
+ from torch import Tensor
5
+
6
+ __all__ = ['PairwiseDistance', 'CosineSimilarity']
7
+
8
+ class PairwiseDistance(Module):
9
+ r"""
10
+ Computes the pairwise distance between input vectors, or between columns of input matrices.
11
+
12
+ Distances are computed using ``p``-norm, with constant ``eps`` added to avoid division by zero
13
+ if ``p`` is negative, i.e.:
14
+
15
+ .. math ::
16
+ \mathrm{dist}\left(x, y\right) = \left\Vert x-y + \epsilon e \right\Vert_p,
17
+
18
+     where :math:`e` is the vector of ones and the ``p``-norm is given by
19
+
20
+ .. math ::
21
+ \Vert x \Vert _p = \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}.
22
+
23
+ Args:
24
+ p (real, optional): the norm degree. Can be negative. Default: 2
25
+ eps (float, optional): Small value to avoid division by zero.
26
+ Default: 1e-6
27
+ keepdim (bool, optional): Determines whether or not to keep the vector dimension.
28
+ Default: False
29
+ Shape:
30
+ - Input1: :math:`(N, D)` or :math:`(D)` where `N = batch dimension` and `D = vector dimension`
31
+ - Input2: :math:`(N, D)` or :math:`(D)`, same shape as the Input1
32
+ - Output: :math:`(N)` or :math:`()` based on input dimension.
33
+ If :attr:`keepdim` is ``True``, then :math:`(N, 1)` or :math:`(1)` based on input dimension.
34
+
35
+ Examples::
36
+ >>> pdist = nn.PairwiseDistance(p=2)
37
+ >>> input1 = torch.randn(100, 128)
38
+ >>> input2 = torch.randn(100, 128)
39
+ >>> output = pdist(input1, input2)
40
+ """
41
+
42
+ __constants__ = ['norm', 'eps', 'keepdim']
43
+ norm: float
44
+ eps: float
45
+ keepdim: bool
46
+
47
+ def __init__(self, p: float = 2., eps: float = 1e-6, keepdim: bool = False) -> None:
48
+ super().__init__()
49
+ self.norm = p
50
+ self.eps = eps
51
+ self.keepdim = keepdim
52
+
53
+ def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
54
+ return F.pairwise_distance(x1, x2, self.norm, self.eps, self.keepdim)
55
+
56
+
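A sketch (standard `torch` assumed) checking the module against the documented formula :math:`\Vert x_1 - x_2 + \epsilon e \Vert_p`; the two should agree up to floating point:

import torch
from torch import nn

pdist = nn.PairwiseDistance(p=2, eps=1e-6)
x1 = torch.randn(5, 8)
x2 = torch.randn(5, 8)

# Manual version of the documented formula: || x1 - x2 + eps * e ||_2
manual = (x1 - x2 + 1e-6).norm(dim=1)
print(torch.allclose(pdist(x1, x2), manual))  # True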
57
+ class CosineSimilarity(Module):
58
+ r"""Returns cosine similarity between :math:`x_1` and :math:`x_2`, computed along `dim`.
59
+
60
+ .. math ::
61
+ \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}.
62
+
63
+ Args:
64
+ dim (int, optional): Dimension where cosine similarity is computed. Default: 1
65
+ eps (float, optional): Small value to avoid division by zero.
66
+ Default: 1e-8
67
+ Shape:
68
+ - Input1: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`
69
+ - Input2: :math:`(\ast_1, D, \ast_2)`, same number of dimensions as x1, matching x1 size at dimension `dim`,
70
+ and broadcastable with x1 at other dimensions.
71
+ - Output: :math:`(\ast_1, \ast_2)`
72
+ Examples::
73
+ >>> input1 = torch.randn(100, 128)
74
+ >>> input2 = torch.randn(100, 128)
75
+ >>> cos = nn.CosineSimilarity(dim=1, eps=1e-6)
76
+ >>> output = cos(input1, input2)
77
+ """
78
+
79
+ __constants__ = ['dim', 'eps']
80
+ dim: int
81
+ eps: float
82
+
83
+ def __init__(self, dim: int = 1, eps: float = 1e-8) -> None:
84
+ super().__init__()
85
+ self.dim = dim
86
+ self.eps = eps
87
+
88
+ def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
89
+ return F.cosine_similarity(x1, x2, self.dim, self.eps)
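Similarly, a sketch checking `CosineSimilarity` against its documented formula; with random Gaussian inputs the norms are far from ``eps``, so the clamp is inactive and the results should match up to floating point:

import torch
from torch import nn

cos = nn.CosineSimilarity(dim=1, eps=1e-8)
x1 = torch.randn(5, 8)
x2 = torch.randn(5, 8)

# Manual version: (x1 . x2) / max(||x1|| * ||x2||, eps), along dim=1.
num = (x1 * x2).sum(dim=1)
den = (x1.norm(dim=1) * x2.norm(dim=1)).clamp(min=1e-8)
print(torch.allclose(cos(x1, x2), num / den))  # True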
venv/lib/python3.10/site-packages/torch/nn/modules/dropout.py ADDED
@@ -0,0 +1,294 @@
1
+ from .module import Module
2
+ from .. import functional as F
3
+
4
+ from torch import Tensor
5
+
6
+ __all__ = ['Dropout', 'Dropout1d', 'Dropout2d', 'Dropout3d', 'AlphaDropout', 'FeatureAlphaDropout']
7
+
8
+ class _DropoutNd(Module):
9
+ __constants__ = ['p', 'inplace']
10
+ p: float
11
+ inplace: bool
12
+
13
+ def __init__(self, p: float = 0.5, inplace: bool = False) -> None:
14
+ super().__init__()
15
+ if p < 0 or p > 1:
16
+ raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
17
+ self.p = p
18
+ self.inplace = inplace
19
+
20
+ def extra_repr(self) -> str:
21
+ return f'p={self.p}, inplace={self.inplace}'
22
+
23
+
24
+ class Dropout(_DropoutNd):
25
+ r"""During training, randomly zeroes some of the elements of the input tensor with probability :attr:`p`.
26
+
27
+ The zeroed elements are chosen independently for each forward call and are sampled from a Bernoulli distribution.
28
+
29
+ Each channel will be zeroed out independently on every forward call.
30
+
31
+ This has proven to be an effective technique for regularization and
32
+ preventing the co-adaptation of neurons as described in the paper
33
+ `Improving neural networks by preventing co-adaptation of feature
34
+ detectors`_ .
35
+
36
+ Furthermore, the outputs are scaled by a factor of :math:`\frac{1}{1-p}` during
37
+ training. This means that during evaluation the module simply computes an
38
+ identity function.
39
+
40
+ Args:
41
+ p: probability of an element to be zeroed. Default: 0.5
42
+ inplace: If set to ``True``, will do this operation in-place. Default: ``False``
43
+
44
+ Shape:
45
+ - Input: :math:`(*)`. Input can be of any shape
46
+ - Output: :math:`(*)`. Output is of the same shape as input
47
+
48
+ Examples::
49
+
50
+ >>> m = nn.Dropout(p=0.2)
51
+ >>> input = torch.randn(20, 16)
52
+ >>> output = m(input)
53
+
54
+ .. _Improving neural networks by preventing co-adaptation of feature
55
+ detectors: https://arxiv.org/abs/1207.0580
56
+ """
57
+
58
+ def forward(self, input: Tensor) -> Tensor:
59
+ return F.dropout(input, self.p, self.training, self.inplace)
60
+
61
+
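A sketch (standard `torch` assumed) of the two behaviors described above: the :math:`\frac{1}{1-p}` rescaling during training, and the identity function at evaluation time:

import torch
from torch import nn

m = nn.Dropout(p=0.25)
x = torch.ones(10000)

m.train()
y = m(x)
# Survivors are scaled by 1 / (1 - p), so values are 0 or 1 / 0.75.
print(y.unique())   # tensor([0.0000, 1.3333])
print(y.mean())     # close to 1.0: the expected value is preserved

m.eval()
print(torch.equal(m(x), x))  # True: identity at evaluation time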
62
+ class Dropout1d(_DropoutNd):
63
+ r"""Randomly zero out entire channels.
64
+
65
+ A channel is a 1D feature map,
66
+ e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
67
+ batched input is a 1D tensor :math:`\text{input}[i, j]`.
68
+
69
+ Each channel will be zeroed out independently on every forward call with
70
+ probability :attr:`p` using samples from a Bernoulli distribution.
71
+
72
+ Usually the input comes from :class:`nn.Conv1d` modules.
73
+
74
+ As described in the paper
75
+ `Efficient Object Localization Using Convolutional Networks`_ ,
76
+ if adjacent pixels within feature maps are strongly correlated
77
+ (as is normally the case in early convolution layers) then i.i.d. dropout
78
+ will not regularize the activations and will otherwise just result
79
+ in an effective learning rate decrease.
80
+
81
+ In this case, :func:`nn.Dropout1d` will help promote independence between
82
+ feature maps and should be used instead.
83
+
84
+ Args:
85
+         p (float, optional): probability of an element to be zeroed.
86
+ inplace (bool, optional): If set to ``True``, will do this operation
87
+ in-place
88
+
89
+ Shape:
90
+ - Input: :math:`(N, C, L)` or :math:`(C, L)`.
91
+ - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input).
92
+
93
+ Examples::
94
+
95
+ >>> m = nn.Dropout1d(p=0.2)
96
+ >>> input = torch.randn(20, 16, 32)
97
+ >>> output = m(input)
98
+
99
+ .. _Efficient Object Localization Using Convolutional Networks:
100
+ https://arxiv.org/abs/1411.4280
101
+ """
102
+
103
+ def forward(self, input: Tensor) -> Tensor:
104
+ return F.dropout1d(input, self.p, self.training, self.inplace)
105
+
106
+
107
+ class Dropout2d(_DropoutNd):
108
+ r"""Randomly zero out entire channels.
109
+
110
+ A channel is a 2D feature map,
111
+ e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
112
+ batched input is a 2D tensor :math:`\text{input}[i, j]`.
113
+
114
+ Each channel will be zeroed out independently on every forward call with
115
+ probability :attr:`p` using samples from a Bernoulli distribution.
116
+
117
+ Usually the input comes from :class:`nn.Conv2d` modules.
118
+
119
+ As described in the paper
120
+ `Efficient Object Localization Using Convolutional Networks`_ ,
121
+ if adjacent pixels within feature maps are strongly correlated
122
+ (as is normally the case in early convolution layers) then i.i.d. dropout
123
+ will not regularize the activations and will otherwise just result
124
+ in an effective learning rate decrease.
125
+
126
+ In this case, :func:`nn.Dropout2d` will help promote independence between
127
+ feature maps and should be used instead.
128
+
129
+ Args:
130
+         p (float, optional): probability of an element to be zeroed.
131
+ inplace (bool, optional): If set to ``True``, will do this operation
132
+ in-place
133
+
134
+ .. warning ::
135
+ Due to historical reasons, this class will perform 1D channel-wise dropout
136
+ for 3D inputs (as done by :class:`nn.Dropout1d`). Thus, it currently does NOT
137
+ support inputs without a batch dimension of shape :math:`(C, H, W)`. This
138
+ behavior will change in a future release to interpret 3D inputs as no-batch-dim
139
+ inputs. To maintain the old behavior, switch to :class:`nn.Dropout1d`.
140
+
141
+ Shape:
142
+ - Input: :math:`(N, C, H, W)` or :math:`(N, C, L)`.
143
+ - Output: :math:`(N, C, H, W)` or :math:`(N, C, L)` (same shape as input).
144
+
145
+ Examples::
146
+
147
+ >>> m = nn.Dropout2d(p=0.2)
148
+ >>> input = torch.randn(20, 16, 32, 32)
149
+ >>> output = m(input)
150
+
151
+ .. _Efficient Object Localization Using Convolutional Networks:
152
+ https://arxiv.org/abs/1411.4280
153
+ """
154
+
155
+ def forward(self, input: Tensor) -> Tensor:
156
+ return F.dropout2d(input, self.p, self.training, self.inplace)
157
+
158
+
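A sketch of the channel-wise behavior described above: `Dropout2d` drops or keeps whole :math:`(H, W)` feature maps together, so each per-channel sum is all-or-nothing:

import torch
from torch import nn

m = nn.Dropout2d(p=0.5)
m.train()
y = m(torch.ones(1, 8, 4, 4))

# Each of the 8 channel sums is either 0 (dropped) or
# 16 * (1 / (1 - 0.5)) = 32 (kept and rescaled); never anything in between.
print(y.sum(dim=(2, 3)))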
159
+ class Dropout3d(_DropoutNd):
160
+ r"""Randomly zero out entire channels.
161
+
162
+ A channel is a 3D feature map,
163
+ e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
164
+ batched input is a 3D tensor :math:`\text{input}[i, j]`.
165
+
166
+ Each channel will be zeroed out independently on every forward call with
167
+ probability :attr:`p` using samples from a Bernoulli distribution.
168
+
169
+ Usually the input comes from :class:`nn.Conv3d` modules.
170
+
171
+ As described in the paper
172
+ `Efficient Object Localization Using Convolutional Networks`_ ,
173
+ if adjacent pixels within feature maps are strongly correlated
174
+ (as is normally the case in early convolution layers) then i.i.d. dropout
175
+ will not regularize the activations and will otherwise just result
176
+ in an effective learning rate decrease.
177
+
178
+ In this case, :func:`nn.Dropout3d` will help promote independence between
179
+ feature maps and should be used instead.
180
+
181
+ Args:
182
+ p (float, optional): probability of an element to be zeroed.
183
+ inplace (bool, optional): If set to ``True``, will do this operation
184
+ in-place
185
+
186
+ Shape:
187
+ - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`.
188
+ - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input).
189
+
190
+ Examples::
191
+
192
+ >>> m = nn.Dropout3d(p=0.2)
193
+ >>> input = torch.randn(20, 16, 4, 32, 32)
194
+ >>> output = m(input)
195
+
196
+ .. _Efficient Object Localization Using Convolutional Networks:
197
+ https://arxiv.org/abs/1411.4280
198
+ """
199
+
200
+ def forward(self, input: Tensor) -> Tensor:
201
+ return F.dropout3d(input, self.p, self.training, self.inplace)
202
+
203
+
204
+ class AlphaDropout(_DropoutNd):
205
+ r"""Applies Alpha Dropout over the input.
206
+
207
+ Alpha Dropout is a type of Dropout that maintains the self-normalizing
208
+ property.
209
+ For an input with zero mean and unit standard deviation, the output of
210
+ Alpha Dropout maintains the original mean and standard deviation of the
211
+ input.
212
+ Alpha Dropout goes hand-in-hand with SELU activation function, which ensures
213
+ that the outputs have zero mean and unit standard deviation.
214
+
215
+ During training, it randomly masks some of the elements of the input
216
+ tensor with probability *p* using samples from a bernoulli distribution.
217
+ The elements to masked are randomized on every forward call, and scaled
218
+ and shifted to maintain zero mean and unit standard deviation.
219
+
220
+ During evaluation the module simply computes an identity function.
221
+
222
+ More details can be found in the paper `Self-Normalizing Neural Networks`_ .
223
+
224
+ Args:
225
+ p (float): probability of an element to be dropped. Default: 0.5
226
+ inplace (bool, optional): If set to ``True``, will do this operation
227
+ in-place
228
+
229
+ Shape:
230
+ - Input: :math:`(*)`. Input can be of any shape
231
+ - Output: :math:`(*)`. Output is of the same shape as input
232
+
233
+ Examples::
234
+
235
+ >>> m = nn.AlphaDropout(p=0.2)
236
+ >>> input = torch.randn(20, 16)
237
+ >>> output = m(input)
238
+
239
+ .. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
240
+ """
241
+
242
+ def forward(self, input: Tensor) -> Tensor:
243
+ return F.alpha_dropout(input, self.p, self.training)
244
+
245
+
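A sketch (standard `torch` assumed) of the self-normalizing property described above: after `AlphaDropout`, a roughly standard-normal input keeps approximately zero mean and unit standard deviation:

import torch
from torch import nn

torch.manual_seed(0)
m = nn.AlphaDropout(p=0.2)
m.train()

x = torch.randn(100_000)   # ~ zero mean, unit std
y = m(x)

# Dropped units are set to the SELU saturation value and the output is
# affinely rescaled, so the first two moments are approximately preserved.
print(y.mean().item(), y.std().item())  # both close to 0 and 1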
246
+ class FeatureAlphaDropout(_DropoutNd):
247
+ r"""Randomly masks out entire channels.
248
+
249
+ A channel is a feature map,
250
+ e.g. the :math:`j`-th channel of the :math:`i`-th sample in the batch input
251
+ is a tensor :math:`\text{input}[i, j]` of the input tensor). Instead of
252
+ setting activations to zero, as in regular Dropout, the activations are set
253
+ to the negative saturation value of the SELU activation function. More details
254
+ can be found in the paper `Self-Normalizing Neural Networks`_ .
255
+
256
+ Each element will be masked independently for each sample on every forward
257
+ call with probability :attr:`p` using samples from a Bernoulli distribution.
258
+ The elements to be masked are randomized on every forward call, and scaled
259
+ and shifted to maintain zero mean and unit variance.
260
+
261
+ Usually the input comes from :class:`nn.AlphaDropout` modules.
262
+
263
+ As described in the paper
264
+ `Efficient Object Localization Using Convolutional Networks`_ ,
265
+ if adjacent pixels within feature maps are strongly correlated
266
+ (as is normally the case in early convolution layers) then i.i.d. dropout
267
+ will not regularize the activations and will otherwise just result
268
+ in an effective learning rate decrease.
269
+
270
+ In this case, :func:`nn.AlphaDropout` will help promote independence between
271
+ feature maps and should be used instead.
272
+
273
+ Args:
274
+ p (float, optional): probability of an element to be zeroed. Default: 0.5
275
+ inplace (bool, optional): If set to ``True``, will do this operation
276
+ in-place
277
+
278
+ Shape:
279
+ - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`.
280
+ - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input).
281
+
282
+ Examples::
283
+
284
+ >>> m = nn.FeatureAlphaDropout(p=0.2)
285
+ >>> input = torch.randn(20, 16, 4, 32, 32)
286
+ >>> output = m(input)
287
+
288
+ .. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
289
+ .. _Efficient Object Localization Using Convolutional Networks:
290
+ https://arxiv.org/abs/1411.4280
291
+ """
292
+
293
+ def forward(self, input: Tensor) -> Tensor:
294
+ return F.feature_alpha_dropout(input, self.p, self.training)
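+
+ # Editorial sketch (not part of the upstream file): FeatureAlphaDropout fills
+ # a masked channel with a single constant (the scaled SELU negative saturation
+ # value) rather than zeros, so the per-channel standard deviation reveals
+ # which channels were masked.
+ import torch
+ import torch.nn as nn
+
+ m = nn.FeatureAlphaDropout(p=0.5)
+ m.train()
+ x = torch.randn(1, 6, 2, 3, 3)   # (N, C, D, H, W)
+ y = m(x)
+ print(y.flatten(2).std(dim=2))   # ~0 for masked channels, >0 for kept ones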
venv/lib/python3.10/site-packages/torch/nn/modules/fold.py ADDED
@@ -0,0 +1,303 @@
1
+ from .module import Module
2
+ from .. import functional as F
3
+
4
+ from torch import Tensor
5
+ from ..common_types import _size_any_t
6
+
7
+ __all__ = ['Fold', 'Unfold']
8
+
9
+ class Fold(Module):
10
+ r"""Combines an array of sliding local blocks into a large containing tensor.
11
+
12
+ Consider a batched :attr:`input` tensor containing sliding local blocks,
13
+ e.g., patches of images, of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`,
14
+ where :math:`N` is batch dimension, :math:`C \times \prod(\text{kernel\_size})`
15
+ is the number of values within a block (a block has :math:`\prod(\text{kernel\_size})`
16
+ spatial locations each containing a :math:`C`-channeled vector), and
17
+ :math:`L` is the total number of blocks. (This is exactly the
18
+ same specification as the output shape of :class:`~torch.nn.Unfold`.) This
19
+ operation combines these local blocks into the large :attr:`output` tensor
20
+ of shape :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)`
21
+ by summing the overlapping values. Similar to :class:`~torch.nn.Unfold`, the
22
+ arguments must satisfy
23
+
24
+ .. math::
25
+ L = \prod_d \left\lfloor\frac{\text{output\_size}[d] + 2 \times \text{padding}[d] %
26
+ - \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,
27
+
28
+ where :math:`d` is over all spatial dimensions.
29
+
30
+ * :attr:`output_size` describes the spatial shape of the large containing
31
+ tensor of the sliding local blocks. It is useful to resolve the ambiguity
32
+ when multiple input shapes map to the same number of sliding blocks, e.g.,
33
+ with ``stride > 1``.
34
+
35
+ The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify
36
+ how the sliding blocks are retrieved.
37
+
38
+ * :attr:`stride` controls the stride for the sliding blocks.
39
+
40
+ * :attr:`padding` controls the amount of implicit zero padding added on both
41
+ sides, :attr:`padding` points for each dimension, before
42
+ reshaping.
43
+
44
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
45
+ It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
46
+
47
+ Args:
48
+ output_size (int or tuple): the shape of the spatial dimensions of the
49
+ output (i.e., ``output.sizes()[2:]``)
50
+ kernel_size (int or tuple): the size of the sliding blocks
51
+ dilation (int or tuple, optional): a parameter that controls the
52
+ stride of elements within the
53
+ neighborhood. Default: 1
54
+ padding (int or tuple, optional): implicit zero padding to be added on
55
+ both sides of input. Default: 0
56
+ stride (int or tuple, optional): the stride of the sliding blocks in the input
57
+ spatial dimensions. Default: 1
58
+
59
+ * If :attr:`output_size`, :attr:`kernel_size`, :attr:`dilation`,
60
+ :attr:`padding` or :attr:`stride` is an int or a tuple of length 1 then
61
+ their values will be replicated across all spatial dimensions.
62
+
63
+ * For the case of two output spatial dimensions this operation is sometimes
64
+ called ``col2im``.
65
+
66
+ .. note::
67
+ :class:`~torch.nn.Fold` calculates each combined value in the resulting
68
+ large tensor by summing all values from all containing blocks.
69
+ :class:`~torch.nn.Unfold` extracts the values in the local blocks by
70
+ copying from the large tensor. So, if the blocks overlap, they are not
71
+ inverses of each other.
72
+
73
+ In general, folding and unfolding operations are related as
74
+ follows. Consider :class:`~torch.nn.Fold` and
75
+ :class:`~torch.nn.Unfold` instances created with the same
76
+ parameters:
77
+
78
+ >>> fold_params = dict(kernel_size=..., dilation=..., padding=..., stride=...)
79
+ >>> fold = nn.Fold(output_size=..., **fold_params)
80
+ >>> unfold = nn.Unfold(**fold_params)
81
+
82
+ Then for any (supported) ``input`` tensor the following
83
+ equality holds:
84
+
85
+ ::
86
+
87
+ fold(unfold(input)) == divisor * input
88
+
89
+ where ``divisor`` is a tensor that depends only on the shape
90
+ and dtype of the ``input``:
91
+
92
+ >>> # xdoctest: +SKIP
93
+ >>> input_ones = torch.ones(input.shape, dtype=input.dtype)
94
+ >>> divisor = fold(unfold(input_ones))
95
+
96
+ When the ``divisor`` tensor contains no zero elements, then
97
+ ``fold`` and ``unfold`` operations are inverses of each
98
+ other (up to constant divisor).
99
+
100
+ .. warning::
101
+ Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported.
102
+
103
+ Shape:
104
+ - Input: :math:`(N, C \times \prod(\text{kernel\_size}), L)` or :math:`(C \times \prod(\text{kernel\_size}), L)`
105
+ - Output: :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)`
106
+ or :math:`(C, \text{output\_size}[0], \text{output\_size}[1], \dots)` as described above
107
+
108
+ Examples::
109
+
110
+ >>> fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 2))
111
+ >>> input = torch.randn(1, 3 * 2 * 2, 12)
112
+ >>> output = fold(input)
113
+ >>> output.size()
114
+ torch.Size([1, 3, 4, 5])
115
+
116
+ .. _link:
117
+ https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
118
+
119
+ """
120
+
121
+ __constants__ = ['output_size', 'kernel_size', 'dilation', 'padding',
122
+ 'stride']
123
+ output_size: _size_any_t
124
+ kernel_size: _size_any_t
125
+ dilation: _size_any_t
126
+ padding: _size_any_t
127
+ stride: _size_any_t
128
+
129
+ def __init__(
130
+ self,
131
+ output_size: _size_any_t,
132
+ kernel_size: _size_any_t,
133
+ dilation: _size_any_t = 1,
134
+ padding: _size_any_t = 0,
135
+ stride: _size_any_t = 1
136
+ ) -> None:
137
+ super().__init__()
138
+ self.output_size = output_size
139
+ self.kernel_size = kernel_size
140
+ self.dilation = dilation
141
+ self.padding = padding
142
+ self.stride = stride
143
+
144
+ def forward(self, input: Tensor) -> Tensor:
145
+ return F.fold(input, self.output_size, self.kernel_size, self.dilation,
146
+ self.padding, self.stride)
147
+
148
+ def extra_repr(self) -> str:
149
+ return 'output_size={output_size}, kernel_size={kernel_size}, ' \
150
+ 'dilation={dilation}, padding={padding}, stride={stride}'.format(
151
+ **self.__dict__
152
+ )
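+
+ # Editorial sketch (not part of the upstream file): verifying the
+ # fold(unfold(input)) == divisor * input identity quoted in the docstring,
+ # where divisor counts how many blocks overlap each output location.
+ import torch
+ import torch.nn as nn
+
+ fold_params = dict(kernel_size=(2, 2), stride=1)
+ fold = nn.Fold(output_size=(4, 4), **fold_params)
+ unfold = nn.Unfold(**fold_params)
+
+ x = torch.randn(1, 3, 4, 4)
+ divisor = fold(unfold(torch.ones_like(x)))
+ torch.testing.assert_close(fold(unfold(x)), divisor * x)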
153
+
154
+
155
+ class Unfold(Module):
156
+ r"""Extracts sliding local blocks from a batched input tensor.
157
+
158
+ Consider a batched :attr:`input` tensor of shape :math:`(N, C, *)`,
159
+ where :math:`N` is the batch dimension, :math:`C` is the channel dimension,
160
+ and :math:`*` represent arbitrary spatial dimensions. This operation flattens
161
+ each sliding :attr:`kernel_size`-sized block within the spatial dimensions
162
+ of :attr:`input` into a column (i.e., last dimension) of a 3-D :attr:`output`
163
+ tensor of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`, where
164
+ :math:`C \times \prod(\text{kernel\_size})` is the total number of values
165
+ within each block (a block has :math:`\prod(\text{kernel\_size})` spatial
166
+ locations each containing a :math:`C`-channeled vector), and :math:`L` is
167
+ the total number of such blocks:
168
+
169
+ .. math::
170
+ L = \prod_d \left\lfloor\frac{\text{spatial\_size}[d] + 2 \times \text{padding}[d] %
171
+ - \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,
172
+
173
+ where :math:`\text{spatial\_size}` is formed by the spatial dimensions
174
+ of :attr:`input` (:math:`*` above), and :math:`d` is over all spatial
175
+ dimensions.
176
+
177
+ Therefore, indexing :attr:`output` at the last dimension (column dimension)
178
+ gives all values within a certain block.
179
+
180
+ The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify
181
+ how the sliding blocks are retrieved.
182
+
183
+ * :attr:`stride` controls the stride for the sliding blocks.
184
+
185
+ * :attr:`padding` controls the amount of implicit zero padding added on both
186
+ sides, :attr:`padding` points for each dimension, before
187
+ reshaping.
188
+
189
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
190
+ It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
191
+
192
+ Args:
193
+ kernel_size (int or tuple): the size of the sliding blocks
194
+ dilation (int or tuple, optional): a parameter that controls the
195
+ stride of elements within the
196
+ neighborhood. Default: 1
197
+ padding (int or tuple, optional): implicit zero padding to be added on
198
+ both sides of input. Default: 0
199
+ stride (int or tuple, optional): the stride of the sliding blocks in the input
200
+ spatial dimensions. Default: 1
201
+
202
+ * If :attr:`kernel_size`, :attr:`dilation`, :attr:`padding` or
203
+ :attr:`stride` is an int or a tuple of length 1, their values will be
204
+ replicated across all spatial dimensions.
205
+
206
+ * For the case of two input spatial dimensions this operation is sometimes
207
+ called ``im2col``.
208
+
209
+ .. note::
210
+ :class:`~torch.nn.Fold` calculates each combined value in the resulting
211
+ large tensor by summing all values from all containing blocks.
212
+ :class:`~torch.nn.Unfold` extracts the values in the local blocks by
213
+ copying from the large tensor. So, if the blocks overlap, they are not
214
+ inverses of each other.
215
+
216
+ In general, folding and unfolding operations are related as
217
+ follows. Consider :class:`~torch.nn.Fold` and
218
+ :class:`~torch.nn.Unfold` instances created with the same
219
+ parameters:
220
+
221
+ >>> fold_params = dict(kernel_size=..., dilation=..., padding=..., stride=...)
222
+ >>> fold = nn.Fold(output_size=..., **fold_params)
223
+ >>> unfold = nn.Unfold(**fold_params)
224
+
225
+ Then for any (supported) ``input`` tensor the following
226
+ equality holds:
227
+
228
+ ::
229
+
230
+ fold(unfold(input)) == divisor * input
231
+
232
+ where ``divisor`` is a tensor that depends only on the shape
233
+ and dtype of the ``input``:
234
+
235
+ >>> # xdoctest: +SKIP
236
+ >>> input_ones = torch.ones(input.shape, dtype=input.dtype)
237
+ >>> divisor = fold(unfold(input_ones))
238
+
239
+ When the ``divisor`` tensor contains no zero elements, then
240
+ ``fold`` and ``unfold`` operations are inverses of each
241
+ other (up to constant divisor).
242
+
243
+ .. warning::
244
+ Currently, only 4-D input tensors (batched image-like tensors) are
245
+ supported.
246
+
247
+ Shape:
248
+ - Input: :math:`(N, C, *)`
249
+ - Output: :math:`(N, C \times \prod(\text{kernel\_size}), L)` as described above
250
+
251
+ Examples::
252
+
253
+ >>> unfold = nn.Unfold(kernel_size=(2, 3))
254
+ >>> input = torch.randn(2, 5, 3, 4)
255
+ >>> output = unfold(input)
256
+ >>> # each patch contains 30 values (2x3=6 vectors, each of 5 channels)
257
+ >>> # 4 blocks (2x3 kernels) in total in the 3x4 input
258
+ >>> output.size()
259
+ torch.Size([2, 30, 4])
260
+
261
+ >>> # xdoctest: +IGNORE_WANT
262
+ >>> # Convolution is equivalent with Unfold + Matrix Multiplication + Fold (or view to output shape)
263
+ >>> inp = torch.randn(1, 3, 10, 12)
264
+ >>> w = torch.randn(2, 3, 4, 5)
265
+ >>> inp_unf = torch.nn.functional.unfold(inp, (4, 5))
266
+ >>> out_unf = inp_unf.transpose(1, 2).matmul(w.view(w.size(0), -1).t()).transpose(1, 2)
267
+ >>> out = torch.nn.functional.fold(out_unf, (7, 8), (1, 1))
268
+ >>> # or equivalently (and avoiding a copy),
269
+ >>> # out = out_unf.view(1, 2, 7, 8)
270
+ >>> (torch.nn.functional.conv2d(inp, w) - out).abs().max()
271
+ tensor(1.9073e-06)
272
+
273
+ .. _link:
274
+ https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
275
+
276
+ """
277
+
278
+ __constants__ = ['kernel_size', 'dilation', 'padding', 'stride']
279
+ kernel_size: _size_any_t
280
+ dilation: _size_any_t
281
+ padding: _size_any_t
282
+ stride: _size_any_t
283
+
284
+ def __init__(
285
+ self,
286
+ kernel_size: _size_any_t,
287
+ dilation: _size_any_t = 1,
288
+ padding: _size_any_t = 0,
289
+ stride: _size_any_t = 1
290
+ ) -> None:
291
+ super().__init__()
292
+ self.kernel_size = kernel_size
293
+ self.dilation = dilation
294
+ self.padding = padding
295
+ self.stride = stride
296
+
297
+ def forward(self, input: Tensor) -> Tensor:
298
+ return F.unfold(input, self.kernel_size, self.dilation,
299
+ self.padding, self.stride)
300
+
301
+ def extra_repr(self) -> str:
302
+ return 'kernel_size={kernel_size}, dilation={dilation}, padding={padding},' \
303
+ ' stride={stride}'.format(**self.__dict__)
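+
+ # Editorial sketch (not part of the upstream file): computing the number of
+ # blocks L from the docstring formula; num_blocks is a hypothetical helper,
+ # not a torch API. For the example above (3x4 spatial input, kernel (2, 3),
+ # stride 1, no padding or dilation) it yields L = 2 * 2 = 4.
+ import math
+
+ def num_blocks(spatial, kernel, padding=0, dilation=1, stride=1):
+     L = 1
+     for size, k in zip(spatial, kernel):
+         L *= math.floor((size + 2 * padding - dilation * (k - 1) - 1) / stride) + 1
+     return L
+
+ print(num_blocks((3, 4), (2, 3)))  # 4, matching torch.Size([2, 30, 4]) above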
venv/lib/python3.10/site-packages/torch/nn/modules/lazy.py ADDED
@@ -0,0 +1,265 @@
1
+ import itertools
2
+ import warnings
3
+ from typing import Protocol, Optional, Type, Any
4
+
5
+ import torch
6
+ from ..parameter import is_lazy
7
+
8
+ __all__ = ['LazyModuleMixin']
9
+
10
+ class _LazyProtocol(Protocol):
11
+ """This class is used to avoid errors with mypy checks for the attributes in a mixin.
12
+
13
+ https://mypy.readthedocs.io/en/latest/more_types.html#mixin-classes
14
+ """
15
+
16
+ def _register_load_state_dict_pre_hook(self, hook):
17
+ ...
18
+
19
+ def register_forward_pre_hook(self, hook, *, prepend=False, with_kwargs=False):
20
+ ...
21
+
22
+ def _lazy_load_hook(
23
+ self, state_dict, prefix, local_metadata, strict,
24
+ missing_keys, unexpected_keys, error_msgs):
25
+ ...
26
+
27
+ def _get_name(self):
28
+ ...
29
+
30
+ def _infer_parameters(self, module, input):
31
+ ...
32
+
33
+ @property
34
+ def _parameters(self):
35
+ ...
36
+
37
+ @property
38
+ def _buffers(self):
39
+ ...
40
+
41
+ @property
42
+ def _non_persistent_buffers_set(self):
43
+ ...
44
+
45
+ @property
46
+ def _load_hook(self):
47
+ ...
48
+
49
+ @property
50
+ def _initialize_hook(self):
51
+ ...
52
+
53
+
54
+ class LazyModuleMixin:
55
+ r"""A mixin for modules that lazily initialize parameters, also known as "lazy modules".
56
+
57
+ .. warning::
58
+ Lazy modules are an experimental new feature under active development,
59
+ and their API is likely to change.
60
+
61
+ Modules that lazily initialize parameters, or "lazy modules",
62
+ derive the shapes of their parameters from the first input(s)
63
+ to their forward method. Until that first forward they contain
64
+ :class:`torch.nn.UninitializedParameter` s that should not be accessed
65
+ or used, and afterward they contain regular :class:`torch.nn.Parameter` s.
66
+ Lazy modules are convenient since they don't require computing some
67
+ module arguments, like the :attr:`in_features` argument of a
68
+ typical :class:`torch.nn.Linear`.
69
+
70
+ After construction, networks with lazy modules should first
71
+ be converted to the desired dtype and placed on the expected device.
72
+ This is because lazy modules only perform shape inference, so the usual dtype
73
+ and device placement behavior applies.
74
+ The lazy modules should then perform "dry runs" to initialize all the components in the module.
75
+ These "dry runs" send inputs of the correct size, dtype, and device through
76
+ the network and to each one of its lazy modules. After this the network can be used as usual.
77
+
78
+ >>> # xdoctest: +SKIP
79
+ >>> class LazyMLP(torch.nn.Module):
80
+ ... def __init__(self):
81
+ ... super().__init__()
82
+ ... self.fc1 = torch.nn.LazyLinear(10)
83
+ ... self.relu1 = torch.nn.ReLU()
84
+ ... self.fc2 = torch.nn.LazyLinear(1)
85
+ ... self.relu2 = torch.nn.ReLU()
86
+ ...
87
+ ... def forward(self, input):
88
+ ... x = self.relu1(self.fc1(input))
89
+ ... y = self.relu2(self.fc2(x))
90
+ ... return y
91
+ >>> # constructs a network with lazy modules
92
+ >>> lazy_mlp = LazyMLP()
93
+ >>> # transforms the network's device and dtype
94
+ >>> # NOTE: these transforms can and should be applied after construction and before any 'dry runs'
95
+ >>> lazy_mlp = lazy_mlp.cuda().double()
96
+ >>> lazy_mlp
97
+ LazyMLP( (fc1): LazyLinear(in_features=0, out_features=10, bias=True)
98
+ (relu1): ReLU()
99
+ (fc2): LazyLinear(in_features=0, out_features=1, bias=True)
100
+ (relu2): ReLU()
101
+ )
102
+ >>> # performs a dry run to initialize the network's lazy modules
103
+ >>> lazy_mlp(torch.ones(10,10).cuda())
104
+ >>> # after initialization, LazyLinear modules become regular Linear modules
105
+ >>> lazy_mlp
106
+ LazyMLP(
107
+ (fc1): Linear(in_features=10, out_features=10, bias=True)
108
+ (relu1): ReLU()
109
+ (fc2): Linear(in_features=10, out_features=1, bias=True)
110
+ (relu2): ReLU()
111
+ )
112
+ >>> # attaches an optimizer, since parameters can now be used as usual
113
+ >>> optim = torch.optim.SGD(lazy_mlp.parameters(), lr=0.01)
114
+
115
+ A final caveat when using lazy modules is that the order of initialization of a network's
116
+ parameters may change, since the lazy modules are always initialized after other modules.
117
+ For example, if the LazyMLP class defined above had a :class:`torch.nn.LazyLinear` module
118
+ first and then a regular :class:`torch.nn.Linear` second, the second module would be
119
+ initialized on construction and the first module would be initialized during the first dry run.
120
+ This can cause the parameters of a network using lazy modules to be initialized differently
121
+ than the parameters of a network without lazy modules, as the order of parameter initializations,
122
+ which often depends on a stateful random number generator, is different.
123
+ Check :doc:`/notes/randomness` for more details.
124
+
125
+ Lazy modules can be serialized with a state dict like other modules. For example:
126
+
127
+ >>> lazy_mlp = LazyMLP()
128
+ >>> # The state dict shows the uninitialized parameters
129
+ >>> lazy_mlp.state_dict()
130
+ OrderedDict([('fc1.weight', Uninitialized parameter),
131
+ ('fc1.bias',
132
+ tensor([-1.8832e+25, 4.5636e-41, -1.8832e+25, 4.5636e-41, -6.1598e-30,
133
+ 4.5637e-41, -1.8788e+22, 4.5636e-41, -2.0042e-31, 4.5637e-41])),
134
+ ('fc2.weight', Uninitialized parameter),
135
+ ('fc2.bias', tensor([0.0019]))])
136
+
137
+
138
+ Lazy modules can load regular :class:`torch.nn.Parameter` s (i.e. you can serialize/deserialize
139
+ initialized LazyModules and they will remain initialized)
140
+
141
+
142
+ >>> full_mlp = LazyMLP()
143
+ >>> # Dry run to initialize another module
144
+ >>> full_mlp.forward(torch.ones(10, 1))
145
+ >>> # Load an initialized state into a lazy module
146
+ >>> lazy_mlp.load_state_dict(full_mlp.state_dict())
147
+ >>> # The state dict now holds valid values
148
+ >>> lazy_mlp.state_dict()
149
+ OrderedDict([('fc1.weight',
150
+ tensor([[-0.3837],
151
+ [ 0.0907],
152
+ [ 0.6708],
153
+ [-0.5223],
154
+ [-0.9028],
155
+ [ 0.2851],
156
+ [-0.4537],
157
+ [ 0.6813],
158
+ [ 0.5766],
159
+ [-0.8678]])),
160
+ ('fc1.bias',
161
+ tensor([-1.8832e+25, 4.5636e-41, -1.8832e+25, 4.5636e-41, -6.1598e-30,
162
+ 4.5637e-41, -1.8788e+22, 4.5636e-41, -2.0042e-31, 4.5637e-41])),
163
+ ('fc2.weight',
164
+ tensor([[ 0.1320, 0.2938, 0.0679, 0.2793, 0.1088, -0.1795, -0.2301, 0.2807,
165
+ 0.2479, 0.1091]])),
166
+ ('fc2.bias', tensor([0.0019]))])
167
+
168
+ Note, however, that the loaded parameters will not be replaced when doing a "dry run" if they are initialized
169
+ when the state is loaded. This prevents using initialized modules in different contexts.
170
+ """
171
+
172
+ # modules inheriting from this will change their __class__ to the specified
173
+ # one after they are fully initialized
174
+ cls_to_become: Optional[Type[Any]] = None
175
+
176
+ def __init__(self: _LazyProtocol, *args, **kwargs):
177
+ # Mypy doesn't like this super call in a mixin
178
+ super().__init__(*args, **kwargs) # type: ignore[misc]
179
+ self._load_hook = self._register_load_state_dict_pre_hook(self._lazy_load_hook)
180
+ self._initialize_hook = self.register_forward_pre_hook(self._infer_parameters, with_kwargs=True)
181
+ warnings.warn('Lazy modules are a new feature under heavy development '
182
+ 'so changes to the API or functionality can happen at any moment.')
183
+
184
+ def _save_to_state_dict(self: _LazyProtocol, destination, prefix, keep_vars):
185
+ # This should be ideally implemented as a hook,
186
+ # but we should override `detach` in the UninitializedParameter to return itself
187
+ # which is not clean
188
+ for name, param in self._parameters.items():
189
+ if param is not None:
190
+ if not (is_lazy(param) or keep_vars):
191
+ param = param.detach()
192
+ destination[prefix + name] = param
193
+ for name, buf in self._buffers.items():
194
+ if buf is not None and name not in self._non_persistent_buffers_set:
195
+ if not (is_lazy(buf) or keep_vars):
196
+ buf = buf.detach()
197
+ destination[prefix + name] = buf
198
+
199
+ def _lazy_load_hook(
200
+ self: _LazyProtocol, state_dict, prefix, local_metadata, strict,
201
+ missing_keys, unexpected_keys, error_msgs):
202
+ """load_state_dict pre-hook function for lazy buffers and parameters.
203
+
204
+ The purpose of this hook is to adjust the current state and/or
205
+ ``state_dict`` being loaded so that a module instance serialized in
206
+ both un/initialized state can be deserialized onto both un/initialized
207
+ module instance.
208
+ See comment in ``torch.nn.Module._register_load_state_dict_pre_hook``
209
+ for the details of the hook specification.
210
+ """
211
+ for name, param in itertools.chain(self._parameters.items(), self._buffers.items()):
212
+ key = prefix + name
213
+ if key in state_dict and param is not None:
214
+ input_param = state_dict[key]
215
+ if is_lazy(param):
216
+ # The current parameter is not initialized but the one being loaded is;
217
+ # create a new parameter based on the uninitialized one
218
+ if not is_lazy(input_param):
219
+ with torch.no_grad():
220
+ param.materialize(input_param.shape)
221
+
222
+ def initialize_parameters(self: _LazyProtocol, *args, **kwargs):
223
+ r"""Initialize parameters according to the input batch properties.
224
+
225
+ This adds an interface to isolate parameter initialization from the
226
+ forward pass when doing parameter shape inference.
227
+ """
228
+ raise NotImplementedError(f'initialize_parameters is not implemented for {self.__class__.__name__}')
229
+
230
+ def has_uninitialized_params(self: _LazyProtocol):
231
+ r"""Check if a module has parameters that are not initialized."""
232
+ # This is to avoid the JIT to track this parameter and force
233
+ # custom modules __setstate__ to add it
234
+ params = self._parameters.values()
235
+ buffers = self._buffers.values()
236
+ for param in itertools.chain(params, buffers):
237
+ if is_lazy(param):
238
+ return True
239
+ return False
240
+
241
+ def _infer_parameters(self: _LazyProtocol, module, args, kwargs=None):
242
+ r"""Infers the size and initializes the parameters according to the provided input batch.
243
+
244
+ Given a module that contains parameters that were declared inferrable
245
+ using :class:`torch.nn.parameter.UninitializedParameter`, runs a forward pass
246
+ in the complete module using the provided input to initialize all the parameters
247
+ as needed.
248
+ The module is set into evaluation mode before running the forward pass in order
249
+ to avoid saving statistics or calculating gradients.
250
+ """
251
+ kwargs = kwargs if kwargs else {}
252
+ module.initialize_parameters(*args, **kwargs)
253
+ if module.has_uninitialized_params():
254
+ raise RuntimeError(f'module {self._get_name()} has not been fully initialized')
255
+ module._initialize_hook.remove()
256
+ module._load_hook.remove()
257
+ delattr(module, '_initialize_hook')
258
+ delattr(module, '_load_hook')
259
+ if module.cls_to_become is not None:
260
+ module.__class__ = module.cls_to_become
261
+
262
+
263
+ def _replicate_for_data_parallel(self: _LazyProtocol):
264
+ raise RuntimeError('Modules with uninitialized parameters can\'t be used with `DataParallel`. '
265
+ 'Run a dummy forward pass to correctly initialize the modules')
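+
+ # Editorial sketch (not part of the upstream file): a minimal custom lazy
+ # module built on the mixin above. LazyScale is a hypothetical example class;
+ # the first forward call materializes the uninitialized parameter and removes
+ # the hooks, exactly as _infer_parameters describes.
+ import torch
+ from torch import nn
+ from torch.nn.parameter import UninitializedParameter
+
+ class LazyScale(LazyModuleMixin, nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.weight = UninitializedParameter()
+
+     def initialize_parameters(self, input):
+         # called via the forward pre-hook before the first real forward
+         if self.has_uninitialized_params():
+             with torch.no_grad():
+                 self.weight.materialize((input.shape[-1],))
+                 self.weight.fill_(1.0)
+
+     def forward(self, input):
+         return input * self.weight
+
+ m = LazyScale()
+ m(torch.randn(2, 5))     # dry run triggers shape inference
+ print(m.weight.shape)    # torch.Size([5])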
venv/lib/python3.10/site-packages/torch/nn/modules/loss.py ADDED
@@ -0,0 +1,1790 @@
1
+ import warnings
2
+
3
+ from .distance import PairwiseDistance
4
+ from .module import Module
5
+ from .. import functional as F
6
+ from .. import _reduction as _Reduction
7
+
8
+ from torch import Tensor
9
+ from typing import Callable, Optional
10
+
11
+ __all__ = ['L1Loss', 'NLLLoss', 'NLLLoss2d', 'PoissonNLLLoss', 'GaussianNLLLoss', 'KLDivLoss',
12
+ 'MSELoss', 'BCELoss', 'BCEWithLogitsLoss', 'HingeEmbeddingLoss', 'MultiLabelMarginLoss',
13
+ 'SmoothL1Loss', 'HuberLoss', 'SoftMarginLoss', 'CrossEntropyLoss', 'MultiLabelSoftMarginLoss',
14
+ 'CosineEmbeddingLoss', 'MarginRankingLoss', 'MultiMarginLoss', 'TripletMarginLoss',
15
+ 'TripletMarginWithDistanceLoss', 'CTCLoss']
16
+
17
+ class _Loss(Module):
18
+ reduction: str
19
+
20
+ def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
21
+ super().__init__()
22
+ if size_average is not None or reduce is not None:
23
+ self.reduction: str = _Reduction.legacy_get_string(size_average, reduce)
24
+ else:
25
+ self.reduction = reduction
26
+
27
+
28
+ class _WeightedLoss(_Loss):
29
+ def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean') -> None:
30
+ super().__init__(size_average, reduce, reduction)
31
+ self.register_buffer('weight', weight)
32
+ self.weight: Optional[Tensor]
33
+
34
+
35
+ class L1Loss(_Loss):
36
+ r"""Creates a criterion that measures the mean absolute error (MAE) between each element in
37
+ the input :math:`x` and target :math:`y`.
38
+
39
+ The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
40
+
41
+ .. math::
42
+ \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
43
+ l_n = \left| x_n - y_n \right|,
44
+
45
+ where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
46
+ (default ``'mean'``), then:
47
+
48
+ .. math::
49
+ \ell(x, y) =
50
+ \begin{cases}
51
+ \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
52
+ \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
53
+ \end{cases}
54
+
55
+ :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
56
+ of :math:`n` elements each.
57
+
58
+ The mean operation still operates over all the elements, and divides by :math:`n`.
59
+
60
+ The division by :math:`n` can be avoided if one sets ``reduction = 'sum'``.
61
+
62
+ Supports real-valued and complex-valued inputs.
63
+
64
+ Args:
65
+ size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
66
+ the losses are averaged over each loss element in the batch. Note that for
67
+ some losses, there are multiple elements per sample. If the field :attr:`size_average`
68
+ is set to ``False``, the losses are instead summed for each minibatch. Ignored
69
+ when :attr:`reduce` is ``False``. Default: ``True``
70
+ reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
71
+ losses are averaged or summed over observations for each minibatch depending
72
+ on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
73
+ batch element instead and ignores :attr:`size_average`. Default: ``True``
74
+ reduction (str, optional): Specifies the reduction to apply to the output:
75
+ ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
76
+ ``'mean'``: the sum of the output will be divided by the number of
77
+ elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
78
+ and :attr:`reduce` are in the process of being deprecated, and in the meantime,
79
+ specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
80
+
81
+ Shape:
82
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
83
+ - Target: :math:`(*)`, same shape as the input.
84
+ - Output: scalar. If :attr:`reduction` is ``'none'``, then
85
+ :math:`(*)`, same shape as the input.
86
+
87
+ Examples::
88
+
89
+ >>> loss = nn.L1Loss()
90
+ >>> input = torch.randn(3, 5, requires_grad=True)
91
+ >>> target = torch.randn(3, 5)
92
+ >>> output = loss(input, target)
93
+ >>> output.backward()
94
+ """
95
+ __constants__ = ['reduction']
96
+
97
+ def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
98
+ super().__init__(size_average, reduce, reduction)
99
+
100
+ def forward(self, input: Tensor, target: Tensor) -> Tensor:
101
+ return F.l1_loss(input, target, reduction=self.reduction)
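+
+ # Editorial sketch (not part of the upstream file): the three reduction modes
+ # applied to the formula above, l_n = |x_n - y_n|.
+ import torch
+ import torch.nn as nn
+
+ x = torch.tensor([1.0, 2.0, 4.0])
+ y = torch.tensor([1.5, 2.0, 3.0])
+ print(nn.L1Loss(reduction='none')(x, y))  # tensor([0.5000, 0.0000, 1.0000])
+ print(nn.L1Loss(reduction='mean')(x, y))  # tensor(0.5000)
+ print(nn.L1Loss(reduction='sum')(x, y))   # tensor(1.5000)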
102
+
103
+
104
+ class NLLLoss(_WeightedLoss):
105
+ r"""The negative log likelihood loss. It is useful to train a classification
106
+ problem with `C` classes.
107
+
108
+ If provided, the optional argument :attr:`weight` should be a 1D Tensor assigning
109
+ weight to each of the classes. This is particularly useful when you have an
110
+ unbalanced training set.
111
+
112
+ The `input` given through a forward call is expected to contain
113
+ log-probabilities of each class. `input` has to be a Tensor of size either
114
+ :math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)`
115
+ with :math:`K \geq 1` for the `K`-dimensional case. The latter is useful for
116
+ higher dimension inputs, such as computing NLL loss per-pixel for 2D images.
117
+
118
+ Obtaining log-probabilities in a neural network is easily achieved by
119
+ adding a `LogSoftmax` layer in the last layer of your network.
120
+ You may use `CrossEntropyLoss` instead, if you prefer not to add an extra
121
+ layer.
122
+
123
+ The `target` that this loss expects should be a class index in the range :math:`[0, C-1]`
124
+ where `C = number of classes`; if `ignore_index` is specified, this loss also accepts
125
+ this class index (this index may not necessarily be in the class range).
126
+
127
+ The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
128
+
129
+ .. math::
130
+ \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
131
+ l_n = - w_{y_n} x_{n,y_n}, \quad
132
+ w_{c} = \text{weight}[c] \cdot \mathbb{1}\{c \not= \text{ignore\_index}\},
133
+
134
+ where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight, and
135
+ :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
136
+ (default ``'mean'``), then
137
+
138
+ .. math::
139
+ \ell(x, y) = \begin{cases}
140
+ \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n}} l_n, &
141
+ \text{if reduction} = \text{`mean';}\\
142
+ \sum_{n=1}^N l_n, &
143
+ \text{if reduction} = \text{`sum'.}
144
+ \end{cases}
145
+
146
+ Args:
147
+ weight (Tensor, optional): a manual rescaling weight given to each
148
+ class. If given, it has to be a Tensor of size `C`. Otherwise, it is
149
+ treated as if having all ones.
150
+ size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
151
+ the losses are averaged over each loss element in the batch. Note that for
152
+ some losses, there are multiple elements per sample. If the field :attr:`size_average`
153
+ is set to ``False``, the losses are instead summed for each minibatch. Ignored
154
+ when :attr:`reduce` is ``False``. Default: ``None``
155
+ ignore_index (int, optional): Specifies a target value that is ignored
156
+ and does not contribute to the input gradient. When
157
+ :attr:`size_average` is ``True``, the loss is averaged over
158
+ non-ignored targets.
159
+ reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
160
+ losses are averaged or summed over observations for each minibatch depending
161
+ on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
162
+ batch element instead and ignores :attr:`size_average`. Default: ``None``
163
+ reduction (str, optional): Specifies the reduction to apply to the output:
164
+ ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
165
+ be applied, ``'mean'``: the weighted mean of the output is taken,
166
+ ``'sum'``: the output will be summed. Note: :attr:`size_average`
167
+ and :attr:`reduce` are in the process of being deprecated, and in
168
+ the meantime, specifying either of those two args will override
169
+ :attr:`reduction`. Default: ``'mean'``
170
+
171
+ Shape:
172
+ - Input: :math:`(N, C)` or :math:`(C)`, where `C = number of classes`, or
173
+ :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
174
+ in the case of `K`-dimensional loss.
175
+ - Target: :math:`(N)` or :math:`()`, where each value is
176
+ :math:`0 \leq \text{targets}[i] \leq C-1`, or
177
+ :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of
178
+ K-dimensional loss.
179
+ - Output: If :attr:`reduction` is ``'none'``, shape :math:`(N)` or
180
+ :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of K-dimensional loss.
181
+ Otherwise, scalar.
182
+
183
+ Examples::
184
+
185
+ >>> m = nn.LogSoftmax(dim=1)
186
+ >>> loss = nn.NLLLoss()
187
+ >>> # input is of size N x C = 3 x 5
188
+ >>> input = torch.randn(3, 5, requires_grad=True)
189
+ >>> # each element in target has to have 0 <= value < C
190
+ >>> target = torch.tensor([1, 0, 4])
191
+ >>> output = loss(m(input), target)
192
+ >>> output.backward()
193
+ >>>
194
+ >>>
195
+ >>> # 2D loss example (used, for example, with image inputs)
196
+ >>> N, C = 5, 4
197
+ >>> loss = nn.NLLLoss()
198
+ >>> # input is of size N x C x height x width
199
+ >>> data = torch.randn(N, 16, 10, 10)
200
+ >>> conv = nn.Conv2d(16, C, (3, 3))
201
+ >>> m = nn.LogSoftmax(dim=1)
202
+ >>> # each element in target has to have 0 <= value < C
203
+ >>> target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
204
+ >>> output = loss(m(conv(data)), target)
205
+ >>> output.backward()
206
+ """
207
+ __constants__ = ['ignore_index', 'reduction']
208
+ ignore_index: int
209
+
210
+ def __init__(self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100,
211
+ reduce=None, reduction: str = 'mean') -> None:
212
+ super().__init__(weight, size_average, reduce, reduction)
213
+ self.ignore_index = ignore_index
214
+
215
+ def forward(self, input: Tensor, target: Tensor) -> Tensor:
216
+ return F.nll_loss(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)
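+
+ # Editorial sketch (not part of the upstream file): with reduction='none',
+ # NLLLoss simply picks out -log p(target class) per sample, matching the
+ # unreduced formula above (uniform class weights here).
+ import torch
+ import torch.nn as nn
+
+ log_probs = torch.log_softmax(torch.randn(3, 5), dim=1)
+ target = torch.tensor([1, 0, 4])
+ loss = nn.NLLLoss(reduction='none')(log_probs, target)
+ manual = -log_probs[torch.arange(3), target]
+ torch.testing.assert_close(loss, manual)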
217
+
218
+
219
+ class NLLLoss2d(NLLLoss):
220
+ def __init__(self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100,
221
+ reduce=None, reduction: str = 'mean') -> None:
222
+ warnings.warn("NLLLoss2d has been deprecated. "
223
+ "Please use NLLLoss instead as a drop-in replacement and see "
224
+ "https://pytorch.org/docs/master/nn.html#torch.nn.NLLLoss for more details.")
225
+ super().__init__(weight, size_average, ignore_index, reduce, reduction)
226
+
227
+
228
+ class PoissonNLLLoss(_Loss):
229
+ r"""Negative log likelihood loss with Poisson distribution of target.
230
+
231
+ The loss can be described as:
232
+
233
+ .. math::
234
+ \text{target} \sim \mathrm{Poisson}(\text{input})
235
+
236
+ \text{loss}(\text{input}, \text{target}) = \text{input} - \text{target} * \log(\text{input})
237
+ + \log(\text{target!})
238
+
239
+ The last term can be omitted or approximated with Stirling's formula. The
240
+ approximation is used for target values greater than 1. For targets less than or
241
+ equal to 1, zeros are added to the loss.
242
+
243
+ Args:
244
+ log_input (bool, optional): if ``True`` the loss is computed as
245
+ :math:`\exp(\text{input}) - \text{target}*\text{input}`, if ``False`` the loss is
246
+ :math:`\text{input} - \text{target}*\log(\text{input}+\text{eps})`.
247
+ full (bool, optional): whether to compute the full loss, i.e., to add the
248
+ Stirling approximation term
249
+
250
+ .. math::
251
+ \text{target}*\log(\text{target}) - \text{target} + 0.5 * \log(2\pi\text{target}).
252
+ size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
253
+ the losses are averaged over each loss element in the batch. Note that for
254
+ some losses, there are multiple elements per sample. If the field :attr:`size_average`
255
+ is set to ``False``, the losses are instead summed for each minibatch. Ignored
256
+ when :attr:`reduce` is ``False``. Default: ``True``
257
+ eps (float, optional): Small value to avoid evaluation of :math:`\log(0)` when
258
+ :attr:`log_input = False`. Default: 1e-8
259
+ reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
260
+ losses are averaged or summed over observations for each minibatch depending
261
+ on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
262
+ batch element instead and ignores :attr:`size_average`. Default: ``True``
263
+ reduction (str, optional): Specifies the reduction to apply to the output:
264
+ ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
265
+ ``'mean'``: the sum of the output will be divided by the number of
266
+ elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
267
+ and :attr:`reduce` are in the process of being deprecated, and in the meantime,
268
+ specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
269
+
270
+ Examples::
271
+
272
+ >>> loss = nn.PoissonNLLLoss()
273
+ >>> log_input = torch.randn(5, 2, requires_grad=True)
274
+ >>> target = torch.randn(5, 2)
275
+ >>> output = loss(log_input, target)
276
+ >>> output.backward()
277
+
278
+ Shape:
279
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
280
+ - Target: :math:`(*)`, same shape as the input.
281
+ - Output: scalar by default. If :attr:`reduction` is ``'none'``, then :math:`(*)`,
282
+ the same shape as the input.
283
+ """
284
+ __constants__ = ['log_input', 'full', 'eps', 'reduction']
285
+ log_input: bool
286
+ full: bool
287
+ eps: float
288
+
289
+ def __init__(self, log_input: bool = True, full: bool = False, size_average=None,
290
+ eps: float = 1e-8, reduce=None, reduction: str = 'mean') -> None:
291
+ super().__init__(size_average, reduce, reduction)
292
+ self.log_input = log_input
293
+ self.full = full
294
+ self.eps = eps
295
+
296
+ def forward(self, log_input: Tensor, target: Tensor) -> Tensor:
297
+ return F.poisson_nll_loss(log_input, target, log_input=self.log_input, full=self.full,
298
+ eps=self.eps, reduction=self.reduction)
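+
+ # Editorial sketch (not part of the upstream file): with log_input=True and
+ # full=False (the defaults), the loss reduces to exp(input) - target * input,
+ # averaged over all elements, as stated in the Args section above.
+ import torch
+ import torch.nn as nn
+
+ inp = torch.randn(4)
+ tgt = torch.poisson(torch.full((4,), 3.0))
+ loss = nn.PoissonNLLLoss()(inp, tgt)
+ manual = (torch.exp(inp) - tgt * inp).mean()
+ torch.testing.assert_close(loss, manual)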
299
+
300
+
301
+ class GaussianNLLLoss(_Loss):
302
+ r"""Gaussian negative log likelihood loss.
303
+
304
+ The targets are treated as samples from Gaussian distributions with
305
+ expectations and variances predicted by the neural network. For a
306
+ ``target`` tensor modelled as having Gaussian distribution with a tensor
307
+ of expectations ``input`` and a tensor of positive variances ``var`` the loss is:
308
+
309
+ .. math::
310
+ \text{loss} = \frac{1}{2}\left(\log\left(\text{max}\left(\text{var},
311
+ \ \text{eps}\right)\right) + \frac{\left(\text{input} - \text{target}\right)^2}
312
+ {\text{max}\left(\text{var}, \ \text{eps}\right)}\right) + \text{const.}
313
+
314
+ where :attr:`eps` is used for stability. By default, the constant term of
315
+ the loss function is omitted unless :attr:`full` is ``True``. If ``var`` is not the same
316
+ size as ``input`` (due to a homoscedastic assumption), it must either have a final dimension
317
+ of 1 or have one fewer dimension (with all other sizes being the same) for correct broadcasting.
318
+
319
+ Args:
320
+ full (bool, optional): include the constant term in the loss
321
+ calculation. Default: ``False``.
322
+ eps (float, optional): value used to clamp ``var`` (see note below), for
323
+ stability. Default: 1e-6.
324
+ reduction (str, optional): specifies the reduction to apply to the
325
+ output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction
326
+ will be applied, ``'mean'``: the output is the average of all batch
327
+ member losses, ``'sum'``: the output is the sum of all batch member
328
+ losses. Default: ``'mean'``.
329
+
330
+ Shape:
331
+ - Input: :math:`(N, *)` or :math:`(*)` where :math:`*` means any number of additional
332
+ dimensions
333
+ - Target: :math:`(N, *)` or :math:`(*)`, same shape as the input, or same shape as the input
334
+ but with one dimension equal to 1 (to allow for broadcasting)
335
+ - Var: :math:`(N, *)` or :math:`(*)`, same shape as the input, or same shape as the input but
336
+ with one dimension equal to 1, or same shape as the input but with one fewer
337
+ dimension (to allow for broadcasting)
338
+ - Output: scalar if :attr:`reduction` is ``'mean'`` (default) or
339
+ ``'sum'``. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`, same
340
+ shape as the input
341
+
342
+ Examples::
+
343
+ >>> loss = nn.GaussianNLLLoss()
344
+ >>> input = torch.randn(5, 2, requires_grad=True)
345
+ >>> target = torch.randn(5, 2)
346
+ >>> var = torch.ones(5, 2, requires_grad=True) # heteroscedastic
347
+ >>> output = loss(input, target, var)
348
+ >>> output.backward()
349
+
350
+ >>> loss = nn.GaussianNLLLoss()
351
+ >>> input = torch.randn(5, 2, requires_grad=True)
352
+ >>> target = torch.randn(5, 2)
353
+ >>> var = torch.ones(5, 1, requires_grad=True) # homoscedastic
354
+ >>> output = loss(input, target, var)
355
+ >>> output.backward()
356
+
357
+ Note:
358
+ The clamping of ``var`` is ignored with respect to autograd, and so the
359
+ gradients are unaffected by it.
360
+
361
+ Reference:
362
+ Nix, D. A. and Weigend, A. S., "Estimating the mean and variance of the
363
+ target probability distribution", Proceedings of 1994 IEEE International
364
+ Conference on Neural Networks (ICNN'94), Orlando, FL, USA, 1994, pp. 55-60
365
+ vol.1, doi: 10.1109/ICNN.1994.374138.
366
+ """
367
+ __constants__ = ['full', 'eps', 'reduction']
368
+ full: bool
369
+ eps: float
370
+
371
+ def __init__(self, *, full: bool = False, eps: float = 1e-6, reduction: str = 'mean') -> None:
372
+ super().__init__(None, None, reduction)
373
+ self.full = full
374
+ self.eps = eps
375
+
376
+ def forward(self, input: Tensor, target: Tensor, var: Tensor) -> Tensor:
377
+ return F.gaussian_nll_loss(input, target, var, full=self.full, eps=self.eps, reduction=self.reduction)
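+
+ # Editorial sketch (not part of the upstream file): reproducing the formula
+ # above by hand; with full=False the constant term is dropped, and var is
+ # clamped at eps (a no-op here since var > eps).
+ import torch
+ import torch.nn as nn
+
+ inp, tgt = torch.randn(6), torch.randn(6)
+ var = torch.rand(6) + 0.1
+ loss = nn.GaussianNLLLoss(reduction='mean')(inp, tgt, var)
+ manual = 0.5 * (var.log() + (inp - tgt) ** 2 / var)
+ torch.testing.assert_close(loss, manual.mean())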
378
+
379
+
380
+ class KLDivLoss(_Loss):
381
+ r"""The Kullback-Leibler divergence loss.
382
+
383
+ For tensors of the same shape :math:`y_{\text{pred}},\ y_{\text{true}}`,
384
+ where :math:`y_{\text{pred}}` is the :attr:`input` and :math:`y_{\text{true}}` is the
385
+ :attr:`target`, we define the **pointwise KL-divergence** as
386
+
387
+ .. math::
388
+
389
+ L(y_{\text{pred}},\ y_{\text{true}})
390
+ = y_{\text{true}} \cdot \log \frac{y_{\text{true}}}{y_{\text{pred}}}
391
+ = y_{\text{true}} \cdot (\log y_{\text{true}} - \log y_{\text{pred}})
392
+
393
+ To avoid underflow issues when computing this quantity, this loss expects the argument
394
+ :attr:`input` in the log-space. The argument :attr:`target` may also be provided in the
395
+ log-space if :attr:`log_target`\ `= True`.
396
+
397
+ To summarise, this function is roughly equivalent to computing
398
+
399
+ .. code-block:: python
400
+
401
+ if not log_target: # default
402
+ loss_pointwise = target * (target.log() - input)
403
+ else:
404
+ loss_pointwise = target.exp() * (target - input)
405
+
406
+ and then reducing this result depending on the argument :attr:`reduction` as
407
+
408
+ .. code-block:: python
409
+
410
+ if reduction == "mean": # default
411
+ loss = loss_pointwise.mean()
412
+ elif reduction == "batchmean": # mathematically correct
413
+ loss = loss_pointwise.sum() / input.size(0)
414
+ elif reduction == "sum":
415
+ loss = loss_pointwise.sum()
416
+ else: # reduction == "none"
417
+ loss = loss_pointwise
418
+
419
+ .. note::
420
+ As with all other losses in PyTorch, this function expects the first argument,
421
+ :attr:`input`, to be the output of the model (e.g. the neural network)
422
+ and the second, :attr:`target`, to be the observations in the dataset.
423
+ This differs from the standard mathematical notation :math:`KL(P\ ||\ Q)` where
424
+ :math:`P` denotes the distribution of the observations and :math:`Q` denotes the model.
425
+
426
+ .. warning::
427
+ :attr:`reduction`\ `= "mean"` doesn't return the true KL divergence value, please use
428
+ :attr:`reduction`\ `= "batchmean"` which aligns with the mathematical definition.
429
+
430
+ Args:
431
+ size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
432
+ the losses are averaged over each loss element in the batch. Note that for
433
+ some losses, there are multiple elements per sample. If the field :attr:`size_average`
434
+ is set to `False`, the losses are instead summed for each minibatch. Ignored
435
+ when :attr:`reduce` is `False`. Default: `True`
436
+ reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
437
+ losses are averaged or summed over observations for each minibatch depending
438
+ on :attr:`size_average`. When :attr:`reduce` is `False`, returns a loss per
439
+ batch element instead and ignores :attr:`size_average`. Default: `True`
440
+ reduction (str, optional): Specifies the reduction to apply to the output. Default: `"mean"`
441
+ log_target (bool, optional): Specifies whether `target` is in the log space. Default: `False`
442
+
443
+ Shape:
444
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
445
+ - Target: :math:`(*)`, same shape as the input.
446
+ - Output: scalar by default. If :attr:`reduction` is `'none'`, then :math:`(*)`,
447
+ same shape as the input.
448
+
449
+ Examples::
450
+
451
+ >>> import torch.nn.functional as F
452
+ >>> kl_loss = nn.KLDivLoss(reduction="batchmean")
453
+ >>> # input should be a distribution in the log space
454
+ >>> input = F.log_softmax(torch.randn(3, 5, requires_grad=True), dim=1)
455
+ >>> # Sample a batch of distributions. Usually this would come from the dataset
456
+ >>> target = F.softmax(torch.rand(3, 5), dim=1)
457
+ >>> output = kl_loss(input, target)
458
+
459
+ >>> kl_loss = nn.KLDivLoss(reduction="batchmean", log_target=True)
460
+ >>> log_target = F.log_softmax(torch.rand(3, 5), dim=1)
461
+ >>> output = kl_loss(input, log_target)
462
+ """
463
+ __constants__ = ['reduction']
464
+
465
+ def __init__(self, size_average=None, reduce=None, reduction: str = 'mean', log_target: bool = False) -> None:
466
+ super().__init__(size_average, reduce, reduction)
467
+ self.log_target = log_target
468
+
469
+ def forward(self, input: Tensor, target: Tensor) -> Tensor:
470
+ return F.kl_div(input, target, reduction=self.reduction, log_target=self.log_target)
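+
+ # Editorial sketch (not part of the upstream file): 'batchmean' matches the
+ # pointwise definition above summed over all elements and divided by the
+ # batch size, which is why the docstring calls it mathematically correct.
+ import torch
+ import torch.nn.functional as F
+
+ inp = F.log_softmax(torch.randn(3, 5), dim=1)
+ tgt = F.softmax(torch.randn(3, 5), dim=1)
+ loss = F.kl_div(inp, tgt, reduction='batchmean')
+ manual = (tgt * (tgt.log() - inp)).sum() / inp.size(0)
+ torch.testing.assert_close(loss, manual)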
471
+
472
+
473
+ class MSELoss(_Loss):
474
+ r"""Creates a criterion that measures the mean squared error (squared L2 norm) between
475
+ each element in the input :math:`x` and target :math:`y`.
476
+
477
+ The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
478
+
479
+ .. math::
480
+ \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
481
+ l_n = \left( x_n - y_n \right)^2,
482
+
483
+ where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
484
+ (default ``'mean'``), then:
485
+
486
+ .. math::
487
+ \ell(x, y) =
488
+ \begin{cases}
489
+ \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
490
+ \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
491
+ \end{cases}
492
+
493
+ :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
494
+ of :math:`n` elements each.
495
+
496
+ The mean operation still operates over all the elements, and divides by :math:`n`.
497
+
498
+ The division by :math:`n` can be avoided if one sets ``reduction = 'sum'``.
499
+
500
+ Args:
501
+ size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
502
+ the losses are averaged over each loss element in the batch. Note that for
503
+ some losses, there are multiple elements per sample. If the field :attr:`size_average`
504
+ is set to ``False``, the losses are instead summed for each minibatch. Ignored
505
+ when :attr:`reduce` is ``False``. Default: ``True``
506
+ reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
507
+ losses are averaged or summed over observations for each minibatch depending
508
+ on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
509
+ batch element instead and ignores :attr:`size_average`. Default: ``True``
510
+ reduction (str, optional): Specifies the reduction to apply to the output:
511
+ ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
512
+ ``'mean'``: the sum of the output will be divided by the number of
513
+ elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
514
+ and :attr:`reduce` are in the process of being deprecated, and in the meantime,
515
+ specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
516
+
517
+ Shape:
518
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
519
+ - Target: :math:`(*)`, same shape as the input.
+ - Output: scalar. If :attr:`reduction` is ``'none'``, then
+ :math:`(*)`, same shape as the input.
520
+
521
+ Examples::
522
+
523
+ >>> loss = nn.MSELoss()
524
+ >>> input = torch.randn(3, 5, requires_grad=True)
525
+ >>> target = torch.randn(3, 5)
526
+ >>> output = loss(input, target)
527
+ >>> output.backward()
528
+ """
529
+ __constants__ = ['reduction']
530
+
531
+ def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
532
+ super().__init__(size_average, reduce, reduction)
533
+
534
+ def forward(self, input: Tensor, target: Tensor) -> Tensor:
535
+ return F.mse_loss(input, target, reduction=self.reduction)
536
+
537
+
538
+ class BCELoss(_WeightedLoss):
539
+ r"""Creates a criterion that measures the Binary Cross Entropy between the target and
540
+ the input probabilities:
541
+
542
+ The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
543
+
544
+ .. math::
545
+ \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
546
+ l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right],
547
+
548
+ where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
549
+ (default ``'mean'``), then
550
+
551
+ .. math::
552
+ \ell(x, y) = \begin{cases}
553
+ \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
554
+ \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
555
+ \end{cases}
556
+
557
+ This is used for measuring the error of a reconstruction in, for example,
558
+ an auto-encoder. Note that the targets :math:`y` should be numbers
559
+ between 0 and 1.
560
+
561
+ Notice that if :math:`x_n` is either 0 or 1, one of the log terms would be
562
+ mathematically undefined in the above loss equation. PyTorch chooses to set
563
+ :math:`\log (0) = -\infty`, since :math:`\lim_{x\to 0} \log (x) = -\infty`.
564
+ However, an infinite term in the loss equation is not desirable for several reasons.
565
+
566
+ For one, if either :math:`y_n = 0` or :math:`(1 - y_n) = 0`, then we would be
567
+ multiplying 0 with infinity. Secondly, if we have an infinite loss value, then
568
+ we would also have an infinite term in our gradient, since
569
+ :math:`\lim_{x\to 0} \frac{d}{dx} \log (x) = \infty`.
570
+ This would make BCELoss's backward method nonlinear with respect to :math:`x_n`,
571
+ and using it for things like linear regression would not be straightforward.
572
+
573
+ Our solution is that BCELoss clamps its log function outputs to be greater than
574
+ or equal to -100. This way, we can always have a finite loss value and a linear
575
+ backward method.
576
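+ 
+     For example, a prediction of exactly 0 against a target of 1 yields a finite
+     loss of 100 rather than an infinite one (a minimal sketch of the clamping
+     behavior described above; the printed value follows from the -100 clamp)::
+ 
+         >>> loss = nn.BCELoss()
+         >>> loss(torch.zeros(1), torch.ones(1))
+         tensor(100.)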
+ 
+ 
+     Args:
+         weight (Tensor, optional): a manual rescaling weight given to the loss
+             of each batch element. If given, has to be a Tensor of size `nbatch`.
+         size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
+             the losses are averaged over each loss element in the batch. Note that for
+             some losses, there are multiple elements per sample. If the field :attr:`size_average`
+             is set to ``False``, the losses are instead summed for each minibatch. Ignored
+             when :attr:`reduce` is ``False``. Default: ``True``
+         reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
+             losses are averaged or summed over observations for each minibatch depending
+             on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
+             batch element instead and ignores :attr:`size_average`. Default: ``True``
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
+             ``'mean'``: the sum of the output will be divided by the number of
+             elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
+             and :attr:`reduce` are in the process of being deprecated, and in the meantime,
+             specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
+ 
+     Shape:
+         - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
+         - Target: :math:`(*)`, same shape as the input.
+         - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same
+           shape as input.
+ 
+     Examples::
+ 
+         >>> m = nn.Sigmoid()
+         >>> loss = nn.BCELoss()
+         >>> input = torch.randn(3, 2, requires_grad=True)
+         >>> target = torch.rand(3, 2, requires_grad=False)
+         >>> output = loss(m(input), target)
+         >>> output.backward()
+     """
+     __constants__ = ['reduction']
+ 
+     def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean') -> None:
+         super().__init__(weight, size_average, reduce, reduction)
+ 
+     def forward(self, input: Tensor, target: Tensor) -> Tensor:
+         return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)
+ 
+ 
+ class BCEWithLogitsLoss(_Loss):
+     r"""This loss combines a `Sigmoid` layer and the `BCELoss` in one single
+     class. This version is more numerically stable than using a plain `Sigmoid`
+     followed by a `BCELoss` as, by combining the operations into one layer,
+     we take advantage of the log-sum-exp trick for numerical stability.
+ 
+     The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
+ 
+     .. math::
+         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
+         l_n = - w_n \left[ y_n \cdot \log \sigma(x_n)
+         + (1 - y_n) \cdot \log (1 - \sigma(x_n)) \right],
+ 
+     where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
+     (default ``'mean'``), then
+ 
+     .. math::
+         \ell(x, y) = \begin{cases}
+             \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
+             \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
+         \end{cases}
+ 
+     This is used for measuring the error of a reconstruction in, for example,
+     an auto-encoder. Note that the targets :math:`y` should be numbers
+     between 0 and 1.
+ 
+     It's possible to trade off recall and precision by adding weights to positive examples.
+     In the case of multi-label classification the loss can be described as:
+ 
+     .. math::
+         \ell_c(x, y) = L_c = \{l_{1,c},\dots,l_{N,c}\}^\top, \quad
+         l_{n,c} = - w_{n,c} \left[ p_c y_{n,c} \cdot \log \sigma(x_{n,c})
+         + (1 - y_{n,c}) \cdot \log (1 - \sigma(x_{n,c})) \right],
+ 
+     where :math:`c` is the class number (:math:`c > 1` for multi-label binary classification,
+     :math:`c = 1` for single-label binary classification),
+     :math:`n` is the number of the sample in the batch and
+     :math:`p_c` is the weight of the positive answer for the class :math:`c`.
+ 
+     :math:`p_c > 1` increases the recall, :math:`p_c < 1` increases the precision.
+ 
+     For example, if a dataset contains 100 positive and 300 negative examples of a single class,
+     then ``pos_weight`` for the class should be equal to :math:`\frac{300}{100}=3`.
+     The loss would act as if the dataset contains :math:`3\times 100=300` positive examples.
+ 
+     Examples::
+ 
+         >>> target = torch.ones([10, 64], dtype=torch.float32)  # 64 classes, batch size = 10
+         >>> output = torch.full([10, 64], 1.5)  # A prediction (logit)
+         >>> pos_weight = torch.ones([64])  # All weights are equal to 1
+         >>> criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
+         >>> criterion(output, target)  # -log(sigmoid(1.5))
+         tensor(0.20...)
+ 
+     In the above example, the ``pos_weight`` tensor's elements correspond to the 64 distinct classes
+     in a multi-label binary classification scenario. Each element in ``pos_weight`` is designed to adjust the
+     loss function based on the imbalance between negative and positive samples for the respective class.
+     This approach is useful in datasets with varying levels of class imbalance, ensuring that the loss
+     calculation accurately accounts for the distribution in each class.
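+ 
+     The broadcasting behavior can be sketched for a pixel-wise, multi-label target
+     (a usage sketch; the shapes and values here are purely illustrative)::
+ 
+         >>> target = torch.rand(2, 3, 4, 4)   # (B, C, H, W)
+         >>> logits = torch.randn(2, 3, 4, 4, requires_grad=True)
+         >>> pos_weight = torch.ones(3, 1, 1)  # one weight per class, broadcast over H and W
+         >>> criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
+         >>> criterion(logits, target).backward()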
+ 
+     Args:
+         weight (Tensor, optional): a manual rescaling weight given to the loss
+             of each batch element. If given, has to be a Tensor of size `nbatch`.
+         size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
+             the losses are averaged over each loss element in the batch. Note that for
+             some losses, there are multiple elements per sample. If the field :attr:`size_average`
+             is set to ``False``, the losses are instead summed for each minibatch. Ignored
+             when :attr:`reduce` is ``False``. Default: ``True``
+         reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
+             losses are averaged or summed over observations for each minibatch depending
+             on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
+             batch element instead and ignores :attr:`size_average`. Default: ``True``
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
+             ``'mean'``: the sum of the output will be divided by the number of
+             elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
+             and :attr:`reduce` are in the process of being deprecated, and in the meantime,
+             specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
+         pos_weight (Tensor, optional): a weight of positive examples to be broadcast with target.
+             Must be a tensor whose size along the class dimension equals the number of classes.
+             Pay close attention to PyTorch's broadcasting semantics in order to achieve the desired
+             operations. For a target of size [B, C, H, W] (where B is batch size), a pos_weight of
+             size [B, C, H, W] will apply different pos_weights to each element of the batch, while
+             one of size [C, H, W] will apply the same pos_weights across the batch. To apply the
+             same positive weight along all spatial dimensions for a 2D multi-class target
+             [C, H, W], use: [C, 1, 1]. Default: ``None``
+ 
+     Shape:
+         - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
+         - Target: :math:`(*)`, same shape as the input.
+         - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same
+           shape as input.
+ 
+     Examples::
+ 
+         >>> loss = nn.BCEWithLogitsLoss()
+         >>> input = torch.randn(3, requires_grad=True)
+         >>> target = torch.empty(3).random_(2)
+         >>> output = loss(input, target)
+         >>> output.backward()
+     """
+     def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean',
+                  pos_weight: Optional[Tensor] = None) -> None:
+         super().__init__(size_average, reduce, reduction)
+         self.register_buffer('weight', weight)
+         self.register_buffer('pos_weight', pos_weight)
+         self.weight: Optional[Tensor]
+         self.pos_weight: Optional[Tensor]
+ 
+     def forward(self, input: Tensor, target: Tensor) -> Tensor:
+         return F.binary_cross_entropy_with_logits(input, target,
+                                                   self.weight,
+                                                   pos_weight=self.pos_weight,
+                                                   reduction=self.reduction)
+ 
+ 
+ class HingeEmbeddingLoss(_Loss):
+     r"""Measures the loss given an input tensor :math:`x` and a labels tensor :math:`y`
+     (containing 1 or -1).
+     This is usually used for measuring whether two inputs are similar or
+     dissimilar, e.g. using the L1 pairwise distance as :math:`x`, and is typically
+     used for learning nonlinear embeddings or semi-supervised learning.
+ 
+     The loss function for the :math:`n`-th sample in the mini-batch is
+ 
+     .. math::
+         l_n = \begin{cases}
+             x_n, & \text{if}\; y_n = 1,\\
+             \max \{0, margin - x_n\}, & \text{if}\; y_n = -1,
+         \end{cases}
+ 
+     and the total loss function is
+ 
+     .. math::
+         \ell(x, y) = \begin{cases}
+             \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
+             \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
+         \end{cases}
+ 
+     where :math:`L = \{l_1,\dots,l_N\}^\top`.
+ 
+     Args:
+         margin (float, optional): Has a default value of `1`.
+         size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
+             the losses are averaged over each loss element in the batch. Note that for
+             some losses, there are multiple elements per sample. If the field :attr:`size_average`
+             is set to ``False``, the losses are instead summed for each minibatch. Ignored
+             when :attr:`reduce` is ``False``. Default: ``True``
+         reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
+             losses are averaged or summed over observations for each minibatch depending
+             on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
+             batch element instead and ignores :attr:`size_average`. Default: ``True``
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
+             ``'mean'``: the sum of the output will be divided by the number of
+             elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
+             and :attr:`reduce` are in the process of being deprecated, and in the meantime,
+             specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
+ 
+     Shape:
+         - Input: :math:`(*)` where :math:`*` means any number of dimensions. The sum operation
+           operates over all the elements.
+         - Target: :math:`(*)`, same shape as the input
+         - Output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input
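+ 
+     Examples (a minimal usage sketch; targets take values in :math:`\{1, -1\}`)::
+ 
+         >>> loss = nn.HingeEmbeddingLoss(margin=1.0)
+         >>> input = torch.randn(4, requires_grad=True)
+         >>> target = torch.tensor([1., -1., 1., -1.])
+         >>> output = loss(input, target)
+         >>> output.backward()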
+ """
786
+ __constants__ = ['margin', 'reduction']
787
+ margin: float
788
+
789
+ def __init__(self, margin: float = 1.0, size_average=None, reduce=None, reduction: str = 'mean') -> None:
790
+ super().__init__(size_average, reduce, reduction)
791
+ self.margin = margin
792
+
793
+ def forward(self, input: Tensor, target: Tensor) -> Tensor:
794
+ return F.hinge_embedding_loss(input, target, margin=self.margin, reduction=self.reduction)
795
+
796
+
797
+ class MultiLabelMarginLoss(_Loss):
+     r"""Creates a criterion that optimizes a multi-class multi-classification
+     hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`)
+     and output :math:`y` (which is a 2D `Tensor` of target class indices).
+     For each sample in the mini-batch:
+ 
+     .. math::
+         \text{loss}(x, y) = \sum_{ij}\frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)}
+ 
+     where :math:`i \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`, \
+     :math:`j \in \left\{0, \; \cdots , \; \text{y.size}(0) - 1\right\}`, \
+     :math:`0 \leq y[j] \leq \text{x.size}(0)-1`, \
+     and :math:`i \neq y[j]` for all :math:`i` and :math:`j`.
+ 
+     :math:`y` and :math:`x` must have the same size.
+ 
+     The criterion only considers a contiguous block of non-negative targets that
+     starts at the front.
+ 
+     This allows for different samples to have variable amounts of target classes.
+ 
+     Args:
+         size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
+             the losses are averaged over each loss element in the batch. Note that for
+             some losses, there are multiple elements per sample. If the field :attr:`size_average`
+             is set to ``False``, the losses are instead summed for each minibatch. Ignored
+             when :attr:`reduce` is ``False``. Default: ``True``
+         reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
+             losses are averaged or summed over observations for each minibatch depending
+             on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
+             batch element instead and ignores :attr:`size_average`. Default: ``True``
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
+             ``'mean'``: the sum of the output will be divided by the number of
+             elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
+             and :attr:`reduce` are in the process of being deprecated, and in the meantime,
+             specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
+ 
+     Shape:
+         - Input: :math:`(C)` or :math:`(N, C)` where `N` is the batch size and `C`
+           is the number of classes.
+         - Target: :math:`(C)` or :math:`(N, C)`, label targets padded by -1 ensuring same shape as the input.
+         - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`.
+ 
+     Examples::
+ 
+         >>> loss = nn.MultiLabelMarginLoss()
+         >>> x = torch.FloatTensor([[0.1, 0.2, 0.4, 0.8]])
+         >>> # for target y, only consider labels 3 and 0, not after label -1
+         >>> y = torch.LongTensor([[3, 0, -1, 1]])
+         >>> # 0.25 * ((1-(0.1-0.2)) + (1-(0.1-0.4)) + (1-(0.8-0.2)) + (1-(0.8-0.4)))
+         >>> loss(x, y)
+         tensor(0.85...)
+ 
+     """
+     __constants__ = ['reduction']
+ 
+     def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
+         super().__init__(size_average, reduce, reduction)
+ 
+     def forward(self, input: Tensor, target: Tensor) -> Tensor:
+         return F.multilabel_margin_loss(input, target, reduction=self.reduction)
+ 
+ 
+ class SmoothL1Loss(_Loss):
+     r"""Creates a criterion that uses a squared term if the absolute
+     element-wise error falls below beta and an L1 term otherwise.
+     It is less sensitive to outliers than :class:`torch.nn.MSELoss` and in some cases
+     prevents exploding gradients (e.g. see the paper `Fast R-CNN`_ by Ross Girshick).
+ 
+     For a batch of size :math:`N`, the unreduced loss can be described as:
+ 
+     .. math::
+         \ell(x, y) = L = \{l_1, ..., l_N\}^T
+ 
+     with
+ 
+     .. math::
+         l_n = \begin{cases}
+             0.5 (x_n - y_n)^2 / beta, & \text{if } |x_n - y_n| < beta \\
+             |x_n - y_n| - 0.5 * beta, & \text{otherwise }
+         \end{cases}
+ 
+     If `reduction` is not `none`, then:
+ 
+     .. math::
+         \ell(x, y) =
+         \begin{cases}
+             \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
+             \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
+         \end{cases}
+ 
+     .. note::
+         Smooth L1 loss can be seen as exactly :class:`L1Loss`, but with the :math:`|x - y| < beta`
+         portion replaced with a quadratic function such that its slope is 1 at :math:`|x - y| = beta`.
+         The quadratic segment smooths the L1 loss near :math:`|x - y| = 0`.
+ 
+     .. note::
+         Smooth L1 loss is closely related to :class:`HuberLoss`, being
+         equivalent to :math:`huber(x, y) / beta` (note that Smooth L1's beta hyper-parameter is
+         also known as delta for Huber). This leads to the following differences:
+ 
+         * As beta -> 0, Smooth L1 loss converges to :class:`L1Loss`, while :class:`HuberLoss`
+           converges to a constant 0 loss. When beta is 0, Smooth L1 loss is equivalent to L1 loss.
+         * As beta -> :math:`+\infty`, Smooth L1 loss converges to a constant 0 loss, while
+           :class:`HuberLoss` converges to :class:`MSELoss`.
+         * For Smooth L1 loss, as beta varies, the L1 segment of the loss has a constant slope of 1.
+           For :class:`HuberLoss`, the slope of the L1 segment is beta.
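+ 
+         The scaling relationship above can be checked numerically (a minimal
+         sketch; the shared ``beta``/``delta`` value is arbitrary)::
+ 
+             >>> x, y = torch.randn(4), torch.randn(4)
+             >>> beta = 0.7
+             >>> huber = nn.HuberLoss(delta=beta)(x, y)
+             >>> smooth_l1 = nn.SmoothL1Loss(beta=beta)(x, y)
+             >>> torch.allclose(huber, beta * smooth_l1)
+             True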
+ 
+     .. _`Fast R-CNN`: https://arxiv.org/abs/1504.08083
+ 
+     Args:
+         size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
+             the losses are averaged over each loss element in the batch. Note that for
+             some losses, there are multiple elements per sample. If the field :attr:`size_average`
+             is set to ``False``, the losses are instead summed for each minibatch. Ignored
+             when :attr:`reduce` is ``False``. Default: ``True``
+         reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
+             losses are averaged or summed over observations for each minibatch depending
+             on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
+             batch element instead and ignores :attr:`size_average`. Default: ``True``
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
+             ``'mean'``: the sum of the output will be divided by the number of
+             elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
+             and :attr:`reduce` are in the process of being deprecated, and in the meantime,
+             specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
+         beta (float, optional): Specifies the threshold at which to change between L1 and L2 loss.
+             The value must be non-negative. Default: 1.0
+ 
+     Shape:
+         - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
+         - Target: :math:`(*)`, same shape as the input.
+         - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same shape as the input.
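+ 
+     Examples (a minimal usage sketch, following the pattern of the other losses
+     in this module)::
+ 
+         >>> loss = nn.SmoothL1Loss(beta=1.0)
+         >>> input = torch.randn(3, 5, requires_grad=True)
+         >>> target = torch.randn(3, 5)
+         >>> output = loss(input, target)
+         >>> output.backward()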
+ """
932
+ __constants__ = ['reduction']
933
+
934
+ def __init__(self, size_average=None, reduce=None, reduction: str = 'mean', beta: float = 1.0) -> None:
935
+ super().__init__(size_average, reduce, reduction)
936
+ self.beta = beta
937
+
938
+ def forward(self, input: Tensor, target: Tensor) -> Tensor:
939
+ return F.smooth_l1_loss(input, target, reduction=self.reduction, beta=self.beta)
940
+
941
+
942
+ class HuberLoss(_Loss):
+     r"""Creates a criterion that uses a squared term if the absolute
+     element-wise error falls below delta and a delta-scaled L1 term otherwise.
+     This loss combines advantages of both :class:`L1Loss` and :class:`MSELoss`; the
+     delta-scaled L1 region makes the loss less sensitive to outliers than :class:`MSELoss`,
+     while the L2 region provides smoothness over :class:`L1Loss` near 0. See
+     `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`_ for more information.
+ 
+     For a batch of size :math:`N`, the unreduced loss can be described as:
+ 
+     .. math::
+         \ell(x, y) = L = \{l_1, ..., l_N\}^T
+ 
+     with
+ 
+     .. math::
+         l_n = \begin{cases}
+             0.5 (x_n - y_n)^2, & \text{if } |x_n - y_n| < delta \\
+             delta * (|x_n - y_n| - 0.5 * delta), & \text{otherwise }
+         \end{cases}
+ 
+     If `reduction` is not `none`, then:
+ 
+     .. math::
+         \ell(x, y) =
+         \begin{cases}
+             \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
+             \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
+         \end{cases}
+ 
+     .. note::
+         When delta is set to 1, this loss is equivalent to :class:`SmoothL1Loss`.
+         In general, this loss differs from :class:`SmoothL1Loss` by a factor of delta (AKA beta
+         in Smooth L1).
+         See :class:`SmoothL1Loss` for additional discussion on the differences in behavior
+         between the two losses.
+ 
+     Args:
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
+             ``'mean'``: the sum of the output will be divided by the number of
+             elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
+         delta (float, optional): Specifies the threshold at which to change between delta-scaled L1 and L2 loss.
+             The value must be positive. Default: 1.0
+ 
+     Shape:
+         - Input: :math:`(*)` where :math:`*` means any number of dimensions.
+         - Target: :math:`(*)`, same shape as the input.
+         - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same shape as the input.
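+ 
+     Examples (a minimal usage sketch; the ``delta`` value is illustrative)::
+ 
+         >>> loss = nn.HuberLoss(delta=1.0)
+         >>> input = torch.randn(3, 5, requires_grad=True)
+         >>> target = torch.randn(3, 5)
+         >>> output = loss(input, target)
+         >>> output.backward()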
+ """
992
+ __constants__ = ['reduction', 'delta']
993
+
994
+ def __init__(self, reduction: str = 'mean', delta: float = 1.0) -> None:
995
+ super().__init__(reduction=reduction)
996
+ self.delta = delta
997
+
998
+ def forward(self, input: Tensor, target: Tensor) -> Tensor:
999
+ return F.huber_loss(input, target, reduction=self.reduction, delta=self.delta)
1000
+
1001
+
1002
+ class SoftMarginLoss(_Loss):
+     r"""Creates a criterion that optimizes a two-class classification
+     logistic loss between input tensor :math:`x` and target tensor :math:`y`
+     (containing 1 or -1).
+ 
+     .. math::
+         \text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()}
+ 
+     Args:
+         size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
+             the losses are averaged over each loss element in the batch. Note that for
+             some losses, there are multiple elements per sample. If the field :attr:`size_average`
+             is set to ``False``, the losses are instead summed for each minibatch. Ignored
+             when :attr:`reduce` is ``False``. Default: ``True``
+         reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
+             losses are averaged or summed over observations for each minibatch depending
+             on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
+             batch element instead and ignores :attr:`size_average`. Default: ``True``
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
+             ``'mean'``: the sum of the output will be divided by the number of
+             elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
+             and :attr:`reduce` are in the process of being deprecated, and in the meantime,
+             specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
+ 
+     Shape:
+         - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
+         - Target: :math:`(*)`, same shape as the input.
+         - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same
+           shape as input.
+ 
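+     Examples (a minimal usage sketch; targets take values in :math:`\{1, -1\}`)::
+ 
+         >>> loss = nn.SoftMarginLoss()
+         >>> input = torch.randn(3, 5, requires_grad=True)
+         >>> target = torch.ones(3, 5)
+         >>> output = loss(input, target)
+         >>> output.backward()
+ 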
+ """
1034
+ __constants__ = ['reduction']
1035
+
1036
+ def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
1037
+ super().__init__(size_average, reduce, reduction)
1038
+
1039
+ def forward(self, input: Tensor, target: Tensor) -> Tensor:
1040
+ return F.soft_margin_loss(input, target, reduction=self.reduction)
1041
+
1042
+
1043
+ class CrossEntropyLoss(_WeightedLoss):
+     r"""This criterion computes the cross entropy loss between input logits
+     and target.
+ 
+     It is useful when training a classification problem with `C` classes.
+     If provided, the optional argument :attr:`weight` should be a 1D `Tensor`
+     assigning weight to each of the classes.
+     This is particularly useful when you have an unbalanced training set.
+ 
+     The `input` is expected to contain the unnormalized logits for each class (which do `not` need
+     to be positive or sum to 1, in general).
+     `input` has to be a Tensor of size :math:`(C)` for unbatched input,
+     :math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1` for the
+     `K`-dimensional case. The latter is useful for higher-dimensional inputs, such
+     as computing cross entropy loss per-pixel for 2D images.
+ 
+     The `target` that this criterion expects should contain either:
+ 
+     - Class indices in the range :math:`[0, C)` where :math:`C` is the number of classes; if
+       `ignore_index` is specified, this loss also accepts this class index (this index
+       may not necessarily be in the class range). The unreduced (i.e. with :attr:`reduction`
+       set to ``'none'``) loss for this case can be described as:
+ 
+       .. math::
+           \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
+           l_n = - w_{y_n} \log \frac{\exp(x_{n,y_n})}{\sum_{c=1}^C \exp(x_{n,c})}
+           \cdot \mathbb{1}\{y_n \not= \text{ignore\_index}\}
+ 
+       where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight,
+       :math:`C` is the number of classes, and :math:`N` spans the minibatch dimension as well as
+       :math:`d_1, ..., d_k` for the `K`-dimensional case. If
+       :attr:`reduction` is not ``'none'`` (default ``'mean'``), then
+ 
+       .. math::
+           \ell(x, y) = \begin{cases}
+               \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n} \cdot \mathbb{1}\{y_n \not= \text{ignore\_index}\}} l_n, &
+               \text{if reduction} = \text{`mean';}\\
+               \sum_{n=1}^N l_n,  &
+               \text{if reduction} = \text{`sum'.}
+           \end{cases}
+ 
+       Note that this case is equivalent to applying :class:`~torch.nn.LogSoftmax`
+       on an input, followed by :class:`~torch.nn.NLLLoss`.
+ 
+     - Probabilities for each class; useful when labels beyond a single class per minibatch item
+       are required, such as for blended labels, label smoothing, etc. The unreduced (i.e. with
+       :attr:`reduction` set to ``'none'``) loss for this case can be described as:
+ 
+       .. math::
+           \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
+           l_n = - \sum_{c=1}^C w_c \log \frac{\exp(x_{n,c})}{\sum_{i=1}^C \exp(x_{n,i})} y_{n,c}
+ 
+       where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight,
+       :math:`C` is the number of classes, and :math:`N` spans the minibatch dimension as well as
+       :math:`d_1, ..., d_k` for the `K`-dimensional case. If
+       :attr:`reduction` is not ``'none'`` (default ``'mean'``), then
+ 
+       .. math::
+           \ell(x, y) = \begin{cases}
+               \frac{\sum_{n=1}^N l_n}{N}, &
+               \text{if reduction} = \text{`mean';}\\
+               \sum_{n=1}^N l_n,  &
+               \text{if reduction} = \text{`sum'.}
+           \end{cases}
+ 
+     .. note::
+         The performance of this criterion is generally better when `target` contains class
+         indices, as this allows for optimized computation. Consider providing `target` as
+         class probabilities only when a single class label per minibatch item is too restrictive.
+ 
+     Args:
+         weight (Tensor, optional): a manual rescaling weight given to each class.
+             If given, has to be a Tensor of size `C` and floating point dtype
+         size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
+             the losses are averaged over each loss element in the batch. Note that for
+             some losses, there are multiple elements per sample. If the field :attr:`size_average`
+             is set to ``False``, the losses are instead summed for each minibatch. Ignored
+             when :attr:`reduce` is ``False``. Default: ``True``
+         ignore_index (int, optional): Specifies a target value that is ignored
+             and does not contribute to the input gradient. When :attr:`size_average` is
+             ``True``, the loss is averaged over non-ignored targets. Note that
+             :attr:`ignore_index` is only applicable when the target contains class indices.
+         reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
+             losses are averaged or summed over observations for each minibatch depending
+             on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
+             batch element instead and ignores :attr:`size_average`. Default: ``True``
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
+             be applied, ``'mean'``: the weighted mean of the output is taken,
+             ``'sum'``: the output will be summed. Note: :attr:`size_average`
+             and :attr:`reduce` are in the process of being deprecated, and in
+             the meantime, specifying either of those two args will override
+             :attr:`reduction`. Default: ``'mean'``
+         label_smoothing (float, optional): A float in [0.0, 1.0]. Specifies the amount
+             of smoothing when computing the loss, where 0.0 means no smoothing. The targets
+             become a mixture of the original ground truth and a uniform distribution as described in
+             `Rethinking the Inception Architecture for Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`.
+ 
+     Shape:
+         - Input: Shape :math:`(C)`, :math:`(N, C)` or :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
+           in the case of `K`-dimensional loss.
+         - Target: If containing class indices, shape :math:`()`, :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with
+           :math:`K \geq 1` in the case of K-dimensional loss where each value should be in the range :math:`[0, C)`.
+           If containing class probabilities, same shape as the input and each value should be in the range :math:`[0, 1]`.
+         - Output: If reduction is 'none', shape :math:`()`, :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
+           in the case of K-dimensional loss, depending on the shape of the input. Otherwise, scalar.
+ 
+ 
+     where:
+ 
+     .. math::
+         \begin{aligned}
+             C ={} & \text{number of classes} \\
+             N ={} & \text{batch size} \\
+         \end{aligned}
+ 
+     Examples::
+ 
+         >>> # Example of target with class indices
+         >>> loss = nn.CrossEntropyLoss()
+         >>> input = torch.randn(3, 5, requires_grad=True)
+         >>> target = torch.empty(3, dtype=torch.long).random_(5)
+         >>> output = loss(input, target)
+         >>> output.backward()
+         >>>
+         >>> # Example of target with class probabilities
+         >>> input = torch.randn(3, 5, requires_grad=True)
+         >>> target = torch.randn(3, 5).softmax(dim=1)
+         >>> output = loss(input, target)
+         >>> output.backward()
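+         >>>
+         >>> # Example with label smoothing (a usage sketch; the smoothing value is arbitrary)
+         >>> loss = nn.CrossEntropyLoss(label_smoothing=0.1)
+         >>> output = loss(input, target)
+         >>> output.backward()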
+ """
1174
+ __constants__ = ['ignore_index', 'reduction', 'label_smoothing']
1175
+ ignore_index: int
1176
+ label_smoothing: float
1177
+
1178
+ def __init__(self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100,
1179
+ reduce=None, reduction: str = 'mean', label_smoothing: float = 0.0) -> None:
1180
+ super().__init__(weight, size_average, reduce, reduction)
1181
+ self.ignore_index = ignore_index
1182
+ self.label_smoothing = label_smoothing
1183
+
1184
+ def forward(self, input: Tensor, target: Tensor) -> Tensor:
1185
+ return F.cross_entropy(input, target, weight=self.weight,
1186
+ ignore_index=self.ignore_index, reduction=self.reduction,
1187
+ label_smoothing=self.label_smoothing)
1188
+
1189
+
1190
+ class MultiLabelSoftMarginLoss(_WeightedLoss):
+     r"""Creates a criterion that optimizes a multi-label one-versus-all
+     loss based on max-entropy, between input :math:`x` and target :math:`y` of size
+     :math:`(N, C)`.
+     For each sample in the minibatch:
+ 
+     .. math::
+         loss(x, y) = - \frac{1}{C} * \sum_i y[i] * \log((1 + \exp(-x[i]))^{-1})
+                          + (1-y[i]) * \log\left(\frac{\exp(-x[i])}{(1 + \exp(-x[i]))}\right)
+ 
+     where :math:`i \in \left\{0, \; \cdots , \; \text{x.nElement}() - 1\right\}`,
+     :math:`y[i] \in \left\{0, \; 1\right\}`.
+ 
+     Args:
+         weight (Tensor, optional): a manual rescaling weight given to each
+             class. If given, it has to be a Tensor of size `C`. Otherwise, it is
+             treated as if having all ones.
+         size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
+             the losses are averaged over each loss element in the batch. Note that for
+             some losses, there are multiple elements per sample. If the field :attr:`size_average`
+             is set to ``False``, the losses are instead summed for each minibatch. Ignored
+             when :attr:`reduce` is ``False``. Default: ``True``
+         reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
+             losses are averaged or summed over observations for each minibatch depending
+             on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
+             batch element instead and ignores :attr:`size_average`. Default: ``True``
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
+             ``'mean'``: the sum of the output will be divided by the number of
+             elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
+             and :attr:`reduce` are in the process of being deprecated, and in the meantime,
+             specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
+ 
+     Shape:
+         - Input: :math:`(N, C)` where `N` is the batch size and `C` is the number of classes.
+         - Target: :math:`(N, C)`, label targets must have the same shape as the input.
+         - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`.
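+ 
+     Examples (a minimal usage sketch; binary indicator targets)::
+ 
+         >>> loss = nn.MultiLabelSoftMarginLoss()
+         >>> input = torch.randn(3, 4, requires_grad=True)
+         >>> target = torch.empty(3, 4).random_(2)
+         >>> output = loss(input, target)
+         >>> output.backward()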
+ """
1228
+ __constants__ = ['reduction']
1229
+
1230
+ def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean') -> None:
1231
+ super().__init__(weight, size_average, reduce, reduction)
1232
+
1233
+ def forward(self, input: Tensor, target: Tensor) -> Tensor:
1234
+ return F.multilabel_soft_margin_loss(input, target, weight=self.weight, reduction=self.reduction)
1235
+
1236
+
1237
+ class CosineEmbeddingLoss(_Loss):
+     r"""Creates a criterion that measures the loss given input tensors
+     :math:`x_1`, :math:`x_2` and a `Tensor` label :math:`y` with values 1 or -1.
+     Use :math:`y=1` to maximize the cosine similarity of two inputs, and :math:`y=-1` otherwise.
+     This is typically used for learning nonlinear
+     embeddings or semi-supervised learning.
+ 
+     The loss function for each sample is:
+ 
+     .. math::
+         \text{loss}(x, y) =
+         \begin{cases}
+             1 - \cos(x_1, x_2), & \text{if } y = 1 \\
+             \max(0, \cos(x_1, x_2) - \text{margin}), & \text{if } y = -1
+         \end{cases}
+ 
+     Args:
+         margin (float, optional): Should be a number from :math:`-1` to :math:`1`,
+             :math:`0` to :math:`0.5` is suggested. If :attr:`margin` is missing, the
+             default value is :math:`0`.
+         size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
+             the losses are averaged over each loss element in the batch. Note that for
+             some losses, there are multiple elements per sample. If the field :attr:`size_average`
+             is set to ``False``, the losses are instead summed for each minibatch. Ignored
+             when :attr:`reduce` is ``False``. Default: ``True``
+         reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
+             losses are averaged or summed over observations for each minibatch depending
+             on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
+             batch element instead and ignores :attr:`size_average`. Default: ``True``
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
+             ``'mean'``: the sum of the output will be divided by the number of
+             elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
+             and :attr:`reduce` are in the process of being deprecated, and in the meantime,
+             specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
+ 
+     Shape:
+         - Input1: :math:`(N, D)` or :math:`(D)`, where `N` is the batch size and `D` is the embedding dimension.
+         - Input2: :math:`(N, D)` or :math:`(D)`, same shape as Input1.
+         - Target: :math:`(N)` or :math:`()`.
+         - Output: If :attr:`reduction` is ``'none'``, then :math:`(N)`, otherwise scalar.
+ 
+     Examples::
+ 
+         >>> loss = nn.CosineEmbeddingLoss()
+         >>> input1 = torch.randn(3, 5, requires_grad=True)
+         >>> input2 = torch.randn(3, 5, requires_grad=True)
+         >>> target = torch.ones(3)
+         >>> output = loss(input1, input2, target)
+         >>> output.backward()
+     """
+     __constants__ = ['margin', 'reduction']
+     margin: float
+ 
+     def __init__(self, margin: float = 0., size_average=None, reduce=None, reduction: str = 'mean') -> None:
+         super().__init__(size_average, reduce, reduction)
+         self.margin = margin
+ 
+     def forward(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor:
+         return F.cosine_embedding_loss(input1, input2, target, margin=self.margin, reduction=self.reduction)
+ 
+ 
+ class MarginRankingLoss(_Loss):
+     r"""Creates a criterion that measures the loss given
+     inputs :math:`x1`, :math:`x2`, two 1D mini-batch or 0D `Tensors`,
+     and a label 1D mini-batch or 0D `Tensor` :math:`y` (containing 1 or -1).
+ 
+     If :math:`y = 1` then it is assumed the first input should be ranked higher
+     (have a larger value) than the second input, and vice-versa for :math:`y = -1`.
+ 
+     The loss function for each pair of samples in the mini-batch is:
+ 
+     .. math::
+         \text{loss}(x1, x2, y) = \max(0, -y * (x1 - x2) + \text{margin})
+ 
+     Args:
+         margin (float, optional): Has a default value of :math:`0`.
+         size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
+             the losses are averaged over each loss element in the batch. Note that for
+             some losses, there are multiple elements per sample. If the field :attr:`size_average`
+             is set to ``False``, the losses are instead summed for each minibatch. Ignored
+             when :attr:`reduce` is ``False``. Default: ``True``
+         reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
+             losses are averaged or summed over observations for each minibatch depending
+             on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
+             batch element instead and ignores :attr:`size_average`. Default: ``True``
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
+             ``'mean'``: the sum of the output will be divided by the number of
+             elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
+             and :attr:`reduce` are in the process of being deprecated, and in the meantime,
+             specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
+ 
+     Shape:
+         - Input1: :math:`(N)` or :math:`()` where `N` is the batch size.
+         - Input2: :math:`(N)` or :math:`()`, same shape as Input1.
+         - Target: :math:`(N)` or :math:`()`, same shape as the inputs.
+         - Output: scalar. If :attr:`reduction` is ``'none'`` and Input size is not :math:`()`, then :math:`(N)`.
+ 
+     Examples::
+ 
+         >>> loss = nn.MarginRankingLoss()
+         >>> input1 = torch.randn(3, requires_grad=True)
+         >>> input2 = torch.randn(3, requires_grad=True)
+         >>> target = torch.randn(3).sign()
+         >>> output = loss(input1, input2, target)
+         >>> output.backward()
+     """
+     __constants__ = ['margin', 'reduction']
+     margin: float
+ 
+     def __init__(self, margin: float = 0., size_average=None, reduce=None, reduction: str = 'mean') -> None:
+         super().__init__(size_average, reduce, reduction)
+         self.margin = margin
+ 
+     def forward(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor:
+         return F.margin_ranking_loss(input1, input2, target, margin=self.margin, reduction=self.reduction)
+ 
+ 
+ class MultiMarginLoss(_WeightedLoss):
+     r"""Creates a criterion that optimizes a multi-class classification hinge
+     loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) and
+     output :math:`y` (which is a 1D tensor of target class indices,
+     :math:`0 \leq y \leq \text{x.size}(1)-1`):
+ 
+     For each mini-batch sample, the loss in terms of the 1D input :math:`x` and scalar
+     output :math:`y` is:
+ 
+     .. math::
+         \text{loss}(x, y) = \frac{\sum_i \max(0, \text{margin} - x[y] + x[i])^p}{\text{x.size}(0)}
+ 
+     where :math:`i \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`
+     and :math:`i \neq y`.
+ 
+     Optionally, you can give non-equal weighting on the classes by passing
+     a 1D :attr:`weight` tensor into the constructor.
+ 
+     The loss function then becomes:
+ 
+     .. math::
+         \text{loss}(x, y) = \frac{\sum_i w[y] * \max(0, \text{margin} - x[y] + x[i])^p}{\text{x.size}(0)}
+ 
+     Args:
+         p (int, optional): Has a default value of :math:`1`. :math:`1` and :math:`2`
+             are the only supported values.
+         margin (float, optional): Has a default value of :math:`1`.
+         weight (Tensor, optional): a manual rescaling weight given to each
+             class. If given, it has to be a Tensor of size `C`. Otherwise, it is
+             treated as if having all ones.
+         size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
+             the losses are averaged over each loss element in the batch. Note that for
+             some losses, there are multiple elements per sample. If the field :attr:`size_average`
+             is set to ``False``, the losses are instead summed for each minibatch. Ignored
+             when :attr:`reduce` is ``False``. Default: ``True``
+         reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
+             losses are averaged or summed over observations for each minibatch depending
+             on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
+             batch element instead and ignores :attr:`size_average`. Default: ``True``
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
+             ``'mean'``: the sum of the output will be divided by the number of
+             elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
+             and :attr:`reduce` are in the process of being deprecated, and in the meantime,
+             specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
+ 
+     Shape:
+         - Input: :math:`(N, C)` or :math:`(C)`, where :math:`N` is the batch size and :math:`C` is the number of classes.
+         - Target: :math:`(N)` or :math:`()`, where each value is :math:`0 \leq \text{targets}[i] \leq C-1`.
+         - Output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the target.
+ 
+     Examples::
+ 
+         >>> loss = nn.MultiMarginLoss()
+         >>> x = torch.tensor([[0.1, 0.2, 0.4, 0.8]])
+         >>> y = torch.tensor([3])
+         >>> # 0.25 * ((1-(0.8-0.1)) + (1-(0.8-0.2)) + (1-(0.8-0.4)))
+         >>> loss(x, y)
+         tensor(0.32...)
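+         >>>
+         >>> # With per-class weights (a usage sketch; the weight values are illustrative)
+         >>> weight = torch.tensor([0.2, 0.3, 0.1, 0.4])
+         >>> weighted_loss = nn.MultiMarginLoss(weight=weight)
+         >>> output = weighted_loss(x, y)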
+ """
1416
+ __constants__ = ['p', 'margin', 'reduction']
1417
+ margin: float
1418
+ p: int
1419
+
1420
+ def __init__(self, p: int = 1, margin: float = 1., weight: Optional[Tensor] = None, size_average=None,
1421
+ reduce=None, reduction: str = 'mean') -> None:
1422
+ super().__init__(weight, size_average, reduce, reduction)
1423
+ if p != 1 and p != 2:
1424
+ raise ValueError("only p == 1 and p == 2 supported")
1425
+ if weight is not None and weight.dim() != 1 :
1426
+ raise ValueError(
1427
+ f"MultiMarginLoss: expected weight to be None or 1D tensor, got {weight.dim()}D instead"
1428
+ )
1429
+ self.p = p
1430
+ self.margin = margin
1431
+
1432
+ def forward(self, input: Tensor, target: Tensor) -> Tensor:
1433
+ return F.multi_margin_loss(input, target, p=self.p, margin=self.margin,
1434
+ weight=self.weight, reduction=self.reduction)
1435
+
1436
+
1437
+ class TripletMarginLoss(_Loss):
+     r"""Creates a criterion that measures the triplet loss given input
+     tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`.
+     This is used for measuring a relative similarity between samples. A triplet
+     is composed of `a`, `p` and `n` (i.e., `anchor`, `positive example` and `negative
+     example`, respectively). The shapes of all input tensors should be
+     :math:`(N, D)`.
+ 
+     The distance swap is described in detail in the paper `Learning shallow
+     convolutional feature descriptors with triplet losses`_ by
+     V. Balntas, E. Riba et al.
+ 
+     The loss function for each sample in the mini-batch is:
+ 
+     .. math::
+         L(a, p, n) = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\}
+ 
+ 
+     where
+ 
+     .. math::
+         d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p
+ 
+     The norm is calculated using the specified p value and a small constant :math:`\varepsilon` is
+     added for numerical stability.
+ 
+     See also :class:`~torch.nn.TripletMarginWithDistanceLoss`, which computes the
+     triplet margin loss for input tensors using a custom distance function.
+ 
+     Args:
+         margin (float, optional): Default: :math:`1`.
+         p (int, optional): The norm degree for pairwise distance. Default: :math:`2`.
+         eps (float, optional): Small constant for numerical stability. Default: :math:`1e-6`.
+         swap (bool, optional): The distance swap is described in detail in the paper
+             `Learning shallow convolutional feature descriptors with triplet losses` by
+             V. Balntas, E. Riba et al. Default: ``False``.
+         size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
+             the losses are averaged over each loss element in the batch. Note that for
+             some losses, there are multiple elements per sample. If the field :attr:`size_average`
+             is set to ``False``, the losses are instead summed for each minibatch. Ignored
+             when :attr:`reduce` is ``False``. Default: ``True``
+         reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
+             losses are averaged or summed over observations for each minibatch depending
+             on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
+             batch element instead and ignores :attr:`size_average`. Default: ``True``
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
+             ``'mean'``: the sum of the output will be divided by the number of
+             elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
+             and :attr:`reduce` are in the process of being deprecated, and in the meantime,
+             specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
+ 
+     Shape:
+         - Input: :math:`(N, D)` or :math:`(D)` where :math:`D` is the vector dimension.
+         - Output: A Tensor of shape :math:`(N)` if :attr:`reduction` is ``'none'`` and
+           input shape is :math:`(N, D)`; a scalar otherwise.
+ 
+     Examples::
+ 
+         >>> triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2, eps=1e-7)
+         >>> anchor = torch.randn(100, 128, requires_grad=True)
+         >>> positive = torch.randn(100, 128, requires_grad=True)
+         >>> negative = torch.randn(100, 128, requires_grad=True)
+         >>> output = triplet_loss(anchor, positive, negative)
+         >>> output.backward()
+ 
+     .. _Learning shallow convolutional feature descriptors with triplet losses:
+         http://www.bmva.org/bmvc/2016/papers/paper119/index.html
+     """
+     __constants__ = ['margin', 'p', 'eps', 'swap', 'reduction']
+     margin: float
+     p: float
+     eps: float
+     swap: bool
+ 
+     def __init__(self, margin: float = 1.0, p: float = 2., eps: float = 1e-6, swap: bool = False, size_average=None,
+                  reduce=None, reduction: str = 'mean'):
+         super().__init__(size_average, reduce, reduction)
+         self.margin = margin
+         self.p = p
+         self.eps = eps
+         self.swap = swap
+ 
+     def forward(self, anchor: Tensor, positive: Tensor, negative: Tensor) -> Tensor:
+         return F.triplet_margin_loss(anchor, positive, negative, margin=self.margin, p=self.p,
+                                      eps=self.eps, swap=self.swap, reduction=self.reduction)
+ 
+ 
+ class TripletMarginWithDistanceLoss(_Loss):
+     r"""Creates a criterion that measures the triplet loss given input
+     tensors :math:`a`, :math:`p`, and :math:`n` (representing anchor,
+     positive, and negative examples, respectively), and a nonnegative,
+     real-valued function ("distance function") used to compute the relationship
+     between the anchor and positive example ("positive distance") and the
+     anchor and negative example ("negative distance").
+ 
+     The unreduced loss (i.e., with :attr:`reduction` set to ``'none'``)
+     can be described as:
+ 
+     .. math::
+         \ell(a, p, n) = L = \{l_1,\dots,l_N\}^\top, \quad
+         l_i = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\}
+ 
+     where :math:`N` is the batch size; :math:`d` is a nonnegative, real-valued function
+     quantifying the closeness of two tensors, referred to as the :attr:`distance_function`;
+     and :math:`margin` is a nonnegative margin representing the minimum difference
+     between the positive and negative distances that is required for the loss to
+     be 0. The input tensors have :math:`N` elements each and can be of any shape
+     that the distance function can handle.
+ 
+     If :attr:`reduction` is not ``'none'``
+     (default ``'mean'``), then:
+ 
+     .. math::
+         \ell(x, y) =
+         \begin{cases}
+             \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
+             \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
+         \end{cases}
+ 
+     See also :class:`~torch.nn.TripletMarginLoss`, which computes the triplet
+     loss for input tensors using the :math:`l_p` distance as the distance function.
+ 
+     Args:
+         distance_function (Callable, optional): A nonnegative, real-valued function that
+             quantifies the closeness of two tensors. If not specified,
+             `nn.PairwiseDistance` will be used. Default: ``None``
+         margin (float, optional): A nonnegative margin representing the minimum difference
+             between the positive and negative distances required for the loss to be 0. Larger
+             margins penalize cases where the negative examples are not distant enough from the
+             anchors, relative to the positives. Default: :math:`1`.
+         swap (bool, optional): Whether to use the distance swap described in the paper
+             `Learning shallow convolutional feature descriptors with triplet losses` by
+             V. Balntas, E. Riba et al. If True, and if the positive example is closer to the
+             negative example than the anchor is, swaps the positive example and the anchor in
+             the loss computation. Default: ``False``.
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
+             ``'mean'``: the sum of the output will be divided by the number of
+             elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
+ 
+ 
+     Shape:
+         - Input: :math:`(N, *)` where :math:`*` represents any number of additional dimensions
+           as supported by the distance function.
+         - Output: A Tensor of shape :math:`(N)` if :attr:`reduction` is ``'none'``, or a scalar
+           otherwise.
+ 
+     Examples::
+ 
+         >>> # Initialize embeddings
+         >>> embedding = nn.Embedding(1000, 128)
+         >>> anchor_ids = torch.randint(0, 1000, (1,))
+         >>> positive_ids = torch.randint(0, 1000, (1,))
+         >>> negative_ids = torch.randint(0, 1000, (1,))
+         >>> anchor = embedding(anchor_ids)
+         >>> positive = embedding(positive_ids)
+         >>> negative = embedding(negative_ids)
+         >>>
+         >>> # Built-in Distance Function
+         >>> triplet_loss = \
+         >>>     nn.TripletMarginWithDistanceLoss(distance_function=nn.PairwiseDistance())
+         >>> output = triplet_loss(anchor, positive, negative)
+         >>> output.backward()
+         >>>
+         >>> # Custom Distance Function
+         >>> def l_infinity(x1, x2):
+         >>>     return torch.max(torch.abs(x1 - x2), dim=1).values
+         >>>
+         >>> # xdoctest: +SKIP("FIXME: Would call backwards a second time")
+         >>> triplet_loss = (
+         >>>     nn.TripletMarginWithDistanceLoss(distance_function=l_infinity, margin=1.5))
+         >>> output = triplet_loss(anchor, positive, negative)
+         >>> output.backward()
+         >>>
+         >>> # Custom Distance Function (Lambda)
+         >>> triplet_loss = (
+         >>>     nn.TripletMarginWithDistanceLoss(
+         >>>         distance_function=lambda x, y: 1.0 - F.cosine_similarity(x, y)))
+         >>> output = triplet_loss(anchor, positive, negative)
+         >>> output.backward()
+ 
+     Reference:
+         V. Balntas, et al.: Learning shallow convolutional feature descriptors with triplet losses:
+         http://www.bmva.org/bmvc/2016/papers/paper119/index.html
+     """
+     __constants__ = ['margin', 'swap', 'reduction']
+     margin: float
+     swap: bool
+ 
+     def __init__(self, *, distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = None,
+                  margin: float = 1.0, swap: bool = False, reduction: str = 'mean'):
+         super().__init__(size_average=None, reduce=None, reduction=reduction)
+         self.distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = \
+             distance_function if distance_function is not None else PairwiseDistance()
+         self.margin = margin
+         self.swap = swap
+ 
+     def forward(self, anchor: Tensor, positive: Tensor, negative: Tensor) -> Tensor:
+         return F.triplet_margin_with_distance_loss(anchor, positive, negative,
+                                                    distance_function=self.distance_function,
+                                                    margin=self.margin, swap=self.swap, reduction=self.reduction)
+ 
+ 
+ class CTCLoss(_Loss):
1642
+ r"""The Connectionist Temporal Classification loss.
1643
+
1644
+ Calculates loss between a continuous (unsegmented) time series and a target sequence. CTCLoss sums over the
1645
+ probability of possible alignments of input to target, producing a loss value which is differentiable
1646
+ with respect to each input node. The alignment of input to target is assumed to be "many-to-one", which
1647
+ limits the length of the target sequence such that it must be :math:`\leq` the input length.
1648
+
1649
+ Args:
1650
+ blank (int, optional): blank label. Default :math:`0`.
1651
+ reduction (str, optional): Specifies the reduction to apply to the output:
1652
+ ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
1653
+ ``'mean'``: the output losses will be divided by the target lengths and
1654
+ then the mean over the batch is taken, ``'sum'``: the output losses will be summed.
1655
+ Default: ``'mean'``
1656
+ zero_infinity (bool, optional):
1657
+ Whether to zero infinite losses and the associated gradients.
1658
+ Default: ``False``
1659
+ Infinite losses mainly occur when the inputs are too short
1660
+ to be aligned to the targets.
1661
+
1662
+ Shape:
1663
+ - Log_probs: Tensor of size :math:`(T, N, C)` or :math:`(T, C)`,
1664
+ where :math:`T = \text{input length}`,
1665
+ :math:`N = \text{batch size}`, and
1666
+ :math:`C = \text{number of classes (including blank)}`.
1667
+ The logarithmized probabilities of the outputs (e.g. obtained with
1668
+ :func:`torch.nn.functional.log_softmax`).
1669
+ - Targets: Tensor of size :math:`(N, S)` or
1670
+ :math:`(\operatorname{sum}(\text{target\_lengths}))`,
1671
+ where :math:`N = \text{batch size}` and
1672
+ :math:`S = \text{max target length, if shape is } (N, S)`.
1673
+ It represents the target sequences. Each element in the target
1674
+ sequence is a class index, and a target index cannot be the blank index (default=0).
1675
+ In the :math:`(N, S)` form, targets are padded to the
1676
+ length of the longest sequence, and stacked.
1677
+ In the :math:`(\operatorname{sum}(\text{target\_lengths}))` form,
1678
+ the targets are assumed to be un-padded and
1679
+ concatenated within 1 dimension.
1680
+ - Input_lengths: Tuple or tensor of size :math:`(N)` or :math:`()`,
1681
+ where :math:`N = \text{batch size}`. It represents the lengths of the
1682
+ inputs (each must be :math:`\leq T`). The lengths are specified
1683
+ for each sequence to achieve masking under the assumption that sequences
1684
+ are padded to equal lengths.
1685
+ - Target_lengths: Tuple or tensor of size :math:`(N)` or :math:`()`,
1686
+ where :math:`N = \text{batch size}`. It represents the lengths of the targets.
1687
+ Lengths are specified for each sequence to achieve masking under the
1688
+ assumption that sequences are padded to equal lengths. If target shape is
1689
+ :math:`(N,S)`, target_lengths are effectively the stop index
1690
+ :math:`s_n` for each target sequence, such that ``target_n = targets[n,0:s_n]`` for
1691
+ each target in a batch. Lengths must each be :math:`\leq S`.
1692
+ If the targets are given as a 1d tensor that is the concatenation of individual
1693
+ targets, the target_lengths must add up to the total length of the tensor.
1694
+ - Output: scalar if :attr:`reduction` is ``'mean'`` (default) or
1695
+ ``'sum'``. If :attr:`reduction` is ``'none'``, then :math:`(N)` if input is batched or
1696
+ :math:`()` if input is unbatched, where :math:`N = \text{batch size}`.
1697
+
1698
+ Examples::
1699
+
1700
+ >>> # Targets are to be padded
1701
+ >>> T = 50 # Input sequence length
1702
+ >>> C = 20 # Number of classes (including blank)
1703
+ >>> N = 16 # Batch size
1704
+ >>> S = 30 # Target sequence length of longest target in batch (padding length)
1705
+ >>> S_min = 10 # Minimum target length, for demonstration purposes
1706
+ >>>
1707
+ >>> # Initialize random batch of input vectors, for *size = (T,N,C)
1708
+ >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_()
1709
+ >>>
1710
+ >>> # Initialize random batch of targets (0 = blank, 1:C = classes)
1711
+ >>> target = torch.randint(low=1, high=C, size=(N, S), dtype=torch.long)
1712
+ >>>
1713
+ >>> input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long)
1714
+ >>> target_lengths = torch.randint(low=S_min, high=S, size=(N,), dtype=torch.long)
1715
+ >>> ctc_loss = nn.CTCLoss()
1716
+ >>> loss = ctc_loss(input, target, input_lengths, target_lengths)
1717
+ >>> loss.backward()
1718
+ >>>
1719
+ >>>
1720
+ >>> # Targets are to be un-padded
1721
+ >>> T = 50 # Input sequence length
1722
+ >>> C = 20 # Number of classes (including blank)
1723
+ >>> N = 16 # Batch size
1724
+ >>>
1725
+ >>> # Initialize random batch of input vectors, for *size = (T,N,C)
1726
+ >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_()
1727
+ >>> input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long)
1728
+ >>>
1729
+ >>> # Initialize random batch of targets (0 = blank, 1:C = classes)
1730
+ >>> target_lengths = torch.randint(low=1, high=T, size=(N,), dtype=torch.long)
1731
+ >>> target = torch.randint(low=1, high=C, size=(sum(target_lengths),), dtype=torch.long)
1732
+ >>> ctc_loss = nn.CTCLoss()
1733
+ >>> loss = ctc_loss(input, target, input_lengths, target_lengths)
1734
+ >>> loss.backward()
1735
+ >>>
1736
+ >>>
1737
+ >>> # Targets are to be un-padded and unbatched (effectively N=1)
1738
+ >>> T = 50 # Input sequence length
1739
+ >>> C = 20 # Number of classes (including blank)
1740
+ >>>
1741
+ >>> # Initialize random batch of input vectors, for *size = (T,C)
1742
+ >>> # xdoctest: +SKIP("FIXME: error in doctest")
1743
+ >>> input = torch.randn(T, C).log_softmax(1).detach().requires_grad_()
1744
+ >>> input_lengths = torch.tensor(T, dtype=torch.long)
1745
+ >>>
1746
+ >>> # Initialize random batch of targets (0 = blank, 1:C = classes)
1747
+ >>> target_lengths = torch.randint(low=1, high=T, size=(), dtype=torch.long)
1748
+ >>> target = torch.randint(low=1, high=C, size=(target_lengths,), dtype=torch.long)
1749
+ >>> ctc_loss = nn.CTCLoss()
1750
+ >>> loss = ctc_loss(input, target, input_lengths, target_lengths)
1751
+ >>> loss.backward()
1752
+
1753
+ Reference:
1754
+ A. Graves et al.: Connectionist Temporal Classification:
1755
+ Labelling Unsegmented Sequence Data with Recurrent Neural Networks:
1756
+ https://www.cs.toronto.edu/~graves/icml_2006.pdf
1757
+
1758
+ Note:
1759
+ In order to use CuDNN, the following must be satisfied: :attr:`targets` must be
1760
+ in concatenated format, all :attr:`input_lengths` must be `T`, :math:`blank=0`,
1761
+ :attr:`target_lengths` :math:`\leq 256`, the integer arguments must be of
1762
+ dtype :attr:`torch.int32`.
1763
+
1764
+ The regular implementation uses the (more common in PyTorch) `torch.long` dtype.
1765
+
1766
+
1767
+ Note:
1768
+ In some circumstances when using the CUDA backend with CuDNN, this operator
1769
+ may select a nondeterministic algorithm to increase performance. If this is
1770
+ undesirable, you can try to make the operation deterministic (potentially at
1771
+ a performance cost) by setting ``torch.backends.cudnn.deterministic =
1772
+ True``.
1773
+ Please see the notes on :doc:`/notes/randomness` for background.
1774
+ """
1775
+ __constants__ = ['blank', 'reduction']
1776
+ blank: int
1777
+ zero_infinity: bool
1778
+
1779
+ def __init__(self, blank: int = 0, reduction: str = 'mean', zero_infinity: bool = False):
1780
+ super().__init__(reduction=reduction)
1781
+ self.blank = blank
1782
+ self.zero_infinity = zero_infinity
1783
+
1784
+ def forward(self, log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor) -> Tensor:
1785
+ return F.ctc_loss(log_probs, targets, input_lengths, target_lengths, self.blank, self.reduction,
1786
+ self.zero_infinity)
1787
+
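+ # Editorial sketch (hypothetical, not part of the module above): one way to
+ # satisfy the cuDNN fast-path conditions from the docstring -- concatenated
+ # int32 targets, every input length equal to T, blank=0, target lengths <= 256.
+ def _cudnn_friendly_ctc_example():
+     import torch  # local import; this sketch is self-contained
+     T, C, N, S = 50, 20, 16, 30
+     log_probs = torch.randn(T, N, C).log_softmax(2).requires_grad_()
+     target_lengths = torch.randint(1, S, (N,), dtype=torch.int32)
+     targets = torch.randint(1, C, (int(target_lengths.sum()),), dtype=torch.int32)
+     input_lengths = torch.full((N,), T, dtype=torch.int32)
+     loss = CTCLoss(blank=0)(log_probs, targets, input_lengths, target_lengths)
+     loss.backward()
+     return loss
+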
1788
+ # TODO: L1HingeEmbeddingCriterion
1789
+ # TODO: MSECriterion weight
1790
+ # TODO: ClassSimplexCriterion
venv/lib/python3.10/site-packages/torch/nn/modules/normalization.py ADDED
@@ -0,0 +1,297 @@
1
+ import torch
2
+ import numbers
3
+ from torch.nn.parameter import Parameter
4
+ from .module import Module
5
+ from ._functions import CrossMapLRN2d as _cross_map_lrn2d
6
+ from .. import functional as F
7
+ from .. import init
8
+
9
+ from torch import Tensor, Size
10
+ from typing import Union, List, Tuple
11
+
12
+ __all__ = ['LocalResponseNorm', 'CrossMapLRN2d', 'LayerNorm', 'GroupNorm']
13
+
14
+ class LocalResponseNorm(Module):
15
+ r"""Applies local response normalization over an input signal.
16
+
17
+ The input signal is composed of several input planes, where channels occupy the second dimension.
18
+ Applies normalization across channels.
19
+
20
+ .. math::
21
+ b_{c} = a_{c}\left(k + \frac{\alpha}{n}
22
+ \sum_{c'=\max(0, c-n/2)}^{\min(N-1,c+n/2)}a_{c'}^2\right)^{-\beta}
23
+
24
+ Args:
25
+ size: amount of neighbouring channels used for normalization
26
+ alpha: multiplicative factor. Default: 0.0001
27
+ beta: exponent. Default: 0.75
28
+ k: additive factor. Default: 1
29
+
30
+ Shape:
31
+ - Input: :math:`(N, C, *)`
32
+ - Output: :math:`(N, C, *)` (same shape as input)
33
+
34
+ Examples::
35
+
36
+ >>> lrn = nn.LocalResponseNorm(2)
37
+ >>> signal_2d = torch.randn(32, 5, 24, 24)
38
+ >>> signal_4d = torch.randn(16, 5, 7, 7, 7, 7)
39
+ >>> output_2d = lrn(signal_2d)
40
+ >>> output_4d = lrn(signal_4d)
41
+
42
+ """
43
+
44
+ __constants__ = ['size', 'alpha', 'beta', 'k']
45
+ size: int
46
+ alpha: float
47
+ beta: float
48
+ k: float
49
+
50
+ def __init__(self, size: int, alpha: float = 1e-4, beta: float = 0.75, k: float = 1.) -> None:
51
+ super().__init__()
52
+ self.size = size
53
+ self.alpha = alpha
54
+ self.beta = beta
55
+ self.k = k
56
+
57
+ def forward(self, input: Tensor) -> Tensor:
58
+ return F.local_response_norm(input, self.size, self.alpha, self.beta,
59
+ self.k)
60
+
61
+ def extra_repr(self):
62
+ return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
63
+
64
+
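+ # Editorial sketch (hypothetical helper, not part of the module above): a naive
+ # per-channel loop implementing the docstring formula. Boundary handling is the
+ # truncated sum; the fused implementation may differ slightly at the edges.
+ def _reference_lrn(x, size, alpha=1e-4, beta=0.75, k=1.0):
+     out = torch.empty_like(x)
+     num_channels = x.shape[1]
+     for c in range(num_channels):
+         lo = max(0, c - size // 2)
+         hi = min(num_channels, c + (size - 1) // 2 + 1)
+         # k + (alpha / n) * sum of squared activations in the channel window
+         denom = (k + (alpha / size) * (x[:, lo:hi] ** 2).sum(dim=1)) ** beta
+         out[:, c] = x[:, c] / denom
+     return out
+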
65
+ class CrossMapLRN2d(Module):
66
+ size: int
67
+ alpha: float
68
+ beta: float
69
+ k: float
70
+
71
+ def __init__(self, size: int, alpha: float = 1e-4, beta: float = 0.75, k: float = 1) -> None:
72
+ super().__init__()
73
+ self.size = size
74
+ self.alpha = alpha
75
+ self.beta = beta
76
+ self.k = k
77
+
78
+ def forward(self, input: Tensor) -> Tensor:
79
+ return _cross_map_lrn2d.apply(input, self.size, self.alpha, self.beta,
80
+ self.k)
81
+
82
+ def extra_repr(self) -> str:
83
+ return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
84
+
85
+
86
+ _shape_t = Union[int, List[int], Size]
87
+
88
+
89
+ class LayerNorm(Module):
90
+ r"""Applies Layer Normalization over a mini-batch of inputs.
91
+
92
+ This layer implements the operation as described in
93
+ the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`__
94
+
95
+ .. math::
96
+ y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
97
+
98
+ The mean and standard-deviation are calculated over the last `D` dimensions, where `D`
99
+ is the dimension of :attr:`normalized_shape`. For example, if :attr:`normalized_shape`
100
+ is ``(3, 5)`` (a 2-dimensional shape), the mean and standard-deviation are computed over
101
+ the last 2 dimensions of the input (i.e. ``input.mean((-2, -1))``).
102
+ :math:`\gamma` and :math:`\beta` are learnable affine transform parameters of
103
+ :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
104
+ The standard-deviation is calculated via the biased estimator, equivalent to
105
+ `torch.var(input, unbiased=False)`.
106
+
107
+ .. note::
108
+ Unlike Batch Normalization and Instance Normalization, which apply
109
+ scalar scale and bias for each entire channel/plane with the
110
+ :attr:`affine` option, Layer Normalization applies per-element scale and
111
+ bias with :attr:`elementwise_affine`.
112
+
113
+ This layer uses statistics computed from input data in both training and
114
+ evaluation modes.
115
+
116
+ Args:
117
+ normalized_shape (int or list or torch.Size): input shape from an expected input
118
+ of size
119
+
120
+ .. math::
121
+ [* \times \text{normalized\_shape}[0] \times \text{normalized\_shape}[1]
122
+ \times \ldots \times \text{normalized\_shape}[-1]]
123
+
124
+ If a single integer is used, it is treated as a singleton list, and this module will
125
+ normalize over the last dimension which is expected to be of that specific size.
126
+ eps: a value added to the denominator for numerical stability. Default: 1e-5
127
+ elementwise_affine: a boolean value that when set to ``True``, this module
128
+ has learnable per-element affine parameters initialized to ones (for weights)
129
+ and zeros (for biases). Default: ``True``.
130
+ bias: If set to ``False``, the layer will not learn an additive bias (only relevant if
131
+ :attr:`elementwise_affine` is ``True``). Default: ``True``.
132
+
133
+ Attributes:
134
+ weight: the learnable weights of the module of shape
135
+ :math:`\text{normalized\_shape}` when :attr:`elementwise_affine` is set to ``True``.
136
+ The values are initialized to 1.
137
+ bias: the learnable bias of the module of shape
138
+ :math:`\text{normalized\_shape}` when :attr:`elementwise_affine` is set to ``True``.
139
+ The values are initialized to 0.
140
+
141
+ Shape:
142
+ - Input: :math:`(N, *)`
143
+ - Output: :math:`(N, *)` (same shape as input)
144
+
145
+ Examples::
146
+
147
+ >>> # NLP Example
148
+ >>> batch, sentence_length, embedding_dim = 20, 5, 10
149
+ >>> embedding = torch.randn(batch, sentence_length, embedding_dim)
150
+ >>> layer_norm = nn.LayerNorm(embedding_dim)
151
+ >>> # Activate module
152
+ >>> layer_norm(embedding)
153
+ >>>
154
+ >>> # Image Example
155
+ >>> N, C, H, W = 20, 5, 10, 10
156
+ >>> input = torch.randn(N, C, H, W)
157
+ >>> # Normalize over the last three dimensions (i.e. the channel and spatial dimensions)
158
+ >>> # as shown in the image below
159
+ >>> layer_norm = nn.LayerNorm([C, H, W])
160
+ >>> output = layer_norm(input)
161
+
162
+ .. image:: ../_static/img/nn/layer_norm.jpg
163
+ :scale: 50 %
164
+
165
+ """
166
+
167
+ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine']
168
+ normalized_shape: Tuple[int, ...]
169
+ eps: float
170
+ elementwise_affine: bool
171
+
172
+ def __init__(self, normalized_shape: _shape_t, eps: float = 1e-5, elementwise_affine: bool = True,
173
+ bias: bool = True, device=None, dtype=None) -> None:
174
+ factory_kwargs = {'device': device, 'dtype': dtype}
175
+ super().__init__()
176
+ if isinstance(normalized_shape, numbers.Integral):
177
+ # mypy error: incompatible types in assignment
178
+ normalized_shape = (normalized_shape,) # type: ignore[assignment]
179
+ self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type]
180
+ self.eps = eps
181
+ self.elementwise_affine = elementwise_affine
182
+ if self.elementwise_affine:
183
+ self.weight = Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
184
+ if bias:
185
+ self.bias = Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
186
+ else:
187
+ self.register_parameter('bias', None)
188
+ else:
189
+ self.register_parameter('weight', None)
190
+ self.register_parameter('bias', None)
191
+
192
+ self.reset_parameters()
193
+
194
+ def reset_parameters(self) -> None:
195
+ if self.elementwise_affine:
196
+ init.ones_(self.weight)
197
+ if self.bias is not None:
198
+ init.zeros_(self.bias)
199
+
200
+ def forward(self, input: Tensor) -> Tensor:
201
+ return F.layer_norm(
202
+ input, self.normalized_shape, self.weight, self.bias, self.eps)
203
+
204
+ def extra_repr(self) -> str:
205
+ return '{normalized_shape}, eps={eps}, ' \
206
+ 'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
207
+
208
+
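+ # Editorial sketch (hypothetical, not part of the module above): the
+ # normalization written out explicitly, using the biased variance over the
+ # last ``len(normalized_shape)`` dimensions as stated in the docstring.
+ def _reference_layer_norm(x, normalized_shape, weight=None, bias=None, eps=1e-5):
+     dims = tuple(range(-len(normalized_shape), 0))  # e.g. (-2, -1) for a 2-dim shape
+     mean = x.mean(dim=dims, keepdim=True)
+     var = x.var(dim=dims, unbiased=False, keepdim=True)
+     y = (x - mean) / torch.sqrt(var + eps)
+     if weight is not None:
+         y = y * weight
+     if bias is not None:
+         y = y + bias
+     return y
+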
209
+ class GroupNorm(Module):
210
+ r"""Applies Group Normalization over a mini-batch of inputs.
211
+
212
+ This layer implements the operation as described in
213
+ the paper `Group Normalization <https://arxiv.org/abs/1803.08494>`__
214
+
215
+ .. math::
216
+ y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
217
+
218
+ The input channels are separated into :attr:`num_groups` groups, each containing
219
+ ``num_channels / num_groups`` channels. :attr:`num_channels` must be divisible by
220
+ :attr:`num_groups`. The mean and standard-deviation are calculated
221
+ separately over each group. :math:`\gamma` and :math:`\beta` are learnable
222
+ per-channel affine transform parameter vectors of size :attr:`num_channels` if
223
+ :attr:`affine` is ``True``.
224
+ The standard-deviation is calculated via the biased estimator, equivalent to
225
+ `torch.var(input, unbiased=False)`.
226
+
227
+ This layer uses statistics computed from input data in both training and
228
+ evaluation modes.
229
+
230
+ Args:
231
+ num_groups (int): number of groups to separate the channels into
232
+ num_channels (int): number of channels expected in input
233
+ eps: a value added to the denominator for numerical stability. Default: 1e-5
234
+ affine: a boolean value that when set to ``True``, this module
235
+ has learnable per-channel affine parameters initialized to ones (for weights)
236
+ and zeros (for biases). Default: ``True``.
237
+
238
+ Shape:
239
+ - Input: :math:`(N, C, *)` where :math:`C=\text{num\_channels}`
240
+ - Output: :math:`(N, C, *)` (same shape as input)
241
+
242
+ Examples::
243
+
244
+ >>> input = torch.randn(20, 6, 10, 10)
245
+ >>> # Separate 6 channels into 3 groups
246
+ >>> m = nn.GroupNorm(3, 6)
247
+ >>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
248
+ >>> m = nn.GroupNorm(6, 6)
249
+ >>> # Put all 6 channels into a single group (equivalent with LayerNorm)
250
+ >>> m = nn.GroupNorm(1, 6)
251
+ >>> # Activating the module
252
+ >>> output = m(input)
253
+ """
254
+
255
+ __constants__ = ['num_groups', 'num_channels', 'eps', 'affine']
256
+ num_groups: int
257
+ num_channels: int
258
+ eps: float
259
+ affine: bool
260
+
261
+ def __init__(self, num_groups: int, num_channels: int, eps: float = 1e-5, affine: bool = True,
262
+ device=None, dtype=None) -> None:
263
+ factory_kwargs = {'device': device, 'dtype': dtype}
264
+ super().__init__()
265
+ if num_channels % num_groups != 0:
266
+ raise ValueError('num_channels must be divisible by num_groups')
267
+
268
+ self.num_groups = num_groups
269
+ self.num_channels = num_channels
270
+ self.eps = eps
271
+ self.affine = affine
272
+ if self.affine:
273
+ self.weight = Parameter(torch.empty(num_channels, **factory_kwargs))
274
+ self.bias = Parameter(torch.empty(num_channels, **factory_kwargs))
275
+ else:
276
+ self.register_parameter('weight', None)
277
+ self.register_parameter('bias', None)
278
+
279
+ self.reset_parameters()
280
+
281
+ def reset_parameters(self) -> None:
282
+ if self.affine:
283
+ init.ones_(self.weight)
284
+ init.zeros_(self.bias)
285
+
286
+ def forward(self, input: Tensor) -> Tensor:
287
+ return F.group_norm(
288
+ input, self.num_groups, self.weight, self.bias, self.eps)
289
+
290
+ def extra_repr(self) -> str:
291
+ return '{num_groups}, {num_channels}, eps={eps}, ' \
292
+ 'affine={affine}'.format(**self.__dict__)
293
+
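+ # Editorial sketch (hypothetical, not part of the module above): GroupNorm as a
+ # reshape to (N, G, -1), per-group statistics with biased variance, then a
+ # per-channel affine, matching the docstring.
+ def _reference_group_norm(x, num_groups, weight=None, bias=None, eps=1e-5):
+     N, C = x.shape[0], x.shape[1]
+     g = x.reshape(N, num_groups, -1)
+     mean = g.mean(dim=-1, keepdim=True)
+     var = g.var(dim=-1, unbiased=False, keepdim=True)
+     y = ((g - mean) / torch.sqrt(var + eps)).reshape(x.shape)
+     shape = (1, C) + (1,) * (x.dim() - 2)
+     if weight is not None:
+         y = y * weight.reshape(shape)
+     if bias is not None:
+         y = y + bias.reshape(shape)
+     return y
+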
294
+
295
+ # TODO: ContrastiveNorm2d
296
+ # TODO: DivisiveNorm2d
297
+ # TODO: SubtractiveNorm2d
venv/lib/python3.10/site-packages/torch/nn/modules/padding.py ADDED
@@ -0,0 +1,801 @@
1
+ from .module import Module
2
+ from .utils import _pair, _quadruple, _ntuple
3
+ from .. import functional as F
4
+
5
+ from torch import Tensor
6
+ from ..common_types import _size_2_t, _size_4_t, _size_6_t
7
+ from typing import Sequence, Tuple
8
+
9
+
10
+ # TODO: grad_output size asserts in THNN
11
+
12
+ __all__ = ['CircularPad1d', 'CircularPad2d', 'CircularPad3d', 'ConstantPad1d', 'ConstantPad2d',
13
+ 'ConstantPad3d', 'ReflectionPad1d', 'ReflectionPad2d', 'ReflectionPad3d',
14
+ 'ReplicationPad1d', 'ReplicationPad2d', 'ReplicationPad3d', 'ZeroPad1d', 'ZeroPad2d', 'ZeroPad3d']
15
+
16
+
17
+ class _CircularPadNd(Module):
18
+ __constants__ = ['padding']
19
+ padding: Sequence[int]
20
+
21
+ def _check_input_dim(self, input):
22
+ raise NotImplementedError
23
+
24
+ def forward(self, input: Tensor) -> Tensor:
25
+ self._check_input_dim(input)
26
+ return F.pad(input, self.padding, 'circular')
27
+
28
+ def extra_repr(self) -> str:
29
+ return f'{self.padding}'
30
+
31
+
32
+ class CircularPad1d(_CircularPadNd):
33
+ r"""Pads the input tensor using circular padding of the input boundary.
34
+
35
+ Tensor values at the beginning of the dimension are used to pad the end,
36
+ and values at the end are used to pad the beginning. If negative padding is
37
+ applied then the ends of the tensor get removed.
38
+
39
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
40
+
41
+ Args:
42
+ padding (int, tuple): the size of the padding. If `int`, uses the same
43
+ padding in all boundaries. If a 2-`tuple`, uses
44
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
45
+
46
+ Shape:
47
+ - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
48
+ - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
49
+
50
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
51
+
52
+ Examples::
53
+
54
+ >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
55
+ >>> m = nn.CircularPad1d(2)
56
+ >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
57
+ >>> input
58
+ tensor([[[0., 1., 2., 3.],
59
+ [4., 5., 6., 7.]]])
60
+ >>> m(input)
61
+ tensor([[[2., 3., 0., 1., 2., 3., 0., 1.],
62
+ [6., 7., 4., 5., 6., 7., 4., 5.]]])
63
+ >>> # using different paddings for different sides
64
+ >>> m = nn.CircularPad1d((3, 1))
65
+ >>> m(input)
66
+ tensor([[[1., 2., 3., 0., 1., 2., 3., 0.],
67
+ [5., 6., 7., 4., 5., 6., 7., 4.]]])
68
+ """
69
+
70
+ padding: Tuple[int, int]
71
+
72
+ def __init__(self, padding: _size_2_t) -> None:
73
+ super().__init__()
74
+ self.padding = _pair(padding)
75
+
76
+ def _check_input_dim(self, input):
77
+ if input.dim() != 2 and input.dim() != 3:
78
+ raise ValueError(
79
+ f"expected 2D or 3D input (got {input.dim()}D input)"
80
+ )
81
+
82
+
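+ # Editorial sketch (hypothetical helper, not part of the module above): for
+ # non-negative padding no larger than the input width, circular padding is the
+ # same as concatenating wrapped slices of the input along the last dimension.
+ def _circular_pad_1d_sketch(x, left, right):
+     import torch  # this module does not import torch at the top level
+     w = x.shape[-1]
+     # wrap the last `left` elements to the front and the first `right` to the back
+     return torch.cat([x[..., w - left:], x, x[..., :right]], dim=-1)
+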
83
+ class CircularPad2d(_CircularPadNd):
84
+ r"""Pads the input tensor using circular padding of the input boundary.
85
+
86
+ Tensor values at the beginning of the dimension are used to pad the end,
87
+ and values at the end are used to pad the beginning. If negative padding is
88
+ applied then the ends of the tensor get removed.
89
+
90
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
91
+
92
+ Args:
93
+ padding (int, tuple): the size of the padding. If `int`, uses the same
94
+ padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
95
+ :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
96
+
97
+ Shape:
98
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
99
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
100
+
101
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
102
+
103
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
104
+
105
+ Examples::
106
+
107
+ >>> m = nn.CircularPad2d(2)
108
+ >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
109
+ >>> input
110
+ tensor([[[[0., 1., 2.],
111
+ [3., 4., 5.],
112
+ [6., 7., 8.]]]])
113
+ >>> m(input)
114
+ tensor([[[[4., 5., 3., 4., 5., 3., 4.],
115
+ [7., 8., 6., 7., 8., 6., 7.],
116
+ [1., 2., 0., 1., 2., 0., 1.],
117
+ [4., 5., 3., 4., 5., 3., 4.],
118
+ [7., 8., 6., 7., 8., 6., 7.],
119
+ [1., 2., 0., 1., 2., 0., 1.],
120
+ [4., 5., 3., 4., 5., 3., 4.]]]])
121
+ >>> # using different paddings for different sides
122
+ >>> m = nn.CircularPad2d((1, 1, 2, 0))
123
+ >>> m(input)
124
+ tensor([[[[5., 3., 4., 5., 3.],
125
+ [8., 6., 7., 8., 6.],
126
+ [2., 0., 1., 2., 0.],
127
+ [5., 3., 4., 5., 3.],
128
+ [8., 6., 7., 8., 6.]]]])
129
+ """
130
+
131
+ padding: Tuple[int, int, int, int]
132
+
133
+ def __init__(self, padding: _size_4_t) -> None:
134
+ super().__init__()
135
+ self.padding = _quadruple(padding)
136
+
137
+ def _check_input_dim(self, input):
138
+ if input.dim() != 3 and input.dim() != 4:
139
+ raise ValueError(
140
+ f"expected 3D or 4D input (got {input.dim()}D input)"
141
+ )
142
+
143
+
144
+ class CircularPad3d(_CircularPadNd):
145
+ r"""Pads the input tensor using circular padding of the input boundary.
146
+
147
+ Tensor values at the beginning of the dimension are used to pad the end,
148
+ and values at the end are used to pad the beginning. If negative padding is
149
+ applied then the ends of the tensor get removed.
150
+
151
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
152
+
153
+ Args:
154
+ padding (int, tuple): the size of the padding. If `int`, uses the same
155
+ padding in all boundaries. If a 6-`tuple`, uses
156
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
157
+ :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
158
+ :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
159
+
160
+ Shape:
161
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
162
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
163
+ where
164
+
165
+ :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
166
+
167
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
168
+
169
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
170
+
171
+ Examples::
172
+
173
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
174
+ >>> m = nn.CircularPad3d(3)
175
+ >>> input = torch.randn(16, 3, 8, 320, 480)
176
+ >>> output = m(input)
177
+ >>> # using different paddings for different sides
178
+ >>> m = nn.CircularPad3d((3, 3, 6, 6, 1, 1))
179
+ >>> output = m(input)
180
+ """
181
+
182
+ padding: Tuple[int, int, int, int, int, int]
183
+
184
+ def __init__(self, padding: _size_6_t) -> None:
185
+ super().__init__()
186
+ self.padding = _ntuple(6)(padding)
187
+
188
+ def _check_input_dim(self, input):
189
+ if input.dim() != 4 and input.dim() != 5:
190
+ raise ValueError(
191
+ f"expected 4D or 5D input (got {input.dim()}D input)"
192
+ )
193
+
194
+
195
+ class _ConstantPadNd(Module):
196
+ __constants__ = ['padding', 'value']
197
+ value: float
198
+ padding: Sequence[int]
199
+
200
+ def __init__(self, value: float) -> None:
201
+ super().__init__()
202
+ self.value = value
203
+
204
+ def forward(self, input: Tensor) -> Tensor:
205
+ return F.pad(input, self.padding, 'constant', self.value)
206
+
207
+ def extra_repr(self) -> str:
208
+ return f'padding={self.padding}, value={self.value}'
209
+
210
+
211
+ class ConstantPad1d(_ConstantPadNd):
212
+ r"""Pads the input tensor boundaries with a constant value.
213
+
214
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
215
+
216
+ Args:
217
+ padding (int, tuple): the size of the padding. If `int`, uses the same
218
+ padding in both boundaries. If a 2-`tuple`, uses
219
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
220
+
221
+ Shape:
222
+ - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
223
+ - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
224
+
225
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
226
+
227
+ Examples::
228
+
229
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
230
+ >>> m = nn.ConstantPad1d(2, 3.5)
231
+ >>> input = torch.randn(1, 2, 4)
232
+ >>> input
233
+ tensor([[[-1.0491, -0.7152, -0.0749, 0.8530],
234
+ [-1.3287, 1.8966, 0.1466, -0.2771]]])
235
+ >>> m(input)
236
+ tensor([[[ 3.5000, 3.5000, -1.0491, -0.7152, -0.0749, 0.8530, 3.5000,
237
+ 3.5000],
238
+ [ 3.5000, 3.5000, -1.3287, 1.8966, 0.1466, -0.2771, 3.5000,
239
+ 3.5000]]])
240
+ >>> m = nn.ConstantPad1d(2, 3.5)
241
+ >>> input = torch.randn(1, 2, 3)
242
+ >>> input
243
+ tensor([[[ 1.6616, 1.4523, -1.1255],
244
+ [-3.6372, 0.1182, -1.8652]]])
245
+ >>> m(input)
246
+ tensor([[[ 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000, 3.5000],
247
+ [ 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000, 3.5000]]])
248
+ >>> # using different paddings for different sides
249
+ >>> m = nn.ConstantPad1d((3, 1), 3.5)
250
+ >>> m(input)
251
+ tensor([[[ 3.5000, 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000],
252
+ [ 3.5000, 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000]]])
253
+ """
254
+
255
+ padding: Tuple[int, int]
256
+
257
+ def __init__(self, padding: _size_2_t, value: float):
258
+ super().__init__(value)
259
+ self.padding = _pair(padding)
260
+
261
+
262
+ class ConstantPad2d(_ConstantPadNd):
263
+ r"""Pads the input tensor boundaries with a constant value.
264
+
265
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
266
+
267
+ Args:
268
+ padding (int, tuple): the size of the padding. If `int`, uses the same
269
+ padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
270
+ :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
271
+
272
+ Shape:
273
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
274
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
275
+
276
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
277
+
278
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
279
+
280
+ Examples::
281
+
282
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
283
+ >>> m = nn.ConstantPad2d(2, 3.5)
284
+ >>> input = torch.randn(1, 2, 2)
285
+ >>> input
286
+ tensor([[[ 1.6585, 0.4320],
287
+ [-0.8701, -0.4649]]])
288
+ >>> m(input)
289
+ tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
290
+ [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
291
+ [ 3.5000, 3.5000, 1.6585, 0.4320, 3.5000, 3.5000],
292
+ [ 3.5000, 3.5000, -0.8701, -0.4649, 3.5000, 3.5000],
293
+ [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
294
+ [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]])
295
+ >>> # using different paddings for different sides
296
+ >>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
297
+ >>> m(input)
298
+ tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
299
+ [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
300
+ [ 3.5000, 3.5000, 3.5000, 1.6585, 0.4320],
301
+ [ 3.5000, 3.5000, 3.5000, -0.8701, -0.4649],
302
+ [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]])
303
+ """
304
+
305
+ __constants__ = ['padding', 'value']
306
+ padding: Tuple[int, int, int, int]
307
+
308
+ def __init__(self, padding: _size_4_t, value: float) -> None:
309
+ super().__init__(value)
310
+ self.padding = _quadruple(padding)
311
+
312
+
313
+ class ConstantPad3d(_ConstantPadNd):
314
+ r"""Pads the input tensor boundaries with a constant value.
315
+
316
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
317
+
318
+ Args:
319
+ padding (int, tuple): the size of the padding. If `int`, uses the same
320
+ padding in all boundaries. If a 6-`tuple`, uses
321
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
322
+ :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
323
+ :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
324
+
325
+ Shape:
326
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
327
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
328
+ :math:`(C, D_{out}, H_{out}, W_{out})`, where
329
+
330
+ :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
331
+
332
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
333
+
334
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
335
+
336
+ Examples::
337
+
338
+ >>> m = nn.ConstantPad3d(3, 3.5)
339
+ >>> input = torch.randn(16, 3, 10, 20, 30)
340
+ >>> output = m(input)
341
+ >>> # using different paddings for different sides
342
+ >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5)
343
+ >>> output = m(input)
344
+ """
345
+
346
+ padding: Tuple[int, int, int, int, int, int]
347
+
348
+ def __init__(self, padding: _size_6_t, value: float) -> None:
349
+ super().__init__(value)
350
+ self.padding = _ntuple(6)(padding)
351
+
352
+
353
+ class _ReflectionPadNd(Module):
354
+ __constants__ = ['padding']
355
+ padding: Sequence[int]
356
+
357
+ def forward(self, input: Tensor) -> Tensor:
358
+ return F.pad(input, self.padding, 'reflect')
359
+
360
+ def extra_repr(self) -> str:
361
+ return f'{self.padding}'
362
+
363
+
364
+ class ReflectionPad1d(_ReflectionPadNd):
365
+ r"""Pads the input tensor using the reflection of the input boundary.
366
+
367
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
368
+
369
+ Args:
370
+ padding (int, tuple): the size of the padding. If `int`, uses the same
371
+ padding in all boundaries. If a 2-`tuple`, uses
372
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
373
+
374
+ Shape:
375
+ - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
376
+ - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
377
+
378
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
379
+
380
+ Examples::
381
+
382
+ >>> m = nn.ReflectionPad1d(2)
383
+ >>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles")
384
+ >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
385
+ >>> input
386
+ tensor([[[0., 1., 2., 3.],
387
+ [4., 5., 6., 7.]]])
388
+ >>> m(input)
389
+ tensor([[[2., 1., 0., 1., 2., 3., 2., 1.],
390
+ [6., 5., 4., 5., 6., 7., 6., 5.]]])
391
+ >>> # using different paddings for different sides
392
+ >>> m = nn.ReflectionPad1d((3, 1))
393
+ >>> m(input)
394
+ tensor([[[3., 2., 1., 0., 1., 2., 3., 2.],
395
+ [7., 6., 5., 4., 5., 6., 7., 6.]]])
396
+ """
397
+
398
+ padding: Tuple[int, int]
399
+
400
+ def __init__(self, padding: _size_2_t) -> None:
401
+ super().__init__()
402
+ self.padding = _pair(padding)
403
+
404
+
405
+ class ReflectionPad2d(_ReflectionPadNd):
406
+ r"""Pads the input tensor using the reflection of the input boundary.
407
+
408
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
409
+
410
+ Args:
411
+ padding (int, tuple): the size of the padding. If `int`, uses the same
412
+ padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
413
+ :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
414
+ Note that padding size should be less than the corresponding input dimension.
415
+
416
+ Shape:
417
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
418
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})` where
419
+
420
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
421
+
422
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
423
+
424
+ Examples::
425
+
426
+ >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
427
+ >>> m = nn.ReflectionPad2d(2)
428
+ >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
429
+ >>> input
430
+ tensor([[[[0., 1., 2.],
431
+ [3., 4., 5.],
432
+ [6., 7., 8.]]]])
433
+ >>> m(input)
434
+ tensor([[[[8., 7., 6., 7., 8., 7., 6.],
435
+ [5., 4., 3., 4., 5., 4., 3.],
436
+ [2., 1., 0., 1., 2., 1., 0.],
437
+ [5., 4., 3., 4., 5., 4., 3.],
438
+ [8., 7., 6., 7., 8., 7., 6.],
439
+ [5., 4., 3., 4., 5., 4., 3.],
440
+ [2., 1., 0., 1., 2., 1., 0.]]]])
441
+ >>> # using different paddings for different sides
442
+ >>> m = nn.ReflectionPad2d((1, 1, 2, 0))
443
+ >>> m(input)
444
+ tensor([[[[7., 6., 7., 8., 7.],
445
+ [4., 3., 4., 5., 4.],
446
+ [1., 0., 1., 2., 1.],
447
+ [4., 3., 4., 5., 4.],
448
+ [7., 6., 7., 8., 7.]]]])
449
+ """
450
+
451
+ padding: Tuple[int, int, int, int]
452
+
453
+ def __init__(self, padding: _size_4_t) -> None:
454
+ super().__init__()
455
+ self.padding = _quadruple(padding)
456
+
457
+
458
+ class ReflectionPad3d(_ReflectionPadNd):
459
+ r"""Pads the input tensor using the reflection of the input boundary.
460
+
461
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
462
+
463
+ Args:
464
+ padding (int, tuple): the size of the padding. If `int`, uses the same
465
+ padding in all boundaries. If a 6-`tuple`, uses
466
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
467
+ :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
468
+ :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
469
+
470
+ Shape:
471
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
472
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
473
+ where
474
+
475
+ :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
476
+
477
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
478
+
479
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
480
+
481
+ Examples::
482
+
483
+ >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
484
+ >>> m = nn.ReflectionPad3d(1)
485
+ >>> input = torch.arange(8, dtype=torch.float).reshape(1, 1, 2, 2, 2)
486
+ >>> m(input)
487
+ tensor([[[[[7., 6., 7., 6.],
488
+ [5., 4., 5., 4.],
489
+ [7., 6., 7., 6.],
490
+ [5., 4., 5., 4.]],
491
+ [[3., 2., 3., 2.],
492
+ [1., 0., 1., 0.],
493
+ [3., 2., 3., 2.],
494
+ [1., 0., 1., 0.]],
495
+ [[7., 6., 7., 6.],
496
+ [5., 4., 5., 4.],
497
+ [7., 6., 7., 6.],
498
+ [5., 4., 5., 4.]],
499
+ [[3., 2., 3., 2.],
500
+ [1., 0., 1., 0.],
501
+ [3., 2., 3., 2.],
502
+ [1., 0., 1., 0.]]]]])
503
+ """
504
+
505
+ padding: Tuple[int, int, int, int, int, int]
506
+
507
+ def __init__(self, padding: _size_6_t) -> None:
508
+ super().__init__()
509
+ self.padding = _ntuple(6)(padding)
510
+
511
+
512
+ class _ReplicationPadNd(Module):
513
+ __constants__ = ['padding']
514
+ padding: Sequence[int]
515
+
516
+ def forward(self, input: Tensor) -> Tensor:
517
+ return F.pad(input, self.padding, 'replicate')
518
+
519
+ def extra_repr(self) -> str:
520
+ return f'{self.padding}'
521
+
522
+
523
+ class ReplicationPad1d(_ReplicationPadNd):
524
+ r"""Pads the input tensor using replication of the input boundary.
525
+
526
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
527
+
528
+ Args:
529
+ padding (int, tuple): the size of the padding. If `int`, uses the same
530
+ padding in all boundaries. If a 2-`tuple`, uses
531
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
532
+
533
+ Shape:
534
+ - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
535
+ - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
536
+
537
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
538
+
539
+ Examples::
540
+
541
+ >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
542
+ >>> m = nn.ReplicationPad1d(2)
543
+ >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
544
+ >>> input
545
+ tensor([[[0., 1., 2., 3.],
546
+ [4., 5., 6., 7.]]])
547
+ >>> m(input)
548
+ tensor([[[0., 0., 0., 1., 2., 3., 3., 3.],
549
+ [4., 4., 4., 5., 6., 7., 7., 7.]]])
550
+ >>> # using different paddings for different sides
551
+ >>> m = nn.ReplicationPad1d((3, 1))
552
+ >>> m(input)
553
+ tensor([[[0., 0., 0., 0., 1., 2., 3., 3.],
554
+ [4., 4., 4., 4., 5., 6., 7., 7.]]])
555
+ """
556
+
557
+ padding: Tuple[int, int]
558
+
559
+ def __init__(self, padding: _size_2_t) -> None:
560
+ super().__init__()
561
+ self.padding = _pair(padding)
562
+
563
+
564
+ class ReplicationPad2d(_ReplicationPadNd):
565
+ r"""Pads the input tensor using replication of the input boundary.
566
+
567
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
568
+
569
+ Args:
570
+ padding (int, tuple): the size of the padding. If `int`, uses the same
571
+ padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
572
+ :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
573
+
574
+ Shape:
575
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
576
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
577
+
578
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
579
+
580
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
581
+
582
+ Examples::
583
+
584
+ >>> m = nn.ReplicationPad2d(2)
585
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
586
+ >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
587
+ >>> input
588
+ tensor([[[[0., 1., 2.],
589
+ [3., 4., 5.],
590
+ [6., 7., 8.]]]])
591
+ >>> m(input)
592
+ tensor([[[[0., 0., 0., 1., 2., 2., 2.],
593
+ [0., 0., 0., 1., 2., 2., 2.],
594
+ [0., 0., 0., 1., 2., 2., 2.],
595
+ [3., 3., 3., 4., 5., 5., 5.],
596
+ [6., 6., 6., 7., 8., 8., 8.],
597
+ [6., 6., 6., 7., 8., 8., 8.],
598
+ [6., 6., 6., 7., 8., 8., 8.]]]])
599
+ >>> # using different paddings for different sides
600
+ >>> m = nn.ReplicationPad2d((1, 1, 2, 0))
601
+ >>> m(input)
602
+ tensor([[[[0., 0., 1., 2., 2.],
603
+ [0., 0., 1., 2., 2.],
604
+ [0., 0., 1., 2., 2.],
605
+ [3., 3., 4., 5., 5.],
606
+ [6., 6., 7., 8., 8.]]]])
607
+ """
608
+
609
+ padding: Tuple[int, int, int, int]
610
+
611
+ def __init__(self, padding: _size_4_t) -> None:
612
+ super().__init__()
613
+ self.padding = _quadruple(padding)
614
+
615
+
616
+ class ReplicationPad3d(_ReplicationPadNd):
617
+ r"""Pads the input tensor using replication of the input boundary.
618
+
619
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
620
+
621
+ Args:
622
+ padding (int, tuple): the size of the padding. If `int`, uses the same
623
+ padding in all boundaries. If a 6-`tuple`, uses
624
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
625
+ :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
626
+ :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
627
+
628
+ Shape:
629
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
630
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
631
+ where
632
+
633
+ :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
634
+
635
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
636
+
637
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
638
+
639
+ Examples::
640
+
641
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
642
+ >>> m = nn.ReplicationPad3d(3)
643
+ >>> input = torch.randn(16, 3, 8, 320, 480)
644
+ >>> output = m(input)
645
+ >>> # using different paddings for different sides
646
+ >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1))
647
+ >>> output = m(input)
648
+ """
649
+
650
+ padding: Tuple[int, int, int, int, int, int]
651
+
652
+ def __init__(self, padding: _size_6_t) -> None:
653
+ super().__init__()
654
+ self.padding = _ntuple(6)(padding)
655
+
656
+
657
+ class ZeroPad1d(ConstantPad1d):
658
+ r"""Pads the input tensor boundaries with zero.
659
+
660
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
661
+
662
+ Args:
663
+ padding (int, tuple): the size of the padding. If `int`, uses the same
664
+ padding in both boundaries. If a 2-`tuple`, uses
665
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
666
+
667
+ Shape:
668
+ - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
669
+ - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
670
+
671
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
672
+
673
+ Examples::
674
+
675
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
676
+ >>> m = nn.ZeroPad1d(2)
677
+ >>> input = torch.randn(1, 2, 4)
678
+ >>> input
679
+ tensor([[[-1.0491, -0.7152, -0.0749, 0.8530],
680
+ [-1.3287, 1.8966, 0.1466, -0.2771]]])
681
+ >>> m(input)
682
+ tensor([[[ 0.0000, 0.0000, -1.0491, -0.7152, -0.0749, 0.8530, 0.0000,
683
+ 0.0000],
684
+ [ 0.0000, 0.0000, -1.3287, 1.8966, 0.1466, -0.2771, 0.0000,
685
+ 0.0000]]])
686
+ >>> m = nn.ZeroPad1d(2)
687
+ >>> input = torch.randn(1, 2, 3)
688
+ >>> input
689
+ tensor([[[ 1.6616, 1.4523, -1.1255],
690
+ [-3.6372, 0.1182, -1.8652]]])
691
+ >>> m(input)
692
+ tensor([[[ 0.0000, 0.0000, 1.6616, 1.4523, -1.1255, 0.0000, 0.0000],
693
+ [ 0.0000, 0.0000, -3.6372, 0.1182, -1.8652, 0.0000, 0.0000]]])
694
+ >>> # using different paddings for different sides
695
+ >>> m = nn.ZeroPad1d((3, 1))
696
+ >>> m(input)
697
+ tensor([[[ 0.0000, 0.0000, 0.0000, 1.6616, 1.4523, -1.1255, 0.0000],
698
+ [ 0.0000, 0.0000, 0.0000, -3.6372, 0.1182, -1.8652, 0.0000]]])
699
+ """
700
+
701
+ padding: Tuple[int, int]
702
+
703
+ def __init__(self, padding: _size_2_t) -> None:
704
+ super().__init__(padding, 0.)
705
+
706
+ def extra_repr(self) -> str:
707
+ return f'{self.padding}'
708
+
709
+ class ZeroPad2d(ConstantPad2d):
710
+ r"""Pads the input tensor boundaries with zero.
711
+
712
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
713
+
714
+ Args:
715
+ padding (int, tuple): the size of the padding. If `int`, uses the same
716
+ padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
717
+ :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
718
+
719
+ Shape:
720
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
721
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
722
+
723
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
724
+
725
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
726
+
727
+ Examples::
728
+
729
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
730
+ >>> m = nn.ZeroPad2d(2)
731
+ >>> input = torch.randn(1, 1, 3, 3)
732
+ >>> input
733
+ tensor([[[[-0.1678, -0.4418, 1.9466],
734
+ [ 0.9604, -0.4219, -0.5241],
735
+ [-0.9162, -0.5436, -0.6446]]]])
736
+ >>> m(input)
737
+ tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
738
+ [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
739
+ [ 0.0000, 0.0000, -0.1678, -0.4418, 1.9466, 0.0000, 0.0000],
740
+ [ 0.0000, 0.0000, 0.9604, -0.4219, -0.5241, 0.0000, 0.0000],
741
+ [ 0.0000, 0.0000, -0.9162, -0.5436, -0.6446, 0.0000, 0.0000],
742
+ [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
743
+ [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
744
+ >>> # using different paddings for different sides
745
+ >>> m = nn.ZeroPad2d((1, 1, 2, 0))
746
+ >>> m(input)
747
+ tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
748
+ [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
749
+ [ 0.0000, -0.1678, -0.4418, 1.9466, 0.0000],
750
+ [ 0.0000, 0.9604, -0.4219, -0.5241, 0.0000],
751
+ [ 0.0000, -0.9162, -0.5436, -0.6446, 0.0000]]]])
752
+ """
753
+
754
+ padding: Tuple[int, int, int, int]
755
+
756
+ def __init__(self, padding: _size_4_t) -> None:
757
+ super().__init__(padding, 0.)
758
+
759
+ def extra_repr(self) -> str:
760
+ return f'{self.padding}'
761
+
762
+ class ZeroPad3d(ConstantPad3d):
763
+ r"""Pads the input tensor boundaries with zero.
764
+
765
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
766
+
767
+ Args:
768
+ padding (int, tuple): the size of the padding. If `int`, uses the same
769
+ padding in all boundaries. If a 6-`tuple`, uses
770
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
771
+ :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
772
+ :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
773
+
774
+ Shape:
775
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
776
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
777
+ :math:`(C, D_{out}, H_{out}, W_{out})`, where
778
+
779
+ :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
780
+
781
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
782
+
783
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
784
+
785
+ Examples::
786
+
787
+ >>> m = nn.ZeroPad3d(3)
788
+ >>> input = torch.randn(16, 3, 10, 20, 30)
789
+ >>> output = m(input)
790
+ >>> # using different paddings for different sides
791
+ >>> m = nn.ZeroPad3d((3, 3, 6, 6, 0, 1))
792
+ >>> output = m(input)
793
+ """
794
+
795
+ padding: Tuple[int, int, int, int, int, int]
796
+
797
+ def __init__(self, padding: _size_6_t) -> None:
798
+ super().__init__(padding, 0.)
799
+
800
+ def extra_repr(self) -> str:
801
+ return f'{self.padding}'
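+ # Editorial sketch (hypothetical, not part of the module above): every class in
+ # this file is a thin wrapper over ``torch.nn.functional.pad``; for example,
+ # ``ZeroPad2d((l, r, t, b))`` matches ``F.pad(x, (l, r, t, b), 'constant', 0.)``.
+ def _zero_pad_2d_equivalence_check(x, l=1, r=2, t=3, b=0):
+     module_out = ZeroPad2d((l, r, t, b))(x)
+     functional_out = F.pad(x, (l, r, t, b), 'constant', 0.)
+     return bool((module_out == functional_out).all())
+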
venv/lib/python3.10/site-packages/torch/nn/modules/pixelshuffle.py ADDED
@@ -0,0 +1,113 @@
1
+ from .module import Module
2
+ from .. import functional as F
3
+
4
+ from torch import Tensor
5
+
6
+ __all__ = ['PixelShuffle', 'PixelUnshuffle']
7
+
8
+ class PixelShuffle(Module):
9
+ r"""Rearrange elements in a tensor according to an upscaling factor.
10
+
11
+ Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
12
+ to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an upscale factor.
13
+
14
+ This is useful for implementing efficient sub-pixel convolution
15
+ with a stride of :math:`1/r`.
16
+
17
+ See the paper:
18
+ `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_
19
+ by Shi et al. (2016) for more details.
20
+
21
+ Args:
22
+ upscale_factor (int): factor to increase spatial resolution by
23
+
24
+ Shape:
25
+ - Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions
26
+ - Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where
27
+
28
+ .. math::
29
+ C_{out} = C_{in} \div \text{upscale\_factor}^2
30
+
31
+ .. math::
32
+ H_{out} = H_{in} \times \text{upscale\_factor}
33
+
34
+ .. math::
35
+ W_{out} = W_{in} \times \text{upscale\_factor}
36
+
37
+ Examples::
38
+
39
+ >>> pixel_shuffle = nn.PixelShuffle(3)
40
+ >>> input = torch.randn(1, 9, 4, 4)
41
+ >>> output = pixel_shuffle(input)
42
+ >>> print(output.size())
43
+ torch.Size([1, 1, 12, 12])
44
+
45
+ .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network:
46
+ https://arxiv.org/abs/1609.05158
47
+ """
48
+
49
+ __constants__ = ['upscale_factor']
50
+ upscale_factor: int
51
+
52
+ def __init__(self, upscale_factor: int) -> None:
53
+ super().__init__()
54
+ self.upscale_factor = upscale_factor
55
+
56
+ def forward(self, input: Tensor) -> Tensor:
57
+ return F.pixel_shuffle(input, self.upscale_factor)
58
+
59
+ def extra_repr(self) -> str:
60
+ return f'upscale_factor={self.upscale_factor}'
61
+
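+ # Editorial sketch (hypothetical helper, not part of the module above): the
+ # same rearrangement written as an explicit reshape/permute for a 4D input.
+ def _pixel_shuffle_sketch(x, r):
+     n, c_r2, h, w = x.shape
+     c = c_r2 // (r * r)
+     # (N, C*r*r, H, W) -> (N, C, r, r, H, W) -> (N, C, H, r, W, r) -> (N, C, H*r, W*r)
+     return x.reshape(n, c, r, r, h, w).permute(0, 1, 4, 2, 5, 3).reshape(n, c, h * r, w * r)
+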
62
+
63
+ class PixelUnshuffle(Module):
64
+ r"""Reverse the PixelShuffle operation.
65
+
66
+ Reverses the :class:`~torch.nn.PixelShuffle` operation by rearranging elements
67
+ in a tensor of shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape
68
+ :math:`(*, C \times r^2, H, W)`, where r is a downscale factor.
69
+
70
+ See the paper:
71
+ `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_
72
+ by Shi et al. (2016) for more details.
73
+
74
+ Args:
75
+ downscale_factor (int): factor to decrease spatial resolution by
76
+
77
+ Shape:
78
+ - Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions
79
+ - Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where
80
+
81
+ .. math::
82
+ C_{out} = C_{in} \times \text{downscale\_factor}^2
83
+
84
+ .. math::
85
+ H_{out} = H_{in} \div \text{downscale\_factor}
86
+
87
+ .. math::
88
+ W_{out} = W_{in} \div \text{downscale\_factor}
89
+
90
+ Examples::
91
+
92
+ >>> pixel_unshuffle = nn.PixelUnshuffle(3)
93
+ >>> input = torch.randn(1, 1, 12, 12)
94
+ >>> output = pixel_unshuffle(input)
95
+ >>> print(output.size())
96
+ torch.Size([1, 9, 4, 4])
97
+
98
+ .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network:
99
+ https://arxiv.org/abs/1609.05158
100
+ """
101
+
102
+ __constants__ = ['downscale_factor']
103
+ downscale_factor: int
104
+
105
+ def __init__(self, downscale_factor: int) -> None:
106
+ super().__init__()
107
+ self.downscale_factor = downscale_factor
108
+
109
+ def forward(self, input: Tensor) -> Tensor:
110
+ return F.pixel_unshuffle(input, self.downscale_factor)
111
+
112
+ def extra_repr(self) -> str:
113
+ return f'downscale_factor={self.downscale_factor}'
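+
+ # Editorial sketch (hypothetical, not part of the module above): PixelUnshuffle
+ # inverts PixelShuffle for a matching factor, so this round trip reproduces the
+ # input exactly (channels must be divisible by r**2).
+ def _pixel_shuffle_round_trip_check(x, r=3):
+     y = PixelUnshuffle(r)(PixelShuffle(r)(x))
+     return bool((y == x).all())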
venv/lib/python3.10/site-packages/torch/nn/modules/sparse.py ADDED
@@ -0,0 +1,455 @@
1
+ from typing import Optional
2
+
3
+ import torch
4
+ from torch import Tensor
5
+ from torch.nn.parameter import Parameter
6
+
7
+ from .module import Module
8
+ from .. import functional as F
9
+ from .. import init
10
+
11
+ __all__ = ['Embedding', 'EmbeddingBag']
12
+
13
+ class Embedding(Module):
14
+ r"""A simple lookup table that stores embeddings of a fixed dictionary and size.
15
+
16
+ This module is often used to store word embeddings and retrieve them using indices.
17
+ The input to the module is a list of indices, and the output is the corresponding
18
+ word embeddings.
19
+
20
+ Args:
21
+ num_embeddings (int): size of the dictionary of embeddings
22
+ embedding_dim (int): the size of each embedding vector
23
+ padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
24
+ therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
25
+ i.e. it remains as a fixed "pad". For a newly constructed Embedding,
26
+ the embedding vector at :attr:`padding_idx` will default to all zeros,
27
+ but can be updated to another value to be used as the padding vector.
28
+ max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
29
+ is renormalized to have norm :attr:`max_norm`.
30
+ norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
31
+ scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of
32
+ the words in the mini-batch. Default ``False``.
33
+ sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor.
34
+ See Notes for more details regarding sparse gradients.
35
+
36
+ Attributes:
37
+ weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)
38
+ initialized from :math:`\mathcal{N}(0, 1)`
39
+
40
+ Shape:
41
+ - Input: :math:`(*)`, IntTensor or LongTensor of arbitrary shape containing the indices to extract
42
+ - Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}`
43
+
44
+ .. note::
45
+ Keep in mind that only a limited number of optimizers support
46
+ sparse gradients: currently it's :class:`optim.SGD` (`CUDA` and `CPU`),
47
+ :class:`optim.SparseAdam` (`CUDA` and `CPU`) and :class:`optim.Adagrad` (`CPU`)
48
+
49
+ .. note::
50
+ When :attr:`max_norm` is not ``None``, :class:`Embedding`'s forward method will modify the
51
+ :attr:`weight` tensor in-place. Since tensors needed for gradient computations cannot be
52
+ modified in-place, performing a differentiable operation on ``Embedding.weight`` before
53
+ calling :class:`Embedding`'s forward method requires cloning ``Embedding.weight`` when
54
+ :attr:`max_norm` is not ``None``. For example::
55
+
56
+ n, d, m = 3, 5, 7
57
+ embedding = nn.Embedding(n, d, max_norm=1.0)
58
+ W = torch.randn((m, d), requires_grad=True)
59
+ idx = torch.tensor([1, 2])
60
+ a = embedding.weight.clone() @ W.t() # weight must be cloned for this to be differentiable
61
+ b = embedding(idx) @ W.t() # modifies weight in-place
62
+ out = (a.unsqueeze(0) + b.unsqueeze(1))
63
+ loss = out.sigmoid().prod()
64
+ loss.backward()
65
+
66
+ Examples::
67
+
68
+ >>> # an Embedding module containing 10 tensors of size 3
69
+ >>> embedding = nn.Embedding(10, 3)
70
+ >>> # a batch of 2 samples of 4 indices each
71
+ >>> input = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
72
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
73
+ >>> embedding(input)
74
+ tensor([[[-0.0251, -1.6902, 0.7172],
75
+ [-0.6431, 0.0748, 0.6969],
76
+ [ 1.4970, 1.3448, -0.9685],
77
+ [-0.3677, -2.7265, -0.1685]],
78
+
79
+ [[ 1.4970, 1.3448, -0.9685],
80
+ [ 0.4362, -0.4004, 0.9400],
81
+ [-0.6431, 0.0748, 0.6969],
82
+ [ 0.9124, -2.3616, 1.1151]]])
83
+
84
+
85
+ >>> # example with padding_idx
86
+ >>> embedding = nn.Embedding(10, 3, padding_idx=0)
87
+ >>> input = torch.LongTensor([[0, 2, 0, 5]])
88
+ >>> embedding(input)
89
+ tensor([[[ 0.0000, 0.0000, 0.0000],
90
+ [ 0.1535, -2.0309, 0.9315],
91
+ [ 0.0000, 0.0000, 0.0000],
92
+ [-0.1655, 0.9897, 0.0635]]])
93
+
94
+ >>> # example of changing `pad` vector
95
+ >>> padding_idx = 0
96
+ >>> embedding = nn.Embedding(3, 3, padding_idx=padding_idx)
97
+ >>> embedding.weight
98
+ Parameter containing:
99
+ tensor([[ 0.0000, 0.0000, 0.0000],
100
+ [-0.7895, -0.7089, -0.0364],
101
+ [ 0.6778, 0.5803, 0.2678]], requires_grad=True)
102
+ >>> with torch.no_grad():
103
+ ... embedding.weight[padding_idx] = torch.ones(3)
104
+ >>> embedding.weight
105
+ Parameter containing:
106
+ tensor([[ 1.0000, 1.0000, 1.0000],
107
+ [-0.7895, -0.7089, -0.0364],
108
+ [ 0.6778, 0.5803, 0.2678]], requires_grad=True)
109
+ """
110
+
111
+ __constants__ = ['num_embeddings', 'embedding_dim', 'padding_idx', 'max_norm',
112
+ 'norm_type', 'scale_grad_by_freq', 'sparse']
113
+
114
+ num_embeddings: int
115
+ embedding_dim: int
116
+ padding_idx: Optional[int]
117
+ max_norm: Optional[float]
118
+ norm_type: float
119
+ scale_grad_by_freq: bool
120
+ weight: Tensor
121
+ freeze: bool
122
+ sparse: bool
123
+
124
+ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
125
+ max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
126
+ sparse: bool = False, _weight: Optional[Tensor] = None, _freeze: bool = False,
127
+ device=None, dtype=None) -> None:
128
+ factory_kwargs = {'device': device, 'dtype': dtype}
129
+ super().__init__()
130
+ self.num_embeddings = num_embeddings
131
+ self.embedding_dim = embedding_dim
132
+ if padding_idx is not None:
133
+ if padding_idx > 0:
134
+ assert padding_idx < self.num_embeddings, 'padding_idx must be within num_embeddings'
135
+ elif padding_idx < 0:
136
+ assert padding_idx >= -self.num_embeddings, 'padding_idx must be within num_embeddings'
137
+ padding_idx = self.num_embeddings + padding_idx
138
+ self.padding_idx = padding_idx
139
+ self.max_norm = max_norm
140
+ self.norm_type = norm_type
141
+ self.scale_grad_by_freq = scale_grad_by_freq
142
+ if _weight is None:
143
+ self.weight = Parameter(torch.empty((num_embeddings, embedding_dim), **factory_kwargs),
144
+ requires_grad=not _freeze)
145
+ self.reset_parameters()
146
+ else:
147
+ assert list(_weight.shape) == [num_embeddings, embedding_dim], \
148
+ 'Shape of weight does not match num_embeddings and embedding_dim'
149
+ self.weight = Parameter(_weight, requires_grad=not _freeze)
150
+
151
+ self.sparse = sparse
152
+
153
+ def reset_parameters(self) -> None:
154
+ init.normal_(self.weight)
155
+ self._fill_padding_idx_with_zero()
156
+
157
+ def _fill_padding_idx_with_zero(self) -> None:
158
+ if self.padding_idx is not None:
159
+ with torch.no_grad():
160
+ self.weight[self.padding_idx].fill_(0)
161
+
162
+ def forward(self, input: Tensor) -> Tensor:
163
+ return F.embedding(
164
+ input, self.weight, self.padding_idx, self.max_norm,
165
+ self.norm_type, self.scale_grad_by_freq, self.sparse)
166
+
167
+ def extra_repr(self) -> str:
168
+ s = '{num_embeddings}, {embedding_dim}'
169
+ if self.padding_idx is not None:
170
+ s += ', padding_idx={padding_idx}'
171
+ if self.max_norm is not None:
172
+ s += ', max_norm={max_norm}'
173
+ if self.norm_type != 2:
174
+ s += ', norm_type={norm_type}'
175
+ if self.scale_grad_by_freq is not False:
176
+ s += ', scale_grad_by_freq={scale_grad_by_freq}'
177
+ if self.sparse is not False:
178
+ s += ', sparse=True'
179
+ return s.format(**self.__dict__)
180
+
181
+ @classmethod
182
+ def from_pretrained(cls, embeddings, freeze=True, padding_idx=None,
183
+ max_norm=None, norm_type=2., scale_grad_by_freq=False,
184
+ sparse=False):
185
+ r"""Create Embedding instance from given 2-dimensional FloatTensor.
186
+
187
+ Args:
188
+ embeddings (Tensor): FloatTensor containing weights for the Embedding.
189
+ First dimension is being passed to Embedding as ``num_embeddings``, second as ``embedding_dim``.
190
+ freeze (bool, optional): If ``True``, the tensor does not get updated in the learning process.
191
+ Equivalent to ``embedding.weight.requires_grad = False``. Default: ``True``
192
+ padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
193
+ therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
194
+ i.e. it remains as a fixed "pad".
195
+ max_norm (float, optional): See module initialization documentation.
196
+ norm_type (float, optional): See module initialization documentation. Default ``2``.
197
+ scale_grad_by_freq (bool, optional): See module initialization documentation. Default ``False``.
198
+ sparse (bool, optional): See module initialization documentation.
199
+
200
+ Examples::
201
+
202
+ >>> # FloatTensor containing pretrained weights
203
+ >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
204
+ >>> embedding = nn.Embedding.from_pretrained(weight)
205
+ >>> # Get embeddings for index 1
206
+ >>> input = torch.LongTensor([1])
207
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
208
+ >>> embedding(input)
209
+ tensor([[ 4.0000, 5.1000, 6.3000]])
210
+ """
211
+ assert embeddings.dim() == 2, \
212
+ 'Embeddings parameter is expected to be 2-dimensional'
213
+ rows, cols = embeddings.shape
214
+ embedding = cls(
215
+ num_embeddings=rows,
216
+ embedding_dim=cols,
217
+ _weight=embeddings,
218
+ _freeze=freeze,
219
+ padding_idx=padding_idx,
220
+ max_norm=max_norm,
221
+ norm_type=norm_type,
222
+ scale_grad_by_freq=scale_grad_by_freq,
223
+ sparse=sparse)
224
+ return embedding
225
+
226
+
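
As a concrete illustration of the sparse-gradient note in the Embedding docstring above: with ``sparse=True`` the gradient of ``weight`` materializes only the rows that were actually indexed, and must be paired with one of the optimizers listed there. A minimal sketch, not part of the diff:

import torch
import torch.nn as nn

embedding = nn.Embedding(10, 3, sparse=True)
# SparseAdam is one of the few optimizers that accepts sparse gradients.
optimizer = torch.optim.SparseAdam(embedding.parameters(), lr=0.1)

input = torch.tensor([[1, 2, 4, 5]])
loss = embedding(input).sum()
loss.backward()

print(embedding.weight.grad.is_sparse)  # True: only rows 1, 2, 4, 5 carry gradient
optimizer.step()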
227
+ class EmbeddingBag(Module):
228
+ r"""Compute sums or means of 'bags' of embeddings, without instantiating the intermediate embeddings.
229
+
230
+ For bags of constant length, no :attr:`per_sample_weights`, no indices equal to :attr:`padding_idx`,
231
+ and with 2D inputs, this class
232
+
233
+ * with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=1)``,
234
+ * with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=1)``,
235
+ * with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=1)``.
236
+
237
+ However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these
238
+ operations.
239
+
240
+ EmbeddingBag also supports per-sample weights as an argument to the forward
241
+ pass. This scales the output of the Embedding before performing a weighted
242
+ reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the
243
+ only supported ``mode`` is ``"sum"``, which computes a weighted sum according to
244
+ :attr:`per_sample_weights`.
245
+
246
+ Args:
247
+ num_embeddings (int): size of the dictionary of embeddings
248
+ embedding_dim (int): the size of each embedding vector
249
+ max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
250
+ is renormalized to have norm :attr:`max_norm`.
251
+ norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
252
+ scale_grad_by_freq (bool, optional): if given, this will scale gradients by the inverse of frequency of
253
+ the words in the mini-batch. Default ``False``.
254
+ Note: this option is not supported when ``mode="max"``.
255
+ mode (str, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
256
+ ``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights`
257
+ into consideration. ``"mean"`` computes the average of the values
258
+ in the bag, ``"max"`` computes the max value over each bag.
259
+ Default: ``"mean"``
260
+ sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See
261
+ Notes for more details regarding sparse gradients. Note: this option is not
262
+ supported when ``mode="max"``.
263
+ include_last_offset (bool, optional): if ``True``, :attr:`offsets` has one additional element, where the last element
264
+ is equivalent to the size of `indices`. This matches the CSR format.
265
+ padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the
266
+ gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated
267
+ during training, i.e. it remains as a fixed "pad". For a newly constructed
268
+ EmbeddingBag, the embedding vector at :attr:`padding_idx` will default to all
269
+ zeros, but can be updated to another value to be used as the padding vector.
270
+ Note that the embedding vector at :attr:`padding_idx` is excluded from the
271
+ reduction.
272
+
273
+ Attributes:
274
+ weight (Tensor): the learnable weights of the module of shape `(num_embeddings, embedding_dim)`
275
+ initialized from :math:`\mathcal{N}(0, 1)`.
276
+
277
+ Examples::
278
+
279
+ >>> # an EmbeddingBag module containing 10 tensors of size 3
280
+ >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum')
281
+ >>> # a batch of 2 samples of 4 indices each
282
+ >>> input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
283
+ >>> offsets = torch.tensor([0, 4], dtype=torch.long)
284
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
285
+ >>> embedding_sum(input, offsets)
286
+ tensor([[-0.8861, -5.4350, -0.0523],
287
+ [ 1.1306, -2.5798, -1.0044]])
288
+
289
+ >>> # Example with padding_idx
290
+ >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum', padding_idx=2)
291
+ >>> input = torch.tensor([2, 2, 2, 2, 4, 3, 2, 9], dtype=torch.long)
292
+ >>> offsets = torch.tensor([0, 4], dtype=torch.long)
293
+ >>> embedding_sum(input, offsets)
294
+ tensor([[ 0.0000, 0.0000, 0.0000],
295
+ [-0.7082, 3.2145, -2.6251]])
296
+
297
+ >>> # An EmbeddingBag can be loaded from an Embedding like so
298
+ >>> embedding = nn.Embedding(10, 3, padding_idx=2)
299
+ >>> embedding_sum = nn.EmbeddingBag.from_pretrained(
300
+ embedding.weight,
301
+ padding_idx=embedding.padding_idx,
302
+ mode='sum')
303
+ """
304
+
305
+ __constants__ = ['num_embeddings', 'embedding_dim', 'max_norm', 'norm_type',
306
+ 'scale_grad_by_freq', 'mode', 'sparse', 'include_last_offset',
307
+ 'padding_idx']
308
+
309
+ num_embeddings: int
310
+ embedding_dim: int
311
+ max_norm: Optional[float]
312
+ norm_type: float
313
+ scale_grad_by_freq: bool
314
+ weight: Tensor
315
+ mode: str
316
+ sparse: bool
317
+ include_last_offset: bool
318
+ padding_idx: Optional[int]
319
+
320
+ def __init__(self, num_embeddings: int, embedding_dim: int,
321
+ max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
322
+ mode: str = 'mean', sparse: bool = False, _weight: Optional[Tensor] = None,
323
+ include_last_offset: bool = False, padding_idx: Optional[int] = None,
324
+ device=None, dtype=None) -> None:
325
+ factory_kwargs = {'device': device, 'dtype': dtype}
326
+ super().__init__()
327
+ self.num_embeddings = num_embeddings
328
+ self.embedding_dim = embedding_dim
329
+ self.max_norm = max_norm
330
+ self.norm_type = norm_type
331
+ self.scale_grad_by_freq = scale_grad_by_freq
332
+ if padding_idx is not None:
333
+ if padding_idx > 0:
334
+ assert padding_idx < self.num_embeddings, 'padding_idx must be within num_embeddings'
335
+ elif padding_idx < 0:
336
+ assert padding_idx >= -self.num_embeddings, 'padding_idx must be within num_embeddings'
337
+ padding_idx = self.num_embeddings + padding_idx
338
+ self.padding_idx = padding_idx
339
+ if _weight is None:
340
+ self.weight = Parameter(torch.empty((num_embeddings, embedding_dim), **factory_kwargs))
341
+ self.reset_parameters()
342
+ else:
343
+ assert list(_weight.shape) == [num_embeddings, embedding_dim], \
344
+ 'Shape of weight does not match num_embeddings and embedding_dim'
345
+ self.weight = Parameter(_weight)
346
+ self.mode = mode
347
+ self.sparse = sparse
348
+ self.include_last_offset = include_last_offset
349
+
350
+ def reset_parameters(self) -> None:
351
+ init.normal_(self.weight)
352
+ self._fill_padding_idx_with_zero()
353
+
354
+ def _fill_padding_idx_with_zero(self) -> None:
355
+ if self.padding_idx is not None:
356
+ with torch.no_grad():
357
+ self.weight[self.padding_idx].fill_(0)
358
+
359
+ def forward(self, input: Tensor, offsets: Optional[Tensor] = None, per_sample_weights: Optional[Tensor] = None) -> Tensor:
360
+ """Forward pass of EmbeddingBag.
361
+
362
+ Args:
363
+ input (Tensor): Tensor containing bags of indices into the embedding matrix.
364
+ offsets (Tensor, optional): Only used when :attr:`input` is 1D. :attr:`offsets` determines
365
+ the starting index position of each bag (sequence) in :attr:`input`.
366
+ per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
367
+ to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights`
368
+ must have exactly the same shape as input and is treated as having the same
369
+ :attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``.
370
+
371
+ Returns:
372
+ Output Tensor of shape `(B, embedding_dim)`.
373
+
374
+ .. note::
375
+
376
+ A few notes about ``input`` and ``offsets``:
377
+
378
+ - :attr:`input` and :attr:`offsets` have to be of the same type, either int or long
379
+
380
+ - If :attr:`input` is 2D of shape `(B, N)`, it will be treated as ``B`` bags (sequences)
381
+ each of fixed length ``N``, and this will return ``B`` values aggregated in a way
382
+ depending on the :attr:`mode`. :attr:`offsets` is ignored and required to be ``None`` in this case.
383
+
384
+ - If :attr:`input` is 1D of shape `(N)`, it will be treated as a concatenation of
385
+ multiple bags (sequences). :attr:`offsets` is required to be a 1D tensor containing the
386
+ starting index positions of each bag in :attr:`input`. Therefore, for :attr:`offsets` of shape `(B)`,
387
+ :attr:`input` will be viewed as having ``B`` bags. Empty bags (i.e., having 0-length) will have
388
+ returned vectors filled by zeros.
389
+ """
390
+ return F.embedding_bag(input, self.weight, offsets,
391
+ self.max_norm, self.norm_type,
392
+ self.scale_grad_by_freq, self.mode, self.sparse,
393
+ per_sample_weights, self.include_last_offset,
394
+ self.padding_idx)
395
+
396
+ def extra_repr(self) -> str:
397
+ s = '{num_embeddings}, {embedding_dim}'
398
+ if self.max_norm is not None:
399
+ s += ', max_norm={max_norm}'
400
+ if self.norm_type != 2:
401
+ s += ', norm_type={norm_type}'
402
+ if self.scale_grad_by_freq is not False:
403
+ s += ', scale_grad_by_freq={scale_grad_by_freq}'
404
+ s += ', mode={mode}'
405
+ if self.padding_idx is not None:
406
+ s += ', padding_idx={padding_idx}'
407
+ return s.format(**{k: repr(v) for k, v in self.__dict__.items()})
408
+
409
+ @classmethod
410
+ def from_pretrained(cls, embeddings: Tensor, freeze: bool = True, max_norm: Optional[float] = None,
411
+ norm_type: float = 2., scale_grad_by_freq: bool = False,
412
+ mode: str = 'mean', sparse: bool = False, include_last_offset: bool = False,
413
+ padding_idx: Optional[int] = None) -> 'EmbeddingBag':
414
+ r"""Create EmbeddingBag instance from given 2-dimensional FloatTensor.
415
+
416
+ Args:
417
+ embeddings (Tensor): FloatTensor containing weights for the EmbeddingBag.
418
+ First dimension is being passed to EmbeddingBag as 'num_embeddings', second as 'embedding_dim'.
419
+ freeze (bool, optional): If ``True``, the tensor does not get updated in the learning process.
420
+ Equivalent to ``embeddingbag.weight.requires_grad = False``. Default: ``True``
421
+ max_norm (float, optional): See module initialization documentation. Default: ``None``
422
+ norm_type (float, optional): See module initialization documentation. Default ``2``.
423
+ scale_grad_by_freq (bool, optional): See module initialization documentation. Default ``False``.
424
+ mode (str, optional): See module initialization documentation. Default: ``"mean"``
425
+ sparse (bool, optional): See module initialization documentation. Default: ``False``.
426
+ include_last_offset (bool, optional): See module initialization documentation. Default: ``False``.
427
+ padding_idx (int, optional): See module initialization documentation. Default: ``None``.
428
+
429
+ Examples::
430
+
431
+ >>> # FloatTensor containing pretrained weights
432
+ >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
433
+ >>> embeddingbag = nn.EmbeddingBag.from_pretrained(weight)
434
+ >>> # Get embeddings for index 1
435
+ >>> input = torch.LongTensor([[1, 0]])
436
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
437
+ >>> embeddingbag(input)
438
+ tensor([[ 2.5000, 3.7000, 4.6500]])
439
+ """
440
+ assert embeddings.dim() == 2, \
441
+ 'Embeddings parameter is expected to be 2-dimensional'
442
+ rows, cols = embeddings.shape
443
+ embeddingbag = cls(
444
+ num_embeddings=rows,
445
+ embedding_dim=cols,
446
+ _weight=embeddings,
447
+ max_norm=max_norm,
448
+ norm_type=norm_type,
449
+ scale_grad_by_freq=scale_grad_by_freq,
450
+ mode=mode,
451
+ sparse=sparse,
452
+ include_last_offset=include_last_offset,
453
+ padding_idx=padding_idx)
454
+ embeddingbag.weight.requires_grad = not freeze
455
+ return embeddingbag
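
The equivalence bullets at the top of EmbeddingBag's docstring can be checked directly, with the 1D-input-plus-offsets form (and ``per_sample_weights``, which only ``mode='sum'`` supports) shown alongside. A sketch using only the classes in this file:

import torch
import torch.nn as nn

bag = nn.EmbeddingBag(10, 3, mode='sum')
# Reuse the same weights in a plain Embedding to verify the equivalence.
emb = nn.Embedding.from_pretrained(bag.weight.detach().clone())

input_2d = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])  # 2D input: offsets must be None
assert torch.allclose(bag(input_2d), emb(input_2d).sum(dim=1))

# 1D input + offsets describes variable-length bags; per_sample_weights
# scales each looked-up row before the 'sum' reduction.
flat = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
weights = torch.rand(8)
out = bag(flat, offsets, per_sample_weights=weights)
print(out.shape)  # torch.Size([2, 3])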
venv/lib/python3.10/site-packages/torch/nn/modules/transformer.py ADDED
@@ -0,0 +1,975 @@
1
+ import copy
2
+ from typing import Optional, Any, Union, Callable
3
+
4
+ import torch
5
+ import warnings
6
+ from torch import Tensor
7
+ from .. import functional as F
8
+ from .module import Module
9
+ from .activation import MultiheadAttention
10
+ from .container import ModuleList
11
+ from ..init import xavier_uniform_
12
+ from .dropout import Dropout
13
+ from .linear import Linear
14
+ from .normalization import LayerNorm
15
+
16
+ __all__ = ['Transformer', 'TransformerEncoder', 'TransformerDecoder', 'TransformerEncoderLayer', 'TransformerDecoderLayer']
17
+
18
+ def _generate_square_subsequent_mask(
19
+ sz: int,
20
+ device: Optional[torch.device] = None,
21
+ dtype: Optional[torch.dtype] = None,
22
+ ) -> Tensor:
23
+ r"""Generate a square causal mask for the sequence.
24
+
25
+ The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0).
26
+ """
27
+ if device is None:
28
+ device = torch.device('cpu')
29
+ if dtype is None:
30
+ dtype = torch.float32
31
+ return torch.triu(
32
+ torch.full((sz, sz), float('-inf'), dtype=dtype, device=device),
33
+ diagonal=1,
34
+ )
35
+
36
+
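
For intuition, the helper above builds the standard additive causal mask. Since it is module-private, the sketch below just repeats its body for ``sz = 4``; the printed values are exact:

import torch

sz = 4
mask = torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1)
# Row i may attend to positions 0..i; the -inf entries above the
# diagonal zero out those attention weights after the softmax.
print(mask)
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])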
37
+ def _get_seq_len(
38
+ src: Tensor,
39
+ batch_first: bool
40
+ ) -> Optional[int]:
41
+
42
+ if src.is_nested:
43
+ return None
44
+ else:
45
+ src_size = src.size()
46
+ if len(src_size) == 2:
47
+ # unbatched: S, E
48
+ return src_size[0]
49
+ else:
50
+ # batched: B, S, E if batch_first else S, B, E
51
+ seq_len_pos = 1 if batch_first else 0
52
+ return src_size[seq_len_pos]
53
+
54
+
55
+ class Transformer(Module):
56
+ r"""A transformer model.
57
+
58
+ Users can modify the attributes as needed. The architecture
59
+ is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
60
+ Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
61
+ Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information
62
+ Processing Systems, pages 6000-6010.
63
+
64
+ Args:
65
+ d_model: the number of expected features in the encoder/decoder inputs (default=512).
66
+ nhead: the number of heads in the multiheadattention models (default=8).
67
+ num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
68
+ num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6).
69
+ dim_feedforward: the dimension of the feedforward network model (default=2048).
70
+ dropout: the dropout value (default=0.1).
71
+ activation: the activation function of encoder/decoder intermediate layer, can be a string
72
+ ("relu" or "gelu") or a unary callable. Default: relu
73
+ custom_encoder: custom encoder (default=None).
74
+ custom_decoder: custom decoder (default=None).
75
+ layer_norm_eps: the eps value in layer normalization components (default=1e-5).
76
+ batch_first: If ``True``, then the input and output tensors are provided
77
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
78
+ norm_first: if ``True``, encoder and decoder layers will perform LayerNorms before
79
+ other attention and feedforward operations, otherwise after. Default: ``False`` (after).
80
+ bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive
81
+ bias. Default: ``True``.
82
+
83
+ Examples::
84
+ >>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12)
85
+ >>> src = torch.rand((10, 32, 512))
86
+ >>> tgt = torch.rand((20, 32, 512))
87
+ >>> out = transformer_model(src, tgt)
88
+
89
+ Note: A full example to apply nn.Transformer module for the word language model is available in
90
+ https://github.com/pytorch/examples/tree/master/word_language_model
91
+ """
92
+
93
+ def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,
94
+ num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,
95
+ activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
96
+ custom_encoder: Optional[Any] = None, custom_decoder: Optional[Any] = None,
97
+ layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
98
+ bias: bool = True, device=None, dtype=None) -> None:
99
+ factory_kwargs = {'device': device, 'dtype': dtype}
100
+ super().__init__()
101
+ torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}")
102
+
103
+ if custom_encoder is not None:
104
+ self.encoder = custom_encoder
105
+ else:
106
+ encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
107
+ activation, layer_norm_eps, batch_first, norm_first,
108
+ bias, **factory_kwargs)
109
+ encoder_norm = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
110
+ self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
111
+
112
+ if custom_decoder is not None:
113
+ self.decoder = custom_decoder
114
+ else:
115
+ decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
116
+ activation, layer_norm_eps, batch_first, norm_first,
117
+ bias, **factory_kwargs)
118
+ decoder_norm = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
119
+ self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
120
+
121
+ self._reset_parameters()
122
+
123
+ self.d_model = d_model
124
+ self.nhead = nhead
125
+
126
+ self.batch_first = batch_first
127
+
128
+ def forward(self, src: Tensor, tgt: Tensor, src_mask: Optional[Tensor] = None, tgt_mask: Optional[Tensor] = None,
129
+ memory_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None,
130
+ tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None,
131
+ src_is_causal: Optional[bool] = None, tgt_is_causal: Optional[bool] = None,
132
+ memory_is_causal: bool = False) -> Tensor:
133
+ r"""Take in and process masked source/target sequences.
134
+
135
+ .. note::
136
+
137
+ If a boolean tensor is provided for any of the [src/tgt/memory]_mask arguments, positions with a ``True`` value are
138
+ not allowed to participate in the attention,
139
+ which is the opposite of the definition for :attr:`attn_mask`
140
+ in :func:`torch.nn.functional.scaled_dot_product_attention`.
141
+
142
+ Args:
143
+ src: the sequence to the encoder (required).
144
+ tgt: the sequence to the decoder (required).
145
+ src_mask: the additive mask for the src sequence (optional).
146
+ tgt_mask: the additive mask for the tgt sequence (optional).
147
+ memory_mask: the additive mask for the encoder output (optional).
148
+ src_key_padding_mask: the Tensor mask for src keys per batch (optional).
149
+ tgt_key_padding_mask: the Tensor mask for tgt keys per batch (optional).
150
+ memory_key_padding_mask: the Tensor mask for memory keys per batch (optional).
151
+ src_is_causal: If specified, applies a causal mask as ``src_mask``.
152
+ Default: ``None``; try to detect a causal mask.
153
+ Warning:
154
+ ``src_is_causal`` provides a hint that ``src_mask`` is
155
+ the causal mask. Providing incorrect hints can result in
156
+ incorrect execution, including forward and backward
157
+ compatibility.
158
+ tgt_is_causal: If specified, applies a causal mask as ``tgt_mask``.
159
+ Default: ``None``; try to detect a causal mask.
160
+ Warning:
161
+ ``tgt_is_causal`` provides a hint that ``tgt_mask`` is
162
+ the causal mask. Providing incorrect hints can result in
163
+ incorrect execution, including incorrect forward and
164
+ backward behavior.
165
+ memory_is_causal: If specified, applies a causal mask as
166
+ ``memory_mask``.
167
+ Default: ``False``.
168
+ Warning:
169
+ ``memory_is_causal`` provides a hint that
170
+ ``memory_mask`` is the causal mask. Providing incorrect
171
+ hints can result in incorrect execution, including
172
+ incorrect forward and backward behavior.
173
+
174
+ Shape:
175
+ - src: :math:`(S, E)` for unbatched input, :math:`(S, N, E)` if `batch_first=False` or
176
+ `(N, S, E)` if `batch_first=True`.
177
+ - tgt: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or
178
+ `(N, T, E)` if `batch_first=True`.
179
+ - src_mask: :math:`(S, S)` or :math:`(N\cdot\text{num\_heads}, S, S)`.
180
+ - tgt_mask: :math:`(T, T)` or :math:`(N\cdot\text{num\_heads}, T, T)`.
181
+ - memory_mask: :math:`(T, S)`.
182
+ - src_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`.
183
+ - tgt_key_padding_mask: :math:`(T)` for unbatched input otherwise :math:`(N, T)`.
184
+ - memory_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`.
185
+
186
+ Note: [src/tgt/memory]_mask ensures that position :math:`i` is allowed to attend to the unmasked
187
+ positions. If a BoolTensor is provided, positions with ``True``
188
+ are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
189
+ is provided, it will be added to the attention weight.
190
+ [src/tgt/memory]_key_padding_mask provides specified elements in the key to be ignored by
191
+ the attention. If a BoolTensor is provided, the positions with the
192
+ value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
193
+
194
+ - output: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or
195
+ `(N, T, E)` if `batch_first=True`.
196
+
197
+ Note: Due to the multi-head attention architecture in the transformer model,
198
+ the output sequence length of a transformer is the same as the input sequence
199
+ (i.e. target) length of the decoder.
200
+
201
+ where :math:`S` is the source sequence length, :math:`T` is the target sequence length, :math:`N` is the
202
+ batch size, :math:`E` is the feature number
203
+
204
+ Examples:
205
+ >>> # xdoctest: +SKIP
206
+ >>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
207
+ """
208
+ is_batched = src.dim() == 3
209
+ if not self.batch_first and src.size(1) != tgt.size(1) and is_batched:
210
+ raise RuntimeError("the batch number of src and tgt must be equal")
211
+ elif self.batch_first and src.size(0) != tgt.size(0) and is_batched:
212
+ raise RuntimeError("the batch number of src and tgt must be equal")
213
+
214
+ if src.size(-1) != self.d_model or tgt.size(-1) != self.d_model:
215
+ raise RuntimeError("the feature number of src and tgt must be equal to d_model")
216
+
217
+ memory = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask,
218
+ is_causal=src_is_causal)
219
+ output = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask,
220
+ tgt_key_padding_mask=tgt_key_padding_mask,
221
+ memory_key_padding_mask=memory_key_padding_mask,
222
+ tgt_is_causal=tgt_is_causal, memory_is_causal=memory_is_causal)
223
+ return output
224
+
225
+ @staticmethod
226
+ def generate_square_subsequent_mask(
227
+ sz: int,
228
+ device: Optional[torch.device] = None,
229
+ dtype: Optional[torch.dtype] = None,
230
+ ) -> Tensor:
231
+ r"""Generate a square causal mask for the sequence.
232
+
233
+ The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0).
234
+ """
235
+ return _generate_square_subsequent_mask(sz, dtype=dtype, device=device)
236
+
237
+ def _reset_parameters(self):
238
+ r"""Initiate parameters in the transformer model."""
239
+ for p in self.parameters():
240
+ if p.dim() > 1:
241
+ xavier_uniform_(p)
242
+
243
+
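
A slightly fuller usage sketch than the docstring example above, wiring the static mask helper into a forward call (shapes assume the default ``batch_first=False``):

import torch
import torch.nn as nn

model = nn.Transformer(d_model=512, nhead=8)
src = torch.rand(10, 32, 512)  # (S, N, E)
tgt = torch.rand(20, 32, 512)  # (T, N, E)

# Causal mask over the target so position t cannot attend to positions > t.
tgt_mask = nn.Transformer.generate_square_subsequent_mask(tgt.size(0))
out = model(src, tgt, tgt_mask=tgt_mask)
print(out.shape)  # torch.Size([20, 32, 512])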
244
+ class TransformerEncoder(Module):
245
+ r"""TransformerEncoder is a stack of N encoder layers.
246
+
247
+ Users can build the BERT (https://arxiv.org/abs/1810.04805) model with corresponding parameters.
248
+
249
+ Args:
250
+ encoder_layer: an instance of the TransformerEncoderLayer() class (required).
251
+ num_layers: the number of sub-encoder-layers in the encoder (required).
252
+ norm: the layer normalization component (optional).
253
+ enable_nested_tensor: if True, input will automatically be converted to a nested tensor
254
+ (and converted back on output). This will improve the overall performance of
255
+ TransformerEncoder when the padding rate is high. Default: ``True`` (enabled).
256
+
257
+ Examples::
258
+ >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
259
+ >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
260
+ >>> src = torch.rand(10, 32, 512)
261
+ >>> out = transformer_encoder(src)
262
+ """
263
+
264
+ __constants__ = ['norm']
265
+
266
+ def __init__(
267
+ self,
268
+ encoder_layer: "TransformerEncoderLayer",
269
+ num_layers: int,
270
+ norm: Optional[Module] = None,
271
+ enable_nested_tensor: bool = True,
272
+ mask_check: bool = True
273
+ ) -> None:
274
+ super().__init__()
275
+ torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}")
276
+ self.layers = _get_clones(encoder_layer, num_layers)
277
+ self.num_layers = num_layers
278
+ self.norm = norm
279
+ # this attribute saves the value provided at object construction
280
+ self.enable_nested_tensor = enable_nested_tensor
281
+ # this attribute controls whether nested tensors are used
282
+ self.use_nested_tensor = enable_nested_tensor
283
+ self.mask_check = mask_check
284
+
285
+ enc_layer = "encoder_layer"
286
+ why_not_sparsity_fast_path = ''
287
+ if not isinstance(encoder_layer, torch.nn.TransformerEncoderLayer):
288
+ why_not_sparsity_fast_path = f"{enc_layer} was not TransformerEncoderLayer"
289
+ elif encoder_layer.norm_first:
290
+ why_not_sparsity_fast_path = f"{enc_layer}.norm_first was True"
291
+ elif not encoder_layer.self_attn.batch_first:
292
+ why_not_sparsity_fast_path = (f"{enc_layer}.self_attn.batch_first was not True" +
293
+ "(use batch_first for better inference performance)")
294
+ elif not encoder_layer.self_attn._qkv_same_embed_dim:
295
+ why_not_sparsity_fast_path = f"{enc_layer}.self_attn._qkv_same_embed_dim was not True"
296
+ elif encoder_layer.self_attn.in_proj_bias is None:
297
+ why_not_sparsity_fast_path = f"{enc_layer}.self_attn was passed bias=False"
298
+ elif not encoder_layer.activation_relu_or_gelu:
299
+ why_not_sparsity_fast_path = f"{enc_layer}.activation_relu_or_gelu was not True"
300
+ elif not (encoder_layer.norm1.eps == encoder_layer.norm2.eps):
301
+ why_not_sparsity_fast_path = f"{enc_layer}.norm1.eps was not equal to {enc_layer}.norm2.eps"
302
+ elif encoder_layer.self_attn.num_heads % 2 == 1:
303
+ why_not_sparsity_fast_path = f"{enc_layer}.self_attn.num_heads is odd"
304
+
305
+ if enable_nested_tensor and why_not_sparsity_fast_path:
306
+ warnings.warn(f"enable_nested_tensor is True, but self.use_nested_tensor is False because {why_not_sparsity_fast_path}")
307
+ self.use_nested_tensor = False
308
+
309
+
310
+ def forward(
311
+ self,
312
+ src: Tensor,
313
+ mask: Optional[Tensor] = None,
314
+ src_key_padding_mask: Optional[Tensor] = None,
315
+ is_causal: Optional[bool] = None) -> Tensor:
316
+ r"""Pass the input through the encoder layers in turn.
317
+
318
+ Args:
319
+ src: the sequence to the encoder (required).
320
+ mask: the mask for the src sequence (optional).
321
+ src_key_padding_mask: the mask for the src keys per batch (optional).
322
+ is_causal: If specified, applies a causal mask as ``mask``.
323
+ Default: ``None``; try to detect a causal mask.
324
+ Warning:
325
+ ``is_causal`` provides a hint that ``mask`` is the
326
+ causal mask. Providing incorrect hints can result in
327
+ incorrect execution, including incorrect forward and
328
+ backward behavior.
329
+
330
+ Shape:
331
+ see the docs in :class:`~torch.nn.Transformer`.
332
+ """
333
+ src_key_padding_mask = F._canonical_mask(
334
+ mask=src_key_padding_mask,
335
+ mask_name="src_key_padding_mask",
336
+ other_type=F._none_or_dtype(mask),
337
+ other_name="mask",
338
+ target_type=src.dtype
339
+ )
340
+
341
+ mask = F._canonical_mask(
342
+ mask=mask,
343
+ mask_name="mask",
344
+ other_type=None,
345
+ other_name="",
346
+ target_type=src.dtype,
347
+ check_other=False,
348
+ )
349
+
350
+ output = src
351
+ convert_to_nested = False
352
+ first_layer = self.layers[0]
353
+ src_key_padding_mask_for_layers = src_key_padding_mask
354
+ why_not_sparsity_fast_path = ''
355
+ str_first_layer = "self.layers[0]"
356
+ batch_first = first_layer.self_attn.batch_first
357
+ is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled()
358
+
359
+ if not is_fastpath_enabled:
360
+ why_not_sparsity_fast_path = "torch.backends.mha.get_fastpath_enabled() was not True"
361
+ elif not hasattr(self, "use_nested_tensor"):
362
+ why_not_sparsity_fast_path = "use_nested_tensor attribute not present"
363
+ elif not self.use_nested_tensor:
364
+ why_not_sparsity_fast_path = "self.use_nested_tensor (set in init) was not True"
365
+ elif first_layer.training:
366
+ why_not_sparsity_fast_path = f"{str_first_layer} was in training mode"
367
+ elif not src.dim() == 3:
368
+ why_not_sparsity_fast_path = f"input not batched; expected src.dim() of 3 but got {src.dim()}"
369
+ elif src_key_padding_mask is None:
370
+ why_not_sparsity_fast_path = "src_key_padding_mask was None"
371
+ elif (((not hasattr(self, "mask_check")) or self.mask_check)
372
+ and not torch._nested_tensor_from_mask_left_aligned(src, src_key_padding_mask.logical_not())):
373
+ why_not_sparsity_fast_path = "mask_check enabled, and src and src_key_padding_mask was not left aligned"
374
+ elif output.is_nested:
375
+ why_not_sparsity_fast_path = "NestedTensor input is not supported"
376
+ elif mask is not None:
377
+ why_not_sparsity_fast_path = "src_key_padding_mask and mask were both supplied"
378
+ elif torch.is_autocast_enabled():
379
+ why_not_sparsity_fast_path = "autocast is enabled"
380
+
381
+ if not why_not_sparsity_fast_path:
382
+ tensor_args = (
383
+ src,
384
+ first_layer.self_attn.in_proj_weight,
385
+ first_layer.self_attn.in_proj_bias,
386
+ first_layer.self_attn.out_proj.weight,
387
+ first_layer.self_attn.out_proj.bias,
388
+ first_layer.norm1.weight,
389
+ first_layer.norm1.bias,
390
+ first_layer.norm2.weight,
391
+ first_layer.norm2.bias,
392
+ first_layer.linear1.weight,
393
+ first_layer.linear1.bias,
394
+ first_layer.linear2.weight,
395
+ first_layer.linear2.bias,
396
+ )
397
+ _supported_device_type = ["cpu", "cuda", torch.utils.backend_registration._privateuse1_backend_name]
398
+ if torch.overrides.has_torch_function(tensor_args):
399
+ why_not_sparsity_fast_path = "some Tensor argument has_torch_function"
400
+ elif src.device.type not in _supported_device_type:
401
+ why_not_sparsity_fast_path = f"src device is neither one of {_supported_device_type}"
402
+ elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args):
403
+ why_not_sparsity_fast_path = ("grad is enabled and at least one of query or the "
404
+ "input/output projection weights or biases requires_grad")
405
+
406
+ if (not why_not_sparsity_fast_path) and (src_key_padding_mask is not None):
407
+ convert_to_nested = True
408
+ output = torch._nested_tensor_from_mask(output, src_key_padding_mask.logical_not(), mask_check=False)
409
+ src_key_padding_mask_for_layers = None
410
+
411
+ seq_len = _get_seq_len(src, batch_first)
412
+ is_causal = _detect_is_causal_mask(mask, is_causal, seq_len)
413
+
414
+ for mod in self.layers:
415
+ output = mod(output, src_mask=mask, is_causal=is_causal, src_key_padding_mask=src_key_padding_mask_for_layers)
416
+
417
+ if convert_to_nested:
418
+ output = output.to_padded_tensor(0., src.size())
419
+
420
+ if self.norm is not None:
421
+ output = self.norm(output)
422
+
423
+ return output
424
+
425
+
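
Most of the forward above is gating for the nested-tensor fast path. A sketch of the padded-batch case it targets: ``batch_first=True``, eval mode, inference mode, and a right-padded (left-aligned) ``src_key_padding_mask``. Values here are illustrative only:

import torch
import torch.nn as nn

layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
encoder = nn.TransformerEncoder(layer, num_layers=2).eval()

src = torch.rand(2, 10, 512)  # (N, S, E)
# True marks padding; the mask_check above requires the real tokens to be
# left aligned, i.e. padding only at the end of each sequence.
pad = torch.zeros(2, 10, dtype=torch.bool)
pad[0, 7:] = True
pad[1, 5:] = True

with torch.inference_mode():
    out = encoder(src, src_key_padding_mask=pad)
print(out.shape)  # torch.Size([2, 10, 512])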
426
+ class TransformerDecoder(Module):
427
+ r"""TransformerDecoder is a stack of N decoder layers.
428
+
429
+ Args:
430
+ decoder_layer: an instance of the TransformerDecoderLayer() class (required).
431
+ num_layers: the number of sub-decoder-layers in the decoder (required).
432
+ norm: the layer normalization component (optional).
433
+
434
+ Examples::
435
+ >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
436
+ >>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
437
+ >>> memory = torch.rand(10, 32, 512)
438
+ >>> tgt = torch.rand(20, 32, 512)
439
+ >>> out = transformer_decoder(tgt, memory)
440
+ """
441
+
442
+ __constants__ = ['norm']
443
+
444
+ def __init__(
445
+ self,
446
+ decoder_layer: "TransformerDecoderLayer",
447
+ num_layers: int,
448
+ norm: Optional[Module] = None
449
+ ) -> None:
450
+ super().__init__()
451
+ torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}")
452
+ self.layers = _get_clones(decoder_layer, num_layers)
453
+ self.num_layers = num_layers
454
+ self.norm = norm
455
+
456
+ def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None,
457
+ memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None,
458
+ memory_key_padding_mask: Optional[Tensor] = None, tgt_is_causal: Optional[bool] = None,
459
+ memory_is_causal: bool = False) -> Tensor:
460
+ r"""Pass the inputs (and mask) through the decoder layer in turn.
461
+
462
+ Args:
463
+ tgt: the sequence to the decoder (required).
464
+ memory: the sequence from the last layer of the encoder (required).
465
+ tgt_mask: the mask for the tgt sequence (optional).
466
+ memory_mask: the mask for the memory sequence (optional).
467
+ tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
468
+ memory_key_padding_mask: the mask for the memory keys per batch (optional).
469
+ tgt_is_causal: If specified, applies a causal mask as ``tgt_mask``.
470
+ Default: ``None``; try to detect a causal mask.
471
+ Warning:
472
+ ``tgt_is_causal`` provides a hint that ``tgt_mask`` is
473
+ the causal mask. Providing incorrect hints can result in
474
+ incorrect execution, including incorrect forward and
475
+ backward behavior.
476
+ memory_is_causal: If specified, applies a causal mask as
477
+ ``memory_mask``.
478
+ Default: ``False``.
479
+ Warning:
480
+ ``memory_is_causal`` provides a hint that
481
+ ``memory_mask`` is the causal mask. Providing incorrect
482
+ hints can result in incorrect execution, including
483
+ incorrect forward and backward behavior.
484
+
485
+ Shape:
486
+ see the docs in :class:`~torch.nn.Transformer`.
487
+ """
488
+ output = tgt
489
+
490
+ seq_len = _get_seq_len(tgt, self.layers[0].self_attn.batch_first)
491
+ tgt_is_causal = _detect_is_causal_mask(tgt_mask, tgt_is_causal, seq_len)
492
+
493
+ for mod in self.layers:
494
+ output = mod(output, memory, tgt_mask=tgt_mask,
495
+ memory_mask=memory_mask,
496
+ tgt_key_padding_mask=tgt_key_padding_mask,
497
+ memory_key_padding_mask=memory_key_padding_mask,
498
+ tgt_is_causal=tgt_is_causal,
499
+ memory_is_causal=memory_is_causal)
500
+
501
+ if self.norm is not None:
502
+ output = self.norm(output)
503
+
504
+ return output
505
+
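
A sketch of the decoder stack in isolation, paired with the causal target mask (the usual autoregressive setup); it assumes only classes defined in this file:

import torch
import torch.nn as nn

decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)

memory = torch.rand(10, 32, 512)  # encoder output, (S, N, E)
tgt = torch.rand(20, 32, 512)     # (T, N, E)
tgt_mask = nn.Transformer.generate_square_subsequent_mask(tgt.size(0))

# tgt_is_causal=True is a hint that tgt_mask is exactly the causal mask.
out = decoder(tgt, memory, tgt_mask=tgt_mask, tgt_is_causal=True)
print(out.shape)  # torch.Size([20, 32, 512])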
506
+ class TransformerEncoderLayer(Module):
507
+ r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
508
+
509
+ This standard encoder layer is based on the paper "Attention Is All You Need".
510
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
511
+ Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
512
+ Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
513
+ in a different way during application.
514
+
515
+ TransformerEncoderLayer can handle either traditional torch.Tensor inputs
516
+ or Nested Tensor inputs. Derived classes are expected to similarly accept
517
+ both input formats. (Not all combinations of inputs are currently
518
+ supported by TransformerEncoderLayer while Nested Tensor is in prototype
519
+ state.)
520
+
521
+ If you are implementing a custom layer, you may derive it either from
522
+ the Module or TransformerEncoderLayer class. If your custom layer
523
+ supports both torch.Tensors and Nested Tensors inputs, make its
524
+ implementation a derived class of TransformerEncoderLayer. If your custom
525
+ layer supports only torch.Tensor inputs, derive its implementation from
526
+ Module.
527
+
528
+ Args:
529
+ d_model: the number of expected features in the input (required).
530
+ nhead: the number of heads in the multiheadattention models (required).
531
+ dim_feedforward: the dimension of the feedforward network model (default=2048).
532
+ dropout: the dropout value (default=0.1).
533
+ activation: the activation function of the intermediate layer, can be a string
534
+ ("relu" or "gelu") or a unary callable. Default: relu
535
+ layer_norm_eps: the eps value in layer normalization components (default=1e-5).
536
+ batch_first: If ``True``, then the input and output tensors are provided
537
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
538
+ norm_first: if ``True``, layer norm is done prior to attention and feedforward
539
+ operations, respectively. Otherwise it's done after. Default: ``False`` (after).
540
+ bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive
541
+ bias. Default: ``True``.
542
+
543
+ Examples::
544
+ >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
545
+ >>> src = torch.rand(10, 32, 512)
546
+ >>> out = encoder_layer(src)
547
+
548
+ Alternatively, when ``batch_first`` is ``True``:
549
+ >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
550
+ >>> src = torch.rand(32, 10, 512)
551
+ >>> out = encoder_layer(src)
552
+
553
+ Fast path:
554
+ forward() will use a special optimized implementation described in
555
+ `FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`_ if all of the following
556
+ conditions are met:
557
+
558
+ - Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor
559
+ argument ``requires_grad``
560
+ - training is disabled (using ``.eval()``)
561
+ - batch_first is ``True`` and the input is batched (i.e., ``src.dim() == 3``)
562
+ - activation is one of: ``"relu"``, ``"gelu"``, ``torch.nn.functional.relu``, or ``torch.nn.functional.gelu``
563
+ - at most one of ``src_mask`` and ``src_key_padding_mask`` is passed
564
+ - if src is a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_, neither ``src_mask``
565
+ nor ``src_key_padding_mask`` is passed
566
+ - the two ``LayerNorm`` instances have a consistent ``eps`` value (this will naturally be the case
567
+ unless the caller has manually modified one without modifying the other)
568
+
569
+ If the optimized implementation is in use, a
570
+ `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be
571
+ passed for ``src`` to represent padding more efficiently than using a padding
572
+ mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ will be
573
+ returned, and an additional speedup proportional to the fraction of the input that
574
+ is padding can be expected.
575
+
576
+ .. _`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`:
577
+ https://arxiv.org/abs/2205.14135
578
+
579
+ """
580
+
581
+ __constants__ = ['norm_first']
582
+
583
+ def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1,
584
+ activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
585
+ layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
586
+ bias: bool = True, device=None, dtype=None) -> None:
587
+ factory_kwargs = {'device': device, 'dtype': dtype}
588
+ super().__init__()
589
+ self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout,
590
+ bias=bias, batch_first=batch_first,
591
+ **factory_kwargs)
592
+ # Implementation of Feedforward model
593
+ self.linear1 = Linear(d_model, dim_feedforward, bias=bias, **factory_kwargs)
594
+ self.dropout = Dropout(dropout)
595
+ self.linear2 = Linear(dim_feedforward, d_model, bias=bias, **factory_kwargs)
596
+
597
+ self.norm_first = norm_first
598
+ self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
599
+ self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
600
+ self.dropout1 = Dropout(dropout)
601
+ self.dropout2 = Dropout(dropout)
602
+
603
+ # Legacy string support for activation function.
604
+ if isinstance(activation, str):
605
+ activation = _get_activation_fn(activation)
606
+
607
+ # We can't test self.activation in forward() in TorchScript,
608
+ # so stash some information about it instead.
609
+ if activation is F.relu or isinstance(activation, torch.nn.ReLU):
610
+ self.activation_relu_or_gelu = 1
611
+ elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
612
+ self.activation_relu_or_gelu = 2
613
+ else:
614
+ self.activation_relu_or_gelu = 0
615
+ self.activation = activation
616
+
617
+ def __setstate__(self, state):
618
+ super().__setstate__(state)
619
+ if not hasattr(self, 'activation'):
620
+ self.activation = F.relu
621
+
622
+
623
+ def forward(
624
+ self,
625
+ src: Tensor,
626
+ src_mask: Optional[Tensor] = None,
627
+ src_key_padding_mask: Optional[Tensor] = None,
628
+ is_causal: bool = False) -> Tensor:
629
+ r"""Pass the input through the encoder layer.
630
+
631
+ Args:
632
+ src: the sequence to the encoder layer (required).
633
+ src_mask: the mask for the src sequence (optional).
634
+ src_key_padding_mask: the mask for the src keys per batch (optional).
635
+ is_causal: If specified, applies a causal mask as ``src_mask``.
636
+ Default: ``False``.
637
+ Warning:
638
+ ``is_causal`` provides a hint that ``src_mask`` is the
639
+ causal mask. Providing incorrect hints can result in
640
+ incorrect execution, including incorrect forward and
641
+ backward behavior.
642
+
643
+ Shape:
644
+ see the docs in :class:`~torch.nn.Transformer`.
645
+ """
646
+ src_key_padding_mask = F._canonical_mask(
647
+ mask=src_key_padding_mask,
648
+ mask_name="src_key_padding_mask",
649
+ other_type=F._none_or_dtype(src_mask),
650
+ other_name="src_mask",
651
+ target_type=src.dtype
652
+ )
653
+
654
+ src_mask = F._canonical_mask(
655
+ mask=src_mask,
656
+ mask_name="src_mask",
657
+ other_type=None,
658
+ other_name="",
659
+ target_type=src.dtype,
660
+ check_other=False,
661
+ )
662
+
663
+ is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled()
664
+
665
+ # see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
666
+ why_not_sparsity_fast_path = ''
667
+ if not is_fastpath_enabled:
668
+ why_not_sparsity_fast_path = "torch.backends.mha.get_fastpath_enabled() was not True"
669
+ elif not src.dim() == 3:
670
+ why_not_sparsity_fast_path = f"input not batched; expected src.dim() of 3 but got {src.dim()}"
671
+ elif self.training:
672
+ why_not_sparsity_fast_path = "training is enabled"
673
+ elif not self.self_attn.batch_first:
674
+ why_not_sparsity_fast_path = "self_attn.batch_first was not True"
675
+ elif self.self_attn.in_proj_bias is None:
676
+ why_not_sparsity_fast_path = "self_attn was passed bias=False"
677
+ elif not self.self_attn._qkv_same_embed_dim:
678
+ why_not_sparsity_fast_path = "self_attn._qkv_same_embed_dim was not True"
679
+ elif not self.activation_relu_or_gelu:
680
+ why_not_sparsity_fast_path = "activation_relu_or_gelu was not True"
681
+ elif not (self.norm1.eps == self.norm2.eps):
682
+ why_not_sparsity_fast_path = "norm1.eps is not equal to norm2.eps"
683
+ elif src.is_nested and (src_key_padding_mask is not None or src_mask is not None):
684
+ why_not_sparsity_fast_path = "neither src_key_padding_mask nor src_mask are not supported with NestedTensor input"
685
+ elif self.self_attn.num_heads % 2 == 1:
686
+ why_not_sparsity_fast_path = "num_head is odd"
687
+ elif torch.is_autocast_enabled():
688
+ why_not_sparsity_fast_path = "autocast is enabled"
689
+ if not why_not_sparsity_fast_path:
690
+ tensor_args = (
691
+ src,
692
+ self.self_attn.in_proj_weight,
693
+ self.self_attn.in_proj_bias,
694
+ self.self_attn.out_proj.weight,
695
+ self.self_attn.out_proj.bias,
696
+ self.norm1.weight,
697
+ self.norm1.bias,
698
+ self.norm2.weight,
699
+ self.norm2.bias,
700
+ self.linear1.weight,
701
+ self.linear1.bias,
702
+ self.linear2.weight,
703
+ self.linear2.bias,
704
+ )
705
+
706
+ # We have to use list comprehensions below because TorchScript does not support
707
+ # generator expressions.
708
+ _supported_device_type = ["cpu", "cuda", torch.utils.backend_registration._privateuse1_backend_name]
709
+ if torch.overrides.has_torch_function(tensor_args):
710
+ why_not_sparsity_fast_path = "some Tensor argument has_torch_function"
711
+ elif not all((x.device.type in _supported_device_type) for x in tensor_args):
712
+ why_not_sparsity_fast_path = ("some Tensor argument's device is neither one of "
713
+ f"{_supported_device_type}")
714
+ elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args):
715
+ why_not_sparsity_fast_path = ("grad is enabled and at least one of query or the "
716
+ "input/output projection weights or biases requires_grad")
717
+
718
+ if not why_not_sparsity_fast_path:
719
+ merged_mask, mask_type = self.self_attn.merge_masks(src_mask, src_key_padding_mask, src)
720
+ return torch._transformer_encoder_layer_fwd(
721
+ src,
722
+ self.self_attn.embed_dim,
723
+ self.self_attn.num_heads,
724
+ self.self_attn.in_proj_weight,
725
+ self.self_attn.in_proj_bias,
726
+ self.self_attn.out_proj.weight,
727
+ self.self_attn.out_proj.bias,
728
+ self.activation_relu_or_gelu == 2,
729
+ self.norm_first,
730
+ self.norm1.eps,
731
+ self.norm1.weight,
732
+ self.norm1.bias,
733
+ self.norm2.weight,
734
+ self.norm2.bias,
735
+ self.linear1.weight,
736
+ self.linear1.bias,
737
+ self.linear2.weight,
738
+ self.linear2.bias,
739
+ merged_mask,
740
+ mask_type,
741
+ )
742
+
743
+
744
+ x = src
745
+ if self.norm_first:
746
+ x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask, is_causal=is_causal)
747
+ x = x + self._ff_block(self.norm2(x))
748
+ else:
749
+ x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask, is_causal=is_causal))
750
+ x = self.norm2(x + self._ff_block(x))
751
+
752
+ return x
753
+
754
+ # self-attention block
755
+ def _sa_block(self, x: Tensor,
756
+ attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False) -> Tensor:
757
+ x = self.self_attn(x, x, x,
758
+ attn_mask=attn_mask,
759
+ key_padding_mask=key_padding_mask,
760
+ need_weights=False, is_causal=is_causal)[0]
761
+ return self.dropout1(x)
762
+
763
+ # feed forward block
764
+ def _ff_block(self, x: Tensor) -> Tensor:
765
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
766
+ return self.dropout2(x)
767
+
768
+
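
Stripped of the fast path, the residual wiring at the end of forward() is the entire difference between pre-norm and post-norm: pre-norm computes ``x = x + block(norm(x))`` while post-norm computes ``x = norm(x + block(x))``. A sketch comparing the two configurations:

import torch
import torch.nn as nn

x = torch.rand(10, 32, 512)
post = nn.TransformerEncoderLayer(d_model=512, nhead=8)                   # norm_first=False
pre = nn.TransformerEncoderLayer(d_model=512, nhead=8, norm_first=True)  # norm_first=True

# Same shapes either way; only the LayerNorm placement around the
# self-attention and feedforward residual branches differs.
print(post(x).shape, pre(x).shape)  # torch.Size([10, 32, 512]) twice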
769
+ class TransformerDecoderLayer(Module):
770
+ r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
771
+
772
+ This standard decoder layer is based on the paper "Attention Is All You Need".
773
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
774
+ Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
775
+ Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
776
+ in a different way during application.
777
+
778
+ Args:
779
+ d_model: the number of expected features in the input (required).
780
+ nhead: the number of heads in the multiheadattention models (required).
781
+ dim_feedforward: the dimension of the feedforward network model (default=2048).
782
+ dropout: the dropout value (default=0.1).
783
+ activation: the activation function of the intermediate layer, can be a string
784
+ ("relu" or "gelu") or a unary callable. Default: relu
785
+ layer_norm_eps: the eps value in layer normalization components (default=1e-5).
786
+ batch_first: If ``True``, then the input and output tensors are provided
787
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
788
+ norm_first: if ``True``, layer norm is done prior to self attention, multihead
789
+ attention and feedforward operations, respectively. Otherwise it's done after.
790
+ Default: ``False`` (after).
791
+ bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive
792
+ bias. Default: ``True``.
793
+
794
+ Examples::
795
+ >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
796
+ >>> memory = torch.rand(10, 32, 512)
797
+ >>> tgt = torch.rand(20, 32, 512)
798
+ >>> out = decoder_layer(tgt, memory)
799
+
800
+ Alternatively, when ``batch_first`` is ``True``:
801
+ >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=True)
802
+ >>> memory = torch.rand(32, 10, 512)
803
+ >>> tgt = torch.rand(32, 20, 512)
804
+ >>> out = decoder_layer(tgt, memory)
805
+ """
806
+
807
+ __constants__ = ['norm_first']
808
+
809
+ def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1,
810
+ activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
811
+ layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
812
+ bias: bool = True, device=None, dtype=None) -> None:
813
+ factory_kwargs = {'device': device, 'dtype': dtype}
814
+ super().__init__()
815
+ self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
816
+ bias=bias, **factory_kwargs)
817
+ self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
818
+ bias=bias, **factory_kwargs)
819
+ # Implementation of Feedforward model
820
+ self.linear1 = Linear(d_model, dim_feedforward, bias=bias, **factory_kwargs)
821
+ self.dropout = Dropout(dropout)
822
+ self.linear2 = Linear(dim_feedforward, d_model, bias=bias, **factory_kwargs)
823
+
824
+ self.norm_first = norm_first
825
+ self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
826
+ self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
827
+ self.norm3 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
828
+ self.dropout1 = Dropout(dropout)
829
+ self.dropout2 = Dropout(dropout)
830
+ self.dropout3 = Dropout(dropout)
831
+
832
+ # Legacy string support for activation function.
833
+ if isinstance(activation, str):
834
+ self.activation = _get_activation_fn(activation)
835
+ else:
836
+ self.activation = activation
837
+
838
+ def __setstate__(self, state):
839
+ if 'activation' not in state:
840
+ state['activation'] = F.relu
841
+ super().__setstate__(state)
842
+
843
+ def forward(
844
+ self,
845
+ tgt: Tensor,
846
+ memory: Tensor,
847
+ tgt_mask: Optional[Tensor] = None,
848
+ memory_mask: Optional[Tensor] = None,
849
+ tgt_key_padding_mask: Optional[Tensor] = None,
850
+ memory_key_padding_mask: Optional[Tensor] = None,
851
+ tgt_is_causal: bool = False,
852
+ memory_is_causal: bool = False,
853
+ ) -> Tensor:
854
+ r"""Pass the inputs (and mask) through the decoder layer.
855
+
856
+ Args:
857
+ tgt: the sequence to the decoder layer (required).
858
+ memory: the sequence from the last layer of the encoder (required).
859
+ tgt_mask: the mask for the tgt sequence (optional).
860
+ memory_mask: the mask for the memory sequence (optional).
861
+ tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
862
+ memory_key_padding_mask: the mask for the memory keys per batch (optional).
863
+ tgt_is_causal: If specified, applies a causal mask as ``tgt mask``.
864
+ Default: ``False``.
865
+ Warning:
866
+ ``tgt_is_causal`` provides a hint that ``tgt_mask`` is
867
+ the causal mask. Providing incorrect hints can result in
868
+ incorrect execution, including forward and backward
869
+ compatibility.
870
+ memory_is_causal: If specified, applies a causal mask as
871
+ ``memory mask``.
872
+ Default: ``False``.
873
+ Warning:
874
+ ``memory_is_causal`` provides a hint that
875
+ ``memory_mask`` is the causal mask. Providing incorrect
876
+ hints can result in incorrect execution, including
877
+ forward and backward compatibility.
878
+
879
+ Shape:
880
+ see the docs in :class:`~torch.nn.Transformer`.
881
+ """
882
+ # see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
883
+
884
+ x = tgt
885
+ if self.norm_first:
886
+ x = x + self._sa_block(self.norm1(x), tgt_mask, tgt_key_padding_mask, tgt_is_causal)
887
+ x = x + self._mha_block(self.norm2(x), memory, memory_mask, memory_key_padding_mask, memory_is_causal)
888
+ x = x + self._ff_block(self.norm3(x))
889
+ else:
890
+ x = self.norm1(x + self._sa_block(x, tgt_mask, tgt_key_padding_mask, tgt_is_causal))
891
+ x = self.norm2(x + self._mha_block(x, memory, memory_mask, memory_key_padding_mask, memory_is_causal))
892
+ x = self.norm3(x + self._ff_block(x))
893
+
894
+ return x
895
+
896
+ # self-attention block
897
+ def _sa_block(self, x: Tensor,
898
+ attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False) -> Tensor:
899
+ x = self.self_attn(x, x, x,
900
+ attn_mask=attn_mask,
901
+ key_padding_mask=key_padding_mask,
902
+ is_causal=is_causal,
903
+ need_weights=False)[0]
904
+ return self.dropout1(x)
905
+
906
+ # multihead attention block
907
+ def _mha_block(self, x: Tensor, mem: Tensor,
908
+ attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False) -> Tensor:
909
+ x = self.multihead_attn(x, mem, mem,
910
+ attn_mask=attn_mask,
911
+ key_padding_mask=key_padding_mask,
912
+ is_causal=is_causal,
913
+ need_weights=False)[0]
914
+ return self.dropout2(x)
915
+
916
+ # feed forward block
917
+ def _ff_block(self, x: Tensor) -> Tensor:
918
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
919
+ return self.dropout3(x)
920
+
921
+
922
+ def _get_clones(module, N):
923
+ # FIXME: copy.deepcopy() is not defined on nn.module
924
+ return ModuleList([copy.deepcopy(module) for i in range(N)])
925
+
926
+
927
+ def _get_activation_fn(activation: str) -> Callable[[Tensor], Tensor]:
928
+ if activation == "relu":
929
+ return F.relu
930
+ elif activation == "gelu":
931
+ return F.gelu
932
+
933
+ raise RuntimeError(f"activation should be relu/gelu, not {activation}")
934
+
935
+
936
+ def _detect_is_causal_mask(
937
+ mask: Optional[Tensor],
938
+ is_causal: Optional[bool] = None,
939
+ size: Optional[int] = None,
940
+ ) -> bool:
941
+ """Return whether the given attention mask is causal.
942
+
943
+ Warning:
944
+ If ``is_causal`` is not ``None``, its value will be returned as is. If a
945
+ user supplies an incorrect ``is_causal`` hint,
946
+
947
+ ``is_causal=False`` when the mask is in fact a causal attention.mask
948
+ may lead to reduced performance relative to what would be achievable
949
+ with ``is_causal=True``;
950
+ ``is_causal=True`` when the mask is in fact not a causal attention.mask
951
+ may lead to incorrect and unpredictable execution - in some scenarios,
952
+ a causal mask may be applied based on the hint, in other execution
953
+ scenarios the specified mask may be used. The choice may not appear
954
+ to be deterministic, in that a number of factors like alignment,
955
+ hardware SKU, etc influence the decision whether to use a mask or
956
+ rely on the hint.
957
+ ``size`` if not None, check whether the mask is a causal mask of the provided size
958
+ Otherwise, checks for any causal mask.
959
+ """
960
+ # Prevent type refinement
961
+ make_causal = (is_causal is True)
962
+
963
+ if is_causal is None and mask is not None:
964
+ sz = size if size is not None else mask.size(-2)
965
+ causal_comparison = _generate_square_subsequent_mask(
966
+ sz, device=mask.device, dtype=mask.dtype)
967
+
968
+ # Do not use `torch.equal` so we handle batched masks by
969
+ # broadcasting the comparison.
970
+ if mask.size() == causal_comparison.size():
971
+ make_causal = bool((mask == causal_comparison).all())
972
+ else:
973
+ make_causal = False
974
+
975
+ return make_causal
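For reference, a minimal sketch (not part of the diff above) of how the ``is_causal`` hint pairs with a matching causal mask; it assumes the public ``nn.Transformer.generate_square_subsequent_mask`` helper, which produces the same float mask that ``_detect_is_causal_mask`` compares against, and uses toy sizes:

import torch
import torch.nn as nn

decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
memory = torch.rand(10, 32, 512)   # (seq, batch, feature)
tgt = torch.rand(20, 32, 512)

# Float mask of shape (tgt_len, tgt_len): 0. on/below the diagonal, -inf above it.
tgt_mask = nn.Transformer.generate_square_subsequent_mask(tgt.size(0))

# tgt_is_causal=True is only a hint that tgt_mask really is causal;
# an incorrect hint can lead to slower or incorrect execution.
out = decoder_layer(tgt, memory, tgt_mask=tgt_mask, tgt_is_causal=True)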
venv/lib/python3.10/site-packages/torch/nn/modules/utils.py ADDED
@@ -0,0 +1,79 @@
+ import collections
+ from itertools import repeat
+ from typing import List, Dict, Any
+
+ __all__ = ['consume_prefix_in_state_dict_if_present']
+
+
+ def _ntuple(n, name="parse"):
+     def parse(x):
+         if isinstance(x, collections.abc.Iterable):
+             return tuple(x)
+         return tuple(repeat(x, n))
+
+     parse.__name__ = name
+     return parse
+
+
+ _single = _ntuple(1, "_single")
+ _pair = _ntuple(2, "_pair")
+ _triple = _ntuple(3, "_triple")
+ _quadruple = _ntuple(4, "_quadruple")
+
+
+ def _reverse_repeat_tuple(t, n):
+     r"""Reverse the order of `t` and repeat each element `n` times.
+
+     This can be used to translate the padding arg used by Conv and Pooling modules
+     to the one used by `F.pad`.
+     """
+     return tuple(x for x in reversed(t) for _ in range(n))
+
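As a quick illustration of the helpers above (a sketch; `_pair` and `_reverse_repeat_tuple` are private helpers, imported here only for demonstration):

from torch.nn.modules.utils import _pair, _reverse_repeat_tuple

# A scalar is broadcast to an n-tuple; an iterable is passed through.
assert _pair(3) == (3, 3)
assert _pair((3, 5)) == (3, 5)

# Conv-style padding (pad_h, pad_w) becomes the F.pad order
# (w_left, w_right, h_top, h_bottom) by reversing and repeating twice.
assert _reverse_repeat_tuple((1, 2), 2) == (2, 2, 1, 1)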
+
+ def _list_with_default(out_size: List[int], defaults: List[int]) -> List[int]:
+     import torch
+     if isinstance(out_size, (int, torch.SymInt)):
+         return out_size
+     if len(defaults) <= len(out_size):
+         raise ValueError(
+             f"Input dimension should be at least {len(out_size) + 1}"
+         )
+     return [
+         v if v is not None else d for v, d in zip(out_size, defaults[-len(out_size) :])
+     ]
+
+
+ def consume_prefix_in_state_dict_if_present(
+     state_dict: Dict[str, Any], prefix: str
+ ) -> None:
+     r"""Strip the prefix in state_dict in place, if any.
+
+     .. note::
+         Given a `state_dict` from a DP/DDP model, a local model can load it by applying
+         `consume_prefix_in_state_dict_if_present(state_dict, "module.")` before calling
+         :meth:`torch.nn.Module.load_state_dict`.
+
+     Args:
+         state_dict (OrderedDict): a state-dict to be loaded to the model.
+         prefix (str): the prefix to strip from the keys.
+     """
+     keys = list(state_dict.keys())
+     for key in keys:
+         if key.startswith(prefix):
+             newkey = key[len(prefix) :]
+             state_dict[newkey] = state_dict.pop(key)
+
+     # Also strip the prefix in metadata, if any.
+     if hasattr(state_dict, "_metadata"):
+         keys = list(state_dict._metadata.keys())
+         for key in keys:
+             # For the metadata dict, the key can be:
+             # '': for the DDP module, which we want to remove.
+             # 'module': for the actual model.
+             # 'module.xx.xx': for the rest.
+             if len(key) == 0:
+                 continue
+             # Handle both the 'module' and 'module.' cases.
+             if key == prefix.replace('.', '') or key.startswith(prefix):
+                 newkey = key[len(prefix) :]
+                 state_dict._metadata[newkey] = state_dict._metadata.pop(key)
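A short usage sketch (toy model; no real checkpoint involved) for loading a DDP-style state dict into an unwrapped local model:

import torch
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present

model = torch.nn.Linear(4, 2)

# Simulate a checkpoint saved from a DDP-wrapped model: every key is
# prefixed with "module.".
ddp_state = {"module." + k: v for k, v in model.state_dict().items()}

# Strip the prefix in place so the local (unwrapped) model can load it.
consume_prefix_in_state_dict_if_present(ddp_state, "module.")
model.load_state_dict(ddp_state)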
venv/lib/python3.10/site-packages/torch/nn/utils/__init__.py ADDED
@@ -0,0 +1,32 @@
+ from . import rnn
+ from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_
+ from .weight_norm import weight_norm, remove_weight_norm
+ from .convert_parameters import parameters_to_vector, vector_to_parameters
+ from .spectral_norm import spectral_norm, remove_spectral_norm
+ from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights, fuse_linear_bn_eval, fuse_linear_bn_weights
+ from .memory_format import convert_conv2d_weight_memory_format, convert_conv3d_weight_memory_format
+ from . import parametrizations
+ from .init import skip_init
+ from . import stateless
+
+ __all__ = [
+     "clip_grad_norm",
+     "clip_grad_norm_",
+     "clip_grad_value_",
+     "convert_conv2d_weight_memory_format",
+     "convert_conv3d_weight_memory_format",
+     "fuse_conv_bn_eval",
+     "fuse_conv_bn_weights",
+     "fuse_linear_bn_eval",
+     "fuse_linear_bn_weights",
+     "parameters_to_vector",
+     "parametrizations",
+     "remove_spectral_norm",
+     "remove_weight_norm",
+     "rnn",
+     "skip_init",
+     "spectral_norm",
+     "stateless",
+     "vector_to_parameters",
+     "weight_norm",
+ ]
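A brief sketch (toy model and values) exercising two of the helpers re-exported above:

import torch
from torch.nn.utils import clip_grad_norm_, parameters_to_vector

model = torch.nn.Linear(8, 1)
loss = model(torch.randn(3, 8)).sum()
loss.backward()

# Rescale gradients in place so their total L2 norm is at most 1.0.
total_norm = clip_grad_norm_(model.parameters(), max_norm=1.0)

# Flatten all parameters into a single 1-D tensor.
flat = parameters_to_vector(model.parameters())
print(total_norm, flat.shape)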
venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_named_member_accessor.cpython-310.pyc ADDED
Binary file (12.1 kB)
venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/clip_grad.cpython-310.pyc ADDED
Binary file (6.1 kB)
venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/convert_parameters.cpython-310.pyc ADDED
Binary file (2.5 kB)
venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/fusion.cpython-310.pyc ADDED
Binary file (5.05 kB)
venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrizations.cpython-310.pyc ADDED
Binary file (17.9 kB)