applied-ai-018 committed
Commit d23304a · verified · 1 Parent(s): a4fdea9

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. venv/lib/python3.10/site-packages/torch/nn/attention/__init__.py +117 -0
  2. venv/lib/python3.10/site-packages/torch/nn/attention/__pycache__/__init__.cpython-310.pyc +0 -0
  3. venv/lib/python3.10/site-packages/torch/nn/attention/__pycache__/_utils.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/torch/nn/attention/__pycache__/bias.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/torch/nn/attention/_utils.py +57 -0
  6. venv/lib/python3.10/site-packages/torch/nn/attention/bias.py +353 -0
  7. venv/lib/python3.10/site-packages/torch/nn/modules/__init__.py +68 -0
  8. venv/lib/python3.10/site-packages/torch/nn/modules/_functions.py +288 -0
  9. venv/lib/python3.10/site-packages/torch/nn/modules/batchnorm.py +849 -0
  10. venv/lib/python3.10/site-packages/torch/nn/modules/channelshuffle.py +57 -0
  11. venv/lib/python3.10/site-packages/torch/nn/modules/container.py +911 -0
  12. venv/lib/python3.10/site-packages/torch/nn/modules/flatten.py +144 -0
  13. venv/lib/python3.10/site-packages/torch/nn/modules/instancenorm.py +434 -0
  14. venv/lib/python3.10/site-packages/torch/nn/modules/linear.py +264 -0
  15. venv/lib/python3.10/site-packages/torch/nn/modules/module.py +0 -0
  16. venv/lib/python3.10/site-packages/torch/nn/modules/pooling.py +1306 -0
  17. venv/lib/python3.10/site-packages/torch/nn/modules/rnn.py +1480 -0
  18. venv/lib/python3.10/site-packages/torch/nn/modules/upsampling.py +264 -0
  19. venv/lib/python3.10/site-packages/torch/nn/quantizable/__pycache__/__init__.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/nn/quantizable/modules/__init__.py +9 -0
  21. venv/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/nn/quantizable/modules/activation.py +10 -0
  25. venv/lib/python3.10/site-packages/torch/nn/quantizable/modules/rnn.py +11 -0
  26. venv/lib/python3.10/site-packages/torch/nn/quantized/__init__.py +40 -0
  27. venv/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/functional.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/__init__.py +1 -0
  30. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/__pycache__/__init__.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__init__.py +31 -0
  32. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/conv.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/linear.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/sparse.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/utils.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/conv.py +19 -0
  39. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/linear.py +12 -0
  40. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/rnn.py +17 -0
  41. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/sparse.py +13 -0
  42. venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/utils.py +15 -0
  43. venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__init__.py +1 -0
  44. venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__init__.py +32 -0
  46. venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/linear.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/conv.py +18 -0
venv/lib/python3.10/site-packages/torch/nn/attention/__init__.py ADDED
@@ -0,0 +1,117 @@
+"""This module contains functions and classes that alter the behavior of torch.nn.functional.scaled_dot_product_attention"""
+import contextlib
+from typing import List, Union
+from warnings import warn
+
+from torch.backends.cuda import (
+    can_use_efficient_attention,
+    can_use_flash_attention,
+    enable_flash_sdp,
+    enable_math_sdp,
+    enable_mem_efficient_sdp,
+    flash_sdp_enabled,
+    math_sdp_enabled,
+    mem_efficient_sdp_enabled,
+    SDPAParams,
+)
+
+__all__: List[str] = ["SDPBackend", "sdpa_kernel", "WARN_FOR_UNFUSED_KERNELS"]
+
+# Note: [SDPA warnings]
+# TODO: Consider using this for sdpa regardless of subclasses
+# This only affects users of bias subclasses.
+# If this is set to True, we will warn the user if they are not using the fused kernels,
+# and it will raise warnings for all the reasons why the fused kernels can't be run.
+# To set this to True, run
+# torch.nn.attention.WARN_FOR_UNFUSED_KERNELS = True
+WARN_FOR_UNFUSED_KERNELS = False
+
+
+from torch._C import _SDPBackend as SDPBackend
+
+# Hacks for Sphinx documentation:
+# https://stackoverflow.com/questions/38765577/overriding-sphinx-autodoc-alias-of-for-import-of-private-class
+SDPBackend = SDPBackend
+r"""An enum-like class that contains the different backends for scaled dot product attention.
+This backend class is designed to be used with the sdpa_kernel context manager.
+
+The following Enums are available:
+    - ERROR: An error occurred when trying to determine the backend.
+    - MATH: The math backend for scaled dot product attention.
+    - FLASH_ATTENTION: The flash attention backend for scaled dot product attention.
+    - EFFICIENT_ATTENTION: The efficient attention backend for scaled dot product attention.
+    - CUDNN_ATTENTION: The cuDNN backend for scaled dot product attention.
+
+See :func:`torch.nn.attention.sdpa_kernel` for more details.
+
+.. warning:: This class is in beta and subject to change.
+"""
+SDPBackend.__module__ = __name__
+SDPBackend.__name__ = "SDPBackend"
+
+
+def _raise_kernel_warnings(params: SDPAParams) -> None:
+    """
+    If WARN_FOR_UNFUSED_KERNELS is set to True, this will raise warnings
+    for all the reasons why the fused kernels can't be run.
+    """
+    if WARN_FOR_UNFUSED_KERNELS:
+        if not can_use_efficient_attention(params):
+            warn("Efficient attention can't be used because:")
+            can_use_efficient_attention(params, True)
+        if not can_use_flash_attention(params):
+            warn("Flash attention can't be used because:")
+            can_use_flash_attention(params, True)
+
+
+@contextlib.contextmanager
+def sdpa_kernel(backends: Union[List[SDPBackend], SDPBackend]):
+    r"""
+    Context manager to select which backend to use for scaled dot product attention.
+
+    .. warning:: This function is beta and subject to change.
+
+    Args:
+        backends (Union[List[SDPBackend], SDPBackend]): A backend or list of backends for scaled dot product attention.
+
+    Example:
+
+    .. code-block:: python
+
+        from torch.nn.functional import scaled_dot_product_attention
+        from torch.nn.attention import SDPBackend, sdpa_kernel
+        # Only enable flash attention backend
+        with sdpa_kernel(SDPBackend.FLASH_ATTENTION):
+            scaled_dot_product_attention(...)
+
+        # Enable the Math or Efficient attention backends
+        with sdpa_kernel([SDPBackend.MATH, SDPBackend.EFFICIENT_ATTENTION]):
+            scaled_dot_product_attention(...)
+
+    This context manager can be used to select which backend to use for scaled dot product attention.
+    Upon exiting the context manager, the previous state of the flags will be restored, enabling all backends.
+    """
+    assert isinstance(
+        backends, (list, SDPBackend)
+    ), "Backend must be an instance of SDPBackend or a list of SDPBackend instances"
+
+    if isinstance(backends, SDPBackend):
+        backends = [backends]
+
+    backends = set(backends)
+    previous_flash: bool = flash_sdp_enabled()
+    previous_mem_efficient: bool = mem_efficient_sdp_enabled()
+    previous_math: bool = math_sdp_enabled()
+    try:
+        enable_flash = SDPBackend.FLASH_ATTENTION in backends
+        enable_mem_efficient = SDPBackend.EFFICIENT_ATTENTION in backends
+        enable_math = SDPBackend.MATH in backends
+
+        enable_flash_sdp(enable_flash)
+        enable_mem_efficient_sdp(enable_mem_efficient)
+        enable_math_sdp(enable_math)
+        yield {}
+    finally:
+        enable_flash_sdp(previous_flash)
+        enable_mem_efficient_sdp(previous_mem_efficient)
+        enable_math_sdp(previous_math)
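For quick reference outside the docstring, a minimal runnable sketch of the context manager above (it assumes a CUDA device so the flash backend is actually eligible, and uses only the public names this file exports):

    import torch
    import torch.nn.functional as F
    from torch.nn.attention import SDPBackend, sdpa_kernel

    q, k, v = (torch.randn(2, 8, 128, 64, device="cuda", dtype=torch.float16) for _ in range(3))

    # Restrict SDPA to the flash kernel inside the block; on exit all backends are re-enabled.
    with sdpa_kernel(SDPBackend.FLASH_ATTENTION):
        out = F.scaled_dot_product_attention(q, k, v)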
venv/lib/python3.10/site-packages/torch/nn/attention/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.12 kB).
 
venv/lib/python3.10/site-packages/torch/nn/attention/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (2.35 kB).
 
venv/lib/python3.10/site-packages/torch/nn/attention/__pycache__/bias.cpython-310.pyc ADDED
Binary file (11.3 kB).
 
venv/lib/python3.10/site-packages/torch/nn/attention/_utils.py ADDED
@@ -0,0 +1,57 @@
+"""Defines utilities for interacting with scaled_dot_product_attention"""
+import math
+from typing import List, Optional
+
+import torch
+
+__all__: List[str] = []
+
+
+def _input_requires_grad(*tensors: torch.Tensor) -> bool:
+    """Returns True if any of the tensors requires grad"""
+    return any(t.requires_grad for t in tensors)
+
+
+def _postprocess_flash_output(inpt_tensor: torch.Tensor, og_size: int) -> torch.Tensor:
+    """Handles the unpad of the last dimension"""
+    if inpt_tensor.size(-1) != og_size:
+        return inpt_tensor[..., :og_size]
+    return inpt_tensor
+
+
+def _calculate_scale(head_dim_size: int, scale: Optional[float]) -> float:
+    """
+    For FlashAttention we pad the head dimension to be a multiple of 8, so we need to scale the output
+    by the original head size and not the padded one.
+    """
+    if scale is not None:
+        return scale
+    return 1.0 / math.sqrt(head_dim_size)
+
+
+def _validate_sdpa_input(
+    query: torch.Tensor,
+    key: torch.Tensor,
+    value: torch.Tensor,
+    attn_mask: Optional[torch.Tensor] = None,
+    dropout_p=0.0,
+    is_causal=False,
+    scale=None,
+):
+    if query.dtype != key.dtype or query.dtype != value.dtype:
+        raise ValueError(
+            f"Expected query, key, and value to have the same dtype, "
+            f"but got query.dtype: {query.dtype}, key.dtype: {key.dtype}, "
+            f"and value.dtype: {value.dtype} instead."
+        )
+    if query.device != key.device or query.device != value.device:
+        raise ValueError(
+            f"Expected query, key, and value to have the same device type, "
+            f"but got query.device: {query.device}, key.device: {key.device}, "
+            f"and value.device: {value.device} instead."
+        )
+    if query.dim() < 2 or key.dim() < 2 or value.dim() < 2:
+        raise ValueError(
+            f"Expected query, key, and value to all be at least 2 dimensional, but got query.dim: "
+            f"{query.dim()}, key.dim: {key.dim()} and value.dim: {value.dim()} instead."
+        )
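For context, the 1.0 / math.sqrt(head_dim_size) fallback in _calculate_scale is the same default softmax scale that the public scaled_dot_product_attention applies. A minimal CPU sketch checking that, assuming a PyTorch version where SDPA accepts the scale keyword (the version vendored here does, since _dispatch in bias.py forwards it):

    import math
    import torch
    import torch.nn.functional as F

    q = torch.randn(2, 4, 8, 16)  # (batch, heads, seq_len, head_dim)
    k = torch.randn(2, 4, 8, 16)
    v = torch.randn(2, 4, 8, 16)

    # What _calculate_scale(16, None) returns:
    default_scale = 1.0 / math.sqrt(q.size(-1))

    # Passing the equivalent scale explicitly matches the default behavior.
    out_default = F.scaled_dot_product_attention(q, k, v)
    out_explicit = F.scaled_dot_product_attention(q, k, v, scale=default_scale)
    torch.testing.assert_close(out_default, out_explicit)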
venv/lib/python3.10/site-packages/torch/nn/attention/bias.py ADDED
@@ -0,0 +1,353 @@
+"""Defines bias subclasses that work with scaled_dot_product_attention"""
+from enum import auto, IntEnum
+from typing import Optional
+from warnings import warn
+
+import torch
+from torch.backends.cuda import (
+    can_use_efficient_attention,
+    can_use_flash_attention,
+    SDPAParams,
+)
+from torch.nn.attention import _raise_kernel_warnings
+from torch.nn.attention._utils import (
+    _calculate_scale,
+    _input_requires_grad,
+    _postprocess_flash_output,
+    _validate_sdpa_input,
+)
+from torch.nn.functional import scaled_dot_product_attention
+
+__all__ = ["causal_upper_left", "causal_lower_right", "CausalVariant", "CausalBias"]
+
+
+torch._dynamo.allow_in_graph(can_use_flash_attention)
+torch._dynamo.allow_in_graph(can_use_efficient_attention)
+torch._dynamo.allow_in_graph(SDPAParams)
+
+
+class CausalVariant(IntEnum):
+    r"""
+    Enum for causal variants used in attention mechanisms.
+
+    Defines two types of causal biases:
+
+    `UPPER_LEFT`: Represents upper-left triangular bias for standard causal attention.
+    The equivalent pytorch code for constructing this bias is:
+
+    .. code-block:: python
+
+        torch.tril(torch.ones(size, dtype=torch.bool))
+
+    For instance, with `shape=(3,4)`, the materialized bias tensor will be:
+
+    .. code-block:: text
+
+        [[1, 0, 0, 0],
+         [1, 1, 0, 0],
+         [1, 1, 1, 0]]
+
+
+    `LOWER_RIGHT`: Represents lower-right triangular bias; the included values are aligned to the lower
+    right corner of the matrix.
+
+    The equivalent pytorch code for constructing this bias is:
+
+    .. code-block:: python
+
+        diagonal_offset = size[1] - size[0]
+        torch.tril(
+            torch.ones(size, dtype=torch.bool),
+            diagonal=diagonal_offset,
+        )
+
+    For instance, with `shape=(3,4)`, the materialized bias tensor will be:
+
+    .. code-block:: text
+
+        [[1, 1, 0, 0],
+         [1, 1, 1, 0],
+         [1, 1, 1, 1]]
+
+    Note that these variants are equivalent to each other when the sequence lengths of the query and key/value
+    tensors are equal, since the triangular matrix is square.
+
+    .. warning:: This enum is a prototype and subject to change.
+    """
+
+    UPPER_LEFT = auto()
+    LOWER_RIGHT = auto()
+
+
+class CausalBias(torch.Tensor):
+    """
+    A bias representing causal attention patterns. For an overview of the bias structure, see the :class:`CausalVariant` enum.
+
+    This class is used for defining causal (triangular) attention biases. For constructing the bias, there exist
+    two factory functions: :func:`causal_upper_left` and :func:`causal_lower_right`.
+
+    Example:
+
+    .. code-block:: python
+
+        import torch
+        import torch.nn.functional as F
+        from torch.nn.attention.bias import causal_lower_right
+
+        bsz, num_heads, seqlen_q, seqlen_kv, head_dim = 32, 8, 4, 12, 8
+
+        # Create a lower-right causal bias
+        attn_bias = causal_lower_right(seqlen_q, seqlen_kv)
+
+        q = torch.randn(bsz, num_heads, seqlen_q, head_dim, device="cuda", dtype=torch.float16)
+        k = torch.randn(bsz, num_heads, seqlen_kv, head_dim, device="cuda", dtype=torch.float16)
+        v = torch.randn(bsz, num_heads, seqlen_kv, head_dim, device="cuda", dtype=torch.float16)
+
+        out = F.scaled_dot_product_attention(q, k, v, attn_bias)
+
+    .. warning:: This class is a prototype and subject to change.
+    """
+
+    def __init__(self, variant: CausalVariant, seq_len_q: int, seq_len_kv: int):
+        """
+        Initializes the CausalBias instance with a specified variant and sequence lengths.
+
+        Args:
+            variant (CausalVariant): The type of causal bias to use (either UPPER_LEFT or LOWER_RIGHT).
+            seq_len_q (int): The sequence length of the query tensor.
+            seq_len_kv (int): The sequence length of the key/value tensor.
+
+        Raises a warning if the LOWER_RIGHT variant is used with seq_len_q > seq_len_kv, as it may produce NaNs.
+        """
+        assert isinstance(variant, CausalVariant)
+        self.variant = variant
+        self.seq_len_q = seq_len_q
+        self.seq_len_kv = seq_len_kv
+        if seq_len_q > seq_len_kv and variant == CausalVariant.LOWER_RIGHT:
+            warn(
+                "Lower right causal bias will produce NaNs in the output when seq_len_q > seq_len_kv!"
+            )
+
+    def _upper_left(self, device: torch.device) -> torch.Tensor:
+        """Upper left causal bias"""
+        return torch.tril(
+            torch.ones(self.seq_len_q, self.seq_len_kv, device=device, dtype=torch.bool)
+        )
+
+    def _lower_right(self, device: torch.device) -> torch.Tensor:
+        """Lower right causal bias"""
+        diagonal_offset = self.seq_len_kv - self.seq_len_q
+        return torch.tril(
+            torch.ones(
+                self.seq_len_q, self.seq_len_kv, device=device, dtype=torch.bool
+            ),
+            diagonal=diagonal_offset,
+        )
+
+    def _materialize(self, device: Optional[torch.device] = None) -> torch.Tensor:
+        """
+        Materializes the causal bias into a tensor form.
+
+        Depending on the variant, this method generates either an upper-left or lower-right
+        triangular matrix to represent the causal bias.
+
+        Args:
+            device (Optional[torch.device]): The device on which to create the tensor. Defaults to CPU.
+
+        Returns:
+            torch.Tensor: The materialized bias tensor.
+        """
+        if device is None:
+            device = torch.device("cpu")
+        if self.variant == CausalVariant.UPPER_LEFT:
+            return self._upper_left(device)
+        elif self.variant == CausalVariant.LOWER_RIGHT:
+            return self._lower_right(device)
+
+    @staticmethod
+    def _dispatch(
+        query: torch.Tensor,
+        key: torch.Tensor,
+        value: torch.Tensor,
+        attn_mask: "CausalBias",
+        dropout_p: float = 0.0,
+        is_causal: bool = False,
+        scale: Optional[float] = None,
+    ) -> torch.Tensor:
+        r"""
+        Handles the logic for computing attention with the specified causal bias.
+
+        Args:
+            query (Tensor): Query tensor; shape :math:`(N, ..., L, E)`.
+            key (Tensor): Key tensor; shape :math:`(N, ..., S, E)`.
+            value (Tensor): Value tensor; shape :math:`(N, ..., S, Ev)`.
+            attn_mask (CausalBias): The type of causal attention to apply.
+                A boolean mask where a value of True indicates that the element *should* take part in attention.
+                A float mask of the same type as query, key, value that is added to the attention score.
+            dropout_p (float): Dropout probability; if greater than 0.0, dropout is applied.
+            is_causal (bool): If true, assumes upper left causal attention masking and errors if both attn_mask and is_causal
+                are set.
+            scale (optional float): Scaling factor applied prior to softmax. If None, the default value is set
+                to :math:`\frac{1}{\sqrt{E}}`.
+
+        Returns:
+            output (Tensor): Attention output; shape :math:`(N, ..., L, Ev)`.
+
+        Raises:
+            ValueError: If the causal bias variant is not a CausalVariant type.
+        """
+        if is_causal:
+            raise ValueError("CausalBias should not be used with causal=True")
+
+        if (
+            attn_mask.seq_len_q == attn_mask.seq_len_kv
+            or attn_mask.variant == CausalVariant.UPPER_LEFT
+        ):
+            return scaled_dot_product_attention(
+                query,
+                key,
+                value,
+                attn_mask=None,
+                dropout_p=dropout_p,
+                is_causal=True,
+                scale=scale,
+            )
+        elif attn_mask.variant == CausalVariant.LOWER_RIGHT:
+            _validate_sdpa_input(query, key, value, None, dropout_p, is_causal, scale)
+            sdpa_params = SDPAParams(query, key, value, None, dropout_p, is_causal)
+            if can_use_flash_attention(sdpa_params):
+                needs_padding = query.size(-1) % 8 != 0
+                og_head_size = query.size(-1)
+                og_scale = _calculate_scale(og_head_size, scale)
+                if needs_padding:
+                    query = torch.nn.functional.pad(query, (0, 8 - query.size(-1) % 8))
+                    key = torch.nn.functional.pad(key, (0, 8 - key.size(-1) % 8))
+                    value = torch.nn.functional.pad(value, (0, 8 - value.size(-1) % 8))
+                out = torch.ops.aten._scaled_dot_product_flash_attention(
+                    query,
+                    key,
+                    value,
+                    dropout_p,
+                    is_causal=True,  # TODO: Flash accepts causal = True and for this particular op it means lower right
+                    return_debug_mask=False,
+                    scale=og_scale,
+                )[0]
+                return _postprocess_flash_output(out, og_head_size)
+            if can_use_efficient_attention(sdpa_params):
+                compute_log_sumexp = False
+                if _input_requires_grad(query, key, value):
+                    compute_log_sumexp = True
+                return torch.ops.aten._efficient_attention_forward(
+                    query.transpose(1, 2),
+                    key.transpose(1, 2),
+                    value.transpose(1, 2),
+                    bias=None,
+                    cu_seqlens_q=None,
+                    cu_seqlens_k=None,
+                    max_seqlen_q=None,
+                    max_seqlen_k=None,
+                    dropout_p=dropout_p,
+                    custom_mask_type=int(attn_mask.variant),
+                    compute_log_sumexp=compute_log_sumexp,
+                    scale=scale,
+                    causal_diagonal=None,
+                    seqlen_k=None,
+                )[0].transpose(1, 2)
+            else:
+                _raise_kernel_warnings(sdpa_params)
+                # We can't use a fused kernel; the only support for lower right is via materialization
+                return scaled_dot_product_attention(
+                    query,
+                    key,
+                    value,
+                    attn_mask=attn_mask._materialize(query.device),
+                    dropout_p=dropout_p,
+                    is_causal=False,
+                    scale=scale,
+                )
+        else:
+            raise ValueError(
+                f"CausalBias.variant must be a CausalVariant type, but found: {attn_mask.variant}"
+            )
+
+    @classmethod
+    def __torch_function__(cls, func, types, args=(), kwargs=None):
+        """Defines the behavior of torch.nn.functional.scaled_dot_product_attention when the attn_bias is an AttnBias"""
+        if kwargs is None:
+            kwargs = {}
+        if func != torch.nn.functional.scaled_dot_product_attention:
+            raise NotImplementedError(
+                "CausalBias only supports scaled_dot_product_attention"
+            )
+        return cls._dispatch(*args, **kwargs)
+
+    def __repr__(self):
+        return self._materialize().__repr__()
+
+
+def causal_upper_left(*size) -> CausalBias:
+    """
+    Creates an upper-left triangular causal bias.
+
+    This function generates an upper-left triangular matrix to represent causal attention bias with a
+    diagonal offset set so that the inclusive values are aligned to the upper left corner of the matrix.
+    This is equivalent to the `is_causal=True` argument in `scaled_dot_product_attention`.
+
+    The equivalent pytorch code for constructing this bias is:
+
+    .. code-block:: python
+
+        torch.tril(torch.ones(size, dtype=torch.bool))
+
+    For instance, with `shape=(3,4)`, the materialized bias tensor will be:
+
+    .. code-block:: text
+
+        [[1, 0, 0, 0],
+         [1, 1, 0, 0],
+         [1, 1, 1, 0]]
+
+    Args:
+        size: The size of the bias matrix.
+
+    Returns:
+        CausalBias: The UPPER_LEFT triangular causal bias variant.
+    """
+    assert len(size) == 2, "causal_upper_left only supports 2D tensors"
+    seq_len_q, seq_len_kv = size
+    return CausalBias(CausalVariant.UPPER_LEFT, seq_len_q, seq_len_kv)
+
+
+def causal_lower_right(*size) -> CausalBias:
+    """
+    Creates a lower-right triangular causal bias.
+
+    This function generates a lower-right triangular matrix to represent causal attention bias with a
+    diagonal offset set so that the inclusive values are aligned to the lower right corner of the matrix.
+
+    The equivalent pytorch code for constructing this bias is:
+
+    .. code-block:: python
+
+        diagonal_offset = size[1] - size[0]
+        torch.tril(
+            torch.ones(size, dtype=torch.bool),
+            diagonal=diagonal_offset,
+        )
+
+    For instance, with `shape=(3,4)`, the materialized bias tensor will be:
+
+    .. code-block:: text
+
+        [[1, 1, 0, 0],
+         [1, 1, 1, 0],
+         [1, 1, 1, 1]]
+
+    Args:
+        size: The size of the bias matrix.
+
+    Returns:
+        CausalBias: The LOWER_RIGHT triangular causal bias variant.
+    """
+    assert len(size) == 2, "causal_lower_right only supports 2D tensors"
+    seq_len_q, seq_len_kv = size
+    return CausalBias(CausalVariant.LOWER_RIGHT, seq_len_q, seq_len_kv)
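To see the two variants side by side, a small CPU-only sketch using the factory functions above (printing a CausalBias goes through __repr__, which materializes the mask):

    from torch.nn.attention.bias import causal_lower_right, causal_upper_left

    # 3 query positions attending over 4 key/value positions
    print(causal_upper_left(3, 4))   # tril anchored at the top-left corner
    print(causal_lower_right(3, 4))  # tril shifted so the last query sees all 4 keys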
venv/lib/python3.10/site-packages/torch/nn/modules/__init__.py ADDED
@@ -0,0 +1,68 @@
+from .module import Module
+from .linear import Identity, Linear, Bilinear, LazyLinear
+from .conv import Conv1d, Conv2d, Conv3d, \
+    ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
+    LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
+from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
+    Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
+    Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
+    Hardsigmoid, Hardswish, SiLU, Mish
+from .loss import L1Loss, NLLLoss, KLDivLoss, MSELoss, BCELoss, BCEWithLogitsLoss, NLLLoss2d, \
+    CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
+    MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
+    SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
+from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
+from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
+    MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, LPPool3d, \
+    AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
+from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
+    LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
+from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
+    LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
+from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
+from .dropout import Dropout, Dropout1d, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
+from .padding import ReflectionPad1d, ReflectionPad2d, ReflectionPad3d, ReplicationPad1d, ReplicationPad2d, \
+    ReplicationPad3d, ZeroPad1d, ZeroPad2d, ZeroPad3d, ConstantPad1d, ConstantPad2d, ConstantPad3d, \
+    CircularPad1d, CircularPad2d, CircularPad3d
+from .sparse import Embedding, EmbeddingBag
+from .rnn import RNNBase, RNN, LSTM, GRU, \
+    RNNCellBase, RNNCell, LSTMCell, GRUCell
+from .pixelshuffle import PixelShuffle, PixelUnshuffle
+from .upsampling import UpsamplingNearest2d, UpsamplingBilinear2d, Upsample
+from .distance import PairwiseDistance, CosineSimilarity
+from .fold import Fold, Unfold
+from .adaptive import AdaptiveLogSoftmaxWithLoss
+from .transformer import TransformerEncoder, TransformerDecoder, \
+    TransformerEncoderLayer, TransformerDecoderLayer, Transformer
+from .flatten import Flatten, Unflatten
+from .channelshuffle import ChannelShuffle
+
+__all__ = [
+    'Module', 'Identity', 'Linear', 'Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d',
+    'ConvTranspose2d', 'ConvTranspose3d', 'Threshold', 'ReLU', 'Hardtanh', 'ReLU6',
+    'Sigmoid', 'Tanh', 'Softmax', 'Softmax2d', 'LogSoftmax', 'ELU', 'SELU', 'CELU', 'GLU', 'GELU', 'Hardshrink',
+    'LeakyReLU', 'LogSigmoid', 'Softplus', 'Softshrink', 'MultiheadAttention', 'PReLU', 'Softsign', 'Softmin',
+    'Tanhshrink', 'RReLU', 'L1Loss', 'NLLLoss', 'KLDivLoss', 'MSELoss', 'BCELoss', 'BCEWithLogitsLoss',
+    'NLLLoss2d', 'PoissonNLLLoss', 'CosineEmbeddingLoss', 'CTCLoss', 'HingeEmbeddingLoss', 'MarginRankingLoss',
+    'MultiLabelMarginLoss', 'MultiLabelSoftMarginLoss', 'MultiMarginLoss', 'SmoothL1Loss', 'GaussianNLLLoss',
+    'HuberLoss', 'SoftMarginLoss', 'CrossEntropyLoss', 'Container', 'Sequential', 'ModuleList', 'ModuleDict',
+    'ParameterList', 'ParameterDict', 'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'MaxPool1d', 'MaxPool2d',
+    'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'FractionalMaxPool2d', 'FractionalMaxPool3d',
+    'LPPool1d', 'LPPool2d', 'LPPool3d', 'LocalResponseNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d',
+    'InstanceNorm1d', 'InstanceNorm2d', 'InstanceNorm3d', 'LayerNorm', 'GroupNorm', 'SyncBatchNorm',
+    'Dropout', 'Dropout1d', 'Dropout2d', 'Dropout3d', 'AlphaDropout', 'FeatureAlphaDropout',
+    'ReflectionPad1d', 'ReflectionPad2d', 'ReflectionPad3d', 'ReplicationPad2d', 'ReplicationPad1d', 'ReplicationPad3d',
+    'CrossMapLRN2d', 'Embedding', 'EmbeddingBag', 'RNNBase', 'RNN', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell',
+    'LSTMCell', 'GRUCell', 'PixelShuffle', 'PixelUnshuffle', 'Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d',
+    'PairwiseDistance', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d',
+    'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d', 'TripletMarginLoss', 'ZeroPad1d', 'ZeroPad2d', 'ZeroPad3d',
+    'ConstantPad1d', 'ConstantPad2d', 'ConstantPad3d', 'Bilinear', 'CosineSimilarity', 'Unfold', 'Fold',
+    'AdaptiveLogSoftmaxWithLoss', 'TransformerEncoder', 'TransformerDecoder',
+    'TransformerEncoderLayer', 'TransformerDecoderLayer', 'Transformer',
+    'LazyLinear', 'LazyConv1d', 'LazyConv2d', 'LazyConv3d',
+    'LazyConvTranspose1d', 'LazyConvTranspose2d', 'LazyConvTranspose3d',
+    'LazyBatchNorm1d', 'LazyBatchNorm2d', 'LazyBatchNorm3d',
+    'LazyInstanceNorm1d', 'LazyInstanceNorm2d', 'LazyInstanceNorm3d',
+    'Flatten', 'Unflatten', 'Hardsigmoid', 'Hardswish', 'SiLU', 'Mish', 'TripletMarginWithDistanceLoss', 'ChannelShuffle',
+    'CircularPad1d', 'CircularPad2d', 'CircularPad3d'
+]
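This __init__ only re-exports the concrete module classes; everything in __all__ is what users normally reach as torch.nn.<Name>, e.g.:

    import torch.nn as nn

    # Linear, ReLU, and BatchNorm1d are all re-exported through the imports above.
    model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.BatchNorm1d(32))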
venv/lib/python3.10/site-packages/torch/nn/modules/_functions.py ADDED
@@ -0,0 +1,288 @@
+import torch
+import torch.distributed as dist
+
+from torch.autograd.function import Function
+
+
+class SyncBatchNorm(Function):
+
+    @staticmethod
+    def forward(self, input, weight, bias, running_mean, running_var, eps, momentum, process_group, world_size):
+        if not (
+            input.is_contiguous(memory_format=torch.channels_last) or
+            input.is_contiguous(memory_format=torch.channels_last_3d)
+        ):
+            input = input.contiguous()
+        if weight is not None:
+            weight = weight.contiguous()
+
+        size = int(input.numel() // input.size(1))
+        if size == 1 and world_size < 2:
+            raise ValueError(f'Expected more than 1 value per channel when training, got input size {size}')
+
+        num_channels = input.shape[1]
+        if input.numel() > 0:
+            # calculate mean/invstd for input.
+            mean, invstd = torch.batch_norm_stats(input, eps)
+
+            count = torch.full(
+                (1,),
+                input.numel() // input.size(1),
+                dtype=mean.dtype,
+                device=mean.device
+            )
+
+            # C, C, 1 -> (2C + 1)
+            combined = torch.cat([mean, invstd, count], dim=0)
+        else:
+            # for empty input, set stats and the count to zero. The stats with
+            # zero count will be filtered out later when computing global mean
+            # & invstd, but they still need to participate in the all_gather
+            # collective communication to unblock other peer processes.
+            combined = torch.zeros(
+                2 * num_channels + 1,
+                dtype=input.dtype,
+                device=input.device
+            )
+
+        # Use allgather instead of allreduce because count could be different across
+        # ranks; a simple all_reduce op cannot give correct results.
+        # batch_norm_gather_stats_with_counts calculates global mean & invstd based on
+        # all gathered mean, invstd and count.
+        # For the NCCL backend, use the optimized version of all gather.
+        # The Gloo backend does not support `all_gather_into_tensor`.
+        if process_group._get_backend_name() != "gloo":
+            # world_size * (2C + 1)
+            combined_size = combined.numel()
+            combined_flat = torch.empty(1,
+                                        combined_size * world_size,
+                                        dtype=combined.dtype,
+                                        device=combined.device)
+            dist.all_gather_into_tensor(combined_flat, combined, process_group, async_op=False)
+            combined = torch.reshape(combined_flat, (world_size, combined_size))
+            # world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
+            mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
+        else:
+            # world_size * (2C + 1)
+            combined_list = [
+                torch.empty_like(combined) for _ in range(world_size)
+            ]
+            dist.all_gather(combined_list, combined, process_group, async_op=False)
+            combined = torch.stack(combined_list, dim=0)
+            # world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
+            mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
+
+        if not (torch.cuda.is_available() and torch.cuda.is_current_stream_capturing()):
+            # The lines below force a synchronization between CUDA and CPU, because
+            # the shape of the result count_all depends on the values in the mask tensor.
+            # Such synchronizations break CUDA Graph capturing.
+            # See https://github.com/pytorch/pytorch/issues/78549
+            # FIXME: https://github.com/pytorch/pytorch/issues/78656 describes
+            # a better longer-term solution.
+
+            # remove stats from empty inputs
+            mask = count_all.squeeze(-1) >= 1
+            count_all = count_all[mask]
+            mean_all = mean_all[mask]
+            invstd_all = invstd_all[mask]
+
+        # calculate global mean & invstd
+        counts = count_all.view(-1)
+        if running_mean is not None and counts.dtype != running_mean.dtype:
+            counts = counts.to(running_mean.dtype)
+        mean, invstd = torch.batch_norm_gather_stats_with_counts(
+            input,
+            mean_all,
+            invstd_all,
+            running_mean,
+            running_var,
+            momentum,
+            eps,
+            counts,
+        )
+
+        self.save_for_backward(input, weight, mean, invstd, count_all.to(torch.int32))
+        self.process_group = process_group
+
+        # apply element-wise normalization
+        if input.numel() > 0:
+            return torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
+        else:
+            return torch.empty_like(input)
+
+    @staticmethod
+    def backward(self, grad_output):
+        if not (
+            grad_output.is_contiguous(memory_format=torch.channels_last) or
+            grad_output.is_contiguous(memory_format=torch.channels_last_3d)
+        ):
+            grad_output = grad_output.contiguous()
+        saved_input, weight, mean, invstd, count_tensor = self.saved_tensors
+        grad_input = grad_weight = grad_bias = None
+        process_group = self.process_group
+
+        if saved_input.numel() > 0:
+            # calculate local stats as well as grad_weight / grad_bias
+            sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(
+                grad_output,
+                saved_input,
+                mean,
+                invstd,
+                weight,
+                self.needs_input_grad[0],
+                self.needs_input_grad[1],
+                self.needs_input_grad[2]
+            )
+
+            if self.needs_input_grad[0]:
+                # synchronizing stats used to calculate input gradient.
+                num_channels = sum_dy.shape[0]
+                combined = torch.cat([sum_dy, sum_dy_xmu], dim=0)
+                torch.distributed.all_reduce(
+                    combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False)
+                sum_dy, sum_dy_xmu = torch.split(combined, num_channels)
+
+                # backward pass for gradient calculation
+                if weight is not None and weight.dtype != mean.dtype:
+                    weight = weight.to(mean.dtype)
+                grad_input = torch.batch_norm_backward_elemt(
+                    grad_output,
+                    saved_input,
+                    mean,
+                    invstd,
+                    weight,
+                    sum_dy,
+                    sum_dy_xmu,
+                    count_tensor
+                )
+            # synchronizing of grad_weight / grad_bias is not needed as distributed
+            # training would handle all reduce.
+            if weight is None or not self.needs_input_grad[1]:
+                grad_weight = None
+
+            if weight is None or not self.needs_input_grad[2]:
+                grad_bias = None
+        else:
+            # This process got an empty input tensor in the forward pass.
+            # Although this process can directly set grad_input as an empty
+            # tensor of zeros, it still needs to participate in the collective
+            # communication to unblock its peers, as other peer processes might
+            # have received non-empty inputs.
+            num_channels = saved_input.shape[1]
+            if self.needs_input_grad[0]:
+                # launch all_reduce to unblock other peer processes
+                combined = torch.zeros(
+                    2 * num_channels,
+                    dtype=saved_input.dtype,
+                    device=saved_input.device
+                )
+                torch.distributed.all_reduce(
+                    combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False)
+
+            # Leave grad_input, grad_weight and grad_bias as None, which will be
+            # interpreted by the autograd engine as Tensors full of zeros.
+
+        return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
+
+
+class CrossMapLRN2d(Function):
+
+    @staticmethod
+    def forward(ctx, input, size, alpha=1e-4, beta=0.75, k=1):
+        ctx.size = size
+        ctx.alpha = alpha
+        ctx.beta = beta
+        ctx.k = k
+        ctx.scale = None
+
+        if input.dim() != 4:
+            raise ValueError(f"CrossMapLRN2d: Expected input to be 4D, got {input.dim()}D instead.")
+
+        ctx.scale = ctx.scale or input.new()
+        output = input.new()
+
+        batch_size = input.size(0)
+        channels = input.size(1)
+        input_height = input.size(2)
+        input_width = input.size(3)
+
+        output.resize_as_(input)
+        ctx.scale.resize_as_(input)
+
+        # use output storage as temporary buffer
+        input_square = output
+        torch.pow(input, 2, out=input_square)
+
+        pre_pad = int((ctx.size - 1) / 2 + 1)
+        pre_pad_crop = min(pre_pad, channels)
+
+        scale_first = ctx.scale.select(1, 0)
+        scale_first.zero_()
+        # compute first feature map normalization
+        for c in range(pre_pad_crop):
+            scale_first.add_(input_square.select(1, c))
+
+        # reuse computations for next feature maps normalization
+        # by adding the next feature map and removing the previous
+        for c in range(1, channels):
+            scale_previous = ctx.scale.select(1, c - 1)
+            scale_current = ctx.scale.select(1, c)
+            scale_current.copy_(scale_previous)
+            if c < channels - pre_pad + 1:
+                square_next = input_square.select(1, c + pre_pad - 1)
+                scale_current.add_(square_next, alpha=1)
+
+            if c > pre_pad:
+                square_previous = input_square.select(1, c - pre_pad)
+                scale_current.add_(square_previous, alpha=-1)
+
+        ctx.scale.mul_(ctx.alpha / ctx.size).add_(ctx.k)
+
+        torch.pow(ctx.scale, -ctx.beta, out=output)
+        output.mul_(input)
+
+        ctx.save_for_backward(input, output)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        input, output = ctx.saved_tensors
+        grad_input = grad_output.new()
+
+        batch_size = input.size(0)
+        channels = input.size(1)
+        input_height = input.size(2)
+        input_width = input.size(3)
+
+        padded_ratio = input.new(channels + ctx.size - 1, input_height,
+                                 input_width)
+        accum_ratio = input.new(input_height, input_width)
+
+        cache_ratio_value = 2 * ctx.alpha * ctx.beta / ctx.size
+        inversePrePad = int(ctx.size - (ctx.size - 1) / 2)
+
+        grad_input.resize_as_(input)
+        torch.pow(ctx.scale, -ctx.beta, out=grad_input).mul_(grad_output)
+
+        padded_ratio.zero_()
+        padded_ratio_center = padded_ratio.narrow(0, inversePrePad,
+                                                  channels)
+        for n in range(batch_size):
+            torch.mul(grad_output[n], output[n], out=padded_ratio_center)
+            padded_ratio_center.div_(ctx.scale[n])
+            torch.sum(
+                padded_ratio.narrow(0, 0, ctx.size - 1), 0, keepdim=False, out=accum_ratio)
+            for c in range(channels):
+                accum_ratio.add_(padded_ratio[c + ctx.size - 1])
+                grad_input[n][c].addcmul_(input[n][c], accum_ratio, value=-cache_ratio_value)
+                accum_ratio.add_(padded_ratio[c], alpha=-1)
+
+        return grad_input, None, None, None, None
+
+
+class BackwardHookFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, *args):
+        ctx.mark_non_differentiable(*[arg for arg in args if not arg.requires_grad])
+        return args
+
+    @staticmethod
+    def backward(ctx, *args):
+        return args
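CrossMapLRN2d implements the same across-channel local response normalization scheme as the public functional API. A sanity-check sketch comparing the two (it is my assumption, not something this file states, that they agree numerically for odd window sizes, hence a printed difference rather than an assertion):

    import torch
    import torch.nn.functional as F
    from torch.nn.modules._functions import CrossMapLRN2d

    x = torch.randn(2, 8, 5, 5)
    size, alpha, beta, k = 5, 1e-4, 0.75, 1.0

    out_fn = CrossMapLRN2d.apply(x, size, alpha, beta, k)          # private autograd Function
    out_ref = F.local_response_norm(x, size, alpha=alpha, beta=beta, k=k)  # public API
    print((out_fn - out_ref).abs().max())  # expected to be ~0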
venv/lib/python3.10/site-packages/torch/nn/modules/batchnorm.py ADDED
@@ -0,0 +1,849 @@
1
+ from typing import Optional, Any
2
+
3
+ import torch
4
+ from torch import Tensor
5
+ from torch.nn.parameter import Parameter, UninitializedParameter, UninitializedBuffer
6
+
7
+ from .. import functional as F
8
+ from .. import init
9
+ from ._functions import SyncBatchNorm as sync_batch_norm
10
+ from .lazy import LazyModuleMixin
11
+ from .module import Module
12
+
13
+ __all__ = ['BatchNorm1d', 'LazyBatchNorm1d', 'BatchNorm2d', 'LazyBatchNorm2d', 'BatchNorm3d',
14
+ 'LazyBatchNorm3d', 'SyncBatchNorm']
15
+
16
+
17
+ class _NormBase(Module):
18
+ """Common base of _InstanceNorm and _BatchNorm."""
19
+
20
+ _version = 2
21
+ __constants__ = ["track_running_stats", "momentum", "eps", "num_features", "affine"]
22
+ num_features: int
23
+ eps: float
24
+ momentum: float
25
+ affine: bool
26
+ track_running_stats: bool
27
+ # WARNING: weight and bias purposely not defined here.
28
+ # See https://github.com/pytorch/pytorch/issues/39670
29
+
30
+ def __init__(
31
+ self,
32
+ num_features: int,
33
+ eps: float = 1e-5,
34
+ momentum: float = 0.1,
35
+ affine: bool = True,
36
+ track_running_stats: bool = True,
37
+ device=None,
38
+ dtype=None
39
+ ) -> None:
40
+ factory_kwargs = {'device': device, 'dtype': dtype}
41
+ super().__init__()
42
+ self.num_features = num_features
43
+ self.eps = eps
44
+ self.momentum = momentum
45
+ self.affine = affine
46
+ self.track_running_stats = track_running_stats
47
+ if self.affine:
48
+ self.weight = Parameter(torch.empty(num_features, **factory_kwargs))
49
+ self.bias = Parameter(torch.empty(num_features, **factory_kwargs))
50
+ else:
51
+ self.register_parameter("weight", None)
52
+ self.register_parameter("bias", None)
53
+ if self.track_running_stats:
54
+ self.register_buffer('running_mean', torch.zeros(num_features, **factory_kwargs))
55
+ self.register_buffer('running_var', torch.ones(num_features, **factory_kwargs))
56
+ self.running_mean: Optional[Tensor]
57
+ self.running_var: Optional[Tensor]
58
+ self.register_buffer('num_batches_tracked',
59
+ torch.tensor(0, dtype=torch.long,
60
+ **{k: v for k, v in factory_kwargs.items() if k != 'dtype'}))
61
+ self.num_batches_tracked: Optional[Tensor]
62
+ else:
63
+ self.register_buffer("running_mean", None)
64
+ self.register_buffer("running_var", None)
65
+ self.register_buffer("num_batches_tracked", None)
66
+ self.reset_parameters()
67
+
68
+ def reset_running_stats(self) -> None:
69
+ if self.track_running_stats:
70
+ # running_mean/running_var/num_batches... are registered at runtime depending
71
+ # if self.track_running_stats is on
72
+ self.running_mean.zero_() # type: ignore[union-attr]
73
+ self.running_var.fill_(1) # type: ignore[union-attr]
74
+ self.num_batches_tracked.zero_() # type: ignore[union-attr,operator]
75
+
76
+ def reset_parameters(self) -> None:
77
+ self.reset_running_stats()
78
+ if self.affine:
79
+ init.ones_(self.weight)
80
+ init.zeros_(self.bias)
81
+
82
+ def _check_input_dim(self, input):
83
+ raise NotImplementedError
84
+
85
+ def extra_repr(self):
86
+ return (
87
+ "{num_features}, eps={eps}, momentum={momentum}, affine={affine}, "
88
+ "track_running_stats={track_running_stats}".format(**self.__dict__)
89
+ )
90
+
91
+ def _load_from_state_dict(
92
+ self,
93
+ state_dict,
94
+ prefix,
95
+ local_metadata,
96
+ strict,
97
+ missing_keys,
98
+ unexpected_keys,
99
+ error_msgs,
100
+ ):
101
+ version = local_metadata.get("version", None)
102
+
103
+ if (version is None or version < 2) and self.track_running_stats:
104
+ # at version 2: added num_batches_tracked buffer
105
+ # this should have a default value of 0
106
+ num_batches_tracked_key = prefix + "num_batches_tracked"
107
+ if num_batches_tracked_key not in state_dict:
108
+ state_dict[num_batches_tracked_key] = (
109
+ self.num_batches_tracked
110
+ if self.num_batches_tracked is not None and self.num_batches_tracked.device != torch.device('meta')
111
+ else torch.tensor(0, dtype=torch.long)
112
+ )
113
+
114
+ super()._load_from_state_dict(
115
+ state_dict,
116
+ prefix,
117
+ local_metadata,
118
+ strict,
119
+ missing_keys,
120
+ unexpected_keys,
121
+ error_msgs,
122
+ )
123
+
124
+
125
+ class _BatchNorm(_NormBase):
126
+ def __init__(
127
+ self,
128
+ num_features: int,
129
+ eps: float = 1e-5,
130
+ momentum: float = 0.1,
131
+ affine: bool = True,
132
+ track_running_stats: bool = True,
133
+ device=None,
134
+ dtype=None
135
+ ) -> None:
136
+ factory_kwargs = {'device': device, 'dtype': dtype}
137
+ super().__init__(
138
+ num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
139
+ )
140
+
141
+ def forward(self, input: Tensor) -> Tensor:
142
+ self._check_input_dim(input)
143
+
144
+ # exponential_average_factor is set to self.momentum
145
+ # (when it is available) only so that it gets updated
146
+ # in ONNX graph when this node is exported to ONNX.
147
+ if self.momentum is None:
148
+ exponential_average_factor = 0.0
149
+ else:
150
+ exponential_average_factor = self.momentum
151
+
152
+ if self.training and self.track_running_stats:
153
+ # TODO: if statement only here to tell the jit to skip emitting this when it is None
154
+ if self.num_batches_tracked is not None: # type: ignore[has-type]
155
+ self.num_batches_tracked.add_(1) # type: ignore[has-type]
156
+ if self.momentum is None: # use cumulative moving average
157
+ exponential_average_factor = 1.0 / float(self.num_batches_tracked)
158
+ else: # use exponential moving average
159
+ exponential_average_factor = self.momentum
160
+
161
+ r"""
162
+ Decide whether the mini-batch stats should be used for normalization rather than the buffers.
163
+ Mini-batch stats are used in training mode, and in eval mode when buffers are None.
164
+ """
165
+ if self.training:
166
+ bn_training = True
167
+ else:
168
+ bn_training = (self.running_mean is None) and (self.running_var is None)
169
+
170
+ r"""
171
+ Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
172
+ passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
173
+ used for normalization (i.e. in eval mode when buffers are not None).
174
+ """
175
+ return F.batch_norm(
176
+ input,
177
+ # If buffers are not to be tracked, ensure that they won't be updated
178
+ self.running_mean
179
+ if not self.training or self.track_running_stats
180
+ else None,
181
+ self.running_var if not self.training or self.track_running_stats else None,
182
+ self.weight,
183
+ self.bias,
184
+ bn_training,
185
+ exponential_average_factor,
186
+ self.eps,
187
+ )
188
+
189
+
190
+ class _LazyNormBase(LazyModuleMixin, _NormBase):
191
+
192
+ weight: UninitializedParameter # type: ignore[assignment]
193
+ bias: UninitializedParameter # type: ignore[assignment]
194
+
195
+ def __init__(self, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,
196
+ device=None, dtype=None) -> None:
197
+ factory_kwargs = {'device': device, 'dtype': dtype}
198
+ super().__init__(
199
+ # affine and track_running_stats are hardcoded to False to
200
+ # avoid creating tensors that will soon be overwritten.
201
+ 0,
202
+ eps,
203
+ momentum,
204
+ False,
205
+ False,
206
+ **factory_kwargs,
207
+ )
208
+ self.affine = affine
209
+ self.track_running_stats = track_running_stats
210
+ if self.affine:
211
+ self.weight = UninitializedParameter(**factory_kwargs)
212
+ self.bias = UninitializedParameter(**factory_kwargs)
213
+ if self.track_running_stats:
214
+ self.running_mean = UninitializedBuffer(**factory_kwargs)
215
+ self.running_var = UninitializedBuffer(**factory_kwargs)
216
+ self.num_batches_tracked = torch.tensor(
217
+ 0, dtype=torch.long, **{k: v for k, v in factory_kwargs.items() if k != 'dtype'})
218
+
219
+ def reset_parameters(self) -> None:
220
+ if not self.has_uninitialized_params() and self.num_features != 0:
221
+ super().reset_parameters()
222
+
223
+ def initialize_parameters(self, input) -> None: # type: ignore[override]
224
+ if self.has_uninitialized_params():
225
+ self.num_features = input.shape[1]
226
+ if self.affine:
227
+ assert isinstance(self.weight, UninitializedParameter)
228
+ assert isinstance(self.bias, UninitializedParameter)
229
+ self.weight.materialize((self.num_features,))
230
+ self.bias.materialize((self.num_features,))
231
+ if self.track_running_stats:
232
+ self.running_mean.materialize((self.num_features,)) # type:ignore[union-attr]
233
+ self.running_var.materialize((self.num_features,)) # type:ignore[union-attr]
234
+ self.reset_parameters()
235
+
236
+
237
+ class BatchNorm1d(_BatchNorm):
238
+ r"""Applies Batch Normalization over a 2D or 3D input.
239
+
240
+ Method described in the paper
241
+ `Batch Normalization: Accelerating Deep Network Training by Reducing
242
+ Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
243
+
244
+ .. math::
245
+
246
+ y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
247
+
248
+ The mean and standard-deviation are calculated per-dimension over
249
+ the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
250
+ of size `C` (where `C` is the number of features or channels of the input). By default, the
251
+ elements of :math:`\gamma` are set to 1 and the elements of :math:`\beta` are set to 0.
252
+ At train time in the forward pass, the standard-deviation is calculated via the biased estimator,
253
+ equivalent to ``torch.var(input, unbiased=False)``. However, the value stored in the
254
+ moving average of the standard-deviation is calculated via the unbiased estimator, equivalent to
255
+ ``torch.var(input, unbiased=True)``.
256
+
257
+ Also by default, during training this layer keeps running estimates of its
258
+ computed mean and variance, which are then used for normalization during
259
+ evaluation. The running estimates are kept with a default :attr:`momentum`
260
+ of 0.1.
261
+
262
+ If :attr:`track_running_stats` is set to ``False``, this layer then does not
263
+ keep running estimates, and batch statistics are instead used during
264
+ evaluation time as well.
265
+
266
+ .. note::
267
+ This :attr:`momentum` argument is different from one used in optimizer
268
+ classes and the conventional notion of momentum. Mathematically, the
269
+ update rule for running statistics here is
270
+ :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
271
+ where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
272
+ new observed value.
273
+
274
+ Because the Batch Normalization is done over the `C` dimension, computing statistics
275
+ on `(N, L)` slices, it's common terminology to call this Temporal Batch Normalization.
276
+
277
+ Args:
278
+ num_features: number of features or channels :math:`C` of the input
279
+ eps: a value added to the denominator for numerical stability.
280
+ Default: 1e-5
281
+ momentum: the value used for the running_mean and running_var
282
+ computation. Can be set to ``None`` for cumulative moving average
283
+ (i.e. simple average). Default: 0.1
284
+ affine: a boolean value that when set to ``True``, this module has
285
+ learnable affine parameters. Default: ``True``
286
+ track_running_stats: a boolean value that when set to ``True``, this
287
+ module tracks the running mean and variance, and when set to ``False``,
288
+ this module does not track such statistics, and initializes statistics
289
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
290
+ When these buffers are ``None``, this module always uses batch statistics.
291
+ in both training and eval modes. Default: ``True``
292
+
293
+ Shape:
294
+ - Input: :math:`(N, C)` or :math:`(N, C, L)`, where :math:`N` is the batch size,
295
+ :math:`C` is the number of features or channels, and :math:`L` is the sequence length
296
+ - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
297
+
298
+ Examples::
299
+
300
+ >>> # With Learnable Parameters
301
+ >>> m = nn.BatchNorm1d(100)
302
+ >>> # Without Learnable Parameters
303
+ >>> m = nn.BatchNorm1d(100, affine=False)
304
+ >>> input = torch.randn(20, 100)
305
+ >>> output = m(input)
306
+ """
307
+
308
+ def _check_input_dim(self, input):
309
+ if input.dim() != 2 and input.dim() != 3:
310
+ raise ValueError(
311
+ f"expected 2D or 3D input (got {input.dim()}D input)"
312
+ )
313
+
314
+
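As a quick sanity check on the running-stats update rule documented above, here is a minimal standalone sketch (an editorial addition, not part of the diff): with freshly initialized buffers, one training step leaves ``running_mean`` at ``momentum * batch_mean``.

import torch
import torch.nn as nn

bn = nn.BatchNorm1d(3, momentum=0.1)   # running_mean starts at zeros
x = torch.randn(20, 3)
bn.train()
_ = bn(x)
# update rule: new = (1 - momentum) * old + momentum * batch_stat
print(torch.allclose(bn.running_mean, 0.1 * x.mean(dim=0), atol=1e-6))  # True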
315
+ class LazyBatchNorm1d(_LazyNormBase, _BatchNorm):
316
+ r"""A :class:`torch.nn.BatchNorm1d` module with lazy initialization.
317
+
318
+ Lazy initialization is based on the ``num_features`` argument of the :class:`BatchNorm1d` that is inferred
319
+ from the ``input.size(1)``.
320
+ The attributes that will be lazily initialized are `weight`, `bias`,
321
+ `running_mean` and `running_var`.
322
+
323
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
324
+ on lazy modules and their limitations.
325
+
326
+ Args:
327
+ eps: a value added to the denominator for numerical stability.
328
+ Default: 1e-5
329
+ momentum: the value used for the running_mean and running_var
330
+ computation. Can be set to ``None`` for cumulative moving average
331
+ (i.e. simple average). Default: 0.1
332
+ affine: a boolean value that when set to ``True``, this module has
333
+ learnable affine parameters. Default: ``True``
334
+ track_running_stats: a boolean value that when set to ``True``, this
335
+ module tracks the running mean and variance, and when set to ``False``,
336
+ this module does not track such statistics, and initializes statistics
337
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
338
+ When these buffers are ``None``, this module always uses batch statistics
339
+ in both training and eval modes. Default: ``True``
340
+ """
341
+
342
+ cls_to_become = BatchNorm1d # type: ignore[assignment]
343
+
344
+ def _check_input_dim(self, input):
345
+ if input.dim() != 2 and input.dim() != 3:
346
+ raise ValueError(
347
+ f"expected 2D or 3D input (got {input.dim()}D input)"
348
+ )
349
+
350
+
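A short sketch of the lazy behavior described above (editorial addition, not part of the diff): ``num_features`` is inferred on the first forward call, after which the module becomes a regular ``BatchNorm1d`` via ``cls_to_become``.

import torch
import torch.nn as nn

m = nn.LazyBatchNorm1d()        # no num_features yet
_ = m(torch.randn(8, 10))       # inferred from input.size(1)
print(m.weight.shape)           # torch.Size([10])
print(type(m).__name__)         # BatchNorm1d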
351
+ class BatchNorm2d(_BatchNorm):
352
+ r"""Applies Batch Normalization over a 4D input.
353
+
354
+ 4D is a mini-batch of 2D inputs
355
+ with additional channel dimension. Method described in the paper
356
+ `Batch Normalization: Accelerating Deep Network Training by Reducing
357
+ Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
358
+
359
+ .. math::
360
+
361
+ y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
362
+
363
+ The mean and standard-deviation are calculated per-dimension over
364
+ the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
365
+ of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set
366
+ to 1 and the elements of :math:`\beta` are set to 0. At train time in the forward pass, the
367
+ standard-deviation is calculated via the biased estimator, equivalent to
368
+ ``torch.var(input, unbiased=False)``. However, the value stored in the moving average of the
369
+ standard-deviation is calculated via the unbiased estimator, equivalent to
370
+ ``torch.var(input, unbiased=True)``.
371
+
372
+ Also by default, during training this layer keeps running estimates of its
373
+ computed mean and variance, which are then used for normalization during
374
+ evaluation. The running estimates are kept with a default :attr:`momentum`
375
+ of 0.1.
376
+
377
+ If :attr:`track_running_stats` is set to ``False``, this layer then does not
378
+ keep running estimates, and batch statistics are instead used during
379
+ evaluation time as well.
380
+
381
+ .. note::
382
+ This :attr:`momentum` argument is different from the one used in optimizer
383
+ classes and the conventional notion of momentum. Mathematically, the
384
+ update rule for running statistics here is
385
+ :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
386
+ where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
387
+ new observed value.
388
+
389
+ Because the Batch Normalization is done over the `C` dimension, computing statistics
390
+ on `(N, H, W)` slices, it's common terminology to call this Spatial Batch Normalization.
391
+
392
+ Args:
393
+ num_features: :math:`C` from an expected input of size
394
+ :math:`(N, C, H, W)`
395
+ eps: a value added to the denominator for numerical stability.
396
+ Default: 1e-5
397
+ momentum: the value used for the running_mean and running_var
398
+ computation. Can be set to ``None`` for cumulative moving average
399
+ (i.e. simple average). Default: 0.1
400
+ affine: a boolean value that when set to ``True``, this module has
401
+ learnable affine parameters. Default: ``True``
402
+ track_running_stats: a boolean value that when set to ``True``, this
403
+ module tracks the running mean and variance, and when set to ``False``,
404
+ this module does not track such statistics, and initializes statistics
405
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
406
+ When these buffers are ``None``, this module always uses batch statistics
407
+ in both training and eval modes. Default: ``True``
408
+
409
+ Shape:
410
+ - Input: :math:`(N, C, H, W)`
411
+ - Output: :math:`(N, C, H, W)` (same shape as input)
412
+
413
+ Examples::
414
+
415
+ >>> # With Learnable Parameters
416
+ >>> m = nn.BatchNorm2d(100)
417
+ >>> # Without Learnable Parameters
418
+ >>> m = nn.BatchNorm2d(100, affine=False)
419
+ >>> input = torch.randn(20, 100, 35, 45)
420
+ >>> output = m(input)
421
+ """
422
+
423
+ def _check_input_dim(self, input):
424
+ if input.dim() != 4:
425
+ raise ValueError(f"expected 4D input (got {input.dim()}D input)")
426
+
427
+
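To make the biased/unbiased split above concrete, a minimal sketch (editorial addition, not part of the diff; ``momentum=1.0`` is used only so the running buffer equals the last batch statistic):

import torch
import torch.nn as nn

bn = nn.BatchNorm2d(3, momentum=1.0)
x = torch.randn(8, 3, 4, 4)
_ = bn(x)
# running_var stores the unbiased per-channel variance over (N, H, W)
ref = x.transpose(0, 1).reshape(3, -1).var(dim=1, unbiased=True)
print(torch.allclose(bn.running_var, ref, atol=1e-5))  # True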
428
+ class LazyBatchNorm2d(_LazyNormBase, _BatchNorm):
429
+ r"""A :class:`torch.nn.BatchNorm2d` module with lazy initialization.
430
+
431
+ Lazy initialization is done for the ``num_features`` argument of the :class:`BatchNorm2d` that is inferred
432
+ from the ``input.size(1)``.
433
+ The attributes that will be lazily initialized are `weight`, `bias`,
434
+ `running_mean` and `running_var`.
435
+
436
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
437
+ on lazy modules and their limitations.
438
+
439
+ Args:
440
+ eps: a value added to the denominator for numerical stability.
441
+ Default: 1e-5
442
+ momentum: the value used for the running_mean and running_var
443
+ computation. Can be set to ``None`` for cumulative moving average
444
+ (i.e. simple average). Default: 0.1
445
+ affine: a boolean value that when set to ``True``, this module has
446
+ learnable affine parameters. Default: ``True``
447
+ track_running_stats: a boolean value that when set to ``True``, this
448
+ module tracks the running mean and variance, and when set to ``False``,
449
+ this module does not track such statistics, and initializes statistics
450
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
451
+ When these buffers are ``None``, this module always uses batch statistics
452
+ in both training and eval modes. Default: ``True``
453
+ """
454
+
455
+ cls_to_become = BatchNorm2d # type: ignore[assignment]
456
+
457
+ def _check_input_dim(self, input):
458
+ if input.dim() != 4:
459
+ raise ValueError(f"expected 4D input (got {input.dim()}D input)")
460
+
461
+
462
+ class BatchNorm3d(_BatchNorm):
463
+ r"""Applies Batch Normalization over a 5D input.
464
+
465
+ 5D is a mini-batch of 3D inputs with additional channel dimension as described in the paper
466
+ `Batch Normalization: Accelerating Deep Network Training by Reducing
467
+ Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
468
+
469
+ .. math::
470
+
471
+ y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
472
+
473
+ The mean and standard-deviation are calculated per-dimension over
474
+ the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
475
+ of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set
476
+ to 1 and the elements of :math:`\beta` are set to 0. At train time in the forward pass, the
477
+ standard-deviation is calculated via the biased estimator, equivalent to
478
+ ``torch.var(input, unbiased=False)``. However, the value stored in the moving average of the
479
+ standard-deviation is calculated via the unbiased estimator, equivalent to
480
+ ``torch.var(input, unbiased=True)``.
481
+
482
+ Also by default, during training this layer keeps running estimates of its
483
+ computed mean and variance, which are then used for normalization during
484
+ evaluation. The running estimates are kept with a default :attr:`momentum`
485
+ of 0.1.
486
+
487
+ If :attr:`track_running_stats` is set to ``False``, this layer then does not
488
+ keep running estimates, and batch statistics are instead used during
489
+ evaluation time as well.
490
+
491
+ .. note::
492
+ This :attr:`momentum` argument is different from the one used in optimizer
493
+ classes and the conventional notion of momentum. Mathematically, the
494
+ update rule for running statistics here is
495
+ :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
496
+ where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
497
+ new observed value.
498
+
499
+ Because the Batch Normalization is done over the `C` dimension, computing statistics
500
+ on `(N, D, H, W)` slices, it's common terminology to call this Volumetric Batch Normalization
501
+ or Spatio-temporal Batch Normalization.
502
+
503
+ Args:
504
+ num_features: :math:`C` from an expected input of size
505
+ :math:`(N, C, D, H, W)`
506
+ eps: a value added to the denominator for numerical stability.
507
+ Default: 1e-5
508
+ momentum: the value used for the running_mean and running_var
509
+ computation. Can be set to ``None`` for cumulative moving average
510
+ (i.e. simple average). Default: 0.1
511
+ affine: a boolean value that when set to ``True``, this module has
512
+ learnable affine parameters. Default: ``True``
513
+ track_running_stats: a boolean value that when set to ``True``, this
514
+ module tracks the running mean and variance, and when set to ``False``,
515
+ this module does not track such statistics, and initializes statistics
516
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
517
+ When these buffers are ``None``, this module always uses batch statistics
518
+ in both training and eval modes. Default: ``True``
519
+
520
+ Shape:
521
+ - Input: :math:`(N, C, D, H, W)`
522
+ - Output: :math:`(N, C, D, H, W)` (same shape as input)
523
+
524
+ Examples::
525
+
526
+ >>> # With Learnable Parameters
527
+ >>> m = nn.BatchNorm3d(100)
528
+ >>> # Without Learnable Parameters
529
+ >>> m = nn.BatchNorm3d(100, affine=False)
530
+ >>> input = torch.randn(20, 100, 35, 45, 10)
531
+ >>> output = m(input)
532
+ """
533
+
534
+ def _check_input_dim(self, input):
535
+ if input.dim() != 5:
536
+ raise ValueError(f"expected 5D input (got {input.dim()}D input)")
537
+
538
+
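Sketch of the ``track_running_stats=False`` behavior described above (editorial addition, not part of the diff): no buffers are kept, and batch statistics are used even under ``eval()``.

import torch
import torch.nn as nn

bn = nn.BatchNorm3d(2, track_running_stats=False)
bn.eval()
y = bn(torch.randn(4, 2, 3, 3, 3))
print(bn.running_mean)                                     # None
print(bool(y.mean(dim=(0, 2, 3, 4)).abs().max() < 1e-4))   # True: per-channel mean ~ 0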
539
+ class LazyBatchNorm3d(_LazyNormBase, _BatchNorm):
540
+ r"""A :class:`torch.nn.BatchNorm3d` module with lazy initialization.
541
+
542
+ Lazy initialization is done for the ``num_features`` argument of the :class:`BatchNorm3d` that is inferred
543
+ from the ``input.size(1)``.
544
+ The attributes that will be lazily initialized are `weight`, `bias`,
545
+ `running_mean` and `running_var`.
546
+
547
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
548
+ on lazy modules and their limitations.
549
+
550
+ Args:
551
+ eps: a value added to the denominator for numerical stability.
552
+ Default: 1e-5
553
+ momentum: the value used for the running_mean and running_var
554
+ computation. Can be set to ``None`` for cumulative moving average
555
+ (i.e. simple average). Default: 0.1
556
+ affine: a boolean value that when set to ``True``, this module has
557
+ learnable affine parameters. Default: ``True``
558
+ track_running_stats: a boolean value that when set to ``True``, this
559
+ module tracks the running mean and variance, and when set to ``False``,
560
+ this module does not track such statistics, and initializes statistics
561
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
562
+ When these buffers are ``None``, this module always uses batch statistics.
563
+ in both training and eval modes. Default: ``True``
564
+ """
565
+
566
+ cls_to_become = BatchNorm3d # type: ignore[assignment]
567
+
568
+ def _check_input_dim(self, input):
569
+ if input.dim() != 5:
570
+ raise ValueError(f"expected 5D input (got {input.dim()}D input)")
571
+
572
+
573
+ class SyncBatchNorm(_BatchNorm):
574
+ r"""Applies Batch Normalization over a N-Dimensional input.
575
+
576
+ The N-D input is a mini-batch of [N-2]D inputs with an additional channel dimension, as described in the paper
577
+ `Batch Normalization: Accelerating Deep Network Training by Reducing
578
+ Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
579
+
580
+ .. math::
581
+
582
+ y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
583
+
584
+ The mean and standard-deviation are calculated per-dimension over all
585
+ mini-batches of the same process groups. :math:`\gamma` and :math:`\beta`
586
+ are learnable parameter vectors of size `C` (where `C` is the input size).
587
+ By default, the elements of :math:`\gamma` are sampled from
588
+ :math:`\mathcal{U}(0, 1)` and the elements of :math:`\beta` are set to 0.
589
+ The standard-deviation is calculated via the biased estimator, equivalent to
590
+ `torch.var(input, unbiased=False)`.
591
+
592
+ Also by default, during training this layer keeps running estimates of its
593
+ computed mean and variance, which are then used for normalization during
594
+ evaluation. The running estimates are kept with a default :attr:`momentum`
595
+ of 0.1.
596
+
597
+ If :attr:`track_running_stats` is set to ``False``, this layer then does not
598
+ keep running estimates, and batch statistics are instead used during
599
+ evaluation time as well.
600
+
601
+ .. note::
602
+ This :attr:`momentum` argument is different from the one used in optimizer
603
+ classes and the conventional notion of momentum. Mathematically, the
604
+ update rule for running statistics here is
605
+ :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
606
+ where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
607
+ new observed value.
608
+
609
+ Because the Batch Normalization is done for each channel in the ``C`` dimension, computing
610
+ statistics on ``(N, +)`` slices, it's common terminology to call this Volumetric Batch
611
+ Normalization or Spatio-temporal Batch Normalization.
612
+
613
+ Currently :class:`SyncBatchNorm` only supports
614
+ :class:`~torch.nn.DistributedDataParallel` (DDP) with single GPU per process. Use
615
+ :meth:`torch.nn.SyncBatchNorm.convert_sync_batchnorm()` to convert
616
+ :attr:`BatchNorm*D` layer to :class:`SyncBatchNorm` before wrapping
617
+ Network with DDP.
618
+
619
+ Args:
620
+ num_features: :math:`C` from an expected input of size
621
+ :math:`(N, C, +)`
622
+ eps: a value added to the denominator for numerical stability.
623
+ Default: ``1e-5``
624
+ momentum: the value used for the running_mean and running_var
625
+ computation. Can be set to ``None`` for cumulative moving average
626
+ (i.e. simple average). Default: 0.1
627
+ affine: a boolean value that when set to ``True``, this module has
628
+ learnable affine parameters. Default: ``True``
629
+ track_running_stats: a boolean value that when set to ``True``, this
630
+ module tracks the running mean and variance, and when set to ``False``,
631
+ this module does not track such statistics, and initializes statistics
632
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
633
+ When these buffers are ``None``, this module always uses batch statistics
634
+ in both training and eval modes. Default: ``True``
635
+ process_group: synchronization of stats happen within each process group
636
+ individually. Default behavior is synchronization across the whole
637
+ world
638
+
639
+ Shape:
640
+ - Input: :math:`(N, C, +)`
641
+ - Output: :math:`(N, C, +)` (same shape as input)
642
+
643
+ .. note::
644
+ Synchronization of batchnorm statistics occurs only while training, i.e.
645
+ synchronization is disabled when ``model.eval()`` is set or if
646
+ ``self.training`` is otherwise ``False``.
647
+
648
+ Examples::
649
+
650
+ >>> # xdoctest: +SKIP
651
+ >>> # With Learnable Parameters
652
+ >>> m = nn.SyncBatchNorm(100)
653
+ >>> # creating process group (optional)
654
+ >>> # ranks is a list of int identifying rank ids.
655
+ >>> ranks = list(range(8))
656
+ >>> r1, r2 = ranks[:4], ranks[4:]
657
+ >>> # Note: every rank calls into new_group for every
658
+ >>> # process group created, even if that rank is not
659
+ >>> # part of the group.
660
+ >>> process_groups = [torch.distributed.new_group(pids) for pids in [r1, r2]]
661
+ >>> process_group = process_groups[0 if dist.get_rank() <= 3 else 1]
662
+ >>> # Without Learnable Parameters
663
+ >>> m = nn.SyncBatchNorm(100, affine=False, process_group=process_group)
664
+ >>> input = torch.randn(20, 100, 35, 45, 10)
665
+ >>> output = m(input)
666
+
667
+ >>> # network is nn.BatchNorm layer
668
+ >>> sync_bn_network = nn.SyncBatchNorm.convert_sync_batchnorm(network, process_group)
669
+ >>> # only single gpu per process is currently supported
670
+ >>> ddp_sync_bn_network = torch.nn.parallel.DistributedDataParallel(
671
+ >>> sync_bn_network,
672
+ >>> device_ids=[args.local_rank],
673
+ >>> output_device=args.local_rank)
674
+ """
675
+
676
+ def __init__(
677
+ self,
678
+ num_features: int,
679
+ eps: float = 1e-5,
680
+ momentum: float = 0.1,
681
+ affine: bool = True,
682
+ track_running_stats: bool = True,
683
+ process_group: Optional[Any] = None,
684
+ device=None,
685
+ dtype=None
686
+ ) -> None:
687
+ factory_kwargs = {'device': device, 'dtype': dtype}
688
+ super().__init__(
689
+ num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
690
+ )
691
+ self.process_group = process_group
692
+
693
+ def _check_input_dim(self, input):
694
+ if input.dim() < 2:
695
+ raise ValueError(
696
+ f"expected at least 2D input (got {input.dim()}D input)"
697
+ )
698
+
699
+ def _check_non_zero_input_channels(self, input):
700
+ if input.size(1) == 0:
701
+ raise ValueError(
702
+ "SyncBatchNorm number of input channels should be non-zero"
703
+ )
704
+
705
+ def forward(self, input: Tensor) -> Tensor:
706
+ self._check_input_dim(input)
707
+ self._check_non_zero_input_channels(input)
708
+
709
+ # exponential_average_factor is set to self.momentum
710
+ # (when it is available) only so that it gets updated
711
+ # in ONNX graph when this node is exported to ONNX.
712
+ if self.momentum is None:
713
+ exponential_average_factor = 0.0
714
+ else:
715
+ exponential_average_factor = self.momentum
716
+
717
+ if self.training and self.track_running_stats:
718
+ assert self.num_batches_tracked is not None
719
+ self.num_batches_tracked.add_(1)
720
+ if self.momentum is None: # use cumulative moving average
721
+ exponential_average_factor = 1.0 / self.num_batches_tracked.item()
722
+ else: # use exponential moving average
723
+ exponential_average_factor = self.momentum
724
+
725
+ r"""
726
+ Decide whether the mini-batch stats should be used for normalization rather than the buffers.
727
+ Mini-batch stats are used in training mode, and in eval mode when buffers are None.
728
+ """
729
+ if self.training:
730
+ bn_training = True
731
+ else:
732
+ bn_training = (self.running_mean is None) and (self.running_var is None)
733
+
734
+ r"""
735
+ Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
736
+ passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
737
+ used for normalization (i.e. in eval mode when buffers are not None).
738
+ """
739
+ # If buffers are not to be tracked, ensure that they won't be updated
740
+ running_mean = (
741
+ self.running_mean if not self.training or self.track_running_stats else None
742
+ )
743
+ running_var = (
744
+ self.running_var if not self.training or self.track_running_stats else None
745
+ )
746
+
747
+ # Don't sync batchnorm stats in inference mode (model.eval()).
748
+ need_sync = (bn_training and self.training and
749
+ torch.distributed.is_available() and torch.distributed.is_initialized())
750
+ if need_sync:
751
+ # currently only GPU/PrivateUse1 input is supported
752
+ if input.device.type not in ["cuda", torch._C._get_privateuse1_backend_name()]:
753
+ raise ValueError("SyncBatchNorm expected input tensor to be on GPU or "
754
+ f"{torch._C._get_privateuse1_backend_name()}")
755
+
756
+ process_group = torch.distributed.group.WORLD
757
+ if self.process_group:
758
+ process_group = self.process_group
759
+ world_size = torch.distributed.get_world_size(process_group)
760
+ need_sync = world_size > 1
761
+
762
+ # fallback to framework BN when synchronization is not necessary
763
+ if not need_sync:
764
+ return F.batch_norm(
765
+ input,
766
+ running_mean,
767
+ running_var,
768
+ self.weight,
769
+ self.bias,
770
+ bn_training,
771
+ exponential_average_factor,
772
+ self.eps,
773
+ )
774
+ else:
775
+ assert bn_training
776
+ return sync_batch_norm.apply(
777
+ input,
778
+ self.weight,
779
+ self.bias,
780
+ running_mean,
781
+ running_var,
782
+ self.eps,
783
+ exponential_average_factor,
784
+ process_group, # type: ignore[possibly-undefined]
785
+ world_size, # type: ignore[possibly-undefined]
786
+ )
787
+
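The ``exponential_average_factor`` branch above implements the cumulative moving average for ``momentum=None``, with factor ``1 / num_batches_tracked``; a minimal single-process sketch (editorial addition, using plain ``BatchNorm1d`` since this factor logic is shared across the batch-norm variants):

import torch
import torch.nn as nn

bn = nn.BatchNorm1d(4, momentum=None)  # cumulative average
for _ in range(5):
    _ = bn(torch.randn(16, 4))
print(bn.num_batches_tracked)          # tensor(5)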
788
+ @classmethod
789
+ def convert_sync_batchnorm(cls, module, process_group=None):
790
+ r"""Converts all :attr:`BatchNorm*D` layers in the model to :class:`torch.nn.SyncBatchNorm` layers.
791
+
792
+ Args:
793
+ module (nn.Module): module containing one or more :attr:`BatchNorm*D` layers
794
+ process_group (optional): process group to scope synchronization,
795
+ default is the whole world
796
+
797
+ Returns:
798
+ The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm`
799
+ layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer,
800
+ a new :class:`torch.nn.SyncBatchNorm` layer object will be returned
801
+ instead.
802
+
803
+ Example::
804
+
805
+ >>> # Network with nn.BatchNorm layer
806
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
807
+ >>> module = torch.nn.Sequential(
808
+ >>> torch.nn.Linear(20, 100),
809
+ >>> torch.nn.BatchNorm1d(100),
810
+ >>> ).cuda()
811
+ >>> # creating process group (optional)
812
+ >>> # ranks is a list of int identifying rank ids.
813
+ >>> ranks = list(range(8))
814
+ >>> r1, r2 = ranks[:4], ranks[4:]
815
+ >>> # Note: every rank calls into new_group for every
816
+ >>> # process group created, even if that rank is not
817
+ >>> # part of the group.
818
+ >>> # xdoctest: +SKIP("distributed")
819
+ >>> process_groups = [torch.distributed.new_group(pids) for pids in [r1, r2]]
820
+ >>> process_group = process_groups[0 if dist.get_rank() <= 3 else 1]
821
+ >>> sync_bn_module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
822
+
823
+ """
824
+ module_output = module
825
+ if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
826
+ module_output = torch.nn.SyncBatchNorm(
827
+ module.num_features,
828
+ module.eps,
829
+ module.momentum,
830
+ module.affine,
831
+ module.track_running_stats,
832
+ process_group,
833
+ )
834
+ if module.affine:
835
+ with torch.no_grad():
836
+ module_output.weight = module.weight
837
+ module_output.bias = module.bias
838
+ module_output.running_mean = module.running_mean
839
+ module_output.running_var = module.running_var
840
+ module_output.num_batches_tracked = module.num_batches_tracked
841
+ module_output.training = module.training
842
+ if hasattr(module, "qconfig"):
843
+ module_output.qconfig = module.qconfig
844
+ for name, child in module.named_children():
845
+ module_output.add_module(
846
+ name, cls.convert_sync_batchnorm(child, process_group)
847
+ )
848
+ del module
849
+ return module_output
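A minimal sketch of the conversion semantics (editorial addition, not part of the diff): the replacement is recursive, and affine parameters are shared with the original layer rather than copied.

import torch
import torch.nn as nn

bn = nn.BatchNorm2d(8)
model = nn.Sequential(nn.Conv2d(3, 8, 3), bn)
sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
print(type(sync_model[1]).__name__)       # SyncBatchNorm
print(sync_model[1].weight is bn.weight)  # True: same Parameter object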
venv/lib/python3.10/site-packages/torch/nn/modules/channelshuffle.py ADDED
@@ -0,0 +1,57 @@
1
+ from .module import Module
2
+ from .. import functional as F
3
+
4
+ from torch import Tensor
5
+
6
+ __all__ = ['ChannelShuffle']
7
+
8
+ class ChannelShuffle(Module):
9
+ r"""Divides and rearranges the channels in a tensor.
10
+
11
+ This operation divides the channels in a tensor of shape :math:`(*, C, H, W)`
12
+ into g groups and rearranges them as :math:`(*, \frac{C}{g}, g, H, W)`,
13
+ while keeping the original tensor shape.
14
+
15
+ Args:
16
+ groups (int): number of groups to divide channels in.
17
+
18
+ Examples::
19
+
20
+ >>> # xdoctest: +IGNORE_WANT("FIXME: incorrect want")
21
+ >>> channel_shuffle = nn.ChannelShuffle(2)
22
+ >>> input = torch.randn(1, 4, 2, 2)
23
+ >>> print(input)
24
+ [[[[1, 2],
25
+ [3, 4]],
26
+ [[5, 6],
27
+ [7, 8]],
28
+ [[9, 10],
29
+ [11, 12]],
30
+ [[13, 14],
31
+ [15, 16]],
32
+ ]]
33
+ >>> output = channel_shuffle(input)
34
+ >>> print(output)
35
+ [[[[1, 2],
36
+ [3, 4]],
37
+ [[9, 10],
38
+ [11, 12]],
39
+ [[5, 6],
40
+ [7, 8]],
41
+ [[13, 14],
42
+ [15, 16]],
43
+ ]]
44
+ """
45
+
46
+ __constants__ = ['groups']
47
+ groups: int
48
+
49
+ def __init__(self, groups: int) -> None:
50
+ super().__init__()
51
+ self.groups = groups
52
+
53
+ def forward(self, input: Tensor) -> Tensor:
54
+ return F.channel_shuffle(input, self.groups)
55
+
56
+ def extra_repr(self) -> str:
57
+ return f'groups={self.groups}'
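For reference, a sketch of the reshape/transpose that ``F.channel_shuffle`` performs (editorial addition, not part of the diff; assumes ``groups`` divides the channel count):

import torch
import torch.nn.functional as F

x = torch.arange(16.).reshape(1, 4, 2, 2)
g = 2
n, c, h, w = x.shape
# group, swap the group and per-group axes, then flatten back to C channels
y = x.reshape(n, g, c // g, h, w).transpose(1, 2).reshape(n, c, h, w)
print(torch.equal(y, F.channel_shuffle(x, g)))  # True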
venv/lib/python3.10/site-packages/torch/nn/modules/container.py ADDED
@@ -0,0 +1,911 @@
1
+ import warnings
2
+ from collections import OrderedDict, abc as container_abcs
3
+ from itertools import chain, islice
4
+ import operator
5
+
6
+ import torch
7
+ from .module import Module
8
+ from ..parameter import Parameter
9
+ from torch._jit_internal import _copy_to_script_wrapper
10
+
11
+ from typing import Any, Dict, Iterable, Iterator, Mapping, Optional, overload, Tuple, TypeVar, Union
12
+ from typing_extensions import Self
13
+
14
+ __all__ = ['Container', 'Sequential', 'ModuleList', 'ModuleDict', 'ParameterList', 'ParameterDict']
15
+
16
+ T = TypeVar('T', bound=Module)
17
+
18
+
19
+ # Copied from torch.nn.modules.module, required for a custom __repr__ for ModuleList
20
+ def _addindent(s_, numSpaces):
21
+ s = s_.split('\n')
22
+ # don't do anything for single-line stuff
23
+ if len(s) == 1:
24
+ return s_
25
+ first = s.pop(0)
26
+ s = [(numSpaces * ' ') + line for line in s]
27
+ s = '\n'.join(s)
28
+ s = first + '\n' + s
29
+ return s
30
+
31
+
32
+ class Container(Module):
33
+
34
+ def __init__(self, **kwargs: Any) -> None:
35
+ super().__init__()
36
+ # DeprecationWarning is ignored by default <sigh>
37
+ warnings.warn("nn.Container is deprecated. All of it's functionality "
38
+ "is now implemented in nn.Module. Subclass that instead.")
39
+ for key, value in kwargs.items():
40
+ self.add_module(key, value)
41
+
42
+
43
+ class Sequential(Module):
44
+ r"""A sequential container.
45
+
46
+ Modules will be added to it in the order they are passed in the
47
+ constructor. Alternatively, an ``OrderedDict`` of modules can be
48
+ passed in. The ``forward()`` method of ``Sequential`` accepts any
49
+ input and forwards it to the first module it contains. It then
50
+ "chains" outputs to inputs sequentially for each subsequent module,
51
+ finally returning the output of the last module.
52
+
53
+ The value a ``Sequential`` provides over manually calling a sequence
54
+ of modules is that it allows treating the whole container as a
55
+ single module, such that performing a transformation on the
56
+ ``Sequential`` applies to each of the modules it stores (which are
57
+ each a registered submodule of the ``Sequential``).
58
+
59
+ What's the difference between a ``Sequential`` and a
60
+ :class:`torch.nn.ModuleList`? A ``ModuleList`` is exactly what it
61
+ sounds like--a list for storing ``Module`` s! On the other hand,
62
+ the layers in a ``Sequential`` are connected in a cascading way.
63
+
64
+ Example::
65
+
66
+ # Using Sequential to create a small model. When `model` is run,
67
+ # input will first be passed to `Conv2d(1,20,5)`. The output of
68
+ # `Conv2d(1,20,5)` will be used as the input to the first
69
+ # `ReLU`; the output of the first `ReLU` will become the input
70
+ # for `Conv2d(20,64,5)`. Finally, the output of
71
+ # `Conv2d(20,64,5)` will be used as input to the second `ReLU`
72
+ model = nn.Sequential(
73
+ nn.Conv2d(1,20,5),
74
+ nn.ReLU(),
75
+ nn.Conv2d(20,64,5),
76
+ nn.ReLU()
77
+ )
78
+
79
+ # Using Sequential with OrderedDict. This is functionally the
80
+ # same as the above code
81
+ model = nn.Sequential(OrderedDict([
82
+ ('conv1', nn.Conv2d(1,20,5)),
83
+ ('relu1', nn.ReLU()),
84
+ ('conv2', nn.Conv2d(20,64,5)),
85
+ ('relu2', nn.ReLU())
86
+ ]))
87
+ """
88
+
89
+ _modules: Dict[str, Module] # type: ignore[assignment]
90
+
91
+ @overload
92
+ def __init__(self, *args: Module) -> None:
93
+ ...
94
+
95
+ @overload
96
+ def __init__(self, arg: 'OrderedDict[str, Module]') -> None:
97
+ ...
98
+
99
+ def __init__(self, *args):
100
+ super().__init__()
101
+ if len(args) == 1 and isinstance(args[0], OrderedDict):
102
+ for key, module in args[0].items():
103
+ self.add_module(key, module)
104
+ else:
105
+ for idx, module in enumerate(args):
106
+ self.add_module(str(idx), module)
107
+
108
+ def _get_item_by_idx(self, iterator, idx) -> T: # type: ignore[misc, type-var]
109
+ """Get the idx-th item of the iterator."""
110
+ size = len(self)
111
+ idx = operator.index(idx)
112
+ if not -size <= idx < size:
113
+ raise IndexError(f'index {idx} is out of range')
114
+ idx %= size
115
+ return next(islice(iterator, idx, None))
116
+
117
+ @_copy_to_script_wrapper
118
+ def __getitem__(self, idx: Union[slice, int]) -> Union['Sequential', T]:
119
+ if isinstance(idx, slice):
120
+ return self.__class__(OrderedDict(list(self._modules.items())[idx]))
121
+ else:
122
+ return self._get_item_by_idx(self._modules.values(), idx)
123
+
124
+ def __setitem__(self, idx: int, module: Module) -> None:
125
+ key: str = self._get_item_by_idx(self._modules.keys(), idx)
126
+ return setattr(self, key, module)
127
+
128
+ def __delitem__(self, idx: Union[slice, int]) -> None:
129
+ if isinstance(idx, slice):
130
+ for key in list(self._modules.keys())[idx]:
131
+ delattr(self, key)
132
+ else:
133
+ key = self._get_item_by_idx(self._modules.keys(), idx)
134
+ delattr(self, key)
135
+ # To preserve numbering
136
+ str_indices = [str(i) for i in range(len(self._modules))]
137
+ self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
138
+
139
+ @_copy_to_script_wrapper
140
+ def __len__(self) -> int:
141
+ return len(self._modules)
142
+
143
+ def __add__(self, other) -> 'Sequential':
144
+ if isinstance(other, Sequential):
145
+ ret = Sequential()
146
+ for layer in self:
147
+ ret.append(layer)
148
+ for layer in other:
149
+ ret.append(layer)
150
+ return ret
151
+ else:
152
+ raise ValueError('add operator supports only objects '
153
+ f'of Sequential class, but {str(type(other))} is given.')
154
+
155
+ def pop(self, key: Union[int, slice]) -> Module:
156
+ v = self[key]
157
+ del self[key]
158
+ return v
159
+
160
+ def __iadd__(self, other) -> Self:
161
+ if isinstance(other, Sequential):
162
+ offset = len(self)
163
+ for i, module in enumerate(other):
164
+ self.add_module(str(i + offset), module)
165
+ return self
166
+ else:
167
+ raise ValueError('add operator supports only objects '
168
+ f'of Sequential class, but {str(type(other))} is given.')
169
+
170
+ def __mul__(self, other: int) -> 'Sequential':
171
+ if not isinstance(other, int):
172
+ raise TypeError(f"unsupported operand type(s) for *: {type(self)} and {type(other)}")
173
+ elif (other <= 0):
174
+ raise ValueError(f"Non-positive multiplication factor {other} for {type(self)}")
175
+ else:
176
+ combined = Sequential()
177
+ offset = 0
178
+ for _ in range(other):
179
+ for module in self:
180
+ combined.add_module(str(offset), module)
181
+ offset += 1
182
+ return combined
183
+
184
+ def __rmul__(self, other: int) -> 'Sequential':
185
+ return self.__mul__(other)
186
+
187
+ def __imul__(self, other: int) -> Self:
188
+ if not isinstance(other, int):
189
+ raise TypeError(f"unsupported operand type(s) for *: {type(self)} and {type(other)}")
190
+ elif (other <= 0):
191
+ raise ValueError(f"Non-positive multiplication factor {other} for {type(self)}")
192
+ else:
193
+ len_original = len(self)
194
+ offset = len(self)
195
+ for _ in range(other - 1):
196
+ for i in range(len_original):
197
+ self.add_module(str(i + offset), self._modules[str(i)])
198
+ offset += len_original
199
+ return self
200
+
201
+ @_copy_to_script_wrapper
202
+ def __dir__(self):
203
+ keys = super().__dir__()
204
+ keys = [key for key in keys if not key.isdigit()]
205
+ return keys
206
+
207
+ @_copy_to_script_wrapper
208
+ def __iter__(self) -> Iterator[Module]:
209
+ return iter(self._modules.values())
210
+
211
+ # NB: We can't really type check this function as the type of input
212
+ # may change dynamically (as is tested in
213
+ # TestScript.test_sequential_intermediary_types). Cannot annotate
214
+ # with Any as TorchScript expects a more precise type
215
+ def forward(self, input):
216
+ for module in self:
217
+ input = module(input)
218
+ return input
219
+
220
+ def append(self, module: Module) -> 'Sequential':
221
+ r"""Append a given module to the end.
222
+
223
+ Args:
224
+ module (nn.Module): module to append
225
+ """
226
+ self.add_module(str(len(self)), module)
227
+ return self
228
+
229
+ def insert(self, index: int, module: Module) -> 'Sequential':
230
+ if not isinstance(module, Module):
231
+ raise AssertionError(
232
+ f'module should be of type: {Module}')
233
+ n = len(self._modules)
234
+ if not (-n <= index <= n):
235
+ raise IndexError(
236
+ f'Index out of range: {index}')
237
+ if index < 0:
238
+ index += n
239
+ for i in range(n, index, -1):
240
+ self._modules[str(i)] = self._modules[str(i - 1)]
241
+ self._modules[str(index)] = module
242
+ return self
243
+
244
+ def extend(self, sequential) -> 'Sequential':
245
+ for layer in sequential:
246
+ self.append(layer)
247
+ return self
248
+
249
+
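A short sketch exercising the list-like operations defined above (editorial addition, not part of the diff):

import torch.nn as nn

s = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
s = s + nn.Sequential(nn.Linear(4, 2))  # __add__ concatenates into a new Sequential
s.insert(1, nn.Dropout(0.1))            # insert shifts and renumbers later modules
print(len(s), [k for k, _ in s.named_children()])  # 4 ['0', '1', '2', '3']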
250
+ class ModuleList(Module):
251
+ r"""Holds submodules in a list.
252
+
253
+ :class:`~torch.nn.ModuleList` can be indexed like a regular Python list, but
254
+ modules it contains are properly registered, and will be visible by all
255
+ :class:`~torch.nn.Module` methods.
256
+
257
+ Args:
258
+ modules (iterable, optional): an iterable of modules to add
259
+
260
+ Example::
261
+
262
+ class MyModule(nn.Module):
263
+ def __init__(self):
264
+ super().__init__()
265
+ self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])
266
+
267
+ def forward(self, x):
268
+ # ModuleList can act as an iterable, or be indexed using ints
269
+ for i, l in enumerate(self.linears):
270
+ x = self.linears[i // 2](x) + l(x)
271
+ return x
272
+ """
273
+
274
+ _modules: Dict[str, Module] # type: ignore[assignment]
275
+
276
+ def __init__(self, modules: Optional[Iterable[Module]] = None) -> None:
277
+ super().__init__()
278
+ if modules is not None:
279
+ self += modules
280
+
281
+ def _get_abs_string_index(self, idx):
282
+ """Get the absolute index for the list of modules."""
283
+ idx = operator.index(idx)
284
+ if not (-len(self) <= idx < len(self)):
285
+ raise IndexError(f'index {idx} is out of range')
286
+ if idx < 0:
287
+ idx += len(self)
288
+ return str(idx)
289
+
290
+ @_copy_to_script_wrapper
291
+ def __getitem__(self, idx: Union[int, slice]) -> Union[Module, 'ModuleList']:
292
+ if isinstance(idx, slice):
293
+ return self.__class__(list(self._modules.values())[idx])
294
+ else:
295
+ return self._modules[self._get_abs_string_index(idx)]
296
+
297
+ def __setitem__(self, idx: int, module: Module) -> None:
298
+ idx = self._get_abs_string_index(idx)
299
+ return setattr(self, str(idx), module)
300
+
301
+ def __delitem__(self, idx: Union[int, slice]) -> None:
302
+ if isinstance(idx, slice):
303
+ for k in range(len(self._modules))[idx]:
304
+ delattr(self, str(k))
305
+ else:
306
+ delattr(self, self._get_abs_string_index(idx))
307
+ # To preserve numbering, self._modules is being reconstructed with modules after deletion
308
+ str_indices = [str(i) for i in range(len(self._modules))]
309
+ self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
310
+
311
+ @_copy_to_script_wrapper
312
+ def __len__(self) -> int:
313
+ return len(self._modules)
314
+
315
+ @_copy_to_script_wrapper
316
+ def __iter__(self) -> Iterator[Module]:
317
+ return iter(self._modules.values())
318
+
319
+ def __iadd__(self, modules: Iterable[Module]) -> Self:
320
+ return self.extend(modules)
321
+
322
+ def __add__(self, other: Iterable[Module]) -> 'ModuleList':
323
+ combined = ModuleList()
324
+ for i, module in enumerate(chain(self, other)):
325
+ combined.add_module(str(i), module)
326
+ return combined
327
+
328
+ def __repr__(self):
329
+ """Return a custom repr for ModuleList that compresses repeated module representations."""
330
+ list_of_reprs = [repr(item) for item in self]
331
+ if len(list_of_reprs) == 0:
332
+ return self._get_name() + '()'
333
+
334
+ start_end_indices = [[0, 0]]
335
+ repeated_blocks = [list_of_reprs[0]]
336
+ for i, r in enumerate(list_of_reprs[1:], 1):
337
+ if r == repeated_blocks[-1]:
338
+ start_end_indices[-1][1] += 1
339
+ continue
340
+
341
+ start_end_indices.append([i, i])
342
+ repeated_blocks.append(r)
343
+
344
+ lines = []
345
+ main_str = self._get_name() + '('
346
+ for (start_id, end_id), b in zip(start_end_indices, repeated_blocks):
347
+ local_repr = f"({start_id}): {b}" # default repr
348
+
349
+ if start_id != end_id:
350
+ n = end_id - start_id + 1
351
+ local_repr = f"({start_id}-{end_id}): {n} x {b}"
352
+
353
+ local_repr = _addindent(local_repr, 2)
354
+ lines.append(local_repr)
355
+
356
+ main_str += '\n ' + '\n '.join(lines) + '\n'
357
+ main_str += ')'
358
+ return main_str
359
+
360
+ @_copy_to_script_wrapper
361
+ def __dir__(self):
362
+ keys = super().__dir__()
363
+ keys = [key for key in keys if not key.isdigit()]
364
+ return keys
365
+
366
+ def insert(self, index: int, module: Module) -> None:
367
+ r"""Insert a given module before a given index in the list.
368
+
369
+ Args:
370
+ index (int): index to insert.
371
+ module (nn.Module): module to insert
372
+ """
373
+ for i in range(len(self._modules), index, -1):
374
+ self._modules[str(i)] = self._modules[str(i - 1)]
375
+ self._modules[str(index)] = module
376
+
377
+ def append(self, module: Module) -> 'ModuleList':
378
+ r"""Append a given module to the end of the list.
379
+
380
+ Args:
381
+ module (nn.Module): module to append
382
+ """
383
+ self.add_module(str(len(self)), module)
384
+ return self
385
+
386
+ def pop(self, key: Union[int, slice]) -> Module:
387
+ v = self[key]
388
+ del self[key]
389
+ return v
390
+
391
+ def extend(self, modules: Iterable[Module]) -> Self:
392
+ r"""Append modules from a Python iterable to the end of the list.
393
+
394
+ Args:
395
+ modules (iterable): iterable of modules to append
396
+ """
397
+ if not isinstance(modules, container_abcs.Iterable):
398
+ raise TypeError("ModuleList.extend should be called with an "
399
+ "iterable, but got " + type(modules).__name__)
400
+ offset = len(self)
401
+ for i, module in enumerate(modules):
402
+ self.add_module(str(offset + i), module)
403
+ return self
404
+
405
+ # remove forward altogether to fall back on Module's _forward_unimplemented
406
+
407
+
408
+ class ModuleDict(Module):
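The custom ``__repr__`` above compresses runs of identical submodule representations; a sketch of the resulting output (editorial addition, not part of the diff):

import torch.nn as nn

ml = nn.ModuleList([nn.Linear(8, 8) for _ in range(3)] + [nn.ReLU()])
print(ml)
# ModuleList(
#   (0-2): 3 x Linear(in_features=8, out_features=8, bias=True)
#   (3): ReLU()
# )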
409
+ r"""Holds submodules in a dictionary.
410
+
411
+ :class:`~torch.nn.ModuleDict` can be indexed like a regular Python dictionary,
412
+ but modules it contains are properly registered, and will be visible by all
413
+ :class:`~torch.nn.Module` methods.
414
+
415
+ :class:`~torch.nn.ModuleDict` is an **ordered** dictionary that respects
416
+
417
+ * the order of insertion, and
418
+
419
+ * in :meth:`~torch.nn.ModuleDict.update`, the order of the merged
420
+ ``OrderedDict``, ``dict`` (starting from Python 3.6) or another
421
+ :class:`~torch.nn.ModuleDict` (the argument to
422
+ :meth:`~torch.nn.ModuleDict.update`).
423
+
424
+ Note that :meth:`~torch.nn.ModuleDict.update` with other unordered mapping
425
+ types (e.g., Python's plain ``dict`` before Python version 3.6) does not
426
+ preserve the order of the merged mapping.
427
+
428
+ Args:
429
+ modules (iterable, optional): a mapping (dictionary) of (string: module)
430
+ or an iterable of key-value pairs of type (string, module)
431
+
432
+ Example::
433
+
434
+ class MyModule(nn.Module):
435
+ def __init__(self):
436
+ super().__init__()
437
+ self.choices = nn.ModuleDict({
438
+ 'conv': nn.Conv2d(10, 10, 3),
439
+ 'pool': nn.MaxPool2d(3)
440
+ })
441
+ self.activations = nn.ModuleDict([
442
+ ['lrelu', nn.LeakyReLU()],
443
+ ['prelu', nn.PReLU()]
444
+ ])
445
+
446
+ def forward(self, x, choice, act):
447
+ x = self.choices[choice](x)
448
+ x = self.activations[act](x)
449
+ return x
450
+ """
451
+
452
+ _modules: Dict[str, Module] # type: ignore[assignment]
453
+
454
+ def __init__(self, modules: Optional[Mapping[str, Module]] = None) -> None:
455
+ super().__init__()
456
+ if modules is not None:
457
+ self.update(modules)
458
+
459
+ @_copy_to_script_wrapper
460
+ def __getitem__(self, key: str) -> Module:
461
+ return self._modules[key]
462
+
463
+ def __setitem__(self, key: str, module: Module) -> None:
464
+ self.add_module(key, module)
465
+
466
+ def __delitem__(self, key: str) -> None:
467
+ del self._modules[key]
468
+
469
+ @_copy_to_script_wrapper
470
+ def __len__(self) -> int:
471
+ return len(self._modules)
472
+
473
+ @_copy_to_script_wrapper
474
+ def __iter__(self) -> Iterator[str]:
475
+ return iter(self._modules)
476
+
477
+ @_copy_to_script_wrapper
478
+ def __contains__(self, key: str) -> bool:
479
+ return key in self._modules
480
+
481
+ def clear(self) -> None:
482
+ """Remove all items from the ModuleDict."""
483
+ self._modules.clear()
484
+
485
+ def pop(self, key: str) -> Module:
486
+ r"""Remove key from the ModuleDict and return its module.
487
+
488
+ Args:
489
+ key (str): key to pop from the ModuleDict
490
+ """
491
+ v = self[key]
492
+ del self[key]
493
+ return v
494
+
495
+ @_copy_to_script_wrapper
496
+ def keys(self) -> Iterable[str]:
497
+ r"""Return an iterable of the ModuleDict keys."""
498
+ return self._modules.keys()
499
+
500
+ @_copy_to_script_wrapper
501
+ def items(self) -> Iterable[Tuple[str, Module]]:
502
+ r"""Return an iterable of the ModuleDict key/value pairs."""
503
+ return self._modules.items()
504
+
505
+ @_copy_to_script_wrapper
506
+ def values(self) -> Iterable[Module]:
507
+ r"""Return an iterable of the ModuleDict values."""
508
+ return self._modules.values()
509
+
510
+ def update(self, modules: Mapping[str, Module]) -> None:
511
+ r"""Update the :class:`~torch.nn.ModuleDict` with key-value pairs from a mapping, overwriting existing keys.
512
+
513
+ .. note::
514
+ If :attr:`modules` is an ``OrderedDict``, a :class:`~torch.nn.ModuleDict`, or
515
+ an iterable of key-value pairs, the order of new elements in it is preserved.
516
+
517
+ Args:
518
+ modules (iterable): a mapping (dictionary) from string to :class:`~torch.nn.Module`,
519
+ or an iterable of key-value pairs of type (string, :class:`~torch.nn.Module`)
520
+ """
521
+ if not isinstance(modules, container_abcs.Iterable):
522
+ raise TypeError("ModuleDict.update should be called with an "
523
+ "iterable of key/value pairs, but got " +
524
+ type(modules).__name__)
525
+
526
+ if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)):
527
+ for key, module in modules.items():
528
+ self[key] = module
529
+ else:
530
+ # modules here can be a list with two items
531
+ for j, m in enumerate(modules):
532
+ if not isinstance(m, container_abcs.Iterable):
533
+ raise TypeError("ModuleDict update sequence element "
534
+ "#" + str(j) + " should be Iterable; is" +
535
+ type(m).__name__)
536
+ if not len(m) == 2:
537
+ raise ValueError("ModuleDict update sequence element "
538
+ "#" + str(j) + " has length " + str(len(m)) +
539
+ "; 2 is required")
540
+ # modules can be Mapping (what it's typed at), or a list: [(name1, module1), (name2, module2)]
541
+ # that's too cumbersome to type correctly with overloads, so we add an ignore here
542
+ self[m[0]] = m[1] # type: ignore[assignment]
543
+
544
+ # remove forward altogether to fall back on Module's _forward_unimplemented
545
+
546
+
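Sketch of ``ModuleDict.update`` accepting either a mapping or an iterable of key/value pairs, preserving insertion order (editorial addition, not part of the diff):

import torch.nn as nn

md = nn.ModuleDict()
md.update([('a', nn.ReLU()), ('b', nn.Tanh())])  # iterable of (key, module) pairs
print(list(md.keys()))  # ['a', 'b']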
547
+ class ParameterList(Module):
548
+ r"""Holds parameters in a list.
549
+
550
+ :class:`~torch.nn.ParameterList` can be used like a regular Python
551
+ list, but Tensors that are :class:`~torch.nn.Parameter` are properly registered,
552
+ and will be visible by all :class:`~torch.nn.Module` methods.
553
+
554
+ Note that the constructor, assigning an element of the list, the
555
+ :meth:`~torch.nn.ParameterDict.append` method and the :meth:`~torch.nn.ParameterDict.extend`
556
+ method will convert any :class:`~torch.Tensor` into :class:`~torch.nn.Parameter`.
557
+
558
+ Args:
559
+ parameters (iterable, optional): an iterable of elements to add to the list.
560
+
561
+ Example::
562
+
563
+ class MyModule(nn.Module):
564
+ def __init__(self):
565
+ super().__init__()
566
+ self.params = nn.ParameterList([nn.Parameter(torch.randn(10, 10)) for i in range(10)])
567
+
568
+ def forward(self, x):
569
+ # ParameterList can act as an iterable, or be indexed using ints
570
+ for i, p in enumerate(self.params):
571
+ x = self.params[i // 2].mm(x) + p.mm(x)
572
+ return x
573
+ """
574
+
575
+ def __init__(self, values: Optional[Iterable[Any]] = None) -> None:
576
+ super().__init__()
577
+ self._size = 0
578
+ if values is not None:
579
+ self += values
580
+
581
+ def _get_abs_string_index(self, idx):
582
+ """Get the absolute index for the list of modules."""
583
+ idx = operator.index(idx)
584
+ if not (-len(self) <= idx < len(self)):
585
+ raise IndexError(f'index {idx} is out of range')
586
+ if idx < 0:
587
+ idx += len(self)
588
+ return str(idx)
589
+
590
+ @overload
591
+ def __getitem__(self, idx: int) -> Any:
592
+ ...
593
+
594
+ @overload
595
+ def __getitem__(self: T, idx: slice) -> T:
596
+ ...
597
+
598
+ def __getitem__(self, idx):
599
+ if isinstance(idx, slice):
600
+ start, stop, step = idx.indices(len(self))
601
+ out = self.__class__()
602
+ for i in range(start, stop, step):
603
+ out.append(self[i])
604
+ return out
605
+ else:
606
+ idx = self._get_abs_string_index(idx)
607
+ return getattr(self, str(idx))
608
+
609
+ def __setitem__(self, idx: int, param: Any) -> None:
610
+ # Note that all other function that add an entry to the list part of
611
+ # the ParameterList end up here. So this is the only place where we need
612
+ # to wrap things into Parameter if needed.
613
+ # Objects added via setattr() are not in the list part and thus won't
614
+ # call into this function.
615
+ idx = self._get_abs_string_index(idx)
616
+ if isinstance(param, torch.Tensor) and not isinstance(param, Parameter):
617
+ param = Parameter(param)
618
+ return setattr(self, str(idx), param)
619
+
620
+ def __len__(self) -> int:
621
+ return self._size
622
+
623
+ def __iter__(self) -> Iterator[Any]:
624
+ return iter(self[i] for i in range(len(self)))
625
+
626
+ def __iadd__(self, parameters: Iterable[Any]) -> Self:
627
+ return self.extend(parameters)
628
+
629
+ def __dir__(self):
630
+ keys = super().__dir__()
631
+ keys = [key for key in keys if not key.isdigit()]
632
+ return keys
633
+
634
+ def append(self, value: Any) -> 'ParameterList':
635
+ """Append a given value at the end of the list.
636
+
637
+ Args:
638
+ value (Any): value to append
639
+ """
640
+ new_idx = len(self)
641
+ self._size += 1
642
+ self[new_idx] = value
643
+ return self
644
+
645
+ def extend(self, values: Iterable[Any]) -> Self:
646
+ """Append values from a Python iterable to the end of the list.
647
+
648
+ Args:
649
+ values (iterable): iterable of values to append
650
+ """
651
+ # Tensor is an iterable but we never want to unpack it here
652
+ if not isinstance(values, container_abcs.Iterable) or isinstance(values, torch.Tensor):
653
+ raise TypeError("ParameterList.extend should be called with an "
654
+ "iterable, but got " + type(values).__name__)
655
+ for value in values:
656
+ self.append(value)
657
+ return self
658
+
659
+ def extra_repr(self) -> str:
660
+ child_lines = []
661
+ for k, p in enumerate(self):
662
+ if isinstance(p, torch.Tensor):
663
+ size_str = 'x'.join(str(size) for size in p.size())
664
+ if p.device.type in ["cuda", torch._C._get_privateuse1_backend_name()]:
665
+ device_str = f' ({p.device})'
666
+ else:
667
+ device_str = ''
668
+ parastr = '{} containing: [{} of size {}{}]'.format(
669
+ "Parameter" if isinstance(p, Parameter) else "Tensor",
670
+ p.dtype, size_str, device_str)
671
+ child_lines.append(' (' + str(k) + '): ' + parastr)
672
+ else:
673
+ child_lines.append(' (' + str(k) + '): Object of type: ' + type(p).__name__)
674
+
675
+ tmpstr = '\n'.join(child_lines)
676
+ return tmpstr
677
+
678
+ def __call__(self, *args, **kwargs):
679
+ raise RuntimeError('ParameterList should not be called.')
680
+
681
+
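As documented above, plain tensors are promoted to ``Parameter`` on insertion (see ``__setitem__``); a minimal sketch (editorial addition, not part of the diff):

import torch
import torch.nn as nn

pl = nn.ParameterList()
pl.append(torch.randn(3))           # wrapped into a Parameter by __setitem__
print(type(pl[0]).__name__)         # Parameter
print(len(list(pl.parameters())))   # 1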
682
+ class ParameterDict(Module):
683
+ r"""Holds parameters in a dictionary.
684
+
685
+ ParameterDict can be indexed like a regular Python dictionary, but Parameters it
686
+ contains are properly registered, and will be visible by all Module methods.
687
+ Other objects are treated as would be done by a regular Python dictionary.
688
+
689
+ :class:`~torch.nn.ParameterDict` is an **ordered** dictionary.
690
+ :meth:`~torch.nn.ParameterDict.update` with other unordered mapping
691
+ types (e.g., Python's plain ``dict``) does not preserve the order of the
692
+ merged mapping. On the other hand, ``OrderedDict`` or another :class:`~torch.nn.ParameterDict`
693
+ will preserve their ordering.
694
+
695
+ Note that the constructor, assigning an element of the dictionary and the
696
+ :meth:`~torch.nn.ParameterDict.update` method will convert any :class:`~torch.Tensor` into
697
+ :class:`~torch.nn.Parameter`.
698
+
699
+ Args:
700
+ values (iterable, optional): a mapping (dictionary) of
701
+ (string : Any) or an iterable of key-value pairs
702
+ of type (string, Any)
703
+
704
+ Example::
705
+
706
+ class MyModule(nn.Module):
707
+ def __init__(self):
708
+ super().__init__()
709
+ self.params = nn.ParameterDict({
710
+ 'left': nn.Parameter(torch.randn(5, 10)),
711
+ 'right': nn.Parameter(torch.randn(5, 10))
712
+ })
713
+
714
+ def forward(self, x, choice):
715
+ x = self.params[choice].mm(x)
716
+ return x
717
+ """
718
+
719
+ def __init__(self, parameters: Any = None) -> None:
720
+ super().__init__()
721
+ self._keys: Dict[str, None] = {}
722
+ if parameters is not None:
723
+ self.update(parameters)
724
+
725
+ def _key_to_attr(self, key: str) -> str:
726
+ if not isinstance(key, str):
727
+ raise TypeError("Index given to ParameterDict cannot be used as a key as it is "
728
+ f"not a string (type is '{type(key).__name__}'). Open an issue on "
729
+ "github if you need non-string keys.")
730
+ else:
731
+ # Use the key as-is so that `.named_parameters()` returns the right thing
732
+ return key
733
+
734
+ def __getitem__(self, key: str) -> Any:
735
+ attr = self._key_to_attr(key)
736
+ return getattr(self, attr)
737
+
738
+ def __setitem__(self, key: str, value: Any) -> None:
739
+ # Note that all other function that add an entry to the dictionary part of
740
+ # the ParameterDict end up here. So this is the only place where we need
741
+ # to wrap things into Parameter if needed.
742
+ # Objects added via setattr() are not in the dictionary part and thus won't
743
+ # call into this function.
744
+ self._keys[key] = None
745
+ attr = self._key_to_attr(key)
746
+ if isinstance(value, torch.Tensor) and not isinstance(value, Parameter):
747
+ value = Parameter(value)
748
+ setattr(self, attr, value)
749
+
750
+ def __delitem__(self, key: str) -> None:
751
+ del self._keys[key]
752
+ attr = self._key_to_attr(key)
753
+ delattr(self, attr)
754
+
755
+ def __len__(self) -> int:
756
+ return len(self._keys)
757
+
758
+ def __iter__(self) -> Iterator[str]:
759
+ return iter(self._keys)
760
+
761
+ def __reversed__(self) -> Iterator[str]:
762
+ return reversed(list(self._keys))
763
+
764
+ def copy(self) -> 'ParameterDict':
765
+ """Return a copy of this :class:`~torch.nn.ParameterDict` instance."""
766
+ # We have to use an OrderedDict because the ParameterDict constructor
767
+ # behaves differently on plain dict vs OrderedDict
768
+ return ParameterDict(OrderedDict((k, self[k]) for k in self._keys))
769
+
770
+ def __contains__(self, key: str) -> bool:
771
+ return key in self._keys
772
+
773
+ def setdefault(self, key: str, default: Optional[Any] = None) -> Any:
774
+ """Set the default for a key in the Parameterdict.
775
+
776
+ If key is in the ParameterDict, return its value.
777
+ If not, insert `key` with a parameter `default` and return `default`.
778
+ `default` defaults to `None`.
779
+
780
+ Args:
781
+ key (str): key to set default for
782
+ default (Any): the parameter set to the key
783
+ """
784
+ if key not in self:
785
+ self[key] = default
786
+ return self[key]
787
+
788
+ def clear(self) -> None:
789
+ """Remove all items from the ParameterDict."""
790
+ for k in self._keys.copy():
791
+ del self[k]
792
+
793
+ def pop(self, key: str) -> Any:
794
+ r"""Remove key from the ParameterDict and return its parameter.
795
+
796
+ Args:
797
+ key (str): key to pop from the ParameterDict
798
+ """
799
+ v = self[key]
800
+ del self[key]
801
+ return v
802
+
803
+ def popitem(self) -> Tuple[str, Any]:
804
+ """Remove and return the last inserted `(key, parameter)` pair from the ParameterDict."""
805
+ k, _ = self._keys.popitem()
806
+ # We need the key in the _keys to be able to access/del
807
+ self._keys[k] = None
808
+ val = self[k]
809
+ del self[k]
810
+ return k, val
811
+
812
+ def get(self, key: str, default: Optional[Any] = None) -> Any:
813
+ r"""Return the parameter associated with key if present. Otherwise return default if provided, None if not.
814
+
815
+ Args:
816
+ key (str): key to get from the ParameterDict
817
+ default (Parameter, optional): value to return if key not present
818
+ """
819
+ return self[key] if key in self else default
820
+
821
+ def fromkeys(self, keys: Iterable[str], default: Optional[Any] = None) -> 'ParameterDict':
822
+ r"""Return a new ParameterDict with the keys provided.
823
+
824
+ Args:
825
+ keys (iterable, string): keys to make the new ParameterDict from
826
+ default (Parameter, optional): value to set for all keys
827
+ """
828
+ return ParameterDict((k, default) for k in keys)
829
+
830
+ def keys(self) -> Iterable[str]:
831
+ r"""Return an iterable of the ParameterDict keys."""
832
+ return self._keys.keys()
833
+
834
+ def items(self) -> Iterable[Tuple[str, Any]]:
835
+ r"""Return an iterable of the ParameterDict key/value pairs."""
836
+ return ((k, self[k]) for k in self._keys)
837
+
838
+ def values(self) -> Iterable[Any]:
839
+ r"""Return an iterable of the ParameterDict values."""
840
+ return (self[k] for k in self._keys)
841
+
842
+ def update(self, parameters: Union[Mapping[str, Any], 'ParameterDict']) -> None:
843
+ r"""Update the :class:`~torch.nn.ParameterDict` with key-value pairs from ``parameters``, overwriting existing keys.
844
+
845
+ .. note::
846
+ If :attr:`parameters` is an ``OrderedDict``, a :class:`~torch.nn.ParameterDict`, or
847
+ an iterable of key-value pairs, the order of new elements in it is preserved.
848
+
849
+ Args:
850
+ parameters (iterable): a mapping (dictionary) from string to
851
+ :class:`~torch.nn.Parameter`, or an iterable of
852
+ key-value pairs of type (string, :class:`~torch.nn.Parameter`)
853
+ """
854
+ if not isinstance(parameters, container_abcs.Iterable):
855
+ raise TypeError("ParameterDict.update should be called with an "
856
+ "iterable of key/value pairs, but got " +
857
+ type(parameters).__name__)
858
+
859
+ if isinstance(parameters, (OrderedDict, ParameterDict)):
860
+ for key, parameter in parameters.items():
861
+ self[key] = parameter
862
+ elif isinstance(parameters, container_abcs.Mapping):
863
+ for key, parameter in sorted(parameters.items()):
864
+ self[key] = parameter
865
+ else:
866
+ for j, p in enumerate(parameters):
867
+ if not isinstance(p, container_abcs.Iterable):
868
+ raise TypeError("ParameterDict update sequence element "
869
+ "#" + str(j) + " should be Iterable; is " +
870
+ type(p).__name__)
871
+ if len(p) != 2:
872
+ raise ValueError("ParameterDict update sequence element "
873
+ "#" + str(j) + " has length " + str(len(p)) +
874
+ "; 2 is required")
875
+ # parameters as length-2 list too cumbersome to type, see ModuleDict.update comment
876
+ self[p[0]] = p[1] # type: ignore[assignment]
877
+
878
+ def extra_repr(self) -> str:
879
+ child_lines = []
880
+ for k, p in self.items():
881
+ if isinstance(p, torch.Tensor):
882
+ size_str = 'x'.join(str(size) for size in p.size())
883
+ if p.device.type in ["cuda", torch._C._get_privateuse1_backend_name()]:
884
+ device_str = f' ({p.device})'
885
+ else:
886
+ device_str = ''
887
+ parastr = '{} containing: [{} of size {}{}]'.format(
888
+ "Parameter" if isinstance(p, Parameter) else "Tensor",
889
+ torch.typename(p), size_str, device_str)
890
+ child_lines.append(' (' + str(k) + '): ' + parastr)
891
+ else:
892
+ child_lines.append(' (' + str(k) + '): Object of type: ' + type(p).__name__)
893
+ tmpstr = '\n'.join(child_lines)
894
+ return tmpstr
895
+
896
+ def __call__(self, input):
897
+ raise RuntimeError('ParameterDict should not be called.')
898
+
899
+ def __or__(self, other: 'ParameterDict') -> 'ParameterDict':
900
+ copy = self.copy()
901
+ copy.update(other)
902
+ return copy
903
+
904
+ def __ror__(self, other: 'ParameterDict') -> 'ParameterDict':
905
+ copy = other.copy()
906
+ copy.update(self)
907
+ return copy
908
+
909
+ def __ior__(self, other : 'ParameterDict') -> Self:
910
+ self.update(other)
911
+ return self
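To make the dictionary behavior above concrete, here is a minimal usage sketch (illustrative only, not part of the diff; the names `pd`, `other`, and `merged` are ours): `__setitem__` wraps plain tensors into `Parameter`, `setdefault` keeps existing entries, and `__or__` merges two dicts.

import torch
import torch.nn as nn

pd = nn.ParameterDict()
pd["w"] = torch.randn(3)                 # __setitem__ wraps a plain Tensor into a Parameter
assert isinstance(pd["w"], nn.Parameter)
pd.setdefault("w", torch.zeros(3))       # key already present, so the default is ignored
other = nn.ParameterDict({"b": nn.Parameter(torch.zeros(2))})
merged = pd | other                      # __or__: copy self, then update with other
assert sorted(merged.keys()) == ["b", "w"]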
venv/lib/python3.10/site-packages/torch/nn/modules/flatten.py ADDED
@@ -0,0 +1,144 @@
1
+ from .module import Module
2
+
3
+ from typing import Tuple, Union
4
+ from torch import Tensor
5
+ from torch.types import _size
6
+
7
+ __all__ = ['Flatten', 'Unflatten']
8
+
9
+ class Flatten(Module):
10
+ r"""
11
+ Flattens a contiguous range of dims into a tensor.
12
+
13
+ For use with :class:`~nn.Sequential`. See :meth:`torch.flatten` for details.
14
+
15
+ Shape:
16
+ - Input: :math:`(*, S_{\text{start}},..., S_{i}, ..., S_{\text{end}}, *)`,
17
+ where :math:`S_{i}` is the size at dimension :math:`i` and :math:`*` means any
18
+ number of dimensions including none.
19
+ - Output: :math:`(*, \prod_{i=\text{start}}^{\text{end}} S_{i}, *)`.
20
+
21
+ Args:
22
+ start_dim: first dim to flatten (default = 1).
23
+ end_dim: last dim to flatten (default = -1).
24
+
25
+ Examples::
26
+ >>> input = torch.randn(32, 1, 5, 5)
27
+ >>> # With default parameters
28
+ >>> m = nn.Flatten()
29
+ >>> output = m(input)
30
+ >>> output.size()
31
+ torch.Size([32, 25])
32
+ >>> # With non-default parameters
33
+ >>> m = nn.Flatten(0, 2)
34
+ >>> output = m(input)
35
+ >>> output.size()
36
+ torch.Size([160, 5])
37
+ """
38
+
39
+ __constants__ = ['start_dim', 'end_dim']
40
+ start_dim: int
41
+ end_dim: int
42
+
43
+ def __init__(self, start_dim: int = 1, end_dim: int = -1) -> None:
44
+ super().__init__()
45
+ self.start_dim = start_dim
46
+ self.end_dim = end_dim
47
+
48
+ def forward(self, input: Tensor) -> Tensor:
49
+ return input.flatten(self.start_dim, self.end_dim)
50
+
51
+ def extra_repr(self) -> str:
52
+ return f'start_dim={self.start_dim}, end_dim={self.end_dim}'
53
+
54
+
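A quick illustrative check (ours, not part of the file) that `Flatten` is a thin module wrapper around `Tensor.flatten`, exactly as `forward` above shows:

import torch
import torch.nn as nn

x = torch.randn(32, 1, 5, 5)
assert torch.equal(nn.Flatten()(x), x.flatten(1, -1))     # defaults: start_dim=1, end_dim=-1
assert torch.equal(nn.Flatten(0, 2)(x), x.flatten(0, 2))  # -> shape (160, 5)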
55
+ class Unflatten(Module):
56
+ r"""
57
+ Unflattens a tensor dim expanding it to a desired shape. For use with :class:`~nn.Sequential`.
58
+
59
+ * :attr:`dim` specifies the dimension of the input tensor to be unflattened, and it can
60
+ be either `int` or `str` when `Tensor` or `NamedTensor` is used, respectively.
61
+
62
+ * :attr:`unflattened_size` is the new shape of the unflattened dimension of the tensor and it can be
63
+ a `tuple` of ints or a `list` of ints or `torch.Size` for `Tensor` input; a `NamedShape`
64
+ (tuple of `(name, size)` tuples) for `NamedTensor` input.
65
+
66
+ Shape:
67
+ - Input: :math:`(*, S_{\text{dim}}, *)`, where :math:`S_{\text{dim}}` is the size at
68
+ dimension :attr:`dim` and :math:`*` means any number of dimensions including none.
69
+ - Output: :math:`(*, U_1, ..., U_n, *)`, where :math:`U` = :attr:`unflattened_size` and
70
+ :math:`\prod_{i=1}^n U_i = S_{\text{dim}}`.
71
+
72
+ Args:
73
+ dim (Union[int, str]): Dimension to be unflattened
74
+ unflattened_size (Union[torch.Size, Tuple, List, NamedShape]): New shape of the unflattened dimension
75
+
76
+ Examples:
77
+ >>> input = torch.randn(2, 50)
78
+ >>> # With tuple of ints
79
+ >>> m = nn.Sequential(
80
+ >>> nn.Linear(50, 50),
81
+ >>> nn.Unflatten(1, (2, 5, 5))
82
+ >>> )
83
+ >>> output = m(input)
84
+ >>> output.size()
85
+ torch.Size([2, 2, 5, 5])
86
+ >>> # With torch.Size
87
+ >>> m = nn.Sequential(
88
+ >>> nn.Linear(50, 50),
89
+ >>> nn.Unflatten(1, torch.Size([2, 5, 5]))
90
+ >>> )
91
+ >>> output = m(input)
92
+ >>> output.size()
93
+ torch.Size([2, 2, 5, 5])
94
+ >>> # With namedshape (tuple of tuples)
95
+ >>> input = torch.randn(2, 50, names=('N', 'features'))
96
+ >>> unflatten = nn.Unflatten('features', (('C', 2), ('H', 5), ('W', 5)))
97
+ >>> output = unflatten(input)
98
+ >>> output.size()
99
+ torch.Size([2, 2, 5, 5])
100
+ """
101
+
102
+ NamedShape = Tuple[Tuple[str, int], ...]
103
+
104
+ __constants__ = ['dim', 'unflattened_size']
105
+ dim: Union[int, str]
106
+ unflattened_size: Union[_size, NamedShape]
107
+
108
+ def __init__(self, dim: Union[int, str], unflattened_size: Union[_size, NamedShape]) -> None:
109
+ super().__init__()
110
+
111
+ if isinstance(dim, int):
112
+ self._require_tuple_int(unflattened_size)
113
+ elif isinstance(dim, str):
114
+ self._require_tuple_tuple(unflattened_size)
115
+ else:
116
+ raise TypeError("invalid argument type for dim parameter")
117
+
118
+ self.dim = dim
119
+ self.unflattened_size = unflattened_size
120
+
121
+ def _require_tuple_tuple(self, input):
122
+ if isinstance(input, tuple):
123
+ for idx, elem in enumerate(input):
124
+ if not isinstance(elem, tuple):
125
+ raise TypeError("unflattened_size must be tuple of tuples, " +
126
+ f"but found element of type {type(elem).__name__} at pos {idx}")
127
+ return
128
+ raise TypeError("unflattened_size must be a tuple of tuples, " +
129
+ f"but found type {type(input).__name__}")
130
+
131
+ def _require_tuple_int(self, input):
132
+ if isinstance(input, (tuple, list)):
133
+ for idx, elem in enumerate(input):
134
+ if not isinstance(elem, int):
135
+ raise TypeError("unflattened_size must be tuple of ints, " +
136
+ f"but found element of type {type(elem).__name__} at pos {idx}")
137
+ return
138
+ raise TypeError(f"unflattened_size must be a tuple of ints, but found type {type(input).__name__}")
139
+
140
+ def forward(self, input: Tensor) -> Tensor:
141
+ return input.unflatten(self.dim, self.unflattened_size)
142
+
143
+ def extra_repr(self) -> str:
144
+ return f'dim={self.dim}, unflattened_size={self.unflattened_size}'
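As a small round-trip sketch (illustrative, not part of the diff): `Unflatten` undoes `Flatten` whenever the product of `unflattened_size` equals the size of the flattened dim.

import torch
import torch.nn as nn

x = torch.randn(2, 50)
m = nn.Sequential(nn.Unflatten(1, (2, 5, 5)), nn.Flatten(1, 3))
assert m(x).shape == x.shape  # (2, 50) -> (2, 2, 5, 5) -> (2, 50)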
venv/lib/python3.10/site-packages/torch/nn/modules/instancenorm.py ADDED
@@ -0,0 +1,434 @@
1
+
2
+ import warnings
3
+ from torch import Tensor
4
+
5
+ from .batchnorm import _LazyNormBase, _NormBase
6
+ from .. import functional as F
7
+
8
+ __all__ = ['InstanceNorm1d', 'InstanceNorm2d', 'InstanceNorm3d', 'LazyInstanceNorm1d',
9
+ 'LazyInstanceNorm2d', 'LazyInstanceNorm3d']
10
+
11
+ class _InstanceNorm(_NormBase):
12
+ def __init__(
13
+ self,
14
+ num_features: int,
15
+ eps: float = 1e-5,
16
+ momentum: float = 0.1,
17
+ affine: bool = False,
18
+ track_running_stats: bool = False,
19
+ device=None,
20
+ dtype=None
21
+ ) -> None:
22
+ factory_kwargs = {'device': device, 'dtype': dtype}
23
+ super().__init__(
24
+ num_features, eps, momentum, affine, track_running_stats, **factory_kwargs)
25
+
26
+ def _check_input_dim(self, input):
27
+ raise NotImplementedError
28
+
29
+ def _get_no_batch_dim(self):
30
+ raise NotImplementedError
31
+
32
+ def _handle_no_batch_input(self, input):
33
+ return self._apply_instance_norm(input.unsqueeze(0)).squeeze(0)
34
+
35
+ def _apply_instance_norm(self, input):
36
+ return F.instance_norm(
37
+ input, self.running_mean, self.running_var, self.weight, self.bias,
38
+ self.training or not self.track_running_stats, self.momentum, self.eps)
39
+
40
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
41
+ missing_keys, unexpected_keys, error_msgs):
42
+ version = local_metadata.get('version', None)
43
+ # at version 1: removed running_mean and running_var when
44
+ # track_running_stats=False (default)
45
+ if version is None and not self.track_running_stats:
46
+ running_stats_keys = []
47
+ for name in ('running_mean', 'running_var'):
48
+ key = prefix + name
49
+ if key in state_dict:
50
+ running_stats_keys.append(key)
51
+ if len(running_stats_keys) > 0:
52
+ error_msgs.append(
53
+ 'Unexpected running stats buffer(s) {names} for {klass} '
54
+ 'with track_running_stats=False. If state_dict is a '
55
+ 'checkpoint saved before 0.4.0, this may be expected '
56
+ 'because {klass} does not track running stats by default '
57
+ 'since 0.4.0. Please remove these keys from state_dict. If '
58
+ 'the running stats are actually needed, instead set '
59
+ 'track_running_stats=True in {klass} to enable them. See '
60
+ 'the documentation of {klass} for details.'
61
+ .format(names=" and ".join(f'"{k}"' for k in running_stats_keys),
62
+ klass=self.__class__.__name__))
63
+ for key in running_stats_keys:
64
+ state_dict.pop(key)
65
+
66
+ super()._load_from_state_dict(
67
+ state_dict, prefix, local_metadata, strict,
68
+ missing_keys, unexpected_keys, error_msgs)
69
+
70
+ def forward(self, input: Tensor) -> Tensor:
71
+ self._check_input_dim(input)
72
+
73
+ feature_dim = input.dim() - self._get_no_batch_dim()
74
+ if input.size(feature_dim) != self.num_features:
75
+ if self.affine:
76
+ raise ValueError(
77
+ f"expected input's size at dim={feature_dim} to match num_features"
78
+ f" ({self.num_features}), but got: {input.size(feature_dim)}.")
79
+ else:
80
+ warnings.warn(f"input's size at dim={feature_dim} does not match num_features. "
81
+ "You can silence this warning by not passing in num_features, "
82
+ "which is not used because affine=False")
83
+
84
+ if input.dim() == self._get_no_batch_dim():
85
+ return self._handle_no_batch_input(input)
86
+
87
+ return self._apply_instance_norm(input)
88
+
89
+
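The unbatched path in `forward` above can be checked with a short sketch (illustrative, names ours): a `(C, L)` input is unsqueezed to `(1, C, L)`, normalized, and squeezed back, so it agrees with a batch of one.

import torch
import torch.nn as nn

norm = nn.InstanceNorm1d(4)
x = torch.randn(4, 10)  # unbatched (C, L) input
assert torch.allclose(norm(x), norm(x.unsqueeze(0)).squeeze(0))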
90
+ class InstanceNorm1d(_InstanceNorm):
91
+ r"""Applies Instance Normalization.
92
+
93
+ This operation applies Instance Normalization
94
+ over a 2D (unbatched) or 3D (batched) input as described in the paper
95
+ `Instance Normalization: The Missing Ingredient for Fast Stylization
96
+ <https://arxiv.org/abs/1607.08022>`__.
97
+
98
+ .. math::
99
+
100
+ y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
101
+
102
+ The mean and standard-deviation are calculated per-dimension separately
103
+ for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
104
+ of size `C` (where `C` is the number of features or channels of the input) if :attr:`affine` is ``True``.
105
+ The standard-deviation is calculated via the biased estimator, equivalent to
106
+ `torch.var(input, unbiased=False)`.
107
+
108
+ By default, this layer uses instance statistics computed from input data in
109
+ both training and evaluation modes.
110
+
111
+ If :attr:`track_running_stats` is set to ``True``, during training this
112
+ layer keeps running estimates of its computed mean and variance, which are
113
+ then used for normalization during evaluation. The running estimates are
114
+ kept with a default :attr:`momentum` of 0.1.
115
+
116
+ .. note::
117
+ This :attr:`momentum` argument is different from the one used in optimizer
118
+ classes and the conventional notion of momentum. Mathematically, the
119
+ update rule for running statistics here is
120
+ :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
121
+ where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
122
+ new observed value.
123
+
124
+ .. note::
125
+ :class:`InstanceNorm1d` and :class:`LayerNorm` are very similar, but
126
+ have some subtle differences. :class:`InstanceNorm1d` is applied
127
+ on each channel of channeled data like multidimensional time series, but
128
+ :class:`LayerNorm` is usually applied on the entire sample and often in NLP
129
+ tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
130
+ transform, while :class:`InstanceNorm1d` usually does not apply an affine
131
+ transform.
132
+
133
+ Args:
134
+ num_features: number of features or channels :math:`C` of the input
135
+ eps: a value added to the denominator for numerical stability. Default: 1e-5
136
+ momentum: the value used for the running_mean and running_var computation. Default: 0.1
137
+ affine: a boolean value that when set to ``True``, this module has
138
+ learnable affine parameters, initialized the same way as done for batch normalization.
139
+ Default: ``False``.
140
+ track_running_stats: a boolean value that when set to ``True``, this
141
+ module tracks the running mean and variance, and when set to ``False``,
142
+ this module does not track such statistics and always uses batch
143
+ statistics in both training and eval modes. Default: ``False``
144
+
145
+ Shape:
146
+ - Input: :math:`(N, C, L)` or :math:`(C, L)`
147
+ - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)
148
+
149
+ Examples::
150
+
151
+ >>> # Without Learnable Parameters
152
+ >>> m = nn.InstanceNorm1d(100)
153
+ >>> # With Learnable Parameters
154
+ >>> m = nn.InstanceNorm1d(100, affine=True)
155
+ >>> input = torch.randn(20, 100, 40)
156
+ >>> output = m(input)
157
+ """
158
+
159
+ def _get_no_batch_dim(self):
160
+ return 2
161
+
162
+ def _check_input_dim(self, input):
163
+ if input.dim() not in (2, 3):
164
+ raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)')
165
+
166
+
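A worked check (illustrative sketch, not part of the file) of the normalization formula from the docstring, using the biased variance estimator it names:

import torch
import torch.nn as nn

x = torch.randn(8, 3, 20)
eps = 1e-5
mean = x.mean(dim=-1, keepdim=True)
var = x.var(dim=-1, unbiased=False, keepdim=True)  # biased estimator, per channel and sample
manual = (x - mean) / torch.sqrt(var + eps)
assert torch.allclose(nn.InstanceNorm1d(3)(x), manual, atol=1e-5)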
167
+ class LazyInstanceNorm1d(_LazyNormBase, _InstanceNorm):
168
+ r"""A :class:`torch.nn.InstanceNorm1d` module with lazy initialization of the ``num_features`` argument.
169
+
170
+ The ``num_features`` argument of the :class:`InstanceNorm1d` is inferred from ``input.size(1)``.
171
+ The attributes that will be lazily initialized are `weight`, `bias`, `running_mean` and `running_var`.
172
+
173
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
174
+ on lazy modules and their limitations.
175
+
176
+ Args:
177
+ num_features: :math:`C` from an expected input of size
178
+ :math:`(N, C, L)` or :math:`(C, L)`
179
+ eps: a value added to the denominator for numerical stability. Default: 1e-5
180
+ momentum: the value used for the running_mean and running_var computation. Default: 0.1
181
+ affine: a boolean value that when set to ``True``, this module has
182
+ learnable affine parameters, initialized the same way as done for batch normalization.
183
+ Default: ``False``.
184
+ track_running_stats: a boolean value that when set to ``True``, this
185
+ module tracks the running mean and variance, and when set to ``False``,
186
+ this module does not track such statistics and always uses batch
187
+ statistics in both training and eval modes. Default: ``False``
188
+
189
+ Shape:
190
+ - Input: :math:`(N, C, L)` or :math:`(C, L)`
191
+ - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)
192
+ """
193
+
194
+ cls_to_become = InstanceNorm1d # type: ignore[assignment]
195
+
196
+ def _get_no_batch_dim(self):
197
+ return 2
198
+
199
+ def _check_input_dim(self, input):
200
+ if input.dim() not in (2, 3):
201
+ raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)')
202
+
203
+
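The lazy variant above can be sketched as follows (illustrative): `num_features` is inferred from `input.size(1)` on the first forward, after which `cls_to_become` turns the module into a plain `InstanceNorm1d`.

import torch
import torch.nn as nn

lazy = nn.LazyInstanceNorm1d(affine=True)
_ = lazy(torch.randn(2, 6, 10))  # first forward materializes weight and bias
assert isinstance(lazy, nn.InstanceNorm1d)
assert lazy.num_features == 6 and lazy.weight.shape == (6,)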
204
+ class InstanceNorm2d(_InstanceNorm):
205
+ r"""Applies Instance Normalization.
206
+
207
+ This operation applies Instance Normalization
208
+ over a 4D input (a mini-batch of 2D inputs
209
+ with additional channel dimension) as described in the paper
210
+ `Instance Normalization: The Missing Ingredient for Fast Stylization
211
+ <https://arxiv.org/abs/1607.08022>`__.
212
+
213
+ .. math::
214
+
215
+ y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
216
+
217
+ The mean and standard-deviation are calculated per-dimension separately
218
+ for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
219
+ of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
220
+ The standard-deviation is calculated via the biased estimator, equivalent to
221
+ `torch.var(input, unbiased=False)`.
222
+
223
+ By default, this layer uses instance statistics computed from input data in
224
+ both training and evaluation modes.
225
+
226
+ If :attr:`track_running_stats` is set to ``True``, during training this
227
+ layer keeps running estimates of its computed mean and variance, which are
228
+ then used for normalization during evaluation. The running estimates are
229
+ kept with a default :attr:`momentum` of 0.1.
230
+
231
+ .. note::
232
+ This :attr:`momentum` argument is different from the one used in optimizer
233
+ classes and the conventional notion of momentum. Mathematically, the
234
+ update rule for running statistics here is
235
+ :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
236
+ where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
237
+ new observed value.
238
+
239
+ .. note::
240
+ :class:`InstanceNorm2d` and :class:`LayerNorm` are very similar, but
241
+ have some subtle differences. :class:`InstanceNorm2d` is applied
242
+ on each channel of channeled data like RGB images, but
243
+ :class:`LayerNorm` is usually applied on the entire sample and often in NLP
244
+ tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
245
+ transform, while :class:`InstanceNorm2d` usually does not apply an affine
246
+ transform.
247
+
248
+ Args:
249
+ num_features: :math:`C` from an expected input of size
250
+ :math:`(N, C, H, W)` or :math:`(C, H, W)`
251
+ eps: a value added to the denominator for numerical stability. Default: 1e-5
252
+ momentum: the value used for the running_mean and running_var computation. Default: 0.1
253
+ affine: a boolean value that when set to ``True``, this module has
254
+ learnable affine parameters, initialized the same way as done for batch normalization.
255
+ Default: ``False``.
256
+ track_running_stats: a boolean value that when set to ``True``, this
257
+ module tracks the running mean and variance, and when set to ``False``,
258
+ this module does not track such statistics and always uses batch
259
+ statistics in both training and eval modes. Default: ``False``
260
+
261
+ Shape:
262
+ - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
263
+ - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
264
+
265
+ Examples::
266
+
267
+ >>> # Without Learnable Parameters
268
+ >>> m = nn.InstanceNorm2d(100)
269
+ >>> # With Learnable Parameters
270
+ >>> m = nn.InstanceNorm2d(100, affine=True)
271
+ >>> input = torch.randn(20, 100, 35, 45)
272
+ >>> output = m(input)
273
+ """
274
+
275
+ def _get_no_batch_dim(self):
276
+ return 3
277
+
278
+ def _check_input_dim(self, input):
279
+ if input.dim() not in (3, 4):
280
+ raise ValueError(f'expected 3D or 4D input (got {input.dim()}D input)')
281
+
282
+
283
+ class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm):
284
+ r"""A :class:`torch.nn.InstanceNorm2d` module with lazy initialization of the ``num_features`` argument.
285
+
286
+ The ``num_features`` argument of the :class:`InstanceNorm2d` is inferred from ``input.size(1)``.
287
+ The attributes that will be lazily initialized are `weight`, `bias`,
288
+ `running_mean` and `running_var`.
289
+
290
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
291
+ on lazy modules and their limitations.
292
+
293
+ Args:
294
+ num_features: :math:`C` from an expected input of size
295
+ :math:`(N, C, H, W)` or :math:`(C, H, W)`
296
+ eps: a value added to the denominator for numerical stability. Default: 1e-5
297
+ momentum: the value used for the running_mean and running_var computation. Default: 0.1
298
+ affine: a boolean value that when set to ``True``, this module has
299
+ learnable affine parameters, initialized the same way as done for batch normalization.
300
+ Default: ``False``.
301
+ track_running_stats: a boolean value that when set to ``True``, this
302
+ module tracks the running mean and variance, and when set to ``False``,
303
+ this module does not track such statistics and always uses batch
304
+ statistics in both training and eval modes. Default: ``False``
305
+
306
+ Shape:
307
+ - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
308
+ - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
309
+ """
310
+
311
+ cls_to_become = InstanceNorm2d # type: ignore[assignment]
312
+
313
+ def _get_no_batch_dim(self):
314
+ return 3
315
+
316
+ def _check_input_dim(self, input):
317
+ if input.dim() not in (3, 4):
318
+ raise ValueError(f'expected 3D or 4D input (got {input.dim()}D input)')
319
+
320
+
321
+ class InstanceNorm3d(_InstanceNorm):
322
+ r"""Applies Instance Normalization.
323
+
324
+ This operation applies Instance Normalization
325
+ over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper
326
+ `Instance Normalization: The Missing Ingredient for Fast Stylization
327
+ <https://arxiv.org/abs/1607.08022>`__.
328
+
329
+ .. math::
330
+
331
+ y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
332
+
333
+ The mean and standard-deviation are calculated per-dimension separately
334
+ for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
335
+ of size C (where C is the input size) if :attr:`affine` is ``True``.
336
+ The standard-deviation is calculated via the biased estimator, equivalent to
337
+ `torch.var(input, unbiased=False)`.
338
+
339
+ By default, this layer uses instance statistics computed from input data in
340
+ both training and evaluation modes.
341
+
342
+ If :attr:`track_running_stats` is set to ``True``, during training this
343
+ layer keeps running estimates of its computed mean and variance, which are
344
+ then used for normalization during evaluation. The running estimates are
345
+ kept with a default :attr:`momentum` of 0.1.
346
+
347
+ .. note::
348
+ This :attr:`momentum` argument is different from the one used in optimizer
349
+ classes and the conventional notion of momentum. Mathematically, the
350
+ update rule for running statistics here is
351
+ :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
352
+ where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
353
+ new observed value.
354
+
355
+ .. note::
356
+ :class:`InstanceNorm3d` and :class:`LayerNorm` are very similar, but
357
+ have some subtle differences. :class:`InstanceNorm3d` is applied
358
+ on each channel of channeled data like 3D models with RGB color, but
359
+ :class:`LayerNorm` is usually applied on entire sample and often in NLP
360
+ tasks. Additionally, :class:`LayerNorm` applies elementwise affine
361
+ transform, while :class:`InstanceNorm3d` usually don't apply affine
362
+ transform.
363
+
364
+ Args:
365
+ num_features: :math:`C` from an expected input of size
366
+ :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
367
+ eps: a value added to the denominator for numerical stability. Default: 1e-5
368
+ momentum: the value used for the running_mean and running_var computation. Default: 0.1
369
+ affine: a boolean value that when set to ``True``, this module has
370
+ learnable affine parameters, initialized the same way as done for batch normalization.
371
+ Default: ``False``.
372
+ track_running_stats: a boolean value that when set to ``True``, this
373
+ module tracks the running mean and variance, and when set to ``False``,
374
+ this module does not track such statistics and always uses batch
375
+ statistics in both training and eval modes. Default: ``False``
376
+
377
+ Shape:
378
+ - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
379
+ - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)
380
+
381
+ Examples::
382
+
383
+ >>> # Without Learnable Parameters
384
+ >>> m = nn.InstanceNorm3d(100)
385
+ >>> # With Learnable Parameters
386
+ >>> m = nn.InstanceNorm3d(100, affine=True)
387
+ >>> input = torch.randn(20, 100, 35, 45, 10)
388
+ >>> output = m(input)
389
+ """
390
+
391
+ def _get_no_batch_dim(self):
392
+ return 4
393
+
394
+ def _check_input_dim(self, input):
395
+ if input.dim() not in (4, 5):
396
+ raise ValueError(f'expected 4D or 5D input (got {input.dim()}D input)')
397
+
398
+
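The running-stats update rule quoted in the notes above, hat_x_new = (1 - momentum) * hat_x + momentum * x_t, can be observed directly (illustrative sketch; variable names ours):

import torch
import torch.nn as nn

norm = nn.InstanceNorm2d(3, track_running_stats=True, momentum=0.1)
before = norm.running_mean.clone()  # initialized to zeros
_ = norm(torch.randn(4, 3, 8, 8))   # training mode: running stats are updated
assert not torch.equal(norm.running_mean, before)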
399
+ class LazyInstanceNorm3d(_LazyNormBase, _InstanceNorm):
400
+ r"""A :class:`torch.nn.InstanceNorm3d` module with lazy initialization of the ``num_features`` argument.
401
+
402
+ The ``num_features`` argument of the :class:`InstanceNorm3d` is inferred from ``input.size(1)``.
403
+ The attributes that will be lazily initialized are `weight`, `bias`,
404
+ `running_mean` and `running_var`.
405
+
406
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
407
+ on lazy modules and their limitations.
408
+
409
+ Args:
410
+ num_features: :math:`C` from an expected input of size
411
+ :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
412
+ eps: a value added to the denominator for numerical stability. Default: 1e-5
413
+ momentum: the value used for the running_mean and running_var computation. Default: 0.1
414
+ affine: a boolean value that when set to ``True``, this module has
415
+ learnable affine parameters, initialized the same way as done for batch normalization.
416
+ Default: ``False``.
417
+ track_running_stats: a boolean value that when set to ``True``, this
418
+ module tracks the running mean and variance, and when set to ``False``,
419
+ this module does not track such statistics and always uses batch
420
+ statistics in both training and eval modes. Default: ``False``
421
+
422
+ Shape:
423
+ - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
424
+ - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)
425
+ """
426
+
427
+ cls_to_become = InstanceNorm3d # type: ignore[assignment]
428
+
429
+ def _get_no_batch_dim(self):
430
+ return 4
431
+
432
+ def _check_input_dim(self, input):
433
+ if input.dim() not in (4, 5):
434
+ raise ValueError(f'expected 4D or 5D input (got {input.dim()}D input)')
venv/lib/python3.10/site-packages/torch/nn/modules/linear.py ADDED
@@ -0,0 +1,264 @@
1
+ import math
2
+ from typing import Any
3
+
4
+ import torch
5
+ from torch import Tensor
6
+ from torch.nn.parameter import Parameter, UninitializedParameter
7
+ from .. import functional as F
8
+ from .. import init
9
+ from .module import Module
10
+ from .lazy import LazyModuleMixin
11
+
12
+
13
+ __all__ = [
14
+ 'Bilinear',
15
+ 'Identity',
16
+ 'LazyLinear',
17
+ 'Linear',
18
+ ]
19
+
20
+
21
+ class Identity(Module):
22
+ r"""A placeholder identity operator that is argument-insensitive.
23
+
24
+ Args:
25
+ args: any argument (unused)
26
+ kwargs: any keyword argument (unused)
27
+
28
+ Shape:
29
+ - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
30
+ - Output: :math:`(*)`, same shape as the input.
31
+
32
+ Examples::
33
+
34
+ >>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
35
+ >>> input = torch.randn(128, 20)
36
+ >>> output = m(input)
37
+ >>> print(output.size())
38
+ torch.Size([128, 20])
39
+
40
+ """
41
+
42
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
43
+ super().__init__()
44
+
45
+ def forward(self, input: Tensor) -> Tensor:
46
+ return input
47
+
48
+
49
+ class Linear(Module):
50
+ r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
51
+
52
+ This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
53
+
54
+ On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
55
+
56
+ Args:
57
+ in_features: size of each input sample
58
+ out_features: size of each output sample
59
+ bias: If set to ``False``, the layer will not learn an additive bias.
60
+ Default: ``True``
61
+
62
+ Shape:
63
+ - Input: :math:`(*, H_{in})` where :math:`*` means any number of
64
+ dimensions including none and :math:`H_{in} = \text{in\_features}`.
65
+ - Output: :math:`(*, H_{out})` where all but the last dimension
66
+ are the same shape as the input and :math:`H_{out} = \text{out\_features}`.
67
+
68
+ Attributes:
69
+ weight: the learnable weights of the module of shape
70
+ :math:`(\text{out\_features}, \text{in\_features})`. The values are
71
+ initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
72
+ :math:`k = \frac{1}{\text{in\_features}}`
73
+ bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
74
+ If :attr:`bias` is ``True``, the values are initialized from
75
+ :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
76
+ :math:`k = \frac{1}{\text{in\_features}}`
77
+
78
+ Examples::
79
+
80
+ >>> m = nn.Linear(20, 30)
81
+ >>> input = torch.randn(128, 20)
82
+ >>> output = m(input)
83
+ >>> print(output.size())
84
+ torch.Size([128, 30])
85
+ """
86
+
87
+ __constants__ = ['in_features', 'out_features']
88
+ in_features: int
89
+ out_features: int
90
+ weight: Tensor
91
+
92
+ def __init__(self, in_features: int, out_features: int, bias: bool = True,
93
+ device=None, dtype=None) -> None:
94
+ factory_kwargs = {'device': device, 'dtype': dtype}
95
+ super().__init__()
96
+ self.in_features = in_features
97
+ self.out_features = out_features
98
+ self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
99
+ if bias:
100
+ self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
101
+ else:
102
+ self.register_parameter('bias', None)
103
+ self.reset_parameters()
104
+
105
+ def reset_parameters(self) -> None:
106
+ # Setting a=sqrt(5) in kaiming_uniform is the same as initializing with
107
+ # uniform(-1/sqrt(in_features), 1/sqrt(in_features)). For details, see
108
+ # https://github.com/pytorch/pytorch/issues/57109
109
+ init.kaiming_uniform_(self.weight, a=math.sqrt(5))
110
+ if self.bias is not None:
111
+ fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
112
+ bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
113
+ init.uniform_(self.bias, -bound, bound)
114
+
115
+ def forward(self, input: Tensor) -> Tensor:
116
+ return F.linear(input, self.weight, self.bias)
117
+
118
+ def extra_repr(self) -> str:
119
+ return f'in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}'
120
+
121
+
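A numeric sketch (illustrative, not part of the file) of the comment in `reset_parameters` above: with a=sqrt(5), `kaiming_uniform_` uses gain sqrt(2/(1+a^2)) = sqrt(1/3), so the uniform bound sqrt(3)*gain/sqrt(fan_in) reduces to 1/sqrt(in_features); the bias uses the same bound.

import math
import torch
import torch.nn as nn

m = nn.Linear(20, 30)
bound = 1 / math.sqrt(m.in_features)  # = sqrt(3) * sqrt(1/3) / sqrt(20)
assert bound >= m.weight.abs().max().item()
assert bound >= m.bias.abs().max().item()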
122
+ # This class exists solely to avoid triggering an obscure error when scripting
123
+ # an improperly quantized attention layer. See this issue for details:
124
+ # https://github.com/pytorch/pytorch/issues/58969
125
+ # TODO: fail fast on quantization API usage error, then remove this class
126
+ # and replace uses of it with plain Linear
127
+ class NonDynamicallyQuantizableLinear(Linear):
128
+ def __init__(self, in_features: int, out_features: int, bias: bool = True,
129
+ device=None, dtype=None) -> None:
130
+ super().__init__(in_features, out_features, bias=bias,
131
+ device=device, dtype=dtype)
132
+
133
+
134
+ class Bilinear(Module):
135
+ r"""Applies a bilinear transformation to the incoming data: :math:`y = x_1^T A x_2 + b`.
136
+
137
+ Args:
138
+ in1_features: size of each first input sample
139
+ in2_features: size of each second input sample
140
+ out_features: size of each output sample
141
+ bias: If set to False, the layer will not learn an additive bias.
142
+ Default: ``True``
143
+
144
+ Shape:
145
+ - Input1: :math:`(*, H_{in1})` where :math:`H_{in1}=\text{in1\_features}` and
146
+ :math:`*` means any number of additional dimensions including none. All but the last dimension
147
+ of the inputs should be the same.
148
+ - Input2: :math:`(*, H_{in2})` where :math:`H_{in2}=\text{in2\_features}`.
149
+ - Output: :math:`(*, H_{out})` where :math:`H_{out}=\text{out\_features}`
150
+ and all but the last dimension are the same shape as the input.
151
+
152
+ Attributes:
153
+ weight: the learnable weights of the module of shape
154
+ :math:`(\text{out\_features}, \text{in1\_features}, \text{in2\_features})`.
155
+ The values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
156
+ :math:`k = \frac{1}{\text{in1\_features}}`
157
+ bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
158
+ If :attr:`bias` is ``True``, the values are initialized from
159
+ :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
160
+ :math:`k = \frac{1}{\text{in1\_features}}`
161
+
162
+ Examples::
163
+
164
+ >>> m = nn.Bilinear(20, 30, 40)
165
+ >>> input1 = torch.randn(128, 20)
166
+ >>> input2 = torch.randn(128, 30)
167
+ >>> output = m(input1, input2)
168
+ >>> print(output.size())
169
+ torch.Size([128, 40])
170
+ """
171
+
172
+ __constants__ = ['in1_features', 'in2_features', 'out_features']
173
+ in1_features: int
174
+ in2_features: int
175
+ out_features: int
176
+ weight: Tensor
177
+
178
+ def __init__(self, in1_features: int, in2_features: int, out_features: int, bias: bool = True,
179
+ device=None, dtype=None) -> None:
180
+ factory_kwargs = {'device': device, 'dtype': dtype}
181
+ super().__init__()
182
+ self.in1_features = in1_features
183
+ self.in2_features = in2_features
184
+ self.out_features = out_features
185
+ self.weight = Parameter(torch.empty((out_features, in1_features, in2_features), **factory_kwargs))
186
+
187
+ if bias:
188
+ self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
189
+ else:
190
+ self.register_parameter('bias', None)
191
+ self.reset_parameters()
192
+
193
+ def reset_parameters(self) -> None:
194
+ bound = 1 / math.sqrt(self.weight.size(1))
195
+ init.uniform_(self.weight, -bound, bound)
196
+ if self.bias is not None:
197
+ init.uniform_(self.bias, -bound, bound)
198
+
199
+ def forward(self, input1: Tensor, input2: Tensor) -> Tensor:
200
+ return F.bilinear(input1, input2, self.weight, self.bias)
201
+
202
+ def extra_repr(self) -> str:
203
+ return 'in1_features={}, in2_features={}, out_features={}, bias={}'.format(
204
+ self.in1_features, self.in2_features, self.out_features, self.bias is not None
205
+ )
206
+
207
+
208
+ class LazyLinear(LazyModuleMixin, Linear):
209
+ r"""A :class:`torch.nn.Linear` module where `in_features` is inferred.
210
+
211
+ In this module, the `weight` and `bias` are :class:`torch.nn.UninitializedParameter`
212
+ instances. They will be initialized after the first call to ``forward`` is done and the
213
+ module will become a regular :class:`torch.nn.Linear` module. The ``in_features`` argument
214
+ of the :class:`Linear` is inferred from ``input.shape[-1]``.
215
+
216
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
217
+ on lazy modules and their limitations.
218
+
219
+ Args:
220
+ out_features: size of each output sample
221
+ bias: If set to ``False``, the layer will not learn an additive bias.
222
+ Default: ``True``
223
+
224
+ Attributes:
225
+ weight: the learnable weights of the module of shape
226
+ :math:`(\text{out\_features}, \text{in\_features})`. The values are
227
+ initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
228
+ :math:`k = \frac{1}{\text{in\_features}}`
229
+ bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
230
+ If :attr:`bias` is ``True``, the values are initialized from
231
+ :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
232
+ :math:`k = \frac{1}{\text{in\_features}}`
233
+
234
+
235
+ """
236
+
237
+ cls_to_become = Linear # type: ignore[assignment]
238
+ weight: UninitializedParameter
239
+ bias: UninitializedParameter # type: ignore[assignment]
240
+
241
+ def __init__(self, out_features: int, bias: bool = True,
242
+ device=None, dtype=None) -> None:
243
+ factory_kwargs = {'device': device, 'dtype': dtype}
244
+ # bias is hardcoded to False to avoid creating tensor
245
+ # that will soon be overwritten.
246
+ super().__init__(0, 0, False)
247
+ self.weight = UninitializedParameter(**factory_kwargs)
248
+ self.out_features = out_features
249
+ if bias:
250
+ self.bias = UninitializedParameter(**factory_kwargs)
251
+
252
+ def reset_parameters(self) -> None:
253
+ if not self.has_uninitialized_params() and self.in_features != 0:
254
+ super().reset_parameters()
255
+
256
+ def initialize_parameters(self, input) -> None: # type: ignore[override]
257
+ if self.has_uninitialized_params():
258
+ with torch.no_grad():
259
+ self.in_features = input.shape[-1]
260
+ self.weight.materialize((self.out_features, self.in_features))
261
+ if self.bias is not None:
262
+ self.bias.materialize((self.out_features,))
263
+ self.reset_parameters()
264
+ # TODO: PartialLinear - maybe in sparse?
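A lazy-initialization sketch (illustrative, not part of the diff): `weight` and `bias` start as `UninitializedParameter`, are materialized on the first forward, and the module then becomes a regular `Linear`.

import torch
import torch.nn as nn

fc = nn.LazyLinear(out_features=8)
assert fc.has_uninitialized_params()
_ = fc(torch.randn(4, 16))  # in_features inferred as 16
assert isinstance(fc, nn.Linear) and fc.weight.shape == (8, 16)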
venv/lib/python3.10/site-packages/torch/nn/modules/module.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/nn/modules/pooling.py ADDED
@@ -0,0 +1,1306 @@
1
+ from typing import List, Optional
2
+
3
+ from torch import Tensor
4
+ from .module import Module
5
+ from .utils import _single, _pair, _triple
6
+ from .. import functional as F
7
+
8
+ from ..common_types import (_size_any_t, _size_1_t, _size_2_t, _size_3_t,
9
+ _ratio_3_t, _ratio_2_t, _size_any_opt_t, _size_2_opt_t, _size_3_opt_t)
10
+
11
+ __all__ = ['MaxPool1d', 'MaxPool2d', 'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d',
12
+ 'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'FractionalMaxPool2d', 'FractionalMaxPool3d', 'LPPool1d',
13
+ 'LPPool2d', 'LPPool3d', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d',
14
+ 'AdaptiveAvgPool1d', 'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d']
15
+
16
+ class _MaxPoolNd(Module):
17
+ __constants__ = ['kernel_size', 'stride', 'padding', 'dilation',
18
+ 'return_indices', 'ceil_mode']
19
+ return_indices: bool
20
+ ceil_mode: bool
21
+
22
+ def __init__(self, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None,
23
+ padding: _size_any_t = 0, dilation: _size_any_t = 1,
24
+ return_indices: bool = False, ceil_mode: bool = False) -> None:
25
+ super().__init__()
26
+ self.kernel_size = kernel_size
27
+ self.stride = stride if (stride is not None) else kernel_size
28
+ self.padding = padding
29
+ self.dilation = dilation
30
+ self.return_indices = return_indices
31
+ self.ceil_mode = ceil_mode
32
+
33
+ def extra_repr(self) -> str:
34
+ return 'kernel_size={kernel_size}, stride={stride}, padding={padding}' \
35
+ ', dilation={dilation}, ceil_mode={ceil_mode}'.format(**self.__dict__)
36
+
37
+
38
+ class MaxPool1d(_MaxPoolNd):
39
+ r"""Applies a 1D max pooling over an input signal composed of several input planes.
40
+
41
+ In the simplest case, the output value of the layer with input size :math:`(N, C, L)`
42
+ and output :math:`(N, C, L_{out})` can be precisely described as:
43
+
44
+ .. math::
45
+ out(N_i, C_j, k) = \max_{m=0, \ldots, \text{kernel\_size} - 1}
46
+ input(N_i, C_j, stride \times k + m)
47
+
48
+ If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides
49
+ for :attr:`padding` number of points. :attr:`dilation` is the stride between the elements within the
50
+ sliding window. This `link`_ has a nice visualization of the pooling parameters.
51
+
52
+ Note:
53
+ When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
54
+ or the input. Sliding windows that would start in the right padded region are ignored.
55
+
56
+ Args:
57
+ kernel_size: The size of the sliding window, must be > 0.
58
+ stride: The stride of the sliding window, must be > 0. Default value is :attr:`kernel_size`.
59
+ padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
60
+ dilation: The stride between elements within a sliding window, must be > 0.
61
+ return_indices: If ``True``, will return the argmax along with the max values.
62
+ Useful for :class:`torch.nn.MaxUnpool1d` later
63
+ ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
64
+ ensures that every element in the input tensor is covered by a sliding window.
65
+
66
+ Shape:
67
+ - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
68
+ - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
69
+
70
+ .. math::
71
+ L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation}
72
+ \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
73
+
74
+ Examples::
75
+
76
+ >>> # pool of size=3, stride=2
77
+ >>> m = nn.MaxPool1d(3, stride=2)
78
+ >>> input = torch.randn(20, 16, 50)
79
+ >>> output = m(input)
80
+
81
+ .. _link:
82
+ https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
83
+ """
84
+
85
+ kernel_size: _size_1_t
86
+ stride: _size_1_t
87
+ padding: _size_1_t
88
+ dilation: _size_1_t
89
+
90
+ def forward(self, input: Tensor):
91
+ return F.max_pool1d(input, self.kernel_size, self.stride,
92
+ self.padding, self.dilation, ceil_mode=self.ceil_mode,
93
+ return_indices=self.return_indices)
94
+
95
+
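The L_out formula from the docstring can be checked numerically (illustrative sketch; ceil_mode=False, so floor applies):

import math
import torch
import torch.nn as nn

L_in, kernel, stride, padding, dilation = 50, 3, 2, 1, 1
m = nn.MaxPool1d(kernel, stride=stride, padding=padding, dilation=dilation)
out = m(torch.randn(20, 16, L_in))
L_out = math.floor((L_in + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1)
assert out.shape[-1] == L_out  # floor((50 + 2 - 2 - 1) / 2 + 1) = 25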
96
+ class MaxPool2d(_MaxPoolNd):
97
+ r"""Applies a 2D max pooling over an input signal composed of several input planes.
98
+
99
+ In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
100
+ output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
101
+ can be precisely described as:
102
+
103
+ .. math::
104
+ \begin{aligned}
105
+ out(N_i, C_j, h, w) ={} & \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\
106
+ & \text{input}(N_i, C_j, \text{stride[0]} \times h + m,
107
+ \text{stride[1]} \times w + n)
108
+ \end{aligned}
109
+
110
+ If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides
111
+ for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
112
+ It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
113
+
114
+ Note:
115
+ When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
116
+ or the input. Sliding windows that would start in the right padded region are ignored.
117
+
118
+ The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
119
+
120
+ - a single ``int`` -- in which case the same value is used for the height and width dimension
121
+ - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
122
+ and the second `int` for the width dimension
123
+
124
+ Args:
125
+ kernel_size: the size of the window to take a max over
126
+ stride: the stride of the window. Default value is :attr:`kernel_size`
127
+ padding: Implicit negative infinity padding to be added on both sides
128
+ dilation: a parameter that controls the stride of elements in the window
129
+ return_indices: if ``True``, will return the max indices along with the outputs.
130
+ Useful for :class:`torch.nn.MaxUnpool2d` later
131
+ ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
132
+
133
+ Shape:
134
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`
135
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
136
+
137
+ .. math::
138
+ H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding[0]} - \text{dilation[0]}
139
+ \times (\text{kernel\_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor
140
+
141
+ .. math::
142
+ W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding[1]} - \text{dilation[1]}
143
+ \times (\text{kernel\_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor
144
+
145
+ Examples::
146
+
147
+ >>> # pool of square window of size=3, stride=2
148
+ >>> m = nn.MaxPool2d(3, stride=2)
149
+ >>> # pool of non-square window
150
+ >>> m = nn.MaxPool2d((3, 2), stride=(2, 1))
151
+ >>> input = torch.randn(20, 16, 50, 32)
152
+ >>> output = m(input)
153
+
154
+ .. _link:
155
+ https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
156
+ """
157
+
158
+ kernel_size: _size_2_t
159
+ stride: _size_2_t
160
+ padding: _size_2_t
161
+ dilation: _size_2_t
162
+
163
+ def forward(self, input: Tensor):
164
+ return F.max_pool2d(input, self.kernel_size, self.stride,
165
+ self.padding, self.dilation, ceil_mode=self.ceil_mode,
166
+ return_indices=self.return_indices)
167
+
168
+
169
+ class MaxPool3d(_MaxPoolNd):
170
+ r"""Applies a 3D max pooling over an input signal composed of several input planes.
171
+
172
+ In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
173
+ output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
174
+ can be precisely described as:
175
+
176
+ .. math::
177
+ \begin{aligned}
178
+ \text{out}(N_i, C_j, d, h, w) ={} & \max_{k=0, \ldots, kD-1} \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\
179
+ & \text{input}(N_i, C_j, \text{stride[0]} \times d + k,
180
+ \text{stride[1]} \times h + m, \text{stride[2]} \times w + n)
181
+ \end{aligned}
182
+
183
+ If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides
184
+ for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
185
+ It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
186
+
187
+ Note:
188
+ When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
189
+ or the input. Sliding windows that would start in the right padded region are ignored.
190
+
191
+ The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
192
+
193
+ - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
194
+ - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
195
+ the second `int` for the height dimension and the third `int` for the width dimension
196
+
197
+ Args:
198
+ kernel_size: the size of the window to take a max over
199
+ stride: the stride of the window. Default value is :attr:`kernel_size`
200
+ padding: Implicit negative infinity padding to be added on all three sides
201
+ dilation: a parameter that controls the stride of elements in the window
202
+ return_indices: if ``True``, will return the max indices along with the outputs.
203
+ Useful for :class:`torch.nn.MaxUnpool3d` later
204
+ ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
205
+
206
+ Shape:
207
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
208
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where
209
+
210
+ .. math::
211
+ D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times
212
+ (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
213
+
214
+ .. math::
215
+ H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times
216
+ (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
217
+
218
+ .. math::
219
+ W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2] \times
220
+ (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor
221
+
222
+ Examples::
223
+
224
+ >>> # pool of square window of size=3, stride=2
225
+ >>> m = nn.MaxPool3d(3, stride=2)
226
+ >>> # pool of non-square window
227
+ >>> m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2))
228
+ >>> input = torch.randn(20, 16, 50, 44, 31)
229
+ >>> output = m(input)
230
+
231
+ .. _link:
232
+ https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
233
+ """ # noqa: E501
234
+
235
+ kernel_size: _size_3_t
236
+ stride: _size_3_t
237
+ padding: _size_3_t
238
+ dilation: _size_3_t
239
+
240
+ def forward(self, input: Tensor):
241
+ return F.max_pool3d(input, self.kernel_size, self.stride,
242
+ self.padding, self.dilation, ceil_mode=self.ceil_mode,
243
+ return_indices=self.return_indices)
244
+
245
+
246
+ class _MaxUnpoolNd(Module):
247
+
248
+ def extra_repr(self) -> str:
249
+ return f'kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}'
250
+
251
+
252
+ class MaxUnpool1d(_MaxUnpoolNd):
253
+ r"""Computes a partial inverse of :class:`MaxPool1d`.
254
+
255
+ :class:`MaxPool1d` is not fully invertible, since the non-maximal values are lost.
256
+
257
+ :class:`MaxUnpool1d` takes in as input the output of :class:`MaxPool1d`
258
+ including the indices of the maximal values and computes a partial inverse
259
+ in which all non-maximal values are set to zero.
260
+
261
+ Note:
262
+ This operation may behave nondeterministically when the input indices contain repeated values.
263
+ See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information.
264
+
265
+ .. note:: :class:`MaxPool1d` can map several input sizes to the same output
266
+ sizes. Hence, the inversion process can get ambiguous.
267
+ To accommodate this, you can provide the needed output size
268
+ as an additional argument :attr:`output_size` in the forward call.
269
+ See the Inputs and Example below.
270
+
271
+ Args:
272
+ kernel_size (int or tuple): Size of the max pooling window.
273
+ stride (int or tuple): Stride of the max pooling window.
274
+ It is set to :attr:`kernel_size` by default.
275
+ padding (int or tuple): Padding that was added to the input
276
+
277
+ Inputs:
278
+ - `input`: the input Tensor to invert
279
+ - `indices`: the indices given out by :class:`~torch.nn.MaxPool1d`
280
+ - `output_size` (optional): the targeted output size
281
+
282
+ Shape:
283
+ - Input: :math:`(N, C, H_{in})` or :math:`(C, H_{in})`.
284
+ - Output: :math:`(N, C, H_{out})` or :math:`(C, H_{out})`, where
285
+
286
+ .. math::
287
+ H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{kernel\_size}[0]
288
+
289
+ or as given by :attr:`output_size` in the call operator
290
+
291
+ Example::
292
+
293
+ >>> # xdoctest: +IGNORE_WANT("do other tests modify the global state?")
294
+ >>> pool = nn.MaxPool1d(2, stride=2, return_indices=True)
295
+ >>> unpool = nn.MaxUnpool1d(2, stride=2)
296
+ >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8]]])
297
+ >>> output, indices = pool(input)
298
+ >>> unpool(output, indices)
299
+ tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]])
300
+
301
+ >>> # Example showcasing the use of output_size
302
+ >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8, 9]]])
303
+ >>> output, indices = pool(input)
304
+ >>> unpool(output, indices, output_size=input.size())
305
+ tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8., 0.]]])
306
+
307
+ >>> unpool(output, indices)
308
+ tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]])
309
+ """
310
+
311
+ kernel_size: _size_1_t
312
+ stride: _size_1_t
313
+ padding: _size_1_t
314
+
315
+ def __init__(self, kernel_size: _size_1_t, stride: Optional[_size_1_t] = None, padding: _size_1_t = 0) -> None:
316
+ super().__init__()
317
+ self.kernel_size = _single(kernel_size)
318
+ self.stride = _single(stride if (stride is not None) else kernel_size)
319
+ self.padding = _single(padding)
320
+
321
+ def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
322
+ return F.max_unpool1d(input, indices, self.kernel_size, self.stride,
323
+ self.padding, output_size)
324
+
325
+
326
+ class MaxUnpool2d(_MaxUnpoolNd):
327
+ r"""Computes a partial inverse of :class:`MaxPool2d`.
328
+
329
+ :class:`MaxPool2d` is not fully invertible, since the non-maximal values are lost.
330
+
331
+ :class:`MaxUnpool2d` takes in as input the output of :class:`MaxPool2d`
332
+ including the indices of the maximal values and computes a partial inverse
333
+ in which all non-maximal values are set to zero.
334
+
335
+ Note:
336
+         This operation may behave nondeterministically when the input indices contain repeated values.
337
+ See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information.
338
+
339
+ .. note:: :class:`MaxPool2d` can map several input sizes to the same output
340
+ sizes. Hence, the inversion process can get ambiguous.
341
+ To accommodate this, you can provide the needed output size
342
+ as an additional argument :attr:`output_size` in the forward call.
343
+ See the Inputs and Example below.
344
+
345
+ Args:
346
+ kernel_size (int or tuple): Size of the max pooling window.
347
+ stride (int or tuple): Stride of the max pooling window.
348
+ It is set to :attr:`kernel_size` by default.
349
+ padding (int or tuple): Padding that was added to the input
350
+
351
+ Inputs:
352
+ - `input`: the input Tensor to invert
353
+ - `indices`: the indices given out by :class:`~torch.nn.MaxPool2d`
354
+ - `output_size` (optional): the targeted output size
355
+
356
+ Shape:
357
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
358
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
359
+
360
+ .. math::
361
+         H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{kernel\_size}[0]
362
+
363
+ .. math::
364
+         W_{out} = (W_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{kernel\_size}[1]
365
+
366
+ or as given by :attr:`output_size` in the call operator
367
+
368
+ Example::
369
+
370
+ >>> pool = nn.MaxPool2d(2, stride=2, return_indices=True)
371
+ >>> unpool = nn.MaxUnpool2d(2, stride=2)
372
+ >>> input = torch.tensor([[[[ 1., 2., 3., 4.],
373
+ [ 5., 6., 7., 8.],
374
+ [ 9., 10., 11., 12.],
375
+ [13., 14., 15., 16.]]]])
376
+ >>> output, indices = pool(input)
377
+ >>> unpool(output, indices)
378
+ tensor([[[[ 0., 0., 0., 0.],
379
+ [ 0., 6., 0., 8.],
380
+ [ 0., 0., 0., 0.],
381
+ [ 0., 14., 0., 16.]]]])
382
+ >>> # Now using output_size to resolve an ambiguous size for the inverse
383
+         >>> input = torch.tensor([[[[ 1.,  2.,  3.,  4.,  5.],
384
+                                     [ 6.,  7.,  8.,  9., 10.],
385
+                                     [11., 12., 13., 14., 15.],
386
+                                     [16., 17., 18., 19., 20.]]]])
387
+ >>> output, indices = pool(input)
388
+ >>> # This call will not work without specifying output_size
389
+ >>> unpool(output, indices, output_size=input.size())
390
+ tensor([[[[ 0., 0., 0., 0., 0.],
391
+ [ 0., 7., 0., 9., 0.],
392
+ [ 0., 0., 0., 0., 0.],
393
+ [ 0., 17., 0., 19., 0.]]]])
394
+
395
+
396
+ """
397
+
398
+ kernel_size: _size_2_t
399
+ stride: _size_2_t
400
+ padding: _size_2_t
401
+
402
+ def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0) -> None:
403
+ super().__init__()
404
+ self.kernel_size = _pair(kernel_size)
405
+ self.stride = _pair(stride if (stride is not None) else kernel_size)
406
+ self.padding = _pair(padding)
407
+
408
+ def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
409
+ return F.max_unpool2d(input, indices, self.kernel_size, self.stride,
410
+ self.padding, output_size)
411
+
412
+
413
+ class MaxUnpool3d(_MaxUnpoolNd):
414
+ r"""Computes a partial inverse of :class:`MaxPool3d`.
415
+
416
+ :class:`MaxPool3d` is not fully invertible, since the non-maximal values are lost.
417
+ :class:`MaxUnpool3d` takes in as input the output of :class:`MaxPool3d`
418
+ including the indices of the maximal values and computes a partial inverse
419
+ in which all non-maximal values are set to zero.
420
+
421
+ Note:
422
+         This operation may behave nondeterministically when the input indices contain repeated values.
423
+ See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information.
424
+
425
+ .. note:: :class:`MaxPool3d` can map several input sizes to the same output
426
+ sizes. Hence, the inversion process can get ambiguous.
427
+ To accommodate this, you can provide the needed output size
428
+ as an additional argument :attr:`output_size` in the forward call.
429
+ See the Inputs section below.
430
+
431
+ Args:
432
+ kernel_size (int or tuple): Size of the max pooling window.
433
+ stride (int or tuple): Stride of the max pooling window.
434
+ It is set to :attr:`kernel_size` by default.
435
+ padding (int or tuple): Padding that was added to the input
436
+
437
+ Inputs:
438
+ - `input`: the input Tensor to invert
439
+ - `indices`: the indices given out by :class:`~torch.nn.MaxPool3d`
440
+ - `output_size` (optional): the targeted output size
441
+
442
+ Shape:
443
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
444
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where
445
+
446
+ .. math::
447
+         D_{out} = (D_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{kernel\_size}[0]
448
+
449
+ .. math::
450
+         H_{out} = (H_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{kernel\_size}[1]
451
+
452
+ .. math::
453
+         W_{out} = (W_{in} - 1) \times \text{stride}[2] - 2 \times \text{padding}[2] + \text{kernel\_size}[2]
454
+
455
+ or as given by :attr:`output_size` in the call operator
456
+
457
+ Example::
458
+
459
+ >>> # pool of square window of size=3, stride=2
460
+ >>> pool = nn.MaxPool3d(3, stride=2, return_indices=True)
461
+ >>> unpool = nn.MaxUnpool3d(3, stride=2)
462
+ >>> output, indices = pool(torch.randn(20, 16, 51, 33, 15))
463
+ >>> unpooled_output = unpool(output, indices)
464
+ >>> unpooled_output.size()
465
+ torch.Size([20, 16, 51, 33, 15])
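+
+         An illustrative use of ``output_size`` (not in the original docstring),
+         which disambiguates the inverse when several input sizes map to the
+         same pooled size:
+
+         >>> input = torch.randn(20, 16, 52, 34, 16)
+         >>> output, indices = pool(input)
+         >>> unpool(output, indices, output_size=input.size()).size()
+         torch.Size([20, 16, 52, 34, 16])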
466
+ """
467
+
468
+ kernel_size: _size_3_t
469
+ stride: _size_3_t
470
+ padding: _size_3_t
471
+
472
+ def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0) -> None:
473
+ super().__init__()
474
+ self.kernel_size = _triple(kernel_size)
475
+ self.stride = _triple(stride if (stride is not None) else kernel_size)
476
+ self.padding = _triple(padding)
477
+
478
+ def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
479
+ return F.max_unpool3d(input, indices, self.kernel_size, self.stride,
480
+ self.padding, output_size)
481
+
482
+
483
+ class _AvgPoolNd(Module):
484
+ __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad']
485
+
486
+ def extra_repr(self) -> str:
487
+ return f'kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}'
488
+
489
+
490
+ class AvgPool1d(_AvgPoolNd):
491
+ r"""Applies a 1D average pooling over an input signal composed of several input planes.
492
+
493
+ In the simplest case, the output value of the layer with input size :math:`(N, C, L)`,
494
+ output :math:`(N, C, L_{out})` and :attr:`kernel_size` :math:`k`
495
+ can be precisely described as:
496
+
497
+ .. math::
498
+
499
+ \text{out}(N_i, C_j, l) = \frac{1}{k} \sum_{m=0}^{k-1}
500
+ \text{input}(N_i, C_j, \text{stride} \times l + m)
501
+
502
+ If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
503
+ for :attr:`padding` number of points.
504
+
505
+ Note:
506
+ When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
507
+ or the input. Sliding windows that would start in the right padded region are ignored.
508
+
509
+ The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can each be
510
+ an ``int`` or a one-element tuple.
511
+
512
+ Args:
513
+ kernel_size: the size of the window
514
+ stride: the stride of the window. Default value is :attr:`kernel_size`
515
+ padding: implicit zero padding to be added on both sides
516
+ ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
517
+ count_include_pad: when True, will include the zero-padding in the averaging calculation
518
+
519
+ Shape:
520
+ - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
521
+ - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
522
+
523
+ .. math::
524
+ L_{out} = \left\lfloor \frac{L_{in} +
525
+ 2 \times \text{padding} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor
526
+
527
+ Per the note above, if ``ceil_mode`` is True and :math:`(L_{out} - 1) \times \text{stride} \geq L_{in}
528
+ + \text{padding}`, we skip the last window as it would start in the right padded region, resulting in
529
+ :math:`L_{out}` being reduced by one.
530
+
531
+ Examples::
532
+
533
+ >>> # pool with window of size=3, stride=2
534
+ >>> m = nn.AvgPool1d(3, stride=2)
535
+ >>> m(torch.tensor([[[1., 2, 3, 4, 5, 6, 7]]]))
536
+ tensor([[[2., 4., 6.]]])
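+
+         A small sketch (not from the original docstring) of the ``ceil_mode``
+         effect on the output length:
+
+         >>> x = torch.arange(8.).reshape(1, 1, 8)
+         >>> nn.AvgPool1d(3, stride=2)(x).shape
+         torch.Size([1, 1, 3])
+         >>> nn.AvgPool1d(3, stride=2, ceil_mode=True)(x).shape
+         torch.Size([1, 1, 4])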
537
+ """
538
+
539
+ kernel_size: _size_1_t
540
+ stride: _size_1_t
541
+ padding: _size_1_t
542
+ ceil_mode: bool
543
+ count_include_pad: bool
544
+
545
+     def __init__(self, kernel_size: _size_1_t, stride: Optional[_size_1_t] = None, padding: _size_1_t = 0, ceil_mode: bool = False,
546
+ count_include_pad: bool = True) -> None:
547
+ super().__init__()
548
+ self.kernel_size = _single(kernel_size)
549
+ self.stride = _single(stride if stride is not None else kernel_size)
550
+ self.padding = _single(padding)
551
+ self.ceil_mode = ceil_mode
552
+ self.count_include_pad = count_include_pad
553
+
554
+ def forward(self, input: Tensor) -> Tensor:
555
+ return F.avg_pool1d(
556
+ input, self.kernel_size, self.stride, self.padding, self.ceil_mode,
557
+ self.count_include_pad)
558
+
559
+
560
+ class AvgPool2d(_AvgPoolNd):
561
+ r"""Applies a 2D average pooling over an input signal composed of several input planes.
562
+
563
+ In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
564
+ output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
565
+ can be precisely described as:
566
+
567
+ .. math::
568
+
569
+ out(N_i, C_j, h, w) = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
570
+ input(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n)
571
+
572
+ If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
573
+ for :attr:`padding` number of points.
574
+
575
+ Note:
576
+ When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
577
+ or the input. Sliding windows that would start in the right padded region are ignored.
578
+
579
+ The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can either be:
580
+
581
+ - a single ``int`` -- in which case the same value is used for the height and width dimension
582
+ - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
583
+ and the second `int` for the width dimension
584
+
585
+ Args:
586
+ kernel_size: the size of the window
587
+ stride: the stride of the window. Default value is :attr:`kernel_size`
588
+ padding: implicit zero padding to be added on both sides
589
+ ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
590
+ count_include_pad: when True, will include the zero-padding in the averaging calculation
591
+ divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used.
592
+
593
+
594
+ Shape:
595
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
596
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
597
+
598
+ .. math::
599
+ H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] -
600
+ \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
601
+
602
+ .. math::
603
+ W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] -
604
+ \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
605
+
606
+ Per the note above, if ``ceil_mode`` is True and :math:`(H_{out} - 1)\times \text{stride}[0]\geq H_{in}
607
+ + \text{padding}[0]`, we skip the last window as it would start in the bottom padded region,
608
+ resulting in :math:`H_{out}` being reduced by one.
609
+
610
+ The same applies for :math:`W_{out}`.
611
+
612
+ Examples::
613
+
614
+ >>> # pool of square window of size=3, stride=2
615
+ >>> m = nn.AvgPool2d(3, stride=2)
616
+ >>> # pool of non-square window
617
+ >>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
618
+ >>> input = torch.randn(20, 16, 50, 32)
619
+ >>> output = m(input)
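+
+         A small sketch (not from the original docstring) of ``divisor_override``,
+         which replaces the pooling-region size in the denominator:
+
+         >>> x = torch.ones(1, 1, 4, 4)
+         >>> nn.AvgPool2d(2)(x)  # each 2x2 window sums to 4, divided by 4
+         tensor([[[[1., 1.],
+                   [1., 1.]]]])
+         >>> nn.AvgPool2d(2, divisor_override=2)(x)  # divided by 2 instead
+         tensor([[[[2., 2.],
+                   [2., 2.]]]])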
620
+ """
621
+
622
+ __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
623
+
624
+ kernel_size: _size_2_t
625
+ stride: _size_2_t
626
+ padding: _size_2_t
627
+ ceil_mode: bool
628
+ count_include_pad: bool
629
+
630
+ def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0,
631
+ ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> None:
632
+ super().__init__()
633
+ self.kernel_size = kernel_size
634
+ self.stride = stride if (stride is not None) else kernel_size
635
+ self.padding = padding
636
+ self.ceil_mode = ceil_mode
637
+ self.count_include_pad = count_include_pad
638
+ self.divisor_override = divisor_override
639
+
640
+ def forward(self, input: Tensor) -> Tensor:
641
+ return F.avg_pool2d(input, self.kernel_size, self.stride,
642
+ self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
643
+
644
+
645
+ class AvgPool3d(_AvgPoolNd):
646
+ r"""Applies a 3D average pooling over an input signal composed of several input planes.
647
+
648
+ In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
649
+ output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
650
+ can be precisely described as:
651
+
652
+ .. math::
653
+ \begin{aligned}
654
+ \text{out}(N_i, C_j, d, h, w) ={} & \sum_{k=0}^{kD-1} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1} \\
655
+ & \frac{\text{input}(N_i, C_j, \text{stride}[0] \times d + k,
656
+ \text{stride}[1] \times h + m, \text{stride}[2] \times w + n)}
657
+ {kD \times kH \times kW}
658
+ \end{aligned}
659
+
660
+ If :attr:`padding` is non-zero, then the input is implicitly zero-padded on all three sides
661
+ for :attr:`padding` number of points.
662
+
663
+ Note:
664
+ When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
665
+ or the input. Sliding windows that would start in the right padded region are ignored.
666
+
667
+ The parameters :attr:`kernel_size`, :attr:`stride` can either be:
668
+
669
+ - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
670
+ - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
671
+ the second `int` for the height dimension and the third `int` for the width dimension
672
+
673
+ Args:
674
+ kernel_size: the size of the window
675
+ stride: the stride of the window. Default value is :attr:`kernel_size`
676
+ padding: implicit zero padding to be added on all three sides
677
+ ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
678
+ count_include_pad: when True, will include the zero-padding in the averaging calculation
679
+ divisor_override: if specified, it will be used as divisor, otherwise :attr:`kernel_size` will be used
680
+
681
+ Shape:
682
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
683
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
684
+ :math:`(C, D_{out}, H_{out}, W_{out})`, where
685
+
686
+ .. math::
687
+ D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] -
688
+ \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
689
+
690
+ .. math::
691
+ H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] -
692
+ \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
693
+
694
+ .. math::
695
+ W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] -
696
+ \text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor
697
+
698
+ Per the note above, if ``ceil_mode`` is True and :math:`(D_{out} - 1)\times \text{stride}[0]\geq D_{in}
699
+ + \text{padding}[0]`, we skip the last window as it would start in the padded region,
700
+ resulting in :math:`D_{out}` being reduced by one.
701
+
702
+ The same applies for :math:`W_{out}` and :math:`H_{out}`.
703
+
704
+ Examples::
705
+
706
+ >>> # pool of square window of size=3, stride=2
707
+ >>> m = nn.AvgPool3d(3, stride=2)
708
+ >>> # pool of non-square window
709
+ >>> m = nn.AvgPool3d((3, 2, 2), stride=(2, 1, 2))
710
+ >>> input = torch.randn(20, 16, 50, 44, 31)
711
+ >>> output = m(input)
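+
+         A small sketch (not from the original docstring) of ``count_include_pad``:
+         with zero-padding, the flag controls whether padded zeros enter the divisor.
+         Here each corner window contains one real element and seven padded zeros:
+
+         >>> x = torch.ones(1, 1, 2, 2, 2)
+         >>> nn.AvgPool3d(2, padding=1, count_include_pad=True)(x)[0, 0, 0, 0]
+         tensor([0.1250, 0.1250])
+         >>> nn.AvgPool3d(2, padding=1, count_include_pad=False)(x)[0, 0, 0, 0]
+         tensor([1., 1.])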
712
+ """
713
+
714
+ __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
715
+
716
+ kernel_size: _size_3_t
717
+ stride: _size_3_t
718
+ padding: _size_3_t
719
+ ceil_mode: bool
720
+ count_include_pad: bool
721
+
722
+ def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0,
723
+ ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> None:
724
+ super().__init__()
725
+ self.kernel_size = kernel_size
726
+ self.stride = stride if (stride is not None) else kernel_size
727
+ self.padding = padding
728
+ self.ceil_mode = ceil_mode
729
+ self.count_include_pad = count_include_pad
730
+ self.divisor_override = divisor_override
731
+
732
+ def forward(self, input: Tensor) -> Tensor:
733
+ return F.avg_pool3d(input, self.kernel_size, self.stride,
734
+ self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
735
+
736
+ def __setstate__(self, d):
737
+ super().__setstate__(d)
738
+ self.__dict__.setdefault('padding', 0)
739
+ self.__dict__.setdefault('ceil_mode', False)
740
+ self.__dict__.setdefault('count_include_pad', True)
741
+
742
+
743
+ class FractionalMaxPool2d(Module):
744
+ r"""Applies a 2D fractional max pooling over an input signal composed of several input planes.
745
+
746
+ Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
747
+
748
+ The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic
749
+ step size determined by the target output size.
750
+ The number of output features is equal to the number of input planes.
751
+
752
+ .. note:: Exactly one of ``output_size`` or ``output_ratio`` must be defined.
753
+
754
+ Args:
755
+ kernel_size: the size of the window to take a max over.
756
+ Can be a single number k (for a square kernel of k x k) or a tuple `(kh, kw)`
757
+ output_size: the target output size of the image of the form `oH x oW`.
758
+ Can be a tuple `(oH, oW)` or a single number oH for a square image `oH x oH`.
759
+ Note that we must have :math:`kH + oH - 1 <= H_{in}` and :math:`kW + oW - 1 <= W_{in}`
760
+ output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
761
+ This has to be a number or tuple in the range (0, 1).
762
+ Note that we must have :math:`kH + (output\_ratio\_H * H_{in}) - 1 <= H_{in}`
763
+ and :math:`kW + (output\_ratio\_W * W_{in}) - 1 <= W_{in}`
764
+ return_indices: if ``True``, will return the indices along with the outputs.
765
+ Useful to pass to :meth:`nn.MaxUnpool2d`. Default: ``False``
766
+
767
+ Shape:
768
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
769
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
770
+ :math:`(H_{out}, W_{out})=\text{output\_size}` or
771
+ :math:`(H_{out}, W_{out})=\text{output\_ratio} \times (H_{in}, W_{in})`.
772
+
773
+ Examples:
774
+ >>> # pool of square window of size=3, and target output size 13x12
775
+ >>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12))
776
+ >>> # pool of square window and target output size being half of input image size
777
+ >>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
778
+ >>> input = torch.randn(20, 16, 50, 32)
779
+ >>> output = m(input)
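+
+         An illustrative shape check (not part of the original docstring): with
+         ``output_ratio=(0.5, 0.5)`` each spatial size is halved, rounded down:
+
+         >>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
+         >>> m(torch.randn(20, 16, 50, 32)).shape
+         torch.Size([20, 16, 25, 16])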
780
+
781
+ .. _Fractional MaxPooling:
782
+ https://arxiv.org/abs/1412.6071
783
+ """
784
+
785
+ __constants__ = ['kernel_size', 'return_indices', 'output_size',
786
+ 'output_ratio']
787
+
788
+ kernel_size: _size_2_t
789
+ return_indices: bool
790
+ output_size: _size_2_t
791
+ output_ratio: _ratio_2_t
792
+
793
+ def __init__(self, kernel_size: _size_2_t, output_size: Optional[_size_2_t] = None,
794
+ output_ratio: Optional[_ratio_2_t] = None,
795
+ return_indices: bool = False, _random_samples=None) -> None:
796
+ super().__init__()
797
+ self.kernel_size = _pair(kernel_size)
798
+ self.return_indices = return_indices
799
+ self.register_buffer('_random_samples', _random_samples)
800
+ self.output_size = _pair(output_size) if output_size is not None else None
801
+ self.output_ratio = _pair(output_ratio) if output_ratio is not None else None
802
+ if output_size is None and output_ratio is None:
803
+ raise ValueError("FractionalMaxPool2d requires specifying either "
804
+ "an output size, or a pooling ratio")
805
+ if output_size is not None and output_ratio is not None:
806
+ raise ValueError("only one of output_size and output_ratio may be specified")
807
+ if self.output_ratio is not None:
808
+ if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1):
809
+ raise ValueError(f"output_ratio must be between 0 and 1 (got {output_ratio})")
810
+
811
+ def forward(self, input: Tensor):
812
+ return F.fractional_max_pool2d(
813
+ input, self.kernel_size, self.output_size, self.output_ratio,
814
+ self.return_indices,
815
+ _random_samples=self._random_samples)
816
+
817
+
818
+ class FractionalMaxPool3d(Module):
819
+ r"""Applies a 3D fractional max pooling over an input signal composed of several input planes.
820
+
821
+ Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
822
+
823
+ The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic
824
+ step size determined by the target output size.
825
+ The number of output features is equal to the number of input planes.
826
+
827
+ .. note:: Exactly one of ``output_size`` or ``output_ratio`` must be defined.
828
+
829
+ Args:
830
+ kernel_size: the size of the window to take a max over.
831
+                      Can be a single number k (for a cubic kernel of k x k x k) or a tuple `(kT, kH, kW)`
832
+ output_size: the target output size of the image of the form `oT x oH x oW`.
833
+                      Can be a tuple `(oT, oH, oW)` or a single number oH for a cubic output `oH x oH x oH`
834
+ output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
835
+ This has to be a number or tuple in the range (0, 1)
836
+ return_indices: if ``True``, will return the indices along with the outputs.
837
+ Useful to pass to :meth:`nn.MaxUnpool3d`. Default: ``False``
838
+
839
+ Shape:
840
+ - Input: :math:`(N, C, T_{in}, H_{in}, W_{in})` or :math:`(C, T_{in}, H_{in}, W_{in})`.
841
+ - Output: :math:`(N, C, T_{out}, H_{out}, W_{out})` or :math:`(C, T_{out}, H_{out}, W_{out})`, where
842
+ :math:`(T_{out}, H_{out}, W_{out})=\text{output\_size}` or
843
+ :math:`(T_{out}, H_{out}, W_{out})=\text{output\_ratio} \times (T_{in}, H_{in}, W_{in})`
844
+
845
+ Examples:
846
+ >>> # pool of cubic window of size=3, and target output size 13x12x11
847
+ >>> m = nn.FractionalMaxPool3d(3, output_size=(13, 12, 11))
848
+ >>> # pool of cubic window and target output size being half of input size
849
+ >>> m = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5))
850
+ >>> input = torch.randn(20, 16, 50, 32, 16)
851
+ >>> output = m(input)
852
+
853
+ .. _Fractional MaxPooling:
854
+ https://arxiv.org/abs/1412.6071
855
+ """
856
+
857
+ __constants__ = ['kernel_size', 'return_indices', 'output_size',
858
+ 'output_ratio']
859
+ kernel_size: _size_3_t
860
+ return_indices: bool
861
+ output_size: _size_3_t
862
+ output_ratio: _ratio_3_t
863
+
864
+ def __init__(self, kernel_size: _size_3_t, output_size: Optional[_size_3_t] = None,
865
+ output_ratio: Optional[_ratio_3_t] = None,
866
+ return_indices: bool = False, _random_samples=None) -> None:
867
+ super().__init__()
868
+ self.kernel_size = _triple(kernel_size)
869
+ self.return_indices = return_indices
870
+ self.register_buffer('_random_samples', _random_samples)
871
+ self.output_size = _triple(output_size) if output_size is not None else None
872
+ self.output_ratio = _triple(output_ratio) if output_ratio is not None else None
873
+ if output_size is None and output_ratio is None:
874
+ raise ValueError("FractionalMaxPool3d requires specifying either "
875
+ "an output size, or a pooling ratio")
876
+ if output_size is not None and output_ratio is not None:
877
+ raise ValueError("only one of output_size and output_ratio may be specified")
878
+ if self.output_ratio is not None:
879
+ if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1 and 0 < self.output_ratio[2] < 1):
880
+ raise ValueError(f"output_ratio must be between 0 and 1 (got {output_ratio})")
881
+
882
+ def forward(self, input: Tensor):
883
+ return F.fractional_max_pool3d(
884
+ input, self.kernel_size, self.output_size, self.output_ratio,
885
+ self.return_indices,
886
+ _random_samples=self._random_samples)
887
+
888
+
889
+ class _LPPoolNd(Module):
890
+ __constants__ = ['norm_type', 'kernel_size', 'stride', 'ceil_mode']
891
+
892
+ norm_type: float
893
+ ceil_mode: bool
894
+
895
+ def __init__(self, norm_type: float, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None,
896
+ ceil_mode: bool = False) -> None:
897
+ super().__init__()
898
+ self.norm_type = norm_type
899
+ self.kernel_size = kernel_size
900
+ self.stride = stride
901
+ self.ceil_mode = ceil_mode
902
+
903
+ def extra_repr(self) -> str:
904
+ return 'norm_type={norm_type}, kernel_size={kernel_size}, stride={stride}, ' \
905
+ 'ceil_mode={ceil_mode}'.format(**self.__dict__)
906
+
907
+
908
+ class LPPool1d(_LPPoolNd):
909
+ r"""Applies a 1D power-average pooling over an input signal composed of several input planes.
910
+
911
+ On each window, the function computed is:
912
+
913
+ .. math::
914
+ f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
915
+
916
+ - At p = :math:`\infty`, one gets Max Pooling
917
+ - At p = 1, one gets Sum Pooling (which is proportional to Average Pooling)
918
+
919
+     .. note:: If the sum of the `p`-th powers of the elements in a window is zero, the gradient
920
+         of this function is not defined. This implementation will set the gradient to zero in this case.
921
+
922
+ Args:
923
+ kernel_size: a single int, the size of the window
924
+ stride: a single int, the stride of the window. Default value is :attr:`kernel_size`
925
+ ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
926
+
927
+ Shape:
928
+ - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
929
+ - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
930
+
931
+ .. math::
932
+ L_{out} = \left\lfloor\frac{L_{in} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor
933
+
934
+ Examples::
935
+ >>> # power-2 pool of window of length 3, with stride 2.
936
+ >>> m = nn.LPPool1d(2, 3, stride=2)
937
+ >>> input = torch.randn(20, 16, 50)
938
+ >>> output = m(input)
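+
+         A minimal sketch (not from the original docstring) of the p = 1 case,
+         where each output element is the plain window sum:
+
+         >>> m = nn.LPPool1d(1, 3, stride=3)
+         >>> m(torch.tensor([[[1., 2., 3., 4., 5., 6.]]]))
+         tensor([[[ 6., 15.]]])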
939
+ """
940
+
941
+ kernel_size: _size_1_t
942
+ stride: _size_1_t
943
+
944
+ def forward(self, input: Tensor) -> Tensor:
945
+ return F.lp_pool1d(input, float(self.norm_type), self.kernel_size,
946
+ self.stride, self.ceil_mode)
947
+
948
+
949
+ class LPPool2d(_LPPoolNd):
950
+ r"""Applies a 2D power-average pooling over an input signal composed of several input planes.
951
+
952
+ On each window, the function computed is:
953
+
954
+ .. math::
955
+ f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
956
+
957
+ - At p = :math:`\infty`, one gets Max Pooling
958
+ - At p = 1, one gets Sum Pooling (which is proportional to average pooling)
959
+
960
+ The parameters :attr:`kernel_size`, :attr:`stride` can either be:
961
+
962
+ - a single ``int`` -- in which case the same value is used for the height and width dimension
963
+ - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
964
+ and the second `int` for the width dimension
965
+
966
+     .. note:: If the sum of the `p`-th powers of the elements in a window is zero, the gradient
967
+         of this function is not defined. This implementation will set the gradient to zero in this case.
968
+
969
+ Args:
970
+ kernel_size: the size of the window
971
+ stride: the stride of the window. Default value is :attr:`kernel_size`
972
+ ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
973
+
974
+ Shape:
975
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
976
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
977
+
978
+ .. math::
979
+ H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
980
+
981
+ .. math::
982
+ W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
983
+
984
+ Examples::
985
+
986
+ >>> # power-2 pool of square window of size=3, stride=2
987
+ >>> m = nn.LPPool2d(2, 3, stride=2)
988
+ >>> # pool of non-square window of power 1.2
989
+ >>> m = nn.LPPool2d(1.2, (3, 2), stride=(2, 1))
990
+ >>> input = torch.randn(20, 16, 50, 32)
991
+ >>> output = m(input)
992
+
993
+ """
994
+
995
+ kernel_size: _size_2_t
996
+ stride: _size_2_t
997
+
998
+ def forward(self, input: Tensor) -> Tensor:
999
+ return F.lp_pool2d(input, float(self.norm_type), self.kernel_size,
1000
+ self.stride, self.ceil_mode)
1001
+
1002
+
1003
+ class LPPool3d(_LPPoolNd):
1004
+ r"""Applies a 3D power-average pooling over an input signal composed of several input planes.
1005
+
1006
+ On each window, the function computed is:
1007
+
1008
+ .. math::
1009
+ f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
1010
+
1011
+ - At p = :math:`\infty`, one gets Max Pooling
1012
+ - At p = 1, one gets Sum Pooling (which is proportional to average pooling)
1013
+
1014
+ The parameters :attr:`kernel_size`, :attr:`stride` can either be:
1015
+
1016
+     - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
1017
+ - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
1018
+ the second `int` for the height dimension and the third `int` for the width dimension
1019
+
1020
+     .. note:: If the sum of the `p`-th powers of the elements in a window is zero, the gradient
1021
+         of this function is not defined. This implementation will set the gradient to zero in this case.
1022
+
1023
+ Args:
1024
+ kernel_size: the size of the window
1025
+ stride: the stride of the window. Default value is :attr:`kernel_size`
1026
+ ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
1027
+
1028
+ Shape:
1029
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
1030
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
1031
+ :math:`(C, D_{out}, H_{out}, W_{out})`, where
1032
+
1033
+ .. math::
1034
+ D_{out} = \left\lfloor\frac{D_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
1035
+
1036
+ .. math::
1037
+ H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
1038
+
1039
+ .. math::
1040
+ W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor
1041
+
1042
+ Examples::
1043
+
1044
+ >>> # power-2 pool of square window of size=3, stride=2
1045
+ >>> m = nn.LPPool3d(2, 3, stride=2)
1046
+ >>> # pool of non-square window of power 1.2
1047
+ >>> m = nn.LPPool3d(1.2, (3, 2, 2), stride=(2, 1, 2))
1048
+ >>> input = torch.randn(20, 16, 50, 44, 31)
1049
+ >>> output = m(input)
1050
+
1051
+ """
1052
+
1053
+ kernel_size: _size_3_t
1054
+ stride: _size_3_t
1055
+
1056
+ def forward(self, input: Tensor) -> Tensor:
1057
+ return F.lp_pool3d(input, float(self.norm_type), self.kernel_size,
1058
+ self.stride, self.ceil_mode)
1059
+
1060
+
1061
+ class _AdaptiveMaxPoolNd(Module):
1062
+ __constants__ = ['output_size', 'return_indices']
1063
+ return_indices: bool
1064
+
1065
+ def __init__(self, output_size: _size_any_opt_t, return_indices: bool = False) -> None:
1066
+ super().__init__()
1067
+ self.output_size = output_size
1068
+ self.return_indices = return_indices
1069
+
1070
+ def extra_repr(self) -> str:
1071
+ return f'output_size={self.output_size}'
1072
+
1073
+ # FIXME (by @ssnl): Improve adaptive pooling docs: specify what the input and
1074
+ # output shapes are, and how the operation computes output.
1075
+
1076
+
1077
+ class AdaptiveMaxPool1d(_AdaptiveMaxPoolNd):
1078
+ r"""Applies a 1D adaptive max pooling over an input signal composed of several input planes.
1079
+
1080
+ The output size is :math:`L_{out}`, for any input size.
1081
+ The number of output features is equal to the number of input planes.
1082
+
1083
+ Args:
1084
+ output_size: the target output size :math:`L_{out}`.
1085
+ return_indices: if ``True``, will return the indices along with the outputs.
1086
+ Useful to pass to nn.MaxUnpool1d. Default: ``False``
1087
+
1088
+ Shape:
1089
+ - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
1090
+ - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
1091
+ :math:`L_{out}=\text{output\_size}`.
1092
+
1093
+ Examples:
1094
+ >>> # target output size of 5
1095
+ >>> m = nn.AdaptiveMaxPool1d(5)
1096
+ >>> input = torch.randn(1, 64, 8)
1097
+ >>> output = m(input)
1098
+
1099
+ """
1100
+
1101
+ output_size: _size_1_t
1102
+
1103
+ def forward(self, input: Tensor):
1104
+ return F.adaptive_max_pool1d(input, self.output_size, self.return_indices)
1105
+
1106
+
1107
+ class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
1108
+ r"""Applies a 2D adaptive max pooling over an input signal composed of several input planes.
1109
+
1110
+ The output is of size :math:`H_{out} \times W_{out}`, for any input size.
1111
+ The number of output features is equal to the number of input planes.
1112
+
1113
+ Args:
1114
+ output_size: the target output size of the image of the form :math:`H_{out} \times W_{out}`.
1115
+ Can be a tuple :math:`(H_{out}, W_{out})` or a single :math:`H_{out}` for a
1116
+ square image :math:`H_{out} \times H_{out}`. :math:`H_{out}` and :math:`W_{out}`
1117
+             can be either an ``int``, or ``None`` which means the size will be the same as that
1118
+ of the input.
1119
+ return_indices: if ``True``, will return the indices along with the outputs.
1120
+ Useful to pass to nn.MaxUnpool2d. Default: ``False``
1121
+
1122
+ Shape:
1123
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
1124
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
1125
+ :math:`(H_{out}, W_{out})=\text{output\_size}`.
1126
+
1127
+ Examples:
1128
+ >>> # target output size of 5x7
1129
+ >>> m = nn.AdaptiveMaxPool2d((5, 7))
1130
+ >>> input = torch.randn(1, 64, 8, 9)
1131
+ >>> output = m(input)
1132
+ >>> # target output size of 7x7 (square)
1133
+ >>> m = nn.AdaptiveMaxPool2d(7)
1134
+ >>> input = torch.randn(1, 64, 10, 9)
1135
+ >>> output = m(input)
1136
+ >>> # target output size of 10x7
1137
+ >>> m = nn.AdaptiveMaxPool2d((None, 7))
1138
+ >>> input = torch.randn(1, 64, 10, 9)
1139
+ >>> output = m(input)
1140
+
1141
+ """
1142
+
1143
+ output_size: _size_2_opt_t
1144
+
1145
+ def forward(self, input: Tensor):
1146
+ return F.adaptive_max_pool2d(input, self.output_size, self.return_indices)
1147
+
1148
+
1149
+ class AdaptiveMaxPool3d(_AdaptiveMaxPoolNd):
1150
+ r"""Applies a 3D adaptive max pooling over an input signal composed of several input planes.
1151
+
1152
+ The output is of size :math:`D_{out} \times H_{out} \times W_{out}`, for any input size.
1153
+ The number of output features is equal to the number of input planes.
1154
+
1155
+ Args:
1156
+ output_size: the target output size of the image of the form :math:`D_{out} \times H_{out} \times W_{out}`.
1157
+ Can be a tuple :math:`(D_{out}, H_{out}, W_{out})` or a single
1158
+ :math:`D_{out}` for a cube :math:`D_{out} \times D_{out} \times D_{out}`.
1159
+             :math:`D_{out}`, :math:`H_{out}` and :math:`W_{out}` can be either an
1160
+ ``int``, or ``None`` which means the size will be the same as that of the input.
1161
+
1162
+ return_indices: if ``True``, will return the indices along with the outputs.
1163
+ Useful to pass to nn.MaxUnpool3d. Default: ``False``
1164
+
1165
+ Shape:
1166
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
1167
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
1168
+ where :math:`(D_{out}, H_{out}, W_{out})=\text{output\_size}`.
1169
+
1170
+ Examples:
1171
+ >>> # target output size of 5x7x9
1172
+ >>> m = nn.AdaptiveMaxPool3d((5, 7, 9))
1173
+ >>> input = torch.randn(1, 64, 8, 9, 10)
1174
+ >>> output = m(input)
1175
+ >>> # target output size of 7x7x7 (cube)
1176
+ >>> m = nn.AdaptiveMaxPool3d(7)
1177
+ >>> input = torch.randn(1, 64, 10, 9, 8)
1178
+ >>> output = m(input)
1179
+ >>> # target output size of 7x9x8
1180
+ >>> m = nn.AdaptiveMaxPool3d((7, None, None))
1181
+ >>> input = torch.randn(1, 64, 10, 9, 8)
1182
+ >>> output = m(input)
1183
+
1184
+ """
1185
+
1186
+ output_size: _size_3_opt_t
1187
+
1188
+ def forward(self, input: Tensor):
1189
+ return F.adaptive_max_pool3d(input, self.output_size, self.return_indices)
1190
+
1191
+
1192
+ class _AdaptiveAvgPoolNd(Module):
1193
+ __constants__ = ['output_size']
1194
+
1195
+ def __init__(self, output_size: _size_any_opt_t) -> None:
1196
+ super().__init__()
1197
+ self.output_size = output_size
1198
+
1199
+ def extra_repr(self) -> str:
1200
+ return f'output_size={self.output_size}'
1201
+
1202
+
1203
+ class AdaptiveAvgPool1d(_AdaptiveAvgPoolNd):
1204
+ r"""Applies a 1D adaptive average pooling over an input signal composed of several input planes.
1205
+
1206
+ The output size is :math:`L_{out}`, for any input size.
1207
+ The number of output features is equal to the number of input planes.
1208
+
1209
+ Args:
1210
+ output_size: the target output size :math:`L_{out}`.
1211
+
1212
+ Shape:
1213
+ - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
1214
+ - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
1215
+ :math:`L_{out}=\text{output\_size}`.
1216
+
1217
+ Examples:
1218
+ >>> # target output size of 5
1219
+ >>> m = nn.AdaptiveAvgPool1d(5)
1220
+ >>> input = torch.randn(1, 64, 8)
1221
+ >>> output = m(input)
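+
+         A small sketch (not part of the original docstring): the input is split
+         into ``output_size`` roughly equal bins and each bin is averaged:
+
+         >>> m = nn.AdaptiveAvgPool1d(2)
+         >>> m(torch.tensor([[[1., 2., 3., 4.]]]))
+         tensor([[[1.5000, 3.5000]]])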
1222
+
1223
+ """
1224
+
1225
+ output_size: _size_1_t
1226
+
1227
+ def forward(self, input: Tensor) -> Tensor:
1228
+ return F.adaptive_avg_pool1d(input, self.output_size)
1229
+
1230
+
1231
+ class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):
1232
+ r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes.
1233
+
1234
+ The output is of size H x W, for any input size.
1235
+ The number of output features is equal to the number of input planes.
1236
+
1237
+ Args:
1238
+ output_size: the target output size of the image of the form H x W.
1239
+ Can be a tuple (H, W) or a single H for a square image H x H.
1240
+             H and W can be either an ``int``, or ``None`` which means the size will
1241
+ be the same as that of the input.
1242
+
1243
+ Shape:
1244
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
1245
+ - Output: :math:`(N, C, S_{0}, S_{1})` or :math:`(C, S_{0}, S_{1})`, where
1246
+ :math:`S=\text{output\_size}`.
1247
+
1248
+ Examples:
1249
+ >>> # target output size of 5x7
1250
+ >>> m = nn.AdaptiveAvgPool2d((5, 7))
1251
+ >>> input = torch.randn(1, 64, 8, 9)
1252
+ >>> output = m(input)
1253
+ >>> # target output size of 7x7 (square)
1254
+ >>> m = nn.AdaptiveAvgPool2d(7)
1255
+ >>> input = torch.randn(1, 64, 10, 9)
1256
+ >>> output = m(input)
1257
+ >>> # target output size of 10x7
1258
+ >>> m = nn.AdaptiveAvgPool2d((None, 7))
1259
+ >>> input = torch.randn(1, 64, 10, 9)
1260
+ >>> output = m(input)
1261
+
1262
+ """
1263
+
1264
+ output_size: _size_2_opt_t
1265
+
1266
+ def forward(self, input: Tensor) -> Tensor:
1267
+ return F.adaptive_avg_pool2d(input, self.output_size)
1268
+
1269
+
1270
+ class AdaptiveAvgPool3d(_AdaptiveAvgPoolNd):
1271
+ r"""Applies a 3D adaptive average pooling over an input signal composed of several input planes.
1272
+
1273
+ The output is of size D x H x W, for any input size.
1274
+ The number of output features is equal to the number of input planes.
1275
+
1276
+ Args:
1277
+ output_size: the target output size of the form D x H x W.
1278
+ Can be a tuple (D, H, W) or a single number D for a cube D x D x D.
1279
+             D, H and W can be either an ``int``, or ``None`` which means the size will
1280
+ be the same as that of the input.
1281
+
1282
+ Shape:
1283
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
1284
+ - Output: :math:`(N, C, S_{0}, S_{1}, S_{2})` or :math:`(C, S_{0}, S_{1}, S_{2})`,
1285
+ where :math:`S=\text{output\_size}`.
1286
+
1287
+ Examples:
1288
+ >>> # target output size of 5x7x9
1289
+ >>> m = nn.AdaptiveAvgPool3d((5, 7, 9))
1290
+ >>> input = torch.randn(1, 64, 8, 9, 10)
1291
+ >>> output = m(input)
1292
+ >>> # target output size of 7x7x7 (cube)
1293
+ >>> m = nn.AdaptiveAvgPool3d(7)
1294
+ >>> input = torch.randn(1, 64, 10, 9, 8)
1295
+ >>> output = m(input)
1296
+ >>> # target output size of 7x9x8
1297
+ >>> m = nn.AdaptiveAvgPool3d((7, None, None))
1298
+ >>> input = torch.randn(1, 64, 10, 9, 8)
1299
+ >>> output = m(input)
1300
+
1301
+ """
1302
+
1303
+ output_size: _size_3_opt_t
1304
+
1305
+ def forward(self, input: Tensor) -> Tensor:
1306
+ return F.adaptive_avg_pool3d(input, self.output_size)
venv/lib/python3.10/site-packages/torch/nn/modules/rnn.py ADDED
@@ -0,0 +1,1480 @@
1
+ import math
2
+ import warnings
3
+ import numbers
4
+ import weakref
5
+ from typing import List, Tuple, Optional, overload
6
+
7
+ import torch
8
+ from torch import Tensor
9
+ from .module import Module
10
+ from ..parameter import Parameter
11
+ from ..utils.rnn import PackedSequence
12
+ from .. import init
13
+ from ... import _VF
14
+
15
+ __all__ = ['RNNBase', 'RNN', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell', 'LSTMCell', 'GRUCell']
16
+
17
+ _rnn_impls = {
18
+ 'RNN_TANH': _VF.rnn_tanh,
19
+ 'RNN_RELU': _VF.rnn_relu,
20
+ }
21
+
22
+
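+ # Used by ``permute_hidden`` on RNN/LSTM/GRU to reorder hidden states along the
+ # batch dimension (dim=1) when the input is a PackedSequence whose batch
+ # elements were sorted by length.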
23
+ def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
24
+ return tensor.index_select(dim, permutation)
25
+
26
+
27
+ def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
28
+ warnings.warn("apply_permutation is deprecated, please use tensor.index_select(dim, permutation) instead")
29
+ return _apply_permutation(tensor, permutation, dim)
30
+
31
+
32
+ class RNNBase(Module):
33
+ r"""Base class for RNN modules (RNN, LSTM, GRU).
34
+
35
+ Implements aspects of RNNs shared by the RNN, LSTM, and GRU classes, such as module initialization
36
+ and utility methods for parameter storage management.
37
+
38
+ .. note::
39
+ The forward method is not implemented by the RNNBase class.
40
+
41
+ .. note::
42
+ LSTM and GRU classes override some methods implemented by RNNBase.
43
+ """
44
+
45
+ __constants__ = ['mode', 'input_size', 'hidden_size', 'num_layers', 'bias',
46
+ 'batch_first', 'dropout', 'bidirectional', 'proj_size']
47
+ __jit_unused_properties__ = ['all_weights']
48
+
49
+ mode: str
50
+ input_size: int
51
+ hidden_size: int
52
+ num_layers: int
53
+ bias: bool
54
+ batch_first: bool
55
+ dropout: float
56
+ bidirectional: bool
57
+ proj_size: int
58
+
59
+ def __init__(self, mode: str, input_size: int, hidden_size: int,
60
+ num_layers: int = 1, bias: bool = True, batch_first: bool = False,
61
+ dropout: float = 0., bidirectional: bool = False, proj_size: int = 0,
62
+ device=None, dtype=None) -> None:
63
+ factory_kwargs = {'device': device, 'dtype': dtype}
64
+ super().__init__()
65
+ self.mode = mode
66
+ self.input_size = input_size
67
+ self.hidden_size = hidden_size
68
+ self.num_layers = num_layers
69
+ self.bias = bias
70
+ self.batch_first = batch_first
71
+ self.dropout = float(dropout)
72
+ self.bidirectional = bidirectional
73
+ self.proj_size = proj_size
74
+ self._flat_weight_refs: List[Optional[weakref.ReferenceType[Parameter]]] = []
75
+ num_directions = 2 if bidirectional else 1
76
+
77
+ if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or \
78
+ isinstance(dropout, bool):
79
+ raise ValueError("dropout should be a number in range [0, 1] "
80
+ "representing the probability of an element being "
81
+ "zeroed")
82
+ if dropout > 0 and num_layers == 1:
83
+ warnings.warn("dropout option adds dropout after all but last "
84
+ "recurrent layer, so non-zero dropout expects "
85
+ f"num_layers greater than 1, but got dropout={dropout} and "
86
+ f"num_layers={num_layers}")
87
+
88
+ if not isinstance(hidden_size, int):
89
+ raise TypeError(f"hidden_size should be of type int, got: {type(hidden_size).__name__}")
90
+ if hidden_size <= 0:
91
+ raise ValueError("hidden_size must be greater than zero")
92
+ if num_layers <= 0:
93
+ raise ValueError("num_layers must be greater than zero")
94
+ if proj_size < 0:
95
+ raise ValueError("proj_size should be a positive integer or zero to disable projections")
96
+ if proj_size >= hidden_size:
97
+ raise ValueError("proj_size has to be smaller than hidden_size")
98
+
99
+ if mode == 'LSTM':
100
+ gate_size = 4 * hidden_size
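+             # 4x: the input, forget, cell and output gate weights are stacked along dim 0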
101
+ elif mode == 'GRU':
102
+ gate_size = 3 * hidden_size
103
+ elif mode == 'RNN_TANH':
104
+ gate_size = hidden_size
105
+ elif mode == 'RNN_RELU':
106
+ gate_size = hidden_size
107
+ else:
108
+ raise ValueError("Unrecognized RNN mode: " + mode)
109
+
110
+ self._flat_weights_names = []
111
+ self._all_weights = []
112
+ for layer in range(num_layers):
113
+ for direction in range(num_directions):
114
+ real_hidden_size = proj_size if proj_size > 0 else hidden_size
115
+ layer_input_size = input_size if layer == 0 else real_hidden_size * num_directions
116
+
117
+ w_ih = Parameter(torch.empty((gate_size, layer_input_size), **factory_kwargs))
118
+ w_hh = Parameter(torch.empty((gate_size, real_hidden_size), **factory_kwargs))
119
+ b_ih = Parameter(torch.empty(gate_size, **factory_kwargs))
120
+ # Second bias vector included for CuDNN compatibility. Only one
121
+ # bias vector is needed in standard definition.
122
+ b_hh = Parameter(torch.empty(gate_size, **factory_kwargs))
123
+ layer_params: Tuple[Tensor, ...] = ()
124
+ if self.proj_size == 0:
125
+ if bias:
126
+ layer_params = (w_ih, w_hh, b_ih, b_hh)
127
+ else:
128
+ layer_params = (w_ih, w_hh)
129
+ else:
130
+ w_hr = Parameter(torch.empty((proj_size, hidden_size), **factory_kwargs))
131
+ if bias:
132
+ layer_params = (w_ih, w_hh, b_ih, b_hh, w_hr)
133
+ else:
134
+ layer_params = (w_ih, w_hh, w_hr)
135
+
136
+ suffix = '_reverse' if direction == 1 else ''
137
+ param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
138
+ if bias:
139
+ param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
140
+ if self.proj_size > 0:
141
+ param_names += ['weight_hr_l{}{}']
142
+ param_names = [x.format(layer, suffix) for x in param_names]
143
+
144
+ for name, param in zip(param_names, layer_params):
145
+ setattr(self, name, param)
146
+ self._flat_weights_names.extend(param_names)
147
+ self._all_weights.append(param_names)
148
+
149
+ self._init_flat_weights()
150
+
151
+ self.reset_parameters()
152
+
153
+ def _init_flat_weights(self):
154
+ self._flat_weights = [getattr(self, wn) if hasattr(self, wn) else None
155
+ for wn in self._flat_weights_names]
156
+ self._flat_weight_refs = [weakref.ref(w) if w is not None else None
157
+ for w in self._flat_weights]
158
+ self.flatten_parameters()
159
+
160
+ def __setattr__(self, attr, value):
161
+ if hasattr(self, "_flat_weights_names") and attr in self._flat_weights_names:
162
+ # keep self._flat_weights up to date if you do self.weight = ...
163
+ idx = self._flat_weights_names.index(attr)
164
+ self._flat_weights[idx] = value
165
+ super().__setattr__(attr, value)
166
+
167
+ def flatten_parameters(self) -> None:
168
+ """Reset parameter data pointer so that they can use faster code paths.
169
+         """Reset parameter data pointers so that they can use faster code paths.
170
+ Right now, this works only if the module is on the GPU and cuDNN is enabled.
171
+ Otherwise, it's a no-op.
172
+ """
173
+ # Short-circuits if _flat_weights is only partially instantiated
174
+ if len(self._flat_weights) != len(self._flat_weights_names):
175
+ return
176
+
177
+ for w in self._flat_weights:
178
+ if not isinstance(w, Tensor):
179
+ return
180
+ # Short-circuits if any tensor in self._flat_weights is not acceptable to cuDNN
181
+ # or the tensors in _flat_weights are of different dtypes
182
+
183
+ first_fw = self._flat_weights[0]
184
+ dtype = first_fw.dtype
185
+ for fw in self._flat_weights:
186
+ if (not isinstance(fw.data, Tensor) or not (fw.data.dtype == dtype) or
187
+ not fw.data.is_cuda or
188
+ not torch.backends.cudnn.is_acceptable(fw.data)):
189
+ return
190
+
191
+ # If any parameters alias, we fall back to the slower, copying code path. This is
192
+ # a sufficient check, because overlapping parameter buffers that don't completely
193
+ # alias would break the assumptions of the uniqueness check in
194
+ # Module.named_parameters().
195
+ unique_data_ptrs = {p.data_ptr() for p in self._flat_weights}
196
+ if len(unique_data_ptrs) != len(self._flat_weights):
197
+ return
198
+
199
+ with torch.cuda.device_of(first_fw):
200
+ import torch.backends.cudnn.rnn as rnn
201
+
202
+ # Note: no_grad() is necessary since _cudnn_rnn_flatten_weight is
203
+ # an inplace operation on self._flat_weights
204
+ with torch.no_grad():
205
+ if torch._use_cudnn_rnn_flatten_weight():
206
+ num_weights = 4 if self.bias else 2
207
+ if self.proj_size > 0:
208
+ num_weights += 1
209
+ torch._cudnn_rnn_flatten_weight(
210
+ self._flat_weights, num_weights,
211
+ self.input_size, rnn.get_cudnn_mode(self.mode),
212
+ self.hidden_size, self.proj_size, self.num_layers,
213
+ self.batch_first, bool(self.bidirectional))
214
+
215
+ def _apply(self, fn, recurse=True):
216
+ self._flat_weight_refs = []
217
+ ret = super()._apply(fn, recurse)
218
+
219
+ # Resets _flat_weights
220
+         # Note: be very careful before removing this, as 3rd-party device types
221
+ # likely rely on this behavior to properly .to() modules like LSTM.
222
+ self._init_flat_weights()
223
+
224
+ return ret
225
+
226
+ def reset_parameters(self) -> None:
227
+ stdv = 1.0 / math.sqrt(self.hidden_size) if self.hidden_size > 0 else 0
228
+ for weight in self.parameters():
229
+ init.uniform_(weight, -stdv, stdv)
230
+
231
+ def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None:
232
+ if not torch.jit.is_scripting():
233
+ if input.dtype != self._flat_weights[0].dtype and not torch._C._is_any_autocast_enabled():
234
+ raise ValueError(f'input must have the type {self._flat_weights[0].dtype}, got type {input.dtype}')
235
+ expected_input_dim = 2 if batch_sizes is not None else 3
236
+ if input.dim() != expected_input_dim:
237
+ raise RuntimeError(
238
+ f'input must have {expected_input_dim} dimensions, got {input.dim()}')
239
+ if self.input_size != input.size(-1):
240
+ raise RuntimeError(
241
+ f'input.size(-1) must be equal to input_size. Expected {self.input_size}, got {input.size(-1)}')
242
+
243
+ def get_expected_hidden_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]:
244
+ if batch_sizes is not None:
245
+ mini_batch = int(batch_sizes[0])
246
+ else:
247
+ mini_batch = input.size(0) if self.batch_first else input.size(1)
248
+ num_directions = 2 if self.bidirectional else 1
249
+ if self.proj_size > 0:
250
+ expected_hidden_size = (self.num_layers * num_directions,
251
+ mini_batch, self.proj_size)
252
+ else:
253
+ expected_hidden_size = (self.num_layers * num_directions,
254
+ mini_batch, self.hidden_size)
255
+ return expected_hidden_size
256
+
257
+ def check_hidden_size(self, hx: Tensor, expected_hidden_size: Tuple[int, int, int],
258
+ msg: str = 'Expected hidden size {}, got {}') -> None:
259
+ if hx.size() != expected_hidden_size:
260
+ raise RuntimeError(msg.format(expected_hidden_size, list(hx.size())))
261
+
262
+ def _weights_have_changed(self):
263
+ # Returns True if the weight tensors have changed since the last forward pass.
264
+ # This is the case when used with torch.func.functional_call(), for example.
265
+ weights_changed = False
266
+ for ref, name in zip(self._flat_weight_refs, self._flat_weights_names):
267
+ weight = getattr(self, name) if hasattr(self, name) else None
268
+ if weight is not None and ref is not None and ref() is not weight:
269
+ weights_changed = True
270
+ break
271
+ return weights_changed
272
+
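+ # A minimal sketch of when the check above fires (illustrative, not part
+ # of the module; assumes torch.func is available, i.e. torch >= 2.0):
+ #
+ #     lstm = nn.LSTM(2, 3)
+ #     params = {k: v.clone() for k, v in lstm.named_parameters()}
+ #     out, _ = torch.func.functional_call(lstm, params, (torch.randn(4, 1, 2),))
+ #     # Inside the call, getattr(self, name) returns the cloned tensors,
+ #     # so the stored weakrefs no longer match and the flat weights are
+ #     # rebuilt via _init_flat_weights().
+ 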
273
+ def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]):
274
+ self.check_input(input, batch_sizes)
275
+ expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
276
+
277
+ self.check_hidden_size(hidden, expected_hidden_size)
278
+
279
+ def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]):
280
+ if permutation is None:
281
+ return hx
282
+ return _apply_permutation(hx, permutation)
283
+
285
+ def extra_repr(self) -> str:
286
+ s = '{input_size}, {hidden_size}'
287
+ if self.proj_size != 0:
288
+ s += ', proj_size={proj_size}'
289
+ if self.num_layers != 1:
290
+ s += ', num_layers={num_layers}'
291
+ if self.bias is not True:
292
+ s += ', bias={bias}'
293
+ if self.batch_first is not False:
294
+ s += ', batch_first={batch_first}'
295
+ if self.dropout != 0:
296
+ s += ', dropout={dropout}'
297
+ if self.bidirectional is not False:
298
+ s += ', bidirectional={bidirectional}'
299
+ return s.format(**self.__dict__)
300
+
301
+ def _update_flat_weights(self):
302
+ if not torch.jit.is_scripting():
303
+ if self._weights_have_changed():
304
+ self._init_flat_weights()
305
+
306
+ def __getstate__(self):
307
+ # If weights have been changed, update the _flat_weights in __getstate__ here.
308
+ self._update_flat_weights()
309
+ # Don't serialize the weight references.
310
+ state = self.__dict__.copy()
311
+ del state['_flat_weight_refs']
312
+ return state
313
+
314
+ def __setstate__(self, d):
315
+ super().__setstate__(d)
316
+ if 'all_weights' in d:
317
+ self._all_weights = d['all_weights']
318
+ # In PyTorch 1.8 we added a proj_size member variable to LSTM.
319
+ # LSTMs that were serialized via torch.save(module) before PyTorch 1.8
320
+ # don't have it, so to preserve compatibility we set proj_size here.
321
+ if 'proj_size' not in d:
322
+ self.proj_size = 0
323
+
324
+ if not isinstance(self._all_weights[0][0], str):
325
+ num_layers = self.num_layers
326
+ num_directions = 2 if self.bidirectional else 1
327
+ self._flat_weights_names = []
328
+ self._all_weights = []
329
+ for layer in range(num_layers):
330
+ for direction in range(num_directions):
331
+ suffix = '_reverse' if direction == 1 else ''
332
+ weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}', 'bias_ih_l{}{}',
333
+ 'bias_hh_l{}{}', 'weight_hr_l{}{}']
334
+ weights = [x.format(layer, suffix) for x in weights]
335
+ if self.bias:
336
+ if self.proj_size > 0:
337
+ self._all_weights += [weights]
338
+ self._flat_weights_names.extend(weights)
339
+ else:
340
+ self._all_weights += [weights[:4]]
341
+ self._flat_weights_names.extend(weights[:4])
342
+ else:
343
+ if self.proj_size > 0:
344
+ self._all_weights += [weights[:2]] + [weights[-1:]]
345
+ self._flat_weights_names.extend(weights[:2] + [weights[-1:]])
346
+ else:
347
+ self._all_weights += [weights[:2]]
348
+ self._flat_weights_names.extend(weights[:2])
349
+ self._flat_weights = [getattr(self, wn) if hasattr(self, wn) else None
350
+ for wn in self._flat_weights_names]
351
+
352
+ self._flat_weight_refs = [weakref.ref(w) if w is not None else None
353
+ for w in self._flat_weights]
354
+
355
+ @property
356
+ def all_weights(self) -> List[List[Parameter]]:
357
+ return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]
358
+
359
+ def _replicate_for_data_parallel(self):
360
+ replica = super()._replicate_for_data_parallel()
361
+ # Need to copy these caches, otherwise the replica will share the same
362
+ # flat weights list.
363
+ replica._flat_weights = replica._flat_weights[:]
364
+ replica._flat_weights_names = replica._flat_weights_names[:]
365
+ return replica
366
+
367
+
368
+ class RNN(RNNBase):
369
+ r"""__init__(input_size,hidden_size,num_layers=1,nonlinearity='tanh',bias=True,batch_first=False,dropout=0.0,bidirectional=False,device=None,dtype=None)
370
+
371
+ Apply a multi-layer Elman RNN with :math:`\tanh` or :math:`\text{ReLU}`
372
+ non-linearity to an input sequence. For each element in the input sequence,
373
+ each layer computes the following function:
374
+
375
+ .. math::
376
+ h_t = \tanh(x_t W_{ih}^T + b_{ih} + h_{t-1}W_{hh}^T + b_{hh})
377
+
378
+ where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is
379
+ the input at time `t`, and :math:`h_{(t-1)}` is the hidden state of the
+ layer at time `t-1` or the initial hidden state at time `0`.
381
+ If :attr:`nonlinearity` is ``'relu'``, then :math:`\text{ReLU}` is used instead of :math:`\tanh`.
382
+
383
+ .. code-block:: python
+ 
+     # Efficient implementation equivalent to the following with bidirectional=False
+     def forward(x, h_0=None):
+         if batch_first:
+             x = x.transpose(0, 1)
+         seq_len, batch_size, _ = x.size()
+         if h_0 is None:
+             h_0 = torch.zeros(num_layers, batch_size, hidden_size)
+         h_t_minus_1 = h_0
+         output = []
+         for t in range(seq_len):
+             # start from the previous step's hidden states so the in-place
+             # writes below do not clobber h_t_minus_1
+             h_t = h_t_minus_1.clone()
+             for layer in range(num_layers):
+                 # layer 0 consumes the input; deeper layers consume the
+                 # current-step hidden state of the layer below
+                 input_t = x[t] if layer == 0 else h_t[layer - 1]
+                 h_t[layer] = torch.tanh(
+                     input_t @ weight_ih[layer].T
+                     + bias_ih[layer]
+                     + h_t_minus_1[layer] @ weight_hh[layer].T
+                     + bias_hh[layer]
+                 )
+             output.append(h_t[-1])
+             h_t_minus_1 = h_t
+         output = torch.stack(output)
+         if batch_first:
+             output = output.transpose(0, 1)
+         return output, h_t
409
+
410
+ Args:
411
+ input_size: The number of expected features in the input `x`
412
+ hidden_size: The number of features in the hidden state `h`
413
+ num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
414
+ would mean stacking two RNNs together to form a `stacked RNN`,
415
+ with the second RNN taking in outputs of the first RNN and
416
+ computing the final results. Default: 1
417
+ nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
418
+ bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
419
+ Default: ``True``
420
+ batch_first: If ``True``, then the input and output tensors are provided
421
+ as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
422
+ Note that this does not apply to hidden or cell states. See the
423
+ Inputs/Outputs sections below for details. Default: ``False``
424
+ dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
425
+ RNN layer except the last layer, with dropout probability equal to
426
+ :attr:`dropout`. Default: 0
427
+ bidirectional: If ``True``, becomes a bidirectional RNN. Default: ``False``
428
+
429
+ Inputs: input, h_0
430
+ * **input**: tensor of shape :math:`(L, H_{in})` for unbatched input,
431
+ :math:`(L, N, H_{in})` when ``batch_first=False`` or
432
+ :math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
433
+ the input sequence. The input can also be a packed variable length sequence.
434
+ See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
435
+ :func:`torch.nn.utils.rnn.pack_sequence` for details.
436
+ * **h_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
437
+ :math:`(D * \text{num\_layers}, N, H_{out})` containing the initial hidden
438
+ state for the input sequence batch. Defaults to zeros if not provided.
439
+
440
+ where:
441
+
442
+ .. math::
443
+ \begin{aligned}
444
+ N ={} & \text{batch size} \\
445
+ L ={} & \text{sequence length} \\
446
+ D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
447
+ H_{in} ={} & \text{input\_size} \\
448
+ H_{out} ={} & \text{hidden\_size}
449
+ \end{aligned}
450
+
451
+ Outputs: output, h_n
452
+ * **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input,
453
+ :math:`(L, N, D * H_{out})` when ``batch_first=False`` or
454
+ :math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
455
+ `(h_t)` from the last layer of the RNN, for each `t`. If a
456
+ :class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output
457
+ will also be a packed sequence.
458
+ * **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
459
+ :math:`(D * \text{num\_layers}, N, H_{out})` containing the final hidden state
460
+ for each element in the batch.
461
+
462
+ Attributes:
463
+ weight_ih_l[k]: the learnable input-hidden weights of the k-th layer,
464
+ of shape `(hidden_size, input_size)` for `k = 0`. Otherwise, the shape is
465
+ `(hidden_size, num_directions * hidden_size)`
466
+ weight_hh_l[k]: the learnable hidden-hidden weights of the k-th layer,
467
+ of shape `(hidden_size, hidden_size)`
468
+ bias_ih_l[k]: the learnable input-hidden bias of the k-th layer,
469
+ of shape `(hidden_size)`
470
+ bias_hh_l[k]: the learnable hidden-hidden bias of the k-th layer,
471
+ of shape `(hidden_size)`
472
+
473
+ .. note::
474
+ All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
475
+ where :math:`k = \frac{1}{\text{hidden\_size}}`
476
+
477
+ .. note::
478
+ For bidirectional RNNs, forward and backward are directions 0 and 1 respectively.
479
+ Example of splitting the output layers when ``batch_first=False``:
480
+ ``output.view(seq_len, batch, num_directions, hidden_size)``.
481
+
482
+ .. note::
483
+ The ``batch_first`` argument is ignored for unbatched inputs.
484
+
485
+ .. include:: ../cudnn_rnn_determinism.rst
486
+
487
+ .. include:: ../cudnn_persistent_rnn.rst
488
+
489
+ Examples::
490
+
491
+ >>> rnn = nn.RNN(10, 20, 2)
492
+ >>> input = torch.randn(5, 3, 10)
493
+ >>> h0 = torch.randn(2, 3, 20)
494
+ >>> output, hn = rnn(input, h0)
495
+ """
496
+
497
+ @overload
498
+ def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1,
499
+ nonlinearity: str = 'tanh', bias: bool = True, batch_first: bool = False,
500
+ dropout: float = 0., bidirectional: bool = False, device=None,
501
+ dtype=None) -> None:
502
+ ...
503
+
504
+ @overload
505
+ def __init__(self, *args, **kwargs):
506
+ ...
507
+
508
+ def __init__(self, *args, **kwargs):
509
+ if 'proj_size' in kwargs:
510
+ raise ValueError("proj_size argument is only supported for LSTM, not RNN or GRU")
511
+ if len(args) > 3:
512
+ self.nonlinearity = args[3]
513
+ args = args[:3] + args[4:]
514
+ else:
515
+ self.nonlinearity = kwargs.pop('nonlinearity', 'tanh')
516
+ if self.nonlinearity == 'tanh':
517
+ mode = 'RNN_TANH'
518
+ elif self.nonlinearity == 'relu':
519
+ mode = 'RNN_RELU'
520
+ else:
521
+ raise ValueError(f"Unknown nonlinearity '{self.nonlinearity}'. Select from 'tanh' or 'relu'.")
522
+ super().__init__(mode, *args, **kwargs)
523
+
524
+ @overload
525
+ @torch._jit_internal._overload_method # noqa: F811
526
+ def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
527
+ pass
528
+
529
+ @overload
530
+ @torch._jit_internal._overload_method # noqa: F811
531
+ def forward(self, input: PackedSequence, hx: Optional[Tensor] = None) -> Tuple[PackedSequence, Tensor]:
532
+ pass
533
+
534
+ def forward(self, input, hx=None): # noqa: F811
535
+ self._update_flat_weights()
536
+
537
+ num_directions = 2 if self.bidirectional else 1
538
+ orig_input = input
539
+
540
+ if isinstance(orig_input, PackedSequence):
541
+ input, batch_sizes, sorted_indices, unsorted_indices = input
542
+ max_batch_size = batch_sizes[0]
543
+ # script() is unhappy when max_batch_size is different type in cond branches, so we duplicate
544
+ if hx is None:
545
+ hx = torch.zeros(self.num_layers * num_directions,
546
+ max_batch_size, self.hidden_size,
547
+ dtype=input.dtype, device=input.device)
548
+ else:
549
+ # Each batch of the hidden state should match the input sequence that
550
+ # the user believes they are passing in.
551
+ hx = self.permute_hidden(hx, sorted_indices)
552
+ else:
553
+ batch_sizes = None
554
+ if input.dim() not in (2, 3):
555
+ raise ValueError(f"RNN: Expected input to be 2D or 3D, got {input.dim()}D tensor instead")
556
+ is_batched = input.dim() == 3
557
+ batch_dim = 0 if self.batch_first else 1
558
+ if not is_batched:
559
+ input = input.unsqueeze(batch_dim)
560
+ if hx is not None:
561
+ if hx.dim() != 2:
562
+ raise RuntimeError(
563
+ f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor")
564
+ hx = hx.unsqueeze(1)
565
+ else:
566
+ if hx is not None and hx.dim() != 3:
567
+ raise RuntimeError(
568
+ f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor")
569
+ max_batch_size = input.size(0) if self.batch_first else input.size(1)
570
+ sorted_indices = None
571
+ unsorted_indices = None
572
+ if hx is None:
573
+ hx = torch.zeros(self.num_layers * num_directions,
574
+ max_batch_size, self.hidden_size,
575
+ dtype=input.dtype, device=input.device)
576
+ else:
577
+ # Each batch of the hidden state should match the input sequence that
578
+ # the user believes they are passing in.
579
+ hx = self.permute_hidden(hx, sorted_indices)
580
+
581
+ assert hx is not None
582
+ self.check_forward_args(input, hx, batch_sizes)
583
+ assert self.mode == 'RNN_TANH' or self.mode == 'RNN_RELU'
584
+ if batch_sizes is None:
585
+ if self.mode == 'RNN_TANH':
586
+ result = _VF.rnn_tanh(input, hx, self._flat_weights, self.bias, self.num_layers,
587
+ self.dropout, self.training, self.bidirectional,
588
+ self.batch_first)
589
+ else:
590
+ result = _VF.rnn_relu(input, hx, self._flat_weights, self.bias, self.num_layers,
591
+ self.dropout, self.training, self.bidirectional,
592
+ self.batch_first)
593
+ else:
594
+ if self.mode == 'RNN_TANH':
595
+ result = _VF.rnn_tanh(input, batch_sizes, hx, self._flat_weights, self.bias,
596
+ self.num_layers, self.dropout, self.training,
597
+ self.bidirectional)
598
+ else:
599
+ result = _VF.rnn_relu(input, batch_sizes, hx, self._flat_weights, self.bias,
600
+ self.num_layers, self.dropout, self.training,
601
+ self.bidirectional)
602
+
603
+ output = result[0]
604
+ hidden = result[1]
605
+
606
+ if isinstance(orig_input, PackedSequence):
607
+ output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
608
+ return output_packed, self.permute_hidden(hidden, unsorted_indices)
609
+
610
+ if not is_batched: # type: ignore[possibly-undefined]
611
+ output = output.squeeze(batch_dim) # type: ignore[possibly-undefined]
612
+ hidden = hidden.squeeze(1)
613
+
614
+ return output, self.permute_hidden(hidden, unsorted_indices)
615
+
616
+ # XXX: LSTM and GRU implementation is different from RNNBase, this is because:
617
+ # 1. we want to support nn.LSTM and nn.GRU in TorchScript and TorchScript in
618
+ # its current state could not support the python Union Type or Any Type
619
+ # 2. TorchScript static typing does not allow a Function or Callable type in
620
+ # Dict values, so we have to separately call _VF instead of using _rnn_impls
621
+ # 3. This is a temporary, transitional state; we wanted to land it in time
+ #    for the release
623
+ #
624
+ # More discussion details in https://github.com/pytorch/pytorch/pull/23266
625
+ #
626
+ # TODO: remove the overriding implementations for LSTM and GRU when TorchScript
627
+ # supports expressing these two modules generally.
628
+
629
+
630
+ class LSTM(RNNBase):
631
+ r"""__init__(input_size,hidden_size,num_layers=1,bias=True,batch_first=False,dropout=0.0,bidirectional=False,proj_size=0,device=None,dtype=None)
632
+
633
+ Apply a multi-layer long short-term memory (LSTM) RNN to an input sequence.
634
+ For each element in the input sequence, each layer computes the following
635
+ function:
636
+
637
+ .. math::
638
+ \begin{array}{ll} \\
639
+ i_t = \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{t-1} + b_{hi}) \\
640
+ f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{t-1} + b_{hf}) \\
641
+ g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{t-1} + b_{hg}) \\
642
+ o_t = \sigma(W_{io} x_t + b_{io} + W_{ho} h_{t-1} + b_{ho}) \\
643
+ c_t = f_t \odot c_{t-1} + i_t \odot g_t \\
644
+ h_t = o_t \odot \tanh(c_t) \\
645
+ \end{array}
646
+
647
+ where :math:`h_t` is the hidden state at time `t`, :math:`c_t` is the cell
648
+ state at time `t`, :math:`x_t` is the input at time `t`, :math:`h_{t-1}`
649
+ is the hidden state of the layer at time `t-1` or the initial hidden
650
+ state at time `0`, and :math:`i_t`, :math:`f_t`, :math:`g_t`,
651
+ :math:`o_t` are the input, forget, cell, and output gates, respectively.
652
+ :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.
653
+
654
+ In a multilayer LSTM, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
655
+ (:math:`l \ge 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
656
+ dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
657
+ variable which is :math:`0` with probability :attr:`dropout`.
658
+
659
+ If ``proj_size > 0`` is specified, LSTM with projections will be used. This changes
660
+ the LSTM cell in the following way. First, the dimension of :math:`h_t` will be changed from
661
+ ``hidden_size`` to ``proj_size`` (dimensions of :math:`W_{hi}` will be changed accordingly).
662
+ Second, the output hidden state of each layer will be multiplied by a learnable projection
663
+ matrix: :math:`h_t = W_{hr}h_t`. Note that as a consequence of this, the output
664
+ of the LSTM network will have a different shape as well. See the Inputs/Outputs sections below for the exact
665
+ dimensions of all variables. You can find more details in https://arxiv.org/abs/1402.1128.
666
+
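+ For example (a minimal sketch; shapes follow the Inputs/Outputs sections below)::
+ 
+ >>> rnn = nn.LSTM(input_size=10, hidden_size=20, num_layers=2, proj_size=5)
+ >>> output, (h_n, c_n) = rnn(torch.randn(7, 3, 10))
+ >>> output.shape # feature dimension is proj_size, not hidden_size
+ torch.Size([7, 3, 5])
+ >>> h_n.shape, c_n.shape
+ (torch.Size([2, 3, 5]), torch.Size([2, 3, 20]))
+ 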
667
+ Args:
668
+ input_size: The number of expected features in the input `x`
669
+ hidden_size: The number of features in the hidden state `h`
670
+ num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
671
+ would mean stacking two LSTMs together to form a `stacked LSTM`,
672
+ with the second LSTM taking in outputs of the first LSTM and
673
+ computing the final results. Default: 1
674
+ bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
675
+ Default: ``True``
676
+ batch_first: If ``True``, then the input and output tensors are provided
677
+ as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
678
+ Note that this does not apply to hidden or cell states. See the
679
+ Inputs/Outputs sections below for details. Default: ``False``
680
+ dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
681
+ LSTM layer except the last layer, with dropout probability equal to
682
+ :attr:`dropout`. Default: 0
683
+ bidirectional: If ``True``, becomes a bidirectional LSTM. Default: ``False``
684
+ proj_size: If ``> 0``, will use LSTM with projections of corresponding size. Default: 0
685
+
686
+ Inputs: input, (h_0, c_0)
687
+ * **input**: tensor of shape :math:`(L, H_{in})` for unbatched input,
688
+ :math:`(L, N, H_{in})` when ``batch_first=False`` or
689
+ :math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
690
+ the input sequence. The input can also be a packed variable length sequence.
691
+ See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
692
+ :func:`torch.nn.utils.rnn.pack_sequence` for details.
693
+ * **h_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
694
+ :math:`(D * \text{num\_layers}, N, H_{out})` containing the
695
+ initial hidden state for each element in the input sequence.
696
+ Defaults to zeros if (h_0, c_0) is not provided.
697
+ * **c_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{cell})` for unbatched input or
698
+ :math:`(D * \text{num\_layers}, N, H_{cell})` containing the
699
+ initial cell state for each element in the input sequence.
700
+ Defaults to zeros if (h_0, c_0) is not provided.
701
+
702
+ where:
703
+
704
+ .. math::
705
+ \begin{aligned}
706
+ N ={} & \text{batch size} \\
707
+ L ={} & \text{sequence length} \\
708
+ D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
709
+ H_{in} ={} & \text{input\_size} \\
710
+ H_{cell} ={} & \text{hidden\_size} \\
711
+ H_{out} ={} & \text{proj\_size if } \text{proj\_size}>0 \text{ otherwise hidden\_size} \\
712
+ \end{aligned}
713
+
714
+ Outputs: output, (h_n, c_n)
715
+ * **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input,
716
+ :math:`(L, N, D * H_{out})` when ``batch_first=False`` or
717
+ :math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
718
+ `(h_t)` from the last layer of the LSTM, for each `t`. If a
719
+ :class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output
720
+ will also be a packed sequence. When ``bidirectional=True``, `output` will contain
721
+ a concatenation of the forward and reverse hidden states at each time step in the sequence.
722
+ * **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
723
+ :math:`(D * \text{num\_layers}, N, H_{out})` containing the
724
+ final hidden state for each element in the sequence. When ``bidirectional=True``,
725
+ `h_n` will contain a concatenation of the final forward and reverse hidden states, respectively.
726
+ * **c_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{cell})` for unbatched input or
727
+ :math:`(D * \text{num\_layers}, N, H_{cell})` containing the
728
+ final cell state for each element in the sequence. When ``bidirectional=True``,
729
+ `c_n` will contain a concatenation of the final forward and reverse cell states, respectively.
730
+
731
+ Attributes:
732
+ weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
733
+ `(W_ii|W_if|W_ig|W_io)`, of shape `(4*hidden_size, input_size)` for `k = 0`.
734
+ Otherwise, the shape is `(4*hidden_size, num_directions * hidden_size)`. If
735
+ ``proj_size > 0`` was specified, the shape will be
736
+ `(4*hidden_size, num_directions * proj_size)` for `k > 0`
737
+ weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
738
+ `(W_hi|W_hf|W_hg|W_ho)`, of shape `(4*hidden_size, hidden_size)`. If ``proj_size > 0``
739
+ was specified, the shape will be `(4*hidden_size, proj_size)`.
740
+ bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
741
+ `(b_ii|b_if|b_ig|b_io)`, of shape `(4*hidden_size)`
742
+ bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
743
+ `(b_hi|b_hf|b_hg|b_ho)`, of shape `(4*hidden_size)`
744
+ weight_hr_l[k] : the learnable projection weights of the :math:`\text{k}^{th}` layer
745
+ of shape `(proj_size, hidden_size)`. Only present when ``proj_size > 0`` was
746
+ specified.
747
+ weight_ih_l[k]_reverse: Analogous to `weight_ih_l[k]` for the reverse direction.
748
+ Only present when ``bidirectional=True``.
749
+ weight_hh_l[k]_reverse: Analogous to `weight_hh_l[k]` for the reverse direction.
750
+ Only present when ``bidirectional=True``.
751
+ bias_ih_l[k]_reverse: Analogous to `bias_ih_l[k]` for the reverse direction.
752
+ Only present when ``bidirectional=True``.
753
+ bias_hh_l[k]_reverse: Analogous to `bias_hh_l[k]` for the reverse direction.
754
+ Only present when ``bidirectional=True``.
755
+ weight_hr_l[k]_reverse: Analogous to `weight_hr_l[k]` for the reverse direction.
756
+ Only present when ``bidirectional=True`` and ``proj_size > 0`` was specified.
757
+
758
+ .. note::
759
+ All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
760
+ where :math:`k = \frac{1}{\text{hidden\_size}}`
761
+
762
+ .. note::
763
+ For bidirectional LSTMs, forward and backward are directions 0 and 1 respectively.
764
+ Example of splitting the output layers when ``batch_first=False``:
765
+ ``output.view(seq_len, batch, num_directions, hidden_size)``.
766
+
767
+ .. note::
768
+ For bidirectional LSTMs, `h_n` is not equivalent to the last element of `output`; the
769
+ former contains the final forward and reverse hidden states, while the latter contains the
770
+ final forward hidden state and the initial reverse hidden state.
771
+
772
+ .. note::
773
+ The ``batch_first`` argument is ignored for unbatched inputs.
774
+
775
+ .. note::
776
+ ``proj_size`` should be smaller than ``hidden_size``.
777
+
778
+ .. include:: ../cudnn_rnn_determinism.rst
779
+
780
+ .. include:: ../cudnn_persistent_rnn.rst
781
+
782
+ Examples::
783
+
784
+ >>> rnn = nn.LSTM(10, 20, 2)
785
+ >>> input = torch.randn(5, 3, 10)
786
+ >>> h0 = torch.randn(2, 3, 20)
787
+ >>> c0 = torch.randn(2, 3, 20)
788
+ >>> output, (hn, cn) = rnn(input, (h0, c0))
789
+ """
790
+
791
+ @overload
792
+ def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True,
793
+ batch_first: bool = False, dropout: float = 0., bidirectional: bool = False,
794
+ proj_size: int = 0, device=None, dtype=None) -> None:
795
+ ...
796
+
797
+ @overload
798
+ def __init__(self, *args, **kwargs):
799
+ ...
800
+
801
+ def __init__(self, *args, **kwargs):
802
+ super().__init__('LSTM', *args, **kwargs)
803
+
804
+ def get_expected_cell_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]:
805
+ if batch_sizes is not None:
806
+ mini_batch = int(batch_sizes[0])
807
+ else:
808
+ mini_batch = input.size(0) if self.batch_first else input.size(1)
809
+ num_directions = 2 if self.bidirectional else 1
810
+ expected_hidden_size = (self.num_layers * num_directions,
811
+ mini_batch, self.hidden_size)
812
+ return expected_hidden_size
813
+
814
+ # In the future, we should prevent mypy from applying contravariance rules here.
815
+ # See torch/nn/modules/module.py::_forward_unimplemented
816
+ def check_forward_args(self, # type: ignore[override]
817
+ input: Tensor,
818
+ hidden: Tuple[Tensor, Tensor],
819
+ batch_sizes: Optional[Tensor],
820
+ ):
821
+ self.check_input(input, batch_sizes)
822
+ self.check_hidden_size(hidden[0], self.get_expected_hidden_size(input, batch_sizes),
823
+ 'Expected hidden[0] size {}, got {}')
824
+ self.check_hidden_size(hidden[1], self.get_expected_cell_size(input, batch_sizes),
825
+ 'Expected hidden[1] size {}, got {}')
826
+
827
+ # Same as above, see torch/nn/modules/module.py::_forward_unimplemented
828
+ def permute_hidden(self, # type: ignore[override]
829
+ hx: Tuple[Tensor, Tensor],
830
+ permutation: Optional[Tensor]
831
+ ) -> Tuple[Tensor, Tensor]:
832
+ if permutation is None:
833
+ return hx
834
+ return _apply_permutation(hx[0], permutation), _apply_permutation(hx[1], permutation)
835
+
836
+ # Same as above, see torch/nn/modules/module.py::_forward_unimplemented
837
+ @overload # type: ignore[override]
838
+ @torch._jit_internal._overload_method # noqa: F811
839
+ def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None
840
+ ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]: # noqa: F811
841
+ pass
842
+
843
+ # Same as above, see torch/nn/modules/module.py::_forward_unimplemented
844
+ @overload
845
+ @torch._jit_internal._overload_method # noqa: F811
846
+ def forward(self, input: PackedSequence, hx: Optional[Tuple[Tensor, Tensor]] = None
847
+ ) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]]: # noqa: F811
848
+ pass
849
+
850
+ def forward(self, input, hx=None): # noqa: F811
851
+ self._update_flat_weights()
852
+
853
+ orig_input = input
854
+ # xxx: isinstance check needs to be in conditional for TorchScript to compile
855
+ batch_sizes = None
856
+ do_permute = False
857
+ num_directions = 2 if self.bidirectional else 1
858
+ real_hidden_size = self.proj_size if self.proj_size > 0 else self.hidden_size
859
+ if isinstance(orig_input, PackedSequence):
860
+ input, batch_sizes, sorted_indices, unsorted_indices = input
861
+ max_batch_size = batch_sizes[0]
862
+ if hx is None:
863
+ h_zeros = torch.zeros(self.num_layers * num_directions,
864
+ max_batch_size, real_hidden_size,
865
+ dtype=input.dtype, device=input.device)
866
+ c_zeros = torch.zeros(self.num_layers * num_directions,
867
+ max_batch_size, self.hidden_size,
868
+ dtype=input.dtype, device=input.device)
869
+ hx = (h_zeros, c_zeros)
870
+ else:
871
+ # Each batch of the hidden state should match the input sequence that
872
+ # the user believes they are passing in.
873
+ hx = self.permute_hidden(hx, sorted_indices)
874
+ else:
875
+ if input.dim() not in (2, 3):
876
+ raise ValueError(f"LSTM: Expected input to be 2D or 3D, got {input.dim()}D instead")
877
+ is_batched = input.dim() == 3
878
+ batch_dim = 0 if self.batch_first else 1
879
+ if not is_batched:
880
+ input = input.unsqueeze(batch_dim)
881
+ max_batch_size = input.size(0) if self.batch_first else input.size(1)
882
+ sorted_indices = None
883
+ unsorted_indices = None
884
+ if hx is None:
885
+ h_zeros = torch.zeros(self.num_layers * num_directions,
886
+ max_batch_size, real_hidden_size,
887
+ dtype=input.dtype, device=input.device)
888
+ c_zeros = torch.zeros(self.num_layers * num_directions,
889
+ max_batch_size, self.hidden_size,
890
+ dtype=input.dtype, device=input.device)
891
+ hx = (h_zeros, c_zeros)
892
+ self.check_forward_args(input, hx, batch_sizes)
893
+ else:
894
+ if is_batched:
895
+ if (hx[0].dim() != 3 or hx[1].dim() != 3):
896
+ msg = ("For batched 3-D input, hx and cx should "
897
+ f"also be 3-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors")
898
+ raise RuntimeError(msg)
899
+ else:
900
+ if hx[0].dim() != 2 or hx[1].dim() != 2:
901
+ msg = ("For unbatched 2-D input, hx and cx should "
902
+ f"also be 2-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors")
903
+ raise RuntimeError(msg)
904
+ hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1))
905
+ # Each batch of the hidden state should match the input sequence that
906
+ # the user believes they are passing in.
907
+ self.check_forward_args(input, hx, batch_sizes)
908
+ hx = self.permute_hidden(hx, sorted_indices)
909
+
910
+ if batch_sizes is None:
911
+ result = _VF.lstm(input, hx, self._flat_weights, self.bias, self.num_layers,
912
+ self.dropout, self.training, self.bidirectional, self.batch_first)
913
+ else:
914
+ result = _VF.lstm(input, batch_sizes, hx, self._flat_weights, self.bias,
915
+ self.num_layers, self.dropout, self.training, self.bidirectional)
916
+ output = result[0]
917
+ hidden = result[1:]
918
+ # xxx: isinstance check needs to be in conditional for TorchScript to compile
919
+ if isinstance(orig_input, PackedSequence):
920
+ output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
921
+ return output_packed, self.permute_hidden(hidden, unsorted_indices)
922
+ else:
923
+ if not is_batched: # type: ignore[possibly-undefined]
924
+ output = output.squeeze(batch_dim) # type: ignore[possibly-undefined]
925
+ hidden = (hidden[0].squeeze(1), hidden[1].squeeze(1))
926
+ return output, self.permute_hidden(hidden, unsorted_indices)
927
+
928
+
929
+ class GRU(RNNBase):
930
+ r"""__init__(input_size,hidden_size,num_layers=1,bias=True,batch_first=False,dropout=0.0,bidirectional=False,device=None,dtype=None)
931
+
932
+ Apply a multi-layer gated recurrent unit (GRU) RNN to an input sequence.
933
+ For each element in the input sequence, each layer computes the following
934
+ function:
935
+
936
+ .. math::
937
+ \begin{array}{ll}
938
+ r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
939
+ z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
940
+ n_t = \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)}+ b_{hn})) \\
941
+ h_t = (1 - z_t) \odot n_t + z_t \odot h_{(t-1)}
942
+ \end{array}
943
+
944
+ where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input
945
+ at time `t`, :math:`h_{(t-1)}` is the hidden state of the layer
946
+ at time `t-1` or the initial hidden state at time `0`, and :math:`r_t`,
947
+ :math:`z_t`, :math:`n_t` are the reset, update, and new gates, respectively.
948
+ :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.
949
+
950
+ In a multilayer GRU, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
951
+ (:math:`l \ge 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
952
+ dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
953
+ variable which is :math:`0` with probability :attr:`dropout`.
954
+
955
+ Args:
956
+ input_size: The number of expected features in the input `x`
957
+ hidden_size: The number of features in the hidden state `h`
958
+ num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
959
+ would mean stacking two GRUs together to form a `stacked GRU`,
960
+ with the second GRU taking in outputs of the first GRU and
961
+ computing the final results. Default: 1
962
+ bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
963
+ Default: ``True``
964
+ batch_first: If ``True``, then the input and output tensors are provided
965
+ as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
966
+ Note that this does not apply to hidden or cell states. See the
967
+ Inputs/Outputs sections below for details. Default: ``False``
968
+ dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
969
+ GRU layer except the last layer, with dropout probability equal to
970
+ :attr:`dropout`. Default: 0
971
+ bidirectional: If ``True``, becomes a bidirectional GRU. Default: ``False``
972
+
973
+ Inputs: input, h_0
974
+ * **input**: tensor of shape :math:`(L, H_{in})` for unbatched input,
975
+ :math:`(L, N, H_{in})` when ``batch_first=False`` or
976
+ :math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
977
+ the input sequence. The input can also be a packed variable length sequence.
978
+ See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
979
+ :func:`torch.nn.utils.rnn.pack_sequence` for details.
980
+ * **h_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` or
981
+ :math:`(D * \text{num\_layers}, N, H_{out})`
982
+ containing the initial hidden state for the input sequence. Defaults to zeros if not provided.
983
+
984
+ where:
985
+
986
+ .. math::
987
+ \begin{aligned}
988
+ N ={} & \text{batch size} \\
989
+ L ={} & \text{sequence length} \\
990
+ D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
991
+ H_{in} ={} & \text{input\_size} \\
992
+ H_{out} ={} & \text{hidden\_size}
993
+ \end{aligned}
994
+
995
+ Outputs: output, h_n
996
+ * **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input,
997
+ :math:`(L, N, D * H_{out})` when ``batch_first=False`` or
998
+ :math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
999
+ `(h_t)` from the last layer of the GRU, for each `t`. If a
1000
+ :class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output
1001
+ will also be a packed sequence.
1002
+ * **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` or
1003
+ :math:`(D * \text{num\_layers}, N, H_{out})` containing the final hidden state
1004
+ for the input sequence.
1005
+
1006
+ Attributes:
1007
+ weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
1008
+ (W_ir|W_iz|W_in), of shape `(3*hidden_size, input_size)` for `k = 0`.
1009
+ Otherwise, the shape is `(3*hidden_size, num_directions * hidden_size)`
1010
+ weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
1011
+ (W_hr|W_hz|W_hn), of shape `(3*hidden_size, hidden_size)`
1012
+ bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
1013
+ (b_ir|b_iz|b_in), of shape `(3*hidden_size)`
1014
+ bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
1015
+ (b_hr|b_hz|b_hn), of shape `(3*hidden_size)`
1016
+
1017
+ .. note::
1018
+ All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
1019
+ where :math:`k = \frac{1}{\text{hidden\_size}}`
1020
+
1021
+ .. note::
1022
+ For bidirectional GRUs, forward and backward are directions 0 and 1 respectively.
1023
+ Example of splitting the output layers when ``batch_first=False``:
1024
+ ``output.view(seq_len, batch, num_directions, hidden_size)``.
1025
+
1026
+ .. note::
1027
+ The ``batch_first`` argument is ignored for unbatched inputs.
1028
+
1029
+ .. note::
1030
+ The calculation of the new gate :math:`n_t` subtly differs from the original paper and other frameworks.
1031
+ In the original implementation, the Hadamard product :math:`(\odot)` between :math:`r_t` and the
1032
+ previous hidden state :math:`h_{(t-1)}` is done before the multiplication with the weight matrix
1033
+ `W` and addition of bias:
1034
+
1035
+ .. math::
1036
+ \begin{aligned}
1037
+ n_t = \tanh(W_{in} x_t + b_{in} + W_{hn} ( r_t \odot h_{(t-1)} ) + b_{hn})
1038
+ \end{aligned}
1039
+
1040
+ This is in contrast to the PyTorch implementation, where it is done after :math:`W_{hn} h_{(t-1)}`:
1041
+
1042
+ .. math::
1043
+ \begin{aligned}
1044
+ n_t = \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)}+ b_{hn}))
1045
+ \end{aligned}
1046
+
1047
+ This implementation differs on purpose for efficiency.
1048
+
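+ A quick numeric sketch of the difference (illustrative scalar tensors,
+ ignoring the input terms)::
+ 
+ >>> W_hn, b_hn = torch.tensor(2.), torch.tensor(1.)
+ >>> r, h_prev = torch.tensor(0.5), torch.tensor(0.5)
+ >>> torch.tanh(W_hn * (r * h_prev) + b_hn) # original paper ordering
+ tensor(0.9051)
+ >>> torch.tanh(r * (W_hn * h_prev + b_hn)) # PyTorch ordering
+ tensor(0.7616)
+ 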
1049
+ .. include:: ../cudnn_persistent_rnn.rst
1050
+
1051
+ Examples::
1052
+
1053
+ >>> rnn = nn.GRU(10, 20, 2)
1054
+ >>> input = torch.randn(5, 3, 10)
1055
+ >>> h0 = torch.randn(2, 3, 20)
1056
+ >>> output, hn = rnn(input, h0)
1057
+ """
1058
+
1059
+ @overload
1060
+ def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True,
1061
+ batch_first: bool = False, dropout: float = 0., bidirectional: bool = False,
1062
+ device=None, dtype=None) -> None:
1063
+ ...
1064
+
1065
+ @overload
1066
+ def __init__(self, *args, **kwargs):
1067
+ ...
1068
+
1069
+ def __init__(self, *args, **kwargs):
1070
+ if 'proj_size' in kwargs:
1071
+ raise ValueError("proj_size argument is only supported for LSTM, not RNN or GRU")
1072
+ super().__init__('GRU', *args, **kwargs)
1073
+
1074
+ @overload # type: ignore[override]
1075
+ @torch._jit_internal._overload_method # noqa: F811
1076
+ def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: # noqa: F811
1077
+ pass
1078
+
1079
+ @overload
1080
+ @torch._jit_internal._overload_method # noqa: F811
1081
+ def forward(self, input: PackedSequence, hx: Optional[Tensor] = None) -> Tuple[PackedSequence, Tensor]: # noqa: F811
1082
+ pass
1083
+
1084
+ def forward(self, input, hx=None): # noqa: F811
1085
+ self._update_flat_weights()
1086
+
1087
+ orig_input = input
1088
+ # xxx: isinstance check needs to be in conditional for TorchScript to compile
1089
+ if isinstance(orig_input, PackedSequence):
1090
+ input, batch_sizes, sorted_indices, unsorted_indices = input
1091
+ max_batch_size = batch_sizes[0]
1092
+ if hx is None:
1093
+ num_directions = 2 if self.bidirectional else 1
1094
+ hx = torch.zeros(self.num_layers * num_directions,
1095
+ max_batch_size, self.hidden_size,
1096
+ dtype=input.dtype, device=input.device)
1097
+ else:
1098
+ # Each batch of the hidden state should match the input sequence that
1099
+ # the user believes they are passing in.
1100
+ hx = self.permute_hidden(hx, sorted_indices)
1101
+ else:
1102
+ batch_sizes = None
1103
+ if input.dim() not in (2, 3):
1104
+ raise ValueError(f"GRU: Expected input to be 2D or 3D, got {input.dim()}D instead")
1105
+ is_batched = input.dim() == 3
1106
+ batch_dim = 0 if self.batch_first else 1
1107
+ if not is_batched:
1108
+ input = input.unsqueeze(batch_dim)
1109
+ if hx is not None:
1110
+ if hx.dim() != 2:
1111
+ raise RuntimeError(
1112
+ f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor")
1113
+ hx = hx.unsqueeze(1)
1114
+ else:
1115
+ if hx is not None and hx.dim() != 3:
1116
+ raise RuntimeError(
1117
+ f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor")
1118
+ max_batch_size = input.size(0) if self.batch_first else input.size(1)
1119
+ sorted_indices = None
1120
+ unsorted_indices = None
1121
+ if hx is None:
1122
+ num_directions = 2 if self.bidirectional else 1
1123
+ hx = torch.zeros(self.num_layers * num_directions,
1124
+ max_batch_size, self.hidden_size,
1125
+ dtype=input.dtype, device=input.device)
1126
+ else:
1127
+ # Each batch of the hidden state should match the input sequence that
1128
+ # the user believes they are passing in.
1129
+ hx = self.permute_hidden(hx, sorted_indices)
1130
+
1131
+ self.check_forward_args(input, hx, batch_sizes)
1132
+ if batch_sizes is None:
1133
+ result = _VF.gru(input, hx, self._flat_weights, self.bias, self.num_layers,
1134
+ self.dropout, self.training, self.bidirectional, self.batch_first)
1135
+ else:
1136
+ result = _VF.gru(input, batch_sizes, hx, self._flat_weights, self.bias,
1137
+ self.num_layers, self.dropout, self.training, self.bidirectional)
1138
+ output = result[0]
1139
+ hidden = result[1]
1140
+
1141
+ # xxx: isinstance check needs to be in conditional for TorchScript to compile
1142
+ if isinstance(orig_input, PackedSequence):
1143
+ output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
1144
+ return output_packed, self.permute_hidden(hidden, unsorted_indices)
1145
+ else:
1146
+ if not is_batched: # type: ignore[possibly-undefined]
1147
+ output = output.squeeze(batch_dim) # type: ignore[possibly-undefined]
1148
+ hidden = hidden.squeeze(1)
1149
+
1150
+ return output, self.permute_hidden(hidden, unsorted_indices)
1151
+
1152
+
1153
+ class RNNCellBase(Module):
1154
+ __constants__ = ['input_size', 'hidden_size', 'bias']
1155
+
1156
+ input_size: int
1157
+ hidden_size: int
1158
+ bias: bool
1159
+ weight_ih: Tensor
1160
+ weight_hh: Tensor
1161
+ # WARNING: bias_ih and bias_hh purposely not defined here.
1162
+ # See https://github.com/pytorch/pytorch/issues/39670
1163
+
1164
+ def __init__(self, input_size: int, hidden_size: int, bias: bool, num_chunks: int,
1165
+ device=None, dtype=None) -> None:
1166
+ factory_kwargs = {'device': device, 'dtype': dtype}
1167
+ super().__init__()
1168
+ self.input_size = input_size
1169
+ self.hidden_size = hidden_size
1170
+ self.bias = bias
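+ # num_chunks is the number of gates stacked along dim 0 of the weight
+ # matrices: 1 for a plain RNN cell, 3 for GRU (r, z, n) and 4 for
+ # LSTM (i, f, g, o).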
1171
+ self.weight_ih = Parameter(torch.empty((num_chunks * hidden_size, input_size), **factory_kwargs))
1172
+ self.weight_hh = Parameter(torch.empty((num_chunks * hidden_size, hidden_size), **factory_kwargs))
1173
+ if bias:
1174
+ self.bias_ih = Parameter(torch.empty(num_chunks * hidden_size, **factory_kwargs))
1175
+ self.bias_hh = Parameter(torch.empty(num_chunks * hidden_size, **factory_kwargs))
1176
+ else:
1177
+ self.register_parameter('bias_ih', None)
1178
+ self.register_parameter('bias_hh', None)
1179
+
1180
+ self.reset_parameters()
1181
+
1182
+ def extra_repr(self) -> str:
1183
+ s = '{input_size}, {hidden_size}'
1184
+ if 'bias' in self.__dict__ and self.bias is not True:
1185
+ s += ', bias={bias}'
1186
+ if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh":
1187
+ s += ', nonlinearity={nonlinearity}'
1188
+ return s.format(**self.__dict__)
1189
+
1190
+ def reset_parameters(self) -> None:
1191
+ stdv = 1.0 / math.sqrt(self.hidden_size) if self.hidden_size > 0 else 0
1192
+ for weight in self.parameters():
1193
+ init.uniform_(weight, -stdv, stdv)
1194
+
1195
+
1196
+ class RNNCell(RNNCellBase):
1197
+ r"""An Elman RNN cell with tanh or ReLU non-linearity.
1198
+
1199
+ .. math::
1200
+
1201
+ h' = \tanh(W_{ih} x + b_{ih} + W_{hh} h + b_{hh})
1202
+
1203
+ If :attr:`nonlinearity` is `'relu'`, then ReLU is used in place of tanh.
1204
+
1205
+ Args:
1206
+ input_size: The number of expected features in the input `x`
1207
+ hidden_size: The number of features in the hidden state `h`
1208
+ bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
1209
+ Default: ``True``
1210
+ nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
1211
+
1212
+ Inputs: input, hidden
1213
+ - **input**: tensor containing input features
1214
+ - **hidden**: tensor containing the initial hidden state
1215
+ Defaults to zero if not provided.
1216
+
1217
+ Outputs: h'
1218
+ - **h'** of shape `(batch, hidden_size)`: tensor containing the next hidden state
1219
+ for each element in the batch
1220
+
1221
+ Shape:
1222
+ - input: :math:`(N, H_{in})` or :math:`(H_{in})` tensor containing input features where
1223
+ :math:`H_{in}` = `input_size`.
1224
+ - hidden: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the initial hidden
1225
+ state where :math:`H_{out}` = `hidden_size`. Defaults to zero if not provided.
1226
+ - output: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the next hidden state.
1227
+
1228
+ Attributes:
1229
+ weight_ih: the learnable input-hidden weights, of shape
1230
+ `(hidden_size, input_size)`
1231
+ weight_hh: the learnable hidden-hidden weights, of shape
1232
+ `(hidden_size, hidden_size)`
1233
+ bias_ih: the learnable input-hidden bias, of shape `(hidden_size)`
1234
+ bias_hh: the learnable hidden-hidden bias, of shape `(hidden_size)`
1235
+
1236
+ .. note::
1237
+ All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
1238
+ where :math:`k = \frac{1}{\text{hidden\_size}}`
1239
+
1240
+ Examples::
1241
+
1242
+ >>> rnn = nn.RNNCell(10, 20)
1243
+ >>> input = torch.randn(6, 3, 10)
1244
+ >>> hx = torch.randn(3, 20)
1245
+ >>> output = []
1246
+ >>> for i in range(6):
1247
+ ... hx = rnn(input[i], hx)
1248
+ ... output.append(hx)
1249
+ """
1250
+
1251
+ __constants__ = ['input_size', 'hidden_size', 'bias', 'nonlinearity']
1252
+ nonlinearity: str
1253
+
1254
+ def __init__(self, input_size: int, hidden_size: int, bias: bool = True, nonlinearity: str = "tanh",
1255
+ device=None, dtype=None) -> None:
1256
+ factory_kwargs = {'device': device, 'dtype': dtype}
1257
+ super().__init__(input_size, hidden_size, bias, num_chunks=1, **factory_kwargs)
1258
+ self.nonlinearity = nonlinearity
1259
+
1260
+ def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
1261
+ if input.dim() not in (1, 2):
1262
+ raise ValueError(f"RNNCell: Expected input to be 1D or 2D, got {input.dim()}D instead")
1263
+ if hx is not None and hx.dim() not in (1, 2):
1264
+ raise ValueError(f"RNNCell: Expected hidden to be 1D or 2D, got {hx.dim()}D instead")
1265
+ is_batched = input.dim() == 2
1266
+ if not is_batched:
1267
+ input = input.unsqueeze(0)
1268
+
1269
+ if hx is None:
1270
+ hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
1271
+ else:
1272
+ hx = hx.unsqueeze(0) if not is_batched else hx
1273
+
1274
+ if self.nonlinearity == "tanh":
1275
+ ret = _VF.rnn_tanh_cell(
1276
+ input, hx,
1277
+ self.weight_ih, self.weight_hh,
1278
+ self.bias_ih, self.bias_hh,
1279
+ )
1280
+ elif self.nonlinearity == "relu":
1281
+ ret = _VF.rnn_relu_cell(
1282
+ input, hx,
1283
+ self.weight_ih, self.weight_hh,
1284
+ self.bias_ih, self.bias_hh,
1285
+ )
1286
+ else:
1287
+ ret = input # TODO: remove when jit supports exception flow
1288
+ raise RuntimeError(
1289
+ f"Unknown nonlinearity: {self.nonlinearity}")
1290
+
1291
+ if not is_batched:
1292
+ ret = ret.squeeze(0)
1293
+
1294
+ return ret
1295
+
1296
+
1297
+ class LSTMCell(RNNCellBase):
1298
+ r"""A long short-term memory (LSTM) cell.
1299
+
1300
+ .. math::
1301
+
1302
+ \begin{array}{ll}
1303
+ i = \sigma(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\
1304
+ f = \sigma(W_{if} x + b_{if} + W_{hf} h + b_{hf}) \\
1305
+ g = \tanh(W_{ig} x + b_{ig} + W_{hg} h + b_{hg}) \\
1306
+ o = \sigma(W_{io} x + b_{io} + W_{ho} h + b_{ho}) \\
1307
+ c' = f \odot c + i \odot g \\
1308
+ h' = o \odot \tanh(c') \\
1309
+ \end{array}
1310
+
1311
+ where :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.
1312
+
1313
+ Args:
1314
+ input_size: The number of expected features in the input `x`
1315
+ hidden_size: The number of features in the hidden state `h`
1316
+ bias: If ``False``, then the layer does not use bias weights `b_ih` and
1317
+ `b_hh`. Default: ``True``
1318
+
1319
+ Inputs: input, (h_0, c_0)
1320
+ - **input** of shape `(batch, input_size)` or `(input_size)`: tensor containing input features
1321
+ - **h_0** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the initial hidden state
1322
+ - **c_0** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the initial cell state
1323
+
1324
+ If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero.
1325
+
1326
+ Outputs: (h_1, c_1)
1327
+ - **h_1** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the next hidden state
1328
+ - **c_1** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the next cell state
1329
+
1330
+ Attributes:
1331
+ weight_ih: the learnable input-hidden weights, of shape
1332
+ `(4*hidden_size, input_size)`
1333
+ weight_hh: the learnable hidden-hidden weights, of shape
1334
+ `(4*hidden_size, hidden_size)`
1335
+ bias_ih: the learnable input-hidden bias, of shape `(4*hidden_size)`
1336
+ bias_hh: the learnable hidden-hidden bias, of shape `(4*hidden_size)`
1337
+
1338
+ .. note::
1339
+ All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
1340
+ where :math:`k = \frac{1}{\text{hidden\_size}}`
1341
+
1342
+ On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
1343
+
1344
+ Examples::
1345
+
1346
+ >>> rnn = nn.LSTMCell(10, 20) # (input_size, hidden_size)
1347
+ >>> input = torch.randn(2, 3, 10) # (time_steps, batch, input_size)
1348
+ >>> hx = torch.randn(3, 20) # (batch, hidden_size)
1349
+ >>> cx = torch.randn(3, 20)
1350
+ >>> output = []
1351
+ >>> for i in range(input.size()[0]):
1352
+ ... hx, cx = rnn(input[i], (hx, cx))
1353
+ ... output.append(hx)
1354
+ >>> output = torch.stack(output, dim=0)
1355
+ """
1356
+
1357
+ def __init__(self, input_size: int, hidden_size: int, bias: bool = True,
1358
+ device=None, dtype=None) -> None:
1359
+ factory_kwargs = {'device': device, 'dtype': dtype}
1360
+ super().__init__(input_size, hidden_size, bias, num_chunks=4, **factory_kwargs)
1361
+
1362
+ def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
1363
+ if input.dim() not in (1, 2):
1364
+ raise ValueError(f"LSTMCell: Expected input to be 1D or 2D, got {input.dim()}D instead")
1365
+ if hx is not None:
1366
+ for idx, value in enumerate(hx):
1367
+ if value.dim() not in (1, 2):
1368
+ raise ValueError(f"LSTMCell: Expected hx[{idx}] to be 1D or 2D, got {value.dim()}D instead")
1369
+ is_batched = input.dim() == 2
1370
+ if not is_batched:
1371
+ input = input.unsqueeze(0)
1372
+
1373
+ if hx is None:
1374
+ zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
1375
+ hx = (zeros, zeros)
1376
+ else:
1377
+ hx = (hx[0].unsqueeze(0), hx[1].unsqueeze(0)) if not is_batched else hx
1378
+
1379
+ ret = _VF.lstm_cell(
1380
+ input, hx,
1381
+ self.weight_ih, self.weight_hh,
1382
+ self.bias_ih, self.bias_hh,
1383
+ )
1384
+
1385
+ if not is_batched:
1386
+ ret = (ret[0].squeeze(0), ret[1].squeeze(0))
1387
+ return ret
1388
+
1389
+
1390
+ class GRUCell(RNNCellBase):
1391
+ r"""A gated recurrent unit (GRU) cell.
1392
+
1393
+ .. math::
1394
+
1395
+ \begin{array}{ll}
1396
+ r = \sigma(W_{ir} x + b_{ir} + W_{hr} h + b_{hr}) \\
1397
+ z = \sigma(W_{iz} x + b_{iz} + W_{hz} h + b_{hz}) \\
1398
+ n = \tanh(W_{in} x + b_{in} + r \odot (W_{hn} h + b_{hn})) \\
1399
+ h' = (1 - z) \odot n + z \odot h
1400
+ \end{array}
1401
+
1402
+ where :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.
1403
+
1404
+ Args:
1405
+ input_size: The number of expected features in the input `x`
1406
+ hidden_size: The number of features in the hidden state `h`
1407
+ bias: If ``False``, then the layer does not use bias weights `b_ih` and
1408
+ `b_hh`. Default: ``True``
1409
+
1410
+ Inputs: input, hidden
1411
+ - **input** : tensor containing input features
1412
+ - **hidden** : tensor containing the initial hidden
1413
+ state for each element in the batch.
1414
+ Defaults to zero if not provided.
1415
+
1416
+ Outputs: h'
1417
+ - **h'** : tensor containing the next hidden state
1418
+ for each element in the batch
1419
+
1420
+ Shape:
1421
+ - input: :math:`(N, H_{in})` or :math:`(H_{in})` tensor containing input features where
1422
+ :math:`H_{in}` = `input_size`.
1423
+ - hidden: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the initial hidden
1424
+ state where :math:`H_{out}` = `hidden_size`. Defaults to zero if not provided.
1425
+ - output: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the next hidden state.
1426
+
1427
+ Attributes:
1428
+ weight_ih: the learnable input-hidden weights, of shape
1429
+ `(3*hidden_size, input_size)`
1430
+ weight_hh: the learnable hidden-hidden weights, of shape
1431
+ `(3*hidden_size, hidden_size)`
1432
+ bias_ih: the learnable input-hidden bias, of shape `(3*hidden_size)`
1433
+ bias_hh: the learnable hidden-hidden bias, of shape `(3*hidden_size)`
1434
+
1435
+ .. note::
1436
+ All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
1437
+ where :math:`k = \frac{1}{\text{hidden\_size}}`
1438
+
1439
+ On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
1440
+
1441
+ Examples::
1442
+
1443
+ >>> rnn = nn.GRUCell(10, 20)
1444
+ >>> input = torch.randn(6, 3, 10)
1445
+ >>> hx = torch.randn(3, 20)
1446
+ >>> output = []
1447
+ >>> for i in range(6):
1448
+ ... hx = rnn(input[i], hx)
1449
+ ... output.append(hx)
1450
+ """
1451
+
1452
+ def __init__(self, input_size: int, hidden_size: int, bias: bool = True,
1453
+ device=None, dtype=None) -> None:
1454
+ factory_kwargs = {'device': device, 'dtype': dtype}
1455
+ super().__init__(input_size, hidden_size, bias, num_chunks=3, **factory_kwargs)
1456
+
1457
+ def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
1458
+ if input.dim() not in (1, 2):
1459
+ raise ValueError(f"GRUCell: Expected input to be 1D or 2D, got {input.dim()}D instead")
1460
+ if hx is not None and hx.dim() not in (1, 2):
1461
+ raise ValueError(f"GRUCell: Expected hidden to be 1D or 2D, got {hx.dim()}D instead")
1462
+ is_batched = input.dim() == 2
1463
+ if not is_batched:
1464
+ input = input.unsqueeze(0)
1465
+
1466
+ if hx is None:
1467
+ hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
1468
+ else:
1469
+ hx = hx.unsqueeze(0) if not is_batched else hx
1470
+
1471
+ ret = _VF.gru_cell(
1472
+ input, hx,
1473
+ self.weight_ih, self.weight_hh,
1474
+ self.bias_ih, self.bias_hh,
1475
+ )
1476
+
1477
+ if not is_batched:
1478
+ ret = ret.squeeze(0)
1479
+
1480
+ return ret
venv/lib/python3.10/site-packages/torch/nn/modules/upsampling.py ADDED
@@ -0,0 +1,264 @@
1
+ from .module import Module
2
+ from .. import functional as F
3
+
4
+ from torch import Tensor
5
+ from typing import Optional
6
+ from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t
7
+
8
+ __all__ = ['Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d']
9
+
10
+
11
+ class Upsample(Module):
12
+ r"""Upsample given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data.
13
+
14
+ The input data is assumed to be of the form
15
+ `minibatch x channels x [optional depth] x [optional height] x width`.
16
+ Hence, for spatial inputs, we expect a 4D Tensor and for volumetric inputs, we expect a 5D Tensor.
17
+
18
+ The algorithms available for upsampling are nearest neighbor and linear,
19
+ bilinear, bicubic and trilinear for 3D, 4D and 5D input Tensor,
20
+ respectively.
21
+
22
+ One can either give a :attr:`scale_factor` or the target output :attr:`size` to
23
+ calculate the output size. (You cannot give both, as it is ambiguous)
24
+
25
+ Args:
26
+ size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional):
27
+ output spatial sizes
28
+ scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional):
29
+ multiplier for spatial size. Has to match input size if it is a tuple.
30
+ mode (str, optional): the upsampling algorithm: one of ``'nearest'``,
31
+ ``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``.
32
+ Default: ``'nearest'``
33
+ align_corners (bool, optional): if ``True``, the corner pixels of the input
34
+ and output tensors are aligned, and thus preserving the values at
35
+ those pixels. This only has effect when :attr:`mode` is
36
+ ``'linear'``, ``'bilinear'``, ``'bicubic'``, or ``'trilinear'``.
37
+ Default: ``False``
38
+ recompute_scale_factor (bool, optional): recompute the scale_factor for use in the
39
+ interpolation calculation. If `recompute_scale_factor` is ``True``, then
40
+ `scale_factor` must be passed in and `scale_factor` is used to compute the
41
+ output `size`. The computed output `size` will be used to infer new scales for
42
+ the interpolation. Note that when `scale_factor` is floating-point, it may differ
43
+ from the recomputed `scale_factor` due to rounding and precision issues.
44
+ If `recompute_scale_factor` is ``False``, then `size` or `scale_factor` will
45
+ be used directly for interpolation.
46
+
47
+ Shape:
48
+ - Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})` or :math:`(N, C, D_{in}, H_{in}, W_{in})`
49
+ - Output: :math:`(N, C, W_{out})`, :math:`(N, C, H_{out}, W_{out})`
50
+ or :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
51
+
52
+ .. math::
53
+ D_{out} = \left\lfloor D_{in} \times \text{scale\_factor} \right\rfloor
54
+
55
+ .. math::
56
+ H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor
57
+
58
+ .. math::
59
+ W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor
60
+
61
+ .. warning::
62
+ With ``align_corners = True``, the linearly interpolating modes
63
+ (`linear`, `bilinear`, `bicubic`, and `trilinear`) don't proportionally
64
+ align the output and input pixels, and thus the output values can depend
65
+ on the input size. This was the default behavior for these modes up to
66
+ version 0.3.1. Since then, the default behavior is
67
+ ``align_corners = False``. See below for concrete examples on how this
68
+ affects the outputs.
69
+
70
+ .. note::
71
+ If you want downsampling/general resizing, you should use :func:`~nn.functional.interpolate`.
72
+
73
+ Examples::
74
+
75
+ >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
76
+ >>> input
77
+ tensor([[[[1., 2.],
78
+ [3., 4.]]]])
79
+
80
+ >>> m = nn.Upsample(scale_factor=2, mode='nearest')
81
+ >>> m(input)
82
+ tensor([[[[1., 1., 2., 2.],
83
+ [1., 1., 2., 2.],
84
+ [3., 3., 4., 4.],
85
+ [3., 3., 4., 4.]]]])
86
+
87
+ >>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles")
88
+ >>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False
89
+ >>> m(input)
90
+ tensor([[[[1.0000, 1.2500, 1.7500, 2.0000],
91
+ [1.5000, 1.7500, 2.2500, 2.5000],
92
+ [2.5000, 2.7500, 3.2500, 3.5000],
93
+ [3.0000, 3.2500, 3.7500, 4.0000]]]])
94
+
95
+ >>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
96
+ >>> m(input)
97
+ tensor([[[[1.0000, 1.3333, 1.6667, 2.0000],
98
+ [1.6667, 2.0000, 2.3333, 2.6667],
99
+ [2.3333, 2.6667, 3.0000, 3.3333],
100
+ [3.0000, 3.3333, 3.6667, 4.0000]]]])
101
+
102
+ >>> # Try scaling the same data in a larger tensor
103
+ >>> input_3x3 = torch.zeros(3, 3).view(1, 1, 3, 3)
104
+ >>> input_3x3[:, :, :2, :2].copy_(input)
105
+ tensor([[[[1., 2.],
106
+ [3., 4.]]]])
107
+ >>> input_3x3
108
+ tensor([[[[1., 2., 0.],
109
+ [3., 4., 0.],
110
+ [0., 0., 0.]]]])
111
+
112
+ >>> # xdoctest: +IGNORE_WANT("seems to fail when other tests are run in the same session")
113
+ >>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False
114
+ >>> # Notice that values in top left corner are the same with the small input (except at boundary)
115
+ >>> m(input_3x3)
116
+ tensor([[[[1.0000, 1.2500, 1.7500, 1.5000, 0.5000, 0.0000],
117
+ [1.5000, 1.7500, 2.2500, 1.8750, 0.6250, 0.0000],
118
+ [2.5000, 2.7500, 3.2500, 2.6250, 0.8750, 0.0000],
119
+ [2.2500, 2.4375, 2.8125, 2.2500, 0.7500, 0.0000],
120
+ [0.7500, 0.8125, 0.9375, 0.7500, 0.2500, 0.0000],
121
+ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
122
+
123
+ >>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
124
+ >>> # Notice that values in top left corner are now changed
125
+ >>> m(input_3x3)
126
+ tensor([[[[1.0000, 1.4000, 1.8000, 1.6000, 0.8000, 0.0000],
127
+ [1.8000, 2.2000, 2.6000, 2.2400, 1.1200, 0.0000],
128
+ [2.6000, 3.0000, 3.4000, 2.8800, 1.4400, 0.0000],
129
+ [2.4000, 2.7200, 3.0400, 2.5600, 1.2800, 0.0000],
130
+ [1.2000, 1.3600, 1.5200, 1.2800, 0.6400, 0.0000],
131
+ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
132
+ """
133
+
134
+ __constants__ = ['size', 'scale_factor', 'mode', 'align_corners', 'name', 'recompute_scale_factor']
135
+ name: str
136
+ size: Optional[_size_any_t]
137
+ scale_factor: Optional[_ratio_any_t]
138
+ mode: str
139
+ align_corners: Optional[bool]
140
+ recompute_scale_factor: Optional[bool]
141
+
142
+ def __init__(self, size: Optional[_size_any_t] = None, scale_factor: Optional[_ratio_any_t] = None,
143
+ mode: str = 'nearest', align_corners: Optional[bool] = None,
144
+ recompute_scale_factor: Optional[bool] = None) -> None:
145
+ super().__init__()
146
+ self.name = type(self).__name__
147
+ self.size = size
148
+ if isinstance(scale_factor, tuple):
149
+ self.scale_factor = tuple(float(factor) for factor in scale_factor)
150
+ else:
151
+ self.scale_factor = float(scale_factor) if scale_factor else None
152
+ self.mode = mode
153
+ self.align_corners = align_corners
154
+ self.recompute_scale_factor = recompute_scale_factor
155
+
156
+ def forward(self, input: Tensor) -> Tensor:
157
+ return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners,
158
+ recompute_scale_factor=self.recompute_scale_factor)
159
+
160
+ def __setstate__(self, state):
161
+ if 'recompute_scale_factor' not in state:
162
+ state['recompute_scale_factor'] = True
163
+
164
+ super().__setstate__(state)
165
+
166
+ def extra_repr(self) -> str:
167
+ if self.scale_factor is not None:
168
+ info = 'scale_factor=' + repr(self.scale_factor)
169
+ else:
170
+ info = 'size=' + repr(self.size)
171
+ info += ', mode=' + repr(self.mode)
172
+ return info
173
+
174
+
+ class UpsamplingNearest2d(Upsample):
+ r"""Applies a 2D nearest neighbor upsampling to an input signal composed of several input channels.
+
+ To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
+ as its constructor argument.
+
+ When :attr:`size` is given, it is the output size of the image `(h, w)`.
+
+ Args:
+ size (int or Tuple[int, int], optional): output spatial sizes
+ scale_factor (float or Tuple[float, float], optional): multiplier for
+ spatial size.
+
+ .. warning::
+ This class is deprecated in favor of :func:`~nn.functional.interpolate`.
+
+ Shape:
+ - Input: :math:`(N, C, H_{in}, W_{in})`
+ - Output: :math:`(N, C, H_{out}, W_{out})` where
+
+ .. math::
+ H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor
+
+ .. math::
+ W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor
+
+ Examples::
+
+ >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
+ >>> input
+ tensor([[[[1., 2.],
+ [3., 4.]]]])
+
+ >>> m = nn.UpsamplingNearest2d(scale_factor=2)
+ >>> m(input)
+ tensor([[[[1., 1., 2., 2.],
+ [1., 1., 2., 2.],
+ [3., 3., 4., 4.],
+ [3., 3., 4., 4.]]]])
+ """
+
+ def __init__(self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None) -> None:
+ super().__init__(size, scale_factor, mode='nearest')
+
+
+ class UpsamplingBilinear2d(Upsample):
+ r"""Applies a 2D bilinear upsampling to an input signal composed of several input channels.
+
+ To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
+ as its constructor argument.
+
+ When :attr:`size` is given, it is the output size of the image `(h, w)`.
+
+ Args:
+ size (int or Tuple[int, int], optional): output spatial sizes
+ scale_factor (float or Tuple[float, float], optional): multiplier for
+ spatial size.
+
+ .. warning::
+ This class is deprecated in favor of :func:`~nn.functional.interpolate`. It is
+ equivalent to ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``.
+
+ Shape:
+ - Input: :math:`(N, C, H_{in}, W_{in})`
+ - Output: :math:`(N, C, H_{out}, W_{out})` where
+
+ .. math::
+ H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor
+
+ .. math::
+ W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor
+
+ Examples::
+
+ >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
+ >>> input
+ tensor([[[[1., 2.],
+ [3., 4.]]]])
+
+ >>> # xdoctest: +IGNORE_WANT("do other tests modify the global state?")
+ >>> m = nn.UpsamplingBilinear2d(scale_factor=2)
+ >>> m(input)
+ tensor([[[[1.0000, 1.3333, 1.6667, 2.0000],
+ [1.6667, 2.0000, 2.3333, 2.6667],
+ [2.3333, 2.6667, 3.0000, 3.3333],
+ [3.0000, 3.3333, 3.6667, 4.0000]]]])
+ """
+
+ def __init__(self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None) -> None:
+ super().__init__(size, scale_factor, mode='bilinear', align_corners=True)
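
As the warning in the docstring states, these wrappers are thin fixed-argument forms of :func:`~nn.functional.interpolate`. A minimal sketch checking that equivalence for the bilinear case (names are illustrative):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    x = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
    m = nn.UpsamplingBilinear2d(scale_factor=2)
    out_module = m(x)
    out_functional = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
    assert torch.allclose(out_module, out_functional)  # forward delegates to F.interpolate
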
venv/lib/python3.10/site-packages/torch/nn/quantizable/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (212 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantizable/modules/__init__.py ADDED
@@ -0,0 +1,9 @@
+ from torch.ao.nn.quantizable.modules.activation import MultiheadAttention
+ from torch.ao.nn.quantizable.modules.rnn import LSTM
+ from torch.ao.nn.quantizable.modules.rnn import LSTMCell
+
+ __all__ = [
+ 'LSTM',
+ 'LSTMCell',
+ 'MultiheadAttention',
+ ]
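
The shim above simply re-exports the `torch.ao` implementations, so the legacy and new import paths resolve to the same class objects. A quick sketch verifying that:

    from torch.nn.quantizable.modules import LSTM, MultiheadAttention
    from torch.ao.nn.quantizable.modules.rnn import LSTM as ao_LSTM
    from torch.ao.nn.quantizable.modules.activation import MultiheadAttention as ao_MHA

    assert LSTM is ao_LSTM                    # re-export, not a copy
    assert MultiheadAttention is ao_MHA
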
venv/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (412 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc ADDED
Binary file (639 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (642 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantizable/modules/activation.py ADDED
@@ -0,0 +1,10 @@
+ # flake8: noqa: F401
+ r"""Quantizable Modules.
+
+ This file is in the process of migrating to `torch/ao/nn/quantizable`, and
+ is kept here for compatibility while the migration is ongoing.
+ If you are adding a new entry or functionality, please add it to the
+ appropriate file under `torch/ao/nn/quantizable/modules`,
+ while adding an import statement here.
+ """
+ from torch.ao.nn.quantizable.modules.activation import MultiheadAttention
venv/lib/python3.10/site-packages/torch/nn/quantizable/modules/rnn.py ADDED
@@ -0,0 +1,11 @@
+ # flake8: noqa: F401
+ r"""Quantizable Modules.
+
+ This file is in the process of migrating to `torch/ao/nn/quantizable`, and
+ is kept here for compatibility while the migration is ongoing.
+ If you are adding a new entry or functionality, please add it to the
+ appropriate file under `torch/ao/nn/quantizable/modules`,
+ while adding an import statement here.
+ """
+ from torch.ao.nn.quantizable.modules.rnn import LSTM
+ from torch.ao.nn.quantizable.modules.rnn import LSTMCell
venv/lib/python3.10/site-packages/torch/nn/quantized/__init__.py ADDED
@@ -0,0 +1,40 @@
+ from . import dynamic # noqa: F403
+ from . import functional # noqa: F403
+ from . import modules # noqa: F403
+ from .modules import * # noqa: F403
+ from .modules import MaxPool2d
+
+ __all__ = [
+ 'BatchNorm2d',
+ 'BatchNorm3d',
+ 'Conv1d',
+ 'Conv2d',
+ 'Conv3d',
+ 'ConvTranspose1d',
+ 'ConvTranspose2d',
+ 'ConvTranspose3d',
+ 'DeQuantize',
+ 'Dropout',
+ 'ELU',
+ 'Embedding',
+ 'EmbeddingBag',
+ 'GroupNorm',
+ 'Hardswish',
+ 'InstanceNorm1d',
+ 'InstanceNorm2d',
+ 'InstanceNorm3d',
+ 'LayerNorm',
+ 'LeakyReLU',
+ 'Linear',
+ 'LSTM',
+ 'MultiheadAttention',
+ 'PReLU',
+ 'Quantize',
+ 'ReLU6',
+ 'Sigmoid',
+ 'Softmax',
+ # Wrapper modules
+ 'FloatFunctional',
+ 'FXFloatFunctional',
+ 'QFunctional',
+ ]
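
One of the wrapper modules exported here, `FloatFunctional`, wraps elementwise tensor ops such as `add` so that quantization observers can be attached to them. A minimal sketch of its use on float tensors (default post-process is an identity, so the result is the plain sum):

    import torch
    import torch.nn.quantized as nnq

    ff = nnq.FloatFunctional()
    out = ff.add(torch.ones(2), torch.ones(2))
    print(out)  # tensor([2., 2.])
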
venv/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (718 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/functional.cpython-310.pyc ADDED
Binary file (455 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/__init__.py ADDED
@@ -0,0 +1 @@
+ from .modules import * # noqa: F403
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (221 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__init__.py ADDED
@@ -0,0 +1,31 @@
+ # flake8: noqa: F401
+ r"""Quantized Reference Modules.
+
+ This module is in the process of migrating to
+ `torch/ao/nn/quantized/reference`, and is kept here for
+ compatibility while the migration is ongoing.
+ If you are adding a new entry or functionality, please add it to the
+ appropriate file under `torch/ao/nn/quantized/reference`,
+ while adding an import statement here.
+ """
+
+ from torch.ao.nn.quantized.reference.modules.linear import Linear
+ from torch.ao.nn.quantized.reference.modules.conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
+ from torch.ao.nn.quantized.reference.modules.rnn import RNNCell, LSTMCell, GRUCell, LSTM
+ from torch.ao.nn.quantized.reference.modules.sparse import Embedding, EmbeddingBag
+
+ __all__ = [
+ 'Linear',
+ 'Conv1d',
+ 'Conv2d',
+ 'Conv3d',
+ 'ConvTranspose1d',
+ 'ConvTranspose2d',
+ 'ConvTranspose3d',
+ 'RNNCell',
+ 'LSTMCell',
+ 'GRUCell',
+ 'LSTM',
+ 'Embedding',
+ 'EmbeddingBag',
+ ]
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.16 kB).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/conv.cpython-310.pyc ADDED
Binary file (891 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (654 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (801 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/sparse.cpython-310.pyc ADDED
Binary file (692 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/utils.cpython-310.pyc ADDED
Binary file (852 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/conv.py ADDED
@@ -0,0 +1,19 @@
+ # flake8: noqa: F401
+ r"""Quantized Reference Modules.
+
+ This module is in the process of migrating to
+ `torch/ao/nn/quantized/reference`, and is kept here for
+ compatibility while the migration is ongoing.
+ If you are adding a new entry or functionality, please add it to the
+ appropriate file under `torch/ao/nn/quantized/reference`,
+ while adding an import statement here.
+ """
+
+ from torch.ao.nn.quantized.reference.modules.conv import _ConvNd
+ from torch.ao.nn.quantized.reference.modules.conv import Conv1d
+ from torch.ao.nn.quantized.reference.modules.conv import Conv2d
+ from torch.ao.nn.quantized.reference.modules.conv import Conv3d
+ from torch.ao.nn.quantized.reference.modules.conv import _ConvTransposeNd
+ from torch.ao.nn.quantized.reference.modules.conv import ConvTranspose1d
+ from torch.ao.nn.quantized.reference.modules.conv import ConvTranspose2d
+ from torch.ao.nn.quantized.reference.modules.conv import ConvTranspose3d
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/linear.py ADDED
@@ -0,0 +1,12 @@
+ # flake8: noqa: F401
+ r"""Quantized Reference Modules.
+
+ This module is in the process of migrating to
+ `torch/ao/nn/quantized/reference`, and is kept here for
+ compatibility while the migration is ongoing.
+ If you are adding a new entry or functionality, please add it to the
+ appropriate file under `torch/ao/nn/quantized/reference`,
+ while adding an import statement here.
+ """
+
+ from torch.ao.nn.quantized.reference.modules.linear import Linear
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/rnn.py ADDED
@@ -0,0 +1,17 @@
+ # flake8: noqa: F401
+ r"""Quantized Reference Modules.
+
+ This module is in the process of migrating to
+ `torch/ao/nn/quantized/reference`, and is kept here for
+ compatibility while the migration is ongoing.
+ If you are adding a new entry or functionality, please add it to the
+ appropriate file under `torch/ao/nn/quantized/reference`,
+ while adding an import statement here.
+ """
+
+ from torch.ao.nn.quantized.reference.modules.rnn import RNNCellBase
+ from torch.ao.nn.quantized.reference.modules.rnn import RNNCell
+ from torch.ao.nn.quantized.reference.modules.rnn import LSTMCell
+ from torch.ao.nn.quantized.reference.modules.rnn import GRUCell
+ from torch.ao.nn.quantized.reference.modules.rnn import RNNBase
+ from torch.ao.nn.quantized.reference.modules.rnn import LSTM
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/sparse.py ADDED
@@ -0,0 +1,13 @@
+ # flake8: noqa: F401
+ r"""Quantized Reference Modules.
+
+ This module is in the process of migrating to
+ `torch/ao/nn/quantized/reference`, and is kept here for
+ compatibility while the migration is ongoing.
+ If you are adding a new entry or functionality, please add it to the
+ appropriate file under `torch/ao/nn/quantized/reference`,
+ while adding an import statement here.
+ """
+
+ from torch.ao.nn.quantized.reference.modules.sparse import Embedding
+ from torch.ao.nn.quantized.reference.modules.sparse import EmbeddingBag
venv/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/utils.py ADDED
@@ -0,0 +1,15 @@
+ # flake8: noqa: F401
+ r"""Quantized Reference Modules.
+
+ This module is in the process of migrating to
+ `torch/ao/nn/quantized/reference`, and is kept here for
+ compatibility while the migration is ongoing.
+ If you are adding a new entry or functionality, please add it to the
+ appropriate file under `torch/ao/nn/quantized/reference`,
+ while adding an import statement here.
+ """
+ from torch.ao.nn.quantized.reference.modules.utils import _quantize_weight
+ from torch.ao.nn.quantized.reference.modules.utils import _quantize_and_dequantize_weight
+ from torch.ao.nn.quantized.reference.modules.utils import _save_weight_qparams
+ from torch.ao.nn.quantized.reference.modules.utils import _get_weight_qparam_keys
+ from torch.ao.nn.quantized.reference.modules.utils import ReferenceQuantizedModule
venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__init__.py ADDED
@@ -0,0 +1 @@
+ from torch.ao.nn.quantized.dynamic import * # noqa: F403
venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (240 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__init__.py ADDED
@@ -0,0 +1,32 @@
+ # flake8: noqa: F401
+ r"""Quantized Dynamic Modules.
+
+ This file is in the process of migrating to `torch/ao/nn/quantized/dynamic`,
+ and is kept here for compatibility while the migration is ongoing.
+ If you are adding a new entry or functionality, please add it to the
+ appropriate file under `torch/ao/nn/quantized/dynamic`,
+ while adding an import statement here.
+ """
+
+ from torch.ao.nn.quantized.dynamic.modules import conv
+ from torch.ao.nn.quantized.dynamic.modules import linear
+ from torch.ao.nn.quantized.dynamic.modules import rnn
+
+ from torch.ao.nn.quantized.dynamic.modules.conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
+ from torch.ao.nn.quantized.dynamic.modules.linear import Linear
+ from torch.ao.nn.quantized.dynamic.modules.rnn import LSTM, GRU, LSTMCell, RNNCell, GRUCell
+
+ __all__ = [
+ 'Linear',
+ 'LSTM',
+ 'GRU',
+ 'LSTMCell',
+ 'RNNCell',
+ 'GRUCell',
+ 'Conv1d',
+ 'Conv2d',
+ 'Conv3d',
+ 'ConvTranspose1d',
+ 'ConvTranspose2d',
+ 'ConvTranspose3d',
+ ]
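
These dynamic modules are normally produced by post-training dynamic quantization rather than instantiated directly. A minimal sketch (the module choice and dtype are illustrative):

    import torch
    import torch.nn as nn
    from torch.ao.quantization import quantize_dynamic

    model = nn.Sequential(nn.Linear(16, 8))
    qmodel = quantize_dynamic(model, {nn.Linear}, dtype=torch.qint8)
    print(type(qmodel[0]))  # the dynamic quantized Linear re-exported above
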
venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.17 kB).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc ADDED
Binary file (868 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (649 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (965 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/conv.py ADDED
@@ -0,0 +1,18 @@
+ # flake8: noqa: F401
+ r"""Quantized Dynamic Modules.
+
+ This file is in the process of migrating to `torch/ao/nn/quantized/dynamic`,
+ and is kept here for compatibility while the migration is ongoing.
+ If you are adding a new entry or functionality, please add it to the
+ appropriate file under `torch/ao/nn/quantized/dynamic/modules`,
+ while adding an import statement here.
+ """
+
+ __all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d']
+
+ from torch.ao.nn.quantized.dynamic.modules.conv import Conv1d
+ from torch.ao.nn.quantized.dynamic.modules.conv import Conv2d
+ from torch.ao.nn.quantized.dynamic.modules.conv import Conv3d
+ from torch.ao.nn.quantized.dynamic.modules.conv import ConvTranspose1d
+ from torch.ao.nn.quantized.dynamic.modules.conv import ConvTranspose2d
+ from torch.ao.nn.quantized.dynamic.modules.conv import ConvTranspose3d