diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49333dc419e5d98a6b82aa38ea9bd3ef200d6e32 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__pycache__/thnn.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__pycache__/thnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0d63c06edef6b822179acc421f746343e4b8df1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/backends/__pycache__/thnn.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/backends/thnn.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/backends/thnn.py new file mode 100644 index 0000000000000000000000000000000000000000..5250b4bff1674880c97be7b36ca81d6cd6b665a4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/backends/thnn.py @@ -0,0 +1,4 @@ +# this is for historical pickle deserialization, it is not used otherwise + +def _get_thnn_function_backend(): + pass diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e19378bc7aee4f9f8c6c73eef4cacd097d39d2fa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/__init__.py @@ -0,0 +1,35 @@ +from torch.ao.nn.intrinsic import ConvBn1d +from torch.ao.nn.intrinsic import ConvBn2d +from torch.ao.nn.intrinsic import ConvBn3d +from torch.ao.nn.intrinsic import ConvBnReLU1d +from torch.ao.nn.intrinsic import ConvBnReLU2d +from torch.ao.nn.intrinsic import ConvBnReLU3d +from torch.ao.nn.intrinsic import ConvReLU1d +from torch.ao.nn.intrinsic import ConvReLU2d +from torch.ao.nn.intrinsic import ConvReLU3d +from torch.ao.nn.intrinsic import LinearReLU +from torch.ao.nn.intrinsic import BNReLU2d +from torch.ao.nn.intrinsic import BNReLU3d +from torch.ao.nn.intrinsic import LinearBn1d +from torch.ao.nn.intrinsic.modules.fused import _FusedModule # noqa: F401 + +# Include the subpackages in case user imports from it directly +from . import modules # noqa: F401 +from . import qat # noqa: F401 +from . 
import quantized # noqa: F401 + +__all__ = [ + 'ConvBn1d', + 'ConvBn2d', + 'ConvBn3d', + 'ConvBnReLU1d', + 'ConvBnReLU2d', + 'ConvBnReLU3d', + 'ConvReLU1d', + 'ConvReLU2d', + 'ConvReLU3d', + 'LinearReLU', + 'BNReLU2d', + 'BNReLU3d', + 'LinearBn1d', +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..067b04e62c8ce71835c183bf224bff1529c59397 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..670a654efb95096487f85de1d16e72ab55db37e8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__init__.py @@ -0,0 +1,31 @@ +from .fused import _FusedModule # noqa: F401 +from .fused import BNReLU2d +from .fused import BNReLU3d +from .fused import ConvBn1d +from .fused import ConvBn2d +from .fused import ConvBn3d +from .fused import ConvBnReLU1d +from .fused import ConvBnReLU2d +from .fused import ConvBnReLU3d +from .fused import ConvReLU1d +from .fused import ConvReLU2d +from .fused import ConvReLU3d +from .fused import LinearBn1d +from .fused import LinearReLU + + +__all__ = [ + 'BNReLU2d', + 'BNReLU3d', + 'ConvBn1d', + 'ConvBn2d', + 'ConvBn3d', + 'ConvBnReLU1d', + 'ConvBnReLU2d', + 'ConvBnReLU3d', + 'ConvReLU1d', + 'ConvReLU2d', + 'ConvReLU3d', + 'LinearBn1d', + 'LinearReLU', +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d803e35bdb7e667106b274108be884a3713d8c1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a85425ac537d0a7c0a9277e24ac288ddb71e7d75 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/fused.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/fused.py new file mode 100644 index 0000000000000000000000000000000000000000..dc962f956427ec6f6e6b1d0580a1d5c73bd9cd29 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/modules/fused.py @@ -0,0 +1,30 @@ +from torch.ao.nn.intrinsic import BNReLU2d +from torch.ao.nn.intrinsic import BNReLU3d +from torch.ao.nn.intrinsic import ConvBn1d +from torch.ao.nn.intrinsic import ConvBn2d +from torch.ao.nn.intrinsic import ConvBn3d +from torch.ao.nn.intrinsic import ConvBnReLU1d +from torch.ao.nn.intrinsic import ConvBnReLU2d +from torch.ao.nn.intrinsic import ConvBnReLU3d +from torch.ao.nn.intrinsic import ConvReLU1d +from torch.ao.nn.intrinsic 
import ConvReLU2d +from torch.ao.nn.intrinsic import ConvReLU3d +from torch.ao.nn.intrinsic import LinearBn1d +from torch.ao.nn.intrinsic import LinearReLU +from torch.ao.nn.intrinsic.modules.fused import _FusedModule # noqa: F401 + +__all__ = [ + 'BNReLU2d', + 'BNReLU3d', + 'ConvBn1d', + 'ConvBn2d', + 'ConvBn3d', + 'ConvBnReLU1d', + 'ConvBnReLU2d', + 'ConvBnReLU3d', + 'ConvReLU1d', + 'ConvReLU2d', + 'ConvReLU3d', + 'LinearBn1d', + 'LinearReLU', +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d79bdbfe83209f18b17cc8c7b245f322871d6c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__init__.py @@ -0,0 +1 @@ +from .modules import * # noqa: F403 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fd6eb6dda621a50609aaeab60d297e63df44f64 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..542b438306c0bbcd9c6e06c6ee43ff86a21e0e24 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4059597407919858e50826fb2f0aa08668007135 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e00a532fbf84f1df0b590f2a7584f1ccb32fa044 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..198755718133219f4be3cd3b8335692be32db5fc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/conv_fused.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/conv_fused.py new file mode 100644 index 
0000000000000000000000000000000000000000..2f70dc038b5c4f8a8aa8b5c900314b9f409b5d89 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/conv_fused.py @@ -0,0 +1,37 @@ +# flake8: noqa: F401 +r"""Intrinsic QAT Modules. + +This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/intrinsic/qat/modules`, +while adding an import statement here. +""" + +__all__ = [ + # Modules + 'ConvBn1d', + 'ConvBnReLU1d', + 'ConvReLU1d', + 'ConvBn2d', + 'ConvBnReLU2d', + 'ConvReLU2d', + 'ConvBn3d', + 'ConvBnReLU3d', + 'ConvReLU3d', + # Utilities + 'freeze_bn_stats', + 'update_bn_stats', +] + +from torch.ao.nn.intrinsic.qat import ConvBn1d +from torch.ao.nn.intrinsic.qat import ConvBnReLU1d +from torch.ao.nn.intrinsic.qat import ConvReLU1d +from torch.ao.nn.intrinsic.qat import ConvBn2d +from torch.ao.nn.intrinsic.qat import ConvBnReLU2d +from torch.ao.nn.intrinsic.qat import ConvReLU2d +from torch.ao.nn.intrinsic.qat import ConvBn3d +from torch.ao.nn.intrinsic.qat import ConvBnReLU3d +from torch.ao.nn.intrinsic.qat import ConvReLU3d +from torch.ao.nn.intrinsic.qat import freeze_bn_stats +from torch.ao.nn.intrinsic.qat import update_bn_stats diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_fused.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_fused.py new file mode 100644 index 0000000000000000000000000000000000000000..6e372a5c1d3f6d73bc128aeede9537e9e5de41b7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_fused.py @@ -0,0 +1,15 @@ +# flake8: noqa: F401 +r"""Intrinsic QAT Modules. + +This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/intrinsic/qat/modules`, +while adding an import statement here. 
+""" + +__all__ = [ + 'LinearBn1d', +] + +from torch.ao.nn.intrinsic.qat import LinearBn1d diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b949303a40834fed294900278d4bdde55711a284 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__init__.py @@ -0,0 +1,13 @@ +from .modules import * # noqa: F403 +# to ensure customers can use the module below +# without importing it directly +import torch.nn.intrinsic.quantized.dynamic + +__all__ = [ + 'BNReLU2d', + 'BNReLU3d', + 'ConvReLU1d', + 'ConvReLU2d', + 'ConvReLU3d', + 'LinearReLU', +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fee6e051b44132ebdb7838f99296463a6a209d25 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a4dd82eeae28e5fceaa2e5d5b82f95dc2d26674 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..521e409b2b6422b005e1b2de87ba31f261ce6590 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__init__.py @@ -0,0 +1,12 @@ +from .linear_relu import LinearReLU +from .conv_relu import ConvReLU1d, ConvReLU2d, ConvReLU3d +from .bn_relu import BNReLU2d, BNReLU3d + +__all__ = [ + 'LinearReLU', + 'ConvReLU1d', + 'ConvReLU2d', + 'ConvReLU3d', + 'BNReLU2d', + 'BNReLU3d', +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/bn_relu.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/bn_relu.py new file mode 100644 index 0000000000000000000000000000000000000000..7682b4f8ae426b4aa0537505e55c9c98efd47b94 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/bn_relu.py @@ -0,0 +1,7 @@ +from torch.ao.nn.intrinsic.quantized import BNReLU2d +from torch.ao.nn.intrinsic.quantized import BNReLU3d + +__all__ = [ + 'BNReLU2d', + 'BNReLU3d', +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/linear_relu.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/linear_relu.py new file mode 100644 index 0000000000000000000000000000000000000000..3e89e9b5821f78bf18dca840a0834cba70ab8f0d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/linear_relu.py @@ -0,0 +1,5 @@ +from 
torch.ao.nn.intrinsic.quantized import LinearReLU + +__all__ = [ + 'LinearReLU', +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/_functions.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..669448ce4fdad2732f75461f646cd125734c221d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/_functions.py @@ -0,0 +1,288 @@ +import torch +import torch.distributed as dist + +from torch.autograd.function import Function + +class SyncBatchNorm(Function): + + @staticmethod + def forward(self, input, weight, bias, running_mean, running_var, eps, momentum, process_group, world_size): + if not ( + input.is_contiguous(memory_format=torch.channels_last) or + input.is_contiguous(memory_format=torch.channels_last_3d) + ): + input = input.contiguous() + if weight is not None: + weight = weight.contiguous() + + size = int(input.numel() // input.size(1)) + if size == 1 and world_size < 2: + raise ValueError(f'Expected more than 1 value per channel when training, got input size {size}') + + num_channels = input.shape[1] + if input.numel() > 0: + # calculate mean/invstd for input. + mean, invstd = torch.batch_norm_stats(input, eps) + + count = torch.full( + (1,), + input.numel() // input.size(1), + dtype=mean.dtype, + device=mean.device + ) + + # C, C, 1 -> (2C + 1) + combined = torch.cat([mean, invstd, count], dim=0) + else: + # for empty input, set stats and the count to zero. The stats with + # zero count will be filtered out later when computing global mean + # & invstd, but they still needs to participate the all_gather + # collective communication to unblock other peer processes. + combined = torch.zeros( + 2 * num_channels + 1, + dtype=input.dtype, + device=input.device + ) + + # Use allgather instead of allreduce because count could be different across + # ranks, simple all reduce op can not give correct results. + # batch_norm_gather_stats_with_counts calculates global mean & invstd based on + # all gathered mean, invstd and count. + # for nccl backend, use the optimized version of all gather. + # The Gloo backend does not support `all_gather_into_tensor`. + if process_group._get_backend_name() != "gloo": + # world_size * (2C + 1) + combined_size = combined.numel() + combined_flat = torch.empty(1, + combined_size * world_size, + dtype=combined.dtype, + device=combined.device) + dist.all_gather_into_tensor(combined_flat, combined, process_group, async_op=False) + combined = torch.reshape(combined_flat, (world_size, combined_size)) + # world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1 + mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1) + else: + # world_size * (2C + 1) + combined_list = [ + torch.empty_like(combined) for _ in range(world_size) + ] + dist.all_gather(combined_list, combined, process_group, async_op=False) + combined = torch.stack(combined_list, dim=0) + # world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1 + mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1) + + if not (torch.cuda.is_available() and torch.cuda.is_current_stream_capturing()): + # The lines below force a synchronization between CUDA and CPU, because + # the shape of the result count_all depends on the values in mask tensor. + # Such synchronizations break CUDA Graph capturing. 
+ # See https://github.com/pytorch/pytorch/issues/78549 + # FIXME: https://github.com/pytorch/pytorch/issues/78656 describes + # a better longer-term solution. + + # remove stats from empty inputs + mask = count_all.squeeze(-1) >= 1 + count_all = count_all[mask] + mean_all = mean_all[mask] + invstd_all = invstd_all[mask] + + # calculate global mean & invstd + counts = count_all.view(-1) + if running_mean is not None and counts.dtype != running_mean.dtype: + counts = counts.to(running_mean.dtype) + mean, invstd = torch.batch_norm_gather_stats_with_counts( + input, + mean_all, + invstd_all, + running_mean, + running_var, + momentum, + eps, + counts, + ) + + self.save_for_backward(input, weight, mean, invstd, count_all.to(torch.int32)) + self.process_group = process_group + + # apply element-wise normalization + if input.numel() > 0: + return torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps) + else: + return torch.empty_like(input) + + @staticmethod + def backward(self, grad_output): + if not ( + grad_output.is_contiguous(memory_format=torch.channels_last) or + grad_output.is_contiguous(memory_format=torch.channels_last_3d) + ): + grad_output = grad_output.contiguous() + saved_input, weight, mean, invstd, count_tensor = self.saved_tensors + grad_input = grad_weight = grad_bias = None + process_group = self.process_group + + if saved_input.numel() > 0: + # calculate local stats as well as grad_weight / grad_bias + sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce( + grad_output, + saved_input, + mean, + invstd, + weight, + self.needs_input_grad[0], + self.needs_input_grad[1], + self.needs_input_grad[2] + ) + + if self.needs_input_grad[0]: + # synchronizing stats used to calculate input gradient. + num_channels = sum_dy.shape[0] + combined = torch.cat([sum_dy, sum_dy_xmu], dim=0) + torch.distributed.all_reduce( + combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False) + sum_dy, sum_dy_xmu = torch.split(combined, num_channels) + + # backward pass for gradient calculation + if weight is not None and weight.dtype != mean.dtype: + weight = weight.to(mean.dtype) + grad_input = torch.batch_norm_backward_elemt( + grad_output, + saved_input, + mean, + invstd, + weight, + sum_dy, + sum_dy_xmu, + count_tensor + ) + # synchronizing of grad_weight / grad_bias is not needed as distributed + # training would handle all reduce. + if weight is None or not self.needs_input_grad[1]: + grad_weight = None + + if weight is None or not self.needs_input_grad[2]: + grad_bias = None + else: + # This process got an empty input tensor in the forward pass. + # Although this process can directly set grad_input as an empty + # tensor of zeros, it still needs to participate in the collective + # communication to unblock its peers, as other peer processes might + # have received non-empty inputs. + num_channels = saved_input.shape[1] + if self.needs_input_grad[0]: + # launch all_reduce to unblock other peer processes + combined = torch.zeros( + 2 * num_channels, + dtype=saved_input.dtype, + device=saved_input.device + ) + torch.distributed.all_reduce( + combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False) + + # Leave grad_input, grad_weight and grad_bias as None, which will be + # interpreted by the autograd engine as Tensors full of zeros. 
+ + return grad_input, grad_weight, grad_bias, None, None, None, None, None, None + +class CrossMapLRN2d(Function): + + @staticmethod + def forward(ctx, input, size, alpha=1e-4, beta=0.75, k=1): + ctx.size = size + ctx.alpha = alpha + ctx.beta = beta + ctx.k = k + ctx.scale = None + + if input.dim() != 4: + raise ValueError(f"CrossMapLRN2d: Expected input to be 4D, got {input.dim()}D instead.") + + ctx.scale = ctx.scale or input.new() + output = input.new() + + batch_size = input.size(0) + channels = input.size(1) + input_height = input.size(2) + input_width = input.size(3) + + output.resize_as_(input) + ctx.scale.resize_as_(input) + + # use output storage as temporary buffer + input_square = output + torch.pow(input, 2, out=input_square) + + pre_pad = int((ctx.size - 1) / 2 + 1) + pre_pad_crop = min(pre_pad, channels) + + scale_first = ctx.scale.select(1, 0) + scale_first.zero_() + # compute first feature map normalization + for c in range(pre_pad_crop): + scale_first.add_(input_square.select(1, c)) + + # reuse computations for next feature maps normalization + # by adding the next feature map and removing the previous + for c in range(1, channels): + scale_previous = ctx.scale.select(1, c - 1) + scale_current = ctx.scale.select(1, c) + scale_current.copy_(scale_previous) + if c < channels - pre_pad + 1: + square_next = input_square.select(1, c + pre_pad - 1) + scale_current.add_(square_next, alpha=1) + + if c > pre_pad: + square_previous = input_square.select(1, c - pre_pad) + scale_current.add_(square_previous, alpha=-1) + + ctx.scale.mul_(ctx.alpha / ctx.size).add_(ctx.k) + + torch.pow(ctx.scale, -ctx.beta, out=output) + output.mul_(input) + + ctx.save_for_backward(input, output) + return output + + @staticmethod + def backward(ctx, grad_output): + input, output = ctx.saved_tensors + grad_input = grad_output.new() + + batch_size = input.size(0) + channels = input.size(1) + input_height = input.size(2) + input_width = input.size(3) + + paddded_ratio = input.new(channels + ctx.size - 1, input_height, + input_width) + accum_ratio = input.new(input_height, input_width) + + cache_ratio_value = 2 * ctx.alpha * ctx.beta / ctx.size + inversePrePad = int(ctx.size - (ctx.size - 1) / 2) + + grad_input.resize_as_(input) + torch.pow(ctx.scale, -ctx.beta, out=grad_input).mul_(grad_output) + + paddded_ratio.zero_() + padded_ratio_center = paddded_ratio.narrow(0, inversePrePad, + channels) + for n in range(batch_size): + torch.mul(grad_output[n], output[n], out=padded_ratio_center) + padded_ratio_center.div_(ctx.scale[n]) + torch.sum( + paddded_ratio.narrow(0, 0, ctx.size - 1), 0, keepdim=False, out=accum_ratio) + for c in range(channels): + accum_ratio.add_(paddded_ratio[c + ctx.size - 1]) + grad_input[n][c].addcmul_(input[n][c], accum_ratio, value=-cache_ratio_value) + accum_ratio.add_(paddded_ratio[c], alpha=-1) + + return grad_input, None, None, None, None + +class BackwardHookFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, *args): + ctx.mark_non_differentiable(*[arg for arg in args if not arg.requires_grad]) + return args + + @staticmethod + def backward(ctx, *args): + return args diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/flatten.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/flatten.py new file mode 100644 index 0000000000000000000000000000000000000000..eaf62d5bbeea7728a124a4f650e735b3022bd5b7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/flatten.py @@ -0,0 +1,144 @@ +from .module import Module + 
+from typing import Tuple, Union +from torch import Tensor +from torch.types import _size + +__all__ = ['Flatten', 'Unflatten'] + +class Flatten(Module): + r""" + Flattens a contiguous range of dims into a tensor. + + For use with :class:`~nn.Sequential`, see :meth:`torch.flatten` for details. + + Shape: + - Input: :math:`(*, S_{\text{start}},..., S_{i}, ..., S_{\text{end}}, *)`,' + where :math:`S_{i}` is the size at dimension :math:`i` and :math:`*` means any + number of dimensions including none. + - Output: :math:`(*, \prod_{i=\text{start}}^{\text{end}} S_{i}, *)`. + + Args: + start_dim: first dim to flatten (default = 1). + end_dim: last dim to flatten (default = -1). + + Examples:: + >>> input = torch.randn(32, 1, 5, 5) + >>> # With default parameters + >>> m = nn.Flatten() + >>> output = m(input) + >>> output.size() + torch.Size([32, 25]) + >>> # With non-default parameters + >>> m = nn.Flatten(0, 2) + >>> output = m(input) + >>> output.size() + torch.Size([160, 5]) + """ + + __constants__ = ['start_dim', 'end_dim'] + start_dim: int + end_dim: int + + def __init__(self, start_dim: int = 1, end_dim: int = -1) -> None: + super().__init__() + self.start_dim = start_dim + self.end_dim = end_dim + + def forward(self, input: Tensor) -> Tensor: + return input.flatten(self.start_dim, self.end_dim) + + def extra_repr(self) -> str: + return f'start_dim={self.start_dim}, end_dim={self.end_dim}' + + +class Unflatten(Module): + r""" + Unflattens a tensor dim expanding it to a desired shape. For use with :class:`~nn.Sequential`. + + * :attr:`dim` specifies the dimension of the input tensor to be unflattened, and it can + be either `int` or `str` when `Tensor` or `NamedTensor` is used, respectively. + + * :attr:`unflattened_size` is the new shape of the unflattened dimension of the tensor and it can be + a `tuple` of ints or a `list` of ints or `torch.Size` for `Tensor` input; a `NamedShape` + (tuple of `(name, size)` tuples) for `NamedTensor` input. + + Shape: + - Input: :math:`(*, S_{\text{dim}}, *)`, where :math:`S_{\text{dim}}` is the size at + dimension :attr:`dim` and :math:`*` means any number of dimensions including none. + - Output: :math:`(*, U_1, ..., U_n, *)`, where :math:`U` = :attr:`unflattened_size` and + :math:`\prod_{i=1}^n U_i = S_{\text{dim}}`. 
+ + Args: + dim (Union[int, str]): Dimension to be unflattened + unflattened_size (Union[torch.Size, Tuple, List, NamedShape]): New shape of the unflattened dimension + + Examples: + >>> input = torch.randn(2, 50) + >>> # With tuple of ints + >>> m = nn.Sequential( + >>> nn.Linear(50, 50), + >>> nn.Unflatten(1, (2, 5, 5)) + >>> ) + >>> output = m(input) + >>> output.size() + torch.Size([2, 2, 5, 5]) + >>> # With torch.Size + >>> m = nn.Sequential( + >>> nn.Linear(50, 50), + >>> nn.Unflatten(1, torch.Size([2, 5, 5])) + >>> ) + >>> output = m(input) + >>> output.size() + torch.Size([2, 2, 5, 5]) + >>> # With namedshape (tuple of tuples) + >>> input = torch.randn(2, 50, names=('N', 'features')) + >>> unflatten = nn.Unflatten('features', (('C', 2), ('H', 5), ('W', 5))) + >>> output = unflatten(input) + >>> output.size() + torch.Size([2, 2, 5, 5]) + """ + + NamedShape = Tuple[Tuple[str, int]] + + __constants__ = ['dim', 'unflattened_size'] + dim: Union[int, str] + unflattened_size: Union[_size, NamedShape] + + def __init__(self, dim: Union[int, str], unflattened_size: Union[_size, NamedShape]) -> None: + super().__init__() + + if isinstance(dim, int): + self._require_tuple_int(unflattened_size) + elif isinstance(dim, str): + self._require_tuple_tuple(unflattened_size) + else: + raise TypeError("invalid argument type for dim parameter") + + self.dim = dim + self.unflattened_size = unflattened_size + + def _require_tuple_tuple(self, input): + if (isinstance(input, tuple)): + for idx, elem in enumerate(input): + if not isinstance(elem, tuple): + raise TypeError("unflattened_size must be tuple of tuples, " + + f"but found element of type {type(elem).__name__} at pos {idx}") + return + raise TypeError("unflattened_size must be a tuple of tuples, " + + f"but found type {type(input).__name__}") + + def _require_tuple_int(self, input): + if (isinstance(input, (tuple, list))): + for idx, elem in enumerate(input): + if not isinstance(elem, int): + raise TypeError("unflattened_size must be tuple of ints, " + + f"but found element of type {type(elem).__name__} at pos {idx}") + return + raise TypeError(f"unflattened_size must be a tuple of ints, but found type {type(input).__name__}") + + def forward(self, input: Tensor) -> Tensor: + return input.unflatten(self.dim, self.unflattened_size) + + def extra_repr(self) -> str: + return f'dim={self.dim}, unflattened_size={self.unflattened_size}' diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/fold.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/fold.py new file mode 100644 index 0000000000000000000000000000000000000000..8ae911252f996fb9d1001eb73ab0f195e20f5ffe --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/fold.py @@ -0,0 +1,303 @@ +from .module import Module +from .. import functional as F + +from torch import Tensor +from ..common_types import _size_any_t + +__all__ = ['Fold', 'Unfold'] + +class Fold(Module): + r"""Combines an array of sliding local blocks into a large containing tensor. + + Consider a batched :attr:`input` tensor containing sliding local blocks, + e.g., patches of images, of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`, + where :math:`N` is batch dimension, :math:`C \times \prod(\text{kernel\_size})` + is the number of values within a block (a block has :math:`\prod(\text{kernel\_size})` + spatial locations each containing a :math:`C`-channeled vector), and + :math:`L` is the total number of blocks. 
(This is exactly the + same specification as the output shape of :class:`~torch.nn.Unfold`.) This + operation combines these local blocks into the large :attr:`output` tensor + of shape :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)` + by summing the overlapping values. Similar to :class:`~torch.nn.Unfold`, the + arguments must satisfy + + .. math:: + L = \prod_d \left\lfloor\frac{\text{output\_size}[d] + 2 \times \text{padding}[d] % + - \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor, + + where :math:`d` is over all spatial dimensions. + + * :attr:`output_size` describes the spatial shape of the large containing + tensor of the sliding local blocks. It is useful to resolve the ambiguity + when multiple input shapes map to same number of sliding blocks, e.g., + with ``stride > 0``. + + The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify + how the sliding blocks are retrieved. + + * :attr:`stride` controls the stride for the sliding blocks. + + * :attr:`padding` controls the amount of implicit zero-paddings on both + sides for :attr:`padding` number of points for each dimension before + reshaping. + + * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm. + It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. + + Args: + output_size (int or tuple): the shape of the spatial dimensions of the + output (i.e., ``output.sizes()[2:]``) + kernel_size (int or tuple): the size of the sliding blocks + dilation (int or tuple, optional): a parameter that controls the + stride of elements within the + neighborhood. Default: 1 + padding (int or tuple, optional): implicit zero padding to be added on + both sides of input. Default: 0 + stride (int or tuple): the stride of the sliding blocks in the input + spatial dimensions. Default: 1 + + * If :attr:`output_size`, :attr:`kernel_size`, :attr:`dilation`, + :attr:`padding` or :attr:`stride` is an int or a tuple of length 1 then + their values will be replicated across all spatial dimensions. + + * For the case of two output spatial dimensions this operation is sometimes + called ``col2im``. + + .. note:: + :class:`~torch.nn.Fold` calculates each combined value in the resulting + large tensor by summing all values from all containing blocks. + :class:`~torch.nn.Unfold` extracts the values in the local blocks by + copying from the large tensor. So, if the blocks overlap, they are not + inverses of each other. + + In general, folding and unfolding operations are related as + follows. Consider :class:`~torch.nn.Fold` and + :class:`~torch.nn.Unfold` instances created with the same + parameters: + + >>> fold_params = dict(kernel_size=..., dilation=..., padding=..., stride=...) + >>> fold = nn.Fold(output_size=..., **fold_params) + >>> unfold = nn.Unfold(**fold_params) + + Then for any (supported) ``input`` tensor the following + equality holds: + + :: + + fold(unfold(input)) == divisor * input + + where ``divisor`` is a tensor that depends only on the shape + and dtype of the ``input``: + + >>> # xdoctest: +SKIP + >>> input_ones = torch.ones(input.shape, dtype=input.dtype) + >>> divisor = fold(unfold(input_ones)) + + When the ``divisor`` tensor contains no zero elements, then + ``fold`` and ``unfold`` operations are inverses of each + other (up to constant divisor). + + .. warning:: + Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported. 
+ + Shape: + - Input: :math:`(N, C \times \prod(\text{kernel\_size}), L)` or :math:`(C \times \prod(\text{kernel\_size}), L)` + - Output: :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)` + or :math:`(C, \text{output\_size}[0], \text{output\_size}[1], \dots)` as described above + + Examples:: + + >>> fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 2)) + >>> input = torch.randn(1, 3 * 2 * 2, 12) + >>> output = fold(input) + >>> output.size() + torch.Size([1, 3, 4, 5]) + + .. _link: + https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md + + """ + + __constants__ = ['output_size', 'kernel_size', 'dilation', 'padding', + 'stride'] + output_size: _size_any_t + kernel_size: _size_any_t + dilation: _size_any_t + padding: _size_any_t + stride: _size_any_t + + def __init__( + self, + output_size: _size_any_t, + kernel_size: _size_any_t, + dilation: _size_any_t = 1, + padding: _size_any_t = 0, + stride: _size_any_t = 1 + ) -> None: + super().__init__() + self.output_size = output_size + self.kernel_size = kernel_size + self.dilation = dilation + self.padding = padding + self.stride = stride + + def forward(self, input: Tensor) -> Tensor: + return F.fold(input, self.output_size, self.kernel_size, self.dilation, + self.padding, self.stride) + + def extra_repr(self) -> str: + return 'output_size={output_size}, kernel_size={kernel_size}, ' \ + 'dilation={dilation}, padding={padding}, stride={stride}'.format( + **self.__dict__ + ) + + +class Unfold(Module): + r"""Extracts sliding local blocks from a batched input tensor. + + Consider a batched :attr:`input` tensor of shape :math:`(N, C, *)`, + where :math:`N` is the batch dimension, :math:`C` is the channel dimension, + and :math:`*` represent arbitrary spatial dimensions. This operation flattens + each sliding :attr:`kernel_size`-sized block within the spatial dimensions + of :attr:`input` into a column (i.e., last dimension) of a 3-D :attr:`output` + tensor of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`, where + :math:`C \times \prod(\text{kernel\_size})` is the total number of values + within each block (a block has :math:`\prod(\text{kernel\_size})` spatial + locations each containing a :math:`C`-channeled vector), and :math:`L` is + the total number of such blocks: + + .. math:: + L = \prod_d \left\lfloor\frac{\text{spatial\_size}[d] + 2 \times \text{padding}[d] % + - \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor, + + where :math:`\text{spatial\_size}` is formed by the spatial dimensions + of :attr:`input` (:math:`*` above), and :math:`d` is over all spatial + dimensions. + + Therefore, indexing :attr:`output` at the last dimension (column dimension) + gives all values within a certain block. + + The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify + how the sliding blocks are retrieved. + + * :attr:`stride` controls the stride for the sliding blocks. + + * :attr:`padding` controls the amount of implicit zero-paddings on both + sides for :attr:`padding` number of points for each dimension before + reshaping. + + * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm. + It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. + + Args: + kernel_size (int or tuple): the size of the sliding blocks + dilation (int or tuple, optional): a parameter that controls the + stride of elements within the + neighborhood. 
Default: 1 + padding (int or tuple, optional): implicit zero padding to be added on + both sides of input. Default: 0 + stride (int or tuple, optional): the stride of the sliding blocks in the input + spatial dimensions. Default: 1 + + * If :attr:`kernel_size`, :attr:`dilation`, :attr:`padding` or + :attr:`stride` is an int or a tuple of length 1, their values will be + replicated across all spatial dimensions. + + * For the case of two input spatial dimensions this operation is sometimes + called ``im2col``. + + .. note:: + :class:`~torch.nn.Fold` calculates each combined value in the resulting + large tensor by summing all values from all containing blocks. + :class:`~torch.nn.Unfold` extracts the values in the local blocks by + copying from the large tensor. So, if the blocks overlap, they are not + inverses of each other. + + In general, folding and unfolding operations are related as + follows. Consider :class:`~torch.nn.Fold` and + :class:`~torch.nn.Unfold` instances created with the same + parameters: + + >>> fold_params = dict(kernel_size=..., dilation=..., padding=..., stride=...) + >>> fold = nn.Fold(output_size=..., **fold_params) + >>> unfold = nn.Unfold(**fold_params) + + Then for any (supported) ``input`` tensor the following + equality holds: + + :: + + fold(unfold(input)) == divisor * input + + where ``divisor`` is a tensor that depends only on the shape + and dtype of the ``input``: + + >>> # xdoctest: +SKIP + >>> input_ones = torch.ones(input.shape, dtype=input.dtype) + >>> divisor = fold(unfold(input_ones)) + + When the ``divisor`` tensor contains no zero elements, then + ``fold`` and ``unfold`` operations are inverses of each + other (up to constant divisor). + + .. warning:: + Currently, only 4-D input tensors (batched image-like tensors) are + supported. + + Shape: + - Input: :math:`(N, C, *)` + - Output: :math:`(N, C \times \prod(\text{kernel\_size}), L)` as described above + + Examples:: + + >>> unfold = nn.Unfold(kernel_size=(2, 3)) + >>> input = torch.randn(2, 5, 3, 4) + >>> output = unfold(input) + >>> # each patch contains 30 values (2x3=6 vectors, each of 5 channels) + >>> # 4 blocks (2x3 kernels) in total in the 3x4 input + >>> output.size() + torch.Size([2, 30, 4]) + + >>> # xdoctest: +IGNORE_WANT + >>> # Convolution is equivalent with Unfold + Matrix Multiplication + Fold (or view to output shape) + >>> inp = torch.randn(1, 3, 10, 12) + >>> w = torch.randn(2, 3, 4, 5) + >>> inp_unf = torch.nn.functional.unfold(inp, (4, 5)) + >>> out_unf = inp_unf.transpose(1, 2).matmul(w.view(w.size(0), -1).t()).transpose(1, 2) + >>> out = torch.nn.functional.fold(out_unf, (7, 8), (1, 1)) + >>> # or equivalently (and avoiding a copy), + >>> # out = out_unf.view(1, 2, 7, 8) + >>> (torch.nn.functional.conv2d(inp, w) - out).abs().max() + tensor(1.9073e-06) + + .. 
_link: + https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md + + """ + + __constants__ = ['kernel_size', 'dilation', 'padding', 'stride'] + kernel_size: _size_any_t + dilation: _size_any_t + padding: _size_any_t + stride: _size_any_t + + def __init__( + self, + kernel_size: _size_any_t, + dilation: _size_any_t = 1, + padding: _size_any_t = 0, + stride: _size_any_t = 1 + ) -> None: + super().__init__() + self.kernel_size = kernel_size + self.dilation = dilation + self.padding = padding + self.stride = stride + + def forward(self, input: Tensor) -> Tensor: + return F.unfold(input, self.kernel_size, self.dilation, + self.padding, self.stride) + + def extra_repr(self) -> str: + return 'kernel_size={kernel_size}, dilation={dilation}, padding={padding},' \ + ' stride={stride}'.format(**self.__dict__) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/instancenorm.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/instancenorm.py new file mode 100644 index 0000000000000000000000000000000000000000..d0c37b72448c3270857ac80303a844dc4ba38a36 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/instancenorm.py @@ -0,0 +1,434 @@ + +import warnings +from torch import Tensor + +from .batchnorm import _LazyNormBase, _NormBase +from .. import functional as F + +__all__ = ['InstanceNorm1d', 'InstanceNorm2d', 'InstanceNorm3d', 'LazyInstanceNorm1d', + 'LazyInstanceNorm2d', 'LazyInstanceNorm3d'] + +class _InstanceNorm(_NormBase): + def __init__( + self, + num_features: int, + eps: float = 1e-5, + momentum: float = 0.1, + affine: bool = False, + track_running_stats: bool = False, + device=None, + dtype=None + ) -> None: + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__( + num_features, eps, momentum, affine, track_running_stats, **factory_kwargs) + + def _check_input_dim(self, input): + raise NotImplementedError + + def _get_no_batch_dim(self): + raise NotImplementedError + + def _handle_no_batch_input(self, input): + return self._apply_instance_norm(input.unsqueeze(0)).squeeze(0) + + def _apply_instance_norm(self, input): + return F.instance_norm( + input, self.running_mean, self.running_var, self.weight, self.bias, + self.training or not self.track_running_stats, self.momentum, self.eps) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + version = local_metadata.get('version', None) + # at version 1: removed running_mean and running_var when + # track_running_stats=False (default) + if version is None and not self.track_running_stats: + running_stats_keys = [] + for name in ('running_mean', 'running_var'): + key = prefix + name + if key in state_dict: + running_stats_keys.append(key) + if len(running_stats_keys) > 0: + error_msgs.append( + 'Unexpected running stats buffer(s) {names} for {klass} ' + 'with track_running_stats=False. If state_dict is a ' + 'checkpoint saved before 0.4.0, this may be expected ' + 'because {klass} does not track running stats by default ' + 'since 0.4.0. Please remove these keys from state_dict. If ' + 'the running stats are actually needed, instead set ' + 'track_running_stats=True in {klass} to enable them. See ' + 'the documentation of {klass} for details.' 
+ .format(names=" and ".join(f'"{k}"' for k in running_stats_keys), + klass=self.__class__.__name__)) + for key in running_stats_keys: + state_dict.pop(key) + + super()._load_from_state_dict( + state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs) + + def forward(self, input: Tensor) -> Tensor: + self._check_input_dim(input) + + feature_dim = input.dim() - self._get_no_batch_dim() + if input.size(feature_dim) != self.num_features: + if self.affine: + raise ValueError( + f"expected input's size at dim={feature_dim} to match num_features" + f" ({self.num_features}), but got: {input.size(feature_dim)}.") + else: + warnings.warn(f"input's size at dim={feature_dim} does not match num_features. " + "You can silence this warning by not passing in num_features, " + "which is not used because affine=False") + + if input.dim() == self._get_no_batch_dim(): + return self._handle_no_batch_input(input) + + return self._apply_instance_norm(input) + + +class InstanceNorm1d(_InstanceNorm): + r"""Applies Instance Normalization. + + This operation applies Instance Normalization + over a 2D (unbatched) or 3D (batched) input as described in the paper + `Instance Normalization: The Missing Ingredient for Fast Stylization + `__. + + .. math:: + + y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + + The mean and standard-deviation are calculated per-dimension separately + for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors + of size `C` (where `C` is the number of features or channels of the input) if :attr:`affine` is ``True``. + The standard-deviation is calculated via the biased estimator, equivalent to + `torch.var(input, unbiased=False)`. + + By default, this layer uses instance statistics computed from input data in + both training and evaluation modes. + + If :attr:`track_running_stats` is set to ``True``, during training this + layer keeps running estimates of its computed mean and variance, which are + then used for normalization during evaluation. The running estimates are + kept with a default :attr:`momentum` of 0.1. + + .. note:: + This :attr:`momentum` argument is different from one used in optimizer + classes and the conventional notion of momentum. Mathematically, the + update rule for running statistics here is + :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, + where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the + new observed value. + + .. note:: + :class:`InstanceNorm1d` and :class:`LayerNorm` are very similar, but + have some subtle differences. :class:`InstanceNorm1d` is applied + on each channel of channeled data like multidimensional time series, but + :class:`LayerNorm` is usually applied on entire sample and often in NLP + tasks. Additionally, :class:`LayerNorm` applies elementwise affine + transform, while :class:`InstanceNorm1d` usually don't apply affine + transform. + + Args: + num_features: number of features or channels :math:`C` of the input + eps: a value added to the denominator for numerical stability. Default: 1e-5 + momentum: the value used for the running_mean and running_var computation. Default: 0.1 + affine: a boolean value that when set to ``True``, this module has + learnable affine parameters, initialized the same way as done for batch normalization. + Default: ``False``. 
+ track_running_stats: a boolean value that when set to ``True``, this + module tracks the running mean and variance, and when set to ``False``, + this module does not track such statistics and always uses batch + statistics in both training and eval modes. Default: ``False`` + + Shape: + - Input: :math:`(N, C, L)` or :math:`(C, L)` + - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input) + + Examples:: + + >>> # Without Learnable Parameters + >>> m = nn.InstanceNorm1d(100) + >>> # With Learnable Parameters + >>> m = nn.InstanceNorm1d(100, affine=True) + >>> input = torch.randn(20, 100, 40) + >>> output = m(input) + """ + + def _get_no_batch_dim(self): + return 2 + + def _check_input_dim(self, input): + if input.dim() not in (2, 3): + raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)') + + +class LazyInstanceNorm1d(_LazyNormBase, _InstanceNorm): + r"""A :class:`torch.nn.InstanceNorm1d` module with lazy initialization of the ``num_features`` argument. + + The ``num_features`` argument of the :class:`InstanceNorm1d` is inferred from the ``input.size(1)``. + The attributes that will be lazily initialized are `weight`, `bias`, `running_mean` and `running_var`. + + Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation + on lazy modules and their limitations. + + Args: + num_features: :math:`C` from an expected input of size + :math:`(N, C, L)` or :math:`(C, L)` + eps: a value added to the denominator for numerical stability. Default: 1e-5 + momentum: the value used for the running_mean and running_var computation. Default: 0.1 + affine: a boolean value that when set to ``True``, this module has + learnable affine parameters, initialized the same way as done for batch normalization. + Default: ``False``. + track_running_stats: a boolean value that when set to ``True``, this + module tracks the running mean and variance, and when set to ``False``, + this module does not track such statistics and always uses batch + statistics in both training and eval modes. Default: ``False`` + + Shape: + - Input: :math:`(N, C, L)` or :math:`(C, L)` + - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input) + """ + + cls_to_become = InstanceNorm1d # type: ignore[assignment] + + def _get_no_batch_dim(self): + return 2 + + def _check_input_dim(self, input): + if input.dim() not in (2, 3): + raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)') + + +class InstanceNorm2d(_InstanceNorm): + r"""Applies Instance Normalization. + + This operation applies Instance Normalization + over a 4D input (a mini-batch of 2D inputs + with additional channel dimension) as described in the paper + `Instance Normalization: The Missing Ingredient for Fast Stylization + `__. + + .. math:: + + y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + + The mean and standard-deviation are calculated per-dimension separately + for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors + of size `C` (where `C` is the input size) if :attr:`affine` is ``True``. + The standard-deviation is calculated via the biased estimator, equivalent to + `torch.var(input, unbiased=False)`. + + By default, this layer uses instance statistics computed from input data in + both training and evaluation modes. 
+ + If :attr:`track_running_stats` is set to ``True``, during training this + layer keeps running estimates of its computed mean and variance, which are + then used for normalization during evaluation. The running estimates are + kept with a default :attr:`momentum` of 0.1. + + .. note:: + This :attr:`momentum` argument is different from one used in optimizer + classes and the conventional notion of momentum. Mathematically, the + update rule for running statistics here is + :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, + where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the + new observed value. + + .. note:: + :class:`InstanceNorm2d` and :class:`LayerNorm` are very similar, but + have some subtle differences. :class:`InstanceNorm2d` is applied + on each channel of channeled data like RGB images, but + :class:`LayerNorm` is usually applied on entire sample and often in NLP + tasks. Additionally, :class:`LayerNorm` applies elementwise affine + transform, while :class:`InstanceNorm2d` usually don't apply affine + transform. + + Args: + num_features: :math:`C` from an expected input of size + :math:`(N, C, H, W)` or :math:`(C, H, W)` + eps: a value added to the denominator for numerical stability. Default: 1e-5 + momentum: the value used for the running_mean and running_var computation. Default: 0.1 + affine: a boolean value that when set to ``True``, this module has + learnable affine parameters, initialized the same way as done for batch normalization. + Default: ``False``. + track_running_stats: a boolean value that when set to ``True``, this + module tracks the running mean and variance, and when set to ``False``, + this module does not track such statistics and always uses batch + statistics in both training and eval modes. Default: ``False`` + + Shape: + - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)` + - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input) + + Examples:: + + >>> # Without Learnable Parameters + >>> m = nn.InstanceNorm2d(100) + >>> # With Learnable Parameters + >>> m = nn.InstanceNorm2d(100, affine=True) + >>> input = torch.randn(20, 100, 35, 45) + >>> output = m(input) + """ + + def _get_no_batch_dim(self): + return 3 + + def _check_input_dim(self, input): + if input.dim() not in (3, 4): + raise ValueError(f'expected 3D or 4D input (got {input.dim()}D input)') + + +class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm): + r"""A :class:`torch.nn.InstanceNorm2d` module with lazy initialization of the ``num_features`` argument. + + The ``num_features`` argument of the :class:`InstanceNorm2d` is inferred from the ``input.size(1)``. + The attributes that will be lazily initialized are `weight`, `bias`, + `running_mean` and `running_var`. + + Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation + on lazy modules and their limitations. + + Args: + num_features: :math:`C` from an expected input of size + :math:`(N, C, H, W)` or :math:`(C, H, W)` + eps: a value added to the denominator for numerical stability. Default: 1e-5 + momentum: the value used for the running_mean and running_var computation. Default: 0.1 + affine: a boolean value that when set to ``True``, this module has + learnable affine parameters, initialized the same way as done for batch normalization. + Default: ``False``. 
+ track_running_stats: a boolean value that when set to ``True``, this + module tracks the running mean and variance, and when set to ``False``, + this module does not track such statistics and always uses batch + statistics in both training and eval modes. Default: ``False`` + + Shape: + - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)` + - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input) + """ + + cls_to_become = InstanceNorm2d # type: ignore[assignment] + + def _get_no_batch_dim(self): + return 3 + + def _check_input_dim(self, input): + if input.dim() not in (3, 4): + raise ValueError(f'expected 3D or 4D input (got {input.dim()}D input)') + + +class InstanceNorm3d(_InstanceNorm): + r"""Applies Instance Normalization. + + This operation applies Instance Normalization + over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper + `Instance Normalization: The Missing Ingredient for Fast Stylization + `__. + + .. math:: + + y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + + The mean and standard-deviation are calculated per-dimension separately + for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors + of size C (where C is the input size) if :attr:`affine` is ``True``. + The standard-deviation is calculated via the biased estimator, equivalent to + `torch.var(input, unbiased=False)`. + + By default, this layer uses instance statistics computed from input data in + both training and evaluation modes. + + If :attr:`track_running_stats` is set to ``True``, during training this + layer keeps running estimates of its computed mean and variance, which are + then used for normalization during evaluation. The running estimates are + kept with a default :attr:`momentum` of 0.1. + + .. note:: + This :attr:`momentum` argument is different from one used in optimizer + classes and the conventional notion of momentum. Mathematically, the + update rule for running statistics here is + :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, + where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the + new observed value. + + .. note:: + :class:`InstanceNorm3d` and :class:`LayerNorm` are very similar, but + have some subtle differences. :class:`InstanceNorm3d` is applied + on each channel of channeled data like 3D models with RGB color, but + :class:`LayerNorm` is usually applied on entire sample and often in NLP + tasks. Additionally, :class:`LayerNorm` applies elementwise affine + transform, while :class:`InstanceNorm3d` usually don't apply affine + transform. + + Args: + num_features: :math:`C` from an expected input of size + :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` + eps: a value added to the denominator for numerical stability. Default: 1e-5 + momentum: the value used for the running_mean and running_var computation. Default: 0.1 + affine: a boolean value that when set to ``True``, this module has + learnable affine parameters, initialized the same way as done for batch normalization. + Default: ``False``. + track_running_stats: a boolean value that when set to ``True``, this + module tracks the running mean and variance, and when set to ``False``, + this module does not track such statistics and always uses batch + statistics in both training and eval modes. 
Default: ``False`` + + Shape: + - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` + - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input) + + Examples:: + + >>> # Without Learnable Parameters + >>> m = nn.InstanceNorm3d(100) + >>> # With Learnable Parameters + >>> m = nn.InstanceNorm3d(100, affine=True) + >>> input = torch.randn(20, 100, 35, 45, 10) + >>> output = m(input) + """ + + def _get_no_batch_dim(self): + return 4 + + def _check_input_dim(self, input): + if input.dim() not in (4, 5): + raise ValueError(f'expected 4D or 5D input (got {input.dim()}D input)') + + +class LazyInstanceNorm3d(_LazyNormBase, _InstanceNorm): + r"""A :class:`torch.nn.InstanceNorm3d` module with lazy initialization of the ``num_features`` argument. + + The ``num_features`` argument of the :class:`InstanceNorm3d` is inferred from the ``input.size(1)``. + The attributes that will be lazily initialized are `weight`, `bias`, + `running_mean` and `running_var`. + + Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation + on lazy modules and their limitations. + + Args: + num_features: :math:`C` from an expected input of size + :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` + eps: a value added to the denominator for numerical stability. Default: 1e-5 + momentum: the value used for the running_mean and running_var computation. Default: 0.1 + affine: a boolean value that when set to ``True``, this module has + learnable affine parameters, initialized the same way as done for batch normalization. + Default: ``False``. + track_running_stats: a boolean value that when set to ``True``, this + module tracks the running mean and variance, and when set to ``False``, + this module does not track such statistics and always uses batch + statistics in both training and eval modes. 
Default: ``False`` + + Shape: + - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` + - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input) + """ + + cls_to_become = InstanceNorm3d # type: ignore[assignment] + + def _get_no_batch_dim(self): + return 4 + + def _check_input_dim(self, input): + if input.dim() not in (4, 5): + raise ValueError(f'expected 4D or 5D input (got {input.dim()}D input)') diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/module.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/module.py new file mode 100644 index 0000000000000000000000000000000000000000..8fd81d734bc6725027b6ca88c367069fb87d5eec --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/module.py @@ -0,0 +1,2577 @@ +from collections import OrderedDict, namedtuple +import itertools +import warnings +import functools +import weakref + +import torch +from torch._prims_common import DeviceLikeType +from ..parameter import Parameter +import torch.utils.hooks as hooks + +from torch import Tensor, device, dtype +from typing import Union, Tuple, Any, Callable, Iterator, Set, Optional, overload, TypeVar, Mapping, Dict, List +from typing_extensions import Self +from ...utils.hooks import RemovableHandle +from torch.utils._python_dispatch import is_traceable_wrapper_subclass + +__all__ = ['register_module_forward_pre_hook', 'register_module_forward_hook', + 'register_module_full_backward_pre_hook', 'register_module_backward_hook', + 'register_module_full_backward_hook', 'register_module_buffer_registration_hook', + 'register_module_module_registration_hook', 'register_module_parameter_registration_hook', 'Module'] + +_grad_t = Union[Tuple[Tensor, ...], Tensor] +# See https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self for the use +# of `T` to annotate `self`. Many methods of `Module` return `self` and we want those return values to be +# the type of the subclass, not the looser type of `Module`. 
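+# For example, for a hypothetical subclass ``MyNet(Module)``, ``MyNet().cuda()`` is
+# typed as ``MyNet`` rather than ``Module``, because methods such as ``cuda`` are
+# annotated as ``def cuda(self: T, ...) -> T``.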
+T = TypeVar('T', bound='Module') + + +class _IncompatibleKeys(namedtuple('IncompatibleKeys', ['missing_keys', 'unexpected_keys'])): + def __repr__(self): + if not self.missing_keys and not self.unexpected_keys: + return '' + return super().__repr__() + + __str__ = __repr__ + + +def _addindent(s_, numSpaces): + s = s_.split('\n') + # don't do anything for single-line stuff + if len(s) == 1: + return s_ + first = s.pop(0) + s = [(numSpaces * ' ') + line for line in s] + s = '\n'.join(s) + s = first + '\n' + s + return s + +r"""This tracks hooks common to all modules that are executed immediately before +.registering the buffer/module/parameter""" +_global_buffer_registration_hooks: Dict[int, Callable] = OrderedDict() +_global_module_registration_hooks: Dict[int, Callable] = OrderedDict() +_global_parameter_registration_hooks: Dict[int, Callable] = OrderedDict() + +class _WrappedHook: + def __init__(self, hook: Callable, module: Optional["Module"] = None): + self.hook: Callable = hook + functools.update_wrapper(self, hook) + + self.with_module: bool = False + + if module is not None: + self.module: weakref.ReferenceType[Module] = weakref.ref(module) + self.with_module = True + + def __call__(self, *args: Any, **kwargs: Any) -> Any: + if self.with_module: + module = self.module() + if module is None: + raise RuntimeError("You are trying to call the hook of a dead Module!") + return self.hook(module, *args, **kwargs) + return self.hook(*args, **kwargs) + + def __getstate__(self) -> Dict: + result = {"hook": self.hook, "with_module": self.with_module} + if self.with_module: + result["module"] = self.module() + + return result + + def __setstate__(self, state: Dict): + self.hook = state["hook"] + self.with_module = state["with_module"] + + if self.with_module: + if state["module"] is None: + raise RuntimeError("You are trying to revive the hook of a dead Module!") + self.module = weakref.ref(state["module"]) + + +r"""This tracks hooks common to all modules that are executed before/after +calling forward and backward. This is global state used for debugging/profiling +purposes""" +_global_backward_pre_hooks: Dict[int, Callable] = OrderedDict() +_global_backward_hooks: Dict[int, Callable] = OrderedDict() +_global_is_full_backward_hook: Optional[bool] = None +_global_forward_pre_hooks: Dict[int, Callable] = OrderedDict() +_global_forward_hooks: Dict[int, Callable] = OrderedDict() +_global_forward_hooks_always_called: Dict[int, bool] = OrderedDict() + +_EXTRA_STATE_KEY_SUFFIX = '_extra_state' + + +def register_module_buffer_registration_hook(hook: Callable[..., None]) -> RemovableHandle: + r"""Register a buffer registration hook common to all modules. + + .. warning :: + + This adds global state to the `nn.Module` module + + The hook will be called every time :func:`register_buffer` is invoked. + It should have the following signature:: + + hook(module, name, buffer) -> None or new buffer + + The hook can modify the input or return a single modified value in the hook. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(_global_buffer_registration_hooks) + _global_buffer_registration_hooks[handle.id] = hook + return handle + + +def register_module_module_registration_hook(hook: Callable[..., None]) -> RemovableHandle: + r"""Register a module registration hook common to all modules. + + .. 
warning :: + + This adds global state to the `nn.Module` module + + The hook will be called every time :func:`register_module` is invoked. + It should have the following signature:: + + hook(module, name, submodule) -> None or new submodule + + The hook can modify the input or return a single modified value in the hook. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(_global_module_registration_hooks) + _global_module_registration_hooks[handle.id] = hook + return handle + + +def register_module_parameter_registration_hook(hook: Callable[..., None]) -> RemovableHandle: + r"""Register a parameter registration hook common to all modules. + + .. warning :: + + This adds global state to the `nn.Module` module + + The hook will be called every time :func:`register_parameter` is invoked. + It should have the following signature:: + + hook(module, name, param) -> None or new parameter + + The hook can modify the input or return a single modified value in the hook. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(_global_parameter_registration_hooks) + _global_parameter_registration_hooks[handle.id] = hook + return handle + + +def register_module_forward_pre_hook(hook: Callable[..., None]) -> RemovableHandle: + r"""Register a forward pre-hook common to all modules. + + .. warning :: + + This adds global state to the `nn.module` module + and it is only intended for debugging/profiling purposes. + + The hook will be called every time before :func:`forward` is invoked. + It should have the following signature:: + + hook(module, input) -> None or modified input + + The input contains only the positional arguments given to the module. + Keyword arguments won't be passed to the hooks and only to the ``forward``. + The hook can modify the input. User can either return a tuple or a + single modified value in the hook. We will wrap the value into a tuple + if a single value is returned(unless that value is already a tuple). + + This hook has precedence over the specific module hooks registered with + ``register_forward_pre_hook``. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(_global_forward_pre_hooks) + _global_forward_pre_hooks[handle.id] = hook + return handle + + +def register_module_forward_hook(hook: Callable[..., None], *, always_call: bool = False) -> RemovableHandle: + r"""Register a global forward hook for all the modules. + + .. warning :: + + This adds global state to the `nn.module` module + and it is only intended for debugging/profiling purposes. + + The hook will be called every time after :func:`forward` has computed an output. + It should have the following signature:: + + hook(module, input, output) -> None or modified output + + The input contains only the positional arguments given to the module. + Keyword arguments won't be passed to the hooks and only to the ``forward``. + The hook can modify the output. It can modify the input inplace but + it will not have effect on forward since this is called after + :func:`forward` is called. + + Parameters: + hook (Callable): The user defined hook to be registered. 
+ always_call (bool): If ``True`` the ``hook`` will be run regardless of + whether an exception is raised while calling the Module. + Default: ``False`` + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + + This hook will be executed before specific module hooks registered with + ``register_forward_hook``. + """ + handle = hooks.RemovableHandle(_global_forward_hooks, + extra_dict=_global_forward_hooks_always_called) + _global_forward_hooks[handle.id] = hook + if always_call: + _global_forward_hooks_always_called[handle.id] = True + return handle + + +def register_module_backward_hook( + hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]] +) -> RemovableHandle: + r"""Register a backward hook common to all the modules. + + This function is deprecated in favor of + :func:`torch.nn.modules.module.register_module_full_backward_hook` + and the behavior of this function will change in future versions. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + + """ + global _global_is_full_backward_hook + if _global_is_full_backward_hook is True: + raise RuntimeError("Cannot use both regular backward hooks and full backward hooks as a " + "global Module hook. Please use only one of them.") + + _global_is_full_backward_hook = False + + handle = hooks.RemovableHandle(_global_backward_hooks) + _global_backward_hooks[handle.id] = hook + return handle + + +def register_module_full_backward_pre_hook( + hook: Callable[['Module', _grad_t], Union[None, _grad_t]] +) -> RemovableHandle: + r"""Register a backward pre-hook common to all the modules. + + .. warning :: + This adds global state to the `nn.module` module + and it is only intended for debugging/profiling purposes. + + Hooks registered using this function behave in the same way as those + registered by :meth:`torch.nn.Module.register_full_backward_pre_hook`. + Refer to its documentation for more details. + + Hooks registered using this function will be called before hooks registered + using :meth:`torch.nn.Module.register_full_backward_pre_hook`. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + + """ + handle = hooks.RemovableHandle(_global_backward_pre_hooks) + _global_backward_pre_hooks[handle.id] = hook + return handle + + +def register_module_full_backward_hook( + hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]] +) -> RemovableHandle: + r"""Register a backward hook common to all the modules. + + .. warning :: + This adds global state to the `nn.module` module + and it is only intended for debugging/profiling purposes. + + Hooks registered using this function behave in the same way as those + registered by :meth:`torch.nn.Module.register_full_backward_hook`. + Refer to its documentation for more details. + + Hooks registered using this function will be called before hooks registered + using :meth:`torch.nn.Module.register_full_backward_hook`. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + + """ + global _global_is_full_backward_hook + if _global_is_full_backward_hook is False: + raise RuntimeError("Cannot use both regular backward hooks and full backward hooks as a " + "global Module hook. 
Please use only one of them.") + + _global_is_full_backward_hook = True + + handle = hooks.RemovableHandle(_global_backward_hooks) + _global_backward_hooks[handle.id] = hook + return handle + + +# Trick mypy into not applying contravariance rules to inputs by defining +# forward as a value, rather than a function. See also +# https://github.com/python/mypy/issues/8795 +def _forward_unimplemented(self, *input: Any) -> None: + r"""Define the computation performed at every call. + + Should be overridden by all subclasses. + + .. note:: + Although the recipe for forward pass needs to be defined within + this function, one should call the :class:`Module` instance afterwards + instead of this since the former takes care of running the + registered hooks while the latter silently ignores them. + """ + raise NotImplementedError(f"Module [{type(self).__name__}] is missing the required \"forward\" function") + + +class Module: + r"""Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing to nest them in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + + .. note:: + As per the example above, an ``__init__()`` call to the parent class + must be made before assignment on the child. + + :ivar training: Boolean represents whether this module is in training or + evaluation mode. + :vartype training: bool + """ + + dump_patches: bool = False + + _version: int = 1 + r"""This allows better BC support for :meth:`load_state_dict`. In + :meth:`state_dict`, the version number will be saved as in the attribute + `_metadata` of the returned state dict, and thus pickled. `_metadata` is a + dictionary with keys that follow the naming convention of state dict. See + ``_load_from_state_dict`` on how to use this information in loading. + + If new parameters/buffers are added/removed from a module, this number shall + be bumped, and the module's `_load_from_state_dict` method can compare the + version number and do appropriate changes if the state dict is from before + the change.""" + + training: bool + _parameters: Dict[str, Optional[Parameter]] + _buffers: Dict[str, Optional[Tensor]] + _non_persistent_buffers_set: Set[str] + _backward_pre_hooks: Dict[int, Callable] + _backward_hooks: Dict[int, Callable] + _is_full_backward_hook: Optional[bool] + _forward_hooks: Dict[int, Callable] + # Marks whether the corresponding _forward_hooks accept kwargs or not. + # As JIT does not support Set[int], this dict is used as a set, where all + # hooks represented in this dict accept kwargs. + _forward_hooks_with_kwargs: Dict[int, bool] + # forward hooks that should always be called even if an exception is raised + _forward_hooks_always_called: Dict[int, bool] + _forward_pre_hooks: Dict[int, Callable] + # Marks whether the corresponding _forward_hooks accept kwargs or not. + # As JIT does not support Set[int], this dict is used as a set, where all + # hooks represented in this dict accept kwargs. 
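+    # (This dict plays the same role for forward *pre*-hooks as
+    # ``_forward_hooks_with_kwargs`` does for forward hooks.)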
+ _forward_pre_hooks_with_kwargs: Dict[int, bool] + _state_dict_hooks: Dict[int, Callable] + _load_state_dict_pre_hooks: Dict[int, Callable] + _state_dict_pre_hooks: Dict[int, Callable] + _load_state_dict_post_hooks: Dict[int, Callable] + _modules: Dict[str, Optional['Module']] + call_super_init: bool = False + _compiled_call_impl : Optional[Callable] = None + + def __init__(self, *args, **kwargs) -> None: + """Initialize internal Module state, shared by both nn.Module and ScriptModule.""" + torch._C._log_api_usage_once("python.nn_module") + + # Backward compatibility: no args used to be allowed when call_super_init=False + if self.call_super_init is False and bool(kwargs): + raise TypeError("{}.__init__() got an unexpected keyword argument '{}'" + "".format(type(self).__name__, next(iter(kwargs)))) + + if self.call_super_init is False and bool(args): + raise TypeError(f"{type(self).__name__}.__init__() takes 1 positional argument but {len(args) + 1} were" + " given") + + """ + Calls super().__setattr__('a', a) instead of the typical self.a = a + to avoid Module.__setattr__ overhead. Module's __setattr__ has special + handling for parameters, submodules, and buffers but simply calls into + super().__setattr__ for all other attributes. + """ + super().__setattr__('training', True) + super().__setattr__('_parameters', OrderedDict()) + super().__setattr__('_buffers', OrderedDict()) + super().__setattr__('_non_persistent_buffers_set', set()) + super().__setattr__('_backward_pre_hooks', OrderedDict()) + super().__setattr__('_backward_hooks', OrderedDict()) + super().__setattr__('_is_full_backward_hook', None) + super().__setattr__('_forward_hooks', OrderedDict()) + super().__setattr__('_forward_hooks_with_kwargs', OrderedDict()) + super().__setattr__('_forward_hooks_always_called', OrderedDict()) + super().__setattr__('_forward_pre_hooks', OrderedDict()) + super().__setattr__('_forward_pre_hooks_with_kwargs', OrderedDict()) + super().__setattr__('_state_dict_hooks', OrderedDict()) + super().__setattr__('_state_dict_pre_hooks', OrderedDict()) + super().__setattr__('_load_state_dict_pre_hooks', OrderedDict()) + super().__setattr__('_load_state_dict_post_hooks', OrderedDict()) + super().__setattr__('_modules', OrderedDict()) + + if self.call_super_init: + super().__init__(*args, **kwargs) + + forward: Callable[..., Any] = _forward_unimplemented + + def register_buffer(self, name: str, tensor: Optional[Tensor], persistent: bool = True) -> None: + r"""Add a buffer to the module. + + This is typically used to register a buffer that should not to be + considered a model parameter. For example, BatchNorm's ``running_mean`` + is not a parameter, but is part of the module's state. Buffers, by + default, are persistent and will be saved alongside parameters. This + behavior can be changed by setting :attr:`persistent` to ``False``. The + only difference between a persistent buffer and a non-persistent buffer + is that the latter will not be a part of this module's + :attr:`state_dict`. + + Buffers can be accessed as attributes using given names. + + Args: + name (str): name of the buffer. The buffer can be accessed + from this module using the given name + tensor (Tensor or None): buffer to be registered. If ``None``, then operations + that run on buffers, such as :attr:`cuda`, are ignored. If ``None``, + the buffer is **not** included in the module's :attr:`state_dict`. + persistent (bool): whether the buffer is part of this module's + :attr:`state_dict`. 
+ + Example:: + + >>> # xdoctest: +SKIP("undefined vars") + >>> self.register_buffer('running_mean', torch.zeros(num_features)) + + """ + if persistent is False and isinstance(self, torch.jit.ScriptModule): + raise RuntimeError("ScriptModule does not support non-persistent buffers") + + if '_buffers' not in self.__dict__: + raise AttributeError( + "cannot assign buffer before Module.__init__() call") + elif not isinstance(name, str): + raise TypeError(f"buffer name should be a string. Got {torch.typename(name)}") + elif '.' in name: + raise KeyError("buffer name can't contain \".\"") + elif name == '': + raise KeyError("buffer name can't be empty string \"\"") + elif hasattr(self, name) and name not in self._buffers: + raise KeyError(f"attribute '{name}' already exists") + elif tensor is not None and not isinstance(tensor, torch.Tensor): + raise TypeError(f"cannot assign '{torch.typename(tensor)}' object to buffer '{name}' " + "(torch Tensor or None required)" + ) + else: + for hook in _global_buffer_registration_hooks.values(): + output = hook(self, name, tensor) + if output is not None: + tensor = output + self._buffers[name] = tensor + if persistent: + self._non_persistent_buffers_set.discard(name) + else: + self._non_persistent_buffers_set.add(name) + + def register_parameter(self, name: str, param: Optional[Parameter]) -> None: + r"""Add a parameter to the module. + + The parameter can be accessed as an attribute using given name. + + Args: + name (str): name of the parameter. The parameter can be accessed + from this module using the given name + param (Parameter or None): parameter to be added to the module. If + ``None``, then operations that run on parameters, such as :attr:`cuda`, + are ignored. If ``None``, the parameter is **not** included in the + module's :attr:`state_dict`. + """ + if '_parameters' not in self.__dict__: + raise AttributeError( + "cannot assign parameter before Module.__init__() call") + + elif not isinstance(name, str): + raise TypeError(f"parameter name should be a string. Got {torch.typename(name)}") + elif '.' in name: + raise KeyError("parameter name can't contain \".\"") + elif name == '': + raise KeyError("parameter name can't be empty string \"\"") + elif hasattr(self, name) and name not in self._parameters: + raise KeyError(f"attribute '{name}' already exists") + + if param is None: + self._parameters[name] = None + elif not isinstance(param, Parameter): + raise TypeError(f"cannot assign '{torch.typename(param)}' object to parameter '{name}' " + "(torch.nn.Parameter or None required)" + ) + elif param.grad_fn: + raise ValueError( + f"Cannot assign non-leaf Tensor to parameter '{name}'. Model " + f"parameters must be created explicitly. To express '{name}' " + "as a function of another Tensor, compute the value in " + "the forward() method.") + else: + for hook in _global_parameter_registration_hooks.values(): + output = hook(self, name, param) + if output is not None: + param = output + self._parameters[name] = param + + def add_module(self, name: str, module: Optional['Module']) -> None: + r"""Add a child module to the current module. + + The module can be accessed as an attribute using the given name. + + Args: + name (str): name of the child module. The child module can be + accessed from this module using the given name + module (Module): child module to be added to the module. 
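+
+        Example (an illustrative sketch; the attribute name ``proj`` is arbitrary)::
+
+            >>> # xdoctest: +SKIP("illustrative only")
+            >>> container = nn.Module()
+            >>> container.add_module('proj', nn.Linear(4, 4))
+            >>> container.proj
+            Linear(in_features=4, out_features=4, bias=True)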
+ """ + if not isinstance(module, Module) and module is not None: + raise TypeError(f"{torch.typename(module)} is not a Module subclass") + elif not isinstance(name, str): + raise TypeError(f"module name should be a string. Got {torch.typename(name)}") + elif hasattr(self, name) and name not in self._modules: + raise KeyError(f"attribute '{name}' already exists") + elif '.' in name: + raise KeyError(f"module name can't contain \".\", got: {name}") + elif name == '': + raise KeyError("module name can't be empty string \"\"") + for hook in _global_module_registration_hooks.values(): + output = hook(self, name, module) + if output is not None: + module = output + self._modules[name] = module + + def register_module(self, name: str, module: Optional['Module']) -> None: + r"""Alias for :func:`add_module`.""" + self.add_module(name, module) + + def get_submodule(self, target: str) -> "Module": + """Return the submodule given by ``target`` if it exists, otherwise throw an error. + + For example, let's say you have an ``nn.Module`` ``A`` that + looks like this: + + .. code-block:: text + + A( + (net_b): Module( + (net_c): Module( + (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2)) + ) + (linear): Linear(in_features=100, out_features=200, bias=True) + ) + ) + + (The diagram shows an ``nn.Module`` ``A``. ``A`` has a nested + submodule ``net_b``, which itself has two submodules ``net_c`` + and ``linear``. ``net_c`` then has a submodule ``conv``.) + + To check whether or not we have the ``linear`` submodule, we + would call ``get_submodule("net_b.linear")``. To check whether + we have the ``conv`` submodule, we would call + ``get_submodule("net_b.net_c.conv")``. + + The runtime of ``get_submodule`` is bounded by the degree + of module nesting in ``target``. A query against + ``named_modules`` achieves the same result, but it is O(N) in + the number of transitive modules. So, for a simple check to see + if some submodule exists, ``get_submodule`` should always be + used. + + Args: + target: The fully-qualified string name of the submodule + to look for. (See above example for how to specify a + fully-qualified string.) + + Returns: + torch.nn.Module: The submodule referenced by ``target`` + + Raises: + AttributeError: If the target string references an invalid + path or resolves to something that is not an + ``nn.Module`` + """ + if target == "": + return self + + atoms: List[str] = target.split(".") + mod: torch.nn.Module = self + + for item in atoms: + + if not hasattr(mod, item): + raise AttributeError(mod._get_name() + " has no " + "attribute `" + item + "`") + + mod = getattr(mod, item) + + if not isinstance(mod, torch.nn.Module): + raise AttributeError("`" + item + "` is not " + "an nn.Module") + + return mod + + def get_parameter(self, target: str) -> "Parameter": + """Return the parameter given by ``target`` if it exists, otherwise throw an error. + + See the docstring for ``get_submodule`` for a more detailed + explanation of this method's functionality as well as how to + correctly specify ``target``. + + Args: + target: The fully-qualified string name of the Parameter + to look for. (See ``get_submodule`` for how to specify a + fully-qualified string.) 
+ + Returns: + torch.nn.Parameter: The Parameter referenced by ``target`` + + Raises: + AttributeError: If the target string references an invalid + path or resolves to something that is not an + ``nn.Parameter`` + """ + module_path, _, param_name = target.rpartition(".") + + mod: torch.nn.Module = self.get_submodule(module_path) + + if not hasattr(mod, param_name): + raise AttributeError(mod._get_name() + " has no attribute `" + + param_name + "`") + + param: torch.nn.Parameter = getattr(mod, param_name) + + if not isinstance(param, torch.nn.Parameter): + raise AttributeError("`" + param_name + "` is not an " + "nn.Parameter") + + return param + + def get_buffer(self, target: str) -> "Tensor": + """Return the buffer given by ``target`` if it exists, otherwise throw an error. + + See the docstring for ``get_submodule`` for a more detailed + explanation of this method's functionality as well as how to + correctly specify ``target``. + + Args: + target: The fully-qualified string name of the buffer + to look for. (See ``get_submodule`` for how to specify a + fully-qualified string.) + + Returns: + torch.Tensor: The buffer referenced by ``target`` + + Raises: + AttributeError: If the target string references an invalid + path or resolves to something that is not a + buffer + """ + module_path, _, buffer_name = target.rpartition(".") + + mod: torch.nn.Module = self.get_submodule(module_path) + + if not hasattr(mod, buffer_name): + raise AttributeError(mod._get_name() + " has no attribute `" + + buffer_name + "`") + + buffer: torch.Tensor = getattr(mod, buffer_name) + + if buffer_name not in mod._buffers: + raise AttributeError("`" + buffer_name + "` is not a buffer") + + return buffer + + def get_extra_state(self) -> Any: + """Return any extra state to include in the module's state_dict. + + Implement this and a corresponding :func:`set_extra_state` for your module + if you need to store extra state. This function is called when building the + module's `state_dict()`. + + Note that extra state should be picklable to ensure working serialization + of the state_dict. We only provide backwards compatibility guarantees + for serializing Tensors; other objects may break backwards compatibility if + their serialized pickled form changes. + + Returns: + object: Any extra state to store in the module's state_dict + """ + raise RuntimeError( + "Reached a code path in Module.get_extra_state() that should never be called. " + "Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml " + "to report this bug.") + + def set_extra_state(self, state: Any) -> None: + """Set extra state contained in the loaded `state_dict`. + + This function is called from :func:`load_state_dict` to handle any extra state + found within the `state_dict`. Implement this function and a corresponding + :func:`get_extra_state` for your module if you need to store extra state within its + `state_dict`. + + Args: + state (dict): Extra state from the `state_dict` + """ + raise RuntimeError( + "Reached a code path in Module.set_extra_state() that should never be called.
" + "Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml " + "to report this bug.") + + def _apply(self, fn, recurse=True): + if recurse: + for module in self.children(): + module._apply(fn) + + def compute_should_use_set_data(tensor, tensor_applied): + if torch._has_compatible_shallow_copy_type(tensor, tensor_applied): + # If the new tensor has compatible tensor type as the existing tensor, + # the current behavior is to change the tensor in-place using `.data =`, + # and the future behavior is to overwrite the existing tensor. However, + # changing the current behavior is a BC-breaking change, and we want it + # to happen in future releases. So for now we introduce the + # `torch.__future__.get_overwrite_module_params_on_conversion()` + # global flag to let the user control whether they want the future + # behavior of overwriting the existing tensor or not. + return not torch.__future__.get_overwrite_module_params_on_conversion() + else: + return False + + should_use_swap_tensors = torch.__future__.get_swap_module_params_on_conversion() + + for key, param in self._parameters.items(): + if param is None: + continue + # Tensors stored in modules are graph leaves, and we don't want to + # track autograd history of `param_applied`, so we have to use + # `with torch.no_grad():` + with torch.no_grad(): + param_applied = fn(param) + p_should_use_set_data = compute_should_use_set_data(param, param_applied) + + # subclasses may have multiple child tensors so we need to use swap_tensors + p_should_use_swap_tensors = should_use_swap_tensors or is_traceable_wrapper_subclass(param_applied) + + param_grad = param.grad + if p_should_use_swap_tensors: + try: + if param_grad is not None: + # Accessing param.grad makes its at::Tensor's use_count 2, which will prevent swapping. + # Decrement use count of the gradient by setting to None + param.grad = None + param_applied = torch.nn.Parameter(param_applied, requires_grad=param.requires_grad) + torch.utils.swap_tensors(param, param_applied) + except Exception as e: + if param_grad is not None: + param.grad = param_grad + raise RuntimeError(f"_apply(): Couldn't swap {self._get_name()}.{key}") from e + out_param = param + elif p_should_use_set_data: + param.data = param_applied + out_param = param + else: + assert isinstance(param, Parameter) + assert param.is_leaf + out_param = Parameter(param_applied, param.requires_grad) + self._parameters[key] = out_param + + if param_grad is not None: + with torch.no_grad(): + grad_applied = fn(param_grad) + g_should_use_set_data = compute_should_use_set_data(param_grad, grad_applied) + if p_should_use_swap_tensors: + grad_applied.requires_grad_(param_grad.requires_grad) + try: + torch.utils.swap_tensors(param_grad, grad_applied) + except Exception as e: + raise RuntimeError(f"_apply(): Couldn't swap {self._get_name()}.{key}.grad") from e + out_param.grad = param_grad + elif g_should_use_set_data: + assert out_param.grad is not None + out_param.grad.data = grad_applied + else: + assert param_grad.is_leaf + out_param.grad = grad_applied.requires_grad_(param_grad.requires_grad) + + for key, buf in self._buffers.items(): + if buf is not None: + self._buffers[key] = fn(buf) + + return self + + def apply(self: T, fn: Callable[['Module'], None]) -> T: + r"""Apply ``fn`` recursively to every submodule (as returned by ``.children()``) as well as self. + + Typical use includes initializing the parameters of a model + (see also :ref:`nn-init-doc`). 
+ + Args: + fn (:class:`Module` -> None): function to be applied to each submodule + + Returns: + Module: self + + Example:: + + >>> @torch.no_grad() + >>> def init_weights(m): + >>> print(m) + >>> if type(m) == nn.Linear: + >>> m.weight.fill_(1.0) + >>> print(m.weight) + >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2)) + >>> net.apply(init_weights) + Linear(in_features=2, out_features=2, bias=True) + Parameter containing: + tensor([[1., 1.], + [1., 1.]], requires_grad=True) + Linear(in_features=2, out_features=2, bias=True) + Parameter containing: + tensor([[1., 1.], + [1., 1.]], requires_grad=True) + Sequential( + (0): Linear(in_features=2, out_features=2, bias=True) + (1): Linear(in_features=2, out_features=2, bias=True) + ) + + """ + for module in self.children(): + module.apply(fn) + fn(self) + return self + + def cuda(self: T, device: Optional[Union[int, device]] = None) -> T: + r"""Move all model parameters and buffers to the GPU. + + This also makes associated parameters and buffers different objects. So + it should be called before constructing optimizer if the module will + live on GPU while being optimized. + + .. note:: + This method modifies the module in-place. + + Args: + device (int, optional): if specified, all parameters will be + copied to that device + + Returns: + Module: self + """ + return self._apply(lambda t: t.cuda(device)) + + def ipu(self: T, device: Optional[Union[int, device]] = None) -> T: + r"""Move all model parameters and buffers to the IPU. + + This also makes associated parameters and buffers different objects. So + it should be called before constructing optimizer if the module will + live on IPU while being optimized. + + .. note:: + This method modifies the module in-place. + + Arguments: + device (int, optional): if specified, all parameters will be + copied to that device + + Returns: + Module: self + """ + return self._apply(lambda t: t.ipu(device)) + + def xpu(self: T, device: Optional[Union[int, device]] = None) -> T: + r"""Move all model parameters and buffers to the XPU. + + This also makes associated parameters and buffers different objects. So + it should be called before constructing optimizer if the module will + live on XPU while being optimized. + + .. note:: + This method modifies the module in-place. + + Arguments: + device (int, optional): if specified, all parameters will be + copied to that device + + Returns: + Module: self + """ + return self._apply(lambda t: t.xpu(device)) + + def cpu(self: T) -> T: + r"""Move all model parameters and buffers to the CPU. + + .. note:: + This method modifies the module in-place. + + Returns: + Module: self + """ + return self._apply(lambda t: t.cpu()) + + def type(self: T, dst_type: Union[dtype, str]) -> T: + r"""Casts all parameters and buffers to :attr:`dst_type`. + + .. note:: + This method modifies the module in-place. + + Args: + dst_type (type or string): the desired type + + Returns: + Module: self + """ + return self._apply(lambda t: t.type(dst_type)) + + def float(self: T) -> T: + r"""Casts all floating point parameters and buffers to ``float`` datatype. + + .. note:: + This method modifies the module in-place. + + Returns: + Module: self + """ + return self._apply(lambda t: t.float() if t.is_floating_point() else t) + + def double(self: T) -> T: + r"""Casts all floating point parameters and buffers to ``double`` datatype. + + .. note:: + This method modifies the module in-place. 
+ + Returns: + Module: self + """ + return self._apply(lambda t: t.double() if t.is_floating_point() else t) + + def half(self: T) -> T: + r"""Casts all floating point parameters and buffers to ``half`` datatype. + + .. note:: + This method modifies the module in-place. + + Returns: + Module: self + """ + return self._apply(lambda t: t.half() if t.is_floating_point() else t) + + def bfloat16(self: T) -> T: + r"""Casts all floating point parameters and buffers to ``bfloat16`` datatype. + + .. note:: + This method modifies the module in-place. + + Returns: + Module: self + """ + return self._apply(lambda t: t.bfloat16() if t.is_floating_point() else t) + + def to_empty(self: T, *, device: Optional[DeviceLikeType], recurse: bool = True) -> T: + r"""Move the parameters and buffers to the specified device without copying storage. + + Args: + device (:class:`torch.device`): The desired device of the parameters + and buffers in this module. + recurse (bool): Whether parameters and buffers of submodules should + be recursively moved to the specified device. + + Returns: + Module: self + """ + return self._apply(lambda t: torch.empty_like(t, device=device), recurse=recurse) + + @overload + def to(self, device: Optional[DeviceLikeType] = ..., dtype: Optional[dtype] = ..., + non_blocking: bool = ...) -> Self: + ... + + @overload + def to(self, dtype: dtype, non_blocking: bool = ...) -> Self: + ... + + @overload + def to(self, tensor: Tensor, non_blocking: bool = ...) -> Self: + ... + + def to(self, *args, **kwargs): + r"""Move and/or cast the parameters and buffers. + + This can be called as + + .. function:: to(device=None, dtype=None, non_blocking=False) + :noindex: + + .. function:: to(dtype, non_blocking=False) + :noindex: + + .. function:: to(tensor, non_blocking=False) + :noindex: + + .. function:: to(memory_format=torch.channels_last) + :noindex: + + Its signature is similar to :meth:`torch.Tensor.to`, but only accepts + floating point or complex :attr:`dtype`\ s. In addition, this method will + only cast the floating point or complex parameters and buffers to :attr:`dtype` + (if given). The integral parameters and buffers will be moved + :attr:`device`, if that is given, but with dtypes unchanged. When + :attr:`non_blocking` is set, it tries to convert/move asynchronously + with respect to the host if possible, e.g., moving CPU Tensors with + pinned memory to CUDA devices. + + See below for examples. + + .. note:: + This method modifies the module in-place. 
+ + Args: + device (:class:`torch.device`): the desired device of the parameters + and buffers in this module + dtype (:class:`torch.dtype`): the desired floating point or complex dtype of + the parameters and buffers in this module + tensor (torch.Tensor): Tensor whose dtype and device are the desired + dtype and device for all parameters and buffers in this module + memory_format (:class:`torch.memory_format`): the desired memory + format for 4D parameters and buffers in this module (keyword + only argument) + + Returns: + Module: self + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> linear = nn.Linear(2, 2) + >>> linear.weight + Parameter containing: + tensor([[ 0.1913, -0.3420], + [-0.5113, -0.2325]]) + >>> linear.to(torch.double) + Linear(in_features=2, out_features=2, bias=True) + >>> linear.weight + Parameter containing: + tensor([[ 0.1913, -0.3420], + [-0.5113, -0.2325]], dtype=torch.float64) + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1) + >>> gpu1 = torch.device("cuda:1") + >>> linear.to(gpu1, dtype=torch.half, non_blocking=True) + Linear(in_features=2, out_features=2, bias=True) + >>> linear.weight + Parameter containing: + tensor([[ 0.1914, -0.3420], + [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1') + >>> cpu = torch.device("cpu") + >>> linear.to(cpu) + Linear(in_features=2, out_features=2, bias=True) + >>> linear.weight + Parameter containing: + tensor([[ 0.1914, -0.3420], + [-0.5112, -0.2324]], dtype=torch.float16) + + >>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble) + >>> linear.weight + Parameter containing: + tensor([[ 0.3741+0.j, 0.2382+0.j], + [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128) + >>> linear(torch.ones(3, 2, dtype=torch.cdouble)) + tensor([[0.6122+0.j, 0.1150+0.j], + [0.6122+0.j, 0.1150+0.j], + [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128) + + """ + device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs) + + if dtype is not None: + if not (dtype.is_floating_point or dtype.is_complex): + raise TypeError('nn.Module.to only accepts floating point or complex ' + f'dtypes, but got desired dtype={dtype}') + if dtype.is_complex: + warnings.warn( + "Complex modules are a new feature under active development whose design may change, " + "and some modules might not work as expected when using complex tensors as parameters or buffers. " + "Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml " + "if a complex module does not work as expected.") + + def convert(t): + try: + if convert_to_format is not None and t.dim() in (4, 5): + return t.to( + device, + dtype if t.is_floating_point() or t.is_complex() else None, + non_blocking, + memory_format=convert_to_format, + ) + return t.to( + device, + dtype if t.is_floating_point() or t.is_complex() else None, + non_blocking, + ) + except NotImplementedError as e: + if str(e) == "Cannot copy out of meta tensor; no data!": + raise NotImplementedError( + f"{e} Please use torch.nn.Module.to_empty() instead of torch.nn.Module.to() " + f"when moving module from meta to a different device." + ) from None + else: + raise + + return self._apply(convert) + + def register_full_backward_pre_hook( + self, + hook: Callable[["Module", _grad_t], Union[None, _grad_t]], + prepend: bool = False, + ) -> RemovableHandle: + r"""Register a backward pre-hook on the module. + + The hook will be called every time the gradients for the module are computed. 
+ The hook should have the following signature:: + + hook(module, grad_output) -> tuple[Tensor] or None + + The :attr:`grad_output` is a tuple. The hook should + not modify its arguments, but it can optionally return a new gradient with + respect to the output that will be used in place of :attr:`grad_output` in + subsequent computations. Entries in :attr:`grad_output` will be ``None`` for + all non-Tensor arguments. + + For technical reasons, when this hook is applied to a Module, its forward function will + receive a view of each Tensor passed to the Module. Similarly the caller will receive a view + of each Tensor returned by the Module's forward function. + + .. warning :: + Modifying inputs inplace is not allowed when using backward hooks and + will raise an error. + + Args: + hook (Callable): The user-defined hook to be registered. + prepend (bool): If true, the provided ``hook`` will be fired before + all existing ``backward_pre`` hooks on this + :class:`torch.nn.modules.Module`. Otherwise, the provided + ``hook`` will be fired after all existing ``backward_pre`` hooks + on this :class:`torch.nn.modules.Module`. Note that global + ``backward_pre`` hooks registered with + :func:`register_module_full_backward_pre_hook` will fire before + all hooks registered by this method. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + + """ + handle = hooks.RemovableHandle(self._backward_pre_hooks) + self._backward_pre_hooks[handle.id] = hook + if prepend: + self._backward_pre_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined] + return handle + + def register_backward_hook( + self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]] + ) -> RemovableHandle: + r"""Register a backward hook on the module. + + This function is deprecated in favor of :meth:`~torch.nn.Module.register_full_backward_hook` and + the behavior of this function will change in future versions. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + + """ + if self._is_full_backward_hook is True: + raise RuntimeError("Cannot use both regular backward hooks and full backward hooks on a " + "single Module. Please use only one of them.") + + self._is_full_backward_hook = False + + handle = hooks.RemovableHandle(self._backward_hooks) + self._backward_hooks[handle.id] = hook + return handle + + def register_full_backward_hook( + self, + hook: Callable[["Module", _grad_t, _grad_t], Union[None, _grad_t]], + prepend: bool = False, + ) -> RemovableHandle: + r"""Register a backward hook on the module. + + The hook will be called every time the gradients with respect to a module + are computed, i.e. the hook will execute if and only if the gradients with + respect to module outputs are computed. The hook should have the following + signature:: + + hook(module, grad_input, grad_output) -> tuple(Tensor) or None + + The :attr:`grad_input` and :attr:`grad_output` are tuples that contain the gradients + with respect to the inputs and outputs respectively. The hook should + not modify its arguments, but it can optionally return a new gradient with + respect to the input that will be used in place of :attr:`grad_input` in + subsequent computations. :attr:`grad_input` will only correspond to the inputs given + as positional arguments and all kwarg arguments are ignored. 
Entries + in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor + arguments. + + For technical reasons, when this hook is applied to a Module, its forward function will + receive a view of each Tensor passed to the Module. Similarly the caller will receive a view + of each Tensor returned by the Module's forward function. + + .. warning :: + Modifying inputs or outputs inplace is not allowed when using backward hooks and + will raise an error. + + Args: + hook (Callable): The user-defined hook to be registered. + prepend (bool): If true, the provided ``hook`` will be fired before + all existing ``backward`` hooks on this + :class:`torch.nn.modules.Module`. Otherwise, the provided + ``hook`` will be fired after all existing ``backward`` hooks on + this :class:`torch.nn.modules.Module`. Note that global + ``backward`` hooks registered with + :func:`register_module_full_backward_hook` will fire before + all hooks registered by this method. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + + """ + if self._is_full_backward_hook is False: + raise RuntimeError("Cannot use both regular backward hooks and full backward hooks on a " + "single Module. Please use only one of them.") + + self._is_full_backward_hook = True + + handle = hooks.RemovableHandle(self._backward_hooks) + self._backward_hooks[handle.id] = hook + if prepend: + self._backward_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined] + return handle + + def _get_backward_hooks(self): + r"""Return the backward hooks for use in the call function. + + It returns two lists, one with the full backward hooks and one with the non-full + backward hooks. + """ + full_backward_hooks: List[Callable] = [] + if (_global_is_full_backward_hook is True): + full_backward_hooks += _global_backward_hooks.values() + if (self._is_full_backward_hook is True): + full_backward_hooks += self._backward_hooks.values() + + non_full_backward_hooks: List[Callable] = [] + if (_global_is_full_backward_hook is False): + non_full_backward_hooks += _global_backward_hooks.values() + if (self._is_full_backward_hook is False): + non_full_backward_hooks += self._backward_hooks.values() + + return full_backward_hooks, non_full_backward_hooks + + def _get_backward_pre_hooks(self): + backward_pre_hooks: List[Callable] = [] + backward_pre_hooks += _global_backward_pre_hooks.values() + backward_pre_hooks += self._backward_pre_hooks.values() + + return backward_pre_hooks + + def _maybe_warn_non_full_backward_hook(self, inputs, result, grad_fn): + if not isinstance(result, torch.Tensor): + if not (isinstance(result, tuple) and all(isinstance(r, torch.Tensor) for r in result)): + warnings.warn("Using non-full backward hooks on a Module that does not return a " + "single Tensor or a tuple of Tensors is deprecated and will be removed " + "in future versions. This hook will be missing some of the grad_output. " + "Please use register_full_backward_hook to get the documented behavior.") + return + else: + result = (result,) + + if not isinstance(inputs, torch.Tensor): + if not (isinstance(inputs, tuple) and all(isinstance(i, torch.Tensor) for i in inputs)): + warnings.warn("Using non-full backward hooks on a Module that does not take as input a " + "single Tensor or a tuple of Tensors is deprecated and will be removed " + "in future versions. This hook will be missing some of the grad_input. 
" + "Please use register_full_backward_hook to get the documented behavior.") + return + else: + inputs = (inputs,) + + # At this point we are sure that inputs and result are tuple of Tensors + out_grad_fn = {r.grad_fn for r in result if r.grad_fn is not None} + if len(out_grad_fn) == 0 or (len(out_grad_fn) == 1 and grad_fn not in out_grad_fn): + warnings.warn("Using a non-full backward hook when outputs are nested in python data structure " + "is deprecated and will be removed in future versions. This hook will be missing " + "some grad_output.") + elif len(out_grad_fn) > 1: + warnings.warn("Using a non-full backward hook when outputs are generated by different autograd Nodes " + "is deprecated and will be removed in future versions. This hook will be missing " + "some grad_output. Please use register_full_backward_hook to get the documented behavior.") + else: + # At this point the grad_output part of the hook will most likely be correct + inputs_grad_fn = {i.grad_fn for i in inputs if i.grad_fn is not None} + + next_functions = {n[0] for n in grad_fn.next_functions} + + if inputs_grad_fn != next_functions: + warnings.warn("Using a non-full backward hook when the forward contains multiple autograd Nodes " + "is deprecated and will be removed in future versions. This hook will be missing " + "some grad_input. Please use register_full_backward_hook to get the documented " + "behavior.") + + def register_forward_pre_hook( + self, + hook: Union[ + Callable[[T, Tuple[Any, ...]], Optional[Any]], + Callable[[T, Tuple[Any, ...], Dict[str, Any]], Optional[Tuple[Any, Dict[str, Any]]]], + ], + *, + prepend: bool = False, + with_kwargs: bool = False, + ) -> RemovableHandle: + r"""Register a forward pre-hook on the module. + + The hook will be called every time before :func:`forward` is invoked. + + + If ``with_kwargs`` is false or not specified, the input contains only + the positional arguments given to the module. Keyword arguments won't be + passed to the hooks and only to the ``forward``. The hook can modify the + input. User can either return a tuple or a single modified value in the + hook. We will wrap the value into a tuple if a single value is returned + (unless that value is already a tuple). The hook should have the + following signature:: + + hook(module, args) -> None or modified input + + If ``with_kwargs`` is true, the forward pre-hook will be passed the + kwargs given to the forward function. And if the hook modifies the + input, both the args and kwargs should be returned. The hook should have + the following signature:: + + hook(module, args, kwargs) -> None or a tuple of modified input and kwargs + + Args: + hook (Callable): The user defined hook to be registered. + prepend (bool): If true, the provided ``hook`` will be fired before + all existing ``forward_pre`` hooks on this + :class:`torch.nn.modules.Module`. Otherwise, the provided + ``hook`` will be fired after all existing ``forward_pre`` hooks + on this :class:`torch.nn.modules.Module`. Note that global + ``forward_pre`` hooks registered with + :func:`register_module_forward_pre_hook` will fire before all + hooks registered by this method. + Default: ``False`` + with_kwargs (bool): If true, the ``hook`` will be passed the kwargs + given to the forward function. 
+ Default: ``False`` + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle( + self._forward_pre_hooks, + extra_dict=self._forward_pre_hooks_with_kwargs + ) + self._forward_pre_hooks[handle.id] = hook + if with_kwargs: + self._forward_pre_hooks_with_kwargs[handle.id] = True + + if prepend: + self._forward_pre_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined] + return handle + + def register_forward_hook( + self, + hook: Union[ + Callable[[T, Tuple[Any, ...], Any], Optional[Any]], + Callable[[T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]], + ], + *, + prepend: bool = False, + with_kwargs: bool = False, + always_call: bool = False, + ) -> RemovableHandle: + r"""Register a forward hook on the module. + + The hook will be called every time after :func:`forward` has computed an output. + + If ``with_kwargs`` is ``False`` or not specified, the input contains only + the positional arguments given to the module. Keyword arguments won't be + passed to the hooks and only to the ``forward``. The hook can modify the + output. It can modify the input inplace but it will not have effect on + forward since this is called after :func:`forward` is called. The hook + should have the following signature:: + + hook(module, args, output) -> None or modified output + + If ``with_kwargs`` is ``True``, the forward hook will be passed the + ``kwargs`` given to the forward function and be expected to return the + output possibly modified. The hook should have the following signature:: + + hook(module, args, kwargs, output) -> None or modified output + + Args: + hook (Callable): The user defined hook to be registered. + prepend (bool): If ``True``, the provided ``hook`` will be fired + before all existing ``forward`` hooks on this + :class:`torch.nn.modules.Module`. Otherwise, the provided + ``hook`` will be fired after all existing ``forward`` hooks on + this :class:`torch.nn.modules.Module`. Note that global + ``forward`` hooks registered with + :func:`register_module_forward_hook` will fire before all hooks + registered by this method. + Default: ``False`` + with_kwargs (bool): If ``True``, the ``hook`` will be passed the + kwargs given to the forward function. + Default: ``False`` + always_call (bool): If ``True`` the ``hook`` will be run regardless of + whether an exception is raised while calling the Module. 
+ Default: ``False`` + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle( + self._forward_hooks, + extra_dict=[self._forward_hooks_with_kwargs, self._forward_hooks_always_called], + ) + self._forward_hooks[handle.id] = hook + if with_kwargs: + self._forward_hooks_with_kwargs[handle.id] = True + if always_call: + self._forward_hooks_always_called[handle.id] = True + if prepend: + self._forward_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined] + return handle + + def _slow_forward(self, *input, **kwargs): + tracing_state = torch._C._get_tracing_state() + if not tracing_state or isinstance(self.forward, torch._C.ScriptMethod): + return self.forward(*input, **kwargs) + recording_scopes = torch.jit._trace._trace_module_map is not None + if recording_scopes: + # type ignore was added because at this point one knows that + # torch.jit._trace._trace_module_map is not Optional and has type Dict[Any, Any] + name = torch.jit._trace._trace_module_map[self] if self in torch.jit._trace._trace_module_map else None # type: ignore[index, operator] # noqa: B950 + if name: + tracing_state.push_scope(name) + else: + recording_scopes = False + try: + result = self.forward(*input, **kwargs) + finally: + if recording_scopes: + tracing_state.pop_scope() + return result + + def _wrapped_call_impl(self, *args, **kwargs): + if self._compiled_call_impl is not None: + return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc] + else: + return self._call_impl(*args, **kwargs) + + def _call_impl(self, *args, **kwargs): + forward_call = (self._slow_forward if torch._C._get_tracing_state() else self.forward) + # If we don't have any hooks, we want to skip the rest of the logic in + # this function, and just call forward. + if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks + or _global_backward_pre_hooks or _global_backward_hooks + or _global_forward_hooks or _global_forward_pre_hooks): + return forward_call(*args, **kwargs) + + try: + result = None + called_always_called_hooks = set() + + full_backward_hooks, non_full_backward_hooks = [], [] + backward_pre_hooks = [] + if self._backward_pre_hooks or _global_backward_pre_hooks: + backward_pre_hooks = self._get_backward_pre_hooks() + + if self._backward_hooks or _global_backward_hooks: + full_backward_hooks, non_full_backward_hooks = self._get_backward_hooks() + + if _global_forward_pre_hooks or self._forward_pre_hooks: + for hook_id, hook in ( + *_global_forward_pre_hooks.items(), + *self._forward_pre_hooks.items(), + ): + if hook_id in self._forward_pre_hooks_with_kwargs: + args_kwargs_result = hook(self, args, kwargs) # type: ignore[misc] + if args_kwargs_result is not None: + if isinstance(args_kwargs_result, tuple) and len(args_kwargs_result) == 2: + args, kwargs = args_kwargs_result + else: + raise RuntimeError( + "forward pre-hook must return None or a tuple " + f"of (new_args, new_kwargs), but got {args_kwargs_result}." 
+ ) + else: + args_result = hook(self, args) + if args_result is not None: + if not isinstance(args_result, tuple): + args_result = (args_result,) + args = args_result + + bw_hook = None + if full_backward_hooks or backward_pre_hooks: + bw_hook = hooks.BackwardHook(self, full_backward_hooks, backward_pre_hooks) + args = bw_hook.setup_input_hook(args) + + result = forward_call(*args, **kwargs) + if _global_forward_hooks or self._forward_hooks: + for hook_id, hook in ( + *_global_forward_hooks.items(), + *self._forward_hooks.items(), + ): + # mark that always called hook is run + if hook_id in self._forward_hooks_always_called or hook_id in _global_forward_hooks_always_called: + called_always_called_hooks.add(hook_id) + + if hook_id in self._forward_hooks_with_kwargs: + hook_result = hook(self, args, kwargs, result) + else: + hook_result = hook(self, args, result) + + if hook_result is not None: + result = hook_result + + if bw_hook: + if not isinstance(result, (torch.Tensor, tuple)): + warnings.warn("For backward hooks to be called," + " module output should be a Tensor or a tuple of Tensors" + f" but received {type(result)}") + result = bw_hook.setup_output_hook(result) + + # Handle the non-full backward hooks + if non_full_backward_hooks: + var = result + while not isinstance(var, torch.Tensor): + if isinstance(var, dict): + var = next(v for v in var.values() if isinstance(v, torch.Tensor)) + else: + var = var[0] + grad_fn = var.grad_fn + if grad_fn is not None: + for hook in non_full_backward_hooks: + grad_fn.register_hook(_WrappedHook(hook, self)) + self._maybe_warn_non_full_backward_hook(args, result, grad_fn) + + return result + + except Exception: + # run always called hooks if they have not already been run + # For now only forward hooks have the always_call option but perhaps + # this functionality should be added to full backward hooks as well. 
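+            # (The blocks below re-run any ``always_call`` forward hooks with the most
+            # recent ``args``/``result`` seen before the failure; exceptions raised by
+            # those hooks are downgraded to warnings so that the original error from
+            # ``forward`` is the one re-raised at the end of this handler.)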
+ for hook_id, hook in _global_forward_hooks.items(): + if hook_id in _global_forward_hooks_always_called and hook_id not in called_always_called_hooks: # type: ignore[possibly-undefined] + try: + hook_result = hook(self, args, result) # type: ignore[possibly-undefined] + if hook_result is not None: + result = hook_result + except Exception as e: + warnings.warn("global module forward hook with ``always_call=True`` raised an exception " + f"that was silenced as another error was raised in forward: {str(e)}") + continue + + for hook_id, hook in self._forward_hooks.items(): + if hook_id in self._forward_hooks_always_called and hook_id not in called_always_called_hooks: # type: ignore[possibly-undefined] + try: + if hook_id in self._forward_hooks_with_kwargs: + hook_result = hook(self, args, kwargs, result) # type: ignore[possibly-undefined] + else: + hook_result = hook(self, args, result) # type: ignore[possibly-undefined] + if hook_result is not None: + result = hook_result + except Exception as e: + warnings.warn("module forward hook with ``always_call=True`` raised an exception " + f"that was silenced as another error was raised in forward: {str(e)}") + continue + # raise exception raised in try block + raise + + + __call__ : Callable[..., Any] = _wrapped_call_impl + + def __getstate__(self): + state = self.__dict__.copy() + state.pop("_compiled_call_impl", None) + return state + + def __setstate__(self, state): + self.__dict__.update(state) + + # Support loading old checkpoints that don't have the following attrs: + if '_forward_pre_hooks' not in self.__dict__: + self._forward_pre_hooks = OrderedDict() + if '_forward_pre_hooks_with_kwargs' not in self.__dict__: + self._forward_pre_hooks_with_kwargs = OrderedDict() + if '_forward_hooks_with_kwargs' not in self.__dict__: + self._forward_hooks_with_kwargs = OrderedDict() + if '_forward_hooks_always_called' not in self.__dict__: + self._forward_hooks_always_called = OrderedDict() + if '_state_dict_hooks' not in self.__dict__: + self._state_dict_hooks = OrderedDict() + if '_state_dict_pre_hooks' not in self.__dict__: + self._state_dict_pre_hooks = OrderedDict() + if '_load_state_dict_pre_hooks' not in self.__dict__: + self._load_state_dict_pre_hooks = OrderedDict() + if '_load_state_dict_post_hooks' not in self.__dict__: + self._load_state_dict_post_hooks = OrderedDict() + if '_non_persistent_buffers_set' not in self.__dict__: + self._non_persistent_buffers_set = set() + if '_is_full_backward_hook' not in self.__dict__: + self._is_full_backward_hook = None + if '_backward_pre_hooks' not in self.__dict__: + self._backward_pre_hooks = OrderedDict() + + # On the return type: + # We choose to return `Any` in the `__getattr__` type signature instead of a more strict `Union[Tensor, Module]`. + # This is done for better interop with various type checkers for the end users. + # Having a stricter return type doesn't play nicely with `register_buffer()` and forces + # people to excessively use type-ignores, asserts, casts, etc. 
+ # See full discussion on the problems with returning `Union` here + # https://github.com/microsoft/pyright/issues/4213 + def __getattr__(self, name: str) -> Any: + if '_parameters' in self.__dict__: + _parameters = self.__dict__['_parameters'] + if name in _parameters: + return _parameters[name] + if '_buffers' in self.__dict__: + _buffers = self.__dict__['_buffers'] + if name in _buffers: + return _buffers[name] + if '_modules' in self.__dict__: + modules = self.__dict__['_modules'] + if name in modules: + return modules[name] + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") + + def __setattr__(self, name: str, value: Union[Tensor, 'Module']) -> None: + def remove_from(*dicts_or_sets): + for d in dicts_or_sets: + if name in d: + if isinstance(d, dict): + del d[name] + else: + d.discard(name) + + params = self.__dict__.get('_parameters') + if isinstance(value, Parameter): + if params is None: + raise AttributeError( + "cannot assign parameters before Module.__init__() call") + remove_from(self.__dict__, self._buffers, self._modules, self._non_persistent_buffers_set) + self.register_parameter(name, value) + elif params is not None and name in params: + if value is not None: + raise TypeError(f"cannot assign '{torch.typename(value)}' as parameter '{name}' " + "(torch.nn.Parameter or None expected)" + ) + self.register_parameter(name, value) + else: + modules = self.__dict__.get('_modules') + if isinstance(value, Module): + if modules is None: + raise AttributeError( + "cannot assign module before Module.__init__() call") + remove_from(self.__dict__, self._parameters, self._buffers, self._non_persistent_buffers_set) + for hook in _global_module_registration_hooks.values(): + output = hook(self, name, value) + if output is not None: + value = output + modules[name] = value + elif modules is not None and name in modules: + if value is not None: + raise TypeError(f"cannot assign '{torch.typename(value)}' as child module '{name}' " + "(torch.nn.Module or None expected)" + ) + for hook in _global_module_registration_hooks.values(): + output = hook(self, name, value) + if output is not None: + value = output + modules[name] = value + else: + buffers = self.__dict__.get('_buffers') + if buffers is not None and name in buffers: + if value is not None and not isinstance(value, torch.Tensor): + raise TypeError(f"cannot assign '{torch.typename(value)}' as buffer '{name}' " + "(torch.Tensor or None expected)" + ) + for hook in _global_buffer_registration_hooks.values(): + output = hook(self, name, value) + if output is not None: + value = output + buffers[name] = value + else: + super().__setattr__(name, value) + + def __delattr__(self, name): + if name in self._parameters: + del self._parameters[name] + elif name in self._buffers: + del self._buffers[name] + self._non_persistent_buffers_set.discard(name) + elif name in self._modules: + del self._modules[name] + else: + super().__delattr__(name) + + def _register_state_dict_hook(self, hook): + r"""Register a state-dict hook. + + These hooks will be called with arguments: `self`, `state_dict`, + `prefix`, `local_metadata`, after the `state_dict` of `self` is set. + Note that only parameters and buffers of `self` or its children are + guaranteed to exist in `state_dict`. The hooks may modify `state_dict` + inplace or return a new one. 
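+
+        A minimal sketch of such a hook (the function and key names below are
+        illustrative only, not part of the API)::
+
+            >>> # xdoctest: +SKIP("undefined vars")
+            >>> def strip_private_entries(module, state_dict, prefix, local_metadata):
+            ...     # in-place edits are allowed; returning a new dict also works
+            ...     for key in list(state_dict.keys()):
+            ...         if key.startswith(prefix + '_private_'):
+            ...             del state_dict[key]
+            >>> handle = net._register_state_dict_hook(strip_private_entries)
+            >>> # ... later, remove it with handle.remove()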
+ """ + handle = hooks.RemovableHandle(self._state_dict_hooks) + self._state_dict_hooks[handle.id] = hook + return handle + + def register_state_dict_pre_hook(self, hook): + r"""Register a pre-hook for the :meth:`~torch.nn.Module.state_dict` method. + + These hooks will be called with arguments: ``self``, ``prefix``, + and ``keep_vars`` before calling ``state_dict`` on ``self``. The registered + hooks can be used to perform pre-processing before the ``state_dict`` + call is made. + """ + handle = hooks.RemovableHandle(self._state_dict_pre_hooks) + self._state_dict_pre_hooks[handle.id] = hook + return handle + + def _save_to_state_dict(self, destination, prefix, keep_vars): + r"""Save module state to the `destination` dictionary. + + The `destination` dictionary will contain the state + of the module, but not its descendants. This is called on every + submodule in :meth:`~torch.nn.Module.state_dict`. + + In rare cases, subclasses can achieve class-specific behavior by + overriding this method with custom logic. + + Args: + destination (dict): a dict where state will be stored + prefix (str): the prefix for parameters and buffers used in this + module + """ + for name, param in self._parameters.items(): + if param is not None: + destination[prefix + name] = param if keep_vars else param.detach() + for name, buf in self._buffers.items(): + if buf is not None and name not in self._non_persistent_buffers_set: + destination[prefix + name] = buf if keep_vars else buf.detach() + extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX + if getattr(self.__class__, "get_extra_state", Module.get_extra_state) is not Module.get_extra_state: + destination[extra_state_key] = self.get_extra_state() + + # The user can pass an optional arbitrary mappable object to `state_dict`, in which case `state_dict` returns + # back that same object. But if they pass nothing, an `OrderedDict` is created and returned. + T_destination = TypeVar('T_destination', bound=Dict[str, Any]) + + @overload + def state_dict(self, *, destination: T_destination, prefix: str = ..., keep_vars: bool = ...) -> T_destination: + ... + + @overload + def state_dict(self, *, prefix: str = ..., keep_vars: bool = ...) -> Dict[str, Any]: + ... + + # TODO: Change `*args` to `*` and remove the corresponding warning in docs when BC allows. + # Also remove the logic for arg parsing together. + def state_dict(self, *args, destination=None, prefix='', keep_vars=False): + r"""Return a dictionary containing references to the whole state of the module. + + Both parameters and persistent buffers (e.g. running averages) are + included. Keys are corresponding parameter and buffer names. + Parameters and buffers set to ``None`` are not included. + + .. note:: + The returned object is a shallow copy. It contains references + to the module's parameters and buffers. + + .. warning:: + Currently ``state_dict()`` also accepts positional arguments for + ``destination``, ``prefix`` and ``keep_vars`` in order. However, + this is being deprecated and keyword arguments will be enforced in + future releases. + + .. warning:: + Please avoid the use of argument ``destination`` as it is not + designed for end-users. + + Args: + destination (dict, optional): If provided, the state of module will + be updated into the dict and the same object is returned. + Otherwise, an ``OrderedDict`` will be created and returned. + Default: ``None``. + prefix (str, optional): a prefix added to parameter and buffer + names to compose the keys in state_dict. Default: ``''``. 
+ keep_vars (bool, optional): by default the :class:`~torch.Tensor` s + returned in the state dict are detached from autograd. If it's + set to ``True``, detaching will not be performed. + Default: ``False``. + + Returns: + dict: + a dictionary containing a whole state of the module + + Example:: + + >>> # xdoctest: +SKIP("undefined vars") + >>> module.state_dict().keys() + ['bias', 'weight'] + + """ + # TODO: Remove `args` and the parsing logic when BC allows. + if len(args) > 0: + if destination is None: + destination = args[0] + if len(args) > 1 and prefix == '': + prefix = args[1] + if len(args) > 2 and keep_vars is False: + keep_vars = args[2] + # DeprecationWarning is ignored by default + warnings.warn( + "Positional args are being deprecated, use kwargs instead. Refer to " + "https://pytorch.org/docs/master/generated/torch.nn.Module.html#torch.nn.Module.state_dict" + " for details.") + + if destination is None: + destination = OrderedDict() + destination._metadata = OrderedDict() + + local_metadata = dict(version=self._version) + if hasattr(destination, "_metadata"): + destination._metadata[prefix[:-1]] = local_metadata + + for hook in self._state_dict_pre_hooks.values(): + hook(self, prefix, keep_vars) + self._save_to_state_dict(destination, prefix, keep_vars) + for name, module in self._modules.items(): + if module is not None: + module.state_dict(destination=destination, prefix=prefix + name + '.', keep_vars=keep_vars) + for hook in self._state_dict_hooks.values(): + hook_result = hook(self, destination, prefix, local_metadata) + if hook_result is not None: + destination = hook_result + return destination + + def _register_load_state_dict_pre_hook(self, hook, with_module=False): + r"""Register a pre-hook for the :meth:`~torch.nn.Module.load_state_dict` method. + + These hooks will be called with arguments: `state_dict`, `prefix`, + `local_metadata`, `strict`, `missing_keys`, `unexpected_keys`, + `error_msgs`, before loading `state_dict` into `self`. These arguments + are exactly the same as those of `_load_from_state_dict`. + + If ``with_module`` is ``True``, then the first argument to the hook is + an instance of the module. + + Arguments: + hook (Callable): Callable hook that will be invoked before + loading the state dict. + with_module (bool, optional): Whether or not to pass the module + instance to the hook as the first parameter. + """ + handle = hooks.RemovableHandle(self._load_state_dict_pre_hooks) + self._load_state_dict_pre_hooks[handle.id] = _WrappedHook(hook, self if with_module else None) + return handle + + def register_load_state_dict_post_hook(self, hook): + r"""Register a post hook to be run after module's ``load_state_dict`` is called. + + It should have the following signature:: + hook(module, incompatible_keys) -> None + + The ``module`` argument is the current module that this hook is registered + on, and the ``incompatible_keys`` argument is a ``NamedTuple`` consisting + of attributes ``missing_keys`` and ``unexpected_keys``. ``missing_keys`` + is a ``list`` of ``str`` containing the missing keys and + ``unexpected_keys`` is a ``list`` of ``str`` containing the unexpected keys. + + The given incompatible_keys can be modified inplace if needed. + + Note that the checks performed when calling :func:`load_state_dict` with + ``strict=True`` are affected by modifications the hook makes to + ``missing_keys`` or ``unexpected_keys``, as expected. 
Additions to either + set of keys will result in an error being thrown when ``strict=True``, and + clearing out both missing and unexpected keys will avoid an error. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(self._load_state_dict_post_hooks) + self._load_state_dict_post_hooks[handle.id] = hook + return handle + + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + r"""Copy parameters and buffers from :attr:`state_dict` into only this module, but not its descendants. + + This is called on every submodule + in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this + module in input :attr:`state_dict` is provided as :attr:`local_metadata`. + For state dicts without metadata, :attr:`local_metadata` is empty. + Subclasses can achieve class-specific backward compatible loading using + the version number at `local_metadata.get("version", None)`. + Additionally, :attr:`local_metadata` can also contain the key + `assign_to_params_buffers` that indicates whether keys should be + assigned their corresponding tensor in the state_dict. + + .. note:: + :attr:`state_dict` is not the same object as the input + :attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So + it can be modified. + + Args: + state_dict (dict): a dict containing parameters and + persistent buffers. + prefix (str): the prefix for parameters and buffers used in this + module + local_metadata (dict): a dict containing the metadata for this module. + See + strict (bool): whether to strictly enforce that the keys in + :attr:`state_dict` with :attr:`prefix` match the names of + parameters and buffers in this module + missing_keys (list of str): if ``strict=True``, add missing keys to + this list + unexpected_keys (list of str): if ``strict=True``, add unexpected + keys to this list + error_msgs (list of str): error messages should be added to this + list, and will be reported together in + :meth:`~torch.nn.Module.load_state_dict` + """ + for hook in self._load_state_dict_pre_hooks.values(): + hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) + + persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set} + local_name_params = itertools.chain(self._parameters.items(), persistent_buffers.items()) + local_state = {k: v for k, v in local_name_params if v is not None} + assign_to_params_buffers = local_metadata.get("assign_to_params_buffers", False) + use_swap_tensors = torch.__future__.get_swap_module_params_on_conversion() + + for name, param in local_state.items(): + key = prefix + name + if key in state_dict: + input_param = state_dict[key] + if not torch.overrides.is_tensor_like(input_param): + error_msgs.append(f'While copying the parameter named "{key}", ' + 'expected torch.Tensor or Tensor-like object from checkpoint but ' + f'received {type(input_param)}' + ) + continue + + # This is used to avoid copying uninitialized parameters into + # non-lazy modules, since they dont have the hook to do the checks + # in such case, it will error when accessing the .shape attribute. 
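+                # (torch.nn.parameter.is_lazy() is True for uninitialized
+                # parameters/buffers whose shape has not been materialized yet,
+                # so the shape-based checks below are skipped for them.)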
+ is_param_lazy = torch.nn.parameter.is_lazy(param) + # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+ + if not is_param_lazy and len(param.shape) == 0 and len(input_param.shape) == 1: + input_param = input_param[0] + + if not is_param_lazy and input_param.shape != param.shape: + # local shape should match the one in checkpoint + error_msgs.append('size mismatch for {}: copying a param with shape {} from checkpoint, ' + 'the shape in current model is {}.' + .format(key, input_param.shape, param.shape)) + continue + + if param.is_meta and not input_param.is_meta and not assign_to_params_buffers: + warnings.warn(f'for {key}: copying from a non-meta parameter in the checkpoint to a meta ' + 'parameter in the current model, which is a no-op. (Did you mean to ' + 'pass `assign=True` to assign items in the state dictionary to their ' + 'corresponding key in the module instead of copying them in place?)') + + try: + with torch.no_grad(): + if use_swap_tensors: + new_input_param = param.module_load(input_param, assign=assign_to_params_buffers) + if id(new_input_param) == id(input_param) or id(new_input_param) == id(param): + raise RuntimeError("module_load returned one of self or other, please .detach() " + "the result if returning one of the inputs in module_load") + if (isinstance(param, torch.nn.Parameter)): + if not isinstance(new_input_param, torch.nn.Parameter): + new_input_param = torch.nn.Parameter(new_input_param, requires_grad=param.requires_grad) + else: + new_input_param.requires_grad_(param.requires_grad) + torch.utils.swap_tensors(param, new_input_param) + del new_input_param + elif assign_to_params_buffers: + # Shape checks are already done above + if (isinstance(param, torch.nn.Parameter)): + if not isinstance(input_param, torch.nn.Parameter): + input_param = torch.nn.Parameter(input_param, requires_grad=param.requires_grad) + else: + input_param.requires_grad_(param.requires_grad) + setattr(self, name, input_param) + else: + param.copy_(input_param) + except Exception as ex: + action = "swapping" if use_swap_tensors else "copying" + error_msgs.append(f'While {action} the parameter named "{key}", ' + f'whose dimensions in the model are {param.size()} and ' + f'whose dimensions in the checkpoint are {input_param.size()}, ' + f'an exception occurred : {ex.args}.' + ) + elif strict: + missing_keys.append(key) + + extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX + if getattr(self.__class__, "set_extra_state", Module.set_extra_state) is not Module.set_extra_state: + if extra_state_key in state_dict: + self.set_extra_state(state_dict[extra_state_key]) + elif strict: + missing_keys.append(extra_state_key) + elif strict and (extra_state_key in state_dict): + unexpected_keys.append(extra_state_key) + + if strict: + for key in state_dict.keys(): + if key.startswith(prefix) and key != extra_state_key: + input_name = key[len(prefix):] + input_name = input_name.split('.', 1)[0] # get the name of param/buffer/child + if input_name not in self._modules and input_name not in local_state: + unexpected_keys.append(key) + + def load_state_dict(self, state_dict: Mapping[str, Any], + strict: bool = True, assign: bool = False): + r"""Copy parameters and buffers from :attr:`state_dict` into this module and its descendants. + + If :attr:`strict` is ``True``, then + the keys of :attr:`state_dict` must exactly match the keys returned + by this module's :meth:`~torch.nn.Module.state_dict` function. + + .. 
warning:: + If :attr:`assign` is ``True`` the optimizer must be created after + the call to :attr:`load_state_dict` unless + :func:`~torch.__future__.get_swap_module_params_on_conversion` is ``True``. + + Args: + state_dict (dict): a dict containing parameters and + persistent buffers. + strict (bool, optional): whether to strictly enforce that the keys + in :attr:`state_dict` match the keys returned by this module's + :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` + assign (bool, optional): When ``False``, the properties of the tensors + in the current module are preserved while when ``True``, the + properties of the Tensors in the state dict are preserved. The only + exception is the ``requires_grad`` field of :class:`~torch.nn.Parameter`s + for which the value from the module is preserved. + Default: ``False`` + + Returns: + ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: + * **missing_keys** is a list of str containing the missing keys + * **unexpected_keys** is a list of str containing the unexpected keys + + Note: + If a parameter or buffer is registered as ``None`` and its corresponding key + exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a + ``RuntimeError``. + """ + if not isinstance(state_dict, Mapping): + raise TypeError(f"Expected state_dict to be dict-like, got {type(state_dict)}.") + + missing_keys: List[str] = [] + unexpected_keys: List[str] = [] + error_msgs: List[str] = [] + + # copy state_dict so _load_from_state_dict can modify it + metadata = getattr(state_dict, '_metadata', None) + state_dict = OrderedDict(state_dict) + if metadata is not None: + # mypy isn't aware that "_metadata" exists in state_dict + state_dict._metadata = metadata # type: ignore[attr-defined] + + def load(module, local_state_dict, prefix=''): + local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) + if assign: + local_metadata['assign_to_params_buffers'] = assign + module._load_from_state_dict( + local_state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) + for name, child in module._modules.items(): + if child is not None: + child_prefix = prefix + name + '.' + child_state_dict = {k: v for k, v in local_state_dict.items() if k.startswith(child_prefix)} + load(child, child_state_dict, child_prefix) # noqa: F821 + + # Note that the hook can modify missing_keys and unexpected_keys. + incompatible_keys = _IncompatibleKeys(missing_keys, unexpected_keys) + for hook in module._load_state_dict_post_hooks.values(): + out = hook(module, incompatible_keys) + assert out is None, ( + "Hooks registered with ``register_load_state_dict_post_hook`` are not" + "expected to return new values, if incompatible_keys need to be modified," + "it should be done inplace." + ) + + load(self, state_dict) + del load + + if strict: + if len(unexpected_keys) > 0: + error_msgs.insert( + 0, 'Unexpected key(s) in state_dict: {}. '.format( + ', '.join(f'"{k}"' for k in unexpected_keys))) + if len(missing_keys) > 0: + error_msgs.insert( + 0, 'Missing key(s) in state_dict: {}. 
'.format( + ', '.join(f'"{k}"' for k in missing_keys))) + + if len(error_msgs) > 0: + raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( + self.__class__.__name__, "\n\t".join(error_msgs))) + return _IncompatibleKeys(missing_keys, unexpected_keys) + + def _named_members(self, get_members_fn, prefix='', recurse=True, remove_duplicate: bool = True): + r"""Help yield various names + members of modules.""" + memo = set() + modules = self.named_modules(prefix=prefix, remove_duplicate=remove_duplicate) if recurse else [(prefix, self)] + for module_prefix, module in modules: + members = get_members_fn(module) + for k, v in members: + if v is None or v in memo: + continue + if remove_duplicate: + memo.add(v) + name = module_prefix + ('.' if module_prefix else '') + k + yield name, v + + def parameters(self, recurse: bool = True) -> Iterator[Parameter]: + r"""Return an iterator over module parameters. + + This is typically passed to an optimizer. + + Args: + recurse (bool): if True, then yields parameters of this module + and all submodules. Otherwise, yields only parameters that + are direct members of this module. + + Yields: + Parameter: module parameter + + Example:: + + >>> # xdoctest: +SKIP("undefined vars") + >>> for param in model.parameters(): + >>> print(type(param), param.size()) + (20L,) + (20L, 1L, 5L, 5L) + + """ + for name, param in self.named_parameters(recurse=recurse): + yield param + + def named_parameters( + self, + prefix: str = '', + recurse: bool = True, + remove_duplicate: bool = True + ) -> Iterator[Tuple[str, Parameter]]: + r"""Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself. + + Args: + prefix (str): prefix to prepend to all parameter names. + recurse (bool): if True, then yields parameters of this module + and all submodules. Otherwise, yields only parameters that + are direct members of this module. + remove_duplicate (bool, optional): whether to remove the duplicated + parameters in the result. Defaults to True. + + Yields: + (str, Parameter): Tuple containing the name and parameter + + Example:: + + >>> # xdoctest: +SKIP("undefined vars") + >>> for name, param in self.named_parameters(): + >>> if name in ['bias']: + >>> print(param.size()) + + """ + gen = self._named_members( + lambda module: module._parameters.items(), + prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate) + yield from gen + + def buffers(self, recurse: bool = True) -> Iterator[Tensor]: + r"""Return an iterator over module buffers. + + Args: + recurse (bool): if True, then yields buffers of this module + and all submodules. Otherwise, yields only buffers that + are direct members of this module. + + Yields: + torch.Tensor: module buffer + + Example:: + + >>> # xdoctest: +SKIP("undefined vars") + >>> for buf in model.buffers(): + >>> print(type(buf), buf.size()) + (20L,) + (20L, 1L, 5L, 5L) + + """ + for _, buf in self.named_buffers(recurse=recurse): + yield buf + + def named_buffers(self, prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) -> Iterator[Tuple[str, Tensor]]: + r"""Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself. + + Args: + prefix (str): prefix to prepend to all buffer names. + recurse (bool, optional): if True, then yields buffers of this module + and all submodules. Otherwise, yields only buffers that + are direct members of this module. Defaults to True. 
+ remove_duplicate (bool, optional): whether to remove the duplicated buffers in the result. Defaults to True. + + Yields: + (str, torch.Tensor): Tuple containing the name and buffer + + Example:: + + >>> # xdoctest: +SKIP("undefined vars") + >>> for name, buf in self.named_buffers(): + >>> if name in ['running_var']: + >>> print(buf.size()) + + """ + gen = self._named_members( + lambda module: module._buffers.items(), + prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate) + yield from gen + + def children(self) -> Iterator['Module']: + r"""Return an iterator over immediate children modules. + + Yields: + Module: a child module + """ + for name, module in self.named_children(): + yield module + + def named_children(self) -> Iterator[Tuple[str, 'Module']]: + r"""Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself. + + Yields: + (str, Module): Tuple containing a name and child module + + Example:: + + >>> # xdoctest: +SKIP("undefined vars") + >>> for name, module in model.named_children(): + >>> if name in ['conv4', 'conv5']: + >>> print(module) + + """ + memo = set() + for name, module in self._modules.items(): + if module is not None and module not in memo: + memo.add(module) + yield name, module + + def modules(self) -> Iterator['Module']: + r"""Return an iterator over all modules in the network. + + Yields: + Module: a module in the network + + Note: + Duplicate modules are returned only once. In the following + example, ``l`` will be returned only once. + + Example:: + + >>> l = nn.Linear(2, 2) + >>> net = nn.Sequential(l, l) + >>> for idx, m in enumerate(net.modules()): + ... print(idx, '->', m) + + 0 -> Sequential( + (0): Linear(in_features=2, out_features=2, bias=True) + (1): Linear(in_features=2, out_features=2, bias=True) + ) + 1 -> Linear(in_features=2, out_features=2, bias=True) + + """ + for _, module in self.named_modules(): + yield module + + def named_modules(self, memo: Optional[Set['Module']] = None, prefix: str = '', remove_duplicate: bool = True): + r"""Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself. + + Args: + memo: a memo to store the set of modules already added to the result + prefix: a prefix that will be added to the name of the module + remove_duplicate: whether to remove the duplicated module instances in the result + or not + + Yields: + (str, Module): Tuple of name and module + + Note: + Duplicate modules are returned only once. In the following + example, ``l`` will be returned only once. + + Example:: + + >>> l = nn.Linear(2, 2) + >>> net = nn.Sequential(l, l) + >>> for idx, m in enumerate(net.named_modules()): + ... print(idx, '->', m) + + 0 -> ('', Sequential( + (0): Linear(in_features=2, out_features=2, bias=True) + (1): Linear(in_features=2, out_features=2, bias=True) + )) + 1 -> ('0', Linear(in_features=2, out_features=2, bias=True)) + + """ + if memo is None: + memo = set() + if self not in memo: + if remove_duplicate: + memo.add(self) + yield prefix, self + for name, module in self._modules.items(): + if module is None: + continue + submodule_prefix = prefix + ('.' if prefix else '') + name + yield from module.named_modules(memo, submodule_prefix, remove_duplicate) + + def train(self: T, mode: bool = True) -> T: + r"""Set the module in training mode. + + This has any effect only on certain modules. 
See documentations of + particular modules for details of their behaviors in training/evaluation + mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, + etc. + + Args: + mode (bool): whether to set training mode (``True``) or evaluation + mode (``False``). Default: ``True``. + + Returns: + Module: self + """ + if not isinstance(mode, bool): + raise ValueError("training mode is expected to be boolean") + self.training = mode + for module in self.children(): + module.train(mode) + return self + + def eval(self: T) -> T: + r"""Set the module in evaluation mode. + + This has any effect only on certain modules. See documentations of + particular modules for details of their behaviors in training/evaluation + mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, + etc. + + This is equivalent with :meth:`self.train(False) `. + + See :ref:`locally-disable-grad-doc` for a comparison between + `.eval()` and several similar mechanisms that may be confused with it. + + Returns: + Module: self + """ + return self.train(False) + + def requires_grad_(self: T, requires_grad: bool = True) -> T: + r"""Change if autograd should record operations on parameters in this module. + + This method sets the parameters' :attr:`requires_grad` attributes + in-place. + + This method is helpful for freezing part of the module for finetuning + or training parts of a model individually (e.g., GAN training). + + See :ref:`locally-disable-grad-doc` for a comparison between + `.requires_grad_()` and several similar mechanisms that may be confused with it. + + Args: + requires_grad (bool): whether autograd should record operations on + parameters in this module. Default: ``True``. + + Returns: + Module: self + """ + for p in self.parameters(): + p.requires_grad_(requires_grad) + return self + + def zero_grad(self, set_to_none: bool = True) -> None: + r"""Reset gradients of all model parameters. + + See similar function under :class:`torch.optim.Optimizer` for more context. + + Args: + set_to_none (bool): instead of setting to zero, set the grads to None. + See :meth:`torch.optim.Optimizer.zero_grad` for details. + """ + if getattr(self, '_is_replica', False): + warnings.warn( + "Calling .zero_grad() from a module created with nn.DataParallel() has no effect. " + "The parameters are copied (in a differentiable manner) from the original module. " + "This means they are not leaf nodes in autograd and so don't accumulate gradients. " + "If you need gradients in your forward method, consider using autograd.grad instead.") + + for p in self.parameters(): + if p.grad is not None: + if set_to_none: + p.grad = None + else: + if p.grad.grad_fn is not None: + p.grad.detach_() + else: + p.grad.requires_grad_(False) + p.grad.zero_() + + def share_memory(self: T) -> T: + r"""See :meth:`torch.Tensor.share_memory_`.""" + return self._apply(lambda t: t.share_memory_()) + + def _get_name(self): + return self.__class__.__name__ + + def extra_repr(self) -> str: + r"""Set the extra representation of the module. + + To print customized extra information, you should re-implement + this method in your own modules. Both single-line and multi-line + strings are acceptable. 
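+
+        A minimal sketch of an override (the class and attribute names are made
+        up for illustration)::
+
+            >>> # xdoctest: +SKIP("undefined vars")
+            >>> class MyLinear(nn.Module):
+            ...     def __init__(self, in_features: int, out_features: int) -> None:
+            ...         super().__init__()
+            ...         self.in_features = in_features
+            ...         self.out_features = out_features
+            ...     def extra_repr(self) -> str:
+            ...         # rendered inside the parentheses of repr(self)
+            ...         return f'in_features={self.in_features}, out_features={self.out_features}'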
+ """ + return '' + + def __repr__(self): + # We treat the extra repr like the sub-module, one item per line + extra_lines = [] + extra_repr = self.extra_repr() + # empty string will be split into list [''] + if extra_repr: + extra_lines = extra_repr.split('\n') + child_lines = [] + for key, module in self._modules.items(): + mod_str = repr(module) + mod_str = _addindent(mod_str, 2) + child_lines.append('(' + key + '): ' + mod_str) + lines = extra_lines + child_lines + + main_str = self._get_name() + '(' + if lines: + # simple one-liner info, which most builtin Modules will use + if len(extra_lines) == 1 and not child_lines: + main_str += extra_lines[0] + else: + main_str += '\n ' + '\n '.join(lines) + '\n' + + main_str += ')' + return main_str + + def __dir__(self): + module_attrs = dir(self.__class__) + attrs = list(self.__dict__.keys()) + parameters = list(self._parameters.keys()) + modules = list(self._modules.keys()) + buffers = list(self._buffers.keys()) + keys = module_attrs + attrs + parameters + modules + buffers + + # Eliminate attrs that are not legal Python variable names + keys = [key for key in keys if not key[0].isdigit()] + + return sorted(keys) + + def _replicate_for_data_parallel(self): + replica = self.__new__(type(self)) + replica.__dict__ = self.__dict__.copy() + + # replicas do not have parameters themselves, the replicas reference the original + # module. + replica._parameters = OrderedDict() + replica._buffers = replica._buffers.copy() + replica._modules = replica._modules.copy() + replica._is_replica = True # type: ignore[assignment] + + return replica + + def compile(self, *args, **kwargs): + """ + Compile this Module's forward using :func:`torch.compile`. + + This Module's `__call__` method is compiled and all arguments are passed as-is + to :func:`torch.compile`. + + See :func:`torch.compile` for details on the arguments for this function. + """ + self._compiled_call_impl = torch.compile(self._call_impl, *args, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/padding.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/padding.py new file mode 100644 index 0000000000000000000000000000000000000000..0aecca58c305a24bc153fec8d72f03c946b7e191 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/padding.py @@ -0,0 +1,801 @@ +from .module import Module +from .utils import _pair, _quadruple, _ntuple +from .. import functional as F + +from torch import Tensor +from ..common_types import _size_2_t, _size_4_t, _size_6_t +from typing import Sequence, Tuple + + +# TODO: grad_output size asserts in THNN + +__all__ = ['CircularPad1d', 'CircularPad2d', 'CircularPad3d', 'ConstantPad1d', 'ConstantPad2d', + 'ConstantPad3d', 'ReflectionPad1d', 'ReflectionPad2d', 'ReflectionPad3d', + 'ReplicationPad1d', 'ReplicationPad2d', 'ReplicationPad3d', 'ZeroPad1d', 'ZeroPad2d', 'ZeroPad3d'] + + +class _CircularPadNd(Module): + __constants__ = ['padding'] + padding: Sequence[int] + + def _check_input_dim(self, input): + raise NotImplementedError + + def forward(self, input: Tensor) -> Tensor: + self._check_input_dim(input) + return F.pad(input, self.padding, 'circular') + + def extra_repr(self) -> str: + return f'{self.padding}' + + +class CircularPad1d(_CircularPadNd): + r"""Pads the input tensor using circular padding of the input boundary. + + Tensor values at the beginning of the dimension are used to pad the end, + and values at the end are used to pad the beginning. 
If negative padding is + applied then the ends of the tensor get removed. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 2-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`) + + Shape: + - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`. + - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this") + >>> m = nn.CircularPad1d(2) + >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4) + >>> input + tensor([[[0., 1., 2., 3.], + [4., 5., 6., 7.]]]) + >>> m(input) + tensor([[[2., 3., 0., 1., 2., 3., 0., 1.], + [6., 7., 4., 5., 6., 7., 4., 5.]]]) + >>> # using different paddings for different sides + >>> m = nn.CircularPad1d((3, 1)) + >>> m(input) + tensor([[[1., 2., 3., 0., 1., 2., 3., 0.], + [5., 6., 7., 4., 5., 6., 7., 4.]]]) + """ + + padding: Tuple[int, int] + + def __init__(self, padding: _size_2_t) -> None: + super().__init__() + self.padding = _pair(padding) + + def _check_input_dim(self, input): + if input.dim() != 2 and input.dim() != 3: + raise ValueError( + f"expected 2D or 3D input (got {input.dim()}D input)" + ) + + +class CircularPad2d(_CircularPadNd): + r"""Pads the input tensor using circular padding of the input boundary. + + Tensor values at the beginning of the dimension are used to pad the end, + and values at the end are used to pad the beginning. If negative padding is + applied then the ends of the tensor get removed. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, + :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. + - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.CircularPad2d(2) + >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3) + >>> input + tensor([[[[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]]]) + >>> m(input) + tensor([[[[4., 5., 3., 4., 5., 3., 4.], + [7., 8., 6., 7., 8., 6., 7.], + [1., 2., 0., 1., 2., 0., 1.], + [4., 5., 3., 4., 5., 3., 4.], + [7., 8., 6., 7., 8., 6., 7.], + [1., 2., 0., 1., 2., 0., 1.], + [4., 5., 3., 4., 5., 3., 4.]]]]) + >>> # using different paddings for different sides + >>> m = nn.CircularPad2d((1, 1, 2, 0)) + >>> m(input) + tensor([[[[5., 3., 4., 5., 3.], + [8., 6., 7., 8., 6.], + [2., 0., 1., 2., 0.], + [5., 3., 4., 5., 3.], + [8., 6., 7., 8., 6.]]]]) + """ + + padding: Tuple[int, int, int, int] + + def __init__(self, padding: _size_4_t) -> None: + super().__init__() + self.padding = _quadruple(padding) + + def _check_input_dim(self, input): + if input.dim() != 3 and input.dim() != 4: + raise ValueError( + f"expected 3D or 4D input (got {input.dim()}D input)" + ) + + +class CircularPad3d(_CircularPadNd): + r"""Pads the input tensor using circular padding of the input boundary. 
+ + Tensor values at the beginning of the dimension are used to pad the end, + and values at the end are used to pad the beginning. If negative padding is + applied then the ends of the tensor get removed. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 6-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, + :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`, + :math:`\text{padding\_front}`, :math:`\text{padding\_back}`) + + Shape: + - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. + - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, + where + + :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}` + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = nn.CircularPad3d(3) + >>> input = torch.randn(16, 3, 8, 320, 480) + >>> output = m(input) + >>> # using different paddings for different sides + >>> m = nn.CircularPad3d((3, 3, 6, 6, 1, 1)) + >>> output = m(input) + """ + + padding: Tuple[int, int, int, int, int, int] + + def __init__(self, padding: _size_6_t) -> None: + super().__init__() + self.padding = _ntuple(6)(padding) + + def _check_input_dim(self, input): + if input.dim() != 4 and input.dim() != 5: + raise ValueError( + f"expected 4D or 5D input (got {input.dim()}D input)" + ) + + +class _ConstantPadNd(Module): + __constants__ = ['padding', 'value'] + value: float + padding: Sequence[int] + + def __init__(self, value: float) -> None: + super().__init__() + self.value = value + + def forward(self, input: Tensor) -> Tensor: + return F.pad(input, self.padding, 'constant', self.value) + + def extra_repr(self) -> str: + return f'padding={self.padding}, value={self.value}' + + +class ConstantPad1d(_ConstantPadNd): + r"""Pads the input tensor boundaries with a constant value. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in both boundaries. If a 2-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`) + + Shape: + - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`. 
+ - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = nn.ConstantPad1d(2, 3.5) + >>> input = torch.randn(1, 2, 4) + >>> input + tensor([[[-1.0491, -0.7152, -0.0749, 0.8530], + [-1.3287, 1.8966, 0.1466, -0.2771]]]) + >>> m(input) + tensor([[[ 3.5000, 3.5000, -1.0491, -0.7152, -0.0749, 0.8530, 3.5000, + 3.5000], + [ 3.5000, 3.5000, -1.3287, 1.8966, 0.1466, -0.2771, 3.5000, + 3.5000]]]) + >>> m = nn.ConstantPad1d(2, 3.5) + >>> input = torch.randn(1, 2, 3) + >>> input + tensor([[[ 1.6616, 1.4523, -1.1255], + [-3.6372, 0.1182, -1.8652]]]) + >>> m(input) + tensor([[[ 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000, 3.5000], + [ 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000, 3.5000]]]) + >>> # using different paddings for different sides + >>> m = nn.ConstantPad1d((3, 1), 3.5) + >>> m(input) + tensor([[[ 3.5000, 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000], + [ 3.5000, 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000]]]) + """ + + padding: Tuple[int, int] + + def __init__(self, padding: _size_2_t, value: float): + super().__init__(value) + self.padding = _pair(padding) + + +class ConstantPad2d(_ConstantPadNd): + r"""Pads the input tensor boundaries with a constant value. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, + :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. + - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = nn.ConstantPad2d(2, 3.5) + >>> input = torch.randn(1, 2, 2) + >>> input + tensor([[[ 1.6585, 0.4320], + [-0.8701, -0.4649]]]) + >>> m(input) + tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], + [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], + [ 3.5000, 3.5000, 1.6585, 0.4320, 3.5000, 3.5000], + [ 3.5000, 3.5000, -0.8701, -0.4649, 3.5000, 3.5000], + [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], + [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]]) + >>> # using different paddings for different sides + >>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5) + >>> m(input) + tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], + [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], + [ 3.5000, 3.5000, 3.5000, 1.6585, 0.4320], + [ 3.5000, 3.5000, 3.5000, -0.8701, -0.4649], + [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]]) + """ + + __constants__ = ['padding', 'value'] + padding: Tuple[int, int, int, int] + + def __init__(self, padding: _size_4_t, value: float) -> None: + super().__init__(value) + self.padding = _quadruple(padding) + + +class ConstantPad3d(_ConstantPadNd): + r"""Pads the input tensor boundaries with a constant value. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. 
If a 6-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, + :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`, + :math:`\text{padding\_front}`, :math:`\text{padding\_back}`) + + Shape: + - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. + - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or + :math:`(C, D_{out}, H_{out}, W_{out})`, where + + :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}` + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.ConstantPad3d(3, 3.5) + >>> input = torch.randn(16, 3, 10, 20, 30) + >>> output = m(input) + >>> # using different paddings for different sides + >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5) + >>> output = m(input) + """ + + padding: Tuple[int, int, int, int, int, int] + + def __init__(self, padding: _size_6_t, value: float) -> None: + super().__init__(value) + self.padding = _ntuple(6)(padding) + + +class _ReflectionPadNd(Module): + __constants__ = ['padding'] + padding: Sequence[int] + + def forward(self, input: Tensor) -> Tensor: + return F.pad(input, self.padding, 'reflect') + + def extra_repr(self) -> str: + return f'{self.padding}' + + +class ReflectionPad1d(_ReflectionPadNd): + r"""Pads the input tensor using the reflection of the input boundary. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 2-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`) + + Shape: + - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`. + - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.ReflectionPad1d(2) + >>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles") + >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4) + >>> input + tensor([[[0., 1., 2., 3.], + [4., 5., 6., 7.]]]) + >>> m(input) + tensor([[[2., 1., 0., 1., 2., 3., 2., 1.], + [6., 5., 4., 5., 6., 7., 6., 5.]]]) + >>> # using different paddings for different sides + >>> m = nn.ReflectionPad1d((3, 1)) + >>> m(input) + tensor([[[3., 2., 1., 0., 1., 2., 3., 2.], + [7., 6., 5., 4., 5., 6., 7., 6.]]]) + """ + + padding: Tuple[int, int] + + def __init__(self, padding: _size_2_t) -> None: + super().__init__() + self.padding = _pair(padding) + + +class ReflectionPad2d(_ReflectionPadNd): + r"""Pads the input tensor using the reflection of the input boundary. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, + :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) + Note that padding size should be less than the corresponding input dimension. + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. 
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})` where + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this") + >>> m = nn.ReflectionPad2d(2) + >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3) + >>> input + tensor([[[[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]]]) + >>> m(input) + tensor([[[[8., 7., 6., 7., 8., 7., 6.], + [5., 4., 3., 4., 5., 4., 3.], + [2., 1., 0., 1., 2., 1., 0.], + [5., 4., 3., 4., 5., 4., 3.], + [8., 7., 6., 7., 8., 7., 6.], + [5., 4., 3., 4., 5., 4., 3.], + [2., 1., 0., 1., 2., 1., 0.]]]]) + >>> # using different paddings for different sides + >>> m = nn.ReflectionPad2d((1, 1, 2, 0)) + >>> m(input) + tensor([[[[7., 6., 7., 8., 7.], + [4., 3., 4., 5., 4.], + [1., 0., 1., 2., 1.], + [4., 3., 4., 5., 4.], + [7., 6., 7., 8., 7.]]]]) + """ + + padding: Tuple[int, int, int, int] + + def __init__(self, padding: _size_4_t) -> None: + super().__init__() + self.padding = _quadruple(padding) + + +class ReflectionPad3d(_ReflectionPadNd): + r"""Pads the input tensor using the reflection of the input boundary. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 6-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, + :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`, + :math:`\text{padding\_front}`, :math:`\text{padding\_back}`) + + Shape: + - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. + - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, + where + + :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}` + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this") + >>> m = nn.ReflectionPad3d(1) + >>> input = torch.arange(8, dtype=torch.float).reshape(1, 1, 2, 2, 2) + >>> m(input) + tensor([[[[[7., 6., 7., 6.], + [5., 4., 5., 4.], + [7., 6., 7., 6.], + [5., 4., 5., 4.]], + [[3., 2., 3., 2.], + [1., 0., 1., 0.], + [3., 2., 3., 2.], + [1., 0., 1., 0.]], + [[7., 6., 7., 6.], + [5., 4., 5., 4.], + [7., 6., 7., 6.], + [5., 4., 5., 4.]], + [[3., 2., 3., 2.], + [1., 0., 1., 0.], + [3., 2., 3., 2.], + [1., 0., 1., 0.]]]]]) + """ + + padding: Tuple[int, int, int, int, int, int] + + def __init__(self, padding: _size_6_t) -> None: + super().__init__() + self.padding = _ntuple(6)(padding) + + +class _ReplicationPadNd(Module): + __constants__ = ['padding'] + padding: Sequence[int] + + def forward(self, input: Tensor) -> Tensor: + return F.pad(input, self.padding, 'replicate') + + def extra_repr(self) -> str: + return f'{self.padding}' + + +class ReplicationPad1d(_ReplicationPadNd): + r"""Pads the input tensor using replication of the input boundary. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 2-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`) + + Shape: + - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`. 
+ - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this") + >>> m = nn.ReplicationPad1d(2) + >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4) + >>> input + tensor([[[0., 1., 2., 3.], + [4., 5., 6., 7.]]]) + >>> m(input) + tensor([[[0., 0., 0., 1., 2., 3., 3., 3.], + [4., 4., 4., 5., 6., 7., 7., 7.]]]) + >>> # using different paddings for different sides + >>> m = nn.ReplicationPad1d((3, 1)) + >>> m(input) + tensor([[[0., 0., 0., 0., 1., 2., 3., 3.], + [4., 4., 4., 4., 5., 6., 7., 7.]]]) + """ + + padding: Tuple[int, int] + + def __init__(self, padding: _size_2_t) -> None: + super().__init__() + self.padding = _pair(padding) + + +class ReplicationPad2d(_ReplicationPadNd): + r"""Pads the input tensor using replication of the input boundary. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, + :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. + - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.ReplicationPad2d(2) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3) + >>> input + tensor([[[[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]]]) + >>> m(input) + tensor([[[[0., 0., 0., 1., 2., 2., 2.], + [0., 0., 0., 1., 2., 2., 2.], + [0., 0., 0., 1., 2., 2., 2.], + [3., 3., 3., 4., 5., 5., 5.], + [6., 6., 6., 7., 8., 8., 8.], + [6., 6., 6., 7., 8., 8., 8.], + [6., 6., 6., 7., 8., 8., 8.]]]]) + >>> # using different paddings for different sides + >>> m = nn.ReplicationPad2d((1, 1, 2, 0)) + >>> m(input) + tensor([[[[0., 0., 1., 2., 2.], + [0., 0., 1., 2., 2.], + [0., 0., 1., 2., 2.], + [3., 3., 4., 5., 5.], + [6., 6., 7., 8., 8.]]]]) + """ + + padding: Tuple[int, int, int, int] + + def __init__(self, padding: _size_4_t) -> None: + super().__init__() + self.padding = _quadruple(padding) + + +class ReplicationPad3d(_ReplicationPadNd): + r"""Pads the input tensor using replication of the input boundary. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 6-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, + :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`, + :math:`\text{padding\_front}`, :math:`\text{padding\_back}`) + + Shape: + - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. 
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, + where + + :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}` + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = nn.ReplicationPad3d(3) + >>> input = torch.randn(16, 3, 8, 320, 480) + >>> output = m(input) + >>> # using different paddings for different sides + >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1)) + >>> output = m(input) + """ + + padding: Tuple[int, int, int, int, int, int] + + def __init__(self, padding: _size_6_t) -> None: + super().__init__() + self.padding = _ntuple(6)(padding) + + +class ZeroPad1d(ConstantPad1d): + r"""Pads the input tensor boundaries with zero. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in both boundaries. If a 2-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`) + + Shape: + - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`. + - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = nn.ZeroPad1d(2) + >>> input = torch.randn(1, 2, 4) + >>> input + tensor([[[-1.0491, -0.7152, -0.0749, 0.8530], + [-1.3287, 1.8966, 0.1466, -0.2771]]]) + >>> m(input) + tensor([[[ 0.0000, 0.0000, -1.0491, -0.7152, -0.0749, 0.8530, 0.0000, + 0.0000], + [ 0.0000, 0.0000, -1.3287, 1.8966, 0.1466, -0.2771, 0.0000, + 0.0000]]]) + >>> m = nn.ZeroPad1d(2) + >>> input = torch.randn(1, 2, 3) + >>> input + tensor([[[ 1.6616, 1.4523, -1.1255], + [-3.6372, 0.1182, -1.8652]]]) + >>> m(input) + tensor([[[ 0.0000, 0.0000, 1.6616, 1.4523, -1.1255, 0.0000, 0.0000], + [ 0.0000, 0.0000, -3.6372, 0.1182, -1.8652, 0.0000, 0.0000]]]) + >>> # using different paddings for different sides + >>> m = nn.ZeroPad1d((3, 1)) + >>> m(input) + tensor([[[ 0.0000, 0.0000, 0.0000, 1.6616, 1.4523, -1.1255, 0.0000], + [ 0.0000, 0.0000, 0.0000, -3.6372, 0.1182, -1.8652, 0.0000]]]) + """ + + padding: Tuple[int, int] + + def __init__(self, padding: _size_2_t) -> None: + super().__init__(padding, 0.) + + def extra_repr(self) -> str: + return f'{self.padding}' + +class ZeroPad2d(ConstantPad2d): + r"""Pads the input tensor boundaries with zero. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, + :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. 
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = nn.ZeroPad2d(2) + >>> input = torch.randn(1, 1, 3, 3) + >>> input + tensor([[[[-0.1678, -0.4418, 1.9466], + [ 0.9604, -0.4219, -0.5241], + [-0.9162, -0.5436, -0.6446]]]]) + >>> m(input) + tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, 0.0000, -0.1678, -0.4418, 1.9466, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.9604, -0.4219, -0.5241, 0.0000, 0.0000], + [ 0.0000, 0.0000, -0.9162, -0.5436, -0.6446, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]]) + >>> # using different paddings for different sides + >>> m = nn.ZeroPad2d((1, 1, 2, 0)) + >>> m(input) + tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, -0.1678, -0.4418, 1.9466, 0.0000], + [ 0.0000, 0.9604, -0.4219, -0.5241, 0.0000], + [ 0.0000, -0.9162, -0.5436, -0.6446, 0.0000]]]]) + """ + + padding: Tuple[int, int, int, int] + + def __init__(self, padding: _size_4_t) -> None: + super().__init__(padding, 0.) + + def extra_repr(self) -> str: + return f'{self.padding}' + +class ZeroPad3d(ConstantPad3d): + r"""Pads the input tensor boundaries with zero. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 6-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, + :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`, + :math:`\text{padding\_front}`, :math:`\text{padding\_back}`) + + Shape: + - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. + - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or + :math:`(C, D_{out}, H_{out}, W_{out})`, where + + :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}` + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.ZeroPad3d(3) + >>> input = torch.randn(16, 3, 10, 20, 30) + >>> output = m(input) + >>> # using different paddings for different sides + >>> m = nn.ZeroPad3d((3, 3, 6, 6, 0, 1)) + >>> output = m(input) + """ + + padding: Tuple[int, int, int, int, int, int] + + def __init__(self, padding: _size_6_t) -> None: + super().__init__(padding, 0.) + + def extra_repr(self) -> str: + return f'{self.padding}' diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/pixelshuffle.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/pixelshuffle.py new file mode 100644 index 0000000000000000000000000000000000000000..6050b7eaea60cf6ef655d1219c5f2869cac11615 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/pixelshuffle.py @@ -0,0 +1,113 @@ +from .module import Module +from .. import functional as F + +from torch import Tensor + +__all__ = ['PixelShuffle', 'PixelUnshuffle'] + +class PixelShuffle(Module): + r"""Rearrange elements in a tensor according to an upscaling factor. 
+ + Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)` + to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an upscale factor. + + This is useful for implementing efficient sub-pixel convolution + with a stride of :math:`1/r`. + + See the paper: + `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_ + by Shi et. al (2016) for more details. + + Args: + upscale_factor (int): factor to increase spatial resolution by + + Shape: + - Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions + - Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where + + .. math:: + C_{out} = C_{in} \div \text{upscale\_factor}^2 + + .. math:: + H_{out} = H_{in} \times \text{upscale\_factor} + + .. math:: + W_{out} = W_{in} \times \text{upscale\_factor} + + Examples:: + + >>> pixel_shuffle = nn.PixelShuffle(3) + >>> input = torch.randn(1, 9, 4, 4) + >>> output = pixel_shuffle(input) + >>> print(output.size()) + torch.Size([1, 1, 12, 12]) + + .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network: + https://arxiv.org/abs/1609.05158 + """ + + __constants__ = ['upscale_factor'] + upscale_factor: int + + def __init__(self, upscale_factor: int) -> None: + super().__init__() + self.upscale_factor = upscale_factor + + def forward(self, input: Tensor) -> Tensor: + return F.pixel_shuffle(input, self.upscale_factor) + + def extra_repr(self) -> str: + return f'upscale_factor={self.upscale_factor}' + + +class PixelUnshuffle(Module): + r"""Reverse the PixelShuffle operation. + + Reverses the :class:`~torch.nn.PixelShuffle` operation by rearranging elements + in a tensor of shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape + :math:`(*, C \times r^2, H, W)`, where r is a downscale factor. + + See the paper: + `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_ + by Shi et. al (2016) for more details. + + Args: + downscale_factor (int): factor to decrease spatial resolution by + + Shape: + - Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions + - Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where + + .. math:: + C_{out} = C_{in} \times \text{downscale\_factor}^2 + + .. math:: + H_{out} = H_{in} \div \text{downscale\_factor} + + .. math:: + W_{out} = W_{in} \div \text{downscale\_factor} + + Examples:: + + >>> pixel_unshuffle = nn.PixelUnshuffle(3) + >>> input = torch.randn(1, 1, 12, 12) + >>> output = pixel_unshuffle(input) + >>> print(output.size()) + torch.Size([1, 9, 4, 4]) + + .. 
_Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network: + https://arxiv.org/abs/1609.05158 + """ + + __constants__ = ['downscale_factor'] + downscale_factor: int + + def __init__(self, downscale_factor: int) -> None: + super().__init__() + self.downscale_factor = downscale_factor + + def forward(self, input: Tensor) -> Tensor: + return F.pixel_unshuffle(input, self.downscale_factor) + + def extra_repr(self) -> str: + return f'downscale_factor={self.downscale_factor}' diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/transformer.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..ec5203ee7cfafeaf9526422724436763376be917 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/transformer.py @@ -0,0 +1,975 @@ +import copy +from typing import Optional, Any, Union, Callable + +import torch +import warnings +from torch import Tensor +from .. import functional as F +from .module import Module +from .activation import MultiheadAttention +from .container import ModuleList +from ..init import xavier_uniform_ +from .dropout import Dropout +from .linear import Linear +from .normalization import LayerNorm + +__all__ = ['Transformer', 'TransformerEncoder', 'TransformerDecoder', 'TransformerEncoderLayer', 'TransformerDecoderLayer'] + +def _generate_square_subsequent_mask( + sz: int, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, +) -> Tensor: + r"""Generate a square causal mask for the sequence. + + The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0). + """ + if device is None: + device = torch.device('cpu') + if dtype is None: + dtype = torch.float32 + return torch.triu( + torch.full((sz, sz), float('-inf'), dtype=dtype, device=device), + diagonal=1, + ) + + +def _get_seq_len( + src: Tensor, + batch_first: bool +) -> Optional[int]: + + if src.is_nested: + return None + else: + src_size = src.size() + if len(src_size) == 2: + # unbatched: S, E + return src_size[0] + else: + # batched: B, S, E if batch_first else S, B, E + seq_len_pos = 1 if batch_first else 0 + return src_size[seq_len_pos] + + +class Transformer(Module): + r"""A transformer model. + + User is able to modify the attributes as needed. The architecture + is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer, + Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and + Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information + Processing Systems, pages 6000-6010. + + Args: + d_model: the number of expected features in the encoder/decoder inputs (default=512). + nhead: the number of heads in the multiheadattention models (default=8). + num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6). + num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6). + dim_feedforward: the dimension of the feedforward network model (default=2048). + dropout: the dropout value (default=0.1). + activation: the activation function of encoder/decoder intermediate layer, can be a string + ("relu" or "gelu") or a unary callable. Default: relu + custom_encoder: custom encoder (default=None). + custom_decoder: custom decoder (default=None). + layer_norm_eps: the eps value in layer normalization components (default=1e-5). 
+ batch_first: If ``True``, then the input and output tensors are provided + as (batch, seq, feature). Default: ``False`` (seq, batch, feature). + norm_first: if ``True``, encoder and decoder layers will perform LayerNorms before + other attention and feedforward operations, otherwise after. Default: ``False`` (after). + bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive + bias. Default: ``True``. + + Examples:: + >>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12) + >>> src = torch.rand((10, 32, 512)) + >>> tgt = torch.rand((20, 32, 512)) + >>> out = transformer_model(src, tgt) + + Note: A full example to apply nn.Transformer module for the word language model is available in + https://github.com/pytorch/examples/tree/master/word_language_model + """ + + def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6, + num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1, + activation: Union[str, Callable[[Tensor], Tensor]] = F.relu, + custom_encoder: Optional[Any] = None, custom_decoder: Optional[Any] = None, + layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False, + bias: bool = True, device=None, dtype=None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__() + torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}") + + if custom_encoder is not None: + self.encoder = custom_encoder + else: + encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, + activation, layer_norm_eps, batch_first, norm_first, + bias, **factory_kwargs) + encoder_norm = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs) + self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) + + if custom_decoder is not None: + self.decoder = custom_decoder + else: + decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, + activation, layer_norm_eps, batch_first, norm_first, + bias, **factory_kwargs) + decoder_norm = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs) + self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm) + + self._reset_parameters() + + self.d_model = d_model + self.nhead = nhead + + self.batch_first = batch_first + + def forward(self, src: Tensor, tgt: Tensor, src_mask: Optional[Tensor] = None, tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, + src_is_causal: Optional[bool] = None, tgt_is_causal: Optional[bool] = None, + memory_is_causal: bool = False) -> Tensor: + r"""Take in and process masked source/target sequences. + + .. note:: + + If a boolean tensor is provided for any of the [src/tgt/memory]_mask arguments, positions with a ``True`` value are + not allowed to participate in the attention, + which is the opposite of the definition for :attr:`attn_mask` + in :func:`torch.nn.functional.scaled_dot_product_attention`. + + Args: + src: the sequence to the encoder (required). + tgt: the sequence to the decoder (required). + src_mask: the additive mask for the src sequence (optional). + tgt_mask: the additive mask for the tgt sequence (optional). + memory_mask: the additive mask for the encoder output (optional). + src_key_padding_mask: the Tensor mask for src keys per batch (optional). 
+ tgt_key_padding_mask: the Tensor mask for tgt keys per batch (optional). + memory_key_padding_mask: the Tensor mask for memory keys per batch (optional). + src_is_causal: If specified, applies a causal mask as ``src_mask``. + Default: ``None``; try to detect a causal mask. + Warning: + ``src_is_causal`` provides a hint that ``src_mask`` is + the causal mask. Providing incorrect hints can result in + incorrect execution, including forward and backward + compatibility. + tgt_is_causal: If specified, applies a causal mask as ``tgt_mask``. + Default: ``None``; try to detect a causal mask. + Warning: + ``tgt_is_causal`` provides a hint that ``tgt_mask`` is + the causal mask. Providing incorrect hints can result in + incorrect execution, including forward and backward + compatibility. + memory_is_causal: If specified, applies a causal mask as + ``memory_mask``. + Default: ``False``. + Warning: + ``memory_is_causal`` provides a hint that + ``memory_mask`` is the causal mask. Providing incorrect + hints can result in incorrect execution, including + forward and backward compatibility. + + Shape: + - src: :math:`(S, E)` for unbatched input, :math:`(S, N, E)` if `batch_first=False` or + `(N, S, E)` if `batch_first=True`. + - tgt: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or + `(N, T, E)` if `batch_first=True`. + - src_mask: :math:`(S, S)` or :math:`(N\cdot\text{num\_heads}, S, S)`. + - tgt_mask: :math:`(T, T)` or :math:`(N\cdot\text{num\_heads}, T, T)`. + - memory_mask: :math:`(T, S)`. + - src_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`. + - tgt_key_padding_mask: :math:`(T)` for unbatched input otherwise :math:`(N, T)`. + - memory_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`. + + Note: [src/tgt/memory]_mask ensures that position :math:`i` is allowed to attend the unmasked + positions. If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + [src/tgt/memory]_key_padding_mask provides specified elements in the key to be ignored by + the attention. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + + - output: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or + `(N, T, E)` if `batch_first=True`. + + Note: Due to the multi-head attention architecture in the transformer model, + the output sequence length of a transformer is same as the input sequence + (i.e. target) length of the decoder. 
+ + where :math:`S` is the source sequence length, :math:`T` is the target sequence length, :math:`N` is the + batch size, :math:`E` is the feature number + + Examples: + >>> # xdoctest: +SKIP + >>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask) + """ + is_batched = src.dim() == 3 + if not self.batch_first and src.size(1) != tgt.size(1) and is_batched: + raise RuntimeError("the batch number of src and tgt must be equal") + elif self.batch_first and src.size(0) != tgt.size(0) and is_batched: + raise RuntimeError("the batch number of src and tgt must be equal") + + if src.size(-1) != self.d_model or tgt.size(-1) != self.d_model: + raise RuntimeError("the feature number of src and tgt must be equal to d_model") + + memory = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask, + is_causal=src_is_causal) + output = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + tgt_is_causal=tgt_is_causal, memory_is_causal=memory_is_causal) + return output + + @staticmethod + def generate_square_subsequent_mask( + sz: int, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ) -> Tensor: + r"""Generate a square causal mask for the sequence. + + The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0). + """ + return _generate_square_subsequent_mask(sz, dtype=dtype, device=device) + + def _reset_parameters(self): + r"""Initiate parameters in the transformer model.""" + for p in self.parameters(): + if p.dim() > 1: + xavier_uniform_(p) + + +class TransformerEncoder(Module): + r"""TransformerEncoder is a stack of N encoder layers. + + Users can build the BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters. + + Args: + encoder_layer: an instance of the TransformerEncoderLayer() class (required). + num_layers: the number of sub-encoder-layers in the encoder (required). + norm: the layer normalization component (optional). + enable_nested_tensor: if True, input will automatically convert to nested tensor + (and convert back on output). This will improve the overall performance of + TransformerEncoder when padding rate is high. Default: ``True`` (enabled). 
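+ mask_check: if ``True``, the nested tensor fast path is used only when ``src_key_padding_mask`` is left-aligned, i.e. each sequence carries its padding only at the end. Default: ``True``.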
+ + Examples:: + >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8) + >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6) + >>> src = torch.rand(10, 32, 512) + >>> out = transformer_encoder(src) + """ + + __constants__ = ['norm'] + + def __init__( + self, + encoder_layer: "TransformerEncoderLayer", + num_layers: int, + norm: Optional[Module] = None, + enable_nested_tensor: bool = True, + mask_check: bool = True + ) -> None: + super().__init__() + torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}") + self.layers = _get_clones(encoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + # this attribute saves the value providedat object construction + self.enable_nested_tensor = enable_nested_tensor + # this attribute controls whether nested tensors are used + self.use_nested_tensor = enable_nested_tensor + self.mask_check = mask_check + + enc_layer = "encoder_layer" + why_not_sparsity_fast_path = '' + if not isinstance(encoder_layer, torch.nn.TransformerEncoderLayer): + why_not_sparsity_fast_path = f"{enc_layer} was not TransformerEncoderLayer" + elif encoder_layer.norm_first : + why_not_sparsity_fast_path = f"{enc_layer}.norm_first was True" + elif not encoder_layer.self_attn.batch_first: + why_not_sparsity_fast_path = (f"{enc_layer}.self_attn.batch_first was not True" + + "(use batch_first for better inference performance)") + elif not encoder_layer.self_attn._qkv_same_embed_dim: + why_not_sparsity_fast_path = f"{enc_layer}.self_attn._qkv_same_embed_dim was not True" + elif encoder_layer.self_attn.in_proj_bias is None: + why_not_sparsity_fast_path = f"{enc_layer}.self_attn was passed bias=False" + elif not encoder_layer.activation_relu_or_gelu: + why_not_sparsity_fast_path = f"{enc_layer}.activation_relu_or_gelu was not True" + elif not (encoder_layer.norm1.eps == encoder_layer.norm2.eps) : + why_not_sparsity_fast_path = f"{enc_layer}.norm1.eps was not equal to {enc_layer}.norm2.eps" + elif encoder_layer.self_attn.num_heads % 2 == 1: + why_not_sparsity_fast_path = f"{enc_layer}.self_attn.num_heads is odd" + + if enable_nested_tensor and why_not_sparsity_fast_path: + warnings.warn(f"enable_nested_tensor is True, but self.use_nested_tensor is False because {why_not_sparsity_fast_path}") + self.use_nested_tensor = False + + + def forward( + self, + src: Tensor, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + is_causal: Optional[bool] = None) -> Tensor: + r"""Pass the input through the encoder layers in turn. + + Args: + src: the sequence to the encoder (required). + mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + is_causal: If specified, applies a causal mask as ``mask``. + Default: ``None``; try to detect a causal mask. + Warning: + ``is_causal`` provides a hint that ``mask`` is the + causal mask. Providing incorrect hints can result in + incorrect execution, including forward and backward + compatibility. + + Shape: + see the docs in :class:`~torch.nn.Transformer`. 
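+
+ As an illustration, a call that supplies a boolean key padding mask could look as
+ follows; ``transformer_encoder`` is assumed to be built as in the class example above
+ (``batch_first=False``), and ``True`` marks positions that attention should ignore::
+
+ >>> # xdoctest: +SKIP
+ >>> src = torch.rand(10, 32, 512)
+ >>> padding_mask = torch.zeros(32, 10, dtype=torch.bool)
+ >>> padding_mask[:, -2:] = True  # the last two positions of every sequence are padding
+ >>> out = transformer_encoder(src, src_key_padding_mask=padding_mask)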
+ """ + src_key_padding_mask = F._canonical_mask( + mask=src_key_padding_mask, + mask_name="src_key_padding_mask", + other_type=F._none_or_dtype(mask), + other_name="mask", + target_type=src.dtype + ) + + mask = F._canonical_mask( + mask=mask, + mask_name="mask", + other_type=None, + other_name="", + target_type=src.dtype, + check_other=False, + ) + + output = src + convert_to_nested = False + first_layer = self.layers[0] + src_key_padding_mask_for_layers = src_key_padding_mask + why_not_sparsity_fast_path = '' + str_first_layer = "self.layers[0]" + batch_first = first_layer.self_attn.batch_first + is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled() + + if not is_fastpath_enabled: + why_not_sparsity_fast_path = "torch.backends.mha.get_fastpath_enabled() was not True" + elif not hasattr(self, "use_nested_tensor"): + why_not_sparsity_fast_path = "use_nested_tensor attribute not present" + elif not self.use_nested_tensor: + why_not_sparsity_fast_path = "self.use_nested_tensor (set in init) was not True" + elif first_layer.training: + why_not_sparsity_fast_path = f"{str_first_layer} was in training mode" + elif not src.dim() == 3: + why_not_sparsity_fast_path = f"input not batched; expected src.dim() of 3 but got {src.dim()}" + elif src_key_padding_mask is None: + why_not_sparsity_fast_path = "src_key_padding_mask was None" + elif (((not hasattr(self, "mask_check")) or self.mask_check) + and not torch._nested_tensor_from_mask_left_aligned(src, src_key_padding_mask.logical_not())): + why_not_sparsity_fast_path = "mask_check enabled, and src and src_key_padding_mask was not left aligned" + elif output.is_nested: + why_not_sparsity_fast_path = "NestedTensor input is not supported" + elif mask is not None: + why_not_sparsity_fast_path = "src_key_padding_mask and mask were both supplied" + elif torch.is_autocast_enabled(): + why_not_sparsity_fast_path = "autocast is enabled" + + if not why_not_sparsity_fast_path: + tensor_args = ( + src, + first_layer.self_attn.in_proj_weight, + first_layer.self_attn.in_proj_bias, + first_layer.self_attn.out_proj.weight, + first_layer.self_attn.out_proj.bias, + first_layer.norm1.weight, + first_layer.norm1.bias, + first_layer.norm2.weight, + first_layer.norm2.bias, + first_layer.linear1.weight, + first_layer.linear1.bias, + first_layer.linear2.weight, + first_layer.linear2.bias, + ) + _supported_device_type = ["cpu", "cuda", torch.utils.backend_registration._privateuse1_backend_name] + if torch.overrides.has_torch_function(tensor_args): + why_not_sparsity_fast_path = "some Tensor argument has_torch_function" + elif src.device.type not in _supported_device_type: + why_not_sparsity_fast_path = f"src device is neither one of {_supported_device_type}" + elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args): + why_not_sparsity_fast_path = ("grad is enabled and at least one of query or the " + "input/output projection weights or biases requires_grad") + + if (not why_not_sparsity_fast_path) and (src_key_padding_mask is not None): + convert_to_nested = True + output = torch._nested_tensor_from_mask(output, src_key_padding_mask.logical_not(), mask_check=False) + src_key_padding_mask_for_layers = None + + seq_len = _get_seq_len(src, batch_first) + is_causal = _detect_is_causal_mask(mask, is_causal, seq_len) + + for mod in self.layers: + output = mod(output, src_mask=mask, is_causal=is_causal, src_key_padding_mask=src_key_padding_mask_for_layers) + + if convert_to_nested: + output = output.to_padded_tensor(0., src.size()) + + if self.norm is 
not None: + output = self.norm(output) + + return output + + +class TransformerDecoder(Module): + r"""TransformerDecoder is a stack of N decoder layers. + + Args: + decoder_layer: an instance of the TransformerDecoderLayer() class (required). + num_layers: the number of sub-decoder-layers in the decoder (required). + norm: the layer normalization component (optional). + + Examples:: + >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8) + >>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6) + >>> memory = torch.rand(10, 32, 512) + >>> tgt = torch.rand(20, 32, 512) + >>> out = transformer_decoder(tgt, memory) + """ + + __constants__ = ['norm'] + + def __init__( + self, + decoder_layer: "TransformerDecoderLayer", + num_layers: int, + norm: Optional[Module] = None + ) -> None: + super().__init__() + torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}") + self.layers = _get_clones(decoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + + def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, tgt_is_causal: Optional[bool] = None, + memory_is_causal: bool = False) -> Tensor: + r"""Pass the inputs (and mask) through the decoder layer in turn. + + Args: + tgt: the sequence to the decoder (required). + memory: the sequence from the last layer of the encoder (required). + tgt_mask: the mask for the tgt sequence (optional). + memory_mask: the mask for the memory sequence (optional). + tgt_key_padding_mask: the mask for the tgt keys per batch (optional). + memory_key_padding_mask: the mask for the memory keys per batch (optional). + tgt_is_causal: If specified, applies a causal mask as ``tgt mask``. + Default: ``None``; try to detect a causal mask. + Warning: + ``tgt_is_causal`` provides a hint that ``tgt_mask`` is + the causal mask. Providing incorrect hints can result in + incorrect execution, including forward and backward + compatibility. + memory_is_causal: If specified, applies a causal mask as + ``memory mask``. + Default: ``False``. + Warning: + ``memory_is_causal`` provides a hint that + ``memory_mask`` is the causal mask. Providing incorrect + hints can result in incorrect execution, including + forward and backward compatibility. + + Shape: + see the docs in :class:`~torch.nn.Transformer`. + """ + output = tgt + + seq_len = _get_seq_len(tgt, self.layers[0].self_attn.batch_first) + tgt_is_causal = _detect_is_causal_mask(tgt_mask, tgt_is_causal, seq_len) + + for mod in self.layers: + output = mod(output, memory, tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + tgt_is_causal=tgt_is_causal, + memory_is_causal=memory_is_causal) + + if self.norm is not None: + output = self.norm(output) + + return output + +class TransformerEncoderLayer(Module): + r"""TransformerEncoderLayer is made up of self-attn and feedforward network. + + This standard encoder layer is based on the paper "Attention Is All You Need". + Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, + Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in + Neural Information Processing Systems, pages 6000-6010. Users may modify or implement + in a different way during application. 
+ + TransformerEncoderLayer can handle either traditional torch.tensor inputs, + or Nested Tensor inputs. Derived classes are expected to similarly accept + both input formats. (Not all combinations of inputs are currently + supported by TransformerEncoderLayer while Nested Tensor is in prototype + state.) + + If you are implementing a custom layer, you may derive it either from + the Module or TransformerEncoderLayer class. If your custom layer + supports both torch.Tensors and Nested Tensors inputs, make its + implementation a derived class of TransformerEncoderLayer. If your custom + Layer supports only torch.Tensor inputs, derive its implementation from + Module. + + Args: + d_model: the number of expected features in the input (required). + nhead: the number of heads in the multiheadattention models (required). + dim_feedforward: the dimension of the feedforward network model (default=2048). + dropout: the dropout value (default=0.1). + activation: the activation function of the intermediate layer, can be a string + ("relu" or "gelu") or a unary callable. Default: relu + layer_norm_eps: the eps value in layer normalization components (default=1e-5). + batch_first: If ``True``, then the input and output tensors are provided + as (batch, seq, feature). Default: ``False`` (seq, batch, feature). + norm_first: if ``True``, layer norm is done prior to attention and feedforward + operations, respectively. Otherwise it's done after. Default: ``False`` (after). + bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive + bias. Default: ``True``. + + Examples:: + >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8) + >>> src = torch.rand(10, 32, 512) + >>> out = encoder_layer(src) + + Alternatively, when ``batch_first`` is ``True``: + >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True) + >>> src = torch.rand(32, 10, 512) + >>> out = encoder_layer(src) + + Fast path: + forward() will use a special optimized implementation described in + `FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`_ if all of the following + conditions are met: + + - Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor + argument ``requires_grad`` + - training is disabled (using ``.eval()``) + - batch_first is ``True`` and the input is batched (i.e., ``src.dim() == 3``) + - activation is one of: ``"relu"``, ``"gelu"``, ``torch.functional.relu``, or ``torch.functional.gelu`` + - at most one of ``src_mask`` and ``src_key_padding_mask`` is passed + - if src is a `NestedTensor `_, neither ``src_mask`` + nor ``src_key_padding_mask`` is passed + - the two ``LayerNorm`` instances have a consistent ``eps`` value (this will naturally be the case + unless the caller has manually modified one without modifying the other) + + If the optimized implementation is in use, a + `NestedTensor `_ can be + passed for ``src`` to represent padding more efficiently than using a padding + mask. In this case, a `NestedTensor `_ will be + returned, and an additional speedup proportional to the fraction of the input that + is padding can be expected. + + .. 
_`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`: + https://arxiv.org/abs/2205.14135 + + """ + + __constants__ = ['norm_first'] + + def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1, + activation: Union[str, Callable[[Tensor], Tensor]] = F.relu, + layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False, + bias: bool = True, device=None, dtype=None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__() + self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, + bias=bias, batch_first=batch_first, + **factory_kwargs) + # Implementation of Feedforward model + self.linear1 = Linear(d_model, dim_feedforward, bias=bias, **factory_kwargs) + self.dropout = Dropout(dropout) + self.linear2 = Linear(dim_feedforward, d_model, bias=bias, **factory_kwargs) + + self.norm_first = norm_first + self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs) + self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs) + self.dropout1 = Dropout(dropout) + self.dropout2 = Dropout(dropout) + + # Legacy string support for activation function. + if isinstance(activation, str): + activation = _get_activation_fn(activation) + + # We can't test self.activation in forward() in TorchScript, + # so stash some information about it instead. + if activation is F.relu or isinstance(activation, torch.nn.ReLU): + self.activation_relu_or_gelu = 1 + elif activation is F.gelu or isinstance(activation, torch.nn.GELU): + self.activation_relu_or_gelu = 2 + else: + self.activation_relu_or_gelu = 0 + self.activation = activation + + def __setstate__(self, state): + super().__setstate__(state) + if not hasattr(self, 'activation'): + self.activation = F.relu + + + def forward( + self, + src: Tensor, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + is_causal: bool = False) -> Tensor: + r"""Pass the input through the encoder layer. + + Args: + src: the sequence to the encoder layer (required). + src_mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + is_causal: If specified, applies a causal mask as ``src mask``. + Default: ``False``. + Warning: + ``is_causal`` provides a hint that ``src_mask`` is the + causal mask. Providing incorrect hints can result in + incorrect execution, including forward and backward + compatibility. + + Shape: + see the docs in :class:`~torch.nn.Transformer`. + """ + src_key_padding_mask = F._canonical_mask( + mask=src_key_padding_mask, + mask_name="src_key_padding_mask", + other_type=F._none_or_dtype(src_mask), + other_name="src_mask", + target_type=src.dtype + ) + + src_mask = F._canonical_mask( + mask=src_mask, + mask_name="src_mask", + other_type=None, + other_name="", + target_type=src.dtype, + check_other=False, + ) + + is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled() + + # see Fig. 
1 of https://arxiv.org/pdf/2002.04745v1.pdf + why_not_sparsity_fast_path = '' + if not is_fastpath_enabled: + why_not_sparsity_fast_path = "torch.backends.mha.get_fastpath_enabled() was not True" + elif not src.dim() == 3: + why_not_sparsity_fast_path = f"input not batched; expected src.dim() of 3 but got {src.dim()}" + elif self.training: + why_not_sparsity_fast_path = "training is enabled" + elif not self.self_attn.batch_first: + why_not_sparsity_fast_path = "self_attn.batch_first was not True" + elif self.self_attn.in_proj_bias is None: + why_not_sparsity_fast_path = "self_attn was passed bias=False" + elif not self.self_attn._qkv_same_embed_dim: + why_not_sparsity_fast_path = "self_attn._qkv_same_embed_dim was not True" + elif not self.activation_relu_or_gelu: + why_not_sparsity_fast_path = "activation_relu_or_gelu was not True" + elif not (self.norm1.eps == self.norm2.eps): + why_not_sparsity_fast_path = "norm1.eps is not equal to norm2.eps" + elif src.is_nested and (src_key_padding_mask is not None or src_mask is not None): + why_not_sparsity_fast_path = "src_key_padding_mask and src_mask are not supported with NestedTensor input" + elif self.self_attn.num_heads % 2 == 1: + why_not_sparsity_fast_path = "num_heads is odd" + elif torch.is_autocast_enabled(): + why_not_sparsity_fast_path = "autocast is enabled" + if not why_not_sparsity_fast_path: + tensor_args = ( + src, + self.self_attn.in_proj_weight, + self.self_attn.in_proj_bias, + self.self_attn.out_proj.weight, + self.self_attn.out_proj.bias, + self.norm1.weight, + self.norm1.bias, + self.norm2.weight, + self.norm2.bias, + self.linear1.weight, + self.linear1.bias, + self.linear2.weight, + self.linear2.bias, + ) + + # We have to use list comprehensions below because TorchScript does not support + # generator expressions.
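+ # The checks below fall back to the regular path when a tensor argument overrides __torch_function__, sits on an unsupported device type, or requires grad while grad mode is enabled.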
+ _supported_device_type = ["cpu", "cuda", torch.utils.backend_registration._privateuse1_backend_name] + if torch.overrides.has_torch_function(tensor_args): + why_not_sparsity_fast_path = "some Tensor argument has_torch_function" + elif not all((x.device.type in _supported_device_type) for x in tensor_args): + why_not_sparsity_fast_path = ("some Tensor argument's device is neither one of " + f"{_supported_device_type}") + elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args): + why_not_sparsity_fast_path = ("grad is enabled and at least one of query or the " + "input/output projection weights or biases requires_grad") + + if not why_not_sparsity_fast_path: + merged_mask, mask_type = self.self_attn.merge_masks(src_mask, src_key_padding_mask, src) + return torch._transformer_encoder_layer_fwd( + src, + self.self_attn.embed_dim, + self.self_attn.num_heads, + self.self_attn.in_proj_weight, + self.self_attn.in_proj_bias, + self.self_attn.out_proj.weight, + self.self_attn.out_proj.bias, + self.activation_relu_or_gelu == 2, + self.norm_first, + self.norm1.eps, + self.norm1.weight, + self.norm1.bias, + self.norm2.weight, + self.norm2.bias, + self.linear1.weight, + self.linear1.bias, + self.linear2.weight, + self.linear2.bias, + merged_mask, + mask_type, + ) + + + x = src + if self.norm_first: + x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask, is_causal=is_causal) + x = x + self._ff_block(self.norm2(x)) + else: + x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask, is_causal=is_causal)) + x = self.norm2(x + self._ff_block(x)) + + return x + + # self-attention block + def _sa_block(self, x: Tensor, + attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False) -> Tensor: + x = self.self_attn(x, x, x, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask, + need_weights=False, is_causal=is_causal)[0] + return self.dropout1(x) + + # feed forward block + def _ff_block(self, x: Tensor) -> Tensor: + x = self.linear2(self.dropout(self.activation(self.linear1(x)))) + return self.dropout2(x) + + +class TransformerDecoderLayer(Module): + r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network. + + This standard decoder layer is based on the paper "Attention Is All You Need". + Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, + Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in + Neural Information Processing Systems, pages 6000-6010. Users may modify or implement + in a different way during application. + + Args: + d_model: the number of expected features in the input (required). + nhead: the number of heads in the multiheadattention models (required). + dim_feedforward: the dimension of the feedforward network model (default=2048). + dropout: the dropout value (default=0.1). + activation: the activation function of the intermediate layer, can be a string + ("relu" or "gelu") or a unary callable. Default: relu + layer_norm_eps: the eps value in layer normalization components (default=1e-5). + batch_first: If ``True``, then the input and output tensors are provided + as (batch, seq, feature). Default: ``False`` (seq, batch, feature). + norm_first: if ``True``, layer norm is done prior to self attention, multihead + attention and feedforward operations, respectively. Otherwise it's done after. + Default: ``False`` (after). 
+ bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive + bias. Default: ``True``. + + Examples:: + >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8) + >>> memory = torch.rand(10, 32, 512) + >>> tgt = torch.rand(20, 32, 512) + >>> out = decoder_layer(tgt, memory) + + Alternatively, when ``batch_first`` is ``True``: + >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=True) + >>> memory = torch.rand(32, 10, 512) + >>> tgt = torch.rand(32, 20, 512) + >>> out = decoder_layer(tgt, memory) + """ + + __constants__ = ['norm_first'] + + def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1, + activation: Union[str, Callable[[Tensor], Tensor]] = F.relu, + layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False, + bias: bool = True, device=None, dtype=None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__() + self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first, + bias=bias, **factory_kwargs) + self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first, + bias=bias, **factory_kwargs) + # Implementation of Feedforward model + self.linear1 = Linear(d_model, dim_feedforward, bias=bias, **factory_kwargs) + self.dropout = Dropout(dropout) + self.linear2 = Linear(dim_feedforward, d_model, bias=bias, **factory_kwargs) + + self.norm_first = norm_first + self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs) + self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs) + self.norm3 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs) + self.dropout1 = Dropout(dropout) + self.dropout2 = Dropout(dropout) + self.dropout3 = Dropout(dropout) + + # Legacy string support for activation function. + if isinstance(activation, str): + self.activation = _get_activation_fn(activation) + else: + self.activation = activation + + def __setstate__(self, state): + if 'activation' not in state: + state['activation'] = F.relu + super().__setstate__(state) + + def forward( + self, + tgt: Tensor, + memory: Tensor, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + tgt_is_causal: bool = False, + memory_is_causal: bool = False, + ) -> Tensor: + r"""Pass the inputs (and mask) through the decoder layer. + + Args: + tgt: the sequence to the decoder layer (required). + memory: the sequence from the last layer of the encoder (required). + tgt_mask: the mask for the tgt sequence (optional). + memory_mask: the mask for the memory sequence (optional). + tgt_key_padding_mask: the mask for the tgt keys per batch (optional). + memory_key_padding_mask: the mask for the memory keys per batch (optional). + tgt_is_causal: If specified, applies a causal mask as ``tgt mask``. + Default: ``False``. + Warning: + ``tgt_is_causal`` provides a hint that ``tgt_mask`` is + the causal mask. Providing incorrect hints can result in + incorrect execution, including forward and backward + compatibility. + memory_is_causal: If specified, applies a causal mask as + ``memory mask``. + Default: ``False``. + Warning: + ``memory_is_causal`` provides a hint that + ``memory_mask`` is the causal mask. Providing incorrect + hints can result in incorrect execution, including + forward and backward compatibility. 
+ + Shape: + see the docs in :class:`~torch.nn.Transformer`. + """ + # see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf + + x = tgt + if self.norm_first: + x = x + self._sa_block(self.norm1(x), tgt_mask, tgt_key_padding_mask, tgt_is_causal) + x = x + self._mha_block(self.norm2(x), memory, memory_mask, memory_key_padding_mask, memory_is_causal) + x = x + self._ff_block(self.norm3(x)) + else: + x = self.norm1(x + self._sa_block(x, tgt_mask, tgt_key_padding_mask, tgt_is_causal)) + x = self.norm2(x + self._mha_block(x, memory, memory_mask, memory_key_padding_mask, memory_is_causal)) + x = self.norm3(x + self._ff_block(x)) + + return x + + # self-attention block + def _sa_block(self, x: Tensor, + attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False) -> Tensor: + x = self.self_attn(x, x, x, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask, + is_causal=is_causal, + need_weights=False)[0] + return self.dropout1(x) + + # multihead attention block + def _mha_block(self, x: Tensor, mem: Tensor, + attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False) -> Tensor: + x = self.multihead_attn(x, mem, mem, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask, + is_causal=is_causal, + need_weights=False)[0] + return self.dropout2(x) + + # feed forward block + def _ff_block(self, x: Tensor) -> Tensor: + x = self.linear2(self.dropout(self.activation(self.linear1(x)))) + return self.dropout3(x) + + +def _get_clones(module, N): + # FIXME: copy.deepcopy() is not defined on nn.module + return ModuleList([copy.deepcopy(module) for i in range(N)]) + + +def _get_activation_fn(activation: str) -> Callable[[Tensor], Tensor]: + if activation == "relu": + return F.relu + elif activation == "gelu": + return F.gelu + + raise RuntimeError(f"activation should be relu/gelu, not {activation}") + + +def _detect_is_causal_mask( + mask: Optional[Tensor], + is_causal: Optional[bool] = None, + size: Optional[int] = None, +) -> bool: + """Return whether the given attention mask is causal. + + Warning: + If ``is_causal`` is not ``None``, its value will be returned as is. If a + user supplies an incorrect ``is_causal`` hint, + + ``is_causal=False`` when the mask is in fact a causal attention.mask + may lead to reduced performance relative to what would be achievable + with ``is_causal=True``; + ``is_causal=True`` when the mask is in fact not a causal attention.mask + may lead to incorrect and unpredictable execution - in some scenarios, + a causal mask may be applied based on the hint, in other execution + scenarios the specified mask may be used. The choice may not appear + to be deterministic, in that a number of factors like alignment, + hardware SKU, etc influence the decision whether to use a mask or + rely on the hint. + ``size`` if not None, check whether the mask is a causal mask of the provided size + Otherwise, checks for any causal mask. + """ + # Prevent type refinement + make_causal = (is_causal is True) + + if is_causal is None and mask is not None: + sz = size if size is not None else mask.size(-2) + causal_comparison = _generate_square_subsequent_mask( + sz, device=mask.device, dtype=mask.dtype) + + # Do not use `torch.equal` so we handle batched masks by + # broadcasting the comparison. 
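+ # A mask whose shape differs from the reference causal mask is treated as not causal.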
+ if mask.size() == causal_comparison.size(): + make_causal = bool((mask == causal_comparison).all()) + else: + make_causal = False + + return make_causal diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/upsampling.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/upsampling.py new file mode 100644 index 0000000000000000000000000000000000000000..da9b23add18dec8b051730c78a5944d4339e526c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/upsampling.py @@ -0,0 +1,264 @@ +from .module import Module +from .. import functional as F + +from torch import Tensor +from typing import Optional +from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t + +__all__ = ['Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d'] + + +class Upsample(Module): + r"""Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data. + + The input data is assumed to be of the form + `minibatch x channels x [optional depth] x [optional height] x width`. + Hence, for spatial inputs, we expect a 4D Tensor and for volumetric inputs, we expect a 5D Tensor. + + The algorithms available for upsampling are nearest neighbor and linear, + bilinear, bicubic and trilinear for 3D, 4D and 5D input Tensor, + respectively. + + One can either give a :attr:`scale_factor` or the target output :attr:`size` to + calculate the output size. (You cannot give both, as it is ambiguous) + + Args: + size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional): + output spatial sizes + scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional): + multiplier for spatial size. Has to match input size if it is a tuple. + mode (str, optional): the upsampling algorithm: one of ``'nearest'``, + ``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``. + Default: ``'nearest'`` + align_corners (bool, optional): if ``True``, the corner pixels of the input + and output tensors are aligned, and thus preserving the values at + those pixels. This only has effect when :attr:`mode` is + ``'linear'``, ``'bilinear'``, ``'bicubic'``, or ``'trilinear'``. + Default: ``False`` + recompute_scale_factor (bool, optional): recompute the scale_factor for use in the + interpolation calculation. If `recompute_scale_factor` is ``True``, then + `scale_factor` must be passed in and `scale_factor` is used to compute the + output `size`. The computed output `size` will be used to infer new scales for + the interpolation. Note that when `scale_factor` is floating-point, it may differ + from the recomputed `scale_factor` due to rounding and precision issues. + If `recompute_scale_factor` is ``False``, then `size` or `scale_factor` will + be used directly for interpolation. + + Shape: + - Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})` or :math:`(N, C, D_{in}, H_{in}, W_{in})` + - Output: :math:`(N, C, W_{out})`, :math:`(N, C, H_{out}, W_{out})` + or :math:`(N, C, D_{out}, H_{out}, W_{out})`, where + + .. math:: + D_{out} = \left\lfloor D_{in} \times \text{scale\_factor} \right\rfloor + + .. math:: + H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor + + .. math:: + W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor + + .. 
warning:: + With ``align_corners = True``, the linearly interpolating modes + (`linear`, `bilinear`, `bicubic`, and `trilinear`) don't proportionally + align the output and input pixels, and thus the output values can depend + on the input size. This was the default behavior for these modes up to + version 0.3.1. Since then, the default behavior is + ``align_corners = False``. See below for concrete examples on how this + affects the outputs. + + .. note:: + If you want downsampling/general resizing, you should use :func:`~nn.functional.interpolate`. + + Examples:: + + >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2) + >>> input + tensor([[[[1., 2.], + [3., 4.]]]]) + + >>> m = nn.Upsample(scale_factor=2, mode='nearest') + >>> m(input) + tensor([[[[1., 1., 2., 2.], + [1., 1., 2., 2.], + [3., 3., 4., 4.], + [3., 3., 4., 4.]]]]) + + >>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles") + >>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False + >>> m(input) + tensor([[[[1.0000, 1.2500, 1.7500, 2.0000], + [1.5000, 1.7500, 2.2500, 2.5000], + [2.5000, 2.7500, 3.2500, 3.5000], + [3.0000, 3.2500, 3.7500, 4.0000]]]]) + + >>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) + >>> m(input) + tensor([[[[1.0000, 1.3333, 1.6667, 2.0000], + [1.6667, 2.0000, 2.3333, 2.6667], + [2.3333, 2.6667, 3.0000, 3.3333], + [3.0000, 3.3333, 3.6667, 4.0000]]]]) + + >>> # Try scaling the same data in a larger tensor + >>> input_3x3 = torch.zeros(3, 3).view(1, 1, 3, 3) + >>> input_3x3[:, :, :2, :2].copy_(input) + tensor([[[[1., 2.], + [3., 4.]]]]) + >>> input_3x3 + tensor([[[[1., 2., 0.], + [3., 4., 0.], + [0., 0., 0.]]]]) + + >>> # xdoctest: +IGNORE_WANT("seems to fail when other tests are run in the same session") + >>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False + >>> # Notice that values in top left corner are the same with the small input (except at boundary) + >>> m(input_3x3) + tensor([[[[1.0000, 1.2500, 1.7500, 1.5000, 0.5000, 0.0000], + [1.5000, 1.7500, 2.2500, 1.8750, 0.6250, 0.0000], + [2.5000, 2.7500, 3.2500, 2.6250, 0.8750, 0.0000], + [2.2500, 2.4375, 2.8125, 2.2500, 0.7500, 0.0000], + [0.7500, 0.8125, 0.9375, 0.7500, 0.2500, 0.0000], + [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]]) + + >>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) + >>> # Notice that values in top left corner are now changed + >>> m(input_3x3) + tensor([[[[1.0000, 1.4000, 1.8000, 1.6000, 0.8000, 0.0000], + [1.8000, 2.2000, 2.6000, 2.2400, 1.1200, 0.0000], + [2.6000, 3.0000, 3.4000, 2.8800, 1.4400, 0.0000], + [2.4000, 2.7200, 3.0400, 2.5600, 1.2800, 0.0000], + [1.2000, 1.3600, 1.5200, 1.2800, 0.6400, 0.0000], + [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]]) + """ + + __constants__ = ['size', 'scale_factor', 'mode', 'align_corners', 'name', 'recompute_scale_factor'] + name: str + size: Optional[_size_any_t] + scale_factor: Optional[_ratio_any_t] + mode: str + align_corners: Optional[bool] + recompute_scale_factor: Optional[bool] + + def __init__(self, size: Optional[_size_any_t] = None, scale_factor: Optional[_ratio_any_t] = None, + mode: str = 'nearest', align_corners: Optional[bool] = None, + recompute_scale_factor: Optional[bool] = None) -> None: + super().__init__() + self.name = type(self).__name__ + self.size = size + if isinstance(scale_factor, tuple): + self.scale_factor = tuple(float(factor) for factor in scale_factor) + else: + self.scale_factor = float(scale_factor) if scale_factor else 
None + self.mode = mode + self.align_corners = align_corners + self.recompute_scale_factor = recompute_scale_factor + + def forward(self, input: Tensor) -> Tensor: + return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners, + recompute_scale_factor=self.recompute_scale_factor) + + def __setstate__(self, state): + if 'recompute_scale_factor' not in state: + state['recompute_scale_factor'] = True + + super().__setstate__(state) + + def extra_repr(self) -> str: + if self.scale_factor is not None: + info = 'scale_factor=' + repr(self.scale_factor) + else: + info = 'size=' + repr(self.size) + info += ', mode=' + repr(self.mode) + return info + + +class UpsamplingNearest2d(Upsample): + r"""Applies a 2D nearest neighbor upsampling to an input signal composed of several input channels. + + To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor` + as it's constructor argument. + + When :attr:`size` is given, it is the output size of the image `(h, w)`. + + Args: + size (int or Tuple[int, int], optional): output spatial sizes + scale_factor (float or Tuple[float, float], optional): multiplier for + spatial size. + + .. warning:: + This class is deprecated in favor of :func:`~nn.functional.interpolate`. + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` + - Output: :math:`(N, C, H_{out}, W_{out})` where + + .. math:: + H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor + + .. math:: + W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor + + Examples:: + + >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2) + >>> input + tensor([[[[1., 2.], + [3., 4.]]]]) + + >>> m = nn.UpsamplingNearest2d(scale_factor=2) + >>> m(input) + tensor([[[[1., 1., 2., 2.], + [1., 1., 2., 2.], + [3., 3., 4., 4.], + [3., 3., 4., 4.]]]]) + """ + + def __init__(self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None) -> None: + super().__init__(size, scale_factor, mode='nearest') + + +class UpsamplingBilinear2d(Upsample): + r"""Applies a 2D bilinear upsampling to an input signal composed of several input channels. + + To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor` + as it's constructor argument. + + When :attr:`size` is given, it is the output size of the image `(h, w)`. + + Args: + size (int or Tuple[int, int], optional): output spatial sizes + scale_factor (float or Tuple[float, float], optional): multiplier for + spatial size. + + .. warning:: + This class is deprecated in favor of :func:`~nn.functional.interpolate`. It is + equivalent to ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``. + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` + - Output: :math:`(N, C, H_{out}, W_{out})` where + + .. math:: + H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor + + .. 
math:: + W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor + + Examples:: + + >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2) + >>> input + tensor([[[[1., 2.], + [3., 4.]]]]) + + >>> # xdoctest: +IGNORE_WANT("do other tests modify the global state?") + >>> m = nn.UpsamplingBilinear2d(scale_factor=2) + >>> m(input) + tensor([[[[1.0000, 1.3333, 1.6667, 2.0000], + [1.6667, 2.0000, 2.3333, 2.6667], + [2.3333, 2.6667, 3.0000, 3.3333], + [3.0000, 3.3333, 3.6667, 4.0000]]]]) + """ + + def __init__(self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None) -> None: + super().__init__(size, scale_factor, mode='bilinear', align_corners=True) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..019dabe3e533f6d31ba41241f65d527fab659a25 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/modules/utils.py @@ -0,0 +1,79 @@ +import collections +from itertools import repeat +from typing import List, Dict, Any + +__all__ = ['consume_prefix_in_state_dict_if_present'] + + +def _ntuple(n, name="parse"): + def parse(x): + if isinstance(x, collections.abc.Iterable): + return tuple(x) + return tuple(repeat(x, n)) + + parse.__name__ = name + return parse + + +_single = _ntuple(1, "_single") +_pair = _ntuple(2, "_pair") +_triple = _ntuple(3, "_triple") +_quadruple = _ntuple(4, "_quadruple") + + +def _reverse_repeat_tuple(t, n): + r"""Reverse the order of `t` and repeat each element `n` times. + + This can be used to translate the padding arg used by Conv and Pooling modules + to the one used by `F.pad`. + """ + return tuple(x for x in reversed(t) for _ in range(n)) + + +def _list_with_default(out_size: List[int], defaults: List[int]) -> List[int]: + import torch + if isinstance(out_size, (int, torch.SymInt)): + return out_size + if len(defaults) <= len(out_size): + raise ValueError( + f"Input dimension should be at least {len(out_size) + 1}" + ) + return [ + v if v is not None else d for v, d in zip(out_size, defaults[-len(out_size) :]) + ] + + +def consume_prefix_in_state_dict_if_present( + state_dict: Dict[str, Any], prefix: str +) -> None: + r"""Strip the prefix in state_dict in place, if any. + + .. note:: + Given a `state_dict` from a DP/DDP model, a local model can load it by applying + `consume_prefix_in_state_dict_if_present(state_dict, "module.")` before calling + :meth:`torch.nn.Module.load_state_dict`. + + Args: + state_dict (OrderedDict): a state-dict to be loaded to the model. + prefix (str): the prefix to strip. + """ + keys = list(state_dict.keys()) + for key in keys: + if key.startswith(prefix): + newkey = key[len(prefix) :] + state_dict[newkey] = state_dict.pop(key) + + # also strip the prefix in metadata if any. + if hasattr(state_dict, "_metadata"): + keys = list(state_dict._metadata.keys()) + for key in keys: + # for the metadata dict, the key can be: + # '': for the DDP module, which we want to remove. + # 'module': for the actual model. + # 'module.xx.xx': for the rest. + if len(key) == 0: + continue + # handling both, 'module' case and 'module.' 
cases + if key == prefix.replace('.', '') or key.startswith(prefix): + newkey = key[len(prefix) :] + state_dict._metadata[newkey] = state_dict._metadata.pop(key) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..19313b70c9527fabb4fd65b3e0a06989a573a1cb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/__init__.py @@ -0,0 +1,18 @@ +# flake8: noqa: F401 +r"""QAT Dynamic Modules. + +This package is in the process of being deprecated. +Please, use `torch.ao.nn.qat.dynamic` instead. +""" +from . import dynamic # noqa: F403 +from . import modules # noqa: F403 +from .modules import * # noqa: F403 + +__all__ = [ + "Linear", + "Conv1d", + "Conv2d", + "Conv3d", + "Embedding", + "EmbeddingBag", +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5a49722b1e9d7e6384e72e5277dee042d252bc28 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/__init__.py @@ -0,0 +1,7 @@ +# flake8: noqa: F401 +r"""QAT Dynamic Modules. + +This package is in the process of being deprecated. +Please, use `torch.ao.nn.qat.dynamic` instead. +""" +from .modules import * # noqa: F403 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b27dd897bb5b877eec21ca447ee021fe89530f5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c8168b30406a8b0c27251d466b3a9195016eba64 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__init__.py @@ -0,0 +1,3 @@ +from .linear import Linear + +__all__ = ["Linear"] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb9f287f28f7edb39a910aa728856fcb8abe8f40 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d0f377001e94739676587ca5082fe9af30c9baf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/linear.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/linear.py new file mode 100644 index 
0000000000000000000000000000000000000000..ea69fba158d3bfedfe49bffef4b1664117fc3246 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/linear.py @@ -0,0 +1,10 @@ +# flake8: noqa: F401 +r"""QAT Modules. + +This file is in the process of migration to `torch/ao/nn/qat/dynamic`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/qat/dynamic/modules`, +while adding an import statement here. +""" +from torch.ao.nn.qat.dynamic.modules.linear import Linear diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/modules/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..63b1d2c08efd3f8c79cf142276525474f2c3b7f9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/modules/__init__.py @@ -0,0 +1,24 @@ +# flake8: noqa: F401 +r"""QAT Modules. + +This package is in the process of being deprecated. +Please, use `torch.ao.nn.qat.modules` instead. +""" +from torch.ao.nn.qat.modules.linear import Linear +from torch.ao.nn.qat.modules.conv import Conv1d +from torch.ao.nn.qat.modules.conv import Conv2d +from torch.ao.nn.qat.modules.conv import Conv3d +from torch.ao.nn.qat.modules.embedding_ops import EmbeddingBag, Embedding + +from . import conv +from . import embedding_ops +from . import linear + +__all__ = [ + "Linear", + "Conv1d", + "Conv2d", + "Conv3d", + "Embedding", + "EmbeddingBag", +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/modules/conv.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/modules/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..33fa1de6b561f86b3e23044f0caa0e3baa5ac5f7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/qat/modules/conv.py @@ -0,0 +1,12 @@ +# flake8: noqa: F401 +r"""QAT Modules. + +This file is in the process of migration to `torch/ao/nn/qat`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/qat/modules`, +while adding an import statement here. 
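(Aside, not part of any patched file above.) The UpsamplingBilinear2d docstring earlier in this diff says the module is equivalent to ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``. A minimal sketch checking that equivalence, assuming the same small 1x1x2x2 float input used in the docstring examples; the variable names are illustrative:

    import torch
    import torch.nn.functional as F

    # Same input as the docstring examples above.
    x = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)

    # Deprecated module form ...
    y_module = torch.nn.UpsamplingBilinear2d(scale_factor=2)(x)
    # ... and the functional form its docstring recommends.
    y_func = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)

    assert torch.allclose(y_module, y_func)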
+""" +from torch.ao.nn.qat.modules.conv import Conv1d +from torch.ao.nn.qat.modules.conv import Conv2d +from torch.ao.nn.qat.modules.conv import Conv3d diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/functional_modules.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/functional_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e308ccd7b5ad80e8cc77b94a19d6c51bf4338818 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/functional_modules.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45d32d4da0bb44107f36dae204d425d1991dc3b2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/activation.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/activation.py new file mode 100644 index 0000000000000000000000000000000000000000..43d7fbf19c38453198446ca1b99ab8570a9ef122 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/activation.py @@ -0,0 +1,18 @@ +# flake8: noqa: F401 +r"""Quantized Modules. + +This file is in the process of migration to `torch/ao/nn/quantized`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/modules`, +while adding an import statement here. +""" + +from torch.ao.nn.quantized.modules.activation import ELU +from torch.ao.nn.quantized.modules.activation import Hardswish +from torch.ao.nn.quantized.modules.activation import LeakyReLU +from torch.ao.nn.quantized.modules.activation import MultiheadAttention +from torch.ao.nn.quantized.modules.activation import PReLU +from torch.ao.nn.quantized.modules.activation import ReLU6 +from torch.ao.nn.quantized.modules.activation import Sigmoid +from torch.ao.nn.quantized.modules.activation import Softmax diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/conv.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..63d9dc5d4c7de91d804131a9dcc5c744f013602a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/conv.py @@ -0,0 +1,21 @@ +# flake8: noqa: F401 +r"""Quantized Modules. + +This file is in the process of migration to `torch/ao/nn/quantized`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/modules`, +while adding an import statement here. 
+""" + +__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d'] + +from torch.ao.nn.quantized.modules.conv import _reverse_repeat_padding + +from torch.ao.nn.quantized.modules.conv import Conv1d +from torch.ao.nn.quantized.modules.conv import Conv2d +from torch.ao.nn.quantized.modules.conv import Conv3d + +from torch.ao.nn.quantized.modules.conv import ConvTranspose1d +from torch.ao.nn.quantized.modules.conv import ConvTranspose2d +from torch.ao.nn.quantized.modules.conv import ConvTranspose3d diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/dropout.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/dropout.py new file mode 100644 index 0000000000000000000000000000000000000000..c42d68d595075045712d587f6218e52df810cc97 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/dropout.py @@ -0,0 +1,13 @@ +# flake8: noqa: F401 +r"""Quantized Modules. + +This file is in the process of migration to `torch/ao/nn/quantized`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/modules`, +while adding an import statement here. +""" + +__all__ = ['Dropout'] + +from torch.ao.nn.quantized.modules.dropout import Dropout diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/embedding_ops.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/embedding_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..73c8d84c76c28584396b59ba1aad08da8ca6d686 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/embedding_ops.py @@ -0,0 +1,15 @@ +# flake8: noqa: F401 +r"""Quantized Modules. + +This file is in the process of migration to `torch/ao/nn/quantized`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/modules`, +while adding an import statement here. +""" + +__all__ = ['EmbeddingPackedParams', 'Embedding', 'EmbeddingBag'] + +from torch.ao.nn.quantized.modules.embedding_ops import Embedding +from torch.ao.nn.quantized.modules.embedding_ops import EmbeddingBag +from torch.ao.nn.quantized.modules.embedding_ops import EmbeddingPackedParams diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/functional_modules.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/functional_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..c600f84e776d67c7381b22e30a936edfbcf17438 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/functional_modules.py @@ -0,0 +1,15 @@ +# flake8: noqa: F401 +r"""Quantized Modules. + +This file is in the process of migration to `torch/ao/nn/quantized`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/modules`, +while adding an import statement here. 
+""" + +__all__ = ['FloatFunctional', 'FXFloatFunctional', 'QFunctional'] + +from torch.ao.nn.quantized.modules.functional_modules import FloatFunctional +from torch.ao.nn.quantized.modules.functional_modules import FXFloatFunctional +from torch.ao.nn.quantized.modules.functional_modules import QFunctional diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/linear.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..e558bdb817b3fcba98fee8d4aaa08c91cd7183ff --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/linear.py @@ -0,0 +1,14 @@ +# flake8: noqa: F401 +r"""Quantized Modules. + +This file is in the process of migration to `torch/ao/nn/quantized`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/modules`, +while adding an import statement here. +""" + +__all__ = ['LinearPackedParams', 'Linear'] + +from torch.ao.nn.quantized.modules.linear import Linear +from torch.ao.nn.quantized.modules.linear import LinearPackedParams diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/normalization.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/normalization.py new file mode 100644 index 0000000000000000000000000000000000000000..1127bf9acb81ea9a5803bd18181f25a311cefa07 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/normalization.py @@ -0,0 +1,17 @@ +# flake8: noqa: F401 +r"""Quantized Modules. + +This file is in the process of migration to `torch/ao/nn/quantized`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/modules`, +while adding an import statement here. +""" + +__all__ = ['LayerNorm', 'GroupNorm', 'InstanceNorm1d', 'InstanceNorm2d', 'InstanceNorm3d'] + +from torch.ao.nn.quantized.modules.normalization import LayerNorm +from torch.ao.nn.quantized.modules.normalization import GroupNorm +from torch.ao.nn.quantized.modules.normalization import InstanceNorm1d +from torch.ao.nn.quantized.modules.normalization import InstanceNorm2d +from torch.ao.nn.quantized.modules.normalization import InstanceNorm3d diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..51c81a62b78f1b12ac5fe9a3a71239725b033f7c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/utils.py @@ -0,0 +1,15 @@ +# flake8: noqa: F401 +r"""Quantized Modules. + +This file is in the process of migration to `torch/ao/nn/quantized`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/modules`, +while adding an import statement here. 
+""" + +from torch.ao.nn.quantized.modules.utils import _ntuple_from_first +from torch.ao.nn.quantized.modules.utils import _pair_from_first +from torch.ao.nn.quantized.modules.utils import _quantize_weight +from torch.ao.nn.quantized.modules.utils import _hide_packed_params_repr +from torch.ao.nn.quantized.modules.utils import WeightedQuantizedModule diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89d41c175cac71c783f5ea92ebe8f30b8f3f68a8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_deprecation_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_deprecation_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8125afaeac392cd0a7f00c1e3b1ce75d9ae72185 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_deprecation_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_named_member_accessor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_named_member_accessor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcf420be3a36c8a49c17357dd33e046d56fa2aeb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_named_member_accessor.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_per_sample_grad.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_per_sample_grad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d355044362a6e51e3160a23ef08f3d2d0d67a742 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_per_sample_grad.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/clip_grad.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/clip_grad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a11aeba7d9675c1cf76d680e56c31d1461fe5ad2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/clip_grad.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/convert_parameters.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/convert_parameters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfda36f30f5cab00215e43f65ab45e0d1fce18d7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/convert_parameters.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/fusion.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/fusion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2307ab26adfc3853b3a1117310d7cd06f6638b25 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/fusion.cpython-310.pyc 
differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/init.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..637e86c1bcfd9b7812b6623bc24b18c8d7f6093f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/init.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/memory_format.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/memory_format.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..340229ab9aaab90d45514f912fa382870de3a01b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/memory_format.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrizations.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrizations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7644cf83e15ee24d09b718bf14ec3ece7541a46 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrizations.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrize.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b68887f5847ac40137a1768dc9aa3f709a8759c0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrize.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/prune.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/prune.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8073888fcc2d8e217353fce5cc507479988a9ae1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/prune.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/rnn.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/rnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aea42ea725ab01dc2cb4d6e7859b9c5ea8a636ea Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/rnn.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/spectral_norm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/spectral_norm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..833dacd4d6084fc268f395e6e944e5547ec0d82c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/spectral_norm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/stateless.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/stateless.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..799461bdb4a1581583d38638d8e6605df23107b8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/stateless.cpython-310.pyc 
differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/weight_norm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/weight_norm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a448d81f523caa182d2c5d8ddd8d2b763fcb246 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__pycache__/weight_norm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5496d6a7fb2bdbd319c737c093c7f11e2ea2ebf7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08917b59a73d289b2f06796467e14ee4def3b669 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/expanded_weights_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/expanded_weights_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef0bbe91de067ca31b854bcfcc97f562fcf776d7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/expanded_weights_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/group_norm_expanded_weights.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/group_norm_expanded_weights.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd2ea7f94a28738f22b814d04a8aea24feba1cef Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/group_norm_expanded_weights.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/instance_norm_expanded_weights.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/instance_norm_expanded_weights.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2663d51f24137794af1e716cb6a5f8e7ed929d9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/instance_norm_expanded_weights.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/layer_norm_expanded_weights.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/layer_norm_expanded_weights.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..412f61c535036b394e5e68590f72812e2c67a993 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/layer_norm_expanded_weights.cpython-310.pyc differ
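(Aside, not part of the diff.) A minimal sketch of the private helpers defined in ``torch/nn/modules/utils.py`` above; the example values are illustrative:

    from torch.nn.modules.utils import _pair, _reverse_repeat_tuple

    # _pair normalizes a scalar or an iterable to a 2-tuple.
    assert _pair(1) == (1, 1)
    assert _pair((1, 2)) == (1, 2)

    # _reverse_repeat_tuple reverses the tuple and repeats each element n times,
    # converting Conv/Pooling-style padding to the last-dim-first layout F.pad expects.
    assert _reverse_repeat_tuple((1, 2), 2) == (2, 2, 1, 1)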
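(Aside, not part of the diff.) A minimal sketch of the DP/DDP use case described in the ``consume_prefix_in_state_dict_if_present`` docstring above; the checkpoint path and the tiny model are illustrative assumptions, not taken from the diff:

    import torch
    from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present

    model = torch.nn.Linear(4, 2)                     # plain, non-DDP model (illustrative)
    state_dict = torch.load("ddp_checkpoint.pt")      # hypothetical DDP checkpoint: keys like "module.weight"
    consume_prefix_in_state_dict_if_present(state_dict, "module.")
    model.load_state_dict(state_dict)                 # keys are now "weight" / "bias"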