======================================================================================================================== SOURCE CODE FILE: instancenorm.py LINES: 1 SIZE: 20.30 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\instancenorm.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import warnings import torch.nn.functional as F from torch import Tensor from .batchnorm import _LazyNormBase, _NormBase __all__ = [ "InstanceNorm1d", "InstanceNorm2d", "InstanceNorm3d", "LazyInstanceNorm1d", "LazyInstanceNorm2d", "LazyInstanceNorm3d", ] class _InstanceNorm(_NormBase): def __init__( self, num_features: int, eps: float = 1e-5, momentum: float = 0.1, affine: bool = False, track_running_stats: bool = False, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__( num_features, eps, momentum, affine, track_running_stats, **factory_kwargs ) def _check_input_dim(self, input): raise NotImplementedError def _get_no_batch_dim(self): raise NotImplementedError def _handle_no_batch_input(self, input): return self._apply_instance_norm(input.unsqueeze(0)).squeeze(0) def _apply_instance_norm(self, input): return F.instance_norm( input, self.running_mean, self.running_var, self.weight, self.bias, self.training or not self.track_running_stats, self.momentum if self.momentum is not None else 0.0, self.eps, ) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ): version = local_metadata.get("version", None) # at version 1: removed running_mean and running_var when # track_running_stats=False (default) if version is None and not self.track_running_stats: running_stats_keys = [] for name in ("running_mean", "running_var"): key = prefix + name if key in state_dict: running_stats_keys.append(key) if len(running_stats_keys) > 0: error_msgs.append( "Unexpected running stats buffer(s) {names} for {klass} " "with track_running_stats=False. If state_dict is a " "checkpoint saved before 0.4.0, this may be expected " "because {klass} does not track running stats by default " "since 0.4.0. Please remove these keys from state_dict. If " "the running stats are actually needed, instead set " "track_running_stats=True in {klass} to enable them. See " "the documentation of {klass} for details.".format( names=" and ".join(f'"{k}"' for k in running_stats_keys), klass=self.__class__.__name__, ) ) for key in running_stats_keys: state_dict.pop(key) super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ) def forward(self, input: Tensor) -> Tensor: self._check_input_dim(input) feature_dim = input.dim() - self._get_no_batch_dim() if input.size(feature_dim) != self.num_features: if self.affine: raise ValueError( f"expected input's size at dim={feature_dim} to match num_features" f" ({self.num_features}), but got: {input.size(feature_dim)}." ) else: warnings.warn( f"input's size at dim={feature_dim} does not match num_features. " "You can silence this warning by not passing in num_features, " "which is not used because affine=False" ) if input.dim() == self._get_no_batch_dim(): return self._handle_no_batch_input(input) return self._apply_instance_norm(input) class InstanceNorm1d(_InstanceNorm): r"""Applies Instance Normalization. 
This operation applies Instance Normalization over a 2D (unbatched) or 3D (batched) input as described in the paper `Instance Normalization: The Missing Ingredient for Fast Stylization <https://arxiv.org/abs/1607.08022>`__. .. math:: y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta The mean and standard-deviation are calculated per-dimension separately for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors of size `C` (where `C` is the number of features or channels of the input) if :attr:`affine` is ``True``. The variance is calculated via the biased estimator, equivalent to `torch.var(input, unbiased=False)`. By default, this layer uses instance statistics computed from input data in both training and evaluation modes. If :attr:`track_running_stats` is set to ``True``, during training this layer keeps running estimates of its computed mean and variance, which are then used for normalization during evaluation. The running estimates are kept with a default :attr:`momentum` of 0.1. .. note:: This :attr:`momentum` argument is different from one used in optimizer classes and the conventional notion of momentum. Mathematically, the update rule for running statistics here is :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the new observed value. .. note:: :class:`InstanceNorm1d` and :class:`LayerNorm` are very similar, but have some subtle differences. :class:`InstanceNorm1d` is applied on each channel of channeled data like multidimensional time series, but :class:`LayerNorm` is usually applied on entire sample and often in NLP tasks. Additionally, :class:`LayerNorm` applies elementwise affine transform, while :class:`InstanceNorm1d` usually don't apply affine transform. Args: num_features: number of features or channels :math:`C` of the input eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. Default: 0.1 affine: a boolean value that when set to ``True``, this module has learnable affine parameters, initialized the same way as done for batch normalization. Default: ``False``. track_running_stats: a boolean value that when set to ``True``, this module tracks the running mean and variance, and when set to ``False``, this module does not track such statistics and always uses batch statistics in both training and eval modes. Default: ``False`` Shape: - Input: :math:`(N, C, L)` or :math:`(C, L)` - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input) Examples:: >>> # Without Learnable Parameters >>> m = nn.InstanceNorm1d(100) >>> # With Learnable Parameters >>> m = nn.InstanceNorm1d(100, affine=True) >>> input = torch.randn(20, 100, 40) >>> output = m(input) """ def _get_no_batch_dim(self): return 2 def _check_input_dim(self, input): if input.dim() not in (2, 3): raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)") class LazyInstanceNorm1d(_LazyNormBase, _InstanceNorm): r"""A :class:`torch.nn.InstanceNorm1d` module with lazy initialization of the ``num_features`` argument. The ``num_features`` argument of the :class:`InstanceNorm1d` is inferred from the ``input.size(1)``. The attributes that will be lazily initialized are `weight`, `bias`, `running_mean` and `running_var`. 
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation on lazy modules and their limitations. Args: num_features: :math:`C` from an expected input of size :math:`(N, C, L)` or :math:`(C, L)` eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. Default: 0.1 affine: a boolean value that when set to ``True``, this module has learnable affine parameters, initialized the same way as done for batch normalization. Default: ``False``. track_running_stats: a boolean value that when set to ``True``, this module tracks the running mean and variance, and when set to ``False``, this module does not track such statistics and always uses batch statistics in both training and eval modes. Default: ``False`` Shape: - Input: :math:`(N, C, L)` or :math:`(C, L)` - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input) """ cls_to_become = InstanceNorm1d # type: ignore[assignment] def _get_no_batch_dim(self): return 2 def _check_input_dim(self, input): if input.dim() not in (2, 3): raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)") class InstanceNorm2d(_InstanceNorm): r"""Applies Instance Normalization. This operation applies Instance Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper `Instance Normalization: The Missing Ingredient for Fast Stylization <https://arxiv.org/abs/1607.08022>`__. .. math:: y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta The mean and standard-deviation are calculated per-dimension separately for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors of size `C` (where `C` is the input size) if :attr:`affine` is ``True``. The standard-deviation is calculated via the biased estimator, equivalent to `torch.var(input, unbiased=False)`. By default, this layer uses instance statistics computed from input data in both training and evaluation modes. If :attr:`track_running_stats` is set to ``True``, during training this layer keeps running estimates of its computed mean and variance, which are then used for normalization during evaluation. The running estimates are kept with a default :attr:`momentum` of 0.1. .. note:: This :attr:`momentum` argument is different from one used in optimizer classes and the conventional notion of momentum. Mathematically, the update rule for running statistics here is :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the new observed value. .. note:: :class:`InstanceNorm2d` and :class:`LayerNorm` are very similar, but have some subtle differences. :class:`InstanceNorm2d` is applied on each channel of channeled data like RGB images, but :class:`LayerNorm` is usually applied on entire sample and often in NLP tasks. Additionally, :class:`LayerNorm` applies elementwise affine transform, while :class:`InstanceNorm2d` usually don't apply affine transform. Args: num_features: :math:`C` from an expected input of size :math:`(N, C, H, W)` or :math:`(C, H, W)` eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. 
Default: 0.1 affine: a boolean value that when set to ``True``, this module has learnable affine parameters, initialized the same way as done for batch normalization. Default: ``False``. track_running_stats: a boolean value that when set to ``True``, this module tracks the running mean and variance, and when set to ``False``, this module does not track such statistics and always uses batch statistics in both training and eval modes. Default: ``False`` Shape: - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)` - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input) Examples:: >>> # Without Learnable Parameters >>> m = nn.InstanceNorm2d(100) >>> # With Learnable Parameters >>> m = nn.InstanceNorm2d(100, affine=True) >>> input = torch.randn(20, 100, 35, 45) >>> output = m(input) """ def _get_no_batch_dim(self): return 3 def _check_input_dim(self, input): if input.dim() not in (3, 4): raise ValueError(f"expected 3D or 4D input (got {input.dim()}D input)") class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm): r"""A :class:`torch.nn.InstanceNorm2d` module with lazy initialization of the ``num_features`` argument. The ``num_features`` argument of the :class:`InstanceNorm2d` is inferred from the ``input.size(1)``. The attributes that will be lazily initialized are `weight`, `bias`, `running_mean` and `running_var`. Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation on lazy modules and their limitations. Args: num_features: :math:`C` from an expected input of size :math:`(N, C, H, W)` or :math:`(C, H, W)` eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. Default: 0.1 affine: a boolean value that when set to ``True``, this module has learnable affine parameters, initialized the same way as done for batch normalization. Default: ``False``. track_running_stats: a boolean value that when set to ``True``, this module tracks the running mean and variance, and when set to ``False``, this module does not track such statistics and always uses batch statistics in both training and eval modes. Default: ``False`` Shape: - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)` - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input) """ cls_to_become = InstanceNorm2d # type: ignore[assignment] def _get_no_batch_dim(self): return 3 def _check_input_dim(self, input): if input.dim() not in (3, 4): raise ValueError(f"expected 3D or 4D input (got {input.dim()}D input)") class InstanceNorm3d(_InstanceNorm): r"""Applies Instance Normalization. This operation applies Instance Normalization over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper `Instance Normalization: The Missing Ingredient for Fast Stylization <https://arxiv.org/abs/1607.08022>`__. .. math:: y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta The mean and standard-deviation are calculated per-dimension separately for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors of size C (where C is the input size) if :attr:`affine` is ``True``. The standard-deviation is calculated via the biased estimator, equivalent to `torch.var(input, unbiased=False)`. By default, this layer uses instance statistics computed from input data in both training and evaluation modes. 
If :attr:`track_running_stats` is set to ``True``, during training this layer keeps running estimates of its computed mean and variance, which are then used for normalization during evaluation. The running estimates are kept with a default :attr:`momentum` of 0.1. .. note:: This :attr:`momentum` argument is different from one used in optimizer classes and the conventional notion of momentum. Mathematically, the update rule for running statistics here is :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the new observed value. .. note:: :class:`InstanceNorm3d` and :class:`LayerNorm` are very similar, but have some subtle differences. :class:`InstanceNorm3d` is applied on each channel of channeled data like 3D models with RGB color, but :class:`LayerNorm` is usually applied on entire sample and often in NLP tasks. Additionally, :class:`LayerNorm` applies elementwise affine transform, while :class:`InstanceNorm3d` usually don't apply affine transform. Args: num_features: :math:`C` from an expected input of size :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. Default: 0.1 affine: a boolean value that when set to ``True``, this module has learnable affine parameters, initialized the same way as done for batch normalization. Default: ``False``. track_running_stats: a boolean value that when set to ``True``, this module tracks the running mean and variance, and when set to ``False``, this module does not track such statistics and always uses batch statistics in both training and eval modes. Default: ``False`` Shape: - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input) Examples:: >>> # Without Learnable Parameters >>> m = nn.InstanceNorm3d(100) >>> # With Learnable Parameters >>> m = nn.InstanceNorm3d(100, affine=True) >>> input = torch.randn(20, 100, 35, 45, 10) >>> output = m(input) """ def _get_no_batch_dim(self): return 4 def _check_input_dim(self, input): if input.dim() not in (4, 5): raise ValueError(f"expected 4D or 5D input (got {input.dim()}D input)") class LazyInstanceNorm3d(_LazyNormBase, _InstanceNorm): r"""A :class:`torch.nn.InstanceNorm3d` module with lazy initialization of the ``num_features`` argument. The ``num_features`` argument of the :class:`InstanceNorm3d` is inferred from the ``input.size(1)``. The attributes that will be lazily initialized are `weight`, `bias`, `running_mean` and `running_var`. Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation on lazy modules and their limitations. Args: num_features: :math:`C` from an expected input of size :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. Default: 0.1 affine: a boolean value that when set to ``True``, this module has learnable affine parameters, initialized the same way as done for batch normalization. Default: ``False``. track_running_stats: a boolean value that when set to ``True``, this module tracks the running mean and variance, and when set to ``False``, this module does not track such statistics and always uses batch statistics in both training and eval modes. 
Default: ``False`` Shape: - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input) """ cls_to_become = InstanceNorm3d # type: ignore[assignment] def _get_no_batch_dim(self): return 4 def _check_input_dim(self, input): if input.dim() not in (4, 5): raise ValueError(f"expected 4D or 5D input (got {input.dim()}D input)") ```
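As a quick, hedged illustration of how the instance-norm modules defined above behave (my own sketch, not part of the PyTorch source), the following exercises `InstanceNorm2d` on batched and unbatched inputs, shows the effect of `track_running_stats`, and demonstrates `LazyInstanceNorm2d` inferring `num_features` on its first forward.

```py
# Usage sketch for the modules in instancenorm.py above (illustrative only).
import torch
import torch.nn as nn

# Batched 4D input (N, C, H, W). With the defaults affine=False and
# track_running_stats=False, per-instance statistics are always used.
m = nn.InstanceNorm2d(3)
x = torch.randn(8, 3, 16, 16)
print(m(x).shape)  # torch.Size([8, 3, 16, 16])

# Unbatched 3D input (C, H, W): forward() adds and removes a batch dim
# internally via _handle_no_batch_input.
x_single = torch.randn(3, 16, 16)
print(m(x_single).shape)  # torch.Size([3, 16, 16])

# With affine=True and track_running_stats=True the module keeps running
# estimates that replace per-instance statistics in eval mode.
m_stats = nn.InstanceNorm2d(3, affine=True, track_running_stats=True)
m_stats.train()
_ = m_stats(x)                     # updates running_mean / running_var
m_stats.eval()
_ = m_stats(x)                     # normalizes with the tracked statistics
print(m_stats.running_mean.shape)  # torch.Size([3])

# LazyInstanceNorm2d infers num_features from input.size(1) on the first
# call and then becomes a regular InstanceNorm2d (cls_to_become).
lazy = nn.LazyInstanceNorm2d(affine=True)
_ = lazy(torch.randn(4, 5, 8, 8))
print(type(lazy).__name__, lazy.num_features)  # InstanceNorm2d 5
```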
================================================================================================================ SOURCE CODE FILE: lazy.py LINES: 1 SIZE: 11.85 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\lazy.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import itertools from typing import Any, Optional, Protocol import torch from torch.nn.parameter import is_lazy __all__ = ["LazyModuleMixin"] class _LazyProtocol(Protocol): """This class is used to avoid errors with mypy checks for the attributes in a mixin. https://mypy.readthedocs.io/en/latest/more_types.html#mixin-classes """ def _register_load_state_dict_pre_hook(self, hook): ... def register_forward_pre_hook(self, hook, *, prepend=False, with_kwargs=False): ... def _lazy_load_hook( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ): ... def _get_name(self): ... def _infer_parameters(self, module, input): ... @property def _parameters(self): ... @property def _buffers(self): ... @property def _non_persistent_buffers_set(self): ... @property def _load_hook(self): ... @property def _initialize_hook(self): ... class LazyModuleMixin: r"""A mixin for modules that lazily initialize parameters, also known as "lazy modules". .. warning: Lazy modules are an experimental new feature under active development, and their API is likely to change. Modules that lazily initialize parameters, or "lazy modules", derive the shapes of their parameters from the first input(s) to their forward method. Until that first forward they contain :class:`torch.nn.UninitializedParameter` s that should not be accessed or used, and afterward they contain regular :class:`torch.nn.Parameter` s. Lazy modules are convenient since they don't require computing some module arguments, like the :attr:`in_features` argument of a typical :class:`torch.nn.Linear`. After construction, networks with lazy modules should first be converted to the desired dtype and placed on the expected device. This is because lazy modules only perform shape inference so the usual dtype and device placement behavior applies. The lazy modules should then perform "dry runs" to initialize all the components in the module. These "dry runs" send inputs of the correct size, dtype, and device through the network and to each one of its lazy modules. After this the network can be used as usual. >>> # xdoctest: +SKIP >>> class LazyMLP(torch.nn.Module): ... def __init__(self) -> None: ... super().__init__() ... self.fc1 = torch.nn.LazyLinear(10) ... self.relu1 = torch.nn.ReLU() ... self.fc2 = torch.nn.LazyLinear(1) ... self.relu2 = torch.nn.ReLU() ... ... def forward(self, input): ... x = self.relu1(self.fc1(input)) ... y = self.relu2(self.fc2(x)) ... 
return y >>> # constructs a network with lazy modules >>> lazy_mlp = LazyMLP() >>> # transforms the network's device and dtype >>> # NOTE: these transforms can and should be applied after construction and before any 'dry runs' >>> lazy_mlp = lazy_mlp.cuda().double() >>> lazy_mlp LazyMLP( (fc1): LazyLinear(in_features=0, out_features=10, bias=True) (relu1): ReLU() (fc2): LazyLinear(in_features=0, out_features=1, bias=True) (relu2): ReLU() ) >>> # performs a dry run to initialize the network's lazy modules >>> lazy_mlp(torch.ones(10,10).cuda()) >>> # after initialization, LazyLinear modules become regular Linear modules >>> lazy_mlp LazyMLP( (fc1): Linear(in_features=10, out_features=10, bias=True) (relu1): ReLU() (fc2): Linear(in_features=10, out_features=1, bias=True) (relu2): ReLU() ) >>> # attaches an optimizer, since parameters can now be used as usual >>> optim = torch.optim.SGD(mlp.parameters(), lr=0.01) A final caveat when using lazy modules is that the order of initialization of a network's parameters may change, since the lazy modules are always initialized after other modules. For example, if the LazyMLP class defined above had a :class:`torch.nn.LazyLinear` module first and then a regular :class:`torch.nn.Linear` second, the second module would be initialized on construction and the first module would be initialized during the first dry run. This can cause the parameters of a network using lazy modules to be initialized differently than the parameters of a network without lazy modules as the order of parameter initializations, which often depends on a stateful random number generator, is different. Check :doc:`/notes/randomness` for more details. Lazy modules can be serialized with a state dict like other modules. For example: >>> lazy_mlp = LazyMLP() >>> # The state dict shows the uninitialized parameters >>> lazy_mlp.state_dict() OrderedDict([('fc1.weight', Uninitialized parameter), ('fc1.bias', tensor([-1.8832e+25, 4.5636e-41, -1.8832e+25, 4.5636e-41, -6.1598e-30, 4.5637e-41, -1.8788e+22, 4.5636e-41, -2.0042e-31, 4.5637e-41])), ('fc2.weight', Uninitialized parameter), ('fc2.bias', tensor([0.0019]))]) Lazy modules can load regular :class:`torch.nn.Parameter` s (i.e. you can serialize/deserialize initialized LazyModules and they will remain initialized) >>> full_mlp = LazyMLP() >>> # Dry run to initialize another module >>> full_mlp.forward(torch.ones(10, 1)) >>> # Load an initialized state into a lazy module >>> lazy_mlp.load_state_dict(full_mlp.state_dict()) >>> # The state dict now holds valid values >>> lazy_mlp.state_dict() OrderedDict([('fc1.weight', tensor([[-0.3837], [ 0.0907], [ 0.6708], [-0.5223], [-0.9028], [ 0.2851], [-0.4537], [ 0.6813], [ 0.5766], [-0.8678]])), ('fc1.bias', tensor([-1.8832e+25, 4.5636e-41, -1.8832e+25, 4.5636e-41, -6.1598e-30, 4.5637e-41, -1.8788e+22, 4.5636e-41, -2.0042e-31, 4.5637e-41])), ('fc2.weight', tensor([[ 0.1320, 0.2938, 0.0679, 0.2793, 0.1088, -0.1795, -0.2301, 0.2807, 0.2479, 0.1091]])), ('fc2.bias', tensor([0.0019]))]) Note, however, that the loaded parameters will not be replaced when doing a "dry run" if they are initialized when the state is loaded. This prevents using initialized modules in different contexts. 
""" # modules inheriting from this will change their __class__ to the specified # one after they are fully initialized cls_to_become: Optional[type[Any]] = None def __init__(self: _LazyProtocol, *args, **kwargs): # Mypy doesnt like this super call in a mixin super().__init__(*args, **kwargs) # type: ignore[misc] self._load_hook = self._register_load_state_dict_pre_hook(self._lazy_load_hook) self._initialize_hook = self.register_forward_pre_hook( self._infer_parameters, with_kwargs=True ) def _save_to_state_dict(self: _LazyProtocol, destination, prefix, keep_vars): # This should be ideally implemented as a hook, # but we should override `detach` in the UninitializedParameter to return itself # which is not clean for name, param in self._parameters.items(): if param is not None: if not (is_lazy(param) or keep_vars): param = param.detach() destination[prefix + name] = param for name, buf in self._buffers.items(): if buf is not None and name not in self._non_persistent_buffers_set: if not (is_lazy(buf) or keep_vars): buf = buf.detach() destination[prefix + name] = buf def _lazy_load_hook( self: _LazyProtocol, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ): """load_state_dict pre-hook function for lazy buffers and parameters. The purpose of this hook is to adjust the current state and/or ``state_dict`` being loaded so that a module instance serialized in both un/initialized state can be deserialized onto both un/initialized module instance. See comment in ``torch.nn.Module._register_load_state_dict_pre_hook`` for the details of the hook specification. """ for name, param in itertools.chain( self._parameters.items(), self._buffers.items() ): key = prefix + name if key in state_dict and param is not None: input_param = state_dict[key] if is_lazy(param): # The current parameter is not initialized but the one being loaded one is # create a new parameter based on the uninitialized one if not is_lazy(input_param): with torch.no_grad(): param.materialize(input_param.shape) def initialize_parameters(self: _LazyProtocol, *args, **kwargs): r"""Initialize parameters according to the input batch properties. This adds an interface to isolate parameter initialization from the forward pass when doing parameter shape inference. """ raise NotImplementedError( f"initialize_parameters is not implemented for {self.__class__.__name__}" ) def has_uninitialized_params(self: _LazyProtocol): r"""Check if a module has parameters that are not initialized.""" # This is to avoid the JIT to track this parameter and force # custom modules __setstate__ to add it params = self._parameters.values() buffers = self._buffers.values() for param in itertools.chain(params, buffers): if is_lazy(param): return True return False # torchrec tests the code consistency with the following code # fmt: off def _infer_parameters(self: _LazyProtocol, module, args, kwargs=None): r"""Infers the size and initializes the parameters according to the provided input batch. Given a module that contains parameters that were declared inferrable using :class:`torch.nn.parameter.ParameterMode.Infer`, runs a forward pass in the complete module using the provided input to initialize all the parameters as needed. 
The module is set into evaluation mode before running the forward pass in order to avoid saving statistics or calculating gradients """ kwargs = kwargs if kwargs else {} module.initialize_parameters(*args, **kwargs) if module.has_uninitialized_params(): raise RuntimeError(f'module {self._get_name()} has not been fully initialized') module._initialize_hook.remove() module._load_hook.remove() delattr(module, '_initialize_hook') delattr(module, '_load_hook') if module.cls_to_become is not None: module.__class__ = module.cls_to_become # fmt: on def _replicate_for_data_parallel(self: _LazyProtocol): raise RuntimeError( "Modules with uninitialized parameters can't be used with `DataParallel`. " "Run a dummy forward pass to correctly initialize the modules" ) ```
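To complement `LazyModuleMixin`, here is a small hedged sketch (my own example; `Scale` and `LazyScale` are hypothetical names, not PyTorch classes) of a custom lazy module built on the mixin: it stores an `UninitializedParameter`, implements `initialize_parameters` to materialize it from the first input, and sets `cls_to_become` so the instance turns into the eager class after the dry run, mirroring how `LazyLinear` is written.

```py
# Hypothetical lazy module built on LazyModuleMixin (illustrative only).
import torch
import torch.nn as nn
from torch.nn.modules.lazy import LazyModuleMixin
from torch.nn.parameter import UninitializedParameter


class Scale(nn.Module):
    """Eager module: multiplies the input by a learnable per-feature scale."""

    def __init__(self, num_features: int) -> None:
        super().__init__()
        self.scale = nn.Parameter(torch.ones(num_features))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * self.scale


class LazyScale(LazyModuleMixin, Scale):
    """Lazy variant: infers num_features from the last input dimension."""

    cls_to_become = Scale  # after initialization the instance becomes a Scale

    def __init__(self) -> None:
        # Pass a dummy size; the parameter is immediately replaced by an
        # uninitialized placeholder that is materialized on the first forward.
        super().__init__(0)
        self.scale = UninitializedParameter()

    def initialize_parameters(self, x: torch.Tensor) -> None:
        # Called by the forward pre-hook installed by LazyModuleMixin.
        if self.has_uninitialized_params():
            with torch.no_grad():
                self.scale.materialize((x.shape[-1],))
                self.scale.fill_(1.0)


m = LazyScale()
print(m.has_uninitialized_params())  # True
out = m(torch.randn(4, 7))           # dry run triggers initialize_parameters
print(type(m).__name__)              # Scale
print(m.scale.shape)                 # torch.Size([7])
```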
================================================================================================================== SOURCE CODE FILE: linear.py LINES: 1 SIZE: 10.81 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\linear.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import math from typing import Any import torch from torch import Tensor from torch.nn import functional as F, init from torch.nn.parameter import Parameter, UninitializedParameter from .lazy import LazyModuleMixin from .module import Module __all__ = [ "Bilinear", "Identity", "LazyLinear", "Linear", ] class Identity(Module): r"""A placeholder identity operator that is argument-insensitive. Args: args: any argument (unused) kwargs: any keyword argument (unused) Shape: - Input: :math:`(*)`, where :math:`*` means any number of dimensions. - Output: :math:`(*)`, same shape as the input. Examples:: >>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 20]) """ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__() def forward(self, input: Tensor) -> Tensor: return input class Linear(Module): r"""Applies an affine linear transformation to the incoming data: :math:`y = xA^T + b`. This module supports :ref:`TensorFloat32<tf32_on_ampere>`. On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward. Args: in_features: size of each input sample out_features: size of each output sample bias: If set to ``False``, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(*, H_\text{in})` where :math:`*` means any number of dimensions including none and :math:`H_\text{in} = \text{in\_features}`. - Output: :math:`(*, H_\text{out})` where all but the last dimension are the same shape as the input and :math:`H_\text{out} = \text{out\_features}`. Attributes: weight: the learnable weights of the module of shape :math:`(\text{out\_features}, \text{in\_features})`. The values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where :math:`k = \frac{1}{\text{in\_features}}` bias: the learnable bias of the module of shape :math:`(\text{out\_features})`. If :attr:`bias` is ``True``, the values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where :math:`k = \frac{1}{\text{in\_features}}` Examples:: >>> m = nn.Linear(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30]) """ __constants__ = ["in_features", "out_features"] in_features: int out_features: int weight: Tensor def __init__( self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter( torch.empty((out_features, in_features), **factory_kwargs) ) if bias: self.bias = Parameter(torch.empty(out_features, **factory_kwargs)) else: self.register_parameter("bias", None) self.reset_parameters() def reset_parameters(self) -> None: # Setting a=sqrt(5) in kaiming_uniform is the same as initializing with # uniform(-1/sqrt(in_features), 1/sqrt(in_features)). 
For details, see # https://github.com/pytorch/pytorch/issues/57109 init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 init.uniform_(self.bias, -bound, bound) def forward(self, input: Tensor) -> Tensor: return F.linear(input, self.weight, self.bias) def extra_repr(self) -> str: return f"in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}" # This class exists solely to avoid triggering an obscure error when scripting # an improperly quantized attention layer. See this issue for details: # https://github.com/pytorch/pytorch/issues/58969 # TODO: fail fast on quantization API usage error, then remove this class # and replace uses of it with plain Linear class NonDynamicallyQuantizableLinear(Linear): def __init__( self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None, ) -> None: super().__init__( in_features, out_features, bias=bias, device=device, dtype=dtype ) class Bilinear(Module): r"""Applies a bilinear transformation to the incoming data: :math:`y = x_1^T A x_2 + b`. Args: in1_features: size of each first input sample in2_features: size of each second input sample out_features: size of each output sample bias: If set to ``False``, the layer will not learn an additive bias. Default: ``True`` Shape: - Input1: :math:`(*, H_\text{in1})` where :math:`H_\text{in1}=\text{in1\_features}` and :math:`*` means any number of additional dimensions including none. All but the last dimension of the inputs should be the same. - Input2: :math:`(*, H_\text{in2})` where :math:`H_\text{in2}=\text{in2\_features}`. - Output: :math:`(*, H_\text{out})` where :math:`H_\text{out}=\text{out\_features}` and all but the last dimension are the same shape as the input. Attributes: weight: the learnable weights of the module of shape :math:`(\text{out\_features}, \text{in1\_features}, \text{in2\_features})`. The values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where :math:`k = \frac{1}{\text{in1\_features}}` bias: the learnable bias of the module of shape :math:`(\text{out\_features})`. 
If :attr:`bias` is ``True``, the values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where :math:`k = \frac{1}{\text{in1\_features}}` Examples:: >>> m = nn.Bilinear(20, 30, 40) >>> input1 = torch.randn(128, 20) >>> input2 = torch.randn(128, 30) >>> output = m(input1, input2) >>> print(output.size()) torch.Size([128, 40]) """ __constants__ = ["in1_features", "in2_features", "out_features"] in1_features: int in2_features: int out_features: int weight: Tensor def __init__( self, in1_features: int, in2_features: int, out_features: int, bias: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.in1_features = in1_features self.in2_features = in2_features self.out_features = out_features self.weight = Parameter( torch.empty((out_features, in1_features, in2_features), **factory_kwargs) ) if bias: self.bias = Parameter(torch.empty(out_features, **factory_kwargs)) else: self.register_parameter("bias", None) self.reset_parameters() def reset_parameters(self) -> None: bound = 1 / math.sqrt(self.weight.size(1)) init.uniform_(self.weight, -bound, bound) if self.bias is not None: init.uniform_(self.bias, -bound, bound) def forward(self, input1: Tensor, input2: Tensor) -> Tensor: return F.bilinear(input1, input2, self.weight, self.bias) def extra_repr(self) -> str: return ( f"in1_features={self.in1_features}, in2_features={self.in2_features}, " f"out_features={self.out_features}, bias={self.bias is not None}" ) class LazyLinear(LazyModuleMixin, Linear): r"""A :class:`torch.nn.Linear` module where `in_features` is inferred. In this module, the `weight` and `bias` are of :class:`torch.nn.UninitializedParameter` class. They will be initialized after the first call to ``forward`` is done and the module will become a regular :class:`torch.nn.Linear` module. The ``in_features`` argument of the :class:`Linear` is inferred from the ``input.shape[-1]``. Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation on lazy modules and their limitations. Args: out_features: size of each output sample bias: If set to ``False``, the layer will not learn an additive bias. Default: ``True`` Attributes: weight: the learnable weights of the module of shape :math:`(\text{out\_features}, \text{in\_features})`. The values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where :math:`k = \frac{1}{\text{in\_features}}` bias: the learnable bias of the module of shape :math:`(\text{out\_features})`. If :attr:`bias` is ``True``, the values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where :math:`k = \frac{1}{\text{in\_features}}` """ cls_to_become = Linear # type: ignore[assignment] weight: UninitializedParameter bias: UninitializedParameter # type: ignore[assignment] def __init__( self, out_features: int, bias: bool = True, device=None, dtype=None ) -> None: factory_kwargs = {"device": device, "dtype": dtype} # bias is hardcoded to False to avoid creating tensor # that will soon be overwritten. 
super().__init__(0, 0, False) self.weight = UninitializedParameter(**factory_kwargs) self.out_features = out_features if bias: self.bias = UninitializedParameter(**factory_kwargs) def reset_parameters(self) -> None: if not self.has_uninitialized_params() and self.in_features != 0: super().reset_parameters() def initialize_parameters(self, input) -> None: # type: ignore[override] if self.has_uninitialized_params(): with torch.no_grad(): self.in_features = input.shape[-1] self.weight.materialize((self.out_features, self.in_features)) if self.bias is not None: self.bias.materialize((self.out_features,)) self.reset_parameters() # TODO: PartialLinear - maybe in sparse? ```
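The following hedged sketch (mine, not part of `linear.py`) shows the shape contracts of the public modules defined above: `Linear`, `Bilinear`, and `LazyLinear`, including the `in_features` inference that the lazy variant performs on its first forward.

```py
# Usage sketch for the modules defined in linear.py above (illustrative only).
import torch
import torch.nn as nn

# Linear: y = x A^T + b, mapping the last dimension 20 -> 30.
linear = nn.Linear(20, 30)
x = torch.randn(128, 20)
print(linear(x).shape)  # torch.Size([128, 30])

# Bilinear: y = x1^T A x2 + b over two inputs with independent feature sizes.
bilinear = nn.Bilinear(20, 30, 40)
x1, x2 = torch.randn(128, 20), torch.randn(128, 30)
print(bilinear(x1, x2).shape)  # torch.Size([128, 40])

# LazyLinear: in_features is inferred from input.shape[-1] on the first call,
# after which the module becomes a plain nn.Linear (cls_to_become = Linear).
lazy = nn.LazyLinear(out_features=8)
print(lazy)                   # LazyLinear(in_features=0, out_features=8, bias=True)
_ = lazy(torch.randn(5, 13))  # dry run materializes weight as (8, 13)
print(type(lazy).__name__, lazy.in_features)  # Linear 13
print(lazy.weight.shape)      # torch.Size([8, 13])
```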
================================================================================================================ SOURCE CODE FILE: loss.py LINES: 6 SIZE: 93.40 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\loss.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from typing import Callable, Optional, Union from typing_extensions import deprecated from torch import Tensor from torch.nn import _reduction as _Reduction, functional as F from .distance import PairwiseDistance from .module import Module __all__ = [ "L1Loss", "NLLLoss", "NLLLoss2d", "PoissonNLLLoss", "GaussianNLLLoss", "KLDivLoss", "MSELoss", "BCELoss", "BCEWithLogitsLoss", "HingeEmbeddingLoss", "MultiLabelMarginLoss", "SmoothL1Loss", "HuberLoss", "SoftMarginLoss", "CrossEntropyLoss", "MultiLabelSoftMarginLoss", "CosineEmbeddingLoss", "MarginRankingLoss", "MultiMarginLoss", "TripletMarginLoss", "TripletMarginWithDistanceLoss", "CTCLoss", ] class _Loss(Module): reduction: str def __init__(self, size_average=None, reduce=None, reduction: str = "mean") -> None: super().__init__() if size_average is not None or reduce is not None: self.reduction: str = _Reduction.legacy_get_string(size_average, reduce) else: self.reduction = reduction class _WeightedLoss(_Loss): def __init__( self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = "mean", ) -> None: super().__init__(size_average, reduce, reduction) self.register_buffer("weight", weight) self.weight: Optional[Tensor] class L1Loss(_Loss): r"""Creates a criterion that measures the mean absolute error (MAE) between each element in the input :math:`x` and target :math:`y`. The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as: .. math:: \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = \left| x_n - y_n \right|, where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then: .. math:: \ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.} \end{cases} :math:`x` and :math:`y` are tensors of arbitrary shapes with a total of :math:`N` elements each. The sum operation still operates over all the elements, and divides by :math:`N`. The division by :math:`N` can be avoided if one sets ``reduction = 'sum'``. Supports real-valued and complex-valued inputs. Args: size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. 
Default: ``'mean'`` Shape: - Input: :math:`(*)`, where :math:`*` means any number of dimensions. - Target: :math:`(*)`, same shape as the input. - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same shape as the input. Examples:: >>> loss = nn.L1Loss() >>> input = torch.randn(3, 5, requires_grad=True) >>> target = torch.randn(3, 5) >>> output = loss(input, target) >>> output.backward() """ __constants__ = ["reduction"] def __init__(self, size_average=None, reduce=None, reduction: str = "mean") -> None: super().__init__(size_average, reduce, reduction) def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.l1_loss(input, target, reduction=self.reduction) class NLLLoss(_WeightedLoss): r"""The negative log likelihood loss. It is useful to train a classification problem with `C` classes. If provided, the optional argument :attr:`weight` should be a 1D Tensor assigning weight to each of the classes. This is particularly useful when you have an unbalanced training set. The `input` given through a forward call is expected to contain log-probabilities of each class. `input` has to be a Tensor of size either :math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1` for the `K`-dimensional case. The latter is useful for higher dimension inputs, such as computing NLL loss per-pixel for 2D images. Obtaining log-probabilities in a neural network is easily achieved by adding a `LogSoftmax` layer in the last layer of your network. You may use `CrossEntropyLoss` instead, if you prefer not to add an extra layer. The `target` that this loss expects should be a class index in the range :math:`[0, C-1]` where `C = number of classes`; if `ignore_index` is specified, this loss also accepts this class index (this index may not necessarily be in the class range). The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as: .. math:: \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = - w_{y_n} x_{n,y_n}, \quad w_{c} = \text{weight}[c] \cdot \mathbb{1}\{c \not= \text{ignore\_index}\}, where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight, and :math:`N` is the batch size. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then .. math:: \ell(x, y) = \begin{cases} \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n}} l_n, & \text{if reduction} = \text{`mean';}\\ \sum_{n=1}^N l_n, & \text{if reduction} = \text{`sum'.} \end{cases} Args: weight (Tensor, optional): a manual rescaling weight given to each class. If given, it has to be a Tensor of size `C`. Otherwise, it is treated as if having all ones. size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``None`` ignore_index (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. When :attr:`size_average` is ``True``, the loss is averaged over non-ignored targets. reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. 
Default: ``None`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the weighted mean of the output is taken, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` Shape:: - Input: :math:`(N, C)` or :math:`(C)`, where `C = number of classes`, `N = batch size`, or :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of `K`-dimensional loss. - Target: :math:`(N)` or :math:`()`, where each value is :math:`0 \leq \text{targets}[i] \leq C-1`, or :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of K-dimensional loss. - Output: If :attr:`reduction` is ``'none'``, shape :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of K-dimensional loss. Otherwise, scalar. Examples:: >>> log_softmax = nn.LogSoftmax(dim=1) >>> loss_fn = nn.NLLLoss() >>> # input to NLLLoss is of size N x C = 3 x 5 >>> input = torch.randn(3, 5, requires_grad=True) >>> # each element in target must have 0 <= value < C >>> target = torch.tensor([1, 0, 4]) >>> loss = loss_fn(log_softmax(input), target) >>> loss.backward() >>> >>> >>> # 2D loss example (used, for example, with image inputs) >>> N, C = 5, 4 >>> loss_fn = nn.NLLLoss() >>> data = torch.randn(N, 16, 10, 10) >>> conv = nn.Conv2d(16, C, (3, 3)) >>> log_softmax = nn.LogSoftmax(dim=1) >>> # output of conv forward is of shape [N, C, 8, 8] >>> output = log_softmax(conv(data)) >>> # each element in target must have 0 <= value < C >>> target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C) >>> # input to NLLLoss is of size N x C x height (8) x width (8) >>> loss = loss_fn(output, target) >>> loss.backward() """ __constants__ = ["ignore_index", "reduction"] ignore_index: int def __init__( self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100, reduce=None, reduction: str = "mean", ) -> None: super().__init__(weight, size_average, reduce, reduction) self.ignore_index = ignore_index def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.nll_loss( input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction, ) @deprecated( "`NLLLoss2d` has been deprecated. " "Please use `NLLLoss` instead as a drop-in replacement and see " "https://pytorch.org/docs/main/nn.html#torch.nn.NLLLoss for more details.", category=FutureWarning, ) class NLLLoss2d(NLLLoss): def __init__( self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100, reduce=None, reduction: str = "mean", ) -> None: super().__init__(weight, size_average, ignore_index, reduce, reduction) class PoissonNLLLoss(_Loss): r"""Negative log likelihood loss with Poisson distribution of target. The loss can be described as: .. math:: \text{target} \sim \mathrm{Poisson}(\text{input}) \text{loss}(\text{input}, \text{target}) = \text{input} - \text{target} * \log(\text{input}) + \log(\text{target!}) The last term can be omitted or approximated with Stirling formula. The approximation is used for target values more than 1. For targets less or equal to 1 zeros are added to the loss. 
Args: log_input (bool, optional): if ``True`` the loss is computed as :math:`\exp(\text{input}) - \text{target}*\text{input}`, if ``False`` the loss is :math:`\text{input} - \text{target}*\log(\text{input}+\text{eps})`. full (bool, optional): whether to compute full loss, i. e. to add the Stirling approximation term .. math:: \text{target}*\log(\text{target}) - \text{target} + 0.5 * \log(2\pi\text{target}). size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` eps (float, optional): Small value to avoid evaluation of :math:`\log(0)` when :attr:`log_input = False`. Default: 1e-8 reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` Examples:: >>> loss = nn.PoissonNLLLoss() >>> log_input = torch.randn(5, 2, requires_grad=True) >>> target = torch.randn(5, 2) >>> output = loss(log_input, target) >>> output.backward() Shape: - Input: :math:`(*)`, where :math:`*` means any number of dimensions. - Target: :math:`(*)`, same shape as the input. - Output: scalar by default. If :attr:`reduction` is ``'none'``, then :math:`(*)`, the same shape as the input. """ __constants__ = ["log_input", "full", "eps", "reduction"] log_input: bool full: bool eps: float def __init__( self, log_input: bool = True, full: bool = False, size_average=None, eps: float = 1e-8, reduce=None, reduction: str = "mean", ) -> None: super().__init__(size_average, reduce, reduction) self.log_input = log_input self.full = full self.eps = eps def forward(self, log_input: Tensor, target: Tensor) -> Tensor: return F.poisson_nll_loss( log_input, target, log_input=self.log_input, full=self.full, eps=self.eps, reduction=self.reduction, ) class GaussianNLLLoss(_Loss): r"""Gaussian negative log likelihood loss. The targets are treated as samples from Gaussian distributions with expectations and variances predicted by the neural network. For a ``target`` tensor modelled as having Gaussian distribution with a tensor of expectations ``input`` and a tensor of positive variances ``var`` the loss is: .. math:: \text{loss} = \frac{1}{2}\left(\log\left(\text{max}\left(\text{var}, \ \text{eps}\right)\right) + \frac{\left(\text{input} - \text{target}\right)^2} {\text{max}\left(\text{var}, \ \text{eps}\right)}\right) + \text{const.} where :attr:`eps` is used for stability. By default, the constant term of the loss function is omitted unless :attr:`full` is ``True``. 
If ``var`` is not the same size as ``input`` (due to a homoscedastic assumption), it must either have a final dimension of 1 or have one fewer dimension (with all other sizes being the same) for correct broadcasting. Args: full (bool, optional): include the constant term in the loss calculation. Default: ``False``. eps (float, optional): value used to clamp ``var`` (see note below), for stability. Default: 1e-6. reduction (str, optional): specifies the reduction to apply to the output:``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the output is the average of all batch member losses, ``'sum'``: the output is the sum of all batch member losses. Default: ``'mean'``. Shape: - Input: :math:`(N, *)` or :math:`(*)` where :math:`*` means any number of additional dimensions - Target: :math:`(N, *)` or :math:`(*)`, same shape as the input, or same shape as the input but with one dimension equal to 1 (to allow for broadcasting) - Var: :math:`(N, *)` or :math:`(*)`, same shape as the input, or same shape as the input but with one dimension equal to 1, or same shape as the input but with one fewer dimension (to allow for broadcasting), or a scalar value - Output: scalar if :attr:`reduction` is ``'mean'`` (default) or ``'sum'``. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`, same shape as the input Examples:: >>> loss = nn.GaussianNLLLoss() >>> input = torch.randn(5, 2, requires_grad=True) >>> target = torch.randn(5, 2) >>> var = torch.ones(5, 2, requires_grad=True) # heteroscedastic >>> output = loss(input, target, var) >>> output.backward() >>> loss = nn.GaussianNLLLoss() >>> input = torch.randn(5, 2, requires_grad=True) >>> target = torch.randn(5, 2) >>> var = torch.ones(5, 1, requires_grad=True) # homoscedastic >>> output = loss(input, target, var) >>> output.backward() Note: The clamping of ``var`` is ignored with respect to autograd, and so the gradients are unaffected by it. Reference: Nix, D. A. and Weigend, A. S., "Estimating the mean and variance of the target probability distribution", Proceedings of 1994 IEEE International Conference on Neural Networks (ICNN'94), Orlando, FL, USA, 1994, pp. 55-60 vol.1, doi: 10.1109/ICNN.1994.374138. """ __constants__ = ["full", "eps", "reduction"] full: bool eps: float def __init__( self, *, full: bool = False, eps: float = 1e-6, reduction: str = "mean" ) -> None: super().__init__(None, None, reduction) self.full = full self.eps = eps def forward( self, input: Tensor, target: Tensor, var: Union[Tensor, float] ) -> Tensor: return F.gaussian_nll_loss( input, target, var, full=self.full, eps=self.eps, reduction=self.reduction ) class KLDivLoss(_Loss): r"""The Kullback-Leibler divergence loss. For tensors of the same shape :math:`y_{\text{pred}},\ y_{\text{true}}`, where :math:`y_{\text{pred}}` is the :attr:`input` and :math:`y_{\text{true}}` is the :attr:`target`, we define the **pointwise KL-divergence** as .. math:: L(y_{\text{pred}},\ y_{\text{true}}) = y_{\text{true}} \cdot \log \frac{y_{\text{true}}}{y_{\text{pred}}} = y_{\text{true}} \cdot (\log y_{\text{true}} - \log y_{\text{pred}}) To avoid underflow issues when computing this quantity, this loss expects the argument :attr:`input` in the log-space. The argument :attr:`target` may also be provided in the log-space if :attr:`log_target`\ `= True`. To summarise, this function is roughly equivalent to computing .. 
code-block:: python if not log_target: # default loss_pointwise = target * (target.log() - input) else: loss_pointwise = target.exp() * (target - input) and then reducing this result depending on the argument :attr:`reduction` as .. code-block:: python if reduction == "mean": # default loss = loss_pointwise.mean() elif reduction == "batchmean": # mathematically correct loss = loss_pointwise.sum() / input.size(0) elif reduction == "sum": loss = loss_pointwise.sum() else: # reduction == "none" loss = loss_pointwise .. note:: As all the other losses in PyTorch, this function expects the first argument, :attr:`input`, to be the output of the model (e.g. the neural network) and the second, :attr:`target`, to be the observations in the dataset. This differs from the standard mathematical notation :math:`KL(P\ ||\ Q)` where :math:`P` denotes the distribution of the observations and :math:`Q` denotes the model. .. warning:: :attr:`reduction`\ `= "mean"` doesn't return the true KL divergence value, please use :attr:`reduction`\ `= "batchmean"` which aligns with the mathematical definition. Args: size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to `False`, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is `False`. Default: `True` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is `False`, returns a loss per batch element instead and ignores :attr:`size_average`. Default: `True` reduction (str, optional): Specifies the reduction to apply to the output. Default: `"mean"` log_target (bool, optional): Specifies whether `target` is the log space. Default: `False` Shape: - Input: :math:`(*)`, where :math:`*` means any number of dimensions. - Target: :math:`(*)`, same shape as the input. - Output: scalar by default. If :attr:`reduction` is `'none'`, then :math:`(*)`, same shape as the input. Examples:: >>> kl_loss = nn.KLDivLoss(reduction="batchmean") >>> # input should be a distribution in the log space >>> input = F.log_softmax(torch.randn(3, 5, requires_grad=True), dim=1) >>> # Sample a batch of distributions. Usually this would come from the dataset >>> target = F.softmax(torch.rand(3, 5), dim=1) >>> output = kl_loss(input, target) >>> kl_loss = nn.KLDivLoss(reduction="batchmean", log_target=True) >>> log_target = F.log_softmax(torch.rand(3, 5), dim=1) >>> output = kl_loss(input, log_target) """ __constants__ = ["reduction"] def __init__( self, size_average=None, reduce=None, reduction: str = "mean", log_target: bool = False, ) -> None: super().__init__(size_average, reduce, reduction) self.log_target = log_target def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.kl_div( input, target, reduction=self.reduction, log_target=self.log_target ) class MSELoss(_Loss): r"""Creates a criterion that measures the mean squared error (squared L2 norm) between each element in the input :math:`x` and target :math:`y`. The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as: .. math:: \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = \left( x_n - y_n \right)^2, where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then: .. 
math:: \ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.} \end{cases} :math:`x` and :math:`y` are tensors of arbitrary shapes with a total of :math:`N` elements each. The mean operation still operates over all the elements, and divides by :math:`N`. The division by :math:`N` can be avoided if one sets ``reduction = 'sum'``. Args: size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` Shape: - Input: :math:`(*)`, where :math:`*` means any number of dimensions. - Target: :math:`(*)`, same shape as the input. Examples:: >>> loss = nn.MSELoss() >>> input = torch.randn(3, 5, requires_grad=True) >>> target = torch.randn(3, 5) >>> output = loss(input, target) >>> output.backward() """ __constants__ = ["reduction"] def __init__(self, size_average=None, reduce=None, reduction: str = "mean") -> None: super().__init__(size_average, reduce, reduction) def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.mse_loss(input, target, reduction=self.reduction) class BCELoss(_WeightedLoss): r"""Creates a criterion that measures the Binary Cross Entropy between the target and the input probabilities: The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as: .. math:: \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right], where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then .. math:: \ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.} \end{cases} This is used for measuring the error of a reconstruction in for example an auto-encoder. Note that the targets :math:`y` should be numbers between 0 and 1. Notice that if :math:`x_n` is either 0 or 1, one of the log terms would be mathematically undefined in the above loss equation. PyTorch chooses to set :math:`\log (0) = -\infty`, since :math:`\lim_{x\to 0} \log (x) = -\infty`. However, an infinite term in the loss equation is not desirable for several reasons. For one, if either :math:`y_n = 0` or :math:`(1 - y_n) = 0`, then we would be multiplying 0 with infinity. Secondly, if we have an infinite loss value, then we would also have an infinite term in our gradient, since :math:`\lim_{x\to 0} \frac{d}{dx} \log (x) = \infty`. 
This would make BCELoss's backward method nonlinear with respect to :math:`x_n`, and using it for things like linear regression would not be straight-forward. Our solution is that BCELoss clamps its log function outputs to be greater than or equal to -100. This way, we can always have a finite loss value and a linear backward method. Args: weight (Tensor, optional): a manual rescaling weight given to the loss of each batch element. If given, has to be a Tensor of size `nbatch`. size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` Shape: - Input: :math:`(*)`, where :math:`*` means any number of dimensions. - Target: :math:`(*)`, same shape as the input. - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same shape as input. Examples:: >>> m = nn.Sigmoid() >>> loss = nn.BCELoss() >>> input = torch.randn(3, 2, requires_grad=True) >>> target = torch.rand(3, 2, requires_grad=False) >>> output = loss(m(input), target) >>> output.backward() """ __constants__ = ["reduction"] def __init__( self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = "mean", ) -> None: super().__init__(weight, size_average, reduce, reduction) def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.binary_cross_entropy( input, target, weight=self.weight, reduction=self.reduction ) class BCEWithLogitsLoss(_Loss): r"""This loss combines a `Sigmoid` layer and the `BCELoss` in one single class. This version is more numerically stable than using a plain `Sigmoid` followed by a `BCELoss` as, by combining the operations into one layer, we take advantage of the log-sum-exp trick for numerical stability. The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as: .. math:: \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = - w_n \left[ y_n \cdot \log \sigma(x_n) + (1 - y_n) \cdot \log (1 - \sigma(x_n)) \right], where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then .. math:: \ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.} \end{cases} This is used for measuring the error of a reconstruction in for example an auto-encoder. Note that the targets `t[i]` should be numbers between 0 and 1. It's possible to trade off recall and precision by adding weights to positive examples. 
In the case of multi-label classification the loss can be described as: .. math:: \ell_c(x, y) = L_c = \{l_{1,c},\dots,l_{N,c}\}^\top, \quad l_{n,c} = - w_{n,c} \left[ p_c y_{n,c} \cdot \log \sigma(x_{n,c}) + (1 - y_{n,c}) \cdot \log (1 - \sigma(x_{n,c})) \right], where :math:`c` is the class number (:math:`c > 1` for multi-label binary classification, :math:`c = 1` for single-label binary classification), :math:`n` is the number of the sample in the batch and :math:`p_c` is the weight of the positive answer for the class :math:`c`. :math:`p_c > 1` increases the recall, :math:`p_c < 1` increases the precision. For example, if a dataset contains 100 positive and 300 negative examples of a single class, then ``pos_weight`` for the class should be equal to :math:`\frac{300}{100}=3`. The loss would act as if the dataset contains :math:`3\times 100=300` positive examples. Examples:: >>> target = torch.ones([10, 64], dtype=torch.float32) # 64 classes, batch size = 10 >>> output = torch.full([10, 64], 1.5) # A prediction (logit) >>> pos_weight = torch.ones([64]) # All weights are equal to 1 >>> criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight) >>> criterion(output, target) # -log(sigmoid(1.5)) tensor(0.20...) In the above example, the ``pos_weight`` tensor's elements correspond to the 64 distinct classes in a multi-label binary classification scenario. Each element in ``pos_weight`` is designed to adjust the loss function based on the imbalance between negative and positive samples for the respective class. This approach is useful in datasets with varying levels of class imbalance, ensuring that the loss calculation accurately accounts for the distribution in each class. Args: weight (Tensor, optional): a manual rescaling weight given to the loss of each batch element. If given, has to be a Tensor of size `nbatch`. size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` pos_weight (Tensor, optional): a weight of positive examples to be broadcasted with target. Must be a tensor with equal size along the class dimension to the number of classes. Pay close attention to PyTorch's broadcasting semantics in order to achieve the desired operations. For a target of size [B, C, H, W] (where B is batch size) pos_weight of size [B, C, H, W] will apply different pos_weights to each element of the batch or [C, H, W] the same pos_weights across the batch. 
To apply the same positive weight along all spatial dimensions for a 2D multi-class target [C, H, W] use: [C, 1, 1]. Default: ``None`` Shape: - Input: :math:`(*)`, where :math:`*` means any number of dimensions. - Target: :math:`(*)`, same shape as the input. - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same shape as input. Examples:: >>> loss = nn.BCEWithLogitsLoss() >>> input = torch.randn(3, requires_grad=True) >>> target = torch.empty(3).random_(2) >>> output = loss(input, target) >>> output.backward() """ def __init__( self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = "mean", pos_weight: Optional[Tensor] = None, ) -> None: super().__init__(size_average, reduce, reduction) self.register_buffer("weight", weight) self.register_buffer("pos_weight", pos_weight) self.weight: Optional[Tensor] self.pos_weight: Optional[Tensor] def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.binary_cross_entropy_with_logits( input, target, self.weight, pos_weight=self.pos_weight, reduction=self.reduction, ) class HingeEmbeddingLoss(_Loss): r"""Measures the loss given an input tensor :math:`x` and a labels tensor :math:`y` (containing 1 or -1). This is usually used for measuring whether two inputs are similar or dissimilar, e.g. using the L1 pairwise distance as :math:`x`, and is typically used for learning nonlinear embeddings or semi-supervised learning. The loss function for the :math:`n`-th sample in the mini-batch is .. math:: l_n = \begin{cases} x_n, & \text{if}\; y_n = 1,\\ \max \{0, margin - x_n\}, & \text{if}\; y_n = -1, \end{cases} and the total loss function is .. math:: \ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.} \end{cases} where :math:`L = \{l_1,\dots,l_N\}^\top`. Args: margin (float, optional): Has a default value of `1`. size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` Shape: - Input: :math:`(*)` where :math:`*` means any number of dimensions. The sum operation operates over all the elements. - Target: :math:`(*)`, same shape as the input - Output: scalar. 
If :attr:`reduction` is ``'none'``, then same shape as the input """ __constants__ = ["margin", "reduction"] margin: float def __init__( self, margin: float = 1.0, size_average=None, reduce=None, reduction: str = "mean", ) -> None: super().__init__(size_average, reduce, reduction) self.margin = margin def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.hinge_embedding_loss( input, target, margin=self.margin, reduction=self.reduction ) class MultiLabelMarginLoss(_Loss): r"""Creates a criterion that optimizes a multi-class multi-classification hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) and output :math:`y` (which is a 2D `Tensor` of target class indices). For each sample in the mini-batch: .. math:: \text{loss}(x, y) = \sum_{ij}\frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)} where :math:`x \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`, \ :math:`y \in \left\{0, \; \cdots , \; \text{y.size}(0) - 1\right\}`, \ :math:`0 \leq y[j] \leq \text{x.size}(0)-1`, \ and :math:`i \neq y[j]` for all :math:`i` and :math:`j`. :math:`y` and :math:`x` must have the same size. The criterion only considers a contiguous block of non-negative targets that starts at the front. This allows for different samples to have variable amounts of target classes. Args: size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` Shape: - Input: :math:`(C)` or :math:`(N, C)` where `N` is the batch size and `C` is the number of classes. - Target: :math:`(C)` or :math:`(N, C)`, label targets padded by -1 ensuring same shape as the input. - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`. Examples:: >>> loss = nn.MultiLabelMarginLoss() >>> x = torch.FloatTensor([[0.1, 0.2, 0.4, 0.8]]) >>> # for target y, only consider labels 3 and 0, not after label -1 >>> y = torch.LongTensor([[3, 0, -1, 1]]) >>> # 0.25 * ((1-(0.1-0.2)) + (1-(0.1-0.4)) + (1-(0.8-0.2)) + (1-(0.8-0.4))) >>> loss(x, y) tensor(0.85...) """ __constants__ = ["reduction"] def __init__(self, size_average=None, reduce=None, reduction: str = "mean") -> None: super().__init__(size_average, reduce, reduction) def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.multilabel_margin_loss(input, target, reduction=self.reduction) class SmoothL1Loss(_Loss): r"""Creates a criterion that uses a squared term if the absolute element-wise error falls below beta and an L1 term otherwise. 
It is less sensitive to outliers than :class:`torch.nn.MSELoss` and in some cases prevents exploding gradients (e.g. see the paper `Fast R-CNN`_ by Ross Girshick). For a batch of size :math:`N`, the unreduced loss can be described as: .. math:: \ell(x, y) = L = \{l_1, ..., l_N\}^T with .. math:: l_n = \begin{cases} 0.5 (x_n - y_n)^2 / beta, & \text{if } |x_n - y_n| < beta \\ |x_n - y_n| - 0.5 * beta, & \text{otherwise } \end{cases} If `reduction` is not `none`, then: .. math:: \ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.} \end{cases} .. note:: Smooth L1 loss can be seen as exactly :class:`L1Loss`, but with the :math:`|x - y| < beta` portion replaced with a quadratic function such that its slope is 1 at :math:`|x - y| = beta`. The quadratic segment smooths the L1 loss near :math:`|x - y| = 0`. .. note:: Smooth L1 loss is closely related to :class:`HuberLoss`, being equivalent to :math:`huber(x, y) / beta` (note that Smooth L1's beta hyper-parameter is also known as delta for Huber). This leads to the following differences: * As beta -> 0, Smooth L1 loss converges to :class:`L1Loss`, while :class:`HuberLoss` converges to a constant 0 loss. When beta is 0, Smooth L1 loss is equivalent to L1 loss. * As beta -> :math:`+\infty`, Smooth L1 loss converges to a constant 0 loss, while :class:`HuberLoss` converges to :class:`MSELoss`. * For Smooth L1 loss, as beta varies, the L1 segment of the loss has a constant slope of 1. For :class:`HuberLoss`, the slope of the L1 segment is beta. .. _`Fast R-CNN`: https://arxiv.org/abs/1504.08083 Args: size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` beta (float, optional): Specifies the threshold at which to change between L1 and L2 loss. The value must be non-negative. Default: 1.0 Shape: - Input: :math:`(*)`, where :math:`*` means any number of dimensions. - Target: :math:`(*)`, same shape as the input. - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same shape as the input. 
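    A minimal usage sketch (the inputs below are random and purely illustrative; ``beta`` is passed explicitly only to make the threshold visible)::

        >>> loss = nn.SmoothL1Loss(beta=0.5)
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randn(3, 5)
        >>> output = loss(input, target)
        >>> output.backward()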
""" __constants__ = ["reduction"] def __init__( self, size_average=None, reduce=None, reduction: str = "mean", beta: float = 1.0 ) -> None: super().__init__(size_average, reduce, reduction) self.beta = beta def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.smooth_l1_loss(input, target, reduction=self.reduction, beta=self.beta) class HuberLoss(_Loss): r"""Creates a criterion that uses a squared term if the absolute element-wise error falls below delta and a delta-scaled L1 term otherwise. This loss combines advantages of both :class:`L1Loss` and :class:`MSELoss`; the delta-scaled L1 region makes the loss less sensitive to outliers than :class:`MSELoss`, while the L2 region provides smoothness over :class:`L1Loss` near 0. See `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`_ for more information. For a batch of size :math:`N`, the unreduced loss can be described as: .. math:: \ell(x, y) = L = \{l_1, ..., l_N\}^T with .. math:: l_n = \begin{cases} 0.5 (x_n - y_n)^2, & \text{if } |x_n - y_n| < delta \\ delta * (|x_n - y_n| - 0.5 * delta), & \text{otherwise } \end{cases} If `reduction` is not `none`, then: .. math:: \ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.} \end{cases} .. note:: When delta is set to 1, this loss is equivalent to :class:`SmoothL1Loss`. In general, this loss differs from :class:`SmoothL1Loss` by a factor of delta (AKA beta in Smooth L1). See :class:`SmoothL1Loss` for additional discussion on the differences in behavior between the two losses. Args: reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'`` delta (float, optional): Specifies the threshold at which to change between delta-scaled L1 and L2 loss. The value must be positive. Default: 1.0 Shape: - Input: :math:`(*)` where :math:`*` means any number of dimensions. - Target: :math:`(*)`, same shape as the input. - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same shape as the input. """ __constants__ = ["reduction", "delta"] def __init__(self, reduction: str = "mean", delta: float = 1.0) -> None: super().__init__(reduction=reduction) self.delta = delta def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.huber_loss(input, target, reduction=self.reduction, delta=self.delta) class SoftMarginLoss(_Loss): r"""Creates a criterion that optimizes a two-class classification logistic loss between input tensor :math:`x` and target tensor :math:`y` (containing 1 or -1). .. math:: \text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()} Args: size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. 
Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` Shape: - Input: :math:`(*)`, where :math:`*` means any number of dimensions. - Target: :math:`(*)`, same shape as the input. - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same shape as input. """ __constants__ = ["reduction"] def __init__(self, size_average=None, reduce=None, reduction: str = "mean") -> None: super().__init__(size_average, reduce, reduction) def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.soft_margin_loss(input, target, reduction=self.reduction) class CrossEntropyLoss(_WeightedLoss): r"""This criterion computes the cross entropy loss between input logits and target. It is useful when training a classification problem with `C` classes. If provided, the optional argument :attr:`weight` should be a 1D `Tensor` assigning weight to each of the classes. This is particularly useful when you have an unbalanced training set. The `input` is expected to contain the unnormalized logits for each class (which do `not` need to be positive or sum to 1, in general). `input` has to be a Tensor of size :math:`(C)` for unbatched input, :math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1` for the `K`-dimensional case. The last being useful for higher dimension inputs, such as computing cross entropy loss per-pixel for 2D images. The `target` that this criterion expects should contain either: - Class indices in the range :math:`[0, C)` where :math:`C` is the number of classes; if `ignore_index` is specified, this loss also accepts this class index (this index may not necessarily be in the class range). The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss for this case can be described as: .. math:: \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = - w_{y_n} \log \frac{\exp(x_{n,y_n})}{\sum_{c=1}^C \exp(x_{n,c})} \cdot \mathbb{1}\{y_n \not= \text{ignore\_index}\} where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight, :math:`C` is the number of classes, and :math:`N` spans the minibatch dimension as well as :math:`d_1, ..., d_k` for the `K`-dimensional case. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then .. math:: \ell(x, y) = \begin{cases} \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n} \cdot \mathbb{1}\{y_n \not= \text{ignore\_index}\}} l_n, & \text{if reduction} = \text{`mean';}\\ \sum_{n=1}^N l_n, & \text{if reduction} = \text{`sum'.} \end{cases} Note that this case is equivalent to applying :class:`~torch.nn.LogSoftmax` on an input, followed by :class:`~torch.nn.NLLLoss`. - Probabilities for each class; useful when labels beyond a single class per minibatch item are required, such as for blended labels, label smoothing, etc. The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss for this case can be described as: .. 
math:: \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = - \sum_{c=1}^C w_c \log \frac{\exp(x_{n,c})}{\sum_{i=1}^C \exp(x_{n,i})} y_{n,c} where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight, :math:`C` is the number of classes, and :math:`N` spans the minibatch dimension as well as :math:`d_1, ..., d_k` for the `K`-dimensional case. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then .. math:: \ell(x, y) = \begin{cases} \frac{\sum_{n=1}^N l_n}{N}, & \text{if reduction} = \text{`mean';}\\ \sum_{n=1}^N l_n, & \text{if reduction} = \text{`sum'.} \end{cases} .. note:: The performance of this criterion is generally better when `target` contains class indices, as this allows for optimized computation. Consider providing `target` as class probabilities only when a single class label per minibatch item is too restrictive. Args: weight (Tensor, optional): a manual rescaling weight given to each class. If given, has to be a Tensor of size `C` and floating point dtype size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` ignore_index (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. When :attr:`size_average` is ``True``, the loss is averaged over non-ignored targets. Note that :attr:`ignore_index` is only applicable when the target contains class indices. reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the weighted mean of the output is taken, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` label_smoothing (float, optional): A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no smoothing. The targets become a mixture of the original ground truth and a uniform distribution as described in `Rethinking the Inception Architecture for Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`. Shape: - Input: Shape :math:`(C)`, :math:`(N, C)` or :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of `K`-dimensional loss. - Target: If containing class indices, shape :math:`()`, :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of K-dimensional loss where each value should be between :math:`[0, C)`. The target data type is required to be long when using class indices. If containing class probabilities, the target must be the same shape input, and each value should be between :math:`[0, 1]`. This means the target data type is required to be float when using class probabilities. 
- Output: If reduction is 'none', shape :math:`()`, :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of K-dimensional loss, depending on the shape of the input. Otherwise, scalar. where: .. math:: \begin{aligned} C ={} & \text{number of classes} \\ N ={} & \text{batch size} \\ \end{aligned} Examples:: >>> # Example of target with class indices >>> loss = nn.CrossEntropyLoss() >>> input = torch.randn(3, 5, requires_grad=True) >>> target = torch.empty(3, dtype=torch.long).random_(5) >>> output = loss(input, target) >>> output.backward() >>> >>> # Example of target with class probabilities >>> input = torch.randn(3, 5, requires_grad=True) >>> target = torch.randn(3, 5).softmax(dim=1) >>> output = loss(input, target) >>> output.backward() """ __constants__ = ["ignore_index", "reduction", "label_smoothing"] ignore_index: int label_smoothing: float def __init__( self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100, reduce=None, reduction: str = "mean", label_smoothing: float = 0.0, ) -> None: super().__init__(weight, size_average, reduce, reduction) self.ignore_index = ignore_index self.label_smoothing = label_smoothing def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.cross_entropy( input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction, label_smoothing=self.label_smoothing, ) class MultiLabelSoftMarginLoss(_WeightedLoss): r"""Creates a criterion that optimizes a multi-label one-versus-all loss based on max-entropy, between input :math:`x` and target :math:`y` of size :math:`(N, C)`. For each sample in the minibatch: .. math:: loss(x, y) = - \frac{1}{C} * \sum_i y[i] * \log((1 + \exp(-x[i]))^{-1}) + (1-y[i]) * \log\left(\frac{\exp(-x[i])}{(1 + \exp(-x[i]))}\right) where :math:`i \in \left\{0, \; \cdots , \; \text{x.nElement}() - 1\right\}`, :math:`y[i] \in \left\{0, \; 1\right\}`. Args: weight (Tensor, optional): a manual rescaling weight given to each class. If given, it has to be a Tensor of size `C`. Otherwise, it is treated as if having all ones. size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` Shape: - Input: :math:`(N, C)` where `N` is the batch size and `C` is the number of classes. - Target: :math:`(N, C)`, label targets must have the same shape as the input. - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`. 
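    A minimal usage sketch (random multi-hot targets, purely illustrative)::

        >>> loss = nn.MultiLabelSoftMarginLoss()
        >>> input = torch.randn(3, 4, requires_grad=True)  # logits for 4 classes
        >>> target = torch.empty(3, 4).random_(2)  # multi-hot labels in {0, 1}
        >>> output = loss(input, target)
        >>> output.backward()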
""" __constants__ = ["reduction"] def __init__( self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = "mean", ) -> None: super().__init__(weight, size_average, reduce, reduction) def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.multilabel_soft_margin_loss( input, target, weight=self.weight, reduction=self.reduction ) class CosineEmbeddingLoss(_Loss): r"""Creates a criterion that measures the loss given input tensors :math:`x_1`, :math:`x_2` and a `Tensor` label :math:`y` with values 1 or -1. Use (:math:`y=1`) to maximize the cosine similarity of two inputs, and (:math:`y=-1`) otherwise. This is typically used for learning nonlinear embeddings or semi-supervised learning. The loss function for each sample is: .. math:: \text{loss}(x, y) = \begin{cases} 1 - \cos(x_1, x_2), & \text{if } y = 1 \\ \max(0, \cos(x_1, x_2) - \text{margin}), & \text{if } y = -1 \end{cases} Args: margin (float, optional): Should be a number from :math:`-1` to :math:`1`, :math:`0` to :math:`0.5` is suggested. If :attr:`margin` is missing, the default value is :math:`0`. size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` Shape: - Input1: :math:`(N, D)` or :math:`(D)`, where `N` is the batch size and `D` is the embedding dimension. - Input2: :math:`(N, D)` or :math:`(D)`, same shape as Input1. - Target: :math:`(N)` or :math:`()`. - Output: If :attr:`reduction` is ``'none'``, then :math:`(N)`, otherwise scalar. Examples:: >>> loss = nn.CosineEmbeddingLoss() >>> input1 = torch.randn(3, 5, requires_grad=True) >>> input2 = torch.randn(3, 5, requires_grad=True) >>> target = torch.ones(3) >>> output = loss(input1, input2, target) >>> output.backward() """ __constants__ = ["margin", "reduction"] margin: float def __init__( self, margin: float = 0.0, size_average=None, reduce=None, reduction: str = "mean", ) -> None: super().__init__(size_average, reduce, reduction) self.margin = margin def forward(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor: return F.cosine_embedding_loss( input1, input2, target, margin=self.margin, reduction=self.reduction ) class MarginRankingLoss(_Loss): r"""Creates a criterion that measures the loss given inputs :math:`x1`, :math:`x2`, two 1D mini-batch or 0D `Tensors`, and a label 1D mini-batch or 0D `Tensor` :math:`y` (containing 1 or -1). 
If :math:`y = 1` then it is assumed the first input should be ranked higher (have a larger value) than the second input, and vice-versa for :math:`y = -1`. The loss function for each pair of samples in the mini-batch is: .. math:: \text{loss}(x1, x2, y) = \max(0, -y * (x1 - x2) + \text{margin}) Args: margin (float, optional): Has a default value of :math:`0`. size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` Shape: - Input1: :math:`(N)` or :math:`()` where `N` is the batch size. - Input2: :math:`(N)` or :math:`()`, same shape as Input1. - Target: :math:`(N)` or :math:`()`, same shape as the inputs. - Output: scalar. If :attr:`reduction` is ``'none'`` and Input size is not :math:`()`, then :math:`(N)`. Examples:: >>> loss = nn.MarginRankingLoss() >>> input1 = torch.randn(3, requires_grad=True) >>> input2 = torch.randn(3, requires_grad=True) >>> target = torch.randn(3).sign() >>> output = loss(input1, input2, target) >>> output.backward() """ __constants__ = ["margin", "reduction"] margin: float def __init__( self, margin: float = 0.0, size_average=None, reduce=None, reduction: str = "mean", ) -> None: super().__init__(size_average, reduce, reduction) self.margin = margin def forward(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor: return F.margin_ranking_loss( input1, input2, target, margin=self.margin, reduction=self.reduction ) class MultiMarginLoss(_WeightedLoss): r"""Creates a criterion that optimizes a multi-class classification hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) and output :math:`y` (which is a 1D tensor of target class indices, :math:`0 \leq y \leq \text{x.size}(1)-1`): For each mini-batch sample, the loss in terms of the 1D input :math:`x` and scalar output :math:`y` is: .. math:: \text{loss}(x, y) = \frac{\sum_i \max(0, \text{margin} - x[y] + x[i])^p}{\text{x.size}(0)} where :math:`i \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}` and :math:`i \neq y`. Optionally, you can give non-equal weighting on the classes by passing a 1D :attr:`weight` tensor into the constructor. The loss function then becomes: .. math:: \text{loss}(x, y) = \frac{\sum_i w[y] * \max(0, \text{margin} - x[y] + x[i])^p}{\text{x.size}(0)} Args: p (int, optional): Has a default value of :math:`1`. :math:`1` and :math:`2` are the only supported values. margin (float, optional): Has a default value of :math:`1`. 
weight (Tensor, optional): a manual rescaling weight given to each class. If given, it has to be a Tensor of size `C`. Otherwise, it is treated as if having all ones. size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` Shape: - Input: :math:`(N, C)` or :math:`(C)`, where :math:`N` is the batch size and :math:`C` is the number of classes. - Target: :math:`(N)` or :math:`()`, where each value is :math:`0 \leq \text{targets}[i] \leq C-1`. - Output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the target. Examples:: >>> loss = nn.MultiMarginLoss() >>> x = torch.tensor([[0.1, 0.2, 0.4, 0.8]]) >>> y = torch.tensor([3]) >>> # 0.25 * ((1-(0.8-0.1)) + (1-(0.8-0.2)) + (1-(0.8-0.4))) >>> loss(x, y) tensor(0.32...) """ __constants__ = ["p", "margin", "reduction"] margin: float p: int def __init__( self, p: int = 1, margin: float = 1.0, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = "mean", ) -> None: super().__init__(weight, size_average, reduce, reduction) if p != 1 and p != 2: raise ValueError("only p == 1 and p == 2 supported") if weight is not None and weight.dim() != 1: raise ValueError( f"MultiMarginLoss: expected weight to be None or 1D tensor, got {weight.dim()}D instead" ) self.p = p self.margin = margin def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.multi_margin_loss( input, target, p=self.p, margin=self.margin, weight=self.weight, reduction=self.reduction, ) class TripletMarginLoss(_Loss): r"""Creates a criterion that measures the triplet loss given an input tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`. This is used for measuring a relative similarity between samples. A triplet is composed by `a`, `p` and `n` (i.e., `anchor`, `positive examples` and `negative examples` respectively). The shapes of all input tensors should be :math:`(N, D)`. The distance swap is described in detail in the paper `Learning shallow convolutional feature descriptors with triplet losses`_ by V. Balntas, E. Riba et al. The loss function for each sample in the mini-batch is: .. math:: L(a, p, n) = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\} where .. math:: d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p The norm is calculated using the specified p value and a small constant :math:`\varepsilon` is added for numerical stability. 
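    For instance, with the default ``margin`` of :math:`1` and ``p=2``, an anchor at the origin, a positive at distance :math:`2` and a negative at distance :math:`1` give a per-sample loss of :math:`\max\{2 - 1 + 1, 0\} = 2` (up to the small :math:`\varepsilon` term); a hand-picked sketch::

        >>> anchor = torch.tensor([[0.0, 0.0]])
        >>> positive = torch.tensor([[0.0, 2.0]])  # distance 2 from the anchor
        >>> negative = torch.tensor([[0.0, 1.0]])  # distance 1 from the anchor
        >>> nn.TripletMarginLoss(margin=1.0, p=2)(anchor, positive, negative)  # approximately 2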
See also :class:`~torch.nn.TripletMarginWithDistanceLoss`, which computes the triplet margin loss for input tensors using a custom distance function. Args: margin (float, optional): Default: :math:`1`. p (int, optional): The norm degree for pairwise distance. Default: :math:`2`. eps (float, optional): Small constant for numerical stability. Default: :math:`1e-6`. swap (bool, optional): The distance swap is described in detail in the paper `Learning shallow convolutional feature descriptors with triplet losses` by V. Balntas, E. Riba et al. Default: ``False``. size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` Shape: - Input: :math:`(N, D)` or :math:`(D)` where :math:`D` is the vector dimension. - Output: A Tensor of shape :math:`(N)` if :attr:`reduction` is ``'none'`` and input shape is :math:`(N, D)`; a scalar otherwise. Examples:: >>> triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2, eps=1e-7) >>> anchor = torch.randn(100, 128, requires_grad=True) >>> positive = torch.randn(100, 128, requires_grad=True) >>> negative = torch.randn(100, 128, requires_grad=True) >>> output = triplet_loss(anchor, positive, negative) >>> output.backward() .. 
_Learning shallow convolutional feature descriptors with triplet losses: https://bmva-archive.org.uk/bmvc/2016/papers/paper119/index.html """ __constants__ = ["margin", "p", "eps", "swap", "reduction"] margin: float p: float eps: float swap: bool def __init__( self, margin: float = 1.0, p: float = 2.0, eps: float = 1e-6, swap: bool = False, size_average=None, reduce=None, reduction: str = "mean", ): super().__init__(size_average, reduce, reduction) if margin <= 0: raise ValueError( f"TripletMarginLoss: expected margin to be greater than 0, got {margin} instead" ) self.margin = margin self.p = p self.eps = eps self.swap = swap def forward(self, anchor: Tensor, positive: Tensor, negative: Tensor) -> Tensor: return F.triplet_margin_loss( anchor, positive, negative, margin=self.margin, p=self.p, eps=self.eps, swap=self.swap, reduction=self.reduction, ) class TripletMarginWithDistanceLoss(_Loss): r"""Creates a criterion that measures the triplet loss given input tensors :math:`a`, :math:`p`, and :math:`n` (representing anchor, positive, and negative examples, respectively), and a nonnegative, real-valued function ("distance function") used to compute the relationship between the anchor and positive example ("positive distance") and the anchor and negative example ("negative distance"). The unreduced loss (i.e., with :attr:`reduction` set to ``'none'``) can be described as: .. math:: \ell(a, p, n) = L = \{l_1,\dots,l_N\}^\top, \quad l_i = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\} where :math:`N` is the batch size; :math:`d` is a nonnegative, real-valued function quantifying the closeness of two tensors, referred to as the :attr:`distance_function`; and :math:`margin` is a nonnegative margin representing the minimum difference between the positive and negative distances that is required for the loss to be 0. The input tensors have :math:`N` elements each and can be of any shape that the distance function can handle. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then: .. math:: \ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.} \end{cases} See also :class:`~torch.nn.TripletMarginLoss`, which computes the triplet loss for input tensors using the :math:`l_p` distance as the distance function. Args: distance_function (Callable, optional): A nonnegative, real-valued function that quantifies the closeness of two tensors. If not specified, `nn.PairwiseDistance` will be used. Default: ``None`` margin (float, optional): A nonnegative margin representing the minimum difference between the positive and negative distances required for the loss to be 0. Larger margins penalize cases where the negative examples are not distant enough from the anchors, relative to the positives. Default: :math:`1`. swap (bool, optional): Whether to use the distance swap described in the paper `Learning shallow convolutional feature descriptors with triplet losses` by V. Balntas, E. Riba et al. If True, and if the positive example is closer to the negative example than the anchor is, swaps the positive example and the anchor in the loss computation. Default: ``False``. reduction (str, optional): Specifies the (optional) reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. 
Default: ``'mean'`` Shape: - Input: :math:`(N, *)` where :math:`*` represents any number of additional dimensions as supported by the distance function. - Output: A Tensor of shape :math:`(N)` if :attr:`reduction` is ``'none'``, or a scalar otherwise. Examples:: >>> # Initialize embeddings >>> embedding = nn.Embedding(1000, 128) >>> anchor_ids = torch.randint(0, 1000, (1,)) >>> positive_ids = torch.randint(0, 1000, (1,)) >>> negative_ids = torch.randint(0, 1000, (1,)) >>> anchor = embedding(anchor_ids) >>> positive = embedding(positive_ids) >>> negative = embedding(negative_ids) >>> >>> # Built-in Distance Function >>> triplet_loss = \ >>> nn.TripletMarginWithDistanceLoss(distance_function=nn.PairwiseDistance()) >>> output = triplet_loss(anchor, positive, negative) >>> output.backward() >>> >>> # Custom Distance Function >>> def l_infinity(x1, x2): >>> return torch.max(torch.abs(x1 - x2), dim=1).values >>> >>> # xdoctest: +SKIP("FIXME: Would call backwards a second time") >>> triplet_loss = ( >>> nn.TripletMarginWithDistanceLoss(distance_function=l_infinity, margin=1.5)) >>> output = triplet_loss(anchor, positive, negative) >>> output.backward() >>> >>> # Custom Distance Function (Lambda) >>> triplet_loss = ( >>> nn.TripletMarginWithDistanceLoss( >>> distance_function=lambda x, y: 1.0 - F.cosine_similarity(x, y))) >>> output = triplet_loss(anchor, positive, negative) >>> output.backward() Reference: V. Balntas, et al.: Learning shallow convolutional feature descriptors with triplet losses: https://bmva-archive.org.uk/bmvc/2016/papers/paper119/index.html """ __constants__ = ["margin", "swap", "reduction"] margin: float swap: bool def __init__( self, *, distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = None, margin: float = 1.0, swap: bool = False, reduction: str = "mean", ): super().__init__(size_average=None, reduce=None, reduction=reduction) if margin <= 0: raise ValueError( f"TripletMarginWithDistanceLoss: expected margin to be greater than 0, got {margin} instead" ) self.distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = ( distance_function if distance_function is not None else PairwiseDistance() ) self.margin = margin self.swap = swap def forward(self, anchor: Tensor, positive: Tensor, negative: Tensor) -> Tensor: return F.triplet_margin_with_distance_loss( anchor, positive, negative, distance_function=self.distance_function, margin=self.margin, swap=self.swap, reduction=self.reduction, ) class CTCLoss(_Loss): r"""The Connectionist Temporal Classification loss. Calculates loss between a continuous (unsegmented) time series and a target sequence. CTCLoss sums over the probability of possible alignments of input to target, producing a loss value which is differentiable with respect to each input node. The alignment of input to target is assumed to be "many-to-one", which limits the length of the target sequence such that it must be :math:`\leq` the input length. Args: blank (int, optional): blank label. Default :math:`0`. reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the output losses will be divided by the target lengths and then the mean over the batch is taken, ``'sum'``: the output losses will be summed. Default: ``'mean'`` zero_infinity (bool, optional): Whether to zero infinite losses and the associated gradients. Default: ``False`` Infinite losses mainly occur when the inputs are too short to be aligned to the targets. 
Shape: - Log_probs: Tensor of size :math:`(T, N, C)` or :math:`(T, C)`, where :math:`T = \text{input length}`, :math:`N = \text{batch size}`, and :math:`C = \text{number of classes (including blank)}`. The logarithmized probabilities of the outputs (e.g. obtained with :func:`torch.nn.functional.log_softmax`). - Targets: Tensor of size :math:`(N, S)` or :math:`(\operatorname{sum}(\text{target\_lengths}))`, where :math:`N = \text{batch size}` and :math:`S = \text{max target length, if shape is } (N, S)`. It represents the target sequences. Each element in the target sequence is a class index. And the target index cannot be blank (default=0). In the :math:`(N, S)` form, targets are padded to the length of the longest sequence, and stacked. In the :math:`(\operatorname{sum}(\text{target\_lengths}))` form, the targets are assumed to be un-padded and concatenated within 1 dimension. - Input_lengths: Tuple or tensor of size :math:`(N)` or :math:`()`, where :math:`N = \text{batch size}`. It represents the lengths of the inputs (must each be :math:`\leq T`). And the lengths are specified for each sequence to achieve masking under the assumption that sequences are padded to equal lengths. - Target_lengths: Tuple or tensor of size :math:`(N)` or :math:`()`, where :math:`N = \text{batch size}`. It represents lengths of the targets. Lengths are specified for each sequence to achieve masking under the assumption that sequences are padded to equal lengths. If target shape is :math:`(N,S)`, target_lengths are effectively the stop index :math:`s_n` for each target sequence, such that ``target_n = targets[n,0:s_n]`` for each target in a batch. Lengths must each be :math:`\leq S` If the targets are given as a 1d tensor that is the concatenation of individual targets, the target_lengths must add up to the total length of the tensor. - Output: scalar if :attr:`reduction` is ``'mean'`` (default) or ``'sum'``. If :attr:`reduction` is ``'none'``, then :math:`(N)` if input is batched or :math:`()` if input is unbatched, where :math:`N = \text{batch size}`. 
Examples:: >>> # Target are to be padded >>> T = 50 # Input sequence length >>> C = 20 # Number of classes (including blank) >>> N = 16 # Batch size >>> S = 30 # Target sequence length of longest target in batch (padding length) >>> S_min = 10 # Minimum target length, for demonstration purposes >>> >>> # Initialize random batch of input vectors, for *size = (T,N,C) >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_() >>> >>> # Initialize random batch of targets (0 = blank, 1:C = classes) >>> target = torch.randint(low=1, high=C, size=(N, S), dtype=torch.long) >>> >>> input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long) >>> target_lengths = torch.randint(low=S_min, high=S, size=(N,), dtype=torch.long) >>> ctc_loss = nn.CTCLoss() >>> loss = ctc_loss(input, target, input_lengths, target_lengths) >>> loss.backward() >>> >>> >>> # Target are to be un-padded >>> T = 50 # Input sequence length >>> C = 20 # Number of classes (including blank) >>> N = 16 # Batch size >>> >>> # Initialize random batch of input vectors, for *size = (T,N,C) >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_() >>> input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long) >>> >>> # Initialize random batch of targets (0 = blank, 1:C = classes) >>> target_lengths = torch.randint(low=1, high=T, size=(N,), dtype=torch.long) >>> target = torch.randint(low=1, high=C, size=(sum(target_lengths),), dtype=torch.long) >>> ctc_loss = nn.CTCLoss() >>> loss = ctc_loss(input, target, input_lengths, target_lengths) >>> loss.backward() >>> >>> >>> # Target are to be un-padded and unbatched (effectively N=1) >>> T = 50 # Input sequence length >>> C = 20 # Number of classes (including blank) >>> >>> # Initialize random batch of input vectors, for *size = (T,C) >>> # xdoctest: +SKIP("FIXME: error in doctest") >>> input = torch.randn(T, C).log_softmax(1).detach().requires_grad_() >>> input_lengths = torch.tensor(T, dtype=torch.long) >>> >>> # Initialize random batch of targets (0 = blank, 1:C = classes) >>> target_lengths = torch.randint(low=1, high=T, size=(), dtype=torch.long) >>> target = torch.randint(low=1, high=C, size=(target_lengths,), dtype=torch.long) >>> ctc_loss = nn.CTCLoss() >>> loss = ctc_loss(input, target, input_lengths, target_lengths) >>> loss.backward() Reference: A. Graves et al.: Connectionist Temporal Classification: Labelling Unsegmented Sequence Data with Recurrent Neural Networks: https://www.cs.toronto.edu/~graves/icml_2006.pdf Note: In order to use CuDNN, the following must be satisfied: :attr:`targets` must be in concatenated format, all :attr:`input_lengths` must be `T`. :math:`blank=0`, :attr:`target_lengths` :math:`\leq 256`, the integer arguments must be of dtype :attr:`torch.int32`. The regular implementation uses the (more common in PyTorch) `torch.long` dtype. Note: In some circumstances when using the CUDA backend with CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. Please see the notes on :doc:`/notes/randomness` for background. 
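    A minimal sketch of :attr:`zero_infinity` (hand-picked sizes: with only :math:`T = 2` input steps, a target of length :math:`4` cannot be aligned, so the loss is infinite unless it is zeroed out)::

        >>> log_probs = torch.randn(2, 1, 5).log_softmax(2).detach().requires_grad_()
        >>> targets = torch.tensor([[1, 2, 3, 4]])
        >>> input_lengths = torch.tensor([2])
        >>> target_lengths = torch.tensor([4])
        >>> nn.CTCLoss(zero_infinity=False)(log_probs, targets, input_lengths, target_lengths)  # inf
        >>> nn.CTCLoss(zero_infinity=True)(log_probs, targets, input_lengths, target_lengths)   # zeroed to 0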
""" __constants__ = ["blank", "reduction"] blank: int zero_infinity: bool def __init__( self, blank: int = 0, reduction: str = "mean", zero_infinity: bool = False ): super().__init__(reduction=reduction) self.blank = blank self.zero_infinity = zero_infinity def forward( self, log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, ) -> Tensor: return F.ctc_loss( log_probs, targets, input_lengths, target_lengths, self.blank, self.reduction, self.zero_infinity, ) # TODO: L1HingeEmbeddingCriterion # TODO: MSECriterion weight # TODO: ClassSimplexCriterion ```
================================================================================================================== SOURCE CODE FILE: module.py LINES: 10 SIZE: 125.87 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\module.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import functools import inspect import itertools import warnings import weakref from collections import namedtuple, OrderedDict from collections.abc import Iterator, Mapping from typing import Any, Callable, Optional, overload, TypeVar, Union from typing_extensions import Self import torch from torch import device, dtype, Tensor from torch._prims_common import DeviceLikeType from torch.nn.parameter import Buffer, Parameter from torch.utils._python_dispatch import is_traceable_wrapper_subclass from torch.utils.hooks import BackwardHook, RemovableHandle __all__ = [ "register_module_forward_pre_hook", "register_module_forward_hook", "register_module_full_backward_pre_hook", "register_module_backward_hook", "register_module_full_backward_hook", "register_module_buffer_registration_hook", "register_module_module_registration_hook", "register_module_parameter_registration_hook", "Module", ] _grad_t = Union[tuple[Tensor, ...], Tensor] # See https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self for the use # of `T` to annotate `self`. Many methods of `Module` return `self` and we want those return values to be # the type of the subclass, not the looser type of `Module`. T = TypeVar("T", bound="Module") class _IncompatibleKeys( namedtuple("IncompatibleKeys", ["missing_keys", "unexpected_keys"]), ): __slots__ = () def __repr__(self): if not self.missing_keys and not self.unexpected_keys: return "<All keys matched successfully>" return super().__repr__() __str__ = __repr__ def _addindent(s_, numSpaces): s = s_.split("\n") # don't do anything for single-line stuff if len(s) == 1: return s_ first = s.pop(0) s = [(numSpaces * " ") + line for line in s] s = "\n".join(s) s = first + "\n" + s return s r"""This tracks hooks common to all modules that are executed immediately before .registering the buffer/module/parameter""" _global_buffer_registration_hooks: dict[int, Callable] = OrderedDict() _global_module_registration_hooks: dict[int, Callable] = OrderedDict() _global_parameter_registration_hooks: dict[int, Callable] = OrderedDict() class _WrappedHook: def __init__(self, hook: Callable, module: Optional["Module"] = None): self.hook: Callable = hook functools.update_wrapper(self, hook) self.with_module: bool = False if module is not None: self.module: weakref.ReferenceType[Module] = weakref.ref(module) self.with_module = True def __call__(self, *args: Any, **kwargs: Any) -> Any: if self.with_module: module = self.module() if module is None: raise RuntimeError("You are trying to call the hook of a dead Module!") return self.hook(module, *args, **kwargs) return self.hook(*args, **kwargs) def __getstate__(self) -> dict: result = {"hook": self.hook, "with_module": self.with_module} if self.with_module: result["module"] = self.module() return result def __setstate__(self, state: dict): self.hook = state["hook"] self.with_module = state["with_module"] if self.with_module: if state["module"] is None: raise RuntimeError( "You are trying to revive the hook of a dead Module!" ) self.module = weakref.ref(state["module"]) r"""This tracks hooks common to all modules that are executed before/after calling forward and backward. 
This is global state used for debugging/profiling purposes""" _global_backward_pre_hooks: dict[int, Callable] = OrderedDict() _global_backward_hooks: dict[int, Callable] = OrderedDict() _global_is_full_backward_hook: Optional[bool] = None _global_forward_pre_hooks: dict[int, Callable] = OrderedDict() _global_forward_hooks: dict[int, Callable] = OrderedDict() _global_forward_hooks_always_called: dict[int, bool] = OrderedDict() _global_forward_hooks_with_kwargs: dict[int, bool] = OrderedDict() _EXTRA_STATE_KEY_SUFFIX = "_extra_state" def register_module_buffer_registration_hook( hook: Callable[..., None], ) -> RemovableHandle: r"""Register a buffer registration hook common to all modules. .. warning :: This adds global state to the `nn.Module` module The hook will be called every time :func:`register_buffer` is invoked. It should have the following signature:: hook(module, name, buffer) -> None or new buffer The hook can modify the input or return a single modified value in the hook. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ handle = RemovableHandle(_global_buffer_registration_hooks) _global_buffer_registration_hooks[handle.id] = hook return handle def register_module_module_registration_hook( hook: Callable[..., None], ) -> RemovableHandle: r"""Register a module registration hook common to all modules. .. warning :: This adds global state to the `nn.Module` module The hook will be called every time :func:`register_module` is invoked. It should have the following signature:: hook(module, name, submodule) -> None or new submodule The hook can modify the input or return a single modified value in the hook. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ handle = RemovableHandle(_global_module_registration_hooks) _global_module_registration_hooks[handle.id] = hook return handle def register_module_parameter_registration_hook( hook: Callable[..., None], ) -> RemovableHandle: r"""Register a parameter registration hook common to all modules. .. warning :: This adds global state to the `nn.Module` module The hook will be called every time :func:`register_parameter` is invoked. It should have the following signature:: hook(module, name, param) -> None or new parameter The hook can modify the input or return a single modified value in the hook. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ handle = RemovableHandle(_global_parameter_registration_hooks) _global_parameter_registration_hooks[handle.id] = hook return handle def register_module_forward_pre_hook(hook: Callable[..., None]) -> RemovableHandle: r"""Register a forward pre-hook common to all modules. .. warning :: This adds global state to the `nn.module` module and it is only intended for debugging/profiling purposes. The hook will be called every time before :func:`forward` is invoked. It should have the following signature:: hook(module, input) -> None or modified input The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the ``forward``. The hook can modify the input. User can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if a single value is returned(unless that value is already a tuple). 
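    Example (an illustrative sketch; the hook name ``report_call`` is not part of
    the API)::

        >>> def report_call(module, args):
        ...     print(f"forward pre-hook fired for {type(module).__name__}")
        >>> handle = torch.nn.modules.module.register_module_forward_pre_hook(report_call)
        >>> # every subsequent forward call of any module now triggers the hook
        >>> handle.remove()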
This hook has precedence over the specific module hooks registered with ``register_forward_pre_hook``. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ handle = RemovableHandle(_global_forward_pre_hooks) _global_forward_pre_hooks[handle.id] = hook return handle def register_module_forward_hook( hook: Callable[..., None], *, with_kwargs: bool = False, always_call: bool = False, ) -> RemovableHandle: r"""Register a global forward hook for all the modules. .. warning :: This adds global state to the `nn.module` module and it is only intended for debugging/profiling purposes. The hook will be called every time after :func:`forward` has computed an output. It should have the following signature:: hook(module, input, output) -> None or modified output The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the ``forward``. You can optionally modify the output of the module by returning a new value that will replace the output from the :func:`forward` function. Parameters: hook (Callable): The user defined hook to be registered. always_call (bool): If ``True`` the ``hook`` will be run regardless of whether an exception is raised while calling the Module. Default: ``False`` Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` This hook will be executed before specific module hooks registered with ``register_forward_hook``. """ handle = RemovableHandle( _global_forward_hooks, extra_dict=_global_forward_hooks_always_called ) _global_forward_hooks[handle.id] = hook if with_kwargs: _global_forward_hooks_with_kwargs[handle.id] = True if always_call: _global_forward_hooks_always_called[handle.id] = True return handle def register_module_backward_hook( hook: Callable[["Module", _grad_t, _grad_t], Union[None, _grad_t]], ) -> RemovableHandle: r"""Register a backward hook common to all the modules. This function is deprecated in favor of :func:`torch.nn.modules.module.register_module_full_backward_hook` and the behavior of this function will change in future versions. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ global _global_is_full_backward_hook if _global_is_full_backward_hook is True: raise RuntimeError( "Cannot use both regular backward hooks and full backward hooks as a " "global Module hook. Please use only one of them." ) _global_is_full_backward_hook = False handle = RemovableHandle(_global_backward_hooks) _global_backward_hooks[handle.id] = hook return handle def register_module_full_backward_pre_hook( hook: Callable[["Module", _grad_t], Union[None, _grad_t]], ) -> RemovableHandle: r"""Register a backward pre-hook common to all the modules. .. warning :: This adds global state to the `nn.module` module and it is only intended for debugging/profiling purposes. Hooks registered using this function behave in the same way as those registered by :meth:`torch.nn.Module.register_full_backward_pre_hook`. Refer to its documentation for more details. Hooks registered using this function will be called before hooks registered using :meth:`torch.nn.Module.register_full_backward_pre_hook`. 
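    Example (an illustrative sketch; ``halve_grad_output`` is not a real API name)::

        >>> def halve_grad_output(module, grad_output):
        ...     # returning a new tuple replaces ``grad_output`` downstream
        ...     return tuple(g * 0.5 if g is not None else None for g in grad_output)
        >>> handle = torch.nn.modules.module.register_module_full_backward_pre_hook(halve_grad_output)
        >>> handle.remove()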
Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ handle = RemovableHandle(_global_backward_pre_hooks) _global_backward_pre_hooks[handle.id] = hook return handle def register_module_full_backward_hook( hook: Callable[["Module", _grad_t, _grad_t], Union[None, _grad_t]], ) -> RemovableHandle: r"""Register a backward hook common to all the modules. .. warning :: This adds global state to the `nn.module` module and it is only intended for debugging/profiling purposes. Hooks registered using this function behave in the same way as those registered by :meth:`torch.nn.Module.register_full_backward_hook`. Refer to its documentation for more details. Hooks registered using this function will be called before hooks registered using :meth:`torch.nn.Module.register_full_backward_hook`. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ global _global_is_full_backward_hook if _global_is_full_backward_hook is False: raise RuntimeError( "Cannot use both regular backward hooks and full backward hooks as a " "global Module hook. Please use only one of them." ) _global_is_full_backward_hook = True handle = RemovableHandle(_global_backward_hooks) _global_backward_hooks[handle.id] = hook return handle # Trick mypy into not applying contravariance rules to inputs by defining # forward as a value, rather than a function. See also # https://github.com/python/mypy/issues/8795 def _forward_unimplemented(self, *input: Any) -> None: r"""Define the computation performed at every call. Should be overridden by all subclasses. .. note:: Although the recipe for forward pass needs to be defined within this function, one should call the :class:`Module` instance afterwards instead of this since the former takes care of running the registered hooks while the latter silently ignores them. """ raise NotImplementedError( f'Module [{type(self).__name__}] is missing the required "forward" function' ) class Module: r"""Base class for all neural network modules. Your models should also subclass this class. Modules can also contain other Modules, allowing them to be nested in a tree structure. You can assign the submodules as regular attributes:: import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 20, 5) self.conv2 = nn.Conv2d(20, 20, 5) def forward(self, x): x = F.relu(self.conv1(x)) return F.relu(self.conv2(x)) Submodules assigned in this way will be registered, and will also have their parameters converted when you call :meth:`to`, etc. .. note:: As per the example above, an ``__init__()`` call to the parent class must be made before assignment on the child. :ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool """ dump_patches: bool = False _version: int = 1 r"""This allows better BC support for :meth:`load_state_dict`. In :meth:`state_dict`, the version number will be saved as in the attribute `_metadata` of the returned state dict, and thus pickled. `_metadata` is a dictionary with keys that follow the naming convention of state dict. See ``_load_from_state_dict`` on how to use this information in loading. 
If new parameters/buffers are added/removed from a module, this number shall be bumped, and the module's `_load_from_state_dict` method can compare the version number and do appropriate changes if the state dict is from before the change.""" training: bool _parameters: dict[str, Optional[Parameter]] _buffers: dict[str, Optional[Tensor]] _non_persistent_buffers_set: set[str] _backward_pre_hooks: dict[int, Callable] _backward_hooks: dict[int, Callable] _is_full_backward_hook: Optional[bool] _forward_hooks: dict[int, Callable] # Marks whether the corresponding _forward_hooks accept kwargs or not. # As JIT does not support set[int], this dict is used as a set, where all # hooks represented in this dict accept kwargs. _forward_hooks_with_kwargs: dict[int, bool] # forward hooks that should always be called even if an exception is raised _forward_hooks_always_called: dict[int, bool] _forward_pre_hooks: dict[int, Callable] # Marks whether the corresponding _forward_hooks accept kwargs or not. # As JIT does not support set[int], this dict is used as a set, where all # hooks represented in this dict accept kwargs. _forward_pre_hooks_with_kwargs: dict[int, bool] _state_dict_hooks: dict[int, Callable] _load_state_dict_pre_hooks: dict[int, Callable] _state_dict_pre_hooks: dict[int, Callable] _load_state_dict_post_hooks: dict[int, Callable] _modules: dict[str, Optional["Module"]] call_super_init: bool = False _compiled_call_impl: Optional[Callable] = None def __init__(self, *args, **kwargs) -> None: """Initialize internal Module state, shared by both nn.Module and ScriptModule.""" torch._C._log_api_usage_once("python.nn_module") # Backward compatibility: no args used to be allowed when call_super_init=False if self.call_super_init is False and bool(kwargs): raise TypeError( f"{type(self).__name__}.__init__() got an unexpected keyword argument '{next(iter(kwargs))}'" "" ) if self.call_super_init is False and bool(args): raise TypeError( f"{type(self).__name__}.__init__() takes 1 positional argument but {len(args) + 1} were" " given" ) """ Calls super().__setattr__('a', a) instead of the typical self.a = a to avoid Module.__setattr__ overhead. Module's __setattr__ has special handling for parameters, submodules, and buffers but simply calls into super().__setattr__ for all other attributes. """ super().__setattr__("training", True) super().__setattr__("_parameters", {}) super().__setattr__("_buffers", {}) super().__setattr__("_non_persistent_buffers_set", set()) super().__setattr__("_backward_pre_hooks", OrderedDict()) super().__setattr__("_backward_hooks", OrderedDict()) super().__setattr__("_is_full_backward_hook", None) super().__setattr__("_forward_hooks", OrderedDict()) super().__setattr__("_forward_hooks_with_kwargs", OrderedDict()) super().__setattr__("_forward_hooks_always_called", OrderedDict()) super().__setattr__("_forward_pre_hooks", OrderedDict()) super().__setattr__("_forward_pre_hooks_with_kwargs", OrderedDict()) super().__setattr__("_state_dict_hooks", OrderedDict()) super().__setattr__("_state_dict_pre_hooks", OrderedDict()) super().__setattr__("_load_state_dict_pre_hooks", OrderedDict()) super().__setattr__("_load_state_dict_post_hooks", OrderedDict()) super().__setattr__("_modules", {}) if self.call_super_init: super().__init__(*args, **kwargs) forward: Callable[..., Any] = _forward_unimplemented def register_buffer( self, name: str, tensor: Optional[Tensor], persistent: bool = True ) -> None: r"""Add a buffer to the module. 
This is typically used to register a buffer that should not to be considered a model parameter. For example, BatchNorm's ``running_mean`` is not a parameter, but is part of the module's state. Buffers, by default, are persistent and will be saved alongside parameters. This behavior can be changed by setting :attr:`persistent` to ``False``. The only difference between a persistent buffer and a non-persistent buffer is that the latter will not be a part of this module's :attr:`state_dict`. Buffers can be accessed as attributes using given names. Args: name (str): name of the buffer. The buffer can be accessed from this module using the given name tensor (Tensor or None): buffer to be registered. If ``None``, then operations that run on buffers, such as :attr:`cuda`, are ignored. If ``None``, the buffer is **not** included in the module's :attr:`state_dict`. persistent (bool): whether the buffer is part of this module's :attr:`state_dict`. Example:: >>> # xdoctest: +SKIP("undefined vars") >>> self.register_buffer('running_mean', torch.zeros(num_features)) """ if persistent is False and isinstance(self, torch.jit.ScriptModule): raise RuntimeError("ScriptModule does not support non-persistent buffers") if "_buffers" not in self.__dict__: raise AttributeError("cannot assign buffer before Module.__init__() call") elif not isinstance(name, str): raise TypeError( f"buffer name should be a string. Got {torch.typename(name)}" ) elif "." in name: raise KeyError('buffer name can\'t contain "."') elif name == "": raise KeyError('buffer name can\'t be empty string ""') elif hasattr(self, name) and name not in self._buffers: raise KeyError(f"attribute '{name}' already exists") elif tensor is not None and not isinstance(tensor, torch.Tensor): raise TypeError( f"cannot assign '{torch.typename(tensor)}' object to buffer '{name}' " "(torch Tensor or None required)" ) else: for hook in _global_buffer_registration_hooks.values(): output = hook(self, name, tensor) if output is not None: tensor = output self._buffers[name] = tensor if persistent: self._non_persistent_buffers_set.discard(name) else: self._non_persistent_buffers_set.add(name) def register_parameter(self, name: str, param: Optional[Parameter]) -> None: r"""Add a parameter to the module. The parameter can be accessed as an attribute using given name. Args: name (str): name of the parameter. The parameter can be accessed from this module using the given name param (Parameter or None): parameter to be added to the module. If ``None``, then operations that run on parameters, such as :attr:`cuda`, are ignored. If ``None``, the parameter is **not** included in the module's :attr:`state_dict`. """ if "_parameters" not in self.__dict__: raise AttributeError( "cannot assign parameter before Module.__init__() call" ) elif not isinstance(name, str): raise TypeError( f"parameter name should be a string. Got {torch.typename(name)}" ) elif "." in name: raise KeyError('parameter name can\'t contain "."') elif name == "": raise KeyError('parameter name can\'t be empty string ""') elif hasattr(self, name) and name not in self._parameters: raise KeyError(f"attribute '{name}' already exists") if param is None: self._parameters[name] = None elif not isinstance(param, Parameter): raise TypeError( f"cannot assign '{torch.typename(param)}' object to parameter '{name}' " "(torch.nn.Parameter or None required)" ) elif param.grad_fn: raise ValueError( f"Cannot assign non-leaf Tensor to parameter '{name}'. Model " f"parameters must be created explicitly. 
To express '{name}' " "as a function of another Tensor, compute the value in " "the forward() method." ) else: for hook in _global_parameter_registration_hooks.values(): output = hook(self, name, param) if output is not None: param = output self._parameters[name] = param def add_module(self, name: str, module: Optional["Module"]) -> None: r"""Add a child module to the current module. The module can be accessed as an attribute using the given name. Args: name (str): name of the child module. The child module can be accessed from this module using the given name module (Module): child module to be added to the module. """ if not isinstance(module, Module) and module is not None: raise TypeError(f"{torch.typename(module)} is not a Module subclass") elif not isinstance(name, str): raise TypeError( f"module name should be a string. Got {torch.typename(name)}" ) elif hasattr(self, name) and name not in self._modules: raise KeyError(f"attribute '{name}' already exists") elif "." in name: raise KeyError(f'module name can\'t contain ".", got: {name}') elif name == "": raise KeyError('module name can\'t be empty string ""') for hook in _global_module_registration_hooks.values(): output = hook(self, name, module) if output is not None: module = output self._modules[name] = module def register_module(self, name: str, module: Optional["Module"]) -> None: r"""Alias for :func:`add_module`.""" self.add_module(name, module) def get_submodule(self, target: str) -> "Module": """Return the submodule given by ``target`` if it exists, otherwise throw an error. For example, let's say you have an ``nn.Module`` ``A`` that looks like this: .. code-block:: text A( (net_b): Module( (net_c): Module( (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2)) ) (linear): Linear(in_features=100, out_features=200, bias=True) ) ) (The diagram shows an ``nn.Module`` ``A``. ``A`` which has a nested submodule ``net_b``, which itself has two submodules ``net_c`` and ``linear``. ``net_c`` then has a submodule ``conv``.) To check whether or not we have the ``linear`` submodule, we would call ``get_submodule("net_b.linear")``. To check whether we have the ``conv`` submodule, we would call ``get_submodule("net_b.net_c.conv")``. The runtime of ``get_submodule`` is bounded by the degree of module nesting in ``target``. A query against ``named_modules`` achieves the same result, but it is O(N) in the number of transitive modules. So, for a simple check to see if some submodule exists, ``get_submodule`` should always be used. Args: target: The fully-qualified string name of the submodule to look for. (See above example for how to specify a fully-qualified string.) Returns: torch.nn.Module: The submodule referenced by ``target`` Raises: AttributeError: If at any point along the path resulting from the target string the (sub)path resolves to a non-existent attribute name or an object that is not an instance of ``nn.Module``. """ if target == "": return self atoms: list[str] = target.split(".") mod: torch.nn.Module = self for item in atoms: if not hasattr(mod, item): raise AttributeError( mod._get_name() + " has no attribute `" + item + "`" ) mod = getattr(mod, item) if not isinstance(mod, torch.nn.Module): raise AttributeError("`" + item + "` is not an nn.Module") return mod def set_submodule( self, target: str, module: "Module", strict: bool = False ) -> None: """ Set the submodule given by ``target`` if it exists, otherwise throw an error. .. 
note:: If ``strict`` is set to ``False`` (default), the method will replace an existing submodule or create a new submodule if the parent module exists. If ``strict`` is set to ``True``, the method will only attempt to replace an existing submodule and throw an error if the submodule does not exist. For example, let's say you have an ``nn.Module`` ``A`` that looks like this: .. code-block:: text A( (net_b): Module( (net_c): Module( (conv): Conv2d(3, 3, 3) ) (linear): Linear(3, 3) ) ) (The diagram shows an ``nn.Module`` ``A``. ``A`` has a nested submodule ``net_b``, which itself has two submodules ``net_c`` and ``linear``. ``net_c`` then has a submodule ``conv``.) To override the ``Conv2d`` with a new submodule ``Linear``, you could call ``set_submodule("net_b.net_c.conv", nn.Linear(1, 1))`` where ``strict`` could be ``True`` or ``False`` To add a new submodule ``Conv2d`` to the existing ``net_b`` module, you would call ``set_submodule("net_b.conv", nn.Conv2d(1, 1, 1))``. In the above if you set ``strict=True`` and call ``set_submodule("net_b.conv", nn.Conv2d(1, 1, 1), strict=True)``, an AttributeError will be raised because ``net_b`` does not have a submodule named ``conv``. Args: target: The fully-qualified string name of the submodule to look for. (See above example for how to specify a fully-qualified string.) module: The module to set the submodule to. strict: If ``False``, the method will replace an existing submodule or create a new submodule if the parent module exists. If ``True``, the method will only attempt to replace an existing submodule and throw an error if the submodule doesn't already exist. Raises: ValueError: If the ``target`` string is empty or if ``module`` is not an instance of ``nn.Module``. AttributeError: If at any point along the path resulting from the ``target`` string the (sub)path resolves to a non-existent attribute name or an object that is not an instance of ``nn.Module``. """ if target == "": raise ValueError("Cannot set the submodule without a target name!") atoms: list[str] = target.split(".") if not isinstance(module, torch.nn.Module): raise ValueError( "`" + "module" + f"` is not an nn.Module, found {type(module)}" ) if len(atoms) == 1: parent: torch.nn.Module = self else: parent_key = ".".join(atoms[:-1]) parent = self.get_submodule(parent_key) if strict and not hasattr(parent, atoms[-1]): raise AttributeError( parent._get_name() + " has no attribute `" + atoms[-1] + "`" ) if hasattr(parent, atoms[-1]): mod = getattr(parent, atoms[-1]) if not isinstance(mod, torch.nn.Module): raise AttributeError("`" + atoms[-1] + "` is not an nn.Module") setattr(parent, atoms[-1], module) def get_parameter(self, target: str) -> "Parameter": """Return the parameter given by ``target`` if it exists, otherwise throw an error. See the docstring for ``get_submodule`` for a more detailed explanation of this method's functionality as well as how to correctly specify ``target``. Args: target: The fully-qualified string name of the Parameter to look for. (See ``get_submodule`` for how to specify a fully-qualified string.) 
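        Example (an illustrative sketch; ``model`` is a hypothetical instance of the
        nested ``A`` module shown in ``get_submodule``)::

            >>> # xdoctest: +SKIP("undefined vars")
            >>> weight = model.get_parameter("net_b.linear.weight")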
Returns: torch.nn.Parameter: The Parameter referenced by ``target`` Raises: AttributeError: If the target string references an invalid path or resolves to something that is not an ``nn.Parameter`` """ module_path, _, param_name = target.rpartition(".") mod: torch.nn.Module = self.get_submodule(module_path) if not hasattr(mod, param_name): raise AttributeError( mod._get_name() + " has no attribute `" + param_name + "`" ) param: torch.nn.Parameter = getattr(mod, param_name) if not isinstance(param, torch.nn.Parameter): raise AttributeError("`" + param_name + "` is not an nn.Parameter") return param def get_buffer(self, target: str) -> "Tensor": """Return the buffer given by ``target`` if it exists, otherwise throw an error. See the docstring for ``get_submodule`` for a more detailed explanation of this method's functionality as well as how to correctly specify ``target``. Args: target: The fully-qualified string name of the buffer to look for. (See ``get_submodule`` for how to specify a fully-qualified string.) Returns: torch.Tensor: The buffer referenced by ``target`` Raises: AttributeError: If the target string references an invalid path or resolves to something that is not a buffer """ module_path, _, buffer_name = target.rpartition(".") mod: torch.nn.Module = self.get_submodule(module_path) if not hasattr(mod, buffer_name): raise AttributeError( mod._get_name() + " has no attribute `" + buffer_name + "`" ) buffer: torch.Tensor = getattr(mod, buffer_name) if buffer_name not in mod._buffers: raise AttributeError("`" + buffer_name + "` is not a buffer") return buffer def get_extra_state(self) -> Any: """Return any extra state to include in the module's state_dict. Implement this and a corresponding :func:`set_extra_state` for your module if you need to store extra state. This function is called when building the module's `state_dict()`. Note that extra state should be picklable to ensure working serialization of the state_dict. We only provide backwards compatibility guarantees for serializing Tensors; other objects may break backwards compatibility if their serialized pickled form changes. Returns: object: Any extra state to store in the module's state_dict """ raise RuntimeError( "Reached a code path in Module.get_extra_state() that should never be called. " "Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml " "to report this bug." ) def set_extra_state(self, state: Any) -> None: """Set extra state contained in the loaded `state_dict`. This function is called from :func:`load_state_dict` to handle any extra state found within the `state_dict`. Implement this function and a corresponding :func:`get_extra_state` for your module if you need to store extra state within its `state_dict`. Args: state (dict): Extra state from the `state_dict` """ raise RuntimeError( "Reached a code path in Module.set_extra_state() that should never be called. " "Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml " "to report this bug." ) def _apply(self, fn, recurse=True): if recurse: for module in self.children(): module._apply(fn) def compute_should_use_set_data(tensor, tensor_applied): if torch._has_compatible_shallow_copy_type(tensor, tensor_applied): # If the new tensor has compatible tensor type as the existing tensor, # the current behavior is to change the tensor in-place using `.data =`, # and the future behavior is to overwrite the existing tensor. 
However, # changing the current behavior is a BC-breaking change, and we want it # to happen in future releases. So for now we introduce the # `torch.__future__.get_overwrite_module_params_on_conversion()` # global flag to let the user control whether they want the future # behavior of overwriting the existing tensor or not. return not torch.__future__.get_overwrite_module_params_on_conversion() else: return False should_use_swap_tensors = ( torch.__future__.get_swap_module_params_on_conversion() ) for key, param in self._parameters.items(): if param is None: continue # Tensors stored in modules are graph leaves, and we don't want to # track autograd history of `param_applied`, so we have to use # `with torch.no_grad():` with torch.no_grad(): param_applied = fn(param) p_should_use_set_data = compute_should_use_set_data(param, param_applied) # subclasses may have multiple child tensors so we need to use swap_tensors p_should_use_swap_tensors = ( should_use_swap_tensors or is_traceable_wrapper_subclass(param_applied) ) param_grad = param.grad if p_should_use_swap_tensors: try: if param_grad is not None: # Accessing param.grad makes its at::Tensor's use_count 2, which will prevent swapping. # Decrement use count of the gradient by setting to None param.grad = None param_applied = torch.nn.Parameter( param_applied, requires_grad=param.requires_grad ) torch.utils.swap_tensors(param, param_applied) except Exception as e: if param_grad is not None: param.grad = param_grad raise RuntimeError( f"_apply(): Couldn't swap {self._get_name()}.{key}" ) from e out_param = param elif p_should_use_set_data: param.data = param_applied out_param = param else: assert isinstance(param, Parameter) assert param.is_leaf out_param = Parameter(param_applied, param.requires_grad) self._parameters[key] = out_param if param_grad is not None: with torch.no_grad(): grad_applied = fn(param_grad) g_should_use_set_data = compute_should_use_set_data( param_grad, grad_applied ) if p_should_use_swap_tensors: grad_applied.requires_grad_(param_grad.requires_grad) try: torch.utils.swap_tensors(param_grad, grad_applied) except Exception as e: raise RuntimeError( f"_apply(): Couldn't swap {self._get_name()}.{key}.grad" ) from e out_param.grad = param_grad elif g_should_use_set_data: assert out_param.grad is not None out_param.grad.data = grad_applied else: assert param_grad.is_leaf out_param.grad = grad_applied.requires_grad_( param_grad.requires_grad ) for key, buf in self._buffers.items(): if buf is not None: self._buffers[key] = fn(buf) return self def apply(self: T, fn: Callable[["Module"], None]) -> T: r"""Apply ``fn`` recursively to every submodule (as returned by ``.children()``) as well as self. Typical use includes initializing the parameters of a model (see also :ref:`nn-init-doc`). 
Args: fn (:class:`Module` -> None): function to be applied to each submodule Returns: Module: self Example:: >>> @torch.no_grad() >>> def init_weights(m): >>> print(m) >>> if type(m) == nn.Linear: >>> m.weight.fill_(1.0) >>> print(m.weight) >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2)) >>> net.apply(init_weights) Linear(in_features=2, out_features=2, bias=True) Parameter containing: tensor([[1., 1.], [1., 1.]], requires_grad=True) Linear(in_features=2, out_features=2, bias=True) Parameter containing: tensor([[1., 1.], [1., 1.]], requires_grad=True) Sequential( (0): Linear(in_features=2, out_features=2, bias=True) (1): Linear(in_features=2, out_features=2, bias=True) ) """ for module in self.children(): module.apply(fn) fn(self) return self def cuda(self: T, device: Optional[Union[int, device]] = None) -> T: r"""Move all model parameters and buffers to the GPU. This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on GPU while being optimized. .. note:: This method modifies the module in-place. Args: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self """ return self._apply(lambda t: t.cuda(device)) def ipu(self: T, device: Optional[Union[int, device]] = None) -> T: r"""Move all model parameters and buffers to the IPU. This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on IPU while being optimized. .. note:: This method modifies the module in-place. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self """ return self._apply(lambda t: t.ipu(device)) def xpu(self: T, device: Optional[Union[int, device]] = None) -> T: r"""Move all model parameters and buffers to the XPU. This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on XPU while being optimized. .. note:: This method modifies the module in-place. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self """ return self._apply(lambda t: t.xpu(device)) def mtia(self: T, device: Optional[Union[int, device]] = None) -> T: r"""Move all model parameters and buffers to the MTIA. This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on MTIA while being optimized. .. note:: This method modifies the module in-place. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self """ return self._apply(lambda t: t.mtia(device)) def cpu(self: T) -> T: r"""Move all model parameters and buffers to the CPU. .. note:: This method modifies the module in-place. Returns: Module: self """ return self._apply(lambda t: t.cpu()) def type(self: T, dst_type: Union[dtype, str]) -> T: r"""Casts all parameters and buffers to :attr:`dst_type`. .. note:: This method modifies the module in-place. Args: dst_type (type or string): the desired type Returns: Module: self """ return self._apply(lambda t: t.type(dst_type)) def float(self: T) -> T: r"""Casts all floating point parameters and buffers to ``float`` datatype. .. note:: This method modifies the module in-place. 
Returns: Module: self """ return self._apply(lambda t: t.float() if t.is_floating_point() else t) def double(self: T) -> T: r"""Casts all floating point parameters and buffers to ``double`` datatype. .. note:: This method modifies the module in-place. Returns: Module: self """ return self._apply(lambda t: t.double() if t.is_floating_point() else t) def half(self: T) -> T: r"""Casts all floating point parameters and buffers to ``half`` datatype. .. note:: This method modifies the module in-place. Returns: Module: self """ return self._apply(lambda t: t.half() if t.is_floating_point() else t) def bfloat16(self: T) -> T: r"""Casts all floating point parameters and buffers to ``bfloat16`` datatype. .. note:: This method modifies the module in-place. Returns: Module: self """ return self._apply(lambda t: t.bfloat16() if t.is_floating_point() else t) def to_empty( self: T, *, device: Optional[DeviceLikeType], recurse: bool = True ) -> T: r"""Move the parameters and buffers to the specified device without copying storage. Args: device (:class:`torch.device`): The desired device of the parameters and buffers in this module. recurse (bool): Whether parameters and buffers of submodules should be recursively moved to the specified device. Returns: Module: self """ return self._apply( lambda t: torch.empty_like(t, device=device), recurse=recurse ) @overload def to( self, device: Optional[DeviceLikeType] = ..., dtype: Optional[dtype] = ..., non_blocking: bool = ..., ) -> Self: ... @overload def to(self, dtype: dtype, non_blocking: bool = ...) -> Self: ... @overload def to(self, tensor: Tensor, non_blocking: bool = ...) -> Self: ... def to(self, *args, **kwargs): r"""Move and/or cast the parameters and buffers. This can be called as .. function:: to(device=None, dtype=None, non_blocking=False) :noindex: .. function:: to(dtype, non_blocking=False) :noindex: .. function:: to(tensor, non_blocking=False) :noindex: .. function:: to(memory_format=torch.channels_last) :noindex: Its signature is similar to :meth:`torch.Tensor.to`, but only accepts floating point or complex :attr:`dtype`\ s. In addition, this method will only cast the floating point or complex parameters and buffers to :attr:`dtype` (if given). The integral parameters and buffers will be moved :attr:`device`, if that is given, but with dtypes unchanged. When :attr:`non_blocking` is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices. See below for examples. .. note:: This method modifies the module in-place. 
Args: device (:class:`torch.device`): the desired device of the parameters and buffers in this module dtype (:class:`torch.dtype`): the desired floating point or complex dtype of the parameters and buffers in this module tensor (torch.Tensor): Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module memory_format (:class:`torch.memory_format`): the desired memory format for 4D parameters and buffers in this module (keyword only argument) Returns: Module: self Examples:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> linear = nn.Linear(2, 2) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]]) >>> linear.to(torch.double) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]], dtype=torch.float64) >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1) >>> gpu1 = torch.device("cuda:1") >>> linear.to(gpu1, dtype=torch.half, non_blocking=True) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1') >>> cpu = torch.device("cpu") >>> linear.to(cpu) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16) >>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble) >>> linear.weight Parameter containing: tensor([[ 0.3741+0.j, 0.2382+0.j], [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128) >>> linear(torch.ones(3, 2, dtype=torch.cdouble)) tensor([[0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128) """ device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to( *args, **kwargs ) if dtype is not None: if not (dtype.is_floating_point or dtype.is_complex): raise TypeError( "nn.Module.to only accepts floating point or complex " f"dtypes, but got desired dtype={dtype}" ) if dtype.is_complex: warnings.warn( "Complex modules are a new feature under active development whose design may change, " "and some modules might not work as expected when using complex tensors as parameters or buffers. " "Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml " "if a complex module does not work as expected." ) def convert(t): try: if convert_to_format is not None and t.dim() in (4, 5): return t.to( device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking, memory_format=convert_to_format, ) return t.to( device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking, ) except NotImplementedError as e: if str(e) == "Cannot copy out of meta tensor; no data!": raise NotImplementedError( f"{e} Please use torch.nn.Module.to_empty() instead of torch.nn.Module.to() " f"when moving module from meta to a different device." ) from None else: raise return self._apply(convert) def register_full_backward_pre_hook( self, hook: Callable[["Module", _grad_t], Union[None, _grad_t]], prepend: bool = False, ) -> RemovableHandle: r"""Register a backward pre-hook on the module. The hook will be called every time the gradients for the module are computed. The hook should have the following signature:: hook(module, grad_output) -> tuple[Tensor] or None The :attr:`grad_output` is a tuple. 
The hook should not modify its arguments, but it can optionally return a new gradient with respect to the output that will be used in place of :attr:`grad_output` in subsequent computations. Entries in :attr:`grad_output` will be ``None`` for all non-Tensor arguments. For technical reasons, when this hook is applied to a Module, its forward function will receive a view of each Tensor passed to the Module. Similarly the caller will receive a view of each Tensor returned by the Module's forward function. .. warning :: Modifying inputs inplace is not allowed when using backward hooks and will raise an error. Args: hook (Callable): The user-defined hook to be registered. prepend (bool): If true, the provided ``hook`` will be fired before all existing ``backward_pre`` hooks on this :class:`torch.nn.Module`. Otherwise, the provided ``hook`` will be fired after all existing ``backward_pre`` hooks on this :class:`torch.nn.Module`. Note that global ``backward_pre`` hooks registered with :func:`register_module_full_backward_pre_hook` will fire before all hooks registered by this method. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ handle = RemovableHandle(self._backward_pre_hooks) self._backward_pre_hooks[handle.id] = hook if prepend: self._backward_pre_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined] return handle def register_backward_hook( self, hook: Callable[["Module", _grad_t, _grad_t], Union[None, _grad_t]] ) -> RemovableHandle: r"""Register a backward hook on the module. This function is deprecated in favor of :meth:`~torch.nn.Module.register_full_backward_hook` and the behavior of this function will change in future versions. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ if self._is_full_backward_hook is True: raise RuntimeError( "Cannot use both regular backward hooks and full backward hooks on a " "single Module. Please use only one of them." ) self._is_full_backward_hook = False handle = RemovableHandle(self._backward_hooks) self._backward_hooks[handle.id] = hook return handle def register_full_backward_hook( self, hook: Callable[["Module", _grad_t, _grad_t], Union[None, _grad_t]], prepend: bool = False, ) -> RemovableHandle: r"""Register a backward hook on the module. The hook will be called every time the gradients with respect to a module are computed, i.e. the hook will execute if and only if the gradients with respect to module outputs are computed. The hook should have the following signature:: hook(module, grad_input, grad_output) -> tuple(Tensor) or None The :attr:`grad_input` and :attr:`grad_output` are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of :attr:`grad_input` in subsequent computations. :attr:`grad_input` will only correspond to the inputs given as positional arguments and all kwarg arguments are ignored. Entries in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor arguments. For technical reasons, when this hook is applied to a Module, its forward function will receive a view of each Tensor passed to the Module. Similarly the caller will receive a view of each Tensor returned by the Module's forward function. .. 
warning :: Modifying inputs or outputs inplace is not allowed when using backward hooks and will raise an error. Args: hook (Callable): The user-defined hook to be registered. prepend (bool): If true, the provided ``hook`` will be fired before all existing ``backward`` hooks on this :class:`torch.nn.Module`. Otherwise, the provided ``hook`` will be fired after all existing ``backward`` hooks on this :class:`torch.nn.Module`. Note that global ``backward`` hooks registered with :func:`register_module_full_backward_hook` will fire before all hooks registered by this method. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ if self._is_full_backward_hook is False: raise RuntimeError( "Cannot use both regular backward hooks and full backward hooks on a " "single Module. Please use only one of them." ) self._is_full_backward_hook = True handle = RemovableHandle(self._backward_hooks) self._backward_hooks[handle.id] = hook if prepend: self._backward_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined] return handle def _get_backward_hooks(self): r"""Return the backward hooks for use in the call function. It returns two lists, one with the full backward hooks and one with the non-full backward hooks. """ full_backward_hooks: list[Callable] = [] if _global_is_full_backward_hook is True: full_backward_hooks += _global_backward_hooks.values() if self._is_full_backward_hook is True: full_backward_hooks += self._backward_hooks.values() non_full_backward_hooks: list[Callable] = [] if _global_is_full_backward_hook is False: non_full_backward_hooks += _global_backward_hooks.values() if self._is_full_backward_hook is False: non_full_backward_hooks += self._backward_hooks.values() return full_backward_hooks, non_full_backward_hooks def _get_backward_pre_hooks(self): backward_pre_hooks: list[Callable] = [] backward_pre_hooks += _global_backward_pre_hooks.values() backward_pre_hooks += self._backward_pre_hooks.values() return backward_pre_hooks def _maybe_warn_non_full_backward_hook(self, inputs, result, grad_fn): if not isinstance(result, torch.Tensor): if not ( isinstance(result, tuple) and all(isinstance(r, torch.Tensor) for r in result) ): warnings.warn( "Using non-full backward hooks on a Module that does not return a " "single Tensor or a tuple of Tensors is deprecated and will be removed " "in future versions. This hook will be missing some of the grad_output. " "Please use register_full_backward_hook to get the documented behavior.", FutureWarning, stacklevel=2, ) return else: result = (result,) if not isinstance(inputs, torch.Tensor): if not ( isinstance(inputs, tuple) and all(isinstance(i, torch.Tensor) for i in inputs) ): warnings.warn( "Using non-full backward hooks on a Module that does not take as input a " "single Tensor or a tuple of Tensors is deprecated and will be removed " "in future versions. This hook will be missing some of the grad_input. " "Please use register_full_backward_hook to get the documented behavior.", FutureWarning, stacklevel=2, ) return else: inputs = (inputs,) # At this point we are sure that inputs and result are tuple of Tensors out_grad_fn = {r.grad_fn for r in result if r.grad_fn is not None} if len(out_grad_fn) == 0 or ( len(out_grad_fn) == 1 and grad_fn not in out_grad_fn ): warnings.warn( "Using a non-full backward hook when outputs are nested in python data structure " "is deprecated and will be removed in future versions. 
This hook will be missing " "some grad_output.", FutureWarning, stacklevel=2, ) elif len(out_grad_fn) > 1: warnings.warn( "Using a non-full backward hook when outputs are generated by different autograd Nodes " "is deprecated and will be removed in future versions. This hook will be missing " "some grad_output. Please use register_full_backward_hook to get the documented behavior.", FutureWarning, stacklevel=2, ) else: # At this point the grad_output part of the hook will most likely be correct inputs_grad_fn = {i.grad_fn for i in inputs if i.grad_fn is not None} next_functions = {n[0] for n in grad_fn.next_functions} if inputs_grad_fn != next_functions: warnings.warn( "Using a non-full backward hook when the forward contains multiple autograd Nodes " "is deprecated and will be removed in future versions. This hook will be missing " "some grad_input. Please use register_full_backward_hook to get the documented " "behavior.", FutureWarning, stacklevel=2, ) def register_forward_pre_hook( self, hook: Union[ Callable[[T, tuple[Any, ...]], Optional[Any]], Callable[ [T, tuple[Any, ...], dict[str, Any]], Optional[tuple[Any, dict[str, Any]]], ], ], *, prepend: bool = False, with_kwargs: bool = False, ) -> RemovableHandle: r"""Register a forward pre-hook on the module. The hook will be called every time before :func:`forward` is invoked. If ``with_kwargs`` is false or not specified, the input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the ``forward``. The hook can modify the input. User can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if a single value is returned (unless that value is already a tuple). The hook should have the following signature:: hook(module, args) -> None or modified input If ``with_kwargs`` is true, the forward pre-hook will be passed the kwargs given to the forward function. And if the hook modifies the input, both the args and kwargs should be returned. The hook should have the following signature:: hook(module, args, kwargs) -> None or a tuple of modified input and kwargs Args: hook (Callable): The user defined hook to be registered. prepend (bool): If true, the provided ``hook`` will be fired before all existing ``forward_pre`` hooks on this :class:`torch.nn.Module`. Otherwise, the provided ``hook`` will be fired after all existing ``forward_pre`` hooks on this :class:`torch.nn.Module`. Note that global ``forward_pre`` hooks registered with :func:`register_module_forward_pre_hook` will fire before all hooks registered by this method. Default: ``False`` with_kwargs (bool): If true, the ``hook`` will be passed the kwargs given to the forward function. Default: ``False`` Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ handle = RemovableHandle( self._forward_pre_hooks, extra_dict=self._forward_pre_hooks_with_kwargs ) self._forward_pre_hooks[handle.id] = hook if with_kwargs: self._forward_pre_hooks_with_kwargs[handle.id] = True if prepend: self._forward_pre_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined] return handle def register_forward_hook( self, hook: Union[ Callable[[T, tuple[Any, ...], Any], Optional[Any]], Callable[[T, tuple[Any, ...], dict[str, Any], Any], Optional[Any]], ], *, prepend: bool = False, with_kwargs: bool = False, always_call: bool = False, ) -> RemovableHandle: r"""Register a forward hook on the module. 
The hook will be called every time after :func:`forward` has computed an output. If ``with_kwargs`` is ``False`` or not specified, the input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the ``forward``. The hook can modify the output. It can modify the input inplace but it will not have effect on forward since this is called after :func:`forward` is called. The hook should have the following signature:: hook(module, args, output) -> None or modified output If ``with_kwargs`` is ``True``, the forward hook will be passed the ``kwargs`` given to the forward function and be expected to return the output possibly modified. The hook should have the following signature:: hook(module, args, kwargs, output) -> None or modified output Args: hook (Callable): The user defined hook to be registered. prepend (bool): If ``True``, the provided ``hook`` will be fired before all existing ``forward`` hooks on this :class:`torch.nn.Module`. Otherwise, the provided ``hook`` will be fired after all existing ``forward`` hooks on this :class:`torch.nn.Module`. Note that global ``forward`` hooks registered with :func:`register_module_forward_hook` will fire before all hooks registered by this method. Default: ``False`` with_kwargs (bool): If ``True``, the ``hook`` will be passed the kwargs given to the forward function. Default: ``False`` always_call (bool): If ``True`` the ``hook`` will be run regardless of whether an exception is raised while calling the Module. Default: ``False`` Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ handle = RemovableHandle( self._forward_hooks, extra_dict=[ self._forward_hooks_with_kwargs, self._forward_hooks_always_called, ], ) self._forward_hooks[handle.id] = hook if with_kwargs: self._forward_hooks_with_kwargs[handle.id] = True if always_call: self._forward_hooks_always_called[handle.id] = True if prepend: self._forward_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined] return handle def _slow_forward(self, *input, **kwargs): tracing_state = torch._C._get_tracing_state() if not tracing_state or isinstance(self.forward, torch._C.ScriptMethod): return self.forward(*input, **kwargs) recording_scopes = torch.jit._trace._trace_module_map is not None if recording_scopes: # type ignore was added because at this point one knows that # torch.jit._trace._trace_module_map is not Optional and has type Dict[Any, Any] name = torch.jit._trace._trace_module_map[self] if self in torch.jit._trace._trace_module_map else None # type: ignore[index, operator] # noqa: B950 if name: tracing_state.push_scope(name) else: recording_scopes = False try: result = self.forward(*input, **kwargs) finally: if recording_scopes: tracing_state.pop_scope() return result def _wrapped_call_impl(self, *args, **kwargs): if self._compiled_call_impl is not None: return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc] else: return self._call_impl(*args, **kwargs) # torchrec tests the code consistency with the following code # fmt: off def _call_impl(self, *args, **kwargs): forward_call = (self._slow_forward if torch._C._get_tracing_state() else self.forward) # If we don't have any hooks, we want to skip the rest of the logic in # this function, and just call forward. 
if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_pre_hooks or _global_backward_hooks or _global_forward_hooks or _global_forward_pre_hooks): return forward_call(*args, **kwargs) result = None called_always_called_hooks = set() def inner(): nonlocal result, args, kwargs full_backward_hooks, non_full_backward_hooks = [], [] backward_pre_hooks = [] if self._backward_pre_hooks or _global_backward_pre_hooks: backward_pre_hooks = self._get_backward_pre_hooks() if self._backward_hooks or _global_backward_hooks: full_backward_hooks, non_full_backward_hooks = self._get_backward_hooks() if _global_forward_pre_hooks or self._forward_pre_hooks: for hook_id, hook in ( *_global_forward_pre_hooks.items(), *self._forward_pre_hooks.items(), ): if hook_id in self._forward_pre_hooks_with_kwargs: args_kwargs_result = hook(self, args, kwargs) # type: ignore[misc] if args_kwargs_result is not None: if isinstance(args_kwargs_result, tuple) and len(args_kwargs_result) == 2: args, kwargs = args_kwargs_result else: raise RuntimeError( "forward pre-hook must return None or a tuple " f"of (new_args, new_kwargs), but got {args_kwargs_result}." ) else: args_result = hook(self, args) if args_result is not None: if not isinstance(args_result, tuple): args_result = (args_result,) args = args_result bw_hook = None if full_backward_hooks or backward_pre_hooks: bw_hook = BackwardHook(self, full_backward_hooks, backward_pre_hooks) args = bw_hook.setup_input_hook(args) result = forward_call(*args, **kwargs) if _global_forward_hooks or self._forward_hooks: for hook_id, hook in ( *_global_forward_hooks.items(), *self._forward_hooks.items(), ): # mark that always called hook is run if hook_id in self._forward_hooks_always_called or hook_id in _global_forward_hooks_always_called: called_always_called_hooks.add(hook_id) if hook_id in self._forward_hooks_with_kwargs or hook_id in _global_forward_hooks_with_kwargs: hook_result = hook(self, args, kwargs, result) else: hook_result = hook(self, args, result) if hook_result is not None: result = hook_result if bw_hook: if not isinstance(result, (torch.Tensor, tuple)): warnings.warn("For backward hooks to be called," " module output should be a Tensor or a tuple of Tensors" f" but received {type(result)}") result = bw_hook.setup_output_hook(result) # Handle the non-full backward hooks if non_full_backward_hooks: var = result while not isinstance(var, torch.Tensor): if isinstance(var, dict): var = next(v for v in var.values() if isinstance(v, torch.Tensor)) else: var = var[0] grad_fn = var.grad_fn if grad_fn is not None: for hook in non_full_backward_hooks: grad_fn.register_hook(_WrappedHook(hook, self)) self._maybe_warn_non_full_backward_hook(args, result, grad_fn) return result # This is technically not behavior equivalent when compiling, but it's # incredibly unlikely we will ever support throwing an exception in NN # module, and then catching it here, and then reraising it, and then # catching it again, and expecting the resulting frame to be compiled. # The reraise here just gunks up our exception handling for no good # reason. Don't try to run the always called hooks in event of # exception. if torch.compiler.is_compiling(): return inner() try: return inner() except Exception: # run always called hooks if they have not already been run # For now only forward hooks have the always_call option but perhaps # this functionality should be added to full backward hooks as well. 
for hook_id, hook in _global_forward_hooks.items(): if hook_id in _global_forward_hooks_always_called and hook_id not in called_always_called_hooks: # type: ignore[possibly-undefined] try: hook_result = hook(self, args, result) # type: ignore[possibly-undefined] if hook_result is not None: result = hook_result except Exception as e: warnings.warn("global module forward hook with ``always_call=True`` raised an exception " f"that was silenced as another error was raised in forward: {str(e)}") continue for hook_id, hook in self._forward_hooks.items(): if hook_id in self._forward_hooks_always_called and hook_id not in called_always_called_hooks: # type: ignore[possibly-undefined] try: if hook_id in self._forward_hooks_with_kwargs: hook_result = hook(self, args, kwargs, result) # type: ignore[possibly-undefined] else: hook_result = hook(self, args, result) # type: ignore[possibly-undefined] if hook_result is not None: result = hook_result except Exception as e: warnings.warn("module forward hook with ``always_call=True`` raised an exception " f"that was silenced as another error was raised in forward: {str(e)}") continue # raise exception raised in try block raise # fmt: on __call__: Callable[..., Any] = _wrapped_call_impl def __getstate__(self): state = self.__dict__.copy() state.pop("_compiled_call_impl", None) return state def __setstate__(self, state): self.__dict__.update(state) # Support loading old checkpoints that don't have the following attrs: if "_forward_pre_hooks" not in self.__dict__: self._forward_pre_hooks = OrderedDict() if "_forward_pre_hooks_with_kwargs" not in self.__dict__: self._forward_pre_hooks_with_kwargs = OrderedDict() if "_forward_hooks_with_kwargs" not in self.__dict__: self._forward_hooks_with_kwargs = OrderedDict() if "_forward_hooks_always_called" not in self.__dict__: self._forward_hooks_always_called = OrderedDict() if "_state_dict_hooks" not in self.__dict__: self._state_dict_hooks = OrderedDict() if "_state_dict_pre_hooks" not in self.__dict__: self._state_dict_pre_hooks = OrderedDict() if "_load_state_dict_pre_hooks" not in self.__dict__: self._load_state_dict_pre_hooks = OrderedDict() if "_load_state_dict_post_hooks" not in self.__dict__: self._load_state_dict_post_hooks = OrderedDict() if "_non_persistent_buffers_set" not in self.__dict__: self._non_persistent_buffers_set = set() if "_is_full_backward_hook" not in self.__dict__: self._is_full_backward_hook = None if "_backward_pre_hooks" not in self.__dict__: self._backward_pre_hooks = OrderedDict() # It is crucial that the return type is not annotated as `Any`, otherwise type checking # on `torch.nn.Module` and all its subclasses is largely disabled as a result. 
See: # https://github.com/pytorch/pytorch/pull/115074 def __getattr__(self, name: str) -> Union[Tensor, "Module"]: if "_parameters" in self.__dict__: _parameters = self.__dict__["_parameters"] if name in _parameters: return _parameters[name] if "_buffers" in self.__dict__: _buffers = self.__dict__["_buffers"] if name in _buffers: return _buffers[name] if "_modules" in self.__dict__: modules = self.__dict__["_modules"] if name in modules: return modules[name] raise AttributeError( f"'{type(self).__name__}' object has no attribute '{name}'" ) def __setattr__(self, name: str, value: Union[Tensor, "Module"]) -> None: def remove_from(*dicts_or_sets): for d in dicts_or_sets: if name in d: if isinstance(d, dict): del d[name] else: d.discard(name) params = self.__dict__.get("_parameters") if isinstance(value, Parameter): if params is None: raise AttributeError( "cannot assign parameters before Module.__init__() call" ) remove_from( self.__dict__, self._buffers, self._modules, self._non_persistent_buffers_set, ) self.register_parameter(name, value) elif params is not None and name in params: if value is not None: raise TypeError( f"cannot assign '{torch.typename(value)}' as parameter '{name}' " "(torch.nn.Parameter or None expected)" ) self.register_parameter(name, value) else: modules = self.__dict__.get("_modules") if isinstance(value, Module): if modules is None: raise AttributeError( "cannot assign module before Module.__init__() call" ) remove_from( self.__dict__, self._parameters, self._buffers, self._non_persistent_buffers_set, ) for hook in _global_module_registration_hooks.values(): output = hook(self, name, value) if output is not None: value = output modules[name] = value elif modules is not None and name in modules: if value is not None: raise TypeError( f"cannot assign '{torch.typename(value)}' as child module '{name}' " "(torch.nn.Module or None expected)" ) for hook in _global_module_registration_hooks.values(): output = hook(self, name, value) if output is not None: value = output modules[name] = value else: buffers = self.__dict__.get("_buffers") if isinstance(value, Buffer) or buffers is not None and name in buffers: if value is not None and not isinstance(value, torch.Tensor): raise TypeError( f"cannot assign '{torch.typename(value)}' as buffer '{name}' " "(torch.nn.Buffer, torch.Tensor or None expected)" ) if isinstance(value, Buffer): persistent = value.persistent else: persistent = name not in self._non_persistent_buffers_set # === HACK === # This whole block below should just be: # self.register_buffer(name, value, persistent) # But to support subclasses of nn.Module that (wrongfully) implement a # register_buffer() method that doesn't have the "persistent" # argument. Only pass it in if it is accepted otherwise assume # it is always true if self.register_buffer is torch.nn.Module.register_buffer: self.register_buffer(name, value, persistent) else: sign = inspect.signature(self.register_buffer) if "persistent" in sign.parameters: self.register_buffer(name, value, persistent) else: if not persistent: raise RuntimeError( "Registering a non-persistent buffer " "on a Module subclass that implements " "register_buffer() without the persistent " "argument is not allowed." 
) # Assume that the implementation without the argument has the # behavior from before the argument was added: persistent=True self.register_buffer(name, value) # === HACK END === else: super().__setattr__(name, value) def __delattr__(self, name): if name in self._parameters: del self._parameters[name] elif name in self._buffers: del self._buffers[name] self._non_persistent_buffers_set.discard(name) elif name in self._modules: del self._modules[name] else: super().__delattr__(name) def _register_state_dict_hook(self, hook): r"""Register a post-hook for the :meth:`~torch.nn.Module.state_dict` method. It should have the following signature:: hook(module, state_dict, prefix, local_metadata) -> None or state_dict The registered hooks can modify the ``state_dict`` inplace or return a new one. If a new ``state_dict`` is returned, it will only be respected if it is the root module that :meth:`~nn.Module.state_dict` is called from. """ if getattr(hook, "_from_public_api", False): raise RuntimeError( "Cannot register the same function as the state dict post hook that was " "previously registered via register_state_dict_post_hook" ) handle = RemovableHandle(self._state_dict_hooks) self._state_dict_hooks[handle.id] = hook return handle def register_state_dict_post_hook(self, hook): r"""Register a post-hook for the :meth:`~torch.nn.Module.state_dict` method. It should have the following signature:: hook(module, state_dict, prefix, local_metadata) -> None The registered hooks can modify the ``state_dict`` inplace. """ # In _register_state_dict_hook there was a bug described in # https://github.com/pytorch/pytorch/issues/117437 where the return value # was only respected for the root module but not child submodules. # We fix this in this public version by only allowing inplace modifications on # the state_dict by the hook. However, since hooks registered via both these # APIs will be added to `_state_dict_hooks` and the type of `_state_dict_hooks` # cannot be changed due to many dependencies on it, we mark a hook # as being registered via the public API by setting `_from_public_api` on it. # In the implementation of `state_dict`, if the callable does not have this # flag, the old behavior of respecting the return value will be preserved # for the root module, otherwise, we ensure that the hook returns None. hook._from_public_api = True handle = RemovableHandle(self._state_dict_hooks) self._state_dict_hooks[handle.id] = hook return handle def register_state_dict_pre_hook(self, hook): r"""Register a pre-hook for the :meth:`~torch.nn.Module.state_dict` method. It should have the following signature:: hook(module, prefix, keep_vars) -> None The registered hooks can be used to perform pre-processing before the ``state_dict`` call is made. """ handle = RemovableHandle(self._state_dict_pre_hooks) self._state_dict_pre_hooks[handle.id] = hook return handle def _save_to_state_dict(self, destination, prefix, keep_vars): r"""Save module state to the `destination` dictionary. The `destination` dictionary will contain the state of the module, but not its descendants. This is called on every submodule in :meth:`~torch.nn.Module.state_dict`. In rare cases, subclasses can achieve class-specific behavior by overriding this method with custom logic. 
Args: destination (dict): a dict where state will be stored prefix (str): the prefix for parameters and buffers used in this module """ for name, param in self._parameters.items(): if param is not None: destination[prefix + name] = param if keep_vars else param.detach() for name, buf in self._buffers.items(): if buf is not None and name not in self._non_persistent_buffers_set: destination[prefix + name] = buf if keep_vars else buf.detach() extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX if ( getattr(self.__class__, "get_extra_state", Module.get_extra_state) is not Module.get_extra_state ): destination[extra_state_key] = self.get_extra_state() # The user can pass an optional arbitrary mappable object to `state_dict`, in which case `state_dict` returns # back that same object. But if they pass nothing, an `OrderedDict` is created and returned. T_destination = TypeVar("T_destination", bound=dict[str, Any]) @overload def state_dict( self, *, destination: T_destination, prefix: str = ..., keep_vars: bool = ... ) -> T_destination: ... @overload def state_dict(self, *, prefix: str = ..., keep_vars: bool = ...) -> dict[str, Any]: ... # TODO: Change `*args` to `*` and remove the corresponding warning in docs when BC allows. # Also remove the logic for arg parsing together. def state_dict(self, *args, destination=None, prefix="", keep_vars=False): r"""Return a dictionary containing references to the whole state of the module. Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names. Parameters and buffers set to ``None`` are not included. .. note:: The returned object is a shallow copy. It contains references to the module's parameters and buffers. .. warning:: Currently ``state_dict()`` also accepts positional arguments for ``destination``, ``prefix`` and ``keep_vars`` in order. However, this is being deprecated and keyword arguments will be enforced in future releases. .. warning:: Please avoid the use of argument ``destination`` as it is not designed for end-users. Args: destination (dict, optional): If provided, the state of module will be updated into the dict and the same object is returned. Otherwise, an ``OrderedDict`` will be created and returned. Default: ``None``. prefix (str, optional): a prefix added to parameter and buffer names to compose the keys in state_dict. Default: ``''``. keep_vars (bool, optional): by default the :class:`~torch.Tensor` s returned in the state dict are detached from autograd. If it's set to ``True``, detaching will not be performed. Default: ``False``. Returns: dict: a dictionary containing a whole state of the module Example:: >>> # xdoctest: +SKIP("undefined vars") >>> module.state_dict().keys() ['bias', 'weight'] """ # TODO: Remove `args` and the parsing logic when BC allows. if len(args) > 0: # DeprecationWarning is ignored by default warnings.warn( "Positional args are being deprecated, use kwargs instead. 
Refer to " "https://pytorch.org/docs/main/generated/torch.nn.Module.html#torch.nn.Module.state_dict" " for details.", FutureWarning, stacklevel=2, ) if destination is None: destination = args[0] if len(args) > 1 and prefix == "": prefix = args[1] if len(args) > 2 and keep_vars is False: keep_vars = args[2] if destination is None: destination = OrderedDict() destination._metadata = OrderedDict() local_metadata = dict(version=self._version) if hasattr(destination, "_metadata"): destination._metadata[prefix[:-1]] = local_metadata for hook in self._state_dict_pre_hooks.values(): hook(self, prefix, keep_vars) self._save_to_state_dict(destination, prefix, keep_vars) for name, module in self._modules.items(): if module is not None: module.state_dict( destination=destination, prefix=prefix + name + ".", keep_vars=keep_vars, ) for hook in self._state_dict_hooks.values(): hook_result = hook(self, destination, prefix, local_metadata) if not getattr(hook, "_from_public_api", False): if hook_result is not None: destination = hook_result else: if hook_result is not None: raise RuntimeError("state_dict post-hook must return None") return destination def _register_load_state_dict_pre_hook(self, hook, with_module=False): r"""See :meth:`~torch.nn.Module.register_load_state_dict_pre_hook` for details. A subtle difference is that if ``with_module`` is set to ``False``, then the hook will not take the ``module`` as the first argument whereas :meth:`~torch.nn.Module.register_load_state_dict_pre_hook` always takes the ``module`` as the first argument. Arguments: hook (Callable): Callable hook that will be invoked before loading the state dict. with_module (bool, optional): Whether or not to pass the module instance to the hook as the first parameter. """ handle = RemovableHandle(self._load_state_dict_pre_hooks) self._load_state_dict_pre_hooks[handle.id] = _WrappedHook( hook, self if with_module else None ) return handle def register_load_state_dict_pre_hook(self, hook): r"""Register a pre-hook to be run before module's :meth:`~nn.Module.load_state_dict` is called. It should have the following signature:: hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) -> None # noqa: B950 Arguments: hook (Callable): Callable hook that will be invoked before loading the state dict. """ return self._register_load_state_dict_pre_hook(hook, with_module=True) def register_load_state_dict_post_hook(self, hook): r"""Register a post-hook to be run after module's :meth:`~nn.Module.load_state_dict` is called. It should have the following signature:: hook(module, incompatible_keys) -> None The ``module`` argument is the current module that this hook is registered on, and the ``incompatible_keys`` argument is a ``NamedTuple`` consisting of attributes ``missing_keys`` and ``unexpected_keys``. ``missing_keys`` is a ``list`` of ``str`` containing the missing keys and ``unexpected_keys`` is a ``list`` of ``str`` containing the unexpected keys. The given incompatible_keys can be modified inplace if needed. Note that the checks performed when calling :func:`load_state_dict` with ``strict=True`` are affected by modifications the hook makes to ``missing_keys`` or ``unexpected_keys``, as expected. Additions to either set of keys will result in an error being thrown when ``strict=True``, and clearing out both missing and unexpected keys will avoid an error. 
Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ handle = RemovableHandle(self._load_state_dict_post_hooks) self._load_state_dict_post_hooks[handle.id] = hook return handle def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ): r"""Copy parameters and buffers from :attr:`state_dict` into only this module, but not its descendants. This is called on every submodule in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this module in input :attr:`state_dict` is provided as :attr:`local_metadata`. For state dicts without metadata, :attr:`local_metadata` is empty. Subclasses can achieve class-specific backward compatible loading using the version number at `local_metadata.get("version", None)`. Additionally, :attr:`local_metadata` can also contain the key `assign_to_params_buffers` that indicates whether keys should be assigned their corresponding tensor in the state_dict. .. note:: :attr:`state_dict` is not the same object as the input :attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So it can be modified. Args: state_dict (dict): a dict containing parameters and persistent buffers. prefix (str): the prefix for parameters and buffers used in this module local_metadata (dict): a dict containing the metadata for this module. See strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` with :attr:`prefix` match the names of parameters and buffers in this module missing_keys (list of str): if ``strict=True``, add missing keys to this list unexpected_keys (list of str): if ``strict=True``, add unexpected keys to this list error_msgs (list of str): error messages should be added to this list, and will be reported together in :meth:`~torch.nn.Module.load_state_dict` """ for hook in self._load_state_dict_pre_hooks.values(): hook( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ) persistent_buffers = { k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set } local_name_params = itertools.chain( self._parameters.items(), persistent_buffers.items() ) local_state = {k: v for k, v in local_name_params if v is not None} assign_to_params_buffers = local_metadata.get("assign_to_params_buffers", False) use_swap_tensors = torch.__future__.get_swap_module_params_on_conversion() for name, param in local_state.items(): key = prefix + name if key in state_dict: input_param = state_dict[key] if not torch.overrides.is_tensor_like(input_param): error_msgs.append( f'While copying the parameter named "{key}", ' "expected torch.Tensor or Tensor-like object from checkpoint but " f"received {type(input_param)}" ) continue # This is used to avoid copying uninitialized parameters into # non-lazy modules, since they dont have the hook to do the checks # in such case, it will error when accessing the .shape attribute. is_param_lazy = torch.nn.parameter.is_lazy(param) # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+ if ( not is_param_lazy and len(param.shape) == 0 and len(input_param.shape) == 1 ): input_param = input_param[0] if not is_param_lazy and input_param.shape != param.shape: # local shape should match the one in checkpoint error_msgs.append( f"size mismatch for {key}: copying a param with shape {input_param.shape} from checkpoint, " f"the shape in current model is {param.shape}." 
) continue if ( param.is_meta and not input_param.is_meta and not assign_to_params_buffers ): warnings.warn( f"for {key}: copying from a non-meta parameter in the checkpoint to a meta " "parameter in the current model, which is a no-op. (Did you mean to " "pass `assign=True` to assign items in the state dictionary to their " "corresponding key in the module instead of copying them in place?)" ) try: with torch.no_grad(): if use_swap_tensors: new_input_param = param.module_load( input_param, assign=assign_to_params_buffers ) if id(new_input_param) == id(input_param) or id( new_input_param ) == id(param): raise RuntimeError( "module_load returned one of self or other, please .detach() " "the result if returning one of the inputs in module_load" ) if isinstance(param, torch.nn.Parameter): if not isinstance(new_input_param, torch.nn.Parameter): new_input_param = torch.nn.Parameter( new_input_param, requires_grad=param.requires_grad, ) else: new_input_param.requires_grad_(param.requires_grad) torch.utils.swap_tensors(param, new_input_param) del new_input_param elif assign_to_params_buffers: # Shape checks are already done above if isinstance(param, torch.nn.Parameter): if not isinstance(input_param, torch.nn.Parameter): input_param = torch.nn.Parameter( input_param, requires_grad=param.requires_grad ) else: input_param.requires_grad_(param.requires_grad) setattr(self, name, input_param) else: param.copy_(input_param) except Exception as ex: action = "swapping" if use_swap_tensors else "copying" error_msgs.append( f'While {action} the parameter named "{key}", ' f"whose dimensions in the model are {param.size()} and " f"whose dimensions in the checkpoint are {input_param.size()}, " f"an exception occurred : {ex.args}." ) elif strict: missing_keys.append(key) extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX if ( getattr(self.__class__, "set_extra_state", Module.set_extra_state) is not Module.set_extra_state ): if extra_state_key in state_dict: self.set_extra_state(state_dict[extra_state_key]) elif strict: missing_keys.append(extra_state_key) elif strict and (extra_state_key in state_dict): unexpected_keys.append(extra_state_key) if strict: for key in state_dict.keys(): if key.startswith(prefix) and key != extra_state_key: input_name = key[len(prefix) :].split(".", 1) # Must be Module if it have attributes if len(input_name) > 1: if input_name[0] not in self._modules: unexpected_keys.append(key) elif input_name[0] not in local_state: unexpected_keys.append(key) def load_state_dict( self, state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False ): r"""Copy parameters and buffers from :attr:`state_dict` into this module and its descendants. If :attr:`strict` is ``True``, then the keys of :attr:`state_dict` must exactly match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. .. warning:: If :attr:`assign` is ``True`` the optimizer must be created after the call to :attr:`load_state_dict` unless :func:`~torch.__future__.get_swap_module_params_on_conversion` is ``True``. Args: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` assign (bool, optional): When set to ``False``, the properties of the tensors in the current module are preserved whereas setting it to ``True`` preserves properties of the Tensors in the state dict. 
The only exception is the ``requires_grad`` field of :class:`~torch.nn.Parameter`s for which the value from the module is preserved. Default: ``False`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing any keys that are expected by this module but missing from the provided ``state_dict``. * **unexpected_keys** is a list of str containing the keys that are not expected by this module but present in the provided ``state_dict``. Note: If a parameter or buffer is registered as ``None`` and its corresponding key exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a ``RuntimeError``. """ if not isinstance(state_dict, Mapping): raise TypeError( f"Expected state_dict to be dict-like, got {type(state_dict)}." ) missing_keys: list[str] = [] unexpected_keys: list[str] = [] error_msgs: list[str] = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = OrderedDict(state_dict) if metadata is not None: # mypy isn't aware that "_metadata" exists in state_dict state_dict._metadata = metadata # type: ignore[attr-defined] def load(module, local_state_dict, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) if assign: local_metadata["assign_to_params_buffers"] = assign module._load_from_state_dict( local_state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs, ) for name, child in module._modules.items(): if child is not None: child_prefix = prefix + name + "." child_state_dict = { k: v for k, v in local_state_dict.items() if k.startswith(child_prefix) } load(child, child_state_dict, child_prefix) # noqa: F821 # Note that the hook can modify missing_keys and unexpected_keys. incompatible_keys = _IncompatibleKeys(missing_keys, unexpected_keys) for hook in module._load_state_dict_post_hooks.values(): out = hook(module, incompatible_keys) assert out is None, ( "Hooks registered with ``register_load_state_dict_post_hook`` are not" "expected to return new values, if incompatible_keys need to be modified," "it should be done inplace." ) load(self, state_dict) del load if strict: if len(unexpected_keys) > 0: error_msgs.insert( 0, "Unexpected key(s) in state_dict: {}. ".format( ", ".join(f'"{k}"' for k in unexpected_keys) ), ) if len(missing_keys) > 0: error_msgs.insert( 0, "Missing key(s) in state_dict: {}. ".format( ", ".join(f'"{k}"' for k in missing_keys) ), ) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format( self.__class__.__name__, "\n\t".join(error_msgs) ) ) return _IncompatibleKeys(missing_keys, unexpected_keys) def _named_members( self, get_members_fn, prefix="", recurse=True, remove_duplicate: bool = True ): r"""Help yield various names + members of modules.""" memo = set() modules = ( self.named_modules(prefix=prefix, remove_duplicate=remove_duplicate) if recurse else [(prefix, self)] ) for module_prefix, module in modules: members = get_members_fn(module) for k, v in members: if v is None or v in memo: continue if remove_duplicate: memo.add(v) name = module_prefix + ("." if module_prefix else "") + k yield name, v def parameters(self, recurse: bool = True) -> Iterator[Parameter]: r"""Return an iterator over module parameters. This is typically passed to an optimizer. Args: recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module. 
Yields: Parameter: module parameter Example:: >>> # xdoctest: +SKIP("undefined vars") >>> for param in model.parameters(): >>> print(type(param), param.size()) <class 'torch.Tensor'> (20L,) <class 'torch.Tensor'> (20L, 1L, 5L, 5L) """ for _name, param in self.named_parameters(recurse=recurse): yield param def named_parameters( self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True ) -> Iterator[tuple[str, Parameter]]: r"""Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself. Args: prefix (str): prefix to prepend to all parameter names. recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module. remove_duplicate (bool, optional): whether to remove the duplicated parameters in the result. Defaults to True. Yields: (str, Parameter): Tuple containing the name and parameter Example:: >>> # xdoctest: +SKIP("undefined vars") >>> for name, param in self.named_parameters(): >>> if name in ['bias']: >>> print(param.size()) """ gen = self._named_members( lambda module: module._parameters.items(), prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate, ) yield from gen def buffers(self, recurse: bool = True) -> Iterator[Tensor]: r"""Return an iterator over module buffers. Args: recurse (bool): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module. Yields: torch.Tensor: module buffer Example:: >>> # xdoctest: +SKIP("undefined vars") >>> for buf in model.buffers(): >>> print(type(buf), buf.size()) <class 'torch.Tensor'> (20L,) <class 'torch.Tensor'> (20L, 1L, 5L, 5L) """ for _, buf in self.named_buffers(recurse=recurse): yield buf def named_buffers( self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True ) -> Iterator[tuple[str, Tensor]]: r"""Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself. Args: prefix (str): prefix to prepend to all buffer names. recurse (bool, optional): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module. Defaults to True. remove_duplicate (bool, optional): whether to remove the duplicated buffers in the result. Defaults to True. Yields: (str, torch.Tensor): Tuple containing the name and buffer Example:: >>> # xdoctest: +SKIP("undefined vars") >>> for name, buf in self.named_buffers(): >>> if name in ['running_var']: >>> print(buf.size()) """ gen = self._named_members( lambda module: module._buffers.items(), prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate, ) yield from gen def children(self) -> Iterator["Module"]: r"""Return an iterator over immediate children modules. Yields: Module: a child module """ for _name, module in self.named_children(): yield module def named_children(self) -> Iterator[tuple[str, "Module"]]: r"""Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself. 
Yields: (str, Module): Tuple containing a name and child module Example:: >>> # xdoctest: +SKIP("undefined vars") >>> for name, module in model.named_children(): >>> if name in ['conv4', 'conv5']: >>> print(module) """ memo = set() for name, module in self._modules.items(): if module is not None and module not in memo: memo.add(module) yield name, module def modules(self) -> Iterator["Module"]: r"""Return an iterator over all modules in the network. Yields: Module: a module in the network Note: Duplicate modules are returned only once. In the following example, ``l`` will be returned only once. Example:: >>> l = nn.Linear(2, 2) >>> net = nn.Sequential(l, l) >>> for idx, m in enumerate(net.modules()): ... print(idx, '->', m) 0 -> Sequential( (0): Linear(in_features=2, out_features=2, bias=True) (1): Linear(in_features=2, out_features=2, bias=True) ) 1 -> Linear(in_features=2, out_features=2, bias=True) """ for _, module in self.named_modules(): yield module def named_modules( self, memo: Optional[set["Module"]] = None, prefix: str = "", remove_duplicate: bool = True, ): r"""Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself. Args: memo: a memo to store the set of modules already added to the result prefix: a prefix that will be added to the name of the module remove_duplicate: whether to remove the duplicated module instances in the result or not Yields: (str, Module): Tuple of name and module Note: Duplicate modules are returned only once. In the following example, ``l`` will be returned only once. Example:: >>> l = nn.Linear(2, 2) >>> net = nn.Sequential(l, l) >>> for idx, m in enumerate(net.named_modules()): ... print(idx, '->', m) 0 -> ('', Sequential( (0): Linear(in_features=2, out_features=2, bias=True) (1): Linear(in_features=2, out_features=2, bias=True) )) 1 -> ('0', Linear(in_features=2, out_features=2, bias=True)) """ if memo is None: memo = set() if self not in memo: if remove_duplicate: memo.add(self) yield prefix, self for name, module in self._modules.items(): if module is None: continue submodule_prefix = prefix + ("." if prefix else "") + name yield from module.named_modules( memo, submodule_prefix, remove_duplicate ) def train(self: T, mode: bool = True) -> T: r"""Set the module in training mode. This has an effect only on certain modules. See the documentation of particular modules for details of their behaviors in training/evaluation mode, i.e., whether they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc. Args: mode (bool): whether to set training mode (``True``) or evaluation mode (``False``). Default: ``True``. Returns: Module: self """ if not isinstance(mode, bool): raise ValueError("training mode is expected to be boolean") self.training = mode for module in self.children(): module.train(mode) return self def eval(self: T) -> T: r"""Set the module in evaluation mode. This has an effect only on certain modules. See the documentation of particular modules for details of their behaviors in training/evaluation mode, i.e. whether they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc. This is equivalent with :meth:`self.train(False) <torch.nn.Module.train>`. See :ref:`locally-disable-grad-doc` for a comparison between `.eval()` and several similar mechanisms that may be confused with it. Returns: Module: self """ return self.train(False) def requires_grad_(self: T, requires_grad: bool = True) -> T: r"""Change if autograd should record operations on parameters in this module. 
This method sets the parameters' :attr:`requires_grad` attributes in-place. This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training). See :ref:`locally-disable-grad-doc` for a comparison between `.requires_grad_()` and several similar mechanisms that may be confused with it. Args: requires_grad (bool): whether autograd should record operations on parameters in this module. Default: ``True``. Returns: Module: self """ for p in self.parameters(): p.requires_grad_(requires_grad) return self def zero_grad(self, set_to_none: bool = True) -> None: r"""Reset gradients of all model parameters. See similar function under :class:`torch.optim.Optimizer` for more context. Args: set_to_none (bool): instead of setting to zero, set the grads to None. See :meth:`torch.optim.Optimizer.zero_grad` for details. """ if getattr(self, "_is_replica", False): warnings.warn( "Calling .zero_grad() from a module created with nn.DataParallel() has no effect. " "The parameters are copied (in a differentiable manner) from the original module. " "This means they are not leaf nodes in autograd and so don't accumulate gradients. " "If you need gradients in your forward method, consider using autograd.grad instead." ) for p in self.parameters(): if p.grad is not None: if set_to_none: p.grad = None else: if p.grad.grad_fn is not None: p.grad.detach_() else: p.grad.requires_grad_(False) p.grad.zero_() def share_memory(self: T) -> T: r"""See :meth:`torch.Tensor.share_memory_`.""" return self._apply(lambda t: t.share_memory_()) def _get_name(self): return self.__class__.__name__ def extra_repr(self) -> str: r"""Return the extra representation of the module. To print customized extra information, you should re-implement this method in your own modules. Both single-line and multi-line strings are acceptable. """ return "" def __repr__(self): # We treat the extra repr like the sub-module, one item per line extra_lines = [] extra_repr = self.extra_repr() # empty string will be split into list [''] if extra_repr: extra_lines = extra_repr.split("\n") child_lines = [] for key, module in self._modules.items(): mod_str = repr(module) mod_str = _addindent(mod_str, 2) child_lines.append("(" + key + "): " + mod_str) lines = extra_lines + child_lines main_str = self._get_name() + "(" if lines: # simple one-liner info, which most builtin Modules will use if len(extra_lines) == 1 and not child_lines: main_str += extra_lines[0] else: main_str += "\n " + "\n ".join(lines) + "\n" main_str += ")" return main_str def __dir__(self): module_attrs = dir(self.__class__) attrs = list(self.__dict__.keys()) parameters = list(self._parameters.keys()) modules = list(self._modules.keys()) buffers = list(self._buffers.keys()) keys = module_attrs + attrs + parameters + modules + buffers # Eliminate attrs that are not legal Python variable names keys = [key for key in keys if not key[0].isdigit()] return sorted(keys) def _replicate_for_data_parallel(self): replica = self.__new__(type(self)) replica.__dict__ = self.__dict__.copy() # replicas do not have parameters themselves, the replicas reference the original # module. replica._parameters = {} replica._buffers = replica._buffers.copy() replica._modules = replica._modules.copy() replica._is_replica = True # type: ignore[assignment] return replica def compile(self, *args, **kwargs): """ Compile this Module's forward using :func:`torch.compile`. 
This Module's `__call__` method is compiled and all arguments are passed as-is to :func:`torch.compile`. See :func:`torch.compile` for details on the arguments for this function. """ self._compiled_call_impl = torch.compile(self._call_impl, *args, **kwargs) ```
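The hook registration, ``state_dict`` and ``load_state_dict`` machinery implemented in the file above is easiest to see in a short usage sketch. The toy ``net``, the hook functions and the tensor shapes below are illustrative assumptions; only the ``register_*``, ``named_parameters``, ``state_dict`` and ``load_state_dict`` calls come from the module above.

```py
import torch
from torch import nn

# hypothetical toy model, used only to exercise the Module API
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

# forward pre-hook: called with (module, args); returning a tuple replaces the args
def double_input(module, args):
    return (args[0] * 2.0,)

# forward hook: called with (module, args, output); returning None keeps the output
def log_output(module, args, output):
    print(f"{module.__class__.__name__} -> {tuple(output.shape)}")

pre_handle = net[0].register_forward_pre_hook(double_input)
post_handle = net[0].register_forward_hook(log_output)

out = net(torch.randn(3, 4))  # both hooks fire around the first Linear's forward

pre_handle.remove()
post_handle.remove()

# named_parameters() yields (name, Parameter) pairs such as "0.weight", "2.bias"
param_names = [name for name, _ in net.named_parameters()]

# state_dict() returns a name -> tensor mapping; load_state_dict() reports
# missing/unexpected keys as a NamedTuple
state = net.state_dict()
missing, unexpected = net.load_state_dict(state, strict=True)
assert not missing and not unexpected
```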
========================================================================================================================= SOURCE CODE FILE: normalization.py LINES: 1 SIZE: 15.01 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\normalization.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import numbers from typing import Optional, Union import torch from torch import Size, Tensor from torch.nn import functional as F, init from torch.nn.parameter import Parameter from ._functions import CrossMapLRN2d as _cross_map_lrn2d from .module import Module __all__ = ["LocalResponseNorm", "CrossMapLRN2d", "LayerNorm", "GroupNorm", "RMSNorm"] class LocalResponseNorm(Module): r"""Applies local response normalization over an input signal. The input signal is composed of several input planes, where channels occupy the second dimension. Applies normalization across channels. .. math:: b_{c} = a_{c}\left(k + \frac{\alpha}{n} \sum_{c'=\max(0, c-n/2)}^{\min(N-1,c+n/2)}a_{c'}^2\right)^{-\beta} Args: size: amount of neighbouring channels used for normalization alpha: multiplicative factor. Default: 0.0001 beta: exponent. Default: 0.75 k: additive factor. Default: 1 Shape: - Input: :math:`(N, C, *)` - Output: :math:`(N, C, *)` (same shape as input) Examples:: >>> lrn = nn.LocalResponseNorm(2) >>> signal_2d = torch.randn(32, 5, 24, 24) >>> signal_4d = torch.randn(16, 5, 7, 7, 7, 7) >>> output_2d = lrn(signal_2d) >>> output_4d = lrn(signal_4d) """ __constants__ = ["size", "alpha", "beta", "k"] size: int alpha: float beta: float k: float def __init__( self, size: int, alpha: float = 1e-4, beta: float = 0.75, k: float = 1.0 ) -> None: super().__init__() self.size = size self.alpha = alpha self.beta = beta self.k = k def forward(self, input: Tensor) -> Tensor: return F.local_response_norm(input, self.size, self.alpha, self.beta, self.k) def extra_repr(self): return "{size}, alpha={alpha}, beta={beta}, k={k}".format(**self.__dict__) class CrossMapLRN2d(Module): size: int alpha: float beta: float k: float def __init__( self, size: int, alpha: float = 1e-4, beta: float = 0.75, k: float = 1 ) -> None: super().__init__() self.size = size self.alpha = alpha self.beta = beta self.k = k def forward(self, input: Tensor) -> Tensor: return _cross_map_lrn2d.apply(input, self.size, self.alpha, self.beta, self.k) def extra_repr(self) -> str: return "{size}, alpha={alpha}, beta={beta}, k={k}".format(**self.__dict__) _shape_t = Union[int, list[int], Size] class LayerNorm(Module): r"""Applies Layer Normalization over a mini-batch of inputs. This layer implements the operation as described in the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`__ .. math:: y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta The mean and standard-deviation are calculated over the last `D` dimensions, where `D` is the dimension of :attr:`normalized_shape`. For example, if :attr:`normalized_shape` is ``(3, 5)`` (a 2-dimensional shape), the mean and standard-deviation are computed over the last 2 dimensions of the input (i.e. ``input.mean((-2, -1))``). :math:`\gamma` and :math:`\beta` are learnable affine transform parameters of :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``. The variance is calculated via the biased estimator, equivalent to `torch.var(input, unbiased=False)`. .. 
note:: Unlike Batch Normalization and Instance Normalization, which applies scalar scale and bias for each entire channel/plane with the :attr:`affine` option, Layer Normalization applies per-element scale and bias with :attr:`elementwise_affine`. This layer uses statistics computed from input data in both training and evaluation modes. Args: normalized_shape (int or list or torch.Size): input shape from an expected input of size .. math:: [* \times \text{normalized\_shape}[0] \times \text{normalized\_shape}[1] \times \ldots \times \text{normalized\_shape}[-1]] If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. eps: a value added to the denominator for numerical stability. Default: 1e-5 elementwise_affine: a boolean value that when set to ``True``, this module has learnable per-element affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. bias: If set to ``False``, the layer will not learn an additive bias (only relevant if :attr:`elementwise_affine` is ``True``). Default: ``True``. Attributes: weight: the learnable weights of the module of shape :math:`\text{normalized\_shape}` when :attr:`elementwise_affine` is set to ``True``. The values are initialized to 1. bias: the learnable bias of the module of shape :math:`\text{normalized\_shape}` when :attr:`elementwise_affine` is set to ``True``. The values are initialized to 0. Shape: - Input: :math:`(N, *)` - Output: :math:`(N, *)` (same shape as input) Examples:: >>> # NLP Example >>> batch, sentence_length, embedding_dim = 20, 5, 10 >>> embedding = torch.randn(batch, sentence_length, embedding_dim) >>> layer_norm = nn.LayerNorm(embedding_dim) >>> # Activate module >>> layer_norm(embedding) >>> >>> # Image Example >>> N, C, H, W = 20, 5, 10, 10 >>> input = torch.randn(N, C, H, W) >>> # Normalize over the last three dimensions (i.e. the channel and spatial dimensions) >>> # as shown in the image below >>> layer_norm = nn.LayerNorm([C, H, W]) >>> output = layer_norm(input) .. image:: ../_static/img/nn/layer_norm.jpg :scale: 50 % """ __constants__ = ["normalized_shape", "eps", "elementwise_affine"] normalized_shape: tuple[int, ...] 
eps: float elementwise_affine: bool def __init__( self, normalized_shape: _shape_t, eps: float = 1e-5, elementwise_affine: bool = True, bias: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = elementwise_affine if self.elementwise_affine: self.weight = Parameter( torch.empty(self.normalized_shape, **factory_kwargs) ) if bias: self.bias = Parameter( torch.empty(self.normalized_shape, **factory_kwargs) ) else: self.register_parameter("bias", None) else: self.register_parameter("weight", None) self.register_parameter("bias", None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: init.ones_(self.weight) if self.bias is not None: init.zeros_(self.bias) def forward(self, input: Tensor) -> Tensor: return F.layer_norm( input, self.normalized_shape, self.weight, self.bias, self.eps ) def extra_repr(self) -> str: return ( "{normalized_shape}, eps={eps}, " "elementwise_affine={elementwise_affine}".format(**self.__dict__) ) class GroupNorm(Module): r"""Applies Group Normalization over a mini-batch of inputs. This layer implements the operation as described in the paper `Group Normalization <https://arxiv.org/abs/1803.08494>`__ .. math:: y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta The input channels are separated into :attr:`num_groups` groups, each containing ``num_channels / num_groups`` channels. :attr:`num_channels` must be divisible by :attr:`num_groups`. The mean and standard-deviation are calculated separately over the each group. :math:`\gamma` and :math:`\beta` are learnable per-channel affine transform parameter vectors of size :attr:`num_channels` if :attr:`affine` is ``True``. The variance is calculated via the biased estimator, equivalent to `torch.var(input, unbiased=False)`. This layer uses statistics computed from input data in both training and evaluation modes. Args: num_groups (int): number of groups to separate the channels into num_channels (int): number of channels expected in input eps: a value added to the denominator for numerical stability. Default: 1e-5 affine: a boolean value that when set to ``True``, this module has learnable per-channel affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. 
Shape: - Input: :math:`(N, C, *)` where :math:`C=\text{num\_channels}` - Output: :math:`(N, C, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 6, 10, 10) >>> # Separate 6 channels into 3 groups >>> m = nn.GroupNorm(3, 6) >>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm) >>> m = nn.GroupNorm(6, 6) >>> # Put all 6 channels into a single group (equivalent with LayerNorm) >>> m = nn.GroupNorm(1, 6) >>> # Activating the module >>> output = m(input) """ __constants__ = ["num_groups", "num_channels", "eps", "affine"] num_groups: int num_channels: int eps: float affine: bool def __init__( self, num_groups: int, num_channels: int, eps: float = 1e-5, affine: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() if num_channels % num_groups != 0: raise ValueError("num_channels must be divisible by num_groups") self.num_groups = num_groups self.num_channels = num_channels self.eps = eps self.affine = affine if self.affine: self.weight = Parameter(torch.empty(num_channels, **factory_kwargs)) self.bias = Parameter(torch.empty(num_channels, **factory_kwargs)) else: self.register_parameter("weight", None) self.register_parameter("bias", None) self.reset_parameters() def reset_parameters(self) -> None: if self.affine: init.ones_(self.weight) init.zeros_(self.bias) def forward(self, input: Tensor) -> Tensor: return F.group_norm(input, self.num_groups, self.weight, self.bias, self.eps) def extra_repr(self) -> str: return "{num_groups}, {num_channels}, eps={eps}, affine={affine}".format( **self.__dict__ ) class RMSNorm(Module): r"""Applies Root Mean Square Layer Normalization over a mini-batch of inputs. This layer implements the operation as described in the paper `Root Mean Square Layer Normalization <https://arxiv.org/pdf/1910.07467.pdf>`__ .. math:: y_i = \frac{x_i}{\mathrm{RMS}(x)} * \gamma_i, \quad \text{where} \quad \text{RMS}(x) = \sqrt{\epsilon + \frac{1}{n} \sum_{i=1}^{n} x_i^2} The RMS is taken over the last ``D`` dimensions, where ``D`` is the dimension of :attr:`normalized_shape`. For example, if :attr:`normalized_shape` is ``(3, 5)`` (a 2-dimensional shape), the RMS is computed over the last 2 dimensions of the input. Args: normalized_shape (int or list or torch.Size): input shape from an expected input of size .. math:: [* \times \text{normalized\_shape}[0] \times \text{normalized\_shape}[1] \times \ldots \times \text{normalized\_shape}[-1]] If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. eps: a value added to the denominator for numerical stability. Default: :func:`torch.finfo(x.dtype).eps` elementwise_affine: a boolean value that when set to ``True``, this module has learnable per-element affine parameters initialized to ones (for weights). Default: ``True``. Shape: - Input: :math:`(N, *)` - Output: :math:`(N, *)` (same shape as input) Examples:: >>> rms_norm = nn.RMSNorm([2, 3]) >>> input = torch.randn(2, 2, 3) >>> rms_norm(input) """ __constants__ = ["normalized_shape", "eps", "elementwise_affine"] normalized_shape: tuple[int, ...] 
eps: Optional[float] elementwise_affine: bool def __init__( self, normalized_shape: _shape_t, eps: Optional[float] = None, elementwise_affine: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = elementwise_affine if self.elementwise_affine: self.weight = Parameter( torch.empty(self.normalized_shape, **factory_kwargs) ) else: self.register_parameter("weight", None) self.reset_parameters() def reset_parameters(self) -> None: """ Resets parameters based on their initialization used in __init__. """ if self.elementwise_affine: init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: """ Runs forward pass. """ return F.rms_norm(x, self.normalized_shape, self.weight, self.eps) def extra_repr(self) -> str: """ Extra information about the module. """ return ( "{normalized_shape}, eps={eps}, " "elementwise_affine={elementwise_affine}".format(**self.__dict__) ) # TODO: ContrastiveNorm2d # TODO: DivisiveNorm2d # TODO: SubtractiveNorm2d ```
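For the normalization layers defined above, the sketch below shows typical inputs and verifies the ``RMSNorm`` formula by hand. The shapes are illustrative assumptions; the manual computation follows the docstring's definition, using the default ``eps`` of ``torch.finfo(x.dtype).eps`` and freshly initialized (all-ones) weights.

```py
import torch
from torch import nn

x = torch.randn(2, 4, 8)  # (batch, sequence, features) -- assumed shape

# LayerNorm normalizes over the trailing normalized_shape dims (here: the last dim)
layer_norm = nn.LayerNorm(8)
y_ln = layer_norm(x)

# RMSNorm divides by the root mean square over the last dim, then scales by weight
rms_norm = nn.RMSNorm(8)
y_rms = rms_norm(x)

eps = torch.finfo(x.dtype).eps  # default eps when none is given
manual = x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + eps) * rms_norm.weight
torch.testing.assert_close(y_rms, manual)

# GroupNorm expects (N, C, *) inputs; channels are split into num_groups groups
img = torch.randn(2, 6, 5, 5)
group_norm = nn.GroupNorm(3, 6)  # 6 channels in 3 groups of 2
y_gn = group_norm(img)
```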
=================================================================================================================== SOURCE CODE FILE: padding.py LINES: 1 SIZE: 30.35 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\padding.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from collections.abc import Sequence import torch.nn.functional as F from torch import Tensor from torch.nn.common_types import _size_2_t, _size_4_t, _size_6_t from .module import Module from .utils import _ntuple, _pair, _quadruple # TODO: grad_output size asserts in THNN __all__ = [ "CircularPad1d", "CircularPad2d", "CircularPad3d", "ConstantPad1d", "ConstantPad2d", "ConstantPad3d", "ReflectionPad1d", "ReflectionPad2d", "ReflectionPad3d", "ReplicationPad1d", "ReplicationPad2d", "ReplicationPad3d", "ZeroPad1d", "ZeroPad2d", "ZeroPad3d", ] class _CircularPadNd(Module): __constants__ = ["padding"] padding: Sequence[int] def _check_input_dim(self, input): raise NotImplementedError def forward(self, input: Tensor) -> Tensor: self._check_input_dim(input) return F.pad(input, self.padding, "circular") def extra_repr(self) -> str: return f"{self.padding}" class CircularPad1d(_CircularPadNd): r"""Pads the input tensor using circular padding of the input boundary. Tensor values at the beginning of the dimension are used to pad the end, and values at the end are used to pad the beginning. If negative padding is applied then the ends of the tensor get removed. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 2-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`) Shape: - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`. - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this") >>> m = nn.CircularPad1d(2) >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4) >>> input tensor([[[0., 1., 2., 3.], [4., 5., 6., 7.]]]) >>> m(input) tensor([[[2., 3., 0., 1., 2., 3., 0., 1.], [6., 7., 4., 5., 6., 7., 4., 5.]]]) >>> # using different paddings for different sides >>> m = nn.CircularPad1d((3, 1)) >>> m(input) tensor([[[1., 2., 3., 0., 1., 2., 3., 0.], [5., 6., 7., 4., 5., 6., 7., 4.]]]) """ padding: tuple[int, int] def __init__(self, padding: _size_2_t) -> None: super().__init__() self.padding = _pair(padding) def _check_input_dim(self, input): if input.dim() != 2 and input.dim() != 3: raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)") class CircularPad2d(_CircularPadNd): r"""Pads the input tensor using circular padding of the input boundary. Tensor values at the beginning of the dimension are used to pad the end, and values at the end are used to pad the beginning. If negative padding is applied then the ends of the tensor get removed. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) Shape: - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. 
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> m = nn.CircularPad2d(2) >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3) >>> input tensor([[[[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]]]) >>> m(input) tensor([[[[4., 5., 3., 4., 5., 3., 4.], [7., 8., 6., 7., 8., 6., 7.], [1., 2., 0., 1., 2., 0., 1.], [4., 5., 3., 4., 5., 3., 4.], [7., 8., 6., 7., 8., 6., 7.], [1., 2., 0., 1., 2., 0., 1.], [4., 5., 3., 4., 5., 3., 4.]]]]) >>> # using different paddings for different sides >>> m = nn.CircularPad2d((1, 1, 2, 0)) >>> m(input) tensor([[[[5., 3., 4., 5., 3.], [8., 6., 7., 8., 6.], [2., 0., 1., 2., 0.], [5., 3., 4., 5., 3.], [8., 6., 7., 8., 6.]]]]) """ padding: tuple[int, int, int, int] def __init__(self, padding: _size_4_t) -> None: super().__init__() self.padding = _quadruple(padding) def _check_input_dim(self, input): if input.dim() != 3 and input.dim() != 4: raise ValueError(f"expected 3D or 4D input (got {input.dim()}D input)") class CircularPad3d(_CircularPadNd): r"""Pads the input tensor using circular padding of the input boundary. Tensor values at the beginning of the dimension are used to pad the end, and values at the end are used to pad the beginning. If negative padding is applied then the ends of the tensor get removed. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 6-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`, :math:`\text{padding\_front}`, :math:`\text{padding\_back}`) Shape: - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}` :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> m = nn.CircularPad3d(3) >>> input = torch.randn(16, 3, 8, 320, 480) >>> output = m(input) >>> # using different paddings for different sides >>> m = nn.CircularPad3d((3, 3, 6, 6, 1, 1)) >>> output = m(input) """ padding: tuple[int, int, int, int, int, int] def __init__(self, padding: _size_6_t) -> None: super().__init__() self.padding = _ntuple(6)(padding) def _check_input_dim(self, input): if input.dim() != 4 and input.dim() != 5: raise ValueError(f"expected 4D or 5D input (got {input.dim()}D input)") class _ConstantPadNd(Module): __constants__ = ["padding", "value"] value: float padding: Sequence[int] def __init__(self, value: float) -> None: super().__init__() self.value = value def forward(self, input: Tensor) -> Tensor: return F.pad(input, self.padding, "constant", self.value) def extra_repr(self) -> str: return f"padding={self.padding}, value={self.value}" class ConstantPad1d(_ConstantPadNd): r"""Pads the input tensor boundaries with a constant value. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in both boundaries. 
If a 2-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`) Shape: - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`. - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> m = nn.ConstantPad1d(2, 3.5) >>> input = torch.randn(1, 2, 4) >>> input tensor([[[-1.0491, -0.7152, -0.0749, 0.8530], [-1.3287, 1.8966, 0.1466, -0.2771]]]) >>> m(input) tensor([[[ 3.5000, 3.5000, -1.0491, -0.7152, -0.0749, 0.8530, 3.5000, 3.5000], [ 3.5000, 3.5000, -1.3287, 1.8966, 0.1466, -0.2771, 3.5000, 3.5000]]]) >>> m = nn.ConstantPad1d(2, 3.5) >>> input = torch.randn(1, 2, 3) >>> input tensor([[[ 1.6616, 1.4523, -1.1255], [-3.6372, 0.1182, -1.8652]]]) >>> m(input) tensor([[[ 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000, 3.5000], [ 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000, 3.5000]]]) >>> # using different paddings for different sides >>> m = nn.ConstantPad1d((3, 1), 3.5) >>> m(input) tensor([[[ 3.5000, 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000], [ 3.5000, 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000]]]) """ padding: tuple[int, int] def __init__(self, padding: _size_2_t, value: float): super().__init__(value) self.padding = _pair(padding) class ConstantPad2d(_ConstantPadNd): r"""Pads the input tensor boundaries with a constant value. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) Shape: - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> m = nn.ConstantPad2d(2, 3.5) >>> input = torch.randn(1, 2, 2) >>> input tensor([[[ 1.6585, 0.4320], [-0.8701, -0.4649]]]) >>> m(input) tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], [ 3.5000, 3.5000, 1.6585, 0.4320, 3.5000, 3.5000], [ 3.5000, 3.5000, -0.8701, -0.4649, 3.5000, 3.5000], [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]]) >>> # using different paddings for different sides >>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5) >>> m(input) tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], [ 3.5000, 3.5000, 3.5000, 1.6585, 0.4320], [ 3.5000, 3.5000, 3.5000, -0.8701, -0.4649], [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]]) """ __constants__ = ["padding", "value"] padding: tuple[int, int, int, int] def __init__(self, padding: _size_4_t, value: float) -> None: super().__init__(value) self.padding = _quadruple(padding) class ConstantPad3d(_ConstantPadNd): r"""Pads the input tensor boundaries with a constant value. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. 
If a 6-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`, :math:`\text{padding\_front}`, :math:`\text{padding\_back}`) Shape: - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}` :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> m = nn.ConstantPad3d(3, 3.5) >>> input = torch.randn(16, 3, 10, 20, 30) >>> output = m(input) >>> # using different paddings for different sides >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5) >>> output = m(input) """ padding: tuple[int, int, int, int, int, int] def __init__(self, padding: _size_6_t, value: float) -> None: super().__init__(value) self.padding = _ntuple(6)(padding) class _ReflectionPadNd(Module): __constants__ = ["padding"] padding: Sequence[int] def forward(self, input: Tensor) -> Tensor: return F.pad(input, self.padding, "reflect") def extra_repr(self) -> str: return f"{self.padding}" class ReflectionPad1d(_ReflectionPadNd): r"""Pads the input tensor using the reflection of the input boundary. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 2-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`) Shape: - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`. - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> m = nn.ReflectionPad1d(2) >>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles") >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4) >>> input tensor([[[0., 1., 2., 3.], [4., 5., 6., 7.]]]) >>> m(input) tensor([[[2., 1., 0., 1., 2., 3., 2., 1.], [6., 5., 4., 5., 6., 7., 6., 5.]]]) >>> # using different paddings for different sides >>> m = nn.ReflectionPad1d((3, 1)) >>> m(input) tensor([[[3., 2., 1., 0., 1., 2., 3., 2.], [7., 6., 5., 4., 5., 6., 7., 6.]]]) """ padding: tuple[int, int] def __init__(self, padding: _size_2_t) -> None: super().__init__() self.padding = _pair(padding) class ReflectionPad2d(_ReflectionPadNd): r"""Pads the input tensor using the reflection of the input boundary. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) Note that padding size should be less than the corresponding input dimension. Shape: - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. 
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})` where :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this") >>> m = nn.ReflectionPad2d(2) >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3) >>> input tensor([[[[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]]]) >>> m(input) tensor([[[[8., 7., 6., 7., 8., 7., 6.], [5., 4., 3., 4., 5., 4., 3.], [2., 1., 0., 1., 2., 1., 0.], [5., 4., 3., 4., 5., 4., 3.], [8., 7., 6., 7., 8., 7., 6.], [5., 4., 3., 4., 5., 4., 3.], [2., 1., 0., 1., 2., 1., 0.]]]]) >>> # using different paddings for different sides >>> m = nn.ReflectionPad2d((1, 1, 2, 0)) >>> m(input) tensor([[[[7., 6., 7., 8., 7.], [4., 3., 4., 5., 4.], [1., 0., 1., 2., 1.], [4., 3., 4., 5., 4.], [7., 6., 7., 8., 7.]]]]) """ padding: tuple[int, int, int, int] def __init__(self, padding: _size_4_t) -> None: super().__init__() self.padding = _quadruple(padding) class ReflectionPad3d(_ReflectionPadNd): r"""Pads the input tensor using the reflection of the input boundary. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 6-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`, :math:`\text{padding\_front}`, :math:`\text{padding\_back}`) Shape: - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}` :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this") >>> m = nn.ReflectionPad3d(1) >>> input = torch.arange(8, dtype=torch.float).reshape(1, 1, 2, 2, 2) >>> m(input) tensor([[[[[7., 6., 7., 6.], [5., 4., 5., 4.], [7., 6., 7., 6.], [5., 4., 5., 4.]], [[3., 2., 3., 2.], [1., 0., 1., 0.], [3., 2., 3., 2.], [1., 0., 1., 0.]], [[7., 6., 7., 6.], [5., 4., 5., 4.], [7., 6., 7., 6.], [5., 4., 5., 4.]], [[3., 2., 3., 2.], [1., 0., 1., 0.], [3., 2., 3., 2.], [1., 0., 1., 0.]]]]]) """ padding: tuple[int, int, int, int, int, int] def __init__(self, padding: _size_6_t) -> None: super().__init__() self.padding = _ntuple(6)(padding) class _ReplicationPadNd(Module): __constants__ = ["padding"] padding: Sequence[int] def forward(self, input: Tensor) -> Tensor: return F.pad(input, self.padding, "replicate") def extra_repr(self) -> str: return f"{self.padding}" class ReplicationPad1d(_ReplicationPadNd): r"""Pads the input tensor using replication of the input boundary. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 2-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`) Shape: - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`. 
- Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this") >>> m = nn.ReplicationPad1d(2) >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4) >>> input tensor([[[0., 1., 2., 3.], [4., 5., 6., 7.]]]) >>> m(input) tensor([[[0., 0., 0., 1., 2., 3., 3., 3.], [4., 4., 4., 5., 6., 7., 7., 7.]]]) >>> # using different paddings for different sides >>> m = nn.ReplicationPad1d((3, 1)) >>> m(input) tensor([[[0., 0., 0., 0., 1., 2., 3., 3.], [4., 4., 4., 4., 5., 6., 7., 7.]]]) """ padding: tuple[int, int] def __init__(self, padding: _size_2_t) -> None: super().__init__() self.padding = _pair(padding) class ReplicationPad2d(_ReplicationPadNd): r"""Pads the input tensor using replication of the input boundary. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) Shape: - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> m = nn.ReplicationPad2d(2) >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3) >>> input tensor([[[[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]]]) >>> m(input) tensor([[[[0., 0., 0., 1., 2., 2., 2.], [0., 0., 0., 1., 2., 2., 2.], [0., 0., 0., 1., 2., 2., 2.], [3., 3., 3., 4., 5., 5., 5.], [6., 6., 6., 7., 8., 8., 8.], [6., 6., 6., 7., 8., 8., 8.], [6., 6., 6., 7., 8., 8., 8.]]]]) >>> # using different paddings for different sides >>> m = nn.ReplicationPad2d((1, 1, 2, 0)) >>> m(input) tensor([[[[0., 0., 1., 2., 2.], [0., 0., 1., 2., 2.], [0., 0., 1., 2., 2.], [3., 3., 4., 5., 5.], [6., 6., 7., 8., 8.]]]]) """ padding: tuple[int, int, int, int] def __init__(self, padding: _size_4_t) -> None: super().__init__() self.padding = _quadruple(padding) class ReplicationPad3d(_ReplicationPadNd): r"""Pads the input tensor using replication of the input boundary. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 6-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`, :math:`\text{padding\_front}`, :math:`\text{padding\_back}`) Shape: - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. 
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}` :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> m = nn.ReplicationPad3d(3) >>> input = torch.randn(16, 3, 8, 320, 480) >>> output = m(input) >>> # using different paddings for different sides >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1)) >>> output = m(input) """ padding: tuple[int, int, int, int, int, int] def __init__(self, padding: _size_6_t) -> None: super().__init__() self.padding = _ntuple(6)(padding) class ZeroPad1d(ConstantPad1d): r"""Pads the input tensor boundaries with zero. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in both boundaries. If a 2-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`) Shape: - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`. - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> m = nn.ZeroPad1d(2) >>> input = torch.randn(1, 2, 4) >>> input tensor([[[-1.0491, -0.7152, -0.0749, 0.8530], [-1.3287, 1.8966, 0.1466, -0.2771]]]) >>> m(input) tensor([[[ 0.0000, 0.0000, -1.0491, -0.7152, -0.0749, 0.8530, 0.0000, 0.0000], [ 0.0000, 0.0000, -1.3287, 1.8966, 0.1466, -0.2771, 0.0000, 0.0000]]]) >>> m = nn.ZeroPad1d(2) >>> input = torch.randn(1, 2, 3) >>> input tensor([[[ 1.6616, 1.4523, -1.1255], [-3.6372, 0.1182, -1.8652]]]) >>> m(input) tensor([[[ 0.0000, 0.0000, 1.6616, 1.4523, -1.1255, 0.0000, 0.0000], [ 0.0000, 0.0000, -3.6372, 0.1182, -1.8652, 0.0000, 0.0000]]]) >>> # using different paddings for different sides >>> m = nn.ZeroPad1d((3, 1)) >>> m(input) tensor([[[ 0.0000, 0.0000, 0.0000, 1.6616, 1.4523, -1.1255, 0.0000], [ 0.0000, 0.0000, 0.0000, -3.6372, 0.1182, -1.8652, 0.0000]]]) """ padding: tuple[int, int] def __init__(self, padding: _size_2_t) -> None: super().__init__(padding, 0.0) def extra_repr(self) -> str: return f"{self.padding}" class ZeroPad2d(ConstantPad2d): r"""Pads the input tensor boundaries with zero. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) Shape: - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. 
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> m = nn.ZeroPad2d(2) >>> input = torch.randn(1, 1, 3, 3) >>> input tensor([[[[-0.1678, -0.4418, 1.9466], [ 0.9604, -0.4219, -0.5241], [-0.9162, -0.5436, -0.6446]]]]) >>> m(input) tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [ 0.0000, 0.0000, -0.1678, -0.4418, 1.9466, 0.0000, 0.0000], [ 0.0000, 0.0000, 0.9604, -0.4219, -0.5241, 0.0000, 0.0000], [ 0.0000, 0.0000, -0.9162, -0.5436, -0.6446, 0.0000, 0.0000], [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]]) >>> # using different paddings for different sides >>> m = nn.ZeroPad2d((1, 1, 2, 0)) >>> m(input) tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [ 0.0000, -0.1678, -0.4418, 1.9466, 0.0000], [ 0.0000, 0.9604, -0.4219, -0.5241, 0.0000], [ 0.0000, -0.9162, -0.5436, -0.6446, 0.0000]]]]) """ padding: tuple[int, int, int, int] def __init__(self, padding: _size_4_t) -> None: super().__init__(padding, 0.0) def extra_repr(self) -> str: return f"{self.padding}" class ZeroPad3d(ConstantPad3d): r"""Pads the input tensor boundaries with zero. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 6-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`, :math:`\text{padding\_front}`, :math:`\text{padding\_back}`) Shape: - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}` :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> m = nn.ZeroPad3d(3) >>> input = torch.randn(16, 3, 10, 20, 30) >>> output = m(input) >>> # using different paddings for different sides >>> m = nn.ZeroPad3d((3, 3, 6, 6, 0, 1)) >>> output = m(input) """ padding: tuple[int, int, int, int, int, int] def __init__(self, padding: _size_6_t) -> None: super().__init__(padding, 0.0) def extra_repr(self) -> str: return f"{self.padding}" ```
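The padding modules above differ only in how they fill the new border elements. As a quick illustration (this snippet is not part of the upstream file; the input tensor and padding values are arbitrary), the four 2D variants can be compared side by side on a tiny input:

```py
# Minimal sketch comparing the 2D padding modes defined in padding.py.
# All four produce a (1, 1, 4, 4) output from a (1, 1, 2, 2) input when
# padded by one element on every side; only the border values differ.
import torch
import torch.nn as nn

x = torch.arange(4.0).reshape(1, 1, 2, 2)   # tensor([[[[0., 1.], [2., 3.]]]])
pad = (1, 1, 1, 1)                          # (left, right, top, bottom)

print(nn.ZeroPad2d(pad)(x))         # border filled with 0.0
print(nn.ConstantPad2d(pad, 9.0)(x))  # border filled with 9.0
print(nn.ReplicationPad2d(pad)(x))  # outermost row/column repeated outward
print(nn.ReflectionPad2d(pad)(x))   # mirrored about the edge, edge not repeated
print(nn.CircularPad2d(pad)(x))     # values wrapped around from the opposite edge
```

Replication repeats the outermost row or column, reflection mirrors around the edge without repeating it, and circular padding wraps values from the opposite side, which is why the reflection and circular modes constrain the padding size relative to the corresponding input dimension.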
======================================================================================================================== SOURCE CODE FILE: pixelshuffle.py LINES: 1 SIZE: 3.71 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\pixelshuffle.py ENCODING: utf-8 ```py import torch.nn.functional as F from torch import Tensor from .module import Module __all__ = ["PixelShuffle", "PixelUnshuffle"] class PixelShuffle(Module): r"""Rearrange elements in a tensor according to an upscaling factor. Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)` to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an upscale factor. This is useful for implementing efficient sub-pixel convolution with a stride of :math:`1/r`. See the paper: `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_ by Shi et al. (2016) for more details. Args: upscale_factor (int): factor to increase spatial resolution by Shape: - Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions - Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where .. math:: C_{out} = C_{in} \div \text{upscale\_factor}^2 .. math:: H_{out} = H_{in} \times \text{upscale\_factor} .. math:: W_{out} = W_{in} \times \text{upscale\_factor} Examples:: >>> pixel_shuffle = nn.PixelShuffle(3) >>> input = torch.randn(1, 9, 4, 4) >>> output = pixel_shuffle(input) >>> print(output.size()) torch.Size([1, 1, 12, 12]) .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network: https://arxiv.org/abs/1609.05158 """ __constants__ = ["upscale_factor"] upscale_factor: int def __init__(self, upscale_factor: int) -> None: super().__init__() self.upscale_factor = upscale_factor def forward(self, input: Tensor) -> Tensor: return F.pixel_shuffle(input, self.upscale_factor) def extra_repr(self) -> str: return f"upscale_factor={self.upscale_factor}" class PixelUnshuffle(Module): r"""Reverse the PixelShuffle operation. Reverses the :class:`~torch.nn.PixelShuffle` operation by rearranging elements in a tensor of shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape :math:`(*, C \times r^2, H, W)`, where r is a downscale factor. See the paper: `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_ by Shi et al. (2016) for more details. Args: downscale_factor (int): factor to decrease spatial resolution by Shape: - Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions - Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where .. math:: C_{out} = C_{in} \times \text{downscale\_factor}^2 .. math:: H_{out} = H_{in} \div \text{downscale\_factor} .. math:: W_{out} = W_{in} \div \text{downscale\_factor} Examples:: >>> pixel_unshuffle = nn.PixelUnshuffle(3) >>> input = torch.randn(1, 1, 12, 12) >>> output = pixel_unshuffle(input) >>> print(output.size()) torch.Size([1, 9, 4, 4]) .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network: https://arxiv.org/abs/1609.05158 """ __constants__ = ["downscale_factor"] downscale_factor: int def __init__(self, downscale_factor: int) -> None: super().__init__() self.downscale_factor = downscale_factor def forward(self, input: Tensor) -> Tensor: return F.pixel_unshuffle(input, self.downscale_factor) def extra_repr(self) -> str: return f"downscale_factor={self.downscale_factor}" ```
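Since `PixelUnshuffle` is documented as the inverse of `PixelShuffle`, applying a shuffle and then an unshuffle with the same factor is a pure rearrangement of elements and recovers the input exactly. A minimal sketch (not part of the upstream file; shapes chosen to match the docstring examples):

```py
# Round-trip check: PixelShuffle followed by PixelUnshuffle with the same
# factor returns the original tensor, since both are permutations of elements.
import torch
import torch.nn as nn

r = 3
shuffle = nn.PixelShuffle(r)      # (N, C*r^2, H, W) -> (N, C, H*r, W*r)
unshuffle = nn.PixelUnshuffle(r)  # (N, C, H*r, W*r) -> (N, C*r^2, H, W)

x = torch.randn(1, 9, 4, 4)
y = shuffle(x)                    # torch.Size([1, 1, 12, 12])
assert torch.equal(unshuffle(y), x)  # exact round trip, no values changed
```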
=================================================================================================================== SOURCE CODE FILE: pooling.py LINES: 1 SIZE: 58.89 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\pooling.py ENCODING: utf-8 ```py from typing import Optional import torch.nn.functional as F from torch import Tensor from torch.nn.common_types import ( _ratio_2_t, _ratio_3_t, _size_1_t, _size_2_opt_t, _size_2_t, _size_3_opt_t, _size_3_t, _size_any_opt_t, _size_any_t, ) from .module import Module from .utils import _pair, _single, _triple __all__ = [ "MaxPool1d", "MaxPool2d", "MaxPool3d", "MaxUnpool1d", "MaxUnpool2d", "MaxUnpool3d", "AvgPool1d", "AvgPool2d", "AvgPool3d", "FractionalMaxPool2d", "FractionalMaxPool3d", "LPPool1d", "LPPool2d", "LPPool3d", "AdaptiveMaxPool1d", "AdaptiveMaxPool2d", "AdaptiveMaxPool3d", "AdaptiveAvgPool1d", "AdaptiveAvgPool2d", "AdaptiveAvgPool3d", ] class _MaxPoolNd(Module): __constants__ = [ "kernel_size", "stride", "padding", "dilation", "return_indices", "ceil_mode", ] return_indices: bool ceil_mode: bool def __init__( self, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None, padding: _size_any_t = 0, dilation: _size_any_t = 1, return_indices: bool = False, ceil_mode: bool = False, ) -> None: super().__init__() self.kernel_size = kernel_size self.stride = stride if (stride is not None) else kernel_size self.padding = padding self.dilation = dilation self.return_indices = return_indices self.ceil_mode = ceil_mode def extra_repr(self) -> str: return ( "kernel_size={kernel_size}, stride={stride}, padding={padding}" ", dilation={dilation}, ceil_mode={ceil_mode}".format(**self.__dict__) ) class MaxPool1d(_MaxPoolNd): r"""Applies a 1D max pooling over an input signal composed of several input planes. In the simplest case, the output value of the layer with input size :math:`(N, C, L)` and output :math:`(N, C, L_{out})` can be precisely described as: .. math:: out(N_i, C_j, k) = \max_{m=0, \ldots, \text{kernel\_size} - 1} input(N_i, C_j, stride \times k + m) If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides for :attr:`padding` number of points. :attr:`dilation` is the stride between the elements within the sliding window. This `link`_ has a nice visualization of the pooling parameters. Note: When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding or the input. Sliding windows that would start in the right padded region are ignored. Args: kernel_size: The size of the sliding window, must be > 0. stride: The stride of the sliding window, must be > 0. Default value is :attr:`kernel_size`. padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2. dilation: The stride between elements within a sliding window, must be > 0. return_indices: If ``True``, will return the argmax along with the max values. Useful for :class:`torch.nn.MaxUnpool1d` later ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This ensures that every element in the input tensor is covered by a sliding window. Shape: - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where .. 
math:: L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation} \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor Examples:: >>> # pool of size=3, stride=2 >>> m = nn.MaxPool1d(3, stride=2) >>> input = torch.randn(20, 16, 50) >>> output = m(input) .. _link: https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md """ kernel_size: _size_1_t stride: _size_1_t padding: _size_1_t dilation: _size_1_t def forward(self, input: Tensor): return F.max_pool1d( input, self.kernel_size, self.stride, self.padding, self.dilation, ceil_mode=self.ceil_mode, return_indices=self.return_indices, ) class MaxPool2d(_MaxPoolNd): r"""Applies a 2D max pooling over an input signal composed of several input planes. In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`, output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)` can be precisely described as: .. math:: \begin{aligned} out(N_i, C_j, h, w) ={} & \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\ & \text{input}(N_i, C_j, \text{stride[0]} \times h + m, \text{stride[1]} \times w + n) \end{aligned} If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. Note: When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding or the input. Sliding windows that would start in the right padded region are ignored. The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be: - a single ``int`` -- in which case the same value is used for the height and width dimension - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension, and the second `int` for the width dimension Args: kernel_size: the size of the window to take a max over stride: the stride of the window. Default value is :attr:`kernel_size` padding: Implicit negative infinity padding to be added on both sides dilation: a parameter that controls the stride of elements in the window return_indices: if ``True``, will return the max indices along with the outputs. Useful for :class:`torch.nn.MaxUnpool2d` later ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape Shape: - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})` - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where .. math:: H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding[0]} - \text{dilation[0]} \times (\text{kernel\_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor .. math:: W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding[1]} - \text{dilation[1]} \times (\text{kernel\_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor Examples:: >>> # pool of square window of size=3, stride=2 >>> m = nn.MaxPool2d(3, stride=2) >>> # pool of non-square window >>> m = nn.MaxPool2d((3, 2), stride=(2, 1)) >>> input = torch.randn(20, 16, 50, 32) >>> output = m(input) .. 
_link: https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md """ kernel_size: _size_2_t stride: _size_2_t padding: _size_2_t dilation: _size_2_t def forward(self, input: Tensor): return F.max_pool2d( input, self.kernel_size, self.stride, self.padding, self.dilation, ceil_mode=self.ceil_mode, return_indices=self.return_indices, ) class MaxPool3d(_MaxPoolNd): r"""Applies a 3D max pooling over an input signal composed of several input planes. In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`, output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)` can be precisely described as: .. math:: \begin{aligned} \text{out}(N_i, C_j, d, h, w) ={} & \max_{k=0, \ldots, kD-1} \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\ & \text{input}(N_i, C_j, \text{stride[0]} \times d + k, \text{stride[1]} \times h + m, \text{stride[2]} \times w + n) \end{aligned} If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. Note: When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding or the input. Sliding windows that would start in the right padded region are ignored. The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be: - a single ``int`` -- in which case the same value is used for the depth, height and width dimension - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension, the second `int` for the height dimension and the third `int` for the width dimension Args: kernel_size: the size of the window to take a max over stride: the stride of the window. Default value is :attr:`kernel_size` padding: Implicit negative infinity padding to be added on all three sides dilation: a parameter that controls the stride of elements in the window return_indices: if ``True``, will return the max indices along with the outputs. Useful for :class:`torch.nn.MaxUnpool3d` later ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape Shape: - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where .. math:: D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor .. math:: H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor .. math:: W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2] \times (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor Examples:: >>> # pool of square window of size=3, stride=2 >>> m = nn.MaxPool3d(3, stride=2) >>> # pool of non-square window >>> m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2)) >>> input = torch.randn(20, 16, 50, 44, 31) >>> output = m(input) .. 
_link: https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md """ # noqa: E501 kernel_size: _size_3_t stride: _size_3_t padding: _size_3_t dilation: _size_3_t def forward(self, input: Tensor): return F.max_pool3d( input, self.kernel_size, self.stride, self.padding, self.dilation, ceil_mode=self.ceil_mode, return_indices=self.return_indices, ) class _MaxUnpoolNd(Module): def extra_repr(self) -> str: return f"kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}" class MaxUnpool1d(_MaxUnpoolNd): r"""Computes a partial inverse of :class:`MaxPool1d`. :class:`MaxPool1d` is not fully invertible, since the non-maximal values are lost. :class:`MaxUnpool1d` takes in as input the output of :class:`MaxPool1d` including the indices of the maximal values and computes a partial inverse in which all non-maximal values are set to zero. Note: This operation may behave nondeterministically when the input indices has repeat values. See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information. .. note:: :class:`MaxPool1d` can map several input sizes to the same output sizes. Hence, the inversion process can get ambiguous. To accommodate this, you can provide the needed output size as an additional argument :attr:`output_size` in the forward call. See the Inputs and Example below. Args: kernel_size (int or tuple): Size of the max pooling window. stride (int or tuple): Stride of the max pooling window. It is set to :attr:`kernel_size` by default. padding (int or tuple): Padding that was added to the input Inputs: - `input`: the input Tensor to invert - `indices`: the indices given out by :class:`~torch.nn.MaxPool1d` - `output_size` (optional): the targeted output size Shape: - Input: :math:`(N, C, H_{in})` or :math:`(C, H_{in})`. - Output: :math:`(N, C, H_{out})` or :math:`(C, H_{out})`, where .. math:: H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{kernel\_size}[0] or as given by :attr:`output_size` in the call operator Example:: >>> # xdoctest: +IGNORE_WANT("do other tests modify the global state?") >>> pool = nn.MaxPool1d(2, stride=2, return_indices=True) >>> unpool = nn.MaxUnpool1d(2, stride=2) >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8]]]) >>> output, indices = pool(input) >>> unpool(output, indices) tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]]) >>> # Example showcasing the use of output_size >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8, 9]]]) >>> output, indices = pool(input) >>> unpool(output, indices, output_size=input.size()) tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8., 0.]]]) >>> unpool(output, indices) tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]]) """ kernel_size: _size_1_t stride: _size_1_t padding: _size_1_t def __init__( self, kernel_size: _size_1_t, stride: Optional[_size_1_t] = None, padding: _size_1_t = 0, ) -> None: super().__init__() self.kernel_size = _single(kernel_size) self.stride = _single(stride if (stride is not None) else kernel_size) self.padding = _single(padding) def forward( self, input: Tensor, indices: Tensor, output_size: Optional[list[int]] = None ) -> Tensor: return F.max_unpool1d( input, indices, self.kernel_size, self.stride, self.padding, output_size ) class MaxUnpool2d(_MaxUnpoolNd): r"""Computes a partial inverse of :class:`MaxPool2d`. :class:`MaxPool2d` is not fully invertible, since the non-maximal values are lost. 
:class:`MaxUnpool2d` takes in as input the output of :class:`MaxPool2d` including the indices of the maximal values and computes a partial inverse in which all non-maximal values are set to zero. Note: This operation may behave nondeterministically when the input indices has repeat values. See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information. .. note:: :class:`MaxPool2d` can map several input sizes to the same output sizes. Hence, the inversion process can get ambiguous. To accommodate this, you can provide the needed output size as an additional argument :attr:`output_size` in the forward call. See the Inputs and Example below. Args: kernel_size (int or tuple): Size of the max pooling window. stride (int or tuple): Stride of the max pooling window. It is set to :attr:`kernel_size` by default. padding (int or tuple): Padding that was added to the input Inputs: - `input`: the input Tensor to invert - `indices`: the indices given out by :class:`~torch.nn.MaxPool2d` - `output_size` (optional): the targeted output size Shape: - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where .. math:: H_{out} = (H_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]} .. math:: W_{out} = (W_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]} or as given by :attr:`output_size` in the call operator Example:: >>> pool = nn.MaxPool2d(2, stride=2, return_indices=True) >>> unpool = nn.MaxUnpool2d(2, stride=2) >>> input = torch.tensor([[[[ 1., 2., 3., 4.], [ 5., 6., 7., 8.], [ 9., 10., 11., 12.], [13., 14., 15., 16.]]]]) >>> output, indices = pool(input) >>> unpool(output, indices) tensor([[[[ 0., 0., 0., 0.], [ 0., 6., 0., 8.], [ 0., 0., 0., 0.], [ 0., 14., 0., 16.]]]]) >>> # Now using output_size to resolve an ambiguous size for the inverse >>> input = torch.tensor([[[[ 1., 2., 3., 4., 5.], [ 6., 7., 8., 9., 10.], [11., 12., 13., 14., 15.], [16., 17., 18., 19., 20.]]]]) >>> output, indices = pool(input) >>> # This call will not work without specifying output_size >>> unpool(output, indices, output_size=input.size()) tensor([[[[ 0., 0., 0., 0., 0.], [ 0., 7., 0., 9., 0.], [ 0., 0., 0., 0., 0.], [ 0., 17., 0., 19., 0.]]]]) """ kernel_size: _size_2_t stride: _size_2_t padding: _size_2_t def __init__( self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0, ) -> None: super().__init__() self.kernel_size = _pair(kernel_size) self.stride = _pair(stride if (stride is not None) else kernel_size) self.padding = _pair(padding) def forward( self, input: Tensor, indices: Tensor, output_size: Optional[list[int]] = None ) -> Tensor: return F.max_unpool2d( input, indices, self.kernel_size, self.stride, self.padding, output_size ) class MaxUnpool3d(_MaxUnpoolNd): r"""Computes a partial inverse of :class:`MaxPool3d`. :class:`MaxPool3d` is not fully invertible, since the non-maximal values are lost. :class:`MaxUnpool3d` takes in as input the output of :class:`MaxPool3d` including the indices of the maximal values and computes a partial inverse in which all non-maximal values are set to zero. Note: This operation may behave nondeterministically when the input indices has repeat values. See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information. .. note:: :class:`MaxPool3d` can map several input sizes to the same output sizes. 
Hence, the inversion process can get ambiguous. To accommodate this, you can provide the needed output size as an additional argument :attr:`output_size` in the forward call. See the Inputs section below. Args: kernel_size (int or tuple): Size of the max pooling window. stride (int or tuple): Stride of the max pooling window. It is set to :attr:`kernel_size` by default. padding (int or tuple): Padding that was added to the input Inputs: - `input`: the input Tensor to invert - `indices`: the indices given out by :class:`~torch.nn.MaxPool3d` - `output_size` (optional): the targeted output size Shape: - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where .. math:: D_{out} = (D_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]} .. math:: H_{out} = (H_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]} .. math:: W_{out} = (W_{in} - 1) \times \text{stride[2]} - 2 \times \text{padding[2]} + \text{kernel\_size[2]} or as given by :attr:`output_size` in the call operator Example:: >>> # pool of square window of size=3, stride=2 >>> pool = nn.MaxPool3d(3, stride=2, return_indices=True) >>> unpool = nn.MaxUnpool3d(3, stride=2) >>> output, indices = pool(torch.randn(20, 16, 51, 33, 15)) >>> unpooled_output = unpool(output, indices) >>> unpooled_output.size() torch.Size([20, 16, 51, 33, 15]) """ kernel_size: _size_3_t stride: _size_3_t padding: _size_3_t def __init__( self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0, ) -> None: super().__init__() self.kernel_size = _triple(kernel_size) self.stride = _triple(stride if (stride is not None) else kernel_size) self.padding = _triple(padding) def forward( self, input: Tensor, indices: Tensor, output_size: Optional[list[int]] = None ) -> Tensor: return F.max_unpool3d( input, indices, self.kernel_size, self.stride, self.padding, output_size ) class _AvgPoolNd(Module): __constants__ = [ "kernel_size", "stride", "padding", "ceil_mode", "count_include_pad", ] def extra_repr(self) -> str: return f"kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}" class AvgPool1d(_AvgPoolNd): r"""Applies a 1D average pooling over an input signal composed of several input planes. In the simplest case, the output value of the layer with input size :math:`(N, C, L)`, output :math:`(N, C, L_{out})` and :attr:`kernel_size` :math:`k` can be precisely described as: .. math:: \text{out}(N_i, C_j, l) = \frac{1}{k} \sum_{m=0}^{k-1} \text{input}(N_i, C_j, \text{stride} \times l + m) If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides for :attr:`padding` number of points. Note: When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding or the input. Sliding windows that would start in the right padded region are ignored. The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can each be an ``int`` or a one-element tuple. Args: kernel_size: the size of the window stride: the stride of the window. Default value is :attr:`kernel_size` padding: implicit zero padding to be added on both sides ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape count_include_pad: when True, will include the zero-padding in the averaging calculation Shape: - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`. 
- Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where .. math:: L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor Per the note above, if ``ceil_mode`` is True and :math:`(L_{out} - 1) \times \text{stride} \geq L_{in} + \text{padding}`, we skip the last window as it would start in the right padded region, resulting in :math:`L_{out}` being reduced by one. Examples:: >>> # pool with window of size=3, stride=2 >>> m = nn.AvgPool1d(3, stride=2) >>> m(torch.tensor([[[1., 2, 3, 4, 5, 6, 7]]])) tensor([[[2., 4., 6.]]]) """ kernel_size: _size_1_t stride: _size_1_t padding: _size_1_t ceil_mode: bool count_include_pad: bool def __init__( self, kernel_size: _size_1_t, stride: _size_1_t = None, padding: _size_1_t = 0, ceil_mode: bool = False, count_include_pad: bool = True, ) -> None: super().__init__() self.kernel_size = _single(kernel_size) self.stride = _single(stride if stride is not None else kernel_size) self.padding = _single(padding) self.ceil_mode = ceil_mode self.count_include_pad = count_include_pad def forward(self, input: Tensor) -> Tensor: return F.avg_pool1d( input, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad, ) class AvgPool2d(_AvgPoolNd): r"""Applies a 2D average pooling over an input signal composed of several input planes. In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`, output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)` can be precisely described as: .. math:: out(N_i, C_j, h, w) = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1} input(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n) If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides for :attr:`padding` number of points. Note: When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding or the input. Sliding windows that would start in the right padded region are ignored. The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can either be: - a single ``int`` -- in which case the same value is used for the height and width dimension - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension, and the second `int` for the width dimension Args: kernel_size: the size of the window stride: the stride of the window. Default value is :attr:`kernel_size` padding: implicit zero padding to be added on both sides ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape count_include_pad: when True, will include the zero-padding in the averaging calculation divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used. Shape: - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where .. math:: H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor .. math:: W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor Per the note above, if ``ceil_mode`` is True and :math:`(H_{out} - 1)\times \text{stride}[0]\geq H_{in} + \text{padding}[0]`, we skip the last window as it would start in the bottom padded region, resulting in :math:`H_{out}` being reduced by one. The same applies for :math:`W_{out}`. 
Examples:: >>> # pool of square window of size=3, stride=2 >>> m = nn.AvgPool2d(3, stride=2) >>> # pool of non-square window >>> m = nn.AvgPool2d((3, 2), stride=(2, 1)) >>> input = torch.randn(20, 16, 50, 32) >>> output = m(input) """ __constants__ = [ "kernel_size", "stride", "padding", "ceil_mode", "count_include_pad", "divisor_override", ] kernel_size: _size_2_t stride: _size_2_t padding: _size_2_t ceil_mode: bool count_include_pad: bool def __init__( self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None, ) -> None: super().__init__() self.kernel_size = kernel_size self.stride = stride if (stride is not None) else kernel_size self.padding = padding self.ceil_mode = ceil_mode self.count_include_pad = count_include_pad self.divisor_override = divisor_override def forward(self, input: Tensor) -> Tensor: return F.avg_pool2d( input, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override, ) class AvgPool3d(_AvgPoolNd): r"""Applies a 3D average pooling over an input signal composed of several input planes. In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`, output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)` can be precisely described as: .. math:: \begin{aligned} \text{out}(N_i, C_j, d, h, w) ={} & \sum_{k=0}^{kD-1} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1} \\ & \frac{\text{input}(N_i, C_j, \text{stride}[0] \times d + k, \text{stride}[1] \times h + m, \text{stride}[2] \times w + n)} {kD \times kH \times kW} \end{aligned} If :attr:`padding` is non-zero, then the input is implicitly zero-padded on all three sides for :attr:`padding` number of points. Note: When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding or the input. Sliding windows that would start in the right padded region are ignored. The parameters :attr:`kernel_size`, :attr:`stride` can either be: - a single ``int`` -- in which case the same value is used for the depth, height and width dimension - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension, the second `int` for the height dimension and the third `int` for the width dimension Args: kernel_size: the size of the window stride: the stride of the window. Default value is :attr:`kernel_size` padding: implicit zero padding to be added on all three sides ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape count_include_pad: when True, will include the zero-padding in the averaging calculation divisor_override: if specified, it will be used as divisor, otherwise :attr:`kernel_size` will be used Shape: - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where .. math:: D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor .. math:: H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor .. 
math:: W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor Per the note above, if ``ceil_mode`` is True and :math:`(D_{out} - 1)\times \text{stride}[0]\geq D_{in} + \text{padding}[0]`, we skip the last window as it would start in the padded region, resulting in :math:`D_{out}` being reduced by one. The same applies for :math:`W_{out}` and :math:`H_{out}`. Examples:: >>> # pool of square window of size=3, stride=2 >>> m = nn.AvgPool3d(3, stride=2) >>> # pool of non-square window >>> m = nn.AvgPool3d((3, 2, 2), stride=(2, 1, 2)) >>> input = torch.randn(20, 16, 50, 44, 31) >>> output = m(input) """ __constants__ = [ "kernel_size", "stride", "padding", "ceil_mode", "count_include_pad", "divisor_override", ] kernel_size: _size_3_t stride: _size_3_t padding: _size_3_t ceil_mode: bool count_include_pad: bool def __init__( self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None, ) -> None: super().__init__() self.kernel_size = kernel_size self.stride = stride if (stride is not None) else kernel_size self.padding = padding self.ceil_mode = ceil_mode self.count_include_pad = count_include_pad self.divisor_override = divisor_override def forward(self, input: Tensor) -> Tensor: return F.avg_pool3d( input, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override, ) def __setstate__(self, d): super().__setstate__(d) self.__dict__.setdefault("padding", 0) self.__dict__.setdefault("ceil_mode", False) self.__dict__.setdefault("count_include_pad", True) class FractionalMaxPool2d(Module): r"""Applies a 2D fractional max pooling over an input signal composed of several input planes. Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic step size determined by the target output size. The number of output features is equal to the number of input planes. .. note:: Exactly one of ``output_size`` or ``output_ratio`` must be defined. Args: kernel_size: the size of the window to take a max over. Can be a single number k (for a square kernel of k x k) or a tuple `(kh, kw)` output_size: the target output size of the image of the form `oH x oW`. Can be a tuple `(oH, oW)` or a single number oH for a square image `oH x oH`. Note that we must have :math:`kH + oH - 1 <= H_{in}` and :math:`kW + oW - 1 <= W_{in}` output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given. This has to be a number or tuple in the range (0, 1). Note that we must have :math:`kH + (output\_ratio\_H * H_{in}) - 1 <= H_{in}` and :math:`kW + (output\_ratio\_W * W_{in}) - 1 <= W_{in}` return_indices: if ``True``, will return the indices along with the outputs. Useful to pass to :meth:`nn.MaxUnpool2d`. Default: ``False`` Shape: - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where :math:`(H_{out}, W_{out})=\text{output\_size}` or :math:`(H_{out}, W_{out})=\text{output\_ratio} \times (H_{in}, W_{in})`. 
Examples: >>> # pool of square window of size=3, and target output size 13x12 >>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12)) >>> # pool of square window and target output size being half of input image size >>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5)) >>> input = torch.randn(20, 16, 50, 32) >>> output = m(input) .. _Fractional MaxPooling: https://arxiv.org/abs/1412.6071 """ __constants__ = ["kernel_size", "return_indices", "output_size", "output_ratio"] kernel_size: _size_2_t return_indices: bool output_size: _size_2_t output_ratio: _ratio_2_t def __init__( self, kernel_size: _size_2_t, output_size: Optional[_size_2_t] = None, output_ratio: Optional[_ratio_2_t] = None, return_indices: bool = False, _random_samples=None, ) -> None: super().__init__() self.kernel_size = _pair(kernel_size) self.return_indices = return_indices self.register_buffer("_random_samples", _random_samples) self.output_size = _pair(output_size) if output_size is not None else None self.output_ratio = _pair(output_ratio) if output_ratio is not None else None if output_size is None and output_ratio is None: raise ValueError( "FractionalMaxPool2d requires specifying either " "an output size, or a pooling ratio" ) if output_size is not None and output_ratio is not None: raise ValueError( "only one of output_size and output_ratio may be specified" ) if self.output_ratio is not None: if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1): raise ValueError( f"output_ratio must be between 0 and 1 (got {output_ratio})" ) def forward(self, input: Tensor): return F.fractional_max_pool2d( input, self.kernel_size, self.output_size, self.output_ratio, self.return_indices, _random_samples=self._random_samples, ) class FractionalMaxPool3d(Module): r"""Applies a 3D fractional max pooling over an input signal composed of several input planes. Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic step size determined by the target output size. The number of output features is equal to the number of input planes. .. note:: Exactly one of ``output_size`` or ``output_ratio`` must be defined. Args: kernel_size: the size of the window to take a max over. Can be a single number k (for a square kernel of k x k x k) or a tuple `(kt x kh x kw)` output_size: the target output size of the image of the form `oT x oH x oW`. Can be a tuple `(oT, oH, oW)` or a single number oH for a square image `oH x oH x oH` output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given. This has to be a number or tuple in the range (0, 1) return_indices: if ``True``, will return the indices along with the outputs. Useful to pass to :meth:`nn.MaxUnpool3d`. Default: ``False`` Shape: - Input: :math:`(N, C, T_{in}, H_{in}, W_{in})` or :math:`(C, T_{in}, H_{in}, W_{in})`. 
- Output: :math:`(N, C, T_{out}, H_{out}, W_{out})` or :math:`(C, T_{out}, H_{out}, W_{out})`, where :math:`(T_{out}, H_{out}, W_{out})=\text{output\_size}` or :math:`(T_{out}, H_{out}, W_{out})=\text{output\_ratio} \times (T_{in}, H_{in}, W_{in})` Examples: >>> # pool of cubic window of size=3, and target output size 13x12x11 >>> m = nn.FractionalMaxPool3d(3, output_size=(13, 12, 11)) >>> # pool of cubic window and target output size being half of input size >>> m = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5)) >>> input = torch.randn(20, 16, 50, 32, 16) >>> output = m(input) .. _Fractional MaxPooling: https://arxiv.org/abs/1412.6071 """ __constants__ = ["kernel_size", "return_indices", "output_size", "output_ratio"] kernel_size: _size_3_t return_indices: bool output_size: _size_3_t output_ratio: _ratio_3_t def __init__( self, kernel_size: _size_3_t, output_size: Optional[_size_3_t] = None, output_ratio: Optional[_ratio_3_t] = None, return_indices: bool = False, _random_samples=None, ) -> None: super().__init__() self.kernel_size = _triple(kernel_size) self.return_indices = return_indices self.register_buffer("_random_samples", _random_samples) self.output_size = _triple(output_size) if output_size is not None else None self.output_ratio = _triple(output_ratio) if output_ratio is not None else None if output_size is None and output_ratio is None: raise ValueError( "FractionalMaxPool3d requires specifying either " "an output size, or a pooling ratio" ) if output_size is not None and output_ratio is not None: raise ValueError( "only one of output_size and output_ratio may be specified" ) if self.output_ratio is not None: if not ( 0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1 and 0 < self.output_ratio[2] < 1 ): raise ValueError( f"output_ratio must be between 0 and 1 (got {output_ratio})" ) def forward(self, input: Tensor): return F.fractional_max_pool3d( input, self.kernel_size, self.output_size, self.output_ratio, self.return_indices, _random_samples=self._random_samples, ) class _LPPoolNd(Module): __constants__ = ["norm_type", "kernel_size", "stride", "ceil_mode"] norm_type: float ceil_mode: bool def __init__( self, norm_type: float, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None, ceil_mode: bool = False, ) -> None: super().__init__() self.norm_type = norm_type self.kernel_size = kernel_size self.stride = stride self.ceil_mode = ceil_mode def extra_repr(self) -> str: return ( "norm_type={norm_type}, kernel_size={kernel_size}, stride={stride}, " "ceil_mode={ceil_mode}".format(**self.__dict__) ) class LPPool1d(_LPPoolNd): r"""Applies a 1D power-average pooling over an input signal composed of several input planes. On each window, the function computed is: .. math:: f(X) = \sqrt[p]{\sum_{x \in X} x^{p}} - At p = :math:`\infty`, one gets Max Pooling - At p = 1, one gets Sum Pooling (which is proportional to Average Pooling) .. note:: If the sum to the power of `p` is zero, the gradient of this function is not defined. This implementation will set the gradient to zero in this case. Args: kernel_size: a single int, the size of the window stride: a single int, the stride of the window. Default value is :attr:`kernel_size` ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape Shape: - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where .. 
math:: L_{out} = \left\lfloor\frac{L_{in} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor Examples:: >>> # power-2 pool of window of length 3, with stride 2. >>> m = nn.LPPool1d(2, 3, stride=2) >>> input = torch.randn(20, 16, 50) >>> output = m(input) """ kernel_size: _size_1_t stride: _size_1_t def forward(self, input: Tensor) -> Tensor: return F.lp_pool1d( input, float(self.norm_type), self.kernel_size, self.stride, self.ceil_mode ) class LPPool2d(_LPPoolNd): r"""Applies a 2D power-average pooling over an input signal composed of several input planes. On each window, the function computed is: .. math:: f(X) = \sqrt[p]{\sum_{x \in X} x^{p}} - At p = :math:`\infty`, one gets Max Pooling - At p = 1, one gets Sum Pooling (which is proportional to average pooling) The parameters :attr:`kernel_size`, :attr:`stride` can either be: - a single ``int`` -- in which case the same value is used for the height and width dimension - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension, and the second `int` for the width dimension .. note:: If the sum to the power of `p` is zero, the gradient of this function is not defined. This implementation will set the gradient to zero in this case. Args: kernel_size: the size of the window stride: the stride of the window. Default value is :attr:`kernel_size` ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape Shape: - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where .. math:: H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor .. math:: W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor Examples:: >>> # power-2 pool of square window of size=3, stride=2 >>> m = nn.LPPool2d(2, 3, stride=2) >>> # pool of non-square window of power 1.2 >>> m = nn.LPPool2d(1.2, (3, 2), stride=(2, 1)) >>> input = torch.randn(20, 16, 50, 32) >>> output = m(input) """ kernel_size: _size_2_t stride: _size_2_t def forward(self, input: Tensor) -> Tensor: return F.lp_pool2d( input, float(self.norm_type), self.kernel_size, self.stride, self.ceil_mode ) class LPPool3d(_LPPoolNd): r"""Applies a 3D power-average pooling over an input signal composed of several input planes. On each window, the function computed is: .. math:: f(X) = \sqrt[p]{\sum_{x \in X} x^{p}} - At p = :math:`\infty`, one gets Max Pooling - At p = 1, one gets Sum Pooling (which is proportional to average pooling) The parameters :attr:`kernel_size`, :attr:`stride` can either be: - a single ``int`` -- in which case the same value is used for the height, width and depth dimension - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension, the second `int` for the height dimension and the third `int` for the width dimension .. note:: If the sum to the power of `p` is zero, the gradient of this function is not defined. This implementation will set the gradient to zero in this case. Args: kernel_size: the size of the window stride: the stride of the window. Default value is :attr:`kernel_size` ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape Shape: - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where .. 
math:: D_{out} = \left\lfloor\frac{D_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor .. math:: H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor .. math:: W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor Examples:: >>> # power-2 pool of square window of size=3, stride=2 >>> m = nn.LPPool3d(2, 3, stride=2) >>> # pool of non-square window of power 1.2 >>> m = nn.LPPool3d(1.2, (3, 2, 2), stride=(2, 1, 2)) >>> input = torch.randn(20, 16, 50, 44, 31) >>> output = m(input) """ kernel_size: _size_3_t stride: _size_3_t def forward(self, input: Tensor) -> Tensor: return F.lp_pool3d( input, float(self.norm_type), self.kernel_size, self.stride, self.ceil_mode ) class _AdaptiveMaxPoolNd(Module): __constants__ = ["output_size", "return_indices"] return_indices: bool def __init__( self, output_size: _size_any_opt_t, return_indices: bool = False ) -> None: super().__init__() self.output_size = output_size self.return_indices = return_indices def extra_repr(self) -> str: return f"output_size={self.output_size}" # FIXME (by @ssnl): Improve adaptive pooling docs: specify what the input and # output shapes are, and how the operation computes output. class AdaptiveMaxPool1d(_AdaptiveMaxPoolNd): r"""Applies a 1D adaptive max pooling over an input signal composed of several input planes. The output size is :math:`L_{out}`, for any input size. The number of output features is equal to the number of input planes. Args: output_size: the target output size :math:`L_{out}`. return_indices: if ``True``, will return the indices along with the outputs. Useful to pass to nn.MaxUnpool1d. Default: ``False`` Shape: - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where :math:`L_{out}=\text{output\_size}`. Examples: >>> # target output size of 5 >>> m = nn.AdaptiveMaxPool1d(5) >>> input = torch.randn(1, 64, 8) >>> output = m(input) """ output_size: _size_1_t def forward(self, input: Tensor): return F.adaptive_max_pool1d(input, self.output_size, self.return_indices) class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd): r"""Applies a 2D adaptive max pooling over an input signal composed of several input planes. The output is of size :math:`H_{out} \times W_{out}`, for any input size. The number of output features is equal to the number of input planes. Args: output_size: the target output size of the image of the form :math:`H_{out} \times W_{out}`. Can be a tuple :math:`(H_{out}, W_{out})` or a single :math:`H_{out}` for a square image :math:`H_{out} \times H_{out}`. :math:`H_{out}` and :math:`W_{out}` can be either a ``int``, or ``None`` which means the size will be the same as that of the input. return_indices: if ``True``, will return the indices along with the outputs. Useful to pass to nn.MaxUnpool2d. Default: ``False`` Shape: - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where :math:`(H_{out}, W_{out})=\text{output\_size}`. 
Examples: >>> # target output size of 5x7 >>> m = nn.AdaptiveMaxPool2d((5, 7)) >>> input = torch.randn(1, 64, 8, 9) >>> output = m(input) >>> # target output size of 7x7 (square) >>> m = nn.AdaptiveMaxPool2d(7) >>> input = torch.randn(1, 64, 10, 9) >>> output = m(input) >>> # target output size of 10x7 >>> m = nn.AdaptiveMaxPool2d((None, 7)) >>> input = torch.randn(1, 64, 10, 9) >>> output = m(input) """ output_size: _size_2_opt_t def forward(self, input: Tensor): return F.adaptive_max_pool2d(input, self.output_size, self.return_indices) class AdaptiveMaxPool3d(_AdaptiveMaxPoolNd): r"""Applies a 3D adaptive max pooling over an input signal composed of several input planes. The output is of size :math:`D_{out} \times H_{out} \times W_{out}`, for any input size. The number of output features is equal to the number of input planes. Args: output_size: the target output size of the image of the form :math:`D_{out} \times H_{out} \times W_{out}`. Can be a tuple :math:`(D_{out}, H_{out}, W_{out})` or a single :math:`D_{out}` for a cube :math:`D_{out} \times D_{out} \times D_{out}`. :math:`D_{out}`, :math:`H_{out}` and :math:`W_{out}` can be either a ``int``, or ``None`` which means the size will be the same as that of the input. return_indices: if ``True``, will return the indices along with the outputs. Useful to pass to nn.MaxUnpool3d. Default: ``False`` Shape: - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where :math:`(D_{out}, H_{out}, W_{out})=\text{output\_size}`. Examples: >>> # target output size of 5x7x9 >>> m = nn.AdaptiveMaxPool3d((5, 7, 9)) >>> input = torch.randn(1, 64, 8, 9, 10) >>> output = m(input) >>> # target output size of 7x7x7 (cube) >>> m = nn.AdaptiveMaxPool3d(7) >>> input = torch.randn(1, 64, 10, 9, 8) >>> output = m(input) >>> # target output size of 7x9x8 >>> m = nn.AdaptiveMaxPool3d((7, None, None)) >>> input = torch.randn(1, 64, 10, 9, 8) >>> output = m(input) """ output_size: _size_3_opt_t def forward(self, input: Tensor): return F.adaptive_max_pool3d(input, self.output_size, self.return_indices) class _AdaptiveAvgPoolNd(Module): __constants__ = ["output_size"] def __init__(self, output_size: _size_any_opt_t) -> None: super().__init__() self.output_size = output_size def extra_repr(self) -> str: return f"output_size={self.output_size}" class AdaptiveAvgPool1d(_AdaptiveAvgPoolNd): r"""Applies a 1D adaptive average pooling over an input signal composed of several input planes. The output size is :math:`L_{out}`, for any input size. The number of output features is equal to the number of input planes. Args: output_size: the target output size :math:`L_{out}`. Shape: - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where :math:`L_{out}=\text{output\_size}`. Examples: >>> # target output size of 5 >>> m = nn.AdaptiveAvgPool1d(5) >>> input = torch.randn(1, 64, 8) >>> output = m(input) """ output_size: _size_1_t def forward(self, input: Tensor) -> Tensor: return F.adaptive_avg_pool1d(input, self.output_size) class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd): r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes. The output is of size H x W, for any input size. The number of output features is equal to the number of input planes. Args: output_size: the target output size of the image of the form H x W. 
Can be a tuple (H, W) or a single H for a square image H x H. H and W can be either a ``int``, or ``None`` which means the size will be the same as that of the input. Shape: - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`. - Output: :math:`(N, C, S_{0}, S_{1})` or :math:`(C, S_{0}, S_{1})`, where :math:`S=\text{output\_size}`. Examples: >>> # target output size of 5x7 >>> m = nn.AdaptiveAvgPool2d((5, 7)) >>> input = torch.randn(1, 64, 8, 9) >>> output = m(input) >>> # target output size of 7x7 (square) >>> m = nn.AdaptiveAvgPool2d(7) >>> input = torch.randn(1, 64, 10, 9) >>> output = m(input) >>> # target output size of 10x7 >>> m = nn.AdaptiveAvgPool2d((None, 7)) >>> input = torch.randn(1, 64, 10, 9) >>> output = m(input) """ output_size: _size_2_opt_t def forward(self, input: Tensor) -> Tensor: return F.adaptive_avg_pool2d(input, self.output_size) class AdaptiveAvgPool3d(_AdaptiveAvgPoolNd): r"""Applies a 3D adaptive average pooling over an input signal composed of several input planes. The output is of size D x H x W, for any input size. The number of output features is equal to the number of input planes. Args: output_size: the target output size of the form D x H x W. Can be a tuple (D, H, W) or a single number D for a cube D x D x D. D, H and W can be either a ``int``, or ``None`` which means the size will be the same as that of the input. Shape: - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. - Output: :math:`(N, C, S_{0}, S_{1}, S_{2})` or :math:`(C, S_{0}, S_{1}, S_{2})`, where :math:`S=\text{output\_size}`. Examples: >>> # target output size of 5x7x9 >>> m = nn.AdaptiveAvgPool3d((5, 7, 9)) >>> input = torch.randn(1, 64, 8, 9, 10) >>> output = m(input) >>> # target output size of 7x7x7 (cube) >>> m = nn.AdaptiveAvgPool3d(7) >>> input = torch.randn(1, 64, 10, 9, 8) >>> output = m(input) >>> # target output size of 7x9x8 >>> m = nn.AdaptiveAvgPool3d((7, None, None)) >>> input = torch.randn(1, 64, 10, 9, 8) >>> output = m(input) """ output_size: _size_3_opt_t def forward(self, input: Tensor) -> Tensor: return F.adaptive_avg_pool3d(input, self.output_size) ```
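A minimal usage sketch (not part of the packaged source) contrasting the pooling modules defined above: `FractionalMaxPool2d` driven by either `output_size` or `output_ratio`, `LPPool2d` power-average pooling, and adaptive pooling where `None` keeps an input dimension unchanged. The tensor shapes are arbitrary; the expected output sizes in the comments follow from the formulas in the docstrings.

```py
import torch
from torch import nn

x = torch.randn(1, 16, 20, 20)  # (N, C, H, W)

# Fractional max pooling: exactly one of output_size / output_ratio is given.
frac_abs = nn.FractionalMaxPool2d(3, output_size=(13, 12))
frac_rel = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5), return_indices=True)
print(frac_abs(x).shape)      # torch.Size([1, 16, 13, 12])
out, idx = frac_rel(x)
print(out.shape, idx.shape)   # torch.Size([1, 16, 10, 10]) for both

# Power-average (LP) pooling: p=2 norm over 3x3 windows with stride 2.
lp = nn.LPPool2d(2, 3, stride=2)
print(lp(x).shape)            # torch.Size([1, 16, 9, 9])

# Adaptive pooling: the output spatial size is fixed regardless of input size;
# a None entry keeps that dimension equal to the input's.
amax = nn.AdaptiveMaxPool2d((5, 7))
aavg = nn.AdaptiveAvgPool2d((None, 7))
print(amax(x).shape)          # torch.Size([1, 16, 5, 7])
print(aavg(x).shape)          # torch.Size([1, 16, 20, 7])
```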
=============================================================================================================== SOURCE CODE FILE: rnn.py LINES: 1 SIZE: 74.30 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\rnn.py ENCODING: utf-8 ```py # mypy: allow-untyped-decorators # mypy: allow-untyped-defs import math import numbers import warnings import weakref from typing import Optional, overload from typing_extensions import deprecated import torch from torch import _VF, Tensor from torch.nn import init from torch.nn.parameter import Parameter from torch.nn.utils.rnn import PackedSequence from .module import Module __all__ = [ "RNNBase", "RNN", "LSTM", "GRU", "RNNCellBase", "RNNCell", "LSTMCell", "GRUCell", ] _rnn_impls = { "RNN_TANH": _VF.rnn_tanh, "RNN_RELU": _VF.rnn_relu, } def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor: return tensor.index_select(dim, permutation) @deprecated( "`apply_permutation` is deprecated, please use `tensor.index_select(dim, permutation)` instead", category=FutureWarning, ) def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor: return _apply_permutation(tensor, permutation, dim) class RNNBase(Module): r"""Base class for RNN modules (RNN, LSTM, GRU). Implements aspects of RNNs shared by the RNN, LSTM, and GRU classes, such as module initialization and utility methods for parameter storage management. .. note:: The forward method is not implemented by the RNNBase class. .. note:: LSTM and GRU classes override some methods implemented by RNNBase. """ __constants__ = [ "mode", "input_size", "hidden_size", "num_layers", "bias", "batch_first", "dropout", "bidirectional", "proj_size", ] __jit_unused_properties__ = ["all_weights"] mode: str input_size: int hidden_size: int num_layers: int bias: bool batch_first: bool dropout: float bidirectional: bool proj_size: int def __init__( self, mode: str, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True, batch_first: bool = False, dropout: float = 0.0, bidirectional: bool = False, proj_size: int = 0, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.mode = mode self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = float(dropout) self.bidirectional = bidirectional self.proj_size = proj_size self._flat_weight_refs: list[Optional[weakref.ReferenceType[Parameter]]] = [] num_directions = 2 if bidirectional else 1 if ( not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or isinstance(dropout, bool) ): raise ValueError( "dropout should be a number in range [0, 1] " "representing the probability of an element being " "zeroed" ) if dropout > 0 and num_layers == 1: warnings.warn( "dropout option adds dropout after all but last " "recurrent layer, so non-zero dropout expects " f"num_layers greater than 1, but got dropout={dropout} and " f"num_layers={num_layers}" ) if not isinstance(hidden_size, int): raise TypeError( f"hidden_size should be of type int, got: {type(hidden_size).__name__}" ) if hidden_size <= 0: raise ValueError("hidden_size must be greater than zero") if num_layers <= 0: raise ValueError("num_layers must be greater than zero") if proj_size < 0: raise ValueError( "proj_size should be a positive integer or zero to disable projections" ) if proj_size >= hidden_size: raise ValueError("proj_size has to be smaller than hidden_size") if mode == "LSTM": gate_size 
= 4 * hidden_size elif mode == "GRU": gate_size = 3 * hidden_size elif mode == "RNN_TANH": gate_size = hidden_size elif mode == "RNN_RELU": gate_size = hidden_size else: raise ValueError("Unrecognized RNN mode: " + mode) self._flat_weights_names = [] self._all_weights = [] for layer in range(num_layers): for direction in range(num_directions): real_hidden_size = proj_size if proj_size > 0 else hidden_size layer_input_size = ( input_size if layer == 0 else real_hidden_size * num_directions ) w_ih = Parameter( torch.empty((gate_size, layer_input_size), **factory_kwargs) ) w_hh = Parameter( torch.empty((gate_size, real_hidden_size), **factory_kwargs) ) b_ih = Parameter(torch.empty(gate_size, **factory_kwargs)) # Second bias vector included for CuDNN compatibility. Only one # bias vector is needed in standard definition. b_hh = Parameter(torch.empty(gate_size, **factory_kwargs)) layer_params: tuple[Tensor, ...] = () if self.proj_size == 0: if bias: layer_params = (w_ih, w_hh, b_ih, b_hh) else: layer_params = (w_ih, w_hh) else: w_hr = Parameter( torch.empty((proj_size, hidden_size), **factory_kwargs) ) if bias: layer_params = (w_ih, w_hh, b_ih, b_hh, w_hr) else: layer_params = (w_ih, w_hh, w_hr) suffix = "_reverse" if direction == 1 else "" param_names = ["weight_ih_l{}{}", "weight_hh_l{}{}"] if bias: param_names += ["bias_ih_l{}{}", "bias_hh_l{}{}"] if self.proj_size > 0: param_names += ["weight_hr_l{}{}"] param_names = [x.format(layer, suffix) for x in param_names] for name, param in zip(param_names, layer_params): setattr(self, name, param) self._flat_weights_names.extend(param_names) self._all_weights.append(param_names) self._init_flat_weights() self.reset_parameters() def _init_flat_weights(self): self._flat_weights = [ getattr(self, wn) if hasattr(self, wn) else None for wn in self._flat_weights_names ] self._flat_weight_refs = [ weakref.ref(w) if w is not None else None for w in self._flat_weights ] self.flatten_parameters() def __setattr__(self, attr, value): if hasattr(self, "_flat_weights_names") and attr in self._flat_weights_names: # keep self._flat_weights up to date if you do self.weight = ... idx = self._flat_weights_names.index(attr) self._flat_weights[idx] = value super().__setattr__(attr, value) def flatten_parameters(self) -> None: """Reset parameter data pointer so that they can use faster code paths. Right now, this works only if the module is on the GPU and cuDNN is enabled. Otherwise, it's a no-op. """ # Short-circuits if _flat_weights is only partially instantiated if len(self._flat_weights) != len(self._flat_weights_names): return for w in self._flat_weights: if not isinstance(w, Tensor): return # Short-circuits if any tensor in self._flat_weights is not acceptable to cuDNN # or the tensors in _flat_weights are of different dtypes first_fw = self._flat_weights[0] # type: ignore[union-attr] dtype = first_fw.dtype # type: ignore[union-attr] for fw in self._flat_weights: if ( not isinstance(fw, Tensor) or not (fw.dtype == dtype) or not fw.is_cuda or not torch.backends.cudnn.is_acceptable(fw) ): return # If any parameters alias, we fall back to the slower, copying code path. This is # a sufficient check, because overlapping parameter buffers that don't completely # alias would break the assumptions of the uniqueness check in # Module.named_parameters(). 
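        # A concrete case of such aliasing: assigning the same Parameter object to
        # two of the registered weight names (weight tying) makes their data_ptr()
        # values collide, so the flattening below is skipped for safety.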
unique_data_ptrs = { p.data_ptr() for p in self._flat_weights # type: ignore[union-attr] } if len(unique_data_ptrs) != len(self._flat_weights): return with torch.cuda.device_of(first_fw): import torch.backends.cudnn.rnn as rnn # Note: no_grad() is necessary since _cudnn_rnn_flatten_weight is # an inplace operation on self._flat_weights with torch.no_grad(): if torch._use_cudnn_rnn_flatten_weight(): num_weights = 4 if self.bias else 2 if self.proj_size > 0: num_weights += 1 torch._cudnn_rnn_flatten_weight( self._flat_weights, # type: ignore[arg-type] num_weights, self.input_size, rnn.get_cudnn_mode(self.mode), self.hidden_size, self.proj_size, self.num_layers, self.batch_first, bool(self.bidirectional), ) def _apply(self, fn, recurse=True): self._flat_weight_refs = [] ret = super()._apply(fn, recurse) # Resets _flat_weights # Note: be v. careful before removing this, as 3rd party device types # likely rely on this behavior to properly .to() modules like LSTM. self._init_flat_weights() return ret def reset_parameters(self) -> None: stdv = 1.0 / math.sqrt(self.hidden_size) if self.hidden_size > 0 else 0 for weight in self.parameters(): init.uniform_(weight, -stdv, stdv) def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None: if not torch.jit.is_scripting(): if ( input.dtype != self._flat_weights[0].dtype # type: ignore[union-attr] and not torch._C._is_any_autocast_enabled() ): raise ValueError( f"input must have the type {self._flat_weights[0].dtype}, got type {input.dtype}" # type: ignore[union-attr] ) expected_input_dim = 2 if batch_sizes is not None else 3 if input.dim() != expected_input_dim: raise RuntimeError( f"input must have {expected_input_dim} dimensions, got {input.dim()}" ) if self.input_size != input.size(-1): raise RuntimeError( f"input.size(-1) must be equal to input_size. Expected {self.input_size}, got {input.size(-1)}" ) def get_expected_hidden_size( self, input: Tensor, batch_sizes: Optional[Tensor] ) -> tuple[int, int, int]: if batch_sizes is not None: mini_batch = int(batch_sizes[0]) else: mini_batch = input.size(0) if self.batch_first else input.size(1) num_directions = 2 if self.bidirectional else 1 if self.proj_size > 0: expected_hidden_size = ( self.num_layers * num_directions, mini_batch, self.proj_size, ) else: expected_hidden_size = ( self.num_layers * num_directions, mini_batch, self.hidden_size, ) return expected_hidden_size def check_hidden_size( self, hx: Tensor, expected_hidden_size: tuple[int, int, int], msg: str = "Expected hidden size {}, got {}", ) -> None: if hx.size() != expected_hidden_size: raise RuntimeError(msg.format(expected_hidden_size, list(hx.size()))) def _weights_have_changed(self): # Returns True if the weight tensors have changed since the last forward pass. # This is the case when used with torch.func.functional_call(), for example. 
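        # Illustrative example (not part of the original comment): under
        #   torch.func.functional_call(rnn, new_params, (inp,))
        # the weight attributes temporarily point at tensors from ``new_params``,
        # so the weak references captured in ``_flat_weight_refs`` no longer match
        # the live attributes and the flat-weight cache has to be rebuilt.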
weights_changed = False for ref, name in zip(self._flat_weight_refs, self._flat_weights_names): weight = getattr(self, name) if hasattr(self, name) else None if weight is not None and ref is not None and ref() is not weight: weights_changed = True break return weights_changed def check_forward_args( self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor] ): self.check_input(input, batch_sizes) expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes) self.check_hidden_size(hidden, expected_hidden_size) def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]): if permutation is None: return hx return _apply_permutation(hx, permutation) def extra_repr(self) -> str: s = "{input_size}, {hidden_size}" if self.proj_size != 0: s += ", proj_size={proj_size}" if self.num_layers != 1: s += ", num_layers={num_layers}" if self.bias is not True: s += ", bias={bias}" if self.batch_first is not False: s += ", batch_first={batch_first}" if self.dropout != 0: s += ", dropout={dropout}" if self.bidirectional is not False: s += ", bidirectional={bidirectional}" return s.format(**self.__dict__) def _update_flat_weights(self): if not torch.jit.is_scripting(): if self._weights_have_changed(): self._init_flat_weights() def __getstate__(self): # If weights have been changed, update the _flat_weights in __getstate__ here. self._update_flat_weights() # Don't serialize the weight references. state = self.__dict__.copy() del state["_flat_weight_refs"] return state def __setstate__(self, d): super().__setstate__(d) if "all_weights" in d: self._all_weights = d["all_weights"] # In PyTorch 1.8 we added a proj_size member variable to LSTM. # LSTMs that were serialized via torch.save(module) before PyTorch 1.8 # don't have it, so to preserve compatibility we set proj_size here. if "proj_size" not in d: self.proj_size = 0 if not isinstance(self._all_weights[0][0], str): num_layers = self.num_layers num_directions = 2 if self.bidirectional else 1 self._flat_weights_names = [] self._all_weights = [] for layer in range(num_layers): for direction in range(num_directions): suffix = "_reverse" if direction == 1 else "" weights = [ "weight_ih_l{}{}", "weight_hh_l{}{}", "bias_ih_l{}{}", "bias_hh_l{}{}", "weight_hr_l{}{}", ] weights = [x.format(layer, suffix) for x in weights] if self.bias: if self.proj_size > 0: self._all_weights += [weights] self._flat_weights_names.extend(weights) else: self._all_weights += [weights[:4]] self._flat_weights_names.extend(weights[:4]) else: if self.proj_size > 0: self._all_weights += [weights[:2]] + [weights[-1:]] self._flat_weights_names.extend( weights[:2] + [weights[-1:]] ) else: self._all_weights += [weights[:2]] self._flat_weights_names.extend(weights[:2]) self._flat_weights = [ getattr(self, wn) if hasattr(self, wn) else None for wn in self._flat_weights_names ] self._flat_weight_refs = [ weakref.ref(w) if w is not None else None for w in self._flat_weights ] @property def all_weights(self) -> list[list[Parameter]]: return [ [getattr(self, weight) for weight in weights] for weights in self._all_weights ] def _replicate_for_data_parallel(self): replica = super()._replicate_for_data_parallel() # Need to copy these caches, otherwise the replica will share the same # flat weights list. 
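        # A shallow copy of the list objects is sufficient here: the Parameter
        # elements are still shared at this point; the goal is only that later
        # reassignment on the replica (via the __setattr__ bookkeeping above)
        # mutates the replica's own lists instead of the original module's.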
replica._flat_weights = replica._flat_weights[:] replica._flat_weights_names = replica._flat_weights_names[:] return replica class RNN(RNNBase): r"""__init__(input_size,hidden_size,num_layers=1,nonlinearity='tanh',bias=True,batch_first=False,dropout=0.0,bidirectional=False,device=None,dtype=None) Apply a multi-layer Elman RNN with :math:`\tanh` or :math:`\text{ReLU}` non-linearity to an input sequence. For each element in the input sequence, each layer computes the following function: .. math:: h_t = \tanh(x_t W_{ih}^T + b_{ih} + h_{t-1}W_{hh}^T + b_{hh}) where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input at time `t`, and :math:`h_{(t-1)}` is the hidden state of the previous layer at time `t-1` or the initial hidden state at time `0`. If :attr:`nonlinearity` is ``'relu'``, then :math:`\text{ReLU}` is used instead of :math:`\tanh`. .. code-block:: python # Efficient implementation equivalent to the following with bidirectional=False def forward(x, hx=None): if batch_first: x = x.transpose(0, 1) seq_len, batch_size, _ = x.size() if hx is None: hx = torch.zeros(num_layers, batch_size, hidden_size) h_t_minus_1 = hx h_t = hx output = [] for t in range(seq_len): for layer in range(num_layers): h_t[layer] = torch.tanh( x[t] @ weight_ih[layer].T + bias_ih[layer] + h_t_minus_1[layer] @ weight_hh[layer].T + bias_hh[layer] ) output.append(h_t[-1]) h_t_minus_1 = h_t output = torch.stack(output) if batch_first: output = output.transpose(0, 1) return output, h_t Args: input_size: The number of expected features in the input `x` hidden_size: The number of features in the hidden state `h` num_layers: Number of recurrent layers. E.g., setting ``num_layers=2`` would mean stacking two RNNs together to form a `stacked RNN`, with the second RNN taking in outputs of the first RNN and computing the final results. Default: 1 nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'`` bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`. Default: ``True`` batch_first: If ``True``, then the input and output tensors are provided as `(batch, seq, feature)` instead of `(seq, batch, feature)`. Note that this does not apply to hidden or cell states. See the Inputs/Outputs sections below for details. Default: ``False`` dropout: If non-zero, introduces a `Dropout` layer on the outputs of each RNN layer except the last layer, with dropout probability equal to :attr:`dropout`. Default: 0 bidirectional: If ``True``, becomes a bidirectional RNN. Default: ``False`` Inputs: input, hx * **input**: tensor of shape :math:`(L, H_{in})` for unbatched input, :math:`(L, N, H_{in})` when ``batch_first=False`` or :math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of the input sequence. The input can also be a packed variable length sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence` or :func:`torch.nn.utils.rnn.pack_sequence` for details. * **hx**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or :math:`(D * \text{num\_layers}, N, H_{out})` containing the initial hidden state for the input sequence batch. Defaults to zeros if not provided. where: .. 
math:: \begin{aligned} N ={} & \text{batch size} \\ L ={} & \text{sequence length} \\ D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\ H_{in} ={} & \text{input\_size} \\ H_{out} ={} & \text{hidden\_size} \end{aligned} Outputs: output, h_n * **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input, :math:`(L, N, D * H_{out})` when ``batch_first=False`` or :math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features `(h_t)` from the last layer of the RNN, for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output will also be a packed sequence. * **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or :math:`(D * \text{num\_layers}, N, H_{out})` containing the final hidden state for each element in the batch. Attributes: weight_ih_l[k]: the learnable input-hidden weights of the k-th layer, of shape `(hidden_size, input_size)` for `k = 0`. Otherwise, the shape is `(hidden_size, num_directions * hidden_size)` weight_hh_l[k]: the learnable hidden-hidden weights of the k-th layer, of shape `(hidden_size, hidden_size)` bias_ih_l[k]: the learnable input-hidden bias of the k-th layer, of shape `(hidden_size)` bias_hh_l[k]: the learnable hidden-hidden bias of the k-th layer, of shape `(hidden_size)` .. note:: All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where :math:`k = \frac{1}{\text{hidden\_size}}` .. note:: For bidirectional RNNs, forward and backward are directions 0 and 1 respectively. Example of splitting the output layers when ``batch_first=False``: ``output.view(seq_len, batch, num_directions, hidden_size)``. .. note:: ``batch_first`` argument is ignored for unbatched inputs. .. include:: ../cudnn_rnn_determinism.rst .. include:: ../cudnn_persistent_rnn.rst Examples:: >>> rnn = nn.RNN(10, 20, 2) >>> input = torch.randn(5, 3, 10) >>> h0 = torch.randn(2, 3, 20) >>> output, hn = rnn(input, h0) """ @overload def __init__( self, input_size: int, hidden_size: int, num_layers: int = 1, nonlinearity: str = "tanh", bias: bool = True, batch_first: bool = False, dropout: float = 0.0, bidirectional: bool = False, device=None, dtype=None, ) -> None: ... @overload def __init__(self, *args, **kwargs): ... def __init__(self, *args, **kwargs): if "proj_size" in kwargs: raise ValueError( "proj_size argument is only supported for LSTM, not RNN or GRU" ) if len(args) > 3: self.nonlinearity = args[3] args = args[:3] + args[4:] else: self.nonlinearity = kwargs.pop("nonlinearity", "tanh") if self.nonlinearity == "tanh": mode = "RNN_TANH" elif self.nonlinearity == "relu": mode = "RNN_RELU" else: raise ValueError( f"Unknown nonlinearity '{self.nonlinearity}'. Select from 'tanh' or 'relu'." 
) super().__init__(mode, *args, **kwargs) @overload @torch._jit_internal._overload_method # noqa: F811 def forward( self, input: Tensor, hx: Optional[Tensor] = None ) -> tuple[Tensor, Tensor]: pass @overload @torch._jit_internal._overload_method # noqa: F811 def forward( self, input: PackedSequence, hx: Optional[Tensor] = None ) -> tuple[PackedSequence, Tensor]: pass def forward(self, input, hx=None): # noqa: F811 self._update_flat_weights() num_directions = 2 if self.bidirectional else 1 orig_input = input if isinstance(orig_input, PackedSequence): input, batch_sizes, sorted_indices, unsorted_indices = input max_batch_size = batch_sizes[0] # script() is unhappy when max_batch_size is different type in cond branches, so we duplicate if hx is None: hx = torch.zeros( self.num_layers * num_directions, max_batch_size, self.hidden_size, dtype=input.dtype, device=input.device, ) else: # Each batch of the hidden state should match the input sequence that # the user believes he/she is passing in. hx = self.permute_hidden(hx, sorted_indices) else: batch_sizes = None if input.dim() not in (2, 3): raise ValueError( f"RNN: Expected input to be 2D or 3D, got {input.dim()}D tensor instead" ) is_batched = input.dim() == 3 batch_dim = 0 if self.batch_first else 1 if not is_batched: input = input.unsqueeze(batch_dim) if hx is not None: if hx.dim() != 2: raise RuntimeError( f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor" ) hx = hx.unsqueeze(1) else: if hx is not None and hx.dim() != 3: raise RuntimeError( f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor" ) max_batch_size = input.size(0) if self.batch_first else input.size(1) sorted_indices = None unsorted_indices = None if hx is None: hx = torch.zeros( self.num_layers * num_directions, max_batch_size, self.hidden_size, dtype=input.dtype, device=input.device, ) else: # Each batch of the hidden state should match the input sequence that # the user believes he/she is passing in. hx = self.permute_hidden(hx, sorted_indices) assert hx is not None self.check_forward_args(input, hx, batch_sizes) assert self.mode == "RNN_TANH" or self.mode == "RNN_RELU" if batch_sizes is None: if self.mode == "RNN_TANH": result = _VF.rnn_tanh( input, hx, self._flat_weights, # type: ignore[arg-type] self.bias, self.num_layers, self.dropout, self.training, self.bidirectional, self.batch_first, ) else: result = _VF.rnn_relu( input, hx, self._flat_weights, # type: ignore[arg-type] self.bias, self.num_layers, self.dropout, self.training, self.bidirectional, self.batch_first, ) else: if self.mode == "RNN_TANH": result = _VF.rnn_tanh( input, batch_sizes, hx, self._flat_weights, # type: ignore[arg-type] self.bias, self.num_layers, self.dropout, self.training, self.bidirectional, ) else: result = _VF.rnn_relu( input, batch_sizes, hx, self._flat_weights, # type: ignore[arg-type] self.bias, self.num_layers, self.dropout, self.training, self.bidirectional, ) output = result[0] hidden = result[1] if isinstance(orig_input, PackedSequence): output_packed = PackedSequence( output, batch_sizes, sorted_indices, unsorted_indices ) return output_packed, self.permute_hidden(hidden, unsorted_indices) if not is_batched: # type: ignore[possibly-undefined] output = output.squeeze(batch_dim) # type: ignore[possibly-undefined] hidden = hidden.squeeze(1) return output, self.permute_hidden(hidden, unsorted_indices) # XXX: LSTM and GRU implementation is different from RNNBase, this is because: # 1. 
we want to support nn.LSTM and nn.GRU in TorchScript and TorchScript in # its current state could not support the python Union Type or Any Type # 2. TorchScript static typing does not allow a Function or Callable type in # Dict values, so we have to separately call _VF instead of using _rnn_impls # 3. This is temporary only and in the transition state that we want to make it # on time for the release # # More discussion details in https://github.com/pytorch/pytorch/pull/23266 # # TODO: remove the overriding implementations for LSTM and GRU when TorchScript # support expressing these two modules generally. class LSTM(RNNBase): r"""__init__(input_size,hidden_size,num_layers=1,bias=True,batch_first=False,dropout=0.0,bidirectional=False,proj_size=0,device=None,dtype=None) Apply a multi-layer long short-term memory (LSTM) RNN to an input sequence. For each element in the input sequence, each layer computes the following function: .. math:: \begin{array}{ll} \\ i_t = \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{t-1} + b_{hi}) \\ f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{t-1} + b_{hf}) \\ g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{t-1} + b_{hg}) \\ o_t = \sigma(W_{io} x_t + b_{io} + W_{ho} h_{t-1} + b_{ho}) \\ c_t = f_t \odot c_{t-1} + i_t \odot g_t \\ h_t = o_t \odot \tanh(c_t) \\ \end{array} where :math:`h_t` is the hidden state at time `t`, :math:`c_t` is the cell state at time `t`, :math:`x_t` is the input at time `t`, :math:`h_{t-1}` is the hidden state of the layer at time `t-1` or the initial hidden state at time `0`, and :math:`i_t`, :math:`f_t`, :math:`g_t`, :math:`o_t` are the input, forget, cell, and output gates, respectively. :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product. In a multilayer LSTM, the input :math:`x^{(l)}_t` of the :math:`l` -th layer (:math:`l \ge 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random variable which is :math:`0` with probability :attr:`dropout`. If ``proj_size > 0`` is specified, LSTM with projections will be used. This changes the LSTM cell in the following way. First, the dimension of :math:`h_t` will be changed from ``hidden_size`` to ``proj_size`` (dimensions of :math:`W_{hi}` will be changed accordingly). Second, the output hidden state of each layer will be multiplied by a learnable projection matrix: :math:`h_t = W_{hr}h_t`. Note that as a consequence of this, the output of LSTM network will be of different shape as well. See Inputs/Outputs sections below for exact dimensions of all variables. You can find more details in https://arxiv.org/abs/1402.1128. Args: input_size: The number of expected features in the input `x` hidden_size: The number of features in the hidden state `h` num_layers: Number of recurrent layers. E.g., setting ``num_layers=2`` would mean stacking two LSTMs together to form a `stacked LSTM`, with the second LSTM taking in outputs of the first LSTM and computing the final results. Default: 1 bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`. Default: ``True`` batch_first: If ``True``, then the input and output tensors are provided as `(batch, seq, feature)` instead of `(seq, batch, feature)`. Note that this does not apply to hidden or cell states. See the Inputs/Outputs sections below for details. 
Default: ``False`` dropout: If non-zero, introduces a `Dropout` layer on the outputs of each LSTM layer except the last layer, with dropout probability equal to :attr:`dropout`. Default: 0 bidirectional: If ``True``, becomes a bidirectional LSTM. Default: ``False`` proj_size: If ``> 0``, will use LSTM with projections of corresponding size. Default: 0 Inputs: input, (h_0, c_0) * **input**: tensor of shape :math:`(L, H_{in})` for unbatched input, :math:`(L, N, H_{in})` when ``batch_first=False`` or :math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of the input sequence. The input can also be a packed variable length sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence` or :func:`torch.nn.utils.rnn.pack_sequence` for details. * **h_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or :math:`(D * \text{num\_layers}, N, H_{out})` containing the initial hidden state for each element in the input sequence. Defaults to zeros if (h_0, c_0) is not provided. * **c_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{cell})` for unbatched input or :math:`(D * \text{num\_layers}, N, H_{cell})` containing the initial cell state for each element in the input sequence. Defaults to zeros if (h_0, c_0) is not provided. where: .. math:: \begin{aligned} N ={} & \text{batch size} \\ L ={} & \text{sequence length} \\ D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\ H_{in} ={} & \text{input\_size} \\ H_{cell} ={} & \text{hidden\_size} \\ H_{out} ={} & \text{proj\_size if } \text{proj\_size}>0 \text{ otherwise hidden\_size} \\ \end{aligned} Outputs: output, (h_n, c_n) * **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input, :math:`(L, N, D * H_{out})` when ``batch_first=False`` or :math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features `(h_t)` from the last layer of the LSTM, for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output will also be a packed sequence. When ``bidirectional=True``, `output` will contain a concatenation of the forward and reverse hidden states at each time step in the sequence. * **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or :math:`(D * \text{num\_layers}, N, H_{out})` containing the final hidden state for each element in the sequence. When ``bidirectional=True``, `h_n` will contain a concatenation of the final forward and reverse hidden states, respectively. * **c_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{cell})` for unbatched input or :math:`(D * \text{num\_layers}, N, H_{cell})` containing the final cell state for each element in the sequence. When ``bidirectional=True``, `c_n` will contain a concatenation of the final forward and reverse cell states, respectively. Attributes: weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer `(W_ii|W_if|W_ig|W_io)`, of shape `(4*hidden_size, input_size)` for `k = 0`. Otherwise, the shape is `(4*hidden_size, num_directions * hidden_size)`. If ``proj_size > 0`` was specified, the shape will be `(4*hidden_size, num_directions * proj_size)` for `k > 0` weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer `(W_hi|W_hf|W_hg|W_ho)`, of shape `(4*hidden_size, hidden_size)`. If ``proj_size > 0`` was specified, the shape will be `(4*hidden_size, proj_size)`. 
bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer `(b_ii|b_if|b_ig|b_io)`, of shape `(4*hidden_size)` bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer `(b_hi|b_hf|b_hg|b_ho)`, of shape `(4*hidden_size)` weight_hr_l[k] : the learnable projection weights of the :math:`\text{k}^{th}` layer of shape `(proj_size, hidden_size)`. Only present when ``proj_size > 0`` was specified. weight_ih_l[k]_reverse: Analogous to `weight_ih_l[k]` for the reverse direction. Only present when ``bidirectional=True``. weight_hh_l[k]_reverse: Analogous to `weight_hh_l[k]` for the reverse direction. Only present when ``bidirectional=True``. bias_ih_l[k]_reverse: Analogous to `bias_ih_l[k]` for the reverse direction. Only present when ``bidirectional=True``. bias_hh_l[k]_reverse: Analogous to `bias_hh_l[k]` for the reverse direction. Only present when ``bidirectional=True``. weight_hr_l[k]_reverse: Analogous to `weight_hr_l[k]` for the reverse direction. Only present when ``bidirectional=True`` and ``proj_size > 0`` was specified. .. note:: All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where :math:`k = \frac{1}{\text{hidden\_size}}` .. note:: For bidirectional LSTMs, forward and backward are directions 0 and 1 respectively. Example of splitting the output layers when ``batch_first=False``: ``output.view(seq_len, batch, num_directions, hidden_size)``. .. note:: For bidirectional LSTMs, `h_n` is not equivalent to the last element of `output`; the former contains the final forward and reverse hidden states, while the latter contains the final forward hidden state and the initial reverse hidden state. .. note:: ``batch_first`` argument is ignored for unbatched inputs. .. note:: ``proj_size`` should be smaller than ``hidden_size``. .. include:: ../cudnn_rnn_determinism.rst .. include:: ../cudnn_persistent_rnn.rst Examples:: >>> rnn = nn.LSTM(10, 20, 2) >>> input = torch.randn(5, 3, 10) >>> h0 = torch.randn(2, 3, 20) >>> c0 = torch.randn(2, 3, 20) >>> output, (hn, cn) = rnn(input, (h0, c0)) """ @overload def __init__( self, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True, batch_first: bool = False, dropout: float = 0.0, bidirectional: bool = False, proj_size: int = 0, device=None, dtype=None, ) -> None: ... @overload def __init__(self, *args, **kwargs): ... def __init__(self, *args, **kwargs): super().__init__("LSTM", *args, **kwargs) def get_expected_cell_size( self, input: Tensor, batch_sizes: Optional[Tensor] ) -> tuple[int, int, int]: if batch_sizes is not None: mini_batch = int(batch_sizes[0]) else: mini_batch = input.size(0) if self.batch_first else input.size(1) num_directions = 2 if self.bidirectional else 1 expected_hidden_size = ( self.num_layers * num_directions, mini_batch, self.hidden_size, ) return expected_hidden_size # In the future, we should prevent mypy from applying contravariance rules here. 
# See torch/nn/modules/module.py::_forward_unimplemented def check_forward_args( self, input: Tensor, hidden: tuple[Tensor, Tensor], # type: ignore[override] batch_sizes: Optional[Tensor], ): self.check_input(input, batch_sizes) self.check_hidden_size( hidden[0], self.get_expected_hidden_size(input, batch_sizes), "Expected hidden[0] size {}, got {}", ) self.check_hidden_size( hidden[1], self.get_expected_cell_size(input, batch_sizes), "Expected hidden[1] size {}, got {}", ) # Same as above, see torch/nn/modules/module.py::_forward_unimplemented def permute_hidden( # type: ignore[override] self, hx: tuple[Tensor, Tensor], permutation: Optional[Tensor], ) -> tuple[Tensor, Tensor]: if permutation is None: return hx return _apply_permutation(hx[0], permutation), _apply_permutation( hx[1], permutation ) # Same as above, see torch/nn/modules/module.py::_forward_unimplemented @overload # type: ignore[override] @torch._jit_internal._overload_method # noqa: F811 def forward( self, input: Tensor, hx: Optional[tuple[Tensor, Tensor]] = None ) -> tuple[Tensor, tuple[Tensor, Tensor]]: # noqa: F811 pass # Same as above, see torch/nn/modules/module.py::_forward_unimplemented @overload @torch._jit_internal._overload_method # noqa: F811 def forward( self, input: PackedSequence, hx: Optional[tuple[Tensor, Tensor]] = None ) -> tuple[PackedSequence, tuple[Tensor, Tensor]]: # noqa: F811 pass def forward(self, input, hx=None): # noqa: F811 self._update_flat_weights() orig_input = input # xxx: isinstance check needs to be in conditional for TorchScript to compile batch_sizes = None num_directions = 2 if self.bidirectional else 1 real_hidden_size = self.proj_size if self.proj_size > 0 else self.hidden_size if isinstance(orig_input, PackedSequence): input, batch_sizes, sorted_indices, unsorted_indices = input max_batch_size = batch_sizes[0] if hx is None: h_zeros = torch.zeros( self.num_layers * num_directions, max_batch_size, real_hidden_size, dtype=input.dtype, device=input.device, ) c_zeros = torch.zeros( self.num_layers * num_directions, max_batch_size, self.hidden_size, dtype=input.dtype, device=input.device, ) hx = (h_zeros, c_zeros) else: # Each batch of the hidden state should match the input sequence that # the user believes he/she is passing in. 
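                # Illustrative note: when the sequence was packed with
                #   pack_padded_sequence(x, lengths, enforce_sorted=False)
                # the batches inside the PackedSequence are reordered by length,
                # so the user-supplied (h_0, c_0) is permuted with sorted_indices
                # to line up with that internal ordering.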
hx = self.permute_hidden(hx, sorted_indices) else: if input.dim() not in (2, 3): raise ValueError( f"LSTM: Expected input to be 2D or 3D, got {input.dim()}D instead" ) is_batched = input.dim() == 3 batch_dim = 0 if self.batch_first else 1 if not is_batched: input = input.unsqueeze(batch_dim) max_batch_size = input.size(0) if self.batch_first else input.size(1) sorted_indices = None unsorted_indices = None if hx is None: h_zeros = torch.zeros( self.num_layers * num_directions, max_batch_size, real_hidden_size, dtype=input.dtype, device=input.device, ) c_zeros = torch.zeros( self.num_layers * num_directions, max_batch_size, self.hidden_size, dtype=input.dtype, device=input.device, ) hx = (h_zeros, c_zeros) self.check_forward_args(input, hx, batch_sizes) else: if is_batched: if hx[0].dim() != 3 or hx[1].dim() != 3: msg = ( "For batched 3-D input, hx and cx should " f"also be 3-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors" ) raise RuntimeError(msg) else: if hx[0].dim() != 2 or hx[1].dim() != 2: msg = ( "For unbatched 2-D input, hx and cx should " f"also be 2-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors" ) raise RuntimeError(msg) hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1)) # Each batch of the hidden state should match the input sequence that # the user believes he/she is passing in. self.check_forward_args(input, hx, batch_sizes) hx = self.permute_hidden(hx, sorted_indices) if batch_sizes is None: result = _VF.lstm( input, hx, self._flat_weights, # type: ignore[arg-type] self.bias, self.num_layers, self.dropout, self.training, self.bidirectional, self.batch_first, ) else: result = _VF.lstm( input, batch_sizes, hx, self._flat_weights, # type: ignore[arg-type] self.bias, self.num_layers, self.dropout, self.training, self.bidirectional, ) output = result[0] hidden = result[1:] # xxx: isinstance check needs to be in conditional for TorchScript to compile if isinstance(orig_input, PackedSequence): output_packed = PackedSequence( output, batch_sizes, sorted_indices, unsorted_indices ) return output_packed, self.permute_hidden(hidden, unsorted_indices) else: if not is_batched: # type: ignore[possibly-undefined] output = output.squeeze(batch_dim) # type: ignore[possibly-undefined] hidden = (hidden[0].squeeze(1), hidden[1].squeeze(1)) return output, self.permute_hidden(hidden, unsorted_indices) class GRU(RNNBase): r"""__init__(input_size,hidden_size,num_layers=1,bias=True,batch_first=False,dropout=0.0,bidirectional=False,device=None,dtype=None) Apply a multi-layer gated recurrent unit (GRU) RNN to an input sequence. For each element in the input sequence, each layer computes the following function: .. math:: \begin{array}{ll} r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\ z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\ n_t = \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)}+ b_{hn})) \\ h_t = (1 - z_t) \odot n_t + z_t \odot h_{(t-1)} \end{array} where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input at time `t`, :math:`h_{(t-1)}` is the hidden state of the layer at time `t-1` or the initial hidden state at time `0`, and :math:`r_t`, :math:`z_t`, :math:`n_t` are the reset, update, and new gates, respectively. :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product. 
In a multilayer GRU, the input :math:`x^{(l)}_t` of the :math:`l` -th layer (:math:`l \ge 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random variable which is :math:`0` with probability :attr:`dropout`. Args: input_size: The number of expected features in the input `x` hidden_size: The number of features in the hidden state `h` num_layers: Number of recurrent layers. E.g., setting ``num_layers=2`` would mean stacking two GRUs together to form a `stacked GRU`, with the second GRU taking in outputs of the first GRU and computing the final results. Default: 1 bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`. Default: ``True`` batch_first: If ``True``, then the input and output tensors are provided as `(batch, seq, feature)` instead of `(seq, batch, feature)`. Note that this does not apply to hidden or cell states. See the Inputs/Outputs sections below for details. Default: ``False`` dropout: If non-zero, introduces a `Dropout` layer on the outputs of each GRU layer except the last layer, with dropout probability equal to :attr:`dropout`. Default: 0 bidirectional: If ``True``, becomes a bidirectional GRU. Default: ``False`` Inputs: input, h_0 * **input**: tensor of shape :math:`(L, H_{in})` for unbatched input, :math:`(L, N, H_{in})` when ``batch_first=False`` or :math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of the input sequence. The input can also be a packed variable length sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence` or :func:`torch.nn.utils.rnn.pack_sequence` for details. * **h_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` or :math:`(D * \text{num\_layers}, N, H_{out})` containing the initial hidden state for the input sequence. Defaults to zeros if not provided. where: .. math:: \begin{aligned} N ={} & \text{batch size} \\ L ={} & \text{sequence length} \\ D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\ H_{in} ={} & \text{input\_size} \\ H_{out} ={} & \text{hidden\_size} \end{aligned} Outputs: output, h_n * **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input, :math:`(L, N, D * H_{out})` when ``batch_first=False`` or :math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features `(h_t)` from the last layer of the GRU, for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output will also be a packed sequence. * **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` or :math:`(D * \text{num\_layers}, N, H_{out})` containing the final hidden state for the input sequence. Attributes: weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer (W_ir|W_iz|W_in), of shape `(3*hidden_size, input_size)` for `k = 0`. Otherwise, the shape is `(3*hidden_size, num_directions * hidden_size)` weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer (W_hr|W_hz|W_hn), of shape `(3*hidden_size, hidden_size)` bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer (b_ir|b_iz|b_in), of shape `(3*hidden_size)` bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer (b_hr|b_hz|b_hn), of shape `(3*hidden_size)` .. note:: All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where :math:`k = \frac{1}{\text{hidden\_size}}` .. 
note:: For bidirectional GRUs, forward and backward are directions 0 and 1 respectively. Example of splitting the output layers when ``batch_first=False``: ``output.view(seq_len, batch, num_directions, hidden_size)``. .. note:: ``batch_first`` argument is ignored for unbatched inputs. .. note:: The calculation of new gate :math:`n_t` subtly differs from the original paper and other frameworks. In the original implementation, the Hadamard product :math:`(\odot)` between :math:`r_t` and the previous hidden state :math:`h_{(t-1)}` is done before the multiplication with the weight matrix `W` and addition of bias: .. math:: \begin{aligned} n_t = \tanh(W_{in} x_t + b_{in} + W_{hn} ( r_t \odot h_{(t-1)} ) + b_{hn}) \end{aligned} This is in contrast to PyTorch implementation, which is done after :math:`W_{hn} h_{(t-1)}` .. math:: \begin{aligned} n_t = \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)}+ b_{hn})) \end{aligned} This implementation differs on purpose for efficiency. .. include:: ../cudnn_persistent_rnn.rst Examples:: >>> rnn = nn.GRU(10, 20, 2) >>> input = torch.randn(5, 3, 10) >>> h0 = torch.randn(2, 3, 20) >>> output, hn = rnn(input, h0) """ @overload def __init__( self, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True, batch_first: bool = False, dropout: float = 0.0, bidirectional: bool = False, device=None, dtype=None, ) -> None: ... @overload def __init__(self, *args, **kwargs): ... def __init__(self, *args, **kwargs): if "proj_size" in kwargs: raise ValueError( "proj_size argument is only supported for LSTM, not RNN or GRU" ) super().__init__("GRU", *args, **kwargs) @overload # type: ignore[override] @torch._jit_internal._overload_method # noqa: F811 def forward( self, input: Tensor, hx: Optional[Tensor] = None ) -> tuple[Tensor, Tensor]: # noqa: F811 pass @overload @torch._jit_internal._overload_method # noqa: F811 def forward( self, input: PackedSequence, hx: Optional[Tensor] = None ) -> tuple[PackedSequence, Tensor]: # noqa: F811 pass def forward(self, input, hx=None): # noqa: F811 self._update_flat_weights() orig_input = input # xxx: isinstance check needs to be in conditional for TorchScript to compile if isinstance(orig_input, PackedSequence): input, batch_sizes, sorted_indices, unsorted_indices = input max_batch_size = batch_sizes[0] if hx is None: num_directions = 2 if self.bidirectional else 1 hx = torch.zeros( self.num_layers * num_directions, max_batch_size, self.hidden_size, dtype=input.dtype, device=input.device, ) else: # Each batch of the hidden state should match the input sequence that # the user believes he/she is passing in. 
hx = self.permute_hidden(hx, sorted_indices) else: batch_sizes = None if input.dim() not in (2, 3): raise ValueError( f"GRU: Expected input to be 2D or 3D, got {input.dim()}D instead" ) is_batched = input.dim() == 3 batch_dim = 0 if self.batch_first else 1 if not is_batched: input = input.unsqueeze(batch_dim) if hx is not None: if hx.dim() != 2: raise RuntimeError( f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor" ) hx = hx.unsqueeze(1) else: if hx is not None and hx.dim() != 3: raise RuntimeError( f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor" ) max_batch_size = input.size(0) if self.batch_first else input.size(1) sorted_indices = None unsorted_indices = None if hx is None: num_directions = 2 if self.bidirectional else 1 hx = torch.zeros( self.num_layers * num_directions, max_batch_size, self.hidden_size, dtype=input.dtype, device=input.device, ) else: # Each batch of the hidden state should match the input sequence that # the user believes he/she is passing in. hx = self.permute_hidden(hx, sorted_indices) self.check_forward_args(input, hx, batch_sizes) if batch_sizes is None: result = _VF.gru( input, hx, self._flat_weights, # type: ignore[arg-type] self.bias, self.num_layers, self.dropout, self.training, self.bidirectional, self.batch_first, ) else: result = _VF.gru( input, batch_sizes, hx, self._flat_weights, # type: ignore[arg-type] self.bias, self.num_layers, self.dropout, self.training, self.bidirectional, ) output = result[0] hidden = result[1] # xxx: isinstance check needs to be in conditional for TorchScript to compile if isinstance(orig_input, PackedSequence): output_packed = PackedSequence( output, batch_sizes, sorted_indices, unsorted_indices ) return output_packed, self.permute_hidden(hidden, unsorted_indices) else: if not is_batched: # type: ignore[possibly-undefined] output = output.squeeze(batch_dim) # type: ignore[possibly-undefined] hidden = hidden.squeeze(1) return output, self.permute_hidden(hidden, unsorted_indices) class RNNCellBase(Module): __constants__ = ["input_size", "hidden_size", "bias"] input_size: int hidden_size: int bias: bool weight_ih: Tensor weight_hh: Tensor # WARNING: bias_ih and bias_hh purposely not defined here. 
# See https://github.com/pytorch/pytorch/issues/39670 def __init__( self, input_size: int, hidden_size: int, bias: bool, num_chunks: int, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.weight_ih = Parameter( torch.empty((num_chunks * hidden_size, input_size), **factory_kwargs) ) self.weight_hh = Parameter( torch.empty((num_chunks * hidden_size, hidden_size), **factory_kwargs) ) if bias: self.bias_ih = Parameter( torch.empty(num_chunks * hidden_size, **factory_kwargs) ) self.bias_hh = Parameter( torch.empty(num_chunks * hidden_size, **factory_kwargs) ) else: self.register_parameter("bias_ih", None) self.register_parameter("bias_hh", None) self.reset_parameters() def extra_repr(self) -> str: s = "{input_size}, {hidden_size}" if "bias" in self.__dict__ and self.bias is not True: s += ", bias={bias}" if "nonlinearity" in self.__dict__ and self.nonlinearity != "tanh": s += ", nonlinearity={nonlinearity}" return s.format(**self.__dict__) def reset_parameters(self) -> None: stdv = 1.0 / math.sqrt(self.hidden_size) if self.hidden_size > 0 else 0 for weight in self.parameters(): init.uniform_(weight, -stdv, stdv) class RNNCell(RNNCellBase): r"""An Elman RNN cell with tanh or ReLU non-linearity. .. math:: h' = \tanh(W_{ih} x + b_{ih} + W_{hh} h + b_{hh}) If :attr:`nonlinearity` is `'relu'`, then ReLU is used in place of tanh. Args: input_size: The number of expected features in the input `x` hidden_size: The number of features in the hidden state `h` bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`. Default: ``True`` nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'`` Inputs: input, hidden - **input**: tensor containing input features - **hidden**: tensor containing the initial hidden state Defaults to zero if not provided. Outputs: h' - **h'** of shape `(batch, hidden_size)`: tensor containing the next hidden state for each element in the batch Shape: - input: :math:`(N, H_{in})` or :math:`(H_{in})` tensor containing input features where :math:`H_{in}` = `input_size`. - hidden: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the initial hidden state where :math:`H_{out}` = `hidden_size`. Defaults to zero if not provided. - output: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the next hidden state. Attributes: weight_ih: the learnable input-hidden weights, of shape `(hidden_size, input_size)` weight_hh: the learnable hidden-hidden weights, of shape `(hidden_size, hidden_size)` bias_ih: the learnable input-hidden bias, of shape `(hidden_size)` bias_hh: the learnable hidden-hidden bias, of shape `(hidden_size)` .. note:: All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where :math:`k = \frac{1}{\text{hidden\_size}}` Examples:: >>> rnn = nn.RNNCell(10, 20) >>> input = torch.randn(6, 3, 10) >>> hx = torch.randn(3, 20) >>> output = [] >>> for i in range(6): ... hx = rnn(input[i], hx) ... 
output.append(hx) """ __constants__ = ["input_size", "hidden_size", "bias", "nonlinearity"] nonlinearity: str def __init__( self, input_size: int, hidden_size: int, bias: bool = True, nonlinearity: str = "tanh", device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__(input_size, hidden_size, bias, num_chunks=1, **factory_kwargs) self.nonlinearity = nonlinearity def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor: if input.dim() not in (1, 2): raise ValueError( f"RNNCell: Expected input to be 1D or 2D, got {input.dim()}D instead" ) if hx is not None and hx.dim() not in (1, 2): raise ValueError( f"RNNCell: Expected hidden to be 1D or 2D, got {hx.dim()}D instead" ) is_batched = input.dim() == 2 if not is_batched: input = input.unsqueeze(0) if hx is None: hx = torch.zeros( input.size(0), self.hidden_size, dtype=input.dtype, device=input.device ) else: hx = hx.unsqueeze(0) if not is_batched else hx if self.nonlinearity == "tanh": ret = _VF.rnn_tanh_cell( input, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh, ) elif self.nonlinearity == "relu": ret = _VF.rnn_relu_cell( input, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh, ) else: ret = input # TODO: remove when jit supports exception flow raise RuntimeError(f"Unknown nonlinearity: {self.nonlinearity}") if not is_batched: ret = ret.squeeze(0) return ret class LSTMCell(RNNCellBase): r"""A long short-term memory (LSTM) cell. .. math:: \begin{array}{ll} i = \sigma(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\ f = \sigma(W_{if} x + b_{if} + W_{hf} h + b_{hf}) \\ g = \tanh(W_{ig} x + b_{ig} + W_{hg} h + b_{hg}) \\ o = \sigma(W_{io} x + b_{io} + W_{ho} h + b_{ho}) \\ c' = f \odot c + i \odot g \\ h' = o \odot \tanh(c') \\ \end{array} where :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product. Args: input_size: The number of expected features in the input `x` hidden_size: The number of features in the hidden state `h` bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`. Default: ``True`` Inputs: input, (h_0, c_0) - **input** of shape `(batch, input_size)` or `(input_size)`: tensor containing input features - **h_0** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the initial hidden state - **c_0** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the initial cell state If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero. Outputs: (h_1, c_1) - **h_1** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the next hidden state - **c_1** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the next cell state Attributes: weight_ih: the learnable input-hidden weights, of shape `(4*hidden_size, input_size)` weight_hh: the learnable hidden-hidden weights, of shape `(4*hidden_size, hidden_size)` bias_ih: the learnable input-hidden bias, of shape `(4*hidden_size)` bias_hh: the learnable hidden-hidden bias, of shape `(4*hidden_size)` .. note:: All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where :math:`k = \frac{1}{\text{hidden\_size}}` On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward. 
Examples:: >>> rnn = nn.LSTMCell(10, 20) # (input_size, hidden_size) >>> input = torch.randn(2, 3, 10) # (time_steps, batch, input_size) >>> hx = torch.randn(3, 20) # (batch, hidden_size) >>> cx = torch.randn(3, 20) >>> output = [] >>> for i in range(input.size()[0]): ... hx, cx = rnn(input[i], (hx, cx)) ... output.append(hx) >>> output = torch.stack(output, dim=0) """ def __init__( self, input_size: int, hidden_size: int, bias: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__(input_size, hidden_size, bias, num_chunks=4, **factory_kwargs) def forward( self, input: Tensor, hx: Optional[tuple[Tensor, Tensor]] = None ) -> tuple[Tensor, Tensor]: if input.dim() not in (1, 2): raise ValueError( f"LSTMCell: Expected input to be 1D or 2D, got {input.dim()}D instead" ) if hx is not None: for idx, value in enumerate(hx): if value.dim() not in (1, 2): raise ValueError( f"LSTMCell: Expected hx[{idx}] to be 1D or 2D, got {value.dim()}D instead" ) is_batched = input.dim() == 2 if not is_batched: input = input.unsqueeze(0) if hx is None: zeros = torch.zeros( input.size(0), self.hidden_size, dtype=input.dtype, device=input.device ) hx = (zeros, zeros) else: hx = (hx[0].unsqueeze(0), hx[1].unsqueeze(0)) if not is_batched else hx ret = _VF.lstm_cell( input, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh, ) if not is_batched: ret = (ret[0].squeeze(0), ret[1].squeeze(0)) return ret class GRUCell(RNNCellBase): r"""A gated recurrent unit (GRU) cell. .. math:: \begin{array}{ll} r = \sigma(W_{ir} x + b_{ir} + W_{hr} h + b_{hr}) \\ z = \sigma(W_{iz} x + b_{iz} + W_{hz} h + b_{hz}) \\ n = \tanh(W_{in} x + b_{in} + r \odot (W_{hn} h + b_{hn})) \\ h' = (1 - z) \odot n + z \odot h \end{array} where :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product. Args: input_size: The number of expected features in the input `x` hidden_size: The number of features in the hidden state `h` bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`. Default: ``True`` Inputs: input, hidden - **input** : tensor containing input features - **hidden** : tensor containing the initial hidden state for each element in the batch. Defaults to zero if not provided. Outputs: h' - **h'** : tensor containing the next hidden state for each element in the batch Shape: - input: :math:`(N, H_{in})` or :math:`(H_{in})` tensor containing input features where :math:`H_{in}` = `input_size`. - hidden: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the initial hidden state where :math:`H_{out}` = `hidden_size`. Defaults to zero if not provided. - output: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the next hidden state. Attributes: weight_ih: the learnable input-hidden weights, of shape `(3*hidden_size, input_size)` weight_hh: the learnable hidden-hidden weights, of shape `(3*hidden_size, hidden_size)` bias_ih: the learnable input-hidden bias, of shape `(3*hidden_size)` bias_hh: the learnable hidden-hidden bias, of shape `(3*hidden_size)` .. note:: All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where :math:`k = \frac{1}{\text{hidden\_size}}` On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward. Examples:: >>> rnn = nn.GRUCell(10, 20) >>> input = torch.randn(6, 3, 10) >>> hx = torch.randn(3, 20) >>> output = [] >>> for i in range(6): ... hx = rnn(input[i], hx) ... 
output.append(hx) """ def __init__( self, input_size: int, hidden_size: int, bias: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__(input_size, hidden_size, bias, num_chunks=3, **factory_kwargs) def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor: if input.dim() not in (1, 2): raise ValueError( f"GRUCell: Expected input to be 1D or 2D, got {input.dim()}D instead" ) if hx is not None and hx.dim() not in (1, 2): raise ValueError( f"GRUCell: Expected hidden to be 1D or 2D, got {hx.dim()}D instead" ) is_batched = input.dim() == 2 if not is_batched: input = input.unsqueeze(0) if hx is None: hx = torch.zeros( input.size(0), self.hidden_size, dtype=input.dtype, device=input.device ) else: hx = hx.unsqueeze(0) if not is_batched else hx ret = _VF.gru_cell( input, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh, ) if not is_batched: ret = ret.squeeze(0) return ret ```
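The note above on the GRU's new gate and the ``GRUCell`` docstring both spell out the gate equations, so a short numerical check can make the ordering concrete. The following is a minimal sketch, not part of the module source: it unpacks ``nn.GRUCell``'s stacked weights (chunked in ``(r, z, n)`` order) and reproduces one update by hand, applying the reset gate after the hidden-to-hidden projection, as PyTorch does.

```py
# Minimal sketch (not part of this module): reproduce one nn.GRUCell update
# from its weight matrices to illustrate the gate equations documented above.
import torch
import torch.nn as nn

torch.manual_seed(0)
cell = nn.GRUCell(input_size=4, hidden_size=3)
x = torch.randn(2, 4)  # (batch, input_size)
h = torch.randn(2, 3)  # (batch, hidden_size)

# weight_ih / weight_hh stack the reset, update and new gates in (r, z, n) order.
W_ir, W_iz, W_in = cell.weight_ih.chunk(3, dim=0)
W_hr, W_hz, W_hn = cell.weight_hh.chunk(3, dim=0)
b_ir, b_iz, b_in = cell.bias_ih.chunk(3)
b_hr, b_hz, b_hn = cell.bias_hh.chunk(3)

r = torch.sigmoid(x @ W_ir.T + b_ir + h @ W_hr.T + b_hr)
z = torch.sigmoid(x @ W_iz.T + b_iz + h @ W_hz.T + b_hz)
# The reset gate multiplies the already-projected hidden state, as noted above.
n = torch.tanh(x @ W_in.T + b_in + r * (h @ W_hn.T + b_hn))
h_new = (1 - z) * n + z * h

assert torch.allclose(h_new, cell(x, h), atol=1e-5)
```

The same stacked-weight layout (``(r, z, n)`` for GRU, ``(i, f, g, o)`` for LSTM) also applies to the per-layer weights of the multi-layer ``nn.GRU`` and ``nn.LSTM`` modules.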
================================================================================================================== SOURCE CODE FILE: sparse.py LINES: 1 SIZE: 24.02 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\sparse.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from typing import Optional import torch from torch import Tensor from torch.nn import functional as F, init from torch.nn.parameter import Parameter from .module import Module __all__ = ["Embedding", "EmbeddingBag"] class Embedding(Module): r"""A simple lookup table that stores embeddings of a fixed dictionary and size. This module is often used to store word embeddings and retrieve them using indices. The input to the module is a list of indices, and the output is the corresponding word embeddings. Args: num_embeddings (int): size of the dictionary of embeddings embedding_dim (int): the size of each embedding vector padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated during training, i.e. it remains as a fixed "pad". For a newly constructed Embedding, the embedding vector at :attr:`padding_idx` will default to all zeros, but can be updated to another value to be used as the padding vector. max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm` is renormalized to have norm :attr:`max_norm`. norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``. scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default ``False``. sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See Notes for more details regarding sparse gradients. Attributes: weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim) initialized from :math:`\mathcal{N}(0, 1)` Shape: - Input: :math:`(*)`, IntTensor or LongTensor of arbitrary shape containing the indices to extract - Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}` .. note:: Keep in mind that only a limited number of optimizers support sparse gradients: currently it's :class:`optim.SGD` (`CUDA` and `CPU`), :class:`optim.SparseAdam` (`CUDA` and `CPU`) and :class:`optim.Adagrad` (`CPU`) .. note:: When :attr:`max_norm` is not ``None``, :class:`Embedding`'s forward method will modify the :attr:`weight` tensor in-place. Since tensors needed for gradient computations cannot be modified in-place, performing a differentiable operation on ``Embedding.weight`` before calling :class:`Embedding`'s forward method requires cloning ``Embedding.weight`` when :attr:`max_norm` is not ``None``. 
For example:: n, d, m = 3, 5, 7 embedding = nn.Embedding(n, d, max_norm=1.0) W = torch.randn((m, d), requires_grad=True) idx = torch.tensor([1, 2]) a = embedding.weight.clone() @ W.t() # weight must be cloned for this to be differentiable b = embedding(idx) @ W.t() # modifies weight in-place out = (a.unsqueeze(0) + b.unsqueeze(1)) loss = out.sigmoid().prod() loss.backward() Examples:: >>> # an Embedding module containing 10 tensors of size 3 >>> embedding = nn.Embedding(10, 3) >>> # a batch of 2 samples of 4 indices each >>> input = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]]) >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> embedding(input) tensor([[[-0.0251, -1.6902, 0.7172], [-0.6431, 0.0748, 0.6969], [ 1.4970, 1.3448, -0.9685], [-0.3677, -2.7265, -0.1685]], [[ 1.4970, 1.3448, -0.9685], [ 0.4362, -0.4004, 0.9400], [-0.6431, 0.0748, 0.6969], [ 0.9124, -2.3616, 1.1151]]]) >>> # example with padding_idx >>> embedding = nn.Embedding(10, 3, padding_idx=0) >>> input = torch.LongTensor([[0, 2, 0, 5]]) >>> embedding(input) tensor([[[ 0.0000, 0.0000, 0.0000], [ 0.1535, -2.0309, 0.9315], [ 0.0000, 0.0000, 0.0000], [-0.1655, 0.9897, 0.0635]]]) >>> # example of changing `pad` vector >>> padding_idx = 0 >>> embedding = nn.Embedding(3, 3, padding_idx=padding_idx) >>> embedding.weight Parameter containing: tensor([[ 0.0000, 0.0000, 0.0000], [-0.7895, -0.7089, -0.0364], [ 0.6778, 0.5803, 0.2678]], requires_grad=True) >>> with torch.no_grad(): ... embedding.weight[padding_idx] = torch.ones(3) >>> embedding.weight Parameter containing: tensor([[ 1.0000, 1.0000, 1.0000], [-0.7895, -0.7089, -0.0364], [ 0.6778, 0.5803, 0.2678]], requires_grad=True) """ __constants__ = [ "num_embeddings", "embedding_dim", "padding_idx", "max_norm", "norm_type", "scale_grad_by_freq", "sparse", ] num_embeddings: int embedding_dim: int padding_idx: Optional[int] max_norm: Optional[float] norm_type: float scale_grad_by_freq: bool weight: Tensor freeze: bool sparse: bool def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None, max_norm: Optional[float] = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, sparse: bool = False, _weight: Optional[Tensor] = None, _freeze: bool = False, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim if padding_idx is not None: if padding_idx > 0: assert ( padding_idx < self.num_embeddings ), "Padding_idx must be within num_embeddings" elif padding_idx < 0: assert ( padding_idx >= -self.num_embeddings ), "Padding_idx must be within num_embeddings" padding_idx = self.num_embeddings + padding_idx self.padding_idx = padding_idx self.max_norm = max_norm self.norm_type = norm_type self.scale_grad_by_freq = scale_grad_by_freq if _weight is None: self.weight = Parameter( torch.empty((num_embeddings, embedding_dim), **factory_kwargs), requires_grad=not _freeze, ) self.reset_parameters() else: assert list(_weight.shape) == [ num_embeddings, embedding_dim, ], "Shape of weight does not match num_embeddings and embedding_dim" self.weight = Parameter(_weight, requires_grad=not _freeze) self.sparse = sparse def reset_parameters(self) -> None: init.normal_(self.weight) self._fill_padding_idx_with_zero() def _fill_padding_idx_with_zero(self) -> None: if self.padding_idx is not None: with torch.no_grad(): self.weight[self.padding_idx].fill_(0) def forward(self, input: Tensor) -> Tensor: return F.embedding( input, self.weight, 
self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) def extra_repr(self) -> str: s = "{num_embeddings}, {embedding_dim}" if self.padding_idx is not None: s += ", padding_idx={padding_idx}" if self.max_norm is not None: s += ", max_norm={max_norm}" if self.norm_type != 2: s += ", norm_type={norm_type}" if self.scale_grad_by_freq is not False: s += ", scale_grad_by_freq={scale_grad_by_freq}" if self.sparse is not False: s += ", sparse=True" return s.format(**self.__dict__) @classmethod def from_pretrained( cls, embeddings, freeze=True, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, ): r"""Create Embedding instance from given 2-dimensional FloatTensor. Args: embeddings (Tensor): FloatTensor containing weights for the Embedding. First dimension is being passed to Embedding as ``num_embeddings``, second as ``embedding_dim``. freeze (bool, optional): If ``True``, the tensor does not get updated in the learning process. Equivalent to ``embedding.weight.requires_grad = False``. Default: ``True`` padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated during training, i.e. it remains as a fixed "pad". max_norm (float, optional): See module initialization documentation. norm_type (float, optional): See module initialization documentation. Default ``2``. scale_grad_by_freq (bool, optional): See module initialization documentation. Default ``False``. sparse (bool, optional): See module initialization documentation. Examples:: >>> # FloatTensor containing pretrained weights >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]]) >>> embedding = nn.Embedding.from_pretrained(weight) >>> # Get embeddings for index 1 >>> input = torch.LongTensor([1]) >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> embedding(input) tensor([[ 4.0000, 5.1000, 6.3000]]) """ assert ( embeddings.dim() == 2 ), "Embeddings parameter is expected to be 2-dimensional" rows, cols = embeddings.shape embedding = cls( num_embeddings=rows, embedding_dim=cols, _weight=embeddings, _freeze=freeze, padding_idx=padding_idx, max_norm=max_norm, norm_type=norm_type, scale_grad_by_freq=scale_grad_by_freq, sparse=sparse, ) return embedding class EmbeddingBag(Module): r"""Compute sums or means of 'bags' of embeddings, without instantiating the intermediate embeddings. For bags of constant length, no :attr:`per_sample_weights`, no indices equal to :attr:`padding_idx`, and with 2D inputs, this class * with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=1)``, * with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=1)``, * with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=1)``. However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these operations. EmbeddingBag also supports per-sample weights as an argument to the forward pass. This scales the output of the Embedding before performing a weighted reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the only supported ``mode`` is ``"sum"``, which computes a weighted sum according to :attr:`per_sample_weights`. 
Args: num_embeddings (int): size of the dictionary of embeddings embedding_dim (int): the size of each embedding vector max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm` is renormalized to have norm :attr:`max_norm`. norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``. scale_grad_by_freq (bool, optional): if given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default ``False``. Note: this option is not supported when ``mode="max"``. mode (str, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag. ``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights` into consideration. ``"mean"`` computes the average of the values in the bag, ``"max"`` computes the max value over each bag. Default: ``"mean"`` sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See Notes for more details regarding sparse gradients. Note: this option is not supported when ``mode="max"``. include_last_offset (bool, optional): if ``True``, :attr:`offsets` has one additional element, where the last element is equivalent to the size of `indices`. This matches the CSR format. padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated during training, i.e. it remains as a fixed "pad". For a newly constructed EmbeddingBag, the embedding vector at :attr:`padding_idx` will default to all zeros, but can be updated to another value to be used as the padding vector. Note that the embedding vector at :attr:`padding_idx` is excluded from the reduction. Attributes: weight (Tensor): the learnable weights of the module of shape `(num_embeddings, embedding_dim)` initialized from :math:`\mathcal{N}(0, 1)`. 
Examples:: >>> # an EmbeddingBag module containing 10 tensors of size 3 >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum') >>> # a batch of 2 samples of 4 indices each >>> input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long) >>> offsets = torch.tensor([0, 4], dtype=torch.long) >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> embedding_sum(input, offsets) tensor([[-0.8861, -5.4350, -0.0523], [ 1.1306, -2.5798, -1.0044]]) >>> # Example with padding_idx >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum', padding_idx=2) >>> input = torch.tensor([2, 2, 2, 2, 4, 3, 2, 9], dtype=torch.long) >>> offsets = torch.tensor([0, 4], dtype=torch.long) >>> embedding_sum(input, offsets) tensor([[ 0.0000, 0.0000, 0.0000], [-0.7082, 3.2145, -2.6251]]) >>> # An EmbeddingBag can be loaded from an Embedding like so >>> embedding = nn.Embedding(10, 3, padding_idx=2) >>> embedding_sum = nn.EmbeddingBag.from_pretrained( embedding.weight, padding_idx=embedding.padding_idx, mode='sum') """ __constants__ = [ "num_embeddings", "embedding_dim", "max_norm", "norm_type", "scale_grad_by_freq", "mode", "sparse", "include_last_offset", "padding_idx", ] num_embeddings: int embedding_dim: int max_norm: Optional[float] norm_type: float scale_grad_by_freq: bool weight: Tensor mode: str sparse: bool include_last_offset: bool padding_idx: Optional[int] def __init__( self, num_embeddings: int, embedding_dim: int, max_norm: Optional[float] = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, mode: str = "mean", sparse: bool = False, _weight: Optional[Tensor] = None, include_last_offset: bool = False, padding_idx: Optional[int] = None, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim self.max_norm = max_norm self.norm_type = norm_type self.scale_grad_by_freq = scale_grad_by_freq if padding_idx is not None: if padding_idx > 0: assert ( padding_idx < self.num_embeddings ), "padding_idx must be within num_embeddings" elif padding_idx < 0: assert ( padding_idx >= -self.num_embeddings ), "padding_idx must be within num_embeddings" padding_idx = self.num_embeddings + padding_idx self.padding_idx = padding_idx if _weight is None: self.weight = Parameter( torch.empty((num_embeddings, embedding_dim), **factory_kwargs) ) self.reset_parameters() else: assert list(_weight.shape) == [ num_embeddings, embedding_dim, ], "Shape of weight does not match num_embeddings and embedding_dim" self.weight = Parameter(_weight) self.mode = mode self.sparse = sparse self.include_last_offset = include_last_offset def reset_parameters(self) -> None: init.normal_(self.weight) self._fill_padding_idx_with_zero() def _fill_padding_idx_with_zero(self) -> None: if self.padding_idx is not None: with torch.no_grad(): self.weight[self.padding_idx].fill_(0) def forward( self, input: Tensor, offsets: Optional[Tensor] = None, per_sample_weights: Optional[Tensor] = None, ) -> Tensor: """Forward pass of EmbeddingBag. Args: input (Tensor): Tensor containing bags of indices into the embedding matrix. offsets (Tensor, optional): Only used when :attr:`input` is 1D. :attr:`offsets` determines the starting index position of each bag (sequence) in :attr:`input`. per_sample_weights (Tensor, optional): a tensor of float / double weights, or None to indicate all weights should be taken to be ``1``. 
If specified, :attr:`per_sample_weights` must have exactly the same shape as input and is treated as having the same :attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``. Returns: Tensor output shape of `(B, embedding_dim)`. .. note:: A few notes about ``input`` and ``offsets``: - :attr:`input` and :attr:`offsets` have to be of the same type, either int or long - If :attr:`input` is 2D of shape `(B, N)`, it will be treated as ``B`` bags (sequences) each of fixed length ``N``, and this will return ``B`` values aggregated in a way depending on the :attr:`mode`. :attr:`offsets` is ignored and required to be ``None`` in this case. - If :attr:`input` is 1D of shape `(N)`, it will be treated as a concatenation of multiple bags (sequences). :attr:`offsets` is required to be a 1D tensor containing the starting index positions of each bag in :attr:`input`. Therefore, for :attr:`offsets` of shape `(B)`, :attr:`input` will be viewed as having ``B`` bags. Empty bags (i.e., having 0-length) will have returned vectors filled by zeros. """ return F.embedding_bag( input, self.weight, offsets, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.mode, self.sparse, per_sample_weights, self.include_last_offset, self.padding_idx, ) def extra_repr(self) -> str: s = "{num_embeddings}, {embedding_dim}" if self.max_norm is not None: s += ", max_norm={max_norm}" if self.norm_type != 2: s += ", norm_type={norm_type}" if self.scale_grad_by_freq is not False: s += ", scale_grad_by_freq={scale_grad_by_freq}" s += ", mode={mode}" if self.padding_idx is not None: s += ", padding_idx={padding_idx}" return s.format(**{k: repr(v) for k, v in self.__dict__.items()}) @classmethod def from_pretrained( cls, embeddings: Tensor, freeze: bool = True, max_norm: Optional[float] = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, mode: str = "mean", sparse: bool = False, include_last_offset: bool = False, padding_idx: Optional[int] = None, ) -> "EmbeddingBag": r"""Create EmbeddingBag instance from given 2-dimensional FloatTensor. Args: embeddings (Tensor): FloatTensor containing weights for the EmbeddingBag. First dimension is being passed to EmbeddingBag as 'num_embeddings', second as 'embedding_dim'. freeze (bool, optional): If ``True``, the tensor does not get updated in the learning process. Equivalent to ``embeddingbag.weight.requires_grad = False``. Default: ``True`` max_norm (float, optional): See module initialization documentation. Default: ``None`` norm_type (float, optional): See module initialization documentation. Default ``2``. scale_grad_by_freq (bool, optional): See module initialization documentation. Default ``False``. mode (str, optional): See module initialization documentation. Default: ``"mean"`` sparse (bool, optional): See module initialization documentation. Default: ``False``. include_last_offset (bool, optional): See module initialization documentation. Default: ``False``. padding_idx (int, optional): See module initialization documentation. Default: ``None``. 
Examples:: >>> # FloatTensor containing pretrained weights >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]]) >>> embeddingbag = nn.EmbeddingBag.from_pretrained(weight) >>> # Get embeddings for index 1 >>> input = torch.LongTensor([[1, 0]]) >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> embeddingbag(input) tensor([[ 2.5000, 3.7000, 4.6500]]) """ assert ( embeddings.dim() == 2 ), "Embeddings parameter is expected to be 2-dimensional" rows, cols = embeddings.shape embeddingbag = cls( num_embeddings=rows, embedding_dim=cols, _weight=embeddings, max_norm=max_norm, norm_type=norm_type, scale_grad_by_freq=scale_grad_by_freq, mode=mode, sparse=sparse, include_last_offset=include_last_offset, padding_idx=padding_idx, ) embeddingbag.weight.requires_grad = not freeze return embeddingbag ```
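The ``EmbeddingBag`` docstring above states that, for fixed-length 2D bags with no per-sample weights and no padding indices, ``mode="sum"`` matches ``nn.Embedding`` followed by ``torch.sum(dim=1)``. Below is a minimal sketch, not part of the module source, verifying that equivalence with shared pretrained weights.

```py
# Minimal sketch (not part of this module): check the documented equivalence of
# EmbeddingBag(mode="sum") and Embedding followed by a sum over the bag dimension.
import torch
import torch.nn as nn

weight = torch.randn(10, 3)
embedding = nn.Embedding.from_pretrained(weight)
bag = nn.EmbeddingBag.from_pretrained(weight, mode="sum")

idx = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])  # (B, N): B bags of fixed length N
summed = embedding(idx).sum(dim=1)  # Embedding followed by torch.sum(dim=1)
bagged = bag(idx)                   # 2D input: offsets must be None

assert torch.allclose(summed, bagged, atol=1e-6)
```

The analogous checks for ``mode="mean"`` and ``mode="max"`` follow by swapping the reduction, provided no bag is empty.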
======================================================================================================================= SOURCE CODE FILE: transformer.py LINES: 1 SIZE: 49.75 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\transformer.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import copy import warnings from typing import Any, Callable, Optional, Union import torch import torch.nn.functional as F from torch import Tensor from torch.nn.init import xavier_uniform_ from .activation import MultiheadAttention from .container import ModuleList from .dropout import Dropout from .linear import Linear from .module import Module from .normalization import LayerNorm __all__ = [ "Transformer", "TransformerEncoder", "TransformerDecoder", "TransformerEncoderLayer", "TransformerDecoderLayer", ] def _generate_square_subsequent_mask( sz: int, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ) -> Tensor: r"""Generate a square causal mask for the sequence. The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0). """ return torch.triu( torch.full((sz, sz), float("-inf"), dtype=dtype, device=device), diagonal=1, ) def _get_seq_len(src: Tensor, batch_first: bool) -> Optional[int]: if src.is_nested: return None else: src_size = src.size() if len(src_size) == 2: # unbatched: S, E return src_size[0] else: # batched: B, S, E if batch_first else S, B, E seq_len_pos = 1 if batch_first else 0 return src_size[seq_len_pos] class Transformer(Module): r"""A transformer model. .. note:: See `this tutorial <https://pytorch.org/tutorials/intermediate/transformer_building_blocks.html>`_ for an in depth discussion of the performant building blocks PyTorch offers for building your own transformer layers. User is able to modify the attributes as needed. The architecture is based on the paper `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_. Args: d_model: the number of expected features in the encoder/decoder inputs (default=512). nhead: the number of heads in the multiheadattention models (default=8). num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6). num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). activation: the activation function of encoder/decoder intermediate layer, can be a string ("relu" or "gelu") or a unary callable. Default: relu custom_encoder: custom encoder (default=None). custom_decoder: custom decoder (default=None). layer_norm_eps: the eps value in layer normalization components (default=1e-5). batch_first: If ``True``, then the input and output tensors are provided as (batch, seq, feature). Default: ``False`` (seq, batch, feature). norm_first: if ``True``, encoder and decoder layers will perform LayerNorms before other attention and feedforward operations, otherwise after. Default: ``False`` (after). bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive bias. Default: ``True``. 
Examples:: >>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12) >>> src = torch.rand((10, 32, 512)) >>> tgt = torch.rand((20, 32, 512)) >>> out = transformer_model(src, tgt) Note: A full example to apply nn.Transformer module for the word language model is available in https://github.com/pytorch/examples/tree/master/word_language_model """ def __init__( self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6, num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1, activation: Union[str, Callable[[Tensor], Tensor]] = F.relu, custom_encoder: Optional[Any] = None, custom_decoder: Optional[Any] = None, layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False, bias: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}") if custom_encoder is not None: self.encoder = custom_encoder else: encoder_layer = TransformerEncoderLayer( d_model, nhead, dim_feedforward, dropout, activation, layer_norm_eps, batch_first, norm_first, bias, **factory_kwargs, ) encoder_norm = LayerNorm( d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs ) self.encoder = TransformerEncoder( encoder_layer, num_encoder_layers, encoder_norm ) if custom_decoder is not None: self.decoder = custom_decoder else: decoder_layer = TransformerDecoderLayer( d_model, nhead, dim_feedforward, dropout, activation, layer_norm_eps, batch_first, norm_first, bias, **factory_kwargs, ) decoder_norm = LayerNorm( d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs ) self.decoder = TransformerDecoder( decoder_layer, num_decoder_layers, decoder_norm ) self._reset_parameters() self.d_model = d_model self.nhead = nhead self.batch_first = batch_first def forward( self, src: Tensor, tgt: Tensor, src_mask: Optional[Tensor] = None, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, src_is_causal: Optional[bool] = None, tgt_is_causal: Optional[bool] = None, memory_is_causal: bool = False, ) -> Tensor: r"""Take in and process masked source/target sequences. .. note:: If a boolean tensor is provided for any of the [src/tgt/memory]_mask arguments, positions with a ``True`` value are not allowed to participate in the attention, which is the opposite of the definition for :attr:`attn_mask` in :func:`torch.nn.functional.scaled_dot_product_attention`. Args: src: the sequence to the encoder (required). tgt: the sequence to the decoder (required). src_mask: the additive mask for the src sequence (optional). tgt_mask: the additive mask for the tgt sequence (optional). memory_mask: the additive mask for the encoder output (optional). src_key_padding_mask: the Tensor mask for src keys per batch (optional). tgt_key_padding_mask: the Tensor mask for tgt keys per batch (optional). memory_key_padding_mask: the Tensor mask for memory keys per batch (optional). src_is_causal: If specified, applies a causal mask as ``src_mask``. Default: ``None``; try to detect a causal mask. Warning: ``src_is_causal`` provides a hint that ``src_mask`` is the causal mask. Providing incorrect hints can result in incorrect execution, including forward and backward compatibility. tgt_is_causal: If specified, applies a causal mask as ``tgt_mask``. Default: ``None``; try to detect a causal mask. 
Warning: ``tgt_is_causal`` provides a hint that ``tgt_mask`` is the causal mask. Providing incorrect hints can result in incorrect execution, including forward and backward compatibility. memory_is_causal: If specified, applies a causal mask as ``memory_mask``. Default: ``False``. Warning: ``memory_is_causal`` provides a hint that ``memory_mask`` is the causal mask. Providing incorrect hints can result in incorrect execution, including forward and backward compatibility. Shape: - src: :math:`(S, E)` for unbatched input, :math:`(S, N, E)` if `batch_first=False` or `(N, S, E)` if `batch_first=True`. - tgt: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or `(N, T, E)` if `batch_first=True`. - src_mask: :math:`(S, S)` or :math:`(N\cdot\text{num\_heads}, S, S)`. - tgt_mask: :math:`(T, T)` or :math:`(N\cdot\text{num\_heads}, T, T)`. - memory_mask: :math:`(T, S)`. - src_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`. - tgt_key_padding_mask: :math:`(T)` for unbatched input otherwise :math:`(N, T)`. - memory_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`. Note: [src/tgt/memory]_mask ensures that position :math:`i` is allowed to attend the unmasked positions. If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor is provided, it will be added to the attention weight. [src/tgt/memory]_key_padding_mask provides specified elements in the key to be ignored by the attention. If a BoolTensor is provided, the positions with the value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. - output: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or `(N, T, E)` if `batch_first=True`. Note: Due to the multi-head attention architecture in the transformer model, the output sequence length of a transformer is same as the input sequence (i.e. target) length of the decoder. where :math:`S` is the source sequence length, :math:`T` is the target sequence length, :math:`N` is the batch size, :math:`E` is the feature number Examples: >>> # xdoctest: +SKIP >>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask) """ is_batched = src.dim() == 3 if not self.batch_first and src.size(1) != tgt.size(1) and is_batched: raise RuntimeError("the batch number of src and tgt must be equal") elif self.batch_first and src.size(0) != tgt.size(0) and is_batched: raise RuntimeError("the batch number of src and tgt must be equal") if src.size(-1) != self.d_model or tgt.size(-1) != self.d_model: raise RuntimeError( "the feature number of src and tgt must be equal to d_model" ) memory = self.encoder( src, mask=src_mask, src_key_padding_mask=src_key_padding_mask, is_causal=src_is_causal, ) output = self.decoder( tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, tgt_is_causal=tgt_is_causal, memory_is_causal=memory_is_causal, ) return output @staticmethod def generate_square_subsequent_mask( sz: int, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ) -> Tensor: r"""Generate a square causal mask for the sequence. The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0). 
""" return _generate_square_subsequent_mask(sz, dtype=dtype, device=device) def _reset_parameters(self): r"""Initiate parameters in the transformer model.""" for p in self.parameters(): if p.dim() > 1: xavier_uniform_(p) class TransformerEncoder(Module): r"""TransformerEncoder is a stack of N encoder layers. .. note:: See `this tutorial <https://pytorch.org/tutorials/intermediate/transformer_building_blocks.html>`_ for an in depth discussion of the performant building blocks PyTorch offers for building your own transformer layers. Users can build the BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters. Args: encoder_layer: an instance of the TransformerEncoderLayer() class (required). num_layers: the number of sub-encoder-layers in the encoder (required). norm: the layer normalization component (optional). enable_nested_tensor: if True, input will automatically convert to nested tensor (and convert back on output). This will improve the overall performance of TransformerEncoder when padding rate is high. Default: ``True`` (enabled). Examples:: >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8) >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6) >>> src = torch.rand(10, 32, 512) >>> out = transformer_encoder(src) """ __constants__ = ["norm"] def __init__( self, encoder_layer: "TransformerEncoderLayer", num_layers: int, norm: Optional[Module] = None, enable_nested_tensor: bool = True, mask_check: bool = True, ) -> None: super().__init__() torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}") self.layers = _get_clones(encoder_layer, num_layers) self.num_layers = num_layers self.norm = norm # this attribute saves the value providedat object construction self.enable_nested_tensor = enable_nested_tensor # this attribute controls whether nested tensors are used self.use_nested_tensor = enable_nested_tensor self.mask_check = mask_check enc_layer = "encoder_layer" why_not_sparsity_fast_path = "" if not isinstance(encoder_layer, torch.nn.TransformerEncoderLayer): why_not_sparsity_fast_path = f"{enc_layer} was not TransformerEncoderLayer" elif encoder_layer.norm_first: why_not_sparsity_fast_path = f"{enc_layer}.norm_first was True" elif not encoder_layer.self_attn.batch_first: why_not_sparsity_fast_path = ( f"{enc_layer}.self_attn.batch_first was not True" + "(use batch_first for better inference performance)" ) elif not encoder_layer.self_attn._qkv_same_embed_dim: why_not_sparsity_fast_path = ( f"{enc_layer}.self_attn._qkv_same_embed_dim was not True" ) elif encoder_layer.self_attn.in_proj_bias is None: why_not_sparsity_fast_path = f"{enc_layer}.self_attn was passed bias=False" elif not encoder_layer.activation_relu_or_gelu: why_not_sparsity_fast_path = ( f"{enc_layer}.activation_relu_or_gelu was not True" ) elif not (encoder_layer.norm1.eps == encoder_layer.norm2.eps): why_not_sparsity_fast_path = ( f"{enc_layer}.norm1.eps was not equal to {enc_layer}.norm2.eps" ) elif encoder_layer.self_attn.num_heads % 2 == 1: why_not_sparsity_fast_path = f"{enc_layer}.self_attn.num_heads is odd" if enable_nested_tensor and why_not_sparsity_fast_path: warnings.warn( f"enable_nested_tensor is True, but self.use_nested_tensor is False because {why_not_sparsity_fast_path}" ) self.use_nested_tensor = False def forward( self, src: Tensor, mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None, is_causal: Optional[bool] = None, ) -> Tensor: r"""Pass the input through the encoder layers in turn. 
Args: src: the sequence to the encoder (required). mask: the mask for the src sequence (optional). src_key_padding_mask: the mask for the src keys per batch (optional). is_causal: If specified, applies a causal mask as ``mask``. Default: ``None``; try to detect a causal mask. Warning: ``is_causal`` provides a hint that ``mask`` is the causal mask. Providing incorrect hints can result in incorrect execution, including forward and backward compatibility. Shape: see the docs in :class:`~torch.nn.Transformer`. """ src_key_padding_mask = F._canonical_mask( mask=src_key_padding_mask, mask_name="src_key_padding_mask", other_type=F._none_or_dtype(mask), other_name="mask", target_type=src.dtype, ) mask = F._canonical_mask( mask=mask, mask_name="mask", other_type=None, other_name="", target_type=src.dtype, check_other=False, ) output = src convert_to_nested = False first_layer = self.layers[0] src_key_padding_mask_for_layers = src_key_padding_mask why_not_sparsity_fast_path = "" str_first_layer = "self.layers[0]" batch_first = first_layer.self_attn.batch_first is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled() if not is_fastpath_enabled: why_not_sparsity_fast_path = ( "torch.backends.mha.get_fastpath_enabled() was not True" ) elif not hasattr(self, "use_nested_tensor"): why_not_sparsity_fast_path = "use_nested_tensor attribute not present" elif not self.use_nested_tensor: why_not_sparsity_fast_path = ( "self.use_nested_tensor (set in init) was not True" ) elif first_layer.training: why_not_sparsity_fast_path = f"{str_first_layer} was in training mode" elif not src.dim() == 3: why_not_sparsity_fast_path = ( f"input not batched; expected src.dim() of 3 but got {src.dim()}" ) elif src_key_padding_mask is None: why_not_sparsity_fast_path = "src_key_padding_mask was None" elif ( (not hasattr(self, "mask_check")) or self.mask_check ) and not torch._nested_tensor_from_mask_left_aligned( src, src_key_padding_mask.logical_not() ): why_not_sparsity_fast_path = "mask_check enabled, and src and src_key_padding_mask was not left aligned" elif output.is_nested: why_not_sparsity_fast_path = "NestedTensor input is not supported" elif mask is not None: why_not_sparsity_fast_path = ( "src_key_padding_mask and mask were both supplied" ) elif torch.is_autocast_enabled(): why_not_sparsity_fast_path = "autocast is enabled" if not why_not_sparsity_fast_path: tensor_args = ( src, first_layer.self_attn.in_proj_weight, first_layer.self_attn.in_proj_bias, first_layer.self_attn.out_proj.weight, first_layer.self_attn.out_proj.bias, first_layer.norm1.weight, first_layer.norm1.bias, first_layer.norm2.weight, first_layer.norm2.bias, first_layer.linear1.weight, first_layer.linear1.bias, first_layer.linear2.weight, first_layer.linear2.bias, ) _supported_device_type = [ "cpu", "cuda", torch.utils.backend_registration._privateuse1_backend_name, ] if torch.overrides.has_torch_function(tensor_args): why_not_sparsity_fast_path = "some Tensor argument has_torch_function" elif src.device.type not in _supported_device_type: why_not_sparsity_fast_path = ( f"src device is neither one of {_supported_device_type}" ) elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args): why_not_sparsity_fast_path = ( "grad is enabled and at least one of query or the " "input/output projection weights or biases requires_grad" ) if (not why_not_sparsity_fast_path) and (src_key_padding_mask is not None): convert_to_nested = True output = torch._nested_tensor_from_mask( output, src_key_padding_mask.logical_not(), mask_check=False ) 
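            # Padding is now encoded in the nested tensor itself, so the
            # per-layer key padding mask can be dropped.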
src_key_padding_mask_for_layers = None seq_len = _get_seq_len(src, batch_first) is_causal = _detect_is_causal_mask(mask, is_causal, seq_len) for mod in self.layers: output = mod( output, src_mask=mask, is_causal=is_causal, src_key_padding_mask=src_key_padding_mask_for_layers, ) if convert_to_nested: output = output.to_padded_tensor(0.0, src.size()) if self.norm is not None: output = self.norm(output) return output class TransformerDecoder(Module): r"""TransformerDecoder is a stack of N decoder layers. .. note:: See `this tutorial <https://pytorch.org/tutorials/intermediate/transformer_building_blocks.html>`_ for an in depth discussion of the performant building blocks PyTorch offers for building your own transformer layers. Args: decoder_layer: an instance of the TransformerDecoderLayer() class (required). num_layers: the number of sub-decoder-layers in the decoder (required). norm: the layer normalization component (optional). Examples:: >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8) >>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6) >>> memory = torch.rand(10, 32, 512) >>> tgt = torch.rand(20, 32, 512) >>> out = transformer_decoder(tgt, memory) """ __constants__ = ["norm"] def __init__( self, decoder_layer: "TransformerDecoderLayer", num_layers: int, norm: Optional[Module] = None, ) -> None: super().__init__() torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}") self.layers = _get_clones(decoder_layer, num_layers) self.num_layers = num_layers self.norm = norm def forward( self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, tgt_is_causal: Optional[bool] = None, memory_is_causal: bool = False, ) -> Tensor: r"""Pass the inputs (and mask) through the decoder layer in turn. Args: tgt: the sequence to the decoder (required). memory: the sequence from the last layer of the encoder (required). tgt_mask: the mask for the tgt sequence (optional). memory_mask: the mask for the memory sequence (optional). tgt_key_padding_mask: the mask for the tgt keys per batch (optional). memory_key_padding_mask: the mask for the memory keys per batch (optional). tgt_is_causal: If specified, applies a causal mask as ``tgt mask``. Default: ``None``; try to detect a causal mask. Warning: ``tgt_is_causal`` provides a hint that ``tgt_mask`` is the causal mask. Providing incorrect hints can result in incorrect execution, including forward and backward compatibility. memory_is_causal: If specified, applies a causal mask as ``memory mask``. Default: ``False``. Warning: ``memory_is_causal`` provides a hint that ``memory_mask`` is the causal mask. Providing incorrect hints can result in incorrect execution, including forward and backward compatibility. Shape: see the docs in :class:`~torch.nn.Transformer`. """ output = tgt seq_len = _get_seq_len(tgt, self.layers[0].self_attn.batch_first) tgt_is_causal = _detect_is_causal_mask(tgt_mask, tgt_is_causal, seq_len) for mod in self.layers: output = mod( output, memory, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, tgt_is_causal=tgt_is_causal, memory_is_causal=memory_is_causal, ) if self.norm is not None: output = self.norm(output) return output class TransformerEncoderLayer(Module): r"""TransformerEncoderLayer is made up of self-attn and feedforward network. .. 
note:: See `this tutorial <https://pytorch.org/tutorials/intermediate/transformer_building_blocks.html>`_ for an in depth discussion of the performant building blocks PyTorch offers for building your own transformer layers. This standard encoder layer is based on the paper `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_. Users may modify or implement in a different way during application. TransformerEncoderLayer can handle either traditional torch.tensor inputs, or Nested Tensor inputs. Derived classes are expected to similarly accept both input formats. (Not all combinations of inputs are currently supported by TransformerEncoderLayer while Nested Tensor is in prototype state.) If you are implementing a custom layer, you may derive it either from the Module or TransformerEncoderLayer class. If your custom layer supports both torch.Tensors and Nested Tensors inputs, make its implementation a derived class of TransformerEncoderLayer. If your custom Layer supports only torch.Tensor inputs, derive its implementation from Module. Args: d_model: the number of expected features in the input (required). nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). activation: the activation function of the intermediate layer, can be a string ("relu" or "gelu") or a unary callable. Default: relu layer_norm_eps: the eps value in layer normalization components (default=1e-5). batch_first: If ``True``, then the input and output tensors are provided as (batch, seq, feature). Default: ``False`` (seq, batch, feature). norm_first: if ``True``, layer norm is done prior to attention and feedforward operations, respectively. Otherwise it's done after. Default: ``False`` (after). bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive bias. Default: ``True``. Examples:: >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8) >>> src = torch.rand(10, 32, 512) >>> out = encoder_layer(src) Alternatively, when ``batch_first`` is ``True``: >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True) >>> src = torch.rand(32, 10, 512) >>> out = encoder_layer(src) Fast path: forward() will use a special optimized implementation described in `FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`_ if all of the following conditions are met: - Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor argument ``requires_grad`` - training is disabled (using ``.eval()``) - batch_first is ``True`` and the input is batched (i.e., ``src.dim() == 3``) - activation is one of: ``"relu"``, ``"gelu"``, ``torch.functional.relu``, or ``torch.functional.gelu`` - at most one of ``src_mask`` and ``src_key_padding_mask`` is passed - if src is a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_, neither ``src_mask`` nor ``src_key_padding_mask`` is passed - the two ``LayerNorm`` instances have a consistent ``eps`` value (this will naturally be the case unless the caller has manually modified one without modifying the other) If the optimized implementation is in use, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be passed for ``src`` to represent padding more efficiently than using a padding mask. 
In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ will be returned, and an additional speedup proportional to the fraction of the input that is padding can be expected. .. _`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`: https://arxiv.org/abs/2205.14135 """ __constants__ = ["norm_first"] def __init__( self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1, activation: Union[str, Callable[[Tensor], Tensor]] = F.relu, layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False, bias: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.self_attn = MultiheadAttention( d_model, nhead, dropout=dropout, bias=bias, batch_first=batch_first, **factory_kwargs, ) # Implementation of Feedforward model self.linear1 = Linear(d_model, dim_feedforward, bias=bias, **factory_kwargs) self.dropout = Dropout(dropout) self.linear2 = Linear(dim_feedforward, d_model, bias=bias, **factory_kwargs) self.norm_first = norm_first self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs) self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs) self.dropout1 = Dropout(dropout) self.dropout2 = Dropout(dropout) # Legacy string support for activation function. if isinstance(activation, str): activation = _get_activation_fn(activation) # We can't test self.activation in forward() in TorchScript, # so stash some information about it instead. if activation is F.relu or isinstance(activation, torch.nn.ReLU): self.activation_relu_or_gelu = 1 elif activation is F.gelu or isinstance(activation, torch.nn.GELU): self.activation_relu_or_gelu = 2 else: self.activation_relu_or_gelu = 0 self.activation = activation def __setstate__(self, state): super().__setstate__(state) if not hasattr(self, "activation"): self.activation = F.relu def forward( self, src: Tensor, src_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None, is_causal: bool = False, ) -> Tensor: r"""Pass the input through the encoder layer. Args: src: the sequence to the encoder layer (required). src_mask: the mask for the src sequence (optional). src_key_padding_mask: the mask for the src keys per batch (optional). is_causal: If specified, applies a causal mask as ``src mask``. Default: ``False``. Warning: ``is_causal`` provides a hint that ``src_mask`` is the causal mask. Providing incorrect hints can result in incorrect execution, including forward and backward compatibility. Shape: see the docs in :class:`~torch.nn.Transformer`. 
""" src_key_padding_mask = F._canonical_mask( mask=src_key_padding_mask, mask_name="src_key_padding_mask", other_type=F._none_or_dtype(src_mask), other_name="src_mask", target_type=src.dtype, ) src_mask = F._canonical_mask( mask=src_mask, mask_name="src_mask", other_type=None, other_name="", target_type=src.dtype, check_other=False, ) is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled() why_not_sparsity_fast_path = "" if not is_fastpath_enabled: why_not_sparsity_fast_path = ( "torch.backends.mha.get_fastpath_enabled() was not True" ) elif not src.dim() == 3: why_not_sparsity_fast_path = ( f"input not batched; expected src.dim() of 3 but got {src.dim()}" ) elif self.training: why_not_sparsity_fast_path = "training is enabled" elif not self.self_attn.batch_first: why_not_sparsity_fast_path = "self_attn.batch_first was not True" elif self.self_attn.in_proj_bias is None: why_not_sparsity_fast_path = "self_attn was passed bias=False" elif not self.self_attn._qkv_same_embed_dim: why_not_sparsity_fast_path = "self_attn._qkv_same_embed_dim was not True" elif not self.activation_relu_or_gelu: why_not_sparsity_fast_path = "activation_relu_or_gelu was not True" elif not (self.norm1.eps == self.norm2.eps): why_not_sparsity_fast_path = "norm1.eps is not equal to norm2.eps" elif src.is_nested and ( src_key_padding_mask is not None or src_mask is not None ): why_not_sparsity_fast_path = "neither src_key_padding_mask nor src_mask are not supported with NestedTensor input" elif self.self_attn.num_heads % 2 == 1: why_not_sparsity_fast_path = "num_head is odd" elif torch.is_autocast_enabled(): why_not_sparsity_fast_path = "autocast is enabled" elif any( len(getattr(m, "_forward_hooks", {})) + len(getattr(m, "_forward_pre_hooks", {})) for m in self.modules() ): why_not_sparsity_fast_path = "forward pre-/hooks are attached to the module" if not why_not_sparsity_fast_path: tensor_args = ( src, self.self_attn.in_proj_weight, self.self_attn.in_proj_bias, self.self_attn.out_proj.weight, self.self_attn.out_proj.bias, self.norm1.weight, self.norm1.bias, self.norm2.weight, self.norm2.bias, self.linear1.weight, self.linear1.bias, self.linear2.weight, self.linear2.bias, ) # We have to use list comprehensions below because TorchScript does not support # generator expressions. _supported_device_type = [ "cpu", "cuda", torch.utils.backend_registration._privateuse1_backend_name, ] if torch.overrides.has_torch_function(tensor_args): why_not_sparsity_fast_path = "some Tensor argument has_torch_function" elif not all( (x.device.type in _supported_device_type) for x in tensor_args ): why_not_sparsity_fast_path = ( "some Tensor argument's device is neither one of " f"{_supported_device_type}" ) elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args): why_not_sparsity_fast_path = ( "grad is enabled and at least one of query or the " "input/output projection weights or biases requires_grad" ) if not why_not_sparsity_fast_path: merged_mask, mask_type = self.self_attn.merge_masks( src_mask, src_key_padding_mask, src ) return torch._transformer_encoder_layer_fwd( src, self.self_attn.embed_dim, self.self_attn.num_heads, self.self_attn.in_proj_weight, self.self_attn.in_proj_bias, self.self_attn.out_proj.weight, self.self_attn.out_proj.bias, self.activation_relu_or_gelu == 2, self.norm_first, self.norm1.eps, self.norm1.weight, self.norm1.bias, self.norm2.weight, self.norm2.bias, self.linear1.weight, self.linear1.bias, self.linear2.weight, self.linear2.bias, merged_mask, mask_type, ) # see Fig. 
1 of https://arxiv.org/pdf/2002.04745v1.pdf x = src if self.norm_first: x = x + self._sa_block( self.norm1(x), src_mask, src_key_padding_mask, is_causal=is_causal ) x = x + self._ff_block(self.norm2(x)) else: x = self.norm1( x + self._sa_block(x, src_mask, src_key_padding_mask, is_causal=is_causal) ) x = self.norm2(x + self._ff_block(x)) return x # self-attention block def _sa_block( self, x: Tensor, attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False, ) -> Tensor: x = self.self_attn( x, x, x, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False, is_causal=is_causal, )[0] return self.dropout1(x) # feed forward block def _ff_block(self, x: Tensor) -> Tensor: x = self.linear2(self.dropout(self.activation(self.linear1(x)))) return self.dropout2(x) class TransformerDecoderLayer(Module): r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network. .. note:: See `this tutorial <https://pytorch.org/tutorials/intermediate/transformer_building_blocks.html>`_ for an in depth discussion of the performant building blocks PyTorch offers for building your own transformer layers. This standard decoder layer is based on the paper `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_. Users may modify or implement in a different way during application. Args: d_model: the number of expected features in the input (required). nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). activation: the activation function of the intermediate layer, can be a string ("relu" or "gelu") or a unary callable. Default: relu layer_norm_eps: the eps value in layer normalization components (default=1e-5). batch_first: If ``True``, then the input and output tensors are provided as (batch, seq, feature). Default: ``False`` (seq, batch, feature). norm_first: if ``True``, layer norm is done prior to self attention, multihead attention and feedforward operations, respectively. Otherwise it's done after. Default: ``False`` (after). bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive bias. Default: ``True``. 
Examples:: >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8) >>> memory = torch.rand(10, 32, 512) >>> tgt = torch.rand(20, 32, 512) >>> out = decoder_layer(tgt, memory) Alternatively, when ``batch_first`` is ``True``: >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=True) >>> memory = torch.rand(32, 10, 512) >>> tgt = torch.rand(32, 20, 512) >>> out = decoder_layer(tgt, memory) """ __constants__ = ["norm_first"] def __init__( self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1, activation: Union[str, Callable[[Tensor], Tensor]] = F.relu, layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False, bias: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.self_attn = MultiheadAttention( d_model, nhead, dropout=dropout, batch_first=batch_first, bias=bias, **factory_kwargs, ) self.multihead_attn = MultiheadAttention( d_model, nhead, dropout=dropout, batch_first=batch_first, bias=bias, **factory_kwargs, ) # Implementation of Feedforward model self.linear1 = Linear(d_model, dim_feedforward, bias=bias, **factory_kwargs) self.dropout = Dropout(dropout) self.linear2 = Linear(dim_feedforward, d_model, bias=bias, **factory_kwargs) self.norm_first = norm_first self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs) self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs) self.norm3 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs) self.dropout1 = Dropout(dropout) self.dropout2 = Dropout(dropout) self.dropout3 = Dropout(dropout) # Legacy string support for activation function. if isinstance(activation, str): self.activation = _get_activation_fn(activation) else: self.activation = activation def __setstate__(self, state): if "activation" not in state: state["activation"] = F.relu super().__setstate__(state) def forward( self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, tgt_is_causal: bool = False, memory_is_causal: bool = False, ) -> Tensor: r"""Pass the inputs (and mask) through the decoder layer. Args: tgt: the sequence to the decoder layer (required). memory: the sequence from the last layer of the encoder (required). tgt_mask: the mask for the tgt sequence (optional). memory_mask: the mask for the memory sequence (optional). tgt_key_padding_mask: the mask for the tgt keys per batch (optional). memory_key_padding_mask: the mask for the memory keys per batch (optional). tgt_is_causal: If specified, applies a causal mask as ``tgt mask``. Default: ``False``. Warning: ``tgt_is_causal`` provides a hint that ``tgt_mask`` is the causal mask. Providing incorrect hints can result in incorrect execution, including forward and backward compatibility. memory_is_causal: If specified, applies a causal mask as ``memory mask``. Default: ``False``. Warning: ``memory_is_causal`` provides a hint that ``memory_mask`` is the causal mask. Providing incorrect hints can result in incorrect execution, including forward and backward compatibility. Shape: see the docs in :class:`~torch.nn.Transformer`. """ # see Fig. 
1 of https://arxiv.org/pdf/2002.04745v1.pdf x = tgt if self.norm_first: x = x + self._sa_block( self.norm1(x), tgt_mask, tgt_key_padding_mask, tgt_is_causal ) x = x + self._mha_block( self.norm2(x), memory, memory_mask, memory_key_padding_mask, memory_is_causal, ) x = x + self._ff_block(self.norm3(x)) else: x = self.norm1( x + self._sa_block(x, tgt_mask, tgt_key_padding_mask, tgt_is_causal) ) x = self.norm2( x + self._mha_block( x, memory, memory_mask, memory_key_padding_mask, memory_is_causal ) ) x = self.norm3(x + self._ff_block(x)) return x # self-attention block def _sa_block( self, x: Tensor, attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False, ) -> Tensor: x = self.self_attn( x, x, x, attn_mask=attn_mask, key_padding_mask=key_padding_mask, is_causal=is_causal, need_weights=False, )[0] return self.dropout1(x) # multihead attention block def _mha_block( self, x: Tensor, mem: Tensor, attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False, ) -> Tensor: x = self.multihead_attn( x, mem, mem, attn_mask=attn_mask, key_padding_mask=key_padding_mask, is_causal=is_causal, need_weights=False, )[0] return self.dropout2(x) # feed forward block def _ff_block(self, x: Tensor) -> Tensor: x = self.linear2(self.dropout(self.activation(self.linear1(x)))) return self.dropout3(x) def _get_clones(module, N): # FIXME: copy.deepcopy() is not defined on nn.module return ModuleList([copy.deepcopy(module) for i in range(N)]) def _get_activation_fn(activation: str) -> Callable[[Tensor], Tensor]: if activation == "relu": return F.relu elif activation == "gelu": return F.gelu raise RuntimeError(f"activation should be relu/gelu, not {activation}") def _detect_is_causal_mask( mask: Optional[Tensor], is_causal: Optional[bool] = None, size: Optional[int] = None, ) -> bool: """Return whether the given attention mask is causal. Warning: If ``is_causal`` is not ``None``, its value will be returned as is. If a user supplies an incorrect ``is_causal`` hint, ``is_causal=False`` when the mask is in fact a causal attention.mask may lead to reduced performance relative to what would be achievable with ``is_causal=True``; ``is_causal=True`` when the mask is in fact not a causal attention.mask may lead to incorrect and unpredictable execution - in some scenarios, a causal mask may be applied based on the hint, in other execution scenarios the specified mask may be used. The choice may not appear to be deterministic, in that a number of factors like alignment, hardware SKU, etc influence the decision whether to use a mask or rely on the hint. ``size`` if not None, check whether the mask is a causal mask of the provided size Otherwise, checks for any causal mask. """ # Prevent type refinement make_causal = is_causal is True if is_causal is None and mask is not None: sz = size if size is not None else mask.size(-2) causal_comparison = _generate_square_subsequent_mask( sz, device=mask.device, dtype=mask.dtype ) # Do not use `torch.equal` so we handle batched masks by # broadcasting the comparison. if mask.size() == causal_comparison.size(): make_causal = bool((mask == causal_comparison).all()) else: make_causal = False return make_causal ```
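The encoder and decoder layers defined in the file above are self-contained modules. The sketch below is not part of the original file; it is a minimal CPU example, with arbitrary toy sizes, of how the two layers are typically composed, using the pre-LN `norm_first=True` ordering referenced by the "Fig. 1" comment and a causal target mask built with `nn.Transformer.generate_square_subsequent_mask`.

```py
# Minimal CPU sketch with arbitrary toy sizes (not part of the library source).
import torch
import torch.nn as nn

d_model, nhead, seq_len, batch = 32, 4, 10, 2

enc_layer = nn.TransformerEncoderLayer(
    d_model=d_model, nhead=nhead, batch_first=True, norm_first=True  # pre-LN ordering
)
dec_layer = nn.TransformerDecoderLayer(
    d_model=d_model, nhead=nhead, batch_first=True, norm_first=True
)

src = torch.randn(batch, seq_len, d_model)
tgt = torch.randn(batch, seq_len, d_model)

memory = enc_layer(src)  # self-attention block followed by the feed-forward block
tgt_mask = nn.Transformer.generate_square_subsequent_mask(seq_len)
out = dec_layer(tgt, memory, tgt_mask=tgt_mask, tgt_is_causal=True)
print(out.shape)  # torch.Size([2, 10, 32])
```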
====================================================================================================================== SOURCE CODE FILE: upsampling.py LINES: 1 SIZE: 11.56 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\upsampling.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from typing import Optional import torch.nn.functional as F from torch import Tensor from torch.nn.common_types import _ratio_2_t, _ratio_any_t, _size_2_t, _size_any_t from .module import Module __all__ = ["Upsample", "UpsamplingNearest2d", "UpsamplingBilinear2d"] class Upsample(Module): r"""Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data. The input data is assumed to be of the form `minibatch x channels x [optional depth] x [optional height] x width`. Hence, for spatial inputs, we expect a 4D Tensor and for volumetric inputs, we expect a 5D Tensor. The algorithms available for upsampling are nearest neighbor and linear, bilinear, bicubic and trilinear for 3D, 4D and 5D input Tensor, respectively. One can either give a :attr:`scale_factor` or the target output :attr:`size` to calculate the output size. (You cannot give both, as it is ambiguous) Args: size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional): output spatial sizes scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional): multiplier for spatial size. Has to match input size if it is a tuple. mode (str, optional): the upsampling algorithm: one of ``'nearest'``, ``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``. Default: ``'nearest'`` align_corners (bool, optional): if ``True``, the corner pixels of the input and output tensors are aligned, and thus preserving the values at those pixels. This only has effect when :attr:`mode` is ``'linear'``, ``'bilinear'``, ``'bicubic'``, or ``'trilinear'``. Default: ``False`` recompute_scale_factor (bool, optional): recompute the scale_factor for use in the interpolation calculation. If `recompute_scale_factor` is ``True``, then `scale_factor` must be passed in and `scale_factor` is used to compute the output `size`. The computed output `size` will be used to infer new scales for the interpolation. Note that when `scale_factor` is floating-point, it may differ from the recomputed `scale_factor` due to rounding and precision issues. If `recompute_scale_factor` is ``False``, then `size` or `scale_factor` will be used directly for interpolation. Shape: - Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})` or :math:`(N, C, D_{in}, H_{in}, W_{in})` - Output: :math:`(N, C, W_{out})`, :math:`(N, C, H_{out}, W_{out})` or :math:`(N, C, D_{out}, H_{out}, W_{out})`, where .. math:: D_{out} = \left\lfloor D_{in} \times \text{scale\_factor} \right\rfloor .. math:: H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor .. math:: W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor .. warning:: With ``align_corners = True``, the linearly interpolating modes (`linear`, `bilinear`, `bicubic`, and `trilinear`) don't proportionally align the output and input pixels, and thus the output values can depend on the input size. This was the default behavior for these modes up to version 0.3.1. Since then, the default behavior is ``align_corners = False``. See below for concrete examples on how this affects the outputs. .. note:: If you want downsampling/general resizing, you should use :func:`~nn.functional.interpolate`. 
Examples:: >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2) >>> input tensor([[[[1., 2.], [3., 4.]]]]) >>> m = nn.Upsample(scale_factor=2, mode='nearest') >>> m(input) tensor([[[[1., 1., 2., 2.], [1., 1., 2., 2.], [3., 3., 4., 4.], [3., 3., 4., 4.]]]]) >>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles") >>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False >>> m(input) tensor([[[[1.0000, 1.2500, 1.7500, 2.0000], [1.5000, 1.7500, 2.2500, 2.5000], [2.5000, 2.7500, 3.2500, 3.5000], [3.0000, 3.2500, 3.7500, 4.0000]]]]) >>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) >>> m(input) tensor([[[[1.0000, 1.3333, 1.6667, 2.0000], [1.6667, 2.0000, 2.3333, 2.6667], [2.3333, 2.6667, 3.0000, 3.3333], [3.0000, 3.3333, 3.6667, 4.0000]]]]) >>> # Try scaling the same data in a larger tensor >>> input_3x3 = torch.zeros(3, 3).view(1, 1, 3, 3) >>> input_3x3[:, :, :2, :2].copy_(input) tensor([[[[1., 2.], [3., 4.]]]]) >>> input_3x3 tensor([[[[1., 2., 0.], [3., 4., 0.], [0., 0., 0.]]]]) >>> # xdoctest: +IGNORE_WANT("seems to fail when other tests are run in the same session") >>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False >>> # Notice that values in top left corner are the same with the small input (except at boundary) >>> m(input_3x3) tensor([[[[1.0000, 1.2500, 1.7500, 1.5000, 0.5000, 0.0000], [1.5000, 1.7500, 2.2500, 1.8750, 0.6250, 0.0000], [2.5000, 2.7500, 3.2500, 2.6250, 0.8750, 0.0000], [2.2500, 2.4375, 2.8125, 2.2500, 0.7500, 0.0000], [0.7500, 0.8125, 0.9375, 0.7500, 0.2500, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]]) >>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) >>> # Notice that values in top left corner are now changed >>> m(input_3x3) tensor([[[[1.0000, 1.4000, 1.8000, 1.6000, 0.8000, 0.0000], [1.8000, 2.2000, 2.6000, 2.2400, 1.1200, 0.0000], [2.6000, 3.0000, 3.4000, 2.8800, 1.4400, 0.0000], [2.4000, 2.7200, 3.0400, 2.5600, 1.2800, 0.0000], [1.2000, 1.3600, 1.5200, 1.2800, 0.6400, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]]) """ __constants__ = [ "size", "scale_factor", "mode", "align_corners", "name", "recompute_scale_factor", ] name: str size: Optional[_size_any_t] scale_factor: Optional[_ratio_any_t] mode: str align_corners: Optional[bool] recompute_scale_factor: Optional[bool] def __init__( self, size: Optional[_size_any_t] = None, scale_factor: Optional[_ratio_any_t] = None, mode: str = "nearest", align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, ) -> None: super().__init__() self.name = type(self).__name__ self.size = size if isinstance(scale_factor, tuple): self.scale_factor = tuple(float(factor) for factor in scale_factor) else: self.scale_factor = float(scale_factor) if scale_factor else None self.mode = mode self.align_corners = align_corners self.recompute_scale_factor = recompute_scale_factor def forward(self, input: Tensor) -> Tensor: return F.interpolate( input, self.size, self.scale_factor, self.mode, self.align_corners, recompute_scale_factor=self.recompute_scale_factor, ) def __setstate__(self, state): if "recompute_scale_factor" not in state: state["recompute_scale_factor"] = True super().__setstate__(state) def extra_repr(self) -> str: if self.scale_factor is not None: info = "scale_factor=" + repr(self.scale_factor) else: info = "size=" + repr(self.size) info += ", mode=" + repr(self.mode) return info class UpsamplingNearest2d(Upsample): r"""Applies a 2D nearest neighbor 
upsampling to an input signal composed of several input channels. To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor` as it's constructor argument. When :attr:`size` is given, it is the output size of the image `(h, w)`. Args: size (int or Tuple[int, int], optional): output spatial sizes scale_factor (float or Tuple[float, float], optional): multiplier for spatial size. .. warning:: This class is deprecated in favor of :func:`~nn.functional.interpolate`. Shape: - Input: :math:`(N, C, H_{in}, W_{in})` - Output: :math:`(N, C, H_{out}, W_{out})` where .. math:: H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor .. math:: W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor Examples:: >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2) >>> input tensor([[[[1., 2.], [3., 4.]]]]) >>> m = nn.UpsamplingNearest2d(scale_factor=2) >>> m(input) tensor([[[[1., 1., 2., 2.], [1., 1., 2., 2.], [3., 3., 4., 4.], [3., 3., 4., 4.]]]]) """ def __init__( self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None, ) -> None: super().__init__(size, scale_factor, mode="nearest") class UpsamplingBilinear2d(Upsample): r"""Applies a 2D bilinear upsampling to an input signal composed of several input channels. To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor` as it's constructor argument. When :attr:`size` is given, it is the output size of the image `(h, w)`. Args: size (int or Tuple[int, int], optional): output spatial sizes scale_factor (float or Tuple[float, float], optional): multiplier for spatial size. .. warning:: This class is deprecated in favor of :func:`~nn.functional.interpolate`. It is equivalent to ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``. Shape: - Input: :math:`(N, C, H_{in}, W_{in})` - Output: :math:`(N, C, H_{out}, W_{out})` where .. math:: H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor .. math:: W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor Examples:: >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2) >>> input tensor([[[[1., 2.], [3., 4.]]]]) >>> # xdoctest: +IGNORE_WANT("do other tests modify the global state?") >>> m = nn.UpsamplingBilinear2d(scale_factor=2) >>> m(input) tensor([[[[1.0000, 1.3333, 1.6667, 2.0000], [1.6667, 2.0000, 2.3333, 2.6667], [2.3333, 2.6667, 3.0000, 3.3333], [3.0000, 3.3333, 3.6667, 4.0000]]]]) """ def __init__( self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None, ) -> None: super().__init__(size, scale_factor, mode="bilinear", align_corners=True) ```
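Both `UpsamplingNearest2d` and `UpsamplingBilinear2d` above are documented as deprecated in favor of `nn.functional.interpolate`. The short check below is not part of the original file; it illustrates, on an assumed toy input, the equivalence stated in the `UpsamplingBilinear2d` warning.

```py
# Illustrative check of the documented equivalence (not part of the library source).
import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)

y_module = nn.UpsamplingBilinear2d(scale_factor=2)(x)
y_functional = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)

assert torch.allclose(y_module, y_functional)
print(y_module)
```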
================================================================================================================= SOURCE CODE FILE: utils.py LINES: 1 SIZE: 2.60 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\utils.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import collections from itertools import repeat from typing import Any __all__ = ["consume_prefix_in_state_dict_if_present"] def _ntuple(n, name="parse"): def parse(x): if isinstance(x, collections.abc.Iterable): return tuple(x) return tuple(repeat(x, n)) parse.__name__ = name return parse _single = _ntuple(1, "_single") _pair = _ntuple(2, "_pair") _triple = _ntuple(3, "_triple") _quadruple = _ntuple(4, "_quadruple") def _reverse_repeat_tuple(t, n): r"""Reverse the order of `t` and repeat each element for `n` times. This can be used to translate padding arg used by Conv and Pooling modules to the ones used by `F.pad`. """ return tuple(x for x in reversed(t) for _ in range(n)) def _list_with_default(out_size: list[int], defaults: list[int]) -> list[int]: import torch if isinstance(out_size, (int, torch.SymInt)): return out_size if len(defaults) <= len(out_size): raise ValueError(f"Input dimension should be at least {len(out_size) + 1}") return [ v if v is not None else d for v, d in zip(out_size, defaults[-len(out_size) :]) ] def consume_prefix_in_state_dict_if_present( state_dict: dict[str, Any], prefix: str, ) -> None: r"""Strip the prefix in state_dict in place, if any. .. note:: Given a `state_dict` from a DP/DDP model, a local model can load it by applying `consume_prefix_in_state_dict_if_present(state_dict, "module.")` before calling :meth:`torch.nn.Module.load_state_dict`. Args: state_dict (OrderedDict): a state-dict to be loaded to the model. prefix (str): prefix. """ keys = list(state_dict.keys()) for key in keys: if key.startswith(prefix): newkey = key[len(prefix) :] state_dict[newkey] = state_dict.pop(key) # also strip the prefix in metadata if any. if hasattr(state_dict, "_metadata"): keys = list(state_dict._metadata.keys()) for key in keys: # for the metadata dict, the key can be: # '': for the DDP module, which we want to remove. # 'module': for the actual model. # 'module.xx.xx': for the rest. if len(key) == 0: continue # handling both, 'module' case and 'module.' cases if key == prefix.replace(".", "") or key.startswith(prefix): newkey = key[len(prefix) :] state_dict._metadata[newkey] = state_dict._metadata.pop(key) ```
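The note in `consume_prefix_in_state_dict_if_present` describes loading a DP/DDP checkpoint into a plain module by stripping the "module." prefix. The sketch below is not part of the original file; it simulates that flow with a hypothetical toy `nn.Linear` model standing in for a real checkpoint.

```py
# Minimal sketch of the "module." prefix-stripping use case (not library source).
from torch import nn
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present

model = nn.Linear(4, 2)

# Simulate a checkpoint saved from a DataParallel/DDP-wrapped model, whose keys
# all carry a "module." prefix.
wrapped_state = {"module." + k: v for k, v in model.state_dict().items()}

consume_prefix_in_state_dict_if_present(wrapped_state, "module.")  # in-place
model.load_state_dict(wrapped_state)  # loads cleanly once the prefix is removed
print(sorted(wrapped_state))  # ['bias', 'weight']
```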
===================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.77 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\parallel\__init__.py ENCODING: utf-8 ```py from typing_extensions import deprecated from torch.nn.parallel.data_parallel import data_parallel, DataParallel from torch.nn.parallel.distributed import DistributedDataParallel from torch.nn.parallel.parallel_apply import parallel_apply from torch.nn.parallel.replicate import replicate from torch.nn.parallel.scatter_gather import gather, scatter __all__ = [ "replicate", "scatter", "parallel_apply", "gather", "data_parallel", "DataParallel", "DistributedDataParallel", ] @deprecated( "`torch.nn.parallel.DistributedDataParallelCPU` is deprecated, " "please use `torch.nn.parallel.DistributedDataParallel` instead.", category=FutureWarning, ) class DistributedDataParallelCPU(DistributedDataParallel): pass ```
======================================================================================================================= SOURCE CODE FILE: _functions.py LINES: 1 SIZE: 4.96 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\parallel\_functions.py ENCODING: utf-8 ```py import warnings from itertools import chain from typing import Optional import torch from torch._utils import _get_device_index from torch.autograd import Function from torch.nn.parallel import comm class Broadcast(Function): @staticmethod def forward(ctx, target_gpus, *inputs): assert all( i.device.type != "cpu" for i in inputs ), "Broadcast function not implemented for CPU tensors" target_gpus = [_get_device_index(x, True) for x in target_gpus] ctx.target_gpus = target_gpus if len(inputs) == 0: return () ctx.num_inputs = len(inputs) ctx.input_device = inputs[0].get_device() outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus) non_differentiables = [] for idx, input_requires_grad in enumerate(ctx.needs_input_grad[1:]): if not input_requires_grad: non_differentiables.extend(output[idx] for output in outputs) ctx.mark_non_differentiable(*non_differentiables) return tuple(chain.from_iterable(outputs)) @staticmethod def backward(ctx, *grad_outputs): return (None,) + ReduceAddCoalesced.apply( ctx.input_device, ctx.num_inputs, *grad_outputs ) class ReduceAddCoalesced(Function): @staticmethod def forward(ctx, destination, num_inputs, *grads): ctx.target_gpus = [ grads[i].get_device() for i in range(0, len(grads), num_inputs) ] grads_ = [grads[i : i + num_inputs] for i in range(0, len(grads), num_inputs)] return comm.reduce_add_coalesced(grads_, destination) @staticmethod def backward(ctx, *grad_outputs): return ( None, None, ) + Broadcast.apply(ctx.target_gpus, *grad_outputs) class Gather(Function): @staticmethod def forward(ctx, target_device, dim, *inputs): assert all( i.device.type != "cpu" for i in inputs ), "Gather function not implemented for CPU tensors" if target_device == "cpu": ctx.target_device = "cpu" else: target_device = _get_device_index(target_device, True) ctx.target_device = target_device ctx.dim = dim ctx.input_gpus = tuple(i.get_device() for i in inputs) if all(t.dim() == 0 for t in inputs) and dim == 0: inputs = tuple(t.view(1) for t in inputs) warnings.warn( "Was asked to gather along dimension 0, but all " "input tensors were scalars; will instead unsqueeze " "and return a vector." 
) ctx.unsqueezed_scalar = True else: ctx.unsqueezed_scalar = False ctx.input_sizes = tuple(i.size(ctx.dim) for i in inputs) return comm.gather(inputs, ctx.dim, ctx.target_device) @staticmethod def backward(ctx, grad_output): scattered_grads = Scatter.apply( ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output ) if ctx.unsqueezed_scalar: scattered_grads = tuple(g[0] for g in scattered_grads) return (None, None) + scattered_grads class Scatter(Function): @staticmethod def forward(ctx, target_gpus, chunk_sizes, dim, input): target_gpus = [_get_device_index(x, True) for x in target_gpus] ctx.dim = dim ctx.input_device = input.get_device() if input.device.type != "cpu" else -1 streams = None if torch.cuda.is_available() and ctx.input_device == -1: # Perform CPU to GPU copies in a background stream streams = [ _get_stream(torch.device("cuda", device)) for device in target_gpus ] outputs = comm.scatter(input, target_gpus, chunk_sizes, ctx.dim, streams) # Synchronize with the copy stream if streams is not None: for i, output in enumerate(outputs): with torch.cuda.device(target_gpus[i]): main_stream = torch.cuda.current_stream() main_stream.wait_stream(streams[i]) output.record_stream(main_stream) return outputs @staticmethod def backward(ctx, *grad_output): return None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output) # background streams used for copying _streams: Optional[list[Optional[torch.Stream]]] = None def _get_stream(device: torch.device): """Get a background stream for copying between CPU and target device.""" global _streams if device.type == "cpu": return None device_mod = getattr(torch, device.type, None) if device_mod is None: return None if _streams is None: _streams = [None] * device_mod.device_count() if _streams[device.index] is None: _streams[device.index] = device_mod.Stream(device.index) return _streams[device.index] ```
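The `Scatter` and `Gather` autograd Functions above are usually reached indirectly through the public `scatter` and `gather` helpers re-exported from `torch.nn.parallel`. The sketch below is not part of the original file; it performs a round trip through those helpers on an assumed single CUDA device, and since `Gather` is not implemented for CPU tensors it is guarded to run only when CUDA is available.

```py
# Guarded round trip through the public scatter/gather helpers (not library source).
import torch
from torch.nn.parallel import gather, scatter

if torch.cuda.is_available():
    x = torch.arange(8.0).view(4, 2)
    # Copy x to GPU 0; with more than one target GPU it would be chunked along dim 0.
    chunks = scatter(x, [0])
    # Concatenate the chunks back on GPU 0.
    merged = gather(chunks, 0)
    assert torch.equal(merged.cpu(), x)
    print(len(chunks), merged.device)
```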
================================================================================================================= SOURCE CODE FILE: comm.py LINES: 1 SIZE: 10.89 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\parallel\comm.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import warnings import torch from torch._utils import ( _flatten_dense_tensors, _get_device_index, _handle_complex, _reorder_tensors_as, _take_tensors, _unflatten_dense_tensors, ) from torch.cuda import nccl def broadcast(tensor, devices=None, *, out=None): r"""Broadcasts a tensor to specified GPU devices. Args: tensor (Tensor): tensor to broadcast. Can be on CPU or GPU. devices (Iterable[torch.device, str or int], optional): an iterable of GPU devices, among which to broadcast. out (Sequence[Tensor], optional, keyword-only): the GPU tensors to store output results. .. note:: Exactly one of :attr:`devices` and :attr:`out` must be specified. Returns: - If :attr:`devices` is specified, a tuple containing copies of :attr:`tensor`, placed on :attr:`devices`. - If :attr:`out` is specified, a tuple containing :attr:`out` tensors, each containing a copy of :attr:`tensor`. """ tensor = _handle_complex(tensor) if not ((devices is None) ^ (out is None)): raise RuntimeError( f"Exactly one of 'devices' and 'out' must be specified, but got devices={devices} and out={out}" ) if devices is not None: devices = [_get_device_index(d) for d in devices] return torch._C._broadcast(tensor, devices) else: return torch._C._broadcast_out(tensor, out) def broadcast_coalesced(tensors, devices, buffer_size=10485760): """Broadcast a sequence of tensors to the specified GPUs. Small tensors are first coalesced into a buffer to reduce the number of synchronizations. Args: tensors (sequence): tensors to broadcast. Must be on the same device, either CPU or GPU. devices (Iterable[torch.device, str or int]): an iterable of GPU devices, among which to broadcast. buffer_size (int): maximum size of the buffer used for coalescing Returns: A tuple containing copies of :attr:`tensor`, placed on :attr:`devices`. """ devices = [_get_device_index(d) for d in devices] tensors = [_handle_complex(t) for t in tensors] return torch._C._broadcast_coalesced(tensors, devices, buffer_size) def reduce_add(inputs, destination=None): """Sum tensors from multiple GPUs. All inputs should have matching shapes, dtype, and layout. The output tensor will be of the same shape, dtype, and layout. Args: inputs (Iterable[Tensor]): an iterable of tensors to add. destination (int, optional): a device on which the output will be placed (default: current device). Returns: A tensor containing an elementwise sum of all inputs, placed on the :attr:`destination` device. 
""" destination = _get_device_index(destination, optional=True) input_size = inputs[0].size() root_index = None # index of input tensor that already is on the correct device for i, inp in enumerate(inputs): assert inp.device.type != "cpu", "reduce_add expects all inputs to be on GPUs" if inp.get_device() == destination: root_index = i if inp.size() != input_size: got = "x".join(str(x) for x in inp.size()) expected = "x".join(str(x) for x in input_size) raise ValueError( f"input {i} has invalid size: got {got}, but expected {expected}" ) if root_index is None: raise RuntimeError( "reduce_add expects destination to be on the same GPU with one of the tensors" ) if len(inputs) == 1: return inputs[0] if nccl.is_available(inputs): result = torch.empty_like(inputs[root_index]) nccl.reduce(inputs, output=result, root=root_index) else: destination_device = torch.device(inputs[root_index].device.type, destination) nonroot = [t for i, t in enumerate(inputs) if i != root_index] # make a new tensor w/o clone result = inputs[root_index] + nonroot[0].to( device=destination_device, non_blocking=True ) for other in nonroot[1:]: result.add_(other.to(device=destination_device, non_blocking=True)) return result def reduce_add_coalesced(inputs, destination=None, buffer_size=10485760): """Sum tensors from multiple GPUs. Small tensors are first coalesced into a buffer to reduce the number of synchronizations. Args: inputs (Iterable[Iterable[Tensor]]): iterable of iterables that contain tensors from a single device. destination (int, optional): a device on which the output will be placed (default: current device). buffer_size (int): maximum size of the buffer used for coalescing Returns: A tuple of tensors containing an elementwise sum of each group of inputs, placed on the ``destination`` device. """ # TODO: When `len(inputs) == 1` and all inputs are on `destination`, just # return `inputs`. dense_tensors: list[list] = [[] for _ in inputs] # shape (num_gpus, num_tensors) output = [] ref_order = [] # process sparse ones first since they may have different sizes on different gpus for tensor_at_gpus in zip(*inputs): if all(t.is_sparse for t in tensor_at_gpus): result = reduce_add(tensor_at_gpus, destination) # this will be sparse too output.append(result) ref_order.append(tensor_at_gpus[0]) else: for coll, t in zip(dense_tensors, tensor_at_gpus): coll.append(t.to_dense() if t.is_sparse else t) ref_order.append(dense_tensors[0][-1]) itrs = [_take_tensors(tensors, buffer_size) for tensors in dense_tensors] # now the dense ones, which have consistent sizes for chunks in zip(*itrs): flat_tensors = [ _flatten_dense_tensors(chunk) for chunk in chunks ] # (num_gpus,) flat_result = reduce_add(flat_tensors, destination) for t in _unflatten_dense_tensors(flat_result, chunks[0]): # The unflattened tensors do not share storage, and we don't expose # base flat tensor anyways, so give them different version counters. # See NOTE [ Version Counter in comm.*_coalesced ] output.append(t.data) return tuple(_reorder_tensors_as(output, ref_order)) def scatter(tensor, devices=None, chunk_sizes=None, dim=0, streams=None, *, out=None): """Scatters tensor across multiple GPUs. Args: tensor (Tensor): tensor to scatter. Can be on CPU or GPU. devices (Iterable[torch.device, str or int], optional): an iterable of GPU devices, among which to scatter. chunk_sizes (Iterable[int], optional): sizes of chunks to be placed on each device. It should match :attr:`devices` in length and sums to ``tensor.size(dim)``. 
If not specified, :attr:`tensor` will be divided into equal chunks. dim (int, optional): A dimension along which to chunk :attr:`tensor`. Default: ``0``. streams (Iterable[torch.cuda.Stream], optional): an iterable of Streams, among which to execute the scatter. If not specified, the default stream will be utilized. out (Sequence[Tensor], optional, keyword-only): the GPU tensors to store output results. Sizes of these tensors must match that of :attr:`tensor`, except for :attr:`dim`, where the total size must sum to ``tensor.size(dim)``. .. note:: Exactly one of :attr:`devices` and :attr:`out` must be specified. When :attr:`out` is specified, :attr:`chunk_sizes` must not be specified and will be inferred from sizes of :attr:`out`. Returns: - If :attr:`devices` is specified, a tuple containing chunks of :attr:`tensor`, placed on :attr:`devices`. - If :attr:`out` is specified, a tuple containing :attr:`out` tensors, each containing a chunk of :attr:`tensor`. """ tensor = _handle_complex(tensor) if out is None: devices = [_get_device_index(d) for d in devices] return tuple(torch._C._scatter(tensor, devices, chunk_sizes, dim, streams)) else: if devices is not None: raise RuntimeError( f"'devices' must not be specified when 'out' is specified, but got devices={devices}" ) if chunk_sizes is not None: raise RuntimeError( f"'chunk_sizes' must not be specified when 'out' is specified, but got chunk_sizes={chunk_sizes}" ) return tuple(torch._C._scatter_out(tensor, out, dim, streams)) def gather(tensors, dim=0, destination=None, *, out=None): r"""Gathers tensors from multiple GPU devices. Args: tensors (Iterable[Tensor]): an iterable of tensors to gather. Tensor sizes in all dimensions other than :attr:`dim` have to match. dim (int, optional): a dimension along which the tensors will be concatenated. Default: ``0``. destination (torch.device, str, or int, optional): the output device. Can be CPU or CUDA. Default: the current CUDA device. out (Tensor, optional, keyword-only): the tensor to store gather result. Its sizes must match those of :attr:`tensors`, except for :attr:`dim`, where the size must equal ``sum(tensor.size(dim) for tensor in tensors)``. Can be on CPU or CUDA. .. note:: :attr:`destination` must not be specified when :attr:`out` is specified. Returns: - If :attr:`destination` is specified, a tensor located on :attr:`destination` device, that is a result of concatenating :attr:`tensors` along :attr:`dim`. - If :attr:`out` is specified, the :attr:`out` tensor, now containing results of concatenating :attr:`tensors` along :attr:`dim`. """ tensors = [_handle_complex(t) for t in tensors] if out is None: if destination == -1: warnings.warn( "Using -1 to represent CPU tensor is deprecated. Please use a " 'device object or string instead, e.g., "cpu".', FutureWarning, stacklevel=2, ) destination = _get_device_index(destination, allow_cpu=True, optional=True) return torch._C._gather(tensors, dim, destination) else: if destination is not None: raise RuntimeError( f"'destination' must not be specified when 'out' is specified, but got destination={destination}" ) return torch._C._gather_out(tensors, out, dim) ```
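A short sketch, not part of the original file, of the `comm` helpers documented above: broadcast a CPU tensor to two GPUs, then sum the copies back on GPU 0 with `reduce_add`. It assumes at least two visible CUDA devices and is skipped otherwise.

```py
# Guarded broadcast + reduce_add example (not library source); needs >= 2 GPUs.
import torch
from torch.nn.parallel import comm

if torch.cuda.device_count() >= 2:
    t = torch.arange(4.0)
    copies = comm.broadcast(t, devices=[0, 1])      # one copy of t per listed GPU
    total = comm.reduce_add(copies, destination=0)  # elementwise sum, placed on GPU 0
    assert torch.equal(total.cpu(), 2 * t)
    print(total)
```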
========================================================================================================================== SOURCE CODE FILE: data_parallel.py LINES: 1 SIZE: 11.76 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\parallel\data_parallel.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import operator import warnings from collections.abc import Sequence from itertools import chain from typing import Any, Generic, Optional, TypeVar, Union import torch from torch._utils import ( _get_all_device_indices, _get_available_device_type, _get_device_index, _get_devices_properties, ) from torch.nn.modules import Module from torch.nn.parallel.parallel_apply import parallel_apply from torch.nn.parallel.replicate import replicate from torch.nn.parallel.scatter_gather import gather, scatter_kwargs __all__ = ["DataParallel", "data_parallel"] def _check_balance(device_ids: Sequence[Union[int, torch.device]]) -> None: imbalance_warn = """ There is an imbalance between your GPUs. You may want to exclude GPU {} which has less than 75% of the memory or cores of GPU {}. You can do so by setting the device_ids argument to DataParallel, or by setting the CUDA_VISIBLE_DEVICES environment variable.""" device_ids = [_get_device_index(x, True) for x in device_ids] dev_props = _get_devices_properties(device_ids) def warn_imbalance(get_prop): values = [get_prop(props) for props in dev_props] min_pos, min_val = min(enumerate(values), key=operator.itemgetter(1)) max_pos, max_val = max(enumerate(values), key=operator.itemgetter(1)) if min_val / max_val < 0.75: warnings.warn( imbalance_warn.format(device_ids[min_pos], device_ids[max_pos]) ) return True return False if warn_imbalance(lambda props: props.total_memory): return if warn_imbalance(lambda props: props.multi_processor_count): return T = TypeVar("T", bound=Module) class DataParallel(Module, Generic[T]): r"""Implements data parallelism at the module level. This container parallelizes the application of the given :attr:`module` by splitting the input across the specified devices by chunking in the batch dimension (other objects will be copied once per device). In the forward pass, the module is replicated on each device, and each replica handles a portion of the input. During the backwards pass, gradients from each replica are summed into the original module. The batch size should be larger than the number of GPUs used. .. warning:: It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`, instead of this class, to do multi-GPU training, even if there is only a single node. See: :ref:`cuda-nn-ddp-instead` and :ref:`ddp`. Arbitrary positional and keyword inputs are allowed to be passed into DataParallel but some types are specially handled. tensors will be **scattered** on dim specified (default 0). tuple, list and dict types will be shallow copied. The other types will be shared among different threads and can be corrupted if written to in the model's forward pass. The parallelized :attr:`module` must have its parameters and buffers on ``device_ids[0]`` before running this :class:`~torch.nn.DataParallel` module. .. warning:: In each forward, :attr:`module` is **replicated** on each device, so any updates to the running module in ``forward`` will be lost. For example, if :attr:`module` has a counter attribute that is incremented in each ``forward``, it will always stay at the initial value because the update is done on the replicas which are destroyed after ``forward``. 
However, :class:`~torch.nn.DataParallel` guarantees that the replica on ``device[0]`` will have its parameters and buffers sharing storage with the base parallelized :attr:`module`. So **in-place** updates to the parameters or buffers on ``device[0]`` will be recorded. E.g., :class:`~torch.nn.BatchNorm2d` and :func:`~torch.nn.utils.spectral_norm` rely on this behavior to update the buffers. .. warning:: Forward and backward hooks defined on :attr:`module` and its submodules will be invoked ``len(device_ids)`` times, each with inputs located on a particular device. Particularly, the hooks are only guaranteed to be executed in correct order with respect to operations on corresponding devices. For example, it is not guaranteed that hooks set via :meth:`~torch.nn.Module.register_forward_pre_hook` be executed before `all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, but that each such hook be executed before the corresponding :meth:`~torch.nn.Module.forward` call of that device. .. warning:: When :attr:`module` returns a scalar (i.e., 0-dimensional tensor) in :func:`forward`, this wrapper will return a vector of length equal to number of devices used in data parallelism, containing the result from each device. .. note:: There is a subtlety in using the ``pack sequence -> recurrent network -> unpack sequence`` pattern in a :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`. See :ref:`pack-rnn-unpack-with-data-parallelism` section in FAQ for details. Args: module (Module): module to be parallelized device_ids (list of int or torch.device): CUDA devices (default: all devices) output_device (int or torch.device): device location of output (default: device_ids[0]) Attributes: module (Module): the module to be parallelized Example:: >>> # xdoctest: +SKIP >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2]) >>> output = net(input_var) # input_var can be on any device, including CPU """ # TODO: update notes/cuda.rst when this class handles 8+ GPUs well def __init__( self, module: T, device_ids: Optional[Sequence[Union[int, torch.device]]] = None, output_device: Optional[Union[int, torch.device]] = None, dim: int = 0, ) -> None: super().__init__() torch._C._log_api_usage_once("torch.nn.parallel.DataParallel") device_type = _get_available_device_type() if device_type is None or device_type == "mps": self.module = module self.device_ids = [] return if device_ids is None: device_ids = _get_all_device_indices() if device_ids is None: raise RuntimeError("no available devices were found") if output_device is None: output_device = device_ids[0] self.dim = dim self.module = module self.device_ids = [_get_device_index(x, True) for x in device_ids] self.output_device = _get_device_index(output_device, True) self.src_device_obj = torch.device(device_type, self.device_ids[0]) if device_type == "cuda": _check_balance(self.device_ids) if len(self.device_ids) == 1: self.module.to(self.src_device_obj) def forward(self, *inputs: Any, **kwargs: Any) -> Any: with torch.autograd.profiler.record_function("DataParallel.forward"): if not self.device_ids: return self.module(*inputs, **kwargs) for t in chain(self.module.parameters(), self.module.buffers()): if t.device != self.src_device_obj: raise RuntimeError( "module must have its parameters and buffers " f"on device {self.src_device_obj} (device_ids[0]) but found one of " f"them on device: {t.device}" ) inputs, module_kwargs = self.scatter(inputs, kwargs, self.device_ids) # for forward function without any inputs, empty list and dict 
will be created # so the module can be executed on one device which is the first one in device_ids if not inputs and not module_kwargs: inputs = ((),) module_kwargs = ({},) if len(self.device_ids) == 1: return self.module(*inputs[0], **module_kwargs[0]) replicas = self.replicate(self.module, self.device_ids[: len(inputs)]) outputs = self.parallel_apply(replicas, inputs, module_kwargs) return self.gather(outputs, self.output_device) def replicate( self, module: T, device_ids: Sequence[Union[int, torch.device]] ) -> list[T]: return replicate(module, device_ids, not torch.is_grad_enabled()) def scatter( self, inputs: tuple[Any, ...], kwargs: Optional[dict[str, Any]], device_ids: Sequence[Union[int, torch.device]], ) -> Any: return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) def parallel_apply( self, replicas: Sequence[T], inputs: Sequence[Any], kwargs: Any ) -> list[Any]: return parallel_apply( replicas, inputs, kwargs, self.device_ids[: len(replicas)] ) def gather(self, outputs: Any, output_device: Union[int, torch.device]) -> Any: return gather(outputs, output_device, dim=self.dim) def data_parallel( module: Module, inputs: Any, device_ids: Optional[Sequence[Union[int, torch.device]]] = None, output_device: Optional[Union[int, torch.device]] = None, dim: int = 0, module_kwargs: Optional[Any] = None, ) -> torch.Tensor: r"""Evaluate module(input) in parallel across the GPUs given in device_ids. This is the functional version of the DataParallel module. Args: module (Module): the module to evaluate in parallel inputs (Tensor): inputs to the module device_ids (list of int or torch.device): GPU ids on which to replicate module output_device (list of int or torch.device): GPU location of the output Use -1 to indicate the CPU. (default: device_ids[0]) Returns: a Tensor containing the result of module(input) located on output_device """ if not isinstance(inputs, tuple): inputs = (inputs,) if inputs is not None else () device_type = _get_available_device_type() if device_type is None: raise RuntimeError("device type could not be determined") if device_ids is None: device_ids = _get_all_device_indices() if device_ids is None: raise RuntimeError("no available devices were found") if output_device is None: output_device = device_ids[0] device_ids = [_get_device_index(x, True) for x in device_ids] output_device = _get_device_index(output_device, True) src_device_obj = torch.device(device_type, device_ids[0]) for t in chain(module.parameters(), module.buffers()): if t.device != src_device_obj: raise RuntimeError( "module must have its parameters and buffers " f"on device {src_device_obj} (device_ids[0]) but found one of " f"them on device: {t.device}" ) inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim) # for module without any inputs, empty list and dict will be created # so the module can be executed on one device which is the first one in device_ids if not inputs and not module_kwargs: inputs = ((),) module_kwargs = ({},) assert module_kwargs is not None if len(device_ids) == 1: return module(*inputs[0], **module_kwargs[0]) used_device_ids = device_ids[: len(inputs)] replicas = replicate(module, used_device_ids) outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids) return gather(outputs, output_device, dim) ```
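The functional `data_parallel` documented above carries no usage example of its own. The sketch below is not part of the original file; it assumes arbitrary layer sizes and that the default CUDA device is device 0, and it is skipped when CUDA is unavailable.

```py
# Guarded sketch of the functional data_parallel entry point (not library source).
import torch
from torch import nn
from torch.nn.parallel import data_parallel

if torch.cuda.is_available():
    model = nn.Linear(8, 4).cuda()      # parameters must live on device_ids[0]
    inputs = torch.randn(16, 8, device="cuda")
    # With several GPUs: replicate the module, split the batch along dim 0, and
    # gather the per-device outputs; with one GPU it simply runs on that device.
    out = data_parallel(model, inputs)
    print(out.shape)  # torch.Size([16, 4])
```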
======================================================================================================================== SOURCE CODE FILE: distributed.py LINES: 2 SIZE: 108.35 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\parallel\distributed.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import copy import functools import inspect import itertools import logging import os import sys import warnings import weakref from collections import defaultdict, deque from contextlib import contextmanager from dataclasses import dataclass, fields, is_dataclass from enum import auto, Enum from typing import Any, Callable, Optional, TYPE_CHECKING import torch import torch.distributed as dist from torch._utils import _get_device_index from torch.autograd import Function, Variable from torch.distributed.algorithms.join import Join, Joinable, JoinHook from torch.nn.modules import Module from torch.nn.parallel.scatter_gather import gather, scatter_kwargs from torch.utils._pytree import tree_flatten, tree_unflatten RPC_AVAILABLE = False if dist.is_available(): from torch.distributed.distributed_c10d import ( _get_default_group, _rank_not_in_group, ReduceOp, ) from torch.distributed.utils import ( _alloc_storage, _cast_forward_inputs, _free_storage, _sync_module_states, _to_kwargs, _verify_param_shape_across_processes, ) if dist.rpc.is_available(): RPC_AVAILABLE = True from torch.distributed.rpc import RRef if TYPE_CHECKING: from torch.utils.hooks import RemovableHandle __all__ = ["DistributedDataParallel"] logger = logging.getLogger(__name__) @dataclass class _MixedPrecision: """ This configures DDP-native mixed precision training. Attributes: param_dtype (torch.dtype): This specifies the dtype for model parameters, inputs (when ``cast_forward_inputs`` is set to ``True``), and therefore the dtype for computation. However, outside the forward and backward passes, parameters are in full precision. Model checkpointing always happens in full precision. reduce_dtype (torch.dtype): This specifies the dtype for gradient reduction, which is permitted to differ from ``param_dtype``. buffer_dtype (torch.dtype): This specifies the dtype for buffers. .. note:: This API is experimental and subject to change. .. note:: Only floating point tensors are cast to their specified dtypes. .. note:: ``state_dict`` checkpoints parameters and buffers in full precision. .. note:: Each low precision dtype must be specified explicitly. For example, ``_MixedPrecision(reduce_dtype=torch.float16)`` only specifies the reduction dtype to be low precision, and DDP will not cast parameters or buffers. .. note:: If a ``reduce_dtype`` is not specified, then gradient reduction happens in ``param_dtype`` if specified or the original parameter dtype otherwise. For example, ``_MixedPrecision(param_dtype=torch.float16)`` would result in communication occurring in fp16. """ param_dtype: Optional[torch.dtype] = None reduce_dtype: Optional[torch.dtype] = None buffer_dtype: Optional[torch.dtype] = None # TODO (rohan-varma): keep_low_precision_grads: bool = False # TODO (rohan-varma): APIs to allow users to run batchnorm and layernorm # in full precision. For DDP, this can be implemented by not performing the # parameter cast for BN and LN units. 
def _cast_buffers(mixed_precision_config, root_module): """Casts buffers to the given ``buffer_dtype``.""" for buf in root_module.buffers(): if hasattr(buf, "_ddp_ignored") and buf._ddp_ignored: continue buf.data = buf.to(dtype=mixed_precision_config.buffer_dtype) def _setup_mixed_precision_params(mixed_precision_config, root_module): """Create and free storage for the mixed precision parameters.""" for param in root_module.parameters(): # Do not setup mixed precision for DDP ignored parameters. if hasattr(param, "_ddp_ignored") and param._ddp_ignored: continue if not hasattr(param, "_mp_param"): param._mp_param = torch.zeros_like( param, device=param.device, dtype=mixed_precision_config.param_dtype, requires_grad=param.requires_grad, ) _free_storage(param._mp_param) # _fp_param will point to the full precision param so it can be switched # back to at the end of forward / backward. param._fp_param = param.data def _tree_flatten_with_rref(output): output_is_rref = RPC_AVAILABLE and isinstance(output, RRef) if output_is_rref: output_tensor_list, treespec = tree_flatten(output.local_value()) else: output_tensor_list, treespec = tree_flatten(output) # Need to return flattened tensors, spec to re-pack them, as well # as if the return type was actually an RRef to reconstruct. return output_tensor_list, treespec, output_is_rref def _tree_unflatten_with_rref(output, treespec, output_is_rref): output = tree_unflatten(output, treespec) if output_is_rref: output = RRef(output) return output def _find_tensors(obj): r"""Recursively find all tensors contained in the specified object.""" if RPC_AVAILABLE and isinstance(obj, RRef): # If the current node is the owner of the RRef, unwrap it and try to # find Tensors. # TODO: Expand to remote RRefs. if obj.is_owner(): return _find_tensors(obj.local_value()) if isinstance(obj, torch.Tensor): return [obj] if isinstance(obj, (list, tuple)): return itertools.chain.from_iterable(map(_find_tensors, obj)) if isinstance(obj, dict): return itertools.chain.from_iterable(map(_find_tensors, obj.values())) if is_dataclass(obj): return itertools.chain.from_iterable( map(_find_tensors, (getattr(obj, f.name) for f in fields(obj))) ) return [] def _dump_DDP_relevant_env_vars(): relevant_env_vars = [ "RANK", "LOCAL_RANK", "WORLD_SIZE", "MASTER_PORT", "MASTER_ADDR", "CUDA_VISIBLE_DEVICES", "GLOO_SOCKET_IFNAME", "GLOO_DEVICE_TRANSPORT", "NCCL_SOCKET_IFNAME", "TORCH_NCCL_BLOCKING_WAIT", "NCCL_DEBUG", "NCCL_DEBUG_SUBSYS", "NCCL_IB_DISABLE", # More NCCL env vars: "NCCL_P2P_DISABLE", "NCCL_P2P_LEVEL", "NCCL_SHM_DISABLE", "NCCL_SOCKET_NTHREADS", "NCCL_NSOCKS_PERTHREAD", "NCCL_BUFFSIZE", "NCCL_NTHREADS", "NCCL_RINGS", "NCCL_MAX_NCHANNELS", "NCCL_MIN_NCHANNELS", "NCCL_CHECKS_DISABLE", "NCCL_CHECK_POINTERS", "NCCL_LAUNCH_MODE", "NCCL_IB_HCA", "NCCL_IB_TIMEOUT", "NCCL_IB_RETRY_CNT", "NCCL_IB_GID_INDEX", "NCCL_IB_SL", "NCCL_IB_TC", "NCCL_IB_AR_THRESHOLD", "NCCL_IB_CUDA_SUPPORT", "NCCL_NET_GDR_LEVEL", "NCCL_NET_GDR_READ", "NCCL_SINGLE_RING_THRESHOLD", "NCCL_LL_THRESHOLD", "NCCL_TREE_THRESHOLD", "NCCL_ALGO", "NCCL_PROTO", "NCCL_IGNORE_CPU_AFFINITY", "NCCL_DEBUG_FILE", "NCCL_COLLNET_ENABLE", "NCCL_TOPO_FILE", "NCCL_TOPO_DUMP_FILE", "TORCH_NCCL_ASYNC_ERROR_HANDLING", ] formatted_output = "" for var in relevant_env_vars: value = os.environ[var] if var in os.environ else "N/A" formatted_output += f"env:{var}={value}\n" print(formatted_output) class _BufferCommHookLocation(Enum): PRE_FORWARD = auto() POST_FORWARD = auto() @dataclass class _BufferCommHook: buffer_comm_hook: Callable 
buffer_comm_hook_state: Any buffer_comm_hook_location: _BufferCommHookLocation # Add a DDPSink to run various functions when backwards starts, such as # queueing call back of out-most backward/graph task, # this helps call back is fired after all gradients' calculation # is completed. class _DDPSink(Function): @staticmethod def forward(ctx, ddp_weakref, *inputs): # set_materialize_grads(False) will ensure that None gradients stay as # None and are not filled with zeros. ctx.set_materialize_grads(False) ctx.ddp_weakref = ddp_weakref ret = inputs if ddp_weakref()._ddp_sink_clone: ret = tuple( inp.clone() if isinstance(inp, torch.Tensor) else inp for inp in inputs ) return ret @staticmethod def backward(ctx, *grad_outputs): # Enqueue delay allreduce for static graph training on the first # iteration. ddp_weakref = ctx.ddp_weakref() reducer = ddp_weakref.reducer static_graph = ddp_weakref.static_graph delay_ar_enqueued = ( static_graph and ddp_weakref._static_graph_delay_allreduce_enqueued ) if static_graph and not delay_ar_enqueued: Variable._execution_engine.queue_callback( # type: ignore[call-arg,misc] reducer._delay_all_reduce ) ddp_weakref._static_graph_delay_allreduce_enqueued = True return (None, *grad_outputs) class _DDPJoinHook(JoinHook): def __init__(self, ddp, divide_by_initial_world_size): """Set config variables for internal usage.""" assert isinstance(ddp, DistributedDataParallel), ( "DDP join hook requires passing in a DistributedDataParallel " "instance as the state" ) assert ddp.logger is not None ddp.logger._set_uneven_input_join() self.ddp = ddp self.ddp._divide_by_initial_world_size = divide_by_initial_world_size super().__init__() def main_hook(self): """Shadow the DDP collective communication operations in the forward and backward passes.""" ddp = self.ddp # Buckets are rebuilt only once during a training period ddp.reducer._rebuild_buckets() # Schedule a broadcast if we are syncing module buffers in the # forward pass # TODO: make DDP uneven inputs context manager support buffer # comm hook (https://github.com/pytorch/pytorch/issues/65436) ddp._check_and_sync_module_buffers() # Check if need to sync in the backward pass should_sync_backwards = ddp._check_global_requires_backward_grad_sync( is_joined_rank=True ) # Forward parameter sync is disabled in the next iteration if we # are skipping gradient sync this iteration, so set # `require_forward_param_sync` accordingly ddp.require_forward_param_sync = should_sync_backwards if not should_sync_backwards: return # Schedule one allreduce per gradient bucket to match the backward # pass allreduce ddp._match_all_reduce_for_bwd_pass() # Check if we need to allreduce locally unused parameters if ddp.find_unused_parameters: ddp._match_unused_params_allreduce() # Rebuilt parameters are pushed only once during a training period ddp.reducer._push_all_rebuilt_params() def post_hook(self, is_last_joiner: bool): """Sync the final model to ensure that the model is the same across all processes.""" self.ddp._sync_final_model(is_last_joiner) class DistributedDataParallel(Module, Joinable): r"""Implement distributed data parallelism based on ``torch.distributed`` at module level. This container provides data parallelism by synchronizing gradients across each model replica. The devices to synchronize across are specified by the input ``process_group``, which is the entire world by default. 
Note that ``DistributedDataParallel`` does not chunk or otherwise shard the input across participating GPUs; the user is responsible for defining how to do so, for example through the use of a :class:`DistributedSampler`. See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`. The same constraints on input as in :class:`torch.nn.DataParallel` apply. Creation of this class requires that ``torch.distributed`` to be already initialized, by calling :func:`torch.distributed.init_process_group`. ``DistributedDataParallel`` is proven to be significantly faster than :class:`torch.nn.DataParallel` for single-node multi-GPU data parallel training. To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn up ``N`` processes, ensuring that each process exclusively works on a single GPU from 0 to N-1. This can be done by either setting ``CUDA_VISIBLE_DEVICES`` for every process or by calling: >>> # xdoctest: +SKIP("undefined variables") >>> torch.cuda.set_device(i) where i is from 0 to N-1. In each process, you should refer the following to construct this module: >>> # xdoctest: +SKIP("undefined variables") >>> torch.distributed.init_process_group( >>> backend='nccl', world_size=N, init_method='...' >>> ) >>> model = DistributedDataParallel(model, device_ids=[i], output_device=i) In order to spawn up multiple processes per node, you can use either ``torch.distributed.launch`` or ``torch.multiprocessing.spawn``. .. note:: Please refer to `PyTorch Distributed Overview <https://pytorch.org/tutorials/beginner/dist_overview.html>`__ for a brief introduction to all features related to distributed training. .. note:: ``DistributedDataParallel`` can be used in conjunction with :class:`torch.distributed.optim.ZeroRedundancyOptimizer` to reduce per-rank optimizer states memory footprint. Please refer to `ZeroRedundancyOptimizer recipe <https://pytorch.org/tutorials/recipes/zero_redundancy_optimizer.html>`__ for more details. .. note:: ``nccl`` backend is currently the fastest and highly recommended backend when using GPUs. This applies to both single-node and multi-node distributed training. .. note:: This module also supports mixed-precision distributed training. This means that your model can have different types of parameters such as mixed types of ``fp16`` and ``fp32``, the gradient reduction on these mixed types of parameters will just work fine. .. note:: If you use ``torch.save`` on one process to checkpoint the module, and ``torch.load`` on some other processes to recover it, make sure that ``map_location`` is configured properly for every process. Without ``map_location``, ``torch.load`` would recover the module to devices where the module was saved from. .. note:: When a model is trained on ``M`` nodes with ``batch=N``, the gradient will be ``M`` times smaller when compared to the same model trained on a single node with ``batch=M*N`` if the loss is summed (NOT averaged as usual) across instances in a batch (because the gradients between different nodes are averaged). You should take this into consideration when you want to obtain a mathematically equivalent training process compared to the local training counterpart. But in most cases, you can just treat a DistributedDataParallel wrapped model, a DataParallel wrapped model and an ordinary model on a single GPU as the same (E.g. using the same learning rate for equivalent batch size). .. note:: Parameters are never broadcast between processes. 
        The module performs an all-reduce step on gradients and assumes that
        they will be modified by the optimizer in all processes in the same
        way. Buffers (e.g. BatchNorm stats) are broadcast from the module in
        the process of rank 0 to all other replicas in the system in every
        iteration.

    .. note::
        If you are using DistributedDataParallel in conjunction with the
        :ref:`distributed-rpc-framework`, you should always use
        :meth:`torch.distributed.autograd.backward` to compute gradients and
        :class:`torch.distributed.optim.DistributedOptimizer` for optimizing
        parameters.

        Example::

            >>> # xdoctest: +SKIP("undefined variables")
            >>> import torch.distributed.autograd as dist_autograd
            >>> from torch.nn.parallel import DistributedDataParallel as DDP
            >>> import torch
            >>> from torch import optim
            >>> from torch.distributed.optim import DistributedOptimizer
            >>> import torch.distributed.rpc as rpc
            >>> from torch.distributed.rpc import RRef
            >>>
            >>> t1 = torch.rand((3, 3), requires_grad=True)
            >>> t2 = torch.rand((3, 3), requires_grad=True)
            >>> rref = rpc.remote("worker1", torch.add, args=(t1, t2))
            >>> ddp_model = DDP(my_model)
            >>>
            >>> # Setup optimizer
            >>> optimizer_params = [rref]
            >>> for param in ddp_model.parameters():
            >>>     optimizer_params.append(RRef(param))
            >>>
            >>> dist_optim = DistributedOptimizer(
            >>>     optim.SGD,
            >>>     optimizer_params,
            >>>     lr=0.05,
            >>> )
            >>>
            >>> with dist_autograd.context() as context_id:
            >>>     pred = ddp_model(rref.to_here())
            >>>     loss = loss_func(pred, target)
            >>>     dist_autograd.backward(context_id, [loss])
            >>>     dist_optim.step(context_id)

    .. note:: DistributedDataParallel currently offers limited support for
        gradient checkpointing with :meth:`torch.utils.checkpoint`.
        If the checkpoint is done with use_reentrant=False (recommended), DDP
        will work as expected without any limitations. If, however, the
        checkpoint is done with use_reentrant=True (the default), DDP will work
        as expected when there are no unused parameters in the model and each
        layer is checkpointed at most once (make sure you are not passing
        `find_unused_parameters=True` to DDP). We currently do not support the
        case where a layer is checkpointed multiple times, or when there are
        unused parameters in the checkpointed model.

    .. note:: To let a non-DDP model load a state dict from a DDP model,
        :meth:`~torch.nn.modules.utils.consume_prefix_in_state_dict_if_present`
        needs to be applied to strip the prefix "module." in the DDP state dict
        before loading.

    .. warning::
        Constructor, forward method, and differentiation of the output (or a
        function of the output of this module) are distributed synchronization
        points. Take that into account in case different processes might be
        executing different code.

    .. warning::
        This module assumes all parameters are registered in the model by the
        time it is created. No parameters should be added or removed later.
        The same applies to buffers.

    .. warning::
        This module assumes all parameters are registered in the model of each
        distributed process in the same order. The module itself will conduct
        gradient ``allreduce`` following the reverse order of the registered
        parameters of the model. In other words, it is the user's
        responsibility to ensure that each distributed process has the exact
        same model and thus the exact same parameter registration order.

    .. warning::
        This module allows parameters with non-rowmajor-contiguous strides.
        For example, your model may contain some parameters whose
        :class:`torch.memory_format` is ``torch.contiguous_format``
        and others whose format is ``torch.channels_last``.
However, corresponding parameters in different processes must have the same strides. .. warning:: This module doesn't work with :func:`torch.autograd.grad` (i.e. it will only work if gradients are to be accumulated in ``.grad`` attributes of parameters). .. warning:: If you plan on using this module with a ``nccl`` backend or a ``gloo`` backend (that uses Infiniband), together with a DataLoader that uses multiple workers, please change the multiprocessing start method to ``forkserver`` (Python 3 only) or ``spawn``. Unfortunately Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will likely experience deadlocks if you don't change this setting. .. warning:: You should never try to change your model's parameters after wrapping up your model with ``DistributedDataParallel``. Because, when wrapping up your model with ``DistributedDataParallel``, the constructor of ``DistributedDataParallel`` will register the additional gradient reduction functions on all the parameters of the model itself at the time of construction. If you change the model's parameters afterwards, gradient reduction functions no longer match the correct set of parameters. .. warning:: Using ``DistributedDataParallel`` in conjunction with the :ref:`distributed-rpc-framework` is experimental and subject to change. Args: module (Module): module to be parallelized device_ids (list of int or torch.device): CUDA devices. 1) For single-device modules, ``device_ids`` can contain exactly one device id, which represents the only CUDA device where the input module corresponding to this process resides. Alternatively, ``device_ids`` can also be ``None``. 2) For multi-device modules and CPU modules, ``device_ids`` must be ``None``. When ``device_ids`` is ``None`` for both cases, both the input data for the forward pass and the actual module must be placed on the correct device. (default: ``None``) output_device (int or torch.device): Device location of output for single-device CUDA modules. For multi-device modules and CPU modules, it must be ``None``, and the module itself dictates the output location. (default: ``device_ids[0]`` for single-device modules) broadcast_buffers (bool): Flag that enables syncing (broadcasting) buffers of the module at beginning of the ``forward`` function. (default: ``True``) init_sync (bool): Whether to sync during initialization to verify param shapes and broadcast parameters and buffers. WARNING: if this is set to False the user is required to ensure themselves that the weights are the same on all ranks. (default: ``True``) process_group: The process group to be used for distributed data all-reduction. If ``None``, the default process group, which is created by :func:`torch.distributed.init_process_group`, will be used. (default: ``None``) bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into multiple buckets so that gradient reduction of each bucket can potentially overlap with backward computation. :attr:`bucket_cap_mb` controls the bucket size in MebiBytes (MiB). If ``None``, a default size of 25 MiB will be used. (default: ``None``) find_unused_parameters (bool): Traverse the autograd graph from all tensors contained in the return value of the wrapped module's ``forward`` function. Parameters that don't receive gradients as part of this graph are preemptively marked as being ready to be reduced. 
In addition, parameters that may have been used in the wrapped module's ``forward`` function but were not part of loss computation and thus would also not receive gradients are preemptively marked as ready to be reduced. (default: ``False``) check_reduction: This argument is deprecated. gradient_as_bucket_view (bool): When set to ``True``, gradients will be views pointing to different offsets of ``allreduce`` communication buckets. This can reduce peak memory usage, where the saved memory size will be equal to the total gradients size. Moreover, it avoids the overhead of copying between gradients and ``allreduce`` communication buckets. When gradients are views, ``detach_()`` cannot be called on the gradients. If hitting such errors, please fix it by referring to the :meth:`~torch.optim.Optimizer.zero_grad` function in ``torch/optim/optimizer.py`` as a solution. Note that gradients will be views after first iteration, so the peak memory saving should be checked after first iteration. static_graph (bool): When set to ``True``, DDP knows the trained graph is static. Static graph means 1) The set of used and unused parameters will not change during the whole training loop; in this case, it does not matter whether users set ``find_unused_parameters = True`` or not. 2) How the graph is trained will not change during the whole training loop (meaning there is no control flow depending on iterations). When static_graph is set to be ``True``, DDP will support cases that can not be supported in the past: 1) Reentrant backwards. 2) Activation checkpointing multiple times. 3) Activation checkpointing when model has unused parameters. 4) There are model parameters that are outside of forward function. 5) Potentially improve performance when there are unused parameters, as DDP will not search graph in each iteration to detect unused parameters when static_graph is set to be ``True``. To check whether you can set static_graph to be ``True``, one way is to check ddp logging data at the end of your previous model training, if ``ddp_logging_data.get("can_set_static_graph") == True``, mostly you can set ``static_graph = True`` as well. Example:: >>> # xdoctest: +SKIP("undefined variables") >>> model_DDP = torch.nn.parallel.DistributedDataParallel(model) >>> # Training loop >>> ... >>> ddp_logging_data = model_DDP._get_ddp_logging_data() >>> static_graph = ddp_logging_data.get("can_set_static_graph") delay_all_reduce_named_params (list of tuple of str and torch.nn.Parameter): a list of named parameters whose all reduce will be delayed when the gradient of the parameter specified in ``param_to_hook_all_reduce`` is ready. Other arguments of DDP do not apply to named params specified in this argument as these named params will be ignored by DDP reducer. param_to_hook_all_reduce (torch.nn.Parameter): a parameter to hook delayed all reduce of parameters specified in ``delay_all_reduce_named_params``. Attributes: module (Module): the module to be parallelized. 
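    A minimal, hedged sketch of the ``delay_all_reduce_named_params`` /
    ``param_to_hook_all_reduce`` pair described above; the submodule and
    parameter names used here are placeholders only:

    >>> # xdoctest: +SKIP("undefined variables")
    >>> delayed = [("embedding.weight", model.embedding.weight)]
    >>> ddp = torch.nn.parallel.DistributedDataParallel(
    >>>     model,
    >>>     device_ids=[rank],
    >>>     delay_all_reduce_named_params=delayed,
    >>>     param_to_hook_all_reduce=model.head.weight,
    >>> )

    With this configuration, ``embedding.weight`` is ignored by the DDP reducer
    and its all-reduce is instead enqueued once the gradient of
    ``model.head.weight`` becomes ready, as described above.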
Example:: >>> # xdoctest: +SKIP("undefined variables") >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...') >>> net = torch.nn.parallel.DistributedDataParallel(model) """ # used to track whether the given thread is inside ddp forward for torchdynamo purposes _active_ddp_module: Optional["DistributedDataParallel"] = None def __init__( self, module, device_ids=None, output_device=None, dim=0, broadcast_buffers=True, init_sync=True, process_group=None, bucket_cap_mb=None, find_unused_parameters=False, check_reduction=False, gradient_as_bucket_view=False, static_graph=False, delay_all_reduce_named_params=None, param_to_hook_all_reduce=None, mixed_precision: Optional[_MixedPrecision] = None, device_mesh=None, ): super().__init__() Joinable.__init__(self) self.logger: Optional[dist.Logger] = None if bool(delay_all_reduce_named_params is not None) != bool( param_to_hook_all_reduce is not None ): self._log_and_throw( ValueError, "delay_all_reduce_named_params and param_to_hook_all_reduce " "need to be set at the same time.", ) if process_group and device_mesh is not None: raise RuntimeError( "Cannot specify both process_group and device_mesh arguments." ) elif process_group is None and device_mesh is None: self.process_group = _get_default_group() elif device_mesh is None: self.process_group = process_group else: if device_mesh.ndim != 1: raise RuntimeError( f"Only 1D device mesh is supported, but got {device_mesh}." ) self.device_mesh = device_mesh self.process_group = device_mesh.get_group(mesh_dim=0) from torch.distributed.device_mesh import _mesh_resources root_mesh = _mesh_resources.get_root_mesh(device_mesh) # if a root mesh is not the same as device_mesh, # meaning the device_mesh is sliced out from the root mesh. if root_mesh != device_mesh: # TODO: This is a temporary work around to enable DDP + TP. # We should do the logic in DDP so that the 2D implementation is # sound and the state_dict works out of the box. # This has to be done before check UninitializedParameter. 
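                # Illustrative (hedged) sketch of how callers typically reach
                # this branch: a 2D mesh is created and only its data-parallel
                # sub-mesh is handed to DDP, e.g.
                #   mesh_2d = init_device_mesh("cuda", (2, 2), mesh_dim_names=("dp", "tp"))
                #   ddp_model = DistributedDataParallel(tp_model, device_mesh=mesh_2d["dp"])
                # The mesh-dim names and model above are placeholders; the only
                # situation handled here is a 1D mesh sliced out of a larger root mesh.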
from torch.distributed.tensor.parallel.ddp import ( _pre_dp_module_transform, ) _pre_dp_module_transform(module) self._delay_all_reduce_params = [] if hasattr(module, "_ddp_params_and_buffers_to_ignore"): self.parameters_to_ignore = set(module._ddp_params_and_buffers_to_ignore) else: self.parameters_to_ignore = set() if delay_all_reduce_named_params is not None: for name, param in delay_all_reduce_named_params: self.parameters_to_ignore.add(name) self._delay_all_reduce_params.append(param) self._module_parameters = [ p for n, p in module.named_parameters() if n not in self.parameters_to_ignore ] if not any(p.requires_grad for p in self._module_parameters): if len(self._delay_all_reduce_params): logger.info("Delay the AllReduce of all parameters.") else: self._log_and_throw( RuntimeError, "DistributedDataParallel is not needed when a module " "doesn't have any parameter that requires a gradient.", ) if device_ids is not None and len(device_ids) > 1: self._log_and_throw( ValueError, "device_ids can only be None or contain a single element.", ) self.is_multi_device_module = ( len({p.device for p in self._module_parameters}) > 1 ) distinct_device_types = { p.device.type for p in self._module_parameters if p.device is not None } if len(distinct_device_types) != 1: self._log_and_throw( ValueError, "DistributedDataParallel's input module must be on " f"the same type of devices, but input module parameters locate in {distinct_device_types}.", ) self.device_type = next(iter(distinct_device_types)) if ( device_ids is None or len(device_ids) == 0 # For backward compatibility. or self.device_type == "cpu" or self.is_multi_device_module ): if device_ids or output_device: self._log_and_throw( ValueError, "DistributedDataParallel device_ids and output_device arguments " "only work with single-device/multiple-device GPU modules or CPU modules, " f"but got device_ids {device_ids}, output_device {output_device}, " f"and module parameters {({p.device for p in self._module_parameters})}.", ) self.device_ids = None self.output_device = None else: self.device_ids = [_get_device_index(x, True) for x in device_ids] if output_device is None: output_device = device_ids[0] self.output_device = _get_device_index(output_device, True) self.static_graph = False self.dim = dim self.module = module self.device = next(iter(self._module_parameters)).device self.broadcast_buffers = broadcast_buffers self.find_unused_parameters = find_unused_parameters self.require_backward_grad_sync = True self.require_forward_param_sync = True self.gradient_as_bucket_view = gradient_as_bucket_view self.mixed_precision = mixed_precision if self.mixed_precision is not None: logger.warning("Received mixed precision config %s", self.mixed_precision) if check_reduction: # This argument is no longer used since the reducer # will ensure reduction completes even if some parameters # do not receive gradients. warnings.warn( "The `check_reduction` argument in `DistributedDataParallel` " "module is deprecated. Please avoid using it.", FutureWarning, stacklevel=2, ) # Check that a module does not have Uninitialized parameters for param in self._module_parameters: if isinstance(param, torch.nn.parameter.UninitializedParameter): self._log_and_throw( RuntimeError, "Modules with uninitialized parameters can't be used with `DistributedDataParallel`. 
" "Run a dummy forward pass to correctly initialize the modules", ) # used for intra-node param sync and inter-node sync as well self.broadcast_bucket_size = int(250 * 1024 * 1024) # reduction bucket size if bucket_cap_mb is None: # default case (bucket cap is 25 MiB) bucket_cap_mb = 25 self.bucket_bytes_cap_default = True else: self.bucket_bytes_cap_default = False self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024) # Whether to perform input tensor CPU to GPU copies on a side-stream self.use_side_stream_for_tensor_copies = ( os.environ.get("PYTORCH_DDP_USE_SIDE_STREAM", "1") == "1" ) # Initialize gradient buffers and register all reduce hook self._delay_grad_buffer: Optional[torch.Tensor] = None self._delay_grad_views: list[torch.Tensor] = [] self._delay_all_reduce_all_params = False if len(self._delay_all_reduce_params) != 0: self._register_delay_all_reduce_hook( bucket_cap_mb=bucket_cap_mb, param_to_hook_all_reduce=param_to_hook_all_reduce, device_ids=device_ids, ) if self._delay_all_reduce_all_params: return # Build parameters for reducer. parameters, expect_sparse_gradient = self._build_params_for_reducer() # All collectives during initialization are gated by this flag. if init_sync: # Verify model equivalence. _verify_param_shape_across_processes(self.process_group, parameters) # Sync params and buffers. Ensures all DDP models start off at the same value. _sync_module_states( module=self.module, process_group=self.process_group, broadcast_bucket_size=self.broadcast_bucket_size, src=0, params_and_buffers_to_ignore=self.parameters_to_ignore, broadcast_buffers=self.broadcast_buffers, ) # In debug mode, build a mapping of parameter index -> parameter. param_to_name_mapping = self._build_debug_param_to_name_mapping(parameters) # Builds reducer. self._ddp_init_helper( parameters, expect_sparse_gradient, param_to_name_mapping, static_graph, ) self._comm_hooks: list[tuple[Callable, object]] = [] if self.mixed_precision is not None: _setup_mixed_precision_params(self.mixed_precision, self.module) _cast_buffers(self.mixed_precision, self.module) # Stream used for async low precision copies. self._mp_stream = torch.Stream() self._submodule_to_event = defaultdict(deque) # type: ignore[var-annotated] # Add forward pre-hook to root module to kick off copies to lower # precision. self.module.register_forward_pre_hook( self._root_copy_hook, prepend=False, with_kwargs=True ) # Add forward pre hook to all submodules to wait for copy events # before running computation. for module in self.module.modules(): module.register_forward_pre_hook( self._module_wait_for_copy_hook, prepend=False, with_kwargs=True, ) # Set up callbacks in backward to upcast and use full precision # params. TODO (rohan-varma): Make this compose with general # comm hooks and apply_optimizer_in_backward. Importing inline to # avoid circular import issue. from torch.distributed.algorithms.ddp_comm_hooks.mixed_precision_hooks import ( _AllreduceUpcastHookState, _reducer_allreduce_and_upcast_hook, ) upcast_hook_state = _AllreduceUpcastHookState( ddp_weakref=weakref.ref(self), upcast_stream=torch.Stream(), ) self.register_comm_hook( upcast_hook_state, _reducer_allreduce_and_upcast_hook, ) # Inform reducer of reduced precision param dtype for correctness # of type checks between gradient and bucket. 
self.reducer._set_mixed_precision_param_dtype( # type: ignore[attr-defined] self.mixed_precision.param_dtype ) self._has_rebuilt_buckets = False if static_graph: self._set_static_graph() self._lazy_init_ran = False # Register the AccumulateGrad post hooks if optimize_ddp is # True. The hooks will be deregistered if compiled_autograd is not # enabled. self._accum_grad_hooks: list[RemovableHandle] = [] optimize_ddp = torch._dynamo.utils.get_optimize_ddp_mode() self._use_python_reducer = optimize_ddp == "python_reducer" if self._use_python_reducer: torch._inductor.config._fuse_ddp_communication = True torch._inductor.config._fuse_ddp_bucket_size = bucket_cap_mb # Directly adding this to the trace rule will disturb the users # who are using DDPOptimizer. torch._dynamo.trace_rules.LEGACY_MOD_INLINELIST.add( "torch.nn.parallel.distributed" ) torch._dynamo.trace_rules.get_legacy_mod_inlinelist.cache_clear() # NOTE: we should init these lazily self._register_accum_grad_hook() # Whether or not DDPSink performs a clone. self._ddp_sink_clone = True def _register_accum_grad_hook(self): import torch.distributed._functional_collectives as fcol def compiled_accum_grad_hook( param, *, param_index: int, ): if not self.require_backward_grad_sync: return if param.grad is None: return if self._comm_hooks: for hook, state in self._comm_hooks: hook(state, (param.grad, param)) else: gradient = param.grad / self.process_group.size() gradient = fcol.all_reduce(gradient, "sum", self.process_group) param.grad.copy_(gradient) for index, param in enumerate(self._module_parameters): if not param.requires_grad: continue self._accum_grad_hooks.append( param.register_post_accumulate_grad_hook( functools.partial( compiled_accum_grad_hook, param_index=index, ) ) ) def _delayed_all_reduce_hook(self, grad): world_size = dist.get_world_size(self.process_group) self._delay_grad_buffer.div_(world_size) # type: ignore[union-attr] _ = dist.all_reduce( self._delay_grad_buffer, group=self.process_group, async_op=True ) return grad def _register_delay_all_reduce_hook( self, bucket_cap_mb, param_to_hook_all_reduce, device_ids, ): # 1. Create gradient buffer device = torch.device("cpu") if device_ids is None else device_ids[0] self._delay_grad_buffer = torch.zeros( sum(p.numel() for p in self._delay_all_reduce_params), device=device, ) # 2. Broadcast the parameters detached_params = [p.detach() for p in self._delay_all_reduce_params] dist._broadcast_coalesced(self.process_group, detached_params, bucket_cap_mb, 0) # 3. Hook all reduce to the specified parameter param_to_hook_all_reduce.register_hook(self._delayed_all_reduce_hook) # 4. Build tensor views for gradients offset = 0 for param in self._delay_all_reduce_params: grad_view = self._delay_grad_buffer[offset : (offset + param.numel())].view( param.shape ) self._delay_grad_views.append(grad_view) offset = offset + param.numel() # 5. Check whether the all reduce of all params requiring grad is delayed. for module_name, module in self.module.named_modules(): for param_name, param in module.named_parameters(recurse=False): if param.requires_grad: full_name = f"{module_name}.{param_name}" if full_name not in self.parameters_to_ignore: # There is at least a param whose all reduce will not be delayed. # In this case, we should not set self._delay_all_reduce_all_params # to True. return self._delay_all_reduce_all_params = True def _setup_in_backward_optimizers(self): # Check if user has used apply_optim_in_backward to overlap optimizer # step + DDP backward. Current constraints: # 1. 
Only allreduce is supported at the moment, no custom communication. # 2. For DDP-managed parameters that have their optimizer run in # backward, their gradients are set to ``None``. If your use case # requires DDP parameters grad not to be set to ``None`` after their # in-backward optimizer runs, please ping # https://github.com/pytorch/pytorch/issues/90052. # NOTE: we use self._module_parameters instead of .parameters() since # the former excludes ignored (non-DDP managed) parameters. if any(hasattr(p, "_in_backward_optimizers") for p in self._module_parameters): torch._C._log_api_usage_once("ddp.optimizer_in_backward") # Remove hooks that apply_optim_in_backward had registered because # DDP customizes how optimizer is overlapped with backward due to # the allreduce. param_to_handle_map = ( dist.optim.apply_optimizer_in_backward.param_to_optim_hook_handle_map ) for p in self._module_parameters: for handle in param_to_handle_map.get(p, []): handle.remove() # Need a weakref to DDP instance to run all_reduce (from reducer) # and get managed DDP parameters. ddp_weakref = weakref.ref(self) # Note: importing in function, otherwise this will cause a circular # import. from torch.distributed.algorithms.ddp_comm_hooks.optimizer_overlap_hooks import ( _apply_optim_in_backward_hook, ) self.register_comm_hook( ddp_weakref, _apply_optim_in_backward_hook( gradient_is_bucket_view=self.gradient_as_bucket_view ), ) self.reducer._set_optimizer_in_backward() # type: ignore[attr-defined] def _fire_reducer_autograd_hook(self, idx, *unused): """ Fire the reducer's autograd hook to allreduce params in a Reducer bucket. Note that this is only used during mixed precision training as the Reducer's hooks installed during construction time would not be called as we're working in the low precision parameter setting. """ self.reducer._autograd_hook(idx) # type: ignore[attr-defined] def _root_copy_hook(self, *args: Any, **kwargs: Any) -> None: """ For DDP mixed precision, put low precision copies on separate stream and create events to wait for them. When training with DDP mixed precision, this root pre-forward hook kicks off low precision copies on a separate stream and creates respective events to wait for them. """ # Clear out previous iteration submodule to event. This is because we # may have populated some events for modules that didn't end up being # used. self._submodule_to_event = defaultdict(deque) # type: ignore[var-annotated] with self._mp_stream: for submodule in self.module.modules(): for param in submodule.parameters(recurse=False): # Do not cast DDP ignored parameters. if hasattr(param, "_ddp_ignored") and param._ddp_ignored: continue _alloc_storage(param._mp_param, param.size()) # copy() implicitly casts to low precision with torch.no_grad(): param._mp_param.copy_(param.data) # TODO: when zero_grad(set_to_none=False) or in grad # accumulation case, accumulated grads can be in fp32 # which can cause errors when running DDP backwards due # to mismatched incoming and accumulated gradient types. # So we manually cast the accumulated grad down for now, # in the future we may shift to FSDP style gradient # accumulation management where the accumulated gradient # is saved and .grad field is set to None, bypassing # this issue. 
if param.grad is not None: param.grad.data = param.grad.to( self.mixed_precision.param_dtype # type: ignore[union-attr] ) param.data = param._mp_param copy_event = torch.Event() copy_event.record() self._submodule_to_event[submodule].append(copy_event) def _module_wait_for_copy_hook( self, module, *args: Any, **kwargs: Any, ) -> None: """Before carrying out computation, wait on the appropriate event to ensure low precision copies have finished.""" try: event = self._submodule_to_event[module].popleft() except IndexError: # copy event has already been waited on return event.wait(stream=torch.accelerator.current_stream()) for p in module.parameters(recurse=False): # Don't register hooks if param does not require grad if not p.requires_grad or (hasattr(p, "_ddp_ignored") and p._ddp_ignored): continue # We need to register autograd hook here instead of DDP's ctor # since we're working with the low precision param. Register them # via obtaining the gradient accumulator. tmp = p.expand_as(p) grad_acc = tmp.grad_fn.next_functions[0][0] hook = grad_acc.register_hook( functools.partial(self._fire_reducer_autograd_hook, p._idx) ) p._ddp_mp_hook_state = (grad_acc, hook) def _log_and_throw(self, err_type, err_msg): if self.logger is not None: self.logger.set_error_and_log(f"{str(err_type)}: {err_msg}") raise err_type(err_msg) def _ddp_init_helper( self, parameters, expect_sparse_gradient, param_to_name_mapping, static_graph, ): """ DDP init helper function to manage parameters, grad hooks, logging, and SyncBatchNorm. Initialization helper function that does the following: (1) bucketing the parameters for reductions (2) resetting the bucketing states (3) registering the grad hooks (4) Logging construction-time DDP logging data (5) passing a handle of DDP to SyncBatchNorm Layer """ # Notice, the parameters order is not in the order in which they are used, # especially in models with control flow. # # Alongside parameters are not presented in the real execution order, # if a certain model happens to also # 1) have other collectives comm ops in its backward graph. # 2) have unused parameter in subset ranks of the whole world. # bucketing could insert ALL-REDUCE comm op too early on the rank with unused parameter, # matching up with other collectives comm ops on other ranks unexpectedly. # # In order to handle this corner case, when the parameters are not in the real execution order, # we don't do bucketing, thus only one ALL-REDUCE is inserted after all the gradients # of the whole graph are computed. # # Notice, here we only disable bucketing for the first iteration. # After the first iteration, it's OK to rebuild buckets, # because "bucket rebuild" bucketizes parameters based on its real execution order in backward graph. # Can remove this branching once #73732 is landed. if static_graph is True or self.find_unused_parameters is False: bucket_size_limits = [sys.maxsize] else: if self.bucket_bytes_cap_default: bucket_size_limits = [ dist._DEFAULT_FIRST_BUCKET_BYTES, self.bucket_bytes_cap, ] else: bucket_size_limits = [self.bucket_bytes_cap] ( bucket_indices, per_bucket_size_limits, ) = dist._compute_bucket_assignment_by_size( parameters, bucket_size_limits, expect_sparse_gradient, ) # Remember index for parameters if we are in mixed precision, as we # need to pass in index to Reducer's autograd hook via python. 
if self.mixed_precision is not None: for i, p in enumerate(parameters): p._idx = i # Note: reverse list of buckets because we want to approximate the # order in which their gradients are produced, and assume they # are used in the forward pass in the order they are defined. self.reducer = dist.Reducer( parameters, list(reversed(bucket_indices)), list(reversed(per_bucket_size_limits)), self.process_group, expect_sparse_gradient, # The bucket size limit is specified in the constructor. # Additionally, we allow for a single small bucket for parameters # that are defined first, such that their gradients don't spill into # a much larger bucket, adding unnecessary latency after gradient # computation finishes. Experiments showed 1MB is a reasonable value. self.bucket_bytes_cap, self.find_unused_parameters, self.gradient_as_bucket_view, param_to_name_mapping, # User can set dist._DEFAULT_FIRST_BUCKET_BYTES to tune DDP first # bucket. ( dist._DEFAULT_FIRST_BUCKET_BYTES if self.bucket_bytes_cap_default else self.bucket_bytes_cap ), ) self.logger = dist.Logger(self.reducer) # Set as a weak reference to avoid reference cycle between # logger and reducer. self.reducer.set_logger(self.logger) has_sync_bn = False for submodule in self.module.modules(): if isinstance(submodule, torch.nn.SyncBatchNorm): has_sync_bn = True break # Set logging data that can be got during construction time. self.logger.set_construction_data_and_log( self.module.__class__.__name__, [] if self.device_ids is None else self.device_ids, -1 if self.output_device is None else self.output_device, self.broadcast_buffers, has_sync_bn, static_graph, ) # passing a handle to torch.nn.SyncBatchNorm layer self._passing_sync_batchnorm_handle(self.module) def __getstate__(self): self._check_default_group() attrs = copy.copy(self.__dict__) del attrs["process_group"] del attrs["reducer"] del attrs["logger"] return attrs def __setstate__(self, state): # If serializable, then the process group should be the default one self.process_group = _get_default_group() super().__setstate__(state) self.__dict__.setdefault("require_forward_param_sync", True) self.__dict__.setdefault("require_backward_grad_sync", True) parameters, expect_sparse_gradient = self._build_params_for_reducer() # In debug mode, build a mapping of parameter index -> parameter. param_to_name_mapping = self._build_debug_param_to_name_mapping(parameters) # Builds reducer. self._ddp_init_helper( parameters, expect_sparse_gradient, param_to_name_mapping, self.static_graph, ) if self.static_graph: self.reducer._set_static_graph() assert self.logger is not None self.logger._set_static_graph() def _build_params_for_reducer(self): # Build tuple of (module, parameter) for all parameters that require grads. modules_and_parameters = [ (module, parameter) for module_name, module in self.module.named_modules() for parameter in [ param # Note that we access module.named_parameters instead of # parameters(module). parameters(module) is only needed in the # single-process multi device case, where it accesses replicated # parameters through _former_parameters. for param_name, param in module.named_parameters(recurse=False) if param.requires_grad and f"{module_name}.{param_name}" not in self.parameters_to_ignore ] ] # Deduplicate any parameters that might be shared across child modules. memo = set() modules_and_parameters = [ # "p not in memo" is the deduplication check. # "not memo.add(p)" is always True, and it's only there to cause "add(p)" if needed. 
(m, p) for m, p in modules_and_parameters if p not in memo and not memo.add(p) # type: ignore[func-returns-value] ] # Build list of parameters. parameters = [parameter for _, parameter in modules_and_parameters] # Checks if a module will produce a sparse gradient. def produces_sparse_gradient(module): if isinstance(module, (torch.nn.Embedding, torch.nn.EmbeddingBag)): return module.sparse return False # Build list of booleans indicating whether or not to expect sparse # gradients for the corresponding parameters. expect_sparse_gradient = [ produces_sparse_gradient(module) for module, _ in modules_and_parameters ] self._assign_modules_buffers() return parameters, expect_sparse_gradient def _assign_modules_buffers(self): """ Assign self.module.named_buffers to self.modules_buffers. Assigns module buffers to self.modules_buffers which are then used to broadcast across ranks when broadcast_buffers=True. Note that this must be called every time buffers need to be synced because buffers can be reassigned by user module, see https://github.com/pytorch/pytorch/issues/63916. """ # Collect buffers for modules, filtering out buffers that should be ignored. named_module_buffers = [ (buffer, buffer_name) for buffer_name, buffer in self.module.named_buffers() if buffer_name not in self.parameters_to_ignore ] self.modules_buffers = [ buffer for (buffer, buffer_name) in named_module_buffers ] # Dict[str, tensor] representing module buffers not ignored by DDP. self.named_module_buffers = { buffer_name: buffer for (buffer, buffer_name) in named_module_buffers } def _build_debug_param_to_name_mapping(self, parameters): param_to_param_index = {parameters[i]: i for i in range(len(parameters))} param_set = set(parameters) param_index_to_param_fqn = {} for module_name, module in self.module.named_modules(): for param_name, param in module.named_parameters(recurse=False): fqn = f"{module_name}.{param_name}" # Bypass ignored parameters since those are not reduced by DDP # to begin with. if fqn not in self.parameters_to_ignore and param.requires_grad: if param not in param_set: self._log_and_throw( ValueError, f"Param with name {fqn} found in module parameters, but not DDP parameters." " This indicates a bug in DDP, please report an issue to PyTorch.", ) param_index = param_to_param_index[param] param_index_to_param_fqn[param_index] = fqn # Ensure we covered all parameters if len(param_set) != len(param_index_to_param_fqn): self._log_and_throw( ValueError, ( "Expected param to name mapping to cover all parameters, but" f" got conflicting lengths: {len(param_set)} vs " f"{len(param_index_to_param_fqn)}. This indicates a bug in DDP" ", please report an issue to PyTorch." ), ) return param_index_to_param_fqn def _get_parameters(self, m, recurse=True): """Return a generator of module parameters.""" def model_parameters(m): ps = ( m._former_parameters.values() if hasattr(m, "_former_parameters") else m.parameters(recurse=False) ) yield from ps for mod in m.modules() if recurse else [m]: yield from model_parameters(mod) def _check_default_group(self): pickle_not_supported = False try: if self.process_group != _get_default_group(): pickle_not_supported = True except RuntimeError: pickle_not_supported = True if pickle_not_supported: self._log_and_throw( RuntimeError, "DDP Pickling/Unpickling are only supported " "when using DDP with the default process " "group. 
That is, when you have called " "init_process_group and have not passed " "process_group argument to DDP constructor", ) @contextmanager def no_sync(self): r""" Context manager to disable gradient synchronizations across DDP processes. Within this context, gradients will be accumulated on module variables, which will later be synchronized in the first forward-backward pass exiting the context. Example:: >>> # xdoctest: +SKIP("undefined variables") >>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg) >>> with ddp.no_sync(): >>> for input in inputs: >>> ddp(input).backward() # no synchronization, accumulate grads >>> ddp(another_input).backward() # synchronize grads .. warning:: The forward pass should be included inside the context manager, or else gradients will still be synchronized. """ old_require_backward_grad_sync = self.require_backward_grad_sync self.require_backward_grad_sync = False try: yield finally: self.require_backward_grad_sync = old_require_backward_grad_sync @classmethod def _get_active_ddp_module(cls): """`TorchDynamo` requires DDP's status and module for cooperative optimization.""" return cls._active_ddp_module # note, this ctxmgr function is marked 'skip' in torchdynamo, so dynamo only kicks in # for the 'module_to_run' underneath # see torch._dynamo/eval_frame.py TorchPatcher.patch for more details @contextmanager @torch._disable_dynamo(recursive=False) def _inside_ddp_forward(self): DistributedDataParallel._active_ddp_module = self try: yield finally: DistributedDataParallel._active_ddp_module = None def _run_ddp_forward(self, *inputs, **kwargs): if self._use_python_reducer: return self.module(*inputs, **kwargs) # type: ignore[index] else: with self._inside_ddp_forward(): return self.module(*inputs, **kwargs) # type: ignore[index] def _clear_grad_buffer(self): # Making param.grad points to the grad buffers before backward is based on the # assumption that the grad accumulation is done in place in autograd engine, # for some edge cases, if the grad accumulation in autograd engine is not in # place, then the param.grad and grad buffers are detached. if self._delay_grad_buffer is not None: # We batch zero_grad for all params by resetting the whole grad # buffer when the grad of all params is set to None. all_param_grad_none = all( param.grad is None for param in self._delay_all_reduce_params ) for index, param in enumerate(self._delay_all_reduce_params): if param.grad is None: param.grad = self._delay_grad_views[index] if not all_param_grad_none: param.grad.zero_() if all_param_grad_none: self._delay_grad_buffer.zero_() def _lazy_init(self): # Initialization for DDP that occurs after construction, but lazily # before the first forward pass. 
self._setup_in_backward_optimizers() self._lazy_init_ran = True def _pre_forward(self, *inputs, **kwargs): if self._use_python_reducer: return inputs, kwargs if not self._lazy_init_ran and not torch.compiler.is_compiling(): self._lazy_init() if self._delay_all_reduce_all_params: return inputs, kwargs if torch.is_grad_enabled() and self.require_backward_grad_sync: assert self.logger is not None self.logger.set_runtime_stats_and_log() self.reducer.prepare_for_forward() # Notify the join context that this process has not joined, if # needed work = Join.notify_join_context(self) if work: self.reducer._set_forward_pass_work_handle( work, self._divide_by_initial_world_size # type: ignore[arg-type] ) # Calling _rebuild_buckets before forward computation, # It may allocate new buckets before deallocating old buckets # inside _rebuild_buckets. To save peak memory usage, # call _rebuild_buckets before the peak memory usage increases # during forward computation. # This should be called only once during whole training period. if torch.is_grad_enabled() and self.reducer._rebuild_buckets(): logger.info("Reducer buckets have been rebuilt in this iteration.") self._has_rebuilt_buckets = True # sync params according to location (before/after forward) user # specified as part of hook, if hook was specified. if self._check_sync_bufs_pre_fwd(): self._sync_buffers() if self._join_config.enable: # Notify joined ranks whether they should sync in backwards pass or not. self._check_global_requires_backward_grad_sync(is_joined_rank=False) if self.device_ids: moved_inputs, moved_kwargs = _to_kwargs( inputs, kwargs, torch.device(self.device_type, self.device_ids[0]), self.use_side_stream_for_tensor_copies, ) args, kwargs = moved_inputs[0], moved_kwargs[0] # Cast inputs to reduced precision if needed. if self.mixed_precision is not None: args, kwargs = _cast_forward_inputs( self.mixed_precision.param_dtype, *args, **kwargs, ) return args, kwargs else: # Cast inputs to reduced precision if needed. # TODO (rohan-varma) test this codepath. if self.mixed_precision is not None: inputs, kwargs = _cast_forward_inputs( self.mixed_precision.param_dtype, *inputs, **kwargs, ) return inputs, kwargs def _post_forward(self, output): if self._use_python_reducer: return output if self._delay_all_reduce_all_params: self._clear_grad_buffer() return output # sync params according to location (before/after forward) user # specified as part of hook, if hook was specified. if self._check_sync_bufs_post_fwd(): self._sync_buffers() if torch.is_grad_enabled() and self.require_backward_grad_sync: self.require_forward_param_sync = True # We'll return the output object verbatim since it is a freeform # object. We need to find any tensors in this object, though, # because we need to figure out which parameters were used during # this forward pass, to ensure we short circuit reduction for any # unused parameters. Only if `find_unused_parameters` is set. if self.find_unused_parameters and not self.static_graph: # Do not need to populate this for static graph. self.reducer.prepare_for_backward(list(_find_tensors(output))) else: self.reducer.prepare_for_backward([]) else: self.require_forward_param_sync = False # TODO: DDPSink is currently enabled for unused parameter detection and # static graph training for first iteration. 
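        # NOTE: the branch below flattens the output into its tensor leaves,
        # passes those tensors through _DDPSink.apply (tensors without a
        # grad_fn keep their original values in the reconstructed output), so
        # that _DDPSink.backward runs during the backward pass, queueing the
        # delayed allreduce for static graph and tolerating outputs that never
        # receive a gradient, and then rebuilds the original output structure.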
if (self.find_unused_parameters and not self.static_graph) or ( self.static_graph and not self._static_graph_delay_allreduce_enqueued ): ( output_tensor_list, treespec, output_is_rref, ) = _tree_flatten_with_rref(output) output_placeholders: list[Optional[torch.Tensor]] = [ None for _ in range(len(output_tensor_list)) ] # Do not touch tensors that have no grad_fn, which can cause issues # such as https://github.com/pytorch/pytorch/issues/60733 for i, output in enumerate(output_tensor_list): if torch.is_tensor(output) and output.grad_fn is None: output_placeholders[i] = output # When find_unused_parameters=True, makes tensors which require grad # run through the DDPSink backward pass. When not all outputs are # used in loss, this makes those corresponding tensors receive # undefined gradient which the reducer then handles to ensure # param.grad field is not touched and we don't error out. passthrough_tensor_list = _DDPSink.apply( weakref.ref(self), *output_tensor_list, ) for i in range(len(output_placeholders)): if output_placeholders[i] is None: output_placeholders[i] = passthrough_tensor_list[i] # Reconstruct output data structure. output = _tree_unflatten_with_rref( output_placeholders, treespec, output_is_rref ) # At the end of the forward pass, reset the grad buffer and grad views self._clear_grad_buffer() return output def forward(self, *inputs, **kwargs): with torch.autograd.profiler.record_function("DistributedDataParallel.forward"): inputs, kwargs = self._pre_forward(*inputs, **kwargs) output = ( self.module.forward(*inputs, **kwargs) if self._delay_all_reduce_all_params else self._run_ddp_forward(*inputs, **kwargs) ) return self._post_forward(output) def scatter(self, inputs, kwargs, device_ids): return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) def to_kwargs(self, inputs, kwargs, device_id): # Kept for BC return _to_kwargs( inputs, kwargs, torch.device(self.device_type, device_id), self.use_side_stream_for_tensor_copies, ) def gather(self, outputs, output_device): return gather(outputs, output_device, dim=self.dim) def train(self, mode=True): super().train(mode) return self # When running in join mode, schedules an allreduce to notify joined ranks # of whether backwards pass synchronization will run this iteration or not. def _check_global_requires_backward_grad_sync(self, is_joined_rank): if not is_joined_rank and self.require_backward_grad_sync: requires_sync_tensor = torch.ones(1, device=self.device) else: requires_sync_tensor = torch.zeros(1, device=self.device) work = dist.all_reduce( requires_sync_tensor, group=self.process_group, async_op=True ) # (kwen2501) This if condition is a plain translation of previous # behavior, i.e. in the `is_joined_rank=False` case, `work.wait()` # is not called and it doesn't care about the result. I am guessing # that it just wants to fire a matching all-reduce and does not want # the main stream to wait. if is_joined_rank: work.wait() should_sync_backwards = requires_sync_tensor.item() != 0 return should_sync_backwards else: return None # Return value is not/should not be used. # When running in join mode, checks and performs sync of module buffers if # the models have buffers that should be synchronized in the forward pass. def _check_and_sync_module_buffers(self): if self._check_sync_bufs_pre_fwd(): authoritative_rank = self._find_common_rank(self._distributed_rank, False) self._sync_module_buffers(authoritative_rank) # When running in join model, agrees upon a common rank and broadcast model # parameters to all other ranks. 
def _sync_final_model(self, is_last_joiner): # Agree upon the process that will be the authoritative model copy. # The current rank is a candidate for being the authoritative copy if # is_last_joiner=True. We break ties via picking the larger rank. self._authoritative_rank = self._find_common_rank( self._distributed_rank, is_last_joiner ) _sync_module_states( module=self.module, process_group=self.process_group, broadcast_bucket_size=self.broadcast_bucket_size, src=self._authoritative_rank, params_and_buffers_to_ignore=self.parameters_to_ignore, broadcast_buffers=self.broadcast_buffers, ) # Schedule comm ops to match those scheduled in the reducer's backward # pass. def _match_all_reduce_for_bwd_pass(self): comm_work = [] # Schedule comm in the same order as Reducer schedules them, i.e. # the order of the buckets. Retrieving the bucket order from the reducer # ensures that we keep the same order in join mode, such as when bucket # order is rebuilt dynamically. # Returns grad_buckets in order, but real tensors are substituted with # zero tensors of the same shape. grad_buckets = self.reducer._get_zeros_like_grad_buckets() for grad_bucket in grad_buckets: # Joined processes contribute zero gradient. In the case that # divide_by_initial_world_size=True, we divide grads by the static # world size, if not, the dividing factor is reduced by the number # of joined processes. work = self.reducer._run_comm_hook(grad_bucket) comm_work.append(work) for work in comm_work: work.wait() # Allreduces the used parameter mapping across ranks. def _match_unused_params_allreduce(self): locally_used_param_map = self.reducer._get_local_used_map() self.process_group.allreduce(locally_used_param_map) def join( self, divide_by_initial_world_size: bool = True, enable: bool = True, throw_on_early_termination: bool = False, ): r""" Context manager for training with uneven inputs across processes in DDP. This context manager will keep track of already-joined DDP processes, and "shadow" the forward and backward passes by inserting collective communication operations to match with the ones created by non-joined DDP processes. This will ensure each collective call has a corresponding call by already-joined DDP processes, preventing hangs or errors that would otherwise happen when training with uneven inputs across processes. Alternatively, if the flag ``throw_on_early_termination`` is specified to be ``True``, all trainers will throw an error once one rank runs out of inputs, allowing these errors to be caught and handled according to application logic. Once all DDP processes have joined, the context manager will broadcast the model corresponding to the last joined process to all processes to ensure the model is the same across all processes (which is guaranteed by DDP). To use this to enable training with uneven inputs across processes, simply wrap this context manager around your training loop. No further modifications to the model or data loading is required. .. warning:: If the model or training loop this context manager is wrapped around has additional distributed collective operations, such as ``SyncBatchNorm`` in the model's forward pass, then the flag ``throw_on_early_termination`` must be enabled. This is because this context manager is not aware of non-DDP collective communication. This flag will cause all ranks to throw when any one rank exhausts inputs, allowing these errors to be caught and recovered from across all ranks. 
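        A hedged sketch of handling that error (relying on the setup from the
        example below; the early-termination signal is assumed here to surface
        as a ``RuntimeError``):

        >>> # xdoctest: +SKIP("Distributed")
        >>> try:
        >>>     with model.join(throw_on_early_termination=True):
        >>>         for inp in inputs:
        >>>             model(inp).sum().backward()
        >>> except RuntimeError:
        >>>     pass  # some rank exhausted its inputs; all ranks threw and can resynchronize here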
Args: divide_by_initial_world_size (bool): If ``True``, will divide gradients by the initial ``world_size`` DDP training was launched with. If ``False``, will compute the effective world size (number of ranks that have not depleted their inputs yet) and divide gradients by that during allreduce. Set ``divide_by_initial_world_size=True`` to ensure every input sample including the uneven inputs have equal weight in terms of how much they contribute to the global gradient. This is achieved by always dividing the gradient by the initial ``world_size`` even when we encounter uneven inputs. If you set this to ``False``, we divide the gradient by the remaining number of nodes. This ensures parity with training on a smaller ``world_size`` although it also means the uneven inputs would contribute more towards the global gradient. Typically, you would want to set this to ``True`` for cases where the last few inputs of your training job are uneven. In extreme cases, where there is a large discrepancy in the number of inputs, setting this to ``False`` might provide better results. enable (bool): Whether to enable uneven input detection or not. Pass in ``enable=False`` to disable in cases where you know that inputs are even across participating processes. Default is ``True``. throw_on_early_termination (bool): Whether to throw an error or continue training when at least one rank has exhausted inputs. If ``True``, will throw upon the first rank reaching end of data. If ``False``, will continue training with a smaller effective world size until all ranks are joined. Note that if this flag is specified, then the flag ``divide_by_initial_world_size`` would be ignored. Default is ``False``. Example:: >>> # xdoctest: +SKIP("Distributed") >>> import torch >>> import torch.distributed as dist >>> import os >>> import torch.multiprocessing as mp >>> import torch.nn as nn >>> # On each spawned worker >>> def worker(rank): >>> dist.init_process_group("nccl", rank=rank, world_size=2) >>> torch.cuda.set_device(rank) >>> model = nn.Linear(1, 1, bias=False).to(rank) >>> model = torch.nn.parallel.DistributedDataParallel( >>> model, device_ids=[rank], output_device=rank >>> ) >>> # Rank 1 gets one more input than rank 0. >>> inputs = [torch.tensor([1]).float() for _ in range(10 + rank)] >>> with model.join(): >>> for _ in range(5): >>> for inp in inputs: >>> loss = model(inp).sum() >>> loss.backward() >>> # Without the join() API, the below synchronization will hang >>> # blocking for rank 1's allreduce to complete. >>> torch.cuda.synchronize(device=rank) """ return Join( [self], enable, throw_on_early_termination, divide_by_initial_world_size=divide_by_initial_world_size, ) def join_hook( self, **kwargs, ): r""" DDP join hook enables training on uneven inputs by mirroring communications in forward and backward passes. Arguments: kwargs (dict): a :class:`dict` containing any keyword arguments to modify the behavior of the join hook at run time; all :class:`Joinable` instances sharing the same join context manager are forwarded the same value for ``kwargs``. The hook supports the following keyword arguments: divide_by_initial_world_size (bool, optional): If ``True``, then gradients are divided by the initial world size that DDP was launched with. If ``False``, then gradients are divided by the effective world size (i.e. the number of non-joined processes), meaning that the uneven inputs contribute more toward the global gradient. 
Typically, this should be set to ``True`` if the degree of unevenness is small but can be set to ``False`` in extreme cases for possibly better results. Default is ``True``. """ divide_by_initial_world_size = kwargs.get("divide_by_initial_world_size", True) return _DDPJoinHook( self, divide_by_initial_world_size=divide_by_initial_world_size ) @property def join_device(self): return self.device @property def join_process_group(self): return self.process_group def _register_buffer_comm_hook( self, state, hook: Callable, comm_hook_location=_BufferCommHookLocation.POST_FORWARD, ): r""" Allow custom registration of hooks that define how buffer are synchronized across ranks. The hook takes in an optional state and is passed in a Dict[str, Tensor] corresponding to buffer names and the buffers, and can run arbitrary reductions on buffers as opposed to DDP's default broadcast from rank 0. This is useful for example if a counter needs to be summed or averaged across ranks every iteration. Args: state (Any): Optional state that is passed to the hook. hook (Callable): Callable with the following signature: ``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]`` comm_hook_location (_BufferCommHookLocation): Enum value indicating where to run the hook. _BufferCommHookLocation.PRE_FORWARD means that the hook will run _before_ the forward pass, and _BufferCommHookLocation.POST_FORWARD means that the hook will run _after_ the forward pass. NOTE: To maximize performance, users can return a List[torch.futures.Future] from their hook, and DDP will install and await these hooks appropriately at the end of the backward pass. This will ensure all buffers are synchronized by the end of the backward pass. If this setting is used, it is recommended to pass comm_hook_location=_BufferCommHookLocation.POST_FORWARD, which will trigger the hook after the forward pass. If _BufferCommHookLocation.PRE_FORWARD is used, users must ensure appropriate synchronization when manipulating GPU buffers in the forward pass. """ assert callable(hook) self.buffer_hook = _BufferCommHook( buffer_comm_hook=hook, buffer_comm_hook_state=state, buffer_comm_hook_location=comm_hook_location, ) def register_comm_hook(self, state: object, hook: Callable): r""" Register communication hook for user-defined DDP aggregation of gradients across multiple workers. This hook would be very useful for researchers to try out new ideas. For example, this hook can be used to implement several algorithms like GossipGrad and gradient compression which involve different communication strategies for parameter syncs while running Distributed DataParallel training. Args: state (object): Passed to the hook to maintain any state information during the training process. Examples include error feedback in gradient compression, peers to communicate with next in GossipGrad, etc. It is locally stored by each worker and shared by all the gradient tensors on the worker. hook (Callable): Callable with the following signature: ``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]``: This function is called once the bucket is ready. The hook can perform whatever processing is needed and return a Future indicating completion of any async work (ex: allreduce). If the hook doesn't perform any communication, it still must return a completed Future. The Future should hold the new value of grad bucket's tensors. 
Once a bucket is ready, c10d reducer would call this hook and use the tensors returned by the Future and copy grads to individual parameters. Note that the future's return type must be a single tensor. We also provide an API called ``get_future`` to retrieve a Future associated with the completion of ``c10d.ProcessGroup.Work``. ``get_future`` is currently supported for NCCL and also supported for most operations on GLOO and MPI, except for peer to peer operations (send/recv). .. warning :: Grad bucket's tensors will not be predivided by world_size. User is responsible to divide by the world_size in case of operations like allreduce. .. warning :: DDP communication hook can only be registered once and should be registered before calling backward. .. warning :: The Future object that hook returns should contain a single tensor that has the same shape with the tensors inside grad bucket. .. warning :: ``get_future`` API supports NCCL, and partially GLOO and MPI backends (no support for peer-to-peer operations like send/recv) and will return a ``torch.futures.Future``. Example:: Below is an example of a noop hook that returns the same tensor. >>> # xdoctest: +SKIP('undefined name') >>> def noop(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]: >>> fut = torch.futures.Future() >>> fut.set_result(bucket.buffer()) >>> return fut >>> ddp.register_comm_hook(state=None, hook=noop) Example:: Below is an example of a Parallel SGD algorithm where gradients are encoded before allreduce, and then decoded after allreduce. >>> # xdoctest: +SKIP('undefined name') >>> def encode_and_decode(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]: >>> encoded_tensor = encode(bucket.buffer()) # encode gradients >>> fut = torch.distributed.all_reduce(encoded_tensor).get_future() >>> # Define the then callback to decode. >>> def decode(fut): >>> decoded_tensor = decode(fut.value()[0]) # decode gradients >>> return decoded_tensor >>> return fut.then(decode) >>> ddp.register_comm_hook(state=None, hook=encode_and_decode) """ self._check_comm_hook(hook) assert self.logger is not None self.logger._set_comm_hook_name(hook.__qualname__) self._comm_hooks.append((hook, state)) dist._register_comm_hook(self.reducer, state, hook) def _register_builtin_comm_hook(self, comm_hook_type): r""" Register a built-in communication hook that specifies how DDP aggregates gradients across multiple workers. The built-in hooks aim to provide efficient C++ implementations for certain hooks, which might not be as efficient if implemented in Python using a Python communication hook. Args: comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as ALLREDUCE, FP16_COMPRESS, etc. .. warning :: DDP communication hook can only be registered once and should be registered before calling backward. Example:: Below is an example of a FP16 compression where gradients are compressed into 16-bit floating-point numbers before allreduce, and then decompressed after allreduce. >>> # xdoctest: +SKIP('undefined name') >>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS) """ assert self.logger is not None self.logger._set_comm_hook_name(str(comm_hook_type)) dist._register_builtin_comm_hook(self.reducer, comm_hook_type) def _register_fused_optim(self, optim: type, *args, optim_params=None, **kwargs): r""" Register an optimizer in DDP to optimize parameter immediately after its gradient reduction. 
Registers an optimizer with DDP such that the optimization for a parameter will run immediately when that parameter's gradient is finished with reduction, instead of waiting for all parameters' gradients to finish reduction. This can result in a training speedup depending on your workload since the optimizer can run while gradient reduction for other parameters are still ongoing. In addition, this has the potential to reduce peak memory consumption during training, as it only needs to load the per-parameter optimizer states of a single parameter at a time, instead of loading all per-parameter optimizer states at once. Args: optim (Type): a ``torch.optim.Optimizer`` class to be registered as a fused optimizer. *args (Sequence[Any]): Arguments to forward to `optim`. optim_params (Optional[Iterable[torch.Tensor]]): Set of parameters to optimize, similar to `params` argument of traditional `torch.optim` Optimizers. If this is omitted, all DDP model parameters will be optimized. **kwargs: (Dict[str, Any]): Keyword arguments to forward to `optim`. .. warning :: _register_fused_optim should only be called once on a DDP instance, and registering multiple fused optimizers for the same DDP model is not currently supported. Please ping https://github.com/pytorch/pytorch/issues/71595 if this is necessary for your use case. .. warning :: _register_fused_optim and register_comm_hook currently do not compose together, meaning that custom DDP communication hooks are not supported with overlapped optimizers. Please ping https://github.com/pytorch/pytorch/issues/71595 if this is necessary for your use case. .. warning :: Gradient accumulation and DDP `no_sync` are currently not supported with overlapped optimizer. Please ping https://github.com/pytorch/pytorch/issues/71595 if this is necessary for your use case. Example:: >>> # xdoctest: +SKIP("No rendezvous handler") >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...') >>> net = torch.nn.parallel.DistributedDataParallel(model, pg) >>> lr = 1e-2 >>> betas = (0.9, 0.99) >>> eps = 1e-6 >>> net._register_fused_optim(torch.optim.Adam, lr, betas=betas, eps=eps) >>> # Example with subset of parameters >>> params_to_opt = [list(net.parameters())[0]] >>> net._register_fused_optim( ... torch.optim.Adam, lr, optim_params=params_to_opt, betas=betas, eps=eps ... ) """ # Note: importing in function, otherwise this will cause a circular # import as optimizer_overlap module needs to import DistributedDataParallel. from torch.distributed.algorithms._optimizer_overlap import _as_overlapped_optim overlapped_optim = _as_overlapped_optim(optim, optim_params, *args, **kwargs) try: overlapped_optim.register_ddp(self) except NotImplementedError as e: raise RuntimeError( f"{optim} does not support overlapped DDP. Please file an issue to PyTorch or the respective owner of {optim}." 
) from e def _distributed_broadcast_coalesced( self, tensors, buffer_size, authoritative_rank=0 ): dist._broadcast_coalesced( self.process_group, tensors, buffer_size, authoritative_rank ) def _check_sync_bufs_post_fwd(self): return ( self.will_sync_module_buffers() and hasattr(self, "buffer_hook") and self.buffer_hook.buffer_comm_hook_location == _BufferCommHookLocation.POST_FORWARD ) def _check_sync_bufs_pre_fwd(self): return self.will_sync_module_buffers() and ( not hasattr(self, "buffer_hook") or self.buffer_hook.buffer_comm_hook_location == _BufferCommHookLocation.PRE_FORWARD ) def will_sync_module_buffers(self): return ( self.require_forward_param_sync and self.broadcast_buffers and len(self.modules_buffers) > 0 ) def _find_common_rank(self, input_rank, rank_cond): # -1 indicates that this rank is not under consideration to be the # common_rank rank_to_use = torch.tensor( [input_rank if rank_cond else -1], device=self.device, ) dist.all_reduce(rank_to_use, op=ReduceOp.MAX, group=self.process_group) if rank_to_use.item() == -1: self._log_and_throw( ValueError, "BUG! Expected rank_cond to be true for at least one process." " This indicates a bug in PyTorch, please report an issue.", ) return rank_to_use.item() def _sync_buffers(self): with torch.no_grad(): # module buffer sync # Synchronize buffers across processes. # If we are running DDP with the join manager, we have to agree # upon a rank to sync module buffers from, since rank 0 may # already have been joined and have stale module buffers. if self._join_config.enable: authoritative_rank = self._find_common_rank( self._distributed_rank, True ) else: # The process with rank 0 is considered the authoritative copy. authoritative_rank = 0 # Update self.modules_buffers incase any buffers were # reassigned. self._assign_modules_buffers() self._sync_module_buffers(authoritative_rank) def _sync_module_buffers(self, authoritative_rank): if not hasattr(self, "buffer_hook"): self._default_broadcast_coalesced(authoritative_rank=authoritative_rank) else: hook = self.buffer_hook.buffer_comm_hook state = self.buffer_hook.buffer_comm_hook_state futs = hook(state, self.named_module_buffers) if futs is not None: self.reducer._install_post_backward_futures(futs) def _default_broadcast_coalesced( self, bufs=None, bucket_size=None, authoritative_rank=0 ): """ Broadcasts buffers from rank 0 to rest of workers. If bufs, bucket_size are None, default values self.modules_buffers and self.broadcast_bucket_size are used instead. 
""" if bufs is None: bufs = self.modules_buffers if bucket_size is None: bucket_size = self.broadcast_bucket_size self._distributed_broadcast_coalesced(bufs, bucket_size, authoritative_rank) def _passing_sync_batchnorm_handle(self, module): for layer in module.modules(): if isinstance(layer, torch.nn.modules.SyncBatchNorm): if self.device_type == "cpu": self._log_and_throw( ValueError, "SyncBatchNorm layers only work with GPU modules", ) def _check_comm_hook(self, hook): if not callable(hook): self._log_and_throw(TypeError, "Communication hook must be callable.") sig = inspect.signature(hook) if ( sig.parameters["bucket"].annotation != inspect._empty and sig.parameters["bucket"].annotation != dist.GradBucket ): self._log_and_throw( ValueError, "Communication hook: bucket annotation should be dist.GradBucket.", ) if ( sig.return_annotation != inspect._empty and sig.return_annotation != torch.futures.Future[torch.Tensor] ): self._log_and_throw( ValueError, "Communication hook: return annotation should be torch.futures.Future[torch.Tensor].", ) if hook.__name__ in ["bf16_compress_hook", "bf16_compress_wrapper_hook"]: cuda_supported = ( torch.version.cuda is not None ) or torch.version.hip is not None nccl_supported = ( dist.is_available() and dist.is_nccl_available() and torch.cuda.nccl.version() >= (2, 10) ) xpu_xccl_supported = ( dist.is_available() and dist.is_xccl_available() and torch.xpu.is_available() ) if not ((cuda_supported and nccl_supported) or xpu_xccl_supported): self._log_and_throw( TypeError, "BF16 all reduce communication hook required CUDA 11+ and NCCL 2.10+ or XPU and XCCL", ) @property def _distributed_rank(self): return dist.get_rank(self.process_group) @staticmethod def _get_data_parallel_params(module, named_params=False): """Return a generator of parameters managed by a given DDP unit.""" for param in ( module.parameters() if not named_params else module.named_parameters() ): if not hasattr(param, "_ddp_ignored"): yield param @staticmethod def _set_params_and_buffers_to_ignore_for_model( module, params_and_buffers_to_ignore ): """ Set parameters and buffers to be ignored by DDP. Expected format for parameters is the fully qualified name: {module_name}.{param_name}, and similarly, {module_name}.{buffer_name} for buffers. For example: params_to_ignore = [] # NB: model here is vanilla PyTorch module, not yet wrapped with DDP. for module_name, module in model.named_modules(): for param_name, param in module.named_parameters(recurse=False): if should_ignore(param): # Create expected format fqn = f"{module_name}.{param_name}" params_to_ignore.append(fqn) torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( model, params_to_ignore ) """ # This is a workaround to set parameters and buffers DDP should ignore # during synchronization. It will be removed when the API is finalized # as part of addressing https://github.com/pytorch/pytorch/issues/43690. module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore for name, param in module.named_parameters(): if name in params_and_buffers_to_ignore: param._ddp_ignored = True for name, buffer in module.named_buffers(): if name in params_and_buffers_to_ignore: buffer._ddp_ignored = True def _get_ddp_logging_data(self): r""" Return a dictionary of logging data for debugging and analysis. This interface can be called after DistributedDataParallel() is constructed. It returns a dictionary of logging data. It could help for debugging and analysis. 
The logging data includes DistributedDataParallel constructor input parameters, some internal states of DistributedDataParallel and performance metrics. Simply print the dictionary and see what these metrics are. This is a prototype interface and subject to change in the future. """ assert self.logger is not None ddp_logging_data = self.logger._get_ddp_logging_data() return {**ddp_logging_data.strs_map, **ddp_logging_data.ints_map} def _set_ddp_runtime_logging_sample_rate(self, sample_rate): r""" Set sample_rate of collecting runtime stats. This interface allows users to set sample_rate of collecting runtime stats. The runtime stats will be recorded for the first 10 iterations, after 10 iterations runtime stats will be recorded once every "sample_rate" training iterations. In default, runtime stats are recorded for the first 10 iterations, after 10 iterations runtime stats are recorded once every "kDDPRuntimeLoggingSampleRate=100" training iterations. This is a prototype interface and subject to change in the future. """ if sample_rate < 1: self._log_and_throw( ValueError, "DDP runtime logging sample rate should be equal or greater than 1", ) self.reducer._set_ddp_runtime_logging_sample_rate(sample_rate) def _set_static_graph(self): """ Set static graph for DDP. It is recommended to set static graph in the DDP constructor, which will call this private API internally. """ # If self.static_graph has been set, no need to set it again if self.static_graph: warnings.warn( "You've set static_graph to be True, no need to set it again." ) return self.static_graph = True self._static_graph_delay_allreduce_enqueued = False self.reducer._set_static_graph() assert self.logger is not None self.logger._set_static_graph() if self.find_unused_parameters: warnings.warn( "You passed find_unused_parameters=true to DistributedDataParallel, " "`_set_static_graph` will detect unused parameters automatically, so " "you do not need to set find_unused_parameters=true, just be sure these " "unused parameters will not change during training loop while calling " "`_set_static_graph`." ) def _remove_autograd_hooks(self): """Remove autograd hooks registered by the reducer on the model parameters.""" self.reducer._remove_autograd_hooks() def _check_reducer_finalized(self): """ Check if the reducer has processed all buckets and finalized the backward appropriately. It is useful to call this method after calling .backward() in your training loop in order to avoid subsequent hard to debug errors down the road due to the reducer not finalizing backward. """ self.reducer._check_reducer_finalized() def _set_sparse_metadata(self, global_unique_ids): self.reducer._set_sparse_metadata(global_unique_ids) def _update_process_group(self, new_process_group): """ Dynamically updates the process group for DDP so that we can shrink/expand DDP world size without having to reinitialize DDP. NOTE: If you are using custom communications hooks via, register_comm_hook, you need to update the process groups for those hooks separately. """ # Force a rebuild of buckets for a new process group. This ensures all ranks # are synchronized in terms of when they will rebuild buckets and also # re-evaluates previous assumptions of buckets given the world size might have # changed. 
        self._has_rebuilt_buckets = False
        self.reducer._reset_state()

        if not _rank_not_in_group(new_process_group):
            self.process_group = new_process_group
            self.reducer._update_process_group(new_process_group)

    def _set_ddp_sink_clone(self, val: bool):
        """
        Set whether DDPSink should clone the output tensors.

        The default is True: if the loss is modified in place, cloning avoids
        the "view is modified in-place" autograd error. However, cloning the
        tensors can add a significant memory and performance cost when they
        are numerous or large, so this can be set to False if you are not
        modifying the loss in place.
        """
        self._ddp_sink_clone = val
```
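The ``register_comm_hook`` docstring above warns that the gradient bucket's tensors are *not* pre-divided by the world size. Below is a minimal sketch of an averaging allreduce hook that handles this explicitly. It assumes a distributed-enabled build, an initialized process group, and an already-constructed DDP instance (``ddp_model`` is an illustrative name, not part of the file above).

```py
import torch
import torch.distributed as dist


def allreduce_avg_hook(
    state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
    # Gradients are not pre-divided by world_size (see the warning above),
    # so divide before the allreduce to end up with an average.
    world_size = dist.get_world_size()
    tensor = bucket.buffer().div_(world_size)
    fut = dist.all_reduce(tensor, async_op=True).get_future()

    def _extract(fut: torch.futures.Future) -> torch.Tensor:
        # The returned Future must resolve to a single tensor with the
        # same shape as the bucket's buffer.
        return fut.value()[0]

    return fut.then(_extract)


# Illustrative usage on an existing DistributedDataParallel instance:
# ddp_model.register_comm_hook(state=None, hook=allreduce_avg_hook)
```

Functionally this mirrors what DDP's default allreduce already does; a hook like this is mainly a starting point for the compression or GossipGrad-style experiments the docstring mentions.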
=========================================================================================================================== SOURCE CODE FILE: parallel_apply.py LINES: 1 SIZE: 4.45 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\parallel\parallel_apply.py ENCODING: utf-8 ```py import threading from collections.abc import Sequence from typing import Any, cast, Optional, Union import torch from torch._utils import ExceptionWrapper from torch.cuda._utils import _get_device_index from torch.nn.modules import Module __all__ = ["get_a_var", "parallel_apply"] def get_a_var( obj: Union[torch.Tensor, list[Any], tuple[Any, ...], dict[Any, Any]], ) -> Optional[torch.Tensor]: if isinstance(obj, torch.Tensor): return obj if isinstance(obj, (list, tuple)): for result in map(get_a_var, obj): if isinstance(result, torch.Tensor): return result if isinstance(obj, dict): for result in map(get_a_var, obj.items()): if isinstance(result, torch.Tensor): return result return None def parallel_apply( modules: Sequence[Module], inputs: Sequence[Any], kwargs_tup: Optional[Sequence[dict[str, Any]]] = None, devices: Optional[Sequence[Optional[Union[int, torch.device]]]] = None, ) -> list[Any]: r"""Apply each `module` in :attr:`modules` in parallel on each of :attr:`devices`. Args: modules (Module): modules to be parallelized inputs (tensor): inputs to the modules devices (list of int or torch.device): CUDA devices :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and :attr:`devices` (if given) should all have same length. Moreover, each element of :attr:`inputs` can either be a single object as the only argument to a module, or a collection of positional arguments. """ assert len(modules) == len( inputs ), f"The number of modules {len(modules)} is not equal to the number of inputs {len(inputs)}" if kwargs_tup is not None: assert len(modules) == len(kwargs_tup) else: kwargs_tup = (cast(dict[str, Any], {}),) * len(modules) if devices is not None: assert len(modules) == len(devices) else: devices = [None] * len(modules) devices = [_get_device_index(x, True) for x in devices] streams = [torch.cuda.current_stream(x) for x in devices] lock = threading.Lock() results = {} grad_enabled, autocast_enabled = ( torch.is_grad_enabled(), torch.is_autocast_enabled(), ) def _worker( i: int, module: Module, input: Any, kwargs: dict[str, Any], device: Optional[Union[int, torch.device]] = None, stream: Optional[torch.cuda.Stream] = None, ) -> None: torch.set_grad_enabled(grad_enabled) if device is None: t = get_a_var(input) if t is None: with lock: results[i] = ExceptionWrapper( where=f"in replica {i}, no device was provided and no tensor input was found; " "device cannot be resolved" ) return device = t.get_device() if stream is None: stream = torch.cuda.current_stream(device) try: with torch.cuda.device(device), torch.cuda.stream( stream ), torch.amp.autocast("cuda", enabled=autocast_enabled): # this also avoids accidental slicing of `input` if it is a Tensor if not isinstance(input, (list, tuple)): input = (input,) output = module(*input, **kwargs) with lock: results[i] = output except Exception: with lock: results[i] = ExceptionWrapper( where=f"in replica {i} on device {device}" ) if len(modules) > 1: threads = [ threading.Thread( target=_worker, args=(i, module, input, kwargs, device, stream) ) for i, (module, input, kwargs, device, stream) in enumerate( zip(modules, inputs, kwargs_tup, devices, streams) ) ] for thread in threads: thread.start() for thread in threads: thread.join() else: _worker(0, modules[0], 
inputs[0], kwargs_tup[0], devices[0], streams[0]) outputs = [] for i in range(len(inputs)): output = results[i] if isinstance(output, ExceptionWrapper): output.reraise() outputs.append(output) return outputs ```
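``parallel_apply`` is normally driven by ``DataParallel``, but it can be called directly when one replica per device already exists. A small sketch under the assumption of at least two CUDA devices, using two independent ``Linear`` layers as stand-in replicas (the shapes are illustrative):

```py
import torch
from torch.nn.parallel.parallel_apply import parallel_apply

if torch.cuda.device_count() >= 2:
    # One module per device; normally these would be replicas created by replicate().
    modules = [torch.nn.Linear(8, 4).cuda(0), torch.nn.Linear(8, 4).cuda(1)]
    inputs = [
        torch.randn(16, 8, device="cuda:0"),
        torch.randn(16, 8, device="cuda:1"),
    ]

    # Each module runs on its own device in a separate thread; outputs come
    # back in the same order as the inputs.
    outputs = parallel_apply(modules, inputs, devices=[0, 1])
    print([o.shape for o in outputs])  # [torch.Size([16, 4]), torch.Size([16, 4])]
```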
====================================================================================================================== SOURCE CODE FILE: replicate.py LINES: 1 SIZE: 7.02 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\parallel\replicate.py ENCODING: utf-8 ```py from collections import OrderedDict from collections.abc import Iterator, Sequence from typing import cast, Optional, TYPE_CHECKING, TypeVar, Union from typing_extensions import TypeIs import torch from torch._utils import _get_device_index from torch.nn.modules import Module from torch.nn.parallel import comm if TYPE_CHECKING: from torch._C import ScriptMethod from torch.jit import ScriptModule from torch.jit._state import EnabledProxy __all__ = ["replicate"] def _is_script_module(module: Module) -> TypeIs["ScriptModule"]: import torch.jit return isinstance(module, torch.jit.ScriptModule) def _is_script_method(module: object) -> TypeIs["ScriptMethod"]: import torch.jit return isinstance(module, torch._C.ScriptMethod) def _init_script_module() -> "ScriptModule": import torch.jit return torch.jit.ScriptModule() def _is_jit_enabled() -> "EnabledProxy": import torch.jit._state return torch.jit._state._enabled # Check if we can safely replicate the module. # there are two types of module: # 1. python modules # 2. ScriptModule # # currently a module cannot be replicated properly if the descendants of # any ScriptModule contains python module (type 1 above) def _replicatable_module(module: Module, memo: Optional[set[Module]] = None) -> bool: # module.modules() contains module itself as the first element def descendant_modules(module: Module) -> Iterator[Module]: gen = module.modules() next(gen) return gen if not _is_jit_enabled(): return True if memo is None: memo = set() # memoize visited modules memo.add(module) if _is_script_module(module): memo.update(descendant_modules(module)) return all( _is_script_module(descendant) for descendant in descendant_modules(module) ) for child in module.children(): # since any unreplicatable module will cause the check to return # False early, visited modules here can be safely ignored. 
if child in memo: continue if not _replicatable_module(child, memo): return False return True def _broadcast_coalesced_reshape( tensors: Sequence[torch.Tensor], devices: Sequence[Union[int, torch.device]], detach: bool = False, ) -> list[list[torch.Tensor]]: from torch.nn.parallel._functions import Broadcast if detach: return comm.broadcast_coalesced(tensors, devices) else: # Use the autograd function to broadcast if not detach if len(tensors) > 0: tensor_copies = Broadcast.apply(devices, *tensors) return [ tensor_copies[i : i + len(tensors)] for i in range(0, len(tensor_copies), len(tensors)) ] else: return [] T = TypeVar("T", bound=Module) def replicate( network: T, devices: Sequence[Union[int, torch.device]], detach: bool = False, ) -> list[T]: if not _replicatable_module(network): raise RuntimeError( "Cannot replicate network where python modules are " "childrens of ScriptModule" ) if not devices: return [] devices = [_get_device_index(x, True) for x in devices] num_replicas = len(devices) params = list(network.parameters()) param_indices = {param: idx for idx, param in enumerate(params)} param_copies = _broadcast_coalesced_reshape(params, devices, detach) buffers = list(network.buffers()) buffers_rg: list[torch.Tensor] = [] buffers_not_rg: list[torch.Tensor] = [] for buf in buffers: if buf.requires_grad and not detach: buffers_rg.append(buf) else: buffers_not_rg.append(buf) buffer_indices_rg = {buf: idx for idx, buf in enumerate(buffers_rg)} buffer_indices_not_rg = {buf: idx for idx, buf in enumerate(buffers_not_rg)} buffer_copies_rg = _broadcast_coalesced_reshape(buffers_rg, devices, detach=detach) buffer_copies_not_rg = _broadcast_coalesced_reshape( buffers_not_rg, devices, detach=True ) modules = list(network.modules()) module_copies: list[list[Module]] = [[] for _ in devices] module_indices: dict[Module, int] = {} for i, module in enumerate(modules): module_indices[module] = i for j in range(num_replicas): replica = module._replicate_for_data_parallel() # This is a temporary fix for DDP. DDP needs to access the # replicated model parameters. It used to do so through # `mode.parameters()`. The fix added in #33907 for DP stops the # `parameters()` API from exposing the replicated parameters. # Hence, we add a `_former_parameters` dict here to support DDP. 
replica._former_parameters = OrderedDict() module_copies[j].append(replica) for i, module in enumerate(modules): for key, child in module._modules.items(): if child is None: for j in range(num_replicas): replica = module_copies[j][i] replica._modules[key] = None else: module_idx = module_indices[child] for j in range(num_replicas): replica = module_copies[j][i] setattr(replica, key, module_copies[j][module_idx]) for key, param in module._parameters.items(): if param is None: for j in range(num_replicas): replica = module_copies[j][i] replica._parameters[key] = None else: param_idx = param_indices[param] for j in range(num_replicas): replica = module_copies[j][i] param_copy = param_copies[j][param_idx] # parameters in replicas are no longer leaves, # so setattr them as non-parameter attributes setattr(replica, key, param_copy) # expose the parameter for DDP replica._former_parameters[key] = param_copy for key, buf in module._buffers.items(): # type: ignore[assignment] if buf is None: for j in range(num_replicas): replica = module_copies[j][i] replica._buffers[key] = None else: if buf.requires_grad and not detach: buffer_copies = buffer_copies_rg buffer_idx = buffer_indices_rg[buf] else: buffer_copies = buffer_copies_not_rg buffer_idx = buffer_indices_not_rg[buf] for j in range(num_replicas): replica = module_copies[j][i] setattr(replica, key, buffer_copies[j][buffer_idx]) return [cast(T, module_copies[j][0]) for j in range(num_replicas)] ```
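A short sketch of calling ``replicate`` directly, assuming at least two CUDA devices. Note, per the ``_former_parameters`` comment above, that the copied weights end up as plain tensor attributes on the replicas rather than ``nn.Parameter`` entries:

```py
import torch
from torch.nn.parallel.replicate import replicate

if torch.cuda.device_count() >= 2:
    # The source network lives on the first target device.
    net = torch.nn.Sequential(torch.nn.Linear(8, 4), torch.nn.ReLU()).cuda(0)

    # Broadcast parameters and buffers to both devices. With detach=False the
    # copies stay connected to the originals through the Broadcast autograd op,
    # which is what DataParallel relies on during backward.
    replicas = replicate(net, devices=[0, 1], detach=False)

    for idx, replica in enumerate(replicas):
        # The copied weight is a plain tensor attribute on the replica,
        # placed on that replica's device.
        print(idx, replica[0].weight.device)  # 0 cuda:0 / 1 cuda:1
```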
=========================================================================================================================== SOURCE CODE FILE: scatter_gather.py LINES: 1 SIZE: 5.01 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\parallel\scatter_gather.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from collections.abc import Sequence from typing import Any, Optional, overload, TypeVar, Union from typing_extensions import deprecated import torch from torch.nn.parallel._functions import Gather, Scatter __all__ = ["scatter", "scatter_kwargs", "gather"] @deprecated( "`is_namedtuple` is deprecated, please use the python checks instead", category=FutureWarning, ) def is_namedtuple(obj: Any) -> bool: # Check if type was created from collections.namedtuple or a typing.NamedTuple. return _is_namedtuple(obj) def _is_namedtuple(obj: Any) -> bool: # Check if type was created from collections.namedtuple or a typing.NamedTuple. return ( isinstance(obj, tuple) and hasattr(obj, "_asdict") and hasattr(obj, "_fields") ) T = TypeVar("T", dict, list, tuple) # For some reason, 'scatter' returns a tuple when given a single Tensor input but a list otherwise. @overload def scatter( inputs: torch.Tensor, target_gpus: Sequence[Union[int, torch.device]], dim: int = ..., ) -> tuple[torch.Tensor, ...]: ... @overload def scatter( inputs: T, target_gpus: Sequence[Union[int, torch.device]], dim: int = ..., ) -> list[T]: ... def scatter(inputs, target_gpus, dim=0): r"""Slice tensors into approximately equal chunks and distributes them across given GPUs. Duplicates references to objects that are not tensors. """ def scatter_map(obj): if isinstance(obj, torch.Tensor): return Scatter.apply(target_gpus, None, dim, obj) if _is_namedtuple(obj): return [type(obj)(*args) for args in zip(*map(scatter_map, obj))] if isinstance(obj, tuple) and len(obj) > 0: return list(zip(*map(scatter_map, obj))) if isinstance(obj, list) and len(obj) > 0: return [list(i) for i in zip(*map(scatter_map, obj))] if isinstance(obj, dict) and len(obj) > 0: return [type(obj)(i) for i in zip(*map(scatter_map, obj.items()))] return [obj for _ in target_gpus] # After scatter_map is called, a scatter_map cell will exist. This cell # has a reference to the actual function scatter_map, which has references # to a closure that has a reference to the scatter_map cell (because the # fn is recursive). To avoid this reference cycle, we set the function to # None, clearing the cell try: res = scatter_map(inputs) finally: scatter_map = None # type: ignore[assignment] return res def scatter_kwargs( inputs: tuple[Any, ...], kwargs: Optional[dict[str, Any]], target_gpus: Sequence[Union[int, torch.device]], dim: int = 0, ) -> tuple[tuple[Any, ...], tuple[dict[str, Any], ...]]: r"""Scatter with support for kwargs dictionary.""" scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else [] scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else [] if len(scattered_inputs) < len(scattered_kwargs): scattered_inputs.extend( () for _ in range(len(scattered_kwargs) - len(scattered_inputs)) ) elif len(scattered_kwargs) < len(inputs): scattered_kwargs.extend( {} for _ in range(len(scattered_inputs) - len(scattered_kwargs)) ) return tuple(scattered_inputs), tuple(scattered_kwargs) def gather(outputs: Any, target_device: Union[int, torch.device], dim: int = 0) -> Any: r"""Gather tensors from different GPUs on a specified device. This function is useful for gathering the results of a distributed computation. 
It takes a sequence of objects, one for each GPU, and returns a single object on the specified device. Args: outputs (Any): A sequence of objects (potentially tensors) to gather. target_device (Union[int, torch.device]): The device to gather the tensors to. Use 'cpu' for CPU to avoid a deprecation warning. dim (int, optional): The dimension along which to gather. Default: 0. Returns: Any: A gathered object (potentially tensor) on the specified device. """ def gather_map(outputs): out = outputs[0] if isinstance(out, torch.Tensor): return Gather.apply(target_device, dim, *outputs) if out is None: return None if isinstance(out, dict): if not all(len(out) == len(d) for d in outputs): raise ValueError("All dicts must have the same number of keys") return type(out)((k, gather_map([d[k] for d in outputs])) for k in out) if _is_namedtuple(out): return type(out)._make(map(gather_map, zip(*outputs))) return type(out)(map(gather_map, zip(*outputs))) # Recursive function calls like this create reference cycles. # Setting the function to None clears the refcycle. try: res = gather_map(outputs) finally: gather_map = None # type: ignore[assignment] return res ```
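A small round-trip sketch with ``scatter_kwargs`` and ``gather``, assuming at least two CUDA devices. The doubling step is only a stand-in for whatever per-device computation would normally happen between the two calls:

```py
import torch
from torch.nn.parallel.scatter_gather import gather, scatter_kwargs

if torch.cuda.device_count() >= 2:
    batch = torch.randn(8, 3)

    # Split the positional inputs along dim 0 across the two target GPUs.
    # With kwargs=None, scattered_kwargs is just a tuple of empty dicts.
    scattered_inputs, scattered_kwargs = scatter_kwargs((batch,), None, [0, 1], dim=0)
    print([chunk[0].shape for chunk in scattered_inputs])
    # [torch.Size([4, 3]), torch.Size([4, 3])]

    # Pretend each chunk was processed on its own device, then gather the
    # results back onto a single device; dim 0 restores the batch order.
    outputs = [chunk[0] * 2 for chunk in scattered_inputs]
    merged = gather(outputs, target_device=0, dim=0)
    print(merged.shape, merged.device)  # torch.Size([8, 3]) cuda:0
```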
============================================================================================================= SOURCE CODE FILE: parameter.py LINES: 2 SIZE: 11.38 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\parameter.py ENCODING: utf-8 ```py from collections import OrderedDict import torch from torch._C import _disabled_torch_function_impl # Metaclass to combine _TensorMeta and the instance check override for Parameter. class _ParameterMeta(torch._C._TensorMeta): # Make `isinstance(t, Parameter)` return True for custom tensor instances that have the _is_param flag. def __instancecheck__(self, instance): if self is Parameter: if isinstance(instance, torch.Tensor) and getattr( instance, "_is_param", False ): return True return super().__instancecheck__(instance) class Parameter(torch.Tensor, metaclass=_ParameterMeta): r"""A kind of Tensor that is to be considered a module parameter. Parameters are :class:`~torch.Tensor` subclasses, that have a very special property when used with :class:`Module` s - when they're assigned as Module attributes they are automatically added to the list of its parameters, and will appear e.g. in :meth:`~Module.parameters` iterator. Assigning a Tensor doesn't have such effect. This is because one might want to cache some temporary state, like last hidden state of the RNN, in the model. If there was no such class as :class:`Parameter`, these temporaries would get registered too. Args: data (Tensor): parameter tensor. requires_grad (bool, optional): if the parameter requires gradient. Note that the torch.no_grad() context does NOT affect the default behavior of Parameter creation--the Parameter will still have `requires_grad=True` in :class:`~no_grad` mode. See :ref:`locally-disable-grad-doc` for more details. Default: `True` """ def __new__(cls, data=None, requires_grad=True): if data is None: data = torch.empty(0) if type(data) is torch.Tensor or type(data) is Parameter: # For ease of BC maintenance, keep this path for standard Tensor. # Eventually (tm), we should change the behavior for standard Tensor to match. return torch.Tensor._make_subclass(cls, data, requires_grad) # Path for custom tensors: set a flag on the instance to indicate parameter-ness. t = data.detach().requires_grad_(requires_grad) if type(t) is not type(data): raise RuntimeError( f"Creating a Parameter from an instance of type {type(data).__name__} " "requires that detach() returns an instance of the same type, but return " f"type {type(t).__name__} was found instead. To use the type as a " "Parameter, please correct the detach() semantics defined by " "its __torch_dispatch__() implementation." ) t._is_param = True return t # Note: the 3 methods below only apply to standard Tensor. Parameters of custom tensor types # are still considered that custom tensor type and these methods will not be called for them. 
def __deepcopy__(self, memo): if id(self) in memo: return memo[id(self)] else: result = type(self)( self.data.clone(memory_format=torch.preserve_format), self.requires_grad ) memo[id(self)] = result return result def __repr__(self): return "Parameter containing:\n" + super().__repr__() def __reduce_ex__(self, proto): state = torch._utils._get_obj_state(self) # See Note [Don't serialize hooks] hooks = OrderedDict() if not state: return ( torch._utils._rebuild_parameter, (self.data, self.requires_grad, hooks), ) return ( torch._utils._rebuild_parameter_with_state, (self.data, self.requires_grad, hooks, state), ) __torch_function__ = _disabled_torch_function_impl class UninitializedTensorMixin: _allowed_methods = [ torch.Tensor.__hash__, torch.Tensor.size, torch.Tensor.copy_, torch.Tensor.is_complex, torch.Tensor.is_floating_point, torch.Tensor.half, torch.Tensor.float, torch.Tensor.double, torch.Tensor.char, torch.Tensor.short, torch.Tensor.int, torch.Tensor.long, torch.Tensor.cuda, torch.Tensor.cpu, torch.Tensor.to, torch.Tensor.get_device, torch._has_compatible_shallow_copy_type, ] def materialize(self, shape, device=None, dtype=None): r"""Create a Parameter or Tensor with the same properties of the uninitialized one. Given a shape, it materializes a parameter in the same device and with the same `dtype` as the current one or the specified ones in the arguments. Args: shape : (tuple): the shape for the materialized tensor. device (:class:`torch.device`): the desired device of the parameters and buffers in this module. Optional. dtype (:class:`torch.dtype`): the desired floating point type of the floating point parameters and buffers in this module. Optional. """ if device is None: device = self.data.device if dtype is None: dtype = self.data.dtype self.data = torch.empty(shape, device=device, dtype=dtype) self.__class__ = self.cls_to_become @property def shape(self): raise RuntimeError( "Can't access the shape of an uninitialized parameter or buffer. " "This error usually happens in `load_state_dict` when trying to load " "an uninitialized parameter into an initialized one. " "Call `forward` to initialize the parameters before accessing their attributes." ) def share_memory_(self): raise RuntimeError( "Can't share memory on an uninitialized parameter or buffer. " "Call `forward` to initialize the parameters before calling " "`module.share_memory()`." ) def __repr__(self): return f"<{self.__class__.__name__}>" def __reduce_ex__(self, proto): # See Note [Don't serialize hooks] return (self.__class__, (self.requires_grad,)) @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): # method-wrapper is to detect access to Tensor properties that are # wrapped in descriptors if func in cls._allowed_methods or func.__class__.__name__ == "method-wrapper": if kwargs is None: kwargs = {} return super().__torch_function__(func, types, args, kwargs) raise ValueError( f"Attempted to use an uninitialized parameter in {func}. " "This error happens when you are using a `LazyModule` or " f"explicitly manipulating `torch.nn.parameter.{cls.__name__}` " "objects. When using LazyModules Call `forward` with a dummy batch " "to initialize the parameters before calling torch functions" ) def is_lazy(param): return isinstance(param, UninitializedTensorMixin) class UninitializedParameter(UninitializedTensorMixin, Parameter): r"""A parameter that is not initialized. Uninitialized Parameters are a special case of :class:`torch.nn.Parameter` where the shape of the data is still unknown. 
Unlike a :class:`torch.nn.Parameter`, uninitialized parameters hold no data and attempting to access some properties, like their shape, will throw a runtime error. The only operations that can be performed on a uninitialized parameter are changing its datatype, moving it to a different device and converting it to a regular :class:`torch.nn.Parameter`. The default device or dtype to use when the parameter is materialized can be set during construction using e.g. ``device='cuda'``. """ cls_to_become = Parameter def __new__(cls, requires_grad=True, device=None, dtype=None) -> None: factory_kwargs = {"device": device, "dtype": dtype} data = torch.empty(0, **factory_kwargs) return torch.Tensor._make_subclass(cls, data, requires_grad) def __deepcopy__(self, memo): if id(self) in memo: return memo[id(self)] else: result = type(self)(self.requires_grad, self.data.device, self.data.dtype) memo[id(self)] = result return result # Metaclass to combine _TensorMeta and the instance check override for Buffer. class _BufferMeta(torch._C._TensorMeta): # Make `isinstance(t, Buffer)` return True for custom tensor instances that have the _is_buffer flag. def __instancecheck__(self, instance): if self is Buffer: if isinstance(instance, torch.Tensor) and getattr( instance, "_is_buffer", False ): return True return super().__instancecheck__(instance) class Buffer(torch.Tensor, metaclass=_BufferMeta): r"""A kind of Tensor that should not be considered a model parameter. For example, BatchNorm's ``running_mean`` is not a parameter, but is part of the module's state. Buffers are :class:`~torch.Tensor` subclasses, that have a very special property when used with :class:`Module` s -- when they're assigned as Module attributes they are automatically added to the list of its buffers, and will appear e.g. in :meth:`~torch.nn.Module.buffers` iterator. Assigning a Tensor doesn't have such effect. One can still assign a Tensor as explicitly by using the :meth:`~torch.nn.Module.register_buffer` function. Args: data (Tensor): buffer tensor. persistent (bool, optional): whether the buffer is part of the module's :attr:`state_dict`. Default: ``True`` """ def __new__(cls, data=None, *, persistent=True): if data is None: data = torch.empty(0) t = data.detach().requires_grad_(data.requires_grad) t.persistent = persistent t._is_buffer = True return t __torch_function__ = _disabled_torch_function_impl class UninitializedBuffer(UninitializedTensorMixin, torch.Tensor): r"""A buffer that is not initialized. Uninitialized Buffer is a a special case of :class:`torch.Tensor` where the shape of the data is still unknown. Unlike a :class:`torch.Tensor`, uninitialized parameters hold no data and attempting to access some properties, like their shape, will throw a runtime error. The only operations that can be performed on a uninitialized parameter are changing its datatype, moving it to a different device and converting it to a regular :class:`torch.Tensor`. The default device or dtype to use when the buffer is materialized can be set during construction using e.g. ``device='cuda'``. """ cls_to_become = torch.Tensor def __new__( cls, requires_grad=False, device=None, dtype=None, persistent=True ) -> None: factory_kwargs = {"device": device, "dtype": dtype} data = torch.empty(0, **factory_kwargs) ret = torch.Tensor._make_subclass(cls, data, requires_grad) ret.persistent = persistent ret._is_buffer = True return ret ```
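A brief usage sketch of the classes defined above. It assumes a PyTorch build in which assigning a ``Buffer`` attribute auto-registers it on the module, as its docstring states; the ``Scale`` module and the attribute names are illustrative:

```py
import torch
import torch.nn as nn
from torch.nn.parameter import Buffer, UninitializedParameter, is_lazy


class Scale(nn.Module):
    def __init__(self):
        super().__init__()
        # Assigning a Parameter as an attribute registers it in .parameters().
        self.weight = nn.Parameter(torch.ones(3))
        # Assigning a Buffer registers it in .buffers() (per the Buffer docstring);
        # persistent=False would additionally keep it out of the state_dict.
        self.running_sum = Buffer(torch.zeros(3))


m = Scale()
print([name for name, _ in m.named_parameters()])  # ['weight']
print([name for name, _ in m.named_buffers()])     # ['running_sum']

# An UninitializedParameter holds no data until materialize() gives it a shape;
# afterwards it becomes a regular nn.Parameter (cls_to_become above).
p = UninitializedParameter()
print(is_lazy(p))        # True
p.materialize((4, 2))
print(isinstance(p, nn.Parameter), p.shape)  # True torch.Size([4, 2])
```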
================================================================================================================
SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.37 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\qat\__init__.py ENCODING: utf-8
```py
# flake8: noqa: F401
r"""QAT Modules.

This package is in the process of being deprecated.
Please use `torch.ao.nn.qat` instead.
"""
from torch.nn.qat import dynamic, modules  # noqa: F403
from torch.nn.qat.modules import *  # noqa: F403


__all__ = [
    "Linear",
    "Conv1d",
    "Conv2d",
    "Conv3d",
    "Embedding",
    "EmbeddingBag",
]
```
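The deprecation notes in this and the following shim packages all point at the ``torch.ao.nn`` namespace. A minimal sketch of the preferred import path, showing that the old namespace simply re-exports the new classes:

```py
# Deprecated path (still works via the re-export shims shown in these files):
from torch.nn.qat.modules import Linear as QATLinearOld

# Preferred path going forward, per the deprecation notes:
from torch.ao.nn.qat.modules import Linear as QATLinear

# The shim imports the class from torch.ao, so both names refer to the same object.
print(QATLinear is QATLinearOld)  # True
```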
======================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.21 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\qat\dynamic\__init__.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""QAT Dynamic Modules. This package is in the process of being deprecated. Please, use `torch.ao.nn.qat.dynamic` instead. """ from torch.nn.qat.dynamic.modules import * # noqa: F403 ```
================================================================================================================================ SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.08 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\qat\dynamic\modules\__init__.py ENCODING: utf-8 ```py from torch.nn.qat.dynamic.modules.linear import Linear __all__ = ["Linear"] ```
============================================================================================================================== SOURCE CODE FILE: linear.py LINES: 1 SIZE: 0.42 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\qat\dynamic\modules\linear.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""QAT Modules. This file is in the process of migration to `torch/ao/nn/qat/dynamic`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/qat/dynamic/modules`, while adding an import statement here. """ from torch.ao.nn.qat.dynamic.modules.linear import Linear ```
======================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.51 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\qat\modules\__init__.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""QAT Modules. This package is in the process of being deprecated. Please, use `torch.ao.nn.qat.modules` instead. """ from torch.ao.nn.qat.modules.conv import Conv1d, Conv2d, Conv3d from torch.ao.nn.qat.modules.embedding_ops import Embedding, EmbeddingBag from torch.ao.nn.qat.modules.linear import Linear from torch.nn.qat.modules import conv, embedding_ops, linear __all__ = [ "Linear", "Conv1d", "Conv2d", "Conv3d", "Embedding", "EmbeddingBag", ] ```
==================================================================================================================== SOURCE CODE FILE: conv.py LINES: 1 SIZE: 0.41 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\qat\modules\conv.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""QAT Modules. This file is in the process of migration to `torch/ao/nn/qat`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/qat/modules`, while adding an import statement here. """ from torch.ao.nn.qat.modules.conv import Conv1d, Conv2d, Conv3d ```
============================================================================================================================= SOURCE CODE FILE: embedding_ops.py LINES: 1 SIZE: 0.46 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\qat\modules\embedding_ops.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""QAT Modules. This file is in the process of migration to `torch/ao/nn/qat`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/qat/modules`, while adding an import statement here. """ from torch.ao.nn.qat.modules.embedding_ops import Embedding, EmbeddingBag __all__ = ["Embedding", "EmbeddingBag"] ```
====================================================================================================================== SOURCE CODE FILE: linear.py LINES: 1 SIZE: 0.39 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\qat\modules\linear.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""QAT Modules. This file is in the process of migration to `torch/ao/nn/qat`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/qat/modules`, while adding an import statement here. """ from torch.ao.nn.qat.modules.linear import Linear ```
======================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.06 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantizable\__init__.py ENCODING: utf-8 ```py from torch.nn.quantizable.modules import * # noqa: F403 ```
================================================================================================================================ SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.21 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantizable\modules\__init__.py ENCODING: utf-8 ```py from torch.ao.nn.quantizable.modules.activation import MultiheadAttention from torch.ao.nn.quantizable.modules.rnn import LSTM, LSTMCell __all__ = [ "LSTM", "LSTMCell", "MultiheadAttention", ] ```
================================================================================================================================== SOURCE CODE FILE: activation.py LINES: 1 SIZE: 0.44 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantizable\modules\activation.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantizable Modules. This file is in the process of migration to `torch/ao/nn/quantizable`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantizable/modules`, while adding an import statement here. """ from torch.ao.nn.quantizable.modules.activation import MultiheadAttention ```
=========================================================================================================================== SOURCE CODE FILE: rnn.py LINES: 1 SIZE: 0.43 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantizable\modules\rnn.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantizable Modules. This file is in the process of migration to `torch/ao/nn/quantizable`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantizable/modules`, while adding an import statement here. """ from torch.ao.nn.quantizable.modules.rnn import LSTM, LSTMCell ```
====================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.79 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\__init__.py ENCODING: utf-8 ```py from torch.nn.quantized import dynamic, functional, modules # noqa: F403 from torch.nn.quantized.modules import * # noqa: F403 from torch.nn.quantized.modules import MaxPool2d __all__ = [ "BatchNorm2d", "BatchNorm3d", "Conv1d", "Conv2d", "Conv3d", "ConvTranspose1d", "ConvTranspose2d", "ConvTranspose3d", "DeQuantize", "Dropout", "ELU", "Embedding", "EmbeddingBag", "GroupNorm", "Hardswish", "InstanceNorm1d", "InstanceNorm2d", "InstanceNorm3d", "LayerNorm", "LeakyReLU", "Linear", "LSTM", "MultiheadAttention", "PReLU", "Quantize", "ReLU6", "Sigmoid", "Softmax", # Wrapper modules "FloatFunctional", "FXFloatFunctional", "QFunctional", ] ```
================================================================================================================================= SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.07 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\_reference\__init__.py ENCODING: utf-8 ```py from torch.nn.quantized._reference.modules import * # noqa: F403 ```
========================================================================================================================================= SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 1.03 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\_reference\modules\__init__.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Reference Modules. This module is in the process of migration to `torch/ao/nn/quantized/reference`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/reference`, while adding an import statement here. """ from torch.ao.nn.quantized.reference.modules.conv import ( Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, ) from torch.ao.nn.quantized.reference.modules.linear import Linear from torch.ao.nn.quantized.reference.modules.rnn import GRUCell, LSTM, LSTMCell, RNNCell from torch.ao.nn.quantized.reference.modules.sparse import Embedding, EmbeddingBag __all__ = [ "Linear", "Conv1d", "Conv2d", "Conv3d", "ConvTranspose1d", "ConvTranspose2d", "ConvTranspose3d", "RNNCell", "LSTMCell", "GRUCell", "LSTM", "Embedding", "EmbeddingBag", ] ```
===================================================================================================================================== SOURCE CODE FILE: conv.py LINES: 1 SIZE: 0.59 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\_reference\modules\conv.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Reference Modules. This module is in the process of migration to `torch/ao/nn/quantized/reference`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/reference`, while adding an import statement here. """ from torch.ao.nn.quantized.reference.modules.conv import ( _ConvNd, _ConvTransposeNd, Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, ) ```
======================================================================================================================================= SOURCE CODE FILE: linear.py LINES: 1 SIZE: 0.45 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\_reference\modules\linear.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Reference Modules. This module is in the process of migration to `torch/ao/nn/quantized/reference`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/reference`, while adding an import statement here. """ from torch.ao.nn.quantized.reference.modules.linear import Linear ```
==================================================================================================================================== SOURCE CODE FILE: rnn.py LINES: 1 SIZE: 0.53 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\_reference\modules\rnn.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Reference Modules. This module is in the process of migration to `torch/ao/nn/quantized/reference`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/reference`, while adding an import statement here. """ from torch.ao.nn.quantized.reference.modules.rnn import ( GRUCell, LSTM, LSTMCell, RNNBase, RNNCell, RNNCellBase, ) ```
======================================================================================================================================= SOURCE CODE FILE: sparse.py LINES: 1 SIZE: 0.47 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\_reference\modules\sparse.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Reference Modules. This module is in the process of migration to `torch/ao/nn/quantized/reference`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/reference`, while adding an import statement here. """ from torch.ao.nn.quantized.reference.modules.sparse import Embedding, EmbeddingBag ```
====================================================================================================================================== SOURCE CODE FILE: utils.py LINES: 1 SIZE: 0.59 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\_reference\modules\utils.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Reference Modules. This module is in the process of migration to `torch/ao/nn/quantized/reference`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/reference`, while adding an import statement here. """ from torch.ao.nn.quantized.reference.modules.utils import ( _get_weight_qparam_keys, _quantize_and_dequantize_weight, _quantize_weight, _save_weight_qparams, ReferenceQuantizedModule, ) ```
============================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.06 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\dynamic\__init__.py ENCODING: utf-8 ```py from torch.ao.nn.quantized.dynamic import * # noqa: F403 ```
====================================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 1.01 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\dynamic\modules\__init__.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Dynamic Modules. This file is in the process of migration to `torch/ao/nn/quantized/dynamic`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/dynamic`, while adding an import statement here. """ from torch.ao.nn.quantized.dynamic.modules import conv, linear, rnn from torch.ao.nn.quantized.dynamic.modules.conv import ( Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, ) from torch.ao.nn.quantized.dynamic.modules.linear import Linear from torch.ao.nn.quantized.dynamic.modules.rnn import ( GRU, GRUCell, LSTM, LSTMCell, RNNCell, ) __all__ = [ "Linear", "LSTM", "GRU", "LSTMCell", "RNNCell", "GRUCell", "Conv1d", "Conv2d", "Conv3d", "ConvTranspose1d", "ConvTranspose2d", "ConvTranspose3d", ] ```
================================================================================================================================== SOURCE CODE FILE: conv.py LINES: 1 SIZE: 0.68 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\dynamic\modules\conv.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Dynamic Modules. This file is in the process of migration to `torch/ao/nn/quantized/dynamic`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/dynamic/modules`, while adding an import statement here. """ from torch.ao.nn.quantized.dynamic.modules.conv import ( Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, ) __all__ = [ "Conv1d", "Conv2d", "Conv3d", "ConvTranspose1d", "ConvTranspose2d", "ConvTranspose3d", ] ```
==================================================================================================================================== SOURCE CODE FILE: linear.py LINES: 1 SIZE: 0.45 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\dynamic\modules\linear.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Dynamic Modules. This file is in the process of migration to `torch/ao/nn/quantized/dynamic`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/dynamic/modules`, while adding an import statement here. """ from torch.ao.nn.quantized.dynamic.modules.linear import Linear ```
================================================================================================================================= SOURCE CODE FILE: rnn.py LINES: 1 SIZE: 0.76 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\dynamic\modules\rnn.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Dynamic Modules. This file is in the process of migration to `torch/ao/nn/quantized/dynamic`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/dynamic/modules`, while adding an import statement here. """ from torch.ao.nn.quantized.dynamic.modules.rnn import ( GRU, GRUCell, LSTM, LSTMCell, pack_weight_bias, PackedParameter, RNNBase, RNNCell, RNNCellBase, ) __all__ = [ "pack_weight_bias", "PackedParameter", "RNNBase", "LSTM", "GRU", "RNNCellBase", "RNNCell", "LSTMCell", "GRUCell", ] ```
======================================================================================================================== SOURCE CODE FILE: functional.py LINES: 1 SIZE: 0.28 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\functional.py ENCODING: utf-8 ```py r"""nn.quantized.functional. Quantized equivalents of the `nn.functional`. Note:: This location is in the process of being deprecated. Please, use the `torch.ao.nn.quantized.functional` instead. """ from torch.ao.nn.quantized.functional import * # noqa: F401,F403 ```
============================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 2.15 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\modules\__init__.py ENCODING: utf-8 ```py r"""Quantized Modules. Note:: The `torch.nn.quantized` namespace is in the process of being deprecated. Please, use `torch.ao.nn.quantized` instead. """ # The following imports are needed in case the user decides # to import the files directly, # s.a. `from torch.nn.quantized.modules.conv import ...`. # No need to add them to the `__all__`. from torch.ao.nn.quantized.modules import ( activation, batchnorm, conv, DeQuantize, dropout, embedding_ops, functional_modules, linear, MaxPool2d, normalization, Quantize, rnn, utils, ) from torch.ao.nn.quantized.modules.activation import ( ELU, Hardswish, LeakyReLU, MultiheadAttention, PReLU, ReLU6, Sigmoid, Softmax, ) from torch.ao.nn.quantized.modules.batchnorm import BatchNorm2d, BatchNorm3d from torch.ao.nn.quantized.modules.conv import ( Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, ) from torch.ao.nn.quantized.modules.dropout import Dropout from torch.ao.nn.quantized.modules.embedding_ops import Embedding, EmbeddingBag from torch.ao.nn.quantized.modules.functional_modules import ( FloatFunctional, FXFloatFunctional, QFunctional, ) from torch.ao.nn.quantized.modules.linear import Linear from torch.ao.nn.quantized.modules.normalization import ( GroupNorm, InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, LayerNorm, ) from torch.ao.nn.quantized.modules.rnn import LSTM __all__ = [ "BatchNorm2d", "BatchNorm3d", "Conv1d", "Conv2d", "Conv3d", "ConvTranspose1d", "ConvTranspose2d", "ConvTranspose3d", "DeQuantize", "ELU", "Embedding", "EmbeddingBag", "GroupNorm", "Hardswish", "InstanceNorm1d", "InstanceNorm2d", "InstanceNorm3d", "LayerNorm", "LeakyReLU", "Linear", "LSTM", "MultiheadAttention", "Quantize", "ReLU6", "Sigmoid", "Softmax", "Dropout", "PReLU", # Wrapper modules "FloatFunctional", "FXFloatFunctional", "QFunctional", ] ```
================================================================================================================================ SOURCE CODE FILE: activation.py LINES: 1 SIZE: 0.54 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\modules\activation.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Modules. This file is in the process of migration to `torch/ao/nn/quantized`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/modules`, while adding an import statement here. """ from torch.ao.nn.quantized.modules.activation import ( ELU, Hardswish, LeakyReLU, MultiheadAttention, PReLU, ReLU6, Sigmoid, Softmax, ) ```
=============================================================================================================================== SOURCE CODE FILE: batchnorm.py LINES: 1 SIZE: 0.44 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\modules\batchnorm.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Modules. This file is in the process of migration to `torch/ao/nn/quantized`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/modules`, while adding an import statement here. """ from torch.ao.nn.quantized.modules.batchnorm import BatchNorm2d, BatchNorm3d ```
========================================================================================================================== SOURCE CODE FILE: conv.py LINES: 1 SIZE: 0.68 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\modules\conv.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Modules. This file is in the process of migration to `torch/ao/nn/quantized`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/modules`, while adding an import statement here. """ from torch.ao.nn.quantized.modules.conv import ( _reverse_repeat_padding, Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, ) __all__ = [ "Conv1d", "Conv2d", "Conv3d", "ConvTranspose1d", "ConvTranspose2d", "ConvTranspose3d", ] ```
============================================================================================================================= SOURCE CODE FILE: dropout.py LINES: 1 SIZE: 0.45 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\modules\dropout.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Modules. This file is in the process of migration to `torch/ao/nn/quantized`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/modules`, while adding an import statement here. """ from torch.ao.nn.quantized.modules.dropout import Dropout __all__ = ["Dropout"] ```
=================================================================================================================================== SOURCE CODE FILE: embedding_ops.py LINES: 1 SIZE: 0.55 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\modules\embedding_ops.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Modules. This file is in the process of migration to `torch/ao/nn/quantized`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/modules`, while adding an import statement here. """ from torch.ao.nn.quantized.modules.embedding_ops import ( Embedding, EmbeddingBag, EmbeddingPackedParams, ) __all__ = ["EmbeddingPackedParams", "Embedding", "EmbeddingBag"] ```
======================================================================================================================================== SOURCE CODE FILE: functional_modules.py LINES: 1 SIZE: 0.56 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\modules\functional_modules.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Modules. This file is in the process of migration to `torch/ao/nn/quantized`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/modules`, while adding an import statement here. """ from torch.ao.nn.quantized.modules.functional_modules import ( FloatFunctional, FXFloatFunctional, QFunctional, ) __all__ = ["FloatFunctional", "FXFloatFunctional", "QFunctional"] ```
============================================================================================================================ SOURCE CODE FILE: linear.py LINES: 1 SIZE: 0.48 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\modules\linear.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Modules. This file is in the process of migration to `torch/ao/nn/quantized`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/modules`, while adding an import statement here. """ from torch.ao.nn.quantized.modules.linear import Linear, LinearPackedParams __all__ = ["LinearPackedParams", "Linear"] ```
=================================================================================================================================== SOURCE CODE FILE: normalization.py LINES: 1 SIZE: 0.64 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\modules\normalization.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Modules. This file is in the process of migration to `torch/ao/nn/quantized`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/modules`, while adding an import statement here. """ from torch.ao.nn.quantized.modules.normalization import ( GroupNorm, InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, LayerNorm, ) __all__ = [ "LayerNorm", "GroupNorm", "InstanceNorm1d", "InstanceNorm2d", "InstanceNorm3d", ] ```
========================================================================================================================= SOURCE CODE FILE: rnn.py LINES: 1 SIZE: 0.41 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\modules\rnn.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Modules. This file is in the process of migration to `torch/ao/nn/quantized`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/modules`, while adding an import statement here. """ from torch.ao.nn.quantized.modules.rnn import LSTM ```
=========================================================================================================================== SOURCE CODE FILE: utils.py LINES: 1 SIZE: 0.54 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\quantized\modules\utils.py ENCODING: utf-8 ```py # flake8: noqa: F401 r"""Quantized Modules. This file is in the process of migration to `torch/ao/nn/quantized`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate file under the `torch/ao/nn/quantized/modules`, while adding an import statement here. """ from torch.ao.nn.quantized.modules.utils import ( _hide_packed_params_repr, _ntuple_from_first, _pair_from_first, _quantize_weight, WeightedQuantizedModule, ) ```
================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 1.26 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\__init__.py ENCODING: utf-8 ```py from . import parametrizations, rnn, stateless from .clip_grad import ( _clip_grads_with_norm_ as clip_grads_with_norm_, _get_total_norm as get_total_norm, clip_grad_norm, clip_grad_norm_, clip_grad_value_, ) from .convert_parameters import parameters_to_vector, vector_to_parameters from .fusion import ( fuse_conv_bn_eval, fuse_conv_bn_weights, fuse_linear_bn_eval, fuse_linear_bn_weights, ) from .init import skip_init from .memory_format import ( convert_conv2d_weight_memory_format, convert_conv3d_weight_memory_format, ) from .spectral_norm import remove_spectral_norm, spectral_norm from .weight_norm import remove_weight_norm, weight_norm __all__ = [ "clip_grad_norm", "clip_grad_norm_", "clip_grads_with_norm_", "clip_grad_value_", "convert_conv2d_weight_memory_format", "convert_conv3d_weight_memory_format", "fuse_conv_bn_eval", "fuse_conv_bn_weights", "fuse_linear_bn_eval", "fuse_linear_bn_weights", "get_total_norm", "parameters_to_vector", "parametrizations", "remove_spectral_norm", "remove_weight_norm", "rnn", "skip_init", "spectral_norm", "stateless", "vector_to_parameters", "weight_norm", ] ```
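A short usage sketch of the public helpers re-exported above: gradient clipping plus the parameter/vector round trip.

```py
import torch
from torch import nn
from torch.nn.utils import clip_grad_norm_, parameters_to_vector, vector_to_parameters

model = nn.Linear(10, 2)
model(torch.randn(4, 10)).sum().backward()

# Clip gradients in place; the pre-clip total norm is returned.
total_norm = clip_grad_norm_(model.parameters(), max_norm=1.0)

# Flatten all parameters into one vector and write them back unchanged.
flat = parameters_to_vector(model.parameters())
vector_to_parameters(flat, model.parameters())
print(total_norm.item(), flat.shape)
```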
============================================================================================================================ SOURCE CODE FILE: _deprecation_utils.py LINES: 1 SIZE: 1.69 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\_deprecation_utils.py ENCODING: utf-8 ```py import importlib import warnings from typing import Callable _MESSAGE_TEMPLATE = ( r"Usage of '{old_location}' is deprecated; please use '{new_location}' instead." ) def lazy_deprecated_import( all: list[str], old_module: str, new_module: str, ) -> Callable: r"""Import utility to lazily import deprecated packages / modules / functions. The old_module and new_module are also used in the deprecation warning defined by the `_MESSAGE_TEMPLATE`. Args: all: The list of the functions that are imported. Generally, the module's __all__ list. old_module: Old module location new_module: New module location / Migrated location Returns: Callable to assign to the `__getattr__` Usage: # In the `torch/nn/quantized/functional.py` from torch.nn.utils._deprecation_utils import lazy_deprecated_import _MIGRATED_TO = "torch.ao.nn.quantized.functional" __getattr__ = lazy_deprecated_import( all=__all__, old_module=__name__, new_module=_MIGRATED_TO) """ warning_message = _MESSAGE_TEMPLATE.format( old_location=old_module, new_location=new_module ) def getattr_dunder(name: str) -> None: if name in all: # We are using the "RuntimeWarning" to make sure it is not # ignored by default. warnings.warn(warning_message, RuntimeWarning) package = importlib.import_module(new_module) return getattr(package, name) raise AttributeError(f"Module {new_module!r} has no attribute {name!r}.") return getattr_dunder ```
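A sketch of the pattern the docstring above describes, placed at module level in a hypothetical shim module (the `__all__` entry and the migration target below are illustrative, not taken from this file):

```py
# Hypothetical shim module: names in __all__ are resolved lazily from the new
# location via a PEP 562 module-level __getattr__, with a RuntimeWarning on use.
from torch.nn.utils._deprecation_utils import lazy_deprecated_import

__all__ = ["some_function"]                   # illustrative re-exported name
_MIGRATED_TO = "torch.ao.nn.some_new_module"  # illustrative migration target
__getattr__ = lazy_deprecated_import(
    all=__all__, old_module=__name__, new_module=_MIGRATED_TO
)
```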
==================================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.45 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\_expanded_weights\__init__.py ENCODING: utf-8 ```py from .conv_expanded_weights import ConvPerSampleGrad from .embedding_expanded_weights import EmbeddingPerSampleGrad from .expanded_weights_impl import ExpandedWeight from .group_norm_expanded_weights import GroupNormPerSampleGrad from .instance_norm_expanded_weights import InstanceNormPerSampleGrad from .layer_norm_expanded_weights import LayerNormPerSampleGrad from .linear_expanded_weights import LinearPerSampleGrad __all__ = ["ExpandedWeight"] ```
================================================================================================================================================= SOURCE CODE FILE: conv_expanded_weights.py LINES: 1 SIZE: 2.61 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\_expanded_weights\conv_expanded_weights.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import torch import torch.nn.functional as F from .conv_utils import ( conv_args_and_kwargs, conv_backward, conv_input_for_string_padding, conv_picker, ) from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads from .expanded_weights_utils import forward_helper @implements_per_sample_grads(F.conv1d) @implements_per_sample_grads(F.conv2d) @implements_per_sample_grads(F.conv3d) class ConvPerSampleGrad(torch.autograd.Function): @staticmethod def forward(ctx, kwarg_names, conv_fn, *expanded_args_and_kwargs): expanded_args, expanded_kwargs = conv_args_and_kwargs( kwarg_names, expanded_args_and_kwargs ) orig_input = expanded_args[0] was_same_padding = expanded_kwargs["padding"] == "same" if isinstance(expanded_kwargs["padding"], str): # if padding is a string, we'll do the necessary padding (slowly) using F.pad kernel_size = expanded_args[1].shape[2:] padding, dilation = expanded_kwargs["padding"], expanded_kwargs["dilation"] input = conv_input_for_string_padding( conv_fn, padding, expanded_args[0], dilation, kernel_size ) expanded_args = (input, expanded_args[1]) # since we've already done the padding, don't need any more expanded_kwargs["padding"] = 0 output = forward_helper(conv_fn, expanded_args, expanded_kwargs) input, weight = expanded_args batched_dim_size = conv_picker(conv_fn, 3, 4, 5) if input.dim() != batched_dim_size: raise RuntimeError( f"Expanded Weights only support convolution with batched input, got {conv_fn} with an " f"unbatched input of dim {input.dim()}, expected input of dim {batched_dim_size}" ) ctx.conv_fn = conv_fn ctx.batch_size = orig_input.shape[0] ctx.input_required_grad = orig_input.requires_grad ctx.orig_input_shape = orig_input.shape ctx.was_same_padding = was_same_padding ctx.stride, ctx.padding = expanded_kwargs["stride"], expanded_kwargs["padding"] ctx.dilation, ctx.groups = ( expanded_kwargs["dilation"], expanded_kwargs["groups"], ) if isinstance(weight, ExpandedWeight): ctx.input = input ctx.weight = weight ctx.bias = expanded_kwargs["bias"] return output @staticmethod def backward(ctx, grad_output): return conv_backward(ctx.conv_fn, ctx, grad_output) ```
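A shape-level sketch of exercising this per-sample-grad path through `call_for_per_sample_grads` (defined later in this package, in `_per_sample_grad.py`). These are private internals, so treat this as illustrative rather than a stable API:

```py
import torch
from torch import nn
from torch.nn.utils._per_sample_grad import call_for_per_sample_grads

model = nn.Conv2d(3, 8, kernel_size=3, padding=1)
x = torch.randn(5, 3, 16, 16)  # batch size 5

call_for_per_sample_grads(model)(x).sum().backward()
print(model.weight.grad_sample.shape)  # torch.Size([5, 8, 3, 3, 3])
print(model.bias.grad_sample.shape)    # torch.Size([5, 8])
```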
====================================================================================================================================== SOURCE CODE FILE: conv_utils.py LINES: 1 SIZE: 10.79 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\_expanded_weights\conv_utils.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from typing import Optional import numpy as np import torch import torch.nn.functional as F from .expanded_weights_utils import ( set_grad_sample_if_exists, unpack_expanded_weight_or_tensor, ) THRESHOLD = 32 def conv_picker(func, conv1dOpt, conv2dOpt, conv3dOpt): if func == F.conv1d: return conv1dOpt if func == F.conv2d: return conv2dOpt else: assert func == F.conv3d return conv3dOpt def conv_args_and_kwargs(kwarg_names, expanded_args_and_kwargs): args = expanded_args_and_kwargs[: len(expanded_args_and_kwargs) - len(kwarg_names)] kwargs = expanded_args_and_kwargs[ len(expanded_args_and_kwargs) - len(kwarg_names) : ] kwargs = dict(zip(kwarg_names, kwargs)) return conv_normalizer(*args, **kwargs) def conv_normalizer( input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, ): return (input, weight), { "bias": bias, "stride": stride, "padding": padding, "dilation": dilation, "groups": groups, } def conv_input_for_string_padding(func, padding_style, input, dilation, kernel_size): if padding_style == "valid": return input else: padding = int_padding_for_string_padding( func, padding_style, dilation, kernel_size ) return F.pad(input, padding) def int_padding_for_string_padding(func, padding_style, dilation, kernel_size): def get_dilation(i): return dilation[i] if isinstance(dilation, tuple) else dilation if padding_style == "same": padding: list[int] = [] # F.pad needs the padding in reverse order from what conv expects for i in range(conv_picker(func, 0, 1, 2), -1, -1): padding += conv_padding_for_same(get_dilation(i), kernel_size[i]) return padding elif padding_style == "valid": return conv_picker(func, 2, 4, 6) * (0,) else: raise RuntimeError( f"got padding type of {padding_style}, only accept 'same' or 'valid'" ) def conv_padding_for_same(dilation, kernel_size): total_pad = dilation * (kernel_size - 1) left_pad = total_pad // 2 right_pad = total_pad - left_pad return left_pad, right_pad def conv_backward(func, ctx, grad_output): def weight_grad_sample(weight): if batch_size < THRESHOLD and groups == 1: return conv_group_weight_grad_sample( ctx.input, grad_output, weight_shape, stride, padding, dilation, batch_size, func, ) else: return conv_unfold_weight_grad_sample( ctx.input, grad_output, weight_shape, kernel_size, stride, padding, dilation, groups, func, ) def expand(param): if isinstance(param, int): return conv_picker(func, (param,), (param, param), (param, param, param)) else: return param def calc_total_padding(func, was_same, padding, dilation, kernel_size): if was_same: all_padding = int_padding_for_string_padding( func, "same", dilation, kernel_size ) # F.pad needs the padding in reverse order from what conv expects total_padding = tuple( all_padding[i] + all_padding[i - 1] for i in range(len(all_padding) - 1, -1, -2) ) return total_padding else: return tuple(2 * pad for pad in padding) weight_shape = ctx.weight.shape stride, padding, dilation, groups = ( expand(ctx.stride), expand(ctx.padding), expand(ctx.dilation), ctx.groups, ) kernel_size = [weight_shape[i] for i in range(2, conv_picker(func, 3, 4, 5))] batch_size = ctx.batch_size results: list[Optional[torch.Tensor]] = [] results.append(None) # for kwarg names results.append(None) # for op 
reference # "same" padding may give uneven padding on either side so we need to separate the "padding" attr and total padding total_padding = calc_total_padding( func, ctx.was_same_padding, padding, dilation, kernel_size ) if ctx.input_required_grad: output_padding = [] input_dims = conv_picker(func, 1, 2, 3) for i in range(input_dims): input_dim = ctx.orig_input_shape[2 + i] output_padding.append( ( total_padding[i] + input_dim - (kernel_size[i] * dilation[i] - dilation[i] + 1) ) % stride[i] ) weight_ = unpack_expanded_weight_or_tensor(ctx.weight) transpose_func = conv_picker( func, F.conv_transpose1d, F.conv_transpose2d, F.conv_transpose3d ) out = transpose_func( grad_output, weight_, None, stride, padding, tuple(output_padding), groups, dilation, ) if ctx.was_same_padding: for i in range(len(total_padding)): out = torch.narrow( out, 2 + i, total_padding[i] // 2, ctx.orig_input_shape[2 + i] ) results.append(out) else: results.append(None) # weight and bias don't compute batched gradients; no other arguments are differentiable results = results + [None] * 6 # set grad_sample field for weight and bias with per sample gradients set_grad_sample_if_exists(ctx.weight, weight_grad_sample) set_grad_sample_if_exists( ctx.bias, lambda _: grad_output.reshape(*grad_output.shape[:2], -1).sum(dim=2) ) return tuple(results) def conv_unfold_weight_grad_sample( input, grad_output, weight_shape, kernel_size, stride, padding, dilation, groups, func, ): n = input.shape[0] in_channels = input.shape[1] unfold_func = conv_picker( func, lambda: F.unfold( input.unsqueeze(-2), kernel_size=(1, kernel_size[0]), dilation=(1, dilation[0]), padding=(0, padding[0]), stride=(1, stride[0]), ), lambda: F.unfold( input, kernel_size, dilation=dilation, padding=padding, stride=stride ), lambda: unfold3d(input, kernel_size, padding, stride, dilation), ) input = unfold_func() grad_output = grad_output.reshape(n, -1, input.shape[-1]) # n=batch_sz; o=num_out_channels; p=(num_in_channels/groups)*kernel_sz weight_grad_sample = torch.einsum("noq,npq->nop", grad_output, input) # rearrange the above tensor and extract diagonals. weight_grad_sample = weight_grad_sample.view( n, groups, -1, groups, int(in_channels / groups), np.prod(kernel_size), ) weight_grad_sample = torch.einsum( "ngrg...->ngr...", weight_grad_sample ).contiguous() shape = [n] + list(weight_shape) weight_grad_sample = weight_grad_sample.view(shape) return weight_grad_sample def conv_group_weight_grad_sample( input, grad_output, weight_shape, stride, padding, dilation, batch_size, func, ): I = input.shape[1] O = grad_output.shape[1] input_ = input.transpose(0, 1) grad_output_ = grad_output.view( grad_output.shape[0] * grad_output.shape[1], 1, *grad_output.shape[2:] ) weight_grad_sample = func( input_, grad_output_, None, stride=dilation, padding=padding, dilation=stride, groups=batch_size, ) input_dims = conv_picker(func, 3, 4, 5) for i in range(2, input_dims): weight_grad_sample = weight_grad_sample.narrow(i, 0, weight_shape[i]) weight_grad_sample = weight_grad_sample.view( I, batch_size, O, *weight_grad_sample.shape[2:] ) weight_grad_sample = weight_grad_sample.movedim(0, 2) return weight_grad_sample def unfold3d( tensor, kernel_size, padding, stride, dilation, ): r""" Extract sliding local blocks from an batched input tensor. :class:`torch.nn.Unfold` only supports 4D inputs (batched image-like tensors). This method implements the same action for 5D inputs Args: tensor: An input tensor of shape ``(B, C, D, H, W)``. 
kernel_size: the size of the sliding blocks padding: implicit zero padding to be added on both sides of input stride: the stride of the sliding blocks in the input spatial dimensions dilation: the spacing between the kernel points. Returns: A tensor of shape ``(B, C * np.prod(kernel_size), L)``, where ``L`` is the product of the output spatial dimensions. See :class:`torch.nn.Unfold` for more details. Example: >>> # xdoctest: +SKIP >>> B, C, D, H, W = 3, 4, 5, 6, 7 >>> tensor = torch.arange(1, B * C * D * H * W + 1.).view(B, C, D, H, W) >>> unfold3d(tensor, kernel_size=2, padding=0, stride=1).shape torch.Size([3, 32, 120]) """ if len(tensor.shape) != 5: raise ValueError( f"Input tensor must be of the shape [B, C, D, H, W]. Got {tensor.shape}" ) if dilation != (1, 1, 1): raise NotImplementedError(f"dilation={dilation} not supported.") batch_size, channels, _, _, _ = tensor.shape # Input shape: (B, C, D, H, W) tensor = F.pad( tensor, (padding[2], padding[2], padding[1], padding[1], padding[0], padding[0]) ) # Output shape: (B, C, D+2*padding[2], H+2*padding[1], W+2*padding[0]) tensor = tensor.unfold(dimension=2, size=kernel_size[0], step=stride[0]) tensor = tensor.unfold(dimension=3, size=kernel_size[1], step=stride[1]) tensor = tensor.unfold(dimension=4, size=kernel_size[2], step=stride[2]) # Output shape: (B, C, D_out, H_out, W_out, kernel_size[0], kernel_size[1], kernel_size[2]) # For D_out, H_out, W_out definitions see :class:`torch.nn.Unfold` tensor = tensor.permute(0, 2, 3, 4, 1, 5, 6, 7) # Output shape: (B, D_out, H_out, W_out, C, kernel_size[0], kernel_size[1], kernel_size[2]) tensor = tensor.reshape(batch_size, -1, channels * np.prod(kernel_size)).transpose( 1, 2 ) # Output shape: (B, D_out * H_out * W_out, C * kernel_size[0] * kernel_size[1] * kernel_size[2]) return tensor ```
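The string-padding handling in this file can be cross-checked against `F.conv2d`'s own `padding="same"`: pad the input manually with `F.pad` (which takes per-dimension `(left, right)` pairs, last spatial dimension first) and then convolve with `padding=0`. A small sketch with an odd kernel, where the left/right split is symmetric:

```py
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)
w = torch.randn(4, 3, 3, 3)
dilation, kernel = 1, 3

total = dilation * (kernel - 1)                  # conv_padding_for_same logic
left, right = total // 2, total - total // 2
x_padded = F.pad(x, (left, right, left, right))  # pad W, then H

out_manual = F.conv2d(x_padded, w, padding=0)
out_same = F.conv2d(x, w, padding="same")
torch.testing.assert_close(out_manual, out_same)
```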
====================================================================================================================================================== SOURCE CODE FILE: embedding_expanded_weights.py LINES: 1 SIZE: 2.86 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\_expanded_weights\embedding_expanded_weights.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from typing import Optional import torch import torch.nn.functional as F from .expanded_weights_impl import implements_per_sample_grads from .expanded_weights_utils import ( forward_helper, set_grad_sample_if_exists, standard_kwargs, ) @implements_per_sample_grads(F.embedding) class EmbeddingPerSampleGrad(torch.autograd.Function): @staticmethod def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs): expanded_args, expanded_kwargs = standard_kwargs( kwarg_names, expanded_args_and_kwargs ) if len(expanded_args[0].shape) == 1: raise RuntimeError( f"Expanded Weights needs an input with a batch size, got a 1D tensor, {expanded_args[0]}" ) output = forward_helper(F.embedding, expanded_args, expanded_kwargs) ctx.input, ctx.weight = expanded_args ctx.padding_idx, ctx.scale_grad_by_freq = ( expanded_kwargs["padding_idx"], expanded_kwargs["scale_grad_by_freq"], ) ctx.sparse = expanded_kwargs["sparse"] return output @staticmethod def backward(ctx, grad_output): input, weight = ctx.input, ctx.weight padding_idx, scale_grad_by_freq, sparse = ( ctx.padding_idx, ctx.scale_grad_by_freq, ctx.sparse, ) def weight_per_sample_grad(weight): batch_size = input.shape[0] embedding_dim = weight.shape[1] index = ( input.unsqueeze(-1) .expand(*input.shape, embedding_dim) .reshape(batch_size, -1, embedding_dim) ) grad_sample = torch.zeros( batch_size, *weight.shape, device=weight.device, dtype=grad_output.dtype ) return grad_sample.scatter_add_( 1, index, grad_output.reshape(batch_size, -1, embedding_dim) ) results: list[Optional[torch.Tensor]] = [] results.append(None) # for kwarg names results.append(None) # for op reference if input.requires_grad: bw_fn = torch.ops.aten.embedding_backward results.append( bw_fn( grad_output, input, weight.shape[0], padding_idx, scale_grad_by_freq, sparse, ) ) else: results.append(None) # weight doesn't compute batched gradients; no other arguments are differentiable (2 not saved from forward) results = results + [None] * 6 # set grad_sample field for weight with per sample gradients set_grad_sample_if_exists(weight, weight_per_sample_grad) return tuple(results) ```
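A shape-level sketch of the embedding path, again via the private `call_for_per_sample_grads` helper: `grad_sample` gains a leading batch dimension over the embedding table.

```py
import torch
from torch import nn
from torch.nn.utils._per_sample_grad import call_for_per_sample_grads

emb = nn.Embedding(10, 3)
idx = torch.randint(0, 10, (4, 7))  # batch of 4 index sequences

call_for_per_sample_grads(emb)(idx).sum().backward()
print(emb.weight.grad_sample.shape)  # torch.Size([4, 10, 3])
```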
================================================================================================================================================= SOURCE CODE FILE: expanded_weights_impl.py LINES: 1 SIZE: 6.17 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\_expanded_weights\expanded_weights_impl.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import functools from contextlib import contextmanager from typing import Callable import torch from torch._decomp import decomposition_table from torch.utils._pytree import tree_map_only HANDLED_FUNCTIONS: dict[Callable, torch.autograd.Function] = {} aten = torch._ops.ops.aten # __torch_function__ runs before the pydispatcher so we need to manually use the same # decompositions indexed by their torch equivalent expanded_weights_rnn_decomps = { # func: (input_decomp, data_decomp) torch.rnn_relu: ( decomposition_table[aten.rnn_relu.input], decomposition_table[aten.rnn_relu.data], ), torch.rnn_tanh: ( decomposition_table[aten.rnn_tanh.input], decomposition_table[aten.rnn_tanh.data], ), torch.lstm: ( decomposition_table[aten.lstm.input], decomposition_table[aten.lstm.data], ), torch.gru: ( decomposition_table[aten.gru.input], decomposition_table[aten.gru.data], ), } # all of the RNN decomps run linear with the batch dimension second, even if batch_first was set @contextmanager def batch_second(args, kwargs): def set_batch_second(ew): ew.set_batch_first(False) def reset_batch_first(ew): ew.set_batch_first(True) tree_map_only(ExpandedWeight, set_batch_second, args) tree_map_only(ExpandedWeight, set_batch_second, kwargs) try: yield finally: tree_map_only(ExpandedWeight, reset_batch_first, args) tree_map_only(ExpandedWeight, reset_batch_first, kwargs) # to support packed sequences, we need to allow for smaller batches. Expanded weights represents the largest batch @contextmanager def allow_smaller_batches(args, kwargs): def allow(ew): ew.set_allow_smaller_batches(True) def reset(ew): ew.set_allow_smaller_batches(False) tree_map_only(ExpandedWeight, allow, args) tree_map_only(ExpandedWeight, allow, kwargs) try: yield finally: tree_map_only(ExpandedWeight, reset, args) tree_map_only(ExpandedWeight, reset, kwargs) @contextmanager def setup_rnn(use_input_variant, args, kwargs): with batch_second(args, kwargs) if use_input_variant else allow_smaller_batches( args, kwargs ): yield def implements_per_sample_grads(torch_function): @functools.wraps(torch_function) def decorator(autograd_func): HANDLED_FUNCTIONS[torch_function] = autograd_func return autograd_func return decorator # ExpandedWeight represents a weight (parameter) Tensor that has an expanded # batch dimension. Operations on the ExpandedWeight Tensor act exactly like # those without an expanded batch dimension but a call to .backward() populates # the original (unexpanded) tensor with per-sample-gradients for in the grad_sample field # # ExpandedWeight has a fallback that always fails since we cannot know what the batch # dimension of the input tensor is and therefore cannot know if this is a valid call # # This is a __torch_function__ object but it could have also been a Tensor Extension # with a dispatch key. 
# # Needs to be a tensor subclass to allow reparamaterization class ExpandedWeight(torch.Tensor): def __init__(self, orig_weight, batch_size, loss_reduction): self.batch_size = batch_size self.batch_first = True self.allow_smaller_batches = False self.orig_weight = orig_weight self.loss_reduction = loss_reduction handled_functions = HANDLED_FUNCTIONS def __new__(cls, orig_weight, batch_size, loss_reduction): if not isinstance(orig_weight, torch.Tensor): raise RuntimeError( f"Can only make Expanded Weights of Tensors, got {type(orig_weight).__name__}" ) if not orig_weight.requires_grad: raise RuntimeError( "Can only build ExpandedWeights objects of tensors that require_grad" ) ret = torch.Tensor._make_subclass(cls, orig_weight, True) return ret @classmethod def __torch_function__(cls, func, _, args=(), kwargs=None): if kwargs is None: kwargs = {} if func in expanded_weights_rnn_decomps: # in aten, choosing the input or data variants is done by parsing logic. This mimics some of that decomp_opts = expanded_weights_rnn_decomps[func] use_input_variant = isinstance( args[2], list ) # data variant uses a list here decomp = decomp_opts[0] if use_input_variant else decomp_opts[1] if decomp is not None: with setup_rnn(use_input_variant, args, kwargs): return decomp(*args, **kwargs) if func == torch._cudnn_rnn_flatten_weight: # since we aren't using the fused cuda kernels for RNNs, don't do this return if func in cls.handled_functions: return cls.handled_functions[func].apply( tuple(kwargs.keys()), func, *(args + tuple(kwargs.values())) ) # We cannot use a fallback here because we do not know the batch dimension for any regular tensor inputs, # i.e. torch.add(torch.Tensor, ExpandedWeight) raise RuntimeError( f"Expanded Weights encountered but cannot handle function {func.__name__}" ) @property def dtype(self): return self.orig_weight.dtype @property def data(self): return self.orig_weight.data @property def shape(self): return self.orig_weight.shape @property def device(self): return self.orig_weight.device @property def is_cuda(self): return self.orig_weight.is_cuda def data_ptr(self): return self.orig_weight.data_ptr() def get_device(self): return self.orig_weight.get_device() def set_allow_smaller_batches(self, is_allow_smaller_batches): self.allow_smaller_batches = is_allow_smaller_batches def set_batch_first(self, is_batch_first=True): self.batch_first = is_batch_first ```
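A sketch of the dispatch described above: wrapping a weight in `ExpandedWeight` and calling a handled function (`F.linear`) routes the call through the registered per-sample-grad rule, and `backward()` populates `grad_sample` on the original weight. These are private classes, so the exact behavior may vary across versions:

```py
import torch
import torch.nn.functional as F
from torch.nn.utils._expanded_weights.expanded_weights_impl import ExpandedWeight

weight = torch.randn(3, 4, requires_grad=True)
ew = ExpandedWeight(weight, batch_size=5, loss_reduction="sum")

# Pass bias explicitly (even if None), mirroring how nn.Linear calls F.linear.
out = F.linear(torch.randn(5, 4), ew, None)  # handled via __torch_function__
out.sum().backward()
print(weight.grad_sample.shape)              # torch.Size([5, 3, 4])
```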
================================================================================================================================================== SOURCE CODE FILE: expanded_weights_utils.py LINES: 1 SIZE: 7.59 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\_expanded_weights\expanded_weights_utils.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from typing import Optional import torch from .expanded_weights_impl import ExpandedWeight def is_batch_first(expanded_args_and_kwargs): batch_first = None for arg in expanded_args_and_kwargs: if not isinstance(arg, ExpandedWeight): continue if not batch_first: batch_first = arg.batch_first elif arg.batch_first != batch_first: raise RuntimeError( "Got conflicting batch_first arguments in the same layer" ) return batch_first def standard_kwargs(kwarg_names, expanded_args): r"""Separate args and kwargs from `__torch_function__`s that standardize kwargs. Most `__torch_function__`s standardize the kwargs that they give, so this will separate the args and kwargs they pass. Functions that don't are linear and convND. """ kwarg_values = expanded_args[len(expanded_args) - len(kwarg_names) :] expanded_args_without_kwargs = expanded_args[ : len(expanded_args) - len(kwarg_names) ] expanded_kwargs = dict(zip(kwarg_names, kwarg_values)) return expanded_args_without_kwargs, expanded_kwargs def forward_helper(func, expanded_args, expanded_kwargs): r"""Compute the forward pass for a function that has expanded weight(s) passed to it. It will run the forward pass where all ExpandedWeights are their original weight. It runs checks on the given arguments and detaches the outputs. .. note:: First argument in :attr:`expanded_args` must be the input with the batch dimension as the first element of the shape .. note:: :attr:`func` must return a Tensor or tuple of Tensors Args: func: The function to be called expanded_args: Arguments to be passed to :attr:`func`. Will include arguments that need to be unpacked because they are ExpandedWeights expanded_kwargs: Keyword arguments to be passed to :attr:`func`. Similar to :attr:`expanded_args`. """ unexpanded_args, unexpanded_kwargs = _check_and_unexpand_args( func, expanded_args, expanded_kwargs ) return func(*unexpanded_args, **unexpanded_kwargs) def _check_and_unexpand_args(func, expanded_args, expanded_kwargs): # input must be the first argument passed input = expanded_args[0] if isinstance(input, ExpandedWeight): raise RuntimeError( "Expanded Weights do not support inputs that are also ExpandedWeights. 
" f"Input must be a Tensor, got {type(input).__name__} in function {func.__name__}" ) if not isinstance(input, torch.Tensor): raise RuntimeError( "Expanded Weights requires a Tensor as the first input to get the batch dimension, " f"got {type(input).__name__} in function {func.__name__}" ) if len(input.shape) == 0: raise RuntimeError( f"Expanded Weights requires a batch dimension but got an input of size 0 in function {func.__name__}" ) if input.shape[0] == 0: raise RuntimeError( "0 is not a valid batch size for Expanded Weights but got input tensor of " f"{input} in function {func.__name__}" ) for arg in expanded_args + tuple(expanded_kwargs.values()): if not isinstance(arg, ExpandedWeight): continue batch_size = input.shape[0] if arg.batch_first else input.shape[1] if (arg.allow_smaller_batches and batch_size > arg.batch_size) or ( not arg.allow_smaller_batches and arg.batch_size != batch_size ): raise RuntimeError( "Expected ExpandedWeights to have batch size matching input but got " f"input batch size of {batch_size} with ExpandedWeight of batch size {arg.batch_size}" ) loss_reduction: Optional[str] = None for arg in expanded_args + tuple(expanded_kwargs.values()): if isinstance(arg, ExpandedWeight): if loss_reduction is None: loss_reduction = arg.loss_reduction elif loss_reduction != arg.loss_reduction: raise RuntimeError( "Expected ExpandedWeights to all have the same loss_reduction argument but got one" f"with {loss_reduction} and one with {arg.loss_reduction}" ) unexpanded_args = tuple( arg.orig_weight if isinstance(arg, ExpandedWeight) else arg for arg in expanded_args ) unexpanded_kwargs = { name: arg.orig_weight if isinstance(arg, ExpandedWeight) else arg for (name, arg) in expanded_kwargs.items() } return unexpanded_args, unexpanded_kwargs def maybe_scale_by_batch_size(grad_sample, expanded_weight): if expanded_weight.loss_reduction == "mean": return grad_sample * expanded_weight.batch_size else: return grad_sample def set_grad_sample_if_exists(maybe_expanded_weight, per_sample_grad_fn): unpacked = unpack_expanded_weight_or_tensor(maybe_expanded_weight) if isinstance(maybe_expanded_weight, ExpandedWeight): grad_sample_contribution = maybe_scale_by_batch_size( per_sample_grad_fn(unpacked), maybe_expanded_weight ) if maybe_expanded_weight.batch_size > grad_sample_contribution.shape[0]: # this only passes the other checks if the arg allows smaller batch sizes intermediate = torch.zeros( maybe_expanded_weight.batch_size, *grad_sample_contribution.shape[1:], dtype=grad_sample_contribution.dtype, device=grad_sample_contribution.device, ) intermediate[: grad_sample_contribution.shape[0]] = grad_sample_contribution grad_sample_contribution = intermediate if hasattr(unpacked, "grad_sample") and unpacked.grad_sample is not None: unpacked.grad_sample = unpacked.grad_sample + grad_sample_contribution else: unpacked.grad_sample = grad_sample_contribution def unpack_expanded_weight_or_tensor(maybe_expanded_weight, func=lambda x: x): if isinstance(maybe_expanded_weight, ExpandedWeight): orig_weight = maybe_expanded_weight.orig_weight return func(orig_weight) elif ( isinstance(maybe_expanded_weight, torch.Tensor) and not maybe_expanded_weight.requires_grad ): return func(maybe_expanded_weight) elif isinstance(maybe_expanded_weight, torch.Tensor): raise RuntimeError( "ExpandedWeights currently does not support a mixture of ExpandedWeight parameters " "and normal Parameters. 
Please file an issue with pytorch/pytorch" ) def sum_over_all_but_batch_and_last_n( tensor: torch.Tensor, n_dims: int, ) -> torch.Tensor: r""" Calculate the sum over all dimensions, except the first (batch dimension), and excluding the last n_dims. This function will ignore the first dimension and it will not aggregate over the last n_dims dimensions. Args: tensor: An input tensor of shape ``(B, ..., X[n_dims-1])``. n_dims: Number of dimensions to keep. Example: >>> tensor = torch.ones(1, 2, 3, 4, 5) >>> sum_over_all_but_batch_and_last_n(tensor, n_dims=2).shape torch.Size([1, 4, 5]) Returns: A tensor of shape ``(B, ..., X[n_dims-1])`` """ if tensor.dim() == n_dims + 1: return tensor else: dims = list(range(1, tensor.dim() - n_dims)) return tensor.sum(dim=dims) ```
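A quick check of `sum_over_all_but_batch_and_last_n`, matching the docstring's example: the batch dimension and the trailing `n_dims` dimensions are kept, and everything in between is summed out.

```py
import torch
from torch.nn.utils._expanded_weights.expanded_weights_utils import (
    sum_over_all_but_batch_and_last_n,
)

t = torch.ones(2, 3, 4, 5)
print(sum_over_all_but_batch_and_last_n(t, n_dims=2).shape)  # torch.Size([2, 4, 5])
print(sum_over_all_but_batch_and_last_n(t, n_dims=3).shape)  # unchanged: torch.Size([2, 3, 4, 5])
```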
======================================================================================================================================================= SOURCE CODE FILE: group_norm_expanded_weights.py LINES: 1 SIZE: 3.47 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\_expanded_weights\group_norm_expanded_weights.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import operator from functools import reduce from typing import Optional import torch import torch.nn.functional as F from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads from .expanded_weights_utils import ( forward_helper, set_grad_sample_if_exists, standard_kwargs, unpack_expanded_weight_or_tensor, ) @implements_per_sample_grads(F.group_norm) class GroupNormPerSampleGrad(torch.autograd.Function): @staticmethod def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs): expanded_args, expanded_kwargs = standard_kwargs( kwarg_names, expanded_args_and_kwargs ) input, num_groups = expanded_args N = input.shape[0] C = input.shape[1] HxW = reduce(operator.mul, input.shape[2:], 1) weight, bias, eps = ( expanded_kwargs["weight"], expanded_kwargs["bias"], expanded_kwargs["eps"], ) output, mean, rstd = forward_helper( torch.native_group_norm, (input, weight, bias, N, C, HxW, num_groups, eps), {}, ) ctx.input, ctx.num_groups = input, num_groups ctx.weight, ctx.eps = weight, eps ctx.mean, ctx.rstd = mean, rstd if isinstance(bias, ExpandedWeight): ctx.bias = bias if input.requires_grad and isinstance(weight, ExpandedWeight): ctx.weight = weight return output @staticmethod def backward(ctx, grad_output): input, num_groups = ctx.input, ctx.num_groups weight, bias, eps = ctx.weight, ctx.bias, ctx.eps mean, rstd = ctx.mean, ctx.rstd results: list[Optional[torch.Tensor]] = [] results.append(None) # for kwarg names results.append(None) # for op reference if input.requires_grad: weight_c = unpack_expanded_weight_or_tensor( weight, lambda t: t.contiguous() ) input_c = input.contiguous() grad_output_c = ( grad_output.contiguous() if grad_output is not None else None ) N = input.shape[0] C = input.shape[1] HxW = 1 for s in input.shape[2:]: HxW *= s bw_fn = torch.ops.aten.native_group_norm_backward results.append( bw_fn( grad_output_c, input_c, mean, rstd, weight_c, N, C, HxW, num_groups, (True, False, False), )[0] ) else: results.append(None) # weight and bias don't compute batched gradients; no other arguments are differentiable results = results + [None] * 4 # set grad_sample field for weight and bias with per sample gradients if hasattr(ctx, "weight"): set_grad_sample_if_exists( weight, lambda _: torch.einsum( "ni...->ni", F.group_norm(input, num_groups, eps=eps) * grad_output ), ) if hasattr(ctx, "bias"): set_grad_sample_if_exists( bias, lambda _: torch.einsum("ni...->ni", grad_output) ) return tuple(results) ```
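A shape-level sketch for the group-norm path (hedged: it assumes the private `call_for_per_sample_grads` wrapper routes `nn.GroupNorm` through the rule registered above): both `weight.grad_sample` and `bias.grad_sample` are per sample and per channel.

```py
import torch
from torch import nn
from torch.nn.utils._per_sample_grad import call_for_per_sample_grads

gn = nn.GroupNorm(num_groups=2, num_channels=6)
x = torch.randn(4, 6, 10)

call_for_per_sample_grads(gn)(x).sum().backward()
print(gn.weight.grad_sample.shape)  # torch.Size([4, 6])
print(gn.bias.grad_sample.shape)    # torch.Size([4, 6])
```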
========================================================================================================================================================== SOURCE CODE FILE: instance_norm_expanded_weights.py LINES: 1 SIZE: 3.74 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\_expanded_weights\instance_norm_expanded_weights.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from functools import partial from typing import Optional import torch import torch.nn.functional as F from .expanded_weights_impl import implements_per_sample_grads from .expanded_weights_utils import ( forward_helper, set_grad_sample_if_exists, standard_kwargs, unpack_expanded_weight_or_tensor, ) @implements_per_sample_grads(F.instance_norm) class InstanceNormPerSampleGrad(torch.autograd.Function): @staticmethod def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs): instance_norm = partial(torch.instance_norm, cudnn_enabled=True) expanded_args, expanded_kwargs = standard_kwargs( kwarg_names, expanded_args_and_kwargs ) output = forward_helper(instance_norm, expanded_args, expanded_kwargs) ctx.input = expanded_args[0] ctx.running_mean, ctx.running_var = ( expanded_kwargs["running_mean"], expanded_kwargs["running_var"], ) ctx.weight, ctx.bias, ctx.eps = ( expanded_kwargs["weight"], expanded_kwargs["bias"], expanded_kwargs["eps"], ) return output @staticmethod def backward(ctx, grad_output): input, running_mean, running_var = ctx.input, ctx.running_mean, ctx.running_var weight, bias, eps = ctx.weight, ctx.bias, ctx.eps results: list[Optional[torch.Tensor]] = [] results.append(None) # for kwarg names results.append(None) # for op reference if input.requires_grad: b = input.shape[0] c = input.shape[1] new_shape = (1, b * c, *input.shape[2:]) weight_ = unpack_expanded_weight_or_tensor( weight, lambda orig_weight: orig_weight.repeat(b) ) running_mean_ = running_mean.repeat(b) if running_mean is not None else None running_var_ = running_var.repeat(b) if running_var is not None else None input_reshaped = input.contiguous().view(new_shape) grad_output_reshaped = grad_output.contiguous().view(new_shape) mean = torch.mean( input_reshaped, (0,) + tuple(range(2, input.dim())), False ) var = torch.var( input_reshaped, (0,) + tuple(range(2, input.dim())), keepdim=False, unbiased=False, ) rstd = 1 / torch.sqrt(var + eps) # must use native batch norm since it supports all inputs. This may have used cuda or openmi during the forward but # it didn't save the metadata, so we don't know during the backward res = torch.ops.aten.native_batch_norm_backward( grad_output_reshaped, input_reshaped, weight_, running_mean_, running_var_, mean, rstd, True, eps, (True, False, False), ) results.append(res[0].reshape(input.shape)) else: results.append(None) # weight and bias don't compute batched gradients; no other arguments are differentiable (2 are not saved from the forward) results = results + [None] * 7 # set grad_sample field for weight and bias with per sample gradients set_grad_sample_if_exists( weight, lambda _: torch.einsum( "ni...->ni", F.instance_norm(input, eps=eps) * grad_output ), ) set_grad_sample_if_exists( bias, lambda _: torch.einsum("ni...->ni", grad_output) ) return tuple(results) ```
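The `(1, b * c, *input.shape[2:])` reshape in the backward above relies on the fact that instance norm over `(N, C, ...)` is batch norm applied to the input viewed as `(1, N * C, ...)`. A small numerical check of that equivalence:

```py
import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8)
eps = 1e-5

a = F.instance_norm(x, eps=eps)
b = F.batch_norm(
    x.reshape(1, 6, 8), running_mean=None, running_var=None, training=True, eps=eps
).reshape(2, 3, 8)
torch.testing.assert_close(a, b)
```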
======================================================================================================================================================= SOURCE CODE FILE: layer_norm_expanded_weights.py LINES: 1 SIZE: 3.25 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\_expanded_weights\layer_norm_expanded_weights.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from typing import Optional import torch import torch.nn.functional as F from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads from .expanded_weights_utils import ( forward_helper, set_grad_sample_if_exists, standard_kwargs, sum_over_all_but_batch_and_last_n, unpack_expanded_weight_or_tensor, ) @implements_per_sample_grads(F.layer_norm) class LayerNormPerSampleGrad(torch.autograd.Function): @staticmethod def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs): expanded_args, expanded_kwargs = standard_kwargs( kwarg_names, expanded_args_and_kwargs ) input = expanded_args[0] normalized_shape = expanded_args[1] if len(input.shape) <= len(normalized_shape): raise RuntimeError( "Expanded Weights: Layer norm should not normalize over batch dimension for per sample gradient" f"computations but got that normalized shape, {normalized_shape}, matched input shape." ) output, mean, rstd = forward_helper( torch.native_layer_norm, expanded_args, expanded_kwargs ) ctx.args = expanded_args if input.requires_grad or isinstance(expanded_kwargs["weight"], ExpandedWeight): ctx.weight = expanded_kwargs["weight"] if input.requires_grad or isinstance(expanded_kwargs["bias"], ExpandedWeight): ctx.bias = expanded_kwargs["bias"] ctx.eps = expanded_kwargs["eps"] ctx.mean, ctx.rstd = mean, rstd return output @staticmethod def backward(ctx, grad_output): def weight_per_sample_grad(weight): return sum_over_all_but_batch_and_last_n( F.layer_norm(input, normalized_shape, eps=ctx.eps) * grad_output, weight.dim(), ) input, normalized_shape = ctx.args mean, rstd = ctx.mean, ctx.rstd results: list[Optional[torch.Tensor]] = [] results.append(None) # for kwarg names results.append(None) # for op reference if input.requires_grad: weight_ = unpack_expanded_weight_or_tensor(ctx.weight) bias_ = unpack_expanded_weight_or_tensor(ctx.bias) results.append( torch.ops.aten.native_layer_norm_backward( grad_output, input, normalized_shape, mean, rstd, weight_, bias_, (True, False, False), )[0] ) else: results.append(None) # weight and bias don't compute batched gradients; no other arguments are differentiable results = results + [None] * 4 # set grad_sample field for weight and bias with per sample gradients if hasattr(ctx, "weight"): set_grad_sample_if_exists(ctx.weight, weight_per_sample_grad) if hasattr(ctx, "bias"): set_grad_sample_if_exists( ctx.bias, lambda bias: sum_over_all_but_batch_and_last_n(grad_output, bias.dim()), ) return tuple(results) ```
=================================================================================================================================================== SOURCE CODE FILE: linear_expanded_weights.py LINES: 1 SIZE: 2.22 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\_expanded_weights\linear_expanded_weights.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from typing import Optional import torch import torch.nn.functional as F from .expanded_weights_impl import implements_per_sample_grads from .expanded_weights_utils import ( forward_helper, is_batch_first, set_grad_sample_if_exists, unpack_expanded_weight_or_tensor, ) @implements_per_sample_grads(F.linear) class LinearPerSampleGrad(torch.autograd.Function): @staticmethod def forward(ctx, _, __, *expanded_args_and_kwargs): if len(expanded_args_and_kwargs[0].shape) <= 1: raise RuntimeError( "Input does not have a batch dimension. Expanded Weights expected input " f"of at least rank 2, got input of rank {len(expanded_args_and_kwargs[0].shape)}" ) expanded_kwargs = { "bias": expanded_args_and_kwargs[2] if len(expanded_args_and_kwargs) == 3 else None } expanded_args = expanded_args_and_kwargs[:2] ctx.batch_first = is_batch_first(expanded_args_and_kwargs) output = forward_helper(F.linear, expanded_args, expanded_kwargs) ctx.args = expanded_args ctx.kwargs = expanded_kwargs return output @staticmethod def backward(ctx, grad_output): input, weight = ctx.args bias = ctx.kwargs["bias"] results: list[Optional[torch.Tensor]] = [] results.append(None) # for kwarg_names results.append(None) # for op reference if input.requires_grad: results.append(grad_output.matmul(unpack_expanded_weight_or_tensor(weight))) else: results.append(None) results.extend([None] * 2) # weight and bias don't compute batched gradients if not ctx.batch_first: grad_output = grad_output.transpose(0, 1) input = input.transpose(0, 1) # weight and bias get their grad_sample fields set directly if they exist set_grad_sample_if_exists( weight, lambda _: torch.einsum("n...i,n...j->nij", grad_output, input) ) set_grad_sample_if_exists( bias, lambda _: torch.einsum("n...k->nk", grad_output) ) return tuple(results) ```
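To sanity-check the einsum per-sample gradient above, a sketch comparing `grad_sample` against the slow per-sample loop (one `autograd.grad` call per sample), again using the private `call_for_per_sample_grads` helper:

```py
import torch
from torch import nn
from torch.nn.utils._per_sample_grad import call_for_per_sample_grads

torch.manual_seed(0)
model = nn.Linear(4, 3)
x = torch.randn(5, 4)

call_for_per_sample_grads(model)(x).sum().backward()
fast = model.weight.grad_sample.clone()

# Reference: differentiate each sample's loss separately.
slow = torch.stack(
    [torch.autograd.grad(model(x[i]).sum(), model.weight)[0] for i in range(5)]
)
torch.testing.assert_close(fast, slow)
```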
================================================================================================================================ SOURCE CODE FILE: _named_member_accessor.py LINES: 1 SIZE: 14.19 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\_named_member_accessor.py ENCODING: utf-8 ```py # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from collections.abc import Iterable import torch _MISSING: torch.Tensor = object() # type: ignore[assignment] def set_tensor(module: "torch.nn.Module", name: str, tensor: torch.Tensor) -> None: if not isinstance(module, torch.nn.Module): raise TypeError(f"{module} is not an instance of torch.nn.Module") if not isinstance(tensor, torch.Tensor) and tensor is not None: raise TypeError(f"{tensor} is not an instance of torch.Tensor") if "." in name: raise KeyError('tensor name can\'t contain "."') if name == "": raise KeyError('tensor name can\'t be empty string ""') if name in module._parameters: module._parameters[name] = tensor # type: ignore[assignment] elif name in module._buffers: module._buffers[name] = tensor else: setattr(module, name, tensor) def swap_tensor( module: "torch.nn.Module", name: str, tensor: torch.Tensor, allow_missing: bool = False, ) -> torch.Tensor: if not isinstance(module, torch.nn.Module): raise TypeError(f"{module} is not an instance of torch.nn.Module") if ( tensor is not _MISSING and not isinstance(tensor, torch.Tensor) and tensor is not None ): raise TypeError(f"{tensor} is not an instance of torch.Tensor") if "." in name: raise KeyError('tensor name can\'t contain "."') if name == "": raise KeyError('tensor name can\'t be empty string ""') orig_tensor: torch.Tensor if name in module._parameters: orig_tensor = module._parameters[name] # type: ignore[assignment] if tensor is not _MISSING: module._parameters[name] = tensor # type: ignore[assignment] else: del module._parameters[name] elif name in module._buffers: orig_tensor = module._buffers[name] # type: ignore[assignment] if tensor is not _MISSING: module._buffers[name] = tensor else: del module._buffers[name] else: if hasattr(module, name): orig_tensor = getattr(module, name) else: if not allow_missing: raise AttributeError(f"{module._get_name()} has no attribute `{name}`") orig_tensor = _MISSING if ( orig_tensor is not _MISSING and not isinstance(orig_tensor, torch.Tensor) and orig_tensor is not None ): raise TypeError( f"attribute `{name}`: {orig_tensor} is not an instance of torch.Tensor" ) if tensor is not _MISSING: setattr(module, name, tensor) elif hasattr(module, name): delattr(module, name) return orig_tensor def swap_submodule( module: "torch.nn.Module", name: str, submodule: "torch.nn.Module", ) -> "torch.nn.Module": if not isinstance(module, torch.nn.Module): raise TypeError(f"{module} is not an instance of torch.nn.Module") if not isinstance(submodule, torch.nn.Module): raise TypeError(f"{submodule} is not an instance of torch.nn.Module") if "." 
in name: raise KeyError('submodule name can\'t contain "."') if name == "": raise KeyError('submodule name can\'t be empty string ""') if name not in module._modules: raise KeyError(f"submodule {name} does not exist") orig_submodule = module._modules[name] if not isinstance(orig_submodule, torch.nn.Module): raise TypeError(f"{name} attribute is not an instance of torch.nn.Module") module._modules[name] = submodule return orig_submodule class NamedMemberAccessor: """ A class that provides a way to access the submodules and parameters/buffers of a module. It provides caching mechanism to speed up submodule lookups. This is useful for functional programming to manipulate the module state. """ def __init__(self, module: "torch.nn.Module") -> None: self.module = module self.memo: dict[str, torch.nn.Module] = {} # Nested attribute access def get_submodule(self, name: str) -> "torch.nn.Module": """ Return the submodule specified by the given path. For example, to get the submodule mod.layer1.conv1, use accessor.get_submodule("layer1.conv1") Compare to mod.get_submodule("layer1.conv1"), this method will cache the intermediate submodule access to speed up future lookups. """ if not name: return self.module if name in self.memo: return self.memo[name] else: prefix, dot, attr = name.rpartition(".") if dot: module = self.get_submodule(prefix) else: module = self.module try: submodule = getattr(module, attr) except AttributeError as ex: raise AttributeError( f"{module._get_name()} has no attribute `{attr}`" ) from ex if not isinstance(submodule, torch.nn.Module): raise TypeError( # noqa: B904 f"submodule `{name}`: {submodule} is not an instance of torch.nn.Module" ) self.memo[name] = submodule return submodule def swap_submodule(self, path: str, value: "torch.nn.Module") -> "torch.nn.Module": """ Swap the submodule specified by the given ``path`` to ``value``. For example, to swap the attribute mod.layer1.conv1 use ``accessor.swap_submodule("layer1.conv1", conv2)``. """ prefix, _, attr = path.rpartition(".") return swap_submodule(self.get_submodule(prefix), attr, value) def get_tensor(self, name: str) -> torch.Tensor: """ Get the tensor specified by the given path to value. For example, to get the attribute mod.layer1.conv1.weight, use accessor.get_tensor('layer1.conv1.weight') Compare to mod.get_parameter("layer1.conv1.weight"), this method will cache the intermediate submodule access to speed up future lookups. """ prefix, _, attr = name.rpartition(".") submodule = self.get_submodule(prefix) try: tensor = getattr(submodule, attr) except AttributeError as ex: raise AttributeError( f"{submodule._get_name()} has no attribute `{name}`" ) from ex if not isinstance(tensor, torch.Tensor) and tensor is not None: raise TypeError(f"{tensor} is not an instance of torch.Tensor") return tensor # type: ignore[return-value] def set_tensor(self, name: str, value: torch.Tensor) -> None: """ Set the attribute specified by the given path to value. For example, to set the attribute mod.layer1.conv1.weight, use accessor.set_tensor("layer1.conv1.weight", value) """ prefix, _, attr = name.rpartition(".") set_tensor(self.get_submodule(prefix), attr, value) def del_tensor(self, name: str) -> None: """ Delete the attribute specified by the given path. 
For example, to delete the attribute mod.layer1.conv1.weight, use accessor.del_tensor("layer1.conv1.weight") """ prefix, _, attr = name.rpartition(".") submodule = self.get_submodule(prefix) try: delattr(submodule, attr) except AttributeError as ex: raise AttributeError( f"{submodule._get_name()} has no attribute `{name}`" ) from ex def swap_tensor( self, name: str, value: torch.Tensor, allow_missing: bool = False ) -> torch.Tensor: """ Swap the attribute specified by the given path to value. For example, to swap the attribute mod.layer1.conv1.weight, use accessor.swap_tensor("layer1.conv1.weight", value) """ prefix, _, attr = name.rpartition(".") return swap_tensor( self.get_submodule(prefix), attr, value, allow_missing=allow_missing ) # Batched operations def get_tensors(self, names: Iterable[str]) -> list[torch.Tensor]: """ Get the tensors specified by the given paths. For example, to get the attributes mod.layer1.conv1.weight and mod.layer1.conv1.bias, use accessor.get_tensors(["layer1.conv1.weight", "layer1.conv1.bias"]) """ return [self.get_tensor(name) for name in names] def set_tensors(self, names: Iterable[str], values: Iterable[torch.Tensor]) -> None: """ Set the attributes specified by the given paths to values. For example, to set the attributes mod.layer1.conv1.weight and mod.layer1.conv1.bias, use accessor.set_tensors(["layer1.conv1.weight", "layer1.conv1.bias"], [weight, bias]) """ if not isinstance(names, (list, tuple)): names = list(names) if not isinstance(values, (list, tuple)): values = list(values) assert len(names) == len(values), "names and values must have the same length" for name, value in zip(names, values): self.set_tensor(name, value) def set_tensors_dict(self, named_tensors: dict[str, torch.Tensor]) -> None: """ Set the attributes specified by the given paths to values. For example, to set the attributes mod.layer1.conv1.weight and mod.layer1.conv1.bias, use accessor.set_tensors_dict({ "layer1.conv1.weight": weight, "layer1.conv1.bias": bias, }) """ for name, value in named_tensors.items(): self.set_tensor(name, value) def del_tensors(self, names: Iterable[str]) -> None: """ Delete the attributes specified by the given paths. For example, to delete the attributes mod.layer1.conv1.weight and mod.layer1.conv1.bias, use accessor.del_tensors(["layer1.conv1.weight", "layer1.conv1.bias"]) """ for name in names: self.del_tensor(name) def swap_tensors( self, names: Iterable[str], values: Iterable[torch.Tensor], allow_missing: bool = False, ) -> list[torch.Tensor]: """ Swap the attributes specified by the given paths to values. For example, to swap the attributes mod.layer1.conv1.weight and mod.layer1.conv1.bias, use accessor.swap_tensors(["layer1.conv1.weight", "layer1.conv1.bias"], [weight, bias]) """ if not isinstance(names, (list, tuple)): names = list(names) if not isinstance(values, (list, tuple)): values = list(values) assert len(names) == len(values), "names and values must have the same length" return [ self.swap_tensor(name, value, allow_missing=allow_missing) for name, value in zip(names, values) ] def swap_tensors_dict( self, named_tensors: dict[str, torch.Tensor], allow_missing: bool = False ) -> tuple[dict[str, torch.Tensor], list[str]]: """ Swap the attributes specified by the given paths to values. 
For example, to swap the attributes mod.layer1.conv1.weight and mod.layer1.conv1.bias, use accessor.swap_tensors_dict({ "layer1.conv1.weight": weight, "layer1.conv1.bias": bias, }) """ orig_named_tensors = {} missing_keys = [] try: for name, tensor in named_tensors.items(): orig_tensor = self.swap_tensor(name, tensor, allow_missing=True) if orig_tensor is _MISSING: missing_keys.append(name) orig_named_tensors[name] = orig_tensor except Exception: # Swap back if any exception occurs for name, orig_tensor in orig_named_tensors.items(): self.swap_tensor(name, orig_tensor, allow_missing=True) raise if missing_keys and not allow_missing: # Swap back if any key is missing when allow_missing is False for name, orig_tensor in orig_named_tensors.items(): self.swap_tensor(name, orig_tensor, allow_missing=True) raise RuntimeError(f"Missing key(s): {', '.join(map(repr, missing_keys))}.") return orig_named_tensors, missing_keys def check_keys(self, keys: Iterable[str]) -> tuple[list[str], list[str]]: """Check that the given keys are valid.""" keys = set(keys) valid_keys = {name for name, _ in self.named_tensors(remove_duplicate=False)} missing_keys = valid_keys - keys unexpected_keys = keys - valid_keys return sorted(missing_keys), sorted(unexpected_keys) # Shortcut methods def named_parameters( self, remove_duplicate: bool = True, ) -> Iterable[tuple[str, torch.Tensor]]: """Iterate over all the parameters in the module.""" yield from self.module.named_parameters(remove_duplicate=remove_duplicate) def named_buffers( self, remove_duplicate: bool = True, ) -> Iterable[tuple[str, torch.Tensor]]: """Iterate over all the buffers in the module.""" yield from self.module.named_buffers(remove_duplicate=remove_duplicate) def named_tensors( self, remove_duplicate: bool = True, ) -> Iterable[tuple[str, torch.Tensor]]: """Iterate over all the tensors in the module.""" yield from self.module.named_parameters(remove_duplicate=remove_duplicate) yield from self.module.named_buffers(remove_duplicate=remove_duplicate) def named_modules( self, remove_duplicate: bool = True, ) -> Iterable[tuple[str, "torch.nn.Module"]]: """Iterate over all the modules in the module.""" yield from self.module.named_modules(remove_duplicate=remove_duplicate) ```
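The accessor above is an internal helper used by PyTorch's functional-call machinery. Below is a minimal usage sketch, not part of the file itself, assuming the module path `torch.nn.utils._named_member_accessor` and an illustrative two-layer model.

```py
# Minimal sketch (illustrative): swap a tensor in and out of a module by
# dotted path using NamedMemberAccessor.
import torch
from torch.nn.utils._named_member_accessor import NamedMemberAccessor

model = torch.nn.Sequential(torch.nn.Linear(4, 3), torch.nn.ReLU())
accessor = NamedMemberAccessor(model)

replacement = torch.zeros(3, 4)
# Swap in the replacement and keep the original parameter around.
original = accessor.swap_tensor("0.weight", replacement)
assert torch.equal(accessor.get_tensor("0.weight"), replacement)
# Restore the original parameter; the submodule lookup for "0" is now cached.
accessor.swap_tensor("0.weight", original)
```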
========================================================================================================================== SOURCE CODE FILE: _per_sample_grad.py LINES: 1 SIZE: 5.73 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\_per_sample_grad.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import functools import torch from torch.nn.utils._expanded_weights.expanded_weights_impl import ExpandedWeight from torch.utils import _pytree as pytree # dependency on `functional_call` means that this can't be exposed in utils # without creating circular dependency def call_for_per_sample_grads( module, *, batch_size=None, loss_reduction="sum", batch_first=True, ): r""" Return a forward function for a module, populating grad_sample with per sample gradients on backward invocation. Args: module: The ``nn.Module`` to get per sample gradients with respect to. All trainable parameters will compute per sample gradients, located in a ``grad_sample`` field when ``backward`` is invoked batch_size: The batch size of the input. If None is passed, all tensor arguments in args and kwargs must have the same batch size, which is the size of the first dimension. Otherwise, it must be passed manually. Default: None loss_reduction: Indicates if the loss reduction (for aggregating the gradients) is a sum or a mean operation. If "mean", per sample gradients will be scaled by the batch size to offset the crossbatch interaction from running mean across a batch. Must be "mean" or "sum". Default: "sum" batch_first: Indicates if the batch dimension is the first dimension. If True, the batch dimension is the first dimension. If False, it's the second dimension. Default: True. Examples:: >>> # xdoctest: +SKIP >>> model = nn.Linear(4, 3) >>> batched_input = torch.randn(5, 4) # batch size of 5 >>> res = call_for_per_sample_grads(model)(batched_input).sum() >>> res.backward() >>> assert model.weight.shape == (3, 4) >>> assert model.weight.grad_sample.shape == (5, 3, 4) >>> assert model.weight.grad is None >>> assert model.bias.shape == (3,) >>> assert model.bias.grad_sample.shape == (5, 3) >>> assert model.bias.grad is None An example using "mean" loss reduction. The grad_sample fields will be scaled by batch_size from what they would be if we ran the same code with loss_reduction="sum". This is because the mean at the end will scale all grad_outputs by 1 / batch_size from cross batch interaction. >>> model = nn.Linear(4, 3) >>> batched_input = torch.randn(5, 4) # batch size of 5 >>> res = call_for_per_sample_grads(model, 5, loss_reduction="mean")(batched_input).mean() >>> res.backward() Note:: Does not work with any `nn.RNN`, including `nn.GRU` or `nn.LSTM`. Please use custom rewrites that wrap an `nn.Linear` module. See Opacus for an example """ def maybe_build_expanded_weight(og_tensor, batch_size): if og_tensor.requires_grad: return ExpandedWeight(og_tensor, batch_size, loss_reduction) else: return og_tensor def compute_batch_size(*args, **kwargs): args_and_kwargs = pytree.arg_tree_leaves(*args, **kwargs) batch_size = None for arg in args_and_kwargs: if not isinstance(arg, torch.Tensor): continue arg_batch_size = arg.shape[0] if batch_first else arg.shape[1] if batch_size is not None and batch_size != arg_batch_size: raise RuntimeError( "When computing batch size, found at least one input with batch size " f"{batch_size} and one with batch size {arg_batch_size}. 
Please specify it " "explicitly using the batch size kwarg in call_for_per_sample_grads" ) batch_size = arg_batch_size if batch_size is None: raise RuntimeError( "Unable to find a tensor in the passed args and kwargs. They may not be pytree-able " "and so ExpandedWeights cannot compute the batch size from the inputs. Please specify " "it explicitly" ) return batch_size if loss_reduction not in ["sum", "mean"]: raise RuntimeError( f"Expected loss_reduction argument to be sum or mean, got {loss_reduction}" ) if not isinstance(module, torch.nn.Module): raise RuntimeError( f"Module passed must be nn.Module, got {type(module).__name__}" ) if not (batch_size is None or isinstance(batch_size, int)): raise RuntimeError( f"Batch size passed must be None or an integer, got {type(batch_size).__name__}" ) if batch_size is not None and batch_size < 1: raise RuntimeError(f"Batch size must be positive, got {batch_size}") for weight in module.parameters(): if hasattr(weight, "grad_sample") and weight.grad_sample is not None: # type: ignore[attr-defined] raise RuntimeError( "Current Expanded Weights accumulates the gradients, which will be incorrect for multiple " f"calls without clearing gradients. Please clear out the grad_sample parameter of {weight} or " "post an issue to pytorch/pytorch to prioritize correct behavior" ) @functools.wraps(module.forward) def wrapper(*args, **kwargs): wrapper_batch_size = batch_size if wrapper_batch_size is None: wrapper_batch_size = compute_batch_size(*args, **kwargs) params = { name: maybe_build_expanded_weight(value, wrapper_batch_size) for (name, value) in module.named_parameters() } return torch.func.functional_call(module, params, args, kwargs) return wrapper ```
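As a sanity check on the semantics described in the docstring, the sketch below (assumed usage, toy shapes) compares the populated ``grad_sample`` fields against gradients computed one sample at a time.

```py
# Sketch: per-sample gradients from call_for_per_sample_grads should match a
# manual per-sample loop when loss_reduction="sum" (the default).
import torch
from torch.nn.utils._per_sample_grad import call_for_per_sample_grads

torch.manual_seed(0)
model = torch.nn.Linear(4, 3)
batched_input = torch.randn(5, 4)

call_for_per_sample_grads(model)(batched_input).sum().backward()
per_sample = model.weight.grad_sample.clone()  # shape (5, 3, 4)

for i in range(5):
    model.zero_grad()
    model(batched_input[i : i + 1]).sum().backward()
    assert torch.allclose(per_sample[i], model.weight.grad, atol=1e-6)
```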
=================================================================================================================== SOURCE CODE FILE: clip_grad.py LINES: 1 SIZE: 10.79 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\clip_grad.py ENCODING: utf-8 ```py # mypy: allow-untyped-decorators # mypy: allow-untyped-defs import functools import typing from typing import cast, Optional, Union from typing_extensions import deprecated import torch from torch import Tensor from torch.utils._foreach_utils import ( _device_has_foreach_support, _group_tensors_by_device_and_dtype, _has_foreach_support, ) __all__ = [ "clip_grad_norm_", "clip_grad_norm", "clip_grad_value_", ] _tensor_or_tensors = Union[ torch.Tensor, typing.Iterable[torch.Tensor], # noqa: UP006 - needed until XLA's patch is updated ] def _no_grad(func): """ This wrapper is needed to avoid a circular import when using @torch.no_grad on the exposed functions clip_grad_norm_ and clip_grad_value_ themselves. """ def _no_grad_wrapper(*args, **kwargs): with torch.no_grad(): return func(*args, **kwargs) functools.update_wrapper(_no_grad_wrapper, func) return _no_grad_wrapper @_no_grad def _get_total_norm( tensors: _tensor_or_tensors, norm_type: float = 2.0, error_if_nonfinite: bool = False, foreach: Optional[bool] = None, ) -> torch.Tensor: r"""Compute the norm of an iterable of tensors. The norm is computed over the norms of the individual tensors, as if the norms of the individual tensors were concatenated into a single vector. Args: tensors (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will be normalized norm_type (float): type of the used p-norm. Can be ``'inf'`` for infinity norm. error_if_nonfinite (bool): if True, an error is thrown if the total norm of :attr:`tensors` is ``nan``, ``inf``, or ``-inf``. Default: ``False`` foreach (bool): use the faster foreach-based implementation. If ``None``, use the foreach implementation for CUDA and CPU native tensors and silently fall back to the slow implementation for other device types. Default: ``None`` Returns: Total norm of the tensors (viewed as a single vector). """ if isinstance(tensors, torch.Tensor): tensors = [tensors] else: tensors = list(tensors) norm_type = float(norm_type) if len(tensors) == 0: return torch.tensor(0.0) first_device = tensors[0].device grouped_tensors: dict[ tuple[torch.device, torch.dtype], tuple[list[list[Tensor]], list[int]] ] = _group_tensors_by_device_and_dtype( [tensors] # type: ignore[list-item] ) # type: ignore[assignment] norms: list[Tensor] = [] for (device, _), ([device_tensors], _) in grouped_tensors.items(): if (foreach is None and _has_foreach_support(device_tensors, device)) or ( foreach and _device_has_foreach_support(device) ): norms.extend(torch._foreach_norm(device_tensors, norm_type)) elif foreach: raise RuntimeError( f"foreach=True was passed, but can't use the foreach API on {device.type} tensors" ) else: norms.extend( [torch.linalg.vector_norm(g, norm_type) for g in device_tensors] ) total_norm = torch.linalg.vector_norm( torch.stack([norm.to(first_device) for norm in norms]), norm_type ) if error_if_nonfinite and torch.logical_or(total_norm.isnan(), total_norm.isinf()): raise RuntimeError( f"The total norm of order {norm_type} for gradients from " "`parameters` is non-finite, so it cannot be clipped. 
To disable " "this error and scale the gradients by the non-finite norm anyway, " "set `error_if_nonfinite=False`" ) return total_norm @_no_grad def _clip_grads_with_norm_( parameters: _tensor_or_tensors, max_norm: float, total_norm: torch.Tensor, foreach: Optional[bool] = None, ) -> None: r"""Scale the gradients of an iterable of parameters given a pre-calculated total norm and desired max norm. The gradients will be scaled by the following calculation .. math:: grad = grad * \frac{max\_norm}{total\_norm + 1e-6} Gradients are modified in-place. This function is equivalent to :func:`torch.nn.utils.clip_grad_norm_` with a pre-calculated total norm. Args: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized max_norm (float): max norm of the gradients total_norm (Tensor): total norm of the gradients to use for clipping foreach (bool): use the faster foreach-based implementation. If ``None``, use the foreach implementation for CUDA and CPU native tensors and silently fall back to the slow implementation for other device types. Default: ``None`` Returns: None """ if isinstance(parameters, torch.Tensor): parameters = [parameters] grads = [p.grad for p in parameters if p.grad is not None] max_norm = float(max_norm) if len(grads) == 0: return grouped_grads: dict[ tuple[torch.device, torch.dtype], tuple[list[list[Tensor]], list[int]] ] = _group_tensors_by_device_and_dtype( [grads] ) # type: ignore[assignment] clip_coef = max_norm / (total_norm + 1e-6) # Note: multiplying by the clamped coef is redundant when the coef is clamped to 1, but doing so # avoids a `if clip_coef < 1:` conditional which can require a CPU <=> device synchronization # when the gradients do not reside in CPU memory. clip_coef_clamped = torch.clamp(clip_coef, max=1.0) for (device, _), ([device_grads], _) in grouped_grads.items(): if (foreach is None and _has_foreach_support(device_grads, device)) or ( foreach and _device_has_foreach_support(device) ): torch._foreach_mul_(device_grads, clip_coef_clamped.to(device)) elif foreach: raise RuntimeError( f"foreach=True was passed, but can't use the foreach API on {device.type} tensors" ) else: clip_coef_clamped_device = clip_coef_clamped.to(device) for g in device_grads: g.mul_(clip_coef_clamped_device) @_no_grad def clip_grad_norm_( parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.0, error_if_nonfinite: bool = False, foreach: Optional[bool] = None, ) -> torch.Tensor: r"""Clip the gradient norm of an iterable of parameters. The norm is computed over the norms of the individual gradients of all parameters, as if the norms of the individual gradients were concatenated into a single vector. Gradients are modified in-place. This function is equivalent to :func:`torch.nn.utils.get_total_norm` followed by :func:`torch.nn.utils.clip_grads_with_norm_` with the ``total_norm`` returned by ``get_total_norm``. Args: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized max_norm (float): max norm of the gradients norm_type (float): type of the used p-norm. Can be ``'inf'`` for infinity norm. error_if_nonfinite (bool): if True, an error is thrown if the total norm of the gradients from :attr:`parameters` is ``nan``, ``inf``, or ``-inf``. Default: False (will switch to True in the future) foreach (bool): use the faster foreach-based implementation. 
If ``None``, use the foreach implementation for CUDA and CPU native tensors and silently fall back to the slow implementation for other device types. Default: ``None`` Returns: Total norm of the parameter gradients (viewed as a single vector). """ if isinstance(parameters, torch.Tensor): parameters = [parameters] else: # prevent generators from being exhausted parameters = list(parameters) grads = [p.grad for p in parameters if p.grad is not None] total_norm = _get_total_norm(grads, norm_type, error_if_nonfinite, foreach) _clip_grads_with_norm_(parameters, max_norm, total_norm, foreach) return total_norm @deprecated( "`torch.nn.utils.clip_grad_norm` is now deprecated " "in favor of `torch.nn.utils.clip_grad_norm_`.", category=FutureWarning, ) def clip_grad_norm( parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.0, error_if_nonfinite: bool = False, foreach: Optional[bool] = None, ) -> torch.Tensor: r"""Clip the gradient norm of an iterable of parameters. .. warning:: This method is now deprecated in favor of :func:`torch.nn.utils.clip_grad_norm_`. """ return clip_grad_norm_(parameters, max_norm, norm_type, error_if_nonfinite, foreach) @_no_grad def clip_grad_value_( parameters: _tensor_or_tensors, clip_value: float, foreach: Optional[bool] = None, ) -> None: r"""Clip the gradients of an iterable of parameters at specified value. Gradients are modified in-place. Args: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized clip_value (float): maximum allowed value of the gradients. The gradients are clipped in the range :math:`\left[\text{-clip\_value}, \text{clip\_value}\right]` foreach (bool): use the faster foreach-based implementation If ``None``, use the foreach implementation for CUDA and CPU native tensors and silently fall back to the slow implementation for other device types. Default: ``None`` """ if isinstance(parameters, torch.Tensor): parameters = [parameters] clip_value = float(clip_value) grads = [p.grad for p in parameters if p.grad is not None] grouped_grads = _group_tensors_by_device_and_dtype([grads]) for (device, _), ([grads], _) in grouped_grads.items(): if ( foreach is None and _has_foreach_support(cast(list[Tensor], grads), device=device) ) or (foreach and _device_has_foreach_support(device)): torch._foreach_clamp_min_(cast(list[Tensor], grads), -clip_value) torch._foreach_clamp_max_(cast(list[Tensor], grads), clip_value) elif foreach: raise RuntimeError( f"foreach=True was passed, but can't use the foreach API on {device.type} tensors" ) else: for grad in grads: cast(Tensor, grad).clamp_(min=-clip_value, max=clip_value) ```
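A minimal training-step sketch (toy model and random data, chosen only for illustration) showing where gradient clipping sits between ``backward()`` and ``step()``:

```py
import torch

model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
inputs, targets = torch.randn(8, 10), torch.randn(8, 1)

optimizer.zero_grad()
loss = torch.nn.functional.mse_loss(model(inputs), targets)
loss.backward()
# Clip after backward() and before step(); the pre-clipping total norm is returned.
total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
# Alternative: clamp each gradient element into [-0.5, 0.5] instead.
# torch.nn.utils.clip_grad_value_(model.parameters(), clip_value=0.5)
optimizer.step()
```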
============================================================================================================================ SOURCE CODE FILE: convert_parameters.py LINES: 1 SIZE: 3.26 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\convert_parameters.py ENCODING: utf-8 ```py from collections.abc import Iterable from typing import Optional import torch def parameters_to_vector(parameters: Iterable[torch.Tensor]) -> torch.Tensor: r"""Flatten an iterable of parameters into a single vector. Args: parameters (Iterable[Tensor]): an iterable of Tensors that are the parameters of a model. Returns: The parameters represented by a single vector """ # Flag for the device where the parameter is located param_device = None vec = [] for param in parameters: # Ensure the parameters are located in the same device param_device = _check_param_device(param, param_device) vec.append(param.view(-1)) return torch.cat(vec) def vector_to_parameters(vec: torch.Tensor, parameters: Iterable[torch.Tensor]) -> None: r"""Copy slices of a vector into an iterable of parameters. Args: vec (Tensor): a single vector representing the parameters of a model. parameters (Iterable[Tensor]): an iterable of Tensors that are the parameters of a model. """ # Ensure vec of type Tensor if not isinstance(vec, torch.Tensor): raise TypeError(f"expected torch.Tensor, but got: {torch.typename(vec)}") # Flag for the device where the parameter is located param_device = None # Pointer for slicing the vector for each parameter pointer = 0 for param in parameters: # Ensure the parameters are located in the same device param_device = _check_param_device(param, param_device) # The length of the parameter num_param = param.numel() # Slice the vector, reshape it, and replace the old data of the parameter param.data = vec[pointer : pointer + num_param].view_as(param).data # Increment the pointer pointer += num_param def _check_param_device(param: torch.Tensor, old_param_device: Optional[int]) -> int: r"""Check if the parameters are located on the same device. Currently, the conversion between model parameters and single vector form is not supported for multiple allocations, e.g. parameters in different GPUs/PrivateUse1s, or mixture of CPU/GPU/PrivateUse1. Args: param ([Tensor]): a Tensor of a parameter of a model old_param_device (int): the device where the first parameter of a model is allocated. Returns: old_param_device (int): report device for the first time """ # Meet the first parameter support_device_types = ["cuda", torch._C._get_privateuse1_backend_name()] if old_param_device is None: old_param_device = ( param.get_device() if param.device.type in support_device_types else -1 ) else: warn = False if ( param.device.type in support_device_types ): # Check if in same GPU/PrivateUse1 warn = param.get_device() != old_param_device else: # Check if in CPU warn = old_param_device != -1 if warn: raise TypeError( "Found two parameters on different devices, " "this is currently not supported." ) return old_param_device ```
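A round-trip sketch (illustrative shapes) for the two conversion helpers above:

```py
import torch
from torch.nn.utils import parameters_to_vector, vector_to_parameters

model = torch.nn.Linear(3, 2)
vec = parameters_to_vector(model.parameters())  # length 3*2 + 2 = 8
# Copy a modified vector back into the parameters, in the same iteration order.
vector_to_parameters(vec * 0.0, model.parameters())
assert all(p.abs().sum() == 0 for p in model.parameters())
```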
================================================================================================================ SOURCE CODE FILE: fusion.py LINES: 1 SIZE: 6.47 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\fusion.py ENCODING: utf-8 ```py from __future__ import annotations import copy from typing import TypeVar import torch __all__ = [ "fuse_conv_bn_eval", "fuse_conv_bn_weights", "fuse_linear_bn_eval", "fuse_linear_bn_weights", ] ConvT = TypeVar("ConvT", bound="torch.nn.modules.conv._ConvNd") LinearT = TypeVar("LinearT", bound="torch.nn.Linear") def fuse_conv_bn_eval( conv: ConvT, bn: torch.nn.modules.batchnorm._BatchNorm, transpose: bool = False, ) -> ConvT: r"""Fuse a convolutional module and a BatchNorm module into a single, new convolutional module. Args: conv (torch.nn.modules.conv._ConvNd): A convolutional module. bn (torch.nn.modules.batchnorm._BatchNorm): A BatchNorm module. transpose (bool, optional): If True, transpose the convolutional weight. Defaults to False. Returns: torch.nn.modules.conv._ConvNd: The fused convolutional module. .. note:: Both ``conv`` and ``bn`` must be in eval mode, and ``bn`` must have its running buffers computed. """ assert not (conv.training or bn.training), "Fusion only for eval!" fused_conv = copy.deepcopy(conv) assert bn.running_mean is not None and bn.running_var is not None fused_conv.weight, fused_conv.bias = fuse_conv_bn_weights( fused_conv.weight, fused_conv.bias, bn.running_mean, bn.running_var, bn.eps, bn.weight, bn.bias, transpose, ) return fused_conv def fuse_conv_bn_weights( conv_w: torch.Tensor, conv_b: torch.Tensor | None, bn_rm: torch.Tensor, bn_rv: torch.Tensor, bn_eps: float, bn_w: torch.Tensor | None, bn_b: torch.Tensor | None, transpose: bool = False, ) -> tuple[torch.nn.Parameter, torch.nn.Parameter]: r"""Fuse convolutional module parameters and BatchNorm module parameters into new convolutional module parameters. Args: conv_w (torch.Tensor): Convolutional weight. conv_b (Optional[torch.Tensor]): Convolutional bias. bn_rm (torch.Tensor): BatchNorm running mean. bn_rv (torch.Tensor): BatchNorm running variance. bn_eps (float): BatchNorm epsilon. bn_w (Optional[torch.Tensor]): BatchNorm weight. bn_b (Optional[torch.Tensor]): BatchNorm bias. transpose (bool, optional): If True, transpose the conv weight. Defaults to False. Returns: Tuple[torch.nn.Parameter, torch.nn.Parameter]: Fused convolutional weight and bias. """ conv_weight_dtype = conv_w.dtype conv_bias_dtype = conv_b.dtype if conv_b is not None else conv_weight_dtype if conv_b is None: conv_b = torch.zeros_like(bn_rm) if bn_w is None: bn_w = torch.ones_like(bn_rm) if bn_b is None: bn_b = torch.zeros_like(bn_rm) bn_var_rsqrt = torch.rsqrt(bn_rv + bn_eps) if transpose: shape = [1, -1] + [1] * (len(conv_w.shape) - 2) else: shape = [-1, 1] + [1] * (len(conv_w.shape) - 2) fused_conv_w = (conv_w * (bn_w * bn_var_rsqrt).reshape(shape)).to( dtype=conv_weight_dtype ) fused_conv_b = ((conv_b - bn_rm) * bn_var_rsqrt * bn_w + bn_b).to( dtype=conv_bias_dtype ) return ( torch.nn.Parameter(fused_conv_w, conv_w.requires_grad), torch.nn.Parameter(fused_conv_b, conv_b.requires_grad), ) def fuse_linear_bn_eval( linear: LinearT, bn: torch.nn.modules.batchnorm._BatchNorm, ) -> LinearT: r"""Fuse a linear module and a BatchNorm module into a single, new linear module. Args: linear (torch.nn.Linear): A Linear module. bn (torch.nn.modules.batchnorm._BatchNorm): A BatchNorm module. Returns: torch.nn.Linear: The fused linear module. .. 
note:: Both ``linear`` and ``bn`` must be in eval mode, and ``bn`` must have its running buffers computed. """ assert not (linear.training or bn.training), "Fusion only for eval!" fused_linear = copy.deepcopy(linear) """ Linear-BN needs to be fused while preserving the shapes of linear weight/bias. To preserve the shapes of linear weight/bias, the channel dim of bn needs to be broadcastable with the last dim of linear, because bn operates over the channel dim, (N, C_in, H, W) while linear operates over the last dim, (*, H_in). To be broadcastable, the number of features in bn and the number of output features from linear must satisfy the following condition: 1. they are equal, or 2. the number of features in bn is 1 Otherwise, skip the folding path """ assert ( linear.out_features == bn.num_features or bn.num_features == 1 ), "To fuse, linear.out_features == bn.num_features or bn.num_features == 1" assert bn.running_mean is not None and bn.running_var is not None fused_linear.weight, fused_linear.bias = fuse_linear_bn_weights( fused_linear.weight, fused_linear.bias, bn.running_mean, bn.running_var, bn.eps, bn.weight, bn.bias, ) return fused_linear def fuse_linear_bn_weights( linear_w: torch.Tensor, linear_b: torch.Tensor | None, bn_rm: torch.Tensor, bn_rv: torch.Tensor, bn_eps: float, bn_w: torch.Tensor, bn_b: torch.Tensor, ) -> tuple[torch.nn.Parameter, torch.nn.Parameter]: r"""Fuse linear module parameters and BatchNorm module parameters into new linear module parameters. Args: linear_w (torch.Tensor): Linear weight. linear_b (Optional[torch.Tensor]): Linear bias. bn_rm (torch.Tensor): BatchNorm running mean. bn_rv (torch.Tensor): BatchNorm running variance. bn_eps (float): BatchNorm epsilon. bn_w (torch.Tensor): BatchNorm weight. bn_b (torch.Tensor): BatchNorm bias. Returns: Tuple[torch.nn.Parameter, torch.nn.Parameter]: Fused linear weight and bias. """ linear_weight_dtype = linear_w.dtype linear_bias_dtype = linear_b.dtype if linear_b is not None else linear_weight_dtype if linear_b is None: linear_b = torch.zeros_like(bn_rm) bn_scale = bn_w * torch.rsqrt(bn_rv + bn_eps) fused_w = linear_w * bn_scale.unsqueeze(-1).to(dtype=linear_weight_dtype) fused_b = ((linear_b - bn_rm) * bn_scale + bn_b).to(dtype=linear_bias_dtype) return torch.nn.Parameter(fused_w, linear_w.requires_grad), torch.nn.Parameter( fused_b, linear_b.requires_grad ) ```
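A small eval-mode sketch (toy shapes, assumed usage) checking that the fused convolution reproduces conv followed by BatchNorm:

```py
import torch
from torch.nn.utils.fusion import fuse_conv_bn_eval

conv = torch.nn.Conv2d(3, 8, kernel_size=3)
bn = torch.nn.BatchNorm2d(8)
# One training-mode pass populates the BN running statistics.
bn(conv(torch.randn(4, 3, 16, 16)))
conv.eval()
bn.eval()

x = torch.randn(1, 3, 16, 16)
fused = fuse_conv_bn_eval(conv, bn)
assert torch.allclose(fused(x), bn(conv(x)), atol=1e-5)
```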
============================================================================================================== SOURCE CODE FILE: init.py LINES: 1 SIZE: 2.25 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\init.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import inspect import torch def skip_init(module_cls, *args, **kwargs): r""" Given a module class object and args / kwargs, instantiate the module without initializing parameters / buffers. This can be useful if initialization is slow or if custom initialization will be performed, making the default initialization unnecessary. There are some caveats to this, due to the way this function is implemented: 1. The module must accept a `device` arg in its constructor that is passed to any parameters or buffers created during construction. 2. The module must not perform any computation on parameters in its constructor except initialization (i.e. functions from :mod:`torch.nn.init`). If these conditions are satisfied, the module can be instantiated with parameter / buffer values uninitialized, as if having been created using :func:`torch.empty`. Args: module_cls: Class object; should be a subclass of :class:`torch.nn.Module` args: args to pass to the module's constructor kwargs: kwargs to pass to the module's constructor Returns: Instantiated module with uninitialized parameters / buffers Example:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> import torch >>> m = torch.nn.utils.skip_init(torch.nn.Linear, 5, 1) >>> m.weight Parameter containing: tensor([[0.0000e+00, 1.5846e+29, 7.8307e+00, 2.5250e-29, 1.1210e-44]], requires_grad=True) >>> m2 = torch.nn.utils.skip_init(torch.nn.Linear, in_features=6, out_features=1) >>> m2.weight Parameter containing: tensor([[-1.4677e+24, 4.5915e-41, 1.4013e-45, 0.0000e+00, -1.4677e+24, 4.5915e-41]], requires_grad=True) """ if not issubclass(module_cls, torch.nn.Module): raise RuntimeError(f"Expected a Module; got {module_cls}") if "device" not in inspect.signature(module_cls).parameters: raise RuntimeError("Module must support a 'device' arg to skip initialization") final_device = kwargs.pop("device", "cpu") kwargs["device"] = "meta" return module_cls(*args, **kwargs).to_empty(device=final_device) ```
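The intended follow-up to ``skip_init`` is an explicit initialization; a short sketch (the initialization scheme below is only an example) follows:

```py
import torch

# Parameters are allocated but left uninitialized, then filled explicitly.
m = torch.nn.utils.skip_init(torch.nn.Linear, 16, 4)
torch.nn.init.orthogonal_(m.weight)
torch.nn.init.zeros_(m.bias)
```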
======================================================================================================================= SOURCE CODE FILE: memory_format.py LINES: 1 SIZE: 7.86 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\memory_format.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import torch def convert_conv2d_weight_memory_format(module, memory_format): r"""Convert ``memory_format`` of ``nn.Conv2d.weight`` to ``memory_format``. The conversion recursively applies to nested ``nn.Module``, including ``module``. Note that it only changes the memory_format, but not the semantics of each dimensions. This function is used to facilitate the computation to adopt NHWC kernels, which provides considerable speed up for fp16 data on CUDA devices with compute capability >= 7.0 .. note:: Calling ``model.to(memory_format=torch.channels_last)`` is more aggressive than the utility function ``convert_conv2d_weight_memory_format``. Any layer with 4d weight will be affected by ``model.to``, which does not necessarily benefit from conversion to specified ``memory_format``. One place we are confident in is that NHWC(channels_last) conversion for convolution in cuDNN, as it is beneficial to run convolution in NHWC, even in cases where we have to apply permutation to input tensors. Hence our strategy here is to convert only the weight of convolution to channels_last. This ensures that; 1. Fast convolution kernels will be used, the benefit of which could outweigh overhead of permutation (if input is not in the same format). 2. No unnecessary permutations are applied on layers that do not benefit from memory_format conversion. The optimal case is that, layers between convolution layers are channels last compatible. Input tensor would be permuted to channels last when it encounters the first convolution layer and stay in that memory format. Hence following convolutions will not need to permute its input tensor. In case where a channels last incompatible layer is between convolution layers, we need to permute the input tensor back to contiguous format for that layer. The input tensor will go through the remaining layers in contiguous format and be permuted to channels last when it encounters another convolution layer. There's no point in propagating that permutation to an earlier layer, as most layers are quite agnostic to ``memory_format``. This claim might change when PyTorch supports fusion of permutation, as there might have been a better spot to fuse the permutation other than immediately before a convolution. Args: module (nn.Module): ``nn.Conv2d`` & ``nn.ConvTranspose2d`` or container ``nn.Module`` memory_format: user specified ``memory_format``, e.g. ``torch.channels_last`` or ``torch.contiguous_format`` Returns: The original module with updated ``nn.Conv2d`` Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> # xdoctest: +REQUIRES(env:CUBLAS_WORKSPACE_CONFIG) >>> input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float16, device="cuda") >>> model = nn.Sequential( >>> nn.Conv2d(8, 4, 3)).cuda().half() >>> # This is identical to: >>> # nn.utils.convert_conv2d_weight_memory_format(model, torch.channels_last) >>> model = nn.utils.convert_conv2d_weight_memory_format(model, torch.channels_last) >>> out = model(input) """ # TODO: expand this to `_ConvNd` when channels_last support is extended # beyond only 4d tensors. 
if isinstance(module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)): weight_data = ( module.weight.detach().clone().contiguous(memory_format=memory_format) ) module.weight.data = weight_data.resize_( weight_data.size(), memory_format=memory_format ) for child in module.children(): convert_conv2d_weight_memory_format(child, memory_format) return module def convert_conv3d_weight_memory_format(module, memory_format): r"""Convert ``memory_format`` of ``nn.Conv3d.weight`` to ``memory_format`` The conversion recursively applies to nested ``nn.Module``, including ``module``. Note that it only changes the memory_format, but not the semantics of each dimensions. This function is used to facilitate the computation to adopt NHWC kernels, which provides considerable speed up for fp16 data on CUDA devices with compute capability >= 7.0 .. note:: Calling ``model.to(memory_format=torch.channels_last_3d)`` is more aggressive than the utility function ``convert_conv3d_weight_memory_format``. Any layer with 4d weight will be affected by ``model.to``, which does not necessarily benefit from conversion to specified ``memory_format``. One place we are confident in is that NDHWC(channels_last_3d) conversion for convolution in cuDNN, as it is beneficial to run convolution in NDHWC, even in cases where we have to apply permutation to input tensors. Hence our strategy here is to convert only the weight of convolution to channels_last_3d. This ensures that; 1. Fast convolution kernels will be used, the benefit of which could outweigh overhead of permutation (if input is not in the same format). 2. No unnecessary permutations are applied on layers that do not benefit from memory_format conversion. The optimal case is that, layers between convolution layers are channels last compatible. Input tensor would be permuted to channels last when it encounters the first convolution layer and stay in that memory format. Hence following convolutions will not need to permute its input tensor. In case where a channels last incompatible layer is between convolution layers, we need to permute the input tensor back to contiguous format for that layer. The input tensor will go through the remaining layers in contiguous format and be permuted to channels last when it encounters another convolution layer. There's no point in propagating that permutation to an earlier layer, as most layers are quite agnostic to ``memory_format``. This claim might change when PyTorch supports fusion of permutation, as there might have been a better spot to fuse the permutation other than immediately before a convolution. Args: module (nn.Module): ``nn.Conv3d`` & ``nn.ConvTranspose3d`` or container ``nn.Module`` memory_format: user specified ``memory_format``, e.g. ``torch.channels_last`` or ``torch.contiguous_format`` Returns: The original module with updated ``nn.Conv3d`` Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> # xdoctest: +REQUIRES(env:CUBLAS_WORKSPACE_CONFIG) >>> input = torch.randint(1, 10, (2, 8, 4, 4, 4), dtype=torch.float16, device="cuda") >>> model = nn.Sequential( >>> nn.Conv3d(8, 4, 3)).cuda().half() >>> # This is identical to: >>> # nn.utils.convert_conv3d_weight_memory_format(model, torch.channels_last_3d) >>> model = nn.utils.convert_conv3d_weight_memory_format(model, torch.channels_last_3d) >>> out = model(input) """ # TODO: expand this to `_ConvNd` when channels_last support is extended # beyond only 4d tensors. 
if isinstance(module, (torch.nn.Conv3d, torch.nn.ConvTranspose3d)): weight_data = ( module.weight.detach().clone().contiguous(memory_format=memory_format) ) module.weight.data = weight_data.resize_( weight_data.size(), memory_format=memory_format ) for child in module.children(): convert_conv3d_weight_memory_format(child, memory_format) return module ```
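A CPU-only sketch (the docstring examples above target CUDA) verifying the weight layout after conversion:

```py
import torch
from torch.nn.utils.memory_format import convert_conv2d_weight_memory_format

model = torch.nn.Sequential(torch.nn.Conv2d(8, 4, 3), torch.nn.ReLU())
convert_conv2d_weight_memory_format(model, torch.channels_last)
# Only the convolution weights change layout; module semantics are unchanged.
assert model[0].weight.is_contiguous(memory_format=torch.channels_last)
```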
========================================================================================================================== SOURCE CODE FILE: parametrizations.py LINES: 2 SIZE: 25.68 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\parametrizations.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from enum import auto, Enum from typing import Optional import torch import torch.nn.functional as F from torch import Tensor from torch.nn.modules import Module from torch.nn.utils import parametrize __all__ = ["orthogonal", "spectral_norm", "weight_norm"] def _is_orthogonal(Q, eps=None): n, k = Q.size(-2), Q.size(-1) Id = torch.eye(k, dtype=Q.dtype, device=Q.device) # A reasonable eps, but not too large eps = 10.0 * n * torch.finfo(Q.dtype).eps return torch.allclose(Q.mH @ Q, Id, atol=eps) def _make_orthogonal(A): """Assume that A is a tall matrix. Compute the Q factor s.t. A = QR (A may be complex) and diag(R) is real and non-negative. """ X, tau = torch.geqrf(A) Q = torch.linalg.householder_product(X, tau) # The diagonal of X is the diagonal of R (which is always real) so we normalise by its signs Q *= X.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2) return Q class _OrthMaps(Enum): matrix_exp = auto() cayley = auto() householder = auto() class _Orthogonal(Module): base: Tensor def __init__( self, weight, orthogonal_map: _OrthMaps, *, use_trivialization=True ) -> None: super().__init__() # Note [Householder complex] # For complex tensors, it is not possible to compute the tensor `tau` necessary for # linalg.householder_product from the reflectors. # To see this, note that the reflectors have a shape like: # 0 0 0 # * 0 0 # * * 0 # which, for complex matrices, give n(n-1) (real) parameters. Now, you need n^2 parameters # to parametrize the unitary matrices. Saving tau on its own does not work either, because # not every combination of `(A, tau)` gives a unitary matrix, meaning that if we optimise # them as independent tensors we would not maintain the constraint # An equivalent reasoning holds for rectangular matrices if weight.is_complex() and orthogonal_map == _OrthMaps.householder: raise ValueError( "The householder parametrization does not support complex tensors." 
) self.shape = weight.shape self.orthogonal_map = orthogonal_map if use_trivialization: self.register_buffer("base", None) def forward(self, X: torch.Tensor) -> torch.Tensor: n, k = X.size(-2), X.size(-1) transposed = n < k if transposed: X = X.mT n, k = k, n # Here n > k and X is a tall matrix if ( self.orthogonal_map == _OrthMaps.matrix_exp or self.orthogonal_map == _OrthMaps.cayley ): # We just need n x k - k(k-1)/2 parameters X = X.tril() if n != k: # Embed into a square matrix X = torch.cat( [X, X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1 ) A = X - X.mH # A is skew-symmetric (or skew-hermitian) if self.orthogonal_map == _OrthMaps.matrix_exp: Q = torch.matrix_exp(A) elif self.orthogonal_map == _OrthMaps.cayley: # Computes the Cayley retraction (I+A/2)(I-A/2)^{-1} Id = torch.eye(n, dtype=A.dtype, device=A.device) Q = torch.linalg.solve( torch.add(Id, A, alpha=-0.5), torch.add(Id, A, alpha=0.5) ) # Q is now orthogonal (or unitary) of size (..., n, n) if n != k: Q = Q[..., :k] # Q is now the size of the X (albeit perhaps transposed) else: # X is real here, as we do not support householder with complex numbers A = X.tril(diagonal=-1) tau = 2.0 / (1.0 + (A * A).sum(dim=-2)) Q = torch.linalg.householder_product(A, tau) # The diagonal of X is 1's and -1's # We do not want to differentiate through this or update the diagonal of X hence the casting Q = Q * X.diagonal(dim1=-2, dim2=-1).int().unsqueeze(-2) if hasattr(self, "base"): Q = self.base @ Q if transposed: Q = Q.mT return Q # type: ignore[possibly-undefined] @torch.autograd.no_grad() def right_inverse(self, Q: torch.Tensor) -> torch.Tensor: if Q.shape != self.shape: raise ValueError( f"Expected a matrix or batch of matrices of shape {self.shape}. " f"Got a tensor of shape {Q.shape}." ) Q_init = Q n, k = Q.size(-2), Q.size(-1) transpose = n < k if transpose: Q = Q.mT n, k = k, n # We always make sure to always copy Q in every path if not hasattr(self, "base"): # Note [right_inverse expm cayley] # If we do not have use_trivialization=True, we just implement the inverse of the forward # map for the Householder. To see why, think that for the Cayley map, # we would need to find the matrix X \in R^{n x k} such that: # Y = torch.cat([X.tril(), X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1) # A = Y - Y.mH # cayley(A)[:, :k] # gives the original tensor. It is not clear how to do this. # Perhaps via some algebraic manipulation involving the QR like that of # Corollary 2.2 in Edelman, Arias and Smith? if ( self.orthogonal_map == _OrthMaps.cayley or self.orthogonal_map == _OrthMaps.matrix_exp ): raise NotImplementedError( "It is not possible to assign to the matrix exponential " "or the Cayley parametrizations when use_trivialization=False." ) # If parametrization == _OrthMaps.householder, make Q orthogonal via the QR decomposition. # Here Q is always real because we do not support householder and complex matrices. 
# See note [Householder complex] A, tau = torch.geqrf(Q) # We want to have a decomposition X = QR with diag(R) > 0, as otherwise we could # decompose an orthogonal matrix Q as Q = (-Q)@(-Id), which is a valid QR decomposition # The diagonal of Q is the diagonal of R from the qr decomposition A.diagonal(dim1=-2, dim2=-1).sign_() # Equality with zero is ok because LAPACK returns exactly zero when it does not want # to use a particular reflection A.diagonal(dim1=-2, dim2=-1)[tau == 0.0] *= -1 return A.mT if transpose else A else: if n == k: # We check whether Q is orthogonal if not _is_orthogonal(Q): Q = _make_orthogonal(Q) else: # Is orthogonal Q = Q.clone() else: # Complete Q into a full n x n orthogonal matrix N = torch.randn( *(Q.size()[:-2] + (n, n - k)), dtype=Q.dtype, device=Q.device ) Q = torch.cat([Q, N], dim=-1) Q = _make_orthogonal(Q) self.base = Q # It is necessary to return the -Id, as we use the diagonal for the # Householder parametrization. Using -Id makes: # householder(torch.zeros(m,n)) == torch.eye(m,n) # Poor man's version of eye_like neg_Id = torch.zeros_like(Q_init) neg_Id.diagonal(dim1=-2, dim2=-1).fill_(-1.0) return neg_Id def orthogonal( module: Module, name: str = "weight", orthogonal_map: Optional[str] = None, *, use_trivialization: bool = True, ) -> Module: r"""Apply an orthogonal or unitary parametrization to a matrix or a batch of matrices. Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, the parametrized matrix :math:`Q \in \mathbb{K}^{m \times n}` is **orthogonal** as .. math:: \begin{align*} Q^{\text{H}}Q &= \mathrm{I}_n \mathrlap{\qquad \text{if }m \geq n}\\ QQ^{\text{H}} &= \mathrm{I}_m \mathrlap{\qquad \text{if }m < n} \end{align*} where :math:`Q^{\text{H}}` is the conjugate transpose when :math:`Q` is complex and the transpose when :math:`Q` is real-valued, and :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix. In plain words, :math:`Q` will have orthonormal columns whenever :math:`m \geq n` and orthonormal rows otherwise. If the tensor has more than two dimensions, we consider it as a batch of matrices of shape `(..., m, n)`. The matrix :math:`Q` may be parametrized via three different ``orthogonal_map`` in terms of the original tensor: - ``"matrix_exp"``/``"cayley"``: the :func:`~torch.matrix_exp` :math:`Q = \exp(A)` and the `Cayley map`_ :math:`Q = (\mathrm{I}_n + A/2)(\mathrm{I}_n - A/2)^{-1}` are applied to a skew-symmetric :math:`A` to give an orthogonal matrix. - ``"householder"``: computes a product of Householder reflectors (:func:`~torch.linalg.householder_product`). ``"matrix_exp"``/``"cayley"`` often make the parametrized weight converge faster than ``"householder"``, but they are slower to compute for very thin or very wide matrices. If ``use_trivialization=True`` (default), the parametrization implements the "Dynamic Trivialization Framework", where an extra matrix :math:`B \in \mathbb{K}^{n \times n}` is stored under ``module.parametrizations.weight[0].base``. This helps the convergence of the parametrized layer at the expense of some extra memory use. See `Trivializations for Gradient-Based Optimization on Manifolds`_ . Initial value of :math:`Q`: If the original tensor is not parametrized and ``use_trivialization=True`` (default), the initial value of :math:`Q` is that of the original tensor if it is orthogonal (or unitary in the complex case) and it is orthogonalized via the QR decomposition otherwise (see :func:`torch.linalg.qr`). 
Same happens when it is not parametrized and ``orthogonal_map="householder"`` even when ``use_trivialization=False``. Otherwise, the initial value is the result of the composition of all the registered parametrizations applied to the original tensor. .. note:: This function is implemented using the parametrization functionality in :func:`~torch.nn.utils.parametrize.register_parametrization`. .. _`Cayley map`: https://en.wikipedia.org/wiki/Cayley_transform#Matrix_map .. _`Trivializations for Gradient-Based Optimization on Manifolds`: https://arxiv.org/abs/1909.09501 Args: module (nn.Module): module on which to register the parametrization. name (str, optional): name of the tensor to make orthogonal. Default: ``"weight"``. orthogonal_map (str, optional): One of the following: ``"matrix_exp"``, ``"cayley"``, ``"householder"``. Default: ``"matrix_exp"`` if the matrix is square or complex, ``"householder"`` otherwise. use_trivialization (bool, optional): whether to use the dynamic trivialization framework. Default: ``True``. Returns: The original module with an orthogonal parametrization registered to the specified weight Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) >>> orth_linear = orthogonal(nn.Linear(20, 40)) >>> orth_linear ParametrizedLinear( in_features=20, out_features=40, bias=True (parametrizations): ModuleDict( (weight): ParametrizationList( (0): _Orthogonal() ) ) ) >>> # xdoctest: +IGNORE_WANT >>> Q = orth_linear.weight >>> torch.dist(Q.T @ Q, torch.eye(20)) tensor(4.9332e-07) """ weight = getattr(module, name, None) if not isinstance(weight, Tensor): raise ValueError( f"Module '{module}' has no parameter or buffer with name '{name}'" ) # We could implement this for 1-dim tensors as the maps on the sphere # but I believe it'd bite more people than it'd help if weight.ndim < 2: raise ValueError( "Expected a matrix or batch of matrices. " f"Got a tensor of {weight.ndim} dimensions." ) if orthogonal_map is None: orthogonal_map = ( "matrix_exp" if weight.size(-2) == weight.size(-1) or weight.is_complex() else "householder" ) orth_enum = getattr(_OrthMaps, orthogonal_map, None) if orth_enum is None: raise ValueError( 'orthogonal_map has to be one of "matrix_exp", "cayley", "householder". ' f"Got: {orthogonal_map}" ) orth = _Orthogonal(weight, orth_enum, use_trivialization=use_trivialization) parametrize.register_parametrization(module, name, orth, unsafe=True) return module class _WeightNorm(Module): def __init__( self, dim: Optional[int] = 0, ) -> None: super().__init__() if dim is None: dim = -1 self.dim = dim def forward(self, weight_g, weight_v): return torch._weight_norm(weight_v, weight_g, self.dim) def right_inverse(self, weight): weight_g = torch.norm_except_dim(weight, 2, self.dim) weight_v = weight return weight_g, weight_v def weight_norm(module: Module, name: str = "weight", dim: int = 0): r"""Apply weight normalization to a parameter in the given module. .. math:: \mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|} Weight normalization is a reparameterization that decouples the magnitude of a weight tensor from its direction. This replaces the parameter specified by :attr:`name` with two parameters: one specifying the magnitude and one specifying the direction. By default, with ``dim=0``, the norm is computed independently per output channel/plane. To compute a norm over the entire weight tensor, use ``dim=None``. 
See https://arxiv.org/abs/1602.07868 Args: module (Module): containing module name (str, optional): name of weight parameter dim (int, optional): dimension over which to compute the norm Returns: The original module with the weight norm hook Example:: >>> m = weight_norm(nn.Linear(20, 40), name='weight') >>> m ParametrizedLinear( in_features=20, out_features=40, bias=True (parametrizations): ModuleDict( (weight): ParametrizationList( (0): _WeightNorm() ) ) ) >>> m.parametrizations.weight.original0.size() torch.Size([40, 1]) >>> m.parametrizations.weight.original1.size() torch.Size([40, 20]) """ _weight_norm = _WeightNorm(dim) parametrize.register_parametrization(module, name, _weight_norm, unsafe=True) def _weight_norm_compat_hook( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ): g_key = f"{prefix}{name}_g" v_key = f"{prefix}{name}_v" if g_key in state_dict and v_key in state_dict: original0 = state_dict.pop(g_key) original1 = state_dict.pop(v_key) state_dict[f"{prefix}parametrizations.{name}.original0"] = original0 state_dict[f"{prefix}parametrizations.{name}.original1"] = original1 module._register_load_state_dict_pre_hook(_weight_norm_compat_hook) return module class _SpectralNorm(Module): def __init__( self, weight: torch.Tensor, n_power_iterations: int = 1, dim: int = 0, eps: float = 1e-12, ) -> None: super().__init__() ndim = weight.ndim if dim >= ndim or dim < -ndim: raise IndexError( "Dimension out of range (expected to be in range of " f"[-{ndim}, {ndim - 1}] but got {dim})" ) if n_power_iterations <= 0: raise ValueError( "Expected n_power_iterations to be positive, but " f"got n_power_iterations={n_power_iterations}" ) self.dim = dim if dim >= 0 else dim + ndim self.eps = eps if ndim > 1: # For ndim == 1 we do not need to approximate anything (see _SpectralNorm.forward) self.n_power_iterations = n_power_iterations weight_mat = self._reshape_weight_to_matrix(weight) h, w = weight_mat.size() u = weight_mat.new_empty(h).normal_(0, 1) v = weight_mat.new_empty(w).normal_(0, 1) self.register_buffer("_u", F.normalize(u, dim=0, eps=self.eps)) self.register_buffer("_v", F.normalize(v, dim=0, eps=self.eps)) # Start with u, v initialized to some reasonable values by performing a number # of iterations of the power method self._power_method(weight_mat, 15) def _reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor: # Precondition assert weight.ndim > 1 if self.dim != 0: # permute dim to front weight = weight.permute( self.dim, *(d for d in range(weight.dim()) if d != self.dim) ) return weight.flatten(1) @torch.autograd.no_grad() def _power_method(self, weight_mat: torch.Tensor, n_power_iterations: int) -> None: # See original note at torch/nn/utils/spectral_norm.py # NB: If `do_power_iteration` is set, the `u` and `v` vectors are # updated in power iteration **in-place**. This is very important # because in `DataParallel` forward, the vectors (being buffers) are # broadcast from the parallelized module to each module replica, # which is a new module object created on the fly. And each replica # runs its own spectral norm power iteration. So simply assigning # the updated vectors to the module this function runs on will cause # the update to be lost forever. And the next time the parallelized # module is replicated, the same randomly initialized vectors are # broadcast and used! # # Therefore, to make the change propagate back, we rely on two # important behaviors (also enforced via tests): # 1. 
`DataParallel` doesn't clone storage if the broadcast tensor # is already on correct device; and it makes sure that the # parallelized module is already on `device[0]`. # 2. If the out tensor in `out=` kwarg has correct shape, it will # just fill in the values. # Therefore, since the same power iteration is performed on all # devices, simply updating the tensors in-place will make sure that # the module replica on `device[0]` will update the _u vector on the # parallelized module (by shared storage). # # However, after we update `u` and `v` in-place, we need to **clone** # them before using them to normalize the weight. This is to support # backproping through two forward passes, e.g., the common pattern in # GAN training: loss = D(real) - D(fake). Otherwise, engine will # complain that variables needed to do backward for the first forward # (i.e., the `u` and `v` vectors) are changed in the second forward. # Precondition assert weight_mat.ndim > 1 for _ in range(n_power_iterations): # Spectral norm of weight equals to `u^T W v`, where `u` and `v` # are the first left and right singular vectors. # This power iteration produces approximations of `u` and `v`. self._u = F.normalize( torch.mv(weight_mat, self._v), # type: ignore[has-type] dim=0, eps=self.eps, out=self._u, # type: ignore[has-type] ) self._v = F.normalize( torch.mv(weight_mat.H, self._u), # type: ignore[has-type] dim=0, eps=self.eps, out=self._v, # type: ignore[has-type] ) def forward(self, weight: torch.Tensor) -> torch.Tensor: if weight.ndim == 1: # Faster and more exact path, no need to approximate anything return F.normalize(weight, dim=0, eps=self.eps) else: weight_mat = self._reshape_weight_to_matrix(weight) if self.training: self._power_method(weight_mat, self.n_power_iterations) # See above on why we need to clone u = self._u.clone(memory_format=torch.contiguous_format) v = self._v.clone(memory_format=torch.contiguous_format) # The proper way of computing this should be through F.bilinear, but # it seems to have some efficiency issues: # https://github.com/pytorch/pytorch/issues/58093 sigma = torch.vdot(u, torch.mv(weight_mat, v)) return weight / sigma def right_inverse(self, value: torch.Tensor) -> torch.Tensor: # we may want to assert here that the passed value already # satisfies constraints return value def spectral_norm( module: Module, name: str = "weight", n_power_iterations: int = 1, eps: float = 1e-12, dim: Optional[int] = None, ) -> Module: r"""Apply spectral normalization to a parameter in the given module. .. math:: \mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})}, \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2} When applied on a vector, it simplifies to .. math:: \mathbf{x}_{SN} = \dfrac{\mathbf{x}}{\|\mathbf{x}\|_2} Spectral normalization stabilizes the training of discriminators (critics) in Generative Adversarial Networks (GANs) by reducing the Lipschitz constant of the model. :math:`\sigma` is approximated performing one iteration of the `power method`_ every time the weight is accessed. If the dimension of the weight tensor is greater than 2, it is reshaped to 2D in power iteration method to get spectral norm. See `Spectral Normalization for Generative Adversarial Networks`_ . .. _`power method`: https://en.wikipedia.org/wiki/Power_iteration .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957 .. 
note:: This function is implemented using the parametrization functionality in :func:`~torch.nn.utils.parametrize.register_parametrization`. It is a reimplementation of :func:`torch.nn.utils.spectral_norm`. .. note:: When this constraint is registered, the singular vectors associated to the largest singular value are estimated rather than sampled at random. These are then updated performing :attr:`n_power_iterations` of the `power method`_ whenever the tensor is accessed with the module on `training` mode. .. note:: If the `_SpectralNorm` module, i.e., `module.parametrization.weight[idx]`, is in training mode on removal, it will perform another power iteration. If you'd like to avoid this iteration, set the module to eval mode before its removal. Args: module (nn.Module): containing module name (str, optional): name of weight parameter. Default: ``"weight"``. n_power_iterations (int, optional): number of power iterations to calculate spectral norm. Default: ``1``. eps (float, optional): epsilon for numerical stability in calculating norms. Default: ``1e-12``. dim (int, optional): dimension corresponding to number of outputs. Default: ``0``, except for modules that are instances of ConvTranspose{1,2,3}d, when it is ``1`` Returns: The original module with a new parametrization registered to the specified weight Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> snm = spectral_norm(nn.Linear(20, 40)) >>> snm ParametrizedLinear( in_features=20, out_features=40, bias=True (parametrizations): ModuleDict( (weight): ParametrizationList( (0): _SpectralNorm() ) ) ) >>> torch.linalg.matrix_norm(snm.weight, 2) tensor(1.0081, grad_fn=<AmaxBackward0>) """ weight = getattr(module, name, None) if not isinstance(weight, Tensor): raise ValueError( f"Module '{module}' has no parameter or buffer with name '{name}'" ) if dim is None: if isinstance( module, ( torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d, ), ): dim = 1 else: dim = 0 parametrize.register_parametrization( module, name, _SpectralNorm(weight, n_power_iterations, dim, eps) ) return module ```
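A brief sketch (illustrative shapes) of how the ``weight_norm`` parametrization above decomposes a weight into the stored magnitude ``original0`` and direction ``original1``:

```py
import torch
from torch.nn.utils.parametrizations import weight_norm

m = weight_norm(torch.nn.Linear(20, 40))
g = m.parametrizations.weight.original0  # per-row magnitudes, shape (40, 1)
v = m.parametrizations.weight.original1  # directions, shape (40, 20)
# module.weight is recomputed from g and v on every access (dim=0 by default).
recomputed = g * v / v.norm(dim=1, keepdim=True)
assert torch.allclose(m.weight, recomputed, atol=1e-6)
```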
===================================================================================================================== SOURCE CODE FILE: parametrize.py LINES: 16 SIZE: 35.99 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\parametrize.py ENCODING: utf-8 ```py # mypy: allow-untyped-decorators # mypy: allow-untyped-defs import collections import copyreg from collections.abc import Sequence from contextlib import contextmanager from copy import deepcopy from typing import Optional, Union import torch from torch import Tensor from torch.__future__ import get_swap_module_params_on_conversion from torch.nn.modules.container import Module, ModuleDict, ModuleList from torch.nn.parameter import Parameter from torch.utils._python_dispatch import is_traceable_wrapper_subclass __all__ = [ "cached", "ParametrizationList", "register_parametrization", "is_parametrized", "remove_parametrizations", "type_before_parametrizations", "transfer_parametrizations_and_params", ] _cache_enabled = 0 _cache: dict[tuple[int, str], Optional[Tensor]] = {} @contextmanager def cached(): r"""Context manager that enables the caching system within parametrizations registered with :func:`register_parametrization`. The value of the parametrized objects is computed and cached the first time they are required when this context manager is active. The cached values are discarded when leaving the context manager. This is useful when using a parametrized parameter more than once in the forward pass. An example of this is when parametrizing the recurrent kernel of an RNN or when sharing weights. The simplest way to activate the cache is by wrapping the forward pass of the neural network .. code-block:: python import torch.nn.utils.parametrize as P ... with P.cached(): output = model(inputs) in training and evaluation. One may also wrap the parts of the modules that use several times the parametrized tensors. For example, the loop of an RNN with a parametrized recurrent kernel: .. code-block:: python with P.cached(): for x in xs: out_rnn = self.rnn_cell(x, out_rnn) """ global _cache global _cache_enabled _cache_enabled += 1 try: yield finally: _cache_enabled -= 1 if not _cache_enabled: _cache = {} def _register_parameter_or_buffer(module, name, X): if isinstance(X, Parameter): module.register_parameter(name, X) else: module.register_buffer(name, X) def _maybe_set(dest: Tensor, src: Tensor) -> None: should_swap = ( get_swap_module_params_on_conversion() or is_traceable_wrapper_subclass(dest) ) if should_swap: if isinstance(dest, Parameter) and not isinstance(src, Parameter): src = Parameter(src, requires_grad=dest.requires_grad) torch.utils.swap_tensors(dest, src) else: dest.set_(src) # type: ignore[call-overload] class ParametrizationList(ModuleList): r"""A sequential container that holds and manages the original parameters or buffers of a parametrized :class:`torch.nn.Module`. It is the type of ``module.parametrizations[tensor_name]`` when ``module[tensor_name]`` has been parametrized with :func:`register_parametrization`. If the first registered parametrization has a ``right_inverse`` that returns one tensor or does not have a ``right_inverse`` (in which case we assume that ``right_inverse`` is the identity), it will hold the tensor under the name ``original``. If it has a ``right_inverse`` that returns more than one tensor, these will be registered as ``original0``, ``original1``, ... .. warning:: This class is used internally by :func:`register_parametrization`. It is documented here for completeness. 
It shall not be instantiated by the user. Args: modules (sequence): sequence of modules representing the parametrizations original (Parameter or Tensor): parameter or buffer that is parametrized unsafe (bool): a boolean flag that denotes whether the parametrization may change the dtype and shape of the tensor. Default: `False` Warning: the parametrization is not checked for consistency upon registration. Enable this flag at your own risk. """ original: Tensor unsafe: bool def __init__( self, modules: Sequence[Module], original: Union[Tensor, Parameter], unsafe: bool = False, ) -> None: # We require this because we need to treat differently the first parametrization # This should never throw, unless this class is used from the outside if len(modules) == 0: raise ValueError("ParametrizationList requires one or more modules.") super().__init__(modules) self.unsafe = unsafe # In plain words: # module.weight must keep its dtype and shape. # Furthermore, if there is no right_inverse or the right_inverse returns a tensor, # this should be of the same dtype as the original tensor # # We check that the following invariants hold: # X = module.weight # Y = param.right_inverse(X) # assert isinstance(Y, Tensor) or # (isinstance(Y, collections.abc.Sequence) and all(isinstance(t, Tensor) for t in Y)) # Z = param(Y) if isinstance(Y, Tensor) else param(*Y) # # Consistency checks # assert X.dtype == Z.dtype and X.shape == Z.shape # # If it has one input, this allows to be able to use set_ to be able to # # move data to/from the original tensor without changing its id (which is what the # # optimizer uses to track parameters) # if isinstance(Y, Tensor) # assert X.dtype == Y.dtype # Below we use original = X, new = Y original_shape = original.shape original_dtype = original.dtype # Compute new with torch.no_grad(): new = original for module in reversed(self): # type: ignore[call-overload] if hasattr(module, "right_inverse"): try: new = module.right_inverse(new) # type: ignore[operator] except NotImplementedError: pass # else, or if it throws, we assume that right_inverse is the identity if not isinstance(new, Tensor) and not isinstance(new, Sequence): raise ValueError( "'right_inverse' must return a Tensor or a Sequence of tensors (list, tuple...). " f"Got {type(new).__name__}" ) # Set the number of original tensors self.is_tensor = isinstance(new, Tensor) self.ntensors = 1 if self.is_tensor else len(new) # Register the tensor(s) if self.is_tensor: if original.dtype != new.dtype: raise ValueError( "When `right_inverse` outputs one tensor, it may not change the dtype.\n" f"original.dtype: {original.dtype}\n" f"right_inverse(original).dtype: {new.dtype}" ) # Set the original to original so that the user does not need to re-register the parameter # manually in the optimiser with torch.no_grad(): _maybe_set(original, new) _register_parameter_or_buffer(self, "original", original) else: for i, originali in enumerate(new): if not isinstance(originali, Tensor): raise ValueError( "'right_inverse' must return a Tensor or a Sequence of tensors " "(list, tuple...). " f"Got element {i} of the sequence with type {type(originali).__name__}." 
) # If the original tensor was a Parameter that required grad, we expect the user to # add the new parameters to the optimizer after registering the parametrization # (this is documented) if isinstance(original, Parameter): originali = Parameter(originali, original.requires_grad) originali.requires_grad_(original.requires_grad) _register_parameter_or_buffer(self, f"original{i}", originali) if not self.unsafe: # Consistency checks: # Since f : A -> B, right_inverse : B -> A, Z and original should live in B # Z = forward(right_inverse(original)) Z = self() if not isinstance(Z, Tensor): raise ValueError( f"A parametrization must return a tensor. Got {type(Z).__name__}." ) if Z.dtype != original_dtype: raise ValueError( "Registering a parametrization may not change the dtype of the tensor, unless `unsafe` flag is enabled.\n" f"unparametrized dtype: {original_dtype}\n" f"parametrized dtype: {Z.dtype}" ) if Z.shape != original_shape: raise ValueError( "Registering a parametrization may not change the shape of the tensor, unless `unsafe` flag is enabled.\n" f"unparametrized shape: {original_shape}\n" f"parametrized shape: {Z.shape}" ) def right_inverse(self, value: Tensor) -> None: r"""Call the ``right_inverse`` methods of the parametrizations in the inverse registration order. Then, it stores the result in ``self.original`` if ``right_inverse`` outputs one tensor or in ``self.original0``, ``self.original1``, ... if it outputs several. Args: value (Tensor): Value to which initialize the module """ # All the exceptions in this function should almost never throw. # They could throw if, for example, right_inverse function returns a different # dtype when given a different input, which should most likely be caused by a # bug in the user's code with torch.no_grad(): # See https://github.com/pytorch/pytorch/issues/53103 for module in reversed(self): # type: ignore[call-overload] if hasattr(module, "right_inverse"): value = module.right_inverse(value) # type: ignore[operator] else: raise RuntimeError( f"parametrization {type(module).__name__} does not implement " "right_inverse." ) if self.is_tensor: # These exceptions should only throw when a right_inverse function does not # return the same dtype for every input, which should most likely be caused by a bug if not isinstance(value, Tensor): raise ValueError( f"`right_inverse` should return a tensor. Got {type(value).__name__}" ) if value.dtype != self.original.dtype: raise ValueError( f"The tensor returned by `right_inverse` has dtype {value.dtype} " f"while `original` has dtype {self.original.dtype}" ) # We know that the result is going to have the same dtype _maybe_set(self.original, value) else: if not isinstance(value, collections.abc.Sequence): raise ValueError( "'right_inverse' must return a sequence of tensors. " f"Got {type(value).__name__}." ) if len(value) != self.ntensors: raise ValueError( "'right_inverse' must return a sequence of tensors of length " f"{self.ntensors}. Got a sequence of length {len(value)}." ) for i, tensor in enumerate(value): original_i = getattr(self, f"original{i}") if not isinstance(tensor, Tensor): raise ValueError( f"`right_inverse` must return a sequence of tensors. 
" f"Got element {i} of type {type(tensor).__name__}" ) if original_i.dtype != tensor.dtype: raise ValueError( f"Tensor {i} returned by `right_inverse` has dtype {tensor.dtype} " f"while `original{i}` has dtype {original_i.dtype}" ) _maybe_set(original_i, tensor) def forward(self) -> Tensor: if torch.jit.is_scripting(): raise RuntimeError("Parametrization is not working with scripting.") # Unpack the originals for the first parametrization if self.is_tensor: x = self[0](self.original) else: originals = (getattr(self, f"original{i}") for i in range(self.ntensors)) x = self[0](*originals) # It's not possible to call self[1:] here, so we have to be a bit more cryptic # Also we want to skip all non-integer keys curr_idx = 1 while hasattr(self, str(curr_idx)): x = self[curr_idx](x) curr_idx += 1 return x def _inject_new_class(module: Module) -> None: r"""Set up a module to be parametrized. This works by substituting the class of the module by a class that extends it to be able to inject a property Args: module (nn.Module): module into which to inject the property """ cls = module.__class__ def default_deepcopy(self, memo): # Just emulate a standard deepcopy procedure when __deepcopy__ doesn't exist in the current class. obj = memo.get(id(self), None) if obj is not None: return obj replica = self.__new__(self.__class__) memo[id(self)] = replica replica.__dict__ = deepcopy(self.__dict__, memo) # Also save all slots if they exist. slots_to_save = copyreg._slotnames(self.__class__) # type: ignore[attr-defined] for slot in slots_to_save: if hasattr(self, slot): setattr(replica, slot, deepcopy(getattr(self, slot), memo)) return replica def getstate(self): raise RuntimeError( "Serialization of parametrized modules is only " "supported through state_dict(). See:\n" "https://pytorch.org/tutorials/beginner/saving_loading_models.html" "#saving-loading-a-general-checkpoint-for-inference-and-or-resuming-training" ) dct = {"__getstate__": getstate} # We don't allow serialization of parametrized modules but should still allow deepcopying. # Default 'deepcopy' function invokes __deepcopy__ method instead of __getstate__ when it exists. if not hasattr(cls, "__deepcopy__"): dct["__deepcopy__"] = default_deepcopy # type: ignore[assignment] param_cls = type( f"Parametrized{cls.__name__}", (cls,), dct, ) module.__class__ = param_cls def _inject_property(module: Module, tensor_name: str) -> None: r"""Injects a property into module[tensor_name]. It assumes that the class in the module has already been modified from its original one using _inject_new_class and that the tensor under :attr:`tensor_name` has already been moved out Args: module (nn.Module): module into which to inject the property tensor_name (str): name of the name of the property to create """ # We check the precondition. # This should never fire if register_parametrization is correctly implemented assert not hasattr(module, tensor_name) @torch.jit.unused def get_cached_parametrization(parametrization) -> Tensor: global _cache key = (id(module), tensor_name) tensor = _cache.get(key) if tensor is None: tensor = parametrization() _cache[key] = tensor return tensor def get_parametrized(self) -> Tensor: if torch.jit.is_scripting(): raise RuntimeError("Parametrization is not working with scripting.") parametrization = self.parametrizations[tensor_name] if _cache_enabled: if torch.jit.is_scripting(): # Scripting raise RuntimeError( "Caching is not implemented for scripting. " "Either disable caching or avoid scripting." 
) elif torch._C._get_tracing_state() is not None: # Tracing raise RuntimeError( "Cannot trace a model while caching parametrizations." ) else: return get_cached_parametrization(parametrization) else: # If caching is not active, this function just evaluates the parametrization return parametrization() def set_original(self, value: Tensor) -> None: if torch.jit.is_scripting(): raise RuntimeError("Parametrization is not working with scripting.") self.parametrizations[tensor_name].right_inverse(value) setattr(module.__class__, tensor_name, property(get_parametrized, set_original)) def register_parametrization( module: Module, tensor_name: str, parametrization: Module, *, unsafe: bool = False, ) -> Module: r"""Register a parametrization to a tensor in a module. Assume that ``tensor_name="weight"`` for simplicity. When accessing ``module.weight``, the module will return the parametrized version ``parametrization(module.weight)``. If the original tensor requires a gradient, the backward pass will differentiate through :attr:`parametrization`, and the optimizer will update the tensor accordingly. The first time that a module registers a parametrization, this function will add an attribute ``parametrizations`` to the module of type :class:`~ParametrizationList`. The list of parametrizations on the tensor ``weight`` will be accessible under ``module.parametrizations.weight``. The original tensor will be accessible under ``module.parametrizations.weight.original``. Parametrizations may be concatenated by registering several parametrizations on the same attribute. The training mode of a registered parametrization is updated on registration to match the training mode of the host module Parametrized parameters and buffers have an inbuilt caching system that can be activated using the context manager :func:`cached`. A :attr:`parametrization` may optionally implement a method with signature .. code-block:: python def right_inverse(self, X: Tensor) -> Union[Tensor, Sequence[Tensor]] This method is called on the unparametrized tensor when the first parametrization is registered to compute the initial value of the original tensor. If this method is not implemented, the original tensor will be just the unparametrized tensor. If all the parametrizations registered on a tensor implement `right_inverse` it is possible to initialize a parametrized tensor by assigning to it, as shown in the example below. It is possible for the first parametrization to depend on several inputs. This may be implemented returning a tuple of tensors from ``right_inverse`` (see the example implementation of a ``RankOne`` parametrization below). In this case, the unconstrained tensors are also located under ``module.parametrizations.weight`` with names ``original0``, ``original1``,... .. note:: If unsafe=False (default) both the forward and right_inverse methods will be called once to perform a number of consistency checks. If unsafe=True, then right_inverse will be called if the tensor is not parametrized, and nothing will be called otherwise. .. note:: In most situations, ``right_inverse`` will be a function such that ``forward(right_inverse(X)) == X`` (see `right inverse <https://en.wikipedia.org/wiki/Inverse_function#Right_inverses>`_). Sometimes, when the parametrization is not surjective, it may be reasonable to relax this. .. warning:: If a parametrization depends on several inputs, :func:`~register_parametrization` will register a number of new parameters. 
If such parametrization is registered after the optimizer is created, these new parameters will need to be added manually to the optimizer. See :meth:`torch.Optimizer.add_param_group`. Args: module (nn.Module): module on which to register the parametrization tensor_name (str): name of the parameter or buffer on which to register the parametrization parametrization (nn.Module): the parametrization to register Keyword args: unsafe (bool): a boolean flag that denotes whether the parametrization may change the dtype and shape of the tensor. Default: `False` Warning: the parametrization is not checked for consistency upon registration. Enable this flag at your own risk. Raises: ValueError: if the module does not have a parameter or a buffer named :attr:`tensor_name` Examples: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) >>> import torch >>> import torch.nn as nn >>> import torch.nn.utils.parametrize as P >>> >>> class Symmetric(nn.Module): >>> def forward(self, X): >>> return X.triu() + X.triu(1).T # Return a symmetric matrix >>> >>> def right_inverse(self, A): >>> return A.triu() >>> >>> m = nn.Linear(5, 5) >>> P.register_parametrization(m, "weight", Symmetric()) >>> print(torch.allclose(m.weight, m.weight.T)) # m.weight is now symmetric True >>> A = torch.rand(5, 5) >>> A = A + A.T # A is now symmetric >>> m.weight = A # Initialize the weight to be the symmetric matrix A >>> print(torch.allclose(m.weight, A)) True >>> class RankOne(nn.Module): >>> def forward(self, x, y): >>> # Form a rank 1 matrix multiplying two vectors >>> return x.unsqueeze(-1) @ y.unsqueeze(-2) >>> >>> def right_inverse(self, Z): >>> # Project Z onto the rank 1 matrices >>> U, S, Vh = torch.linalg.svd(Z, full_matrices=False) >>> # Return rescaled singular vectors >>> s0_sqrt = S[0].sqrt().unsqueeze(-1) >>> return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt >>> >>> linear_rank_one = P.register_parametrization(nn.Linear(4, 4), "weight", RankOne()) >>> print(torch.linalg.matrix_rank(linear_rank_one.weight).item()) 1 """ parametrization.train(module.training) if is_parametrized(module, tensor_name): # Correctness checks. # If A is the space of tensors with shape and dtype equal to module.weight # we check that parametrization.forward and parametrization.right_inverse are # functions from A to A if not unsafe: Y = getattr(module, tensor_name) X = parametrization(Y) if not isinstance(X, Tensor): raise ValueError( f"A parametrization must return a tensor. Got {type(X).__name__}." ) if X.dtype != Y.dtype: raise ValueError( "Registering a parametrization may not change the dtype of the tensor, unless the `unsafe` flag is enabled.\n" f"module.{tensor_name}.dtype: {Y.dtype}\n" f"parametrization(module.{tensor_name}).dtype: {X.dtype}" ) if X.shape != Y.shape: raise ValueError( "Registering a parametrization may not change the shape of the tensor, unless the `unsafe` flag is enabled.\n" f"module.{tensor_name}.shape: {Y.shape}\n" f"parametrization(module.{tensor_name}).shape: {X.shape}" ) if hasattr(parametrization, "right_inverse"): try: Z = parametrization.right_inverse(X) # type: ignore[operator] except NotImplementedError: pass else: if not isinstance(Z, Tensor): raise ValueError( f"parametrization.right_inverse must return a tensor. 
Got: {type(Z).__name__}" ) if Z.dtype != Y.dtype: raise ValueError( "The tensor returned by parametrization.right_inverse must have the same dtype " f"as module.{tensor_name}, unless the `unsafe` flag is enabled.\n" f"module.{tensor_name}.dtype: {Y.dtype}\n" f"returned dtype: {Z.dtype}" ) if Z.shape != Y.shape: raise ValueError( "The tensor returned by parametrization.right_inverse must have the same shape " f"as module.{tensor_name}, unless the `unsafe` flag is enabled.\n" f"module.{tensor_name}.shape: {Y.shape}\n" f"returned shape: {Z.shape}" ) # else right_inverse is assumed to be the identity # add the new parametrization to the parametrization list assert isinstance(module.parametrizations, ModuleDict) # Make mypy happy module.parametrizations[tensor_name].append(parametrization) # If unsafe was True in previous parametrization, keep it enabled module.parametrizations[tensor_name].unsafe |= unsafe # type: ignore[index, union-attr] elif tensor_name in module._buffers or tensor_name in module._parameters: # Set the parametrization mechanism # Fetch the original buffer or parameter original = getattr(module, tensor_name) # We create this early to check for possible errors parametrizations = ParametrizationList( [parametrization], original, unsafe=unsafe ) # Delete the previous parameter or buffer delattr(module, tensor_name) # If this is the first parametrization registered on the module, # we prepare the module to inject the property if not is_parametrized(module): # Change the class _inject_new_class(module) # Inject a ``ModuleDict`` into the instance under module.parametrizations module.parametrizations = ModuleDict() # Add a property into the class _inject_property(module, tensor_name) # Add a ParametrizationList assert isinstance(module.parametrizations, ModuleDict) # Make mypy happy module.parametrizations[tensor_name] = parametrizations else: raise ValueError( f"Module '{module}' does not have a parameter, a buffer, or a " f"parametrized element with name '{tensor_name}'" ) return module def is_parametrized(module: Module, tensor_name: Optional[str] = None) -> bool: r"""Determine if a module has a parametrization. Args: module (nn.Module): module to query tensor_name (str, optional): name of the parameter in the module Default: ``None`` Returns: ``True`` if :attr:`module` has a parametrization for the parameter named :attr:`tensor_name`, or if it has any parametrization when :attr:`tensor_name` is ``None``; otherwise ``False`` """ parametrizations = getattr(module, "parametrizations", None) if parametrizations is None or not isinstance(parametrizations, ModuleDict): return False if tensor_name is None: # Check that there is at least one parametrized buffer or Parameter return len(parametrizations) > 0 else: return tensor_name in parametrizations def remove_parametrizations( module: Module, tensor_name: str, leave_parametrized: bool = True, ) -> Module: r"""Remove the parametrizations on a tensor in a module. - If ``leave_parametrized=True``, ``module[tensor_name]`` will be set to its current output. In this case, the parametrization shall not change the ``dtype`` of the tensor. - If ``leave_parametrized=False``, ``module[tensor_name]`` will be set to the unparametrised tensor in ``module.parametrizations[tensor_name].original``. This is only possible when the parametrization depends on just one tensor. 
Args: module (nn.Module): module from which remove the parametrization tensor_name (str): name of the parametrization to be removed leave_parametrized (bool, optional): leave the attribute :attr:`tensor_name` parametrized. Default: ``True`` Returns: Module: module Raises: ValueError: if ``module[tensor_name]`` is not parametrized ValueError: if ``leave_parametrized=False`` and the parametrization depends on several tensors """ if not is_parametrized(module, tensor_name): raise ValueError( f"Module {module} does not have a parametrization on {tensor_name}" ) # Fetch the original tensor assert isinstance(module.parametrizations, ModuleDict) # Make mypy happy parametrizations = module.parametrizations[tensor_name] if parametrizations.is_tensor: original = parametrizations.original if leave_parametrized: with torch.no_grad(): t = getattr(module, tensor_name) # We know they have the same dtype because we have checked this when registering the # parametrizations. As such, we can use set_ # We do this so that the parameter does not to change the id() # This way the user does not need to update the optimizer with torch.no_grad(): if type(original) is torch.Tensor: _maybe_set(original, t) else: try: _maybe_set(original, t) except RuntimeError as e: # TODO: Fix this for tensor subclasses that are parameters: # RuntimeError: set_storage is not allowed on a Tensor created from .data or .detach(). raise RuntimeError( "Calling remove_parametrizations() with leave_parametrized=True " "for a parameter that is an instance of a tensor subclass requires " "set_() to be implemented correctly for the tensor subclass." "Alternatively, one can opt into the swap_tensors path" "Either set leave_parametrized=False or provide a working implementation" "for set_() in the tensor subclass or set " "torch.__future__.set_swap_module_params_on_conversion(True)." ) from e else: if leave_parametrized: # We cannot use no_grad because we need to know whether one or more # original tensors required grad t = getattr(module, tensor_name) # We'll have to trust the user to add it to the optimizer original = Parameter(t) if t.requires_grad else t else: raise ValueError( "Cannot leave unparametrized (`leave_parametrized=False`) a tensor " "that is parametrized in terms of a sequence of tensors." ) # Delete the property that manages the parametrization delattr(module.__class__, tensor_name) # Delete the ParametrizationList del module.parametrizations[tensor_name] # Restore the parameter / buffer into the main class _register_parameter_or_buffer(module, tensor_name, original) # Roll back the parametrized class if no other buffer or parameter # is currently parametrized in this class if not is_parametrized(module): delattr(module, "parametrizations") # Restore class orig_cls = module.__class__.__bases__[0] module.__class__ = orig_cls return module def type_before_parametrizations(module: Module) -> type: r"""Return the module type before parametrizations were applied and if not, then it returns the module type. Args: module (nn.Module): module to get type of """ if is_parametrized(module): return module.__class__.__bases__[0] else: return type(module) def transfer_parametrizations_and_params( from_module: Module, to_module: Module, tensor_name: Optional[str] = None, ) -> Module: r"""Transfer parametrizations and the parameters they parametrize from :attr:`from_module` to :attr:`to_module`. If :attr:`tensor_name` is specified, only transfers the specified parameter, otherwise transfers all parametrized parameters. 
If those parameters do not exist in to_module, it will create them. Does nothing if from_module is not parametrized. Args: from_module (nn.Module): module to transfer from to_module (nn.Module): module to transfer to tensor_name (str, optional): parameter to transfer Returns: Module: to_module """ if is_parametrized(from_module): assert isinstance(from_module.parametrizations, ModuleDict) # for mypy # get list of all params or the single param to transfer parameters_to_transfer: Union[list, ModuleDict] = ( from_module.parametrizations if tensor_name is None else [tensor_name] ) assert hasattr(parameters_to_transfer, "__iter__") # for mypy for parameter_name in parameters_to_transfer: # initialize the to-be-transferred param in to_module if it doesn't exist already if not hasattr(to_module, parameter_name): setattr( to_module, parameter_name, Parameter(getattr(from_module, parameter_name)), ) # apply the params's parametrizations to to_module for param_func in from_module.parametrizations[parameter_name]: register_parametrization(to_module, parameter_name, param_func) assert isinstance(to_module.parametrizations, ModuleDict) # for mypy # make values match, original values can be stored in either original or # original0, original1..., need to check both cases if hasattr(from_module.parametrizations[parameter_name], "original"): to_module.parametrizations[ parameter_name ].original = from_module.parametrizations[parameter_name].original else: num = 0 orig_num = "original" + str(num) # loop through each original# until all values have been set while hasattr(from_module.parametrizations[parameter_name], orig_num): setattr( to_module.parametrizations[parameter_name], orig_num, getattr(from_module.parametrizations[parameter_name], orig_num), ) num = num + 1 orig_num = "original" + str(num) return to_module ```
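To complement the inline doctests in ``parametrize.py``, the sketch below walks the main public API end to end: registering a parametrization that implements ``right_inverse``, assigning to the parametrized tensor, caching, and removal. The ``Exponential`` module and the positivity constraint are illustrative assumptions, not part of the library.

```py
import torch
import torch.nn as nn
import torch.nn.utils.parametrize as P


class Exponential(nn.Module):
    """Constrains the parametrized tensor to be strictly positive."""

    def forward(self, X):
        return torch.exp(X)

    def right_inverse(self, Y):
        # Called at registration time to compute the unconstrained `original`
        # tensor, and again whenever the parametrized tensor is assigned to.
        return torch.log(Y)


m = nn.Linear(3, 3)
with torch.no_grad():
    m.weight.abs_()  # make the initial weight positive so log() is well defined

P.register_parametrization(m, "weight", Exponential())
print(torch.all(m.weight > 0))   # the exposed weight is always positive
print(type(m).__name__)          # ParametrizedLinear

# Assigning to the parametrized tensor goes through right_inverse and updates
# module.parametrizations.weight.original in place.
m.weight = torch.full((3, 3), 2.0)
print(torch.allclose(m.weight, torch.full((3, 3), 2.0)))

# Caching avoids re-evaluating the parametrization within one forward pass.
with P.cached():
    out = m(torch.randn(5, 3))

# Remove the parametrization, keeping the current (constrained) value.
P.remove_parametrizations(m, "weight", leave_parametrized=True)
print(type(m).__name__)          # back to Linear
```

Note that ``register_parametrization`` swaps the instance's class for a dynamically created ``Parametrized*`` subclass (see ``_inject_new_class`` above), which is why the class name changes after registration and is rolled back once the last parametrization is removed.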
=============================================================================================================== SOURCE CODE FILE: prune.py LINES: 1 SIZE: 57.73 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\prune.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs r"""Pruning methods.""" import numbers from abc import ABC, abstractmethod from collections.abc import Iterable import torch class BasePruningMethod(ABC): r"""Abstract base class for creation of new pruning techniques. Provides a skeleton for customization requiring the overriding of methods such as :meth:`compute_mask` and :meth:`apply`. """ _tensor_name: str def __call__(self, module, inputs): r"""Multiply the mask into original tensor and store the result. Multiplies the mask (stored in ``module[name + '_mask']``) into the original tensor (stored in ``module[name + '_orig']``) and stores the result into ``module[name]`` by using :meth:`apply_mask`. Args: module (nn.Module): module containing the tensor to prune inputs: not used. """ setattr(module, self._tensor_name, self.apply_mask(module)) @abstractmethod def compute_mask(self, t, default_mask): r"""Compute and returns a mask for the input tensor ``t``. Starting from a base ``default_mask`` (which should be a mask of ones if the tensor has not been pruned yet), generate a random mask to apply on top of the ``default_mask`` according to the specific pruning method recipe. Args: t (torch.Tensor): tensor representing the importance scores of the parameter to prune. default_mask (torch.Tensor): Base mask from previous pruning iterations, that need to be respected after the new mask is applied. Same dims as ``t``. Returns: mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t`` """ def apply_mask(self, module): r"""Simply handles the multiplication between the parameter being pruned and the generated mask. Fetches the mask and the original tensor from the module and returns the pruned version of the tensor. Args: module (nn.Module): module containing the tensor to prune Returns: pruned_tensor (torch.Tensor): pruned version of the input tensor """ # to carry out the multiplication, the mask needs to have been computed, # so the pruning method must know what tensor it's operating on assert ( self._tensor_name is not None ), f"Module {module} has to be pruned" # this gets set in apply() mask = getattr(module, self._tensor_name + "_mask") orig = getattr(module, self._tensor_name + "_orig") pruned_tensor = mask.to(dtype=orig.dtype) * orig return pruned_tensor @classmethod def apply(cls, module, name, *args, importance_scores=None, **kwargs): r"""Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``module`` on which pruning will act. args: arguments passed on to a subclass of :class:`BasePruningMethod` importance_scores (torch.Tensor): tensor of importance scores (of same shape as module parameter) used to compute mask for pruning. The values in this tensor indicate the importance of the corresponding elements in the parameter being pruned. If unspecified or None, the parameter will be used in its place. 
kwargs: keyword arguments passed on to a subclass of a :class:`BasePruningMethod` """ def _get_composite_method(cls, module, name, *args, **kwargs): # Check if a pruning method has already been applied to # `module[name]`. If so, store that in `old_method`. old_method = None found = 0 # there should technically be only 1 hook with hook.name == name # assert this using `found` hooks_to_remove = [] for k, hook in module._forward_pre_hooks.items(): # if it exists, take existing thing, remove hook, then # go through normal thing if isinstance(hook, BasePruningMethod) and hook._tensor_name == name: old_method = hook hooks_to_remove.append(k) found += 1 assert ( found <= 1 ), f"Avoid adding multiple pruning hooks to the\ same tensor {name} of module {module}. Use a PruningContainer." for k in hooks_to_remove: del module._forward_pre_hooks[k] # Apply the new pruning method, either from scratch or on top of # the previous one. method = cls(*args, **kwargs) # new pruning # Have the pruning method remember what tensor it's been applied to method._tensor_name = name # combine `methods` with `old_method`, if `old_method` exists if old_method is not None: # meaning that there was a hook # if the hook is already a pruning container, just add the # new pruning method to the container if isinstance(old_method, PruningContainer): old_method.add_pruning_method(method) method = old_method # rename old_method --> method # if the hook is simply a single pruning method, create a # container, add the old pruning method and the new one elif isinstance(old_method, BasePruningMethod): container = PruningContainer(old_method) # Have the pruning method remember the name of its tensor # setattr(container, '_tensor_name', name) container.add_pruning_method(method) method = container # rename container --> method return method method = _get_composite_method(cls, module, name, *args, **kwargs) # at this point we have no forward_pre_hooks but we could have an # active reparametrization of the tensor if another pruning method # had been applied (in which case `method` would be a PruningContainer # and not a simple pruning method). # Pruning is to be applied to the module's tensor named `name`, # starting from the state it is found in prior to this iteration of # pruning. The pruning mask is calculated based on importances scores. orig = getattr(module, name) if importance_scores is not None: assert ( importance_scores.shape == orig.shape ), f"importance_scores should have the same shape as parameter {name} of {module}" else: importance_scores = orig # If this is the first time pruning is applied, take care of moving # the original tensor to a new parameter called name + '_orig' and # and deleting the original parameter if not isinstance(method, PruningContainer): # copy `module[name]` to `module[name + '_orig']` module.register_parameter(name + "_orig", orig) # temporarily delete `module[name]` del module._parameters[name] default_mask = torch.ones_like(orig) # temp # If this is not the first time pruning is applied, all of the above # has been done before in a previous pruning iteration, so we're good # to go else: default_mask = ( getattr(module, name + "_mask") .detach() .clone(memory_format=torch.contiguous_format) ) # Use try/except because if anything goes wrong with the mask # computation etc., you'd want to roll back. 
try: # get the final mask, computed according to the specific method mask = method.compute_mask(importance_scores, default_mask=default_mask) # reparameterize by saving mask to `module[name + '_mask']`... module.register_buffer(name + "_mask", mask) # ... and the new pruned tensor to `module[name]` setattr(module, name, method.apply_mask(module)) # associate the pruning method to the module via a hook to # compute the function before every forward() (compile by run) module.register_forward_pre_hook(method) except Exception as e: if not isinstance(method, PruningContainer): orig = getattr(module, name + "_orig") module.register_parameter(name, orig) del module._parameters[name + "_orig"] raise e return method def prune(self, t, default_mask=None, importance_scores=None): r"""Compute and returns a pruned version of input tensor ``t``. According to the pruning rule specified in :meth:`compute_mask`. Args: t (torch.Tensor): tensor to prune (of same dimensions as ``default_mask``). importance_scores (torch.Tensor): tensor of importance scores (of same shape as ``t``) used to compute mask for pruning ``t``. The values in this tensor indicate the importance of the corresponding elements in the ``t`` that is being pruned. If unspecified or None, the tensor ``t`` will be used in its place. default_mask (torch.Tensor, optional): mask from previous pruning iteration, if any. To be considered when determining what portion of the tensor that pruning should act on. If None, default to a mask of ones. Returns: pruned version of tensor ``t``. """ if importance_scores is not None: assert ( importance_scores.shape == t.shape ), "importance_scores should have the same shape as tensor t" else: importance_scores = t default_mask = default_mask if default_mask is not None else torch.ones_like(t) return t * self.compute_mask(importance_scores, default_mask=default_mask) def remove(self, module): r"""Remove the pruning reparameterization from a module. The pruned parameter named ``name`` remains permanently pruned, and the parameter named ``name+'_orig'`` is removed from the parameter list. Similarly, the buffer named ``name+'_mask'`` is removed from the buffers. Note: Pruning itself is NOT undone or reversed! """ # before removing pruning from a tensor, it has to have been applied assert ( self._tensor_name is not None ), f"Module {module} has to be pruned before pruning can be removed" # this gets set in apply() # to update module[name] to latest trained weights weight = self.apply_mask(module) # masked weights # delete and reset if hasattr(module, self._tensor_name): delattr(module, self._tensor_name) orig = module._parameters[self._tensor_name + "_orig"] orig.data = weight.data del module._parameters[self._tensor_name + "_orig"] del module._buffers[self._tensor_name + "_mask"] setattr(module, self._tensor_name, orig) class PruningContainer(BasePruningMethod): """Container holding a sequence of pruning methods for iterative pruning. Keeps track of the order in which pruning methods are applied and handles combining successive pruning calls. Accepts as argument an instance of a BasePruningMethod or an iterable of them. """ def __init__(self, *args): self._pruning_methods: tuple[BasePruningMethod, ...] 
= () if not isinstance(args, Iterable): # only 1 item self._tensor_name = args._tensor_name self.add_pruning_method(args) elif len(args) == 1: # only 1 item in a tuple self._tensor_name = args[0]._tensor_name self.add_pruning_method(args[0]) else: # manual construction from list or other iterable (or no args) for method in args: self.add_pruning_method(method) def add_pruning_method(self, method): r"""Add a child pruning ``method`` to the container. Args: method (subclass of BasePruningMethod): child pruning method to be added to the container. """ # check that we're adding a pruning method to the container if not isinstance(method, BasePruningMethod) and method is not None: raise TypeError(f"{type(method)} is not a BasePruningMethod subclass") elif method is not None and self._tensor_name != method._tensor_name: raise ValueError( "Can only add pruning methods acting on " f"the parameter named '{self._tensor_name}' to PruningContainer {self}." + f" Found '{method._tensor_name}'" ) # if all checks passed, add to _pruning_methods tuple self._pruning_methods += (method,) # type: ignore[operator] def __len__(self): return len(self._pruning_methods) def __iter__(self): return iter(self._pruning_methods) def __getitem__(self, idx): return self._pruning_methods[idx] def compute_mask(self, t, default_mask): r"""Apply the latest ``method`` by computing the new partial masks and returning its combination with the ``default_mask``. The new partial mask should be computed on the entries or channels that were not zeroed out by the ``default_mask``. Which portions of the tensor ``t`` the new mask will be calculated from depends on the ``PRUNING_TYPE`` (handled by the type handler): * for 'unstructured', the mask will be computed from the raveled list of nonmasked entries; * for 'structured', the mask will be computed from the nonmasked channels in the tensor; * for 'global', the mask will be computed across all entries. Args: t (torch.Tensor): tensor representing the parameter to prune (of same dimensions as ``default_mask``). default_mask (torch.Tensor): mask from previous pruning iteration. Returns: mask (torch.Tensor): new mask that combines the effects of the ``default_mask`` and the new mask from the current pruning ``method`` (of same dimensions as ``default_mask`` and ``t``). """ def _combine_masks(method, t, mask): r"""Combine the masks from all pruning methods and returns a new mask. Args: method (a BasePruningMethod subclass): pruning method currently being applied. t (torch.Tensor): tensor representing the parameter to prune (of same dimensions as mask). mask (torch.Tensor): mask from previous pruning iteration Returns: new_mask (torch.Tensor): new mask that combines the effects of the old mask and the new mask from the current pruning method (of same dimensions as mask and t). """ new_mask = mask # start off from existing mask new_mask = new_mask.to(dtype=t.dtype) # compute a slice of t onto which the new pruning method will operate if method.PRUNING_TYPE == "unstructured": # prune entries of t where the mask is 1 slc = mask == 1 # for struct pruning, exclude channels that have already been # entirely pruned elif method.PRUNING_TYPE == "structured": if not hasattr(method, "dim"): raise AttributeError( "Pruning methods of PRUNING_TYPE " '"structured" need to have the attribute `dim` defined.' ) # find the channels to keep by removing the ones that have been # zeroed out already (i.e. where sum(entries) == 0) n_dims = t.dim() # "is this a 2D tensor? 3D? ..." 
dim = method.dim # convert negative indexing if dim < 0: dim = n_dims + dim # if dim is still negative after subtracting it from n_dims if dim < 0: raise IndexError( f"Index is out of bounds for tensor with dimensions {n_dims}" ) # find channels along dim = dim that aren't already tots 0ed out keep_channel = mask.sum(dim=[d for d in range(n_dims) if d != dim]) != 0 # create slice to identify what to prune slc = [slice(None)] * n_dims slc[dim] = keep_channel elif method.PRUNING_TYPE == "global": n_dims = len(t.shape) # "is this a 2D tensor? 3D? ..." slc = [slice(None)] * n_dims else: raise ValueError(f"Unrecognized PRUNING_TYPE {method.PRUNING_TYPE}") # compute the new mask on the unpruned slice of the tensor t partial_mask = method.compute_mask(t[slc], default_mask=mask[slc]) new_mask[slc] = partial_mask.to(dtype=new_mask.dtype) return new_mask method = self._pruning_methods[-1] mask = _combine_masks(method, t, default_mask) return mask class Identity(BasePruningMethod): r"""Utility pruning method that does not prune any units but generates the pruning parametrization with a mask of ones.""" PRUNING_TYPE = "unstructured" def compute_mask(self, t, default_mask): mask = default_mask return mask @classmethod def apply(cls, module, name): r"""Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``module`` on which pruning will act. """ return super().apply(module, name) class RandomUnstructured(BasePruningMethod): r"""Prune (currently unpruned) units in a tensor at random. Args: name (str): parameter name within ``module`` on which pruning will act. amount (int or float): quantity of parameters to prune. If ``float``, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If ``int``, it represents the absolute number of parameters to prune. """ PRUNING_TYPE = "unstructured" def __init__(self, amount): # Check range of validity of pruning amount _validate_pruning_amount_init(amount) self.amount = amount def compute_mask(self, t, default_mask): # Check that the amount of units to prune is not > than the number of # parameters in t tensor_size = t.nelement() # Compute number of units to prune: amount if int, # else amount * tensor_size nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size) # This should raise an error if the number of units to prune is larger # than the number of units in the tensor _validate_pruning_amount(nparams_toprune, tensor_size) mask = default_mask.clone(memory_format=torch.contiguous_format) if nparams_toprune != 0: # k=0 not supported by torch.kthvalue prob = torch.rand_like(t) topk = torch.topk(prob.view(-1), k=nparams_toprune) mask.view(-1)[topk.indices] = 0 return mask @classmethod def apply(cls, module, name, amount): r"""Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``module`` on which pruning will act. amount (int or float): quantity of parameters to prune. If ``float``, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If ``int``, it represents the absolute number of parameters to prune. 
""" return super().apply(module, name, amount=amount) class L1Unstructured(BasePruningMethod): r"""Prune (currently unpruned) units in a tensor by zeroing out the ones with the lowest L1-norm. Args: amount (int or float): quantity of parameters to prune. If ``float``, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If ``int``, it represents the absolute number of parameters to prune. """ PRUNING_TYPE = "unstructured" def __init__(self, amount): # Check range of validity of pruning amount _validate_pruning_amount_init(amount) self.amount = amount def compute_mask(self, t, default_mask): # Check that the amount of units to prune is not > than the number of # parameters in t tensor_size = t.nelement() # Compute number of units to prune: amount if int, # else amount * tensor_size nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size) # This should raise an error if the number of units to prune is larger # than the number of units in the tensor _validate_pruning_amount(nparams_toprune, tensor_size) mask = default_mask.clone(memory_format=torch.contiguous_format) if nparams_toprune != 0: # k=0 not supported by torch.kthvalue # largest=True --> top k; largest=False --> bottom k # Prune the smallest k topk = torch.topk(torch.abs(t).view(-1), k=nparams_toprune, largest=False) # topk will have .indices and .values mask.view(-1)[topk.indices] = 0 return mask @classmethod def apply(cls, module, name, amount, importance_scores=None): r"""Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``module`` on which pruning will act. amount (int or float): quantity of parameters to prune. If ``float``, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If ``int``, it represents the absolute number of parameters to prune. importance_scores (torch.Tensor): tensor of importance scores (of same shape as module parameter) used to compute mask for pruning. The values in this tensor indicate the importance of the corresponding elements in the parameter being pruned. If unspecified or None, the module parameter will be used in its place. """ return super().apply( module, name, amount=amount, importance_scores=importance_scores ) class RandomStructured(BasePruningMethod): r"""Prune entire (currently unpruned) channels in a tensor at random. Args: amount (int or float): quantity of parameters to prune. If ``float``, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If ``int``, it represents the absolute number of parameters to prune. dim (int, optional): index of the dim along which we define channels to prune. Default: -1. """ PRUNING_TYPE = "structured" def __init__(self, amount, dim=-1): # Check range of validity of amount _validate_pruning_amount_init(amount) self.amount = amount self.dim = dim def compute_mask(self, t, default_mask): r"""Compute and returns a mask for the input tensor ``t``. Starting from a base ``default_mask`` (which should be a mask of ones if the tensor has not been pruned yet), generate a random mask to apply on top of the ``default_mask`` by randomly zeroing out channels along the specified dim of the tensor. 
Args: t (torch.Tensor): tensor representing the parameter to prune default_mask (torch.Tensor): Base mask from previous pruning iterations, that need to be respected after the new mask is applied. Same dims as ``t``. Returns: mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t`` Raises: IndexError: if ``self.dim >= len(t.shape)`` """ # Check that tensor has structure (i.e. more than 1 dimension) such # that the concept of "channels" makes sense _validate_structured_pruning(t) # Check that self.dim is a valid dim to index t, else raise IndexError _validate_pruning_dim(t, self.dim) # Check that the amount of channels to prune is not > than the number of # channels in t along the dim to prune tensor_size = t.shape[self.dim] # Compute number of units to prune: amount if int, # else amount * tensor_size nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size) # This should raise an error if the number of units to prune is larger # than the number of units in the tensor _validate_pruning_amount(nparams_toprune, tensor_size) # Compute binary mask by initializing it to all 0s and then filling in # 1s wherever topk.indices indicates, along self.dim. # mask has the same shape as tensor t def make_mask(t, dim, nchannels, nchannels_toprune): # generate a random number in [0, 1] to associate to each channel prob = torch.rand(nchannels) # generate mask for each channel by 0ing out the channels that # got assigned the k = nchannels_toprune lowest values in prob threshold = torch.kthvalue(prob, k=nchannels_toprune).values channel_mask = prob > threshold mask = torch.zeros_like(t) slc = [slice(None)] * len(t.shape) slc[dim] = channel_mask mask[slc] = 1 return mask if nparams_toprune == 0: # k=0 not supported by torch.kthvalue mask = default_mask else: # apply the new structured mask on top of prior (potentially # unstructured) mask mask = make_mask(t, self.dim, tensor_size, nparams_toprune) mask *= default_mask.to(dtype=mask.dtype) return mask @classmethod def apply(cls, module, name, amount, dim=-1): r"""Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``module`` on which pruning will act. amount (int or float): quantity of parameters to prune. If ``float``, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If ``int``, it represents the absolute number of parameters to prune. dim (int, optional): index of the dim along which we define channels to prune. Default: -1. """ return super().apply(module, name, amount=amount, dim=dim) class LnStructured(BasePruningMethod): r"""Prune entire (currently unpruned) channels in a tensor based on their L\ ``n``-norm. Args: amount (int or float): quantity of channels to prune. If ``float``, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If ``int``, it represents the absolute number of parameters to prune. n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid entries for argument ``p`` in :func:`torch.norm`. dim (int, optional): index of the dim along which we define channels to prune. Default: -1. 
""" PRUNING_TYPE = "structured" def __init__(self, amount, n, dim=-1): # Check range of validity of amount _validate_pruning_amount_init(amount) self.amount = amount self.n = n self.dim = dim def compute_mask(self, t, default_mask): r"""Compute and returns a mask for the input tensor ``t``. Starting from a base ``default_mask`` (which should be a mask of ones if the tensor has not been pruned yet), generate a mask to apply on top of the ``default_mask`` by zeroing out the channels along the specified dim with the lowest L\ ``n``-norm. Args: t (torch.Tensor): tensor representing the parameter to prune default_mask (torch.Tensor): Base mask from previous pruning iterations, that need to be respected after the new mask is applied. Same dims as ``t``. Returns: mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t`` Raises: IndexError: if ``self.dim >= len(t.shape)`` """ # Check that tensor has structure (i.e. more than 1 dimension) such # that the concept of "channels" makes sense _validate_structured_pruning(t) # Check that self.dim is a valid dim to index t, else raise IndexError _validate_pruning_dim(t, self.dim) # Check that the amount of channels to prune is not > than the number of # channels in t along the dim to prune tensor_size = t.shape[self.dim] # Compute number of units to prune: amount if int, # else amount * tensor_size nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size) nparams_tokeep = tensor_size - nparams_toprune # This should raise an error if the number of units to prune is larger # than the number of units in the tensor _validate_pruning_amount(nparams_toprune, tensor_size) # Structured pruning prunes entire channels so we need to know the # L_n norm along each channel to then find the topk based on this # metric norm = _compute_norm(t, self.n, self.dim) # largest=True --> top k; largest=False --> bottom k # Keep the largest k channels along dim=self.dim topk = torch.topk(norm, k=nparams_tokeep, largest=True) # topk will have .indices and .values # Compute binary mask by initializing it to all 0s and then filling in # 1s wherever topk.indices indicates, along self.dim. # mask has the same shape as tensor t def make_mask(t, dim, indices): # init mask to 0 mask = torch.zeros_like(t) # e.g.: slc = [None, None, None], if len(t.shape) = 3 slc = [slice(None)] * len(t.shape) # replace a None at position=dim with indices # e.g.: slc = [None, None, [0, 2, 3]] if dim=2 & indices=[0,2,3] slc[dim] = indices # use slc to slice mask and replace all its entries with 1s # e.g.: mask[:, :, [0, 2, 3]] = 1 mask[slc] = 1 return mask if nparams_toprune == 0: # k=0 not supported by torch.kthvalue mask = default_mask else: mask = make_mask(t, self.dim, topk.indices) mask *= default_mask.to(dtype=mask.dtype) return mask @classmethod def apply(cls, module, name, amount, n, dim, importance_scores=None): r"""Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``module`` on which pruning will act. amount (int or float): quantity of parameters to prune. If ``float``, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If ``int``, it represents the absolute number of parameters to prune. 
n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid entries for argument ``p`` in :func:`torch.norm`. dim (int): index of the dim along which we define channels to prune. importance_scores (torch.Tensor): tensor of importance scores (of same shape as module parameter) used to compute mask for pruning. The values in this tensor indicate the importance of the corresponding elements in the parameter being pruned. If unspecified or None, the module parameter will be used in its place. """ return super().apply( module, name, amount=amount, n=n, dim=dim, importance_scores=importance_scores, ) class CustomFromMask(BasePruningMethod): PRUNING_TYPE = "global" def __init__(self, mask): self.mask = mask def compute_mask(self, t, default_mask): assert default_mask.shape == self.mask.shape mask = default_mask * self.mask.to(dtype=default_mask.dtype) return mask @classmethod def apply(cls, module, name, mask): r"""Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``module`` on which pruning will act. """ return super().apply(module, name, mask=mask) def identity(module, name): r"""Apply pruning reparametrization without pruning any units. Applies pruning reparametrization to the tensor corresponding to the parameter called ``name`` in ``module`` without actually pruning any units. Modifies module in place (and also return the modified module) by: 1) adding a named buffer called ``name+'_mask'`` corresponding to the binary mask applied to the parameter ``name`` by the pruning method. 2) replacing the parameter ``name`` by its pruned version, while the original (unpruned) parameter is stored in a new parameter named ``name+'_orig'``. Note: The mask is a tensor of ones. Args: module (nn.Module): module containing the tensor to prune. name (str): parameter name within ``module`` on which pruning will act. Returns: module (nn.Module): modified (i.e. pruned) version of the input module Examples: >>> # xdoctest: +SKIP >>> m = prune.identity(nn.Linear(2, 3), 'bias') >>> print(m.bias_mask) tensor([1., 1., 1.]) """ Identity.apply(module, name) return module def random_unstructured(module, name, amount): r"""Prune tensor by removing random (currently unpruned) units. Prunes tensor corresponding to parameter called ``name`` in ``module`` by removing the specified ``amount`` of (currently unpruned) units selected at random. Modifies module in place (and also return the modified module) by: 1) adding a named buffer called ``name+'_mask'`` corresponding to the binary mask applied to the parameter ``name`` by the pruning method. 2) replacing the parameter ``name`` by its pruned version, while the original (unpruned) parameter is stored in a new parameter named ``name+'_orig'``. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``module`` on which pruning will act. amount (int or float): quantity of parameters to prune. If ``float``, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If ``int``, it represents the absolute number of parameters to prune. Returns: module (nn.Module): modified (i.e. 
pruned) version of the input module Examples: >>> # xdoctest: +SKIP >>> m = prune.random_unstructured(nn.Linear(2, 3), 'weight', amount=1) >>> torch.sum(m.weight_mask == 0) tensor(1) """ RandomUnstructured.apply(module, name, amount) return module def l1_unstructured(module, name, amount, importance_scores=None): r"""Prune tensor by removing units with the lowest L1-norm. Prunes tensor corresponding to parameter called ``name`` in ``module`` by removing the specified `amount` of (currently unpruned) units with the lowest L1-norm. Modifies module in place (and also return the modified module) by: 1) adding a named buffer called ``name+'_mask'`` corresponding to the binary mask applied to the parameter ``name`` by the pruning method. 2) replacing the parameter ``name`` by its pruned version, while the original (unpruned) parameter is stored in a new parameter named ``name+'_orig'``. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``module`` on which pruning will act. amount (int or float): quantity of parameters to prune. If ``float``, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If ``int``, it represents the absolute number of parameters to prune. importance_scores (torch.Tensor): tensor of importance scores (of same shape as module parameter) used to compute mask for pruning. The values in this tensor indicate the importance of the corresponding elements in the parameter being pruned. If unspecified or None, the module parameter will be used in its place. Returns: module (nn.Module): modified (i.e. pruned) version of the input module Examples: >>> # xdoctest: +SKIP >>> m = prune.l1_unstructured(nn.Linear(2, 3), 'weight', amount=0.2) >>> m.state_dict().keys() odict_keys(['bias', 'weight_orig', 'weight_mask']) """ L1Unstructured.apply( module, name, amount=amount, importance_scores=importance_scores ) return module def random_structured(module, name, amount, dim): r"""Prune tensor by removing random channels along the specified dimension. Prunes tensor corresponding to parameter called ``name`` in ``module`` by removing the specified ``amount`` of (currently unpruned) channels along the specified ``dim`` selected at random. Modifies module in place (and also return the modified module) by: 1) adding a named buffer called ``name+'_mask'`` corresponding to the binary mask applied to the parameter ``name`` by the pruning method. 2) replacing the parameter ``name`` by its pruned version, while the original (unpruned) parameter is stored in a new parameter named ``name+'_orig'``. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``module`` on which pruning will act. amount (int or float): quantity of parameters to prune. If ``float``, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If ``int``, it represents the absolute number of parameters to prune. dim (int): index of the dim along which we define channels to prune. Returns: module (nn.Module): modified (i.e. pruned) version of the input module Examples: >>> # xdoctest: +SKIP >>> m = prune.random_structured( ... nn.Linear(5, 3), 'weight', amount=3, dim=1 ... 
) >>> columns_pruned = int(sum(torch.sum(m.weight, dim=0) == 0)) >>> print(columns_pruned) 3 """ RandomStructured.apply(module, name, amount, dim) return module def ln_structured(module, name, amount, n, dim, importance_scores=None): r"""Prune tensor by removing channels with the lowest L\ ``n``-norm along the specified dimension. Prunes tensor corresponding to parameter called ``name`` in ``module`` by removing the specified ``amount`` of (currently unpruned) channels along the specified ``dim`` with the lowest L\ ``n``-norm. Modifies module in place (and also return the modified module) by: 1) adding a named buffer called ``name+'_mask'`` corresponding to the binary mask applied to the parameter ``name`` by the pruning method. 2) replacing the parameter ``name`` by its pruned version, while the original (unpruned) parameter is stored in a new parameter named ``name+'_orig'``. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``module`` on which pruning will act. amount (int or float): quantity of parameters to prune. If ``float``, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If ``int``, it represents the absolute number of parameters to prune. n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid entries for argument ``p`` in :func:`torch.norm`. dim (int): index of the dim along which we define channels to prune. importance_scores (torch.Tensor): tensor of importance scores (of same shape as module parameter) used to compute mask for pruning. The values in this tensor indicate the importance of the corresponding elements in the parameter being pruned. If unspecified or None, the module parameter will be used in its place. Returns: module (nn.Module): modified (i.e. pruned) version of the input module Examples: >>> from torch.nn.utils import prune >>> m = prune.ln_structured( ... nn.Conv2d(5, 3, 2), 'weight', amount=0.3, dim=1, n=float('-inf') ... ) """ LnStructured.apply( module, name, amount, n, dim, importance_scores=importance_scores ) return module def global_unstructured(parameters, pruning_method, importance_scores=None, **kwargs): r""" Globally prunes tensors corresponding to all parameters in ``parameters`` by applying the specified ``pruning_method``. Modifies modules in place by: 1) adding a named buffer called ``name+'_mask'`` corresponding to the binary mask applied to the parameter ``name`` by the pruning method. 2) replacing the parameter ``name`` by its pruned version, while the original (unpruned) parameter is stored in a new parameter named ``name+'_orig'``. Args: parameters (Iterable of (module, name) tuples): parameters of the model to prune in a global fashion, i.e. by aggregating all weights prior to deciding which ones to prune. module must be of type :class:`nn.Module`, and name must be a string. pruning_method (function): a valid pruning function from this module, or a custom one implemented by the user that satisfies the implementation guidelines and has ``PRUNING_TYPE='unstructured'``. importance_scores (dict): a dictionary mapping (module, name) tuples to the corresponding parameter's importance scores tensor. The tensor should be the same shape as the parameter, and is used for computing mask for pruning. If unspecified or None, the parameter will be used in place of its importance scores. kwargs: other keyword arguments such as: amount (int or float): quantity of parameters to prune across the specified parameters. 
If ``float``, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If ``int``, it represents the absolute number of parameters to prune. Raises: TypeError: if ``PRUNING_TYPE != 'unstructured'`` Note: Since global structured pruning doesn't make much sense unless the norm is normalized by the size of the parameter, we now limit the scope of global pruning to unstructured methods. Examples: >>> from torch.nn.utils import prune >>> from collections import OrderedDict >>> net = nn.Sequential(OrderedDict([ ... ('first', nn.Linear(10, 4)), ... ('second', nn.Linear(4, 1)), ... ])) >>> parameters_to_prune = ( ... (net.first, 'weight'), ... (net.second, 'weight'), ... ) >>> prune.global_unstructured( ... parameters_to_prune, ... pruning_method=prune.L1Unstructured, ... amount=10, ... ) >>> print(sum(torch.nn.utils.parameters_to_vector(net.buffers()) == 0)) tensor(10) """ # ensure parameters is a list or generator of tuples if not isinstance(parameters, Iterable): raise TypeError("global_unstructured(): parameters is not an Iterable") importance_scores = importance_scores if importance_scores is not None else {} if not isinstance(importance_scores, dict): raise TypeError("global_unstructured(): importance_scores must be of type dict") # flatten importance scores to consider them all at once in global pruning relevant_importance_scores = torch.nn.utils.parameters_to_vector( [ importance_scores.get((module, name), getattr(module, name)) for (module, name) in parameters ] ) # similarly, flatten the masks (if they exist), or use a flattened vector # of 1s of the same dimensions as t default_mask = torch.nn.utils.parameters_to_vector( [ getattr(module, name + "_mask", torch.ones_like(getattr(module, name))) for (module, name) in parameters ] ) # use the canonical pruning methods to compute the new mask, even if the # parameter is now a flattened out version of `parameters` container = PruningContainer() container._tensor_name = "temp" # to make it match that of `method` method = pruning_method(**kwargs) method._tensor_name = "temp" # to make it match that of `container` if method.PRUNING_TYPE != "unstructured": raise TypeError( 'Only "unstructured" PRUNING_TYPE supported for ' f"the `pruning_method`. Found method {pruning_method} of type {method.PRUNING_TYPE}" ) container.add_pruning_method(method) # use the `compute_mask` method from `PruningContainer` to combine the # mask computed by the new method with the pre-existing mask final_mask = container.compute_mask(relevant_importance_scores, default_mask) # Pointer for slicing the mask to match the shape of each parameter pointer = 0 for module, name in parameters: param = getattr(module, name) # The length of the parameter num_param = param.numel() # Slice the mask, reshape it param_mask = final_mask[pointer : pointer + num_param].view_as(param) # Assign the correct pre-computed mask to each parameter and add it # to the forward_pre_hooks like any other pruning method custom_from_mask(module, name, mask=param_mask) # Increment the pointer to continue slicing the final_mask pointer += num_param def custom_from_mask(module, name, mask): r"""Prune tensor corresponding to parameter called ``name`` in ``module`` by applying the pre-computed mask in ``mask``. Modifies module in place (and also return the modified module) by: 1) adding a named buffer called ``name+'_mask'`` corresponding to the binary mask applied to the parameter ``name`` by the pruning method. 
2) replacing the parameter ``name`` by its pruned version, while the original (unpruned) parameter is stored in a new parameter named ``name+'_orig'``. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``module`` on which pruning will act. mask (Tensor): binary mask to be applied to the parameter. Returns: module (nn.Module): modified (i.e. pruned) version of the input module Examples: >>> from torch.nn.utils import prune >>> m = prune.custom_from_mask( ... nn.Linear(5, 3), name='bias', mask=torch.tensor([0, 1, 0]) ... ) >>> print(m.bias_mask) tensor([0., 1., 0.]) """ CustomFromMask.apply(module, name, mask) return module def remove(module, name): r"""Remove the pruning reparameterization from a module and the pruning method from the forward hook. The pruned parameter named ``name`` remains permanently pruned, and the parameter named ``name+'_orig'`` is removed from the parameter list. Similarly, the buffer named ``name+'_mask'`` is removed from the buffers. Note: Pruning itself is NOT undone or reversed! Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``module`` on which pruning will act. Examples: >>> m = random_unstructured(nn.Linear(5, 7), name='weight', amount=0.2) >>> m = remove(m, name='weight') """ for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, BasePruningMethod) and hook._tensor_name == name: hook.remove(module) del module._forward_pre_hooks[k] return module raise ValueError( f"Parameter '{name}' of module {module} has to be pruned before pruning can be removed" ) def is_pruned(module): r"""Check if a module is pruned by looking for pruning pre-hooks. Check whether ``module`` is pruned by looking for ``forward_pre_hooks`` in its modules that inherit from the :class:`BasePruningMethod`. Args: module (nn.Module): object that is either pruned or unpruned Returns: binary answer to whether ``module`` is pruned. Examples: >>> from torch.nn.utils import prune >>> m = nn.Linear(5, 7) >>> print(prune.is_pruned(m)) False >>> prune.random_unstructured(m, name='weight', amount=0.2) >>> print(prune.is_pruned(m)) True """ for _, submodule in module.named_modules(): for hook in submodule._forward_pre_hooks.values(): if isinstance(hook, BasePruningMethod): return True return False def _validate_pruning_amount_init(amount): r"""Validate helper to check the range of amount at init. Args: amount (int or float): quantity of parameters to prune. If float, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If int, it represents the absolute number of parameters to prune. Raises: ValueError: if amount is a float not in [0, 1], or if it's a negative integer. TypeError: if amount is neither a float nor an integer. Note: This does not take into account the number of parameters in the tensor to be pruned, which is known only at prune. """ if not isinstance(amount, numbers.Real): raise TypeError(f"Invalid type for amount: {amount}. Must be int or float.") if (isinstance(amount, numbers.Integral) and amount < 0) or ( not isinstance(amount, numbers.Integral) # so it's a float and (float(amount) > 1.0 or float(amount) < 0.0) ): raise ValueError( f"amount={amount} should either be a float in the range [0, 1] or a non-negative integer" ) def _validate_pruning_amount(amount, tensor_size): r"""Validate that the pruning amount is meaningful wrt to the size of the data. 
Validation helper to check that the amount of parameters to prune is meaningful wrt to the size of the data (`tensor_size`). Args: amount (int or float): quantity of parameters to prune. If float, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If int, it represents the absolute number of parameters to prune. tensor_size (int): absolute number of parameters in the tensor to prune. """ # TODO: consider removing this check and allowing users to specify # a number of units to prune that is greater than the number of units # left to prune. In this case, the tensor will just be fully pruned. if isinstance(amount, numbers.Integral) and amount > tensor_size: raise ValueError( f"amount={amount} should be smaller than the number of parameters to prune={tensor_size}" ) def _validate_structured_pruning(t): r"""Validate that the tensor to be pruned is at least 2-Dimensional. Validation helper to check that the tensor to be pruned is multi- dimensional, such that the concept of "channels" is well-defined. Args: t (torch.Tensor): tensor representing the parameter to prune Raises: ValueError: if the tensor `t` is not at least 2D. """ shape = t.shape if len(shape) <= 1: raise ValueError( "Structured pruning can only be applied to " "multidimensional tensors. Found tensor of shape " f"{shape} with {len(shape)} dims" ) def _compute_nparams_toprune(amount, tensor_size): r"""Convert the pruning amount from a percentage to absolute value. Since amount can be expressed either in absolute value or as a percentage of the number of units/channels in a tensor, this utility function converts the percentage to absolute value to standardize the handling of pruning. Args: amount (int or float): quantity of parameters to prune. If float, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If int, it represents the absolute number of parameters to prune. tensor_size (int): absolute number of parameters in the tensor to prune. Returns: int: the number of units to prune in the tensor """ # incorrect type already checked in _validate_pruning_amount_init if isinstance(amount, numbers.Integral): return amount else: return round(amount * tensor_size) def _validate_pruning_dim(t, dim): r"""Validate that the pruning dimension is within the bounds of the tensor dimension. Args: t (torch.Tensor): tensor representing the parameter to prune dim (int): index of the dim along which we define channels to prune """ if dim >= t.dim(): raise IndexError(f"Invalid index {dim} for tensor of size {t.shape}") def _compute_norm(t, n, dim): r"""Compute the L_n-norm of a tensor along all dimensions except for the specified dimension. The L_n-norm will be computed across all entries in tensor `t` along all dimension except for the one identified by dim. Example: if `t` is of shape, say, 3x2x4 and dim=2 (the last dim), then norm will have Size [4], and each entry will represent the `L_n`-norm computed using the 3x2=6 entries for each of the 4 channels. Args: t (torch.Tensor): tensor representing the parameter to prune n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid entries for argument p in torch.norm dim (int): dim identifying the channels to prune Returns: norm (torch.Tensor): L_n norm computed across all dimensions except for `dim`. By construction, `norm.shape = t.shape[-1]`. 
""" # dims = all axes, except for the one identified by `dim` dims = list(range(t.dim())) # convert negative indexing if dim < 0: dim = dims[dim] dims.remove(dim) norm = torch.norm(t, p=n, dim=dims) return norm ```
============================================================================================================= SOURCE CODE FILE: rnn.py LINES: 1 SIZE: 23.21 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\rnn.py ENCODING: utf-8 ```py import warnings from collections.abc import Iterable from typing import Any, Callable, NamedTuple, Optional, overload, TypeVar, Union from typing_extensions import Self import torch from torch import _VF, Tensor __all__ = [ "PackedSequence", "invert_permutation", "pack_padded_sequence", "pad_packed_sequence", "pad_sequence", "unpad_sequence", "pack_sequence", "unpack_sequence", ] _T = TypeVar("_T") _R = TypeVar("_R") class PackedSequence_(NamedTuple): data: torch.Tensor batch_sizes: torch.Tensor sorted_indices: Optional[torch.Tensor] unsorted_indices: Optional[torch.Tensor] def bind(optional: Optional[_T], fn: Callable[[_T], _R]) -> Optional[_R]: if optional is None: return None return fn(optional) class PackedSequence(PackedSequence_): r"""Holds the data and list of :attr:`batch_sizes` of a packed sequence. All RNN modules accept packed sequences as inputs. Note: Instances of this class should never be created manually. They are meant to be instantiated by functions like :func:`pack_padded_sequence`. Batch sizes represent the number elements at each sequence step in the batch, not the varying sequence lengths passed to :func:`pack_padded_sequence`. For instance, given data ``abc`` and ``x`` the :class:`PackedSequence` would contain data ``axbc`` with ``batch_sizes=[2,1,1]``. Attributes: data (Tensor): Tensor containing packed sequence batch_sizes (Tensor): Tensor of integers holding information about the batch size at each sequence step sorted_indices (Tensor, optional): Tensor of integers holding how this :class:`PackedSequence` is constructed from sequences. unsorted_indices (Tensor, optional): Tensor of integers holding how this to recover the original sequences with correct order. .. note:: :attr:`data` can be on arbitrary device and of arbitrary dtype. :attr:`sorted_indices` and :attr:`unsorted_indices` must be ``torch.int64`` tensors on the same device as :attr:`data`. However, :attr:`batch_sizes` should always be a CPU ``torch.int64`` tensor. This invariant is maintained throughout :class:`PackedSequence` class, and all functions that construct a :class:`PackedSequence` in PyTorch (i.e., they only pass in tensors conforming to this constraint). """ def __new__( cls, data: Tensor, batch_sizes: Optional[Tensor] = None, sorted_indices: Optional[Tensor] = None, unsorted_indices: Optional[Tensor] = None, ) -> Self: return super().__new__( cls, *_packed_sequence_init_args( data, batch_sizes, sorted_indices, unsorted_indices ), ) # NOTE [ device and dtype of a PackedSequence ] # # See the note above in doc string (starting with ":attr:`data` can be on # arbitrary device..."). def pin_memory(self) -> Self: # Why not convert `batch_sizes`? # See NOTE [ device and dtype of a PackedSequence ] return type(self)( self.data.pin_memory(), self.batch_sizes, bind(self.sorted_indices, lambda t: t.pin_memory()), bind(self.unsorted_indices, lambda t: t.pin_memory()), ) @overload def to( self, dtype: torch.dtype, non_blocking: bool = ..., copy: bool = ..., ) -> Self: ... @overload def to( self, device: Optional[Union[str, torch.device, int]] = ..., dtype: Optional[torch.dtype] = ..., non_blocking: bool = ..., copy: bool = ..., ) -> Self: ... @overload def to( self, other: Tensor, non_blocking: bool = ..., copy: bool = ..., ) -> Self: ... 
def to(self, *args: Any, **kwargs: Any) -> Self: r"""Perform dtype and/or device conversion on `self.data`. It has similar signature as :meth:`torch.Tensor.to`, except optional arguments like `non_blocking` and `copy` should be passed as kwargs, not args, or they will not apply to the index tensors. .. note:: If the ``self.data`` Tensor already has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned. Otherwise, returns a copy with the desired configuration. """ # Why not convert `batch_sizes`? # See NOTE [ device and dtype of a PackedSequence ] data = self.data.to(*args, **kwargs) if data is self.data: return self else: # Does not forward device or dtype arg/kwargs, device is set from data.device kwargs = dict( filter(lambda t: t[0] != "device" and t[0] != "dtype", kwargs.items()) ) sorted_indices = bind( self.sorted_indices, lambda t: t.to(data.device, **kwargs) ) unsorted_indices = bind( self.unsorted_indices, lambda t: t.to(data.device, **kwargs) ) return type(self)(data, self.batch_sizes, sorted_indices, unsorted_indices) def cuda(self, *args: Any, **kwargs: Any) -> Self: # Tests to see if 'cuda' should be added to kwargs ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to( *args, **kwargs ) if ex.is_cuda: return self.to(*args, **kwargs) kwargs["device"] = "cuda" return self.to(*args, **kwargs) def cpu(self, *args: Any, **kwargs: Any) -> Self: ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to( *args, **kwargs ) if ex.device.type == "cpu": return self.to(*args, **kwargs) kwargs["device"] = "cpu" return self.to(*args, **kwargs) def double(self) -> Self: return self.to(dtype=torch.double) def float(self) -> Self: return self.to(dtype=torch.float) def half(self) -> Self: return self.to(dtype=torch.half) def long(self) -> Self: return self.to(dtype=torch.long) def int(self) -> Self: return self.to(dtype=torch.int) def short(self) -> Self: return self.to(dtype=torch.short) def char(self) -> Self: return self.to(dtype=torch.int8) def byte(self) -> Self: return self.to(dtype=torch.uint8) @property def is_cuda(self) -> bool: r"""Return true if `self.data` stored on a gpu.""" return self.data.is_cuda def is_pinned(self) -> bool: r"""Return true if `self.data` stored on in pinned memory.""" return self.data.is_pinned() # TorchScript doesn't support constructors on named tuples, so we use this helper # method to construct PackedSequence def _packed_sequence_init_args( data: Tensor, batch_sizes: Optional[Tensor] = None, sorted_indices: Optional[Tensor] = None, unsorted_indices: Optional[Tensor] = None, ) -> tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]: # NB: if unsorted_indices is provided, it should be the inverse permutation # to sorted_indices. Don't assert it here because the PackedSequence ctor # should only be used internally. if unsorted_indices is None: unsorted_indices = invert_permutation(sorted_indices) # support being called as `PackedSequence(data, batch_sizes, sorted_indices)` if batch_sizes is not None: # TODO: Re-enable this check (.type isn't supported in TorchScript) if batch_sizes.device.type != "cpu": raise ValueError( "batch_sizes should always be on CPU. " "Instances of PackedSequence should never be created manually. " "They should be instantiated by functions like pack_sequence " "and pack_padded_sequences in nn.utils.rnn. 
" "https://pytorch.org/docs/stable/nn.html#torch.nn.utils.rnn.pack_sequence" ) return data, batch_sizes, sorted_indices, unsorted_indices # support being called as `PackedSequence((data, batch_sizes), *, sorted_indices)` else: assert isinstance(data, (list, tuple)) and len(data) == 2 return data[0], data[1], sorted_indices, unsorted_indices def _packed_sequence_init( data: Tensor, batch_sizes: Optional[Tensor] = None, sorted_indices: Optional[Tensor] = None, unsorted_indices: Optional[Tensor] = None, ) -> PackedSequence: data, batch_sizes, sorted_indices, unsorted_indices = _packed_sequence_init_args( data, batch_sizes, sorted_indices, unsorted_indices ) return PackedSequence(data, batch_sizes, sorted_indices, unsorted_indices) def invert_permutation(permutation: Optional[Tensor]) -> Optional[Tensor]: if permutation is None: return None output = torch.empty_like(permutation, memory_format=torch.legacy_contiguous_format) output.scatter_( 0, permutation, torch.arange(0, permutation.numel(), device=permutation.device) ) return output def pack_padded_sequence( input: Tensor, lengths: Union[Tensor, list[int]], batch_first: bool = False, enforce_sorted: bool = True, ) -> PackedSequence: r"""Packs a Tensor containing padded sequences of variable length. :attr:`input` can be of size ``T x B x *`` (if :attr:`batch_first` is ``False``) or ``B x T x *`` (if :attr:`batch_first` is ``True``) where ``T`` is the length of the longest sequence, ``B`` is the batch size, and ``*`` is any number of dimensions (including 0). For unsorted sequences, use `enforce_sorted = False`. If :attr:`enforce_sorted` is ``True``, the sequences should be sorted by length in a decreasing order, i.e. ``input[:,0]`` should be the longest sequence, and ``input[:,B-1]`` the shortest one. `enforce_sorted = True` is only necessary for ONNX export. It is an inverse operation to :func:`pad_packed_sequence`, and hence :func:`pad_packed_sequence` can be used to recover the underlying tensor packed in :class:`PackedSequence`. Note: This function accepts any input that has at least two dimensions. You can apply it to pack the labels, and use the output of the RNN with them to compute the loss directly. A Tensor can be retrieved from a :class:`PackedSequence` object by accessing its ``.data`` attribute. Args: input (Tensor): padded batch of variable length sequences. lengths (Tensor or list(int)): list of sequence lengths of each batch element (must be on the CPU if provided as a tensor). batch_first (bool, optional): if ``True``, the input is expected in ``B x T x *`` format, ``T x B x *`` otherwise. Default: ``False``. enforce_sorted (bool, optional): if ``True``, the input is expected to contain sequences sorted by length in a decreasing order. If ``False``, the input will get sorted unconditionally. Default: ``True``. .. warning:: The dim of ``input`` tensor will be truncated if its length larger than correspond value in ``length``. Returns: a :class:`PackedSequence` object """ if not isinstance(lengths, torch.Tensor): if torch._C._get_tracing_state(): warnings.warn( "pack_padded_sequence has been called with a Python list of " "sequence lengths. 
The tracer cannot track the data flow of Python " "values, and it will treat them as constants, likely rendering " "the trace incorrect for any other combination of lengths.", stacklevel=2, ) lengths = torch.as_tensor(lengths, dtype=torch.int64, device="cpu") else: lengths = lengths.to(dtype=torch.int64) if enforce_sorted: sorted_indices = None else: lengths, sorted_indices = torch.sort(lengths, descending=True) sorted_indices = sorted_indices.to(input.device) batch_dim = 0 if batch_first else 1 input = input.index_select(batch_dim, sorted_indices) data, batch_sizes = _VF._pack_padded_sequence(input, lengths, batch_first) return _packed_sequence_init(data, batch_sizes, sorted_indices, None) def pad_packed_sequence( sequence: PackedSequence, batch_first: bool = False, padding_value: float = 0.0, total_length: Optional[int] = None, ) -> tuple[Tensor, Tensor]: r"""Pad a packed batch of variable length sequences. It is an inverse operation to :func:`pack_padded_sequence`. The returned Tensor's data will be of size ``T x B x *`` (if :attr:`batch_first` is ``False``) or ``B x T x *`` (if :attr:`batch_first` is ``True``) , where ``T`` is the length of the longest sequence and ``B`` is the batch size. Example: >>> from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence >>> seq = torch.tensor([[1, 2, 0], [3, 0, 0], [4, 5, 6]]) >>> lens = [2, 1, 3] >>> packed = pack_padded_sequence(seq, lens, batch_first=True, enforce_sorted=False) >>> packed PackedSequence(data=tensor([4, 1, 3, 5, 2, 6]), batch_sizes=tensor([3, 2, 1]), sorted_indices=tensor([2, 0, 1]), unsorted_indices=tensor([1, 2, 0])) >>> seq_unpacked, lens_unpacked = pad_packed_sequence(packed, batch_first=True) >>> seq_unpacked tensor([[1, 2, 0], [3, 0, 0], [4, 5, 6]]) >>> lens_unpacked tensor([2, 1, 3]) .. note:: :attr:`total_length` is useful to implement the ``pack sequence -> recurrent network -> unpack sequence`` pattern in a :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`. See :ref:`this FAQ section <pack-rnn-unpack-with-data-parallelism>` for details. Args: sequence (PackedSequence): batch to pad batch_first (bool, optional): if ``True``, the output will be in ``B x T x *`` format, ``T x B x *`` otherwise. padding_value (float, optional): values for padded elements. total_length (int, optional): if not ``None``, the output will be padded to have length :attr:`total_length`. This method will throw :class:`ValueError` if :attr:`total_length` is less than the max sequence length in :attr:`sequence`. Returns: Tuple of Tensor containing the padded sequence, and a Tensor containing the list of lengths of each sequence in the batch. Batch elements will be re-ordered as they were ordered originally when the batch was passed to ``pack_padded_sequence`` or ``pack_sequence``. 
""" max_seq_length = sequence.batch_sizes.size(0) if total_length is not None: if total_length < max_seq_length: raise ValueError( "Expected total_length to be at least the length " "of the longest sequence in input, but got " f"total_length={total_length} and max sequence length being {max_seq_length}" ) max_seq_length = total_length padded_output, lengths = _VF._pad_packed_sequence( sequence.data, sequence.batch_sizes, batch_first, padding_value, max_seq_length ) unsorted_indices = sequence.unsorted_indices if unsorted_indices is not None: batch_dim = 0 if batch_first else 1 return ( padded_output.index_select(batch_dim, unsorted_indices), lengths[unsorted_indices.cpu()], ) return padded_output, lengths # NOTE: for JIT-compatibility, we need to be more restrictive here and use specific types instead of Iterable. def pad_sequence( sequences: Union[Tensor, list[Tensor]], batch_first: bool = False, padding_value: float = 0.0, padding_side: str = "right", ) -> Tensor: r"""Pad a list of variable length Tensors with :attr:`padding_value`. ``pad_sequence`` stacks a list of Tensors along a new dimension, and pads them to equal length. :attr:`sequences` can be list of sequences with size ``L x *``, where `L` is length of the sequence and ``*`` is any number of dimensions (including ``0``). If :attr:`batch_first` is ``False``, the output is of size ``T x B x *``, and ``B x T x *`` otherwise, where ``B`` is the batch size (the number of elements in :attr:`sequences`), ``T`` is the length of the longest sequence. Example: >>> from torch.nn.utils.rnn import pad_sequence >>> a = torch.ones(25, 300) >>> b = torch.ones(22, 300) >>> c = torch.ones(15, 300) >>> pad_sequence([a, b, c]).size() torch.Size([25, 3, 300]) Note: This function returns a Tensor of size ``T x B x *`` or ``B x T x *`` where `T` is the length of the longest sequence. This function assumes trailing dimensions and type of all the Tensors in sequences are same. Args: sequences (list[Tensor]): list of variable length sequences. batch_first (bool, optional): if ``True``, the output will be in ``B x T x *`` format, ``T x B x *`` otherwise. padding_value (float, optional): value for padded elements. Default: ``0``. padding_side (str, optional): the side to pad the sequences on. Default: ``'right'``. Returns: Tensor of size ``T x B x *`` if :attr:`batch_first` is ``False``. Tensor of size ``B x T x *`` otherwise """ if not (torch.jit.is_tracing() or torch.jit.is_scripting()): # JIT doesn't support `Iterable` if not isinstance(sequences, Iterable): msg = ( "pad_sequence: Expected iterable for input sequences, but got arg of type: " f"{type(sequences)}" ) raise RuntimeError(msg) # In JIT context this leads to, # RuntimeError: cannot statically infer the expected size of a list in this context sequences = tuple(sequences) # type: ignore[assignment] else: # For JIT, we only support Union[Tensor, Tuple[Tensor]] if isinstance(sequences, torch.Tensor): sequences = sequences.unbind(0) # type: ignore[assignment] # assuming trailing dimensions and type of all the Tensors # in sequences are same and fetching those from sequences[0] return torch._C._nn.pad_sequence( sequences, batch_first, padding_value, padding_side # type: ignore[arg-type] ) def unpad_sequence( padded_sequences: Tensor, lengths: Tensor, batch_first: bool = False, ) -> list[Tensor]: r"""Unpad padded Tensor into a list of variable length Tensors. ``unpad_sequence`` unstacks padded Tensor into a list of variable length Tensors. 
Example: >>> from torch.nn.utils.rnn import pad_sequence, unpad_sequence >>> a = torch.ones(25, 300) >>> b = torch.ones(22, 300) >>> c = torch.ones(15, 300) >>> sequences = [a, b, c] >>> padded_sequences = pad_sequence(sequences) >>> lengths = torch.as_tensor([v.size(0) for v in sequences]) >>> unpadded_sequences = unpad_sequence(padded_sequences, lengths) >>> torch.allclose(sequences[0], unpadded_sequences[0]) True >>> torch.allclose(sequences[1], unpadded_sequences[1]) True >>> torch.allclose(sequences[2], unpadded_sequences[2]) True Args: padded_sequences (Tensor): padded sequences. lengths (Tensor): length of original (unpadded) sequences. batch_first (bool, optional): whether batch dimension first or not. Default: ``False``. Returns: a list of :class:`Tensor` objects """ unpadded_sequences = [] if not batch_first: padded_sequences.transpose_(0, 1) max_length = padded_sequences.shape[1] idx = torch.arange(max_length, device=lengths.device) for seq, length in zip(padded_sequences, lengths): mask = idx < length unpacked_seq = seq[mask] unpadded_sequences.append(unpacked_seq) return unpadded_sequences def pack_sequence( sequences: list[Tensor], enforce_sorted: bool = True, ) -> PackedSequence: r"""Packs a list of variable length Tensors. Consecutive call of the next functions: ``pad_sequence``, ``pack_padded_sequence``. ``sequences`` should be a list of Tensors of size ``L x *``, where `L` is the length of a sequence and `*` is any number of trailing dimensions, including ``0``. For unsorted sequences, use `enforce_sorted = False`. If ``enforce_sorted`` is ``True``, the sequences should be sorted in the order of decreasing length. ``enforce_sorted = True`` is only necessary for ONNX export. Example: >>> from torch.nn.utils.rnn import pack_sequence >>> a = torch.tensor([1, 2, 3]) >>> b = torch.tensor([4, 5]) >>> c = torch.tensor([6]) >>> pack_sequence([a, b, c]) PackedSequence(data=tensor([1, 4, 6, 2, 5, 3]), batch_sizes=tensor([3, 2, 1]), sorted_indices=None, unsorted_indices=None) Args: sequences (list[Tensor]): A list of sequences of decreasing length. enforce_sorted (bool, optional): if ``True``, checks that the input contains sequences sorted by length in a decreasing order. If ``False``, this condition is not checked. Default: ``True``. Returns: a :class:`PackedSequence` object """ lengths = torch.as_tensor([v.size(0) for v in sequences]) return pack_padded_sequence( pad_sequence(sequences), lengths, enforce_sorted=enforce_sorted ) def unpack_sequence(packed_sequences: PackedSequence) -> list[Tensor]: r"""Unpack PackedSequence into a list of variable length Tensors. ``packed_sequences`` should be a PackedSequence object. Example: >>> from torch.nn.utils.rnn import pack_sequence, unpack_sequence >>> a = torch.tensor([1, 2, 3]) >>> b = torch.tensor([4, 5]) >>> c = torch.tensor([6]) >>> sequences = [a, b, c] >>> print(sequences) [tensor([1, 2, 3]), tensor([4, 5]), tensor([6])] >>> packed_sequences = pack_sequence(sequences) >>> print(packed_sequences) PackedSequence(data=tensor([1, 4, 6, 2, 5, 3]), batch_sizes=tensor([3, 2, 1]), sorted_indices=None, unsorted_indices=None) >>> unpacked_sequences = unpack_sequence(packed_sequences) >>> print(unpacked_sequences) [tensor([1, 2, 3]), tensor([4, 5]), tensor([6])] Args: packed_sequences (PackedSequence): A PackedSequence object. 
    Returns:
        a list of :class:`Tensor` objects
    """
    padded_sequences, lengths = pad_packed_sequence(packed_sequences, batch_first=True)
    unpacked_sequences = unpad_sequence(padded_sequences, lengths, batch_first=True)
    return unpacked_sequences
```
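The functions in this module are usually chained: pad a list of variable-length tensors, pack them for the recurrent layer, and unpack the output afterwards. Below is a minimal sketch of that round trip; the sequence lengths, the feature size, and the GRU layer are arbitrary choices made for the example.

```py
import torch
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, pad_sequence

# Three variable-length sequences, each step carrying 5 features (made-up sizes).
seqs = [torch.randn(4, 5), torch.randn(2, 5), torch.randn(3, 5)]
lengths = torch.tensor([s.size(0) for s in seqs])

padded = pad_sequence(seqs, batch_first=True)  # shape: (3, 4, 5)
packed = pack_padded_sequence(
    padded, lengths, batch_first=True, enforce_sorted=False
)

rnn = nn.GRU(input_size=5, hidden_size=7, batch_first=True)
packed_out, h_n = rnn(packed)  # RNN modules consume PackedSequence directly

# Unpack back to a padded tensor; rows come back in the original batch order.
out, out_lengths = pad_packed_sequence(packed_out, batch_first=True)
print(out.shape, out_lengths)  # torch.Size([3, 4, 7]) tensor([4, 2, 3])
```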
======================================================================================================================= SOURCE CODE FILE: spectral_norm.py LINES: 2 SIZE: 14.92 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\spectral_norm.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs """Spectral Normalization from https://arxiv.org/abs/1802.05957.""" from typing import Any, Optional, TypeVar import torch import torch.nn.functional as F from torch.nn.modules import Module __all__ = [ "SpectralNorm", "SpectralNormLoadStateDictPreHook", "SpectralNormStateDictHook", "spectral_norm", "remove_spectral_norm", ] class SpectralNorm: # Invariant before and after each forward call: # u = F.normalize(W @ v) # NB: At initialization, this invariant is not enforced _version: int = 1 # At version 1: # made `W` not a buffer, # added `v` as a buffer, and # made eval mode use `W = u @ W_orig @ v` rather than the stored `W`. name: str dim: int n_power_iterations: int eps: float def __init__( self, name: str = "weight", n_power_iterations: int = 1, dim: int = 0, eps: float = 1e-12, ) -> None: self.name = name self.dim = dim if n_power_iterations <= 0: raise ValueError( "Expected n_power_iterations to be positive, but " f"got n_power_iterations={n_power_iterations}" ) self.n_power_iterations = n_power_iterations self.eps = eps def reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor: weight_mat = weight if self.dim != 0: # permute dim to front weight_mat = weight_mat.permute( self.dim, *[d for d in range(weight_mat.dim()) if d != self.dim] ) height = weight_mat.size(0) return weight_mat.reshape(height, -1) def compute_weight(self, module: Module, do_power_iteration: bool) -> torch.Tensor: # NB: If `do_power_iteration` is set, the `u` and `v` vectors are # updated in power iteration **in-place**. This is very important # because in `DataParallel` forward, the vectors (being buffers) are # broadcast from the parallelized module to each module replica, # which is a new module object created on the fly. And each replica # runs its own spectral norm power iteration. So simply assigning # the updated vectors to the module this function runs on will cause # the update to be lost forever. And the next time the parallelized # module is replicated, the same randomly initialized vectors are # broadcast and used! # # Therefore, to make the change propagate back, we rely on two # important behaviors (also enforced via tests): # 1. `DataParallel` doesn't clone storage if the broadcast tensor # is already on correct device; and it makes sure that the # parallelized module is already on `device[0]`. # 2. If the out tensor in `out=` kwarg has correct shape, it will # just fill in the values. # Therefore, since the same power iteration is performed on all # devices, simply updating the tensors in-place will make sure that # the module replica on `device[0]` will update the _u vector on the # parallelized module (by shared storage). # # However, after we update `u` and `v` in-place, we need to **clone** # them before using them to normalize the weight. This is to support # backproping through two forward passes, e.g., the common pattern in # GAN training: loss = D(real) - D(fake). Otherwise, engine will # complain that variables needed to do backward for the first forward # (i.e., the `u` and `v` vectors) are changed in the second forward. 
weight = getattr(module, self.name + "_orig") u = getattr(module, self.name + "_u") v = getattr(module, self.name + "_v") weight_mat = self.reshape_weight_to_matrix(weight) if do_power_iteration: with torch.no_grad(): for _ in range(self.n_power_iterations): # Spectral norm of weight equals to `u^T W v`, where `u` and `v` # are the first left and right singular vectors. # This power iteration produces approximations of `u` and `v`. v = F.normalize( torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v ) u = F.normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u) if self.n_power_iterations > 0: # See above on why we need to clone u = u.clone(memory_format=torch.contiguous_format) v = v.clone(memory_format=torch.contiguous_format) sigma = torch.dot(u, torch.mv(weight_mat, v)) weight = weight / sigma return weight def remove(self, module: Module) -> None: with torch.no_grad(): weight = self.compute_weight(module, do_power_iteration=False) delattr(module, self.name) delattr(module, self.name + "_u") delattr(module, self.name + "_v") delattr(module, self.name + "_orig") module.register_parameter(self.name, torch.nn.Parameter(weight.detach())) def __call__(self, module: Module, inputs: Any) -> None: setattr( module, self.name, self.compute_weight(module, do_power_iteration=module.training), ) def _solve_v_and_rescale(self, weight_mat, u, target_sigma): # Tries to returns a vector `v` s.t. `u = F.normalize(W @ v)` # (the invariant at top of this class) and `u @ W @ v = sigma`. # This uses pinverse in case W^T W is not invertible. v = torch.linalg.multi_dot( [weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)] ).squeeze(1) return v.mul_(target_sigma / torch.dot(u, torch.mv(weight_mat, v))) @staticmethod def apply( module: Module, name: str, n_power_iterations: int, dim: int, eps: float ) -> "SpectralNorm": for hook in module._forward_pre_hooks.values(): if isinstance(hook, SpectralNorm) and hook.name == name: raise RuntimeError( f"Cannot register two spectral_norm hooks on the same parameter {name}" ) fn = SpectralNorm(name, n_power_iterations, dim, eps) weight = module._parameters[name] if weight is None: raise ValueError( f"`SpectralNorm` cannot be applied as parameter `{name}` is None" ) if isinstance(weight, torch.nn.parameter.UninitializedParameter): raise ValueError( "The module passed to `SpectralNorm` can't have uninitialized parameters. " "Make sure to run the dummy forward before applying spectral normalization" ) with torch.no_grad(): weight_mat = fn.reshape_weight_to_matrix(weight) h, w = weight_mat.size() # randomly initialize `u` and `v` u = F.normalize(weight.new_empty(h).normal_(0, 1), dim=0, eps=fn.eps) v = F.normalize(weight.new_empty(w).normal_(0, 1), dim=0, eps=fn.eps) delattr(module, fn.name) module.register_parameter(fn.name + "_orig", weight) # We still need to assign weight back as fn.name because all sorts of # things may assume that it exists, e.g., when initializing weights. # However, we can't directly assign as it could be an nn.Parameter and # gets added as a parameter. Instead, we register weight.data as a plain # attribute. setattr(module, fn.name, weight.data) module.register_buffer(fn.name + "_u", u) module.register_buffer(fn.name + "_v", v) module.register_forward_pre_hook(fn) module._register_state_dict_hook(SpectralNormStateDictHook(fn)) module._register_load_state_dict_pre_hook(SpectralNormLoadStateDictPreHook(fn)) return fn # This is a top level class because Py2 pickle doesn't like inner class nor an # instancemethod. 
class SpectralNormLoadStateDictPreHook: # See docstring of SpectralNorm._version on the changes to spectral_norm. def __init__(self, fn) -> None: self.fn = fn # For state_dict with version None, (assuming that it has gone through at # least one training forward), we have # # u = F.normalize(W_orig @ v) # W = W_orig / sigma, where sigma = u @ W_orig @ v # # To compute `v`, we solve `W_orig @ x = u`, and let # v = x / (u @ W_orig @ x) * (W / W_orig). def __call__( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ) -> None: fn = self.fn version = local_metadata.get("spectral_norm", {}).get( fn.name + ".version", None ) if version is None or version < 1: weight_key = prefix + fn.name if ( version is None and all(weight_key + s in state_dict for s in ("_orig", "_u", "_v")) and weight_key not in state_dict ): # Detect if it is the updated state dict and just missing metadata. # This could happen if the users are crafting a state dict themselves, # so we just pretend that this is the newest. return has_missing_keys = False for suffix in ("_orig", "", "_u"): key = weight_key + suffix if key not in state_dict: has_missing_keys = True if strict: missing_keys.append(key) if has_missing_keys: return with torch.no_grad(): weight_orig = state_dict[weight_key + "_orig"] weight = state_dict.pop(weight_key) sigma = (weight_orig / weight).mean() weight_mat = fn.reshape_weight_to_matrix(weight_orig) u = state_dict[weight_key + "_u"] v = fn._solve_v_and_rescale(weight_mat, u, sigma) state_dict[weight_key + "_v"] = v # This is a top level class because Py2 pickle doesn't like inner class nor an # instancemethod. class SpectralNormStateDictHook: # See docstring of SpectralNorm._version on the changes to spectral_norm. def __init__(self, fn) -> None: self.fn = fn def __call__(self, module, state_dict, prefix, local_metadata) -> None: if "spectral_norm" not in local_metadata: local_metadata["spectral_norm"] = {} key = self.fn.name + ".version" if key in local_metadata["spectral_norm"]: raise RuntimeError(f"Unexpected key in metadata['spectral_norm']: {key}") local_metadata["spectral_norm"][key] = self.fn._version T_module = TypeVar("T_module", bound=Module) def spectral_norm( module: T_module, name: str = "weight", n_power_iterations: int = 1, eps: float = 1e-12, dim: Optional[int] = None, ) -> T_module: r"""Apply spectral normalization to a parameter in the given module. .. math:: \mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})}, \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2} Spectral normalization stabilizes the training of discriminators (critics) in Generative Adversarial Networks (GANs) by rescaling the weight tensor with spectral norm :math:`\sigma` of the weight matrix calculated using power iteration method. If the dimension of the weight tensor is greater than 2, it is reshaped to 2D in power iteration method to get spectral norm. This is implemented via a hook that calculates spectral norm and rescales weight before every :meth:`~Module.forward` call. See `Spectral Normalization for Generative Adversarial Networks`_ . .. 
_`Spectral Normalization for Generative Adversarial Networks`:
        https://arxiv.org/abs/1802.05957

    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter
        n_power_iterations (int, optional): number of power iterations to
            calculate spectral norm
        eps (float, optional): epsilon for numerical stability in
            calculating norms
        dim (int, optional): dimension corresponding to number of outputs,
            the default is ``0``, except for modules that are instances of
            ConvTranspose{1,2,3}d, when it is ``1``

    Returns:
        The original module with the spectral norm hook

    .. note::
        This function has been reimplemented as
        :func:`torch.nn.utils.parametrizations.spectral_norm` using the new
        parametrization functionality in
        :func:`torch.nn.utils.parametrize.register_parametrization`. Please use
        the newer version. This function will be deprecated in a future version
        of PyTorch.

    Example::

        >>> m = spectral_norm(nn.Linear(20, 40))
        >>> m
        Linear(in_features=20, out_features=40, bias=True)
        >>> m.weight_u.size()
        torch.Size([40])

    """
    if dim is None:
        if isinstance(
            module,
            (
                torch.nn.ConvTranspose1d,
                torch.nn.ConvTranspose2d,
                torch.nn.ConvTranspose3d,
            ),
        ):
            dim = 1
        else:
            dim = 0
    SpectralNorm.apply(module, name, n_power_iterations, dim, eps)
    return module


def remove_spectral_norm(module: T_module, name: str = "weight") -> T_module:
    r"""Remove the spectral normalization reparameterization from a module.

    Args:
        module (Module): containing module
        name (str, optional): name of weight parameter

    Example:
        >>> m = spectral_norm(nn.Linear(40, 10))
        >>> remove_spectral_norm(m)
    """
    for k, hook in module._forward_pre_hooks.items():
        if isinstance(hook, SpectralNorm) and hook.name == name:
            hook.remove(module)
            del module._forward_pre_hooks[k]
            break
    else:
        raise ValueError(f"spectral_norm of '{name}' not found in {module}")

    for k, hook in module._state_dict_hooks.items():
        if isinstance(hook, SpectralNormStateDictHook) and hook.fn.name == name:
            del module._state_dict_hooks[k]
            break

    for k, hook in module._load_state_dict_pre_hooks.items():
        if isinstance(hook, SpectralNormLoadStateDictPreHook) and hook.fn.name == name:
            del module._load_state_dict_pre_hooks[k]
            break

    return module
```
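As a quick illustration of the hook-based mechanics described above, the sketch below applies the legacy ``spectral_norm`` wrapper to a hypothetical linear layer, runs a number of forward passes so the power iteration can converge, and checks that the effective weight has spectral norm close to one. The layer sizes and the iteration count are arbitrary, and new code should prefer ``torch.nn.utils.parametrizations.spectral_norm`` as the note above recommends.

```py
import torch
from torch import nn
from torch.nn.utils import remove_spectral_norm, spectral_norm

# Hypothetical critic layer; spectral_norm attaches a forward pre-hook that
# rescales `weight` by an estimate of its largest singular value each forward.
layer = spectral_norm(nn.Linear(16, 8), name="weight", n_power_iterations=1)
print(sorted(layer._parameters))            # ['bias', 'weight_orig']
print(sorted(dict(layer.named_buffers())))  # ['weight_u', 'weight_v']

x = torch.randn(4, 16)
for _ in range(50):  # a few forwards let the power iteration converge
    layer(x)

sigma = torch.linalg.matrix_norm(layer.weight.detach(), ord=2)
print(sigma)  # close to 1.0 once u and v have converged

remove_spectral_norm(layer)  # fold the normalized weight back into a plain `weight`
```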
=================================================================================================================== SOURCE CODE FILE: stateless.py LINES: 3 SIZE: 11.70 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\stateless.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import contextlib from typing import Any, Optional, Union from typing_extensions import deprecated import torch from torch import Tensor from torch.nn.utils._named_member_accessor import NamedMemberAccessor __all__ = ["functional_call"] def _untie_named_tensors_map( module: "torch.nn.Module", parameters_and_buffers: dict[str, Tensor], ) -> dict[str, Tensor]: """ Unties all tied tensors in the module to parameters_and_buffers. This function returns a new untied_parameters_and_buffers dictionary and leave the original untied_parameters_and_buffers dictionary unchanged. It adds new (missing) keys for tied tensors in the module to untied_parameters_and_buffers. The value of the new key is the user-given value in the original parameters_and_buffers dictionary. If there are more than one user-given values for the same tied tensor, it will raise an error. For example, if the module has two tied weights self.foo and self.tied_foo and the user passes {'foo': foo_value, ...}, this will return {'foo': foo_value, 'tied_foo': foo_value, ...}. If the user passes {'foo': foo_value, 'tied_foo': tied_foo_value, ...}, it will raise an error. If the user passes {'foo': foo_value, 'tied_foo': foo_value, ...}, it will not raise an error. Args: module (torch.nn.Module): the module to determine which tensors are tied. parameters_and_buffers (Dict[str, Tensor]): a map of {name: tensor} for reparamaterizing the module. Returns: A new untied version of the parameters_and_buffers dictionary. Raises: ValueError: if there are more than one user-given values for the same tied tensor. """ # A map of {name: tensor} for all tensors (including tied ones) in the module. all_named_tensors: dict[str, Tensor] = {} all_named_tensors.update(module.named_parameters(remove_duplicate=False)) all_named_tensors.update(module.named_buffers(remove_duplicate=False)) # A map of {tensor: set(all_tied_names)} for all tensor names in the module. tensor_to_tied_names_map: dict[Tensor, set[str]] = {} for name, tensor in all_named_tensors.items(): if tensor not in tensor_to_tied_names_map: tensor_to_tied_names_map[tensor] = set() tensor_to_tied_names_map[tensor].add(name) # A map of {tied_name: set(all_tied_names)} for all tensor names in the module. # If a name is not tied, it will not be in this map. tied_names_map: dict[str, set[str]] = {} for tied_names in tensor_to_tied_names_map.values(): if len(tied_names) > 1: for tied_name in tied_names: tied_names_map[tied_name] = tied_names # Make sure the user didn't pass multiple values for the same tied tensor. given_names = set(parameters_and_buffers.keys()) # same as given_names.intersection(tied_names_map.keys()) but dynamo can't # handle that given_names_for_tied_tensors: set[str] = set() for name in given_names: if name in tied_names_map: given_names_for_tied_tensors.add(name) for given_name in given_names_for_tied_tensors: tied_names = tied_names_map[given_name] if ( # Detect if there are multiple keys present for the same tied tensor. len(tied_names.intersection(given_names_for_tied_tensors)) > 1 # Only raise an error if the user passed multiple values for the same tied tensor. # If all given values are the same, don't raise. 
and len({parameters_and_buffers[tied_name] for tied_name in tied_names}) != 1 ): raise ValueError( f"functional_call got multiple values for keys {sorted(tied_names)}, " f"which are tied. Consider using tie_weights=False" ) # Untie the given named tensor map # Make a copy for not modifying the original dict untied_parameters_and_buffers = parameters_and_buffers.copy() for given_name in given_names_for_tied_tensors: for tied_name in tied_names_map[given_name]: untied_parameters_and_buffers[tied_name] = parameters_and_buffers[ given_name ] return untied_parameters_and_buffers @contextlib.contextmanager def _reparametrize_module( module: "torch.nn.Module", parameters_and_buffers: dict[str, Tensor], tie_weights: bool = False, strict: bool = False, stack_weights: bool = False, ): parameters_and_buffers = parameters_and_buffers stack_weights = stack_weights if tie_weights: untied_parameters_and_buffers = _untie_named_tensors_map( module, parameters_and_buffers ) else: untied_parameters_and_buffers = parameters_and_buffers accessor = NamedMemberAccessor(module) if strict: missing_keys, unexpected_keys = accessor.check_keys( untied_parameters_and_buffers ) error_msgs = [] if len(unexpected_keys) > 0: error_msgs.append( f"Unexpected key(s): {', '.join(map(repr, unexpected_keys))}." ) if len(missing_keys) > 0: error_msgs.append(f"Missing key(s): {', '.join(map(repr, missing_keys))}.") if len(error_msgs) > 0: raise RuntimeError( "Error(s) in reparametrizing for {}:\n\t{}".format( module._get_name(), "\n\t".join(error_msgs) ) ) orig_parameters_and_buffers: dict[str, Tensor] = {} try: orig_parameters_and_buffers, _ = accessor.swap_tensors_dict( untied_parameters_and_buffers, allow_missing=True ) yield finally: if stack_weights: # When stacking is enabled, we will restore the weights in LIFO order. orig_parameters_and_buffers = dict( reversed(orig_parameters_and_buffers.items()) ) new_parameters_and_buffers, _ = accessor.swap_tensors_dict( orig_parameters_and_buffers, allow_missing=True ) # Sometimes the module is not completely stateless and has some in-place modifications on # the _parameters and _buffers dictionaries. # Write the changed parameters and buffers back to the original dict. parameters_and_buffers.update( { k: new_parameters_and_buffers[k] for k in parameters_and_buffers if k in new_parameters_and_buffers } ) @deprecated( "`torch.nn.utils.stateless.functional_call` is deprecated as of PyTorch 2.0 " "and will be removed in a future version of PyTorch. " "Please use `torch.func.functional_call` instead which is a drop-in replacement.", category=FutureWarning, ) def functional_call( module: "torch.nn.Module", parameters_and_buffers: dict[str, Tensor], args: Optional[Union[Any, tuple]] = None, kwargs: Optional[dict[str, Any]] = None, *, tie_weights: bool = True, strict: bool = False, ): r"""Perform a functional call on the module by replacing the module parameters and buffers with the provided ones. .. warning:: This API is deprecated as of PyTorch 2.0 and will be removed in a future version of PyTorch. Please use :func:`torch.func.functional_call` instead, which is a drop-in replacement for this API. .. note:: If the module has active parametrizations, passing a value in the :attr:`parameters_and_buffers` argument with the name set to the regular parameter name will completely disable the parametrization. If you want to apply the parametrization function to the value passed please set the key as ``{submodule_name}.parametrizations.{parameter_name}.original``. .. 
note:: If the module performs in-place operations on parameters/buffers, these will be reflected in the `parameters_and_buffers` input. Example:: >>> a = {'foo': torch.zeros(())} >>> # xdoctest: +SKIP >>> mod = Foo() # does self.foo = self.foo + 1 >>> print(mod.foo) # tensor(0.) >>> functional_call(mod, a, torch.ones(())) >>> print(mod.foo) # tensor(0.) >>> print(a['foo']) # tensor(1.) .. note:: If the module has tied weights, whether or not functional_call respects the tying is determined by the tie_weights flag. Example:: >>> a = {'foo': torch.zeros(())} >>> # xdoctest: +SKIP >>> mod = Foo() # has both self.foo and self.foo_tied which are tied. Returns x + self.foo + self.foo_tied >>> print(mod.foo) # tensor(1.) >>> mod(torch.zeros(())) # tensor(2.) >>> functional_call(mod, a, torch.zeros(())) # tensor(0.) since it will change self.foo_tied too >>> functional_call(mod, a, torch.zeros(()), tie_weights=False) # tensor(1.)--self.foo_tied is not updated >>> new_a = {'foo': torch.zeros(()), 'foo_tied': torch.zeros(())} >>> functional_call(mod, new_a, torch.zeros()) # tensor(0.) Args: module (torch.nn.Module): the module to call parameters_and_buffers (dict of str and Tensor): the parameters that will be used in the module call. args (Any or tuple): arguments to be passed to the module call. If not a tuple, considered a single argument. kwargs (dict): keyword arguments to be passed to the module call tie_weights (bool, optional): If True, then parameters and buffers tied in the original model will be treated as tied in the reparamaterized version. Therefore, if True and different values are passed for the tied parameters and buffers, it will error. If False, it will not respect the originally tied parameters and buffers unless the values passed for both weights are the same. Default: True. strict (bool, optional): If True, then the parameters and buffers passed in must match the parameters and buffers in the original module. Therefore, if True and there are any missing or unexpected keys, it will error. Default: False. Returns: Any: the result of calling ``module``. """ return _functional_call( module, parameters_and_buffers, args, kwargs, tie_weights=tie_weights, strict=strict, ) def _functional_call( module: "torch.nn.Module", parameters_and_buffers: dict[str, Tensor], args: Optional[Union[Any, tuple]] = None, kwargs: Optional[dict[str, Any]] = None, *, tie_weights: bool = True, strict: bool = False, ): # TODO allow kwargs such as unsafe and others for parametrization if ( torch.jit.is_tracing() or torch.jit.is_scripting() or isinstance( module, ( torch.jit.RecursiveScriptModule, torch.jit.ScriptModule, torch.jit.ScriptFunction, ), ) ): raise RuntimeError("The stateless API can't be used with Jitted modules") if isinstance(module, torch.nn.DataParallel): raise RuntimeError( "The stateless API can't be used with nn.DataParallel module" ) if kwargs is None: kwargs = {} if args is None: args = () elif not isinstance(args, tuple): args = (args,) with _reparametrize_module( module, parameters_and_buffers, tie_weights=tie_weights, strict=strict ): return module(*args, **kwargs) ```
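To make the reparametrization semantics above concrete, here is a minimal sketch of calling a module with externally supplied parameters. It uses ``torch.func.functional_call``, the drop-in replacement that the deprecation notice above points to; the ``nn.Linear`` model and the zero/one parameter values are made up for the example.

```py
import torch
from torch import nn

# Made-up module and input; we call it with replacement parameters while the
# parameters stored on the module itself stay untouched.
model = nn.Linear(3, 2)
x = torch.randn(4, 3)

params_and_buffers = {
    "weight": torch.zeros(2, 3),
    "bias": torch.ones(2),
}

# Drop-in replacement for the deprecated torch.nn.utils.stateless.functional_call.
out = torch.func.functional_call(model, params_and_buffers, (x,))
print(out)  # every row is [1., 1.] because the supplied weight is all zeros

# The module's own (randomly initialized) weight was not overwritten in place.
print(torch.equal(model.weight, params_and_buffers["weight"]))  # False
```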
===================================================================================================================== SOURCE CODE FILE: weight_norm.py LINES: 1 SIZE: 5.90 KB PATH: scripts\freecad_env\Lib\site-packages\torch\nn\utils\weight_norm.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs r"""Weight Normalization from https://arxiv.org/abs/1602.07868.""" from typing import Any, TypeVar from typing_extensions import deprecated from torch import _weight_norm, norm_except_dim from torch.nn.modules import Module from torch.nn.parameter import Parameter, UninitializedParameter __all__ = ["WeightNorm", "weight_norm", "remove_weight_norm"] class WeightNorm: name: str dim: int def __init__(self, name: str, dim: int) -> None: if dim is None: dim = -1 self.name = name self.dim = dim # TODO Make return type more specific def compute_weight(self, module: Module) -> Any: g = getattr(module, self.name + "_g") v = getattr(module, self.name + "_v") return _weight_norm(v, g, self.dim) @staticmethod @deprecated( "`torch.nn.utils.weight_norm` is deprecated " "in favor of `torch.nn.utils.parametrizations.weight_norm`.", category=FutureWarning, ) def apply(module, name: str, dim: int) -> "WeightNorm": for hook in module._forward_pre_hooks.values(): if isinstance(hook, WeightNorm) and hook.name == name: raise RuntimeError( f"Cannot register two weight_norm hooks on the same parameter {name}" ) if dim is None: dim = -1 fn = WeightNorm(name, dim) weight = getattr(module, name) if isinstance(weight, UninitializedParameter): raise ValueError( "The module passed to `WeightNorm` can't have uninitialized parameters. " "Make sure to run the dummy forward before applying weight normalization" ) # remove w from parameter list del module._parameters[name] # add g and v as new parameters and express w as g/||v|| * v module.register_parameter( name + "_g", Parameter(norm_except_dim(weight, 2, dim).data) ) module.register_parameter(name + "_v", Parameter(weight.data)) setattr(module, name, fn.compute_weight(module)) # recompute weight before every forward() module.register_forward_pre_hook(fn) return fn def remove(self, module: Module) -> None: weight = self.compute_weight(module) delattr(module, self.name) del module._parameters[self.name + "_g"] del module._parameters[self.name + "_v"] setattr(module, self.name, Parameter(weight.data)) def __call__(self, module: Module, inputs: Any) -> None: setattr(module, self.name, self.compute_weight(module)) T_module = TypeVar("T_module", bound=Module) def weight_norm(module: T_module, name: str = "weight", dim: int = 0) -> T_module: r"""Apply weight normalization to a parameter in the given module. .. math:: \mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|} Weight normalization is a reparameterization that decouples the magnitude of a weight tensor from its direction. This replaces the parameter specified by :attr:`name` (e.g. ``'weight'``) with two parameters: one specifying the magnitude (e.g. ``'weight_g'``) and one specifying the direction (e.g. ``'weight_v'``). Weight normalization is implemented via a hook that recomputes the weight tensor from the magnitude and direction before every :meth:`~Module.forward` call. By default, with ``dim=0``, the norm is computed independently per output channel/plane. To compute a norm over the entire weight tensor, use ``dim=None``. See https://arxiv.org/abs/1602.07868 .. warning:: This function is deprecated. Use :func:`torch.nn.utils.parametrizations.weight_norm` which uses the modern parametrization API. 
The new ``weight_norm`` is compatible with ``state_dict`` generated from old ``weight_norm``. Migration guide: * The magnitude (``weight_g``) and direction (``weight_v``) are now expressed as ``parametrizations.weight.original0`` and ``parametrizations.weight.original1`` respectively. If this is bothering you, please comment on https://github.com/pytorch/pytorch/issues/102999 * To remove the weight normalization reparametrization, use :func:`torch.nn.utils.parametrize.remove_parametrizations`. * The weight is no longer recomputed once at module forward; instead, it will be recomputed on every access. To restore the old behavior, use :func:`torch.nn.utils.parametrize.cached` before invoking the module in question. Args: module (Module): containing module name (str, optional): name of weight parameter dim (int, optional): dimension over which to compute the norm Returns: The original module with the weight norm hook Example:: >>> m = weight_norm(nn.Linear(20, 40), name='weight') >>> m Linear(in_features=20, out_features=40, bias=True) >>> m.weight_g.size() torch.Size([40, 1]) >>> m.weight_v.size() torch.Size([40, 20]) """ WeightNorm.apply(module, name, dim) return module def remove_weight_norm(module: T_module, name: str = "weight") -> T_module: r"""Remove the weight normalization reparameterization from a module. Args: module (Module): containing module name (str, optional): name of weight parameter Example: >>> m = weight_norm(nn.Linear(20, 40)) >>> remove_weight_norm(m) """ for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, WeightNorm) and hook.name == name: hook.remove(module) del module._forward_pre_hooks[k] return module raise ValueError(f"weight_norm of '{name}' not found in {module}") ```
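A minimal migration sketch for the deprecation notice above, using the parametrization-based `torch.nn.utils.parametrizations.weight_norm`; the `original0`/`original1` attribute names follow the mapping described in the migration guide:

```py
import torch
import torch.nn as nn
from torch.nn.utils import parametrize
from torch.nn.utils.parametrizations import weight_norm

m = weight_norm(nn.Linear(20, 40), name="weight", dim=0)

# Magnitude and direction now live under the parametrization namespace
# instead of weight_g / weight_v.
print(m.parametrizations.weight.original0.shape)  # magnitude (old weight_g)
print(m.parametrizations.weight.original1.shape)  # direction (old weight_v)

# The weight is recomputed on every access; cache it for the duration of a call.
with parametrize.cached():
    y = m(torch.randn(8, 20))
```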
============================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 19.45 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\__init__.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from __future__ import annotations __all__ = [ # Modules "symbolic_helper", "utils", "errors", # All opsets "symbolic_caffe2", "symbolic_opset7", "symbolic_opset8", "symbolic_opset9", "symbolic_opset10", "symbolic_opset11", "symbolic_opset12", "symbolic_opset13", "symbolic_opset14", "symbolic_opset15", "symbolic_opset16", "symbolic_opset17", "symbolic_opset18", "symbolic_opset19", "symbolic_opset20", # Enums "OperatorExportTypes", "TrainingMode", "TensorProtoDataType", "JitScalarType", # Public functions "export", "is_in_onnx_export", "select_model_mode_for_export", "register_custom_op_symbolic", "unregister_custom_op_symbolic", # Base error "OnnxExporterError", # Dynamo Exporter "DiagnosticOptions", "ExportOptions", "ONNXProgram", "ONNXRuntimeOptions", "OnnxRegistry", "dynamo_export", "enable_fake_mode", # DORT / torch.compile "is_onnxrt_backend_supported", ] from typing import Any, Callable, TYPE_CHECKING from typing_extensions import deprecated import torch from torch import _C from torch._C import _onnx as _C_onnx from torch._C._onnx import OperatorExportTypes, TensorProtoDataType, TrainingMode from ._internal.exporter._onnx_program import ONNXProgram from ._internal.onnxruntime import ( is_onnxrt_backend_supported, OrtBackend as _OrtBackend, OrtBackendOptions as _OrtBackendOptions, OrtExecutionProvider as _OrtExecutionProvider, ) from ._type_utils import JitScalarType from .errors import OnnxExporterError from .utils import ( _run_symbolic_function, _run_symbolic_method, is_in_onnx_export, register_custom_op_symbolic, select_model_mode_for_export, unregister_custom_op_symbolic, ) from . import ( # usort: skip. Keep the order instead of sorting lexicographically errors, symbolic_caffe2, symbolic_helper, symbolic_opset7, symbolic_opset8, symbolic_opset9, symbolic_opset10, symbolic_opset11, symbolic_opset12, symbolic_opset13, symbolic_opset14, symbolic_opset15, symbolic_opset16, symbolic_opset17, symbolic_opset18, symbolic_opset19, symbolic_opset20, utils, ) from ._internal._exporter_legacy import ( # usort: skip. needs to be last to avoid circular import DiagnosticOptions, ExportOptions, ONNXRuntimeOptions, OnnxRegistry, enable_fake_mode, ) if TYPE_CHECKING: import os from collections.abc import Collection, Mapping, Sequence # Set namespace for exposed private names DiagnosticOptions.__module__ = "torch.onnx" ExportOptions.__module__ = "torch.onnx" JitScalarType.__module__ = "torch.onnx" ONNXProgram.__module__ = "torch.onnx" ONNXRuntimeOptions.__module__ = "torch.onnx" OnnxExporterError.__module__ = "torch.onnx" OnnxRegistry.__module__ = "torch.onnx" _OrtBackend.__module__ = "torch.onnx" _OrtBackendOptions.__module__ = "torch.onnx" _OrtExecutionProvider.__module__ = "torch.onnx" enable_fake_mode.__module__ = "torch.onnx" is_onnxrt_backend_supported.__module__ = "torch.onnx" producer_name = "pytorch" producer_version = _C_onnx.PRODUCER_VERSION def export( model: torch.nn.Module | torch.export.ExportedProgram | torch.jit.ScriptModule | torch.jit.ScriptFunction, args: tuple[Any, ...] 
= (), f: str | os.PathLike | None = None, *, kwargs: dict[str, Any] | None = None, export_params: bool = True, verbose: bool | None = None, input_names: Sequence[str] | None = None, output_names: Sequence[str] | None = None, opset_version: int | None = None, dynamic_axes: Mapping[str, Mapping[int, str]] | Mapping[str, Sequence[int]] | None = None, keep_initializers_as_inputs: bool = False, dynamo: bool = False, # Dynamo only options external_data: bool = True, dynamic_shapes: dict[str, Any] | tuple[Any, ...] | list[Any] | None = None, custom_translation_table: dict[Callable, Callable | Sequence[Callable]] | None = None, report: bool = False, optimize: bool = True, verify: bool = False, profile: bool = False, dump_exported_program: bool = False, artifacts_dir: str | os.PathLike = ".", fallback: bool = False, # Deprecated options training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL, operator_export_type: _C_onnx.OperatorExportTypes = _C_onnx.OperatorExportTypes.ONNX, do_constant_folding: bool = True, custom_opsets: Mapping[str, int] | None = None, export_modules_as_functions: bool | Collection[type[torch.nn.Module]] = False, autograd_inlining: bool = True, ) -> ONNXProgram | None: r"""Exports a model into ONNX format. Args: model: The model to be exported. args: Example positional inputs. Any non-Tensor arguments will be hard-coded into the exported model; any Tensor arguments will become inputs of the exported model, in the order they occur in the tuple. f: Path to the output ONNX model file. E.g. "model.onnx". kwargs: Optional example keyword inputs. export_params: If false, parameters (weights) will not be exported. verbose: Whether to enable verbose logging. input_names: names to assign to the input nodes of the graph, in order. output_names: names to assign to the output nodes of the graph, in order. opset_version: The version of the `default (ai.onnx) opset <https://github.com/onnx/onnx/blob/master/docs/Operators.md>`_ to target. Must be >= 7. dynamic_axes: By default the exported model will have the shapes of all input and output tensors set to exactly match those given in ``args``. To specify axes of tensors as dynamic (i.e. known only at run-time), set ``dynamic_axes`` to a dict with schema: * KEY (str): an input or output name. Each name must also be provided in ``input_names`` or ``output_names``. * VALUE (dict or list): If a dict, keys are axis indices and values are axis names. If a list, each element is an axis index. For example:: class SumModule(torch.nn.Module): def forward(self, x): return torch.sum(x, dim=1) torch.onnx.export( SumModule(), (torch.ones(2, 2),), "onnx.pb", input_names=["x"], output_names=["sum"], ) Produces:: input { name: "x" ... shape { dim { dim_value: 2 # axis 0 } dim { dim_value: 2 # axis 1 ... output { name: "sum" ... shape { dim { dim_value: 2 # axis 0 ... While:: torch.onnx.export( SumModule(), (torch.ones(2, 2),), "onnx.pb", input_names=["x"], output_names=["sum"], dynamic_axes={ # dict value: manually named axes "x": {0: "my_custom_axis_name"}, # list value: automatic names "sum": [0], }, ) Produces:: input { name: "x" ... shape { dim { dim_param: "my_custom_axis_name" # axis 0 } dim { dim_value: 2 # axis 1 ... output { name: "sum" ... shape { dim { dim_param: "sum_dynamic_axes_1" # axis 0 ... keep_initializers_as_inputs: If True, all the initializers (typically corresponding to model weights) in the exported graph will also be added as inputs to the graph. 
If False, then initializers are not added as inputs to the graph, and only the user inputs are added as inputs. Set this to True if you intend to supply model weights at runtime. Set it to False if the weights are static to allow for better optimizations (e.g. constant folding) by backends/runtimes. dynamo: Whether to export the model with ``torch.export`` ExportedProgram instead of TorchScript. external_data: Whether to save the model weights as an external data file. This is required for models with large weights that exceed the ONNX file size limit (2GB). When False, the weights are saved in the ONNX file with the model architecture. dynamic_shapes: A dictionary or a tuple of dynamic shapes for the model inputs. Refer to :func:`torch.export.export` for more details. This is only used (and preferred) when dynamo is True. Note that dynamic_shapes is designed to be used when the model is exported with dynamo=True, while dynamic_axes is used when dynamo=False. custom_translation_table: A dictionary of custom decompositions for operators in the model. The dictionary should have the callable target in the fx Node as the key (e.g. ``torch.ops.aten.stft.default``), and the value should be a function that builds that graph using ONNX Script. This option is only valid when dynamo is True. report: Whether to generate a markdown report for the export process. This option is only valid when dynamo is True. optimize: Whether to optimize the exported model. This option is only valid when dynamo is True. Default is True. verify: Whether to verify the exported model using ONNX Runtime. This option is only valid when dynamo is True. profile: Whether to profile the export process. This option is only valid when dynamo is True. dump_exported_program: Whether to dump the :class:`torch.export.ExportedProgram` to a file. This is useful for debugging the exporter. This option is only valid when dynamo is True. artifacts_dir: The directory to save the debugging artifacts like the report and the serialized exported program. This option is only valid when dynamo is True. fallback: Whether to fallback to the TorchScript exporter if the dynamo exporter fails. This option is only valid when dynamo is True. When fallback is enabled, It is recommended to set dynamic_axes even when dynamic_shapes is provided. training: Deprecated option. Instead, set the training mode of the model before exporting. operator_export_type: Deprecated option. Only ONNX is supported. do_constant_folding: Deprecated option. custom_opsets: Deprecated. A dictionary: * KEY (str): opset domain name * VALUE (int): opset version If a custom opset is referenced by ``model`` but not mentioned in this dictionary, the opset version is set to 1. Only custom opset domain name and version should be indicated through this argument. export_modules_as_functions: Deprecated option. Flag to enable exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the particular types of modules to export as local functions in ONNX. This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because ``opset_version`` < 15 implies IR version < 8, which means no local function support. Module variables will be exported as function attributes. There are two categories of function attributes. 1. Annotated attributes: class variables that have type annotations via `PEP 526-style <https://www.python.org/dev/peps/pep-0526/#class-and-instance-variable-annotations>`_ will be exported as attributes. 
Annotated attributes are not used inside the subgraph of ONNX local function because they are not created by PyTorch JIT tracing, but they may be used by consumers to determine whether or not to replace the function with a particular fused kernel. 2. Inferred attributes: variables that are used by operators inside the module. Attribute names will have prefix "inferred::". This is to differentiate from predefined attributes retrieved from python module annotations. Inferred attributes are used inside the subgraph of ONNX local function. * ``False`` (default): export ``nn.Module`` forward calls as fine grained nodes. * ``True``: export all ``nn.Module`` forward calls as local function nodes. * Set of type of nn.Module: export ``nn.Module`` forward calls as local function nodes, only if the type of the ``nn.Module`` is found in the set. autograd_inlining: Deprecated. Flag used to control whether to inline autograd functions. Refer to https://github.com/pytorch/pytorch/pull/74765 for more details. Returns: :class:`torch.onnx.ONNXProgram` if dynamo is True, otherwise None. .. versionchanged:: 2.6 *training* is now deprecated. Instead, set the training mode of the model before exporting. *operator_export_type* is now deprecated. Only ONNX is supported. *do_constant_folding* is now deprecated. It is always enabled. *export_modules_as_functions* is now deprecated. *autograd_inlining* is now deprecated. .. versionchanged:: 2.7 *optimize* is now True by default. """ if dynamo is True or isinstance(model, torch.export.ExportedProgram): from torch.onnx._internal.exporter import _compat if isinstance(args, torch.Tensor): args = (args,) return _compat.export_compat( model, args, f, kwargs=kwargs, export_params=export_params, verbose=verbose, input_names=input_names, output_names=output_names, opset_version=opset_version, custom_translation_table=custom_translation_table, dynamic_axes=dynamic_axes, keep_initializers_as_inputs=keep_initializers_as_inputs, external_data=external_data, dynamic_shapes=dynamic_shapes, report=report, optimize=optimize, verify=verify, profile=profile, dump_exported_program=dump_exported_program, artifacts_dir=artifacts_dir, fallback=fallback, ) else: from torch.onnx.utils import export if dynamic_shapes: raise ValueError( "The exporter only supports dynamic shapes " "through parameter dynamic_axes when dynamo=False." ) export( model, args, f, # type: ignore[arg-type] kwargs=kwargs, export_params=export_params, verbose=verbose is True, input_names=input_names, output_names=output_names, opset_version=opset_version, dynamic_axes=dynamic_axes, keep_initializers_as_inputs=keep_initializers_as_inputs, training=training, operator_export_type=operator_export_type, do_constant_folding=do_constant_folding, custom_opsets=custom_opsets, export_modules_as_functions=export_modules_as_functions, autograd_inlining=autograd_inlining, ) return None @deprecated( "torch.onnx.dynamo_export is deprecated since 2.6.0. Please use torch.onnx.export(..., dynamo=True) instead." ) def dynamo_export( model: torch.nn.Module | Callable | torch.export.ExportedProgram, # type: ignore[name-defined] /, *model_args, export_options: ExportOptions | None = None, **model_kwargs, ) -> ONNXProgram: """Export a torch.nn.Module to an ONNX graph. .. deprecated:: 2.7 Please use ``torch.onnx.export(..., dynamo=True)`` instead. Args: model: The PyTorch model to be exported to ONNX. model_args: Positional inputs to ``model``. model_kwargs: Keyword inputs to ``model``. export_options: Options to influence the export to ONNX. 
Returns: An in-memory representation of the exported ONNX model. """ import warnings from torch.onnx import _flags from torch.onnx._internal.exporter import _compat from torch.utils import _pytree if isinstance(model, torch.export.ExportedProgram): return _compat.export_compat( model, # type: ignore[arg-type] model_args, f=None, kwargs=model_kwargs, opset_version=18, external_data=True, export_params=True, fallback=True, ) elif _flags.USE_EXPERIMENTAL_LOGIC: if export_options is not None: warnings.warn( "You are using an experimental ONNX export logic, which currently only supports dynamic shapes. " "For a more comprehensive set of export options, including advanced features, please consider using " "`torch.onnx.export(..., dynamo=True)`. ", category=DeprecationWarning, ) if export_options is not None and export_options.dynamic_shapes: # Make all shapes dynamic if it's possible def _to_dynamic_shape(x): if isinstance(x, torch.Tensor): rank = len(x.shape) dynamic_shape = {} for i in range(rank): dynamic_shape[i] = torch.export.Dim.AUTO # type: ignore[attr-defined] return dynamic_shape else: return None # model_args could be nested dynamic_shapes = _pytree.tree_map( _to_dynamic_shape, model_args, ) else: dynamic_shapes = None return _compat.export_compat( model, # type: ignore[arg-type] model_args, f=None, kwargs=model_kwargs, dynamic_shapes=dynamic_shapes, opset_version=18, external_data=True, export_params=True, fallback=True, ) else: from torch.onnx._internal._exporter_legacy import dynamo_export return dynamo_export( model, *model_args, export_options=export_options, **model_kwargs ) ```
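A minimal sketch of the `dynamo=True` path documented above. The dynamic-shape spec mirrors the `torch.export.Dim.AUTO` usage in the `dynamo_export` shim, so treat that detail as an assumption for your PyTorch version rather than the only accepted form:

```py
import torch

class SumModule(torch.nn.Module):
    def forward(self, x):
        return torch.sum(x, dim=1)

onnx_program = torch.onnx.export(
    SumModule(),
    (torch.ones(2, 3),),
    dynamic_shapes=({0: torch.export.Dim.AUTO},),  # mark the batch axis as dynamic
    dynamo=True,
)
assert onnx_program is not None  # the dynamo path returns an ONNXProgram
onnx_program.save("sum.onnx")
```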
================================================================================================================ SOURCE CODE FILE: _constants.py LINES: 1 SIZE: 0.62 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_constants.py ENCODING: utf-8 ```py """Constant values used in ONNX.""" ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO" ONNX_BASE_OPSET = 9 ONNX_MIN_OPSET = 7 ONNX_MAX_OPSET = 20 ONNX_TORCHSCRIPT_EXPORTER_MAX_OPSET = 20 # ONNX_DEFAULT_OPSET generated by tools/onnx/update_default_opset_version.py ONNX_DEFAULT_OPSET = 17 ONNX_CONSTANT_FOLDING_MIN_OPSET = 9 PYTORCH_GITHUB_ISSUES_URL = "https://github.com/pytorch/pytorch/issues" INT64_MAX = 9223372036854775807 INT32_MAX = 2147483647 INT16_MAX = 32767 INT8_MAX = 127 UINT8_MAX = 255 INT64_MIN = -9223372036854775808 INT32_MIN = -2147483648 INT16_MIN = -32768 INT8_MIN = -128 UINT8_MIN = 0 ```
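Illustrative use of the constants above; `is_supported_opset` is a hypothetical helper, not part of the module, but it mirrors the range check the exporter performs:

```py
from torch.onnx import _constants

def is_supported_opset(version: int) -> bool:
    # The TorchScript exporter accepts opsets between ONNX_MIN_OPSET and ONNX_MAX_OPSET.
    return _constants.ONNX_MIN_OPSET <= version <= _constants.ONNX_MAX_OPSET

print(is_supported_opset(_constants.ONNX_DEFAULT_OPSET))  # True
print(is_supported_opset(5))                              # False: below the minimum
```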
=================================================================================================================== SOURCE CODE FILE: _experimental.py LINES: 1 SIZE: 1.04 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_experimental.py ENCODING: utf-8 ```py """Experimental classes and functions used by ONNX export.""" import dataclasses from collections.abc import Mapping, Sequence from typing import Optional, Union import torch import torch._C._onnx as _C_onnx @dataclasses.dataclass class ExportOptions: """Arguments used by :func:`torch.onnx.export`.""" # TODO(justinchuby): Deprecate and remove this class. export_params: bool = True verbose: bool = False training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL input_names: Optional[Sequence[str]] = None output_names: Optional[Sequence[str]] = None operator_export_type: _C_onnx.OperatorExportTypes = _C_onnx.OperatorExportTypes.ONNX opset_version: Optional[int] = None do_constant_folding: bool = True dynamic_axes: Optional[Mapping[str, Union[Mapping[int, str], Sequence[int]]]] = None keep_initializers_as_inputs: Optional[bool] = None custom_opsets: Optional[Mapping[str, int]] = None export_modules_as_functions: Union[bool, set[type[torch.nn.Module]]] = False ```
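The dataclass above simply bundles the legacy `torch.onnx.export` keyword arguments into one object. A small sketch (internal API, shown only to clarify the field defaults):

```py
from torch.onnx._experimental import ExportOptions

opts = ExportOptions(opset_version=17, input_names=["x"], output_names=["y"])
print(opts.do_constant_folding)  # True: the dataclass default
print(opts.training)             # the default training mode (EVAL)
```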
============================================================================================================ SOURCE CODE FILE: _flags.py LINES: 1 SIZE: 1.32 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_flags.py ENCODING: utf-8 ```py """Internal feature flags for torch.onnx. NOTE: These flags are experimental only. Any flag here can be removed at any time without notice. """ import logging import os logger = logging.getLogger(__name__) def _load_boolean_flag( name: str, *, this_will: str, deprecated: bool = False, default: bool = False, ) -> bool: """Load a boolean flag from environment variable. Args: name: The name of the environment variable. this_will: A string that describes what this flag will do. deprecated: Whether this flag is deprecated. default: The default value if envvar not defined. """ undefined = os.getenv(name) is None state = os.getenv(name) == "1" if state: if deprecated: logger.error( "Experimental flag %s is deprecated. Please remove it from your environment.", name, ) else: logger.warning( "Experimental flag %s is enabled. This will %s.", name, this_will ) if undefined: state = default return state USE_EXPERIMENTAL_LOGIC: bool = _load_boolean_flag( "TORCH_ONNX_USE_EXPERIMENTAL_LOGIC", this_will="use ExportedProgram and the new torch.onnx export logic", default=True, ) ```
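Illustrative only: how `_load_boolean_flag` resolves a value. User code never needs to re-run the helper by hand; the sketch just makes the precedence clear (an explicitly set variable always wins over `default`):

```py
import os
from torch.onnx import _flags

os.environ["TORCH_ONNX_USE_EXPERIMENTAL_LOGIC"] = "0"
state = _flags._load_boolean_flag(
    "TORCH_ONNX_USE_EXPERIMENTAL_LOGIC",
    this_will="use ExportedProgram and the new torch.onnx export logic",
    default=True,
)
print(state)  # False: the variable is defined and is not "1", so the default is ignored
```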
============================================================================================================== SOURCE CODE FILE: _globals.py LINES: 1 SIZE: 3.02 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_globals.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs """Globals used internally by the ONNX exporter. Do not use this module outside of `torch.onnx` and its tests. Be very judicious when adding any new global variables. Do not create new global variables unless they are absolutely necessary. """ import torch._C._onnx as _C_onnx # This module should only depend on _constants and nothing else in torch.onnx to keep # dependency direction clean. from torch.onnx import _constants class _InternalGlobals: """Globals used internally by ONNX exporter. NOTE: Be very judicious when adding any new variables. Do not create new global variables unless they are absolutely necessary. """ def __init__(self) -> None: self._export_onnx_opset_version = _constants.ONNX_DEFAULT_OPSET self._training_mode: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL self._in_onnx_export: bool = False # Whether the user's model is training during export self.export_training: bool = False self.operator_export_type: _C_onnx.OperatorExportTypes = ( _C_onnx.OperatorExportTypes.ONNX ) self.onnx_shape_inference: bool = True self._autograd_inlining: bool = True @property def training_mode(self): """The training mode for the exporter.""" return self._training_mode @training_mode.setter def training_mode(self, training_mode: _C_onnx.TrainingMode): if not isinstance(training_mode, _C_onnx.TrainingMode): raise TypeError( "training_mode must be of type 'torch.onnx.TrainingMode'. This is " "likely a bug in torch.onnx." ) self._training_mode = training_mode @property def export_onnx_opset_version(self) -> int: """Opset version used during export.""" return self._export_onnx_opset_version @export_onnx_opset_version.setter def export_onnx_opset_version(self, value: int): supported_versions = range( _constants.ONNX_MIN_OPSET, _constants.ONNX_MAX_OPSET + 1 ) if value not in supported_versions: raise ValueError(f"Unsupported ONNX opset version: {value}") self._export_onnx_opset_version = value @property def in_onnx_export(self) -> bool: """Whether it is in the middle of ONNX export.""" return self._in_onnx_export @in_onnx_export.setter def in_onnx_export(self, value: bool): if type(value) is not bool: raise TypeError("in_onnx_export must be a boolean") self._in_onnx_export = value @property def autograd_inlining(self) -> bool: """Whether Autograd must be inlined.""" return self._autograd_inlining @autograd_inlining.setter def autograd_inlining(self, value: bool): if type(value) is not bool: raise TypeError("autograd_inlining must be a boolean") self._autograd_inlining = value GLOBALS = _InternalGlobals() ```
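A small sketch of the validation performed by the property setters above (demonstration only; the exporter normally manages `GLOBALS` itself):

```py
from torch.onnx._globals import GLOBALS

GLOBALS.export_onnx_opset_version = 17      # accepted: inside [ONNX_MIN_OPSET, ONNX_MAX_OPSET]

try:
    GLOBALS.export_onnx_opset_version = 3   # rejected: below ONNX_MIN_OPSET
except ValueError as err:
    print(err)                              # "Unsupported ONNX opset version: 3"
```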
======================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.00 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\__init__.py ENCODING: utf-8 ```py ```
================================================================================================================================ SOURCE CODE FILE: _exporter_legacy.py LINES: 6 SIZE: 35.24 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\_exporter_legacy.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from __future__ import annotations __all__ = [ "DiagnosticOptions", "ExportOptions", "ONNXRuntimeOptions", "InvalidExportOptionsError", "OnnxRegistry", "UnsatisfiedDependencyError", "dynamo_export", "enable_fake_mode", ] import abc import contextlib import dataclasses import logging import warnings from collections import defaultdict from typing import Any, Callable, TYPE_CHECKING, TypeVar from typing_extensions import deprecated import torch import torch._ops import torch.utils._pytree as pytree from torch.onnx import errors from torch.onnx._internal import io_adapter from torch.onnx._internal._lazy_import import onnxscript_apis, onnxscript_ir as ir from torch.onnx._internal.diagnostics import infra from torch.onnx._internal.exporter import _constants, _onnx_program from torch.onnx._internal.fx import ( decomposition_table, patcher as patcher, registration, ) # We can only import onnx from this module in a type-checking context to ensure that # 'import torch.onnx' continues to work without having 'onnx' installed. We fully # 'import onnx' inside of dynamo_export (by way of _assert_dependencies). if TYPE_CHECKING: import io from collections.abc import Mapping, Sequence import onnxruntime import onnxscript from torch._subclasses import fake_tensor from torch.onnx._internal.fx import diagnostics _PYTORCH_GITHUB_ISSUES_URL = "https://github.com/pytorch/pytorch/issues" """The URL to the PyTorch GitHub issues page.""" _DEFAULT_FAILED_EXPORT_SARIF_LOG_PATH = "report_dynamo_export.sarif" """The default path to write the SARIF log to if the export fails.""" log = logging.getLogger(__name__) DiagnosticOptions = infra.DiagnosticOptions @dataclasses.dataclass class ONNXFakeContext: """A dataclass used to store context for model export using FakeTensor. This dataclass stores the FakeTensorMode instance used to convert real tensors and model parameters into fake tensors. This :attr:`ONNXFakeContext.fake_mode` is reused internally during tracing of a :class:`torch.nn.Module` into a FX :class:`GraphModule`. """ fake_mode: fake_tensor.FakeTensorMode """The fake tensor mode used for tracing model using fake tensors and parameters.""" state_dict_paths: tuple[str | io.BytesIO | dict[str, Any]] | None = None """List of paths of files that contain the model :meth:`state_dict`""" @deprecated( "torch.onnx.dynamo_export is deprecated since 2.7.0. Please use torch.onnx.export(..., dynamo=True) instead.", ) class OnnxRegistry: """Registry for ONNX functions. .. deprecated:: 2.7 Please use ``torch.onnx.export(..., dynamo=True)`` instead. The registry maintains a mapping from qualified names to symbolic functions under a fixed opset version. It supports registering custom onnx-script functions and for dispatcher to dispatch calls to the appropriate function. """ def __init__(self) -> None: """Initializes the registry""" # NOTE: _registry is the registry maps OpNameto a list of ONNXFunctions. It is important # not to directly modify this variable. Instead, access to it should be done through # the public methods: register_custom_op, get_ops, and is_registered_op. 
self._registry: dict[registration.OpName, list[registration.ONNXFunction]] = ( defaultdict(list) ) self._opset_version = _constants.TORCHLIB_OPSET warnings.warn( f"torch.onnx.dynamo_export only implements opset version {self._opset_version} for now. If you need to use a " "different opset version, please register them with register_custom_op." ) self._initiate_registry_from_torchlib() @property def opset_version(self) -> int: """The ONNX opset version the exporter should target.""" return self._opset_version def _initiate_registry_from_torchlib(self) -> None: """Populates the registry with ATen functions from torchlib. Args: torchlib_registry: The torchlib registry to use for populating the registry. """ for meta in onnxscript_apis.get_torchlib_ops(): internal_name_instance = registration.OpName.from_qualified_name( meta.qualified_name ) symbolic_function = registration.ONNXFunction( onnx_function=meta.function, # type: ignore[arg-type] op_full_name=internal_name_instance.qualified_name(), is_custom=False, is_complex=meta.is_complex, ) self._register(internal_name_instance, symbolic_function) def _register( self, internal_qualified_name: registration.OpName, symbolic_function: registration.ONNXFunction, ) -> None: """Registers a ONNXFunction to an operator. Args: internal_qualified_name: The qualified name of the operator to register: OpName. symbolic_function: The ONNXFunction to register. """ self._registry[internal_qualified_name].append(symbolic_function) def register_op( self, function: onnxscript.OnnxFunction | onnxscript.TracedOnnxFunction, namespace: str, op_name: str, overload: str | None = None, is_complex: bool = False, ) -> None: """Registers a custom operator: torch.ops.<namespace>.<op_name>.<overload>. Args: function: The onnx-sctip function to register. namespace: The namespace of the operator to register. op_name: The name of the operator to register. overload: The overload of the operator to register. If it's default overload, leave it to None. is_complex: Whether the function is a function that handles complex valued inputs. Raises: ValueError: If the name is not in the form of 'namespace::op'. """ internal_name_instance = registration.OpName.from_name_parts( namespace=namespace, op_name=op_name, overload=overload ) symbolic_function = registration.ONNXFunction( onnx_function=function, op_full_name=internal_name_instance.qualified_name(), is_custom=True, is_complex=is_complex, ) self._register(internal_name_instance, symbolic_function) def get_op_functions( self, namespace: str, op_name: str, overload: str | None = None ) -> list[registration.ONNXFunction] | None: """Returns a list of ONNXFunctions for the given op: torch.ops.<namespace>.<op_name>.<overload>. The list is ordered by the time of registration. The custom operators should be in the second half of the list. Args: namespace: The namespace of the operator to get. op_name: The name of the operator to get. overload: The overload of the operator to get. If it's default overload, leave it to None. Returns: A list of ONNXFunctions corresponding to the given name, or None if the name is not in the registry. """ internal_name_instance = registration.OpName.from_name_parts( namespace=namespace, op_name=op_name, overload=overload ) return self._registry.get(internal_name_instance) def is_registered_op( self, namespace: str, op_name: str, overload: str | None = None ) -> bool: """Returns whether the given op is registered: torch.ops.<namespace>.<op_name>.<overload>. Args: namespace: The namespace of the operator to check. 
op_name: The name of the operator to check. overload: The overload of the operator to check. If it's default overload, leave it to None. Returns: True if the given op is registered, otherwise False. """ functions = self.get_op_functions( namespace=namespace, op_name=op_name, overload=overload ) return functions is not None def _all_registered_ops(self) -> set[str]: """Returns the set of all registered function names.""" return { op_name_class.qualified_name() for op_name_class in self._registry.keys() } @deprecated( "torch.onnx.dynamo_export is deprecated since 2.7.0. Please use torch.onnx.export(..., dynamo=True) instead.", category=None, ) class ExportOptions: """Options to influence the TorchDynamo ONNX exporter. .. deprecated:: 2.7 Please use ``torch.onnx.export(..., dynamo=True)`` instead. Attributes: dynamic_shapes: Shape information hint for input/output tensors. When ``None``, the exporter determines the most compatible setting. When ``True``, all input shapes are considered dynamic. When ``False``, all input shapes are considered static. diagnostic_options: The diagnostic options for the exporter. fake_context: The fake context used for symbolic tracing. onnx_registry: The ONNX registry used to register ATen operators to ONNX functions. """ dynamic_shapes: bool | None = None """Shape information hint for input/output tensors. - ``None``: the exporter determines the most compatible setting. - ``True``: all input shapes are considered dynamic. - ``False``: all input shapes are considered static. """ diagnostic_options: DiagnosticOptions """The diagnostic options for the exporter.""" fake_context: ONNXFakeContext | None = None """The fake context used for symbolic tracing.""" onnx_registry: OnnxRegistry | None = None """The ONNX registry used to register ATen operators to ONNX functions.""" def __init__( self, *, dynamic_shapes: bool | None = None, fake_context: ONNXFakeContext | None = None, onnx_registry: OnnxRegistry | None = None, diagnostic_options: DiagnosticOptions | None = None, ): self.dynamic_shapes = dynamic_shapes self.fake_context = fake_context self.onnx_registry = onnx_registry self.diagnostic_options = diagnostic_options or DiagnosticOptions() @deprecated( "torch.onnx.dynamo_export is deprecated since 2.7.0. Please use torch.onnx.export(..., dynamo=True) instead.", category=None, ) class ResolvedExportOptions(ExportOptions): """Consolidates :class:`ExportOptions` with default values. All unspecified options from :class:`ExportOptions` are assigned a default value. This is an internal class and its API may be changed at any time without notice. """ # Public attributes MUST be redefined below without ``Optional[]`` from ``ExportOptions`` dynamic_shapes: bool diagnostic_options: DiagnosticOptions fake_context: ONNXFakeContext onnx_registry: OnnxRegistry # Private only attributes decomposition_table: dict[torch._ops.OpOverload, Callable] """A dictionary that maps operators to their decomposition functions.""" onnxfunction_dispatcher: ( torch.onnx._internal.fx.onnxfunction_dispatcher.OnnxFunctionDispatcher ) """The ONNX dispatcher used to dispatch ATen operators to ONNX functions.""" fx_tracer: FXGraphExtractor """The FXGraphExtractor instance used to extract the FX graph from the model.""" diagnostic_context: diagnostics.DiagnosticContext """The diagnostics context for the export. 
Responsible for recording diagnostics, logging diagnostics, and generating the SARIF log.""" def __init__( self, options: ExportOptions | ResolvedExportOptions, model: torch.nn.Module | Callable | None = None, # type: ignore[name-defined] ): from torch.onnx._internal.fx import ( # TODO: Prevent circular dep diagnostics, dynamo_graph_extractor, ) if isinstance(options, ResolvedExportOptions): self.dynamic_shapes = options.dynamic_shapes self.diagnostic_options = options.diagnostic_options self.fake_context = options.fake_context self.fx_tracer = options.fx_tracer self.onnx_registry = options.onnx_registry self.onnxfunction_dispatcher = options.onnxfunction_dispatcher self.decomposition_table = options.decomposition_table self.diagnostic_context = options.diagnostic_context else: T = TypeVar("T") def resolve(value: T | None, fallback: T | Callable[[], T]) -> T: if value is not None: return value if callable(fallback): return fallback() return fallback self.dynamic_shapes = resolve(options.dynamic_shapes, False) self.diagnostic_options = resolve( options.diagnostic_options, DiagnosticOptions() ) self.fx_tracer = dynamo_graph_extractor.DynamoExport() self.fake_context = resolve(options.fake_context, None) # type: ignore[arg-type] self.diagnostic_context = diagnostics.DiagnosticContext( "torch.onnx.dynamo_export", torch.__version__, self.diagnostic_options, ) self.onnx_registry = resolve(options.onnx_registry, OnnxRegistry()) self.decomposition_table = ( decomposition_table.create_onnx_friendly_decomposition_table( # type: ignore[assignment] self.onnx_registry ) ) from torch.onnx._internal.fx import onnxfunction_dispatcher self.onnxfunction_dispatcher = ( onnxfunction_dispatcher.OnnxFunctionDispatcher( self.onnx_registry, self.diagnostic_context, ) ) for key in dir(options): if not key.startswith("_"): # skip private attributes assert hasattr(self, key), f"Unresolved option '{key}'" @contextlib.contextmanager def enable_fake_mode(): """Enable fake mode for the duration of the context. Internally it instantiates a :class:`torch._subclasses.fake_tensor.FakeTensorMode` context manager that converts user input and model parameters into :class:`torch._subclasses.fake_tensor.FakeTensor`. A :class:`torch._subclasses.fake_tensor.FakeTensor` is a :class:`torch.Tensor` with the ability to run PyTorch code without having to actually do computation through tensors allocated on a ``meta`` device. Because there is no actual data being allocated on the device, this API allows for initializing and exporting large models without the actual memory footprint needed for executing it. It is highly recommended to initialize the model in fake mode when exporting models that are too large to fit into memory. .. note:: This function does not support torch.onnx.export(..., dynamo=True, optimize=True). Please call ONNXProgram.optimize() outside of the function after the model is exported. Example:: # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) >>> import torch >>> class MyModel(torch.nn.Module): # Model with a parameter ... def __init__(self) -> None: ... super().__init__() ... self.weight = torch.nn.Parameter(torch.tensor(42.0)) ... def forward(self, x): ... return self.weight + x >>> with torch.onnx.enable_fake_mode(): ... # When initialized in fake mode, the model's parameters are fake tensors ... # They do not take up memory so we can initialize large models ... my_nn_module = MyModel() ... 
arg1 = torch.randn(2, 2, 2) >>> onnx_program = torch.onnx.export(my_nn_module, (arg1,), dynamo=True, optimize=False) >>> # Saving model WITHOUT initializers (only the architecture) >>> onnx_program.save( ... "my_model_without_initializers.onnx", ... include_initializers=False, ... keep_initializers_as_inputs=True, ... ) >>> # Saving model WITH initializers after applying concrete weights >>> onnx_program.apply_weights({"weight": torch.tensor(42.0)}) >>> onnx_program.save("my_model_with_initializers.onnx") .. warning:: This API is experimental and is *NOT* backward-compatible. """ from torch._subclasses import fake_tensor from torch.fx.experimental.symbolic_shapes import ShapeEnv # This overrides the internal `FakeTensorMode` instance created by `torch._dynamo.export`[1]. # It is a good idea to keep them in sync (constructor args) to maintain the same default behavior # [1] `torch/_dynamo/output_graph.py::InstructionTranslator::OutputGraph.__init__` # Mixed fake/real tensors are only allowed when `torch.onnx.dynamo_export` is not called within `FakeTensorMode` # This is needed because models can create new parameters during `forward(self, *args, **kwargs)` run fake_mode = fake_tensor.FakeTensorMode( allow_non_fake_inputs=not torch._guards.detect_fake_mode(), shape_env=ShapeEnv( allow_scalar_outputs=False, allow_dynamic_output_shape_ops=False ), ) # The patcher is needed for when user calls `fake_model.load_state_dict(...)` within fake mode patcher_context = patcher.ONNXTorchPatcher() fake_context = ONNXFakeContext(fake_mode=fake_mode) with fake_mode, patcher_context: yield fake_context fake_context.state_dict_paths = tuple( patcher_context.paths, ) # type: ignore[assignment] @deprecated( "torch.onnx.dynamo_export is deprecated since 2.7.0. Please use torch.onnx.export(..., dynamo=True) instead.", ) class ONNXRuntimeOptions: """Options to influence the execution of the ONNX model through ONNX Runtime. .. deprecated:: 2.7 Please use ``torch.onnx.export(..., dynamo=True)`` instead. Attributes: session_options: ONNX Runtime session options. execution_providers: ONNX Runtime execution providers to use during model execution. execution_provider_options: ONNX Runtime execution provider options. """ session_options: Sequence[onnxruntime.SessionOptions] | None = None """ONNX Runtime session options.""" execution_providers: Sequence[str | tuple[str, dict[Any, Any]]] | None = None """ONNX Runtime execution providers to use during model execution.""" execution_provider_options: Sequence[dict[Any, Any]] | None = None """ONNX Runtime execution provider options.""" def __init__( self, *, session_options: Sequence[onnxruntime.SessionOptions] | None = None, execution_providers: Sequence[str | tuple[str, dict[Any, Any]]] | None = None, execution_provider_options: Sequence[dict[Any, Any]] | None = None, ): self.session_options = session_options self.execution_providers = execution_providers self.execution_provider_options = execution_provider_options class FXGraphExtractor(abc.ABC): """Abstract interface for FX graph extractor engines. This class isolates FX extraction logic from the rest of the export logic. 
That allows a single ONNX exporter that can leverage different FX graphs.""" def __init__(self) -> None: super().__init__() self.input_adapter: io_adapter.InputAdapter = io_adapter.InputAdapter() self.output_adapter: io_adapter.OutputAdapter = io_adapter.OutputAdapter() @abc.abstractmethod def generate_fx( self, options: ResolvedExportOptions, model: torch.nn.Module | Callable, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], ) -> torch.fx.GraphModule: """Analyzes user ``model`` and generates a FX graph. Args: options: The export options. model: The user model. model_args: The model's positional input arguments. model_kwargs: The model's keyword input arguments. Returns: The generated FX Graph. """ ... # TODO: Design the passes API @abc.abstractmethod def pre_export_passes( self, options: ResolvedExportOptions, original_model: torch.nn.Module | Callable, fx_module: torch.fx.GraphModule, fx_module_args: Sequence[Any], ): """Applies pre-export passes to the FX graph. Pre-export passes are FX-to-FX graph transformations that make the graph more palatable for the FX-to-ONNX conversion. For example, it can be used to flatten model input/output, add explicit casts to the graph, replace/decompose operators, functionalize the graph, etc. """ ... class Exporter: def __init__( self, options: ResolvedExportOptions, model: torch.nn.Module | Callable, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], ): self.options = options assert self.options is not None self.model = model self.model_args = model_args self.model_kwargs = model_kwargs # TODO: https://github.com/pytorch/pytorch/issues/107714 # NOTE: FXSymbolicTracer would fail in this assert, as it does not use `enable_fake_mode` from torch.onnx._internal.fx import fx_symbolic_graph_extractor if not isinstance( self.options.fx_tracer, fx_symbolic_graph_extractor.FXSymbolicTracer ): self._assert_fake_tensor_mode() def export(self) -> _onnx_program.ONNXProgram: from torch.export._trace import ( # TODO: Prevent circular dependency DEFAULT_EXPORT_DYNAMO_CONFIG, ) # TODO: Defer `import onnxscript` out of `import torch` path # https://github.com/pytorch/pytorch/issues/103764 from torch.onnx._internal.fx import decomposition_skip with ( self.options.diagnostic_context, decomposition_skip.enable_decomposition_skips(self.options), torch._dynamo.config.patch( dataclasses.asdict(DEFAULT_EXPORT_DYNAMO_CONFIG) ), ): graph_module = self.options.fx_tracer.generate_fx( self.options, self.model, self.model_args, self.model_kwargs ) # TODO: Defer `import onnxscript` out of `import torch` path # https://github.com/pytorch/pytorch/issues/103764 from torch.onnx._internal.fx import fx_onnx_interpreter fx_interpreter = fx_onnx_interpreter.FxOnnxInterpreter( diagnostic_context=self.options.diagnostic_context ) onnxscript_graph = fx_interpreter.run( fx_graph_module=graph_module, onnxfunction_dispatcher=self.options.onnxfunction_dispatcher, ) # NOTE: Filter out the initializers with fake tensors when it's fake_mode exporting. # Otherwise, the ONNX exporter will fail: RuntimeError: basic_string::_M_construct null # not valid. # Concrete data is expected to be filled for those initializers later during `ONNXProgram.save`. 
if self.options.fake_context is not None: initializers_with_real_tensors: dict[str, torch.Tensor] = {} for ( initializer_name, initializer, ) in onnxscript_graph.initializers.items(): if not isinstance(initializer, torch._subclasses.FakeTensor): initializers_with_real_tensors[initializer_name] = initializer onnxscript_graph.initializers = initializers_with_real_tensors # Export TorchScript graph to ONNX ModelProto. onnx_model = onnxscript_graph.to_model_proto( self.options.onnx_registry.opset_version, ) ir_model = ir.serde.deserialize_model(onnx_model) try: ir_model = onnxscript_apis.optimize(ir_model) except Exception as e: warnings.warn( "ONNXScript optimizer failed. Skipping optimization. " "\n\nPLEASE REPORT A BUG AT https://github.com/microsoft/onnxscript/issues " f"\n\nDetail:\n{e}" ) return _onnx_program.ONNXProgram(ir_model, None) def _assert_fake_tensor_mode(self): """Asserts that the model and its input do not contain fake tensors.""" # Case 1: Model with fake inputs/weights and without enabling fake mode has_any_fake_tensor = pytree.tree_any( lambda x: isinstance(x, torch._subclasses.FakeTensor), (self.model_args, self.model_kwargs), ) has_any_fake_param_or_buffer = False if isinstance(self.model, torch.nn.Module): has_any_fake_param_or_buffer = pytree.tree_any( lambda x: isinstance(x, torch._subclasses.FakeTensor), (self.model.parameters(), self.model.buffers()), ) if ( has_any_fake_tensor or has_any_fake_param_or_buffer ) and not self.options.fake_context: raise RuntimeError( "Cannot export a model with fake inputs/weights without enabling fake mode.", ) # Case 2: Model with non fake inputs/weights and enabled fake mode has_any_non_fake_tensors = pytree.tree_any( lambda x: isinstance(x, torch.Tensor) and not isinstance(x, torch._subclasses.FakeTensor), (self.model_args, self.model_kwargs), ) has_any_non_fake_param_or_buffer = False if isinstance(self.model, torch.nn.Module): has_any_non_fake_param_or_buffer = pytree.tree_any( lambda x: isinstance(x, torch.Tensor) and not isinstance(x, torch._subclasses.FakeTensor), (self.model.parameters(), self.model.buffers()), ) if ( has_any_non_fake_tensors or has_any_non_fake_param_or_buffer ) and self.options.fake_context: raise RuntimeError( "Cannot export a model with non fake inputs/weights and enabled fake mode.", ) class UnsatisfiedDependencyError(RuntimeError): """Raised when an ONNX exporter dependency cannot be satisfied.""" def __init__(self, package_name: str, message: str): super().__init__(message) self.package_name = package_name class InvalidExportOptionsError(RuntimeError): """Raised when user specified an invalid value for the :class:`ExportOptions`.""" def _assert_dependencies(export_options: ResolvedExportOptions): opset_version = export_options.onnx_registry.opset_version def missing_package(package_name: str, exc_info: logging._ExcInfoType): message = ( f"Please install the `{package_name}` package " f"(e.g. `python -m pip install {package_name}`)." ) log.fatal(message, exc_info=exc_info) return UnsatisfiedDependencyError(package_name, message) def missing_opset(package_name: str): message = ( f"The installed `{package_name}` does not support the specified ONNX opset " f"version {opset_version}. Install a newer `{package_name}` package or " f"specify an older opset version." 
) log.fatal(message) return UnsatisfiedDependencyError(package_name, message) try: import onnx except ImportError as e: raise missing_package("onnx", e) from e if onnx.defs.onnx_opset_version() < opset_version: raise missing_opset("onnx") try: # PyTorch runs lintrunner in CI without onnxscript installed import onnxscript # type: ignore[import] except ImportError as e: raise missing_package("onnxscript", e) from e if not isinstance( onnxscript.onnx_opset.all_opsets[("", opset_version)], onnxscript.values.Opset, ): raise missing_opset("onnxscript") def dynamo_export( model: torch.nn.Module | Callable, /, *model_args, export_options: ExportOptions | None = None, **model_kwargs, ) -> _onnx_program.ONNXProgram: """Export a torch.nn.Module to an ONNX graph. .. deprecated:: 2.7 Please use ``torch.onnx.export(..., dynamo=True)`` instead. Args: model: The PyTorch model to be exported to ONNX. model_args: Positional inputs to ``model``. model_kwargs: Keyword inputs to ``model``. export_options: Options to influence the export to ONNX. Returns: An in-memory representation of the exported ONNX model. **Example 1 - Simplest export** :: class MyModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(2, 2) def forward(self, x, bias=None): out = self.linear(x) out = out + bias return out model = MyModel() kwargs = {"bias": 3.0} args = (torch.randn(2, 2, 2),) onnx_program = torch.onnx.dynamo_export(model, *args, **kwargs).save( "my_simple_model.onnx" ) **Example 2 - Exporting with dynamic shapes** :: # The previous model can be exported with dynamic shapes export_options = torch.onnx.ExportOptions(dynamic_shapes=True) onnx_program = torch.onnx.dynamo_export( model, *args, **kwargs, export_options=export_options ) onnx_program.save("my_dynamic_model.onnx") By printing input dynamic dimensions we can see the input shape is no longer (2,2,2) :: >>> print(onnx_program.model_proto.graph.input[0]) name: "arg0" type { tensor_type { elem_type: 1 shape { dim { dim_param: "arg0_dim_0" } dim { dim_param: "arg0_dim_1" } dim { dim_param: "arg0_dim_2" } } } } """ if export_options is not None: resolved_export_options = ( export_options if isinstance(export_options, ResolvedExportOptions) else ResolvedExportOptions(export_options, model=model) ) else: resolved_export_options = ResolvedExportOptions(ExportOptions(), model=model) _assert_dependencies(resolved_export_options) try: from torch._dynamo import config as _dynamo_config with _dynamo_config.patch(do_not_emit_runtime_asserts=True): return Exporter( options=resolved_export_options, model=model, model_args=model_args, model_kwargs=model_kwargs, ).export() except Exception as e: sarif_report_path = _DEFAULT_FAILED_EXPORT_SARIF_LOG_PATH resolved_export_options.diagnostic_context.dump(sarif_report_path) message = ( f"Failed to export the model to ONNX. Generating SARIF report at '{sarif_report_path}'. " "SARIF is a standard format for the output of static analysis tools. " "SARIF logs can be loaded in VS Code SARIF viewer extension, " "or SARIF web viewer (https://microsoft.github.io/sarif-web-component/). 
" f"Please report a bug on PyTorch Github: {_PYTORCH_GITHUB_ISSUES_URL}" ) raise errors.OnnxExporterError(message) from e def common_pre_export_passes( options: ResolvedExportOptions, original_model: torch.nn.Module | Callable, fx_module: torch.fx.GraphModule, fx_module_args: Sequence[Any], ): # TODO: Import here to prevent circular dependency from torch.onnx._internal.fx import analysis, passes diagnostic_context = options.diagnostic_context # Apply decomposition table to the input graph. module = passes.Decompose( diagnostic_context, fx_module, options.decomposition_table, enable_dynamic_axes=options.dynamic_shapes, allow_fake_constant=options.fake_context is not None, ).run(*fx_module_args) # ONNX does not support views and mutations. # Functionalize to get a semantically equivalent graph without mutations. module = passes.Functionalize( diagnostic_context, module, enable_dynamic_axes=options.dynamic_shapes, allow_fake_constant=options.fake_context is not None, ).run(*fx_module_args) # Input mutations are detected and distilled after `Functionalize` pass. # Remove them since ONNX inference does not need them. module = passes.RemoveInputMutation(diagnostic_context, module).run(*fx_module_args) # ONNX does not support concept of (implicit) type promotion. # Insert type casts explicitly where needed. module = passes.InsertTypePromotion(diagnostic_context, module).run() analysis.UnsupportedFxNodesAnalysis( diagnostic_context, module, options.onnxfunction_dispatcher ).analyze(infra.levels.ERROR) if isinstance(original_model, torch.nn.Module): module = passes.RestoreParameterAndBufferNames( diagnostic_context, module, original_model ).run() # This operation should be invoked as the last pre export pass. # See [NOTE: Modularize pass ordering] module = passes.Modularize(diagnostic_context, module).run() # ONNX does not support None inputs. During graph building, all None inputs # are removed. Here we register this step to input adapter. options.fx_tracer.input_adapter.append_step(io_adapter.RemoveNoneInputStep()) # NOTE: temp workaround for https://github.com/pytorch/pytorch/issues/99534 # Dynamo doesn't support non-tensor inputs. options.fx_tracer.input_adapter.append_step(io_adapter.RemoveNonTensorInputStep()) # ONNX does not support complex inputs. During graph building, all complex inputs # are converted to real representation inputs. Here we register this step to # input/output adapter. options.fx_tracer.input_adapter.append_step( io_adapter.ConvertComplexToRealRepresentationInputStep() ) # ONNX can't represent collection types (e.g., dictionary, tuple of tuple of # tensor, etc), we flatten the collection and register each element as output. options.fx_tracer.output_adapter.append_step(io_adapter.FlattenOutputStep()) # Output post-processing steps should happen after `FlattenOutputStep`. options.fx_tracer.output_adapter.append_step( io_adapter.ConvertComplexToRealRepresentationOutputStep() ) return module ```
============================================================================================================================ SOURCE CODE FILE: _lazy_import.py LINES: 1 SIZE: 1.20 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\_lazy_import.py ENCODING: utf-8 ```py """Utility to lazily import modules.""" from __future__ import annotations import importlib from typing import Any, TYPE_CHECKING class _LazyModule: """Lazily import a module.""" def __init__(self, module_name: str) -> None: self._name = module_name self._module: Any = None def __repr__(self) -> str: return f"<lazy module '{self._name}'>" def __getattr__(self, attr: str) -> object: if self._module is None: self._module = importlib.import_module(".", self._name) return getattr(self._module, attr) # Import the following modules during type checking to enable code intelligence features, # such as auto-completion in tools like pylance, even when these modules are not explicitly # imported in user code. # NOTE: Add additional used imports here. if TYPE_CHECKING: import onnx import onnxscript import onnxscript._framework_apis.torch_2_7 as onnxscript_apis onnxscript_ir = onnxscript.ir else: onnx = _LazyModule("onnx") onnxscript = _LazyModule("onnxscript") onnxscript_ir = _LazyModule("onnxscript.ir") onnxscript_apis = _LazyModule("onnxscript._framework_apis.torch_2_7") ```
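The lazy-module pattern above defers the real `import` until the first attribute access, which keeps `import torch` cheap when `onnx`/`onnxscript` are not installed. A self-contained sketch of the same idea (standalone, not tied to torch internals):

```py
import importlib
from typing import Any

class LazyModule:
    """Proxy that imports the wrapped module on first attribute access."""

    def __init__(self, module_name: str) -> None:
        self._name = module_name
        self._module: Any = None

    def __getattr__(self, attr: str) -> object:
        if self._module is None:                  # the import happens here, once
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

json = LazyModule("json")                         # nothing imported yet
print(json.dumps({"lazy": True}))                 # first access triggers the import
```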
==================================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.45 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\__init__.py ENCODING: utf-8 ```py from ._diagnostic import ( create_export_diagnostic_context, diagnose, engine, export_context, ExportDiagnosticEngine, TorchScriptOnnxExportDiagnostic, ) from ._rules import rules from .infra import levels __all__ = [ "TorchScriptOnnxExportDiagnostic", "ExportDiagnosticEngine", "rules", "levels", "engine", "export_context", "create_export_diagnostic_context", "diagnose", ] ```
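A usage sketch for the names re-exported above; `engine` and `create_diagnostic_context` are defined in `_diagnostic.py` just below. These are internal helpers, so the sketch is for illustration only:

```py
import torch
from torch.onnx._internal.diagnostics import engine

# One DiagnosticContext is created per export call; the engine also keeps a
# background context for diagnostics emitted outside an export.
context = engine.create_diagnostic_context("torch.onnx.export", torch.__version__)
print(len(engine.contexts))          # 1, assuming a freshly created engine

engine.dump("example_report.sarif")  # serialize all contexts into one SARIF log
engine.clear()                       # drop the recorded contexts and diagnostics
```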
======================================================================================================================================= SOURCE CODE FILE: _diagnostic.py LINES: 2 SIZE: 7.02 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\_diagnostic.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs """Diagnostic components for TorchScript based ONNX export, i.e. `torch.onnx.export`.""" from __future__ import annotations import contextlib import gzip from typing import TYPE_CHECKING import torch from torch.onnx._internal.diagnostics import infra from torch.onnx._internal.diagnostics.infra import formatter, sarif from torch.onnx._internal.diagnostics.infra.sarif import version as sarif_version from torch.utils import cpp_backtrace if TYPE_CHECKING: from collections.abc import Generator def _cpp_call_stack(frames_to_skip: int = 0, frames_to_log: int = 32) -> infra.Stack: """Returns the current C++ call stack. This function utilizes `torch.utils.cpp_backtrace` to get the current C++ call stack. The returned C++ call stack is a concatenated string of the C++ call stack frames. Each frame is separated by a newline character, in the same format of r"frame #[0-9]+: (?P<frame_info>.*)". More info at `c10/util/Backtrace.cpp`. """ frames = cpp_backtrace.get_cpp_backtrace(frames_to_skip, frames_to_log).split("\n") frame_messages = [] for frame in frames: segments = frame.split(":", 1) if len(segments) == 2: frame_messages.append(segments[1].strip()) else: frame_messages.append("<unknown frame>") return infra.Stack( frames=[ infra.StackFrame(location=infra.Location(message=message)) for message in frame_messages ] ) class TorchScriptOnnxExportDiagnostic(infra.Diagnostic): """Base class for all export diagnostics. This class is used to represent all export diagnostics. It is a subclass of infra.Diagnostic, and adds additional methods to add more information to the diagnostic. """ python_call_stack: infra.Stack | None = None cpp_call_stack: infra.Stack | None = None def __init__( self, *args, frames_to_skip: int = 1, cpp_stack: bool = False, **kwargs, ) -> None: super().__init__(*args, **kwargs) self.python_call_stack = self.record_python_call_stack( frames_to_skip=frames_to_skip ) if cpp_stack: self.cpp_call_stack = self.record_cpp_call_stack( frames_to_skip=frames_to_skip ) def record_cpp_call_stack(self, frames_to_skip: int) -> infra.Stack: """Records the current C++ call stack in the diagnostic.""" stack = _cpp_call_stack(frames_to_skip=frames_to_skip) stack.message = "C++ call stack" self.with_stack(stack) return stack class ExportDiagnosticEngine: """PyTorch ONNX Export diagnostic engine. The only purpose of creating this class instead of using `DiagnosticContext` directly is to provide a background context for `diagnose` calls inside exporter. By design, one `torch.onnx.export` call should initialize one diagnostic context. All `diagnose` calls inside exporter should be made in the context of that export. However, since diagnostic context is currently being accessed via a global variable, there is no guarantee that the context is properly initialized. Therefore, we need to provide a default background context to fallback to, otherwise any invocation of exporter internals, e.g. unit tests, will fail due to missing diagnostic context. This can be removed once the pipeline for context to flow through the exporter is established. 
""" contexts: list[infra.DiagnosticContext] _background_context: infra.DiagnosticContext def __init__(self) -> None: self.contexts = [] self._background_context = infra.DiagnosticContext( name="torch.onnx", version=torch.__version__, ) @property def background_context(self) -> infra.DiagnosticContext: return self._background_context def create_diagnostic_context( self, name: str, version: str, options: infra.DiagnosticOptions | None = None, ) -> infra.DiagnosticContext: """Creates a new diagnostic context. Args: name: The subject name for the diagnostic context. version: The subject version for the diagnostic context. options: The options for the diagnostic context. Returns: A new diagnostic context. """ if options is None: options = infra.DiagnosticOptions() context: infra.DiagnosticContext[infra.Diagnostic] = infra.DiagnosticContext( name, version, options ) self.contexts.append(context) return context def clear(self): """Clears all diagnostic contexts.""" self.contexts.clear() self._background_context.diagnostics.clear() def to_json(self) -> str: return formatter.sarif_to_json(self.sarif_log()) def dump(self, file_path: str, compress: bool = False) -> None: """Dumps the SARIF log to a file.""" if compress: with gzip.open(file_path, "wt") as f: f.write(self.to_json()) else: with open(file_path, "w") as f: f.write(self.to_json()) def sarif_log(self): log = sarif.SarifLog( version=sarif_version.SARIF_VERSION, schema_uri=sarif_version.SARIF_SCHEMA_LINK, runs=[context.sarif() for context in self.contexts], ) log.runs.append(self._background_context.sarif()) return log engine = ExportDiagnosticEngine() _context = engine.background_context @contextlib.contextmanager def create_export_diagnostic_context() -> Generator[ infra.DiagnosticContext, None, None ]: """Create a diagnostic context for export. This is a workaround for code robustness since diagnostic context is accessed by export internals via global variable. See `ExportDiagnosticEngine` for more details. """ global _context assert _context == engine.background_context, ( "Export context is already set. Nested export is not supported." ) _context = engine.create_diagnostic_context( "torch.onnx.export", torch.__version__, ) try: yield _context finally: _context = engine.background_context def diagnose( rule: infra.Rule, level: infra.Level, message: str | None = None, frames_to_skip: int = 2, **kwargs, ) -> TorchScriptOnnxExportDiagnostic: """Creates a diagnostic and record it in the global diagnostic context. This is a wrapper around `context.log` that uses the global diagnostic context. """ diagnostic = TorchScriptOnnxExportDiagnostic( rule, level, message, frames_to_skip=frames_to_skip, **kwargs ) export_context().log(diagnostic) return diagnostic def export_context() -> infra.DiagnosticContext: global _context return _context ```
================================================================================================================================== SOURCE CODE FILE: _rules.py LINES: 155 SIZE: 36.93 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\_rules.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs """ GENERATED CODE - DO NOT EDIT DIRECTLY This file is generated by gen_diagnostics.py. See tools/onnx/gen_diagnostics.py for more information. Diagnostic rules for PyTorch ONNX export. """ import dataclasses from typing import Tuple # flake8: noqa from torch.onnx._internal.diagnostics import infra """ GENERATED CODE - DO NOT EDIT DIRECTLY The purpose of generating a class for each rule is to override the `format_message` method to provide more details in the signature about the format arguments. """ class _NodeMissingOnnxShapeInference(infra.Rule): """Node is missing ONNX shape inference.""" def format_message(self, op_name) -> str: # type: ignore[override] """Returns the formatted default message of this Rule. Message template: 'The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.' """ return self.message_default_template.format(op_name=op_name) def format( # type: ignore[override] self, level: infra.Level, op_name ) -> Tuple[infra.Rule, infra.Level, str]: """Returns a tuple of (Rule, Level, message) for this Rule. Message template: 'The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.' """ return self, level, self.format_message(op_name=op_name) class _MissingCustomSymbolicFunction(infra.Rule): """Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.""" def format_message(self, op_name) -> str: # type: ignore[override] """Returns the formatted default message of this Rule. Message template: 'ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version.' """ return self.message_default_template.format(op_name=op_name) def format( # type: ignore[override] self, level: infra.Level, op_name ) -> Tuple[infra.Rule, infra.Level, str]: """Returns a tuple of (Rule, Level, message) for this Rule. Message template: 'ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version.' """ return self, level, self.format_message(op_name=op_name) class _MissingStandardSymbolicFunction(infra.Rule): """Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.""" def format_message( # type: ignore[override] self, op_name, opset_version, issue_url ) -> str: """Returns the formatted default message of this Rule. Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}." """ return self.message_default_template.format( op_name=op_name, opset_version=opset_version, issue_url=issue_url ) def format( # type: ignore[override] self, level: infra.Level, op_name, opset_version, issue_url ) -> Tuple[infra.Rule, infra.Level, str]: """Returns a tuple of (Rule, Level, message) for this Rule. 
Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}." """ return ( self, level, self.format_message( op_name=op_name, opset_version=opset_version, issue_url=issue_url ), ) class _OperatorSupportedInNewerOpsetVersion(infra.Rule): """Operator is supported in newer opset version.""" def format_message( # type: ignore[override] self, op_name, opset_version, supported_opset_version ) -> str: """Returns the formatted default message of this Rule. Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version." """ return self.message_default_template.format( op_name=op_name, opset_version=opset_version, supported_opset_version=supported_opset_version, ) def format( # type: ignore[override] self, level: infra.Level, op_name, opset_version, supported_opset_version ) -> Tuple[infra.Rule, infra.Level, str]: """Returns a tuple of (Rule, Level, message) for this Rule. Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version." """ return ( self, level, self.format_message( op_name=op_name, opset_version=opset_version, supported_opset_version=supported_opset_version, ), ) class _FxGraphToOnnx(infra.Rule): """Transforms graph from FX IR to ONNX IR.""" def format_message(self, graph_name) -> str: # type: ignore[override] """Returns the formatted default message of this Rule. Message template: 'Transforming FX graph {graph_name} to ONNX graph.' """ return self.message_default_template.format(graph_name=graph_name) def format( # type: ignore[override] self, level: infra.Level, graph_name ) -> Tuple[infra.Rule, infra.Level, str]: """Returns a tuple of (Rule, Level, message) for this Rule. Message template: 'Transforming FX graph {graph_name} to ONNX graph.' """ return self, level, self.format_message(graph_name=graph_name) class _FxNodeToOnnx(infra.Rule): """Transforms an FX node to an ONNX node.""" def format_message(self, node_repr) -> str: # type: ignore[override] """Returns the formatted default message of this Rule. Message template: 'Transforming FX node {node_repr} to ONNX node.' """ return self.message_default_template.format(node_repr=node_repr) def format( # type: ignore[override] self, level: infra.Level, node_repr ) -> Tuple[infra.Rule, infra.Level, str]: """Returns a tuple of (Rule, Level, message) for this Rule. Message template: 'Transforming FX node {node_repr} to ONNX node.' """ return self, level, self.format_message(node_repr=node_repr) class _FxPass(infra.Rule): """FX graph transformation during ONNX export before converting from FX IR to ONNX IR.""" def format_message(self, pass_name) -> str: # type: ignore[override] """Returns the formatted default message of this Rule. Message template: 'Running {pass_name} pass.' """ return self.message_default_template.format(pass_name=pass_name) def format( # type: ignore[override] self, level: infra.Level, pass_name ) -> Tuple[infra.Rule, infra.Level, str]: """Returns a tuple of (Rule, Level, message) for this Rule. Message template: 'Running {pass_name} pass.' 
""" return self, level, self.format_message(pass_name=pass_name) class _NoSymbolicFunctionForCallFunction(infra.Rule): """Cannot find symbolic function to convert the "call_function" FX node to ONNX.""" def format_message(self, target) -> str: # type: ignore[override] """Returns the formatted default message of this Rule. Message template: 'No symbolic function to convert the "call_function" node {target} to ONNX. ' """ return self.message_default_template.format(target=target) def format( # type: ignore[override] self, level: infra.Level, target ) -> Tuple[infra.Rule, infra.Level, str]: """Returns a tuple of (Rule, Level, message) for this Rule. Message template: 'No symbolic function to convert the "call_function" node {target} to ONNX. ' """ return self, level, self.format_message(target=target) class _UnsupportedFxNodeAnalysis(infra.Rule): """Result from FX graph analysis to reveal unsupported FX nodes.""" def format_message( # type: ignore[override] self, node_op_to_target_mapping ) -> str: """Returns the formatted default message of this Rule. Message template: 'Unsupported FX nodes: {node_op_to_target_mapping}. ' """ return self.message_default_template.format( node_op_to_target_mapping=node_op_to_target_mapping ) def format( # type: ignore[override] self, level: infra.Level, node_op_to_target_mapping ) -> Tuple[infra.Rule, infra.Level, str]: """Returns a tuple of (Rule, Level, message) for this Rule. Message template: 'Unsupported FX nodes: {node_op_to_target_mapping}. ' """ return ( self, level, self.format_message(node_op_to_target_mapping=node_op_to_target_mapping), ) class _OpLevelDebugging(infra.Rule): """Report any op level validation failure in warnings.""" def format_message(self, node, symbolic_fn) -> str: # type: ignore[override] """Returns the formatted default message of this Rule. Message template: 'FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation.' """ return self.message_default_template.format(node=node, symbolic_fn=symbolic_fn) def format( # type: ignore[override] self, level: infra.Level, node, symbolic_fn ) -> Tuple[infra.Rule, infra.Level, str]: """Returns a tuple of (Rule, Level, message) for this Rule. Message template: 'FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation.' """ return self, level, self.format_message(node=node, symbolic_fn=symbolic_fn) class _FindOpschemaMatchedSymbolicFunction(infra.Rule): """Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas.""" def format_message(self, symbolic_fn, node) -> str: # type: ignore[override] """Returns the formatted default message of this Rule. Message template: 'The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}.' """ return self.message_default_template.format(symbolic_fn=symbolic_fn, node=node) def format( # type: ignore[override] self, level: infra.Level, symbolic_fn, node ) -> Tuple[infra.Rule, infra.Level, str]: """Returns a tuple of (Rule, Level, message) for this Rule. Message template: 'The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}.' """ return self, level, self.format_message(symbolic_fn=symbolic_fn, node=node) class _FxNodeInsertTypePromotion(infra.Rule): """Determine if type promotion is required for the FX node. Insert cast nodes if needed.""" def format_message(self, target) -> str: # type: ignore[override] """Returns the formatted default message of this Rule. Message template: 'Performing explicit type promotion for node {target}. 
' """ return self.message_default_template.format(target=target) def format( # type: ignore[override] self, level: infra.Level, target ) -> Tuple[infra.Rule, infra.Level, str]: """Returns a tuple of (Rule, Level, message) for this Rule. Message template: 'Performing explicit type promotion for node {target}. ' """ return self, level, self.format_message(target=target) class _FindOperatorOverloadsInOnnxRegistry(infra.Rule): """Find the list of OnnxFunction of the PyTorch operator in onnx registry.""" def format_message(self, node) -> str: # type: ignore[override] """Returns the formatted default message of this Rule. Message template: 'Checking if the FX node: {node} is supported in onnx registry.' """ return self.message_default_template.format(node=node) def format( # type: ignore[override] self, level: infra.Level, node ) -> Tuple[infra.Rule, infra.Level, str]: """Returns a tuple of (Rule, Level, message) for this Rule. Message template: 'Checking if the FX node: {node} is supported in onnx registry.' """ return self, level, self.format_message(node=node) @dataclasses.dataclass class _POERules(infra.RuleCollection): node_missing_onnx_shape_inference: _NodeMissingOnnxShapeInference = dataclasses.field( default=_NodeMissingOnnxShapeInference.from_sarif( **{ "id": "POE0001", "name": "node-missing-onnx-shape-inference", "short_description": {"text": "Node is missing ONNX shape inference."}, "full_description": { "text": "Node is missing ONNX shape inference. This usually happens when the node is not valid under standard ONNX operator spec.", "markdown": "Node is missing ONNX shape inference.\nThis usually happens when the node is not valid under standard ONNX operator spec.\n", }, "message_strings": { "default": { "text": "The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function." } }, "help_uri": None, "properties": {"deprecated": False, "tags": []}, } ), init=False, ) """Node is missing ONNX shape inference.""" missing_custom_symbolic_function: _MissingCustomSymbolicFunction = dataclasses.field( default=_MissingCustomSymbolicFunction.from_sarif( **{ "id": "POE0002", "name": "missing-custom-symbolic-function", "short_description": { "text": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX." }, "full_description": { "text": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.", "markdown": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.\n", }, "message_strings": { "default": { "text": "ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version." } }, "help_uri": None, "properties": {"deprecated": False, "tags": []}, } ), init=False, ) """Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.""" missing_standard_symbolic_function: _MissingStandardSymbolicFunction = dataclasses.field( default=_MissingStandardSymbolicFunction.from_sarif( **{ "id": "POE0003", "name": "missing-standard-symbolic-function", "short_description": { "text": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX." 
}, "full_description": { "text": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.", "markdown": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.\n", }, "message_strings": { "default": { "text": "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}." } }, "help_uri": None, "properties": {"deprecated": False, "tags": []}, } ), init=False, ) """Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.""" operator_supported_in_newer_opset_version: _OperatorSupportedInNewerOpsetVersion = dataclasses.field( default=_OperatorSupportedInNewerOpsetVersion.from_sarif( **{ "id": "POE0004", "name": "operator-supported-in-newer-opset-version", "short_description": { "text": "Operator is supported in newer opset version." }, "full_description": { "text": "Operator is supported in newer opset version.", "markdown": "Operator is supported in newer opset version.\n\nExample:\n```python\ntorch.onnx.export(model, args, ..., opset_version=9)\n```\n", }, "message_strings": { "default": { "text": "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version." } }, "help_uri": None, "properties": {"deprecated": False, "tags": []}, } ), init=False, ) """Operator is supported in newer opset version.""" fx_graph_to_onnx: _FxGraphToOnnx = dataclasses.field( default=_FxGraphToOnnx.from_sarif( **{ "id": "FXE0007", "name": "fx-graph-to-onnx", "short_description": { "text": "Transforms graph from FX IR to ONNX IR." }, "full_description": { "text": "Transforms graph from FX IR to ONNX IR.", "markdown": "This diagnostic tracks the transformation process from an FX Graph (in FX IR) to an ONNX Graph (in ONNX IR).\n\n## Key Representations:\n\n- **FX Graph**: The graph in FX IR produced by dynamo or symbolic tracing.\n- **ONNX Graph**: The graph in ONNX IR and [operators](https://onnx.ai/onnx/operators/).\n\n## Additional Notes:\n\n- Prior to this transformation step, the FX graph undergoes preprocessing through multiple FX passes.\n To gain insight into these transformations, refer to diagnostic `FXE0010`.\n- To enable a detailed view of the graph transformation in progress within this diagnostic, switch to the DEBUG mode.\n\n - Set DiagnosticOptions.verbosity_level to logging.DEBUG.\n - Activate the environment variable TORCH_LOGS='onnx_diagnostics'.\n\n- For specific information related to node-level FX to ONNX transformations, explore the diagnostic `FXE0008`.\n", }, "message_strings": { "default": { "text": "Transforming FX graph {graph_name} to ONNX graph." } }, "help_uri": None, "properties": {"deprecated": False, "tags": []}, } ), init=False, ) """Transforms graph from FX IR to ONNX IR.""" fx_node_to_onnx: _FxNodeToOnnx = dataclasses.field( default=_FxNodeToOnnx.from_sarif( **{ "id": "FXE0008", "name": "fx-node-to-onnx", "short_description": {"text": "Transforms an FX node to an ONNX node."}, "full_description": { "text": "Transforms an FX node to an ONNX node.", "markdown": "This diagnostic tracks the transformation process from an FX Node to ONNX [Operators](https://onnx.ai/onnx/operators/).\n\nThe process of converting FX Node to ONNX Node involves dealing with six distinct node types:\n 1. 
`placeholder`: Represents a module input, maps to an ONNX graph input.\n 2. `call_module`: Symbolizes a call to a submodule, maps to an ONNX\n 3. `call_method`: Symbolizes a method call. Not yet implemented.\n 4. `call_function`: Symbolizes a function call. [Core ATen](https://pytorch.org/docs/stable/ir.html#core-aten-ir) is expected\n as the function call target. The mapping from ATen to ONNX is implemented by [ONNXScript torchlib](https://github.com/microsoft/onnxscript/tree/main/onnxscript/function_libs/torch_lib/ops).\n This [guide](https://pytorch.org/docs/stable/onnx.html#onnx-script-functions) shows how to write and register a custom symbolic function for call_function FX node.\n 5. `get_attr`: Indicates an attribute access within the current module. Maps to an ONNX graph initializer.\n 6. `output`: Represents the module's output. Maps to an ONNX graph output.\n\nFor a granular understanding of how each node type is transformed, refer to the implementation details in `FxOnnxInterpreter`.\n", }, "message_strings": { "default": { "text": "Transforming FX node {node_repr} to ONNX node." } }, "help_uri": None, "properties": {"deprecated": False, "tags": []}, } ), init=False, ) """Transforms an FX node to an ONNX node.""" fx_pass: _FxPass = dataclasses.field( default=_FxPass.from_sarif( **{ "id": "FXE0010", "name": "fx-pass", "short_description": { "text": "FX graph transformation during ONNX export before converting from FX IR to ONNX IR." }, "full_description": { "text": "FX graph transformation during ONNX export before converting from FX IR to ONNX IR.", "markdown": "This diagnostic tracks the FX passes executed during the ONNX export process prior\nto converting from FX IR (Intermediate Representation) to ONNX IR.\n\nUnder the scope of ONNX export, an FX pass refers to a specific transformation applied to the FX GraphModule.\nThe primary aim of these passes is to streamline the graph into a format that aligns more with the ONNX IR.\nMoreover, these passes work to substitute unsupported FX IR features with those recognized and endorsed by\nONNX IR. Common transformations include, but aren't limited to, decomposition, functionalization and\ntype promotion.\n\nFor those who are interested in a comprehensive log detailing the modifications made during these passes,\nthere are a couple of options:\n\n- Set DiagnosticOptions.verbosity_level to logging.DEBUG.\n- Activate the environment variable TORCH_LOGS='onnx_diagnostics'.\n\nHowever, it's noteworthy that by default, such detailed logging is turned off. The primary reason being\nits considerable impact on performance.\n\nFor an in-depth understanding of each specific pass, please refer to the directory: torch/onnx/_internal/fx/passes.\n", }, "message_strings": {"default": {"text": "Running {pass_name} pass."}}, "help_uri": None, "properties": {"deprecated": False, "tags": []}, } ), init=False, ) """FX graph transformation during ONNX export before converting from FX IR to ONNX IR.""" no_symbolic_function_for_call_function: _NoSymbolicFunctionForCallFunction = dataclasses.field( default=_NoSymbolicFunctionForCallFunction.from_sarif( **{ "id": "FXE0011", "name": "no-symbolic-function-for-call-function", "short_description": { "text": 'Cannot find symbolic function to convert the "call_function" FX node to ONNX.' }, "full_description": { "text": 'Cannot find symbolic function to convert the "call_function" FX node to ONNX. 
', "markdown": 'This error occurs when the ONNX converter is unable to find a corresponding symbolic function\nto convert a "call_function" node in the input graph to its equivalence in ONNX. The "call_function"\nnode represents a normalized function call in PyTorch, such as "torch.aten.ops.add".\n\nTo resolve this error, you can try one of the following:\n\n- If exists, apply the auto-fix suggested by the diagnostic. TODO: this part is not available yet.\n- Rewrite the model using only supported PyTorch operators or functions.\n- Follow this [guide](https://pytorch.org/tutorials/beginner/onnx/onnx_registry_tutorial.html#overview) to write and\n register a custom symbolic function for the unsupported call_function FX node.\n', }, "message_strings": { "default": { "text": 'No symbolic function to convert the "call_function" node {target} to ONNX. ' } }, "help_uri": None, "properties": {"deprecated": False, "tags": []}, } ), init=False, ) """Cannot find symbolic function to convert the "call_function" FX node to ONNX.""" unsupported_fx_node_analysis: _UnsupportedFxNodeAnalysis = dataclasses.field( default=_UnsupportedFxNodeAnalysis.from_sarif( **{ "id": "FXE0012", "name": "unsupported-fx-node-analysis", "short_description": { "text": "Result from FX graph analysis to reveal unsupported FX nodes." }, "full_description": { "text": "Result from FX graph analysis to reveal unsupported FX nodes.", "markdown": "This error indicates that an FX graph contains one or more unsupported nodes. The error message\nis typically accompanied by a list of the unsupported nodes found during analysis.\n\nTo resolve this error, you can try resolving each individual unsupported node error by following\nthe suggestions by its diagnostic. Typically, options include:\n\n- If exists, apply the auto-fix suggested by the diagnostic. TODO: this part is not available yet.\n- Rewrite the model using only supported PyTorch operators or functions.\n- Follow this [guide](https://pytorch.org/docs/stable/onnx.html#onnx-script-functions) to write and\n register a custom symbolic function for the unsupported call_function FX node.\n", }, "message_strings": { "default": { "text": "Unsupported FX nodes: {node_op_to_target_mapping}. " } }, "help_uri": None, "properties": {"deprecated": False, "tags": []}, } ), init=False, ) """Result from FX graph analysis to reveal unsupported FX nodes.""" op_level_debugging: _OpLevelDebugging = dataclasses.field( default=_OpLevelDebugging.from_sarif( **{ "id": "FXE0013", "name": "op-level-debugging", "short_description": { "text": "Report any op level validation failure in warnings." }, "full_description": { "text": "Report any op level validation failure in warnings.", "markdown": "This warning message indicates that during op level debugging, certain symbolic functions\nhave failed to match the results of torch ops when using real tensors generated from fake\ntensors. It is important to note that the symbolic functions may not necessarily be\nincorrect, as the validation process is non-deterministic and should only be used as a\nreference.\n\nThere are two categories of warnings that can be triggered:\n\n1. Non-validated operators:\n If the warnings are caused by the following errors, they can be disregarded by users,\n as these errors occur due to the non-deterministic nature of the validation. 
However,\n it is important to be aware that the operators have not been validated.\n\n - IndexError: Unsupported input arguments of randomized dimensions/indices(INT64).\n - RuntimeError: Unsupported input arguments for torch ops are generated.\n - ValueError: Arguments/keyword arguments do not match the signature of the symbolic function.\n\n2. Potentially wrong torchlib operators:\n If the warnings are triggered by the following error, users should be aware that the symbolic functions\n may be incorrect in dispatching or implementation. In such cases, it is recommended to report\n the issue to the PyTorch-ONNX team, or create/register a custom symbolic function to replace the default one.\n\n - AssertionError: The symbolic function is potentially wrong as the results do not match the results of torch ops.\n - TypeError: The symbolic function is potentially wrong as the opschema doesn't match inputs.\n", }, "message_strings": { "default": { "text": "FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation." } }, "help_uri": None, "properties": {"deprecated": False, "tags": []}, } ), init=False, ) """Report any op level validation failure in warnings.""" find_opschema_matched_symbolic_function: _FindOpschemaMatchedSymbolicFunction = dataclasses.field( default=_FindOpschemaMatchedSymbolicFunction.from_sarif( **{ "id": "FXE0014", "name": "find-opschema-matched-symbolic-function", "short_description": { "text": "Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas." }, "full_description": { "text": "Find the OnnxFunction that matches the input dtypes by comparing them with their opschemas. A warning will be issued if the matched OnnxFunction is not an exact match.", "markdown": "When an ATen/Custom operator is registered and needs to be dispatched to an OnnxFunction, the input/attribute\ndtypes of the ATen/Custom operator are compared with the input/attribute dtypes of the OnnxFunction opschemas\nto find a match. However, if a perfect/exact match is not found, the dispatcher will attempt to find\nthe nearest match with the highest number of input/attribute dtypes matching the OnnxFunction opschemas, while\nissuing a warning.\n\nThere are two types of level that can be triggered in this rule:\n\n1. NOTE: A perfect match is found, and no warning is issued.\n2. WARNING: The matched OnnxFunction is not a perfect/exact match.\n\nHere are some suggestions based on the WARNING situation:\n\n1. If there are NO errors or mismatches in the results, it is safe to disregard this warning,\n as the definition of OnnxFunction schema is usually more stringent.\n2. If there are errors or mismatches in the results, it is recommended to:\n (a) Enable op_level_debugging to determine if the OnnxFunction might be incorrect.\n (b) Report the issue to the PyTorch-ONNX team.\n (c) Create/register a custom symbolic function to replace the default one.\n", }, "message_strings": { "default": { "text": "The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}." } }, "help_uri": None, "properties": {"deprecated": False, "tags": []}, } ), init=False, ) """Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas.""" fx_node_insert_type_promotion: _FxNodeInsertTypePromotion = dataclasses.field( default=_FxNodeInsertTypePromotion.from_sarif( **{ "id": "FXE0015", "name": "fx-node-insert-type-promotion", "short_description": { "text": "Determine if type promotion is required for the FX node. 
Insert cast nodes if needed." }, "full_description": { "text": "Determine if type promotion is required for the FX node. Insert cast nodes if needed.", "markdown": "This diagnostic monitors the node-level type promotion insertion process. In PyTorch, there is an automatic process called implicit type promotion,\nwhere the input types of an operator are promoted to a common type. The determination of the common type is based on the type promotion rule specific to each operator.\nTo learn more about PyTorch's type promotion rules, refer to the [elementwise_dtypes doc](https://github.com/pytorch/pytorch/blob/f044613f78df713fb57f70c608483c9f10ad332e/torch/_prims_common/__init__.py#L1252-L1335)\nand [torch._refs ops](https://github.com/pytorch/pytorch/blob/a475ea4542dfe961c9d097e33ab5041f61c8c17f/torch/_refs/__init__.py#L484).\n\nHowever, implicit type promotion is not supported in ONNX. Therefore, to replicate the PyTorch behavior, we need to explicitly insert cast nodes.\nThis diagnostic tracks the process of node-level type promotion insertion.\n\nThe type promotion rules used by this process can be found in `torch/onnx/_internal/fx/passes/type_promotion.py.`\nTo update or add new type promotion rules, please refer to the [Note: Update type promotion rule] section.\n", }, "message_strings": { "default": { "text": "Performing explicit type promotion for node {target}. " } }, "help_uri": None, "properties": {"deprecated": False, "tags": []}, } ), init=False, ) """Determine if type promotion is required for the FX node. Insert cast nodes if needed.""" find_operator_overloads_in_onnx_registry: _FindOperatorOverloadsInOnnxRegistry = dataclasses.field( default=_FindOperatorOverloadsInOnnxRegistry.from_sarif( **{ "id": "FXE0016", "name": "find-operator-overloads-in-onnx-registry", "short_description": { "text": "Find the list of OnnxFunction of the PyTorch operator in onnx registry." }, "full_description": { "text": "This rule involves finding the list of OnnxFunction for the PyTorch operator overload in the ONNX registry. If the operator overload is not supported but its default overload is, a warning will be issued. If both the operator overload and its default overload are not supported, an error will be issued.", "markdown": "The operator overload name serves the purpose of verifying whether a PyTorch operator is registered in the ONNX registry.\nIf it's not found, the dispatcher takes a fallback approach and tries to locate the default overload of the PyTorch\noperator in the registry. If even the default overload is absent, it signifies that the operator is officially unsupported.\n\nThere are three types of level that can be triggered in this rule:\n\n1. NOTE: The op overload is supported.\n2. WARNING: The op overload is not supported, but it's default overload is supported.\n3. ERROR: The op overload is not supported, and it's default overload is also not supported.\n\nHere are some suggestions based on the WARNING situation:\n\n1. If there are NO errors or mismatches in the results, it is safe to disregard this warning.\n2. If there are errors or mismatches in the results, it is recommended to:\n (a) Enable op_level_debugging to determine if the OnnxFunction might be incorrect.\n (b) Report the unsupported overload to the PyTorch-ONNX team.\n (c) Create/register a custom symbolic function to replace the default one.\n\nHere are some suggestions based on the ERROR situation:\n\n1. Report the unsupported operator to the PyTorch-ONNX team.\n2. 
Create/register a custom symbolic function to replace the default one.\n", }, "message_strings": { "default": { "text": "Checking if the FX node: {node} is supported in onnx registry." } }, "help_uri": None, "properties": {"deprecated": False, "tags": []}, } ), init=False, ) """Find the list of OnnxFunction of the PyTorch operator in onnx registry.""" rules = _POERules() ```
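Because this file is generated, each `_*Rule` subclass only narrows the `format_message`/`format` signatures of `infra.Rule`; the rule data itself lives in the SARIF dictionaries passed to `from_sarif`. A small sketch of how a rule is typically consumed follows; the operator name and opset versions are illustrative, not authoritative support data.

```py
# Sketch only: format() returns the (rule, level, message) triple that the
# diagnostic machinery ultimately records.
from torch.onnx._internal.diagnostics import levels, rules

rule, level, message = rules.operator_supported_in_newer_opset_version.format(
    levels.ERROR,
    op_name="aten::tril",        # illustrative operator
    opset_version=9,             # illustrative "requested" opset
    supported_opset_version=14,  # illustrative "supported since" opset
)
print(message)
# "Exporting the operator 'aten::tril' to ONNX opset version 9 is not supported.
#  Support for this operator was added in version 14, try exporting with this version."
```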
========================================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.59 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\__init__.py ENCODING: utf-8 ```py from ._infra import ( DiagnosticOptions, Graph, Invocation, Level, levels, Location, Rule, RuleCollection, Stack, StackFrame, Tag, ThreadFlowLocation, ) from .context import Diagnostic, DiagnosticContext, RuntimeErrorWithDiagnostic __all__ = [ "Diagnostic", "DiagnosticContext", "DiagnosticOptions", "Graph", "Invocation", "Level", "levels", "Location", "Rule", "RuleCollection", "RuntimeErrorWithDiagnostic", "Stack", "StackFrame", "Tag", "ThreadFlowLocation", ] ```
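Beyond the concrete classes re-exported here, `RuleCollection` can also be subclassed on the fly from a plain list of rules. The sketch below assumes `custom_collection_from_list` converts the kebab-case rule name into a snake_case attribute (as its helper's name suggests); the rule itself is invented for illustration.

```py
# Sketch only: build an ad-hoc RuleCollection from hand-written Rule instances.
from torch.onnx._internal.diagnostics import infra

my_rule = infra.Rule(
    id="EX0001",  # hypothetical id
    name="example-rule",
    message_default_template="Example diagnostic about {subject}.",
)

collection = infra.RuleCollection.custom_collection_from_list(
    "ExampleRules", [my_rule]
)

assert my_rule in collection               # membership is keyed on (id, name)
assert collection.example_rule is my_rule  # "example-rule" -> "example_rule" field
```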
======================================================================================================================================== SOURCE CODE FILE: _infra.py LINES: 1 SIZE: 9.83 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\_infra.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs """This file defines an additional layer of abstraction on top of the SARIF OM.""" from __future__ import annotations import dataclasses import enum import logging from typing import TYPE_CHECKING from torch.onnx._internal.diagnostics.infra import formatter, sarif if TYPE_CHECKING: from collections.abc import Mapping, Sequence class Level(enum.IntEnum): """The level of a diagnostic. This class is used to represent the level of a diagnostic. The levels are defined by the SARIF specification, and are not modifiable. For alternative categories, please use infra.Tag instead. When selecting a level, please consider the following guidelines: - NONE: Informational result that does not indicate the presence of a problem. - NOTE: An opportunity for improvement was found. - WARNING: A potential problem was found. - ERROR: A serious problem was found. This level is a subclass of enum.IntEnum, and can be used as an integer. Its integer value maps to the logging levels in Python's logging module. The mapping is as follows: Level.NONE = logging.DEBUG = 10 Level.NOTE = logging.INFO = 20 Level.WARNING = logging.WARNING = 30 Level.ERROR = logging.ERROR = 40 """ NONE = 10 NOTE = 20 WARNING = 30 ERROR = 40 levels = Level class Tag(enum.Enum): """The tag of a diagnostic. This class can be inherited to define custom tags.""" class PatchedPropertyBag(sarif.PropertyBag): """Key/value pairs that provide additional information about the object. The definition of PropertyBag via SARIF spec is "A property bag is an object (section 3.6) containing an unordered set of properties with arbitrary names." However it is not reflected in the json file, and therefore not captured by the python representation. This patch adds additional **kwargs to the `__init__` method to allow recording arbitrary key/value pairs. 
""" def __init__(self, tags: list[str] | None = None, **kwargs): super().__init__(tags=tags) self.__dict__.update(kwargs) @dataclasses.dataclass(frozen=True) class Rule: id: str name: str message_default_template: str short_description: str | None = None full_description: str | None = None full_description_markdown: str | None = None help_uri: str | None = None @classmethod def from_sarif(cls, **kwargs): """Returns a rule from the SARIF reporting descriptor.""" short_description = kwargs.get("short_description", {}).get("text") full_description = kwargs.get("full_description", {}).get("text") full_description_markdown = kwargs.get("full_description", {}).get("markdown") help_uri = kwargs.get("help_uri") rule = cls( id=kwargs["id"], name=kwargs["name"], message_default_template=kwargs["message_strings"]["default"]["text"], short_description=short_description, full_description=full_description, full_description_markdown=full_description_markdown, help_uri=help_uri, ) return rule def sarif(self) -> sarif.ReportingDescriptor: """Returns a SARIF reporting descriptor of this Rule.""" short_description = ( sarif.MultiformatMessageString(text=self.short_description) if self.short_description is not None else None ) full_description = ( sarif.MultiformatMessageString( text=self.full_description, markdown=self.full_description_markdown ) if self.full_description is not None else None ) return sarif.ReportingDescriptor( id=self.id, name=self.name, short_description=short_description, full_description=full_description, help_uri=self.help_uri, ) def format(self, level: Level, *args, **kwargs) -> tuple[Rule, Level, str]: """Returns a tuple of (rule, level, message) for a diagnostic. This method is used to format the message of a diagnostic. The message is formatted using the default template of this rule, and the arguments passed in as `*args` and `**kwargs`. The level is used to override the default level of this rule. """ return (self, level, self.format_message(*args, **kwargs)) def format_message(self, *args, **kwargs) -> str: """Returns the formatted default message of this Rule. This method should be overridden (with code generation) by subclasses to reflect the exact arguments needed by the message template. This is a helper method to create the default message for a diagnostic. """ return self.message_default_template.format(*args, **kwargs) @dataclasses.dataclass class Location: uri: str | None = None line: int | None = None message: str | None = None start_column: int | None = None end_column: int | None = None snippet: str | None = None function: str | None = None def sarif(self) -> sarif.Location: """Returns the SARIF representation of this location.""" return sarif.Location( physical_location=sarif.PhysicalLocation( artifact_location=sarif.ArtifactLocation(uri=self.uri), region=sarif.Region( start_line=self.line, start_column=self.start_column, end_column=self.end_column, snippet=sarif.ArtifactContent(text=self.snippet), ), ), message=sarif.Message(text=self.message) if self.message is not None else None, ) @dataclasses.dataclass class StackFrame: location: Location def sarif(self) -> sarif.StackFrame: """Returns the SARIF representation of this stack frame.""" return sarif.StackFrame(location=self.location.sarif()) @dataclasses.dataclass class Stack: """Records a stack trace. 
The frames are in order from newest to oldest stack frame.""" frames: list[StackFrame] = dataclasses.field(default_factory=list) message: str | None = None def sarif(self) -> sarif.Stack: """Returns the SARIF representation of this stack.""" return sarif.Stack( frames=[frame.sarif() for frame in self.frames], message=sarif.Message(text=self.message) if self.message is not None else None, ) @dataclasses.dataclass class ThreadFlowLocation: """Records code location and the initial state.""" location: Location state: Mapping[str, str] index: int stack: Stack | None = None def sarif(self) -> sarif.ThreadFlowLocation: """Returns the SARIF representation of this thread flow location.""" return sarif.ThreadFlowLocation( location=self.location.sarif(), state=self.state, stack=self.stack.sarif() if self.stack is not None else None, ) @dataclasses.dataclass class Graph: """A graph of diagnostics. This class stores the string representation of a model graph. The `nodes` and `edges` fields are unused in the current implementation. """ graph: str name: str description: str | None = None def sarif(self) -> sarif.Graph: """Returns the SARIF representation of this graph.""" return sarif.Graph( description=sarif.Message(text=self.graph), properties=PatchedPropertyBag(name=self.name, description=self.description), ) @dataclasses.dataclass class RuleCollection: _rule_id_name_set: frozenset[tuple[str, str]] = dataclasses.field(init=False) def __post_init__(self) -> None: self._rule_id_name_set = frozenset( { (field.default.id, field.default.name) for field in dataclasses.fields(self) if isinstance(field.default, Rule) } ) def __contains__(self, rule: Rule) -> bool: """Checks if the rule is in the collection.""" return (rule.id, rule.name) in self._rule_id_name_set @classmethod def custom_collection_from_list( cls, new_collection_class_name: str, rules: Sequence[Rule] ) -> RuleCollection: """Creates a custom class inherited from RuleCollection with the list of rules.""" return dataclasses.make_dataclass( new_collection_class_name, [ ( formatter.kebab_case_to_snake_case(rule.name), type(rule), dataclasses.field(default=rule), ) for rule in rules ], bases=(cls,), )() class Invocation: # TODO: Implement this. # Tracks top level call arguments and diagnostic options. def __init__(self) -> None: raise NotImplementedError @dataclasses.dataclass class DiagnosticOptions: """Options for diagnostic context. Attributes: verbosity_level: Set the amount of information logged for each diagnostics, equivalent to the 'level' in Python logging module. warnings_as_errors: When True, warning diagnostics are treated as error diagnostics. """ verbosity_level: int = dataclasses.field(default=logging.INFO) """Set the amount of information logged for each diagnostics, equivalent to the 'level' in Python logging module.""" warnings_as_errors: bool = dataclasses.field(default=False) """If True, warning diagnostics are treated as error diagnostics.""" ```
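Two details of this layer are worth calling out: `Level` is an `IntEnum` whose values match the stdlib `logging` levels, and each dataclass carries a `sarif()` method that produces the corresponding SARIF object. A short sketch, with an invented file name and snippet:

```py
# Sketch only: Level interoperates with logging levels, and Location renders
# straight into a SARIF physical location.
import logging
from torch.onnx._internal.diagnostics import infra

assert infra.Level.WARNING == logging.WARNING == 30

loc = infra.Location(
    uri="model.py",  # hypothetical source file
    line=42,
    message="Node defined here",
    snippet="y = torch.tril(x)",
)
sarif_location = loc.sarif()
print(sarif_location.physical_location.artifact_location.uri)  # model.py
```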
========================================================================================================================================= SOURCE CODE FILE: context.py LINES: 6 SIZE: 16.40 KB PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\context.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs """A diagnostic context based on SARIF.""" from __future__ import annotations import contextlib import dataclasses import gzip import logging from typing import Callable, Generic, Literal, TYPE_CHECKING, TypeVar from typing_extensions import Self from torch.onnx._internal.diagnostics import infra from torch.onnx._internal.diagnostics.infra import formatter, sarif, utils from torch.onnx._internal.diagnostics.infra.sarif import version as sarif_version if TYPE_CHECKING: from collections.abc import Generator, Mapping # This is a workaround for mypy not supporting Self from typing_extensions. _Diagnostic = TypeVar("_Diagnostic", bound="Diagnostic") diagnostic_logger: logging.Logger = logging.getLogger(__name__) @dataclasses.dataclass class Diagnostic: rule: infra.Rule level: infra.Level message: str | None = None locations: list[infra.Location] = dataclasses.field(default_factory=list) stacks: list[infra.Stack] = dataclasses.field(default_factory=list) graphs: list[infra.Graph] = dataclasses.field(default_factory=list) thread_flow_locations: list[infra.ThreadFlowLocation] = dataclasses.field( default_factory=list ) additional_messages: list[str] = dataclasses.field(default_factory=list) tags: list[infra.Tag] = dataclasses.field(default_factory=list) source_exception: Exception | None = None """The exception that caused this diagnostic to be created.""" logger: logging.Logger = dataclasses.field(init=False, default=diagnostic_logger) """The logger for this diagnostic. 
Defaults to 'diagnostic_logger' which has the same log level setting with `DiagnosticOptions.verbosity_level`.""" _current_log_section_depth: int = 0 def __post_init__(self) -> None: pass def sarif(self) -> sarif.Result: """Returns the SARIF Result representation of this diagnostic.""" message = self.message or self.rule.message_default_template if self.additional_messages: additional_message = "\n".join(self.additional_messages) message_markdown = ( f"{message}\n\n## Additional Message:\n\n{additional_message}" ) else: message_markdown = message kind: Literal["informational", "fail"] = ( "informational" if self.level == infra.Level.NONE else "fail" ) sarif_result = sarif.Result( message=sarif.Message(text=message, markdown=message_markdown), level=self.level.name.lower(), # type: ignore[arg-type] rule_id=self.rule.id, kind=kind, ) sarif_result.locations = [location.sarif() for location in self.locations] sarif_result.stacks = [stack.sarif() for stack in self.stacks] sarif_result.graphs = [graph.sarif() for graph in self.graphs] sarif_result.code_flows = [ sarif.CodeFlow( thread_flows=[ sarif.ThreadFlow( locations=[loc.sarif() for loc in self.thread_flow_locations] ) ] ) ] sarif_result.properties = sarif.PropertyBag( tags=[tag.value for tag in self.tags] ) return sarif_result def with_location(self: Self, location: infra.Location) -> Self: """Adds a location to the diagnostic.""" self.locations.append(location) return self def with_thread_flow_location( self: Self, location: infra.ThreadFlowLocation ) -> Self: """Adds a thread flow location to the diagnostic.""" self.thread_flow_locations.append(location) return self def with_stack(self: Self, stack: infra.Stack) -> Self: """Adds a stack to the diagnostic.""" self.stacks.append(stack) return self def with_graph(self: Self, graph: infra.Graph) -> Self: """Adds a graph to the diagnostic.""" self.graphs.append(graph) return self @contextlib.contextmanager def log_section( self, level: int, message: str, *args, **kwargs ) -> Generator[None, None, None]: """ Context manager for a section of log messages, denoted by a title message and increased indentation. Same api as `logging.Logger.log`. This context manager logs the given title at the specified log level, increases the current section depth for subsequent log messages, and ensures that the section depth is decreased again when exiting the context. Args: level: The log level. message: The title message to log. *args: The arguments to the message. Use `LazyString` to defer the expensive evaluation of the arguments until the message is actually logged. **kwargs: The keyword arguments for `logging.Logger.log`. Yields: None: This context manager does not yield any value. Example: >>> with DiagnosticContext("DummyContext", "1.0"): ... rule = infra.Rule("RuleID", "DummyRule", "Rule message") ... diagnostic = Diagnostic(rule, infra.Level.WARNING) ... with diagnostic.log_section(logging.INFO, "My Section"): ... diagnostic.log(logging.INFO, "My Message") ... with diagnostic.log_section(logging.INFO, "My Subsection"): ... diagnostic.log(logging.INFO, "My Submessage") ... 
diagnostic.additional_messages ['## My Section', 'My Message', '### My Subsection', 'My Submessage'] """ if self.logger.isEnabledFor(level): indented_format_message = ( f"##{'#' * self._current_log_section_depth} {message}" ) self.log( level, indented_format_message, *args, **kwargs, ) self._current_log_section_depth += 1 try: yield finally: self._current_log_section_depth -= 1 def log(self, level: int, message: str, *args, **kwargs) -> None: """Logs a message within the diagnostic. Same api as `logging.Logger.log`. If logger is not enabled for the given level, the message will not be logged. Otherwise, the message will be logged and also added to the diagnostic's additional_messages. The default setting for `DiagnosticOptions.verbosity_level` is `logging.INFO`. Based on this default, the log level recommendations are as follows. If you've set a different default verbosity level in your application, please adjust accordingly: - logging.ERROR: Log any events leading to application failure. - logging.WARNING: Log events that might result in application issues or failures, although not guaranteed. - logging.INFO: Log general useful information, ensuring minimal performance overhead. - logging.DEBUG: Log detailed debug information, which might affect performance when logged. Args: level: The log level. message: The message to log. *args: The arguments to the message. Use `LazyString` to defer the expensive evaluation of the arguments until the message is actually logged. **kwargs: The keyword arguments for `logging.Logger.log`. """ if self.logger.isEnabledFor(level): formatted_message = message % args self.logger.log(level, formatted_message, **kwargs) self.additional_messages.append(formatted_message) def debug(self, message: str, *args, **kwargs) -> None: """Logs a debug message within the diagnostic. Same api as logging.Logger.debug. Checkout `log` for more details. """ self.log(logging.DEBUG, message, *args, **kwargs) def info(self, message: str, *args, **kwargs) -> None: """Logs an info message within the diagnostic. Same api as logging.Logger.info. Checkout `log` for more details. """ self.log(logging.INFO, message, *args, **kwargs) def warning(self, message: str, *args, **kwargs) -> None: """Logs a warning message within the diagnostic. Same api as logging.Logger.warning. Checkout `log` for more details. """ self.log(logging.WARNING, message, *args, **kwargs) def error(self, message: str, *args, **kwargs) -> None: """Logs an error message within the diagnostic. Same api as logging.Logger.error. Checkout `log` for more details. """ self.log(logging.ERROR, message, *args, **kwargs) def log_source_exception(self, level: int, exception: Exception) -> None: """Logs a source exception within the diagnostic. Invokes `log_section` and `log` to log the exception in markdown section format. """ self.source_exception = exception with self.log_section(level, "Exception log"): self.log(level, "%s", formatter.lazy_format_exception(exception)) def record_python_call_stack(self, frames_to_skip: int) -> infra.Stack: """Records the current Python call stack.""" frames_to_skip += 1 # Skip this function. 
stack = utils.python_call_stack(frames_to_skip=frames_to_skip) self.with_stack(stack) if len(stack.frames) > 0: self.with_location(stack.frames[0].location) return stack def record_python_call( self, fn: Callable, state: Mapping[str, str], message: str | None = None, frames_to_skip: int = 0, ) -> infra.ThreadFlowLocation: """Records a python call as one thread flow step.""" frames_to_skip += 1 # Skip this function. stack = utils.python_call_stack(frames_to_skip=frames_to_skip, frames_to_log=5) location = utils.function_location(fn) location.message = message # Add function location to the top of the stack. stack.frames.insert(0, infra.StackFrame(location=location)) thread_flow_location = infra.ThreadFlowLocation( location=location, state=state, index=len(self.thread_flow_locations), stack=stack, ) self.with_thread_flow_location(thread_flow_location) return thread_flow_location class RuntimeErrorWithDiagnostic(RuntimeError): """Runtime error with enclosed diagnostic information.""" def __init__(self, diagnostic: Diagnostic): super().__init__(diagnostic.message) self.diagnostic = diagnostic @dataclasses.dataclass class DiagnosticContext(Generic[_Diagnostic]): name: str version: str options: infra.DiagnosticOptions = dataclasses.field( default_factory=infra.DiagnosticOptions ) diagnostics: list[_Diagnostic] = dataclasses.field(init=False, default_factory=list) # TODO(bowbao): Implement this. # _invocation: infra.Invocation = dataclasses.field(init=False) _inflight_diagnostics: list[_Diagnostic] = dataclasses.field( init=False, default_factory=list ) _previous_log_level: int = dataclasses.field(init=False, default=logging.WARNING) logger: logging.Logger = dataclasses.field(init=False, default=diagnostic_logger) _bound_diagnostic_type: type = dataclasses.field(init=False, default=Diagnostic) def __enter__(self): self._previous_log_level = self.logger.level self.logger.setLevel(self.options.verbosity_level) return self def __exit__(self, exc_type, exc_val, exc_tb): self.logger.setLevel(self._previous_log_level) return None def sarif(self) -> sarif.Run: """Returns the SARIF Run object.""" unique_rules = {diagnostic.rule for diagnostic in self.diagnostics} return sarif.Run( sarif.Tool( driver=sarif.ToolComponent( name=self.name, version=self.version, rules=[rule.sarif() for rule in unique_rules], ) ), results=[diagnostic.sarif() for diagnostic in self.diagnostics], ) def sarif_log(self) -> sarif.SarifLog: # type: ignore[name-defined] """Returns the SARIF Log object.""" return sarif.SarifLog( version=sarif_version.SARIF_VERSION, schema_uri=sarif_version.SARIF_SCHEMA_LINK, runs=[self.sarif()], ) def to_json(self) -> str: return formatter.sarif_to_json(self.sarif_log()) def dump(self, file_path: str, compress: bool = False) -> None: """Dumps the SARIF log to a file.""" if compress: with gzip.open(file_path, "wt") as f: f.write(self.to_json()) else: with open(file_path, "w") as f: f.write(self.to_json()) def log(self, diagnostic: _Diagnostic) -> None: """Logs a diagnostic. This method should be used only after all the necessary information for the diagnostic has been collected. Args: diagnostic: The diagnostic to add. 
""" if not isinstance(diagnostic, self._bound_diagnostic_type): raise TypeError( f"Expected diagnostic of type {self._bound_diagnostic_type}, got {type(diagnostic)}" ) if self.options.warnings_as_errors and diagnostic.level == infra.Level.WARNING: # type: ignore[attr-defined] diagnostic.level = infra.Level.ERROR # type: ignore[attr-defined] self.diagnostics.append(diagnostic) # type: ignore[arg-type] def log_and_raise_if_error(self, diagnostic: _Diagnostic) -> None: """Logs a diagnostic and raises an exception if it is an error. Use this method for logging non inflight diagnostics where diagnostic level is not known or lower than ERROR. If it is always expected raise, use `log` and explicit `raise` instead. Otherwise there is no way to convey the message that it always raises to Python intellisense and type checking tools. This method should be used only after all the necessary information for the diagnostic has been collected. Args: diagnostic: The diagnostic to add. """ self.log(diagnostic) if diagnostic.level == infra.Level.ERROR: if diagnostic.source_exception is not None: raise diagnostic.source_exception raise RuntimeErrorWithDiagnostic(diagnostic) @contextlib.contextmanager def add_inflight_diagnostic( self, diagnostic: _Diagnostic ) -> Generator[_Diagnostic, None, None]: """Adds a diagnostic to the context. Use this method to add diagnostics that are not created by the context. Args: diagnostic: The diagnostic to add. """ self._inflight_diagnostics.append(diagnostic) try: yield diagnostic finally: self._inflight_diagnostics.pop() def push_inflight_diagnostic(self, diagnostic: _Diagnostic) -> None: """Pushes a diagnostic to the inflight diagnostics stack. Args: diagnostic: The diagnostic to push. Raises: ValueError: If the rule is not supported by the tool. """ self._inflight_diagnostics.append(diagnostic) def pop_inflight_diagnostic(self) -> _Diagnostic: """Pops the last diagnostic from the inflight diagnostics stack. Returns: The popped diagnostic. """ return self._inflight_diagnostics.pop() def inflight_diagnostic(self, rule: infra.Rule | None = None) -> _Diagnostic: if rule is None: # TODO(bowbao): Create builtin-rules and create diagnostic using that. if len(self._inflight_diagnostics) <= 0: raise AssertionError("No inflight diagnostics") return self._inflight_diagnostics[-1] else: for diagnostic in reversed(self._inflight_diagnostics): if diagnostic.rule == rule: return diagnostic raise AssertionError(f"No inflight diagnostic for rule {rule.name}") ```